From 76cf9cce7ae0bafd37626a1ca1f27d7cc43a1556 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Sun, 23 Jul 2023 21:07:35 +0300 Subject: [PATCH 01/50] feat: extend support for select api features --- command/select.go | 63 ++++++++++++++++++++------ storage/s3.go | 113 +++++++++++++++++++++++++++++++++++++--------- 2 files changed, 141 insertions(+), 35 deletions(-) diff --git a/command/select.go b/command/select.go index eb2c2eddf..9d66afd57 100644 --- a/command/select.go +++ b/command/select.go @@ -27,7 +27,7 @@ Options: {{range .VisibleFlags}}{{.}} {{end}} Examples: - 01. Search for all JSON objects with the foo property set to 'bar' and spit them into stdout + 01. Search for all json objects with the foo property set to 'bar' and spit them into stdout > s5cmd {{.HelpName}} --compression gzip --query "SELECT * FROM S3Object s WHERE s.foo='bar'" "s3://bucket/*" ` @@ -42,17 +42,48 @@ func NewSelectCommand() *cli.Command { Aliases: []string{"e"}, Usage: "SQL expression to use to select from the objects", }, - &cli.StringFlag{ + &cli.GenericFlag{ Name: "compression", Usage: "input compression format", - Value: "NONE", + Value: &EnumValue{ + Enum: []string{"none", "gzip", "bzip2"}, + Default: "none", + ConditionFunction: func(str, target string) bool { + return strings.ToUpper(target) == str + }, + }, }, &cli.GenericFlag{ Name: "format", - Usage: "input data format (only JSON supported for the moment)", + Usage: "input data format", + Value: &EnumValue{ + Enum: []string{"json", "csv"}, + Default: "json", + ConditionFunction: func(str, target string) bool { + return strings.ToUpper(target) == str + }, + }, + }, + &cli.GenericFlag{ + Name: "input-structure", + Usage: "structure of the data", Value: &EnumValue{ - Enum: []string{"JSON"}, - Default: "JSON", + Enum: []string{"lines", "document", "comma", "tab"}, + Default: "lines", + ConditionFunction: func(str, target string) bool { + return strings.ToLower(target) == str + }, + }, + }, + &cli.GenericFlag{ + Name: "output-format", + Usage: "output data format", + Value: &EnumValue{ + Enum: []string{"json", "csv"}, + Default: "json", + ConditionFunction: func(str, target string) bool { + return strings.ToUpper(target) == str + }, }, }, &cli.StringSliceFlag{ @@ -104,8 +135,10 @@ func NewSelectCommand() *cli.Command { op: c.Command.Name, fullCommand: fullCommand, // flags + inputFormat: c.String("format"), + inputStructure: c.String("input-structure"), + outputFormat: c.String("output-format"), query: c.String("query"), - compressionType: c.String("compression"), exclude: c.StringSlice("exclude"), forceGlacierTransfer: c.Bool("force-glacier-transfer"), ignoreGlacierWarnings: c.Bool("ignore-glacier-warnings"), @@ -126,7 +159,10 @@ type Select struct { fullCommand string query string + inputFormat string compressionType string + inputStructure string + outputFormat string exclude []string forceGlacierTransfer bool ignoreGlacierWarnings bool @@ -237,9 +273,12 @@ func (s Select) Run(ctx context.Context) error { func (s Select) prepareTask(ctx context.Context, client *storage.S3, url *url.URL, resultCh chan<- json.RawMessage) func() error { return func() error { query := &storage.SelectQuery{ - ExpressionType: "SQL", - Expression: s.query, - CompressionType: s.compressionType, + ExpressionType: "SQL", + Expression: s.query, + InputFormat: s.inputFormat, + InputContentStructure: s.inputStructure, + OutputFormat: s.outputFormat, + CompressionType: s.compressionType, } return client.Select(ctx, url, query, resultCh) @@ -269,9 +308,5 @@ 
func validateSelectCommand(c *cli.Context) error { return fmt.Errorf("source must be remote") } - if !strings.EqualFold(c.String("format"), "JSON") { - return fmt.Errorf("only json supported") - } - return nil } diff --git a/storage/s3.go b/storage/s3.go index 58bd96a29..44a901106 100644 --- a/storage/s3.go +++ b/storage/s3.go @@ -4,6 +4,7 @@ import ( "context" "crypto/rand" "crypto/tls" + "encoding/csv" "encoding/json" "errors" "fmt" @@ -587,31 +588,106 @@ func (s *S3) Get( }) } +type EventStreamDecoder interface { + Decode() ([]byte, error) +} + +type JsonDecoder struct { + decoder *json.Decoder +} + +func NewJsonDecoder(reader io.Reader) EventStreamDecoder { + return &JsonDecoder{ + decoder: json.NewDecoder(reader), + } +} + +func (jd *JsonDecoder) Decode() ([]byte, error) { + var val json.RawMessage + err := jd.decoder.Decode(&val) + if err != nil { + return nil, err + } + return val, nil +} + +type CsvDecoder struct { + decoder *csv.Reader +} + +func NewCsvDecoder(reader io.Reader) EventStreamDecoder { + csvDecoder := &CsvDecoder{ + decoder: csv.NewReader(reader), + } + // returned values from AWS has double quotes in it + // so we enable lazy quotes + csvDecoder.decoder.LazyQuotes = true + return csvDecoder +} + +func (cd *CsvDecoder) Decode() ([]byte, error) { + res, err := cd.decoder.Read() + if err != nil { + return nil, err + } + + result := make([]byte, len(res)) + for _, str := range res { + result = append(result, []byte(str)...) + } + return result, nil +} + type SelectQuery struct { - ExpressionType string - Expression string - CompressionType string + InputFormat string + InputContentStructure string + OutputFormat string + ExpressionType string + Expression string + CompressionType string } func (s *S3) Select(ctx context.Context, url *url.URL, query *SelectQuery, resultCh chan<- json.RawMessage) error { if s.dryRun { return nil } + var inputSerialization *s3.InputSerialization + var outputSerialization *s3.OutputSerialization + var decoder EventStreamDecoder + reader, writer := io.Pipe() - input := &s3.SelectObjectContentInput{ - Bucket: aws.String(url.Bucket), - Key: aws.String(url.Path), - ExpressionType: aws.String(query.ExpressionType), - Expression: aws.String(query.Expression), - InputSerialization: &s3.InputSerialization{ - CompressionType: aws.String(query.CompressionType), + if query.InputFormat == "json" { + inputSerialization = &s3.InputSerialization{ JSON: &s3.JSONInput{ - Type: aws.String("Lines"), + Type: aws.String(query.InputContentStructure), }, - }, - OutputSerialization: &s3.OutputSerialization{ + } + decoder = NewJsonDecoder(reader) + } else if query.InputFormat == "csv" { + inputSerialization = &s3.InputSerialization{ + CSV: &s3.CSVInput{ + FieldDelimiter: aws.String(","), + }, + } + decoder = NewCsvDecoder(reader) + } + + if query.OutputFormat == "json" { + outputSerialization = &s3.OutputSerialization{ JSON: &s3.JSONOutput{}, - }, + } + } else if query.OutputFormat == "csv" { + outputSerialization = &s3.OutputSerialization{ + CSV: &s3.CSVOutput{}, + } + } + input := &s3.SelectObjectContentInput{ + Bucket: aws.String(url.Bucket), + Key: aws.String(url.Path), + ExpressionType: aws.String(query.ExpressionType), + Expression: aws.String(query.Expression), + InputSerialization: inputSerialization, + OutputSerialization: outputSerialization, } resp, err := s.api.SelectObjectContentWithContext(ctx, input) @@ -619,8 +695,6 @@ func (s *S3) Select(ctx context.Context, url *url.URL, query *SelectQuery, resul return err } - reader, writer := io.Pipe() - go 
func() { defer writer.Close() @@ -643,18 +717,15 @@ func (s *S3) Select(ctx context.Context, url *url.URL, query *SelectQuery, resul } } }() - - decoder := json.NewDecoder(reader) for { - var record json.RawMessage - err := decoder.Decode(&record) + val, err := decoder.Decode() if err == io.EOF { break } if err != nil { return err } - resultCh <- record + resultCh <- val } return resp.EventStream.Reader.Err() From 1b65e6926221452bbee9386c34bf87a6eca23c76 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Sun, 23 Jul 2023 21:09:41 +0300 Subject: [PATCH 02/50] fix: check for lowercase --- command/select.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/command/select.go b/command/select.go index 9d66afd57..fc921cb6c 100644 --- a/command/select.go +++ b/command/select.go @@ -49,7 +49,7 @@ func NewSelectCommand() *cli.Command { Enum: []string{"none", "gzip", "bzip2"}, Default: "none", ConditionFunction: func(str, target string) bool { - return strings.ToUpper(target) == str + return strings.ToLower(target) == str }, }, }, @@ -82,7 +82,7 @@ func NewSelectCommand() *cli.Command { Enum: []string{"json", "csv"}, Default: "json", ConditionFunction: func(str, target string) bool { - return strings.ToUpper(target) == str + return strings.ToLower(target) == str }, }, }, From e393d7337c43544d350be6f5762b4f74854c2e2b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Mon, 24 Jul 2023 16:12:44 +0300 Subject: [PATCH 03/50] feat: split into subcommands --- command/select.go | 286 ++++++++++++++++++++++++++++++++++------------ storage/s3.go | 74 +++++++++--- 2 files changed, 269 insertions(+), 91 deletions(-) diff --git a/command/select.go b/command/select.go index fc921cb6c..aac72b2eb 100644 --- a/command/select.go +++ b/command/select.go @@ -17,7 +17,7 @@ import ( "github.com/peak/s5cmd/v2/storage/url" ) -var selectHelpTemplate = `Name: +var defaultSelectHelpTemplate = `Name: {{.HelpName}} - {{.Usage}} Usage: @@ -31,94 +31,227 @@ Examples: > s5cmd {{.HelpName}} --compression gzip --query "SELECT * FROM S3Object s WHERE s.foo='bar'" "s3://bucket/*" ` +func beforeFunc(c *cli.Context) error { + err := validateSelectCommand(c) + if err != nil { + printError(commandFromContext(c), c.Command.Name, err) + } + return err +} + func NewSelectCommand() *cli.Command { + sharedFlags := []cli.Flag{ + &cli.StringFlag{ + Name: "query", + Aliases: []string{"e"}, + Usage: "SQL expression to use to select from the objects", + }, + &cli.GenericFlag{ + Name: "output-format", + Usage: "output format of the result", + Value: &EnumValue{ + Enum: []string{"json", "csv"}, + Default: "json", + ConditionFunction: func(str, target string) bool { + return strings.ToLower(target) == str + }, + }, + }, + &cli.StringSliceFlag{ + Name: "exclude", + Usage: "exclude objects with given pattern", + }, + &cli.BoolFlag{ + Name: "force-glacier-transfer", + Usage: "force transfer of glacier objects whether they are restored or not", + }, + &cli.BoolFlag{ + Name: "ignore-glacier-warnings", + Usage: "turns off glacier warnings: ignore errors encountered during selecting objects", + }, + &cli.BoolFlag{ + Name: "raw", + Usage: "disable the wildcard operations, useful with filenames that contains glob characters", + }, + &cli.BoolFlag{ + Name: "all-versions", + Usage: "list all versions of object(s)", + }, + &cli.StringFlag{ + Name: "version-id", + Usage: "use the specified version of the object", + }, + } cmd := &cli.Command{ Name: "select", HelpName: "select", Usage: "run SQL queries on 
objects", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "query", - Aliases: []string{"e"}, - Usage: "SQL expression to use to select from the objects", - }, - &cli.GenericFlag{ - Name: "compression", - Usage: "input compression format", - Value: &EnumValue{ - Enum: []string{"none", "gzip", "bzip2"}, - Default: "none", - ConditionFunction: func(str, target string) bool { - return strings.ToLower(target) == str + Subcommands: []*cli.Command{ + { + Name: "csv", + Usage: "write queries for csv files", + Flags: append([]cli.Flag{ + &cli.GenericFlag{ + Name: "compression", + Usage: "input compression format", + Value: &EnumValue{ + Enum: []string{"none", "gzip", "bzip2"}, + Default: "none", + ConditionFunction: func(str, target string) bool { + return strings.ToLower(target) == str + }, + }, }, - }, - }, - &cli.GenericFlag{ - Name: "format", - Usage: "input data format", - Value: &EnumValue{ - Enum: []string{"json", "csv"}, - Default: "json", - ConditionFunction: func(str, target string) bool { - return strings.ToUpper(target) == str + &cli.GenericFlag{ + Name: "delimiter", + Usage: "delimiter of the csv file", + Value: &EnumValue{ + Enum: []string{",", "\t"}, + Default: ",", + ConditionFunction: func(str, target string) bool { + return strings.ToLower(target) == str + }, + }, }, + }, sharedFlags...), + CustomHelpTemplate: defaultSelectHelpTemplate, + Before: beforeFunc, + Action: func(c *cli.Context) (err error) { + defer stat.Collect(c.Command.FullName(), &err)() + + fullCommand := commandFromContext(c) + + src, err := url.New(c.Args().Get(0), url.WithVersion(c.String("version-id")), + url.WithRaw(c.Bool("raw")), url.WithAllVersions(c.Bool("all-versions"))) + if err != nil { + printError(fullCommand, c.Command.Name, err) + return err + } + return Select{ + src: src, + op: c.Command.Name, + fullCommand: fullCommand, + // flags + inputFormat: "csv", + inputStructure: c.String("delimiter"), + outputFormat: c.String("output-format"), + compressionType: c.String("compression"), + query: c.String("query"), + exclude: c.StringSlice("exclude"), + forceGlacierTransfer: c.Bool("force-glacier-transfer"), + ignoreGlacierWarnings: c.Bool("ignore-glacier-warnings"), + + storageOpts: NewStorageOpts(c), + }.Run(c.Context) }, }, - &cli.GenericFlag{ - Name: "input-structure", - Usage: "structure of the data", - Value: &EnumValue{ - Enum: []string{"lines", "document", "comma", "tab"}, - Default: "lines", - ConditionFunction: func(str, target string) bool { - return strings.ToLower(target) == str + { + Name: "json", + Usage: "write queries for json files", + Flags: append([]cli.Flag{ + &cli.GenericFlag{ + Name: "compression", + Usage: "input compression format", + Value: &EnumValue{ + Enum: []string{"none", "gzip", "bzip2"}, + Default: "none", + ConditionFunction: func(str, target string) bool { + return strings.ToLower(target) == str + }, + }, }, + &cli.GenericFlag{ + Name: "structure", + Usage: "how objects are aligned in the json file", + Value: &EnumValue{ + Enum: []string{"lines", "document"}, + Default: "lines", + ConditionFunction: func(str, target string) bool { + return strings.ToLower(target) == str + }, + }, + }, + }, sharedFlags...), + CustomHelpTemplate: defaultSelectHelpTemplate, + Before: beforeFunc, + Action: func(c *cli.Context) (err error) { + defer stat.Collect(c.Command.FullName(), &err)() + + fullCommand := commandFromContext(c) + + src, err := url.New(c.Args().Get(0), url.WithVersion(c.String("version-id")), + url.WithRaw(c.Bool("raw")), url.WithAllVersions(c.Bool("all-versions"))) + if 
err != nil { + printError(fullCommand, c.Command.Name, err) + return err + } + return Select{ + src: src, + op: c.Command.Name, + fullCommand: fullCommand, + // flags + inputFormat: "json", + inputStructure: c.String("structure"), + outputFormat: c.String("output-format"), + query: c.String("query"), + compressionType: c.String("compression"), + exclude: c.StringSlice("exclude"), + forceGlacierTransfer: c.Bool("force-glacier-transfer"), + ignoreGlacierWarnings: c.Bool("ignore-glacier-warnings"), + + storageOpts: NewStorageOpts(c), + }.Run(c.Context) }, }, - &cli.GenericFlag{ - Name: "output-format", - Usage: "output data format", - Value: &EnumValue{ - Enum: []string{"json", "csv"}, - Default: "json", - ConditionFunction: func(str, target string) bool { - return strings.ToLower(target) == str + { + Name: "parquet", + Usage: "write queries for parquet files", + Flags: append([]cli.Flag{ + &cli.GenericFlag{ + Name: "output-format", + Usage: "output data format", + Value: &EnumValue{ + Enum: []string{"json", "csv"}, + Default: "json", + ConditionFunction: func(str, target string) bool { + return strings.ToLower(target) == str + }, + }, }, + }, sharedFlags...), + CustomHelpTemplate: defaultSelectHelpTemplate, + Before: beforeFunc, + Action: func(c *cli.Context) (err error) { + defer stat.Collect(c.Command.FullName(), &err)() + + fullCommand := commandFromContext(c) + + src, err := url.New(c.Args().Get(0), url.WithVersion(c.String("version-id")), + url.WithRaw(c.Bool("raw")), url.WithAllVersions(c.Bool("all-versions"))) + if err != nil { + printError(fullCommand, c.Command.Name, err) + return err + } + return Select{ + src: src, + op: c.Command.Name, + fullCommand: fullCommand, + // flags + inputFormat: "parquet", + outputFormat: c.String("output-format"), + query: c.String("query"), + compressionType: c.String("compression"), + exclude: c.StringSlice("exclude"), + forceGlacierTransfer: c.Bool("force-glacier-transfer"), + ignoreGlacierWarnings: c.Bool("ignore-glacier-warnings"), + + storageOpts: NewStorageOpts(c), + }.Run(c.Context) }, }, - &cli.StringSliceFlag{ - Name: "exclude", - Usage: "exclude objects with given pattern", - }, - &cli.BoolFlag{ - Name: "force-glacier-transfer", - Usage: "force transfer of glacier objects whether they are restored or not", - }, - &cli.BoolFlag{ - Name: "ignore-glacier-warnings", - Usage: "turns off glacier warnings: ignore errors encountered during selecting objects", - }, - &cli.BoolFlag{ - Name: "raw", - Usage: "disable the wildcard operations, useful with filenames that contains glob characters", - }, - &cli.BoolFlag{ - Name: "all-versions", - Usage: "list all versions of object(s)", - }, - &cli.StringFlag{ - Name: "version-id", - Usage: "use the specified version of the object", - }, - }, - CustomHelpTemplate: selectHelpTemplate, - Before: func(c *cli.Context) error { - err := validateSelectCommand(c) - if err != nil { - printError(commandFromContext(c), c.Command.Name, err) - } - return err }, + Flags: sharedFlags, + Before: beforeFunc, Action: func(c *cli.Context) (err error) { defer stat.Collect(c.Command.FullName(), &err)() @@ -135,17 +268,20 @@ func NewSelectCommand() *cli.Command { op: c.Command.Name, fullCommand: fullCommand, // flags - inputFormat: c.String("format"), - inputStructure: c.String("input-structure"), + inputFormat: "json", + inputStructure: "lines", outputFormat: c.String("output-format"), query: c.String("query"), + compressionType: c.String("compression"), exclude: c.StringSlice("exclude"), forceGlacierTransfer: 
c.Bool("force-glacier-transfer"), ignoreGlacierWarnings: c.Bool("ignore-glacier-warnings"), storageOpts: NewStorageOpts(c), }.Run(c.Context) + return nil }, + CustomHelpTemplate: defaultSelectHelpTemplate, } cmd.BashComplete = getBashCompleteFn(cmd, true, false) diff --git a/storage/s3.go b/storage/s3.go index 44a901106..af0e1a209 100644 --- a/storage/s3.go +++ b/storage/s3.go @@ -647,40 +647,82 @@ type SelectQuery struct { CompressionType string } -func (s *S3) Select(ctx context.Context, url *url.URL, query *SelectQuery, resultCh chan<- json.RawMessage) error { - if s.dryRun { - return nil - } +const ( + jsonType = "json" + csvType = "csv" + parquetType = "parquet" +) + +func parseInputSerialization(val string, delimiter string) (*s3.InputSerialization, error) { var inputSerialization *s3.InputSerialization - var outputSerialization *s3.OutputSerialization - var decoder EventStreamDecoder - reader, writer := io.Pipe() - if query.InputFormat == "json" { + switch val { + case jsonType: inputSerialization = &s3.InputSerialization{ JSON: &s3.JSONInput{ - Type: aws.String(query.InputContentStructure), + Type: aws.String(delimiter), }, } - decoder = NewJsonDecoder(reader) - } else if query.InputFormat == "csv" { + case csvType: inputSerialization = &s3.InputSerialization{ CSV: &s3.CSVInput{ - FieldDelimiter: aws.String(","), + FieldDelimiter: aws.String(delimiter), }, } - decoder = NewCsvDecoder(reader) + case parquetType: + inputSerialization = &s3.InputSerialization{ + Parquet: &s3.ParquetInput{}, + } + default: + return nil, errors.New("Input serialization is not valid") } - if query.OutputFormat == "json" { + return inputSerialization, nil +} + +func parseOutputSerialization(val string, reader io.Reader) (*s3.OutputSerialization, EventStreamDecoder, error) { + var outputSerialization *s3.OutputSerialization + var decoder EventStreamDecoder + + switch val { + case jsonType: outputSerialization = &s3.OutputSerialization{ JSON: &s3.JSONOutput{}, } - } else if query.OutputFormat == "csv" { + decoder = NewCsvDecoder(reader) + case csvType: outputSerialization = &s3.OutputSerialization{ - CSV: &s3.CSVOutput{}, + CSV: &s3.CSVOutput{ + FieldDelimiter: aws.String(","), + }, } + decoder = NewCsvDecoder(reader) + default: + return nil, nil, errors.New("Output serialization is not valid") } + return outputSerialization, decoder, nil +} + +func (s *S3) Select(ctx context.Context, url *url.URL, query *SelectQuery, resultCh chan<- json.RawMessage) error { + if s.dryRun { + return nil + } + var inputSerialization *s3.InputSerialization + var outputSerialization *s3.OutputSerialization + var decoder EventStreamDecoder + + reader, writer := io.Pipe() + + inputSerialization, err := parseInputSerialization(query.InputFormat, query.InputContentStructure) + if err != nil { + return err + } + + outputSerialization, decoder, err = parseOutputSerialization(query.OutputFormat, reader) + if err != nil { + return err + } + input := &s3.SelectObjectContentInput{ Bucket: aws.String(url.Bucket), Key: aws.String(url.Path), From eb906acc40e0ece3f1396f9c84bb0b6a1ee49d5c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Mon, 24 Jul 2023 18:58:06 +0300 Subject: [PATCH 04/50] refactor: prettier command --- command/select.go | 1 - 1 file changed, 1 deletion(-) diff --git a/command/select.go b/command/select.go index aac72b2eb..3c10523a2 100644 --- a/command/select.go +++ b/command/select.go @@ -279,7 +279,6 @@ func NewSelectCommand() *cli.Command { storageOpts: NewStorageOpts(c), }.Run(c.Context) - return 
nil }, CustomHelpTemplate: defaultSelectHelpTemplate, } From 0119d45b35aed99d0346a794b0864ec67d31873c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Tue, 25 Jul 2023 12:33:43 +0300 Subject: [PATCH 05/50] feat: add credential passing configuration in test utilities --- e2e/select_test.go | 101 +++++++++++++++++++++++++++++++++++++++++++++ e2e/util_test.go | 72 +++++++++++++++++++++++++++++--- 2 files changed, 167 insertions(+), 6 deletions(-) create mode 100644 e2e/select_test.go diff --git a/e2e/select_test.go b/e2e/select_test.go new file mode 100644 index 000000000..428108f88 --- /dev/null +++ b/e2e/select_test.go @@ -0,0 +1,101 @@ +// go:build external +package e2e + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/docker/go-connections/nat" + "github.com/testcontainers/testcontainers-go" + "github.com/testcontainers/testcontainers-go/wait" + "gotest.tools/v3/icmd" +) + +func getSequentialFile(n int, form string) (string, map[int]compareFunc) { + sb := strings.Builder{} + expectedLines := make(map[int]compareFunc) + + for i := 0; i < n; i++ { + var line string + switch form { + case "json": + line = fmt.Sprintf(`{ "line": "%d", "id": "i%d", data: "some event %d" }`, i, i, i) + case "csv": + if i == 0 { + line = "line,id,data" + } else { + line = fmt.Sprintf(`%d,i%d,"some event %d"`, i, i, i) + } + } + sb.WriteString(line) + sb.WriteString("\n") + + expectedLines[i] = equals(line) + } + + return sb.String(), expectedLines +} + +func startMinio(t *testing.T, ctx context.Context) (testcontainers.Container, string, error) { + t.Helper() + port, err := nat.NewPort("", "9000") + if err != nil { + t.Errorf("error while building port\n") + return nil, "", err + } + + req := testcontainers.ContainerRequest{ + Image: "minio/minio", + ExposedPorts: []string{string(port)}, + Cmd: []string{"server", "/data"}, + WaitingFor: wait.ForListeningPort(port), + } + + minioC, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{ + ContainerRequest: req, + Started: true, + }) + if err != nil { + return nil, "", err + } + host, err := minioC.Host(ctx) + if err != nil { + return nil, "", err + } + + mappedPort, err := minioC.MappedPort(ctx, port) + if err != nil { + return nil, "", err + } + address := fmt.Sprintf("http://%s:%d", host, mappedPort.Int()) + return minioC, address, nil +} + +func TestSelectCommand(t *testing.T) { + ctx := context.Background() + container, address, err := startMinio(t, ctx) + if err != nil { + t.Fatalf("Could not start the container.Error:%v\n", err) + } + + t.Cleanup(func() { + if err := container.Terminate(ctx); err != nil { + t.Errorf("Could not terminate the container. 
Error:%v\n", err) + } + }) + region := "us-east-1" + accessKeyId := "minioadmin" + secretKey := "minioadmin" + + s3client, s5cmd := setup(t, withEndpointURL(address), withRegion(region), withAccessKeyId(accessKeyId), withSecretKey(secretKey)) + + createBucket(t, s3client, "bucket") + putFile(t, s3client, "bucket", "filename.txt", "content") + + c := s5cmd("ls", "s3://bucket/") + result := icmd.RunCmd(c, withEnv("AWS_ACCESS_KEY_ID", accessKeyId), withEnv("AWS_SECRET_ACCESS_KEY", secretKey)) + + fmt.Println(result.Stdout()) +} diff --git a/e2e/util_test.go b/e2e/util_test.go index 4ddadf415..f8a1437eb 100644 --- a/e2e/util_test.go +++ b/e2e/util_test.go @@ -77,6 +77,9 @@ func init() { type setupOpts struct { s3backend string endpointURL string + accessKeyId string + secretKey string + region string timeSource gofakes3.TimeSource enableProxy bool } @@ -95,6 +98,24 @@ func withEndpointURL(url string) option { } } +func withAccessKeyId(key string) option { + return func(opts *setupOpts) { + opts.accessKeyId = key + } +} + +func withSecretKey(key string) option { + return func(opts *setupOpts) { + opts.secretKey = key + } +} + +func withRegion(region string) option { + return func(opts *setupOpts) { + opts.region = region + } +} + func withTimeSource(timeSource gofakes3.TimeSource) option { return func(opts *setupOpts) { opts.timeSource = timeSource @@ -107,6 +128,12 @@ func withProxy() option { } } +type credentialCfg struct { + AccessKeyId string + SecretKey string + Region string +} + func setup(t *testing.T, options ...option) (*s3.S3, func(...string) icmd.Cmd) { t.Helper() @@ -133,10 +160,35 @@ func setup(t *testing.T, options ...option) (*s3.S3, func(...string) icmd.Cmd) { if opts.endpointURL != "" { endpoint = opts.endpointURL } + secretKey := "" + if opts.secretKey != "" { + secretKey = opts.secretKey + } + + accessKeyId := "" + if opts.accessKeyId != "" { + accessKeyId = opts.accessKeyId + } + + region := "" + if opts.region != "" { + region = opts.accessKeyId + } + + var cfg *credentialCfg + if region != "" || accessKeyId != "" || secretKey != "" { + cfg = &credentialCfg{ + AccessKeyId: accessKeyId, + SecretKey: secretKey, + Region: region, + } + } else { + cfg = nil + } client := s3client(t, storage.Options{ Endpoint: endpoint, NoVerifySSL: true, - }) + }, cfg) return client, s5cmd(workdir, endpoint) } @@ -171,7 +223,7 @@ func server(t *testing.T, testdir *fs.Dir, opts *setupOpts) string { return endpoint } -func s3client(t *testing.T, options storage.Options) *s3.S3 { +func s3client(t *testing.T, options storage.Options, creds *credentialCfg) *s3.S3 { t.Helper() awsLogLevel := aws.LogOff @@ -180,12 +232,20 @@ func s3client(t *testing.T, options storage.Options) *s3.S3 { } s3Config := aws.NewConfig() - id := defaultAccessKeyID - key := defaultSecretAccessKey + var id, key, region string + + if creds != nil { + id = creds.AccessKeyId + key = creds.SecretKey + region = creds.Region + } else { + id = defaultAccessKeyID + key = defaultSecretAccessKey + region = endpoints.UsEast1RegionID + } + endpoint := options.Endpoint - region := endpoints.UsEast1RegionID isVirtualHost := false - // get environment variables and use external endpoint url. // this can be used to test s3 sources such as GCS, amazon, wasabi etc. 
if isEndpointFromEnv() { From 813468d9e72d107f0e29a698e7aa5f10ca1b7867 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Tue, 25 Jul 2023 21:13:24 +0300 Subject: [PATCH 06/50] try to extend testing, add compression to shared flags --- command/select.go | 203 +++++++++++------------------------ e2e/select_test.go | 258 ++++++++++++++++++++++++++++++++++++++++----- go.mod | 50 ++++++--- 3 files changed, 332 insertions(+), 179 deletions(-) diff --git a/command/select.go b/command/select.go index 3c10523a2..fba5460d9 100644 --- a/command/select.go +++ b/command/select.go @@ -39,6 +39,40 @@ func beforeFunc(c *cli.Context) error { return err } +func buildSelect(c *cli.Context, inputFormat string, inputStructure *string) (cmd *Select, err error) { + defer stat.Collect(c.Command.FullName(), &err)() + + fullCommand := commandFromContext(c) + + src, err := url.New(c.Args().Get(0), url.WithVersion(c.String("version-id")), + url.WithRaw(c.Bool("raw")), url.WithAllVersions(c.Bool("all-versions"))) + if err != nil { + printError(fullCommand, c.Command.Name, err) + return nil, err + } + cmd = &Select{ + src: src, + op: c.Command.Name, + fullCommand: fullCommand, + // flags + inputFormat: inputFormat, + outputFormat: c.String("output-format"), + query: c.String("query"), + compressionType: c.String("compression"), + exclude: c.StringSlice("exclude"), + forceGlacierTransfer: c.Bool("force-glacier-transfer"), + ignoreGlacierWarnings: c.Bool("ignore-glacier-warnings"), + + storageOpts: NewStorageOpts(c), + } + + // parquet files don't have an input structure + if inputStructure != nil { + cmd.inputStructure = *inputStructure + } + return cmd, nil +} + func NewSelectCommand() *cli.Command { sharedFlags := []cli.Flag{ &cli.StringFlag{ @@ -46,6 +80,17 @@ func NewSelectCommand() *cli.Command { Aliases: []string{"e"}, Usage: "SQL expression to use to select from the objects", }, + &cli.GenericFlag{ + Name: "compression", + Usage: "input compression format", + Value: &EnumValue{ + Enum: []string{"none", "gzip", "bzip2"}, + Default: "none", + ConditionFunction: func(str, target string) bool { + return strings.ToLower(target) == str + }, + }, + }, &cli.GenericFlag{ Name: "output-format", Usage: "output format of the result", @@ -91,75 +136,27 @@ func NewSelectCommand() *cli.Command { Name: "csv", Usage: "write queries for csv files", Flags: append([]cli.Flag{ - &cli.GenericFlag{ - Name: "compression", - Usage: "input compression format", - Value: &EnumValue{ - Enum: []string{"none", "gzip", "bzip2"}, - Default: "none", - ConditionFunction: func(str, target string) bool { - return strings.ToLower(target) == str - }, - }, - }, - &cli.GenericFlag{ + &cli.StringFlag{ Name: "delimiter", Usage: "delimiter of the csv file", - Value: &EnumValue{ - Enum: []string{",", "\t"}, - Default: ",", - ConditionFunction: func(str, target string) bool { - return strings.ToLower(target) == str - }, - }, }, }, sharedFlags...), CustomHelpTemplate: defaultSelectHelpTemplate, Before: beforeFunc, Action: func(c *cli.Context) (err error) { - defer stat.Collect(c.Command.FullName(), &err)() - - fullCommand := commandFromContext(c) - - src, err := url.New(c.Args().Get(0), url.WithVersion(c.String("version-id")), - url.WithRaw(c.Bool("raw")), url.WithAllVersions(c.Bool("all-versions"))) + delimiter := c.String("delimiter") + cmd, err := buildSelect(c, "csv", &delimiter) if err != nil { - printError(fullCommand, c.Command.Name, err) + printError(cmd.fullCommand, c.Command.Name, err) return err } - return Select{ - src: src, - 
op: c.Command.Name, - fullCommand: fullCommand, - // flags - inputFormat: "csv", - inputStructure: c.String("delimiter"), - outputFormat: c.String("output-format"), - compressionType: c.String("compression"), - query: c.String("query"), - exclude: c.StringSlice("exclude"), - forceGlacierTransfer: c.Bool("force-glacier-transfer"), - ignoreGlacierWarnings: c.Bool("ignore-glacier-warnings"), - - storageOpts: NewStorageOpts(c), - }.Run(c.Context) + return cmd.Run(c.Context) }, }, { Name: "json", Usage: "write queries for json files", Flags: append([]cli.Flag{ - &cli.GenericFlag{ - Name: "compression", - Usage: "input compression format", - Value: &EnumValue{ - Enum: []string{"none", "gzip", "bzip2"}, - Default: "none", - ConditionFunction: func(str, target string) bool { - return strings.ToLower(target) == str - }, - }, - }, &cli.GenericFlag{ Name: "structure", Usage: "how objects are aligned in the json file", @@ -175,111 +172,31 @@ func NewSelectCommand() *cli.Command { CustomHelpTemplate: defaultSelectHelpTemplate, Before: beforeFunc, Action: func(c *cli.Context) (err error) { - defer stat.Collect(c.Command.FullName(), &err)() - - fullCommand := commandFromContext(c) - - src, err := url.New(c.Args().Get(0), url.WithVersion(c.String("version-id")), - url.WithRaw(c.Bool("raw")), url.WithAllVersions(c.Bool("all-versions"))) + structure := c.String("structure") + cmd, err := buildSelect(c, "json", &structure) if err != nil { - printError(fullCommand, c.Command.Name, err) + printError(cmd.fullCommand, c.Command.Name, err) return err } - return Select{ - src: src, - op: c.Command.Name, - fullCommand: fullCommand, - // flags - inputFormat: "json", - inputStructure: c.String("structure"), - outputFormat: c.String("output-format"), - query: c.String("query"), - compressionType: c.String("compression"), - exclude: c.StringSlice("exclude"), - forceGlacierTransfer: c.Bool("force-glacier-transfer"), - ignoreGlacierWarnings: c.Bool("ignore-glacier-warnings"), - - storageOpts: NewStorageOpts(c), - }.Run(c.Context) + return cmd.Run(c.Context) }, }, { - Name: "parquet", - Usage: "write queries for parquet files", - Flags: append([]cli.Flag{ - &cli.GenericFlag{ - Name: "output-format", - Usage: "output data format", - Value: &EnumValue{ - Enum: []string{"json", "csv"}, - Default: "json", - ConditionFunction: func(str, target string) bool { - return strings.ToLower(target) == str - }, - }, - }, - }, sharedFlags...), + Name: "parquet", + Usage: "write queries for parquet files", + Flags: sharedFlags, CustomHelpTemplate: defaultSelectHelpTemplate, Before: beforeFunc, Action: func(c *cli.Context) (err error) { - defer stat.Collect(c.Command.FullName(), &err)() - - fullCommand := commandFromContext(c) - - src, err := url.New(c.Args().Get(0), url.WithVersion(c.String("version-id")), - url.WithRaw(c.Bool("raw")), url.WithAllVersions(c.Bool("all-versions"))) + cmd, err := buildSelect(c, "parquet", nil) if err != nil { - printError(fullCommand, c.Command.Name, err) + printError(cmd.fullCommand, c.Command.Name, err) return err } - return Select{ - src: src, - op: c.Command.Name, - fullCommand: fullCommand, - // flags - inputFormat: "parquet", - outputFormat: c.String("output-format"), - query: c.String("query"), - compressionType: c.String("compression"), - exclude: c.StringSlice("exclude"), - forceGlacierTransfer: c.Bool("force-glacier-transfer"), - ignoreGlacierWarnings: c.Bool("ignore-glacier-warnings"), - - storageOpts: NewStorageOpts(c), - }.Run(c.Context) + return cmd.Run(c.Context) }, }, }, - Flags: 
sharedFlags, - Before: beforeFunc, - Action: func(c *cli.Context) (err error) { - defer stat.Collect(c.Command.FullName(), &err)() - - fullCommand := commandFromContext(c) - - src, err := url.New(c.Args().Get(0), url.WithVersion(c.String("version-id")), - url.WithRaw(c.Bool("raw")), url.WithAllVersions(c.Bool("all-versions"))) - if err != nil { - printError(fullCommand, c.Command.Name, err) - return err - } - return Select{ - src: src, - op: c.Command.Name, - fullCommand: fullCommand, - // flags - inputFormat: "json", - inputStructure: "lines", - outputFormat: c.String("output-format"), - query: c.String("query"), - compressionType: c.String("compression"), - exclude: c.StringSlice("exclude"), - forceGlacierTransfer: c.Bool("force-glacier-transfer"), - ignoreGlacierWarnings: c.Bool("ignore-glacier-warnings"), - - storageOpts: NewStorageOpts(c), - }.Run(c.Context) - }, CustomHelpTemplate: defaultSelectHelpTemplate, } diff --git a/e2e/select_test.go b/e2e/select_test.go index 428108f88..34f3612e8 100644 --- a/e2e/select_test.go +++ b/e2e/select_test.go @@ -13,26 +13,67 @@ import ( "gotest.tools/v3/icmd" ) -func getSequentialFile(n int, form string) (string, map[int]compareFunc) { +/* +query csv to json {_1, _2, ..}, n objects with header +query json(lines) to json {} n objects +query csv to csv a,b,c, with header +query json(lines) to csv n lines, without header +query json(document) to json -> same object +query json(document) to csv -> not supported(?) +*/ +func getSequentialFile(n int, inputForm, outputForm, structure string) (string, map[int]compareFunc) { sb := strings.Builder{} expectedLines := make(map[int]compareFunc) - for i := 0; i < n; i++ { + for i := 0; i < n+1; i++ { var line string - switch form { + switch inputForm { case "json": - line = fmt.Sprintf(`{ "line": "%d", "id": "i%d", data: "some event %d" }`, i, i, i) + switch structure { + case "lines": + if i == n { + break + } + line = fmt.Sprintf(`{ "line": "%d", "id": "i%d", data: "some event %d" }`, i, i, i) + case "document": + if i == n { + break + } else if i == 0 { + line = fmt.Sprintf(`{\n"obj%d": {"line": "%d", "id": "i%d", data: "some event %d"}`, i, i, i, i) + } else if i == n-1 { + line = fmt.Sprintf(`"obj%d": {"line": "%d", "id": "i%d", data: "some event %d"}\n}`, i, i, i, i) + } else { + line = fmt.Sprintf(`"obj%d": {"line": "%d", "id": "i%d", data: "some event %d"},`, i, i, i, i) + } + + } case "csv": if i == 0 { - line = "line,id,data" + line = fmt.Sprintf("line%sid%sdata", structure, structure) } else { - line = fmt.Sprintf(`%d,i%d,"some event %d"`, i, i, i) + line = fmt.Sprintf(`%d%si%d%s"some event %d"`, i-1, structure, i-1, structure, i-1) } } sb.WriteString(line) sb.WriteString("\n") - - expectedLines[i] = equals(line) + switch outputForm { + case "json": + if i == n { + break + } + line = fmt.Sprintf(`{ "line": "%d", "id": "i%d", data: "some event %d" }`, i, i, i) + expectedLines[i] = equals(line) + case "csv": + structure := "," + if i == 0 { + line = fmt.Sprintf("line%sid%sdata", structure, structure) + } else { + line = fmt.Sprintf(`%d%si%d%s"some event %d"`, i-1, structure, i-1, structure, i-1) + } + expectedLines[i] = equals(line) + default: + expectedLines[i] = equals(line) + } } return sb.String(), expectedLines @@ -73,29 +114,198 @@ func startMinio(t *testing.T, ctx context.Context) (testcontainers.Container, st return minioC, address, nil } -func TestSelectCommand(t *testing.T) { - ctx := context.Background() - container, address, err := startMinio(t, ctx) - if err != nil { - t.Fatalf("Could not 
start the container.Error:%v\n", err) - } +/* +test cases: +json: + + default input structure and output + default input structure and output as csv + document input structure and default output + document input structure and output as csv + +csv: - t.Cleanup(func() { - if err := container.Terminate(ctx); err != nil { - t.Errorf("Could not terminate the container. Error:%v\n", err) + default input structure and output + default input structure and output as csv + \t input structure and default output + \t input structure and output as csv + +parquet: + + // TODO: discuss about testing + default input structure and output + default input structure and output as csv +*/ +func TestSelectCommand(t *testing.T) { + t.Skip() + t.Parallel() + /* + ctx := context.Background() + container, address, err := startMinio(t, ctx) + if err != nil { + t.Fatalf("could not start the container. Error:%v\n", err) } - }) + + t.Cleanup(func() { + if err := container.Terminate(ctx); err != nil { + t.Errorf("could not terminate the container. Error:%v\n", err) + } + }) + */ + // credentials are same for all test cases region := "us-east-1" accessKeyId := "minioadmin" secretKey := "minioadmin" - s3client, s5cmd := setup(t, withEndpointURL(address), withRegion(region), withAccessKeyId(accessKeyId), withSecretKey(secretKey)) + address := "http://127.0.0.1:9000" + // The query is default for all cases, we want to assert the output + // is as expected after a query. + query := "SELECT * FROM s3object s LIMIT 1" + testcases := []struct { + name string + cmd []string + in string + structure string + out string + expected map[int]compareFunc + src string + }{ + // json + { + name: "json-lines select with default input structure and output", + cmd: []string{ + "select", + "json", + "--query", + query, + }, + in: "json", + structure: "lines", + out: "json", + }, + { + name: "json-lines select with default input structure and csv output", + cmd: []string{ + "select", + "json", + "--output-format", + "csv", + "--query", + query, + }, + in: "json", + structure: "lines", + out: "csv", + }, + { + name: "json-lines select with document input structure and output", + cmd: []string{ + "select", + "json", + "--structure", + "document", + "--query", + query, + }, + in: "json", + structure: "document", + out: "json", + }, + { + name: "json-lines select with document input structure and csv output", + cmd: []string{ + "select", + "json", + "--structure", + "document", + "--output-format", + "csv", + "--query", + query, + }, + in: "json", + structure: "document", + out: "csv", + }, + // csv + { + name: "csv select with default delimiter and output", + cmd: []string{ + "select", + "csv", + "--query", + query, + }, + in: "csv", + structure: ",", + out: "json", + }, + { + name: "csv select with default delimiter and csv output", + cmd: []string{ + "select", + "csv", + "--output-format", + "csv", + "--query", + query, + }, + in: "csv", + structure: ",", + out: "csv", + }, + { + name: "csv select with custom delimiter and default output", + cmd: []string{ + "select", + "csv", + "--delimiter", + "\t", + "document", + "--query", + query, + }, + in: "csv", + structure: "\t", + out: "json", + }, + { + name: "csv select with custom delimiter and csv output", + cmd: []string{ + "select", + "csv", + "--delimiter", + "\t", + "--output-format", + "csv", + "--query", + query, + }, + in: "csv", + structure: "\t", + out: "csv", + }, + } + for _, tc := range testcases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + contents, 
expectedLines := getSequentialFile(5, tc.in, tc.out, tc.structure) + filename := fmt.Sprintf("file.%s", tc.in) + bucket := s3BucketFromTestName(t) + src := fmt.Sprintf("s3://%s/%s", bucket, filename) + tc.cmd = append(tc.cmd, src) - createBucket(t, s3client, "bucket") - putFile(t, s3client, "bucket", "filename.txt", "content") + s3client, s5cmd := setup(t, withEndpointURL(address), withRegion(region), withAccessKeyId(accessKeyId), withSecretKey(secretKey)) - c := s5cmd("ls", "s3://bucket/") - result := icmd.RunCmd(c, withEnv("AWS_ACCESS_KEY_ID", accessKeyId), withEnv("AWS_SECRET_ACCESS_KEY", secretKey)) + createBucket(t, s3client, bucket) + putFile(t, s3client, bucket, filename, contents) - fmt.Println(result.Stdout()) + cmd := s5cmd(tc.cmd...) + result := icmd.RunCmd(cmd, withEnv("AWS_ACCESS_KEY_ID", accessKeyId), withEnv("AWS_SECRET_ACCESS_KEY", secretKey)) + fmt.Println(result.Stderr()) + assertLines(t, result.Stdout(), expectedLines) + }) + } } diff --git a/go.mod b/go.mod index d31ead9bc..5c165b4a5 100644 --- a/go.mod +++ b/go.mod @@ -3,36 +3,62 @@ module github.com/peak/s5cmd/v2 go 1.17 require ( - github.com/aws/aws-sdk-go v1.40.25 + github.com/aws/aws-sdk-go v1.40.44 + github.com/docker/go-connections v0.4.0 github.com/golang/mock v1.6.0 - github.com/google/go-cmp v0.4.0 - github.com/hashicorp/go-multierror v1.0.0 + github.com/google/go-cmp v0.5.9 + github.com/hashicorp/go-multierror v1.1.1 github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334 github.com/igungor/gofakes3 v0.0.13 github.com/karrick/godirwalk v1.15.3 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 github.com/lanrat/extsort v1.0.0 github.com/termie/go-shutil v0.0.0-20140729215957-bcacb06fecae + github.com/testcontainers/testcontainers-go v0.21.0 github.com/urfave/cli/v2 v2.11.2 - gotest.tools/v3 v3.0.2 + gotest.tools/v3 v3.0.3 ) require ( + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/Microsoft/go-winio v0.5.2 // indirect + github.com/cenkalti/backoff/v4 v4.2.0 // indirect + github.com/containerd/containerd v1.6.19 // indirect + github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/hashicorp/errwrap v1.0.0 // indirect + github.com/docker/distribution v2.8.2+incompatible // indirect + github.com/docker/docker v23.0.5+incompatible // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/imdario/mergo v0.3.15 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/kr/pretty v0.3.0 // indirect + github.com/klauspost/compress v1.11.13 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/moby/patternmatcher v0.5.0 // indirect + github.com/moby/sys/sequential v0.5.0 // indirect + github.com/moby/term v0.5.0 // indirect + github.com/morikuni/aec v1.0.0 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0-rc2 // indirect + github.com/opencontainers/runc v1.1.5 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect github.com/shabbyrobe/gocovmerge 
v0.0.0-20180507124511-f6ea450bfb63 // indirect + github.com/sirupsen/logrus v1.9.0 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect go.etcd.io/bbolt v1.3.6 // indirect - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect - golang.org/x/sys v0.0.0-20220405210540-1e041c57c461 // indirect - golang.org/x/tools v0.1.1 // indirect - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect - gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect + golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea // indirect + golang.org/x/net v0.7.0 // indirect + golang.org/x/sync v0.1.0 // indirect + golang.org/x/sys v0.8.0 // indirect + golang.org/x/tools v0.2.0 // indirect + google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad // indirect + google.golang.org/grpc v1.47.0 // indirect + google.golang.org/protobuf v1.28.0 // indirect gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce // indirect ) From bc50544c5f2c6fc347ffd38cfe2503e3e039b70f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Wed, 26 Jul 2023 12:01:08 +0300 Subject: [PATCH 07/50] fix: add testing --- command/select.go | 1 + e2e/select_test.go | 103 +- go.sum | 253 +- storage/s3.go | 21 +- vendor/github.com/Azure/go-ansiterm/LICENSE | 21 + vendor/github.com/Azure/go-ansiterm/README.md | 12 + .../github.com/Azure/go-ansiterm/constants.go | 188 + .../github.com/Azure/go-ansiterm/context.go | 7 + .../Azure/go-ansiterm/csi_entry_state.go | 49 + .../Azure/go-ansiterm/csi_param_state.go | 38 + .../go-ansiterm/escape_intermediate_state.go | 36 + .../Azure/go-ansiterm/escape_state.go | 47 + .../Azure/go-ansiterm/event_handler.go | 90 + .../Azure/go-ansiterm/ground_state.go | 24 + .../Azure/go-ansiterm/osc_string_state.go | 31 + vendor/github.com/Azure/go-ansiterm/parser.go | 151 + .../go-ansiterm/parser_action_helpers.go | 99 + .../Azure/go-ansiterm/parser_actions.go | 119 + vendor/github.com/Azure/go-ansiterm/states.go | 71 + .../github.com/Azure/go-ansiterm/utilities.go | 21 + .../Azure/go-ansiterm/winterm/ansi.go | 196 + .../Azure/go-ansiterm/winterm/api.go | 327 + .../go-ansiterm/winterm/attr_translation.go | 100 + .../go-ansiterm/winterm/cursor_helpers.go | 101 + .../go-ansiterm/winterm/erase_helpers.go | 84 + .../go-ansiterm/winterm/scroll_helper.go | 118 + .../Azure/go-ansiterm/winterm/utilities.go | 9 + .../go-ansiterm/winterm/win_event_handler.go | 743 + .../github.com/Microsoft/go-winio/.gitignore | 1 + .../github.com/Microsoft/go-winio/CODEOWNERS | 1 + vendor/github.com/Microsoft/go-winio/LICENSE | 22 + .../github.com/Microsoft/go-winio/README.md | 37 + .../github.com/Microsoft/go-winio/backup.go | 280 + vendor/github.com/Microsoft/go-winio/ea.go | 137 + vendor/github.com/Microsoft/go-winio/file.go | 329 + .../github.com/Microsoft/go-winio/fileinfo.go | 73 + .../github.com/Microsoft/go-winio/hvsock.go | 316 + vendor/github.com/Microsoft/go-winio/pipe.go | 517 + .../Microsoft/go-winio/pkg/guid/guid.go | 228 + .../go-winio/pkg/guid/guid_nonwindows.go | 15 + .../go-winio/pkg/guid/guid_windows.go | 10 + .../Microsoft/go-winio/privilege.go | 203 + .../github.com/Microsoft/go-winio/reparse.go | 128 + vendor/github.com/Microsoft/go-winio/sd.go | 98 + .../github.com/Microsoft/go-winio/syscall.go | 3 + .../Microsoft/go-winio/zsyscall_windows.go | 427 + .../aws/aws-sdk-go/aws/awsutil/prettify.go | 12 +- .../aws-sdk-go/aws/awsutil/string_value.go | 2 + .../aws/aws-sdk-go/aws/client/logger.go | 10 +- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 397 +- 
.../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws-sdk-go/private/protocol/timestamp.go | 4 +- .../aws/aws-sdk-go/service/s3/api.go | 4330 +++++- .../service/s3/s3manager/upload_input.go | 12 +- .../aws/aws-sdk-go/service/sso/api.go | 204 +- .../aws/aws-sdk-go/service/sts/api.go | 252 +- .../github.com/cenkalti/backoff/v4/.gitignore | 25 + vendor/github.com/cenkalti/backoff/v4/LICENSE | 20 + .../github.com/cenkalti/backoff/v4/README.md | 32 + .../github.com/cenkalti/backoff/v4/backoff.go | 66 + .../github.com/cenkalti/backoff/v4/context.go | 62 + .../cenkalti/backoff/v4/exponential.go | 161 + .../github.com/cenkalti/backoff/v4/retry.go | 146 + .../github.com/cenkalti/backoff/v4/ticker.go | 97 + .../github.com/cenkalti/backoff/v4/timer.go | 35 + .../github.com/cenkalti/backoff/v4/tries.go | 38 + .../github.com/containerd/containerd/LICENSE | 191 + .../github.com/containerd/containerd/NOTICE | 16 + .../containerd/containerd/errdefs/errors.go | 92 + .../containerd/containerd/errdefs/grpc.go | 147 + .../containerd/containerd/log/context.go | 69 + .../containerd/pkg/userns/userns_linux.go | 62 + .../pkg/userns/userns_unsupported.go | 26 + .../containerd/platforms/compare.go | 203 + .../containerd/platforms/cpuinfo.go | 131 + .../containerd/platforms/database.go | 116 + .../containerd/platforms/defaults.go | 27 + .../containerd/platforms/defaults_darwin.go | 45 + .../containerd/platforms/defaults_unix.go | 41 + .../containerd/platforms/defaults_windows.go | 95 + .../containerd/platforms/platforms.go | 261 + vendor/github.com/cpuguy83/dockercfg/LICENSE | 21 + .../github.com/cpuguy83/dockercfg/README.md | 8 + vendor/github.com/cpuguy83/dockercfg/auth.go | 184 + .../github.com/cpuguy83/dockercfg/config.go | 65 + vendor/github.com/cpuguy83/dockercfg/load.go | 51 + vendor/github.com/docker/distribution/LICENSE | 202 + .../docker/distribution/digestset/set.go | 247 + .../docker/distribution/reference/helpers.go | 42 + .../distribution/reference/normalize.go | 199 + .../distribution/reference/reference.go | 433 + .../docker/distribution/reference/regexp.go | 143 + vendor/github.com/docker/docker/AUTHORS | 2372 +++ vendor/github.com/docker/docker/LICENSE | 191 + vendor/github.com/docker/docker/NOTICE | 19 + vendor/github.com/docker/docker/api/README.md | 42 + vendor/github.com/docker/docker/api/common.go | 11 + .../docker/docker/api/common_unix.go | 7 + .../docker/docker/api/common_windows.go | 8 + .../docker/docker/api/swagger-gen.yaml | 12 + .../github.com/docker/docker/api/swagger.yaml | 12152 ++++++++++++++++ .../docker/docker/api/types/auth.go | 22 + .../docker/docker/api/types/blkiodev/blkio.go | 23 + .../docker/docker/api/types/client.go | 443 + .../docker/docker/api/types/configs.go | 67 + .../docker/api/types/container/config.go | 96 + .../api/types/container/container_changes.go | 20 + .../api/types/container/container_top.go | 22 + .../api/types/container/container_update.go | 16 + .../api/types/container/create_response.go | 19 + .../docker/api/types/container/deprecated.go | 16 + .../docker/api/types/container/host_config.go | 465 + .../api/types/container/hostconfig_unix.go | 42 + .../api/types/container/hostconfig_windows.go | 40 + .../api/types/container/wait_exit_error.go | 12 + .../api/types/container/wait_response.go | 18 + .../api/types/container/waitcondition.go | 22 + .../docker/docker/api/types/deprecated.go | 14 + .../docker/docker/api/types/error_response.go | 13 + .../docker/api/types/error_response_ext.go | 6 + .../docker/docker/api/types/events/events.go | 47 + 
.../docker/docker/api/types/filters/parse.go | 323 + .../docker/api/types/graph_driver_data.go | 23 + .../docker/docker/api/types/id_response.go | 13 + .../docker/api/types/image/image_history.go | 36 + .../api/types/image_delete_response_item.go | 15 + .../docker/docker/api/types/image_summary.go | 97 + .../docker/docker/api/types/mount/mount.go | 140 + .../docker/api/types/network/network.go | 126 + .../docker/docker/api/types/plugin.go | 203 + .../docker/docker/api/types/plugin_device.go | 25 + .../docker/docker/api/types/plugin_env.go | 25 + .../docker/api/types/plugin_interface_type.go | 21 + .../docker/docker/api/types/plugin_mount.go | 37 + .../docker/api/types/plugin_responses.go | 71 + .../docker/docker/api/types/port.go | 23 + .../docker/api/types/registry/authenticate.go | 21 + .../docker/api/types/registry/registry.go | 120 + .../api/types/service_update_response.go | 12 + .../docker/docker/api/types/stats.go | 181 + .../docker/api/types/strslice/strslice.go | 30 + .../docker/docker/api/types/swarm/common.go | 48 + .../docker/docker/api/types/swarm/config.go | 40 + .../docker/api/types/swarm/container.go | 80 + .../docker/docker/api/types/swarm/network.go | 121 + .../docker/docker/api/types/swarm/node.go | 139 + .../docker/docker/api/types/swarm/runtime.go | 27 + .../docker/api/types/swarm/runtime/gen.go | 3 + .../api/types/swarm/runtime/plugin.pb.go | 754 + .../api/types/swarm/runtime/plugin.proto | 21 + .../docker/docker/api/types/swarm/secret.go | 36 + .../docker/docker/api/types/swarm/service.go | 202 + .../docker/docker/api/types/swarm/swarm.go | 237 + .../docker/docker/api/types/swarm/task.go | 225 + .../docker/docker/api/types/time/timestamp.go | 131 + .../docker/docker/api/types/types.go | 809 + .../docker/api/types/versions/README.md | 14 + .../docker/api/types/versions/compare.go | 65 + .../docker/api/types/volume/cluster_volume.go | 420 + .../docker/api/types/volume/create_options.go | 29 + .../docker/api/types/volume/deprecated.go | 11 + .../docker/api/types/volume/list_response.go | 18 + .../docker/docker/api/types/volume/options.go | 8 + .../docker/docker/api/types/volume/volume.go | 75 + .../docker/api/types/volume/volume_update.go | 7 + .../github.com/docker/docker/client/README.md | 35 + .../docker/docker/client/build_cancel.go | 16 + .../docker/docker/client/build_prune.go | 45 + .../docker/docker/client/checkpoint_create.go | 14 + .../docker/docker/client/checkpoint_delete.go | 20 + .../docker/docker/client/checkpoint_list.go | 28 + .../github.com/docker/docker/client/client.go | 321 + .../docker/docker/client/client_deprecated.go | 27 + .../docker/docker/client/client_unix.go | 11 + .../docker/docker/client/client_windows.go | 8 + .../docker/docker/client/config_create.go | 25 + .../docker/docker/client/config_inspect.go | 36 + .../docker/docker/client/config_list.go | 38 + .../docker/docker/client/config_remove.go | 13 + .../docker/docker/client/config_update.go | 20 + .../docker/docker/client/container_attach.go | 59 + .../docker/docker/client/container_commit.go | 55 + .../docker/docker/client/container_copy.go | 93 + .../docker/docker/client/container_create.go | 83 + .../docker/docker/client/container_diff.go | 23 + .../docker/docker/client/container_exec.go | 66 + .../docker/docker/client/container_export.go | 19 + .../docker/docker/client/container_inspect.go | 53 + .../docker/docker/client/container_kill.go | 18 + .../docker/docker/client/container_list.go | 57 + .../docker/docker/client/container_logs.go | 80 + 
.../docker/docker/client/container_pause.go | 10 + .../docker/docker/client/container_prune.go | 36 + .../docker/docker/client/container_remove.go | 27 + .../docker/docker/client/container_rename.go | 15 + .../docker/docker/client/container_resize.go | 29 + .../docker/docker/client/container_restart.go | 26 + .../docker/docker/client/container_start.go | 23 + .../docker/docker/client/container_stats.go | 42 + .../docker/docker/client/container_stop.go | 30 + .../docker/docker/client/container_top.go | 28 + .../docker/docker/client/container_unpause.go | 10 + .../docker/docker/client/container_update.go | 21 + .../docker/docker/client/container_wait.go | 104 + .../docker/docker/client/disk_usage.go | 33 + .../docker/client/distribution_inspect.go | 38 + .../docker/docker/client/envvars.go | 90 + .../github.com/docker/docker/client/errors.go | 93 + .../github.com/docker/docker/client/events.go | 101 + .../github.com/docker/docker/client/hijack.go | 153 + .../docker/docker/client/image_build.go | 146 + .../docker/docker/client/image_create.go | 37 + .../docker/docker/client/image_history.go | 22 + .../docker/docker/client/image_import.go | 40 + .../docker/docker/client/image_inspect.go | 32 + .../docker/docker/client/image_list.go | 49 + .../docker/docker/client/image_load.go | 29 + .../docker/docker/client/image_prune.go | 36 + .../docker/docker/client/image_pull.go | 64 + .../docker/docker/client/image_push.go | 54 + .../docker/docker/client/image_remove.go | 31 + .../docker/docker/client/image_save.go | 21 + .../docker/docker/client/image_search.go | 53 + .../docker/docker/client/image_tag.go | 37 + .../github.com/docker/docker/client/info.go | 26 + .../docker/docker/client/interface.go | 201 + .../docker/client/interface_experimental.go | 18 + .../docker/docker/client/interface_stable.go | 10 + .../github.com/docker/docker/client/login.go | 25 + .../docker/docker/client/network_connect.go | 19 + .../docker/docker/client/network_create.go | 25 + .../docker/client/network_disconnect.go | 15 + .../docker/docker/client/network_inspect.go | 49 + .../docker/docker/client/network_list.go | 32 + .../docker/docker/client/network_prune.go | 36 + .../docker/docker/client/network_remove.go | 10 + .../docker/docker/client/node_inspect.go | 32 + .../docker/docker/client/node_list.go | 36 + .../docker/docker/client/node_remove.go | 20 + .../docker/docker/client/node_update.go | 17 + .../docker/docker/client/options.go | 210 + .../github.com/docker/docker/client/ping.go | 75 + .../docker/docker/client/plugin_create.go | 23 + .../docker/docker/client/plugin_disable.go | 19 + .../docker/docker/client/plugin_enable.go | 19 + .../docker/docker/client/plugin_inspect.go | 31 + .../docker/docker/client/plugin_install.go | 113 + .../docker/docker/client/plugin_list.go | 33 + .../docker/docker/client/plugin_push.go | 16 + .../docker/docker/client/plugin_remove.go | 20 + .../docker/docker/client/plugin_set.go | 12 + .../docker/docker/client/plugin_upgrade.go | 39 + .../docker/docker/client/request.go | 277 + .../docker/docker/client/secret_create.go | 25 + .../docker/docker/client/secret_inspect.go | 36 + .../docker/docker/client/secret_list.go | 38 + .../docker/docker/client/secret_remove.go | 13 + .../docker/docker/client/secret_update.go | 20 + .../docker/docker/client/service_create.go | 178 + .../docker/docker/client/service_inspect.go | 37 + .../docker/docker/client/service_list.go | 39 + .../docker/docker/client/service_logs.go | 52 + .../docker/docker/client/service_remove.go | 10 + 
.../docker/docker/client/service_update.go | 74 + .../docker/client/swarm_get_unlock_key.go | 21 + .../docker/docker/client/swarm_init.go | 21 + .../docker/docker/client/swarm_inspect.go | 21 + .../docker/docker/client/swarm_join.go | 14 + .../docker/docker/client/swarm_leave.go | 17 + .../docker/docker/client/swarm_unlock.go | 14 + .../docker/docker/client/swarm_update.go | 21 + .../docker/docker/client/task_inspect.go | 32 + .../docker/docker/client/task_list.go | 35 + .../docker/docker/client/task_logs.go | 51 + .../docker/docker/client/transport.go | 17 + .../github.com/docker/docker/client/utils.go | 34 + .../docker/docker/client/version.go | 21 + .../docker/docker/client/volume_create.go | 20 + .../docker/docker/client/volume_inspect.go | 38 + .../docker/docker/client/volume_list.go | 33 + .../docker/docker/client/volume_prune.go | 36 + .../docker/docker/client/volume_remove.go | 21 + .../docker/docker/client/volume_update.go | 24 + .../github.com/docker/docker/errdefs/defs.go | 69 + .../github.com/docker/docker/errdefs/doc.go | 8 + .../docker/docker/errdefs/helpers.go | 279 + .../docker/docker/errdefs/http_helpers.go | 46 + vendor/github.com/docker/docker/errdefs/is.go | 107 + .../docker/docker/pkg/archive/README.md | 1 + .../docker/docker/pkg/archive/archive.go | 1474 ++ .../docker/pkg/archive/archive_linux.go | 100 + .../docker/pkg/archive/archive_other.go | 8 + .../docker/docker/pkg/archive/archive_unix.go | 125 + .../docker/pkg/archive/archive_windows.go | 67 + .../docker/docker/pkg/archive/changes.go | 442 + .../docker/pkg/archive/changes_linux.go | 286 + .../docker/pkg/archive/changes_other.go | 98 + .../docker/docker/pkg/archive/changes_unix.go | 44 + .../docker/pkg/archive/changes_windows.go | 34 + .../docker/docker/pkg/archive/copy.go | 485 + .../docker/docker/pkg/archive/copy_unix.go | 12 + .../docker/docker/pkg/archive/copy_windows.go | 9 + .../docker/docker/pkg/archive/diff.go | 244 + .../docker/docker/pkg/archive/diff_unix.go | 22 + .../docker/docker/pkg/archive/diff_windows.go | 6 + .../docker/docker/pkg/archive/time_linux.go | 16 + .../docker/pkg/archive/time_unsupported.go | 17 + .../docker/docker/pkg/archive/whiteouts.go | 23 + .../docker/docker/pkg/archive/wrap.go | 59 + .../docker/docker/pkg/idtools/idtools.go | 243 + .../docker/docker/pkg/idtools/idtools_unix.go | 325 + .../docker/pkg/idtools/idtools_windows.go | 34 + .../docker/pkg/idtools/usergroupadd_linux.go | 163 + .../pkg/idtools/usergroupadd_unsupported.go | 13 + .../docker/docker/pkg/idtools/utils_unix.go | 32 + .../docker/docker/pkg/ioutils/buffer.go | 51 + .../docker/docker/pkg/ioutils/bytespipe.go | 187 + .../docker/docker/pkg/ioutils/fswriters.go | 161 + .../docker/docker/pkg/ioutils/readers.go | 151 + .../docker/docker/pkg/ioutils/temp_unix.go | 11 + .../docker/docker/pkg/ioutils/temp_windows.go | 16 + .../docker/docker/pkg/ioutils/writeflusher.go | 92 + .../docker/docker/pkg/ioutils/writers.go | 66 + .../docker/pkg/jsonmessage/jsonmessage.go | 283 + .../docker/docker/pkg/longpath/longpath.go | 26 + .../docker/docker/pkg/pools/pools.go | 137 + .../docker/docker/pkg/stdcopy/stdcopy.go | 190 + .../docker/docker/pkg/system/args_windows.go | 16 + .../docker/docker/pkg/system/chtimes.go | 31 + .../docker/pkg/system/chtimes_nowindows.go | 15 + .../docker/pkg/system/chtimes_windows.go | 26 + .../docker/docker/pkg/system/errors.go | 13 + .../docker/docker/pkg/system/filesys.go | 19 + .../docker/pkg/system/filesys_deprecated.go | 35 + .../docker/docker/pkg/system/filesys_unix.go | 17 + 
.../docker/pkg/system/filesys_windows.go | 118 + .../docker/docker/pkg/system/image_os.go | 10 + .../docker/docker/pkg/system/init.go | 22 + .../docker/docker/pkg/system/init_windows.go | 18 + .../docker/docker/pkg/system/lstat_unix.go | 21 + .../docker/docker/pkg/system/lstat_windows.go | 14 + .../docker/docker/pkg/system/meminfo.go | 17 + .../docker/docker/pkg/system/meminfo_linux.go | 69 + .../docker/pkg/system/meminfo_unsupported.go | 9 + .../docker/pkg/system/meminfo_windows.go | 45 + .../docker/docker/pkg/system/mknod.go | 17 + .../docker/docker/pkg/system/mknod_freebsd.go | 14 + .../docker/docker/pkg/system/mknod_unix.go | 14 + .../docker/docker/pkg/system/mknod_windows.go | 11 + .../docker/docker/pkg/system/path.go | 41 + .../docker/docker/pkg/system/path_unix.go | 17 + .../docker/docker/pkg/system/path_windows.go | 48 + .../docker/docker/pkg/system/process_unix.go | 46 + .../docker/pkg/system/process_windows.go | 18 + .../docker/docker/pkg/system/stat_bsd.go | 16 + .../docker/docker/pkg/system/stat_darwin.go | 13 + .../docker/docker/pkg/system/stat_linux.go | 20 + .../docker/docker/pkg/system/stat_openbsd.go | 13 + .../docker/docker/pkg/system/stat_solaris.go | 13 + .../docker/docker/pkg/system/stat_unix.go | 67 + .../docker/docker/pkg/system/stat_windows.go | 49 + .../docker/docker/pkg/system/utimes_unix.go | 25 + .../docker/pkg/system/utimes_unsupported.go | 11 + .../docker/docker/pkg/system/xattrs_linux.go | 37 + .../docker/pkg/system/xattrs_unsupported.go | 14 + .../github.com/docker/go-connections/LICENSE | 191 + .../docker/go-connections/nat/nat.go | 242 + .../docker/go-connections/nat/parse.go | 57 + .../docker/go-connections/nat/sort.go | 96 + .../docker/go-connections/sockets/README.md | 0 .../go-connections/sockets/inmem_socket.go | 81 + .../docker/go-connections/sockets/proxy.go | 51 + .../docker/go-connections/sockets/sockets.go | 38 + .../go-connections/sockets/sockets_unix.go | 35 + .../go-connections/sockets/sockets_windows.go | 27 + .../go-connections/sockets/tcp_socket.go | 22 + .../go-connections/sockets/unix_socket.go | 32 + .../go-connections/tlsconfig/certpool_go17.go | 18 + .../tlsconfig/certpool_other.go | 13 + .../docker/go-connections/tlsconfig/config.go | 254 + .../tlsconfig/config_client_ciphers.go | 17 + .../tlsconfig/config_legacy_client_ciphers.go | 15 + .../docker/go-units/CONTRIBUTING.md | 67 + vendor/github.com/docker/go-units/LICENSE | 191 + vendor/github.com/docker/go-units/MAINTAINERS | 46 + vendor/github.com/docker/go-units/README.md | 16 + vendor/github.com/docker/go-units/circle.yml | 11 + vendor/github.com/docker/go-units/duration.go | 35 + vendor/github.com/docker/go-units/size.go | 154 + vendor/github.com/docker/go-units/ulimit.go | 123 + vendor/github.com/gogo/protobuf/AUTHORS | 15 + vendor/github.com/gogo/protobuf/CONTRIBUTORS | 23 + vendor/github.com/gogo/protobuf/LICENSE | 35 + .../github.com/gogo/protobuf/proto/Makefile | 43 + .../github.com/gogo/protobuf/proto/clone.go | 258 + .../gogo/protobuf/proto/custom_gogo.go | 39 + .../github.com/gogo/protobuf/proto/decode.go | 427 + .../gogo/protobuf/proto/deprecated.go | 63 + .../github.com/gogo/protobuf/proto/discard.go | 350 + .../gogo/protobuf/proto/duration.go | 100 + .../gogo/protobuf/proto/duration_gogo.go | 49 + .../github.com/gogo/protobuf/proto/encode.go | 205 + .../gogo/protobuf/proto/encode_gogo.go | 33 + .../github.com/gogo/protobuf/proto/equal.go | 300 + .../gogo/protobuf/proto/extensions.go | 605 + .../gogo/protobuf/proto/extensions_gogo.go | 389 + 
vendor/github.com/gogo/protobuf/proto/lib.go | 973 ++ .../gogo/protobuf/proto/lib_gogo.go | 50 + .../gogo/protobuf/proto/message_set.go | 181 + .../gogo/protobuf/proto/pointer_reflect.go | 357 + .../protobuf/proto/pointer_reflect_gogo.go | 59 + .../gogo/protobuf/proto/pointer_unsafe.go | 308 + .../protobuf/proto/pointer_unsafe_gogo.go | 56 + .../gogo/protobuf/proto/properties.go | 610 + .../gogo/protobuf/proto/properties_gogo.go | 36 + .../gogo/protobuf/proto/skip_gogo.go | 119 + .../gogo/protobuf/proto/table_marshal.go | 3009 ++++ .../gogo/protobuf/proto/table_marshal_gogo.go | 388 + .../gogo/protobuf/proto/table_merge.go | 676 + .../gogo/protobuf/proto/table_unmarshal.go | 2249 +++ .../protobuf/proto/table_unmarshal_gogo.go | 385 + vendor/github.com/gogo/protobuf/proto/text.go | 930 ++ .../gogo/protobuf/proto/text_gogo.go | 57 + .../gogo/protobuf/proto/text_parser.go | 1018 ++ .../gogo/protobuf/proto/timestamp.go | 113 + .../gogo/protobuf/proto/timestamp_gogo.go | 49 + .../gogo/protobuf/proto/wrappers.go | 1888 +++ .../gogo/protobuf/proto/wrappers_gogo.go | 113 + .../golang/protobuf}/AUTHORS | 0 .../golang/protobuf}/CONTRIBUTORS | 0 vendor/github.com/golang/protobuf/LICENSE | 28 + .../golang/protobuf/proto/buffer.go | 324 + .../golang/protobuf/proto/defaults.go | 63 + .../golang/protobuf/proto/deprecated.go | 113 + .../golang/protobuf/proto/discard.go | 58 + .../golang/protobuf/proto/extensions.go | 356 + .../golang/protobuf/proto/properties.go | 306 + .../github.com/golang/protobuf/proto/proto.go | 167 + .../golang/protobuf/proto/registry.go | 317 + .../golang/protobuf/proto/text_decode.go | 801 + .../golang/protobuf/proto/text_encode.go | 560 + .../github.com/golang/protobuf/proto/wire.go | 78 + .../golang/protobuf/proto/wrappers.go | 34 + .../github.com/golang/protobuf/ptypes/any.go | 179 + .../golang/protobuf/ptypes/any/any.pb.go | 62 + .../github.com/golang/protobuf/ptypes/doc.go | 10 + .../golang/protobuf/ptypes/duration.go | 76 + .../protobuf/ptypes/duration/duration.pb.go | 63 + .../golang/protobuf/ptypes/timestamp.go | 112 + .../protobuf/ptypes/timestamp/timestamp.pb.go | 64 + .../google/go-cmp/cmp/cmpopts/equate.go | 10 +- .../google/go-cmp/cmp/cmpopts/ignore.go | 11 +- .../google/go-cmp/cmp/cmpopts/sort.go | 16 +- .../go-cmp/cmp/cmpopts/struct_filter.go | 23 +- .../google/go-cmp/cmp/cmpopts/xform.go | 3 +- .../github.com/google/go-cmp/cmp/compare.go | 166 +- .../google/go-cmp/cmp/export_panic.go | 5 +- .../google/go-cmp/cmp/export_unsafe.go | 23 +- .../go-cmp/cmp/internal/diff/debug_disable.go | 3 +- .../go-cmp/cmp/internal/diff/debug_enable.go | 3 +- .../google/go-cmp/cmp/internal/diff/diff.go | 88 +- .../google/go-cmp/cmp/internal/flags/flags.go | 2 +- .../cmp/internal/flags/toolchain_legacy.go | 10 - .../cmp/internal/flags/toolchain_recent.go | 10 - .../go-cmp/cmp/internal/function/func.go | 2 +- .../google/go-cmp/cmp/internal/value/name.go | 164 + .../cmp/internal/value/pointer_purego.go | 13 +- .../cmp/internal/value/pointer_unsafe.go | 13 +- .../google/go-cmp/cmp/internal/value/sort.go | 2 +- .../google/go-cmp/cmp/internal/value/zero.go | 48 - .../github.com/google/go-cmp/cmp/options.go | 17 +- vendor/github.com/google/go-cmp/cmp/path.go | 31 +- vendor/github.com/google/go-cmp/cmp/report.go | 7 +- .../google/go-cmp/cmp/report_compare.go | 215 +- .../google/go-cmp/cmp/report_references.go | 264 + .../google/go-cmp/cmp/report_reflect.go | 312 +- .../google/go-cmp/cmp/report_slices.go | 365 +- .../google/go-cmp/cmp/report_text.go | 89 +- 
.../google/go-cmp/cmp/report_value.go | 2 +- vendor/github.com/google/uuid/.travis.yml | 9 + vendor/github.com/google/uuid/CONTRIBUTING.md | 10 + vendor/github.com/google/uuid/CONTRIBUTORS | 9 + vendor/github.com/google/uuid/LICENSE | 27 + vendor/github.com/google/uuid/README.md | 19 + vendor/github.com/google/uuid/dce.go | 80 + vendor/github.com/google/uuid/doc.go | 12 + vendor/github.com/google/uuid/hash.go | 53 + vendor/github.com/google/uuid/marshal.go | 38 + vendor/github.com/google/uuid/node.go | 90 + vendor/github.com/google/uuid/node_js.go | 12 + vendor/github.com/google/uuid/node_net.go | 33 + vendor/github.com/google/uuid/null.go | 118 + vendor/github.com/google/uuid/sql.go | 59 + vendor/github.com/google/uuid/time.go | 123 + vendor/github.com/google/uuid/util.go | 43 + vendor/github.com/google/uuid/uuid.go | 294 + vendor/github.com/google/uuid/version1.go | 44 + vendor/github.com/google/uuid/version4.go | 76 + .../github.com/hashicorp/errwrap/errwrap.go | 9 + .../hashicorp/go-multierror/.travis.yml | 12 - .../hashicorp/go-multierror/README.md | 69 +- .../hashicorp/go-multierror/append.go | 2 + .../hashicorp/go-multierror/group.go | 38 + .../hashicorp/go-multierror/multierror.go | 82 +- .../github.com/imdario/mergo/.deepsource.toml | 12 + vendor/github.com/imdario/mergo/.gitignore | 33 + vendor/github.com/imdario/mergo/.travis.yml | 12 + .../imdario/mergo/CODE_OF_CONDUCT.md | 46 + .../github.com/imdario/mergo/CONTRIBUTING.md | 112 + vendor/github.com/imdario/mergo/LICENSE | 28 + vendor/github.com/imdario/mergo/README.md | 236 + vendor/github.com/imdario/mergo/SECURITY.md | 14 + vendor/github.com/imdario/mergo/doc.go | 143 + vendor/github.com/imdario/mergo/map.go | 178 + vendor/github.com/imdario/mergo/merge.go | 409 + vendor/github.com/imdario/mergo/mergo.go | 81 + vendor/github.com/klauspost/compress/LICENSE | 28 + .../klauspost/compress/fse/README.md | 79 + .../klauspost/compress/fse/bitreader.go | 122 + .../klauspost/compress/fse/bitwriter.go | 168 + .../klauspost/compress/fse/bytereader.go | 47 + .../klauspost/compress/fse/compress.go | 684 + .../klauspost/compress/fse/decompress.go | 374 + .../github.com/klauspost/compress/fse/fse.go | 144 + .../klauspost/compress/huff0/.gitignore | 1 + .../klauspost/compress/huff0/README.md | 89 + .../klauspost/compress/huff0/bitreader.go | 329 + .../klauspost/compress/huff0/bitwriter.go | 210 + .../klauspost/compress/huff0/bytereader.go | 54 + .../klauspost/compress/huff0/compress.go | 657 + .../klauspost/compress/huff0/decompress.go | 1164 ++ .../klauspost/compress/huff0/huff0.go | 273 + .../klauspost/compress/snappy/.gitignore | 16 + .../klauspost/compress/snappy/AUTHORS | 15 + .../klauspost/compress/snappy/CONTRIBUTORS | 37 + .../klauspost/compress/snappy/LICENSE | 27 + .../klauspost/compress/snappy/README | 107 + .../klauspost/compress/snappy/decode.go | 237 + .../klauspost/compress/snappy/decode_amd64.go | 14 + .../klauspost/compress/snappy/decode_amd64.s | 482 + .../klauspost/compress/snappy/decode_other.go | 115 + .../klauspost/compress/snappy/encode.go | 285 + .../klauspost/compress/snappy/encode_amd64.go | 29 + .../klauspost/compress/snappy/encode_amd64.s | 730 + .../klauspost/compress/snappy/encode_other.go | 238 + .../klauspost/compress/snappy/runbench.cmd | 2 + .../klauspost/compress/snappy/snappy.go | 98 + .../klauspost/compress/zstd/README.md | 417 + .../klauspost/compress/zstd/bitreader.go | 136 + .../klauspost/compress/zstd/bitwriter.go | 169 + .../klauspost/compress/zstd/blockdec.go | 739 + 
.../klauspost/compress/zstd/blockenc.go | 871 ++ .../compress/zstd/blocktype_string.go | 85 + .../klauspost/compress/zstd/bytebuf.go | 127 + .../klauspost/compress/zstd/bytereader.go | 88 + .../klauspost/compress/zstd/decodeheader.go | 202 + .../klauspost/compress/zstd/decoder.go | 561 + .../compress/zstd/decoder_options.go | 84 + .../klauspost/compress/zstd/dict.go | 122 + .../klauspost/compress/zstd/enc_base.go | 177 + .../klauspost/compress/zstd/enc_best.go | 487 + .../klauspost/compress/zstd/enc_better.go | 1170 ++ .../klauspost/compress/zstd/enc_dfast.go | 1121 ++ .../klauspost/compress/zstd/enc_fast.go | 1018 ++ .../klauspost/compress/zstd/encoder.go | 576 + .../compress/zstd/encoder_options.go | 312 + .../klauspost/compress/zstd/framedec.go | 494 + .../klauspost/compress/zstd/frameenc.go | 137 + .../klauspost/compress/zstd/fse_decoder.go | 385 + .../klauspost/compress/zstd/fse_encoder.go | 726 + .../klauspost/compress/zstd/fse_predefined.go | 158 + .../klauspost/compress/zstd/hash.go | 77 + .../klauspost/compress/zstd/history.go | 89 + .../compress/zstd/internal/xxhash/LICENSE.txt | 22 + .../compress/zstd/internal/xxhash/README.md | 58 + .../compress/zstd/internal/xxhash/xxhash.go | 238 + .../zstd/internal/xxhash/xxhash_amd64.go | 13 + .../zstd/internal/xxhash/xxhash_amd64.s | 215 + .../zstd/internal/xxhash/xxhash_other.go | 76 + .../zstd/internal/xxhash/xxhash_safe.go | 11 + .../klauspost/compress/zstd/seqdec.go | 492 + .../klauspost/compress/zstd/seqenc.go | 115 + .../klauspost/compress/zstd/snappy.go | 436 + .../klauspost/compress/zstd/zstd.go | 156 + .../magiconair/properties/.gitignore | 6 + .../magiconair/properties/CHANGELOG.md | 205 + .../magiconair/properties/LICENSE.md | 24 + .../magiconair/properties/README.md | 128 + .../magiconair/properties/decode.go | 289 + .../github.com/magiconair/properties/doc.go | 155 + .../magiconair/properties/integrate.go | 35 + .../github.com/magiconair/properties/lex.go | 395 + .../github.com/magiconair/properties/load.go | 293 + .../magiconair/properties/parser.go | 86 + .../magiconair/properties/properties.go | 848 ++ .../magiconair/properties/rangecheck.go | 31 + vendor/github.com/moby/patternmatcher/LICENSE | 191 + vendor/github.com/moby/patternmatcher/NOTICE | 16 + .../moby/patternmatcher/patternmatcher.go | 474 + vendor/github.com/moby/sys/sequential/LICENSE | 202 + vendor/github.com/moby/sys/sequential/doc.go | 15 + .../moby/sys/sequential/sequential_unix.go | 45 + .../moby/sys/sequential/sequential_windows.go | 161 + vendor/github.com/moby/term/.gitignore | 8 + vendor/github.com/moby/term/LICENSE | 191 + vendor/github.com/moby/term/README.md | 36 + vendor/github.com/moby/term/ascii.go | 66 + vendor/github.com/moby/term/doc.go | 3 + vendor/github.com/moby/term/proxy.go | 88 + vendor/github.com/moby/term/term.go | 85 + vendor/github.com/moby/term/term_unix.go | 98 + vendor/github.com/moby/term/term_windows.go | 176 + vendor/github.com/moby/term/termios_bsd.go | 13 + vendor/github.com/moby/term/termios_nonbsd.go | 13 + vendor/github.com/moby/term/termios_unix.go | 35 + .../github.com/moby/term/termios_windows.go | 37 + .../moby/term/windows/ansi_reader.go | 252 + .../moby/term/windows/ansi_writer.go | 57 + .../github.com/moby/term/windows/console.go | 43 + vendor/github.com/moby/term/windows/doc.go | 5 + vendor/github.com/morikuni/aec/LICENSE | 21 + vendor/github.com/morikuni/aec/README.md | 178 + vendor/github.com/morikuni/aec/aec.go | 137 + vendor/github.com/morikuni/aec/ansi.go | 59 + vendor/github.com/morikuni/aec/builder.go | 388 + 
vendor/github.com/morikuni/aec/sample.gif | Bin 0 -> 12548 bytes vendor/github.com/morikuni/aec/sgr.go | 202 + .../opencontainers/go-digest/.mailmap | 4 + .../opencontainers/go-digest/.pullapprove.yml | 28 + .../opencontainers/go-digest/.travis.yml | 5 + .../opencontainers/go-digest/CONTRIBUTING.md | 72 + .../opencontainers/go-digest/LICENSE | 192 + .../opencontainers/go-digest/LICENSE.docs | 425 + .../opencontainers/go-digest/MAINTAINERS | 5 + .../opencontainers/go-digest/README.md | 96 + .../opencontainers/go-digest/algorithm.go | 193 + .../opencontainers/go-digest/digest.go | 157 + .../opencontainers/go-digest/digester.go | 40 + .../opencontainers/go-digest/doc.go | 62 + .../opencontainers/go-digest/verifiers.go | 46 + .../opencontainers/image-spec/LICENSE | 191 + .../image-spec/specs-go/v1/annotations.go | 71 + .../image-spec/specs-go/v1/artifact.go | 34 + .../image-spec/specs-go/v1/config.go | 114 + .../image-spec/specs-go/v1/descriptor.go | 72 + .../image-spec/specs-go/v1/index.go | 32 + .../image-spec/specs-go/v1/layout.go | 28 + .../image-spec/specs-go/v1/manifest.go | 38 + .../image-spec/specs-go/v1/mediatype.go | 60 + .../image-spec/specs-go/version.go | 32 + .../image-spec/specs-go/versioned.go | 23 + vendor/github.com/opencontainers/runc/LICENSE | 191 + vendor/github.com/opencontainers/runc/NOTICE | 17 + .../runc/libcontainer/user/lookup_unix.go | 157 + .../runc/libcontainer/user/user.go | 605 + .../runc/libcontainer/user/user_fuzzer.go | 43 + vendor/github.com/sirupsen/logrus/.gitignore | 4 + .../github.com/sirupsen/logrus/.golangci.yml | 40 + vendor/github.com/sirupsen/logrus/.travis.yml | 15 + .../github.com/sirupsen/logrus/CHANGELOG.md | 259 + vendor/github.com/sirupsen/logrus/LICENSE | 21 + vendor/github.com/sirupsen/logrus/README.md | 513 + vendor/github.com/sirupsen/logrus/alt_exit.go | 76 + .../github.com/sirupsen/logrus/appveyor.yml | 14 + .../github.com/sirupsen/logrus/buffer_pool.go | 43 + vendor/github.com/sirupsen/logrus/doc.go | 26 + vendor/github.com/sirupsen/logrus/entry.go | 442 + vendor/github.com/sirupsen/logrus/exported.go | 270 + .../github.com/sirupsen/logrus/formatter.go | 78 + vendor/github.com/sirupsen/logrus/hooks.go | 34 + .../sirupsen/logrus/json_formatter.go | 128 + vendor/github.com/sirupsen/logrus/logger.go | 417 + vendor/github.com/sirupsen/logrus/logrus.go | 186 + .../logrus/terminal_check_appengine.go | 11 + .../sirupsen/logrus/terminal_check_bsd.go | 13 + .../sirupsen/logrus/terminal_check_js.go | 7 + .../logrus/terminal_check_no_terminal.go | 11 + .../logrus/terminal_check_notappengine.go | 17 + .../sirupsen/logrus/terminal_check_solaris.go | 11 + .../sirupsen/logrus/terminal_check_unix.go | 13 + .../sirupsen/logrus/terminal_check_windows.go | 27 + .../sirupsen/logrus/text_formatter.go | 339 + vendor/github.com/sirupsen/logrus/writer.go | 70 + .../testcontainers-go/.gitignore | 13 + .../testcontainers-go/.golangci.yml | 16 + .../testcontainers-go/CONTRIBUTING.md | 13 + .../testcontainers/testcontainers-go/LICENSE | 21 + .../testcontainers/testcontainers-go/Makefile | 9 + .../testcontainers/testcontainers-go/Pipfile | 15 + .../testcontainers-go/Pipfile.lock | 298 + .../testcontainers-go/README.md | 17 + .../testcontainers-go/RELEASING.md | 203 + .../testcontainers-go/commons-test.mk | 21 + .../testcontainers-go/config.go | 29 + .../testcontainers-go/container.go | 263 + .../testcontainers-go/docker.go | 1404 ++ .../testcontainers-go/docker_auth.go | 108 + .../testcontainers-go/docker_mounts.go | 116 + .../testcontainers-go/exec/processor.go | 
44 + .../testcontainers/testcontainers-go/file.go | 141 + .../testcontainers-go/generic.go | 157 + .../internal/config/config.go | 100 + .../internal/testcontainersdocker/client.go | 65 + .../testcontainersdocker/docker_host.go | 264 + .../testcontainersdocker/docker_rootless.go | 141 + .../testcontainersdocker/docker_socket.go | 46 + .../internal/testcontainersdocker/images.go | 126 + .../internal/testcontainersdocker/labels.go | 9 + .../internal/testcontainerssession/session.go | 22 + .../testcontainers-go/internal/version.go | 4 + .../testcontainers-go/lifecycle.go | 379 + .../testcontainers-go/logconsumer.go | 22 + .../testcontainers-go/logger.go | 75 + .../testcontainers-go/mkdocs.yml | 106 + .../testcontainers-go/mounts.go | 116 + .../testcontainers-go/network.go | 45 + .../testcontainers-go/parallel.go | 122 + .../testcontainers-go/provider.go | 163 + .../testcontainers-go/reaper.go | 212 + .../testcontainers-go/requirements.txt | 4 + .../testcontainers-go/runtime.txt | 1 + .../testcontainers-go/testcontainer.go | 1 + .../testcontainers-go/testing.go | 42 + .../testcontainers-go/wait/all.go | 80 + .../testcontainers-go/wait/errors.go | 10 + .../testcontainers-go/wait/errors_windows.go | 9 + .../testcontainers-go/wait/exec.go | 99 + .../testcontainers-go/wait/exit.go | 89 + .../testcontainers-go/wait/health.go | 91 + .../testcontainers-go/wait/host_port.go | 204 + .../testcontainers-go/wait/http.go | 285 + .../testcontainers-go/wait/log.go | 117 + .../testcontainers-go/wait/nop.go | 69 + .../testcontainers-go/wait/sql.go | 116 + .../testcontainers-go/wait/wait.go | 62 + vendor/golang.org/x/{xerrors => exp}/LICENSE | 2 +- vendor/golang.org/x/{xerrors => exp}/PATENTS | 0 .../x/exp/constraints/constraints.go | 50 + vendor/golang.org/x/exp/slices/slices.go | 258 + vendor/golang.org/x/exp/slices/sort.go | 128 + vendor/golang.org/x/exp/slices/zsortfunc.go | 479 + .../golang.org/x/exp/slices/zsortordered.go | 481 + vendor/golang.org/x/net/LICENSE | 27 + vendor/golang.org/x/net/PATENTS | 22 + .../golang.org/x/net/internal/socks/client.go | 168 + .../golang.org/x/net/internal/socks/socks.go | 317 + vendor/golang.org/x/net/proxy/dial.go | 54 + vendor/golang.org/x/net/proxy/direct.go | 31 + vendor/golang.org/x/net/proxy/per_host.go | 155 + vendor/golang.org/x/net/proxy/proxy.go | 149 + vendor/golang.org/x/net/proxy/socks5.go | 42 + vendor/golang.org/x/sync/errgroup/errgroup.go | 74 +- vendor/golang.org/x/sys/execabs/execabs.go | 102 + .../golang.org/x/sys/execabs/execabs_go118.go | 18 + .../golang.org/x/sys/execabs/execabs_go119.go | 21 + vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s | 31 + .../golang.org/x/sys/unix/asm_bsd_riscv64.s | 29 + .../golang.org/x/sys/unix/asm_linux_loong64.s | 54 + vendor/golang.org/x/sys/unix/dirent.go | 4 +- vendor/golang.org/x/sys/unix/endian_little.go | 4 +- .../x/sys/unix/errors_freebsd_386.go | 233 - .../x/sys/unix/errors_freebsd_amd64.go | 233 - .../x/sys/unix/errors_freebsd_arm.go | 226 - .../x/sys/unix/errors_freebsd_arm64.go | 17 - vendor/golang.org/x/sys/unix/gccgo.go | 4 +- vendor/golang.org/x/sys/unix/gccgo_c.c | 4 +- vendor/golang.org/x/sys/unix/ifreq_linux.go | 9 +- vendor/golang.org/x/sys/unix/ioctl_linux.go | 20 +- vendor/golang.org/x/sys/unix/ioctl_signed.go | 70 + .../sys/unix/{ioctl.go => ioctl_unsigned.go} | 21 +- vendor/golang.org/x/sys/unix/ioctl_zos.go | 20 +- vendor/golang.org/x/sys/unix/mkall.sh | 62 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 21 +- vendor/golang.org/x/sys/unix/ptrace_darwin.go | 6 + 
vendor/golang.org/x/sys/unix/ptrace_ios.go | 6 + vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 14 + vendor/golang.org/x/sys/unix/syscall.go | 10 +- vendor/golang.org/x/sys/unix/syscall_aix.go | 74 +- .../golang.org/x/sys/unix/syscall_aix_ppc.go | 1 - .../x/sys/unix/syscall_aix_ppc64.go | 1 - vendor/golang.org/x/sys/unix/syscall_bsd.go | 49 +- .../x/sys/unix/syscall_darwin.1_12.go | 32 - .../x/sys/unix/syscall_darwin.1_13.go | 108 - .../golang.org/x/sys/unix/syscall_darwin.go | 115 +- .../x/sys/unix/syscall_darwin_amd64.go | 1 + .../x/sys/unix/syscall_darwin_arm64.go | 1 + .../x/sys/unix/syscall_dragonfly.go | 5 +- .../golang.org/x/sys/unix/syscall_freebsd.go | 364 +- .../x/sys/unix/syscall_freebsd_386.go | 12 +- .../x/sys/unix/syscall_freebsd_amd64.go | 12 +- .../x/sys/unix/syscall_freebsd_arm.go | 10 +- .../x/sys/unix/syscall_freebsd_arm64.go | 10 +- .../x/sys/unix/syscall_freebsd_riscv64.go | 61 + vendor/golang.org/x/sys/unix/syscall_hurd.go | 30 + .../golang.org/x/sys/unix/syscall_hurd_386.go | 29 + .../golang.org/x/sys/unix/syscall_illumos.go | 111 +- vendor/golang.org/x/sys/unix/syscall_linux.go | 304 +- .../x/sys/unix/syscall_linux_386.go | 31 - .../x/sys/unix/syscall_linux_amd64.go | 5 - .../x/sys/unix/syscall_linux_arm.go | 31 - .../x/sys/unix/syscall_linux_arm64.go | 14 - .../x/sys/unix/syscall_linux_loong64.go | 217 + .../x/sys/unix/syscall_linux_mips64x.go | 5 - .../x/sys/unix/syscall_linux_mipsx.go | 31 - .../x/sys/unix/syscall_linux_ppc.go | 31 - .../x/sys/unix/syscall_linux_ppc64x.go | 5 - .../x/sys/unix/syscall_linux_riscv64.go | 6 +- .../x/sys/unix/syscall_linux_s390x.go | 5 - .../x/sys/unix/syscall_linux_sparc64.go | 5 - .../golang.org/x/sys/unix/syscall_netbsd.go | 22 +- .../golang.org/x/sys/unix/syscall_openbsd.go | 5 +- .../x/sys/unix/syscall_openbsd_libc.go | 27 + .../x/sys/unix/syscall_openbsd_mips64.go | 4 + .../x/sys/unix/syscall_openbsd_ppc64.go | 42 + .../x/sys/unix/syscall_openbsd_riscv64.go | 42 + .../golang.org/x/sys/unix/syscall_solaris.go | 291 +- vendor/golang.org/x/sys/unix/syscall_unix.go | 136 +- .../golang.org/x/sys/unix/syscall_unix_gc.go | 6 +- .../x/sys/unix/syscall_zos_s390x.go | 179 +- vendor/golang.org/x/sys/unix/sysvshm_unix.go | 13 +- vendor/golang.org/x/sys/unix/timestruct.go | 2 +- vendor/golang.org/x/sys/unix/xattr_bsd.go | 104 +- .../x/sys/unix/zerrors_darwin_amd64.go | 19 + .../x/sys/unix/zerrors_darwin_arm64.go | 19 + .../x/sys/unix/zerrors_freebsd_386.go | 109 +- .../x/sys/unix/zerrors_freebsd_amd64.go | 107 +- .../x/sys/unix/zerrors_freebsd_arm.go | 220 +- .../x/sys/unix/zerrors_freebsd_arm64.go | 100 +- .../x/sys/unix/zerrors_freebsd_riscv64.go | 2148 +++ vendor/golang.org/x/sys/unix/zerrors_linux.go | 463 +- .../x/sys/unix/zerrors_linux_386.go | 7 +- .../x/sys/unix/zerrors_linux_amd64.go | 7 +- .../x/sys/unix/zerrors_linux_arm.go | 7 +- .../x/sys/unix/zerrors_linux_arm64.go | 8 +- .../x/sys/unix/zerrors_linux_loong64.go | 819 ++ .../x/sys/unix/zerrors_linux_mips.go | 7 +- .../x/sys/unix/zerrors_linux_mips64.go | 7 +- .../x/sys/unix/zerrors_linux_mips64le.go | 7 +- .../x/sys/unix/zerrors_linux_mipsle.go | 7 +- .../x/sys/unix/zerrors_linux_ppc.go | 7 +- .../x/sys/unix/zerrors_linux_ppc64.go | 7 +- .../x/sys/unix/zerrors_linux_ppc64le.go | 7 +- .../x/sys/unix/zerrors_linux_riscv64.go | 7 +- .../x/sys/unix/zerrors_linux_s390x.go | 7 +- .../x/sys/unix/zerrors_linux_sparc64.go | 7 +- .../x/sys/unix/zerrors_openbsd_386.go | 356 +- .../x/sys/unix/zerrors_openbsd_amd64.go | 189 +- .../x/sys/unix/zerrors_openbsd_arm.go | 348 +- 
.../x/sys/unix/zerrors_openbsd_arm64.go | 160 +- .../x/sys/unix/zerrors_openbsd_mips64.go | 95 +- .../x/sys/unix/zerrors_openbsd_ppc64.go | 1905 +++ .../x/sys/unix/zerrors_openbsd_riscv64.go | 1904 +++ .../x/sys/unix/zptrace_armnn_linux.go | 8 +- .../x/sys/unix/zptrace_linux_arm64.go | 4 +- .../x/sys/unix/zptrace_mipsnn_linux.go | 8 +- .../x/sys/unix/zptrace_mipsnnle_linux.go | 8 +- .../x/sys/unix/zptrace_x86_linux.go | 8 +- .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 23 +- .../x/sys/unix/zsyscall_aix_ppc64.go | 24 +- .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 17 +- .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 18 +- .../x/sys/unix/zsyscall_darwin_amd64.1_13.go | 40 - .../x/sys/unix/zsyscall_darwin_amd64.1_13.s | 25 - .../x/sys/unix/zsyscall_darwin_amd64.go | 111 +- .../x/sys/unix/zsyscall_darwin_amd64.s | 40 +- .../x/sys/unix/zsyscall_darwin_arm64.1_13.go | 40 - .../x/sys/unix/zsyscall_darwin_arm64.1_13.s | 25 - .../x/sys/unix/zsyscall_darwin_arm64.go | 111 +- .../x/sys/unix/zsyscall_darwin_arm64.s | 40 +- .../x/sys/unix/zsyscall_dragonfly_amd64.go | 30 +- .../x/sys/unix/zsyscall_freebsd_386.go | 181 +- .../x/sys/unix/zsyscall_freebsd_amd64.go | 179 +- .../x/sys/unix/zsyscall_freebsd_arm.go | 209 +- .../x/sys/unix/zsyscall_freebsd_arm64.go | 179 +- .../x/sys/unix/zsyscall_freebsd_riscv64.go | 1909 +++ .../x/sys/unix/zsyscall_illumos_amd64.go | 28 +- .../golang.org/x/sys/unix/zsyscall_linux.go | 104 +- .../x/sys/unix/zsyscall_linux_386.go | 50 - .../x/sys/unix/zsyscall_linux_amd64.go | 50 - .../x/sys/unix/zsyscall_linux_arm.go | 50 - .../x/sys/unix/zsyscall_linux_arm64.go | 50 - .../x/sys/unix/zsyscall_linux_loong64.go | 487 + .../x/sys/unix/zsyscall_linux_mips.go | 50 - .../x/sys/unix/zsyscall_linux_mips64.go | 50 - .../x/sys/unix/zsyscall_linux_mips64le.go | 50 - .../x/sys/unix/zsyscall_linux_mipsle.go | 50 - .../x/sys/unix/zsyscall_linux_ppc.go | 50 - .../x/sys/unix/zsyscall_linux_ppc64.go | 50 - .../x/sys/unix/zsyscall_linux_ppc64le.go | 50 - .../x/sys/unix/zsyscall_linux_riscv64.go | 61 +- .../x/sys/unix/zsyscall_linux_s390x.go | 50 - .../x/sys/unix/zsyscall_linux_sparc64.go | 50 - .../x/sys/unix/zsyscall_netbsd_386.go | 30 +- .../x/sys/unix/zsyscall_netbsd_amd64.go | 30 +- .../x/sys/unix/zsyscall_netbsd_arm.go | 30 +- .../x/sys/unix/zsyscall_netbsd_arm64.go | 30 +- .../x/sys/unix/zsyscall_openbsd_386.go | 820 +- .../x/sys/unix/zsyscall_openbsd_386.s | 664 + .../x/sys/unix/zsyscall_openbsd_amd64.go | 820 +- .../x/sys/unix/zsyscall_openbsd_amd64.s | 664 + .../x/sys/unix/zsyscall_openbsd_arm.go | 820 +- .../x/sys/unix/zsyscall_openbsd_arm.s | 664 + .../x/sys/unix/zsyscall_openbsd_arm64.go | 820 +- .../x/sys/unix/zsyscall_openbsd_arm64.s | 664 + .../x/sys/unix/zsyscall_openbsd_mips64.go | 820 +- .../x/sys/unix/zsyscall_openbsd_mips64.s | 664 + .../x/sys/unix/zsyscall_openbsd_ppc64.go | 2229 +++ .../x/sys/unix/zsyscall_openbsd_ppc64.s | 796 + .../x/sys/unix/zsyscall_openbsd_riscv64.go | 2229 +++ .../x/sys/unix/zsyscall_openbsd_riscv64.s | 664 + .../x/sys/unix/zsyscall_solaris_amd64.go | 81 +- .../x/sys/unix/zsyscall_zos_s390x.go | 12 +- .../x/sys/unix/zsysctl_openbsd_386.go | 51 +- .../x/sys/unix/zsysctl_openbsd_amd64.go | 17 +- .../x/sys/unix/zsysctl_openbsd_arm.go | 51 +- .../x/sys/unix/zsysctl_openbsd_arm64.go | 11 +- .../x/sys/unix/zsysctl_openbsd_mips64.go | 3 +- .../x/sys/unix/zsysctl_openbsd_ppc64.go | 281 + .../x/sys/unix/zsysctl_openbsd_riscv64.go | 282 + .../x/sys/unix/zsysnum_freebsd_386.go | 107 +- .../x/sys/unix/zsysnum_freebsd_amd64.go | 107 +- 
.../x/sys/unix/zsysnum_freebsd_arm.go | 107 +- .../x/sys/unix/zsysnum_freebsd_arm64.go | 107 +- .../x/sys/unix/zsysnum_freebsd_riscv64.go | 394 + .../x/sys/unix/zsysnum_linux_386.go | 3 +- .../x/sys/unix/zsysnum_linux_amd64.go | 3 +- .../x/sys/unix/zsysnum_linux_arm.go | 3 +- .../x/sys/unix/zsysnum_linux_arm64.go | 3 +- .../x/sys/unix/zsysnum_linux_loong64.go | 311 + .../x/sys/unix/zsysnum_linux_mips.go | 3 +- .../x/sys/unix/zsysnum_linux_mips64.go | 3 +- .../x/sys/unix/zsysnum_linux_mips64le.go | 3 +- .../x/sys/unix/zsysnum_linux_mipsle.go | 3 +- .../x/sys/unix/zsysnum_linux_ppc.go | 3 +- .../x/sys/unix/zsysnum_linux_ppc64.go | 3 +- .../x/sys/unix/zsysnum_linux_ppc64le.go | 3 +- .../x/sys/unix/zsysnum_linux_riscv64.go | 4 +- .../x/sys/unix/zsysnum_linux_s390x.go | 3 +- .../x/sys/unix/zsysnum_linux_sparc64.go | 3 +- .../x/sys/unix/zsysnum_openbsd_386.go | 1 + .../x/sys/unix/zsysnum_openbsd_amd64.go | 1 + .../x/sys/unix/zsysnum_openbsd_arm.go | 1 + .../x/sys/unix/zsysnum_openbsd_arm64.go | 1 + .../x/sys/unix/zsysnum_openbsd_mips64.go | 1 + .../x/sys/unix/zsysnum_openbsd_ppc64.go | 218 + .../x/sys/unix/zsysnum_openbsd_riscv64.go | 219 + .../x/sys/unix/ztypes_darwin_amd64.go | 84 +- .../x/sys/unix/ztypes_darwin_arm64.go | 84 +- .../x/sys/unix/ztypes_freebsd_386.go | 112 +- .../x/sys/unix/ztypes_freebsd_amd64.go | 110 +- .../x/sys/unix/ztypes_freebsd_arm.go | 161 +- .../x/sys/unix/ztypes_freebsd_arm64.go | 108 +- .../x/sys/unix/ztypes_freebsd_riscv64.go | 638 + .../x/sys/unix/ztypes_illumos_amd64.go | 42 - vendor/golang.org/x/sys/unix/ztypes_linux.go | 430 +- .../golang.org/x/sys/unix/ztypes_linux_386.go | 19 +- .../x/sys/unix/ztypes_linux_amd64.go | 18 +- .../golang.org/x/sys/unix/ztypes_linux_arm.go | 19 +- .../x/sys/unix/ztypes_linux_arm64.go | 18 +- .../x/sys/unix/ztypes_linux_loong64.go | 691 + .../x/sys/unix/ztypes_linux_mips.go | 19 +- .../x/sys/unix/ztypes_linux_mips64.go | 18 +- .../x/sys/unix/ztypes_linux_mips64le.go | 18 +- .../x/sys/unix/ztypes_linux_mipsle.go | 19 +- .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 19 +- .../x/sys/unix/ztypes_linux_ppc64.go | 18 +- .../x/sys/unix/ztypes_linux_ppc64le.go | 18 +- .../x/sys/unix/ztypes_linux_riscv64.go | 18 +- .../x/sys/unix/ztypes_linux_s390x.go | 18 +- .../x/sys/unix/ztypes_linux_sparc64.go | 18 +- .../x/sys/unix/ztypes_netbsd_386.go | 84 + .../x/sys/unix/ztypes_netbsd_amd64.go | 84 + .../x/sys/unix/ztypes_netbsd_arm.go | 84 + .../x/sys/unix/ztypes_netbsd_arm64.go | 84 + .../x/sys/unix/ztypes_openbsd_386.go | 105 +- .../x/sys/unix/ztypes_openbsd_amd64.go | 41 +- .../x/sys/unix/ztypes_openbsd_arm.go | 17 +- .../x/sys/unix/ztypes_openbsd_arm64.go | 17 +- .../x/sys/unix/ztypes_openbsd_mips64.go | 17 +- .../x/sys/unix/ztypes_openbsd_ppc64.go | 571 + .../x/sys/unix/ztypes_openbsd_riscv64.go | 571 + .../x/sys/unix/ztypes_solaris_amd64.go | 37 +- .../golang.org/x/sys/unix/ztypes_zos_s390x.go | 11 +- vendor/golang.org/x/sys/windows/aliases.go | 13 + .../golang.org/x/sys/windows/dll_windows.go | 416 + vendor/golang.org/x/sys/windows/empty.s | 9 + .../golang.org/x/sys/windows/env_windows.go | 54 + vendor/golang.org/x/sys/windows/eventlog.go | 21 + .../golang.org/x/sys/windows/exec_windows.go | 183 + .../x/sys/windows/memory_windows.go | 48 + vendor/golang.org/x/sys/windows/mkerrors.bash | 70 + .../x/sys/windows/mkknownfolderids.bash | 27 + vendor/golang.org/x/sys/windows/mksyscall.go | 10 + vendor/golang.org/x/sys/windows/race.go | 31 + vendor/golang.org/x/sys/windows/race0.go | 26 + .../x/sys/windows/security_windows.go | 1444 ++ 
vendor/golang.org/x/sys/windows/service.go | 254 + .../x/sys/windows/setupapi_windows.go | 1425 ++ .../golang.org/x/sys/{unix => windows}/str.go | 12 +- vendor/golang.org/x/sys/windows/syscall.go | 105 + .../x/sys/windows/syscall_windows.go | 1808 +++ .../golang.org/x/sys/windows/types_windows.go | 3349 +++++ .../x/sys/windows/types_windows_386.go | 35 + .../x/sys/windows/types_windows_amd64.go | 34 + .../x/sys/windows/types_windows_arm.go | 35 + .../x/sys/windows/types_windows_arm64.go | 34 + .../x/sys/windows/zerrors_windows.go | 9468 ++++++++++++ .../x/sys/windows/zknownfolderids_windows.go | 149 + .../x/sys/windows/zsyscall_windows.go | 4345 ++++++ vendor/golang.org/x/tools/AUTHORS | 3 - vendor/golang.org/x/tools/CONTRIBUTORS | 3 - vendor/golang.org/x/tools/cover/profile.go | 11 +- vendor/golang.org/x/xerrors/README | 2 - vendor/golang.org/x/xerrors/adaptor.go | 193 - vendor/golang.org/x/xerrors/codereview.cfg | 1 - vendor/golang.org/x/xerrors/doc.go | 22 - vendor/golang.org/x/xerrors/errors.go | 33 - vendor/golang.org/x/xerrors/fmt.go | 187 - vendor/golang.org/x/xerrors/format.go | 34 - vendor/golang.org/x/xerrors/frame.go | 56 - vendor/golang.org/x/xerrors/wrap.go | 106 - vendor/google.golang.org/genproto/LICENSE | 202 + .../googleapis/rpc/status/status.pb.go | 201 + vendor/google.golang.org/grpc/AUTHORS | 1 + vendor/google.golang.org/grpc/LICENSE | 202 + vendor/google.golang.org/grpc/NOTICE.txt | 13 + .../grpc/codes/code_string.go | 62 + vendor/google.golang.org/grpc/codes/codes.go | 244 + .../grpc/internal/status/status.go | 166 + .../google.golang.org/grpc/status/status.go | 135 + .../protobuf}/AUTHORS | 2 +- .../protobuf}/CONTRIBUTORS | 2 +- vendor/google.golang.org/protobuf/LICENSE | 27 + vendor/google.golang.org/protobuf/PATENTS | 22 + .../protobuf/encoding/prototext/decode.go | 770 + .../protobuf/encoding/prototext/doc.go | 7 + .../protobuf/encoding/prototext/encode.go | 371 + .../protobuf/encoding/protowire/wire.go | 547 + .../protobuf/internal/descfmt/stringer.go | 318 + .../protobuf/internal/descopts/options.go | 29 + .../protobuf/internal/detrand/rand.go | 69 + .../internal/encoding/defval/default.go | 213 + .../encoding/messageset/messageset.go | 241 + .../protobuf/internal/encoding/tag/tag.go | 207 + .../protobuf/internal/encoding/text/decode.go | 665 + .../internal/encoding/text/decode_number.go | 190 + .../internal/encoding/text/decode_string.go | 161 + .../internal/encoding/text/decode_token.go | 373 + .../protobuf/internal/encoding/text/doc.go | 29 + .../protobuf/internal/encoding/text/encode.go | 270 + .../protobuf/internal/errors/errors.go | 89 + .../protobuf/internal/errors/is_go112.go | 40 + .../protobuf/internal/errors/is_go113.go | 13 + .../protobuf/internal/filedesc/build.go | 158 + .../protobuf/internal/filedesc/desc.go | 631 + .../protobuf/internal/filedesc/desc_init.go | 471 + .../protobuf/internal/filedesc/desc_lazy.go | 704 + .../protobuf/internal/filedesc/desc_list.go | 450 + .../internal/filedesc/desc_list_gen.go | 356 + .../protobuf/internal/filedesc/placeholder.go | 107 + .../protobuf/internal/filetype/build.go | 297 + .../protobuf/internal/flags/flags.go | 24 + .../internal/flags/proto_legacy_disable.go} | 8 +- .../internal/flags/proto_legacy_enable.go | 10 + .../protobuf/internal/genid/any_gen.go | 34 + .../protobuf/internal/genid/api_gen.go | 106 + .../protobuf/internal/genid/descriptor_gen.go | 829 ++ .../protobuf/internal/genid/doc.go | 11 + .../protobuf/internal/genid/duration_gen.go | 34 + .../protobuf/internal/genid/empty_gen.go | 19 + 
.../protobuf/internal/genid/field_mask_gen.go | 31 + .../protobuf/internal/genid/goname.go | 25 + .../protobuf/internal/genid/map_entry.go | 16 + .../internal/genid/source_context_gen.go | 31 + .../protobuf/internal/genid/struct_gen.go | 116 + .../protobuf/internal/genid/timestamp_gen.go | 34 + .../protobuf/internal/genid/type_gen.go | 184 + .../protobuf/internal/genid/wrappers.go | 13 + .../protobuf/internal/genid/wrappers_gen.go | 175 + .../protobuf/internal/impl/api_export.go | 177 + .../protobuf/internal/impl/checkinit.go | 141 + .../protobuf/internal/impl/codec_extension.go | 223 + .../protobuf/internal/impl/codec_field.go | 830 ++ .../protobuf/internal/impl/codec_gen.go | 5637 +++++++ .../protobuf/internal/impl/codec_map.go | 388 + .../protobuf/internal/impl/codec_map_go111.go | 38 + .../protobuf/internal/impl/codec_map_go112.go | 12 + .../protobuf/internal/impl/codec_message.go | 217 + .../internal/impl/codec_messageset.go | 123 + .../protobuf/internal/impl/codec_reflect.go | 210 + .../protobuf/internal/impl/codec_tables.go | 557 + .../protobuf/internal/impl/codec_unsafe.go | 18 + .../protobuf/internal/impl/convert.go | 496 + .../protobuf/internal/impl/convert_list.go | 141 + .../protobuf/internal/impl/convert_map.go | 121 + .../protobuf/internal/impl/decode.go | 284 + .../protobuf/internal/impl/encode.go | 201 + .../protobuf/internal/impl/enum.go | 21 + .../protobuf/internal/impl/extension.go | 156 + .../protobuf/internal/impl/legacy_enum.go | 219 + .../protobuf/internal/impl/legacy_export.go | 92 + .../internal/impl/legacy_extension.go | 176 + .../protobuf/internal/impl/legacy_file.go | 81 + .../protobuf/internal/impl/legacy_message.go | 565 + .../protobuf/internal/impl/merge.go | 176 + .../protobuf/internal/impl/merge_gen.go | 209 + .../protobuf/internal/impl/message.go | 276 + .../protobuf/internal/impl/message_reflect.go | 465 + .../internal/impl/message_reflect_field.go | 543 + .../internal/impl/message_reflect_gen.go | 249 + .../protobuf/internal/impl/pointer_reflect.go | 179 + .../protobuf/internal/impl/pointer_unsafe.go | 175 + .../protobuf/internal/impl/validate.go | 576 + .../protobuf/internal/impl/weak.go | 74 + .../protobuf/internal/order/order.go | 89 + .../protobuf/internal/order/range.go | 115 + .../protobuf/internal/pragma/pragma.go | 29 + .../protobuf/internal/set/ints.go | 58 + .../protobuf/internal/strs/strings.go | 196 + .../protobuf/internal/strs/strings_pure.go | 28 + .../protobuf/internal/strs/strings_unsafe.go | 95 + .../protobuf/internal/version/version.go | 79 + .../protobuf/proto/checkinit.go | 71 + .../protobuf/proto/decode.go | 293 + .../protobuf/proto/decode_gen.go | 603 + .../google.golang.org/protobuf/proto/doc.go | 94 + .../protobuf/proto/encode.go | 319 + .../protobuf/proto/encode_gen.go | 97 + .../google.golang.org/protobuf/proto/equal.go | 167 + .../protobuf/proto/extension.go | 92 + .../google.golang.org/protobuf/proto/merge.go | 139 + .../protobuf/proto/messageset.go | 93 + .../google.golang.org/protobuf/proto/proto.go | 43 + .../protobuf/proto/proto_methods.go | 20 + .../protobuf/proto/proto_reflect.go | 20 + .../google.golang.org/protobuf/proto/reset.go | 43 + .../google.golang.org/protobuf/proto/size.go | 97 + .../protobuf/proto/size_gen.go | 55 + .../protobuf/proto/wrappers.go | 29 + .../protobuf/reflect/protodesc/desc.go | 276 + .../protobuf/reflect/protodesc/desc_init.go | 248 + .../reflect/protodesc/desc_resolve.go | 286 + .../reflect/protodesc/desc_validate.go | 374 + .../protobuf/reflect/protodesc/proto.go | 252 + 
.../protobuf/reflect/protoreflect/methods.go | 78 + .../protobuf/reflect/protoreflect/proto.go | 504 + .../protobuf/reflect/protoreflect/source.go | 128 + .../reflect/protoreflect/source_gen.go | 461 + .../protobuf/reflect/protoreflect/type.go | 665 + .../protobuf/reflect/protoreflect/value.go | 285 + .../reflect/protoreflect/value_pure.go | 60 + .../reflect/protoreflect/value_union.go | 436 + .../reflect/protoreflect/value_unsafe.go | 99 + .../reflect/protoregistry/registry.go | 880 ++ .../protobuf/runtime/protoiface/legacy.go | 15 + .../protobuf/runtime/protoiface/methods.go | 168 + .../protobuf/runtime/protoimpl/impl.go | 44 + .../protobuf/runtime/protoimpl/version.go | 56 + .../types/descriptorpb/descriptor.pb.go | 3957 +++++ .../protobuf/types/known/anypb/any.pb.go | 498 + .../types/known/durationpb/duration.pb.go | 379 + .../types/known/timestamppb/timestamp.pb.go | 390 + vendor/gotest.tools/v3/assert/assert.go | 172 +- vendor/gotest.tools/v3/assert/cmp/compare.go | 2 +- vendor/gotest.tools/v3/assert/cmp/result.go | 3 +- .../gotest.tools/v3/internal/assert/assert.go | 143 + .../v3/{ => internal}/assert/result.go | 31 +- .../v3/internal/cleanup/cleanup.go | 9 +- .../gotest.tools/v3/internal/source/source.go | 21 +- vendor/gotest.tools/v3/x/subtest/context.go | 84 - vendor/modules.txt | 203 +- 1171 files changed, 208283 insertions(+), 7775 deletions(-) create mode 100644 vendor/github.com/Azure/go-ansiterm/LICENSE create mode 100644 vendor/github.com/Azure/go-ansiterm/README.md create mode 100644 vendor/github.com/Azure/go-ansiterm/constants.go create mode 100644 vendor/github.com/Azure/go-ansiterm/context.go create mode 100644 vendor/github.com/Azure/go-ansiterm/csi_entry_state.go create mode 100644 vendor/github.com/Azure/go-ansiterm/csi_param_state.go create mode 100644 vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go create mode 100644 vendor/github.com/Azure/go-ansiterm/escape_state.go create mode 100644 vendor/github.com/Azure/go-ansiterm/event_handler.go create mode 100644 vendor/github.com/Azure/go-ansiterm/ground_state.go create mode 100644 vendor/github.com/Azure/go-ansiterm/osc_string_state.go create mode 100644 vendor/github.com/Azure/go-ansiterm/parser.go create mode 100644 vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go create mode 100644 vendor/github.com/Azure/go-ansiterm/parser_actions.go create mode 100644 vendor/github.com/Azure/go-ansiterm/states.go create mode 100644 vendor/github.com/Azure/go-ansiterm/utilities.go create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/ansi.go create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/api.go create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/utilities.go create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go create mode 100644 vendor/github.com/Microsoft/go-winio/.gitignore create mode 100644 vendor/github.com/Microsoft/go-winio/CODEOWNERS create mode 100644 vendor/github.com/Microsoft/go-winio/LICENSE create mode 100644 vendor/github.com/Microsoft/go-winio/README.md create mode 100644 vendor/github.com/Microsoft/go-winio/backup.go create mode 100644 vendor/github.com/Microsoft/go-winio/ea.go create mode 100644 
vendor/github.com/Microsoft/go-winio/file.go create mode 100644 vendor/github.com/Microsoft/go-winio/fileinfo.go create mode 100644 vendor/github.com/Microsoft/go-winio/hvsock.go create mode 100644 vendor/github.com/Microsoft/go-winio/pipe.go create mode 100644 vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go create mode 100644 vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go create mode 100644 vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go create mode 100644 vendor/github.com/Microsoft/go-winio/privilege.go create mode 100644 vendor/github.com/Microsoft/go-winio/reparse.go create mode 100644 vendor/github.com/Microsoft/go-winio/sd.go create mode 100644 vendor/github.com/Microsoft/go-winio/syscall.go create mode 100644 vendor/github.com/Microsoft/go-winio/zsyscall_windows.go create mode 100644 vendor/github.com/cenkalti/backoff/v4/.gitignore create mode 100644 vendor/github.com/cenkalti/backoff/v4/LICENSE create mode 100644 vendor/github.com/cenkalti/backoff/v4/README.md create mode 100644 vendor/github.com/cenkalti/backoff/v4/backoff.go create mode 100644 vendor/github.com/cenkalti/backoff/v4/context.go create mode 100644 vendor/github.com/cenkalti/backoff/v4/exponential.go create mode 100644 vendor/github.com/cenkalti/backoff/v4/retry.go create mode 100644 vendor/github.com/cenkalti/backoff/v4/ticker.go create mode 100644 vendor/github.com/cenkalti/backoff/v4/timer.go create mode 100644 vendor/github.com/cenkalti/backoff/v4/tries.go create mode 100644 vendor/github.com/containerd/containerd/LICENSE create mode 100644 vendor/github.com/containerd/containerd/NOTICE create mode 100644 vendor/github.com/containerd/containerd/errdefs/errors.go create mode 100644 vendor/github.com/containerd/containerd/errdefs/grpc.go create mode 100644 vendor/github.com/containerd/containerd/log/context.go create mode 100644 vendor/github.com/containerd/containerd/pkg/userns/userns_linux.go create mode 100644 vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go create mode 100644 vendor/github.com/containerd/containerd/platforms/compare.go create mode 100644 vendor/github.com/containerd/containerd/platforms/cpuinfo.go create mode 100644 vendor/github.com/containerd/containerd/platforms/database.go create mode 100644 vendor/github.com/containerd/containerd/platforms/defaults.go create mode 100644 vendor/github.com/containerd/containerd/platforms/defaults_darwin.go create mode 100644 vendor/github.com/containerd/containerd/platforms/defaults_unix.go create mode 100644 vendor/github.com/containerd/containerd/platforms/defaults_windows.go create mode 100644 vendor/github.com/containerd/containerd/platforms/platforms.go create mode 100644 vendor/github.com/cpuguy83/dockercfg/LICENSE create mode 100644 vendor/github.com/cpuguy83/dockercfg/README.md create mode 100644 vendor/github.com/cpuguy83/dockercfg/auth.go create mode 100644 vendor/github.com/cpuguy83/dockercfg/config.go create mode 100644 vendor/github.com/cpuguy83/dockercfg/load.go create mode 100644 vendor/github.com/docker/distribution/LICENSE create mode 100644 vendor/github.com/docker/distribution/digestset/set.go create mode 100644 vendor/github.com/docker/distribution/reference/helpers.go create mode 100644 vendor/github.com/docker/distribution/reference/normalize.go create mode 100644 vendor/github.com/docker/distribution/reference/reference.go create mode 100644 vendor/github.com/docker/distribution/reference/regexp.go create mode 100644 vendor/github.com/docker/docker/AUTHORS create mode 100644 
vendor/github.com/docker/docker/LICENSE create mode 100644 vendor/github.com/docker/docker/NOTICE create mode 100644 vendor/github.com/docker/docker/api/README.md create mode 100644 vendor/github.com/docker/docker/api/common.go create mode 100644 vendor/github.com/docker/docker/api/common_unix.go create mode 100644 vendor/github.com/docker/docker/api/common_windows.go create mode 100644 vendor/github.com/docker/docker/api/swagger-gen.yaml create mode 100644 vendor/github.com/docker/docker/api/swagger.yaml create mode 100644 vendor/github.com/docker/docker/api/types/auth.go create mode 100644 vendor/github.com/docker/docker/api/types/blkiodev/blkio.go create mode 100644 vendor/github.com/docker/docker/api/types/client.go create mode 100644 vendor/github.com/docker/docker/api/types/configs.go create mode 100644 vendor/github.com/docker/docker/api/types/container/config.go create mode 100644 vendor/github.com/docker/docker/api/types/container/container_changes.go create mode 100644 vendor/github.com/docker/docker/api/types/container/container_top.go create mode 100644 vendor/github.com/docker/docker/api/types/container/container_update.go create mode 100644 vendor/github.com/docker/docker/api/types/container/create_response.go create mode 100644 vendor/github.com/docker/docker/api/types/container/deprecated.go create mode 100644 vendor/github.com/docker/docker/api/types/container/host_config.go create mode 100644 vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go create mode 100644 vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go create mode 100644 vendor/github.com/docker/docker/api/types/container/wait_exit_error.go create mode 100644 vendor/github.com/docker/docker/api/types/container/wait_response.go create mode 100644 vendor/github.com/docker/docker/api/types/container/waitcondition.go create mode 100644 vendor/github.com/docker/docker/api/types/deprecated.go create mode 100644 vendor/github.com/docker/docker/api/types/error_response.go create mode 100644 vendor/github.com/docker/docker/api/types/error_response_ext.go create mode 100644 vendor/github.com/docker/docker/api/types/events/events.go create mode 100644 vendor/github.com/docker/docker/api/types/filters/parse.go create mode 100644 vendor/github.com/docker/docker/api/types/graph_driver_data.go create mode 100644 vendor/github.com/docker/docker/api/types/id_response.go create mode 100644 vendor/github.com/docker/docker/api/types/image/image_history.go create mode 100644 vendor/github.com/docker/docker/api/types/image_delete_response_item.go create mode 100644 vendor/github.com/docker/docker/api/types/image_summary.go create mode 100644 vendor/github.com/docker/docker/api/types/mount/mount.go create mode 100644 vendor/github.com/docker/docker/api/types/network/network.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_device.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_env.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_interface_type.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_mount.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_responses.go create mode 100644 vendor/github.com/docker/docker/api/types/port.go create mode 100644 vendor/github.com/docker/docker/api/types/registry/authenticate.go create mode 100644 vendor/github.com/docker/docker/api/types/registry/registry.go create mode 100644 
vendor/github.com/docker/docker/api/types/service_update_response.go create mode 100644 vendor/github.com/docker/docker/api/types/stats.go create mode 100644 vendor/github.com/docker/docker/api/types/strslice/strslice.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/common.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/config.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/container.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/network.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/node.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto create mode 100644 vendor/github.com/docker/docker/api/types/swarm/secret.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/service.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/swarm.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/task.go create mode 100644 vendor/github.com/docker/docker/api/types/time/timestamp.go create mode 100644 vendor/github.com/docker/docker/api/types/types.go create mode 100644 vendor/github.com/docker/docker/api/types/versions/README.md create mode 100644 vendor/github.com/docker/docker/api/types/versions/compare.go create mode 100644 vendor/github.com/docker/docker/api/types/volume/cluster_volume.go create mode 100644 vendor/github.com/docker/docker/api/types/volume/create_options.go create mode 100644 vendor/github.com/docker/docker/api/types/volume/deprecated.go create mode 100644 vendor/github.com/docker/docker/api/types/volume/list_response.go create mode 100644 vendor/github.com/docker/docker/api/types/volume/options.go create mode 100644 vendor/github.com/docker/docker/api/types/volume/volume.go create mode 100644 vendor/github.com/docker/docker/api/types/volume/volume_update.go create mode 100644 vendor/github.com/docker/docker/client/README.md create mode 100644 vendor/github.com/docker/docker/client/build_cancel.go create mode 100644 vendor/github.com/docker/docker/client/build_prune.go create mode 100644 vendor/github.com/docker/docker/client/checkpoint_create.go create mode 100644 vendor/github.com/docker/docker/client/checkpoint_delete.go create mode 100644 vendor/github.com/docker/docker/client/checkpoint_list.go create mode 100644 vendor/github.com/docker/docker/client/client.go create mode 100644 vendor/github.com/docker/docker/client/client_deprecated.go create mode 100644 vendor/github.com/docker/docker/client/client_unix.go create mode 100644 vendor/github.com/docker/docker/client/client_windows.go create mode 100644 vendor/github.com/docker/docker/client/config_create.go create mode 100644 vendor/github.com/docker/docker/client/config_inspect.go create mode 100644 vendor/github.com/docker/docker/client/config_list.go create mode 100644 vendor/github.com/docker/docker/client/config_remove.go create mode 100644 vendor/github.com/docker/docker/client/config_update.go create mode 100644 vendor/github.com/docker/docker/client/container_attach.go create mode 100644 vendor/github.com/docker/docker/client/container_commit.go create mode 100644 vendor/github.com/docker/docker/client/container_copy.go create mode 100644 
vendor/github.com/docker/docker/client/container_create.go create mode 100644 vendor/github.com/docker/docker/client/container_diff.go create mode 100644 vendor/github.com/docker/docker/client/container_exec.go create mode 100644 vendor/github.com/docker/docker/client/container_export.go create mode 100644 vendor/github.com/docker/docker/client/container_inspect.go create mode 100644 vendor/github.com/docker/docker/client/container_kill.go create mode 100644 vendor/github.com/docker/docker/client/container_list.go create mode 100644 vendor/github.com/docker/docker/client/container_logs.go create mode 100644 vendor/github.com/docker/docker/client/container_pause.go create mode 100644 vendor/github.com/docker/docker/client/container_prune.go create mode 100644 vendor/github.com/docker/docker/client/container_remove.go create mode 100644 vendor/github.com/docker/docker/client/container_rename.go create mode 100644 vendor/github.com/docker/docker/client/container_resize.go create mode 100644 vendor/github.com/docker/docker/client/container_restart.go create mode 100644 vendor/github.com/docker/docker/client/container_start.go create mode 100644 vendor/github.com/docker/docker/client/container_stats.go create mode 100644 vendor/github.com/docker/docker/client/container_stop.go create mode 100644 vendor/github.com/docker/docker/client/container_top.go create mode 100644 vendor/github.com/docker/docker/client/container_unpause.go create mode 100644 vendor/github.com/docker/docker/client/container_update.go create mode 100644 vendor/github.com/docker/docker/client/container_wait.go create mode 100644 vendor/github.com/docker/docker/client/disk_usage.go create mode 100644 vendor/github.com/docker/docker/client/distribution_inspect.go create mode 100644 vendor/github.com/docker/docker/client/envvars.go create mode 100644 vendor/github.com/docker/docker/client/errors.go create mode 100644 vendor/github.com/docker/docker/client/events.go create mode 100644 vendor/github.com/docker/docker/client/hijack.go create mode 100644 vendor/github.com/docker/docker/client/image_build.go create mode 100644 vendor/github.com/docker/docker/client/image_create.go create mode 100644 vendor/github.com/docker/docker/client/image_history.go create mode 100644 vendor/github.com/docker/docker/client/image_import.go create mode 100644 vendor/github.com/docker/docker/client/image_inspect.go create mode 100644 vendor/github.com/docker/docker/client/image_list.go create mode 100644 vendor/github.com/docker/docker/client/image_load.go create mode 100644 vendor/github.com/docker/docker/client/image_prune.go create mode 100644 vendor/github.com/docker/docker/client/image_pull.go create mode 100644 vendor/github.com/docker/docker/client/image_push.go create mode 100644 vendor/github.com/docker/docker/client/image_remove.go create mode 100644 vendor/github.com/docker/docker/client/image_save.go create mode 100644 vendor/github.com/docker/docker/client/image_search.go create mode 100644 vendor/github.com/docker/docker/client/image_tag.go create mode 100644 vendor/github.com/docker/docker/client/info.go create mode 100644 vendor/github.com/docker/docker/client/interface.go create mode 100644 vendor/github.com/docker/docker/client/interface_experimental.go create mode 100644 vendor/github.com/docker/docker/client/interface_stable.go create mode 100644 vendor/github.com/docker/docker/client/login.go create mode 100644 vendor/github.com/docker/docker/client/network_connect.go create mode 100644 
vendor/github.com/docker/docker/client/network_create.go create mode 100644 vendor/github.com/docker/docker/client/network_disconnect.go create mode 100644 vendor/github.com/docker/docker/client/network_inspect.go create mode 100644 vendor/github.com/docker/docker/client/network_list.go create mode 100644 vendor/github.com/docker/docker/client/network_prune.go create mode 100644 vendor/github.com/docker/docker/client/network_remove.go create mode 100644 vendor/github.com/docker/docker/client/node_inspect.go create mode 100644 vendor/github.com/docker/docker/client/node_list.go create mode 100644 vendor/github.com/docker/docker/client/node_remove.go create mode 100644 vendor/github.com/docker/docker/client/node_update.go create mode 100644 vendor/github.com/docker/docker/client/options.go create mode 100644 vendor/github.com/docker/docker/client/ping.go create mode 100644 vendor/github.com/docker/docker/client/plugin_create.go create mode 100644 vendor/github.com/docker/docker/client/plugin_disable.go create mode 100644 vendor/github.com/docker/docker/client/plugin_enable.go create mode 100644 vendor/github.com/docker/docker/client/plugin_inspect.go create mode 100644 vendor/github.com/docker/docker/client/plugin_install.go create mode 100644 vendor/github.com/docker/docker/client/plugin_list.go create mode 100644 vendor/github.com/docker/docker/client/plugin_push.go create mode 100644 vendor/github.com/docker/docker/client/plugin_remove.go create mode 100644 vendor/github.com/docker/docker/client/plugin_set.go create mode 100644 vendor/github.com/docker/docker/client/plugin_upgrade.go create mode 100644 vendor/github.com/docker/docker/client/request.go create mode 100644 vendor/github.com/docker/docker/client/secret_create.go create mode 100644 vendor/github.com/docker/docker/client/secret_inspect.go create mode 100644 vendor/github.com/docker/docker/client/secret_list.go create mode 100644 vendor/github.com/docker/docker/client/secret_remove.go create mode 100644 vendor/github.com/docker/docker/client/secret_update.go create mode 100644 vendor/github.com/docker/docker/client/service_create.go create mode 100644 vendor/github.com/docker/docker/client/service_inspect.go create mode 100644 vendor/github.com/docker/docker/client/service_list.go create mode 100644 vendor/github.com/docker/docker/client/service_logs.go create mode 100644 vendor/github.com/docker/docker/client/service_remove.go create mode 100644 vendor/github.com/docker/docker/client/service_update.go create mode 100644 vendor/github.com/docker/docker/client/swarm_get_unlock_key.go create mode 100644 vendor/github.com/docker/docker/client/swarm_init.go create mode 100644 vendor/github.com/docker/docker/client/swarm_inspect.go create mode 100644 vendor/github.com/docker/docker/client/swarm_join.go create mode 100644 vendor/github.com/docker/docker/client/swarm_leave.go create mode 100644 vendor/github.com/docker/docker/client/swarm_unlock.go create mode 100644 vendor/github.com/docker/docker/client/swarm_update.go create mode 100644 vendor/github.com/docker/docker/client/task_inspect.go create mode 100644 vendor/github.com/docker/docker/client/task_list.go create mode 100644 vendor/github.com/docker/docker/client/task_logs.go create mode 100644 vendor/github.com/docker/docker/client/transport.go create mode 100644 vendor/github.com/docker/docker/client/utils.go create mode 100644 vendor/github.com/docker/docker/client/version.go create mode 100644 vendor/github.com/docker/docker/client/volume_create.go create mode 100644 
vendor/github.com/docker/docker/client/volume_inspect.go create mode 100644 vendor/github.com/docker/docker/client/volume_list.go create mode 100644 vendor/github.com/docker/docker/client/volume_prune.go create mode 100644 vendor/github.com/docker/docker/client/volume_remove.go create mode 100644 vendor/github.com/docker/docker/client/volume_update.go create mode 100644 vendor/github.com/docker/docker/errdefs/defs.go create mode 100644 vendor/github.com/docker/docker/errdefs/doc.go create mode 100644 vendor/github.com/docker/docker/errdefs/helpers.go create mode 100644 vendor/github.com/docker/docker/errdefs/http_helpers.go create mode 100644 vendor/github.com/docker/docker/errdefs/is.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/README.md create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_other.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_other.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/copy.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/copy_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/copy_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/diff.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/diff_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/diff_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/time_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/time_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/whiteouts.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/wrap.go create mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools.go create mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/idtools/utils_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/buffer.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/fswriters.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/readers.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/writers.go create mode 100644 vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go create mode 100644 
vendor/github.com/docker/docker/pkg/longpath/longpath.go create mode 100644 vendor/github.com/docker/docker/pkg/pools/pools.go create mode 100644 vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go create mode 100644 vendor/github.com/docker/docker/pkg/system/args_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes.go create mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/errors.go create mode 100644 vendor/github.com/docker/docker/pkg/system/filesys.go create mode 100644 vendor/github.com/docker/docker/pkg/system/filesys_deprecated.go create mode 100644 vendor/github.com/docker/docker/pkg/system/filesys_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/system/filesys_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/image_os.go create mode 100644 vendor/github.com/docker/docker/pkg/system/init.go create mode 100644 vendor/github.com/docker/docker/pkg/system/init_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/lstat_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/system/lstat_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo.go create mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/mknod.go create mode 100644 vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go create mode 100644 vendor/github.com/docker/docker/pkg/system/mknod_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/system/mknod_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/path.go create mode 100644 vendor/github.com/docker/docker/pkg/system/path_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/system/path_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/process_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/system/process_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_bsd.go create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_darwin.go create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_openbsd.go create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/utimes_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/system/xattrs_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go create mode 100644 vendor/github.com/docker/go-connections/LICENSE create mode 100644 vendor/github.com/docker/go-connections/nat/nat.go create mode 100644 vendor/github.com/docker/go-connections/nat/parse.go create mode 100644 vendor/github.com/docker/go-connections/nat/sort.go create mode 100644 vendor/github.com/docker/go-connections/sockets/README.md create mode 100644 
vendor/github.com/docker/go-connections/sockets/inmem_socket.go create mode 100644 vendor/github.com/docker/go-connections/sockets/proxy.go create mode 100644 vendor/github.com/docker/go-connections/sockets/sockets.go create mode 100644 vendor/github.com/docker/go-connections/sockets/sockets_unix.go create mode 100644 vendor/github.com/docker/go-connections/sockets/sockets_windows.go create mode 100644 vendor/github.com/docker/go-connections/sockets/tcp_socket.go create mode 100644 vendor/github.com/docker/go-connections/sockets/unix_socket.go create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/config.go create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go create mode 100644 vendor/github.com/docker/go-units/CONTRIBUTING.md create mode 100644 vendor/github.com/docker/go-units/LICENSE create mode 100644 vendor/github.com/docker/go-units/MAINTAINERS create mode 100644 vendor/github.com/docker/go-units/README.md create mode 100644 vendor/github.com/docker/go-units/circle.yml create mode 100644 vendor/github.com/docker/go-units/duration.go create mode 100644 vendor/github.com/docker/go-units/size.go create mode 100644 vendor/github.com/docker/go-units/ulimit.go create mode 100644 vendor/github.com/gogo/protobuf/AUTHORS create mode 100644 vendor/github.com/gogo/protobuf/CONTRIBUTORS create mode 100644 vendor/github.com/gogo/protobuf/LICENSE create mode 100644 vendor/github.com/gogo/protobuf/proto/Makefile create mode 100644 vendor/github.com/gogo/protobuf/proto/clone.go create mode 100644 vendor/github.com/gogo/protobuf/proto/custom_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/decode.go create mode 100644 vendor/github.com/gogo/protobuf/proto/deprecated.go create mode 100644 vendor/github.com/gogo/protobuf/proto/discard.go create mode 100644 vendor/github.com/gogo/protobuf/proto/duration.go create mode 100644 vendor/github.com/gogo/protobuf/proto/duration_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/encode.go create mode 100644 vendor/github.com/gogo/protobuf/proto/encode_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/equal.go create mode 100644 vendor/github.com/gogo/protobuf/proto/extensions.go create mode 100644 vendor/github.com/gogo/protobuf/proto/extensions_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/lib.go create mode 100644 vendor/github.com/gogo/protobuf/proto/lib_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/message_set.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/properties.go create mode 100644 vendor/github.com/gogo/protobuf/proto/properties_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/skip_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/table_marshal.go create mode 100644 vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/table_merge.go 
create mode 100644 vendor/github.com/gogo/protobuf/proto/table_unmarshal.go create mode 100644 vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/text.go create mode 100644 vendor/github.com/gogo/protobuf/proto/text_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/text_parser.go create mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp.go create mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/wrappers.go create mode 100644 vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go rename vendor/{golang.org/x/sync => github.com/golang/protobuf}/AUTHORS (100%) rename vendor/{golang.org/x/sync => github.com/golang/protobuf}/CONTRIBUTORS (100%) create mode 100644 vendor/github.com/golang/protobuf/LICENSE create mode 100644 vendor/github.com/golang/protobuf/proto/buffer.go create mode 100644 vendor/github.com/golang/protobuf/proto/defaults.go create mode 100644 vendor/github.com/golang/protobuf/proto/deprecated.go create mode 100644 vendor/github.com/golang/protobuf/proto/discard.go create mode 100644 vendor/github.com/golang/protobuf/proto/extensions.go create mode 100644 vendor/github.com/golang/protobuf/proto/properties.go create mode 100644 vendor/github.com/golang/protobuf/proto/proto.go create mode 100644 vendor/github.com/golang/protobuf/proto/registry.go create mode 100644 vendor/github.com/golang/protobuf/proto/text_decode.go create mode 100644 vendor/github.com/golang/protobuf/proto/text_encode.go create mode 100644 vendor/github.com/golang/protobuf/proto/wire.go create mode 100644 vendor/github.com/golang/protobuf/proto/wrappers.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/any.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/doc.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/name.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/zero.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_references.go create mode 100644 vendor/github.com/google/uuid/.travis.yml create mode 100644 vendor/github.com/google/uuid/CONTRIBUTING.md create mode 100644 vendor/github.com/google/uuid/CONTRIBUTORS create mode 100644 vendor/github.com/google/uuid/LICENSE create mode 100644 vendor/github.com/google/uuid/README.md create mode 100644 vendor/github.com/google/uuid/dce.go create mode 100644 vendor/github.com/google/uuid/doc.go create mode 100644 vendor/github.com/google/uuid/hash.go create mode 100644 vendor/github.com/google/uuid/marshal.go create mode 100644 vendor/github.com/google/uuid/node.go create mode 100644 vendor/github.com/google/uuid/node_js.go create mode 100644 vendor/github.com/google/uuid/node_net.go create mode 100644 vendor/github.com/google/uuid/null.go create mode 100644 vendor/github.com/google/uuid/sql.go create mode 100644 vendor/github.com/google/uuid/time.go create 
mode 100644 vendor/github.com/google/uuid/util.go create mode 100644 vendor/github.com/google/uuid/uuid.go create mode 100644 vendor/github.com/google/uuid/version1.go create mode 100644 vendor/github.com/google/uuid/version4.go delete mode 100644 vendor/github.com/hashicorp/go-multierror/.travis.yml create mode 100644 vendor/github.com/hashicorp/go-multierror/group.go create mode 100644 vendor/github.com/imdario/mergo/.deepsource.toml create mode 100644 vendor/github.com/imdario/mergo/.gitignore create mode 100644 vendor/github.com/imdario/mergo/.travis.yml create mode 100644 vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/imdario/mergo/CONTRIBUTING.md create mode 100644 vendor/github.com/imdario/mergo/LICENSE create mode 100644 vendor/github.com/imdario/mergo/README.md create mode 100644 vendor/github.com/imdario/mergo/SECURITY.md create mode 100644 vendor/github.com/imdario/mergo/doc.go create mode 100644 vendor/github.com/imdario/mergo/map.go create mode 100644 vendor/github.com/imdario/mergo/merge.go create mode 100644 vendor/github.com/imdario/mergo/mergo.go create mode 100644 vendor/github.com/klauspost/compress/LICENSE create mode 100644 vendor/github.com/klauspost/compress/fse/README.md create mode 100644 vendor/github.com/klauspost/compress/fse/bitreader.go create mode 100644 vendor/github.com/klauspost/compress/fse/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/fse/bytereader.go create mode 100644 vendor/github.com/klauspost/compress/fse/compress.go create mode 100644 vendor/github.com/klauspost/compress/fse/decompress.go create mode 100644 vendor/github.com/klauspost/compress/fse/fse.go create mode 100644 vendor/github.com/klauspost/compress/huff0/.gitignore create mode 100644 vendor/github.com/klauspost/compress/huff0/README.md create mode 100644 vendor/github.com/klauspost/compress/huff0/bitreader.go create mode 100644 vendor/github.com/klauspost/compress/huff0/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/huff0/bytereader.go create mode 100644 vendor/github.com/klauspost/compress/huff0/compress.go create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress.go create mode 100644 vendor/github.com/klauspost/compress/huff0/huff0.go create mode 100644 vendor/github.com/klauspost/compress/snappy/.gitignore create mode 100644 vendor/github.com/klauspost/compress/snappy/AUTHORS create mode 100644 vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS create mode 100644 vendor/github.com/klauspost/compress/snappy/LICENSE create mode 100644 vendor/github.com/klauspost/compress/snappy/README create mode 100644 vendor/github.com/klauspost/compress/snappy/decode.go create mode 100644 vendor/github.com/klauspost/compress/snappy/decode_amd64.go create mode 100644 vendor/github.com/klauspost/compress/snappy/decode_amd64.s create mode 100644 vendor/github.com/klauspost/compress/snappy/decode_other.go create mode 100644 vendor/github.com/klauspost/compress/snappy/encode.go create mode 100644 vendor/github.com/klauspost/compress/snappy/encode_amd64.go create mode 100644 vendor/github.com/klauspost/compress/snappy/encode_amd64.s create mode 100644 vendor/github.com/klauspost/compress/snappy/encode_other.go create mode 100644 vendor/github.com/klauspost/compress/snappy/runbench.cmd create mode 100644 vendor/github.com/klauspost/compress/snappy/snappy.go create mode 100644 vendor/github.com/klauspost/compress/zstd/README.md create mode 100644 vendor/github.com/klauspost/compress/zstd/bitreader.go 
create mode 100644 vendor/github.com/klauspost/compress/zstd/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blockdec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blockenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blocktype_string.go create mode 100644 vendor/github.com/klauspost/compress/zstd/bytebuf.go create mode 100644 vendor/github.com/klauspost/compress/zstd/bytereader.go create mode 100644 vendor/github.com/klauspost/compress/zstd/decodeheader.go create mode 100644 vendor/github.com/klauspost/compress/zstd/decoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/decoder_options.go create mode 100644 vendor/github.com/klauspost/compress/zstd/dict.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_base.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_best.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_better.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_dfast.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_fast.go create mode 100644 vendor/github.com/klauspost/compress/zstd/encoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/encoder_options.go create mode 100644 vendor/github.com/klauspost/compress/zstd/framedec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/frameenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_encoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_predefined.go create mode 100644 vendor/github.com/klauspost/compress/zstd/hash.go create mode 100644 vendor/github.com/klauspost/compress/zstd/history.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/snappy.go create mode 100644 vendor/github.com/klauspost/compress/zstd/zstd.go create mode 100644 vendor/github.com/magiconair/properties/.gitignore create mode 100644 vendor/github.com/magiconair/properties/CHANGELOG.md create mode 100644 vendor/github.com/magiconair/properties/LICENSE.md create mode 100644 vendor/github.com/magiconair/properties/README.md create mode 100644 vendor/github.com/magiconair/properties/decode.go create mode 100644 vendor/github.com/magiconair/properties/doc.go create mode 100644 vendor/github.com/magiconair/properties/integrate.go create mode 100644 vendor/github.com/magiconair/properties/lex.go create mode 100644 vendor/github.com/magiconair/properties/load.go create mode 100644 vendor/github.com/magiconair/properties/parser.go create mode 100644 vendor/github.com/magiconair/properties/properties.go create mode 100644 vendor/github.com/magiconair/properties/rangecheck.go create mode 100644 
vendor/github.com/moby/patternmatcher/LICENSE create mode 100644 vendor/github.com/moby/patternmatcher/NOTICE create mode 100644 vendor/github.com/moby/patternmatcher/patternmatcher.go create mode 100644 vendor/github.com/moby/sys/sequential/LICENSE create mode 100644 vendor/github.com/moby/sys/sequential/doc.go create mode 100644 vendor/github.com/moby/sys/sequential/sequential_unix.go create mode 100644 vendor/github.com/moby/sys/sequential/sequential_windows.go create mode 100644 vendor/github.com/moby/term/.gitignore create mode 100644 vendor/github.com/moby/term/LICENSE create mode 100644 vendor/github.com/moby/term/README.md create mode 100644 vendor/github.com/moby/term/ascii.go create mode 100644 vendor/github.com/moby/term/doc.go create mode 100644 vendor/github.com/moby/term/proxy.go create mode 100644 vendor/github.com/moby/term/term.go create mode 100644 vendor/github.com/moby/term/term_unix.go create mode 100644 vendor/github.com/moby/term/term_windows.go create mode 100644 vendor/github.com/moby/term/termios_bsd.go create mode 100644 vendor/github.com/moby/term/termios_nonbsd.go create mode 100644 vendor/github.com/moby/term/termios_unix.go create mode 100644 vendor/github.com/moby/term/termios_windows.go create mode 100644 vendor/github.com/moby/term/windows/ansi_reader.go create mode 100644 vendor/github.com/moby/term/windows/ansi_writer.go create mode 100644 vendor/github.com/moby/term/windows/console.go create mode 100644 vendor/github.com/moby/term/windows/doc.go create mode 100644 vendor/github.com/morikuni/aec/LICENSE create mode 100644 vendor/github.com/morikuni/aec/README.md create mode 100644 vendor/github.com/morikuni/aec/aec.go create mode 100644 vendor/github.com/morikuni/aec/ansi.go create mode 100644 vendor/github.com/morikuni/aec/builder.go create mode 100644 vendor/github.com/morikuni/aec/sample.gif create mode 100644 vendor/github.com/morikuni/aec/sgr.go create mode 100644 vendor/github.com/opencontainers/go-digest/.mailmap create mode 100644 vendor/github.com/opencontainers/go-digest/.pullapprove.yml create mode 100644 vendor/github.com/opencontainers/go-digest/.travis.yml create mode 100644 vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md create mode 100644 vendor/github.com/opencontainers/go-digest/LICENSE create mode 100644 vendor/github.com/opencontainers/go-digest/LICENSE.docs create mode 100644 vendor/github.com/opencontainers/go-digest/MAINTAINERS create mode 100644 vendor/github.com/opencontainers/go-digest/README.md create mode 100644 vendor/github.com/opencontainers/go-digest/algorithm.go create mode 100644 vendor/github.com/opencontainers/go-digest/digest.go create mode 100644 vendor/github.com/opencontainers/go-digest/digester.go create mode 100644 vendor/github.com/opencontainers/go-digest/doc.go create mode 100644 vendor/github.com/opencontainers/go-digest/verifiers.go create mode 100644 vendor/github.com/opencontainers/image-spec/LICENSE create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go 
create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/version.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/versioned.go create mode 100644 vendor/github.com/opencontainers/runc/LICENSE create mode 100644 vendor/github.com/opencontainers/runc/NOTICE create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/user.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go create mode 100644 vendor/github.com/sirupsen/logrus/.gitignore create mode 100644 vendor/github.com/sirupsen/logrus/.golangci.yml create mode 100644 vendor/github.com/sirupsen/logrus/.travis.yml create mode 100644 vendor/github.com/sirupsen/logrus/CHANGELOG.md create mode 100644 vendor/github.com/sirupsen/logrus/LICENSE create mode 100644 vendor/github.com/sirupsen/logrus/README.md create mode 100644 vendor/github.com/sirupsen/logrus/alt_exit.go create mode 100644 vendor/github.com/sirupsen/logrus/appveyor.yml create mode 100644 vendor/github.com/sirupsen/logrus/buffer_pool.go create mode 100644 vendor/github.com/sirupsen/logrus/doc.go create mode 100644 vendor/github.com/sirupsen/logrus/entry.go create mode 100644 vendor/github.com/sirupsen/logrus/exported.go create mode 100644 vendor/github.com/sirupsen/logrus/formatter.go create mode 100644 vendor/github.com/sirupsen/logrus/hooks.go create mode 100644 vendor/github.com/sirupsen/logrus/json_formatter.go create mode 100644 vendor/github.com/sirupsen/logrus/logger.go create mode 100644 vendor/github.com/sirupsen/logrus/logrus.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_appengine.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_bsd.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_js.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_solaris.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_unix.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_windows.go create mode 100644 vendor/github.com/sirupsen/logrus/text_formatter.go create mode 100644 vendor/github.com/sirupsen/logrus/writer.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/.gitignore create mode 100644 vendor/github.com/testcontainers/testcontainers-go/.golangci.yml create mode 100644 vendor/github.com/testcontainers/testcontainers-go/CONTRIBUTING.md create mode 100644 vendor/github.com/testcontainers/testcontainers-go/LICENSE create mode 100644 vendor/github.com/testcontainers/testcontainers-go/Makefile create mode 100644 vendor/github.com/testcontainers/testcontainers-go/Pipfile create mode 100644 vendor/github.com/testcontainers/testcontainers-go/Pipfile.lock create mode 100644 vendor/github.com/testcontainers/testcontainers-go/README.md create mode 100644 vendor/github.com/testcontainers/testcontainers-go/RELEASING.md create mode 100644 vendor/github.com/testcontainers/testcontainers-go/commons-test.mk create mode 100644 vendor/github.com/testcontainers/testcontainers-go/config.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/container.go create mode 100644 
vendor/github.com/testcontainers/testcontainers-go/docker.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/docker_auth.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/docker_mounts.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/exec/processor.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/file.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/generic.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/internal/config/config.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/client.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/docker_host.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/docker_rootless.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/docker_socket.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/images.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/labels.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/internal/testcontainerssession/session.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/internal/version.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/lifecycle.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/logconsumer.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/logger.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/mkdocs.yml create mode 100644 vendor/github.com/testcontainers/testcontainers-go/mounts.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/network.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/parallel.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/provider.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/reaper.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/requirements.txt create mode 100644 vendor/github.com/testcontainers/testcontainers-go/runtime.txt create mode 100644 vendor/github.com/testcontainers/testcontainers-go/testcontainer.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/testing.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/all.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/errors.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/errors_windows.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/exec.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/exit.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/health.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/host_port.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/http.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/log.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/nop.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/sql.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/wait.go rename 
vendor/golang.org/x/{xerrors => exp}/LICENSE (96%) rename vendor/golang.org/x/{xerrors => exp}/PATENTS (100%) create mode 100644 vendor/golang.org/x/exp/constraints/constraints.go create mode 100644 vendor/golang.org/x/exp/slices/slices.go create mode 100644 vendor/golang.org/x/exp/slices/sort.go create mode 100644 vendor/golang.org/x/exp/slices/zsortfunc.go create mode 100644 vendor/golang.org/x/exp/slices/zsortordered.go create mode 100644 vendor/golang.org/x/net/LICENSE create mode 100644 vendor/golang.org/x/net/PATENTS create mode 100644 vendor/golang.org/x/net/internal/socks/client.go create mode 100644 vendor/golang.org/x/net/internal/socks/socks.go create mode 100644 vendor/golang.org/x/net/proxy/dial.go create mode 100644 vendor/golang.org/x/net/proxy/direct.go create mode 100644 vendor/golang.org/x/net/proxy/per_host.go create mode 100644 vendor/golang.org/x/net/proxy/proxy.go create mode 100644 vendor/golang.org/x/net/proxy/socks5.go create mode 100644 vendor/golang.org/x/sys/execabs/execabs.go create mode 100644 vendor/golang.org/x/sys/execabs/execabs_go118.go create mode 100644 vendor/golang.org/x/sys/execabs/execabs_go119.go create mode 100644 vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s create mode 100644 vendor/golang.org/x/sys/unix/asm_linux_loong64.s delete mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_386.go delete mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_arm.go delete mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/ioctl_signed.go rename vendor/golang.org/x/sys/unix/{ioctl.go => ioctl_unsigned.go} (77%) delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go delete mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_hurd.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_hurd_386.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_loong64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go delete mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s create mode 100644 
vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go delete mode 100644 vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go create mode 100644 vendor/golang.org/x/sys/windows/aliases.go create mode 100644 vendor/golang.org/x/sys/windows/dll_windows.go create mode 100644 vendor/golang.org/x/sys/windows/empty.s create mode 100644 vendor/golang.org/x/sys/windows/env_windows.go create mode 100644 vendor/golang.org/x/sys/windows/eventlog.go create mode 100644 vendor/golang.org/x/sys/windows/exec_windows.go create mode 100644 vendor/golang.org/x/sys/windows/memory_windows.go create mode 100644 vendor/golang.org/x/sys/windows/mkerrors.bash create mode 100644 vendor/golang.org/x/sys/windows/mkknownfolderids.bash create mode 100644 vendor/golang.org/x/sys/windows/mksyscall.go create mode 100644 vendor/golang.org/x/sys/windows/race.go create mode 100644 vendor/golang.org/x/sys/windows/race0.go create mode 100644 vendor/golang.org/x/sys/windows/security_windows.go create mode 100644 vendor/golang.org/x/sys/windows/service.go create mode 100644 vendor/golang.org/x/sys/windows/setupapi_windows.go rename vendor/golang.org/x/sys/{unix => windows}/str.go (62%) create mode 100644 vendor/golang.org/x/sys/windows/syscall.go create mode 100644 vendor/golang.org/x/sys/windows/syscall_windows.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows_386.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows_amd64.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows_arm.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows_arm64.go create mode 100644 vendor/golang.org/x/sys/windows/zerrors_windows.go create mode 100644 vendor/golang.org/x/sys/windows/zknownfolderids_windows.go create mode 100644 vendor/golang.org/x/sys/windows/zsyscall_windows.go delete mode 100644 vendor/golang.org/x/tools/AUTHORS delete mode 100644 vendor/golang.org/x/tools/CONTRIBUTORS delete mode 100644 vendor/golang.org/x/xerrors/README delete mode 100644 vendor/golang.org/x/xerrors/adaptor.go delete mode 100644 vendor/golang.org/x/xerrors/codereview.cfg delete mode 100644 vendor/golang.org/x/xerrors/doc.go delete mode 100644 vendor/golang.org/x/xerrors/errors.go delete mode 100644 vendor/golang.org/x/xerrors/fmt.go delete mode 100644 vendor/golang.org/x/xerrors/format.go delete mode 100644 vendor/golang.org/x/xerrors/frame.go delete mode 100644 vendor/golang.org/x/xerrors/wrap.go create mode 100644 vendor/google.golang.org/genproto/LICENSE create mode 100644 
vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go create mode 100644 vendor/google.golang.org/grpc/AUTHORS create mode 100644 vendor/google.golang.org/grpc/LICENSE create mode 100644 vendor/google.golang.org/grpc/NOTICE.txt create mode 100644 vendor/google.golang.org/grpc/codes/code_string.go create mode 100644 vendor/google.golang.org/grpc/codes/codes.go create mode 100644 vendor/google.golang.org/grpc/internal/status/status.go create mode 100644 vendor/google.golang.org/grpc/status/status.go rename vendor/{golang.org/x/sys => google.golang.org/protobuf}/AUTHORS (74%) rename vendor/{golang.org/x/sys => google.golang.org/protobuf}/CONTRIBUTORS (70%) create mode 100644 vendor/google.golang.org/protobuf/LICENSE create mode 100644 vendor/google.golang.org/protobuf/PATENTS create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/decode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/doc.go create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/encode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protowire/wire.go create mode 100644 vendor/google.golang.org/protobuf/internal/descfmt/stringer.go create mode 100644 vendor/google.golang.org/protobuf/internal/descopts/options.go create mode 100644 vendor/google.golang.org/protobuf/internal/detrand/rand.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/defval/default.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/doc.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/encode.go create mode 100644 vendor/google.golang.org/protobuf/internal/errors/errors.go create mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go112.go create mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go113.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/build.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go create mode 100644 vendor/google.golang.org/protobuf/internal/filetype/build.go create mode 100644 vendor/google.golang.org/protobuf/internal/flags/flags.go rename vendor/{golang.org/x/xerrors/internal/internal.go => google.golang.org/protobuf/internal/flags/proto_legacy_disable.go} (57%) create mode 100644 vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/any_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/api_gen.go create mode 100644 
vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/doc.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/duration_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/empty_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/goname.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/map_entry.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/struct_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/type_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/wrappers.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/api_export.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/checkinit.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_extension.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_field.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_message.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_tables.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert_list.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert_map.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/decode.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/encode.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/enum.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/extension.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_export.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_file.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_message.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/merge.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/merge_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go create mode 100644 
vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/validate.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/weak.go create mode 100644 vendor/google.golang.org/protobuf/internal/order/order.go create mode 100644 vendor/google.golang.org/protobuf/internal/order/range.go create mode 100644 vendor/google.golang.org/protobuf/internal/pragma/pragma.go create mode 100644 vendor/google.golang.org/protobuf/internal/set/ints.go create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings.go create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_pure.go create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/internal/version/version.go create mode 100644 vendor/google.golang.org/protobuf/proto/checkinit.go create mode 100644 vendor/google.golang.org/protobuf/proto/decode.go create mode 100644 vendor/google.golang.org/protobuf/proto/decode_gen.go create mode 100644 vendor/google.golang.org/protobuf/proto/doc.go create mode 100644 vendor/google.golang.org/protobuf/proto/encode.go create mode 100644 vendor/google.golang.org/protobuf/proto/encode_gen.go create mode 100644 vendor/google.golang.org/protobuf/proto/equal.go create mode 100644 vendor/google.golang.org/protobuf/proto/extension.go create mode 100644 vendor/google.golang.org/protobuf/proto/merge.go create mode 100644 vendor/google.golang.org/protobuf/proto/messageset.go create mode 100644 vendor/google.golang.org/protobuf/proto/proto.go create mode 100644 vendor/google.golang.org/protobuf/proto/proto_methods.go create mode 100644 vendor/google.golang.org/protobuf/proto/proto_reflect.go create mode 100644 vendor/google.golang.org/protobuf/proto/reset.go create mode 100644 vendor/google.golang.org/protobuf/proto/size.go create mode 100644 vendor/google.golang.org/protobuf/proto/size_gen.go create mode 100644 vendor/google.golang.org/protobuf/proto/wrappers.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/proto.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/source.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/type.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go create mode 100644 
vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoiface/methods.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoimpl/version.go create mode 100644 vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go create mode 100644 vendor/gotest.tools/v3/internal/assert/assert.go rename vendor/gotest.tools/v3/{ => internal}/assert/result.go (69%) delete mode 100644 vendor/gotest.tools/v3/x/subtest/context.go diff --git a/command/select.go b/command/select.go index fba5460d9..0f76a9dcc 100644 --- a/command/select.go +++ b/command/select.go @@ -139,6 +139,7 @@ func NewSelectCommand() *cli.Command { &cli.StringFlag{ Name: "delimiter", Usage: "delimiter of the csv file", + Value: ",", }, }, sharedFlags...), CustomHelpTemplate: defaultSelectHelpTemplate, diff --git a/e2e/select_test.go b/e2e/select_test.go index 34f3612e8..ba0722af7 100644 --- a/e2e/select_test.go +++ b/e2e/select_test.go @@ -24,7 +24,6 @@ query json(document) to csv -> not supported(?) func getSequentialFile(n int, inputForm, outputForm, structure string) (string, map[int]compareFunc) { sb := strings.Builder{} expectedLines := make(map[int]compareFunc) - for i := 0; i < n+1; i++ { var line string switch inputForm { @@ -34,20 +33,25 @@ func getSequentialFile(n int, inputForm, outputForm, structure string) (string, if i == n { break } - line = fmt.Sprintf(`{ "line": "%d", "id": "i%d", data: "some event %d" }`, i, i, i) + line = fmt.Sprintf(`{ "line": "%d", "id": "i%d", "data": "some event %d" }`, i, i, i) case "document": - if i == n { - break - } else if i == 0 { - line = fmt.Sprintf(`{\n"obj%d": {"line": "%d", "id": "i%d", data: "some event %d"}`, i, i, i, i) - } else if i == n-1 { - line = fmt.Sprintf(`"obj%d": {"line": "%d", "id": "i%d", data: "some event %d"}\n}`, i, i, i, i) + if i == 0 { + line = fmt.Sprintf("{\n%s", line) + } else if i == n { + line = fmt.Sprintf("%s\n}", line) } else { - line = fmt.Sprintf(`"obj%d": {"line": "%d", "id": "i%d", data: "some event %d"},`, i, i, i, i) + if i == n-1 { + line = fmt.Sprintf(` "obj%d": {"line": "%d", "id": "i%d", "data": "some event %d"}`, i, i, i, i) + } else { + line = fmt.Sprintf(` "obj%d": {"line": "%d", "id": "i%d", "data": "some event %d"},`, i, i, i, i) + } } } case "csv": + if i == n { + break + } if i == 0 { line = fmt.Sprintf("line%sid%sdata", structure, structure) } else { @@ -56,22 +60,66 @@ func getSequentialFile(n int, inputForm, outputForm, structure string) (string, } sb.WriteString(line) sb.WriteString("\n") - switch outputForm { + + switch inputForm { case "json": + // edge case + if structure == "document" && i == n { + totalLine := "" + expectedLines := make(map[int]compareFunc, 1) + for j := 0; j < n+1; j++ { + if j == 0 { + line = "{" + } else if j == n { + line = "}" + } else { + if j == n-1 { + line = fmt.Sprintf(`"obj%d":{"line":"%d","id":"i%d","data":"some event %d"}`, j, j, j, j) + } else { + line = fmt.Sprintf(`"obj%d":{"line":"%d","id":"i%d","data":"some event %d"},`, j, j, j, j) + } + } + totalLine = fmt.Sprintf("%s%s", totalLine, line) + } + expectedLines[0] = equals(totalLine) + return sb.String(), 
expectedLines + + } if i == n { break } - line = fmt.Sprintf(`{ "line": "%d", "id": "i%d", data: "some event %d" }`, i, i, i) - expectedLines[i] = equals(line) + switch outputForm { + case "csv": + structure := "," + line = fmt.Sprintf(`%d%si%d%ssome event %d`, i, structure, i, structure, i) + case "json": + if i != n { + line = fmt.Sprintf(`{"line":"%d","id":"i%d","data":"some event %d"}`, i, i, i) + } + + } case "csv": - structure := "," - if i == 0 { - line = fmt.Sprintf("line%sid%sdata", structure, structure) - } else { - line = fmt.Sprintf(`%d%si%d%s"some event %d"`, i-1, structure, i-1, structure, i-1) + if i == n { + break } - expectedLines[i] = equals(line) - default: + switch outputForm { + case "csv": + structure := "," + if i == 0 { + line = fmt.Sprintf("line%sid%sdata", structure, structure) + } else { + line = fmt.Sprintf(`%d%si%d%ssome event %d`, i-1, structure, i-1, structure, i-1) + } + case "json": + if i == 0 { + line = `{"_1":"line","_2":"id","_3":"data"}` + } else { + form := `{"_1":"%d","_2":"i%d","_3":"some event %d"}` + line = fmt.Sprintf(form, i-1, i-1, i-1) + } + } + } + if i != n { expectedLines[i] = equals(line) } } @@ -127,8 +175,8 @@ csv: default input structure and output default input structure and output as csv - \t input structure and default output - \t input structure and output as csv + tab input structure and default output + tab input structure and output as csv parquet: @@ -137,7 +185,6 @@ parquet: default input structure and output as csv */ func TestSelectCommand(t *testing.T) { - t.Skip() t.Parallel() /* ctx := context.Background() @@ -156,11 +203,10 @@ func TestSelectCommand(t *testing.T) { region := "us-east-1" accessKeyId := "minioadmin" secretKey := "minioadmin" - address := "http://127.0.0.1:9000" // The query is default for all cases, we want to assert the output // is as expected after a query. - query := "SELECT * FROM s3object s LIMIT 1" + query := "SELECT * FROM s3object s LIMIT 5" testcases := []struct { name string cmd []string @@ -211,7 +257,7 @@ func TestSelectCommand(t *testing.T) { structure: "document", out: "json", }, - { + /* { name: "json-lines select with document input structure and csv output", cmd: []string{ "select", @@ -226,7 +272,7 @@ func TestSelectCommand(t *testing.T) { in: "json", structure: "document", out: "csv", - }, + }, */ // This case is not supported by AWS itself. // csv { name: "csv select with default delimiter and output", @@ -261,7 +307,6 @@ func TestSelectCommand(t *testing.T) { "csv", "--delimiter", "\t", - "document", "--query", query, }, @@ -298,13 +343,11 @@ func TestSelectCommand(t *testing.T) { tc.cmd = append(tc.cmd, src) s3client, s5cmd := setup(t, withEndpointURL(address), withRegion(region), withAccessKeyId(accessKeyId), withSecretKey(secretKey)) - createBucket(t, s3client, bucket) putFile(t, s3client, bucket, filename, contents) - cmd := s5cmd(tc.cmd...) 
result := icmd.RunCmd(cmd, withEnv("AWS_ACCESS_KEY_ID", accessKeyId), withEnv("AWS_SECRET_ACCESS_KEY", secretKey)) - fmt.Println(result.Stderr()) + assertLines(t, result.Stdout(), expectedLines) }) } diff --git a/go.sum b/go.sum index 95e593fca..525bf27f5 100644 --- a/go.sum +++ b/go.sum @@ -1,26 +1,108 @@ -github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= +github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= +github.com/Microsoft/hcsshim v0.9.7 h1:mKNHW/Xvv1aFH87Jb6ERDzXTJTLPlmzfZ28VBFD/bfg= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/aws/aws-sdk-go v1.17.4/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.40.25 h1:Depnx7O86HWgOCLD5nMto6F9Ju85Q1QuFDnbpZYQWno= -github.com/aws/aws-sdk-go v1.40.25/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= +github.com/aws/aws-sdk-go v1.40.44 h1:kECaYybTWYZY5IKHvQMxbE6Wi5Qrb+7hbkV7zQV3Sg8= +github.com/aws/aws-sdk-go v1.40.44/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= +github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4= +github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= +github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= +github.com/containerd/containerd v1.6.19 h1:F0qgQPrG0P2JPgwpxWxYavrVeXAG0ezUIB9Z/4FTUAU= +github.com/containerd/containerd v1.6.19/go.mod h1:HZCDMn4v/Xl2579/MvtOC2M206i+JJ6VxFWU/NetrGY= +github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/dockercfg v0.3.1 
h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoYe9E= +github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v23.0.5+incompatible h1:DaxtlTJjFSnLOXVNUBU1+6kXGz2lpDoEAH6QoxaSg8k= +github.com/docker/docker v23.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334 h1:VHgatEHNcBFEB7inlalqfNqw65aNkM1lGX2yt3NmbS8= github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= github.com/igungor/gofakes3 v0.0.13 h1:rMi9KRo3jrqMyNnR9rc+d+cWTFR3mIQXnO1y+5gL98A= 
github.com/igungor/gofakes3 v0.0.13/go.mod h1:id+2y7yvTVoN1HW6VOtnfSqFt4EAfH5piVL4p9JsmQc= +github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= +github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= @@ -30,90 +112,213 @@ github.com/karrick/godirwalk v1.15.3 h1:0a2pXOgtB16CqIqXTiT7+K9L73f74n/aNQUnH6Or github.com/karrick/godirwalk v1.15.3/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.11.13 h1:eSvu8Tmq6j2psUJqJrLcWH6K3w5Dwc+qipbaA6eVEN4= +github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lanrat/extsort v1.0.0 h1:JjvkCUbD55+gs5s64FHmCU93kWjegEAM5n10XN6GB3c= github.com/lanrat/extsort v1.0.0/go.mod h1:bkDEvem4UnD1h87yKICydXs63mKrIGW3W9OGPMg93Ww= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo= +github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= +github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= +github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034= 
+github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= +github.com/opencontainers/runc v1.1.5 h1:L44KXEpKmfWDcS02aeGm8QNTFXTo2D+8MYGDIJ/GDEs= +github.com/opencontainers/runc v1.1.5/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8= github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8= +github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/shabbyrobe/gocovmerge v0.0.0-20180507124511-f6ea450bfb63 h1:J6qvD6rbmOil46orKqJaRPG+zTpoGlBTUdyv8ki63L0= github.com/shabbyrobe/gocovmerge v0.0.0-20180507124511-f6ea450bfb63/go.mod h1:n+VKSARF5y/tS9XFSP7vWDfS+GUC5vs/YT7M5XDTUEM= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod 
h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/termie/go-shutil v0.0.0-20140729215957-bcacb06fecae h1:vgGSvdW5Lqg+I1aZOlG32uyE6xHpLdKhZzcTEktz5wM= github.com/termie/go-shutil v0.0.0-20140729215957-bcacb06fecae/go.mod h1:quDq6Se6jlGwiIKia/itDZxqC5rj6/8OdFyMMAwTxCs= +github.com/testcontainers/testcontainers-go v0.21.0 h1:syePAxdeTzfkap+RrJaQZpJQ/s/fsUgn11xIvHrOE9U= +github.com/testcontainers/testcontainers-go v0.21.0/go.mod h1:c1ez3WVRHq7T/Aj+X3TIipFBwkBaNT5iNCY8+1b83Ng= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.11.2 h1:FVfNg4m3vbjbBpLYxW//WjxUoHvJ9TlppXcqY9Q9ZfA= github.com/urfave/cli/v2 v2.11.2/go.mod h1:f8iq5LtQ/bLxafbdBSLPPNsgaW0l/2fYYEHhAyPlwvo= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea h1:vLCWI/yYrdEHyN2JzIzPO3aaQJHQdp89IZBA/+azVC4= +golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190310074541-c10a0554eabf/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220405210540-1e041c57c461 h1:kHVeDEnfKn3T238CvrUcz6KeEsFHVaKh4kMTt6Wsysg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220405210540-1e041c57c461/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190308174544-00c44ba9c14f/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.1 h1:wGiQel/hW0NnEkJUk8lbzkX2gFJU6PFxf1v5OlCfuOs= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad h1:kqrS+lhvaMHCxul6sKQvKJ8nAAhlVItmZV822hYFH/U= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/storage/s3.go b/storage/s3.go index af0e1a209..f382ab19a 100644 --- a/storage/s3.go +++ b/storage/s3.go @@ -612,12 +612,14 @@ func (jd *JsonDecoder) Decode() ([]byte, error) { } type CsvDecoder struct { - decoder *csv.Reader + decoder *csv.Reader + delimiter string } func NewCsvDecoder(reader io.Reader) EventStreamDecoder { csvDecoder := &CsvDecoder{ - decoder: csv.NewReader(reader), + decoder: csv.NewReader(reader), + delimiter: ",", } // returned values from AWS has double quotes in it // so we enable lazy quotes @@ -631,8 +633,13 @@ func (cd *CsvDecoder) Decode() ([]byte, error) { return nil, err } - result := make([]byte, len(res)) - for _, str := range res { + result := []byte{} + for i, str := range res { + // csv results return carriage returns, which puts \x00 + // to byte array, which breaks the output result. + if i != len(res)-1 { + str = fmt.Sprintf("%s%s", str, cd.delimiter) + } result = append(result, []byte(str)...) 
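+		// fields are concatenated into one delimiter-joined record;
+		// the last field is appended without a trailing delimiter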
} return result, nil @@ -674,7 +681,7 @@ func parseInputSerialization(val string, delimiter string) (*s3.InputSerializati Parquet: &s3.ParquetInput{}, } default: - return nil, errors.New("Input serialization is not valid") + return nil, errors.New("input serialization is not valid") } return inputSerialization, nil @@ -689,7 +696,7 @@ func parseOutputSerialization(val string, reader io.Reader) (*s3.OutputSerializa outputSerialization = &s3.OutputSerialization{ JSON: &s3.JSONOutput{}, } - decoder = NewCsvDecoder(reader) + decoder = NewJsonDecoder(reader) case csvType: outputSerialization = &s3.OutputSerialization{ CSV: &s3.CSVOutput{ @@ -698,7 +705,7 @@ func parseOutputSerialization(val string, reader io.Reader) (*s3.OutputSerializa } decoder = NewCsvDecoder(reader) default: - return nil, nil, errors.New("Output serialization is not valid") + return nil, nil, errors.New("output serialization is not valid") } return outputSerialization, decoder, nil } diff --git a/vendor/github.com/Azure/go-ansiterm/LICENSE b/vendor/github.com/Azure/go-ansiterm/LICENSE new file mode 100644 index 000000000..e3d9a64d1 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Microsoft Corporation + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Azure/go-ansiterm/README.md b/vendor/github.com/Azure/go-ansiterm/README.md new file mode 100644 index 000000000..261c041e7 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/README.md @@ -0,0 +1,12 @@ +# go-ansiterm + +This is a cross platform Ansi Terminal Emulation library. It reads a stream of Ansi characters and produces the appropriate function calls. The results of the function calls are platform dependent. + +For example the parser might receive "ESC, [, A" as a stream of three characters. This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU). The parser then calls the cursor up function (CUU()) on an event handler. The event handler determines what platform specific work must be done to cause the cursor to move up one position. + +The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations, one for tests (test_event_handler.go) to validate that the expected events are being produced and called, the other is a Windows implementation (winterm/win_event_handler.go). 
+ +See parser_test.go for examples exercising the state machine and generating appropriate function calls. + +----- +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. diff --git a/vendor/github.com/Azure/go-ansiterm/constants.go b/vendor/github.com/Azure/go-ansiterm/constants.go new file mode 100644 index 000000000..96504a33b --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/constants.go @@ -0,0 +1,188 @@ +package ansiterm + +const LogEnv = "DEBUG_TERMINAL" + +// ANSI constants +// References: +// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm +// -- http://man7.org/linux/man-pages/man4/console_codes.4.html +// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html +// -- http://en.wikipedia.org/wiki/ANSI_escape_code +// -- http://vt100.net/emu/dec_ansi_parser +// -- http://vt100.net/emu/vt500_parser.svg +// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html +// -- http://www.inwap.com/pdp10/ansicode.txt +const ( + // ECMA-48 Set Graphics Rendition + // Note: + // -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved + // -- Fonts could possibly be supported via SetCurrentConsoleFontEx + // -- Windows does not expose the per-window cursor (i.e., caret) blink times + ANSI_SGR_RESET = 0 + ANSI_SGR_BOLD = 1 + ANSI_SGR_DIM = 2 + _ANSI_SGR_ITALIC = 3 + ANSI_SGR_UNDERLINE = 4 + _ANSI_SGR_BLINKSLOW = 5 + _ANSI_SGR_BLINKFAST = 6 + ANSI_SGR_REVERSE = 7 + _ANSI_SGR_INVISIBLE = 8 + _ANSI_SGR_LINETHROUGH = 9 + _ANSI_SGR_FONT_00 = 10 + _ANSI_SGR_FONT_01 = 11 + _ANSI_SGR_FONT_02 = 12 + _ANSI_SGR_FONT_03 = 13 + _ANSI_SGR_FONT_04 = 14 + _ANSI_SGR_FONT_05 = 15 + _ANSI_SGR_FONT_06 = 16 + _ANSI_SGR_FONT_07 = 17 + _ANSI_SGR_FONT_08 = 18 + _ANSI_SGR_FONT_09 = 19 + _ANSI_SGR_FONT_10 = 20 + _ANSI_SGR_DOUBLEUNDERLINE = 21 + ANSI_SGR_BOLD_DIM_OFF = 22 + _ANSI_SGR_ITALIC_OFF = 23 + ANSI_SGR_UNDERLINE_OFF = 24 + _ANSI_SGR_BLINK_OFF = 25 + _ANSI_SGR_RESERVED_00 = 26 + ANSI_SGR_REVERSE_OFF = 27 + _ANSI_SGR_INVISIBLE_OFF = 28 + _ANSI_SGR_LINETHROUGH_OFF = 29 + ANSI_SGR_FOREGROUND_BLACK = 30 + ANSI_SGR_FOREGROUND_RED = 31 + ANSI_SGR_FOREGROUND_GREEN = 32 + ANSI_SGR_FOREGROUND_YELLOW = 33 + ANSI_SGR_FOREGROUND_BLUE = 34 + ANSI_SGR_FOREGROUND_MAGENTA = 35 + ANSI_SGR_FOREGROUND_CYAN = 36 + ANSI_SGR_FOREGROUND_WHITE = 37 + _ANSI_SGR_RESERVED_01 = 38 + ANSI_SGR_FOREGROUND_DEFAULT = 39 + ANSI_SGR_BACKGROUND_BLACK = 40 + ANSI_SGR_BACKGROUND_RED = 41 + ANSI_SGR_BACKGROUND_GREEN = 42 + ANSI_SGR_BACKGROUND_YELLOW = 43 + ANSI_SGR_BACKGROUND_BLUE = 44 + ANSI_SGR_BACKGROUND_MAGENTA = 45 + ANSI_SGR_BACKGROUND_CYAN = 46 + ANSI_SGR_BACKGROUND_WHITE = 47 + _ANSI_SGR_RESERVED_02 = 48 + ANSI_SGR_BACKGROUND_DEFAULT = 49 + // 50 - 65: Unsupported + + ANSI_MAX_CMD_LENGTH = 4096 + + MAX_INPUT_EVENTS = 128 + DEFAULT_WIDTH = 80 + DEFAULT_HEIGHT = 24 + + ANSI_BEL = 0x07 + ANSI_BACKSPACE = 0x08 + ANSI_TAB = 0x09 + ANSI_LINE_FEED = 0x0A + ANSI_VERTICAL_TAB = 0x0B + ANSI_FORM_FEED = 0x0C + ANSI_CARRIAGE_RETURN = 0x0D + ANSI_ESCAPE_PRIMARY = 0x1B + ANSI_ESCAPE_SECONDARY = 0x5B + ANSI_OSC_STRING_ENTRY = 0x5D + ANSI_COMMAND_FIRST = 0x40 + ANSI_COMMAND_LAST = 0x7E + DCS_ENTRY = 0x90 + CSI_ENTRY = 0x9B + OSC_STRING = 0x9D + ANSI_PARAMETER_SEP = ";" + ANSI_CMD_G0 = '(' + ANSI_CMD_G1 = ')' 
+ ANSI_CMD_G2 = '*' + ANSI_CMD_G3 = '+' + ANSI_CMD_DECPNM = '>' + ANSI_CMD_DECPAM = '=' + ANSI_CMD_OSC = ']' + ANSI_CMD_STR_TERM = '\\' + + KEY_CONTROL_PARAM_2 = ";2" + KEY_CONTROL_PARAM_3 = ";3" + KEY_CONTROL_PARAM_4 = ";4" + KEY_CONTROL_PARAM_5 = ";5" + KEY_CONTROL_PARAM_6 = ";6" + KEY_CONTROL_PARAM_7 = ";7" + KEY_CONTROL_PARAM_8 = ";8" + KEY_ESC_CSI = "\x1B[" + KEY_ESC_N = "\x1BN" + KEY_ESC_O = "\x1BO" + + FILL_CHARACTER = ' ' +) + +func getByteRange(start byte, end byte) []byte { + bytes := make([]byte, 0, 32) + for i := start; i <= end; i++ { + bytes = append(bytes, byte(i)) + } + + return bytes +} + +var toGroundBytes = getToGroundBytes() +var executors = getExecuteBytes() + +// SPACE 20+A0 hex Always and everywhere a blank space +// Intermediate 20-2F hex !"#$%&'()*+,-./ +var intermeds = getByteRange(0x20, 0x2F) + +// Parameters 30-3F hex 0123456789:;<=>? +// CSI Parameters 30-39, 3B hex 0123456789; +var csiParams = getByteRange(0x30, 0x3F) + +var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...) + +// Uppercase 40-5F hex @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_ +var upperCase = getByteRange(0x40, 0x5F) + +// Lowercase 60-7E hex `abcdefghijlkmnopqrstuvwxyz{|}~ +var lowerCase = getByteRange(0x60, 0x7E) + +// Alphabetics 40-7E hex (all of upper and lower case) +var alphabetics = append(upperCase, lowerCase...) + +var printables = getByteRange(0x20, 0x7F) + +var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E) +var escapeToGroundBytes = getEscapeToGroundBytes() + +// See http://www.vt100.net/emu/vt500_parser.png for description of the complex +// byte ranges below + +func getEscapeToGroundBytes() []byte { + escapeToGroundBytes := getByteRange(0x30, 0x4F) + escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...) + escapeToGroundBytes = append(escapeToGroundBytes, 0x59) + escapeToGroundBytes = append(escapeToGroundBytes, 0x5A) + escapeToGroundBytes = append(escapeToGroundBytes, 0x5C) + escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...) + return escapeToGroundBytes +} + +func getExecuteBytes() []byte { + executeBytes := getByteRange(0x00, 0x17) + executeBytes = append(executeBytes, 0x19) + executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...) + return executeBytes +} + +func getToGroundBytes() []byte { + groundBytes := []byte{0x18} + groundBytes = append(groundBytes, 0x1A) + groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...) + groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...) 
+ groundBytes = append(groundBytes, 0x99) + groundBytes = append(groundBytes, 0x9A) + groundBytes = append(groundBytes, 0x9C) + return groundBytes +} + +// Delete 7F hex Always and everywhere ignored +// C1 Control 80-9F hex 32 additional control characters +// G1 Displayable A1-FE hex 94 additional displayable characters +// Special A0+FF hex Same as SPACE and DELETE diff --git a/vendor/github.com/Azure/go-ansiterm/context.go b/vendor/github.com/Azure/go-ansiterm/context.go new file mode 100644 index 000000000..8d66e777c --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/context.go @@ -0,0 +1,7 @@ +package ansiterm + +type ansiContext struct { + currentChar byte + paramBuffer []byte + interBuffer []byte +} diff --git a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go new file mode 100644 index 000000000..bcbe00d0c --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go @@ -0,0 +1,49 @@ +package ansiterm + +type csiEntryState struct { + baseState +} + +func (csiState csiEntryState) Handle(b byte) (s state, e error) { + csiState.parser.logf("CsiEntry::Handle %#x", b) + + nextState, err := csiState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(alphabetics, b): + return csiState.parser.ground, nil + case sliceContains(csiCollectables, b): + return csiState.parser.csiParam, nil + case sliceContains(executors, b): + return csiState, csiState.parser.execute() + } + + return csiState, nil +} + +func (csiState csiEntryState) Transition(s state) error { + csiState.parser.logf("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name()) + csiState.baseState.Transition(s) + + switch s { + case csiState.parser.ground: + return csiState.parser.csiDispatch() + case csiState.parser.csiParam: + switch { + case sliceContains(csiParams, csiState.parser.context.currentChar): + csiState.parser.collectParam() + case sliceContains(intermeds, csiState.parser.context.currentChar): + csiState.parser.collectInter() + } + } + + return nil +} + +func (csiState csiEntryState) Enter() error { + csiState.parser.clear() + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go new file mode 100644 index 000000000..7ed5e01c3 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go @@ -0,0 +1,38 @@ +package ansiterm + +type csiParamState struct { + baseState +} + +func (csiState csiParamState) Handle(b byte) (s state, e error) { + csiState.parser.logf("CsiParam::Handle %#x", b) + + nextState, err := csiState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(alphabetics, b): + return csiState.parser.ground, nil + case sliceContains(csiCollectables, b): + csiState.parser.collectParam() + return csiState, nil + case sliceContains(executors, b): + return csiState, csiState.parser.execute() + } + + return csiState, nil +} + +func (csiState csiParamState) Transition(s state) error { + csiState.parser.logf("CsiParam::Transition %s --> %s", csiState.Name(), s.Name()) + csiState.baseState.Transition(s) + + switch s { + case csiState.parser.ground: + return csiState.parser.csiDispatch() + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go new file mode 100644 index 000000000..1c719db9e --- 
/dev/null +++ b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go @@ -0,0 +1,36 @@ +package ansiterm + +type escapeIntermediateState struct { + baseState +} + +func (escState escapeIntermediateState) Handle(b byte) (s state, e error) { + escState.parser.logf("escapeIntermediateState::Handle %#x", b) + nextState, err := escState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(intermeds, b): + return escState, escState.parser.collectInter() + case sliceContains(executors, b): + return escState, escState.parser.execute() + case sliceContains(escapeIntermediateToGroundBytes, b): + return escState.parser.ground, nil + } + + return escState, nil +} + +func (escState escapeIntermediateState) Transition(s state) error { + escState.parser.logf("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name()) + escState.baseState.Transition(s) + + switch s { + case escState.parser.ground: + return escState.parser.escDispatch() + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/escape_state.go b/vendor/github.com/Azure/go-ansiterm/escape_state.go new file mode 100644 index 000000000..6390abd23 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/escape_state.go @@ -0,0 +1,47 @@ +package ansiterm + +type escapeState struct { + baseState +} + +func (escState escapeState) Handle(b byte) (s state, e error) { + escState.parser.logf("escapeState::Handle %#x", b) + nextState, err := escState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case b == ANSI_ESCAPE_SECONDARY: + return escState.parser.csiEntry, nil + case b == ANSI_OSC_STRING_ENTRY: + return escState.parser.oscString, nil + case sliceContains(executors, b): + return escState, escState.parser.execute() + case sliceContains(escapeToGroundBytes, b): + return escState.parser.ground, nil + case sliceContains(intermeds, b): + return escState.parser.escapeIntermediate, nil + } + + return escState, nil +} + +func (escState escapeState) Transition(s state) error { + escState.parser.logf("Escape::Transition %s --> %s", escState.Name(), s.Name()) + escState.baseState.Transition(s) + + switch s { + case escState.parser.ground: + return escState.parser.escDispatch() + case escState.parser.escapeIntermediate: + return escState.parser.collectInter() + } + + return nil +} + +func (escState escapeState) Enter() error { + escState.parser.clear() + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/event_handler.go b/vendor/github.com/Azure/go-ansiterm/event_handler.go new file mode 100644 index 000000000..98087b38c --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/event_handler.go @@ -0,0 +1,90 @@ +package ansiterm + +type AnsiEventHandler interface { + // Print + Print(b byte) error + + // Execute C0 commands + Execute(b byte) error + + // CUrsor Up + CUU(int) error + + // CUrsor Down + CUD(int) error + + // CUrsor Forward + CUF(int) error + + // CUrsor Backward + CUB(int) error + + // Cursor to Next Line + CNL(int) error + + // Cursor to Previous Line + CPL(int) error + + // Cursor Horizontal position Absolute + CHA(int) error + + // Vertical line Position Absolute + VPA(int) error + + // CUrsor Position + CUP(int, int) error + + // Horizontal and Vertical Position (depends on PUM) + HVP(int, int) error + + // Text Cursor Enable Mode + DECTCEM(bool) error + + // Origin Mode + DECOM(bool) error + + // 132 Column Mode + DECCOLM(bool) error + + // Erase in Display + ED(int) 
error + + // Erase in Line + EL(int) error + + // Insert Line + IL(int) error + + // Delete Line + DL(int) error + + // Insert Character + ICH(int) error + + // Delete Character + DCH(int) error + + // Set Graphics Rendition + SGR([]int) error + + // Pan Down + SU(int) error + + // Pan Up + SD(int) error + + // Device Attributes + DA([]string) error + + // Set Top and Bottom Margins + DECSTBM(int, int) error + + // Index + IND() error + + // Reverse Index + RI() error + + // Flush updates from previous commands + Flush() error +} diff --git a/vendor/github.com/Azure/go-ansiterm/ground_state.go b/vendor/github.com/Azure/go-ansiterm/ground_state.go new file mode 100644 index 000000000..52451e946 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/ground_state.go @@ -0,0 +1,24 @@ +package ansiterm + +type groundState struct { + baseState +} + +func (gs groundState) Handle(b byte) (s state, e error) { + gs.parser.context.currentChar = b + + nextState, err := gs.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(printables, b): + return gs, gs.parser.print() + + case sliceContains(executors, b): + return gs, gs.parser.execute() + } + + return gs, nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go new file mode 100644 index 000000000..593b10ab6 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go @@ -0,0 +1,31 @@ +package ansiterm + +type oscStringState struct { + baseState +} + +func (oscState oscStringState) Handle(b byte) (s state, e error) { + oscState.parser.logf("OscString::Handle %#x", b) + nextState, err := oscState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case isOscStringTerminator(b): + return oscState.parser.ground, nil + } + + return oscState, nil +} + +// See below for OSC string terminators for linux +// http://man7.org/linux/man-pages/man4/console_codes.4.html +func isOscStringTerminator(b byte) bool { + + if b == ANSI_BEL || b == 0x5C { + return true + } + + return false +} diff --git a/vendor/github.com/Azure/go-ansiterm/parser.go b/vendor/github.com/Azure/go-ansiterm/parser.go new file mode 100644 index 000000000..03cec7ada --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/parser.go @@ -0,0 +1,151 @@ +package ansiterm + +import ( + "errors" + "log" + "os" +) + +type AnsiParser struct { + currState state + eventHandler AnsiEventHandler + context *ansiContext + csiEntry state + csiParam state + dcsEntry state + escape state + escapeIntermediate state + error state + ground state + oscString state + stateMap []state + + logf func(string, ...interface{}) +} + +type Option func(*AnsiParser) + +func WithLogf(f func(string, ...interface{})) Option { + return func(ap *AnsiParser) { + ap.logf = f + } +} + +func CreateParser(initialState string, evtHandler AnsiEventHandler, opts ...Option) *AnsiParser { + ap := &AnsiParser{ + eventHandler: evtHandler, + context: &ansiContext{}, + } + for _, o := range opts { + o(ap) + } + + if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" { + logFile, _ := os.Create("ansiParser.log") + logger := log.New(logFile, "", log.LstdFlags) + if ap.logf != nil { + l := ap.logf + ap.logf = func(s string, v ...interface{}) { + l(s, v...) + logger.Printf(s, v...) 
+ } + } else { + ap.logf = logger.Printf + } + } + + if ap.logf == nil { + ap.logf = func(string, ...interface{}) {} + } + + ap.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: ap}} + ap.csiParam = csiParamState{baseState{name: "CsiParam", parser: ap}} + ap.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: ap}} + ap.escape = escapeState{baseState{name: "Escape", parser: ap}} + ap.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: ap}} + ap.error = errorState{baseState{name: "Error", parser: ap}} + ap.ground = groundState{baseState{name: "Ground", parser: ap}} + ap.oscString = oscStringState{baseState{name: "OscString", parser: ap}} + + ap.stateMap = []state{ + ap.csiEntry, + ap.csiParam, + ap.dcsEntry, + ap.escape, + ap.escapeIntermediate, + ap.error, + ap.ground, + ap.oscString, + } + + ap.currState = getState(initialState, ap.stateMap) + + ap.logf("CreateParser: parser %p", ap) + return ap +} + +func getState(name string, states []state) state { + for _, el := range states { + if el.Name() == name { + return el + } + } + + return nil +} + +func (ap *AnsiParser) Parse(bytes []byte) (int, error) { + for i, b := range bytes { + if err := ap.handle(b); err != nil { + return i, err + } + } + + return len(bytes), ap.eventHandler.Flush() +} + +func (ap *AnsiParser) handle(b byte) error { + ap.context.currentChar = b + newState, err := ap.currState.Handle(b) + if err != nil { + return err + } + + if newState == nil { + ap.logf("WARNING: newState is nil") + return errors.New("New state of 'nil' is invalid.") + } + + if newState != ap.currState { + if err := ap.changeState(newState); err != nil { + return err + } + } + + return nil +} + +func (ap *AnsiParser) changeState(newState state) error { + ap.logf("ChangeState %s --> %s", ap.currState.Name(), newState.Name()) + + // Exit old state + if err := ap.currState.Exit(); err != nil { + ap.logf("Exit state '%s' failed with : '%v'", ap.currState.Name(), err) + return err + } + + // Perform transition action + if err := ap.currState.Transition(newState); err != nil { + ap.logf("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name, err) + return err + } + + // Enter new state + if err := newState.Enter(); err != nil { + ap.logf("Enter state '%s' failed with: '%v'", newState.Name(), err) + return err + } + + ap.currState = newState + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go new file mode 100644 index 000000000..de0a1f9cd --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go @@ -0,0 +1,99 @@ +package ansiterm + +import ( + "strconv" +) + +func parseParams(bytes []byte) ([]string, error) { + paramBuff := make([]byte, 0, 0) + params := []string{} + + for _, v := range bytes { + if v == ';' { + if len(paramBuff) > 0 { + // Completed parameter, append it to the list + s := string(paramBuff) + params = append(params, s) + paramBuff = make([]byte, 0, 0) + } + } else { + paramBuff = append(paramBuff, v) + } + } + + // Last parameter may not be terminated with ';' + if len(paramBuff) > 0 { + s := string(paramBuff) + params = append(params, s) + } + + return params, nil +} + +func parseCmd(context ansiContext) (string, error) { + return string(context.currentChar), nil +} + +func getInt(params []string, dflt int) int { + i := getInts(params, 1, dflt)[0] + return i +} + +func getInts(params []string, minCount int, dflt int) []int { + 
ints := []int{} + + for _, v := range params { + i, _ := strconv.Atoi(v) + // Zero is mapped to the default value in VT100. + if i == 0 { + i = dflt + } + ints = append(ints, i) + } + + if len(ints) < minCount { + remaining := minCount - len(ints) + for i := 0; i < remaining; i++ { + ints = append(ints, dflt) + } + } + + return ints +} + +func (ap *AnsiParser) modeDispatch(param string, set bool) error { + switch param { + case "?3": + return ap.eventHandler.DECCOLM(set) + case "?6": + return ap.eventHandler.DECOM(set) + case "?25": + return ap.eventHandler.DECTCEM(set) + } + return nil +} + +func (ap *AnsiParser) hDispatch(params []string) error { + if len(params) == 1 { + return ap.modeDispatch(params[0], true) + } + + return nil +} + +func (ap *AnsiParser) lDispatch(params []string) error { + if len(params) == 1 { + return ap.modeDispatch(params[0], false) + } + + return nil +} + +func getEraseParam(params []string) int { + param := getInt(params, 0) + if param < 0 || 3 < param { + param = 0 + } + + return param +} diff --git a/vendor/github.com/Azure/go-ansiterm/parser_actions.go b/vendor/github.com/Azure/go-ansiterm/parser_actions.go new file mode 100644 index 000000000..0bb5e51e9 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/parser_actions.go @@ -0,0 +1,119 @@ +package ansiterm + +func (ap *AnsiParser) collectParam() error { + currChar := ap.context.currentChar + ap.logf("collectParam %#x", currChar) + ap.context.paramBuffer = append(ap.context.paramBuffer, currChar) + return nil +} + +func (ap *AnsiParser) collectInter() error { + currChar := ap.context.currentChar + ap.logf("collectInter %#x", currChar) + ap.context.paramBuffer = append(ap.context.interBuffer, currChar) + return nil +} + +func (ap *AnsiParser) escDispatch() error { + cmd, _ := parseCmd(*ap.context) + intermeds := ap.context.interBuffer + ap.logf("escDispatch currentChar: %#x", ap.context.currentChar) + ap.logf("escDispatch: %v(%v)", cmd, intermeds) + + switch cmd { + case "D": // IND + return ap.eventHandler.IND() + case "E": // NEL, equivalent to CRLF + err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN) + if err == nil { + err = ap.eventHandler.Execute(ANSI_LINE_FEED) + } + return err + case "M": // RI + return ap.eventHandler.RI() + } + + return nil +} + +func (ap *AnsiParser) csiDispatch() error { + cmd, _ := parseCmd(*ap.context) + params, _ := parseParams(ap.context.paramBuffer) + ap.logf("Parsed params: %v with length: %d", params, len(params)) + + ap.logf("csiDispatch: %v(%v)", cmd, params) + + switch cmd { + case "@": + return ap.eventHandler.ICH(getInt(params, 1)) + case "A": + return ap.eventHandler.CUU(getInt(params, 1)) + case "B": + return ap.eventHandler.CUD(getInt(params, 1)) + case "C": + return ap.eventHandler.CUF(getInt(params, 1)) + case "D": + return ap.eventHandler.CUB(getInt(params, 1)) + case "E": + return ap.eventHandler.CNL(getInt(params, 1)) + case "F": + return ap.eventHandler.CPL(getInt(params, 1)) + case "G": + return ap.eventHandler.CHA(getInt(params, 1)) + case "H": + ints := getInts(params, 2, 1) + x, y := ints[0], ints[1] + return ap.eventHandler.CUP(x, y) + case "J": + param := getEraseParam(params) + return ap.eventHandler.ED(param) + case "K": + param := getEraseParam(params) + return ap.eventHandler.EL(param) + case "L": + return ap.eventHandler.IL(getInt(params, 1)) + case "M": + return ap.eventHandler.DL(getInt(params, 1)) + case "P": + return ap.eventHandler.DCH(getInt(params, 1)) + case "S": + return ap.eventHandler.SU(getInt(params, 1)) + case "T": + return 
ap.eventHandler.SD(getInt(params, 1)) + case "c": + return ap.eventHandler.DA(params) + case "d": + return ap.eventHandler.VPA(getInt(params, 1)) + case "f": + ints := getInts(params, 2, 1) + x, y := ints[0], ints[1] + return ap.eventHandler.HVP(x, y) + case "h": + return ap.hDispatch(params) + case "l": + return ap.lDispatch(params) + case "m": + return ap.eventHandler.SGR(getInts(params, 1, 0)) + case "r": + ints := getInts(params, 2, 1) + top, bottom := ints[0], ints[1] + return ap.eventHandler.DECSTBM(top, bottom) + default: + ap.logf("ERROR: Unsupported CSI command: '%s', with full context: %v", cmd, ap.context) + return nil + } + +} + +func (ap *AnsiParser) print() error { + return ap.eventHandler.Print(ap.context.currentChar) +} + +func (ap *AnsiParser) clear() error { + ap.context = &ansiContext{} + return nil +} + +func (ap *AnsiParser) execute() error { + return ap.eventHandler.Execute(ap.context.currentChar) +} diff --git a/vendor/github.com/Azure/go-ansiterm/states.go b/vendor/github.com/Azure/go-ansiterm/states.go new file mode 100644 index 000000000..f2ea1fcd1 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/states.go @@ -0,0 +1,71 @@ +package ansiterm + +type stateID int + +type state interface { + Enter() error + Exit() error + Handle(byte) (state, error) + Name() string + Transition(state) error +} + +type baseState struct { + name string + parser *AnsiParser +} + +func (base baseState) Enter() error { + return nil +} + +func (base baseState) Exit() error { + return nil +} + +func (base baseState) Handle(b byte) (s state, e error) { + + switch { + case b == CSI_ENTRY: + return base.parser.csiEntry, nil + case b == DCS_ENTRY: + return base.parser.dcsEntry, nil + case b == ANSI_ESCAPE_PRIMARY: + return base.parser.escape, nil + case b == OSC_STRING: + return base.parser.oscString, nil + case sliceContains(toGroundBytes, b): + return base.parser.ground, nil + } + + return nil, nil +} + +func (base baseState) Name() string { + return base.name +} + +func (base baseState) Transition(s state) error { + if s == base.parser.ground { + execBytes := []byte{0x18} + execBytes = append(execBytes, 0x1A) + execBytes = append(execBytes, getByteRange(0x80, 0x8F)...) + execBytes = append(execBytes, getByteRange(0x91, 0x97)...) 
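+		// 0x90 (DCS) and 0x98 (SOS) are left out of this set; they introduce control strings rather than directly executable controls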
+ execBytes = append(execBytes, 0x99) + execBytes = append(execBytes, 0x9A) + + if sliceContains(execBytes, base.parser.context.currentChar) { + return base.parser.execute() + } + } + + return nil +} + +type dcsEntryState struct { + baseState +} + +type errorState struct { + baseState +} diff --git a/vendor/github.com/Azure/go-ansiterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/utilities.go new file mode 100644 index 000000000..392114493 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/utilities.go @@ -0,0 +1,21 @@ +package ansiterm + +import ( + "strconv" +) + +func sliceContains(bytes []byte, b byte) bool { + for _, v := range bytes { + if v == b { + return true + } + } + + return false +} + +func convertBytesToInteger(bytes []byte) int { + s := string(bytes) + i, _ := strconv.Atoi(s) + return i +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go new file mode 100644 index 000000000..5599082ae --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go @@ -0,0 +1,196 @@ +// +build windows + +package winterm + +import ( + "fmt" + "os" + "strconv" + "strings" + "syscall" + + "github.com/Azure/go-ansiterm" + windows "golang.org/x/sys/windows" +) + +// Windows keyboard constants +// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx. +const ( + VK_PRIOR = 0x21 // PAGE UP key + VK_NEXT = 0x22 // PAGE DOWN key + VK_END = 0x23 // END key + VK_HOME = 0x24 // HOME key + VK_LEFT = 0x25 // LEFT ARROW key + VK_UP = 0x26 // UP ARROW key + VK_RIGHT = 0x27 // RIGHT ARROW key + VK_DOWN = 0x28 // DOWN ARROW key + VK_SELECT = 0x29 // SELECT key + VK_PRINT = 0x2A // PRINT key + VK_EXECUTE = 0x2B // EXECUTE key + VK_SNAPSHOT = 0x2C // PRINT SCREEN key + VK_INSERT = 0x2D // INS key + VK_DELETE = 0x2E // DEL key + VK_HELP = 0x2F // HELP key + VK_F1 = 0x70 // F1 key + VK_F2 = 0x71 // F2 key + VK_F3 = 0x72 // F3 key + VK_F4 = 0x73 // F4 key + VK_F5 = 0x74 // F5 key + VK_F6 = 0x75 // F6 key + VK_F7 = 0x76 // F7 key + VK_F8 = 0x77 // F8 key + VK_F9 = 0x78 // F9 key + VK_F10 = 0x79 // F10 key + VK_F11 = 0x7A // F11 key + VK_F12 = 0x7B // F12 key + + RIGHT_ALT_PRESSED = 0x0001 + LEFT_ALT_PRESSED = 0x0002 + RIGHT_CTRL_PRESSED = 0x0004 + LEFT_CTRL_PRESSED = 0x0008 + SHIFT_PRESSED = 0x0010 + NUMLOCK_ON = 0x0020 + SCROLLLOCK_ON = 0x0040 + CAPSLOCK_ON = 0x0080 + ENHANCED_KEY = 0x0100 +) + +type ansiCommand struct { + CommandBytes []byte + Command string + Parameters []string + IsSpecial bool +} + +func newAnsiCommand(command []byte) *ansiCommand { + + if isCharacterSelectionCmdChar(command[1]) { + // Is Character Set Selection commands + return &ansiCommand{ + CommandBytes: command, + Command: string(command), + IsSpecial: true, + } + } + + // last char is command character + lastCharIndex := len(command) - 1 + + ac := &ansiCommand{ + CommandBytes: command, + Command: string(command[lastCharIndex]), + IsSpecial: false, + } + + // more than a single escape + if lastCharIndex != 0 { + start := 1 + // skip if double char escape sequence + if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY { + start++ + } + // convert this to GetNextParam method + ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP) + } + + return ac +} + +func (ac *ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 { + if index < 0 || index >= len(ac.Parameters) { + return defaultValue + } + + param, err := 
strconv.ParseInt(ac.Parameters[index], 10, 16) + if err != nil { + return defaultValue + } + + return int16(param) +} + +func (ac *ansiCommand) String() string { + return fmt.Sprintf("0x%v \"%v\" (\"%v\")", + bytesToHex(ac.CommandBytes), + ac.Command, + strings.Join(ac.Parameters, "\",\"")) +} + +// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands. +// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html. +func isAnsiCommandChar(b byte) bool { + switch { + case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY: + return true + case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM: + // non-CSI escape sequence terminator + return true + case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL: + // String escape sequence terminator + return true + } + return false +} + +func isXtermOscSequence(command []byte, current byte) bool { + return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL) +} + +func isCharacterSelectionCmdChar(b byte) bool { + return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3) +} + +// bytesToHex converts a slice of bytes to a human-readable string. +func bytesToHex(b []byte) string { + hex := make([]string, len(b)) + for i, ch := range b { + hex[i] = fmt.Sprintf("%X", ch) + } + return strings.Join(hex, "") +} + +// ensureInRange adjusts the passed value, if necessary, to ensure it is within +// the passed min / max range. +func ensureInRange(n int16, min int16, max int16) int16 { + if n < min { + return min + } else if n > max { + return max + } else { + return n + } +} + +func GetStdFile(nFile int) (*os.File, uintptr) { + var file *os.File + + // syscall uses negative numbers + // windows package uses very big uint32 + // Keep these switches split so we don't have to convert ints too much. + switch uint32(nFile) { + case windows.STD_INPUT_HANDLE: + file = os.Stdin + case windows.STD_OUTPUT_HANDLE: + file = os.Stdout + case windows.STD_ERROR_HANDLE: + file = os.Stderr + default: + switch nFile { + case syscall.STD_INPUT_HANDLE: + file = os.Stdin + case syscall.STD_OUTPUT_HANDLE: + file = os.Stdout + case syscall.STD_ERROR_HANDLE: + file = os.Stderr + default: + panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile)) + } + } + + fd, err := syscall.GetStdHandle(nFile) + if err != nil { + panic(fmt.Errorf("Invalid standard handle identifier: %v -- %v", nFile, err)) + } + + return file, uintptr(fd) +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/api.go b/vendor/github.com/Azure/go-ansiterm/winterm/api.go new file mode 100644 index 000000000..6055e33b9 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/api.go @@ -0,0 +1,327 @@ +// +build windows + +package winterm + +import ( + "fmt" + "syscall" + "unsafe" +) + +//=========================================================================================================== +// IMPORTANT NOTE: +// +// The methods below make extensive use of the "unsafe" package to obtain the required pointers. +// Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack +// variables) the pointers reference *before* the API completes. 
+// +// As a result, in those cases, the code must hint that the variables remain in active by invoking the +// dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer +// require unsafe pointers. +// +// If you add or modify methods, ENSURE protection of local variables through the "use" builtin to inform +// the garbage collector the variables remain in use if: +// +// -- The value is not a pointer (e.g., int32, struct) +// -- The value is not referenced by the method after passing the pointer to Windows +// +// See http://golang.org/doc/go1.3. +//=========================================================================================================== + +var ( + kernel32DLL = syscall.NewLazyDLL("kernel32.dll") + + getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo") + setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo") + setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition") + setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode") + getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo") + setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize") + scrollConsoleScreenBufferProc = kernel32DLL.NewProc("ScrollConsoleScreenBufferA") + setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute") + setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo") + writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW") + readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW") + waitForSingleObjectProc = kernel32DLL.NewProc("WaitForSingleObject") +) + +// Windows Console constants +const ( + // Console modes + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. + ENABLE_PROCESSED_INPUT = 0x0001 + ENABLE_LINE_INPUT = 0x0002 + ENABLE_ECHO_INPUT = 0x0004 + ENABLE_WINDOW_INPUT = 0x0008 + ENABLE_MOUSE_INPUT = 0x0010 + ENABLE_INSERT_MODE = 0x0020 + ENABLE_QUICK_EDIT_MODE = 0x0040 + ENABLE_EXTENDED_FLAGS = 0x0080 + ENABLE_AUTO_POSITION = 0x0100 + ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200 + + ENABLE_PROCESSED_OUTPUT = 0x0001 + ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 + ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004 + DISABLE_NEWLINE_AUTO_RETURN = 0x0008 + ENABLE_LVB_GRID_WORLDWIDE = 0x0010 + + // Character attributes + // Note: + // -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan). + // Clearing all foreground or background colors results in black; setting all creates white. + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes. + FOREGROUND_BLUE uint16 = 0x0001 + FOREGROUND_GREEN uint16 = 0x0002 + FOREGROUND_RED uint16 = 0x0004 + FOREGROUND_INTENSITY uint16 = 0x0008 + FOREGROUND_MASK uint16 = 0x000F + + BACKGROUND_BLUE uint16 = 0x0010 + BACKGROUND_GREEN uint16 = 0x0020 + BACKGROUND_RED uint16 = 0x0040 + BACKGROUND_INTENSITY uint16 = 0x0080 + BACKGROUND_MASK uint16 = 0x00F0 + + COMMON_LVB_MASK uint16 = 0xFF00 + COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000 + COMMON_LVB_UNDERSCORE uint16 = 0x8000 + + // Input event types + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. 
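+	// (these values populate the EventType field of INPUT_RECORD, defined below)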
+ KEY_EVENT = 0x0001 + MOUSE_EVENT = 0x0002 + WINDOW_BUFFER_SIZE_EVENT = 0x0004 + MENU_EVENT = 0x0008 + FOCUS_EVENT = 0x0010 + + // WaitForSingleObject return codes + WAIT_ABANDONED = 0x00000080 + WAIT_FAILED = 0xFFFFFFFF + WAIT_SIGNALED = 0x0000000 + WAIT_TIMEOUT = 0x00000102 + + // WaitForSingleObject wait duration + WAIT_INFINITE = 0xFFFFFFFF + WAIT_ONE_SECOND = 1000 + WAIT_HALF_SECOND = 500 + WAIT_QUARTER_SECOND = 250 +) + +// Windows API Console types +// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD) +// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment +type ( + CHAR_INFO struct { + UnicodeChar uint16 + Attributes uint16 + } + + CONSOLE_CURSOR_INFO struct { + Size uint32 + Visible int32 + } + + CONSOLE_SCREEN_BUFFER_INFO struct { + Size COORD + CursorPosition COORD + Attributes uint16 + Window SMALL_RECT + MaximumWindowSize COORD + } + + COORD struct { + X int16 + Y int16 + } + + SMALL_RECT struct { + Left int16 + Top int16 + Right int16 + Bottom int16 + } + + // INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case, it is also the largest + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. + INPUT_RECORD struct { + EventType uint16 + KeyEvent KEY_EVENT_RECORD + } + + KEY_EVENT_RECORD struct { + KeyDown int32 + RepeatCount uint16 + VirtualKeyCode uint16 + VirtualScanCode uint16 + UnicodeChar uint16 + ControlKeyState uint32 + } + + WINDOW_BUFFER_SIZE struct { + Size COORD + } +) + +// boolToBOOL converts a Go bool into a Windows int32. +func boolToBOOL(f bool) int32 { + if f { + return int32(1) + } else { + return int32(0) + } +} + +// GetConsoleCursorInfo retrieves information about the size and visiblity of the console cursor. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx. +func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { + r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) + return checkError(r1, r2, err) +} + +// SetConsoleCursorInfo sets the size and visiblity of the console cursor. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx. +func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { + r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) + return checkError(r1, r2, err) +} + +// SetConsoleCursorPosition location of the console cursor. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx. +func SetConsoleCursorPosition(handle uintptr, coord COORD) error { + r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord)) + use(coord) + return checkError(r1, r2, err) +} + +// GetConsoleMode gets the console mode for given file descriptor +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx. +func GetConsoleMode(handle uintptr) (mode uint32, err error) { + err = syscall.GetConsoleMode(syscall.Handle(handle), &mode) + return mode, err +} + +// SetConsoleMode sets the console mode for given file descriptor +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. 
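+// The mode is a bitmask of the ENABLE_* constants declared above; for example:
+//   mode, _ := GetConsoleMode(h)
+//   err := SetConsoleMode(h, mode|ENABLE_VIRTUAL_TERMINAL_PROCESSING)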
+func SetConsoleMode(handle uintptr, mode uint32) error { + r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0) + use(mode) + return checkError(r1, r2, err) +} + +// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer. +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx. +func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) { + info := CONSOLE_SCREEN_BUFFER_INFO{} + err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0)) + if err != nil { + return nil, err + } + return &info, nil +} + +func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error { + r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char))) + use(scrollRect) + use(clipRect) + use(destOrigin) + use(char) + return checkError(r1, r2, err) +} + +// SetConsoleScreenBufferSize sets the size of the console screen buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx. +func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error { + r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord)) + use(coord) + return checkError(r1, r2, err) +} + +// SetConsoleTextAttribute sets the attributes of characters written to the +// console screen buffer by the WriteFile or WriteConsole function. +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx. +func SetConsoleTextAttribute(handle uintptr, attribute uint16) error { + r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0) + use(attribute) + return checkError(r1, r2, err) +} + +// SetConsoleWindowInfo sets the size and position of the console screen buffer's window. +// Note that the size and location must be within and no larger than the backing console screen buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx. +func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error { + r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect))) + use(isAbsolute) + use(rect) + return checkError(r1, r2, err) +} + +// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx. +func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error { + r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion))) + use(buffer) + use(bufferSize) + use(bufferCoord) + return checkError(r1, r2, err) +} + +// ReadConsoleInput reads (and removes) data from the console input buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx. 
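+// The buffer must contain at least one INPUT_RECORD; count receives the number of records actually read.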
+func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error { + r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count))) + use(buffer) + return checkError(r1, r2, err) +} + +// WaitForSingleObject waits for the passed handle to be signaled. +// It returns true if the handle was signaled; false otherwise. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx. +func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) { + r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait))) + switch r1 { + case WAIT_ABANDONED, WAIT_TIMEOUT: + return false, nil + case WAIT_SIGNALED: + return true, nil + } + use(msWait) + return false, err +} + +// String helpers +func (info CONSOLE_SCREEN_BUFFER_INFO) String() string { + return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize) +} + +func (coord COORD) String() string { + return fmt.Sprintf("%v,%v", coord.X, coord.Y) +} + +func (rect SMALL_RECT) String() string { + return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom) +} + +// checkError evaluates the results of a Windows API call and returns the error if it failed. +func checkError(r1, r2 uintptr, err error) error { + // Windows APIs return non-zero to indicate success + if r1 != 0 { + return nil + } + + // Return the error if provided, otherwise default to EINVAL + if err != nil { + return err + } + return syscall.EINVAL +} + +// coordToPointer converts a COORD into a uintptr (by fooling the type system). +func coordToPointer(c COORD) uintptr { + // Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass. + return uintptr(*((*uint32)(unsafe.Pointer(&c)))) +} + +// use is a no-op, but the compiler cannot see that it is. +// Calling use(p) ensures that p is kept live until that point. +func use(p interface{}) {} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go new file mode 100644 index 000000000..cbec8f728 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go @@ -0,0 +1,100 @@ +// +build windows + +package winterm + +import "github.com/Azure/go-ansiterm" + +const ( + FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE + BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE +) + +// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the +// request represented by the passed ANSI mode. 
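+// It returns the adjusted attribute bits together with the updated reverse-video (inverted) state.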
+func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) { + switch ansiMode { + + // Mode styles + case ansiterm.ANSI_SGR_BOLD: + windowsMode = windowsMode | FOREGROUND_INTENSITY + + case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF: + windowsMode &^= FOREGROUND_INTENSITY + + case ansiterm.ANSI_SGR_UNDERLINE: + windowsMode = windowsMode | COMMON_LVB_UNDERSCORE + + case ansiterm.ANSI_SGR_REVERSE: + inverted = true + + case ansiterm.ANSI_SGR_REVERSE_OFF: + inverted = false + + case ansiterm.ANSI_SGR_UNDERLINE_OFF: + windowsMode &^= COMMON_LVB_UNDERSCORE + + // Foreground colors + case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT: + windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK) + + case ansiterm.ANSI_SGR_FOREGROUND_BLACK: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) + + case ansiterm.ANSI_SGR_FOREGROUND_RED: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED + + case ansiterm.ANSI_SGR_FOREGROUND_GREEN: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN + + case ansiterm.ANSI_SGR_FOREGROUND_YELLOW: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN + + case ansiterm.ANSI_SGR_FOREGROUND_BLUE: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE + + case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE + + case ansiterm.ANSI_SGR_FOREGROUND_CYAN: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE + + case ansiterm.ANSI_SGR_FOREGROUND_WHITE: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE + + // Background colors + case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT: + // Black with no intensity + windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK) + + case ansiterm.ANSI_SGR_BACKGROUND_BLACK: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) + + case ansiterm.ANSI_SGR_BACKGROUND_RED: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED + + case ansiterm.ANSI_SGR_BACKGROUND_GREEN: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN + + case ansiterm.ANSI_SGR_BACKGROUND_YELLOW: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN + + case ansiterm.ANSI_SGR_BACKGROUND_BLUE: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE + + case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE + + case ansiterm.ANSI_SGR_BACKGROUND_CYAN: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE + + case ansiterm.ANSI_SGR_BACKGROUND_WHITE: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE + } + + return windowsMode, inverted +} + +// invertAttributes inverts the foreground and background colors of a Windows attributes value +func invertAttributes(windowsMode uint16) uint16 { + return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4) +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go new file mode 100644 index 000000000..3ee06ea72 --- /dev/null +++ 
b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go @@ -0,0 +1,101 @@ +// +build windows + +package winterm + +const ( + horizontal = iota + vertical +) + +func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT { + if h.originMode { + sr := h.effectiveSr(info.Window) + return SMALL_RECT{ + Top: sr.top, + Bottom: sr.bottom, + Left: 0, + Right: info.Size.X - 1, + } + } else { + return SMALL_RECT{ + Top: info.Window.Top, + Bottom: info.Window.Bottom, + Left: 0, + Right: info.Size.X - 1, + } + } +} + +// setCursorPosition sets the cursor to the specified position, bounded to the screen size +func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error { + position.X = ensureInRange(position.X, window.Left, window.Right) + position.Y = ensureInRange(position.Y, window.Top, window.Bottom) + err := SetConsoleCursorPosition(h.fd, position) + if err != nil { + return err + } + h.logf("Cursor position set: (%d, %d)", position.X, position.Y) + return err +} + +func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error { + return h.moveCursor(vertical, param) +} + +func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error { + return h.moveCursor(horizontal, param) +} + +func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + position := info.CursorPosition + switch moveMode { + case horizontal: + position.X += int16(param) + case vertical: + position.Y += int16(param) + } + + if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) moveCursorLine(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + position := info.CursorPosition + position.X = 0 + position.Y += int16(param) + + if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + position := info.CursorPosition + position.X = int16(param) - 1 + + if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go new file mode 100644 index 000000000..244b5fa25 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go @@ -0,0 +1,84 @@ +// +build windows + +package winterm + +import "github.com/Azure/go-ansiterm" + +func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error { + // Ignore an invalid (negative area) request + if toCoord.Y < fromCoord.Y { + return nil + } + + var err error + + var coordStart = COORD{} + var coordEnd = COORD{} + + xCurrent, yCurrent := fromCoord.X, fromCoord.Y + xEnd, yEnd := toCoord.X, toCoord.Y + + // Clear any partial initial line + if xCurrent > 0 { + coordStart.X, coordStart.Y = xCurrent, yCurrent + coordEnd.X, coordEnd.Y = xEnd, yCurrent + + err = h.clearRect(attributes, coordStart, coordEnd) + if err != nil { + return err + } + + xCurrent = 0 + yCurrent += 1 + } + + // Clear intervening rectangular section + if yCurrent < yEnd { + coordStart.X, coordStart.Y = xCurrent, yCurrent + coordEnd.X, 
coordEnd.Y = xEnd, yEnd-1 + + err = h.clearRect(attributes, coordStart, coordEnd) + if err != nil { + return err + } + + xCurrent = 0 + yCurrent = yEnd + } + + // Clear remaining partial ending line + coordStart.X, coordStart.Y = xCurrent, yCurrent + coordEnd.X, coordEnd.Y = xEnd, yEnd + + err = h.clearRect(attributes, coordStart, coordEnd) + if err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error { + region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X} + width := toCoord.X - fromCoord.X + 1 + height := toCoord.Y - fromCoord.Y + 1 + size := uint32(width) * uint32(height) + + if size <= 0 { + return nil + } + + buffer := make([]CHAR_INFO, size) + + char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes} + for i := 0; i < int(size); i++ { + buffer[i] = char + } + + err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, ®ion) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go new file mode 100644 index 000000000..2d27fa1d0 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go @@ -0,0 +1,118 @@ +// +build windows + +package winterm + +// effectiveSr gets the current effective scroll region in buffer coordinates +func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion { + top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom) + bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom) + if top >= bottom { + top = window.Top + bottom = window.Bottom + } + return scrollRegion{top: top, bottom: bottom} +} + +func (h *windowsAnsiEventHandler) scrollUp(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + sr := h.effectiveSr(info.Window) + return h.scroll(param, sr, info) +} + +func (h *windowsAnsiEventHandler) scrollDown(param int) error { + return h.scrollUp(-param) +} + +func (h *windowsAnsiEventHandler) deleteLines(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + start := info.CursorPosition.Y + sr := h.effectiveSr(info.Window) + // Lines cannot be inserted or deleted outside the scrolling region. + if start >= sr.top && start <= sr.bottom { + sr.top = start + return h.scroll(param, sr, info) + } else { + return nil + } +} + +func (h *windowsAnsiEventHandler) insertLines(param int) error { + return h.deleteLines(-param) +} + +// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates. 
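+// A positive param moves the region's contents up; a negative param moves them down.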
+func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error { + h.logf("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom) + h.logf("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom) + + // Copy from and clip to the scroll region (full buffer width) + scrollRect := SMALL_RECT{ + Top: sr.top, + Bottom: sr.bottom, + Left: 0, + Right: info.Size.X - 1, + } + + // Origin to which area should be copied + destOrigin := COORD{ + X: 0, + Y: sr.top - int16(param), + } + + char := CHAR_INFO{ + UnicodeChar: ' ', + Attributes: h.attributes, + } + + if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { + return err + } + return nil +} + +func (h *windowsAnsiEventHandler) deleteCharacters(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + return h.scrollLine(param, info.CursorPosition, info) +} + +func (h *windowsAnsiEventHandler) insertCharacters(param int) error { + return h.deleteCharacters(-param) +} + +// scrollLine scrolls a line horizontally starting at the provided position by a number of columns. +func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error { + // Copy from and clip to the scroll region (full buffer width) + scrollRect := SMALL_RECT{ + Top: position.Y, + Bottom: position.Y, + Left: position.X, + Right: info.Size.X - 1, + } + + // Origin to which area should be copied + destOrigin := COORD{ + X: position.X - int16(columns), + Y: position.Y, + } + + char := CHAR_INFO{ + UnicodeChar: ' ', + Attributes: h.attributes, + } + + if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go new file mode 100644 index 000000000..afa7635d7 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go @@ -0,0 +1,9 @@ +// +build windows + +package winterm + +// AddInRange increments a value by the passed quantity while ensuring the values +// always remain within the supplied min / max range. 
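+// For example, addInRange(5, 10, 0, 8) returns 8, since the sum 15 is clamped to the maximum.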
+func addInRange(n int16, increment int16, min int16, max int16) int16 { + return ensureInRange(n+increment, min, max) +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go new file mode 100644 index 000000000..2d40fb75a --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go @@ -0,0 +1,743 @@ +// +build windows + +package winterm + +import ( + "bytes" + "log" + "os" + "strconv" + + "github.com/Azure/go-ansiterm" +) + +type windowsAnsiEventHandler struct { + fd uintptr + file *os.File + infoReset *CONSOLE_SCREEN_BUFFER_INFO + sr scrollRegion + buffer bytes.Buffer + attributes uint16 + inverted bool + wrapNext bool + drewMarginByte bool + originMode bool + marginByte byte + curInfo *CONSOLE_SCREEN_BUFFER_INFO + curPos COORD + logf func(string, ...interface{}) +} + +type Option func(*windowsAnsiEventHandler) + +func WithLogf(f func(string, ...interface{})) Option { + return func(w *windowsAnsiEventHandler) { + w.logf = f + } +} + +func CreateWinEventHandler(fd uintptr, file *os.File, opts ...Option) ansiterm.AnsiEventHandler { + infoReset, err := GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil + } + + h := &windowsAnsiEventHandler{ + fd: fd, + file: file, + infoReset: infoReset, + attributes: infoReset.Attributes, + } + for _, o := range opts { + o(h) + } + + if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { + logFile, _ := os.Create("winEventHandler.log") + logger := log.New(logFile, "", log.LstdFlags) + if h.logf != nil { + l := h.logf + h.logf = func(s string, v ...interface{}) { + l(s, v...) + logger.Printf(s, v...) + } + } else { + h.logf = logger.Printf + } + } + + if h.logf == nil { + h.logf = func(string, ...interface{}) {} + } + + return h +} + +type scrollRegion struct { + top int16 + bottom int16 +} + +// simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the +// current cursor position and scroll region settings, in which case it returns +// true. If no special handling is necessary, then it does nothing and returns +// false. +// +// In the false case, the caller should ensure that a carriage return +// and line feed are inserted or that the text is otherwise wrapped. +func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) { + if h.wrapNext { + if err := h.Flush(); err != nil { + return false, err + } + h.clearWrap() + } + pos, info, err := h.getCurrentInfo() + if err != nil { + return false, err + } + sr := h.effectiveSr(info.Window) + if pos.Y == sr.bottom { + // Scrolling is necessary. Let Windows automatically scroll if the scrolling region + // is the full window. + if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom { + if includeCR { + pos.X = 0 + h.updatePos(pos) + } + return false, nil + } + + // A custom scroll region is active. Scroll the window manually to simulate + // the LF. + if err := h.Flush(); err != nil { + return false, err + } + h.logf("Simulating LF inside scroll region") + if err := h.scrollUp(1); err != nil { + return false, err + } + if includeCR { + pos.X = 0 + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return false, err + } + } + return true, nil + + } else if pos.Y < info.Window.Bottom { + // Let Windows handle the LF. + pos.Y++ + if includeCR { + pos.X = 0 + } + h.updatePos(pos) + return false, nil + } else { + // The cursor is at the bottom of the screen but outside the scroll + // region. Skip the LF. 
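+		// (only the carriage return, when requested, is still performed below)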
+ h.logf("Simulating LF outside scroll region") + if includeCR { + if err := h.Flush(); err != nil { + return false, err + } + pos.X = 0 + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return false, err + } + } + return true, nil + } +} + +// executeLF executes a LF without a CR. +func (h *windowsAnsiEventHandler) executeLF() error { + handled, err := h.simulateLF(false) + if err != nil { + return err + } + if !handled { + // Windows LF will reset the cursor column position. Write the LF + // and restore the cursor position. + pos, _, err := h.getCurrentInfo() + if err != nil { + return err + } + h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) + if pos.X != 0 { + if err := h.Flush(); err != nil { + return err + } + h.logf("Resetting cursor position for LF without CR") + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return err + } + } + } + return nil +} + +func (h *windowsAnsiEventHandler) Print(b byte) error { + if h.wrapNext { + h.buffer.WriteByte(h.marginByte) + h.clearWrap() + if _, err := h.simulateLF(true); err != nil { + return err + } + } + pos, info, err := h.getCurrentInfo() + if err != nil { + return err + } + if pos.X == info.Size.X-1 { + h.wrapNext = true + h.marginByte = b + } else { + pos.X++ + h.updatePos(pos) + h.buffer.WriteByte(b) + } + return nil +} + +func (h *windowsAnsiEventHandler) Execute(b byte) error { + switch b { + case ansiterm.ANSI_TAB: + h.logf("Execute(TAB)") + // Move to the next tab stop, but preserve auto-wrap if already set. + if !h.wrapNext { + pos, info, err := h.getCurrentInfo() + if err != nil { + return err + } + pos.X = (pos.X + 8) - pos.X%8 + if pos.X >= info.Size.X { + pos.X = info.Size.X - 1 + } + if err := h.Flush(); err != nil { + return err + } + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return err + } + } + return nil + + case ansiterm.ANSI_BEL: + h.buffer.WriteByte(ansiterm.ANSI_BEL) + return nil + + case ansiterm.ANSI_BACKSPACE: + if h.wrapNext { + if err := h.Flush(); err != nil { + return err + } + h.clearWrap() + } + pos, _, err := h.getCurrentInfo() + if err != nil { + return err + } + if pos.X > 0 { + pos.X-- + h.updatePos(pos) + h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE) + } + return nil + + case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED: + // Treat as true LF. + return h.executeLF() + + case ansiterm.ANSI_LINE_FEED: + // Simulate a CR and LF for now since there is no way in go-ansiterm + // to tell if the LF should include CR (and more things break when it's + // missing than when it's incorrectly added). 
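+		// the raw LF byte is only written below when simulateLF reports that it did not handle the movement itself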
+ handled, err := h.simulateLF(true) + if handled || err != nil { + return err + } + return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) + + case ansiterm.ANSI_CARRIAGE_RETURN: + if h.wrapNext { + if err := h.Flush(); err != nil { + return err + } + h.clearWrap() + } + pos, _, err := h.getCurrentInfo() + if err != nil { + return err + } + if pos.X != 0 { + pos.X = 0 + h.updatePos(pos) + h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN) + } + return nil + + default: + return nil + } +} + +func (h *windowsAnsiEventHandler) CUU(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUU: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorVertical(-param) +} + +func (h *windowsAnsiEventHandler) CUD(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUD: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorVertical(param) +} + +func (h *windowsAnsiEventHandler) CUF(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUF: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorHorizontal(param) +} + +func (h *windowsAnsiEventHandler) CUB(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUB: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorHorizontal(-param) +} + +func (h *windowsAnsiEventHandler) CNL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CNL: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorLine(param) +} + +func (h *windowsAnsiEventHandler) CPL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CPL: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorLine(-param) +} + +func (h *windowsAnsiEventHandler) CHA(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CHA: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorColumn(param) +} + +func (h *windowsAnsiEventHandler) VPA(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("VPA: [[%d]]", param) + h.clearWrap() + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + window := h.getCursorWindow(info) + position := info.CursorPosition + position.Y = window.Top + int16(param) - 1 + return h.setCursorPosition(position, window) +} + +func (h *windowsAnsiEventHandler) CUP(row int, col int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUP: [[%d %d]]", row, col) + h.clearWrap() + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + window := h.getCursorWindow(info) + position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1} + return h.setCursorPosition(position, window) +} + +func (h *windowsAnsiEventHandler) HVP(row int, col int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("HVP: [[%d %d]]", row, col) + h.clearWrap() + return h.CUP(row, col) +} + +func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DECTCEM: [%v]", []string{strconv.FormatBool(visible)}) + h.clearWrap() + return nil +} + +func (h *windowsAnsiEventHandler) DECOM(enable bool) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DECOM: [%v]", []string{strconv.FormatBool(enable)}) + h.clearWrap() + h.originMode = enable + return h.CUP(1, 1) +} + +func (h 
*windowsAnsiEventHandler) DECCOLM(use132 bool) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DECCOLM: [%v]", []string{strconv.FormatBool(use132)}) + h.clearWrap() + if err := h.ED(2); err != nil { + return err + } + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + targetWidth := int16(80) + if use132 { + targetWidth = 132 + } + if info.Size.X < targetWidth { + if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { + h.logf("set buffer failed: %v", err) + return err + } + } + window := info.Window + window.Left = 0 + window.Right = targetWidth - 1 + if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { + h.logf("set window failed: %v", err) + return err + } + if info.Size.X > targetWidth { + if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { + h.logf("set buffer failed: %v", err) + return err + } + } + return SetConsoleCursorPosition(h.fd, COORD{0, 0}) +} + +func (h *windowsAnsiEventHandler) ED(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("ED: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + + // [J -- Erases from the cursor to the end of the screen, including the cursor position. + // [1J -- Erases from the beginning of the screen to the cursor, including the cursor position. + // [2J -- Erases the complete display. The cursor does not move. + // Notes: + // -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles + + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + var start COORD + var end COORD + + switch param { + case 0: + start = info.CursorPosition + end = COORD{info.Size.X - 1, info.Size.Y - 1} + + case 1: + start = COORD{0, 0} + end = info.CursorPosition + + case 2: + start = COORD{0, 0} + end = COORD{info.Size.X - 1, info.Size.Y - 1} + } + + err = h.clearRange(h.attributes, start, end) + if err != nil { + return err + } + + // If the whole buffer was cleared, move the window to the top while preserving + // the window-relative cursor position. + if param == 2 { + pos := info.CursorPosition + window := info.Window + pos.Y -= window.Top + window.Bottom -= window.Top + window.Top = 0 + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return err + } + if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { + return err + } + } + + return nil +} + +func (h *windowsAnsiEventHandler) EL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("EL: [%v]", strconv.Itoa(param)) + h.clearWrap() + + // [K -- Erases from the cursor to the end of the line, including the cursor position. + // [1K -- Erases from the beginning of the line to the cursor, including the cursor position. + // [2K -- Erases the complete line. 
+ + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + var start COORD + var end COORD + + switch param { + case 0: + start = info.CursorPosition + end = COORD{info.Size.X, info.CursorPosition.Y} + + case 1: + start = COORD{0, info.CursorPosition.Y} + end = info.CursorPosition + + case 2: + start = COORD{0, info.CursorPosition.Y} + end = COORD{info.Size.X, info.CursorPosition.Y} + } + + err = h.clearRange(h.attributes, start, end) + if err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) IL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("IL: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.insertLines(param) +} + +func (h *windowsAnsiEventHandler) DL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DL: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.deleteLines(param) +} + +func (h *windowsAnsiEventHandler) ICH(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("ICH: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.insertCharacters(param) +} + +func (h *windowsAnsiEventHandler) DCH(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DCH: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.deleteCharacters(param) +} + +func (h *windowsAnsiEventHandler) SGR(params []int) error { + if err := h.Flush(); err != nil { + return err + } + strings := []string{} + for _, v := range params { + strings = append(strings, strconv.Itoa(v)) + } + + h.logf("SGR: [%v]", strings) + + if len(params) <= 0 { + h.attributes = h.infoReset.Attributes + h.inverted = false + } else { + for _, attr := range params { + + if attr == ansiterm.ANSI_SGR_RESET { + h.attributes = h.infoReset.Attributes + h.inverted = false + continue + } + + h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, int16(attr)) + } + } + + attributes := h.attributes + if h.inverted { + attributes = invertAttributes(attributes) + } + err := SetConsoleTextAttribute(h.fd, attributes) + if err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) SU(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("SU: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.scrollUp(param) +} + +func (h *windowsAnsiEventHandler) SD(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("SD: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.scrollDown(param) +} + +func (h *windowsAnsiEventHandler) DA(params []string) error { + h.logf("DA: [%v]", params) + // DA cannot be implemented because it must send data on the VT100 input stream, + // which is not available to go-ansiterm. + return nil +} + +func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DECSTBM: [%d, %d]", top, bottom) + + // Windows is 0 indexed, Linux is 1 indexed + h.sr.top = int16(top - 1) + h.sr.bottom = int16(bottom - 1) + + // This command also moves the cursor to the origin. 
+ h.clearWrap() + return h.CUP(1, 1) +} + +func (h *windowsAnsiEventHandler) RI() error { + if err := h.Flush(); err != nil { + return err + } + h.logf("RI: []") + h.clearWrap() + + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + sr := h.effectiveSr(info.Window) + if info.CursorPosition.Y == sr.top { + return h.scrollDown(1) + } + + return h.moveCursorVertical(-1) +} + +func (h *windowsAnsiEventHandler) IND() error { + h.logf("IND: []") + return h.executeLF() +} + +func (h *windowsAnsiEventHandler) Flush() error { + h.curInfo = nil + if h.buffer.Len() > 0 { + h.logf("Flush: [%s]", h.buffer.Bytes()) + if _, err := h.buffer.WriteTo(h.file); err != nil { + return err + } + } + + if h.wrapNext && !h.drewMarginByte { + h.logf("Flush: drawing margin byte '%c'", h.marginByte) + + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}} + size := COORD{1, 1} + position := COORD{0, 0} + region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y} + if err := WriteConsoleOutput(h.fd, charInfo, size, position, ®ion); err != nil { + return err + } + h.drewMarginByte = true + } + return nil +} + +// cacheConsoleInfo ensures that the current console screen information has been queried +// since the last call to Flush(). It must be called before accessing h.curInfo or h.curPos. +func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) { + if h.curInfo == nil { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return COORD{}, nil, err + } + h.curInfo = info + h.curPos = info.CursorPosition + } + return h.curPos, h.curInfo, nil +} + +func (h *windowsAnsiEventHandler) updatePos(pos COORD) { + if h.curInfo == nil { + panic("failed to call getCurrentInfo before calling updatePos") + } + h.curPos = pos +} + +// clearWrap clears the state where the cursor is in the margin +// waiting for the next character before wrapping the line. This must +// be done before most operations that act on the cursor. 
+func (h *windowsAnsiEventHandler) clearWrap() { + h.wrapNext = false + h.drewMarginByte = false +} diff --git a/vendor/github.com/Microsoft/go-winio/.gitignore b/vendor/github.com/Microsoft/go-winio/.gitignore new file mode 100644 index 000000000..b883f1fdc --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/.gitignore @@ -0,0 +1 @@ +*.exe diff --git a/vendor/github.com/Microsoft/go-winio/CODEOWNERS b/vendor/github.com/Microsoft/go-winio/CODEOWNERS new file mode 100644 index 000000000..ae1b4942b --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/CODEOWNERS @@ -0,0 +1 @@ + * @microsoft/containerplat diff --git a/vendor/github.com/Microsoft/go-winio/LICENSE b/vendor/github.com/Microsoft/go-winio/LICENSE new file mode 100644 index 000000000..b8b569d77 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/Microsoft/go-winio/README.md b/vendor/github.com/Microsoft/go-winio/README.md new file mode 100644 index 000000000..683be1dcf --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/README.md @@ -0,0 +1,37 @@ +# go-winio [![Build Status](https://github.com/microsoft/go-winio/actions/workflows/ci.yml/badge.svg)](https://github.com/microsoft/go-winio/actions/workflows/ci.yml) + +This repository contains utilities for efficiently performing Win32 IO operations in +Go. Currently, this is focused on accessing named pipes and other file handles, and +for using named pipes as a net transport. + +This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go +to reuse the thread to schedule another goroutine. This limits support to Windows Vista and +newer operating systems. This is similar to the implementation of network sockets in Go's net +package. + +Please see the LICENSE file for licensing information. + +## Contributing + +This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) +declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com. + +When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR +appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. 
+ +We also require that contributors sign their commits using git commit -s or git commit --signoff to certify they either authored the work themselves +or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for more info, as well as to make sure that you can +attest to the rules listed. Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off. + + +## Code of Conduct + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or +contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. + + + +## Special Thanks +Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe +for another named pipe implementation. diff --git a/vendor/github.com/Microsoft/go-winio/backup.go b/vendor/github.com/Microsoft/go-winio/backup.go new file mode 100644 index 000000000..2be34af43 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/backup.go @@ -0,0 +1,280 @@ +// +build windows + +package winio + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + "syscall" + "unicode/utf16" +) + +//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead +//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite + +const ( + BackupData = uint32(iota + 1) + BackupEaData + BackupSecurity + BackupAlternateData + BackupLink + BackupPropertyData + BackupObjectId + BackupReparseData + BackupSparseBlock + BackupTxfsData +) + +const ( + StreamSparseAttributes = uint32(8) +) + +const ( + WRITE_DAC = 0x40000 + WRITE_OWNER = 0x80000 + ACCESS_SYSTEM_SECURITY = 0x1000000 +) + +// BackupHeader represents a backup stream of a file. +type BackupHeader struct { + Id uint32 // The backup stream ID + Attributes uint32 // Stream attributes + Size int64 // The size of the stream in bytes + Name string // The name of the stream (for BackupAlternateData only). + Offset int64 // The offset of the stream in the file (for BackupSparseBlock only). +} + +type win32StreamId struct { + StreamId uint32 + Attributes uint32 + Size uint64 + NameSize uint32 +} + +// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series +// of BackupHeader values. +type BackupStreamReader struct { + r io.Reader + bytesLeft int64 +} + +// NewBackupStreamReader produces a BackupStreamReader from any io.Reader. +func NewBackupStreamReader(r io.Reader) *BackupStreamReader { + return &BackupStreamReader{r, 0} +} + +// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if +// it was not completely read. +func (r *BackupStreamReader) Next() (*BackupHeader, error) { + if r.bytesLeft > 0 { + if s, ok := r.r.(io.Seeker); ok { + // Make sure Seek on io.SeekCurrent sometimes succeeds + // before trying the actual seek. 
+ if _, err := s.Seek(0, io.SeekCurrent); err == nil { + if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil { + return nil, err + } + r.bytesLeft = 0 + } + } + if _, err := io.Copy(ioutil.Discard, r); err != nil { + return nil, err + } + } + var wsi win32StreamId + if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil { + return nil, err + } + hdr := &BackupHeader{ + Id: wsi.StreamId, + Attributes: wsi.Attributes, + Size: int64(wsi.Size), + } + if wsi.NameSize != 0 { + name := make([]uint16, int(wsi.NameSize/2)) + if err := binary.Read(r.r, binary.LittleEndian, name); err != nil { + return nil, err + } + hdr.Name = syscall.UTF16ToString(name) + } + if wsi.StreamId == BackupSparseBlock { + if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil { + return nil, err + } + hdr.Size -= 8 + } + r.bytesLeft = hdr.Size + return hdr, nil +} + +// Read reads from the current backup stream. +func (r *BackupStreamReader) Read(b []byte) (int, error) { + if r.bytesLeft == 0 { + return 0, io.EOF + } + if int64(len(b)) > r.bytesLeft { + b = b[:r.bytesLeft] + } + n, err := r.r.Read(b) + r.bytesLeft -= int64(n) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } else if r.bytesLeft == 0 && err == nil { + err = io.EOF + } + return n, err +} + +// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API. +type BackupStreamWriter struct { + w io.Writer + bytesLeft int64 +} + +// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer. +func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter { + return &BackupStreamWriter{w, 0} +} + +// WriteHeader writes the next backup stream header and prepares for calls to Write(). +func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error { + if w.bytesLeft != 0 { + return fmt.Errorf("missing %d bytes", w.bytesLeft) + } + name := utf16.Encode([]rune(hdr.Name)) + wsi := win32StreamId{ + StreamId: hdr.Id, + Attributes: hdr.Attributes, + Size: uint64(hdr.Size), + NameSize: uint32(len(name) * 2), + } + if hdr.Id == BackupSparseBlock { + // Include space for the int64 block offset + wsi.Size += 8 + } + if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil { + return err + } + if len(name) != 0 { + if err := binary.Write(w.w, binary.LittleEndian, name); err != nil { + return err + } + } + if hdr.Id == BackupSparseBlock { + if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil { + return err + } + } + w.bytesLeft = hdr.Size + return nil +} + +// Write writes to the current backup stream. +func (w *BackupStreamWriter) Write(b []byte) (int, error) { + if w.bytesLeft < int64(len(b)) { + return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft) + } + n, err := w.w.Write(b) + w.bytesLeft -= int64(n) + return n, err +} + +// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API. +type BackupFileReader struct { + f *os.File + includeSecurity bool + ctx uintptr +} + +// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true, +// Read will attempt to read the security descriptor of the file. +func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader { + r := &BackupFileReader{f, includeSecurity, 0} + return r +} + +// Read reads a backup stream from the file by calling the Win32 API BackupRead(). 
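+// It returns io.EOF once BackupRead reports that no more data remains in the current stream.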
+func (r *BackupFileReader) Read(b []byte) (int, error) { + var bytesRead uint32 + err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx) + if err != nil { + return 0, &os.PathError{"BackupRead", r.f.Name(), err} + } + runtime.KeepAlive(r.f) + if bytesRead == 0 { + return 0, io.EOF + } + return int(bytesRead), nil +} + +// Close frees Win32 resources associated with the BackupFileReader. It does not close +// the underlying file. +func (r *BackupFileReader) Close() error { + if r.ctx != 0 { + backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) + runtime.KeepAlive(r.f) + r.ctx = 0 + } + return nil +} + +// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API. +type BackupFileWriter struct { + f *os.File + includeSecurity bool + ctx uintptr +} + +// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true, +// Write() will attempt to restore the security descriptor from the stream. +func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter { + w := &BackupFileWriter{f, includeSecurity, 0} + return w +} + +// Write restores a portion of the file using the provided backup stream. +func (w *BackupFileWriter) Write(b []byte) (int, error) { + var bytesWritten uint32 + err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx) + if err != nil { + return 0, &os.PathError{"BackupWrite", w.f.Name(), err} + } + runtime.KeepAlive(w.f) + if int(bytesWritten) != len(b) { + return int(bytesWritten), errors.New("not all bytes could be written") + } + return len(b), nil +} + +// Close frees Win32 resources associated with the BackupFileWriter. It does not +// close the underlying file. +func (w *BackupFileWriter) Close() error { + if w.ctx != 0 { + backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) + runtime.KeepAlive(w.f) + w.ctx = 0 + } + return nil +} + +// OpenForBackup opens a file or directory, potentially skipping access checks if the backup +// or restore privileges have been acquired. +// +// If the file opened was a directory, it cannot be used with Readdir(). +func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) { + winPath, err := syscall.UTF16FromString(path) + if err != nil { + return nil, err + } + h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0) + if err != nil { + err = &os.PathError{Op: "open", Path: path, Err: err} + return nil, err + } + return os.NewFile(uintptr(h), path), nil +} diff --git a/vendor/github.com/Microsoft/go-winio/ea.go b/vendor/github.com/Microsoft/go-winio/ea.go new file mode 100644 index 000000000..4051c1b33 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/ea.go @@ -0,0 +1,137 @@ +package winio + +import ( + "bytes" + "encoding/binary" + "errors" +) + +type fileFullEaInformation struct { + NextEntryOffset uint32 + Flags uint8 + NameLength uint8 + ValueLength uint16 +} + +var ( + fileFullEaInformationSize = binary.Size(&fileFullEaInformation{}) + + errInvalidEaBuffer = errors.New("invalid extended attribute buffer") + errEaNameTooLarge = errors.New("extended attribute name too large") + errEaValueTooLarge = errors.New("extended attribute value too large") +) + +// ExtendedAttribute represents a single Windows EA. 
+type ExtendedAttribute struct { + Name string + Value []byte + Flags uint8 +} + +func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { + var info fileFullEaInformation + err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info) + if err != nil { + err = errInvalidEaBuffer + return + } + + nameOffset := fileFullEaInformationSize + nameLen := int(info.NameLength) + valueOffset := nameOffset + int(info.NameLength) + 1 + valueLen := int(info.ValueLength) + nextOffset := int(info.NextEntryOffset) + if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) { + err = errInvalidEaBuffer + return + } + + ea.Name = string(b[nameOffset : nameOffset+nameLen]) + ea.Value = b[valueOffset : valueOffset+valueLen] + ea.Flags = info.Flags + if info.NextEntryOffset != 0 { + nb = b[info.NextEntryOffset:] + } + return +} + +// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION +// buffer retrieved from BackupRead, ZwQueryEaFile, etc. +func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) { + for len(b) != 0 { + ea, nb, err := parseEa(b) + if err != nil { + return nil, err + } + + eas = append(eas, ea) + b = nb + } + return +} + +func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error { + if int(uint8(len(ea.Name))) != len(ea.Name) { + return errEaNameTooLarge + } + if int(uint16(len(ea.Value))) != len(ea.Value) { + return errEaValueTooLarge + } + entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value)) + withPadding := (entrySize + 3) &^ 3 + nextOffset := uint32(0) + if !last { + nextOffset = withPadding + } + info := fileFullEaInformation{ + NextEntryOffset: nextOffset, + Flags: ea.Flags, + NameLength: uint8(len(ea.Name)), + ValueLength: uint16(len(ea.Value)), + } + + err := binary.Write(buf, binary.LittleEndian, &info) + if err != nil { + return err + } + + _, err = buf.Write([]byte(ea.Name)) + if err != nil { + return err + } + + err = buf.WriteByte(0) + if err != nil { + return err + } + + _, err = buf.Write(ea.Value) + if err != nil { + return err + } + + _, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize]) + if err != nil { + return err + } + + return nil +} + +// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION +// buffer for use with BackupWrite, ZwSetEaFile, etc. 
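+// Each entry is padded to a 4-byte boundary, and only the final entry carries a zero NextEntryOffset.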
+func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) { + var buf bytes.Buffer + for i := range eas { + last := false + if i == len(eas)-1 { + last = true + } + + err := writeEa(&buf, &eas[i], last) + if err != nil { + return nil, err + } + } + return buf.Bytes(), nil +} diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go new file mode 100644 index 000000000..293ab54c8 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/file.go @@ -0,0 +1,329 @@ +//go:build windows +// +build windows + +package winio + +import ( + "errors" + "io" + "runtime" + "sync" + "sync/atomic" + "syscall" + "time" +) + +//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx +//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort +//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus +//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes +//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult + +type atomicBool int32 + +func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } +func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) } +func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } +func (b *atomicBool) swap(new bool) bool { + var newInt int32 + if new { + newInt = 1 + } + return atomic.SwapInt32((*int32)(b), newInt) == 1 +} + +const ( + cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1 + cFILE_SKIP_SET_EVENT_ON_HANDLE = 2 +) + +var ( + ErrFileClosed = errors.New("file has already been closed") + ErrTimeout = &timeoutError{} +) + +type timeoutError struct{} + +func (e *timeoutError) Error() string { return "i/o timeout" } +func (e *timeoutError) Timeout() bool { return true } +func (e *timeoutError) Temporary() bool { return true } + +type timeoutChan chan struct{} + +var ioInitOnce sync.Once +var ioCompletionPort syscall.Handle + +// ioResult contains the result of an asynchronous IO operation +type ioResult struct { + bytes uint32 + err error +} + +// ioOperation represents an outstanding asynchronous Win32 IO +type ioOperation struct { + o syscall.Overlapped + ch chan ioResult +} + +func initIo() { + h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff) + if err != nil { + panic(err) + } + ioCompletionPort = h + go ioCompletionProcessor(h) +} + +// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall. +// It takes ownership of this handle and will close it if it is garbage collected. 
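+// IO is issued as overlapped (asynchronous) operations and completed through a shared IO completion port, so pending reads and writes do not tie up OS threads.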
+type win32File struct { + handle syscall.Handle + wg sync.WaitGroup + wgLock sync.RWMutex + closing atomicBool + socket bool + readDeadline deadlineHandler + writeDeadline deadlineHandler +} + +type deadlineHandler struct { + setLock sync.Mutex + channel timeoutChan + channelLock sync.RWMutex + timer *time.Timer + timedout atomicBool +} + +// makeWin32File makes a new win32File from an existing file handle +func makeWin32File(h syscall.Handle) (*win32File, error) { + f := &win32File{handle: h} + ioInitOnce.Do(initIo) + _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff) + if err != nil { + return nil, err + } + err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE) + if err != nil { + return nil, err + } + f.readDeadline.channel = make(timeoutChan) + f.writeDeadline.channel = make(timeoutChan) + return f, nil +} + +func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { + // If we return the result of makeWin32File directly, it can result in an + // interface-wrapped nil, rather than a nil interface value. + f, err := makeWin32File(h) + if err != nil { + return nil, err + } + return f, nil +} + +// closeHandle closes the resources associated with a Win32 handle +func (f *win32File) closeHandle() { + f.wgLock.Lock() + // Atomically set that we are closing, releasing the resources only once. + if !f.closing.swap(true) { + f.wgLock.Unlock() + // cancel all IO and wait for it to complete + cancelIoEx(f.handle, nil) + f.wg.Wait() + // at this point, no new IO can start + syscall.Close(f.handle) + f.handle = 0 + } else { + f.wgLock.Unlock() + } +} + +// Close closes a win32File. +func (f *win32File) Close() error { + f.closeHandle() + return nil +} + +// IsClosed checks if the file has been closed +func (f *win32File) IsClosed() bool { + return f.closing.isSet() +} + +// prepareIo prepares for a new IO operation. +// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. +func (f *win32File) prepareIo() (*ioOperation, error) { + f.wgLock.RLock() + if f.closing.isSet() { + f.wgLock.RUnlock() + return nil, ErrFileClosed + } + f.wg.Add(1) + f.wgLock.RUnlock() + c := &ioOperation{} + c.ch = make(chan ioResult) + return c, nil +} + +// ioCompletionProcessor processes completed async IOs forever +func ioCompletionProcessor(h syscall.Handle) { + for { + var bytes uint32 + var key uintptr + var op *ioOperation + err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE) + if op == nil { + panic(err) + } + op.ch <- ioResult{bytes, err} + } +} + +// asyncIo processes the return value from ReadFile or WriteFile, blocking until +// the operation has actually completed. +func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { + if err != syscall.ERROR_IO_PENDING { + return int(bytes), err + } + + if f.closing.isSet() { + cancelIoEx(f.handle, &c.o) + } + + var timeout timeoutChan + if d != nil { + d.channelLock.Lock() + timeout = d.channel + d.channelLock.Unlock() + } + + var r ioResult + select { + case r = <-c.ch: + err = r.err + if err == syscall.ERROR_OPERATION_ABORTED { + if f.closing.isSet() { + err = ErrFileClosed + } + } else if err != nil && f.socket { + // err is from Win32. Query the overlapped structure to get the winsock error. 
+ var bytes, flags uint32 + err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags) + } + case <-timeout: + cancelIoEx(f.handle, &c.o) + r = <-c.ch + err = r.err + if err == syscall.ERROR_OPERATION_ABORTED { + err = ErrTimeout + } + } + + // runtime.KeepAlive is needed, as c is passed via native + // code to ioCompletionProcessor, c must remain alive + // until the channel read is complete. + runtime.KeepAlive(c) + return int(r.bytes), err +} + +// Read reads from a file handle. +func (f *win32File) Read(b []byte) (int, error) { + c, err := f.prepareIo() + if err != nil { + return 0, err + } + defer f.wg.Done() + + if f.readDeadline.timedout.isSet() { + return 0, ErrTimeout + } + + var bytes uint32 + err = syscall.ReadFile(f.handle, b, &bytes, &c.o) + n, err := f.asyncIo(c, &f.readDeadline, bytes, err) + runtime.KeepAlive(b) + + // Handle EOF conditions. + if err == nil && n == 0 && len(b) != 0 { + return 0, io.EOF + } else if err == syscall.ERROR_BROKEN_PIPE { + return 0, io.EOF + } else { + return n, err + } +} + +// Write writes to a file handle. +func (f *win32File) Write(b []byte) (int, error) { + c, err := f.prepareIo() + if err != nil { + return 0, err + } + defer f.wg.Done() + + if f.writeDeadline.timedout.isSet() { + return 0, ErrTimeout + } + + var bytes uint32 + err = syscall.WriteFile(f.handle, b, &bytes, &c.o) + n, err := f.asyncIo(c, &f.writeDeadline, bytes, err) + runtime.KeepAlive(b) + return n, err +} + +func (f *win32File) SetReadDeadline(deadline time.Time) error { + return f.readDeadline.set(deadline) +} + +func (f *win32File) SetWriteDeadline(deadline time.Time) error { + return f.writeDeadline.set(deadline) +} + +func (f *win32File) Flush() error { + return syscall.FlushFileBuffers(f.handle) +} + +func (f *win32File) Fd() uintptr { + return uintptr(f.handle) +} + +func (d *deadlineHandler) set(deadline time.Time) error { + d.setLock.Lock() + defer d.setLock.Unlock() + + if d.timer != nil { + if !d.timer.Stop() { + <-d.channel + } + d.timer = nil + } + d.timedout.setFalse() + + select { + case <-d.channel: + d.channelLock.Lock() + d.channel = make(chan struct{}) + d.channelLock.Unlock() + default: + } + + if deadline.IsZero() { + return nil + } + + timeoutIO := func() { + d.timedout.setTrue() + close(d.channel) + } + + now := time.Now() + duration := deadline.Sub(now) + if deadline.After(now) { + // Deadline is in the future, set a timer to wait + d.timer = time.AfterFunc(duration, timeoutIO) + } else { + // Deadline is in the past. Cancel all pending IO now. + timeoutIO() + } + return nil +} diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go new file mode 100644 index 000000000..3ab6bff69 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/fileinfo.go @@ -0,0 +1,73 @@ +// +build windows + +package winio + +import ( + "os" + "runtime" + "unsafe" + + "golang.org/x/sys/windows" +) + +// FileBasicInfo contains file access time and file attributes information. +type FileBasicInfo struct { + CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime + FileAttributes uint32 + pad uint32 // padding +} + +// GetFileBasicInfo retrieves times and attributes for a file. 
+func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { + bi := &FileBasicInfo{} + if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return bi, nil +} + +// SetFileBasicInfo sets times and attributes for a file. +func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error { + if err := windows.SetFileInformationByHandle(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return nil +} + +// FileStandardInfo contains extended information for the file. +// FILE_STANDARD_INFO in WinBase.h +// https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_standard_info +type FileStandardInfo struct { + AllocationSize, EndOfFile int64 + NumberOfLinks uint32 + DeletePending, Directory bool +} + +// GetFileStandardInfo retrieves ended information for the file. +func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) { + si := &FileStandardInfo{} + if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileStandardInfo, (*byte)(unsafe.Pointer(si)), uint32(unsafe.Sizeof(*si))); err != nil { + return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return si, nil +} + +// FileIDInfo contains the volume serial number and file ID for a file. This pair should be +// unique on a system. +type FileIDInfo struct { + VolumeSerialNumber uint64 + FileID [16]byte +} + +// GetFileID retrieves the unique (volume, file ID) pair for a file. +func GetFileID(f *os.File) (*FileIDInfo, error) { + fileID := &FileIDInfo{} + if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileIdInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil { + return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return fileID, nil +} diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go new file mode 100644 index 000000000..b2b644d00 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/hvsock.go @@ -0,0 +1,316 @@ +//go:build windows +// +build windows + +package winio + +import ( + "fmt" + "io" + "net" + "os" + "syscall" + "time" + "unsafe" + + "github.com/Microsoft/go-winio/pkg/guid" +) + +//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind + +const ( + afHvSock = 34 // AF_HYPERV + + socketError = ^uintptr(0) +) + +// An HvsockAddr is an address for a AF_HYPERV socket. +type HvsockAddr struct { + VMID guid.GUID + ServiceID guid.GUID +} + +type rawHvsockAddr struct { + Family uint16 + _ uint16 + VMID guid.GUID + ServiceID guid.GUID +} + +// Network returns the address's network name, "hvsock". +func (addr *HvsockAddr) Network() string { + return "hvsock" +} + +func (addr *HvsockAddr) String() string { + return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID) +} + +// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port. 
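+// The ID is the well-known VSOCK template GUID with Data1 replaced by the port; for example, port 0x5000 maps to 00005000-facb-11e6-bd58-64006a7986d3.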
+func VsockServiceID(port uint32) guid.GUID { + g, _ := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3") + g.Data1 = port + return g +} + +func (addr *HvsockAddr) raw() rawHvsockAddr { + return rawHvsockAddr{ + Family: afHvSock, + VMID: addr.VMID, + ServiceID: addr.ServiceID, + } +} + +func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) { + addr.VMID = raw.VMID + addr.ServiceID = raw.ServiceID +} + +// HvsockListener is a socket listener for the AF_HYPERV address family. +type HvsockListener struct { + sock *win32File + addr HvsockAddr +} + +// HvsockConn is a connected socket of the AF_HYPERV address family. +type HvsockConn struct { + sock *win32File + local, remote HvsockAddr +} + +func newHvSocket() (*win32File, error) { + fd, err := syscall.Socket(afHvSock, syscall.SOCK_STREAM, 1) + if err != nil { + return nil, os.NewSyscallError("socket", err) + } + f, err := makeWin32File(fd) + if err != nil { + syscall.Close(fd) + return nil, err + } + f.socket = true + return f, nil +} + +// ListenHvsock listens for connections on the specified hvsock address. +func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) { + l := &HvsockListener{addr: *addr} + sock, err := newHvSocket() + if err != nil { + return nil, l.opErr("listen", err) + } + sa := addr.raw() + err = bind(sock.handle, unsafe.Pointer(&sa), int32(unsafe.Sizeof(sa))) + if err != nil { + return nil, l.opErr("listen", os.NewSyscallError("socket", err)) + } + err = syscall.Listen(sock.handle, 16) + if err != nil { + return nil, l.opErr("listen", os.NewSyscallError("listen", err)) + } + return &HvsockListener{sock: sock, addr: *addr}, nil +} + +func (l *HvsockListener) opErr(op string, err error) error { + return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err} +} + +// Addr returns the listener's network address. +func (l *HvsockListener) Addr() net.Addr { + return &l.addr +} + +// Accept waits for the next connection and returns it. +func (l *HvsockListener) Accept() (_ net.Conn, err error) { + sock, err := newHvSocket() + if err != nil { + return nil, l.opErr("accept", err) + } + defer func() { + if sock != nil { + sock.Close() + } + }() + c, err := l.sock.prepareIo() + if err != nil { + return nil, l.opErr("accept", err) + } + defer l.sock.wg.Done() + + // AcceptEx, per documentation, requires an extra 16 bytes per address. + const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{})) + var addrbuf [addrlen * 2]byte + + var bytes uint32 + err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0, addrlen, addrlen, &bytes, &c.o) + _, err = l.sock.asyncIo(c, nil, bytes, err) + if err != nil { + return nil, l.opErr("accept", os.NewSyscallError("acceptex", err)) + } + conn := &HvsockConn{ + sock: sock, + } + conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0]))) + conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen]))) + sock = nil + return conn, nil +} + +// Close closes the listener, causing any pending Accept calls to fail. 
+func (l *HvsockListener) Close() error { + return l.sock.Close() +} + +/* Need to finish ConnectEx handling +func DialHvsock(ctx context.Context, addr *HvsockAddr) (*HvsockConn, error) { + sock, err := newHvSocket() + if err != nil { + return nil, err + } + defer func() { + if sock != nil { + sock.Close() + } + }() + c, err := sock.prepareIo() + if err != nil { + return nil, err + } + defer sock.wg.Done() + var bytes uint32 + err = windows.ConnectEx(windows.Handle(sock.handle), sa, nil, 0, &bytes, &c.o) + _, err = sock.asyncIo(ctx, c, nil, bytes, err) + if err != nil { + return nil, err + } + conn := &HvsockConn{ + sock: sock, + remote: *addr, + } + sock = nil + return conn, nil +} +*/ + +func (conn *HvsockConn) opErr(op string, err error) error { + return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err} +} + +func (conn *HvsockConn) Read(b []byte) (int, error) { + c, err := conn.sock.prepareIo() + if err != nil { + return 0, conn.opErr("read", err) + } + defer conn.sock.wg.Done() + buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} + var flags, bytes uint32 + err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) + n, err := conn.sock.asyncIo(c, &conn.sock.readDeadline, bytes, err) + if err != nil { + if _, ok := err.(syscall.Errno); ok { + err = os.NewSyscallError("wsarecv", err) + } + return 0, conn.opErr("read", err) + } else if n == 0 { + err = io.EOF + } + return n, err +} + +func (conn *HvsockConn) Write(b []byte) (int, error) { + t := 0 + for len(b) != 0 { + n, err := conn.write(b) + if err != nil { + return t + n, err + } + t += n + b = b[n:] + } + return t, nil +} + +func (conn *HvsockConn) write(b []byte) (int, error) { + c, err := conn.sock.prepareIo() + if err != nil { + return 0, conn.opErr("write", err) + } + defer conn.sock.wg.Done() + buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} + var bytes uint32 + err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil) + n, err := conn.sock.asyncIo(c, &conn.sock.writeDeadline, bytes, err) + if err != nil { + if _, ok := err.(syscall.Errno); ok { + err = os.NewSyscallError("wsasend", err) + } + return 0, conn.opErr("write", err) + } + return n, err +} + +// Close closes the socket connection, failing any pending read or write calls. +func (conn *HvsockConn) Close() error { + return conn.sock.Close() +} + +func (conn *HvsockConn) IsClosed() bool { + return conn.sock.IsClosed() +} + +func (conn *HvsockConn) shutdown(how int) error { + if conn.IsClosed() { + return ErrFileClosed + } + + err := syscall.Shutdown(conn.sock.handle, how) + if err != nil { + return os.NewSyscallError("shutdown", err) + } + return nil +} + +// CloseRead shuts down the read end of the socket, preventing future read operations. +func (conn *HvsockConn) CloseRead() error { + err := conn.shutdown(syscall.SHUT_RD) + if err != nil { + return conn.opErr("close", err) + } + return nil +} + +// CloseWrite shuts down the write end of the socket, preventing future write operations and +// notifying the other endpoint that no more data will be written. +func (conn *HvsockConn) CloseWrite() error { + err := conn.shutdown(syscall.SHUT_WR) + if err != nil { + return conn.opErr("close", err) + } + return nil +} + +// LocalAddr returns the local address of the connection. +func (conn *HvsockConn) LocalAddr() net.Addr { + return &conn.local +} + +// RemoteAddr returns the remote address of the connection. 
+func (conn *HvsockConn) RemoteAddr() net.Addr { + return &conn.remote +} + +// SetDeadline implements the net.Conn SetDeadline method. +func (conn *HvsockConn) SetDeadline(t time.Time) error { + conn.SetReadDeadline(t) + conn.SetWriteDeadline(t) + return nil +} + +// SetReadDeadline implements the net.Conn SetReadDeadline method. +func (conn *HvsockConn) SetReadDeadline(t time.Time) error { + return conn.sock.SetReadDeadline(t) +} + +// SetWriteDeadline implements the net.Conn SetWriteDeadline method. +func (conn *HvsockConn) SetWriteDeadline(t time.Time) error { + return conn.sock.SetWriteDeadline(t) +} diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go new file mode 100644 index 000000000..96700a73d --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pipe.go @@ -0,0 +1,517 @@ +// +build windows + +package winio + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "os" + "runtime" + "syscall" + "time" + "unsafe" +) + +//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe +//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW +//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW +//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo +//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW +//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc +//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) = ntdll.NtCreateNamedPipeFile +//sys rtlNtStatusToDosError(status ntstatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb +//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) = ntdll.RtlDosPathNameToNtPathName_U +//sys rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) = ntdll.RtlDefaultNpAcl + +type ioStatusBlock struct { + Status, Information uintptr +} + +type objectAttributes struct { + Length uintptr + RootDirectory uintptr + ObjectName *unicodeString + Attributes uintptr + SecurityDescriptor *securityDescriptor + SecurityQoS uintptr +} + +type unicodeString struct { + Length uint16 + MaximumLength uint16 + Buffer uintptr +} + +type securityDescriptor struct { + Revision byte + Sbz1 byte + Control uint16 + Owner uintptr + Group uintptr + Sacl uintptr + Dacl uintptr +} + +type ntstatus int32 + +func (status ntstatus) Err() error { + if status >= 0 { + return nil + } + return rtlNtStatusToDosError(status) +} + +const ( + cERROR_PIPE_BUSY = syscall.Errno(231) + cERROR_NO_DATA = syscall.Errno(232) + cERROR_PIPE_CONNECTED = syscall.Errno(535) + cERROR_SEM_TIMEOUT = syscall.Errno(121) + + 
cSECURITY_SQOS_PRESENT = 0x100000 + cSECURITY_ANONYMOUS = 0 + + cPIPE_TYPE_MESSAGE = 4 + + cPIPE_READMODE_MESSAGE = 2 + + cFILE_OPEN = 1 + cFILE_CREATE = 2 + + cFILE_PIPE_MESSAGE_TYPE = 1 + cFILE_PIPE_REJECT_REMOTE_CLIENTS = 2 + + cSE_DACL_PRESENT = 4 +) + +var ( + // ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed. + // This error should match net.errClosing since docker takes a dependency on its text. + ErrPipeListenerClosed = errors.New("use of closed network connection") + + errPipeWriteClosed = errors.New("pipe has been closed for write") +) + +type win32Pipe struct { + *win32File + path string +} + +type win32MessageBytePipe struct { + win32Pipe + writeClosed bool + readEOF bool +} + +type pipeAddress string + +func (f *win32Pipe) LocalAddr() net.Addr { + return pipeAddress(f.path) +} + +func (f *win32Pipe) RemoteAddr() net.Addr { + return pipeAddress(f.path) +} + +func (f *win32Pipe) SetDeadline(t time.Time) error { + f.SetReadDeadline(t) + f.SetWriteDeadline(t) + return nil +} + +// CloseWrite closes the write side of a message pipe in byte mode. +func (f *win32MessageBytePipe) CloseWrite() error { + if f.writeClosed { + return errPipeWriteClosed + } + err := f.win32File.Flush() + if err != nil { + return err + } + _, err = f.win32File.Write(nil) + if err != nil { + return err + } + f.writeClosed = true + return nil +} + +// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since +// they are used to implement CloseWrite(). +func (f *win32MessageBytePipe) Write(b []byte) (int, error) { + if f.writeClosed { + return 0, errPipeWriteClosed + } + if len(b) == 0 { + return 0, nil + } + return f.win32File.Write(b) +} + +// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message +// mode pipe will return io.EOF, as will all subsequent reads. +func (f *win32MessageBytePipe) Read(b []byte) (int, error) { + if f.readEOF { + return 0, io.EOF + } + n, err := f.win32File.Read(b) + if err == io.EOF { + // If this was the result of a zero-byte read, then + // it is possible that the read was due to a zero-size + // message. Since we are simulating CloseWrite with a + // zero-byte message, ensure that all future Read() calls + // also return EOF. + f.readEOF = true + } else if err == syscall.ERROR_MORE_DATA { + // ERROR_MORE_DATA indicates that the pipe's read mode is message mode + // and the message still has more bytes. Treat this as a success, since + // this package presents all named pipes as byte streams. + err = nil + } + return n, err +} + +func (s pipeAddress) Network() string { + return "pipe" +} + +func (s pipeAddress) String() string { + return string(s) +} + +// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout. +func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Handle, error) { + for { + + select { + case <-ctx.Done(): + return syscall.Handle(0), ctx.Err() + default: + h, err := createFile(*path, access, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) + if err == nil { + return h, nil + } + if err != cERROR_PIPE_BUSY { + return h, &os.PathError{Err: err, Op: "open", Path: *path} + } + // Wait 10 msec and try again. This is a rather simplistic + // view, as we always try each 10 milliseconds. 
+ time.Sleep(10 * time.Millisecond) + } + } +} + +// DialPipe connects to a named pipe by path, timing out if the connection +// takes longer than the specified duration. If timeout is nil, then we use +// a default timeout of 2 seconds. (We do not use WaitNamedPipe.) +func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { + var absTimeout time.Time + if timeout != nil { + absTimeout = time.Now().Add(*timeout) + } else { + absTimeout = time.Now().Add(2 * time.Second) + } + ctx, _ := context.WithDeadline(context.Background(), absTimeout) + conn, err := DialPipeContext(ctx, path) + if err == context.DeadlineExceeded { + return nil, ErrTimeout + } + return conn, err +} + +// DialPipeContext attempts to connect to a named pipe by `path` until `ctx` +// cancellation or timeout. +func DialPipeContext(ctx context.Context, path string) (net.Conn, error) { + return DialPipeAccess(ctx, path, syscall.GENERIC_READ|syscall.GENERIC_WRITE) +} + +// DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx` +// cancellation or timeout. +func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) { + var err error + var h syscall.Handle + h, err = tryDialPipe(ctx, &path, access) + if err != nil { + return nil, err + } + + var flags uint32 + err = getNamedPipeInfo(h, &flags, nil, nil, nil) + if err != nil { + return nil, err + } + + f, err := makeWin32File(h) + if err != nil { + syscall.Close(h) + return nil, err + } + + // If the pipe is in message mode, return a message byte pipe, which + // supports CloseWrite(). + if flags&cPIPE_TYPE_MESSAGE != 0 { + return &win32MessageBytePipe{ + win32Pipe: win32Pipe{win32File: f, path: path}, + }, nil + } + return &win32Pipe{win32File: f, path: path}, nil +} + +type acceptResponse struct { + f *win32File + err error +} + +type win32PipeListener struct { + firstHandle syscall.Handle + path string + config PipeConfig + acceptCh chan (chan acceptResponse) + closeCh chan int + doneCh chan int +} + +func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (syscall.Handle, error) { + path16, err := syscall.UTF16FromString(path) + if err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + + var oa objectAttributes + oa.Length = unsafe.Sizeof(oa) + + var ntPath unicodeString + if err := rtlDosPathNameToNtPathName(&path16[0], &ntPath, 0, 0).Err(); err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + defer localFree(ntPath.Buffer) + oa.ObjectName = &ntPath + + // The security descriptor is only needed for the first pipe. + if first { + if sd != nil { + len := uint32(len(sd)) + sdb := localAlloc(0, len) + defer localFree(sdb) + copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd) + oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb)) + } else { + // Construct the default named pipe security descriptor. 
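+ // RtlDefaultNpAcl allocates the DACL, which is released via localFree when this function returns.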
+ var dacl uintptr + if err := rtlDefaultNpAcl(&dacl).Err(); err != nil { + return 0, fmt.Errorf("getting default named pipe ACL: %s", err) + } + defer localFree(dacl) + + sdb := &securityDescriptor{ + Revision: 1, + Control: cSE_DACL_PRESENT, + Dacl: dacl, + } + oa.SecurityDescriptor = sdb + } + } + + typ := uint32(cFILE_PIPE_REJECT_REMOTE_CLIENTS) + if c.MessageMode { + typ |= cFILE_PIPE_MESSAGE_TYPE + } + + disposition := uint32(cFILE_OPEN) + access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE) + if first { + disposition = cFILE_CREATE + // By not asking for read or write access, the named pipe file system + // will put this pipe into an initially disconnected state, blocking + // client connections until the next call with first == false. + access = syscall.SYNCHRONIZE + } + + timeout := int64(-50 * 10000) // 50ms + + var ( + h syscall.Handle + iosb ioStatusBlock + ) + err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, disposition, 0, typ, 0, 0, 0xffffffff, uint32(c.InputBufferSize), uint32(c.OutputBufferSize), &timeout).Err() + if err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + + runtime.KeepAlive(ntPath) + return h, nil +} + +func (l *win32PipeListener) makeServerPipe() (*win32File, error) { + h, err := makeServerPipeHandle(l.path, nil, &l.config, false) + if err != nil { + return nil, err + } + f, err := makeWin32File(h) + if err != nil { + syscall.Close(h) + return nil, err + } + return f, nil +} + +func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) { + p, err := l.makeServerPipe() + if err != nil { + return nil, err + } + + // Wait for the client to connect. + ch := make(chan error) + go func(p *win32File) { + ch <- connectPipe(p) + }(p) + + select { + case err = <-ch: + if err != nil { + p.Close() + p = nil + } + case <-l.closeCh: + // Abort the connect request by closing the handle. + p.Close() + p = nil + err = <-ch + if err == nil || err == ErrFileClosed { + err = ErrPipeListenerClosed + } + } + return p, err +} + +func (l *win32PipeListener) listenerRoutine() { + closed := false + for !closed { + select { + case <-l.closeCh: + closed = true + case responseCh := <-l.acceptCh: + var ( + p *win32File + err error + ) + for { + p, err = l.makeConnectedServerPipe() + // If the connection was immediately closed by the client, try + // again. + if err != cERROR_NO_DATA { + break + } + } + responseCh <- acceptResponse{p, err} + closed = err == ErrPipeListenerClosed + } + } + syscall.Close(l.firstHandle) + l.firstHandle = 0 + // Notify Close() and Accept() callers that the handle has been closed. + close(l.doneCh) +} + +// PipeConfig contain configuration for the pipe listener. +type PipeConfig struct { + // SecurityDescriptor contains a Windows security descriptor in SDDL format. + SecurityDescriptor string + + // MessageMode determines whether the pipe is in byte or message mode. In either + // case the pipe is read in byte mode by default. The only practical difference in + // this implementation is that CloseWrite() is only supported for message mode pipes; + // CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only + // transferred to the reader (and returned as io.EOF in this implementation) + // when the pipe is in message mode. + MessageMode bool + + // InputBufferSize specifies the size of the input buffer, in bytes. + InputBufferSize int32 + + // OutputBufferSize specifies the size of the output buffer, in bytes. 
+ OutputBufferSize int32 +} + +// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe. +// The pipe must not already exist. +func ListenPipe(path string, c *PipeConfig) (net.Listener, error) { + var ( + sd []byte + err error + ) + if c == nil { + c = &PipeConfig{} + } + if c.SecurityDescriptor != "" { + sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor) + if err != nil { + return nil, err + } + } + h, err := makeServerPipeHandle(path, sd, c, true) + if err != nil { + return nil, err + } + l := &win32PipeListener{ + firstHandle: h, + path: path, + config: *c, + acceptCh: make(chan (chan acceptResponse)), + closeCh: make(chan int), + doneCh: make(chan int), + } + go l.listenerRoutine() + return l, nil +} + +func connectPipe(p *win32File) error { + c, err := p.prepareIo() + if err != nil { + return err + } + defer p.wg.Done() + + err = connectNamedPipe(p.handle, &c.o) + _, err = p.asyncIo(c, nil, 0, err) + if err != nil && err != cERROR_PIPE_CONNECTED { + return err + } + return nil +} + +func (l *win32PipeListener) Accept() (net.Conn, error) { + ch := make(chan acceptResponse) + select { + case l.acceptCh <- ch: + response := <-ch + err := response.err + if err != nil { + return nil, err + } + if l.config.MessageMode { + return &win32MessageBytePipe{ + win32Pipe: win32Pipe{win32File: response.f, path: l.path}, + }, nil + } + return &win32Pipe{win32File: response.f, path: l.path}, nil + case <-l.doneCh: + return nil, ErrPipeListenerClosed + } +} + +func (l *win32PipeListener) Close() error { + select { + case l.closeCh <- 1: + <-l.doneCh + case <-l.doneCh: + } + return nil +} + +func (l *win32PipeListener) Addr() net.Addr { + return pipeAddress(l.path) +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go new file mode 100644 index 000000000..2d9161e2d --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go @@ -0,0 +1,228 @@ +// +build windows + +// Package guid provides a GUID type. The backing structure for a GUID is +// identical to that used by the golang.org/x/sys/windows GUID type. +// There are two main binary encodings used for a GUID, the big-endian encoding, +// and the Windows (mixed-endian) encoding. See here for details: +// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding +package guid + +import ( + "crypto/rand" + "crypto/sha1" + "encoding" + "encoding/binary" + "fmt" + "strconv" +) + +// Variant specifies which GUID variant (or "type") of the GUID. It determines +// how the entirety of the rest of the GUID is interpreted. +type Variant uint8 + +// The variants specified by RFC 4122. +const ( + // VariantUnknown specifies a GUID variant which does not conform to one of + // the variant encodings specified in RFC 4122. + VariantUnknown Variant = iota + VariantNCS + VariantRFC4122 + VariantMicrosoft + VariantFuture +) + +// Version specifies how the bits in the GUID were generated. For instance, a +// version 4 GUID is randomly generated, and a version 5 is generated from the +// hash of an input string. +type Version uint8 + +var _ = (encoding.TextMarshaler)(GUID{}) +var _ = (encoding.TextUnmarshaler)(&GUID{}) + +// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122. +func NewV4() (GUID, error) { + var b [16]byte + if _, err := rand.Read(b[:]); err != nil { + return GUID{}, err + } + + g := FromArray(b) + g.setVersion(4) // Version 4 means randomly generated. 
+ g.setVariant(VariantRFC4122) + + return g, nil +} + +// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing) +// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name, +// and the sample code treats it as a series of bytes, so we do the same here. +// +// Some implementations, such as those found on Windows, treat the name as a +// big-endian UTF16 stream of bytes. If that is desired, the string can be +// encoded as such before being passed to this function. +func NewV5(namespace GUID, name []byte) (GUID, error) { + b := sha1.New() + namespaceBytes := namespace.ToArray() + b.Write(namespaceBytes[:]) + b.Write(name) + + a := [16]byte{} + copy(a[:], b.Sum(nil)) + + g := FromArray(a) + g.setVersion(5) // Version 5 means generated from a string. + g.setVariant(VariantRFC4122) + + return g, nil +} + +func fromArray(b [16]byte, order binary.ByteOrder) GUID { + var g GUID + g.Data1 = order.Uint32(b[0:4]) + g.Data2 = order.Uint16(b[4:6]) + g.Data3 = order.Uint16(b[6:8]) + copy(g.Data4[:], b[8:16]) + return g +} + +func (g GUID) toArray(order binary.ByteOrder) [16]byte { + b := [16]byte{} + order.PutUint32(b[0:4], g.Data1) + order.PutUint16(b[4:6], g.Data2) + order.PutUint16(b[6:8], g.Data3) + copy(b[8:16], g.Data4[:]) + return b +} + +// FromArray constructs a GUID from a big-endian encoding array of 16 bytes. +func FromArray(b [16]byte) GUID { + return fromArray(b, binary.BigEndian) +} + +// ToArray returns an array of 16 bytes representing the GUID in big-endian +// encoding. +func (g GUID) ToArray() [16]byte { + return g.toArray(binary.BigEndian) +} + +// FromWindowsArray constructs a GUID from a Windows encoding array of bytes. +func FromWindowsArray(b [16]byte) GUID { + return fromArray(b, binary.LittleEndian) +} + +// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows +// encoding. +func (g GUID) ToWindowsArray() [16]byte { + return g.toArray(binary.LittleEndian) +} + +func (g GUID) String() string { + return fmt.Sprintf( + "%08x-%04x-%04x-%04x-%012x", + g.Data1, + g.Data2, + g.Data3, + g.Data4[:2], + g.Data4[2:]) +} + +// FromString parses a string containing a GUID and returns the GUID. The only +// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` +// format. 
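+// Hexadecimal digits are accepted in either upper or lower case; for example,
+// FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8") parses the RFC 4122 DNS namespace UUID.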
+func FromString(s string) (GUID, error) { + if len(s) != 36 { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + + var g GUID + + data1, err := strconv.ParseUint(s[0:8], 16, 32) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data1 = uint32(data1) + + data2, err := strconv.ParseUint(s[9:13], 16, 16) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data2 = uint16(data2) + + data3, err := strconv.ParseUint(s[14:18], 16, 16) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data3 = uint16(data3) + + for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} { + v, err := strconv.ParseUint(s[x:x+2], 16, 8) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data4[i] = uint8(v) + } + + return g, nil +} + +func (g *GUID) setVariant(v Variant) { + d := g.Data4[0] + switch v { + case VariantNCS: + d = (d & 0x7f) + case VariantRFC4122: + d = (d & 0x3f) | 0x80 + case VariantMicrosoft: + d = (d & 0x1f) | 0xc0 + case VariantFuture: + d = (d & 0x0f) | 0xe0 + case VariantUnknown: + fallthrough + default: + panic(fmt.Sprintf("invalid variant: %d", v)) + } + g.Data4[0] = d +} + +// Variant returns the GUID variant, as defined in RFC 4122. +func (g GUID) Variant() Variant { + b := g.Data4[0] + if b&0x80 == 0 { + return VariantNCS + } else if b&0xc0 == 0x80 { + return VariantRFC4122 + } else if b&0xe0 == 0xc0 { + return VariantMicrosoft + } else if b&0xe0 == 0xe0 { + return VariantFuture + } + return VariantUnknown +} + +func (g *GUID) setVersion(v Version) { + g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12) +} + +// Version returns the GUID version, as defined in RFC 4122. +func (g GUID) Version() Version { + return Version((g.Data3 & 0xF000) >> 12) +} + +// MarshalText returns the textual representation of the GUID. +func (g GUID) MarshalText() ([]byte, error) { + return []byte(g.String()), nil +} + +// UnmarshalText takes the textual representation of a GUID, and unmarhals it +// into this GUID. +func (g *GUID) UnmarshalText(text []byte) error { + g2, err := FromString(string(text)) + if err != nil { + return err + } + *g = g2 + return nil +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go new file mode 100644 index 000000000..f64d828c0 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go @@ -0,0 +1,15 @@ +// +build !windows + +package guid + +// GUID represents a GUID/UUID. It has the same structure as +// golang.org/x/sys/windows.GUID so that it can be used with functions expecting +// that type. It is defined as its own type as that is only available to builds +// targeted at `windows`. The representation matches that used by native Windows +// code. +type GUID struct { + Data1 uint32 + Data2 uint16 + Data3 uint16 + Data4 [8]byte +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go new file mode 100644 index 000000000..83617f4ee --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go @@ -0,0 +1,10 @@ +package guid + +import "golang.org/x/sys/windows" + +// GUID represents a GUID/UUID. It has the same structure as +// golang.org/x/sys/windows.GUID so that it can be used with functions expecting +// that type. 
It is defined as its own type so that stringification and +// marshaling can be supported. The representation matches that used by native +// Windows code. +type GUID windows.GUID diff --git a/vendor/github.com/Microsoft/go-winio/privilege.go b/vendor/github.com/Microsoft/go-winio/privilege.go new file mode 100644 index 000000000..c3dd7c217 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/privilege.go @@ -0,0 +1,203 @@ +// +build windows + +package winio + +import ( + "bytes" + "encoding/binary" + "fmt" + "runtime" + "sync" + "syscall" + "unicode/utf16" + + "golang.org/x/sys/windows" +) + +//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges +//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf +//sys revertToSelf() (err error) = advapi32.RevertToSelf +//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken +//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread +//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW +//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW +//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW + +const ( + SE_PRIVILEGE_ENABLED = 2 + + ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300 + + SeBackupPrivilege = "SeBackupPrivilege" + SeRestorePrivilege = "SeRestorePrivilege" + SeSecurityPrivilege = "SeSecurityPrivilege" +) + +const ( + securityAnonymous = iota + securityIdentification + securityImpersonation + securityDelegation +) + +var ( + privNames = make(map[string]uint64) + privNameMutex sync.Mutex +) + +// PrivilegeError represents an error enabling privileges. +type PrivilegeError struct { + privileges []uint64 +} + +func (e *PrivilegeError) Error() string { + s := "" + if len(e.privileges) > 1 { + s = "Could not enable privileges " + } else { + s = "Could not enable privilege " + } + for i, p := range e.privileges { + if i != 0 { + s += ", " + } + s += `"` + s += getPrivilegeName(p) + s += `"` + } + return s +} + +// RunWithPrivilege enables a single privilege for a function call. +func RunWithPrivilege(name string, fn func() error) error { + return RunWithPrivileges([]string{name}, fn) +} + +// RunWithPrivileges enables privileges for a function call. +func RunWithPrivileges(names []string, fn func() error) error { + privileges, err := mapPrivileges(names) + if err != nil { + return err + } + runtime.LockOSThread() + defer runtime.UnlockOSThread() + token, err := newThreadToken() + if err != nil { + return err + } + defer releaseThreadToken(token) + err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED) + if err != nil { + return err + } + return fn() +} + +func mapPrivileges(names []string) ([]uint64, error) { + var privileges []uint64 + privNameMutex.Lock() + defer privNameMutex.Unlock() + for _, name := range names { + p, ok := privNames[name] + if !ok { + err := lookupPrivilegeValue("", name, &p) + if err != nil { + return nil, err + } + privNames[name] = p + } + privileges = append(privileges, p) + } + return privileges, nil +} + +// EnableProcessPrivileges enables privileges globally for the process. 
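+// Unlike RunWithPrivileges, which adjusts an impersonation token on the current thread, this adjusts the process token itself.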
+func EnableProcessPrivileges(names []string) error { + return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED) +} + +// DisableProcessPrivileges disables privileges globally for the process. +func DisableProcessPrivileges(names []string) error { + return enableDisableProcessPrivilege(names, 0) +} + +func enableDisableProcessPrivilege(names []string, action uint32) error { + privileges, err := mapPrivileges(names) + if err != nil { + return err + } + + p, _ := windows.GetCurrentProcess() + var token windows.Token + err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token) + if err != nil { + return err + } + + defer token.Close() + return adjustPrivileges(token, privileges, action) +} + +func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error { + var b bytes.Buffer + binary.Write(&b, binary.LittleEndian, uint32(len(privileges))) + for _, p := range privileges { + binary.Write(&b, binary.LittleEndian, p) + binary.Write(&b, binary.LittleEndian, action) + } + prevState := make([]byte, b.Len()) + reqSize := uint32(0) + success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize) + if !success { + return err + } + if err == ERROR_NOT_ALL_ASSIGNED { + return &PrivilegeError{privileges} + } + return nil +} + +func getPrivilegeName(luid uint64) string { + var nameBuffer [256]uint16 + bufSize := uint32(len(nameBuffer)) + err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize) + if err != nil { + return fmt.Sprintf("", luid) + } + + var displayNameBuffer [256]uint16 + displayBufSize := uint32(len(displayNameBuffer)) + var langID uint32 + err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID) + if err != nil { + return fmt.Sprintf("", string(utf16.Decode(nameBuffer[:bufSize]))) + } + + return string(utf16.Decode(displayNameBuffer[:displayBufSize])) +} + +func newThreadToken() (windows.Token, error) { + err := impersonateSelf(securityImpersonation) + if err != nil { + return 0, err + } + + var token windows.Token + err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token) + if err != nil { + rerr := revertToSelf() + if rerr != nil { + panic(rerr) + } + return 0, err + } + return token, nil +} + +func releaseThreadToken(h windows.Token) { + err := revertToSelf() + if err != nil { + panic(err) + } + h.Close() +} diff --git a/vendor/github.com/Microsoft/go-winio/reparse.go b/vendor/github.com/Microsoft/go-winio/reparse.go new file mode 100644 index 000000000..fc1ee4d3a --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/reparse.go @@ -0,0 +1,128 @@ +package winio + +import ( + "bytes" + "encoding/binary" + "fmt" + "strings" + "unicode/utf16" + "unsafe" +) + +const ( + reparseTagMountPoint = 0xA0000003 + reparseTagSymlink = 0xA000000C +) + +type reparseDataBuffer struct { + ReparseTag uint32 + ReparseDataLength uint16 + Reserved uint16 + SubstituteNameOffset uint16 + SubstituteNameLength uint16 + PrintNameOffset uint16 + PrintNameLength uint16 +} + +// ReparsePoint describes a Win32 symlink or mount point. +type ReparsePoint struct { + Target string + IsMountPoint bool +} + +// UnsupportedReparsePointError is returned when trying to decode a non-symlink or +// mount point reparse point. 
+type UnsupportedReparsePointError struct { + Tag uint32 +} + +func (e *UnsupportedReparsePointError) Error() string { + return fmt.Sprintf("unsupported reparse point %x", e.Tag) +} + +// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink +// or a mount point. +func DecodeReparsePoint(b []byte) (*ReparsePoint, error) { + tag := binary.LittleEndian.Uint32(b[0:4]) + return DecodeReparsePointData(tag, b[8:]) +} + +func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) { + isMountPoint := false + switch tag { + case reparseTagMountPoint: + isMountPoint = true + case reparseTagSymlink: + default: + return nil, &UnsupportedReparsePointError{tag} + } + nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6]) + if !isMountPoint { + nameOffset += 4 + } + nameLength := binary.LittleEndian.Uint16(b[6:8]) + name := make([]uint16, nameLength/2) + err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name) + if err != nil { + return nil, err + } + return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil +} + +func isDriveLetter(c byte) bool { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') +} + +// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or +// mount point. +func EncodeReparsePoint(rp *ReparsePoint) []byte { + // Generate an NT path and determine if this is a relative path. + var ntTarget string + relative := false + if strings.HasPrefix(rp.Target, `\\?\`) { + ntTarget = `\??\` + rp.Target[4:] + } else if strings.HasPrefix(rp.Target, `\\`) { + ntTarget = `\??\UNC\` + rp.Target[2:] + } else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' { + ntTarget = `\??\` + rp.Target + } else { + ntTarget = rp.Target + relative = true + } + + // The paths must be NUL-terminated even though they are counted strings. 
+ target16 := utf16.Encode([]rune(rp.Target + "\x00")) + ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00")) + + size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8 + size += len(ntTarget16)*2 + len(target16)*2 + + tag := uint32(reparseTagMountPoint) + if !rp.IsMountPoint { + tag = reparseTagSymlink + size += 4 // Add room for symlink flags + } + + data := reparseDataBuffer{ + ReparseTag: tag, + ReparseDataLength: uint16(size), + SubstituteNameOffset: 0, + SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2), + PrintNameOffset: uint16(len(ntTarget16) * 2), + PrintNameLength: uint16((len(target16) - 1) * 2), + } + + var b bytes.Buffer + binary.Write(&b, binary.LittleEndian, &data) + if !rp.IsMountPoint { + flags := uint32(0) + if relative { + flags |= 1 + } + binary.Write(&b, binary.LittleEndian, flags) + } + + binary.Write(&b, binary.LittleEndian, ntTarget16) + binary.Write(&b, binary.LittleEndian, target16) + return b.Bytes() +} diff --git a/vendor/github.com/Microsoft/go-winio/sd.go b/vendor/github.com/Microsoft/go-winio/sd.go new file mode 100644 index 000000000..db1b370a1 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/sd.go @@ -0,0 +1,98 @@ +// +build windows + +package winio + +import ( + "syscall" + "unsafe" +) + +//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW +//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW +//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW +//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW +//sys localFree(mem uintptr) = LocalFree +//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength + +const ( + cERROR_NONE_MAPPED = syscall.Errno(1332) +) + +type AccountLookupError struct { + Name string + Err error +} + +func (e *AccountLookupError) Error() string { + if e.Name == "" { + return "lookup account: empty account name specified" + } + var s string + switch e.Err { + case cERROR_NONE_MAPPED: + s = "not found" + default: + s = e.Err.Error() + } + return "lookup account " + e.Name + ": " + s +} + +type SddlConversionError struct { + Sddl string + Err error +} + +func (e *SddlConversionError) Error() string { + return "convert " + e.Sddl + ": " + e.Err.Error() +} + +// LookupSidByName looks up the SID of an account by name +func LookupSidByName(name string) (sid string, err error) { + if name == "" { + return "", &AccountLookupError{name, cERROR_NONE_MAPPED} + } + + var sidSize, sidNameUse, refDomainSize uint32 + err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse) + if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { + return "", &AccountLookupError{name, err} + } + sidBuffer := make([]byte, sidSize) + refDomainBuffer := make([]uint16, refDomainSize) + err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse) + if err != nil { + return "", &AccountLookupError{name, err} + } + var strBuffer *uint16 + err = convertSidToStringSid(&sidBuffer[0], &strBuffer) + if err != nil { + return "", &AccountLookupError{name, err} + } + sid = 
syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:]) + localFree(uintptr(unsafe.Pointer(strBuffer))) + return sid, nil +} + +func SddlToSecurityDescriptor(sddl string) ([]byte, error) { + var sdBuffer uintptr + err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil) + if err != nil { + return nil, &SddlConversionError{sddl, err} + } + defer localFree(sdBuffer) + sd := make([]byte, getSecurityDescriptorLength(sdBuffer)) + copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)]) + return sd, nil +} + +func SecurityDescriptorToSddl(sd []byte) (string, error) { + var sddl *uint16 + // The returned string length seems to including an aribtrary number of terminating NULs. + // Don't use it. + err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil) + if err != nil { + return "", err + } + defer localFree(uintptr(unsafe.Pointer(sddl))) + return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil +} diff --git a/vendor/github.com/Microsoft/go-winio/syscall.go b/vendor/github.com/Microsoft/go-winio/syscall.go new file mode 100644 index 000000000..5955c99fd --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/syscall.go @@ -0,0 +1,3 @@ +package winio + +//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go new file mode 100644 index 000000000..176ff75e3 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go @@ -0,0 +1,427 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package winio + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modntdll = windows.NewLazySystemDLL("ntdll.dll") + modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") + + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") + procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") + procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") + procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") + procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") + procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") + procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") + procBackupRead = modkernel32.NewProc("BackupRead") + procBackupWrite = modkernel32.NewProc("BackupWrite") + procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") + procCreateFileW = modkernel32.NewProc("CreateFileW") + procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") + procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") + procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") + procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") + procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procLocalAlloc = modkernel32.NewProc("LocalAlloc") + procLocalFree = modkernel32.NewProc("LocalFree") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") + procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") + procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") + procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") + procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") + procbind = modws2_32.NewProc("bind") +) + +func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { + var _p0 uint32 + if releaseAll { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) + success = r0 != 0 + if true { + err = errnoErr(e1) + } + return +} + +func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func 
convertSidToStringSid(sid *byte, str **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(str) + if err != nil { + return + } + return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) +} + +func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getSecurityDescriptorLength(sd uintptr) (len uint32) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) + len = uint32(r0) + return +} + +func impersonateSelf(level uint32) (err error) { + r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(accountName) + if err != nil { + return + } + return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse) +} + +func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId) +} + +func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + return _lookupPrivilegeName(_p0, luid, buffer, size) +} + +func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), 
uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + var _p1 *uint16 + _p1, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _lookupPrivilegeValue(_p0, _p1, luid) +} + +func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { + r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) { + var _p0 uint32 + if openAsSelf { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func revertToSelf() (err error) { + r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } + r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } + r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile) +} + +func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), 
uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = errnoErr(e1) + } + return +} + +func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) + newport = syscall.Handle(r0) + if newport == 0 { + err = errnoErr(e1) + } + return +} + +func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) +} + +func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = errnoErr(e1) + } + return +} + +func getCurrentThread() (h syscall.Handle) { + r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) + h = syscall.Handle(r0) + return +} + +func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func localAlloc(uFlags uint32, length uint32) (ptr uintptr) { + r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0) + ptr = uintptr(r0) + return +} + +func localFree(mem uintptr) { + syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0) + return +} + +func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 
0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) { + r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) + status = ntstatus(r0) + return +} + +func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) { + r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0) + status = ntstatus(r0) + return +} + +func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) { + r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0) + status = ntstatus(r0) + return +} + +func rtlNtStatusToDosError(status ntstatus) (winerr error) { + r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0) + if r0 != 0 { + winerr = syscall.Errno(r0) + } + return +} + +func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { + var _p0 uint32 + if wait { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socketError { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go index 710eb432f..11d4240d6 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go @@ -50,9 +50,19 @@ func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { for i, n := range names { val := v.FieldByName(n) + ft, ok := v.Type().FieldByName(n) + if !ok { + panic(fmt.Sprintf("expected to find field %v on type %v, but was not found", n, v.Type())) + } + buf.WriteString(strings.Repeat(" ", indent+2)) buf.WriteString(n + ": ") - prettify(val, indent+2, buf) + + if tag := ft.Tag.Get("sensitive"); tag == "true" { + buf.WriteString("") + } else { + prettify(val, indent+2, buf) + } if i < len(names)-1 { buf.WriteString(",\n") diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go index 645df2450..3f7cffd95 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go @@ -8,6 +8,8 @@ import ( ) // StringValue returns the string representation of a value. +// +// Deprecated: Use Prettify instead. 
func StringValue(i interface{}) string { var buf bytes.Buffer stringValue(reflect.ValueOf(i), 0, &buf) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go index 1d774cfa2..5ac5c24a1 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go @@ -53,7 +53,7 @@ var LogHTTPRequestHandler = request.NamedHandler{ } func logRequest(r *request.Request) { - if !r.Config.LogLevel.AtLeast(aws.LogDebug) { + if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil { return } @@ -94,6 +94,10 @@ var LogHTTPRequestHeaderHandler = request.NamedHandler{ } func logRequestHeader(r *request.Request) { + if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil { + return + } + b, err := httputil.DumpRequestOut(r.HTTPRequest, false) if err != nil { r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, @@ -124,7 +128,7 @@ var LogHTTPResponseHandler = request.NamedHandler{ } func logResponse(r *request.Request) { - if !r.Config.LogLevel.AtLeast(aws.LogDebug) { + if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil { return } @@ -186,7 +190,7 @@ var LogHTTPResponseHeaderHandler = request.NamedHandler{ } func logResponseHeader(r *request.Request) { - if r.Config.Logger == nil { + if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil { return } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 4538d2f08..b8bdec889 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -355,11 +355,17 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -731,6 +737,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -947,6 +954,18 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "aps": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "athena": service{ Endpoints: endpoints{ @@ -1129,6 +1148,14 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "braket": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "budgets": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, @@ -1174,9 +1201,11 @@ var awsPartition = partition{ "cloud9": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -1887,6 +1916,59 @@ var awsPartition = partition{ "us-east-1": endpoint{}, }, }, + 
"data.jobs.iot": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "data.jobs.iot-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "data.jobs.iot-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "data.jobs.iot-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "data.jobs.iot-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "data.jobs.iot-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "data.mediastore": service{ Endpoints: endpoints{ @@ -3321,6 +3403,17 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "frauddetector": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "fsx": service{ Endpoints: endpoints{ @@ -3808,6 +3901,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, @@ -3911,12 +4005,42 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "iotthingsgraph": service{ @@ -3969,6 +4093,14 @@ var awsPartition = partition{ }, }, }, + "ivs": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "kafka": service{ Endpoints: endpoints{ @@ -4573,6 +4705,26 
@@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "mediapackage-vod": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "mediastore": service{ Endpoints: endpoints{ @@ -4647,6 +4799,20 @@ var awsPartition = partition{ "us-east-1": endpoint{}, }, }, + "models-v2-lex": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "models.lex": service{ Defaults: endpoint{ CredentialScope: credentialScope{ @@ -4900,6 +5066,62 @@ var awsPartition = partition{ }, }, }, + "network-firewall": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "network-firewall-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "network-firewall-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "network-firewall-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "network-firewall-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "network-firewall-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "oidc": service{ Endpoints: endpoints{ @@ -5159,6 +5381,7 @@ var awsPartition = partition{ "polly": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -5664,6 +5887,20 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "runtime-v2-lex": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "runtime.lex": service{ Defaults: endpoint{ CredentialScope: credentialScope{ @@ -5700,6 +5937,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": 
endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -6849,6 +7087,20 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "ssm-incidents": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "states": service{ Endpoints: endpoints{ @@ -7199,9 +7451,33 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "transcribestreaming-fips-ca-central-1": endpoint{ + Hostname: "transcribestreaming-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "transcribestreaming-fips-us-east-1": endpoint{ + Hostname: "transcribestreaming-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "transcribestreaming-fips-us-east-2": endpoint{ + Hostname: "transcribestreaming-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "transcribestreaming-fips-us-west-2": endpoint{ + Hostname: "transcribestreaming-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "transfer": service{ @@ -7914,6 +8190,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "data.jobs.iot": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "dax": service{ Endpoints: endpoints{ @@ -8045,6 +8328,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "emr-containers": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "es": service{ Endpoints: endpoints{ @@ -8066,6 +8356,15 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "fms": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "fsx": service{ Endpoints: endpoints{ @@ -9103,6 +9402,25 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "data.jobs.iot": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "data.jobs.iot-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "data.jobs.iot-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "datasync": service{ Endpoints: endpoints{ @@ -9629,6 +9947,18 @@ var awsusgovPartition = partition{ "iotsecuredtunneling": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -9837,6 +10167,36 @@ var awsusgovPartition = 
partition{ }, }, }, + "network-firewall": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "network-firewall-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "network-firewall-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "oidc": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Hostname: "oidc.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "organizations": service{ PartitionEndpoint: "aws-us-gov-global", IsRegionalized: boxedFalse, @@ -10220,6 +10580,19 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "servicediscovery": service{ + + Endpoints: endpoints{ + "servicediscovery-fips": endpoint{ + Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "servicequotas": service{ Defaults: endpoint{ Protocols: []string{"https"}, @@ -10885,6 +11258,12 @@ var awsisoPartition = partition{ }, }, }, + "route53resolver": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, "runtime.sagemaker": service{ Endpoints: endpoints{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 308fcb737..e429d9037 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.40.25" +const SDKVersion = "1.40.44" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go index d486a4c2a..d9a4e7649 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go @@ -29,7 +29,8 @@ const ( RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" // RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z - ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z" + ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z" + iso8601TimeFormatNoZ = "2006-01-02T15:04:05.999999999" // This format is used for output time with fractional second precision up to milliseconds ISO8601OutputTimeFormat = "2006-01-02T15:04:05.999999999Z" @@ -82,6 +83,7 @@ func ParseTime(formatName, value string) (time.Time, error) { case ISO8601TimeFormatName: // Smithy DateTime format return tryParse(value, ISO8601TimeFormat, + iso8601TimeFormatNoZ, time.RFC3339Nano, time.RFC3339, ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index 331533077..633d3c09f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -726,16 +726,15 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // You can optionally request server-side encryption. For server-side encryption, // Amazon S3 encrypts your data as it writes it to disks in its data centers // and decrypts it when you access it. 
You can provide your own encryption key, -// or use Amazon Web Services Key Management Service (Amazon Web Services KMS) -// customer master keys (CMKs) or Amazon S3-managed encryption keys. If you -// choose to provide your own encryption key, the request headers you provide -// in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// or use Amazon Web Services KMS keys or Amazon S3-managed encryption keys. +// If you choose to provide your own encryption key, the request headers you +// provide in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) // requests must match the headers you used in the request to initiate the upload // by using CreateMultipartUpload. // // To perform a multipart upload with encryption using an Amazon Web Services -// KMS CMK, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey* +// KMS key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey* // actions on the key. These permissions are required because Amazon S3 must // decrypt and read data from the encrypted file parts before it completes the // multipart upload. For more information, see Multipart upload API and permissions @@ -743,10 +742,10 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // in the Amazon S3 User Guide. // // If your Identity and Access Management (IAM) user or role is in the same -// Amazon Web Services account as the Amazon Web Services KMS CMK, then you -// must have these permissions on the key policy. If your IAM user or role belongs -// to a different account than the key, then you must have the permissions on -// both the key policy and your IAM user or role. +// Amazon Web Services account as the KMS key, then you must have these permissions +// on the key policy. If your IAM user or role belongs to a different account +// than the key, then you must have the permissions on both the key policy and +// your IAM user or role. // // For more information, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). // @@ -776,27 +775,25 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // use Amazon Web Services managed encryption keys or provide your own encryption // key. // -// * Use encryption keys managed by Amazon S3 or customer master keys (CMKs) -// stored in Amazon Web Services Key Management Service (Amazon Web Services -// KMS) – If you want Amazon Web Services to manage the keys used to encrypt +// * Use encryption keys managed by Amazon S3 or customer managed key stored +// in Amazon Web Services Key Management Service (Amazon Web Services KMS) +// – If you want Amazon Web Services to manage the keys used to encrypt // data, specify the following headers in the request. x-amz-server-side-encryption // x-amz-server-side-encryption-aws-kms-key-id x-amz-server-side-encryption-context // If you specify x-amz-server-side-encryption:aws:kms, but don't provide // x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon -// Web Services managed CMK in Amazon Web Services KMS to protect the data. +// Web Services managed key in Amazon Web Services KMS to protect the data. // All GET and PUT requests for an object protected by Amazon Web Services // KMS fail if you don't make them with SSL or by using SigV4. 
For more information -// about server-side encryption with CMKs stored in Amazon Web Services KMS -// (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs -// stored in Amazon Web Services KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// about server-side encryption with KMS key (SSE-KMS), see Protecting Data +// Using Server-Side Encryption with KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). // // * Use customer-provided encryption keys – If you want to manage your // own encryption keys, provide all the following headers in the request. // x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key // x-amz-server-side-encryption-customer-key-MD5 For more information about -// server-side encryption with CMKs stored in Amazon Web Services KMS (SSE-KMS), -// see Protecting Data Using Server-Side Encryption with CMKs stored in Amazon -// Web Services KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// server-side encryption with KMS keys (SSE-KMS), see Protecting Data Using +// Server-Side Encryption with KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). // // Access-Control-List (ACL)-Specific Request Headers // @@ -1279,20 +1276,21 @@ func (c *S3) DeleteBucketIntelligentTieringConfigurationRequest(input *DeleteBuc // // The S3 Intelligent-Tiering storage class is designed to optimize storage // costs by automatically moving data to the most cost-effective storage access -// tier, without additional operational overhead. S3 Intelligent-Tiering delivers -// automatic cost savings by moving data between access tiers, when access patterns -// change. -// -// The S3 Intelligent-Tiering storage class is suitable for objects larger than -// 128 KB that you plan to store for at least 30 days. If the size of an object -// is less than 128 KB, it is not eligible for auto-tiering. Smaller objects -// can be stored, but they are always charged at the frequent access tier rates -// in the S3 Intelligent-Tiering storage class. -// -// If you delete an object before the end of the 30-day minimum storage duration -// period, you are charged for 30 days. For more information, see Storage class -// for automatically optimizing frequently and infrequently accessed objects -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// tier, without performance impact or operational overhead. S3 Intelligent-Tiering +// delivers automatic cost savings in two low latency and high throughput access +// tiers. For data that can be accessed asynchronously, you can choose to activate +// automatic archiving capabilities within the S3 Intelligent-Tiering storage +// class. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of +// object size or retention period. If the size of an object is less than 128 +// KB, it is not eligible for auto-tiering. Smaller objects can be stored, but +// they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering +// storage class. +// +// For more information, see Storage class for automatically optimizing frequently +// and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). 
// // Operations related to DeleteBucketIntelligentTieringConfiguration include: // @@ -2968,20 +2966,21 @@ func (c *S3) GetBucketIntelligentTieringConfigurationRequest(input *GetBucketInt // // The S3 Intelligent-Tiering storage class is designed to optimize storage // costs by automatically moving data to the most cost-effective storage access -// tier, without additional operational overhead. S3 Intelligent-Tiering delivers -// automatic cost savings by moving data between access tiers, when access patterns -// change. -// -// The S3 Intelligent-Tiering storage class is suitable for objects larger than -// 128 KB that you plan to store for at least 30 days. If the size of an object -// is less than 128 KB, it is not eligible for auto-tiering. Smaller objects -// can be stored, but they are always charged at the frequent access tier rates -// in the S3 Intelligent-Tiering storage class. -// -// If you delete an object before the end of the 30-day minimum storage duration -// period, you are charged for 30 days. For more information, see Storage class -// for automatically optimizing frequently and infrequently accessed objects -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// tier, without performance impact or operational overhead. S3 Intelligent-Tiering +// delivers automatic cost savings in two low latency and high throughput access +// tiers. For data that can be accessed asynchronously, you can choose to activate +// automatic archiving capabilities within the S3 Intelligent-Tiering storage +// class. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of +// object size or retention period. If the size of an object is less than 128 +// KB, it is not eligible for auto-tiering. Smaller objects can be stored, but +// they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering +// storage class. +// +// For more information, see Storage class for automatically optimizing frequently +// and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). // // Operations related to GetBucketIntelligentTieringConfiguration include: // @@ -4547,9 +4546,9 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // // Encryption request headers, like x-amz-server-side-encryption, should not // be sent for GET requests if your object uses server-side encryption with -// CMKs stored in Amazon Web Services KMS (SSE-KMS) or server-side encryption -// with Amazon S3–managed encryption keys (SSE-S3). If your object does use -// these types of keys, you’ll get an HTTP 400 BadRequest error. +// KMS keys (SSE-KMS) or server-side encryption with Amazon S3–managed encryption +// keys (SSE-S3). If your object does use these types of keys, you’ll get +// an HTTP 400 BadRequest error. // // If you encrypt an object by using server-side encryption with customer-provided // encryption keys (SSE-C) when you store the object in Amazon S3, then when @@ -4564,16 +4563,15 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). 
// -// Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging -// action), the response also returns the x-amz-tagging-count header that provides -// the count of number of tags associated with the object. You can use GetObjectTagging -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +// Assuming you have the relevant permission to read object tags, the response +// also returns the x-amz-tagging-count header that provides the count of number +// of tags associated with the object. You can use GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) // to retrieve the tag set associated with an object. // // Permissions // -// You need the s3:GetObject permission for this operation. For more information, -// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// You need the relevant read object (or version) permission for this operation. +// For more information, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). // If the object you request does not exist, the error Amazon S3 returns depends // on whether you also have the s3:ListBucket permission. // @@ -5456,9 +5454,9 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou // // * Encryption request headers, like x-amz-server-side-encryption, should // not be sent for GET requests if your object uses server-side encryption -// with CMKs stored in Amazon Web Services KMS (SSE-KMS) or server-side encryption -// with Amazon S3–managed encryption keys (SSE-S3). If your object does -// use these types of keys, you’ll get an HTTP 400 BadRequest error. +// with KMS keys (SSE-KMS) or server-side encryption with Amazon S3–managed +// encryption keys (SSE-S3). If your object does use these types of keys, +// you’ll get an HTTP 400 BadRequest error. // // * The last modified property in this case is the creation date of the // object. @@ -5482,8 +5480,8 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou // // Permissions // -// You need the s3:GetObject permission for this operation. For more information, -// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// You need the relevant read object (or version) permission for this operation. +// For more information, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). // If the object you request does not exist, the error Amazon S3 returns depends // on whether you also have the s3:ListBucket permission. // @@ -5676,20 +5674,21 @@ func (c *S3) ListBucketIntelligentTieringConfigurationsRequest(input *ListBucket // // The S3 Intelligent-Tiering storage class is designed to optimize storage // costs by automatically moving data to the most cost-effective storage access -// tier, without additional operational overhead. S3 Intelligent-Tiering delivers -// automatic cost savings by moving data between access tiers, when access patterns -// change. -// -// The S3 Intelligent-Tiering storage class is suitable for objects larger than -// 128 KB that you plan to store for at least 30 days. If the size of an object -// is less than 128 KB, it is not eligible for auto-tiering. 
Smaller objects -// can be stored, but they are always charged at the frequent access tier rates -// in the S3 Intelligent-Tiering storage class. -// -// If you delete an object before the end of the 30-day minimum storage duration -// period, you are charged for 30 days. For more information, see Storage class -// for automatically optimizing frequently and infrequently accessed objects -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// tier, without performance impact or operational overhead. S3 Intelligent-Tiering +// delivers automatic cost savings in two low latency and high throughput access +// tiers. For data that can be accessed asynchronously, you can choose to activate +// automatic archiving capabilities within the S3 Intelligent-Tiering storage +// class. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of +// object size or retention period. If the size of an object is less than 128 +// KB, it is not eligible for auto-tiering. Smaller objects can be stored, but +// they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering +// storage class. +// +// For more information, see Storage class for automatically optimizing frequently +// and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). // // Operations related to ListBucketIntelligentTieringConfigurations include: // @@ -7372,10 +7371,10 @@ func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *r // and Amazon S3 Bucket Key for an existing bucket. // // Default encryption for a bucket can use server-side encryption with Amazon -// S3-managed keys (SSE-S3) or Amazon Web Services KMS customer master keys -// (SSE-KMS). If you specify default encryption using SSE-KMS, you can also -// configure Amazon S3 Bucket Key. For information about default encryption, -// see Amazon S3 default bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) +// S3-managed keys (SSE-S3) or customer managed keys (SSE-KMS). If you specify +// default encryption using SSE-KMS, you can also configure Amazon S3 Bucket +// Key. For information about default encryption, see Amazon S3 default bucket +// encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) // in the Amazon S3 User Guide. For more information about S3 Bucket Keys, see // Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) // in the Amazon S3 User Guide. @@ -7474,20 +7473,21 @@ func (c *S3) PutBucketIntelligentTieringConfigurationRequest(input *PutBucketInt // // The S3 Intelligent-Tiering storage class is designed to optimize storage // costs by automatically moving data to the most cost-effective storage access -// tier, without additional operational overhead. S3 Intelligent-Tiering delivers -// automatic cost savings by moving data between access tiers, when access patterns -// change. -// -// The S3 Intelligent-Tiering storage class is suitable for objects larger than -// 128 KB that you plan to store for at least 30 days. If the size of an object -// is less than 128 KB, it is not eligible for auto-tiering. Smaller objects -// can be stored, but they are always charged at the frequent access tier rates -// in the S3 Intelligent-Tiering storage class. 
-// -// If you delete an object before the end of the 30-day minimum storage duration -// period, you are charged for 30 days. For more information, see Storage class -// for automatically optimizing frequently and infrequently accessed objects -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// tier, without performance impact or operational overhead. S3 Intelligent-Tiering +// delivers automatic cost savings in two low latency and high throughput access +// tiers. For data that can be accessed asynchronously, you can choose to activate +// automatic archiving capabilities within the S3 Intelligent-Tiering storage +// class. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of +// object size or retention period. If the size of an object is less than 128 +// KB, it is not eligible for auto-tiering. Smaller objects can be stored, but +// they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering +// storage class. +// +// For more information, see Storage class for automatically optimizing frequently +// and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). // // Operations related to PutBucketIntelligentTieringConfiguration include: // @@ -8129,7 +8129,7 @@ func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigu // // * DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) // -// * PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// * GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) // // * ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) // @@ -8649,11 +8649,11 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req // Handling Replication of Encrypted Objects // // By default, Amazon S3 doesn't replicate objects that are stored at rest using -// server-side encryption with CMKs stored in Amazon Web Services KMS. To replicate -// Amazon Web Services KMS-encrypted objects, add the following: SourceSelectionCriteria, -// SseKmsEncryptedObjects, Status, EncryptionConfiguration, and ReplicaKmsKeyID. -// For information about replication configuration, see Replicating Objects -// Created with SSE Using CMKs stored in Amazon Web Services KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html). +// server-side encryption with KMS keys. To replicate Amazon Web Services KMS-encrypted +// objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects, +// Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about +// replication configuration, see Replicating Objects Created with SSE Using +// KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html). 
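// A minimal sketch of the replication configuration elements listed above
// (SourceSelectionCriteria, SseKmsEncryptedObjects, Status,
// EncryptionConfiguration, ReplicaKmsKeyID). It assumes a default aws-sdk-go v1
// session; bucket names, the IAM role ARN, and the KMS key ARN are placeholders.
package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	_, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
		Bucket: aws.String("source-bucket"), // placeholder
		ReplicationConfiguration: &s3.ReplicationConfiguration{
			Role: aws.String("arn:aws:iam::111122223333:role/replication-role"), // placeholder
			Rules: []*s3.ReplicationRule{{
				Status:                  aws.String("Enabled"),
				Priority:                aws.Int64(1),
				Filter:                  &s3.ReplicationRuleFilter{Prefix: aws.String("")},
				DeleteMarkerReplication: &s3.DeleteMarkerReplication{Status: aws.String("Disabled")},
				// Opt in to replicating objects that are encrypted with SSE-KMS.
				SourceSelectionCriteria: &s3.SourceSelectionCriteria{
					SseKmsEncryptedObjects: &s3.SseKmsEncryptedObjects{Status: aws.String("Enabled")},
				},
				Destination: &s3.Destination{
					Bucket: aws.String("arn:aws:s3:::destination-bucket"), // placeholder
					// KMS key used to re-encrypt replicas in the destination bucket.
					EncryptionConfiguration: &s3.EncryptionConfiguration{
						ReplicaKmsKeyID: aws.String("arn:aws:kms:us-east-1:111122223333:key/placeholder"),
					},
				},
			}},
		},
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}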
// // For information on PutBucketReplication errors, see List of replication-related // error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList) @@ -10340,11 +10340,10 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) // in the Amazon S3 User Guide. For objects that are encrypted with Amazon -// S3 managed encryption keys (SSE-S3) and customer master keys (CMKs) stored -// in Amazon Web Services Key Management Service (SSE-KMS), server-side encryption -// is handled transparently, so you don't need to specify anything. For more -// information about server-side encryption, including SSE-S3 and SSE-KMS, -// see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) +// S3 managed encryption keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), +// server-side encryption is handled transparently, so you don't need to +// specify anything. For more information about server-side encryption, including +// SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) // in the Amazon S3 User Guide. // // Working with the Response Body @@ -10952,8 +10951,8 @@ func (c *S3) WriteGetObjectResponseRequest(input *WriteGetObjectResponseInput) ( // WriteGetObjectResponse API operation for Amazon Simple Storage Service. // // Passes transformed objects to a GetObject operation when using Object Lambda -// Access Points. For information about Object Lambda Access Points, see Transforming -// objects with Object Lambda Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html) +// access points. For information about Object Lambda access points, see Transforming +// objects with Object Lambda access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html) // in the Amazon S3 User Guide. // // This operation supports metadata that can be returned by GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html), @@ -10973,7 +10972,7 @@ func (c *S3) WriteGetObjectResponseRequest(input *WriteGetObjectResponseInput) ( // (PII) and decompress S3 objects. These Lambda functions are available in // the Amazon Web Services Serverless Application Repository, and can be selected // through the Amazon Web Services Management Console when you create your Object -// Lambda Access Point. +// Lambda access point. // // Example 1: PII Access Control - This Lambda function uses Amazon Comprehend, // a natural language processing (NLP) service using machine learning to find @@ -11036,12 +11035,20 @@ type AbortIncompleteMultipartUpload struct { DaysAfterInitiation *int64 `type:"integer"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AbortIncompleteMultipartUpload) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AbortIncompleteMultipartUpload) GoString() string { return s.String() } @@ -11098,12 +11105,20 @@ type AbortMultipartUploadInput struct { UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AbortMultipartUploadInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AbortMultipartUploadInput) GoString() string { return s.String() } @@ -11205,12 +11220,20 @@ type AbortMultipartUploadOutput struct { RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AbortMultipartUploadOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AbortMultipartUploadOutput) GoString() string { return s.String() } @@ -11231,12 +11254,20 @@ type AccelerateConfiguration struct { Status *string `type:"string" enum:"BucketAccelerateStatus"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AccelerateConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AccelerateConfiguration) GoString() string { return s.String() } @@ -11258,12 +11289,20 @@ type AccessControlPolicy struct { Owner *Owner `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
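// A minimal sketch of the "sensitive" redaction behaviour documented in the
// String/GoString comments above, using GetObjectInput (whose SSECustomerKey
// member is tagged sensitive) as the example type. Values are placeholders; the
// exact redacted rendering comes from awsutil.Prettify, so treat the printed
// token as indicative rather than guaranteed.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	in := &s3.GetObjectInput{
		Bucket:               aws.String("example-bucket"),
		Key:                  aws.String("example-object"),
		SSECustomerAlgorithm: aws.String("AES256"),
		SSECustomerKey:       aws.String("0123456789abcdef0123456789abcdef"), // sensitive member
	}

	// Non-sensitive members print normally; SSECustomerKey should be redacted.
	fmt.Println(in.String())
	fmt.Println(in.GoString())
}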
func (s AccessControlPolicy) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AccessControlPolicy) GoString() string { return s.String() } @@ -11312,12 +11351,20 @@ type AccessControlTranslation struct { Owner *string `type:"string" required:"true" enum:"OwnerOverride"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AccessControlTranslation) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AccessControlTranslation) GoString() string { return s.String() } @@ -11355,12 +11402,20 @@ type AnalyticsAndOperator struct { Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AnalyticsAndOperator) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AnalyticsAndOperator) GoString() string { return s.String() } @@ -11419,12 +11474,20 @@ type AnalyticsConfiguration struct { StorageClassAnalysis *StorageClassAnalysis `type:"structure" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AnalyticsConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AnalyticsConfiguration) GoString() string { return s.String() } @@ -11483,12 +11546,20 @@ type AnalyticsExportDestination struct { S3BucketDestination *AnalyticsS3BucketDestination `type:"structure" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". func (s AnalyticsExportDestination) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AnalyticsExportDestination) GoString() string { return s.String() } @@ -11534,12 +11605,20 @@ type AnalyticsFilter struct { Tag *Tag `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AnalyticsFilter) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AnalyticsFilter) GoString() string { return s.String() } @@ -11607,12 +11686,20 @@ type AnalyticsS3BucketDestination struct { Prefix *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AnalyticsS3BucketDestination) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AnalyticsS3BucketDestination) GoString() string { return s.String() } @@ -11678,12 +11765,20 @@ type Bucket struct { Name *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Bucket) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Bucket) GoString() string { return s.String() } @@ -11712,12 +11807,20 @@ type BucketLifecycleConfiguration struct { Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s BucketLifecycleConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s BucketLifecycleConfiguration) GoString() string { return s.String() } @@ -11762,12 +11865,20 @@ type BucketLoggingStatus struct { LoggingEnabled *LoggingEnabled `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s BucketLoggingStatus) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s BucketLoggingStatus) GoString() string { return s.String() } @@ -11807,12 +11918,20 @@ type CORSConfiguration struct { CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CORSConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CORSConfiguration) GoString() string { return s.String() } @@ -11880,12 +11999,20 @@ type CORSRule struct { MaxAgeSeconds *int64 `type:"integer"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CORSRule) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CORSRule) GoString() string { return s.String() } @@ -11994,12 +12121,20 @@ type CSVInput struct { RecordDelimiter *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s CSVInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CSVInput) GoString() string { return s.String() } @@ -12076,12 +12211,20 @@ type CSVOutput struct { RecordDelimiter *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CSVOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CSVOutput) GoString() string { return s.String() } @@ -12140,12 +12283,20 @@ type CloudFunctionConfiguration struct { InvocationRole *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CloudFunctionConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CloudFunctionConfiguration) GoString() string { return s.String() } @@ -12192,12 +12343,20 @@ type CommonPrefix struct { Prefix *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CommonPrefix) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CommonPrefix) GoString() string { return s.String() } @@ -12257,12 +12416,20 @@ type CompleteMultipartUploadInput struct { UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s CompleteMultipartUploadInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CompleteMultipartUploadInput) GoString() string { return s.String() } @@ -12411,14 +12578,18 @@ type CompleteMultipartUploadOutput struct { RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed customer master key - // (CMK) that was used for the object. + // (Amazon Web Services KMS) symmetric customer managed key that was used for + // the object. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CompleteMultipartUploadOutput's + // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // If you specified server-side encryption either with an Amazon S3-managed - // encryption key or an Amazon Web Services KMS customer master key (CMK) in - // your initiate multipart upload request, the response includes this header. - // It confirms the encryption algorithm that Amazon S3 used to encrypt the object. + // encryption key or an Amazon Web Services KMS key in your initiate multipart + // upload request, the response includes this header. It confirms the encryption + // algorithm that Amazon S3 used to encrypt the object. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // Version ID of the newly created object, in case the bucket has versioning @@ -12426,12 +12597,20 @@ type CompleteMultipartUploadOutput struct { VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CompleteMultipartUploadOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CompleteMultipartUploadOutput) GoString() string { return s.String() } @@ -12511,12 +12690,20 @@ type CompletedMultipartUpload struct { Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s CompletedMultipartUpload) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CompletedMultipartUpload) GoString() string { return s.String() } @@ -12539,12 +12726,20 @@ type CompletedPart struct { PartNumber *int64 `type:"integer"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CompletedPart) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CompletedPart) GoString() string { return s.String() } @@ -12589,12 +12784,20 @@ type Condition struct { KeyPrefixEquals *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Condition) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Condition) GoString() string { return s.String() } @@ -12615,12 +12818,20 @@ type ContinuationEvent struct { _ struct{} `locationName:"ContinuationEvent" type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ContinuationEvent) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ContinuationEvent) GoString() string { return s.String() } @@ -12752,6 +12963,10 @@ type CopyObjectInput struct { // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt // the source object. The encryption key provided in this header must be one // that was used when the source object was created. + // + // CopySourceSSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CopyObjectInput's + // String and GoString methods. 
CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. @@ -12829,6 +13044,10 @@ type CopyObjectInput struct { // S3 does not store the encryption key. The key must be appropriate for use // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CopyObjectInput's + // String and GoString methods. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. @@ -12839,6 +13058,10 @@ type CopyObjectInput struct { // Specifies the Amazon Web Services KMS Encryption Context to use for object // encryption. The value of this header is a base64-encoded UTF-8 string holding // JSON with the encryption context key-value pairs. + // + // SSEKMSEncryptionContext is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CopyObjectInput's + // String and GoString methods. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` // Specifies the Amazon Web Services KMS key ID to use for object encryption. @@ -12848,6 +13071,10 @@ type CopyObjectInput struct { // Web Services CLI, see Specifying the Signature Version in Request Authentication // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) // in the Amazon S3 User Guide. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CopyObjectInput's + // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The server-side encryption algorithm used when storing this object in Amazon @@ -12877,12 +13104,20 @@ type CopyObjectInput struct { WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CopyObjectInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CopyObjectInput) GoString() string { return s.String() } @@ -13233,11 +13468,19 @@ type CopyObjectOutput struct { // If present, specifies the Amazon Web Services KMS Encryption Context to use // for object encryption. The value of this header is a base64-encoded UTF-8 // string holding JSON with the encryption context key-value pairs. 
+ // + // SSEKMSEncryptionContext is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CopyObjectOutput's + // String and GoString methods. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed customer master key - // (CMK) that was used for the object. + // (Amazon Web Services KMS) symmetric customer managed key that was used for + // the object. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CopyObjectOutput's + // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The server-side encryption algorithm used when storing this object in Amazon @@ -13248,12 +13491,20 @@ type CopyObjectOutput struct { VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CopyObjectOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CopyObjectOutput) GoString() string { return s.String() } @@ -13336,12 +13587,20 @@ type CopyObjectResult struct { LastModified *time.Time `type:"timestamp"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CopyObjectResult) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CopyObjectResult) GoString() string { return s.String() } @@ -13369,12 +13628,20 @@ type CopyPartResult struct { LastModified *time.Time `type:"timestamp"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CopyPartResult) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s CopyPartResult) GoString() string { return s.String() } @@ -13400,12 +13667,20 @@ type CreateBucketConfiguration struct { LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CreateBucketConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CreateBucketConfiguration) GoString() string { return s.String() } @@ -13453,12 +13728,20 @@ type CreateBucketInput struct { ObjectLockEnabledForBucket *bool `location:"header" locationName:"x-amz-bucket-object-lock-enabled" type:"boolean"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CreateBucketInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CreateBucketInput) GoString() string { return s.String() } @@ -13549,12 +13832,20 @@ type CreateBucketOutput struct { Location *string `location:"header" locationName:"Location" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CreateBucketOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CreateBucketOutput) GoString() string { return s.String() } @@ -13680,6 +13971,10 @@ type CreateMultipartUploadInput struct { // S3 does not store the encryption key. The key must be appropriate for use // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateMultipartUploadInput's + // String and GoString methods. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. 
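// A minimal sketch of initiating a multipart upload with the SSE-KMS fields
// documented in the CreateMultipartUploadInput hunks around this point. It
// assumes a default aws-sdk-go v1 session; bucket, key, and the KMS key ARN are
// placeholders (SSEKMSKeyId may be omitted to use the default aws/s3 managed key).
package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	out, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket:               aws.String("example-bucket"), // placeholder
		Key:                  aws.String("example-object"), // placeholder
		ServerSideEncryption: aws.String("aws:kms"),
		SSEKMSKeyId:          aws.String("arn:aws:kms:us-east-1:111122223333:key/placeholder"),
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	// The returned upload ID feeds subsequent UploadPart/CompleteMultipartUpload calls.
	fmt.Println(aws.StringValue(out.UploadId))
}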
@@ -13690,15 +13985,23 @@ type CreateMultipartUploadInput struct { // Specifies the Amazon Web Services KMS Encryption Context to use for object // encryption. The value of this header is a base64-encoded UTF-8 string holding // JSON with the encryption context key-value pairs. + // + // SSEKMSEncryptionContext is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateMultipartUploadInput's + // String and GoString methods. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // Specifies the ID of the symmetric customer managed Amazon Web Services KMS - // CMK to use for object encryption. All GET and PUT requests for an object - // protected by Amazon Web Services KMS will fail if not made via SSL or using - // SigV4. For information about configuring using any of the officially supported - // Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the - // Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) + // Specifies the ID of the symmetric customer managed key to use for object + // encryption. All GET and PUT requests for an object protected by Amazon Web + // Services KMS will fail if not made via SSL or using SigV4. For information + // about configuring using any of the officially supported Amazon Web Services + // SDKs and Amazon Web Services CLI, see Specifying the Signature Version in + // Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) // in the Amazon S3 User Guide. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateMultipartUploadInput's + // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The server-side encryption algorithm used when storing this object in Amazon @@ -13722,12 +14025,20 @@ type CreateMultipartUploadInput struct { WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CreateMultipartUploadInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CreateMultipartUploadInput) GoString() string { return s.String() } @@ -14031,11 +14342,19 @@ type CreateMultipartUploadOutput struct { // If present, specifies the Amazon Web Services KMS Encryption Context to use // for object encryption. The value of this header is a base64-encoded UTF-8 // string holding JSON with the encryption context key-value pairs. + // + // SSEKMSEncryptionContext is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateMultipartUploadOutput's + // String and GoString methods. 
SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed customer master key - // (CMK) that was used for the object. + // (Amazon Web Services KMS) symmetric customer managed key that was used for + // the object. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateMultipartUploadOutput's + // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The server-side encryption algorithm used when storing this object in Amazon @@ -14046,12 +14365,20 @@ type CreateMultipartUploadOutput struct { UploadId *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CreateMultipartUploadOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s CreateMultipartUploadOutput) GoString() string { return s.String() } @@ -14158,12 +14485,20 @@ type DefaultRetention struct { Years *int64 `type:"integer"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DefaultRetention) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DefaultRetention) GoString() string { return s.String() } @@ -14200,12 +14535,20 @@ type Delete struct { Quiet *bool `type:"boolean"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Delete) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s Delete) GoString() string { return s.String() } @@ -14264,12 +14607,20 @@ type DeleteBucketAnalyticsConfigurationInput struct { Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketAnalyticsConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketAnalyticsConfigurationInput) GoString() string { return s.String() } @@ -14349,12 +14700,20 @@ type DeleteBucketAnalyticsConfigurationOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketAnalyticsConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string { return s.String() } @@ -14373,12 +14732,20 @@ type DeleteBucketCorsInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketCorsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketCorsInput) GoString() string { return s.String() } @@ -14449,12 +14816,20 @@ type DeleteBucketCorsOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketCorsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketCorsOutput) GoString() string { return s.String() } @@ -14474,12 +14849,20 @@ type DeleteBucketEncryptionInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketEncryptionInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketEncryptionInput) GoString() string { return s.String() } @@ -14550,12 +14933,20 @@ type DeleteBucketEncryptionOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketEncryptionOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketEncryptionOutput) GoString() string { return s.String() } @@ -14574,12 +14965,20 @@ type DeleteBucketInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketInput) GoString() string { return s.String() } @@ -14661,12 +15060,20 @@ type DeleteBucketIntelligentTieringConfigurationInput struct { Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketIntelligentTieringConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketIntelligentTieringConfigurationInput) GoString() string { return s.String() } @@ -14740,12 +15147,20 @@ type DeleteBucketIntelligentTieringConfigurationOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketIntelligentTieringConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketIntelligentTieringConfigurationOutput) GoString() string { return s.String() } @@ -14769,12 +15184,20 @@ type DeleteBucketInventoryConfigurationInput struct { Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketInventoryConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketInventoryConfigurationInput) GoString() string { return s.String() } @@ -14854,12 +15277,20 @@ type DeleteBucketInventoryConfigurationOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketInventoryConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketInventoryConfigurationOutput) GoString() string { return s.String() } @@ -14878,12 +15309,20 @@ type DeleteBucketLifecycleInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s DeleteBucketLifecycleInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketLifecycleInput) GoString() string { return s.String() } @@ -14954,12 +15393,20 @@ type DeleteBucketLifecycleOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketLifecycleOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketLifecycleOutput) GoString() string { return s.String() } @@ -14983,12 +15430,20 @@ type DeleteBucketMetricsConfigurationInput struct { Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketMetricsConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketMetricsConfigurationInput) GoString() string { return s.String() } @@ -15068,12 +15523,20 @@ type DeleteBucketMetricsConfigurationOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketMetricsConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketMetricsConfigurationOutput) GoString() string { return s.String() } @@ -15082,12 +15545,20 @@ type DeleteBucketOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s DeleteBucketOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketOutput) GoString() string { return s.String() } @@ -15106,12 +15577,20 @@ type DeleteBucketOwnershipControlsInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketOwnershipControlsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketOwnershipControlsInput) GoString() string { return s.String() } @@ -15182,12 +15661,20 @@ type DeleteBucketOwnershipControlsOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketOwnershipControlsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketOwnershipControlsOutput) GoString() string { return s.String() } @@ -15206,12 +15693,20 @@ type DeleteBucketPolicyInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketPolicyInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketPolicyInput) GoString() string { return s.String() } @@ -15282,12 +15777,20 @@ type DeleteBucketPolicyOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketPolicyOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketPolicyOutput) GoString() string { return s.String() } @@ -15306,12 +15809,20 @@ type DeleteBucketReplicationInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketReplicationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketReplicationInput) GoString() string { return s.String() } @@ -15382,12 +15893,20 @@ type DeleteBucketReplicationOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketReplicationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketReplicationOutput) GoString() string { return s.String() } @@ -15406,12 +15925,20 @@ type DeleteBucketTaggingInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketTaggingInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketTaggingInput) GoString() string { return s.String() } @@ -15482,12 +16009,20 @@ type DeleteBucketTaggingOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketTaggingOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketTaggingOutput) GoString() string { return s.String() } @@ -15506,12 +16041,20 @@ type DeleteBucketWebsiteInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketWebsiteInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketWebsiteInput) GoString() string { return s.String() } @@ -15582,12 +16125,20 @@ type DeleteBucketWebsiteOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketWebsiteOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteBucketWebsiteOutput) GoString() string { return s.String() } @@ -15613,12 +16164,20 @@ type DeleteMarkerEntry struct { VersionId *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteMarkerEntry) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteMarkerEntry) GoString() string { return s.String() } @@ -15675,12 +16234,20 @@ type DeleteMarkerReplication struct { Status *string `type:"string" enum:"DeleteMarkerReplicationStatus"` } -// String returns the string representation +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteMarkerReplication) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteMarkerReplication) GoString() string { return s.String() } @@ -15715,7 +16282,8 @@ type DeleteObjectInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // Indicates whether S3 Object Lock should bypass Governance-mode restrictions - // to process this operation. + // to process this operation. To use this header, you must have the s3:PutBucketPublicAccessBlock + // permission. BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` // The account ID of the expected bucket owner. If the bucket is owned by a @@ -15745,12 +16313,20 @@ type DeleteObjectInput struct { VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteObjectInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteObjectInput) GoString() string { return s.String() } @@ -15869,12 +16445,20 @@ type DeleteObjectOutput struct { VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteObjectOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteObjectOutput) GoString() string { return s.String() } @@ -15935,12 +16519,20 @@ type DeleteObjectTaggingInput struct { VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s DeleteObjectTaggingInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteObjectTaggingInput) GoString() string { return s.String() } @@ -16032,12 +16624,20 @@ type DeleteObjectTaggingOutput struct { VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteObjectTaggingOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteObjectTaggingOutput) GoString() string { return s.String() } @@ -16072,8 +16672,8 @@ type DeleteObjectsInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // Specifies whether you want to delete this object even if it has a Governance-type - // Object Lock in place. You must have sufficient permissions to perform this - // operation. + // Object Lock in place. To use this header, you must have the s3:PutBucketPublicAccessBlock + // permission. BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` // Container for the request. @@ -16100,12 +16700,20 @@ type DeleteObjectsInput struct { RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteObjectsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteObjectsInput) GoString() string { return s.String() } @@ -16220,12 +16828,20 @@ type DeleteObjectsOutput struct { RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteObjectsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeleteObjectsOutput) GoString() string { return s.String() } @@ -16262,12 +16878,20 @@ type DeletePublicAccessBlockInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeletePublicAccessBlockInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeletePublicAccessBlockInput) GoString() string { return s.String() } @@ -16338,12 +16962,20 @@ type DeletePublicAccessBlockOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeletePublicAccessBlockOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeletePublicAccessBlockOutput) GoString() string { return s.String() } @@ -16369,12 +17001,20 @@ type DeletedObject struct { VersionId *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeletedObject) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DeletedObject) GoString() string { return s.String() } @@ -16453,12 +17093,20 @@ type Destination struct { StorageClass *string `type:"string" enum:"StorageClass"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Destination) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Destination) GoString() string { return s.String() } @@ -16555,19 +17203,31 @@ type Encryption struct { KMSContext *string `type:"string"` // If the encryption type is aws:kms, this optional value specifies the ID of - // the symmetric customer managed Amazon Web Services KMS CMK to use for encryption - // of job results. Amazon S3 only supports symmetric CMKs. For more information, - // see Using symmetric and asymmetric keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // the symmetric customer managed key to use for encryption of job results. + // Amazon S3 only supports symmetric keys. For more information, see Using symmetric + // and asymmetric keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) // in the Amazon Web Services Key Management Service Developer Guide. + // + // KMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by Encryption's + // String and GoString methods. KMSKeyId *string `type:"string" sensitive:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Encryption) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Encryption) GoString() string { return s.String() } @@ -16617,12 +17277,20 @@ type EncryptionConfiguration struct { ReplicaKmsKeyID *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s EncryptionConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s EncryptionConfiguration) GoString() string { return s.String() } @@ -16640,12 +17308,20 @@ type EndEvent struct { _ struct{} `locationName:"EndEvent" type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s EndEvent) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s EndEvent) GoString() string { return s.String() } @@ -17041,12 +17717,20 @@ type Error struct { VersionId *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Error) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Error) GoString() string { return s.String() } @@ -17089,12 +17773,20 @@ type ErrorDocument struct { Key *string `min:"1" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ErrorDocument) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ErrorDocument) GoString() string { return s.String() } @@ -17131,12 +17823,20 @@ type ExistingObjectReplication struct { Status *string `type:"string" required:"true" enum:"ExistingObjectReplicationStatus"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ExistingObjectReplication) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ExistingObjectReplication) GoString() string { return s.String() } @@ -17176,12 +17876,20 @@ type FilterRule struct { Value *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s FilterRule) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". func (s FilterRule) GoString() string { return s.String() } @@ -17212,12 +17920,20 @@ type GetBucketAccelerateConfigurationInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketAccelerateConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketAccelerateConfigurationInput) GoString() string { return s.String() } @@ -17291,12 +18007,20 @@ type GetBucketAccelerateConfigurationOutput struct { Status *string `type:"string" enum:"BucketAccelerateStatus"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketAccelerateConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketAccelerateConfigurationOutput) GoString() string { return s.String() } @@ -17321,12 +18045,20 @@ type GetBucketAclInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketAclInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketAclInput) GoString() string { return s.String() } @@ -17403,12 +18135,20 @@ type GetBucketAclOutput struct { Owner *Owner `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketAclOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketAclOutput) GoString() string { return s.String() } @@ -17444,12 +18184,20 @@ type GetBucketAnalyticsConfigurationInput struct { Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketAnalyticsConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketAnalyticsConfigurationInput) GoString() string { return s.String() } @@ -17532,12 +18280,20 @@ type GetBucketAnalyticsConfigurationOutput struct { AnalyticsConfiguration *AnalyticsConfiguration `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketAnalyticsConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketAnalyticsConfigurationOutput) GoString() string { return s.String() } @@ -17562,12 +18318,20 @@ type GetBucketCorsInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketCorsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketCorsInput) GoString() string { return s.String() } @@ -17642,12 +18406,20 @@ type GetBucketCorsOutput struct { CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s GetBucketCorsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketCorsOutput) GoString() string { return s.String() } @@ -17673,12 +18445,20 @@ type GetBucketEncryptionInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketEncryptionInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketEncryptionInput) GoString() string { return s.String() } @@ -17752,12 +18532,20 @@ type GetBucketEncryptionOutput struct { ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketEncryptionOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketEncryptionOutput) GoString() string { return s.String() } @@ -17783,12 +18571,20 @@ type GetBucketIntelligentTieringConfigurationInput struct { Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketIntelligentTieringConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketIntelligentTieringConfigurationInput) GoString() string { return s.String() } @@ -17865,12 +18661,20 @@ type GetBucketIntelligentTieringConfigurationOutput struct { IntelligentTieringConfiguration *IntelligentTieringConfiguration `type:"structure"` } -// String returns the string representation +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketIntelligentTieringConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketIntelligentTieringConfigurationOutput) GoString() string { return s.String() } @@ -17900,12 +18704,20 @@ type GetBucketInventoryConfigurationInput struct { Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketInventoryConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketInventoryConfigurationInput) GoString() string { return s.String() } @@ -17988,12 +18800,20 @@ type GetBucketInventoryConfigurationOutput struct { InventoryConfiguration *InventoryConfiguration `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketInventoryConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketInventoryConfigurationOutput) GoString() string { return s.String() } @@ -18018,12 +18838,20 @@ type GetBucketLifecycleConfigurationInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketLifecycleConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s GetBucketLifecycleConfigurationInput) GoString() string { return s.String() } @@ -18097,12 +18925,20 @@ type GetBucketLifecycleConfigurationOutput struct { Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketLifecycleConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketLifecycleConfigurationOutput) GoString() string { return s.String() } @@ -18127,12 +18963,20 @@ type GetBucketLifecycleInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketLifecycleInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketLifecycleInput) GoString() string { return s.String() } @@ -18206,12 +19050,20 @@ type GetBucketLifecycleOutput struct { Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketLifecycleOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketLifecycleOutput) GoString() string { return s.String() } @@ -18236,12 +19088,20 @@ type GetBucketLocationInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketLocationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketLocationInput) GoString() string { return s.String() } @@ -18317,12 +19177,20 @@ type GetBucketLocationOutput struct { LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketLocationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketLocationOutput) GoString() string { return s.String() } @@ -18347,12 +19215,20 @@ type GetBucketLoggingInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketLoggingInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketLoggingInput) GoString() string { return s.String() } @@ -18429,12 +19305,20 @@ type GetBucketLoggingOutput struct { LoggingEnabled *LoggingEnabled `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketLoggingOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketLoggingOutput) GoString() string { return s.String() } @@ -18464,12 +19348,20 @@ type GetBucketMetricsConfigurationInput struct { Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s GetBucketMetricsConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketMetricsConfigurationInput) GoString() string { return s.String() } @@ -18552,12 +19444,20 @@ type GetBucketMetricsConfigurationOutput struct { MetricsConfiguration *MetricsConfiguration `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketMetricsConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketMetricsConfigurationOutput) GoString() string { return s.String() } @@ -18582,12 +19482,20 @@ type GetBucketNotificationConfigurationRequest struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketNotificationConfigurationRequest) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketNotificationConfigurationRequest) GoString() string { return s.String() } @@ -18668,12 +19576,20 @@ type GetBucketOwnershipControlsInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketOwnershipControlsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketOwnershipControlsInput) GoString() string { return s.String() } @@ -18748,12 +19664,20 @@ type GetBucketOwnershipControlsOutput struct { OwnershipControls *OwnershipControls `type:"structure"` } -// String returns the string representation +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketOwnershipControlsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketOwnershipControlsOutput) GoString() string { return s.String() } @@ -18778,12 +19702,20 @@ type GetBucketPolicyInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketPolicyInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketPolicyInput) GoString() string { return s.String() } @@ -18857,12 +19789,20 @@ type GetBucketPolicyOutput struct { Policy *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketPolicyOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketPolicyOutput) GoString() string { return s.String() } @@ -18887,12 +19827,20 @@ type GetBucketPolicyStatusInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketPolicyStatusInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s GetBucketPolicyStatusInput) GoString() string { return s.String() } @@ -18966,12 +19914,20 @@ type GetBucketPolicyStatusOutput struct { PolicyStatus *PolicyStatus `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketPolicyStatusOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketPolicyStatusOutput) GoString() string { return s.String() } @@ -18996,12 +19952,20 @@ type GetBucketReplicationInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketReplicationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketReplicationInput) GoString() string { return s.String() } @@ -19076,12 +20040,20 @@ type GetBucketReplicationOutput struct { ReplicationConfiguration *ReplicationConfiguration `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketReplicationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketReplicationOutput) GoString() string { return s.String() } @@ -19106,12 +20078,20 @@ type GetBucketRequestPaymentInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketRequestPaymentInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketRequestPaymentInput) GoString() string { return s.String() } @@ -19185,12 +20165,20 @@ type GetBucketRequestPaymentOutput struct { Payer *string `type:"string" enum:"Payer"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketRequestPaymentOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketRequestPaymentOutput) GoString() string { return s.String() } @@ -19215,12 +20203,20 @@ type GetBucketTaggingInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketTaggingInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketTaggingInput) GoString() string { return s.String() } @@ -19296,12 +20292,20 @@ type GetBucketTaggingOutput struct { TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketTaggingOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketTaggingOutput) GoString() string { return s.String() } @@ -19326,12 +20330,20 @@ type GetBucketVersioningInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s GetBucketVersioningInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketVersioningInput) GoString() string { return s.String() } @@ -19410,12 +20422,20 @@ type GetBucketVersioningOutput struct { Status *string `type:"string" enum:"BucketVersioningStatus"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketVersioningOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketVersioningOutput) GoString() string { return s.String() } @@ -19446,12 +20466,20 @@ type GetBucketWebsiteInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketWebsiteInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketWebsiteInput) GoString() string { return s.String() } @@ -19535,12 +20563,20 @@ type GetBucketWebsiteOutput struct { RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketWebsiteOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetBucketWebsiteOutput) GoString() string { return s.String() } @@ -19605,12 +20641,20 @@ type GetObjectAclInput struct { VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectAclInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectAclInput) GoString() string { return s.String() } @@ -19715,12 +20759,20 @@ type GetObjectAclOutput struct { RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectAclOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectAclOutput) GoString() string { return s.String() } @@ -19755,6 +20807,8 @@ type GetObjectInput struct { // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // + // When using an Object Lambda access point the hostname takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com. + // // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When @@ -19838,6 +20892,10 @@ type GetObjectInput struct { // must match the one used when storing the data. The key must be appropriate // for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by GetObjectInput's + // String and GoString methods. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. @@ -19849,12 +20907,20 @@ type GetObjectInput struct { VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
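A minimal sketch (not taken from this patch) of the redaction behavior documented above: members tagged sensitive:"true", such as GetObjectInput.SSECustomerKey, keep their member name in String()/GoString() output while their value is masked. The bucket, key, and key material below are placeholders.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	in := &s3.GetObjectInput{
		Bucket:               aws.String("example-bucket"), // placeholder
		Key:                  aws.String("example-key"),    // placeholder
		SSECustomerAlgorithm: aws.String("AES256"),
		// Tagged sensitive:"true"; the value is not echoed by String().
		SSECustomerKey: aws.String("placeholder-encryption-key"),
	}
	// Bucket and Key print normally; SSECustomerKey appears by name only,
	// with its value replaced as described in the doc comments above.
	fmt.Println(in.String())
}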
func (s GetObjectInput) GoString() string { return s.String() } @@ -20079,12 +21145,20 @@ type GetObjectLegalHoldInput struct { VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectLegalHoldInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectLegalHoldInput) GoString() string { return s.String() } @@ -20182,12 +21256,20 @@ type GetObjectLegalHoldOutput struct { LegalHold *ObjectLockLegalHold `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectLegalHoldOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectLegalHoldOutput) GoString() string { return s.String() } @@ -20219,12 +21301,20 @@ type GetObjectLockConfigurationInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectLockConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectLockConfigurationInput) GoString() string { return s.String() } @@ -20298,12 +21388,20 @@ type GetObjectLockConfigurationOutput struct { ObjectLockConfiguration *ObjectLockConfiguration `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectLockConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectLockConfigurationOutput) GoString() string { return s.String() } @@ -20419,8 +21517,12 @@ type GetObjectOutput struct { SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed customer master key - // (CMK) that was used for the object. + // (Amazon Web Services KMS) symmetric customer managed key that was used for + // the object. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by GetObjectOutput's + // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The server-side encryption algorithm used when storing this object in Amazon @@ -20443,12 +21545,20 @@ type GetObjectOutput struct { WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectOutput) GoString() string { return s.String() } @@ -20682,12 +21792,20 @@ type GetObjectRetentionInput struct { VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectRetentionInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectRetentionInput) GoString() string { return s.String() } @@ -20785,12 +21903,20 @@ type GetObjectRetentionOutput struct { Retention *ObjectLockRetention `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectRetentionOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectRetentionOutput) GoString() string { return s.String() } @@ -20845,12 +21971,20 @@ type GetObjectTaggingInput struct { VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectTaggingInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectTaggingInput) GoString() string { return s.String() } @@ -20953,12 +22087,20 @@ type GetObjectTaggingOutput struct { VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectTaggingOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectTaggingOutput) GoString() string { return s.String() } @@ -21002,12 +22144,20 @@ type GetObjectTorrentInput struct { RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectTorrentInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectTorrentInput) GoString() string { return s.String() } @@ -21103,12 +22253,20 @@ type GetObjectTorrentOutput struct { RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectTorrentOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetObjectTorrentOutput) GoString() string { return s.String() } @@ -21140,12 +22298,20 @@ type GetPublicAccessBlockInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetPublicAccessBlockInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetPublicAccessBlockInput) GoString() string { return s.String() } @@ -21220,12 +22386,20 @@ type GetPublicAccessBlockOutput struct { PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetPublicAccessBlockOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetPublicAccessBlockOutput) GoString() string { return s.String() } @@ -21246,12 +22420,20 @@ type GlacierJobParameters struct { Tier *string `type:"string" required:"true" enum:"Tier"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GlacierJobParameters) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GlacierJobParameters) GoString() string { return s.String() } @@ -21286,12 +22468,20 @@ type Grant struct { Permission *string `type:"string" enum:"Permission"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Grant) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Grant) GoString() string { return s.String() } @@ -21368,12 +22558,20 @@ type Grantee struct { URI *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Grantee) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Grantee) GoString() string { return s.String() } @@ -21450,12 +22648,20 @@ type HeadBucketInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s HeadBucketInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s HeadBucketInput) GoString() string { return s.String() } @@ -21526,12 +22732,20 @@ type HeadBucketOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s HeadBucketOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s HeadBucketOutput) GoString() string { return s.String() } @@ -21614,6 +22828,10 @@ type HeadObjectInput struct { // S3 does not store the encryption key. The key must be appropriate for use // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by HeadObjectInput's + // String and GoString methods. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. 
@@ -21625,12 +22843,20 @@ type HeadObjectInput struct { VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s HeadObjectInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s HeadObjectInput) GoString() string { return s.String() } @@ -21933,15 +23159,18 @@ type HeadObjectOutput struct { SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed customer master key - // (CMK) that was used for the object. + // (Amazon Web Services KMS) symmetric customer managed key that was used for + // the object. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by HeadObjectOutput's + // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // If the object is stored using server-side encryption either with an Amazon - // Web Services KMS customer master key (CMK) or an Amazon S3-managed encryption - // key, the response includes this header with the value of the server-side - // encryption algorithm used when storing this object in Amazon S3 (for example, - // AES256, aws:kms). + // Web Services KMS key or an Amazon S3-managed encryption key, the response + // includes this header with the value of the server-side encryption algorithm + // used when storing this object in Amazon S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // Provides storage class information of the object. Amazon S3 returns this @@ -21959,12 +23188,20 @@ type HeadObjectOutput struct { WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s HeadObjectOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
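As a hedged illustration of the HeadObjectOutput members documented above (ServerSideEncryption and the sensitive SSEKMSKeyId), a sketch that issues a HeadObject call and reads those fields; the region, bucket, and key are assumptions, and credentials come from the default chain.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})) // placeholder region
	svc := s3.New(sess)

	out, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}

	// For SSE-KMS objects this reports "aws:kms" plus the KMS key ID; the key ID
	// remains readable on the struct — only String()/GoString() mask it.
	fmt.Println(aws.StringValue(out.ServerSideEncryption))
	fmt.Println(aws.StringValue(out.SSEKMSKeyId))
}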
func (s HeadObjectOutput) GoString() string { return s.String() } @@ -22167,12 +23404,20 @@ type IndexDocument struct { Suffix *string `type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s IndexDocument) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s IndexDocument) GoString() string { return s.String() } @@ -22208,12 +23453,20 @@ type Initiator struct { ID *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Initiator) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Initiator) GoString() string { return s.String() } @@ -22248,12 +23501,20 @@ type InputSerialization struct { Parquet *ParquetInput `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s InputSerialization) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s InputSerialization) GoString() string { return s.String() } @@ -22296,12 +23557,20 @@ type IntelligentTieringAndOperator struct { Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s IntelligentTieringAndOperator) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s IntelligentTieringAndOperator) GoString() string { return s.String() } @@ -22366,12 +23635,20 @@ type IntelligentTieringConfiguration struct { Tierings []*Tiering `locationName:"Tiering" type:"list" flattened:"true" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s IntelligentTieringConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s IntelligentTieringConfiguration) GoString() string { return s.String() } @@ -22456,12 +23733,20 @@ type IntelligentTieringFilter struct { Tag *Tag `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s IntelligentTieringFilter) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s IntelligentTieringFilter) GoString() string { return s.String() } @@ -22547,12 +23832,20 @@ type InventoryConfiguration struct { Schedule *InventorySchedule `type:"structure" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s InventoryConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s InventoryConfiguration) GoString() string { return s.String() } @@ -22650,12 +23943,20 @@ type InventoryDestination struct { S3BucketDestination *InventoryS3BucketDestination `type:"structure" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s InventoryDestination) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". func (s InventoryDestination) GoString() string { return s.String() } @@ -22696,12 +23997,20 @@ type InventoryEncryption struct { SSES3 *SSES3 `locationName:"SSE-S3" type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s InventoryEncryption) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s InventoryEncryption) GoString() string { return s.String() } @@ -22744,12 +24053,20 @@ type InventoryFilter struct { Prefix *string `type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s InventoryFilter) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s InventoryFilter) GoString() string { return s.String() } @@ -22804,12 +24121,20 @@ type InventoryS3BucketDestination struct { Prefix *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s InventoryS3BucketDestination) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s InventoryS3BucketDestination) GoString() string { return s.String() } @@ -22882,12 +24207,20 @@ type InventorySchedule struct { Frequency *string `type:"string" required:"true" enum:"InventoryFrequency"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s InventorySchedule) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
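The Inventory* shapes above are normally wired together through PutBucketInventoryConfiguration. The following is a sketch under assumed bucket names and a made-up configuration ID, not an excerpt from this change.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Daily CSV inventory of current object versions, delivered to a
	// destination bucket identified by its ARN (all names are placeholders).
	cfg := &s3.InventoryConfiguration{
		Id:                     aws.String("example-inventory"),
		IsEnabled:              aws.Bool(true),
		IncludedObjectVersions: aws.String(s3.InventoryIncludedObjectVersionsCurrent),
		Schedule:               &s3.InventorySchedule{Frequency: aws.String(s3.InventoryFrequencyDaily)},
		Destination: &s3.InventoryDestination{
			S3BucketDestination: &s3.InventoryS3BucketDestination{
				Bucket: aws.String("arn:aws:s3:::example-inventory-destination"),
				Format: aws.String(s3.InventoryFormatCsv),
				Prefix: aws.String("inventory/"),
			},
		},
	}

	_, err := svc.PutBucketInventoryConfiguration(&s3.PutBucketInventoryConfigurationInput{
		Bucket:                 aws.String("example-source-bucket"), // placeholder
		Id:                     cfg.Id,
		InventoryConfiguration: cfg,
	})
	if err != nil {
		log.Fatal(err)
	}
}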
func (s InventorySchedule) GoString() string { return s.String() } @@ -22919,12 +24252,20 @@ type JSONInput struct { Type *string `type:"string" enum:"JSONType"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s JSONInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s JSONInput) GoString() string { return s.String() } @@ -22944,12 +24285,20 @@ type JSONOutput struct { RecordDelimiter *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s JSONOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s JSONOutput) GoString() string { return s.String() } @@ -22969,12 +24318,20 @@ type KeyFilter struct { FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s KeyFilter) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s KeyFilter) GoString() string { return s.String() } @@ -23012,12 +24369,20 @@ type LambdaFunctionConfiguration struct { LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s LambdaFunctionConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
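The JSONInput/JSONOutput shapes documented above plug into SelectObjectContent's input and output serialization. A hedged sketch of how they are usually filled in for line-delimited JSON follows; the bucket, key, and query are assumptions.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	input := &s3.SelectObjectContentInput{
		Bucket:         aws.String("example-bucket"),           // placeholder
		Key:            aws.String("data/events.json.gz"),      // placeholder
		Expression:     aws.String("SELECT * FROM S3Object s"), // placeholder query
		ExpressionType: aws.String(s3.ExpressionTypeSql),
		InputSerialization: &s3.InputSerialization{
			CompressionType: aws.String(s3.CompressionTypeGzip),
			// JSONType "LINES" means newline-delimited records; "DOCUMENT"
			// treats the whole object as a single JSON value.
			JSON: &s3.JSONInput{Type: aws.String(s3.JSONTypeLines)},
		},
		OutputSerialization: &s3.OutputSerialization{
			JSON: &s3.JSONOutput{RecordDelimiter: aws.String("\n")},
		},
	}
	fmt.Println(input.String())
}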
func (s LambdaFunctionConfiguration) GoString() string { return s.String() } @@ -23072,12 +24437,20 @@ type LifecycleConfiguration struct { Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s LifecycleConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s LifecycleConfiguration) GoString() string { return s.String() } @@ -23130,12 +24503,20 @@ type LifecycleExpiration struct { ExpiredObjectDeleteMarker *bool `type:"boolean"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s LifecycleExpiration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s LifecycleExpiration) GoString() string { return s.String() } @@ -23215,12 +24596,20 @@ type LifecycleRule struct { Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s LifecycleRule) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s LifecycleRule) GoString() string { return s.String() } @@ -23311,12 +24700,20 @@ type LifecycleRuleAndOperator struct { Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s LifecycleRuleAndOperator) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
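A sketch, not part of this diff, of how the LifecycleRule, LifecycleExpiration, and LifecycleRuleFilter shapes above are typically combined and applied with PutBucketLifecycleConfiguration; the rule ID, prefix, and bucket name are placeholders.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Expire objects under a placeholder "logs/" prefix after 30 days.
	rule := &s3.LifecycleRule{
		ID:         aws.String("expire-old-logs"), // placeholder
		Status:     aws.String(s3.ExpirationStatusEnabled),
		Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
		Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
	}

	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{rule},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}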
func (s LifecycleRuleAndOperator) GoString() string { return s.String() } @@ -23374,12 +24771,20 @@ type LifecycleRuleFilter struct { Tag *Tag `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s LifecycleRuleFilter) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s LifecycleRuleFilter) GoString() string { return s.String() } @@ -23440,12 +24845,20 @@ type ListBucketAnalyticsConfigurationsInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketAnalyticsConfigurationsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketAnalyticsConfigurationsInput) GoString() string { return s.String() } @@ -23539,12 +24952,20 @@ type ListBucketAnalyticsConfigurationsOutput struct { NextContinuationToken *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketAnalyticsConfigurationsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketAnalyticsConfigurationsOutput) GoString() string { return s.String() } @@ -23587,12 +25008,20 @@ type ListBucketIntelligentTieringConfigurationsInput struct { ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketIntelligentTieringConfigurationsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketIntelligentTieringConfigurationsInput) GoString() string { return s.String() } @@ -23680,12 +25109,20 @@ type ListBucketIntelligentTieringConfigurationsOutput struct { NextContinuationToken *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketIntelligentTieringConfigurationsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketIntelligentTieringConfigurationsOutput) GoString() string { return s.String() } @@ -23734,12 +25171,20 @@ type ListBucketInventoryConfigurationsInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketInventoryConfigurationsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketInventoryConfigurationsInput) GoString() string { return s.String() } @@ -23833,12 +25278,20 @@ type ListBucketInventoryConfigurationsOutput struct { NextContinuationToken *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketInventoryConfigurationsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketInventoryConfigurationsOutput) GoString() string { return s.String() } @@ -23887,12 +25340,20 @@ type ListBucketMetricsConfigurationsInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketMetricsConfigurationsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketMetricsConfigurationsInput) GoString() string { return s.String() } @@ -23987,12 +25448,20 @@ type ListBucketMetricsConfigurationsOutput struct { NextContinuationToken *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketMetricsConfigurationsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketMetricsConfigurationsOutput) GoString() string { return s.String() } @@ -24025,12 +25494,20 @@ type ListBucketsInput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketsInput) GoString() string { return s.String() } @@ -24045,12 +25522,20 @@ type ListBucketsOutput struct { Owner *Owner `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListBucketsOutput) GoString() string { return s.String() } @@ -24143,12 +25628,20 @@ type ListMultipartUploadsInput struct { UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". func (s ListMultipartUploadsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListMultipartUploadsInput) GoString() string { return s.String() } @@ -24309,12 +25802,20 @@ type ListMultipartUploadsOutput struct { Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListMultipartUploadsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListMultipartUploadsOutput) GoString() string { return s.String() } @@ -24448,12 +25949,20 @@ type ListObjectVersionsInput struct { VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListObjectVersionsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListObjectVersionsInput) GoString() string { return s.String() } @@ -24620,12 +26129,20 @@ type ListObjectVersionsOutput struct { Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListObjectVersionsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListObjectVersionsOutput) GoString() string { return s.String() } @@ -24765,12 +26282,20 @@ type ListObjectsInput struct { RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` } -// String returns the string representation +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListObjectsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListObjectsInput) GoString() string { return s.String() } @@ -24933,12 +26458,20 @@ type ListObjectsOutput struct { Prefix *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListObjectsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListObjectsOutput) GoString() string { return s.String() } @@ -25065,12 +26598,20 @@ type ListObjectsV2Input struct { StartAfter *string `location:"querystring" locationName:"start-after" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListObjectsV2Input) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListObjectsV2Input) GoString() string { return s.String() } @@ -25273,12 +26814,20 @@ type ListObjectsV2Output struct { StartAfter *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListObjectsV2Output) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListObjectsV2Output) GoString() string { return s.String() } @@ -25408,12 +26957,20 @@ type ListPartsInput struct { UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListPartsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListPartsInput) GoString() string { return s.String() } @@ -25590,12 +27147,20 @@ type ListPartsOutput struct { UploadId *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListPartsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListPartsOutput) GoString() string { return s.String() } @@ -25725,12 +27290,20 @@ type Location struct { UserMetadata []*MetadataEntry `locationNameList:"MetadataEntry" type:"list"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Location) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Location) GoString() string { return s.String() } @@ -25847,12 +27420,20 @@ type LoggingEnabled struct { TargetPrefix *string `type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s LoggingEnabled) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s LoggingEnabled) GoString() string { return s.String() } @@ -25912,12 +27493,20 @@ type MetadataEntry struct { Value *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
 func (s MetadataEntry) String() string {
 	return awsutil.Prettify(s)
 }
 
-// GoString returns the string representation
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
 func (s MetadataEntry) GoString() string {
 	return s.String()
 }
@@ -25949,12 +27538,20 @@ type Metrics struct {
 	Status *string `type:"string" required:"true" enum:"MetricsStatus"`
 }
 
-// String returns the string representation
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
 func (s Metrics) String() string {
 	return awsutil.Prettify(s)
 }
 
-// GoString returns the string representation
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
 func (s Metrics) GoString() string {
 	return s.String()
 }
@@ -25990,6 +27587,9 @@ func (s *Metrics) SetStatus(v string) *Metrics {
 type MetricsAndOperator struct {
 	_ struct{} `type:"structure"`
 
+	// The access point ARN used when evaluating an AND predicate.
+	AccessPointArn *string `type:"string"`
+
 	// The prefix used when evaluating an AND predicate.
 	Prefix *string `type:"string"`
 
@@ -25997,12 +27597,20 @@ type MetricsAndOperator struct {
 	Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"`
 }
 
-// String returns the string representation
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
 func (s MetricsAndOperator) String() string {
 	return awsutil.Prettify(s)
 }
 
-// GoString returns the string representation
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
 func (s MetricsAndOperator) GoString() string {
 	return s.String()
 }
@@ -26027,6 +27635,12 @@ func (s *MetricsAndOperator) Validate() error {
 	return nil
 }
 
+// SetAccessPointArn sets the AccessPointArn field's value.
+func (s *MetricsAndOperator) SetAccessPointArn(v string) *MetricsAndOperator {
+	s.AccessPointArn = &v
+	return s
+}
+
 // SetPrefix sets the Prefix field's value.
 func (s *MetricsAndOperator) SetPrefix(v string) *MetricsAndOperator {
 	s.Prefix = &v
@@ -26043,15 +27657,14 @@ func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator {
 // by the metrics configuration ID) from an Amazon S3 bucket. If you're updating
 // an existing metrics configuration, note that this is a full replacement of
 // the existing metrics configuration. If you don't include the elements you
-// want to keep, they are erased. For more information, see PUT Bucket metrics
-// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html)
-// in the Amazon S3 API Reference.
+// want to keep, they are erased. For more information, see PutBucketMetricsConfiguration
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html).
 type MetricsConfiguration struct {
 	_ struct{} `type:"structure"`
 
 	// Specifies a metrics configuration filter. The metrics configuration will
 	// only include objects that meet the filter's criteria. A filter must be a
-	// prefix, a tag, or a conjunction (MetricsAndOperator).
+	// prefix, an object tag, an access point ARN, or a conjunction (MetricsAndOperator).
 	Filter *MetricsFilter `type:"structure"`
 
 	// The ID used to identify the metrics configuration.
@@ -26060,12 +27673,20 @@ type MetricsConfiguration struct {
 	Id *string `type:"string" required:"true"`
 }
 
-// String returns the string representation
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
 func (s MetricsConfiguration) String() string {
 	return awsutil.Prettify(s)
 }
 
-// GoString returns the string representation
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
 func (s MetricsConfiguration) GoString() string {
 	return s.String()
 }
@@ -26102,10 +27723,14 @@ func (s *MetricsConfiguration) SetId(v string) *MetricsConfiguration {
 
 // Specifies a metrics configuration filter. The metrics configuration only
 // includes objects that meet the filter's criteria. A filter must be a prefix,
-// a tag, or a conjunction (MetricsAndOperator).
+// an object tag, an access point ARN, or a conjunction (MetricsAndOperator).
+// For more information, see PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html).
 type MetricsFilter struct {
 	_ struct{} `type:"structure"`
 
+	// The access point ARN used when evaluating a metrics filter.
+	AccessPointArn *string `type:"string"`
+
 	// A conjunction (logical AND) of predicates, which is used in evaluating a
 	// metrics filter. The operator must have at least two predicates, and an object
 	// must match all of the predicates in order for the filter to apply.
@@ -26118,12 +27743,20 @@ type MetricsFilter struct {
 	Tag *Tag `type:"structure"`
 }
 
-// String returns the string representation
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
 func (s MetricsFilter) String() string {
 	return awsutil.Prettify(s)
 }
 
-// GoString returns the string representation
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
 func (s MetricsFilter) GoString() string {
 	return s.String()
 }
@@ -26148,6 +27781,12 @@ func (s *MetricsFilter) Validate() error {
 	return nil
 }
 
+// SetAccessPointArn sets the AccessPointArn field's value.
+func (s *MetricsFilter) SetAccessPointArn(v string) *MetricsFilter {
+	s.AccessPointArn = &v
+	return s
+}
+
 // SetAnd sets the And field's value.
func (s *MetricsFilter) SetAnd(v *MetricsAndOperator) *MetricsFilter { s.And = v @@ -26189,12 +27828,20 @@ type MultipartUpload struct { UploadId *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s MultipartUpload) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s MultipartUpload) GoString() string { return s.String() } @@ -26251,12 +27898,20 @@ type NoncurrentVersionExpiration struct { NoncurrentDays *int64 `type:"integer"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s NoncurrentVersionExpiration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s NoncurrentVersionExpiration) GoString() string { return s.String() } @@ -26288,12 +27943,20 @@ type NoncurrentVersionTransition struct { StorageClass *string `type:"string" enum:"TransitionStorageClass"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s NoncurrentVersionTransition) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s NoncurrentVersionTransition) GoString() string { return s.String() } @@ -26328,12 +27991,20 @@ type NotificationConfiguration struct { TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s NotificationConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s NotificationConfiguration) GoString() string { return s.String() } @@ -26413,12 +28084,20 @@ type NotificationConfigurationDeprecated struct { TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s NotificationConfigurationDeprecated) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s NotificationConfigurationDeprecated) GoString() string { return s.String() } @@ -26451,12 +28130,20 @@ type NotificationConfigurationFilter struct { Key *KeyFilter `locationName:"S3Key" type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s NotificationConfigurationFilter) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s NotificationConfigurationFilter) GoString() string { return s.String() } @@ -26508,12 +28195,20 @@ type Object struct { StorageClass *string `type:"string" enum:"ObjectStorageClass"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Object) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Object) GoString() string { return s.String() } @@ -26571,12 +28266,20 @@ type ObjectIdentifier struct { VersionId *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ObjectIdentifier) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s ObjectIdentifier) GoString() string { return s.String() } @@ -26624,12 +28327,20 @@ type ObjectLockConfiguration struct { Rule *ObjectLockRule `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ObjectLockConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ObjectLockConfiguration) GoString() string { return s.String() } @@ -26654,12 +28365,20 @@ type ObjectLockLegalHold struct { Status *string `type:"string" enum:"ObjectLockLegalHoldStatus"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ObjectLockLegalHold) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ObjectLockLegalHold) GoString() string { return s.String() } @@ -26681,12 +28400,20 @@ type ObjectLockRetention struct { RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ObjectLockRetention) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ObjectLockRetention) GoString() string { return s.String() } @@ -26714,12 +28441,20 @@ type ObjectLockRule struct { DefaultRetention *DefaultRetention `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ObjectLockRule) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s ObjectLockRule) GoString() string { return s.String() } @@ -26760,12 +28495,20 @@ type ObjectVersion struct { VersionId *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ObjectVersion) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ObjectVersion) GoString() string { return s.String() } @@ -26826,12 +28569,20 @@ type OutputLocation struct { S3 *Location `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s OutputLocation) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s OutputLocation) GoString() string { return s.String() } @@ -26868,12 +28619,20 @@ type OutputSerialization struct { JSON *JSONOutput `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s OutputSerialization) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s OutputSerialization) GoString() string { return s.String() } @@ -26901,12 +28660,20 @@ type Owner struct { ID *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Owner) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s Owner) GoString() string { return s.String() } @@ -26933,12 +28700,20 @@ type OwnershipControls struct { Rules []*OwnershipControlsRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s OwnershipControls) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s OwnershipControls) GoString() string { return s.String() } @@ -26989,12 +28764,20 @@ type OwnershipControlsRule struct { ObjectOwnership *string `type:"string" required:"true" enum:"ObjectOwnership"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s OwnershipControlsRule) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s OwnershipControlsRule) GoString() string { return s.String() } @@ -27023,12 +28806,20 @@ type ParquetInput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ParquetInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ParquetInput) GoString() string { return s.String() } @@ -27051,12 +28842,20 @@ type Part struct { Size *int64 `type:"integer"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Part) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s Part) GoString() string { return s.String() } @@ -27094,12 +28893,20 @@ type PolicyStatus struct { IsPublic *bool `locationName:"IsPublic" type:"boolean"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PolicyStatus) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PolicyStatus) GoString() string { return s.String() } @@ -27124,12 +28931,20 @@ type Progress struct { BytesScanned *int64 `type:"long"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Progress) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Progress) GoString() string { return s.String() } @@ -27160,12 +28975,20 @@ type ProgressEvent struct { Details *Progress `locationName:"Details" type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ProgressEvent) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ProgressEvent) GoString() string { return s.String() } @@ -27253,12 +29076,20 @@ type PublicAccessBlockConfiguration struct { RestrictPublicBuckets *bool `locationName:"RestrictPublicBuckets" type:"boolean"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PublicAccessBlockConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s PublicAccessBlockConfiguration) GoString() string { return s.String() } @@ -27306,12 +29137,20 @@ type PutBucketAccelerateConfigurationInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketAccelerateConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketAccelerateConfigurationInput) GoString() string { return s.String() } @@ -27391,12 +29230,20 @@ type PutBucketAccelerateConfigurationOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketAccelerateConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketAccelerateConfigurationOutput) GoString() string { return s.String() } @@ -27440,12 +29287,20 @@ type PutBucketAclInput struct { GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketAclInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketAclInput) GoString() string { return s.String() } @@ -27563,12 +29418,20 @@ type PutBucketAclOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketAclOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketAclOutput) GoString() string { return s.String() } @@ -27597,12 +29460,20 @@ type PutBucketAnalyticsConfigurationInput struct { Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketAnalyticsConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketAnalyticsConfigurationInput) GoString() string { return s.String() } @@ -27696,12 +29567,20 @@ type PutBucketAnalyticsConfigurationOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketAnalyticsConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketAnalyticsConfigurationOutput) GoString() string { return s.String() } @@ -27728,12 +29607,20 @@ type PutBucketCorsInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketCorsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketCorsInput) GoString() string { return s.String() } @@ -27818,12 +29705,20 @@ type PutBucketCorsOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketCorsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketCorsOutput) GoString() string { return s.String() } @@ -27832,9 +29727,9 @@ type PutBucketEncryptionInput struct { _ struct{} `locationName:"PutBucketEncryptionRequest" type:"structure" payload:"ServerSideEncryptionConfiguration"` // Specifies default encryption for a bucket using server-side encryption with - // Amazon S3-managed keys (SSE-S3) or customer master keys stored in Amazon - // Web Services KMS (SSE-KMS). For information about the Amazon S3 default encryption - // feature, see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) + // Amazon S3-managed keys (SSE-S3) or customer managed keys (SSE-KMS). For information + // about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket + // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -27851,12 +29746,20 @@ type PutBucketEncryptionInput struct { ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `locationName:"ServerSideEncryptionConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketEncryptionInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketEncryptionInput) GoString() string { return s.String() } @@ -27941,12 +29844,20 @@ type PutBucketEncryptionOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketEncryptionOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketEncryptionOutput) GoString() string { return s.String() } @@ -27971,12 +29882,20 @@ type PutBucketIntelligentTieringConfigurationInput struct { IntelligentTieringConfiguration *IntelligentTieringConfiguration `locationName:"IntelligentTieringConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketIntelligentTieringConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketIntelligentTieringConfigurationInput) GoString() string { return s.String() } @@ -28064,12 +29983,20 @@ type PutBucketIntelligentTieringConfigurationOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketIntelligentTieringConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketIntelligentTieringConfigurationOutput) GoString() string { return s.String() } @@ -28098,12 +30025,20 @@ type PutBucketInventoryConfigurationInput struct { InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketInventoryConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketInventoryConfigurationInput) GoString() string { return s.String() } @@ -28197,12 +30132,20 @@ type PutBucketInventoryConfigurationOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketInventoryConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s PutBucketInventoryConfigurationOutput) GoString() string { return s.String() } @@ -28224,12 +30167,20 @@ type PutBucketLifecycleConfigurationInput struct { LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketLifecycleConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketLifecycleConfigurationInput) GoString() string { return s.String() } @@ -28311,12 +30262,20 @@ type PutBucketLifecycleConfigurationOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketLifecycleConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketLifecycleConfigurationOutput) GoString() string { return s.String() } @@ -28336,12 +30295,20 @@ type PutBucketLifecycleInput struct { LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketLifecycleInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketLifecycleInput) GoString() string { return s.String() } @@ -28423,12 +30390,20 @@ type PutBucketLifecycleOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketLifecycleOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketLifecycleOutput) GoString() string { return s.String() } @@ -28452,12 +30427,20 @@ type PutBucketLoggingInput struct { ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketLoggingInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketLoggingInput) GoString() string { return s.String() } @@ -28542,12 +30525,20 @@ type PutBucketLoggingOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketLoggingOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketLoggingOutput) GoString() string { return s.String() } @@ -28576,12 +30567,20 @@ type PutBucketMetricsConfigurationInput struct { MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketMetricsConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketMetricsConfigurationInput) GoString() string { return s.String() } @@ -28675,12 +30674,20 @@ type PutBucketMetricsConfigurationOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s PutBucketMetricsConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketMetricsConfigurationOutput) GoString() string { return s.String() } @@ -28705,12 +30712,20 @@ type PutBucketNotificationConfigurationInput struct { NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketNotificationConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketNotificationConfigurationInput) GoString() string { return s.String() } @@ -28795,12 +30810,20 @@ type PutBucketNotificationConfigurationOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketNotificationConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketNotificationConfigurationOutput) GoString() string { return s.String() } @@ -28824,12 +30847,20 @@ type PutBucketNotificationInput struct { NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketNotificationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s PutBucketNotificationInput) GoString() string { return s.String() } @@ -28909,12 +30940,20 @@ type PutBucketNotificationOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketNotificationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketNotificationOutput) GoString() string { return s.String() } @@ -28939,12 +30978,20 @@ type PutBucketOwnershipControlsInput struct { OwnershipControls *OwnershipControls `locationName:"OwnershipControls" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketOwnershipControlsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketOwnershipControlsInput) GoString() string { return s.String() } @@ -29029,12 +31076,20 @@ type PutBucketOwnershipControlsOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketOwnershipControlsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketOwnershipControlsOutput) GoString() string { return s.String() } @@ -29062,12 +31117,20 @@ type PutBucketPolicyInput struct { Policy *string `type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketPolicyInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketPolicyInput) GoString() string { return s.String() } @@ -29153,12 +31216,20 @@ type PutBucketPolicyOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketPolicyOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketPolicyOutput) GoString() string { return s.String() } @@ -29186,12 +31257,20 @@ type PutBucketReplicationInput struct { Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketReplicationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketReplicationInput) GoString() string { return s.String() } @@ -29282,12 +31361,20 @@ type PutBucketReplicationOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketReplicationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketReplicationOutput) GoString() string { return s.String() } @@ -29311,12 +31398,20 @@ type PutBucketRequestPaymentInput struct { RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketRequestPaymentInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketRequestPaymentInput) GoString() string { return s.String() } @@ -29401,12 +31496,20 @@ type PutBucketRequestPaymentOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketRequestPaymentOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketRequestPaymentOutput) GoString() string { return s.String() } @@ -29430,12 +31533,20 @@ type PutBucketTaggingInput struct { Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketTaggingInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketTaggingInput) GoString() string { return s.String() } @@ -29520,12 +31631,20 @@ type PutBucketTaggingOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketTaggingOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketTaggingOutput) GoString() string { return s.String() } @@ -29553,12 +31672,20 @@ type PutBucketVersioningInput struct { VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s PutBucketVersioningInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketVersioningInput) GoString() string { return s.String() } @@ -29644,12 +31771,20 @@ type PutBucketVersioningOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketVersioningOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketVersioningOutput) GoString() string { return s.String() } @@ -29673,12 +31808,20 @@ type PutBucketWebsiteInput struct { WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketWebsiteInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketWebsiteInput) GoString() string { return s.String() } @@ -29763,12 +31906,20 @@ type PutBucketWebsiteOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketWebsiteOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutBucketWebsiteOutput) GoString() string { return s.String() } @@ -29859,12 +32010,20 @@ type PutObjectAclInput struct { VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectAclInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectAclInput) GoString() string { return s.String() } @@ -30010,12 +32169,20 @@ type PutObjectAclOutput struct { RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectAclOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectAclOutput) GoString() string { return s.String() } @@ -30166,6 +32333,10 @@ type PutObjectInput struct { // S3 does not store the encryption key. The key must be appropriate for use // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by PutObjectInput's + // String and GoString methods. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. @@ -30176,16 +32347,24 @@ type PutObjectInput struct { // Specifies the Amazon Web Services KMS Encryption Context to use for object // encryption. The value of this header is a base64-encoded UTF-8 string holding // JSON with the encryption context key-value pairs. + // + // SSEKMSEncryptionContext is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by PutObjectInput's + // String and GoString methods. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` // If x-amz-server-side-encryption is present and has the value of aws:kms, // this header specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetrical customer managed customer master key - // (CMK) that was used for the object. If you specify x-amz-server-side-encryption:aws:kms, - // but do not providex-amz-server-side-encryption-aws-kms-key-id, Amazon S3 - // uses the Amazon Web Services managed CMK in Amazon Web Services to protect - // the data. If the KMS key does not exist in the same account issuing the command, - // you must use the full ARN and not just the ID. + // (Amazon Web Services KMS) symmetrical customer managed key that was used + // for the object. 
If you specify x-amz-server-side-encryption:aws:kms, but + // do not providex-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses + // the Amazon Web Services managed key to protect the data. If the KMS key does + // not exist in the same account issuing the command, you must use the full + // ARN and not just the ID. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by PutObjectInput's + // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The server-side encryption algorithm used when storing this object in Amazon @@ -30225,12 +32404,20 @@ type PutObjectInput struct { WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectInput) GoString() string { return s.String() } @@ -30531,12 +32718,20 @@ type PutObjectLegalHoldInput struct { VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectLegalHoldInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectLegalHoldInput) GoString() string { return s.String() } @@ -30641,12 +32836,20 @@ type PutObjectLegalHoldOutput struct { RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectLegalHoldOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s PutObjectLegalHoldOutput) GoString() string { return s.String() } @@ -30684,12 +32887,20 @@ type PutObjectLockConfigurationInput struct { Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectLockConfigurationInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectLockConfigurationInput) GoString() string { return s.String() } @@ -30782,12 +32993,20 @@ type PutObjectLockConfigurationOutput struct { RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectLockConfigurationOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectLockConfigurationOutput) GoString() string { return s.String() } @@ -30832,30 +33051,46 @@ type PutObjectOutput struct { // If present, specifies the Amazon Web Services KMS Encryption Context to use // for object encryption. The value of this header is a base64-encoded UTF-8 // string holding JSON with the encryption context key-value pairs. + // + // SSEKMSEncryptionContext is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by PutObjectOutput's + // String and GoString methods. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` // If x-amz-server-side-encryption is present and has the value of aws:kms, // this header specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed customer master key - // (CMK) that was used for the object. + // (Amazon Web Services KMS) symmetric customer managed key that was used for + // the object. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by PutObjectOutput's + // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // If you specified server-side encryption either with an Amazon Web Services - // KMS customer master key (CMK) or Amazon S3-managed encryption key in your - // PUT request, the response includes this header. It confirms the encryption - // algorithm that Amazon S3 used to encrypt the object. 
+ // KMS key or Amazon S3-managed encryption key in your PUT request, the response + // includes this header. It confirms the encryption algorithm that Amazon S3 + // used to encrypt the object. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // Version of the object. VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectOutput) GoString() string { return s.String() } @@ -30965,12 +33200,20 @@ type PutObjectRetentionInput struct { VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectRetentionInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectRetentionInput) GoString() string { return s.String() } @@ -31081,12 +33324,20 @@ type PutObjectRetentionOutput struct { RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectRetentionOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectRetentionOutput) GoString() string { return s.String() } @@ -31146,12 +33397,20 @@ type PutObjectTaggingInput struct { VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s PutObjectTaggingInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectTaggingInput) GoString() string { return s.String() } @@ -31263,12 +33522,20 @@ type PutObjectTaggingOutput struct { VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectTaggingOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutObjectTaggingOutput) GoString() string { return s.String() } @@ -31303,12 +33570,20 @@ type PutPublicAccessBlockInput struct { PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `locationName:"PublicAccessBlockConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutPublicAccessBlockInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutPublicAccessBlockInput) GoString() string { return s.String() } @@ -31388,12 +33663,20 @@ type PutPublicAccessBlockOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutPublicAccessBlockOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PutPublicAccessBlockOutput) GoString() string { return s.String() } @@ -31424,12 +33707,20 @@ type QueueConfiguration struct { QueueArn *string `locationName:"Queue" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s QueueConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s QueueConfiguration) GoString() string { return s.String() } @@ -31498,12 +33789,20 @@ type QueueConfigurationDeprecated struct { Queue *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s QueueConfigurationDeprecated) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s QueueConfigurationDeprecated) GoString() string { return s.String() } @@ -31537,17 +33836,24 @@ type RecordsEvent struct { _ struct{} `locationName:"RecordsEvent" type:"structure" payload:"Payload"` // The byte array of partial, one or more result records. - // // Payload is automatically base64 encoded/decoded by the SDK. Payload []byte `type:"blob"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RecordsEvent) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RecordsEvent) GoString() string { return s.String() } @@ -31619,12 +33925,20 @@ type Redirect struct { ReplaceKeyWith *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Redirect) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Redirect) GoString() string { return s.String() } @@ -31674,12 +33988,20 @@ type RedirectAllRequestsTo struct { Protocol *string `type:"string" enum:"Protocol"` } -// String returns the string representation +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RedirectAllRequestsTo) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RedirectAllRequestsTo) GoString() string { return s.String() } @@ -31727,12 +34049,20 @@ type ReplicaModifications struct { Status *string `type:"string" required:"true" enum:"ReplicaModificationsStatus"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ReplicaModifications) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ReplicaModifications) GoString() string { return s.String() } @@ -31776,12 +34106,20 @@ type ReplicationConfiguration struct { Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ReplicationConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ReplicationConfiguration) GoString() string { return s.String() } @@ -31886,8 +34224,8 @@ type ReplicationRule struct { // objects that you want to replicate. You can choose to enable or disable the // replication of these objects. Currently, Amazon S3 supports only the filter // that you can specify for objects created with server-side encryption using - // a customer master key (CMK) stored in Amazon Web Services Key Management - // Service (SSE-KMS). + // a customer managed key stored in Amazon Web Services Key Management Service + // (SSE-KMS). SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"` // Specifies whether the rule is enabled. @@ -31896,12 +34234,20 @@ type ReplicationRule struct { Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s ReplicationRule) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ReplicationRule) GoString() string { return s.String() } @@ -32018,12 +34364,20 @@ type ReplicationRuleAndOperator struct { Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ReplicationRuleAndOperator) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ReplicationRuleAndOperator) GoString() string { return s.String() } @@ -32090,12 +34444,20 @@ type ReplicationRuleFilter struct { Tag *Tag `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ReplicationRuleFilter) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ReplicationRuleFilter) GoString() string { return s.String() } @@ -32157,12 +34519,20 @@ type ReplicationTime struct { Time *ReplicationTimeValue `type:"structure" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ReplicationTime) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ReplicationTime) GoString() string { return s.String() } @@ -32206,12 +34576,20 @@ type ReplicationTimeValue struct { Minutes *int64 `type:"integer"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s ReplicationTimeValue) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ReplicationTimeValue) GoString() string { return s.String() } @@ -32232,12 +34610,20 @@ type RequestPaymentConfiguration struct { Payer *string `type:"string" required:"true" enum:"Payer"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RequestPaymentConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RequestPaymentConfiguration) GoString() string { return s.String() } @@ -32270,12 +34656,20 @@ type RequestProgress struct { Enabled *bool `type:"boolean"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RequestProgress) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RequestProgress) GoString() string { return s.String() } @@ -32333,12 +34727,20 @@ type RestoreObjectInput struct { VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RestoreObjectInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RestoreObjectInput) GoString() string { return s.String() } @@ -32452,12 +34854,20 @@ type RestoreObjectOutput struct { RestoreOutputPath *string `location:"header" locationName:"x-amz-restore-output-path" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s RestoreObjectOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RestoreObjectOutput) GoString() string { return s.String() } @@ -32505,12 +34915,20 @@ type RestoreRequest struct { Type *string `type:"string" enum:"RestoreRequestType"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RestoreRequest) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RestoreRequest) GoString() string { return s.String() } @@ -32603,12 +35021,20 @@ type RoutingRule struct { Redirect *Redirect `type:"structure" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RoutingRule) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RoutingRule) GoString() string { return s.String() } @@ -32697,12 +35123,20 @@ type Rule struct { Transition *Transition `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Rule) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Rule) GoString() string { return s.String() } @@ -32776,19 +35210,31 @@ type SSEKMS struct { _ struct{} `locationName:"SSE-KMS" type:"structure"` // Specifies the ID of the Amazon Web Services Key Management Service (Amazon - // Web Services KMS) symmetric customer managed customer master key (CMK) to - // use for encrypting inventory reports. + // Web Services KMS) symmetric customer managed key to use for encrypting inventory + // reports. + // + // KeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by SSEKMS's + // String and GoString methods. 
// // KeyId is a required field KeyId *string `type:"string" required:"true" sensitive:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s SSEKMS) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s SSEKMS) GoString() string { return s.String() } @@ -32817,12 +35263,20 @@ type SSES3 struct { _ struct{} `locationName:"SSE-S3" type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s SSES3) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s SSES3) GoString() string { return s.String() } @@ -32848,12 +35302,20 @@ type ScanRange struct { Start *int64 `type:"long"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ScanRange) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ScanRange) GoString() string { return s.String() } @@ -33092,6 +35554,10 @@ type SelectObjectContentInput struct { // The SSE Customer Key. For more information, see Server-Side Encryption (Using // Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by SelectObjectContentInput's + // String and GoString methods. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // The SSE Customer Key MD5. For more information, see Server-Side Encryption @@ -33117,12 +35583,20 @@ type SelectObjectContentInput struct { ScanRange *ScanRange `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s SelectObjectContentInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s SelectObjectContentInput) GoString() string { return s.String() } @@ -33280,12 +35754,20 @@ type SelectObjectContentOutput struct { EventStream *SelectObjectContentEventStream } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s SelectObjectContentOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s SelectObjectContentOutput) GoString() string { return s.String() } @@ -33328,12 +35810,20 @@ type SelectParameters struct { OutputSerialization *OutputSerialization `type:"structure" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s SelectParameters) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s SelectParameters) GoString() string { return s.String() } @@ -33410,6 +35900,10 @@ type ServerSideEncryptionByDefault struct { // Amazon S3 only supports symmetric KMS keys and not asymmetric KMS keys. For // more information, see Using symmetric and asymmetric keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) // in the Amazon Web Services Key Management Service Developer Guide. + // + // KMSMasterKeyID is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by ServerSideEncryptionByDefault's + // String and GoString methods. KMSMasterKeyID *string `type:"string" sensitive:"true"` // Server-side encryption algorithm to use for the default encryption. @@ -33418,12 +35912,20 @@ type ServerSideEncryptionByDefault struct { SSEAlgorithm *string `type:"string" required:"true" enum:"ServerSideEncryption"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ServerSideEncryptionByDefault) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ServerSideEncryptionByDefault) GoString() string { return s.String() } @@ -33464,12 +35966,20 @@ type ServerSideEncryptionConfiguration struct { Rules []*ServerSideEncryptionRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ServerSideEncryptionConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ServerSideEncryptionConfiguration) GoString() string { return s.String() } @@ -33522,12 +36032,20 @@ type ServerSideEncryptionRule struct { BucketKeyEnabled *bool `type:"boolean"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ServerSideEncryptionRule) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ServerSideEncryptionRule) GoString() string { return s.String() } @@ -33563,8 +36081,8 @@ func (s *ServerSideEncryptionRule) SetBucketKeyEnabled(v bool) *ServerSideEncryp // objects that you want to replicate. You can choose to enable or disable the // replication of these objects. Currently, Amazon S3 supports only the filter // that you can specify for objects created with server-side encryption using -// a customer master key (CMK) stored in Amazon Web Services Key Management -// Service (SSE-KMS). +// a customer managed key stored in Amazon Web Services Key Management Service +// (SSE-KMS). type SourceSelectionCriteria struct { _ struct{} `type:"structure"` @@ -33585,12 +36103,20 @@ type SourceSelectionCriteria struct { SseKmsEncryptedObjects *SseKmsEncryptedObjects `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s SourceSelectionCriteria) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s SourceSelectionCriteria) GoString() string { return s.String() } @@ -33640,12 +36166,20 @@ type SseKmsEncryptedObjects struct { Status *string `type:"string" required:"true" enum:"SseKmsEncryptedObjectsStatus"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s SseKmsEncryptedObjects) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s SseKmsEncryptedObjects) GoString() string { return s.String() } @@ -33683,12 +36217,20 @@ type Stats struct { BytesScanned *int64 `type:"long"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Stats) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Stats) GoString() string { return s.String() } @@ -33719,12 +36261,20 @@ type StatsEvent struct { Details *Stats `locationName:"Details" type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s StatsEvent) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s StatsEvent) GoString() string { return s.String() } @@ -33775,12 +36325,20 @@ type StorageClassAnalysis struct { DataExport *StorageClassAnalysisDataExport `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s StorageClassAnalysis) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s StorageClassAnalysis) GoString() string { return s.String() } @@ -33822,12 +36380,20 @@ type StorageClassAnalysisDataExport struct { OutputSchemaVersion *string `type:"string" required:"true" enum:"StorageClassAnalysisSchemaVersion"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s StorageClassAnalysisDataExport) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s StorageClassAnalysisDataExport) GoString() string { return s.String() } @@ -33880,12 +36446,20 @@ type Tag struct { Value *string `type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Tag) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Tag) GoString() string { return s.String() } @@ -33931,12 +36505,20 @@ type Tagging struct { TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Tagging) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Tagging) GoString() string { return s.String() } @@ -33981,12 +36563,20 @@ type TargetGrant struct { Permission *string `type:"string" enum:"BucketLogsPermission"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s TargetGrant) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s TargetGrant) GoString() string { return s.String() } @@ -34041,12 +36631,20 @@ type Tiering struct { Days *int64 `type:"integer" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Tiering) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Tiering) GoString() string { return s.String() } @@ -34108,12 +36706,20 @@ type TopicConfiguration struct { TopicArn *string `locationName:"Topic" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s TopicConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s TopicConfiguration) GoString() string { return s.String() } @@ -34183,12 +36789,20 @@ type TopicConfigurationDeprecated struct { Topic *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s TopicConfigurationDeprecated) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s TopicConfigurationDeprecated) GoString() string { return s.String() } @@ -34236,12 +36850,20 @@ type Transition struct { StorageClass *string `type:"string" enum:"TransitionStorageClass"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Transition) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s Transition) GoString() string { return s.String() } @@ -34347,6 +36969,10 @@ type UploadPartCopyInput struct { // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt // the source object. The encryption key provided in this header must be one // that was used when the source object was created. + // + // CopySourceSSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by UploadPartCopyInput's + // String and GoString methods. CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. @@ -34392,6 +37018,10 @@ type UploadPartCopyInput struct { // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. This must be the same encryption key specified in the initiate multipart // upload request. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by UploadPartCopyInput's + // String and GoString methods. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. @@ -34405,12 +37035,20 @@ type UploadPartCopyInput struct { UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s UploadPartCopyInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s UploadPartCopyInput) GoString() string { return s.String() } @@ -34637,8 +37275,12 @@ type UploadPartCopyOutput struct { SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed customer master key - // (CMK) that was used for the object. + // (Amazon Web Services KMS) symmetric customer managed key that was used for + // the object. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by UploadPartCopyOutput's + // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The server-side encryption algorithm used when storing this object in Amazon @@ -34646,12 +37288,20 @@ type UploadPartCopyOutput struct { ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` } -// String returns the string representation +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s UploadPartCopyOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s UploadPartCopyOutput) GoString() string { return s.String() } @@ -34772,6 +37422,10 @@ type UploadPartInput struct { // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. This must be the same encryption key specified in the initiate multipart // upload request. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by UploadPartInput's + // String and GoString methods. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. @@ -34785,12 +37439,20 @@ type UploadPartInput struct { UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s UploadPartInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s UploadPartInput) GoString() string { return s.String() } @@ -34961,8 +37623,12 @@ type UploadPartOutput struct { SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed customer master key - // (CMK) was used for the object. + // (Amazon Web Services KMS) symmetric customer managed key was used for the + // object. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by UploadPartOutput's + // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The server-side encryption algorithm used when storing this object in Amazon @@ -34970,12 +37636,20 @@ type UploadPartOutput struct { ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s UploadPartOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s UploadPartOutput) GoString() string { return s.String() } @@ -35037,12 +37711,20 @@ type VersioningConfiguration struct { Status *string `type:"string" enum:"BucketVersioningStatus"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s VersioningConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s VersioningConfiguration) GoString() string { return s.String() } @@ -35078,12 +37760,20 @@ type WebsiteConfiguration struct { RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s WebsiteConfiguration) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s WebsiteConfiguration) GoString() string { return s.String() } @@ -35276,8 +37966,12 @@ type WriteGetObjectResponseInput struct { SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-customer-key-MD5" type:"string"` // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed customer master key - // (CMK) that was used for stored in Amazon S3 object. + // (Amazon Web Services KMS) symmetric customer managed key that was used for + // stored in Amazon S3 object. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by WriteGetObjectResponseInput's + // String and GoString methods. SSEKMSKeyId *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The server-side encryption algorithm used when storing requested object in @@ -35328,12 +38022,20 @@ type WriteGetObjectResponseInput struct { VersionId *string `location:"header" locationName:"x-amz-fwd-header-x-amz-version-id" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". func (s WriteGetObjectResponseInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s WriteGetObjectResponseInput) GoString() string { return s.String() } @@ -35583,12 +38285,20 @@ type WriteGetObjectResponseOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s WriteGetObjectResponseOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s WriteGetObjectResponseOutput) GoString() string { return s.String() } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go index 1a6a70311..45f414fa3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go @@ -160,12 +160,12 @@ type UploadInput struct { // If x-amz-server-side-encryption is present and has the value of aws:kms, // this header specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetrical customer managed customer master key - // (CMK) that was used for the object. If you specify x-amz-server-side-encryption:aws:kms, - // but do not providex-amz-server-side-encryption-aws-kms-key-id, Amazon S3 - // uses the Amazon Web Services managed CMK in Amazon Web Services to protect - // the data. If the KMS key does not exist in the same account issuing the command, - // you must use the full ARN and not just the ID. + // (Amazon Web Services KMS) symmetrical customer managed key that was used + // for the object. If you specify x-amz-server-side-encryption:aws:kms, but + // do not providex-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses + // the Amazon Web Services managed key to protect the data. If the KMS key does + // not exist in the same account issuing the command, you must use the full + // ARN and not just the ID. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The server-side encryption algorithm used when storing this object in Amazon diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/api.go b/vendor/github.com/aws/aws-sdk-go/service/sso/api.go index 4498f285e..e3711e642 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sso/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sso/api.go @@ -513,12 +513,20 @@ type AccountInfo struct { EmailAddress *string `locationName:"emailAddress" min:"1" type:"string"` } -// String returns the string representation +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AccountInfo) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AccountInfo) GoString() string { return s.String() } @@ -548,6 +556,10 @@ type GetRoleCredentialsInput struct { // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) // in the AWS SSO OIDC API Reference Guide. // + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by GetRoleCredentialsInput's + // String and GoString methods. + // // AccessToken is a required field AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` @@ -562,12 +574,20 @@ type GetRoleCredentialsInput struct { RoleName *string `location:"querystring" locationName:"role_name" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetRoleCredentialsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetRoleCredentialsInput) GoString() string { return s.String() } @@ -616,12 +636,20 @@ type GetRoleCredentialsOutput struct { RoleCredentials *RoleCredentials `locationName:"roleCredentials" type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetRoleCredentialsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetRoleCredentialsOutput) GoString() string { return s.String() } @@ -641,12 +669,20 @@ type InvalidRequestException struct { Message_ *string `locationName:"message" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s InvalidRequestException) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s InvalidRequestException) GoString() string { return s.String() } @@ -696,6 +732,10 @@ type ListAccountRolesInput struct { // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) // in the AWS SSO OIDC API Reference Guide. // + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by ListAccountRolesInput's + // String and GoString methods. + // // AccessToken is a required field AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` @@ -712,12 +752,20 @@ type ListAccountRolesInput struct { NextToken *string `location:"querystring" locationName:"next_token" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListAccountRolesInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListAccountRolesInput) GoString() string { return s.String() } @@ -776,12 +824,20 @@ type ListAccountRolesOutput struct { RoleList []*RoleInfo `locationName:"roleList" type:"list"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListAccountRolesOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListAccountRolesOutput) GoString() string { return s.String() } @@ -805,6 +861,10 @@ type ListAccountsInput struct { // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) // in the AWS SSO OIDC API Reference Guide. // + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by ListAccountsInput's + // String and GoString methods. + // // AccessToken is a required field AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` @@ -816,12 +876,20 @@ type ListAccountsInput struct { NextToken *string `location:"querystring" locationName:"next_token" type:"string"` } -// String returns the string representation +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListAccountsInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListAccountsInput) GoString() string { return s.String() } @@ -871,12 +939,20 @@ type ListAccountsOutput struct { NextToken *string `locationName:"nextToken" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListAccountsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ListAccountsOutput) GoString() string { return s.String() } @@ -900,16 +976,28 @@ type LogoutInput struct { // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) // in the AWS SSO OIDC API Reference Guide. // + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by LogoutInput's + // String and GoString methods. + // // AccessToken is a required field AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s LogoutInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s LogoutInput) GoString() string { return s.String() } @@ -937,12 +1025,20 @@ type LogoutOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s LogoutOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s LogoutOutput) GoString() string { return s.String() } @@ -955,12 +1051,20 @@ type ResourceNotFoundException struct { Message_ *string `locationName:"message" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ResourceNotFoundException) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s ResourceNotFoundException) GoString() string { return s.String() } @@ -1020,20 +1124,36 @@ type RoleCredentials struct { // The key that is used to sign the request. For more information, see Using // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) // in the AWS IAM User Guide. + // + // SecretAccessKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by RoleCredentials's + // String and GoString methods. SecretAccessKey *string `locationName:"secretAccessKey" type:"string" sensitive:"true"` // The token used for temporary credentials. For more information, see Using // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) // in the AWS IAM User Guide. + // + // SessionToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by RoleCredentials's + // String and GoString methods. SessionToken *string `locationName:"sessionToken" type:"string" sensitive:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RoleCredentials) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RoleCredentials) GoString() string { return s.String() } @@ -1073,12 +1193,20 @@ type RoleInfo struct { RoleName *string `locationName:"roleName" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s RoleInfo) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". func (s RoleInfo) GoString() string { return s.String() } @@ -1104,12 +1232,20 @@ type TooManyRequestsException struct { Message_ *string `locationName:"message" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s TooManyRequestsException) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s TooManyRequestsException) GoString() string { return s.String() } @@ -1161,12 +1297,20 @@ type UnauthorizedException struct { Message_ *string `locationName:"message" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s UnauthorizedException) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s UnauthorizedException) GoString() string { return s.String() } diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index 3cffd533d..a1a8a0952 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -1605,12 +1605,20 @@ type AssumeRoleInput struct { TransitiveTagKeys []*string `type:"list"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AssumeRoleInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AssumeRoleInput) GoString() string { return s.String() } @@ -1785,12 +1793,20 @@ type AssumeRoleOutput struct { SourceIdentity *string `min:"2" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s AssumeRoleOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AssumeRoleOutput) GoString() string { return s.String() } @@ -1917,12 +1933,20 @@ type AssumeRoleWithSAMLInput struct { SAMLAssertion *string `min:"4" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AssumeRoleWithSAMLInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AssumeRoleWithSAMLInput) GoString() string { return s.String() } @@ -2089,12 +2113,20 @@ type AssumeRoleWithSAMLOutput struct { SubjectType *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AssumeRoleWithSAMLOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AssumeRoleWithSAMLOutput) GoString() string { return s.String() } @@ -2264,12 +2296,20 @@ type AssumeRoleWithWebIdentityInput struct { WebIdentityToken *string `min:"4" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AssumeRoleWithWebIdentityInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AssumeRoleWithWebIdentityInput) GoString() string { return s.String() } @@ -2432,12 +2472,20 @@ type AssumeRoleWithWebIdentityOutput struct { SubjectFromWebIdentityToken *string `min:"6" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s AssumeRoleWithWebIdentityOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AssumeRoleWithWebIdentityOutput) GoString() string { return s.String() } @@ -2505,12 +2553,20 @@ type AssumedRoleUser struct { AssumedRoleId *string `min:"2" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AssumedRoleUser) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s AssumedRoleUser) GoString() string { return s.String() } @@ -2552,12 +2608,20 @@ type Credentials struct { SessionToken *string `type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Credentials) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Credentials) GoString() string { return s.String() } @@ -2595,12 +2659,20 @@ type DecodeAuthorizationMessageInput struct { EncodedMessage *string `min:"1" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DecodeAuthorizationMessageInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DecodeAuthorizationMessageInput) GoString() string { return s.String() } @@ -2637,12 +2709,20 @@ type DecodeAuthorizationMessageOutput struct { DecodedMessage *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s DecodeAuthorizationMessageOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s DecodeAuthorizationMessageOutput) GoString() string { return s.String() } @@ -2672,12 +2752,20 @@ type FederatedUser struct { FederatedUserId *string `min:"2" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s FederatedUser) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s FederatedUser) GoString() string { return s.String() } @@ -2706,12 +2794,20 @@ type GetAccessKeyInfoInput struct { AccessKeyId *string `min:"16" type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetAccessKeyInfoInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetAccessKeyInfoInput) GoString() string { return s.String() } @@ -2745,12 +2841,20 @@ type GetAccessKeyInfoOutput struct { Account *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetAccessKeyInfoOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetAccessKeyInfoOutput) GoString() string { return s.String() } @@ -2765,12 +2869,20 @@ type GetCallerIdentityInput struct { _ struct{} `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s GetCallerIdentityInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetCallerIdentityInput) GoString() string { return s.String() } @@ -2794,12 +2906,20 @@ type GetCallerIdentityOutput struct { UserId *string `type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetCallerIdentityOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetCallerIdentityOutput) GoString() string { return s.String() } @@ -2949,12 +3069,20 @@ type GetFederationTokenInput struct { Tags []*Tag `type:"list"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetFederationTokenInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetFederationTokenInput) GoString() string { return s.String() } @@ -3057,12 +3185,20 @@ type GetFederationTokenOutput struct { PackedPolicySize *int64 `type:"integer"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetFederationTokenOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetFederationTokenOutput) GoString() string { return s.String() } @@ -3120,12 +3256,20 @@ type GetSessionTokenInput struct { TokenCode *string `min:"6" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s GetSessionTokenInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetSessionTokenInput) GoString() string { return s.String() } @@ -3181,12 +3325,20 @@ type GetSessionTokenOutput struct { Credentials *Credentials `type:"structure"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetSessionTokenOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s GetSessionTokenOutput) GoString() string { return s.String() } @@ -3209,12 +3361,20 @@ type PolicyDescriptorType struct { Arn *string `locationName:"arn" min:"20" type:"string"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PolicyDescriptorType) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s PolicyDescriptorType) GoString() string { return s.String() } @@ -3267,12 +3427,20 @@ type Tag struct { Value *string `type:"string" required:"true"` } -// String returns the string representation +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s Tag) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
func (s Tag) GoString() string { return s.String() } diff --git a/vendor/github.com/cenkalti/backoff/v4/.gitignore b/vendor/github.com/cenkalti/backoff/v4/.gitignore new file mode 100644 index 000000000..50d95c548 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +# IDEs +.idea/ diff --git a/vendor/github.com/cenkalti/backoff/v4/LICENSE b/vendor/github.com/cenkalti/backoff/v4/LICENSE new file mode 100644 index 000000000..89b817996 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cenkalti/backoff/v4/README.md b/vendor/github.com/cenkalti/backoff/v4/README.md new file mode 100644 index 000000000..16abdfc08 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/README.md @@ -0,0 +1,32 @@ +# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls] + +This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. + +[Exponential backoff][exponential backoff wiki] +is an algorithm that uses feedback to multiplicatively decrease the rate of some process, +in order to gradually find an acceptable rate. +The retries exponentially increase and stop increasing when a certain threshold is met. + +## Usage + +Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end. + +Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation. + +## Contributing + +* I would like to keep this library as small as possible. +* Please don't send a PR without opening an issue and discussing it first. +* If proposed change is not a common use case, I will probably not accept it. 
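The README above summarizes the algorithm; the following is a minimal usage sketch of the vendored backoff/v4 package, included here for illustration only and not part of the patch. The failing operation fetchOnce is a hypothetical stand-in; backoff.Retry, backoff.WithMaxRetries, and backoff.NewExponentialBackOff are the APIs defined in the vendored files added below.

package main

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v4"
)

// fetchOnce is a hypothetical operation that fails transiently.
func fetchOnce() error {
	return errors.New("temporarily unavailable")
}

func main() {
	// Exponential backoff with the package defaults (500ms initial interval,
	// multiplier 1.5), giving up after at most 5 retries.
	policy := backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 5)

	if err := backoff.Retry(fetchOnce, policy); err != nil {
		fmt.Println("giving up:", err)
	}
}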
+ +[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4 +[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png +[travis]: https://travis-ci.org/cenkalti/backoff +[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master +[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master +[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master + +[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java +[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff + +[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples diff --git a/vendor/github.com/cenkalti/backoff/v4/backoff.go b/vendor/github.com/cenkalti/backoff/v4/backoff.go new file mode 100644 index 000000000..3676ee405 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/backoff.go @@ -0,0 +1,66 @@ +// Package backoff implements backoff algorithms for retrying operations. +// +// Use Retry function for retrying operations that may fail. +// If Retry does not meet your needs, +// copy/paste the function into your project and modify as you wish. +// +// There is also Ticker type similar to time.Ticker. +// You can use it if you need to work with channels. +// +// See Examples section below for usage examples. +package backoff + +import "time" + +// BackOff is a backoff policy for retrying an operation. +type BackOff interface { + // NextBackOff returns the duration to wait before retrying the operation, + // or backoff. Stop to indicate that no more retries should be made. + // + // Example usage: + // + // duration := backoff.NextBackOff(); + // if (duration == backoff.Stop) { + // // Do not retry operation. + // } else { + // // Sleep for duration and retry operation. + // } + // + NextBackOff() time.Duration + + // Reset to initial state. + Reset() +} + +// Stop indicates that no more retries should be made for use in NextBackOff(). +const Stop time.Duration = -1 + +// ZeroBackOff is a fixed backoff policy whose backoff time is always zero, +// meaning that the operation is retried immediately without waiting, indefinitely. +type ZeroBackOff struct{} + +func (b *ZeroBackOff) Reset() {} + +func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } + +// StopBackOff is a fixed backoff policy that always returns backoff.Stop for +// NextBackOff(), meaning that the operation should never be retried. +type StopBackOff struct{} + +func (b *StopBackOff) Reset() {} + +func (b *StopBackOff) NextBackOff() time.Duration { return Stop } + +// ConstantBackOff is a backoff policy that always returns the same backoff delay. +// This is in contrast to an exponential backoff policy, +// which returns a delay that grows longer as you call NextBackOff() over and over again. 
+type ConstantBackOff struct { + Interval time.Duration +} + +func (b *ConstantBackOff) Reset() {} +func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } + +func NewConstantBackOff(d time.Duration) *ConstantBackOff { + return &ConstantBackOff{Interval: d} +} diff --git a/vendor/github.com/cenkalti/backoff/v4/context.go b/vendor/github.com/cenkalti/backoff/v4/context.go new file mode 100644 index 000000000..48482330e --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/context.go @@ -0,0 +1,62 @@ +package backoff + +import ( + "context" + "time" +) + +// BackOffContext is a backoff policy that stops retrying after the context +// is canceled. +type BackOffContext interface { // nolint: golint + BackOff + Context() context.Context +} + +type backOffContext struct { + BackOff + ctx context.Context +} + +// WithContext returns a BackOffContext with context ctx +// +// ctx must not be nil +func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint + if ctx == nil { + panic("nil context") + } + + if b, ok := b.(*backOffContext); ok { + return &backOffContext{ + BackOff: b.BackOff, + ctx: ctx, + } + } + + return &backOffContext{ + BackOff: b, + ctx: ctx, + } +} + +func getContext(b BackOff) context.Context { + if cb, ok := b.(BackOffContext); ok { + return cb.Context() + } + if tb, ok := b.(*backOffTries); ok { + return getContext(tb.delegate) + } + return context.Background() +} + +func (b *backOffContext) Context() context.Context { + return b.ctx +} + +func (b *backOffContext) NextBackOff() time.Duration { + select { + case <-b.ctx.Done(): + return Stop + default: + return b.BackOff.NextBackOff() + } +} diff --git a/vendor/github.com/cenkalti/backoff/v4/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go new file mode 100644 index 000000000..2c56c1e71 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/exponential.go @@ -0,0 +1,161 @@ +package backoff + +import ( + "math/rand" + "time" +) + +/* +ExponentialBackOff is a backoff implementation that increases the backoff +period for each retry attempt using a randomization function that grows exponentially. + +NextBackOff() is calculated using the following formula: + + randomized interval = + RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) + +In other words NextBackOff() will range between the randomization factor +percentage below and above the retry interval. + +For example, given the following parameters: + + RetryInterval = 2 + RandomizationFactor = 0.5 + Multiplier = 2 + +the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, +multiplied by the exponential, that is, between 2 and 6 seconds. + +Note: MaxInterval caps the RetryInterval and not the randomized interval. + +If the time elapsed since an ExponentialBackOff instance is created goes past the +MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. + +The elapsed time can be reset by calling Reset(). + +Example: Given the following default arguments, for 10 tries the sequence will be, +and assuming we go over the MaxElapsedTime on the 10th try: + + Request # RetryInterval (seconds) Randomized Interval (seconds) + + 1 0.5 [0.25, 0.75] + 2 0.75 [0.375, 1.125] + 3 1.125 [0.562, 1.687] + 4 1.687 [0.8435, 2.53] + 5 2.53 [1.265, 3.795] + 6 3.795 [1.897, 5.692] + 7 5.692 [2.846, 8.538] + 8 8.538 [4.269, 12.807] + 9 12.807 [6.403, 19.210] + 10 19.210 backoff.Stop + +Note: Implementation is not thread-safe. 
+*/ +type ExponentialBackOff struct { + InitialInterval time.Duration + RandomizationFactor float64 + Multiplier float64 + MaxInterval time.Duration + // After MaxElapsedTime the ExponentialBackOff returns Stop. + // It never stops if MaxElapsedTime == 0. + MaxElapsedTime time.Duration + Stop time.Duration + Clock Clock + + currentInterval time.Duration + startTime time.Time +} + +// Clock is an interface that returns current time for BackOff. +type Clock interface { + Now() time.Time +} + +// Default values for ExponentialBackOff. +const ( + DefaultInitialInterval = 500 * time.Millisecond + DefaultRandomizationFactor = 0.5 + DefaultMultiplier = 1.5 + DefaultMaxInterval = 60 * time.Second + DefaultMaxElapsedTime = 15 * time.Minute +) + +// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. +func NewExponentialBackOff() *ExponentialBackOff { + b := &ExponentialBackOff{ + InitialInterval: DefaultInitialInterval, + RandomizationFactor: DefaultRandomizationFactor, + Multiplier: DefaultMultiplier, + MaxInterval: DefaultMaxInterval, + MaxElapsedTime: DefaultMaxElapsedTime, + Stop: Stop, + Clock: SystemClock, + } + b.Reset() + return b +} + +type systemClock struct{} + +func (t systemClock) Now() time.Time { + return time.Now() +} + +// SystemClock implements Clock interface that uses time.Now(). +var SystemClock = systemClock{} + +// Reset the interval back to the initial retry interval and restarts the timer. +// Reset must be called before using b. +func (b *ExponentialBackOff) Reset() { + b.currentInterval = b.InitialInterval + b.startTime = b.Clock.Now() +} + +// NextBackOff calculates the next backoff interval using the formula: +// Randomized interval = RetryInterval * (1 ± RandomizationFactor) +func (b *ExponentialBackOff) NextBackOff() time.Duration { + // Make sure we have not gone over the maximum elapsed time. + elapsed := b.GetElapsedTime() + next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) + b.incrementCurrentInterval() + if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime { + return b.Stop + } + return next +} + +// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance +// is created and is reset when Reset() is called. +// +// The elapsed time is computed using time.Now().UnixNano(). It is +// safe to call even while the backoff policy is used by a running +// ticker. +func (b *ExponentialBackOff) GetElapsedTime() time.Duration { + return b.Clock.Now().Sub(b.startTime) +} + +// Increments the current interval by multiplying it with the multiplier. +func (b *ExponentialBackOff) incrementCurrentInterval() { + // Check for overflow, if overflow is detected set the current interval to the max interval. + if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { + b.currentInterval = b.MaxInterval + } else { + b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) + } +} + +// Returns a random value from the following interval: +// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval]. +func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { + if randomizationFactor == 0 { + return currentInterval // make sure no randomness is used when randomizationFactor is 0. 
+ } + var delta = randomizationFactor * float64(currentInterval) + var minInterval = float64(currentInterval) - delta + var maxInterval = float64(currentInterval) + delta + + // Get a random value from the range [minInterval, maxInterval]. + // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then + // we want a 33% chance for selecting either 1, 2 or 3. + return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) +} diff --git a/vendor/github.com/cenkalti/backoff/v4/retry.go b/vendor/github.com/cenkalti/backoff/v4/retry.go new file mode 100644 index 000000000..b9c0c51cd --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/retry.go @@ -0,0 +1,146 @@ +package backoff + +import ( + "errors" + "time" +) + +// An OperationWithData is executing by RetryWithData() or RetryNotifyWithData(). +// The operation will be retried using a backoff policy if it returns an error. +type OperationWithData[T any] func() (T, error) + +// An Operation is executing by Retry() or RetryNotify(). +// The operation will be retried using a backoff policy if it returns an error. +type Operation func() error + +func (o Operation) withEmptyData() OperationWithData[struct{}] { + return func() (struct{}, error) { + return struct{}{}, o() + } +} + +// Notify is a notify-on-error function. It receives an operation error and +// backoff delay if the operation failed (with an error). +// +// NOTE that if the backoff policy stated to stop retrying, +// the notify function isn't called. +type Notify func(error, time.Duration) + +// Retry the operation o until it does not return error or BackOff stops. +// o is guaranteed to be run at least once. +// +// If o returns a *PermanentError, the operation is not retried, and the +// wrapped error is returned. +// +// Retry sleeps the goroutine for the duration returned by BackOff after a +// failed operation returns. +func Retry(o Operation, b BackOff) error { + return RetryNotify(o, b, nil) +} + +// RetryWithData is like Retry but returns data in the response too. +func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) { + return RetryNotifyWithData(o, b, nil) +} + +// RetryNotify calls notify function with the error and wait duration +// for each failed attempt before sleep. +func RetryNotify(operation Operation, b BackOff, notify Notify) error { + return RetryNotifyWithTimer(operation, b, notify, nil) +} + +// RetryNotifyWithData is like RetryNotify but returns data in the response too. +func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) { + return doRetryNotify(operation, b, notify, nil) +} + +// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer +// for each failed attempt before sleep. +// A default timer that uses system timer is used when nil is passed. +func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error { + _, err := doRetryNotify(operation.withEmptyData(), b, notify, t) + return err +} + +// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too. 
+func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { + return doRetryNotify(operation, b, notify, t) +} + +func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { + var ( + err error + next time.Duration + res T + ) + if t == nil { + t = &defaultTimer{} + } + + defer func() { + t.Stop() + }() + + ctx := getContext(b) + + b.Reset() + for { + res, err = operation() + if err == nil { + return res, nil + } + + var permanent *PermanentError + if errors.As(err, &permanent) { + return res, permanent.Err + } + + if next = b.NextBackOff(); next == Stop { + if cerr := ctx.Err(); cerr != nil { + return res, cerr + } + + return res, err + } + + if notify != nil { + notify(err, next) + } + + t.Start(next) + + select { + case <-ctx.Done(): + return res, ctx.Err() + case <-t.C(): + } + } +} + +// PermanentError signals that the operation should not be retried. +type PermanentError struct { + Err error +} + +func (e *PermanentError) Error() string { + return e.Err.Error() +} + +func (e *PermanentError) Unwrap() error { + return e.Err +} + +func (e *PermanentError) Is(target error) bool { + _, ok := target.(*PermanentError) + return ok +} + +// Permanent wraps the given err in a *PermanentError. +func Permanent(err error) error { + if err == nil { + return nil + } + return &PermanentError{ + Err: err, + } +} diff --git a/vendor/github.com/cenkalti/backoff/v4/ticker.go b/vendor/github.com/cenkalti/backoff/v4/ticker.go new file mode 100644 index 000000000..df9d68bce --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/ticker.go @@ -0,0 +1,97 @@ +package backoff + +import ( + "context" + "sync" + "time" +) + +// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. +// +// Ticks will continue to arrive when the previous operation is still running, +// so operations that take a while to fail could run in quick succession. +type Ticker struct { + C <-chan time.Time + c chan time.Time + b BackOff + ctx context.Context + timer Timer + stop chan struct{} + stopOnce sync.Once +} + +// NewTicker returns a new Ticker containing a channel that will send +// the time at times specified by the BackOff argument. Ticker is +// guaranteed to tick at least once. The channel is closed when Stop +// method is called or BackOff stops. It is not safe to manipulate the +// provided backoff policy (notably calling NextBackOff or Reset) +// while the ticker is running. +func NewTicker(b BackOff) *Ticker { + return NewTickerWithTimer(b, &defaultTimer{}) +} + +// NewTickerWithTimer returns a new Ticker with a custom timer. +// A default timer that uses system timer is used when nil is passed. +func NewTickerWithTimer(b BackOff, timer Timer) *Ticker { + if timer == nil { + timer = &defaultTimer{} + } + c := make(chan time.Time) + t := &Ticker{ + C: c, + c: c, + b: b, + ctx: getContext(b), + timer: timer, + stop: make(chan struct{}), + } + t.b.Reset() + go t.run() + return t +} + +// Stop turns off a ticker. After Stop, no more ticks will be sent. +func (t *Ticker) Stop() { + t.stopOnce.Do(func() { close(t.stop) }) +} + +func (t *Ticker) run() { + c := t.c + defer close(c) + + // Ticker is guaranteed to tick at least once. + afterC := t.send(time.Now()) + + for { + if afterC == nil { + return + } + + select { + case tick := <-afterC: + afterC = t.send(tick) + case <-t.stop: + t.c = nil // Prevent future ticks from being sent to the channel. 
+ return + case <-t.ctx.Done(): + return + } + } +} + +func (t *Ticker) send(tick time.Time) <-chan time.Time { + select { + case t.c <- tick: + case <-t.stop: + return nil + } + + next := t.b.NextBackOff() + if next == Stop { + t.Stop() + return nil + } + + t.timer.Start(next) + return t.timer.C() +} diff --git a/vendor/github.com/cenkalti/backoff/v4/timer.go b/vendor/github.com/cenkalti/backoff/v4/timer.go new file mode 100644 index 000000000..8120d0213 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/timer.go @@ -0,0 +1,35 @@ +package backoff + +import "time" + +type Timer interface { + Start(duration time.Duration) + Stop() + C() <-chan time.Time +} + +// defaultTimer implements Timer interface using time.Timer +type defaultTimer struct { + timer *time.Timer +} + +// C returns the timers channel which receives the current time when the timer fires. +func (t *defaultTimer) C() <-chan time.Time { + return t.timer.C +} + +// Start starts the timer to fire after the given duration +func (t *defaultTimer) Start(duration time.Duration) { + if t.timer == nil { + t.timer = time.NewTimer(duration) + } else { + t.timer.Reset(duration) + } +} + +// Stop is called when the timer is not used anymore and resources may be freed. +func (t *defaultTimer) Stop() { + if t.timer != nil { + t.timer.Stop() + } +} diff --git a/vendor/github.com/cenkalti/backoff/v4/tries.go b/vendor/github.com/cenkalti/backoff/v4/tries.go new file mode 100644 index 000000000..28d58ca37 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/tries.go @@ -0,0 +1,38 @@ +package backoff + +import "time" + +/* +WithMaxRetries creates a wrapper around another BackOff, which will +return Stop if NextBackOff() has been called too many times since +the last time Reset() was called + +Note: Implementation is not thread-safe. +*/ +func WithMaxRetries(b BackOff, max uint64) BackOff { + return &backOffTries{delegate: b, maxTries: max} +} + +type backOffTries struct { + delegate BackOff + maxTries uint64 + numTries uint64 +} + +func (b *backOffTries) NextBackOff() time.Duration { + if b.maxTries == 0 { + return Stop + } + if b.maxTries > 0 { + if b.maxTries <= b.numTries { + return Stop + } + b.numTries++ + } + return b.delegate.NextBackOff() +} + +func (b *backOffTries) Reset() { + b.numTries = 0 + b.delegate.Reset() +} diff --git a/vendor/github.com/containerd/containerd/LICENSE b/vendor/github.com/containerd/containerd/LICENSE new file mode 100644 index 000000000..584149b6e --- /dev/null +++ b/vendor/github.com/containerd/containerd/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright The containerd Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/containerd/NOTICE b/vendor/github.com/containerd/containerd/NOTICE new file mode 100644 index 000000000..8915f0277 --- /dev/null +++ b/vendor/github.com/containerd/containerd/NOTICE @@ -0,0 +1,16 @@ +Docker +Copyright 2012-2015 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. 
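For reference, the retry helpers in the backoff package vendored above also support context cancellation and non-retryable errors. The sketch below is illustrative only and not part of the patch; the wrapped "bad request" error is a placeholder, while backoff.WithContext and backoff.Permanent are the APIs shown in the vendored context.go and retry.go.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// Stop retrying once the context is done.
	policy := backoff.WithContext(backoff.NewExponentialBackOff(), ctx)

	err := backoff.Retry(func() error {
		// Permanent marks the error as non-retryable, so Retry returns
		// immediately with the wrapped error instead of backing off.
		return backoff.Permanent(errors.New("bad request"))
	}, policy)
	fmt.Println(err) // prints "bad request"
}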
diff --git a/vendor/github.com/containerd/containerd/errdefs/errors.go b/vendor/github.com/containerd/containerd/errdefs/errors.go new file mode 100644 index 000000000..876225597 --- /dev/null +++ b/vendor/github.com/containerd/containerd/errdefs/errors.go @@ -0,0 +1,92 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package errdefs defines the common errors used throughout containerd +// packages. +// +// Use with fmt.Errorf to add context to an error. +// +// To detect an error class, use the IsXXX functions to tell whether an error +// is of a certain type. +// +// The functions ToGRPC and FromGRPC can be used to map server-side and +// client-side errors to the correct types. +package errdefs + +import ( + "context" + "errors" +) + +// Definitions of common error types used throughout containerd. All containerd +// errors returned by most packages will map into one of these errors classes. +// Packages should return errors of these types when they want to instruct a +// client to take a particular action. +// +// For the most part, we just try to provide local grpc errors. Most conditions +// map very well to those defined by grpc. +var ( + ErrUnknown = errors.New("unknown") // used internally to represent a missed mapping. + ErrInvalidArgument = errors.New("invalid argument") + ErrNotFound = errors.New("not found") + ErrAlreadyExists = errors.New("already exists") + ErrFailedPrecondition = errors.New("failed precondition") + ErrUnavailable = errors.New("unavailable") + ErrNotImplemented = errors.New("not implemented") // represents not supported and unimplemented +) + +// IsInvalidArgument returns true if the error is due to an invalid argument +func IsInvalidArgument(err error) bool { + return errors.Is(err, ErrInvalidArgument) +} + +// IsNotFound returns true if the error is due to a missing object +func IsNotFound(err error) bool { + return errors.Is(err, ErrNotFound) +} + +// IsAlreadyExists returns true if the error is due to an already existing +// metadata item +func IsAlreadyExists(err error) bool { + return errors.Is(err, ErrAlreadyExists) +} + +// IsFailedPrecondition returns true if an operation could not proceed to the +// lack of a particular condition +func IsFailedPrecondition(err error) bool { + return errors.Is(err, ErrFailedPrecondition) +} + +// IsUnavailable returns true if the error is due to a resource being unavailable +func IsUnavailable(err error) bool { + return errors.Is(err, ErrUnavailable) +} + +// IsNotImplemented returns true if the error is due to not being implemented +func IsNotImplemented(err error) bool { + return errors.Is(err, ErrNotImplemented) +} + +// IsCanceled returns true if the error is due to `context.Canceled`. +func IsCanceled(err error) bool { + return errors.Is(err, context.Canceled) +} + +// IsDeadlineExceeded returns true if the error is due to +// `context.DeadlineExceeded`. 
+func IsDeadlineExceeded(err error) bool { + return errors.Is(err, context.DeadlineExceeded) +} diff --git a/vendor/github.com/containerd/containerd/errdefs/grpc.go b/vendor/github.com/containerd/containerd/errdefs/grpc.go new file mode 100644 index 000000000..7a9b33e05 --- /dev/null +++ b/vendor/github.com/containerd/containerd/errdefs/grpc.go @@ -0,0 +1,147 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package errdefs + +import ( + "context" + "fmt" + "strings" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// ToGRPC will attempt to map the backend containerd error into a grpc error, +// using the original error message as a description. +// +// Further information may be extracted from certain errors depending on their +// type. +// +// If the error is unmapped, the original error will be returned to be handled +// by the regular grpc error handling stack. +func ToGRPC(err error) error { + if err == nil { + return nil + } + + if isGRPCError(err) { + // error has already been mapped to grpc + return err + } + + switch { + case IsInvalidArgument(err): + return status.Errorf(codes.InvalidArgument, err.Error()) + case IsNotFound(err): + return status.Errorf(codes.NotFound, err.Error()) + case IsAlreadyExists(err): + return status.Errorf(codes.AlreadyExists, err.Error()) + case IsFailedPrecondition(err): + return status.Errorf(codes.FailedPrecondition, err.Error()) + case IsUnavailable(err): + return status.Errorf(codes.Unavailable, err.Error()) + case IsNotImplemented(err): + return status.Errorf(codes.Unimplemented, err.Error()) + case IsCanceled(err): + return status.Errorf(codes.Canceled, err.Error()) + case IsDeadlineExceeded(err): + return status.Errorf(codes.DeadlineExceeded, err.Error()) + } + + return err +} + +// ToGRPCf maps the error to grpc error codes, assembling the formatting string +// and combining it with the target error string. 
+// +// This is equivalent to errdefs.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) +func ToGRPCf(err error, format string, args ...interface{}) error { + return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) +} + +// FromGRPC returns the underlying error from a grpc service based on the grpc error code +func FromGRPC(err error) error { + if err == nil { + return nil + } + + var cls error // divide these into error classes, becomes the cause + + switch code(err) { + case codes.InvalidArgument: + cls = ErrInvalidArgument + case codes.AlreadyExists: + cls = ErrAlreadyExists + case codes.NotFound: + cls = ErrNotFound + case codes.Unavailable: + cls = ErrUnavailable + case codes.FailedPrecondition: + cls = ErrFailedPrecondition + case codes.Unimplemented: + cls = ErrNotImplemented + case codes.Canceled: + cls = context.Canceled + case codes.DeadlineExceeded: + cls = context.DeadlineExceeded + default: + cls = ErrUnknown + } + + msg := rebaseMessage(cls, err) + if msg != "" { + err = fmt.Errorf("%s: %w", msg, cls) + } else { + err = cls + } + + return err +} + +// rebaseMessage removes the repeats for an error at the end of an error +// string. This will happen when taking an error over grpc then remapping it. +// +// Effectively, we just remove the string of cls from the end of err if it +// appears there. +func rebaseMessage(cls error, err error) string { + desc := errDesc(err) + clss := cls.Error() + if desc == clss { + return "" + } + + return strings.TrimSuffix(desc, ": "+clss) +} + +func isGRPCError(err error) bool { + _, ok := status.FromError(err) + return ok +} + +func code(err error) codes.Code { + if s, ok := status.FromError(err); ok { + return s.Code() + } + return codes.Unknown +} + +func errDesc(err error) string { + if s, ok := status.FromError(err); ok { + return s.Message() + } + return err.Error() +} diff --git a/vendor/github.com/containerd/containerd/log/context.go b/vendor/github.com/containerd/containerd/log/context.go new file mode 100644 index 000000000..0db9562b8 --- /dev/null +++ b/vendor/github.com/containerd/containerd/log/context.go @@ -0,0 +1,69 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package log + +import ( + "context" + + "github.com/sirupsen/logrus" +) + +var ( + // G is an alias for GetLogger. + // + // We may want to define this locally to a package to get package tagged log + // messages. + G = GetLogger + + // L is an alias for the standard logger. + L = logrus.NewEntry(logrus.StandardLogger()) +) + +type ( + loggerKey struct{} +) + +const ( + // RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to + // ensure the formatted time is always the same number of characters. + RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" + + // TextFormat represents the text logging format + TextFormat = "text" + + // JSONFormat represents the JSON logging format + JSONFormat = "json" +) + +// WithLogger returns a new context with the provided logger. 
Use in +// combination with logger.WithField(s) for great effect. +func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context { + e := logger.WithContext(ctx) + return context.WithValue(ctx, loggerKey{}, e) +} + +// GetLogger retrieves the current logger from the context. If no logger is +// available, the default logger is returned. +func GetLogger(ctx context.Context) *logrus.Entry { + logger := ctx.Value(loggerKey{}) + + if logger == nil { + return L.WithContext(ctx) + } + + return logger.(*logrus.Entry) +} diff --git a/vendor/github.com/containerd/containerd/pkg/userns/userns_linux.go b/vendor/github.com/containerd/containerd/pkg/userns/userns_linux.go new file mode 100644 index 000000000..6656465ef --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/userns/userns_linux.go @@ -0,0 +1,62 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package userns + +import ( + "bufio" + "fmt" + "os" + "sync" +) + +var ( + inUserNS bool + nsOnce sync.Once +) + +// RunningInUserNS detects whether we are currently running in a user namespace. +// Originally copied from github.com/lxc/lxd/shared/util.go +func RunningInUserNS() bool { + nsOnce.Do(func() { + file, err := os.Open("/proc/self/uid_map") + if err != nil { + // This kernel-provided file only exists if user namespaces are supported + return + } + defer file.Close() + + buf := bufio.NewReader(file) + l, _, err := buf.ReadLine() + if err != nil { + return + } + + line := string(l) + var a, b, c int64 + fmt.Sscanf(line, "%d %d %d", &a, &b, &c) + + /* + * We assume we are in the initial user namespace if we have a full + * range - 4294967295 uids starting at uid 0. + */ + if a == 0 && b == 0 && c == 4294967295 { + return + } + inUserNS = true + }) + return inUserNS +} diff --git a/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go b/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go new file mode 100644 index 000000000..4f8d7dd2d --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go @@ -0,0 +1,26 @@ +//go:build !linux +// +build !linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package userns + +// RunningInUserNS is a stub for non-Linux systems +// Always returns false +func RunningInUserNS() bool { + return false +} diff --git a/vendor/github.com/containerd/containerd/platforms/compare.go b/vendor/github.com/containerd/containerd/platforms/compare.go new file mode 100644 index 000000000..3913ef663 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/compare.go @@ -0,0 +1,203 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "strconv" + "strings" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// MatchComparer is able to match and compare platforms to +// filter and sort platforms. +type MatchComparer interface { + Matcher + + Less(specs.Platform, specs.Platform) bool +} + +// platformVector returns an (ordered) vector of appropriate specs.Platform +// objects to try matching for the given platform object (see platforms.Only). +func platformVector(platform specs.Platform) []specs.Platform { + vector := []specs.Platform{platform} + + switch platform.Architecture { + case "amd64": + if amd64Version, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && amd64Version > 1 { + for amd64Version--; amd64Version >= 1; amd64Version-- { + vector = append(vector, specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v" + strconv.Itoa(amd64Version), + }) + } + } + vector = append(vector, specs.Platform{ + Architecture: "386", + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + }) + case "arm": + if armVersion, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && armVersion > 5 { + for armVersion--; armVersion >= 5; armVersion-- { + vector = append(vector, specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v" + strconv.Itoa(armVersion), + }) + } + } + case "arm64": + variant := platform.Variant + if variant == "" { + variant = "v8" + } + vector = append(vector, platformVector(specs.Platform{ + Architecture: "arm", + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: variant, + })...) + } + + return vector +} + +// Only returns a match comparer for a single platform +// using default resolution logic for the platform. +// +// For arm/v8, will also match arm/v7, arm/v6 and arm/v5 +// For arm/v7, will also match arm/v6 and arm/v5 +// For arm/v6, will also match arm/v5 +// For amd64, will also match 386 +func Only(platform specs.Platform) MatchComparer { + return Ordered(platformVector(Normalize(platform))...) +} + +// OnlyStrict returns a match comparer for a single platform. +// +// Unlike Only, OnlyStrict does not match sub platforms. +// So, "arm/vN" will not match "arm/vM" where M < N, +// and "amd64" will not also match "386". 
+// +// OnlyStrict matches non-canonical forms. +// So, "arm64" matches "arm/64/v8". +func OnlyStrict(platform specs.Platform) MatchComparer { + return Ordered(Normalize(platform)) +} + +// Ordered returns a platform MatchComparer which matches any of the platforms +// but orders them in order they are provided. +func Ordered(platforms ...specs.Platform) MatchComparer { + matchers := make([]Matcher, len(platforms)) + for i := range platforms { + matchers[i] = NewMatcher(platforms[i]) + } + return orderedPlatformComparer{ + matchers: matchers, + } +} + +// Any returns a platform MatchComparer which matches any of the platforms +// with no preference for ordering. +func Any(platforms ...specs.Platform) MatchComparer { + matchers := make([]Matcher, len(platforms)) + for i := range platforms { + matchers[i] = NewMatcher(platforms[i]) + } + return anyPlatformComparer{ + matchers: matchers, + } +} + +// All is a platform MatchComparer which matches all platforms +// with preference for ordering. +var All MatchComparer = allPlatformComparer{} + +type orderedPlatformComparer struct { + matchers []Matcher +} + +func (c orderedPlatformComparer) Match(platform specs.Platform) bool { + for _, m := range c.matchers { + if m.Match(platform) { + return true + } + } + return false +} + +func (c orderedPlatformComparer) Less(p1 specs.Platform, p2 specs.Platform) bool { + for _, m := range c.matchers { + p1m := m.Match(p1) + p2m := m.Match(p2) + if p1m && !p2m { + return true + } + if p1m || p2m { + return false + } + } + return false +} + +type anyPlatformComparer struct { + matchers []Matcher +} + +func (c anyPlatformComparer) Match(platform specs.Platform) bool { + for _, m := range c.matchers { + if m.Match(platform) { + return true + } + } + return false +} + +func (c anyPlatformComparer) Less(p1, p2 specs.Platform) bool { + var p1m, p2m bool + for _, m := range c.matchers { + if !p1m && m.Match(p1) { + p1m = true + } + if !p2m && m.Match(p2) { + p2m = true + } + if p1m && p2m { + return false + } + } + // If one matches, and the other does, sort match first + return p1m && !p2m +} + +type allPlatformComparer struct{} + +func (allPlatformComparer) Match(specs.Platform) bool { + return true +} + +func (allPlatformComparer) Less(specs.Platform, specs.Platform) bool { + return false +} diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go new file mode 100644 index 000000000..046e0356d --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go @@ -0,0 +1,131 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "bufio" + "fmt" + "os" + "runtime" + "strings" + "sync" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" +) + +// Present the ARM instruction set architecture, eg: v7, v8 +// Don't use this value directly; call cpuVariant() instead. 
+var cpuVariantValue string + +var cpuVariantOnce sync.Once + +func cpuVariant() string { + cpuVariantOnce.Do(func() { + if isArmArch(runtime.GOARCH) { + cpuVariantValue = getCPUVariant() + } + }) + return cpuVariantValue +} + +// For Linux, the kernel has already detected the ABI, ISA and Features. +// So we don't need to access the ARM registers to detect platform information +// by ourselves. We can just parse these information from /proc/cpuinfo +func getCPUInfo(pattern string) (info string, err error) { + if !isLinuxOS(runtime.GOOS) { + return "", fmt.Errorf("getCPUInfo for OS %s: %w", runtime.GOOS, errdefs.ErrNotImplemented) + } + + cpuinfo, err := os.Open("/proc/cpuinfo") + if err != nil { + return "", err + } + defer cpuinfo.Close() + + // Start to Parse the Cpuinfo line by line. For SMP SoC, we parse + // the first core is enough. + scanner := bufio.NewScanner(cpuinfo) + for scanner.Scan() { + newline := scanner.Text() + list := strings.Split(newline, ":") + + if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) { + return strings.TrimSpace(list[1]), nil + } + } + + // Check whether the scanner encountered errors + err = scanner.Err() + if err != nil { + return "", err + } + + return "", fmt.Errorf("getCPUInfo for pattern: %s: %w", pattern, errdefs.ErrNotFound) +} + +func getCPUVariant() string { + if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { + // Windows/Darwin only supports v7 for ARM32 and v8 for ARM64 and so we can use + // runtime.GOARCH to determine the variants + var variant string + switch runtime.GOARCH { + case "arm64": + variant = "v8" + case "arm": + variant = "v7" + default: + variant = "unknown" + } + + return variant + } + + variant, err := getCPUInfo("Cpu architecture") + if err != nil { + log.L.WithError(err).Error("failure getting variant") + return "" + } + + // handle edge case for Raspberry Pi ARMv6 devices (which due to a kernel quirk, report "CPU architecture: 7") + // https://www.raspberrypi.org/forums/viewtopic.php?t=12614 + if runtime.GOARCH == "arm" && variant == "7" { + model, err := getCPUInfo("model name") + if err == nil && strings.HasPrefix(strings.ToLower(model), "armv6-compatible") { + variant = "6" + } + } + + switch strings.ToLower(variant) { + case "8", "aarch64": + variant = "v8" + case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": + variant = "v7" + case "6", "6tej": + variant = "v6" + case "5", "5t", "5te", "5tej": + variant = "v5" + case "4", "4t": + variant = "v4" + case "3": + variant = "v3" + default: + variant = "unknown" + } + + return variant +} diff --git a/vendor/github.com/containerd/containerd/platforms/database.go b/vendor/github.com/containerd/containerd/platforms/database.go new file mode 100644 index 000000000..dbe9957ca --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/database.go @@ -0,0 +1,116 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package platforms + +import ( + "runtime" + "strings" +) + +// isLinuxOS returns true if the operating system is Linux. +// +// The OS value should be normalized before calling this function. +func isLinuxOS(os string) bool { + return os == "linux" +} + +// These function are generated from https://golang.org/src/go/build/syslist.go. +// +// We use switch statements because they are slightly faster than map lookups +// and use a little less memory. + +// isKnownOS returns true if we know about the operating system. +// +// The OS value should be normalized before calling this function. +func isKnownOS(os string) bool { + switch os { + case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "ios", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos": + return true + } + return false +} + +// isArmArch returns true if the architecture is ARM. +// +// The arch value should be normalized before being passed to this function. +func isArmArch(arch string) bool { + switch arch { + case "arm", "arm64": + return true + } + return false +} + +// isKnownArch returns true if we know about the architecture. +// +// The arch value should be normalized before being passed to this function. +func isKnownArch(arch string) bool { + switch arch { + case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "loong64", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm": + return true + } + return false +} + +func normalizeOS(os string) string { + if os == "" { + return runtime.GOOS + } + os = strings.ToLower(os) + + switch os { + case "macos": + os = "darwin" + } + return os +} + +// normalizeArch normalizes the architecture. +func normalizeArch(arch, variant string) (string, string) { + arch, variant = strings.ToLower(arch), strings.ToLower(variant) + switch arch { + case "i386": + arch = "386" + variant = "" + case "x86_64", "x86-64", "amd64": + arch = "amd64" + if variant == "v1" { + variant = "" + } + case "aarch64", "arm64": + arch = "arm64" + switch variant { + case "8", "v8": + variant = "" + } + case "armhf": + arch = "arm" + variant = "v7" + case "armel": + arch = "arm" + variant = "v6" + case "arm": + switch variant { + case "", "7": + variant = "v7" + case "5", "6", "8": + variant = "v" + variant + } + } + + return arch, variant +} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults.go b/vendor/github.com/containerd/containerd/platforms/defaults.go new file mode 100644 index 000000000..cfa3ff34a --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/defaults.go @@ -0,0 +1,27 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +// DefaultString returns the default string specifier for the platform. +func DefaultString() string { + return Format(DefaultSpec()) +} + +// DefaultStrict returns strict form of Default. 
+func DefaultStrict() MatchComparer { + return OnlyStrict(DefaultSpec()) +} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go b/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go new file mode 100644 index 000000000..e249fe48d --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go @@ -0,0 +1,45 @@ +//go:build darwin +// +build darwin + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "runtime" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// DefaultSpec returns the current platform's default platform specification. +func DefaultSpec() specs.Platform { + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + } +} + +// Default returns the default matcher for the platform. +func Default() MatchComparer { + return Ordered(DefaultSpec(), specs.Platform{ + // darwin runtime also supports Linux binary via runu/LKL + OS: "linux", + Architecture: runtime.GOARCH, + }) +} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_unix.go b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go new file mode 100644 index 000000000..49690f1b3 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go @@ -0,0 +1,41 @@ +//go:build !windows && !darwin +// +build !windows,!darwin + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "runtime" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// DefaultSpec returns the current platform's default platform specification. +func DefaultSpec() specs.Platform { + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + } +} + +// Default returns the default matcher for the platform. +func Default() MatchComparer { + return Only(DefaultSpec()) +} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go new file mode 100644 index 000000000..ff9771a60 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go @@ -0,0 +1,95 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "fmt" + "runtime" + "strconv" + "strings" + + imagespec "github.com/opencontainers/image-spec/specs-go/v1" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/sys/windows" +) + +// DefaultSpec returns the current platform's default platform specification. +func DefaultSpec() specs.Platform { + major, minor, build := windows.RtlGetNtVersionNumbers() + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + OSVersion: fmt.Sprintf("%d.%d.%d", major, minor, build), + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + } +} + +type matchComparer struct { + defaults Matcher + osVersionPrefix string +} + +// Match matches platform with the same windows major, minor +// and build version. +func (m matchComparer) Match(p specs.Platform) bool { + match := m.defaults.Match(p) + + if match && p.OS == "windows" { + if strings.HasPrefix(p.OSVersion, m.osVersionPrefix) { + return true + } + return p.OSVersion == "" + } + return false +} + +// Less sorts matched platforms in front of other platforms. +// For matched platforms, it puts platforms with larger revision +// number in front. +func (m matchComparer) Less(p1, p2 imagespec.Platform) bool { + m1, m2 := m.Match(p1), m.Match(p2) + if m1 && m2 { + r1, r2 := revision(p1.OSVersion), revision(p2.OSVersion) + return r1 > r2 + } + return m1 && !m2 +} + +func revision(v string) int { + parts := strings.Split(v, ".") + if len(parts) < 4 { + return 0 + } + r, err := strconv.Atoi(parts[3]) + if err != nil { + return 0 + } + return r +} + +// Default returns the current platform's default platform specification. +func Default() MatchComparer { + major, minor, build := windows.RtlGetNtVersionNumbers() + return matchComparer{ + defaults: Ordered(DefaultSpec(), specs.Platform{ + OS: "linux", + Architecture: runtime.GOARCH, + }), + osVersionPrefix: fmt.Sprintf("%d.%d.%d", major, minor, build), + } +} diff --git a/vendor/github.com/containerd/containerd/platforms/platforms.go b/vendor/github.com/containerd/containerd/platforms/platforms.go new file mode 100644 index 000000000..234309941 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/platforms.go @@ -0,0 +1,261 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package platforms provides a toolkit for normalizing, matching and +// specifying container platforms. 
+// +// Centered around OCI platform specifications, we define a string-based +// specifier syntax that can be used for user input. With a specifier, users +// only need to specify the parts of the platform that are relevant to their +// context, providing an operating system or architecture or both. +// +// How do I use this package? +// +// The vast majority of use cases should simply use the match function with +// user input. The first step is to parse a specifier into a matcher: +// +// m, err := Parse("linux") +// if err != nil { ... } +// +// Once you have a matcher, use it to match against the platform declared by a +// component, typically from an image or runtime. Since extracting an images +// platform is a little more involved, we'll use an example against the +// platform default: +// +// if ok := m.Match(Default()); !ok { /* doesn't match */ } +// +// This can be composed in loops for resolving runtimes or used as a filter for +// fetch and select images. +// +// More details of the specifier syntax and platform spec follow. +// +// # Declaring Platform Support +// +// Components that have strict platform requirements should use the OCI +// platform specification to declare their support. Typically, this will be +// images and runtimes that should make these declaring which platform they +// support specifically. This looks roughly as follows: +// +// type Platform struct { +// Architecture string +// OS string +// Variant string +// } +// +// Most images and runtimes should at least set Architecture and OS, according +// to their GOARCH and GOOS values, respectively (follow the OCI image +// specification when in doubt). ARM should set variant under certain +// discussions, which are outlined below. +// +// # Platform Specifiers +// +// While the OCI platform specifications provide a tool for components to +// specify structured information, user input typically doesn't need the full +// context and much can be inferred. To solve this problem, we introduced +// "specifiers". A specifier has the format +// `||/[/]`. The user can provide either the +// operating system or the architecture or both. +// +// An example of a common specifier is `linux/amd64`. If the host has a default +// of runtime that matches this, the user can simply provide the component that +// matters. For example, if a image provides amd64 and arm64 support, the +// operating system, `linux` can be inferred, so they only have to provide +// `arm64` or `amd64`. Similar behavior is implemented for operating systems, +// where the architecture may be known but a runtime may support images from +// different operating systems. +// +// # Normalization +// +// Because not all users are familiar with the way the Go runtime represents +// platforms, several normalizations have been provided to make this package +// easier to user. +// +// The following are performed for architectures: +// +// Value Normalized +// aarch64 arm64 +// armhf arm +// armel arm/v6 +// i386 386 +// x86_64 amd64 +// x86-64 amd64 +// +// We also normalize the operating system `macos` to `darwin`. +// +// # ARM Support +// +// To qualify ARM architecture, the Variant field is used to qualify the arm +// version. The most common arm version, v7, is represented without the variant +// unless it is explicitly provided. This is treated as equivalent to armhf. A +// previous architecture, armel, will be normalized to arm/v6. 
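+//
+// For example, on a linux host the following specifiers are expected to
+// normalize roughly as shown (see Parse and Normalize below):
+//
+//	Parse("armhf")  // linux/arm (v7 implied, so the variant is omitted)
+//	Parse("armel")  // linux/arm/v6
+//	Parse("arm64")  // linux/arm64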
+// +// While these normalizations are provided, their support on arm platforms has +// not yet been fully implemented and tested. +package platforms + +import ( + "fmt" + "path" + "regexp" + "runtime" + "strconv" + "strings" + + "github.com/containerd/containerd/errdefs" + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +var ( + specifierRe = regexp.MustCompile(`^[A-Za-z0-9_-]+$`) +) + +// Matcher matches platforms specifications, provided by an image or runtime. +type Matcher interface { + Match(platform specs.Platform) bool +} + +// NewMatcher returns a simple matcher based on the provided platform +// specification. The returned matcher only looks for equality based on os, +// architecture and variant. +// +// One may implement their own matcher if this doesn't provide the required +// functionality. +// +// Applications should opt to use `Match` over directly parsing specifiers. +func NewMatcher(platform specs.Platform) Matcher { + return &matcher{ + Platform: Normalize(platform), + } +} + +type matcher struct { + specs.Platform +} + +func (m *matcher) Match(platform specs.Platform) bool { + normalized := Normalize(platform) + return m.OS == normalized.OS && + m.Architecture == normalized.Architecture && + m.Variant == normalized.Variant +} + +func (m *matcher) String() string { + return Format(m.Platform) +} + +// Parse parses the platform specifier syntax into a platform declaration. +// +// Platform specifiers are in the format `||/[/]`. +// The minimum required information for a platform specifier is the operating +// system or architecture. If there is only a single string (no slashes), the +// value will be matched against the known set of operating systems, then fall +// back to the known set of architectures. The missing component will be +// inferred based on the local environment. +func Parse(specifier string) (specs.Platform, error) { + if strings.Contains(specifier, "*") { + // TODO(stevvooe): need to work out exact wildcard handling + return specs.Platform{}, fmt.Errorf("%q: wildcards not yet supported: %w", specifier, errdefs.ErrInvalidArgument) + } + + parts := strings.Split(specifier, "/") + + for _, part := range parts { + if !specifierRe.MatchString(part) { + return specs.Platform{}, fmt.Errorf("%q is an invalid component of %q: platform specifier component must match %q: %w", part, specifier, specifierRe.String(), errdefs.ErrInvalidArgument) + } + } + + var p specs.Platform + switch len(parts) { + case 1: + // in this case, we will test that the value might be an OS, then look + // it up. If it is not known, we'll treat it as an architecture. Since + // we have very little information about the platform here, we are + // going to be a little more strict if we don't know about the argument + // value. + p.OS = normalizeOS(parts[0]) + if isKnownOS(p.OS) { + // picks a default architecture + p.Architecture = runtime.GOARCH + if p.Architecture == "arm" && cpuVariant() != "v7" { + p.Variant = cpuVariant() + } + + return p, nil + } + + p.Architecture, p.Variant = normalizeArch(parts[0], "") + if p.Architecture == "arm" && p.Variant == "v7" { + p.Variant = "" + } + if isKnownArch(p.Architecture) { + p.OS = runtime.GOOS + return p, nil + } + + return specs.Platform{}, fmt.Errorf("%q: unknown operating system or architecture: %w", specifier, errdefs.ErrInvalidArgument) + case 2: + // In this case, we treat as a regular os/arch pair. We don't care + // about whether or not we know of the platform. 
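+		// (For example, "windows/amd64" is accepted here even if the
+		// current host cannot run that platform.)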
+ p.OS = normalizeOS(parts[0]) + p.Architecture, p.Variant = normalizeArch(parts[1], "") + if p.Architecture == "arm" && p.Variant == "v7" { + p.Variant = "" + } + + return p, nil + case 3: + // we have a fully specified variant, this is rare + p.OS = normalizeOS(parts[0]) + p.Architecture, p.Variant = normalizeArch(parts[1], parts[2]) + if p.Architecture == "arm64" && p.Variant == "" { + p.Variant = "v8" + } + + return p, nil + } + + return specs.Platform{}, fmt.Errorf("%q: cannot parse platform specifier: %w", specifier, errdefs.ErrInvalidArgument) +} + +// MustParse is like Parses but panics if the specifier cannot be parsed. +// Simplifies initialization of global variables. +func MustParse(specifier string) specs.Platform { + p, err := Parse(specifier) + if err != nil { + panic("platform: Parse(" + strconv.Quote(specifier) + "): " + err.Error()) + } + return p +} + +// Format returns a string specifier from the provided platform specification. +func Format(platform specs.Platform) string { + if platform.OS == "" { + return "unknown" + } + + return path.Join(platform.OS, platform.Architecture, platform.Variant) +} + +// Normalize validates and translate the platform to the canonical value. +// +// For example, if "Aarch64" is encountered, we change it to "arm64" or if +// "x86_64" is encountered, it becomes "amd64". +func Normalize(platform specs.Platform) specs.Platform { + platform.OS = normalizeOS(platform.OS) + platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant) + return platform +} diff --git a/vendor/github.com/cpuguy83/dockercfg/LICENSE b/vendor/github.com/cpuguy83/dockercfg/LICENSE new file mode 100644 index 000000000..8ed681804 --- /dev/null +++ b/vendor/github.com/cpuguy83/dockercfg/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Brian Goff + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/cpuguy83/dockercfg/README.md b/vendor/github.com/cpuguy83/dockercfg/README.md new file mode 100644 index 000000000..880ab8010 --- /dev/null +++ b/vendor/github.com/cpuguy83/dockercfg/README.md @@ -0,0 +1,8 @@ +### github.com/cpuguy83/dockercfg +Go library to load docker CLI configs, auths, etc. with minimal deps. +So far the only deps are on the stdlib. + +### Usage +See the [godoc](https://godoc.org/github.com/cpuguy83/dockercfg) for API details. 
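+
+A minimal lookup might look roughly like the sketch below (`registry.example.com`
+is only a placeholder host):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/cpuguy83/dockercfg"
+)
+
+func main() {
+	// Resolve Docker Hub aliases to the canonical config key, then look up
+	// credentials from config.json or the configured credential helper.
+	host := dockercfg.ResolveRegistryHost("registry.example.com")
+	user, secret, err := dockercfg.GetRegistryCredentials(host)
+	if err != nil {
+		fmt.Println("credential lookup failed:", err)
+		return
+	}
+	// Per the package docs, an empty username means secret is an identity token.
+	fmt.Println("user:", user, "found credentials:", secret != "")
+}
+```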
+ +I'm currently using this in [zapp](https://github.com/cpuguy83/zapp/blob/d25c43d4cd7ccf29fba184aafbc720a753e1a15d/main.go#L58-L83) to handle registry auth instead of always asking the user to enter it. \ No newline at end of file diff --git a/vendor/github.com/cpuguy83/dockercfg/auth.go b/vendor/github.com/cpuguy83/dockercfg/auth.go new file mode 100644 index 000000000..5a9891b87 --- /dev/null +++ b/vendor/github.com/cpuguy83/dockercfg/auth.go @@ -0,0 +1,184 @@ +package dockercfg + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "runtime" + "strings" +) + +// This is used by the docker CLI in casses where an oauth identity token is used. +// In that case the username is stored litterally as `` +// When fetching the credentials we check for this value to determine if +const tokenUsername = "" + +// GetRegistryCredentials gets registry credentials for the passed in registry host. +// +// This will use `LoadDefaultConfig` to read registry auth details from the config. +// If the config doesn't exist, it will attempt to load registry credentials using the default credential helper for the platform. +func GetRegistryCredentials(hostname string) (string, string, error) { + cfg, err := LoadDefaultConfig() + if err != nil { + if !os.IsNotExist(err) { + return "", "", err + } + return GetCredentialsFromHelper("", hostname) + } + return cfg.GetRegistryCredentials(hostname) +} + +// ResolveRegistryHost can be used to transform a docker registry host name into what is used for the docker config/cred helpers +// +// This is useful for using with containerd authorizers. +// Natrually this only transforms docker hub URLs. +func ResolveRegistryHost(host string) string { + switch host { + case "index.docker.io", "docker.io", "https://index.docker.io/v1/", "registry-1.docker.io": + return "https://index.docker.io/v1/" + } + return host +} + +// GetRegistryCredentials gets credentials, if any, for the provided hostname +// +// Hostnames should already be resolved using `ResolveRegistryAuth` +// +// If the returned username string is empty, the password is an identity token. +func (c *Config) GetRegistryCredentials(hostname string) (string, string, error) { + h, ok := c.CredentialHelpers[hostname] + if ok { + return GetCredentialsFromHelper(h, hostname) + } + + if c.CredentialsStore != "" { + return GetCredentialsFromHelper(c.CredentialsStore, hostname) + } + + auth, ok := c.AuthConfigs[hostname] + if !ok { + return GetCredentialsFromHelper("", hostname) + } + + if auth.IdentityToken != "" { + return "", auth.IdentityToken, nil + } + + if auth.Username != "" && auth.Password != "" { + return auth.Username, auth.Password, nil + } + + return DecodeBase64Auth(auth) +} + +// DecodeBase64Auth decodes the legacy file-based auth storage from the docker CLI. +// It takes the "Auth" filed from AuthConfig and decodes that into a username and password. +// +// If "Auth" is empty, an empty user/pass will be returned, but not an error. 
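+//
+// For example, an Auth value of "YWxpY2U6czNjcmV0" (base64 of "alice:s3cret")
+// is expected to decode to username "alice" and password "s3cret".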
+func DecodeBase64Auth(auth AuthConfig) (string, string, error) { + if auth.Auth == "" { + return "", "", nil + } + + decLen := base64.StdEncoding.DecodedLen(len(auth.Auth)) + decoded := make([]byte, decLen) + n, err := base64.StdEncoding.Decode(decoded, []byte(auth.Auth)) + if err != nil { + return "", "", fmt.Errorf("error decoding auth from file: %w", err) + } + + if n > decLen { + return "", "", fmt.Errorf("decoded value is longer than expected length, expected: %d, actual: %d", decLen, n) + } + + split := strings.SplitN(string(decoded), ":", 2) + if len(split) != 2 { + return "", "", errors.New("invalid auth string") + } + + return split[0], strings.Trim(split[1], "\x00"), nil +} + +// Errors from credential helpers +var ( + ErrCredentialsNotFound = errors.New("credentials not found in native keychain") + ErrCredentialsMissingServerURL = errors.New("no credentials server URL") +) + +// GetCredentialsFromHelper attempts to lookup credentials from the passed in docker credential helper. +// +// The credential helpoer should just be the suffix name (no "docker-credential-"). +// If the passed in helper program is empty this will look up the default helper for the platform. +// +// If the credentials are not found, no error is returned, only empty credentials. +// +// Hostnames should already be resolved using `ResolveRegistryAuth` +// +// If the username string is empty, the password string is an identity token. +func GetCredentialsFromHelper(helper, hostname string) (string, string, error) { + if helper == "" { + helper = getCredentialHelper() + } + if helper == "" { + return "", "", nil + } + + p, err := exec.LookPath("docker-credential-" + helper) + if err != nil { + return "", "", nil + } + + cmd := exec.Command(p, "get") + cmd.Stdin = strings.NewReader(hostname) + + b, err := cmd.Output() + if err != nil { + s := strings.TrimSpace(string(b)) + + switch s { + case ErrCredentialsNotFound.Error(): + return "", "", nil + case ErrCredentialsMissingServerURL.Error(): + return "", "", errors.New(s) + default: + } + + return "", "", err + } + + var creds struct { + Username string + Secret string + } + + if err := json.Unmarshal(b, &creds); err != nil { + return "", "", err + } + + // When tokenUsername is used, the output is an identity token and the username is garbage + if creds.Username == tokenUsername { + creds.Username = "" + } + + return creds.Username, creds.Secret, nil +} + +// getCredentialHelper gets the default credential helper name for the current platform. +func getCredentialHelper() string { + switch runtime.GOOS { + case "linux": + if _, err := exec.LookPath("pass"); err == nil { + return "pass" + } + return "secretservice" + case "darwin": + return "osxkeychain" + case "windows": + return "wincred" + default: + return "" + } +} diff --git a/vendor/github.com/cpuguy83/dockercfg/config.go b/vendor/github.com/cpuguy83/dockercfg/config.go new file mode 100644 index 000000000..88736cabc --- /dev/null +++ b/vendor/github.com/cpuguy83/dockercfg/config.go @@ -0,0 +1,65 @@ +package dockercfg + +// Config represents the on disk format of the docker CLI's config file. 
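+//
+// On disk this maps from JSON roughly along these lines (a sketch with
+// placeholder values):
+//
+//	{
+//	  "auths": { "https://index.docker.io/v1/": { "auth": "..." } },
+//	  "credsStore": "osxkeychain"
+//	}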
+type Config struct { + AuthConfigs map[string]AuthConfig `json:"auths"` + HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"` + PsFormat string `json:"psFormat,omitempty"` + ImagesFormat string `json:"imagesFormat,omitempty"` + NetworksFormat string `json:"networksFormat,omitempty"` + PluginsFormat string `json:"pluginsFormat,omitempty"` + VolumesFormat string `json:"volumesFormat,omitempty"` + StatsFormat string `json:"statsFormat,omitempty"` + DetachKeys string `json:"detachKeys,omitempty"` + CredentialsStore string `json:"credsStore,omitempty"` + CredentialHelpers map[string]string `json:"credHelpers,omitempty"` + Filename string `json:"-"` // Note: for internal use only + ServiceInspectFormat string `json:"serviceInspectFormat,omitempty"` + ServicesFormat string `json:"servicesFormat,omitempty"` + TasksFormat string `json:"tasksFormat,omitempty"` + SecretFormat string `json:"secretFormat,omitempty"` + ConfigFormat string `json:"configFormat,omitempty"` + NodesFormat string `json:"nodesFormat,omitempty"` + PruneFilters []string `json:"pruneFilters,omitempty"` + Proxies map[string]ProxyConfig `json:"proxies,omitempty"` + Experimental string `json:"experimental,omitempty"` + StackOrchestrator string `json:"stackOrchestrator,omitempty"` + Kubernetes *KubernetesConfig `json:"kubernetes,omitempty"` + CurrentContext string `json:"currentContext,omitempty"` + CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"` + Aliases map[string]string `json:"aliases,omitempty"` +} + +// ProxyConfig contains proxy configuration settings +type ProxyConfig struct { + HTTPProxy string `json:"httpProxy,omitempty"` + HTTPSProxy string `json:"httpsProxy,omitempty"` + NoProxy string `json:"noProxy,omitempty"` + FTPProxy string `json:"ftpProxy,omitempty"` +} + +// AuthConfig contains authorization information for connecting to a Registry +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + // Email is an optional value associated with the username. + // This field is deprecated and will be removed in a later + // version of docker. + Email string `json:"email,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. + IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} + +// KubernetesConfig contains Kubernetes orchestrator settings +type KubernetesConfig struct { + AllNamespaces string `json:"allNamespaces,omitempty"` +} diff --git a/vendor/github.com/cpuguy83/dockercfg/load.go b/vendor/github.com/cpuguy83/dockercfg/load.go new file mode 100644 index 000000000..262545b9d --- /dev/null +++ b/vendor/github.com/cpuguy83/dockercfg/load.go @@ -0,0 +1,51 @@ +package dockercfg + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" +) + +// UserHomeConfigPath returns the path to the docker config in the current user's home dir. +func UserHomeConfigPath() (string, error) { + home, err := os.UserHomeDir() + if err != nil { + return "", fmt.Errorf("error looking up user home dir: %w", err) + } + + return filepath.Join(home, ".docker", "config.json"), nil +} + +// ConfigPath returns the path to the docker cli config. 
+// +// It will either use the DOCKER_CONFIG env var if set, or the value from `UserHomeConfigPath` +// DOCKER_CONFIG would be the dir path where `config.json` is stored, this returns the path to config.json. +func ConfigPath() (string, error) { + if p := os.Getenv("DOCKER_CONFIG"); p != "" { + return filepath.Join(p, "config.json"), nil + } + return UserHomeConfigPath() +} + +// LoadDefaultConfig loads the docker cli config from the path returned from `ConfigPath` +func LoadDefaultConfig() (Config, error) { + var cfg Config + p, err := ConfigPath() + if err != nil { + return cfg, err + } + return cfg, FromFile(p, &cfg) +} + +// FromFile loads config from the specified path into cfg +func FromFile(configPath string, cfg *Config) error { + f, err := os.Open(configPath) + if err != nil { + return err + } + + err = json.NewDecoder(f).Decode(&cfg) + f.Close() + return err +} diff --git a/vendor/github.com/docker/distribution/LICENSE b/vendor/github.com/docker/distribution/LICENSE new file mode 100644 index 000000000..e06d20818 --- /dev/null +++ b/vendor/github.com/docker/distribution/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/docker/distribution/digestset/set.go b/vendor/github.com/docker/distribution/digestset/set.go new file mode 100644 index 000000000..71327dca7 --- /dev/null +++ b/vendor/github.com/docker/distribution/digestset/set.go @@ -0,0 +1,247 @@ +package digestset + +import ( + "errors" + "sort" + "strings" + "sync" + + digest "github.com/opencontainers/go-digest" +) + +var ( + // ErrDigestNotFound is used when a matching digest + // could not be found in a set. + ErrDigestNotFound = errors.New("digest not found") + + // ErrDigestAmbiguous is used when multiple digests + // are found in a set. None of the matching digests + // should be considered valid matches. + ErrDigestAmbiguous = errors.New("ambiguous digest string") +) + +// Set is used to hold a unique set of digests which +// may be easily referenced by easily referenced by a string +// representation of the digest as well as short representation. 
+// The uniqueness of the short representation is based on other +// digests in the set. If digests are omitted from this set, +// collisions in a larger set may not be detected, therefore it +// is important to always do short representation lookups on +// the complete set of digests. To mitigate collisions, an +// appropriately long short code should be used. +type Set struct { + mutex sync.RWMutex + entries digestEntries +} + +// NewSet creates an empty set of digests +// which may have digests added. +func NewSet() *Set { + return &Set{ + entries: digestEntries{}, + } +} + +// checkShortMatch checks whether two digests match as either whole +// values or short values. This function does not test equality, +// rather whether the second value could match against the first +// value. +func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool { + if len(hex) == len(shortHex) { + if hex != shortHex { + return false + } + if len(shortAlg) > 0 && string(alg) != shortAlg { + return false + } + } else if !strings.HasPrefix(hex, shortHex) { + return false + } else if len(shortAlg) > 0 && string(alg) != shortAlg { + return false + } + return true +} + +// Lookup looks for a digest matching the given string representation. +// If no digests could be found ErrDigestNotFound will be returned +// with an empty digest value. If multiple matches are found +// ErrDigestAmbiguous will be returned with an empty digest value. +func (dst *Set) Lookup(d string) (digest.Digest, error) { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + if len(dst.entries) == 0 { + return "", ErrDigestNotFound + } + var ( + searchFunc func(int) bool + alg digest.Algorithm + hex string + ) + dgst, err := digest.Parse(d) + if err == digest.ErrDigestInvalidFormat { + hex = d + searchFunc = func(i int) bool { + return dst.entries[i].val >= d + } + } else { + hex = dgst.Hex() + alg = dgst.Algorithm() + searchFunc = func(i int) bool { + if dst.entries[i].val == hex { + return dst.entries[i].alg >= alg + } + return dst.entries[i].val >= hex + } + } + idx := sort.Search(len(dst.entries), searchFunc) + if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) { + return "", ErrDigestNotFound + } + if dst.entries[idx].alg == alg && dst.entries[idx].val == hex { + return dst.entries[idx].digest, nil + } + if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) { + return "", ErrDigestAmbiguous + } + + return dst.entries[idx].digest, nil +} + +// Add adds the given digest to the set. An error will be returned +// if the given digest is invalid. If the digest already exists in the +// set, this operation will be a no-op. +func (dst *Set) Add(d digest.Digest) error { + if err := d.Validate(); err != nil { + return err + } + dst.mutex.Lock() + defer dst.mutex.Unlock() + entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} + searchFunc := func(i int) bool { + if dst.entries[i].val == entry.val { + return dst.entries[i].alg >= entry.alg + } + return dst.entries[i].val >= entry.val + } + idx := sort.Search(len(dst.entries), searchFunc) + if idx == len(dst.entries) { + dst.entries = append(dst.entries, entry) + return nil + } else if dst.entries[idx].digest == d { + return nil + } + + entries := append(dst.entries, nil) + copy(entries[idx+1:], entries[idx:len(entries)-1]) + entries[idx] = entry + dst.entries = entries + return nil +} + +// Remove removes the given digest from the set. 
An err will be +// returned if the given digest is invalid. If the digest does +// not exist in the set, this operation will be a no-op. +func (dst *Set) Remove(d digest.Digest) error { + if err := d.Validate(); err != nil { + return err + } + dst.mutex.Lock() + defer dst.mutex.Unlock() + entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} + searchFunc := func(i int) bool { + if dst.entries[i].val == entry.val { + return dst.entries[i].alg >= entry.alg + } + return dst.entries[i].val >= entry.val + } + idx := sort.Search(len(dst.entries), searchFunc) + // Not found if idx is after or value at idx is not digest + if idx == len(dst.entries) || dst.entries[idx].digest != d { + return nil + } + + entries := dst.entries + copy(entries[idx:], entries[idx+1:]) + entries = entries[:len(entries)-1] + dst.entries = entries + + return nil +} + +// All returns all the digests in the set +func (dst *Set) All() []digest.Digest { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + retValues := make([]digest.Digest, len(dst.entries)) + for i := range dst.entries { + retValues[i] = dst.entries[i].digest + } + + return retValues +} + +// ShortCodeTable returns a map of Digest to unique short codes. The +// length represents the minimum value, the maximum length may be the +// entire value of digest if uniqueness cannot be achieved without the +// full value. This function will attempt to make short codes as short +// as possible to be unique. +func ShortCodeTable(dst *Set, length int) map[digest.Digest]string { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + m := make(map[digest.Digest]string, len(dst.entries)) + l := length + resetIdx := 0 + for i := 0; i < len(dst.entries); i++ { + var short string + extended := true + for extended { + extended = false + if len(dst.entries[i].val) <= l { + short = dst.entries[i].digest.String() + } else { + short = dst.entries[i].val[:l] + for j := i + 1; j < len(dst.entries); j++ { + if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { + if j > resetIdx { + resetIdx = j + } + extended = true + } else { + break + } + } + if extended { + l++ + } + } + } + m[dst.entries[i].digest] = short + if i >= resetIdx { + l = length + } + } + return m +} + +type digestEntry struct { + alg digest.Algorithm + val string + digest digest.Digest +} + +type digestEntries []*digestEntry + +func (d digestEntries) Len() int { + return len(d) +} + +func (d digestEntries) Less(i, j int) bool { + if d[i].val != d[j].val { + return d[i].val < d[j].val + } + return d[i].alg < d[j].alg +} + +func (d digestEntries) Swap(i, j int) { + d[i], d[j] = d[j], d[i] +} diff --git a/vendor/github.com/docker/distribution/reference/helpers.go b/vendor/github.com/docker/distribution/reference/helpers.go new file mode 100644 index 000000000..978df7eab --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/helpers.go @@ -0,0 +1,42 @@ +package reference + +import "path" + +// IsNameOnly returns true if reference only contains a repo name. +func IsNameOnly(ref Named) bool { + if _, ok := ref.(NamedTagged); ok { + return false + } + if _, ok := ref.(Canonical); ok { + return false + } + return true +} + +// FamiliarName returns the familiar name string +// for the given named, familiarizing if needed. +func FamiliarName(ref Named) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().Name() + } + return ref.Name() +} + +// FamiliarString returns the familiar string representation +// for the given reference, familiarizing if needed. 
+func FamiliarString(ref Reference) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().String() + } + return ref.String() +} + +// FamiliarMatch reports whether ref matches the specified pattern. +// See https://godoc.org/path#Match for supported patterns. +func FamiliarMatch(pattern string, ref Reference) (bool, error) { + matched, err := path.Match(pattern, FamiliarString(ref)) + if namedRef, isNamed := ref.(Named); isNamed && !matched { + matched, _ = path.Match(pattern, FamiliarName(namedRef)) + } + return matched, err +} diff --git a/vendor/github.com/docker/distribution/reference/normalize.go b/vendor/github.com/docker/distribution/reference/normalize.go new file mode 100644 index 000000000..b3dfb7a6d --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/normalize.go @@ -0,0 +1,199 @@ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/docker/distribution/digestset" + "github.com/opencontainers/go-digest" +) + +var ( + legacyDefaultDomain = "index.docker.io" + defaultDomain = "docker.io" + officialRepoName = "library" + defaultTag = "latest" +) + +// normalizedNamed represents a name which has been +// normalized and has a familiar form. A familiar name +// is what is used in Docker UI. An example normalized +// name is "docker.io/library/ubuntu" and corresponding +// familiar name of "ubuntu". +type normalizedNamed interface { + Named + Familiar() Named +} + +// ParseNormalizedNamed parses a string into a named reference +// transforming a familiar name from Docker UI to a fully +// qualified reference. If the value may be an identifier +// use ParseAnyReference. +func ParseNormalizedNamed(s string) (Named, error) { + if ok := anchoredIdentifierRegexp.MatchString(s); ok { + return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) + } + domain, remainder := splitDockerDomain(s) + var remoteName string + if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { + remoteName = remainder[:tagSep] + } else { + remoteName = remainder + } + if strings.ToLower(remoteName) != remoteName { + return nil, errors.New("invalid reference format: repository name must be lowercase") + } + + ref, err := Parse(domain + "/" + remainder) + if err != nil { + return nil, err + } + named, isNamed := ref.(Named) + if !isNamed { + return nil, fmt.Errorf("reference %s has no name", ref.String()) + } + return named, nil +} + +// ParseDockerRef normalizes the image reference following the docker convention. This is added +// mainly for backward compatibility. +// The reference returned can only be either tagged or digested. For reference contains both tag +// and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@ +// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as +// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa. +func ParseDockerRef(ref string) (Named, error) { + named, err := ParseNormalizedNamed(ref) + if err != nil { + return nil, err + } + if _, ok := named.(NamedTagged); ok { + if canonical, ok := named.(Canonical); ok { + // The reference is both tagged and digested, only + // return digested. 
+ newNamed, err := WithName(canonical.Name()) + if err != nil { + return nil, err + } + newCanonical, err := WithDigest(newNamed, canonical.Digest()) + if err != nil { + return nil, err + } + return newCanonical, nil + } + } + return TagNameOnly(named), nil +} + +// splitDockerDomain splits a repository name to domain and remotename string. +// If no valid domain is found, the default domain is used. Repository name +// needs to be already validated before. +func splitDockerDomain(name string) (domain, remainder string) { + i := strings.IndexRune(name, '/') + if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { + domain, remainder = defaultDomain, name + } else { + domain, remainder = name[:i], name[i+1:] + } + if domain == legacyDefaultDomain { + domain = defaultDomain + } + if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { + remainder = officialRepoName + "/" + remainder + } + return +} + +// familiarizeName returns a shortened version of the name familiar +// to to the Docker UI. Familiar names have the default domain +// "docker.io" and "library/" repository prefix removed. +// For example, "docker.io/library/redis" will have the familiar +// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". +// Returns a familiarized named only reference. +func familiarizeName(named namedRepository) repository { + repo := repository{ + domain: named.Domain(), + path: named.Path(), + } + + if repo.domain == defaultDomain { + repo.domain = "" + // Handle official repositories which have the pattern "library/" + if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { + repo.path = split[1] + } + } + return repo +} + +func (r reference) Familiar() Named { + return reference{ + namedRepository: familiarizeName(r.namedRepository), + tag: r.tag, + digest: r.digest, + } +} + +func (r repository) Familiar() Named { + return familiarizeName(r) +} + +func (t taggedReference) Familiar() Named { + return taggedReference{ + namedRepository: familiarizeName(t.namedRepository), + tag: t.tag, + } +} + +func (c canonicalReference) Familiar() Named { + return canonicalReference{ + namedRepository: familiarizeName(c.namedRepository), + digest: c.digest, + } +} + +// TagNameOnly adds the default tag "latest" to a reference if it only has +// a repo name. +func TagNameOnly(ref Named) Named { + if IsNameOnly(ref) { + namedTagged, err := WithTag(ref, defaultTag) + if err != nil { + // Default tag must be valid, to create a NamedTagged + // type with non-validated input the WithTag function + // should be used instead + panic(err) + } + return namedTagged + } + return ref +} + +// ParseAnyReference parses a reference string as a possible identifier, +// full digest, or familiar name. +func ParseAnyReference(ref string) (Reference, error) { + if ok := anchoredIdentifierRegexp.MatchString(ref); ok { + return digestReference("sha256:" + ref), nil + } + if dgst, err := digest.Parse(ref); err == nil { + return digestReference(dgst), nil + } + + return ParseNormalizedNamed(ref) +} + +// ParseAnyReferenceWithSet parses a reference string as a possible short +// identifier to be matched in a digest set, a full digest, or familiar name. 
+func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) { + if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok { + dgst, err := ds.Lookup(ref) + if err == nil { + return digestReference(dgst), nil + } + } else { + if dgst, err := digest.Parse(ref); err == nil { + return digestReference(dgst), nil + } + } + + return ParseNormalizedNamed(ref) +} diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go new file mode 100644 index 000000000..b7cd00b0d --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/reference.go @@ -0,0 +1,433 @@ +// Package reference provides a general type to represent any way of referencing images within the registry. +// Its main purpose is to abstract tags and digests (content-addressable hash). +// +// Grammar +// +// reference := name [ ":" tag ] [ "@" digest ] +// name := [domain '/'] path-component ['/' path-component]* +// domain := domain-component ['.' domain-component]* [':' port-number] +// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ +// port-number := /[0-9]+/ +// path-component := alpha-numeric [separator alpha-numeric]* +// alpha-numeric := /[a-z0-9]+/ +// separator := /[_.]|__|[-]*/ +// +// tag := /[\w][\w.-]{0,127}/ +// +// digest := digest-algorithm ":" digest-hex +// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* +// digest-algorithm-separator := /[+.-_]/ +// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ +// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value +// +// identifier := /[a-f0-9]{64}/ +// short-identifier := /[a-f0-9]{6,64}/ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/opencontainers/go-digest" +) + +const ( + // NameTotalLengthMax is the maximum total number of characters in a repository name. + NameTotalLengthMax = 255 +) + +var ( + // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. + ErrReferenceInvalidFormat = errors.New("invalid reference format") + + // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. + ErrTagInvalidFormat = errors.New("invalid tag format") + + // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. + ErrDigestInvalidFormat = errors.New("invalid digest format") + + // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. + ErrNameContainsUppercase = errors.New("repository name must be lowercase") + + // ErrNameEmpty is returned for empty, invalid repository names. + ErrNameEmpty = errors.New("repository name must have at least one component") + + // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. + ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) + + // ErrNameNotCanonical is returned when a name is not canonical. + ErrNameNotCanonical = errors.New("repository name must be canonical") +) + +// Reference is an opaque object reference identifier that may include +// modifiers such as a hostname, name, tag, and digest. +type Reference interface { + // String returns the full reference + String() string +} + +// Field provides a wrapper type for resolving correct reference types when +// working with encoding. +type Field struct { + reference Reference +} + +// AsField wraps a reference in a Field for encoding. 
+func AsField(reference Reference) Field { + return Field{reference} +} + +// Reference unwraps the reference type from the field to +// return the Reference object. This object should be +// of the appropriate type to further check for different +// reference types. +func (f Field) Reference() Reference { + return f.reference +} + +// MarshalText serializes the field to byte text which +// is the string of the reference. +func (f Field) MarshalText() (p []byte, err error) { + return []byte(f.reference.String()), nil +} + +// UnmarshalText parses text bytes by invoking the +// reference parser to ensure the appropriately +// typed reference object is wrapped by field. +func (f *Field) UnmarshalText(p []byte) error { + r, err := Parse(string(p)) + if err != nil { + return err + } + + f.reference = r + return nil +} + +// Named is an object with a full name +type Named interface { + Reference + Name() string +} + +// Tagged is an object which has a tag +type Tagged interface { + Reference + Tag() string +} + +// NamedTagged is an object including a name and tag. +type NamedTagged interface { + Named + Tag() string +} + +// Digested is an object which has a digest +// in which it can be referenced by +type Digested interface { + Reference + Digest() digest.Digest +} + +// Canonical reference is an object with a fully unique +// name including a name with domain and digest +type Canonical interface { + Named + Digest() digest.Digest +} + +// namedRepository is a reference to a repository with a name. +// A namedRepository has both domain and path components. +type namedRepository interface { + Named + Domain() string + Path() string +} + +// Domain returns the domain part of the Named reference +func Domain(named Named) string { + if r, ok := named.(namedRepository); ok { + return r.Domain() + } + domain, _ := splitDomain(named.Name()) + return domain +} + +// Path returns the name without the domain part of the Named reference +func Path(named Named) (name string) { + if r, ok := named.(namedRepository); ok { + return r.Path() + } + _, path := splitDomain(named.Name()) + return path +} + +func splitDomain(name string) (string, string) { + match := anchoredNameRegexp.FindStringSubmatch(name) + if len(match) != 3 { + return "", name + } + return match[1], match[2] +} + +// SplitHostname splits a named reference into a +// hostname and name string. If no valid hostname is +// found, the hostname is empty and the full value +// is returned as name +// DEPRECATED: Use Domain or Path +func SplitHostname(named Named) (string, string) { + if r, ok := named.(namedRepository); ok { + return r.Domain(), r.Path() + } + return splitDomain(named.Name()) +} + +// Parse parses s and returns a syntactically valid Reference. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: Parse will not handle short digests. 
+func Parse(s string) (Reference, error) { + matches := ReferenceRegexp.FindStringSubmatch(s) + if matches == nil { + if s == "" { + return nil, ErrNameEmpty + } + if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { + return nil, ErrNameContainsUppercase + } + return nil, ErrReferenceInvalidFormat + } + + if len(matches[1]) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + var repo repository + + nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) + if len(nameMatch) == 3 { + repo.domain = nameMatch[1] + repo.path = nameMatch[2] + } else { + repo.domain = "" + repo.path = matches[1] + } + + ref := reference{ + namedRepository: repo, + tag: matches[2], + } + if matches[3] != "" { + var err error + ref.digest, err = digest.Parse(matches[3]) + if err != nil { + return nil, err + } + } + + r := getBestReferenceType(ref) + if r == nil { + return nil, ErrNameEmpty + } + + return r, nil +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name and be in the canonical +// form, otherwise an error is returned. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: ParseNamed will not handle short digests. +func ParseNamed(s string) (Named, error) { + named, err := ParseNormalizedNamed(s) + if err != nil { + return nil, err + } + if named.String() != s { + return nil, ErrNameNotCanonical + } + return named, nil +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + if len(name) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + match := anchoredNameRegexp.FindStringSubmatch(name) + if match == nil || len(match) != 3 { + return nil, ErrReferenceInvalidFormat + } + return repository{ + domain: match[1], + path: match[2], + }, nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. +func WithTag(name Named, tag string) (NamedTagged, error) { + if !anchoredTagRegexp.MatchString(tag) { + return nil, ErrTagInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if canonical, ok := name.(Canonical); ok { + return reference{ + namedRepository: repo, + tag: tag, + digest: canonical.Digest(), + }, nil + } + return taggedReference{ + namedRepository: repo, + tag: tag, + }, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. +func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + if !anchoredDigestRegexp.MatchString(digest.String()) { + return nil, ErrDigestInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if tagged, ok := name.(Tagged); ok { + return reference{ + namedRepository: repo, + tag: tagged.Tag(), + digest: digest, + }, nil + } + return canonicalReference{ + namedRepository: repo, + digest: digest, + }, nil +} + +// TrimNamed removes any tag or digest from the named reference. 
+func TrimNamed(ref Named) Named { + domain, path := SplitHostname(ref) + return repository{ + domain: domain, + path: path, + } +} + +func getBestReferenceType(ref reference) Reference { + if ref.Name() == "" { + // Allow digest only references + if ref.digest != "" { + return digestReference(ref.digest) + } + return nil + } + if ref.tag == "" { + if ref.digest != "" { + return canonicalReference{ + namedRepository: ref.namedRepository, + digest: ref.digest, + } + } + return ref.namedRepository + } + if ref.digest == "" { + return taggedReference{ + namedRepository: ref.namedRepository, + tag: ref.tag, + } + } + + return ref +} + +type reference struct { + namedRepository + tag string + digest digest.Digest +} + +func (r reference) String() string { + return r.Name() + ":" + r.tag + "@" + r.digest.String() +} + +func (r reference) Tag() string { + return r.tag +} + +func (r reference) Digest() digest.Digest { + return r.digest +} + +type repository struct { + domain string + path string +} + +func (r repository) String() string { + return r.Name() +} + +func (r repository) Name() string { + if r.domain == "" { + return r.path + } + return r.domain + "/" + r.path +} + +func (r repository) Domain() string { + return r.domain +} + +func (r repository) Path() string { + return r.path +} + +type digestReference digest.Digest + +func (d digestReference) String() string { + return digest.Digest(d).String() +} + +func (d digestReference) Digest() digest.Digest { + return digest.Digest(d) +} + +type taggedReference struct { + namedRepository + tag string +} + +func (t taggedReference) String() string { + return t.Name() + ":" + t.tag +} + +func (t taggedReference) Tag() string { + return t.tag +} + +type canonicalReference struct { + namedRepository + digest digest.Digest +} + +func (c canonicalReference) String() string { + return c.Name() + "@" + c.digest.String() +} + +func (c canonicalReference) Digest() digest.Digest { + return c.digest +} diff --git a/vendor/github.com/docker/distribution/reference/regexp.go b/vendor/github.com/docker/distribution/reference/regexp.go new file mode 100644 index 000000000..786034932 --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/regexp.go @@ -0,0 +1,143 @@ +package reference + +import "regexp" + +var ( + // alphaNumericRegexp defines the alpha numeric atom, typically a + // component of names. This only allows lower case characters and digits. + alphaNumericRegexp = match(`[a-z0-9]+`) + + // separatorRegexp defines the separators allowed to be embedded in name + // components. This allow one period, one or two underscore and multiple + // dashes. + separatorRegexp = match(`(?:[._]|__|[-]*)`) + + // nameComponentRegexp restricts registry path component names to start + // with at least one letter or number, with following parts able to be + // separated by one period, one or two underscore and multiple dashes. + nameComponentRegexp = expression( + alphaNumericRegexp, + optional(repeated(separatorRegexp, alphaNumericRegexp))) + + // domainComponentRegexp restricts the registry domain component of a + // repository name to start with a component as defined by DomainRegexp + // and followed by an optional port. + domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) + + // DomainRegexp defines the structure of potential domain components + // that may be part of image names. This is purposely a subset of what is + // allowed by DNS to ensure backwards compatibility with Docker image + // names. 
+ DomainRegexp = expression( + domainComponentRegexp, + optional(repeated(literal(`.`), domainComponentRegexp)), + optional(literal(`:`), match(`[0-9]+`))) + + // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. + TagRegexp = match(`[\w][\w.-]{0,127}`) + + // anchoredTagRegexp matches valid tag names, anchored at the start and + // end of the matched string. + anchoredTagRegexp = anchored(TagRegexp) + + // DigestRegexp matches valid digests. + DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) + + // anchoredDigestRegexp matches valid digests, anchored at the start and + // end of the matched string. + anchoredDigestRegexp = anchored(DigestRegexp) + + // NameRegexp is the format for the name component of references. The + // regexp has capturing groups for the domain and name part omitting + // the separating forward slash from either. + NameRegexp = expression( + optional(DomainRegexp, literal(`/`)), + nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp))) + + // anchoredNameRegexp is used to parse a name value, capturing the + // domain and trailing components. + anchoredNameRegexp = anchored( + optional(capture(DomainRegexp), literal(`/`)), + capture(nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp)))) + + // ReferenceRegexp is the full supported format of a reference. The regexp + // is anchored and has capturing groups for name, tag, and digest + // components. + ReferenceRegexp = anchored(capture(NameRegexp), + optional(literal(":"), capture(TagRegexp)), + optional(literal("@"), capture(DigestRegexp))) + + // IdentifierRegexp is the format for string identifier used as a + // content addressable identifier using sha256. These identifiers + // are like digests without the algorithm, since sha256 is used. + IdentifierRegexp = match(`([a-f0-9]{64})`) + + // ShortIdentifierRegexp is the format used to represent a prefix + // of an identifier. A prefix may be used to match a sha256 identifier + // within a list of trusted identifiers. + ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) + + // anchoredIdentifierRegexp is used to check or match an + // identifier value, anchored at start and end of string. + anchoredIdentifierRegexp = anchored(IdentifierRegexp) + + // anchoredShortIdentifierRegexp is used to check if a value + // is a possible identifier prefix, anchored at start and end + // of string. + anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) +) + +// match compiles the string to a regular expression. +var match = regexp.MustCompile + +// literal compiles s into a literal regular expression, escaping any regexp +// reserved characters. +func literal(s string) *regexp.Regexp { + re := match(regexp.QuoteMeta(s)) + + if _, complete := re.LiteralPrefix(); !complete { + panic("must be a literal") + } + + return re +} + +// expression defines a full expression, where each regular expression must +// follow the previous. +func expression(res ...*regexp.Regexp) *regexp.Regexp { + var s string + for _, re := range res { + s += re.String() + } + + return match(s) +} + +// optional wraps the expression in a non-capturing group and makes the +// production optional. +func optional(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `?`) +} + +// repeated wraps the regexp in a non-capturing group to get one or more +// matches. 
+func repeated(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `+`) +} + +// group wraps the regexp in a non-capturing group. +func group(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(?:` + expression(res...).String() + `)`) +} + +// capture wraps the expression in a capturing group. +func capture(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(` + expression(res...).String() + `)`) +} + +// anchored anchors the regular expression by adding start and end delimiters. +func anchored(res ...*regexp.Regexp) *regexp.Regexp { + return match(`^` + expression(res...).String() + `$`) +} diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS new file mode 100644 index 000000000..0728bfe18 --- /dev/null +++ b/vendor/github.com/docker/docker/AUTHORS @@ -0,0 +1,2372 @@ +# File @generated by hack/generate-authors.sh. DO NOT EDIT. +# This file lists all contributors to the repository. +# See hack/generate-authors.sh to make modifications. + +Aanand Prasad +Aaron Davidson +Aaron Feng +Aaron Hnatiw +Aaron Huslage +Aaron L. Xu +Aaron Lehmann +Aaron Welch +Abel Muiño +Abhijeet Kasurde +Abhinandan Prativadi +Abhinav Ajgaonkar +Abhishek Chanda +Abhishek Sharma +Abin Shahab +Abirdcfly +Ada Mancini +Adam Avilla +Adam Dobrawy +Adam Eijdenberg +Adam Kunk +Adam Miller +Adam Mills +Adam Pointer +Adam Singer +Adam Walz +Adam Williams +Addam Hardy +Aditi Rajagopal +Aditya +Adnan Khan +Adolfo Ochagavía +Adria Casas +Adrian Moisey +Adrian Mouat +Adrian Oprea +Adrien Folie +Adrien Gallouët +Ahmed Kamal +Ahmet Alp Balkan +Aidan Feldman +Aidan Hobson Sayers +AJ Bowen +Ajey Charantimath +ajneu +Akash Gupta +Akhil Mohan +Akihiro Matsushima +Akihiro Suda +Akim Demaille +Akira Koyasu +Akshay Karle +Akshay Moghe +Al Tobey +alambike +Alan Hoyle +Alan Scherger +Alan Thompson +Albert Callarisa +Albert Zhang +Albin Kerouanton +Alec Benson +Alejandro González Hevia +Aleksa Sarai +Aleksandr Chebotov +Aleksandrs Fadins +Alena Prokharchyk +Alessandro Boch +Alessio Biancalana +Alex Chan +Alex Chen +Alex Coventry +Alex Crawford +Alex Ellis +Alex Gaynor +Alex Goodman +Alex Nordlund +Alex Olshansky +Alex Samorukov +Alex Warhawk +Alexander Artemenko +Alexander Boyd +Alexander Larsson +Alexander Midlash +Alexander Morozov +Alexander Polakov +Alexander Shopov +Alexandre Beslic +Alexandre Garnier +Alexandre González +Alexandre Jomin +Alexandru Sfirlogea +Alexei Margasov +Alexey Guskov +Alexey Kotlyarov +Alexey Shamrin +Alexis Ries +Alexis Thomas +Alfred Landrum +Ali Dehghani +Alicia Lauerman +Alihan Demir +Allen Madsen +Allen Sun +almoehi +Alvaro Saurin +Alvin Deng +Alvin Richards +amangoel +Amen Belayneh +Ameya Gawde +Amir Goldstein +Amit Bakshi +Amit Krishnan +Amit Shukla +Amr Gawish +Amy Lindburg +Anand Patil +AnandkumarPatel +Anatoly Borodin +Anca Iordache +Anchal Agrawal +Anda Xu +Anders Janmyr +Andre Dublin <81dublin@gmail.com> +Andre Granovsky +Andrea Denisse Gómez +Andrea Luzzardi +Andrea Turli +Andreas Elvers +Andreas Köhler +Andreas Savvides +Andreas Tiefenthaler +Andrei Gherzan +Andrei Ushakov +Andrei Vagin +Andrew C. 
Bodine +Andrew Clay Shafer +Andrew Duckworth +Andrew France +Andrew Gerrand +Andrew Guenther +Andrew He +Andrew Hsu +Andrew Kim +Andrew Kuklewicz +Andrew Macgregor +Andrew Macpherson +Andrew Martin +Andrew McDonnell +Andrew Munsell +Andrew Pennebaker +Andrew Po +Andrew Weiss +Andrew Williams +Andrews Medina +Andrey Kolomentsev +Andrey Petrov +Andrey Stolbovsky +André Martins +Andy Chambers +andy diller +Andy Goldstein +Andy Kipp +Andy Lindeman +Andy Rothfusz +Andy Smith +Andy Wilson +Andy Zhang +Anes Hasicic +Angel Velazquez +Anil Belur +Anil Madhavapeddy +Ankit Jain +Ankush Agarwal +Anonmily +Anran Qiao +Anshul Pundir +Anthon van der Neut +Anthony Baire +Anthony Bishopric +Anthony Dahanne +Anthony Sottile +Anton Löfgren +Anton Nikitin +Anton Polonskiy +Anton Tiurin +Antonio Murdaca +Antonis Kalipetis +Antony Messerli +Anuj Bahuguna +Anuj Varma +Anusha Ragunathan +Anyu Wang +apocas +Arash Deshmeh +ArikaChen +Arko Dasgupta +Arnaud Lefebvre +Arnaud Porterie +Arnaud Rebillout +Artem Khramov +Arthur Barr +Arthur Gautier +Artur Meyster +Arun Gupta +Asad Saeeduddin +Asbjørn Enge +Austin Vazquez +averagehuman +Avi Das +Avi Kivity +Avi Miller +Avi Vaid +ayoshitake +Azat Khuyiyakhmetov +Bao Yonglei +Bardia Keyoumarsi +Barnaby Gray +Barry Allard +Bartłomiej Piotrowski +Bastiaan Bakker +Bastien Pascard +bdevloed +Bearice Ren +Ben Bonnefoy +Ben Firshman +Ben Golub +Ben Gould +Ben Hall +Ben Langfeld +Ben Sargent +Ben Severson +Ben Toews +Ben Wiklund +Benjamin Atkin +Benjamin Baker +Benjamin Boudreau +Benjamin Böhmke +Benjamin Yolken +Benny Ng +Benoit Chesneau +Bernerd Schaefer +Bernhard M. Wiedemann +Bert Goethals +Bertrand Roussel +Bevisy Zhang +Bharath Thiruveedula +Bhiraj Butala +Bhumika Bayani +Bilal Amarni +Bill Wang +Billy Ridgway +Bily Zhang +Bin Liu +Bingshen Wang +Bjorn Neergaard +Blake Geno +Boaz Shuster +bobby abbott +Bojun Zhu +Boqin Qin +Boris Pruessmann +Boshi Lian +Bouke Haarsma +Boyd Hemphill +boynux +Bradley Cicenas +Bradley Wright +Brandon Liu +Brandon Philips +Brandon Rhodes +Brendan Dixon +Brent Salisbury +Brett Higgins +Brett Kochendorfer +Brett Milford +Brett Randall +Brian (bex) Exelbierd +Brian Bland +Brian DeHamer +Brian Dorsey +Brian Flad +Brian Goff +Brian McCallister +Brian Olsen +Brian Schwind +Brian Shumate +Brian Torres-Gil +Brian Trump +Brice Jaglin +Briehan Lombaard +Brielle Broder +Bruno Bigras +Bruno Binet +Bruno Gazzera +Bruno Renié +Bruno Tavares +Bryan Bess +Bryan Boreham +Bryan Matsuo +Bryan Murphy +Burke Libbey +Byung Kang +Caleb Spare +Calen Pennington +Cameron Boehmer +Cameron Sparr +Cameron Spear +Campbell Allen +Candid Dauth +Cao Weiwei +Carl Henrik Lunde +Carl Loa Odin +Carl X. Su +Carlo Mion +Carlos Alexandro Becker +Carlos de Paula +Carlos Sanchez +Carol Fager-Higgins +Cary +Casey Bisson +Catalin Pirvu +Ce Gao +Cedric Davies +Cezar Sa Espinola +Chad Swenson +Chance Zibolski +Chander Govindarajan +Chanhun Jeong +Chao Wang +Charles Chan +Charles Hooper +Charles Law +Charles Lindsay +Charles Merriam +Charles Sarrazin +Charles Smith +Charlie Drage +Charlie Lewis +Chase Bolt +ChaYoung You +Chee Hau Lim +Chen Chao +Chen Chuanliang +Chen Hanxiao +Chen Min +Chen Mingjie +Chen Qiu +Cheng-mean Liu +Chengfei Shang +Chengguang Xu +Chenyang Yan +chenyuzhu +Chetan Birajdar +Chewey +Chia-liang Kao +chli +Cholerae Hu +Chris Alfonso +Chris Armstrong +Chris Dias +Chris Dituri +Chris Fordham +Chris Gavin +Chris Gibson +Chris Khoo +Chris Kreussling (Flatbush Gardener) +Chris McKinnel +Chris McKinnel +Chris Price +Chris Seto +Chris Snow +Chris St. 
Pierre +Chris Stivers +Chris Swan +Chris Telfer +Chris Wahl +Chris Weyl +Chris White +Christian Becker +Christian Berendt +Christian Brauner +Christian Böhme +Christian Muehlhaeuser +Christian Persson +Christian Rotzoll +Christian Simon +Christian Stefanescu +Christoph Ziebuhr +Christophe Mehay +Christophe Troestler +Christophe Vidal +Christopher Biscardi +Christopher Crone +Christopher Currie +Christopher Jones +Christopher Latham +Christopher Rigor +Christy Norman +Chun Chen +Ciro S. Costa +Clayton Coleman +Clint Armstrong +Clinton Kitson +clubby789 +Cody Roseborough +Coenraad Loubser +Colin Dunklau +Colin Hebert +Colin Panisset +Colin Rice +Colin Walters +Collin Guarino +Colm Hally +companycy +Conor Evans +Corbin Coleman +Corey Farrell +Cory Forsyth +Cory Snider +cressie176 +Cristian Ariza +Cristian Staretu +cristiano balducci +Cristina Yenyxe Gonzalez Garcia +Cruceru Calin-Cristian +CUI Wei +cuishuang +Cuong Manh Le +Cyprian Gracz +Cyril F +Da McGrady +Daan van Berkel +Daehyeok Mun +Dafydd Crosby +dalanlan +Damian Smyth +Damien Nadé +Damien Nozay +Damjan Georgievski +Dan Anolik +Dan Buch +Dan Cotora +Dan Feldman +Dan Griffin +Dan Hirsch +Dan Keder +Dan Levy +Dan McPherson +Dan Plamadeala +Dan Stine +Dan Williams +Dani Hodovic +Dani Louca +Daniel Antlinger +Daniel Black +Daniel Dao +Daniel Exner +Daniel Farrell +Daniel Garcia +Daniel Gasienica +Daniel Grunwell +Daniel Helfand +Daniel Hiltgen +Daniel J Walsh +Daniel Menet +Daniel Mizyrycki +Daniel Nephin +Daniel Norberg +Daniel Nordberg +Daniel P. Berrangé +Daniel Robinson +Daniel S +Daniel Sweet +Daniel Von Fange +Daniel Watkins +Daniel X Moore +Daniel YC Lin +Daniel Zhang +Daniele Rondina +Danny Berger +Danny Milosavljevic +Danny Yates +Danyal Khaliq +Darren Coxall +Darren Shepherd +Darren Stahl +Dattatraya Kumbhar +Davanum Srinivas +Dave Barboza +Dave Goodchild +Dave Henderson +Dave MacDonald +Dave Tucker +David Anderson +David Bellotti +David Calavera +David Chung +David Corking +David Cramer +David Currie +David Davis +David Dooling +David Gageot +David Gebler +David Glasser +David Lawrence +David Lechner +David M. Karr +David Mackey +David Manouchehri +David Mat +David Mcanulty +David McKay +David O'Rourke +David P Hilton +David Pelaez +David R. Jenni +David Röthlisberger +David Sheets +David Sissitka +David Trott +David Wang <00107082@163.com> +David Williamson +David Xia +David Young +Davide Ceretti +Dawn Chen +dbdd +dcylabs +Debayan De +Deborah Gertrude Digges +deed02392 +Deep Debroy +Deng Guangxing +Deni Bertovic +Denis Defreyne +Denis Gladkikh +Denis Ollier +Dennis Chen +Dennis Chen +Dennis Docter +Derek +Derek +Derek Ch +Derek McGowan +Deric Crago +Deshi Xiao +Devon Estes +Devvyn Murphy +Dharmit Shah +Dhawal Yogesh Bhanushali +Dhilip Kumars +Diego Romero +Diego Siqueira +Dieter Reuter +Dillon Dixon +Dima Stopel +Dimitri John Ledkov +Dimitris Mandalidis +Dimitris Rozakis +Dimitry Andric +Dinesh Subhraveti +Ding Fei +dingwei +Diogo Monica +DiuDiugirl +Djibril Koné +Djordje Lukic +dkumor +Dmitri Logvinenko +Dmitri Shuralyov +Dmitry Demeshchuk +Dmitry Gusev +Dmitry Kononenko +Dmitry Sharshakov +Dmitry Shyshkin +Dmitry Smirnov +Dmitry V. 
Krivenok +Dmitry Vorobev +Dmytro Iakovliev +docker-unir[bot] +Dolph Mathews +Dominic Tubach +Dominic Yin +Dominik Dingel +Dominik Finkbeiner +Dominik Honnef +Don Kirkby +Don Kjer +Don Spaulding +Donald Huang +Dong Chen +Donghwa Kim +Donovan Jones +Doron Podoleanu +Doug Davis +Doug MacEachern +Doug Tangren +Douglas Curtis +Dr Nic Williams +dragon788 +Dražen Lučanin +Drew Erny +Drew Hubl +Dustin Sallings +Ed Costello +Edmund Wagner +Eiichi Tsukata +Eike Herzbach +Eivin Giske Skaaren +Eivind Uggedal +Elan Ruusamäe +Elango Sivanandam +Elena Morozova +Eli Uriegas +Elias Faxö +Elias Koromilas +Elias Probst +Elijah Zupancic +eluck +Elvir Kuric +Emil Davtyan +Emil Hernvall +Emily Maier +Emily Rose +Emir Ozer +Eng Zer Jun +Enguerran +Eohyung Lee +epeterso +Eric Barch +Eric Curtin +Eric G. Noriega +Eric Hanchrow +Eric Lee +Eric Mountain +Eric Myhre +Eric Paris +Eric Rafaloff +Eric Rosenberg +Eric Sage +Eric Soderstrom +Eric Yang +Eric-Olivier Lamey +Erica Windisch +Erich Cordoba +Erik Bray +Erik Dubbelboer +Erik Hollensbe +Erik Inge Bolsø +Erik Kristensen +Erik Sipsma +Erik St. Martin +Erik Weathers +Erno Hopearuoho +Erwin van der Koogh +Espen Suenson +Ethan Bell +Ethan Mosbaugh +Euan Harris +Euan Kemp +Eugen Krizo +Eugene Yakubovich +Evan Allrich +Evan Carmi +Evan Hazlett +Evan Krall +Evan Phoenix +Evan Wies +Evelyn Xu +Everett Toews +Evgeniy Makhrov +Evgeny Shmarnev +Evgeny Vereshchagin +Ewa Czechowska +Eystein Måløy Stenberg +ezbercih +Ezra Silvera +Fabian Kramm +Fabian Lauer +Fabian Raetz +Fabiano Rosas +Fabio Falci +Fabio Kung +Fabio Rapposelli +Fabio Rehm +Fabrizio Regini +Fabrizio Soppelsa +Faiz Khan +falmp +Fangming Fang +Fangyuan Gao <21551127@zju.edu.cn> +fanjiyun +Fareed Dudhia +Fathi Boudra +Federico Gimenez +Felipe Oliveira +Felipe Ruhland +Felix Abecassis +Felix Geisendörfer +Felix Hupfeld +Felix Rabe +Felix Ruess +Felix Schindler +Feng Yan +Fengtu Wang +Ferenc Szabo +Fernando +Fero Volar +Feroz Salam +Ferran Rodenas +Filipe Brandenburger +Filipe Oliveira +Flavio Castelli +Flavio Crisciani +Florian +Florian Klein +Florian Maier +Florian Noeding +Florian Schmaus +Florian Weingarten +Florin Asavoaie +Florin Patan +fonglh +Foysal Iqbal +Francesc Campoy +Francesco Degrassi +Francesco Mari +Francis Chuang +Francisco Carriedo +Francisco Souza +Frank Groeneveld +Frank Herrmann +Frank Macreery +Frank Rosquin +Frank Yang +Fred Lifton +Frederick F. Kautz IV +Frederico F. de Oliveira +Frederik Loeffert +Frederik Nordahl Jul Sabroe +Freek Kalter +Frieder Bluemle +frobnicaty <92033765+frobnicaty@users.noreply.github.com> +Frédéric Dalleau +Fu JinLin +Félix Baylac-Jacqué +Félix Cantournet +Gabe Rosenhouse +Gabor Nagy +Gabriel Goller +Gabriel L. 
Somlo +Gabriel Linder +Gabriel Monroy +Gabriel Nicolas Avellaneda +Gaetan de Villele +Galen Sampson +Gang Qiao +Gareth Rushgrove +Garrett Barboza +Gary Schaetz +Gaurav +Gaurav Singh +Gaël PORTAY +Genki Takiuchi +GennadySpb +Geoff Levand +Geoffrey Bachelet +Geon Kim +George Kontridze +George MacRorie +George Xie +Georgi Hristozov +Georgy Yakovlev +Gereon Frey +German DZ +Gert van Valkenhoef +Gerwim Feiken +Ghislain Bourgeois +Giampaolo Mancini +Gianluca Borello +Gildas Cuisinier +Giovan Isa Musthofa +gissehel +Giuseppe Mazzotta +Giuseppe Scrivano +Gleb Fotengauer-Malinovskiy +Gleb M Borisov +Glyn Normington +GoBella +Goffert van Gool +Goldwyn Rodrigues +Gopikannan Venugopalsamy +Gosuke Miyashita +Gou Rao +Govinda Fichtner +Grant Millar +Grant Reaber +Graydon Hoare +Greg Fausak +Greg Pflaum +Greg Stephens +Greg Thornton +Grzegorz Jaśkiewicz +Guilhem Lettron +Guilherme Salgado +Guillaume Dufour +Guillaume J. Charmes +Gunadhya S. <6939749+gunadhya@users.noreply.github.com> +Guoqiang QI +guoxiuyan +Guri +Gurjeet Singh +Guruprasad +Gustav Sinder +gwx296173 +Günter Zöchbauer +Haichao Yang +haikuoliu +haining.cao +Hakan Özler +Hamish Hutchings +Hannes Ljungberg +Hans Kristian Flaatten +Hans Rødtang +Hao Shu Wei +Hao Zhang <21521210@zju.edu.cn> +Harald Albers +Harald Niesche +Harley Laue +Harold Cooper +Harrison Turton +Harry Zhang +Harshal Patil +Harshal Patil +He Simei +He Xiaoxi +He Xin +heartlock <21521209@zju.edu.cn> +Hector Castro +Helen Xie +Henning Sprang +Hiroshi Hatake +Hiroyuki Sasagawa +Hobofan +Hollie Teal +Hong Xu +Hongbin Lu +Hongxu Jia +Honza Pokorny +Hsing-Hui Hsu +hsinko <21551195@zju.edu.cn> +Hu Keping +Hu Tao +HuanHuan Ye +Huanzhong Zhang +Huayi Zhang +Hugo Barrera +Hugo Duncan +Hugo Marisco <0x6875676f@gmail.com> +Hui Kang +Hunter Blanks +huqun +Huu Nguyen +Hyeongkyu Lee +Hyzhou Zhy +Iago López Galeiras +Ian Bishop +Ian Bull +Ian Calvert +Ian Campbell +Ian Chen +Ian Lee +Ian Main +Ian Philpot +Ian Truslove +Iavael +Icaro Seara +Ignacio Capurro +Igor Dolzhikov +Igor Karpovich +Iliana Weller +Ilkka Laukkanen +Illo Abdulrahim +Ilya Dmitrichenko +Ilya Gusev +Ilya Khlopotov +imre Fitos +inglesp +Ingo Gottwald +Innovimax +Isaac Dupree +Isabel Jimenez +Isaiah Grace +Isao Jonas +Iskander Sharipov +Ivan Babrou +Ivan Fraixedes +Ivan Grcic +Ivan Markin +J Bruni +J. Nunn +Jack Danger Canty +Jack Laxson +Jacob Atzen +Jacob Edelman +Jacob Tomlinson +Jacob Vallejo +Jacob Wen +Jaime Cepeda +Jaivish Kothari +Jake Champlin +Jake Moshenko +Jake Sanders +Jakub Drahos +Jakub Guzik +James Allen +James Carey +James Carr +James DeFelice +James Harrison Fisher +James Kyburz +James Kyle +James Lal +James Mills +James Nesbitt +James Nugent +James Sanders +James Turnbull +James Watkins-Harvey +Jamie Hannaford +Jamshid Afshar +Jan Breig +Jan Chren +Jan Götte +Jan Keromnes +Jan Koprowski +Jan Pazdziora +Jan Toebes +Jan-Gerd Tenberge +Jan-Jaap Driessen +Jana Radhakrishnan +Jannick Fahlbusch +Januar Wayong +Jared Biel +Jared Hocutt +Jaroslaw Zabiello +Jasmine Hegman +Jason A. Donenfeld +Jason Divock +Jason Giedymin +Jason Green +Jason Hall +Jason Heiss +Jason Livesay +Jason McVetta +Jason Plum +Jason Shepherd +Jason Smith +Jason Sommer +Jason Stangroome +Javier Bassi +jaxgeller +Jay +Jay Kamat +Jay Lim +Jean Rouge +Jean-Baptiste Barth +Jean-Baptiste Dalido +Jean-Christophe Berthon +Jean-Paul Calderone +Jean-Pierre Huynh +Jean-Tiare Le Bigot +Jeeva S. 
Chelladhurai +Jeff Anderson +Jeff Hajewski +Jeff Johnston +Jeff Lindsay +Jeff Mickey +Jeff Minard +Jeff Nickoloff +Jeff Silberman +Jeff Welch +Jeff Zvier +Jeffrey Bolle +Jeffrey Morgan +Jeffrey van Gogh +Jenny Gebske +Jeremy Chambers +Jeremy Grosser +Jeremy Huntwork +Jeremy Price +Jeremy Qian +Jeremy Unruh +Jeremy Yallop +Jeroen Franse +Jeroen Jacobs +Jesse Dearing +Jesse Dubay +Jessica Frazelle +Jezeniel Zapanta +Jhon Honce +Ji.Zhilong +Jian Liao +Jian Zhang +Jiang Jinyang +Jianyong Wu +Jie Luo +Jie Ma +Jihyun Hwang +Jilles Oldenbeuving +Jim Alateras +Jim Carroll +Jim Ehrismann +Jim Galasyn +Jim Lin +Jim Minter +Jim Perrin +Jimmy Cuadra +Jimmy Puckett +Jimmy Song +Jinsoo Park +Jintao Zhang +Jiri Appl +Jiri Popelka +Jiuyue Ma +Jiří Župka +Joakim Roubert +Joao Fernandes +Joao Trindade +Joe Beda +Joe Doliner +Joe Ferguson +Joe Gordon +Joe Shaw +Joe Van Dyk +Joel Friedly +Joel Handwell +Joel Hansson +Joel Wurtz +Joey Geiger +Joey Geiger +Joey Gibson +Joffrey F +Johan Euphrosine +Johan Rydberg +Johanan Lieberman +Johannes 'fish' Ziemke +John Costa +John Feminella +John Gardiner Myers +John Gossman +John Harris +John Howard +John Laswell +John Maguire +John Mulhausen +John OBrien III +John Starks +John Stephens +John Tims +John V. Martinez +John Warwick +John Willis +Jon Johnson +Jon Surrell +Jon Wedaman +Jonas Dohse +Jonas Heinrich +Jonas Pfenniger +Jonathan A. Schweder +Jonathan A. Sternberg +Jonathan Boulle +Jonathan Camp +Jonathan Choy +Jonathan Dowland +Jonathan Lebon +Jonathan Lomas +Jonathan McCrohan +Jonathan Mueller +Jonathan Pares +Jonathan Rudenberg +Jonathan Stoppani +Jonh Wendell +Joni Sar +Joost Cassee +Jordan Arentsen +Jordan Jennings +Jordan Sissel +Jordi Massaguer Pla +Jorge Marin +Jorit Kleine-Möllhoff +Jose Diaz-Gonzalez +Joseph Anthony Pasquale Holsten +Joseph Hager +Joseph Kern +Joseph Rothrock +Josh +Josh Bodah +Josh Bonczkowski +Josh Chorlton +Josh Eveleth +Josh Hawn +Josh Horwitz +Josh Poimboeuf +Josh Soref +Josh Wilson +Josiah Kiehl +José Tomás Albornoz +Joyce Jang +JP +Julian Taylor +Julien Barbier +Julien Bisconti +Julien Bordellier +Julien Dubois +Julien Kassar +Julien Maitrehenry +Julien Pervillé +Julien Pivotto +Julio Guerra +Julio Montes +Jun Du +Jun-Ru Chang +junxu +Jussi Nummelin +Justas Brazauskas +Justen Martin +Justin Cormack +Justin Force +Justin Keller <85903732+jk-vb@users.noreply.github.com> +Justin Menga +Justin Plock +Justin Simonelis +Justin Terry +Justyn Temme +Jyrki Puttonen +Jérémy Leherpeur +Jérôme Petazzoni +Jörg Thalheim +K. Heller +Kai Blin +Kai Qiang Wu (Kennan) +Kaijie Chen +Kamil Domański +Kamjar Gerami +Kanstantsin Shautsou +Kara Alexandra +Karan Lyons +Kareem Khazem +kargakis +Karl Grzeszczak +Karol Duleba +Karthik Karanth +Karthik Nayak +Kasper Fabæch Brandt +Kate Heddleston +Katie McLaughlin +Kato Kazuyoshi +Katrina Owen +Kawsar Saiyeed +Kay Yan +kayrus +Kazuhiro Sera +Kazuyoshi Kato +Ke Li +Ke Xu +Kei Ohmura +Keith Hudgins +Keli Hu +Ken Cochrane +Ken Herner +Ken ICHIKAWA +Ken Reese +Kenfe-Mickaël Laventure +Kenjiro Nakayama +Kent Johnson +Kenta Tada +Kevin "qwazerty" Houdebert +Kevin Alvarez +Kevin Burke +Kevin Clark +Kevin Feyrer +Kevin J. Lynagh +Kevin Jing Qiu +Kevin Kern +Kevin Menard +Kevin Meredith +Kevin P. 
Kucharczyk +Kevin Parsons +Kevin Richardson +Kevin Shi +Kevin Wallace +Kevin Yap +Keyvan Fatehi +kies +Kim BKC Carlbacker +Kim Eik +Kimbro Staken +Kir Kolyshkin +Kiran Gangadharan +Kirill SIbirev +knappe +Kohei Tsuruta +Koichi Shiraishi +Konrad Kleine +Konrad Ponichtera +Konstantin Gribov +Konstantin L +Konstantin Pelykh +Kostadin Plachkov +Krasi Georgiev +Krasimir Georgiev +Kris-Mikael Krister +Kristian Haugene +Kristina Zabunova +Krystian Wojcicki +Kunal Kushwaha +Kunal Tyagi +Kyle Conroy +Kyle Linden +Kyle Squizzato +Kyle Wuolle +kyu +Lachlan Coote +Lai Jiangshan +Lajos Papp +Lakshan Perera +Lalatendu Mohanty +Lance Chen +Lance Kinley +Lars Butler +Lars Kellogg-Stedman +Lars R. Damerow +Lars-Magnus Skog +Laszlo Meszaros +Laura Frank +Laurent Bernaille +Laurent Erignoux +Laurie Voss +Leandro Siqueira +Lee Calcote +Lee Chao <932819864@qq.com> +Lee, Meng-Han +Lei Gong +Lei Jitang +Leiiwang +Len Weincier +Lennie +Leo Gallucci +Leonardo Nodari +Leonardo Taccari +Leszek Kowalski +Levi Blackstone +Levi Gross +Levi Harrison +Lewis Daly +Lewis Marshall +Lewis Peckover +Li Yi +Liam Macgillavry +Liana Lo +Liang Mingqiang +Liang-Chi Hsieh +liangwei +Liao Qingwei +Lifubang +Lihua Tang +Lily Guo +limeidan +Lin Lu +LingFaKe +Linus Heckemann +Liran Tal +Liron Levin +Liu Bo +Liu Hua +liwenqi +lixiaobing10051267 +Liz Zhang +LIZAO LI +Lizzie Dixon <_@lizzie.io> +Lloyd Dewolf +Lokesh Mandvekar +longliqiang88 <394564827@qq.com> +Lorenz Leutgeb +Lorenzo Fontana +Lotus Fenn +Louis Delossantos +Louis Opter +Luca Favatella +Luca Marturana +Luca Orlandi +Luca-Bogdan Grigorescu +Lucas Chan +Lucas Chi +Lucas Molas +Lucas Silvestre +Luciano Mores +Luis Henrique Mulinari +Luis Martínez de Bartolomé Izquierdo +Luiz Svoboda +Lukas Heeren +Lukas Waslowski +lukaspustina +Lukasz Zajaczkowski +Luke Marsden +Lyn +Lynda O'Leary +Lénaïc Huard +Ma Müller +Ma Shimiao +Mabin +Madhan Raj Mookkandy +Madhav Puri +Madhu Venugopal +Mageee +Mahesh Tiyyagura +malnick +Malte Janduda +Manfred Touron +Manfred Zabarauskas +Manjunath A Kumatagi +Mansi Nahar +Manuel Meurer +Manuel Rüger +Manuel Woelker +mapk0y +Marc Abramowitz +Marc Kuo +Marc Tamsky +Marcel Edmund Franke +Marcelo Horacio Fortino +Marcelo Salazar +Marco Hennings +Marcus Cobden +Marcus Farkas +Marcus Linke +Marcus Martins +Marcus Ramberg +Marek Goldmann +Marian Marinov +Marianna Tessel +Mario Loriedo +Marius Gundersen +Marius Sturm +Marius Voila +Mark Allen +Mark Feit +Mark Jeromin +Mark McGranaghan +Mark McKinstry +Mark Milstein +Mark Oates +Mark Parker +Mark Vainomaa +Mark West +Markan Patel +Marko Mikulicic +Marko Tibold +Markus Fix +Markus Kortlang +Martijn Dwars +Martijn van Oosterhout +Martin Braun +Martin Dojcak +Martin Honermeyer +Martin Kelly +Martin Mosegaard Amdisen +Martin Muzatko +Martin Redmond +Maru Newby +Mary Anthony +Masahito Zembutsu +Masato Ohba +Masayuki Morita +Mason Malone +Mateusz Sulima +Mathias Monnerville +Mathieu Champlon +Mathieu Le Marec - Pasquet +Mathieu Parent +Mathieu Paturel +Matt Apperson +Matt Bachmann +Matt Bajor +Matt Bentley +Matt Haggard +Matt Hoyle +Matt McCormick +Matt Moore +Matt Morrison <3maven@gmail.com> +Matt Richardson +Matt Rickard +Matt Robenolt +Matt Schurenko +Matt Williams +Matthew Heon +Matthew Lapworth +Matthew Mayer +Matthew Mosesohn +Matthew Mueller +Matthew Riley +Matthias Klumpp +Matthias Kühnle +Matthias Rampke +Matthieu Fronton +Matthieu Hauglustaine +Mattias Jernberg +Mauricio Garavaglia +mauriyouth +Max Harmathy +Max Shytikov +Max Timchenko +Maxim Fedchyshyn +Maxim Ivanov +Maxim Kulkin +Maxim Treskin +Maxime 
Petazzoni +Maximiliano Maccanti +Maxwell +Meaglith Ma +meejah +Megan Kostick +Mehul Kar +Mei ChunTao +Mengdi Gao +Menghui Chen +Mert Yazıcıoğlu +mgniu +Micah Zoltu +Michael A. Smith +Michael Beskin +Michael Bridgen +Michael Brown +Michael Chiang +Michael Crosby +Michael Currie +Michael Friis +Michael Gorsuch +Michael Grauer +Michael Holzheu +Michael Hudson-Doyle +Michael Huettermann +Michael Irwin +Michael Kuehn +Michael Käufl +Michael Neale +Michael Nussbaum +Michael Prokop +Michael Scharf +Michael Spetsiotis +Michael Stapelberg +Michael Steinert +Michael Thies +Michael Weidmann +Michael West +Michael Zhao +Michal Fojtik +Michal Gebauer +Michal Jemala +Michal Kostrzewa +Michal Minář +Michal Rostecki +Michal Wieczorek +Michaël Pailloncy +Michał Czeraszkiewicz +Michał Gryko +Michał Kosek +Michiel de Jong +Mickaël Fortunato +Mickaël Remars +Miguel Angel Fernández +Miguel Morales +Miguel Perez +Mihai Borobocea +Mihuleacc Sergiu +Mikael Davranche +Mike Brown +Mike Bush +Mike Casas +Mike Chelen +Mike Danese +Mike Dillon +Mike Dougherty +Mike Estes +Mike Gaffney +Mike Goelzer +Mike Leone +Mike Lundy +Mike MacCana +Mike Naberezny +Mike Snitzer +mikelinjie <294893458@qq.com> +Mikhail Sobolev +Miklos Szegedi +Milas Bowman +Milind Chawre +Miloslav Trmač +mingqing +Mingzhen Feng +Misty Stanley-Jones +Mitch Capper +Mizuki Urushida +mlarcher +Mohammad Banikazemi +Mohammad Nasirifar +Mohammed Aaqib Ansari +Mohit Soni +Moorthy RS +Morgan Bauer +Morgante Pell +Morgy93 +Morten Siebuhr +Morton Fox +Moysés Borges +mrfly +Mrunal Patel +Muayyad Alsadi +Muhammad Zohaib Aslam +Mustafa Akın +Muthukumar R +Máximo Cuadros +Médi-Rémi Hashim +Nace Oroz +Nahum Shalman +Nakul Pathak +Nalin Dahyabhai +Nan Monnand Deng +Naoki Orii +Natalie Parker +Natanael Copa +Natasha Jarus +Nate Brennand +Nate Eagleson +Nate Jones +Nathan Carlson +Nathan Herald +Nathan Hsieh +Nathan Kleyn +Nathan LeClaire +Nathan McCauley +Nathan Williams +Naveed Jamil +Neal McBurnett +Neil Horman +Neil Peterson +Nelson Chen +Neyazul Haque +Nghia Tran +Niall O'Higgins +Nicholas E. Rabenau +Nick Adcock +Nick DeCoursin +Nick Irvine +Nick Neisen +Nick Parker +Nick Payne +Nick Russo +Nick Stenning +Nick Stinemates +Nick Wood +NickrenREN +Nicola Kabar +Nicolas Borboën +Nicolas De Loof +Nicolas Dudebout +Nicolas Goy +Nicolas Kaiser +Nicolas Sterchele +Nicolas V Castet +Nicolás Hock Isaza +Niel Drummond +Nigel Poulton +Nik Nyby +Nikhil Chawla +NikolaMandic +Nikolas Garofil +Nikolay Edigaryev +Nikolay Milovanov +Nirmal Mehta +Nishant Totla +NIWA Hideyuki +Noah Meyerhans +Noah Treuhaft +NobodyOnSE +noducks +Nolan Darilek +Noriki Nakamura +nponeccop +Nurahmadie +Nuutti Kotivuori +nzwsch +O.S. Tezer +objectified +Odin Ugedal +Oguz Bilgic +Oh Jinkyun +Ohad Schneider +ohmystack +Ole Reifschneider +Oliver Neal +Oliver Reason +Olivier Gambier +Olle Jonsson +Olli Janatuinen +Olly Pomeroy +Omri Shiv +Onur Filiz +Oriol Francès +Oscar Bonilla <6f6231@gmail.com> +Oskar Niburski +Otto Kekäläinen +Ouyang Liduo +Ovidio Mallo +Panagiotis Moustafellos +Paolo G. 
Giarrusso +Pascal +Pascal Bach +Pascal Borreli +Pascal Hartig +Patrick Böänziger +Patrick Devine +Patrick Haas +Patrick Hemmer +Patrick Stapleton +Patrik Cyvoct +pattichen +Paul "TBBle" Hampson +Paul +paul +Paul Annesley +Paul Bellamy +Paul Bowsher +Paul Furtado +Paul Hammond +Paul Jimenez +Paul Kehrer +Paul Lietar +Paul Liljenberg +Paul Morie +Paul Nasrat +Paul Weaver +Paulo Gomes +Paulo Ribeiro +Pavel Lobashov +Pavel Matěja +Pavel Pletenev +Pavel Pospisil +Pavel Sutyrin +Pavel Tikhomirov +Pavlos Ratis +Pavol Vargovcik +Pawel Konczalski +Paweł Gronowski +Peeyush Gupta +Peggy Li +Pei Su +Peng Tao +Penghan Wang +Per Weijnitz +perhapszzy@sina.com +Pete Woods +Peter Bourgon +Peter Braden +Peter Bücker +Peter Choi +Peter Dave Hello +Peter Edge +Peter Ericson +Peter Esbensen +Peter Jaffe +Peter Kang +Peter Malmgren +Peter Salvatore +Peter Volpe +Peter Waller +Petr Švihlík +Petros Angelatos +Phil +Phil Estes +Phil Sphicas +Phil Spitler +Philip Alexander Etling +Philip Monroe +Philipp Gillé +Philipp Wahala +Philipp Weissensteiner +Phillip Alexander +phineas +pidster +Piergiuliano Bossi +Pierre +Pierre Carrier +Pierre Dal-Pra +Pierre Wacrenier +Pierre-Alain RIVIERE +Piotr Bogdan +Piotr Karbowski +Porjo +Poul Kjeldager Sørensen +Pradeep Chhetri +Pradip Dhara +Pradipta Kr. Banerjee +Prasanna Gautam +Pratik Karki +Prayag Verma +Priya Wadhwa +Projjol Banerji +Przemek Hejman +Puneet Pruthi +Pure White +pysqz +Qiang Huang +Qin TianHuan +Qinglan Peng +Quan Tian +qudongfang +Quentin Brossard +Quentin Perez +Quentin Tayssier +r0n22 +Radostin Stoyanov +Rafal Jeczalik +Rafe Colton +Raghavendra K T +Raghuram Devarakonda +Raja Sami +Rajat Pandit +Rajdeep Dua +Ralf Sippl +Ralle +Ralph Bean +Ramkumar Ramachandra +Ramon Brooker +Ramon van Alteren +RaviTeja Pothana +Ray Tsang +ReadmeCritic +realityone +Recursive Madman +Reficul +Regan McCooey +Remi Rampin +Remy Suen +Renato Riccieri Santos Zannon +Renaud Gaubert +Rhys Hiltner +Ri Xu +Ricardo N Feliciano +Rich Horwood +Rich Moyse +Rich Seymour +Richard Burnison +Richard Harvey +Richard Mathie +Richard Metzler +Richard Scothern +Richo Healey +Rick Bradley +Rick van de Loo +Rick Wieman +Rik Nijessen +Riku Voipio +Riley Guerin +Ritesh H Shukla +Riyaz Faizullabhoy +Rob Cowsill <42620235+rcowsill@users.noreply.github.com> +Rob Gulewich +Rob Vesse +Robert Bachmann +Robert Bittle +Robert Obryk +Robert Schneider +Robert Shade +Robert Stern +Robert Terhaar +Robert Wallis +Robert Wang +Roberto G. Hashioka +Roberto Muñoz Fernández +Robin Naundorf +Robin Schneider +Robin Speekenbrink +Robin Thoni +robpc +Rodolfo Carvalho +Rodrigo Campos +Rodrigo Vaz +Roel Van Nyen +Roger Peppe +Rohit Jnagal +Rohit Kadam +Rohit Kapur +Rojin George +Roland Huß +Roland Kammerer +Roland Moriz +Roma Sokolov +Roman Dudin +Roman Mazur +Roman Strashkin +Roman Volosatovs +Roman Zabaluev +Ron Smits +Ron Williams +Rong Gao +Rong Zhang +Rongxiang Song +Rony Weng +root +root +root +root +Rory Hunter +Rory McCune +Ross Boucher +Rovanion Luckey +Royce Remer +Rozhnov Alexandr +Rudolph Gottesheim +Rui Cao +Rui Lopes +Ruilin Li +Runshen Zhu +Russ Magee +Ryan Abrams +Ryan Anderson +Ryan Aslett +Ryan Barry +Ryan Belgrave +Ryan Campbell +Ryan Detzel +Ryan Fowler +Ryan Liu +Ryan McLaughlin +Ryan O'Donnell +Ryan Seto +Ryan Shea +Ryan Simmen +Ryan Stelly +Ryan Thomas +Ryan Trauntvein +Ryan Wallner +Ryan Zhang +ryancooper7 +RyanDeng +Ryo Nakao +Ryoga Saito +Rémy Greinhofer +s. 
rannou +Sabin Basyal +Sachin Joshi +Sagar Hani +Sainath Grandhi +Sakeven Jiang +Salahuddin Khan +Sally O'Malley +Sam Abed +Sam Alba +Sam Bailey +Sam J Sharpe +Sam Neirinck +Sam Reis +Sam Rijs +Sam Whited +Sambuddha Basu +Sami Wagiaalla +Samuel Andaya +Samuel Dion-Girardeau +Samuel Karp +Samuel PHAN +sanchayanghosh +Sandeep Bansal +Sankar சங்கர் +Sanket Saurav +Santhosh Manohar +sapphiredev +Sargun Dhillon +Sascha Andres +Sascha Grunert +SataQiu +Satnam Singh +Satoshi Amemiya +Satoshi Tagomori +Scott Bessler +Scott Collier +Scott Johnston +Scott Percival +Scott Stamp +Scott Walls +sdreyesg +Sean Christopherson +Sean Cronin +Sean Lee +Sean McIntyre +Sean OMeara +Sean P. Kane +Sean Rodman +Sebastiaan van Steenis +Sebastiaan van Stijn +Sebastian Höffner +Sebastian Radloff +Sebastien Goasguen +Senthil Kumar Selvaraj +Senthil Kumaran +SeongJae Park +Seongyeol Lim +Serge Hallyn +Sergey Alekseev +Sergey Evstifeev +Sergii Kabashniuk +Sergio Lopez +Serhat Gülçiçek +SeungUkLee +Sevki Hasirci +Shane Canon +Shane da Silva +Shaun Kaasten +shaunol +Shawn Landden +Shawn Siefkas +shawnhe +Shayan Pooya +Shayne Wang +Shekhar Gulati +Sheng Yang +Shengbo Song +Shengjing Zhu +Shev Yan +Shih-Yuan Lee +Shihao Xia +Shijiang Wei +Shijun Qin +Shishir Mahajan +Shoubhik Bose +Shourya Sarcar +Shu-Wai Chow +shuai-z +Shukui Yang +Sian Lerk Lau +Siarhei Rasiukevich +Sidhartha Mani +sidharthamani +Silas Sewell +Silvan Jegen +Simão Reis +Simon Barendse +Simon Eskildsen +Simon Ferquel +Simon Leinen +Simon Menke +Simon Taranto +Simon Vikstrom +Sindhu S +Sjoerd Langkemper +skanehira +Smark Meng +Solganik Alexander +Solomon Hykes +Song Gao +Soshi Katsuta +Sotiris Salloumis +Soulou +Spencer Brown +Spencer Smith +Spike Curtis +Sridatta Thatipamala +Sridhar Ratnakumar +Srini Brahmaroutu +Srinivasan Srivatsan +Staf Wagemakers +Stanislav Bondarenko +Stanislav Levin +Steeve Morin +Stefan Berger +Stefan J. Wernli +Stefan Praszalowicz +Stefan S. +Stefan Scherer +Stefan Staudenmeyer +Stefan Weil +Steffen Butzer +Stephan Spindler +Stephen Benjamin +Stephen Crosby +Stephen Day +Stephen Drake +Stephen Rust +Steve Desmond +Steve Dougherty +Steve Durrheimer +Steve Francia +Steve Koch +Steven Burgess +Steven Erenst +Steven Hartland +Steven Iveson +Steven Merrill +Steven Richards +Steven Taylor +Stéphane Este-Gracias +Stig Larsson +Su Wang +Subhajit Ghosh +Sujith Haridasan +Sun Gengze <690388648@qq.com> +Sun Jianbo +Sune Keller +Sunny Gogoi +Suryakumar Sudar +Sven Dowideit +Swapnil Daingade +Sylvain Baubeau +Sylvain Bellemare +Sébastien +Sébastien HOUZÉ +Sébastien Luttringer +Sébastien Stormacq +Sören Tempel +Tabakhase +Tadej Janež +Takuto Sato +tang0th +Tangi Colin +Tatsuki Sugiura +Tatsushi Inagaki +Taylan Isikdemir +Taylor Jones +Ted M. 
Young +Tehmasp Chaudhri +Tejaswini Duggaraju +Tejesh Mehta +Terry Chu +terryding77 <550147740@qq.com> +Thatcher Peskens +theadactyl +Thell 'Bo' Fowler +Thermionix +Thiago Alves Silva +Thijs Terlouw +Thomas Bikeev +Thomas Frössman +Thomas Gazagnaire +Thomas Graf +Thomas Grainger +Thomas Hansen +Thomas Ledos +Thomas Leonard +Thomas Léveil +Thomas Orozco +Thomas Riccardi +Thomas Schroeter +Thomas Sjögren +Thomas Swift +Thomas Tanaka +Thomas Texier +Ti Zhou +Tiago Seabra +Tianon Gravi +Tianyi Wang +Tibor Vass +Tiffany Jernigan +Tiffany Low +Till Claassen +Till Wegmüller +Tim +Tim Bart +Tim Bosse +Tim Dettrick +Tim Düsterhus +Tim Hockin +Tim Potter +Tim Ruffles +Tim Smith +Tim Terhorst +Tim Wagner +Tim Wang +Tim Waugh +Tim Wraight +Tim Zju <21651152@zju.edu.cn> +timchenxiaoyu <837829664@qq.com> +timfeirg +Timo Rothenpieler +Timothy Hobbs +tjwebb123 +tobe +Tobias Bieniek +Tobias Bradtke +Tobias Gesellchen +Tobias Klauser +Tobias Munk +Tobias Pfandzelter +Tobias Schmidt +Tobias Schwab +Todd Crane +Todd Lunter +Todd Whiteman +Toli Kuznets +Tom Barlow +Tom Booth +Tom Denham +Tom Fotherby +Tom Howe +Tom Hulihan +Tom Maaswinkel +Tom Parker +Tom Sweeney +Tom Wilkie +Tom X. Tobin +Tom Zhao +Tomas Janousek +Tomas Kral +Tomas Tomecek +Tomasz Kopczynski +Tomasz Lipinski +Tomasz Nurkiewicz +Tomek Mańko +Tommaso Visconti +Tomoya Tabuchi +Tomáš Hrčka +tonic +Tonny Xu +Tony Abboud +Tony Daws +Tony Miller +toogley +Torstein Husebø +Toshiaki Makita +Tõnis Tiigi +Trace Andreason +tracylihui <793912329@qq.com> +Trapier Marshall +Travis Cline +Travis Thieman +Trent Ogren +Trevor +Trevor Pounds +Trevor Sullivan +Trishna Guha +Tristan Carel +Troy Denton +Tudor Brindus +Ty Alexander +Tycho Andersen +Tyler Brock +Tyler Brown +Tzu-Jung Lee +uhayate +Ulysse Carion +Umesh Yadav +Utz Bacher +vagrant +Vaidas Jablonskis +Valentin Kulesh +vanderliang +Velko Ivanov +Veres Lajos +Victor Algaze +Victor Coisne +Victor Costan +Victor I. 
Wood +Victor Lyuboslavsky +Victor Marmol +Victor Palma +Victor Vieux +Victoria Bialas +Vijaya Kumar K +Vikas Choudhary +Vikram bir Singh +Viktor Stanchev +Viktor Vojnovski +VinayRaghavanKS +Vincent Batts +Vincent Bernat +Vincent Boulineau +Vincent Demeester +Vincent Giersch +Vincent Mayers +Vincent Woo +Vinod Kulkarni +Vishal Doshi +Vishnu Kannan +Vitaly Ostrosablin +Vitor Monteiro +Vivek Agarwal +Vivek Dasgupta +Vivek Goyal +Vladimir Bulyga +Vladimir Kirillov +Vladimir Pouzanov +Vladimir Rutsky +Vladimir Varankin +VladimirAus +Vladislav Kolesnikov +Vlastimil Zeman +Vojtech Vitek (V-Teq) +Walter Leibbrandt +Walter Stanish +Wang Chao +Wang Guoliang +Wang Jie +Wang Long +Wang Ping +Wang Xing +Wang Yuexiao +Wang Yumu <37442693@qq.com> +wanghuaiqing +Ward Vandewege +WarheadsSE +Wassim Dhif +Wataru Ishida +Wayne Chang +Wayne Song +Weerasak Chongnguluam +Wei Fu +Wei Wu +Wei-Ting Kuo +weipeng +weiyan +Weiyang Zhu +Wen Cheng Ma +Wendel Fleming +Wenjun Tang +Wenkai Yin +wenlxie +Wenxuan Zhao +Wenyu You <21551128@zju.edu.cn> +Wenzhi Liang +Wes Morgan +Wewang Xiaorenfine +Wiktor Kwapisiewicz +Will Dietz +Will Rouesnel +Will Weaver +willhf +William Delanoue +William Henry +William Hubbs +William Martin +William Riancho +William Thurston +Wilson Júnior +Wing-Kam Wong +WiseTrem +Wolfgang Nagele +Wolfgang Powisch +Wonjun Kim +WuLonghui +xamyzhao +Xia Wu +Xian Chaobo +Xianglin Gao +Xianjie +Xianlu Bird +Xiao YongBiao +Xiao Zhang +XiaoBing Jiang +Xiaodong Liu +Xiaodong Zhang +Xiaohua Ding +Xiaoxi He +Xiaoxu Chen +Xiaoyu Zhang +xichengliudui <1693291525@qq.com> +xiekeyang +Ximo Guanter Gonzálbez +Xinbo Weng +Xinfeng Liu +Xinzi Zhou +Xiuming Chen +Xuecong Liao +xuzhaokui +Yadnyawalkya Tale +Yahya +yalpul +YAMADA Tsuyoshi +Yamasaki Masahide +Yan Feng +Yan Zhu +Yang Bai +Yang Li +Yang Pengfei +yangchenliang +Yann Autissier +Yanqiang Miao +Yao Zaiyong +Yash Murty +Yassine Tijani +Yasunori Mahata +Yazhong Liu +Yestin Sun +Yi EungJun +Yibai Zhang +Yihang Ho +Ying Li +Yohei Ueda +Yong Tang +Yongxin Li +Yongzhi Pan +Yosef Fertel +You-Sheng Yang (楊有勝) +youcai +Youcef YEKHLEF +Youfu Zhang +Yu Changchun +Yu Chengxia +Yu Peng +Yu-Ju Hong +Yuan Sun +Yuanhong Peng +Yue Zhang +Yufei Xiong +Yuhao Fang +Yuichiro Kaneko +YujiOshima +Yunxiang Huang +Yurii Rashkovskii +Yusuf Tarık Günaydın +Yves Blusseau <90z7oey02@sneakemail.com> +Yves Junqueira +Zac Dover +Zach Borboa +Zach Gershman +Zachary Jaffee +Zain Memon +Zaiste! +Zane DeGraffenried +Zefan Li +Zen Lin(Zhinan Lin) +Zhang Kun +Zhang Wei +Zhang Wentao +ZhangHang +zhangxianwei +Zhenan Ye <21551168@zju.edu.cn> +zhenghenghuo +Zhenhai Gao +Zhenkun Bi +ZhiPeng Lu +zhipengzuo +Zhou Hao +Zhoulin Xie +Zhu Guihua +Zhu Kunjia +Zhuoyun Wei +Ziheng Liu +Zilin Du +zimbatm +Ziming Dong +ZJUshuaizhou <21551191@zju.edu.cn> +zmarouf +Zoltan Tombol +Zou Yu +zqh +Zuhayr Elahi +Zunayed Ali +Álvaro Lázaro +Átila Camurça Alves +尹吉峰 +屈骏 +徐俊杰 +慕陶 +搏通 +黄艳红00139573 +정재영 diff --git a/vendor/github.com/docker/docker/LICENSE b/vendor/github.com/docker/docker/LICENSE new file mode 100644 index 000000000..6d8d58fb6 --- /dev/null +++ b/vendor/github.com/docker/docker/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2018 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE new file mode 100644 index 000000000..58b19b6d1 --- /dev/null +++ b/vendor/github.com/docker/docker/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2017 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/creack/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. 
+It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/docker/docker/api/README.md b/vendor/github.com/docker/docker/api/README.md new file mode 100644 index 000000000..f136c3433 --- /dev/null +++ b/vendor/github.com/docker/docker/api/README.md @@ -0,0 +1,42 @@ +# Working on the Engine API + +The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon. + +It consists of various components in this repository: + +- `api/swagger.yaml` A Swagger definition of the API. +- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this. +- `cli/` The command-line client. +- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs. +- `daemon/` The daemon, which serves the API. + +## Swagger definition + +The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to: + +1. Automatically generate documentation. +2. Automatically generate the Go server and client. (A work-in-progress.) +3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc. + +## Updating the API documentation + +The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, edit this file to represent the change in the documentation. + +The file is split into two main sections: + +- `definitions`, which defines re-usable objects used in requests and responses +- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable) + +To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section. + +There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](https://github.com/docker/docker/issues/27919). + +`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful when making edits to ensure you are doing the right thing. + +## Viewing the API documentation + +When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly. + +Run `make swagger-docs` and a preview will be running at `http://localhost`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation. + +The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io). 
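The README above notes that the Go client under `client/` can also be used by third-party Go programs. Purely as an illustration (not part of the vendored files), a minimal sketch that assumes the standard `github.com/docker/docker/client` package and simply asks the daemon which API version it serves might look like this:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/client"
)

func main() {
	// Build a client from the environment (DOCKER_HOST, DOCKER_API_VERSION, ...)
	// and let it negotiate the API version with the daemon instead of pinning one.
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// ServerVersion calls GET /version and reports what the daemon supports.
	v, err := cli.ServerVersion(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("daemon %s, API %s (client default %s)\n", v.Version, v.APIVersion, cli.ClientVersion())
}
```

With `WithAPIVersionNegotiation`, the client should fall back from its compiled-in default (`DefaultVersion = "1.42"` in `api/common.go` below) to whatever older version the daemon reports, rather than failing on a version mismatch.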
diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go new file mode 100644 index 000000000..bee9b875a --- /dev/null +++ b/vendor/github.com/docker/docker/api/common.go @@ -0,0 +1,11 @@ +package api // import "github.com/docker/docker/api" + +// Common constants for daemon and client. +const ( + // DefaultVersion of Current REST API + DefaultVersion = "1.42" + + // NoBaseImageSpecifier is the symbol used by the FROM + // command to specify that no base image is to be used. + NoBaseImageSpecifier = "scratch" +) diff --git a/vendor/github.com/docker/docker/api/common_unix.go b/vendor/github.com/docker/docker/api/common_unix.go new file mode 100644 index 000000000..19fc63d65 --- /dev/null +++ b/vendor/github.com/docker/docker/api/common_unix.go @@ -0,0 +1,7 @@ +//go:build !windows +// +build !windows + +package api // import "github.com/docker/docker/api" + +// MinVersion represents Minimum REST API version supported +const MinVersion = "1.12" diff --git a/vendor/github.com/docker/docker/api/common_windows.go b/vendor/github.com/docker/docker/api/common_windows.go new file mode 100644 index 000000000..590ba5479 --- /dev/null +++ b/vendor/github.com/docker/docker/api/common_windows.go @@ -0,0 +1,8 @@ +package api // import "github.com/docker/docker/api" + +// MinVersion represents Minimum REST API version supported +// Technically the first daemon API version released on Windows is v1.25 in +// engine version 1.13. However, some clients are explicitly using downlevel +// APIs (e.g. docker-compose v2.1 file format) and that is just too restrictive. +// Hence also allowing 1.24 on Windows. +const MinVersion string = "1.24" diff --git a/vendor/github.com/docker/docker/api/swagger-gen.yaml b/vendor/github.com/docker/docker/api/swagger-gen.yaml new file mode 100644 index 000000000..f07a02737 --- /dev/null +++ b/vendor/github.com/docker/docker/api/swagger-gen.yaml @@ -0,0 +1,12 @@ + +layout: + models: + - name: definition + source: asset:model + target: "{{ joinFilePath .Target .ModelPackage }}" + file_name: "{{ (snakize (pascalize .Name)) }}.go" + operations: + - name: handler + source: asset:serverOperation + target: "{{ joinFilePath .Target .APIPackage .Package }}" + file_name: "{{ (snakize (pascalize .Name)) }}.go" diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml new file mode 100644 index 000000000..afe7a8c37 --- /dev/null +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -0,0 +1,12152 @@ +# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API. +# +# This is used for generating API documentation and the types used by the +# client/server. See api/README.md for more information. +# +# Some style notes: +# - This file is used by ReDoc, which allows GitHub Flavored Markdown in +# descriptions. +# - There is no maximum line length, for ease of editing and pretty diffs. +# - operationIds are in the format "NounVerb", with a singular noun. + +swagger: "2.0" +schemes: + - "http" + - "https" +produces: + - "application/json" + - "text/plain" +consumes: + - "application/json" + - "text/plain" +basePath: "/v1.42" +info: + title: "Docker Engine API" + version: "1.42" + x-logo: + url: "https://docs.docker.com/assets/images/logo-docker-main.png" + description: | + The Engine API is an HTTP API served by Docker Engine. It is the API the + Docker client uses to communicate with the Engine, so everything the Docker + client can do can be done with the API. 
+ + Most of the client's commands map directly to API endpoints (e.g. `docker ps` + is `GET /containers/json`). The notable exception is running containers, + which consists of several API calls. + + # Errors + + The API uses standard HTTP status codes to indicate the success or failure + of the API call. The body of the response will be JSON in the following + format: + + ``` + { + "message": "page not found" + } + ``` + + # Versioning + + The API is usually changed in each release, so API calls are versioned to + ensure that clients don't break. To lock to a specific version of the API, + you prefix the URL with its version, for example, call `/v1.30/info` to use + the v1.30 version of the `/info` endpoint. If the API version specified in + the URL is not supported by the daemon, a HTTP `400 Bad Request` error message + is returned. + + If you omit the version-prefix, the current version of the API (v1.42) is used. + For example, calling `/info` is the same as calling `/v1.42/info`. Using the + API without a version-prefix is deprecated and will be removed in a future release. + + Engine releases in the near future should support this version of the API, + so your client will continue to work even if it is talking to a newer Engine. + + The API uses an open schema model, which means server may add extra properties + to responses. Likewise, the server will ignore any extra query parameters and + request body properties. When you write clients, you need to ignore additional + properties in responses to ensure they do not break when talking to newer + daemons. + + + # Authentication + + Authentication for registries is handled client side. The client has to send + authentication details to various endpoints that need to communicate with + registries, such as `POST /images/(name)/push`. These are sent as + `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5) + (JSON) string with the following structure: + + ``` + { + "username": "string", + "password": "string", + "email": "string", + "serveraddress": "string" + } + ``` + + The `serveraddress` is a domain/IP without a protocol. Throughout this + structure, double quotes are required. + + If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), + you can just pass this instead of credentials: + + ``` + { + "identitytoken": "9cbaf023786cd7..." + } + ``` + +# The tags on paths define the menu sections in the ReDoc documentation, so +# the usage of tags must make sense for that: +# - They should be singular, not plural. +# - There should not be too many tags, or the menu becomes unwieldy. For +# example, it is preferable to add a path to the "System" tag instead of +# creating a tag with a single path in it. +# - The order of tags in this list defines the order in the menu. +tags: + # Primary objects + - name: "Container" + x-displayName: "Containers" + description: | + Create and manage containers. + - name: "Image" + x-displayName: "Images" + - name: "Network" + x-displayName: "Networks" + description: | + Networks are user-defined networks that containers can be attached to. + See the [networking documentation](https://docs.docker.com/network/) + for more information. + - name: "Volume" + x-displayName: "Volumes" + description: | + Create and manage persistent storage that can be attached to containers. + - name: "Exec" + x-displayName: "Exec" + description: | + Run new commands inside running containers. 
Refer to the + [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) + for more information. + + To exec a command in a container, you first need to create an exec instance, + then start it. These two API endpoints are wrapped up in a single command-line + command, `docker exec`. + + # Swarm things + - name: "Swarm" + x-displayName: "Swarm" + description: | + Engines can be clustered together in a swarm. Refer to the + [swarm mode documentation](https://docs.docker.com/engine/swarm/) + for more information. + - name: "Node" + x-displayName: "Nodes" + description: | + Nodes are instances of the Engine participating in a swarm. Swarm mode + must be enabled for these endpoints to work. + - name: "Service" + x-displayName: "Services" + description: | + Services are the definitions of tasks to run on a swarm. Swarm mode must + be enabled for these endpoints to work. + - name: "Task" + x-displayName: "Tasks" + description: | + A task is a container running on a swarm. It is the atomic scheduling unit + of swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Secret" + x-displayName: "Secrets" + description: | + Secrets are sensitive data that can be used by services. Swarm mode must + be enabled for these endpoints to work. + - name: "Config" + x-displayName: "Configs" + description: | + Configs are application configurations that can be used by services. Swarm + mode must be enabled for these endpoints to work. + # System things + - name: "Plugin" + x-displayName: "Plugins" + - name: "System" + x-displayName: "System" + +definitions: + Port: + type: "object" + description: "An open port on a container" + required: [PrivatePort, Type] + properties: + IP: + type: "string" + format: "ip-address" + description: "Host IP address that the container's port is mapped to" + PrivatePort: + type: "integer" + format: "uint16" + x-nullable: false + description: "Port on the container" + PublicPort: + type: "integer" + format: "uint16" + description: "Port exposed on the host" + Type: + type: "string" + x-nullable: false + enum: ["tcp", "udp", "sctp"] + example: + PrivatePort: 8080 + PublicPort: 80 + Type: "tcp" + + MountPoint: + type: "object" + description: | + MountPoint represents a mount point configuration inside the container. + This is used for reporting the mountpoints in use by a container. + properties: + Type: + description: | + The mount type: + + - `bind` a mount of a file or directory from the host into the container. + - `volume` a docker volume with the given `Name`. + - `tmpfs` a `tmpfs`. + - `npipe` a named pipe from the host into the container. + - `cluster` a Swarm cluster volume + type: "string" + enum: + - "bind" + - "volume" + - "tmpfs" + - "npipe" + - "cluster" + example: "volume" + Name: + description: | + Name is the name reference to the underlying data defined by `Source` + e.g., the volume name. + type: "string" + example: "myvolume" + Source: + description: | + Source location of the mount. + + For volumes, this contains the storage location of the volume (within + `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains + the source (host) part of the bind-mount. For `tmpfs` mount points, this + field is empty. + type: "string" + example: "/var/lib/docker/volumes/myvolume/_data" + Destination: + description: | + Destination is the path relative to the container root (`/`) where + the `Source` is mounted inside the container. 
+ type: "string" + example: "/usr/share/nginx/html/" + Driver: + description: | + Driver is the volume driver used to create the volume (if it is a volume). + type: "string" + example: "local" + Mode: + description: | + Mode is a comma separated list of options supplied by the user when + creating the bind/volume mount. + + The default is platform-specific (`"z"` on Linux, empty on Windows). + type: "string" + example: "z" + RW: + description: | + Whether the mount is mounted writable (read-write). + type: "boolean" + example: true + Propagation: + description: | + Propagation describes how mounts are propagated from the host into the + mount point, and vice-versa. Refer to the [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt) + for details. This field is not used on Windows. + type: "string" + example: "" + + DeviceMapping: + type: "object" + description: "A device mapping between the host and container" + properties: + PathOnHost: + type: "string" + PathInContainer: + type: "string" + CgroupPermissions: + type: "string" + example: + PathOnHost: "/dev/deviceName" + PathInContainer: "/dev/deviceName" + CgroupPermissions: "mrw" + + DeviceRequest: + type: "object" + description: "A request for devices to be sent to device drivers" + properties: + Driver: + type: "string" + example: "nvidia" + Count: + type: "integer" + example: -1 + DeviceIDs: + type: "array" + items: + type: "string" + example: + - "0" + - "1" + - "GPU-fef8089b-4820-abfc-e83e-94318197576e" + Capabilities: + description: | + A list of capabilities; an OR list of AND lists of capabilities. + type: "array" + items: + type: "array" + items: + type: "string" + example: + # gpu AND nvidia AND compute + - ["gpu", "nvidia", "compute"] + Options: + description: | + Driver-specific options, specified as a key/value pairs. These options + are passed directly to the driver. + type: "object" + additionalProperties: + type: "string" + + ThrottleDevice: + type: "object" + properties: + Path: + description: "Device path" + type: "string" + Rate: + description: "Rate" + type: "integer" + format: "int64" + minimum: 0 + + Mount: + type: "object" + properties: + Target: + description: "Container path." + type: "string" + Source: + description: "Mount source (e.g. a volume name, a host path)." + type: "string" + Type: + description: | + The mount type. Available types: + + - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. + - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. + - `cluster` a Swarm cluster volume + type: "string" + enum: + - "bind" + - "volume" + - "tmpfs" + - "npipe" + - "cluster" + ReadOnly: + description: "Whether the mount should be read-only." + type: "boolean" + Consistency: + description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." + type: "string" + BindOptions: + description: "Optional configuration for the `bind` type." + type: "object" + properties: + Propagation: + description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." 
+ type: "string" + enum: + - "private" + - "rprivate" + - "shared" + - "rshared" + - "slave" + - "rslave" + NonRecursive: + description: "Disable recursive bind mount." + type: "boolean" + default: false + CreateMountpoint: + description: "Create mount point on host if missing" + type: "boolean" + default: false + VolumeOptions: + description: "Optional configuration for the `volume` type." + type: "object" + properties: + NoCopy: + description: "Populate volume with data from the target." + type: "boolean" + default: false + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + DriverConfig: + description: "Map of driver specific options" + type: "object" + properties: + Name: + description: "Name of the driver to use to create the volume." + type: "string" + Options: + description: "key/value map of driver specific options." + type: "object" + additionalProperties: + type: "string" + TmpfsOptions: + description: "Optional configuration for the `tmpfs` type." + type: "object" + properties: + SizeBytes: + description: "The size for the tmpfs mount in bytes." + type: "integer" + format: "int64" + Mode: + description: "The permission mode for the tmpfs mount in an integer." + type: "integer" + + RestartPolicy: + description: | + The behavior to apply when the container exits. The default is not to + restart. + + An ever increasing delay (double the previous delay, starting at 100ms) is + added before each restart to prevent flooding the server. + type: "object" + properties: + Name: + type: "string" + description: | + - Empty string means not to restart + - `no` Do not automatically restart + - `always` Always restart + - `unless-stopped` Restart always except when the user has manually stopped the container + - `on-failure` Restart only when the container exit code is non-zero + enum: + - "" + - "no" + - "always" + - "unless-stopped" + - "on-failure" + MaximumRetryCount: + type: "integer" + description: | + If `on-failure` is used, the number of times to retry before giving up. + + Resources: + description: "A container's resources (cgroups config, ulimits, etc)" + type: "object" + properties: + # Applicable to all platforms + CpuShares: + description: | + An integer value representing this container's relative CPU weight + versus other containers. + type: "integer" + Memory: + description: "Memory limit in bytes." + type: "integer" + format: "int64" + default: 0 + # Applicable to UNIX platforms + CgroupParent: + description: | + Path to `cgroups` under which the container's `cgroup` is created. If + the path is not absolute, the path is considered to be relative to the + `cgroups` path of the init process. Cgroups are created if they do not + already exist. + type: "string" + BlkioWeight: + description: "Block IO weight (relative weight)." 
+ type: "integer" + minimum: 0 + maximum: 1000 + BlkioWeightDevice: + description: | + Block IO weight (relative device weight) in the form: + + ``` + [{"Path": "device_path", "Weight": weight}] + ``` + type: "array" + items: + type: "object" + properties: + Path: + type: "string" + Weight: + type: "integer" + minimum: 0 + BlkioDeviceReadBps: + description: | + Limit read rate (bytes per second) from a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteBps: + description: | + Limit write rate (bytes per second) to a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceReadIOps: + description: | + Limit read rate (IO per second) from a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteIOps: + description: | + Limit write rate (IO per second) to a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + CpuPeriod: + description: "The length of a CPU period in microseconds." + type: "integer" + format: "int64" + CpuQuota: + description: | + Microseconds of CPU time that the container can get in a CPU period. + type: "integer" + format: "int64" + CpuRealtimePeriod: + description: | + The length of a CPU real-time period in microseconds. Set to 0 to + allocate no time allocated to real-time tasks. + type: "integer" + format: "int64" + CpuRealtimeRuntime: + description: | + The length of a CPU real-time runtime in microseconds. Set to 0 to + allocate no time allocated to real-time tasks. + type: "integer" + format: "int64" + CpusetCpus: + description: | + CPUs in which to allow execution (e.g., `0-3`, `0,1`). + type: "string" + example: "0-3" + CpusetMems: + description: | + Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only + effective on NUMA systems. + type: "string" + Devices: + description: "A list of devices to add to the container." + type: "array" + items: + $ref: "#/definitions/DeviceMapping" + DeviceCgroupRules: + description: "a list of cgroup rules to apply to the container" + type: "array" + items: + type: "string" + example: "c 13:* rwm" + DeviceRequests: + description: | + A list of requests for devices to be sent to device drivers. + type: "array" + items: + $ref: "#/definitions/DeviceRequest" + KernelMemoryTCP: + description: | + Hard limit for kernel TCP buffer memory (in bytes). Depending on the + OCI runtime in use, this option may be ignored. It is no longer supported + by the default (runc) runtime. + + This field is omitted when empty. + type: "integer" + format: "int64" + MemoryReservation: + description: "Memory soft limit in bytes." + type: "integer" + format: "int64" + MemorySwap: + description: | + Total memory limit (memory + swap). Set as `-1` to enable unlimited + swap. + type: "integer" + format: "int64" + MemorySwappiness: + description: | + Tune a container's memory swappiness behavior. Accepts an integer + between 0 and 100. + type: "integer" + format: "int64" + minimum: 0 + maximum: 100 + NanoCpus: + description: "CPU quota in units of 10-9 CPUs." + type: "integer" + format: "int64" + OomKillDisable: + description: "Disable OOM Killer for the container." 
+ type: "boolean" + Init: + description: | + Run an init inside the container that forwards signals and reaps + processes. This field is omitted if empty, and the default (as + configured on the daemon) is used. + type: "boolean" + x-nullable: true + PidsLimit: + description: | + Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` + to not change. + type: "integer" + format: "int64" + x-nullable: true + Ulimits: + description: | + A list of resource limits to set in the container. For example: + + ``` + {"Name": "nofile", "Soft": 1024, "Hard": 2048} + ``` + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + # Applicable to Windows + CpuCount: + description: | + The number of usable CPUs (Windows only). + + On Windows Server containers, the processor resource controls are + mutually exclusive. The order of precedence is `CPUCount` first, then + `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + CpuPercent: + description: | + The usable percentage of the available CPUs (Windows only). + + On Windows Server containers, the processor resource controls are + mutually exclusive. The order of precedence is `CPUCount` first, then + `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + IOMaximumIOps: + description: "Maximum IOps for the container system drive (Windows only)" + type: "integer" + format: "int64" + IOMaximumBandwidth: + description: | + Maximum IO in bytes per second for the container system drive + (Windows only). + type: "integer" + format: "int64" + + Limit: + description: | + An object describing a limit on resources which can be requested by a task. + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + Pids: + description: | + Limits the maximum number of PIDs in the container. Set `0` for unlimited. + type: "integer" + format: "int64" + default: 0 + example: 100 + + ResourceObject: + description: | + An object describing the resources which can be advertised by a node and + requested by a task. + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + GenericResources: + $ref: "#/definitions/GenericResources" + + GenericResources: + description: | + User-defined resources can be either Integer resources (e.g, `SSD=3`) or + String resources (e.g, `GPU=UUID1`). + type: "array" + items: + type: "object" + properties: + NamedResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "string" + DiscreteResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "integer" + format: "int64" + example: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + HealthConfig: + description: "A test to perform to check that the container is healthy." + type: "object" + properties: + Test: + description: | + The test to perform. 
Possible values are: + + - `[]` inherit healthcheck from image or parent image + - `["NONE"]` disable healthcheck + - `["CMD", args...]` exec arguments directly + - `["CMD-SHELL", command]` run command with system's default shell + type: "array" + items: + type: "string" + Interval: + description: | + The time to wait between checks in nanoseconds. It should be 0 or at + least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + Timeout: + description: | + The time to wait before considering the check to have hung. It should + be 0 or at least 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + Retries: + description: | + The number of consecutive failures needed to consider a container as + unhealthy. 0 means inherit. + type: "integer" + StartPeriod: + description: | + Start period for the container to initialize before starting + health-retries countdown in nanoseconds. It should be 0 or at least + 1000000 (1 ms). 0 means inherit. + type: "integer" + format: "int64" + + Health: + description: | + Health stores information about the container's healthcheck results. + type: "object" + x-nullable: true + properties: + Status: + description: | + Status is one of `none`, `starting`, `healthy` or `unhealthy` + + - "none" Indicates there is no healthcheck + - "starting" Starting indicates that the container is not yet ready + - "healthy" Healthy indicates that the container is running correctly + - "unhealthy" Unhealthy indicates that the container has a problem + type: "string" + enum: + - "none" + - "starting" + - "healthy" + - "unhealthy" + example: "healthy" + FailingStreak: + description: "FailingStreak is the number of consecutive failures" + type: "integer" + example: 0 + Log: + type: "array" + description: | + Log contains the last few results (oldest first) + items: + $ref: "#/definitions/HealthcheckResult" + + HealthcheckResult: + description: | + HealthcheckResult stores information about a single run of a healthcheck probe + type: "object" + x-nullable: true + properties: + Start: + description: | + Date and time at which this check started in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "date-time" + example: "2020-01-04T10:44:24.496525531Z" + End: + description: | + Date and time at which this check ended in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2020-01-04T10:45:21.364524523Z" + ExitCode: + description: | + ExitCode meanings: + + - `0` healthy + - `1` unhealthy + - `2` reserved (considered unhealthy) + - other values: error running probe + type: "integer" + example: 0 + Output: + description: "Output from last check" + type: "string" + + HostConfig: + description: "Container configuration that depends on the host we are running on" + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + # Applicable to all platforms + Binds: + type: "array" + description: | + A list of volume bindings for this container. Each volume binding + is a string in one of these forms: + + - `host-src:container-dest[:options]` to bind-mount a host path + into the container. Both `host-src`, and `container-dest` must + be an _absolute_ path. + - `volume-name:container-dest[:options]` to bind-mount a volume + managed by a volume driver into the container. `container-dest` + must be an _absolute_ path. 
+ + `options` is an optional, comma-delimited list of: + + - `nocopy` disables automatic copying of data from the container + path to the volume. The `nocopy` flag only applies to named volumes. + - `[ro|rw]` mounts a volume read-only or read-write, respectively. + If omitted or set to `rw`, volumes are mounted read-write. + - `[z|Z]` applies SELinux labels to allow or deny multiple containers + to read and write to the same volume. + - `z`: a _shared_ content label is applied to the content. This + label indicates that multiple containers can share the volume + content, for both reading and writing. + - `Z`: a _private unshared_ label is applied to the content. + This label indicates that only the current container can use + a private volume. Labeling systems such as SELinux require + proper labels to be placed on volume content that is mounted + into a container. Without a label, the security system can + prevent a container's processes from using the content. By + default, the labels set by the host operating system are not + modified. + - `[[r]shared|[r]slave|[r]private]` specifies mount + [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). + This only applies to bind-mounted volumes, not internal volumes + or named volumes. Mount propagation requires the source mount + point (the location where the source directory is mounted in the + host operating system) to have the correct propagation properties. + For shared volumes, the source mount point must be set to `shared`. + For slave volumes, the mount must be set to either `shared` or + `slave`. + items: + type: "string" + ContainerIDFile: + type: "string" + description: "Path to a file where the container ID is written" + LogConfig: + type: "object" + description: "The logging configuration for this container" + properties: + Type: + type: "string" + enum: + - "json-file" + - "syslog" + - "journald" + - "gelf" + - "fluentd" + - "awslogs" + - "splunk" + - "etwlogs" + - "none" + Config: + type: "object" + additionalProperties: + type: "string" + NetworkMode: + type: "string" + description: | + Network mode to use for this container. Supported standard values + are: `bridge`, `host`, `none`, and `container:`. Any + other value is taken as a custom network's name to which this + container should connect to. + PortBindings: + $ref: "#/definitions/PortMap" + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + AutoRemove: + type: "boolean" + description: | + Automatically remove the container when the container's process + exits. This has no effect if `RestartPolicy` is set. + VolumeDriver: + type: "string" + description: "Driver that this container uses to mount volumes." + VolumesFrom: + type: "array" + description: | + A list of volumes to inherit from another container, specified in + the form `[:]`. + items: + type: "string" + Mounts: + description: | + Specification for mounts to be added to the container. + type: "array" + items: + $ref: "#/definitions/Mount" + ConsoleSize: + type: "array" + description: | + Initial console size, as an `[height, width]` array. + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + + # Applicable to UNIX platforms + CapAdd: + type: "array" + description: | + A list of kernel capabilities to add to the container. Conflicts + with option 'Capabilities'. + items: + type: "string" + CapDrop: + type: "array" + description: | + A list of kernel capabilities to drop from the container. Conflicts + with option 'Capabilities'. 
+ items: + type: "string" + CgroupnsMode: + type: "string" + enum: + - "private" + - "host" + description: | + cgroup namespace mode for the container. Possible values are: + + - `"private"`: the container runs in its own private cgroup namespace + - `"host"`: use the host system's cgroup namespace + + If not specified, the daemon default is used, which can either be `"private"` + or `"host"`, depending on daemon version, kernel support and configuration. + Dns: + type: "array" + description: "A list of DNS servers for the container to use." + items: + type: "string" + DnsOptions: + type: "array" + description: "A list of DNS options." + items: + type: "string" + DnsSearch: + type: "array" + description: "A list of DNS search domains." + items: + type: "string" + ExtraHosts: + type: "array" + description: | + A list of hostnames/IP mappings to add to the container's `/etc/hosts` + file. Specified in the form `["hostname:IP"]`. + items: + type: "string" + GroupAdd: + type: "array" + description: | + A list of additional groups that the container process will run as. + items: + type: "string" + IpcMode: + type: "string" + description: | + IPC sharing mode for the container. Possible values are: + + - `"none"`: own private IPC namespace, with /dev/shm not mounted + - `"private"`: own private IPC namespace + - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers + - `"container:"`: join another (shareable) container's IPC namespace + - `"host"`: use the host system's IPC namespace + + If not specified, daemon default is used, which can either be `"private"` + or `"shareable"`, depending on daemon version and configuration. + Cgroup: + type: "string" + description: "Cgroup to use for the container." + Links: + type: "array" + description: | + A list of links for the container in the form `container_name:alias`. + items: + type: "string" + OomScoreAdj: + type: "integer" + description: | + An integer value containing the score given to the container in + order to tune OOM killer preferences. + example: 500 + PidMode: + type: "string" + description: | + Set the PID (Process) Namespace mode for the container. It can be + either: + + - `"container:"`: joins another container's PID namespace + - `"host"`: use the host's PID namespace inside the container + Privileged: + type: "boolean" + description: "Gives the container full access to the host." + PublishAllPorts: + type: "boolean" + description: | + Allocates an ephemeral host port for all of a container's + exposed ports. + + Ports are de-allocated when the container stops and allocated when + the container starts. The allocated port might be changed when + restarting the container. + + The port is selected from the ephemeral port range that depends on + the kernel. For example, on Linux the range is defined by + `/proc/sys/net/ipv4/ip_local_port_range`. + ReadonlyRootfs: + type: "boolean" + description: "Mount the container's root filesystem as read only." + SecurityOpt: + type: "array" + description: | + A list of string values to customize labels for MLS systems, such + as SELinux. + items: + type: "string" + StorageOpt: + type: "object" + description: | + Storage driver options for this container, in the form `{"size": "120G"}`. + additionalProperties: + type: "string" + Tmpfs: + type: "object" + description: | + A map of container directories which should be replaced by tmpfs + mounts, and their corresponding mount options. 
For example: + + ``` + { "/run": "rw,noexec,nosuid,size=65536k" } + ``` + additionalProperties: + type: "string" + UTSMode: + type: "string" + description: "UTS namespace to use for the container." + UsernsMode: + type: "string" + description: | + Sets the usernamespace mode for the container when usernamespace + remapping option is enabled. + ShmSize: + type: "integer" + description: | + Size of `/dev/shm` in bytes. If omitted, the system uses 64MB. + minimum: 0 + Sysctls: + type: "object" + description: | + A list of kernel parameters (sysctls) to set in the container. + For example: + + ``` + {"net.ipv4.ip_forward": "1"} + ``` + additionalProperties: + type: "string" + Runtime: + type: "string" + description: "Runtime to use with this container." + # Applicable to Windows + Isolation: + type: "string" + description: | + Isolation technology of the container. (Windows only) + enum: + - "default" + - "process" + - "hyperv" + MaskedPaths: + type: "array" + description: | + The list of paths to be masked inside the container (this overrides + the default set of paths). + items: + type: "string" + ReadonlyPaths: + type: "array" + description: | + The list of paths to be set as read-only inside the container + (this overrides the default set of paths). + items: + type: "string" + + ContainerConfig: + description: | + Configuration for a container that is portable between hosts. + + When used as `ContainerConfig` field in an image, `ContainerConfig` is an + optional field containing the configuration of the container that was last + committed when creating the image. + + Previous versions of Docker builder used this field to store build cache, + and it is not in active use anymore. + type: "object" + properties: + Hostname: + description: | + The hostname to use for the container, as a valid RFC 1123 hostname. + type: "string" + example: "439f4e91bd1d" + Domainname: + description: | + The domain name to use for the container. + type: "string" + User: + description: "The user that commands are run as inside the container." + type: "string" + AttachStdin: + description: "Whether to attach to `stdin`." + type: "boolean" + default: false + AttachStdout: + description: "Whether to attach to `stdout`." + type: "boolean" + default: true + AttachStderr: + description: "Whether to attach to `stderr`." + type: "boolean" + default: true + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"/": {}}` + type: "object" + x-nullable: true + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: { + "80/tcp": {}, + "443/tcp": {} + } + Tty: + description: | + Attach standard streams to a TTY, including `stdin` if it is not closed. + type: "boolean" + default: false + OpenStdin: + description: "Open `stdin`" + type: "boolean" + default: false + StdinOnce: + description: "Close `stdin` after one attached client disconnects" + type: "boolean" + default: false + Env: + description: | + A list of environment variables to set inside the container in the + form `["VAR=value", ...]`. A variable without `=` is removed from the + environment, rather than to have an empty value. + type: "array" + items: + type: "string" + example: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + description: | + Command to run specified as a string or an array of strings. 
+ type: "array" + items: + type: "string" + example: ["/bin/sh"] + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + default: false + example: false + x-nullable: true + Image: + description: | + The name (or reference) of the image to use when creating the container, + or which was used when the container was created. + type: "string" + example: "example-image:1.0" + Volumes: + description: | + An object mapping mount point paths inside the container to empty + objects. + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + example: "/public/" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the + entry point is reset to system default (i.e., the entry point used by + docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: "array" + items: + type: "string" + example: [] + NetworkDisabled: + description: "Disable networking for the container." + type: "boolean" + x-nullable: true + MacAddress: + description: "MAC address of the container." + type: "string" + x-nullable: true + OnBuild: + description: | + `ONBUILD` metadata that were defined in the image's `Dockerfile`. + type: "array" + x-nullable: true + items: + type: "string" + example: [] + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + StopSignal: + description: | + Signal to stop a container as a string or unsigned integer. + type: "string" + example: "SIGTERM" + x-nullable: true + StopTimeout: + description: "Timeout to stop a container in seconds." + type: "integer" + default: 10 + x-nullable: true + Shell: + description: | + Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. + type: "array" + x-nullable: true + items: + type: "string" + example: ["/bin/sh", "-c"] + + NetworkingConfig: + description: | + NetworkingConfig represents the container's networking configuration for + each of its interfaces. + It is used for the networking configs specified in the `docker create` + and `docker network connect` commands. + type: "object" + properties: + EndpointsConfig: + description: | + A mapping of network name to endpoint configuration for that network. + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + example: + # putting an example here, instead of using the example values from + # /definitions/EndpointSettings, because containers/create currently + # does not support attaching to multiple networks, so the example request + # would be confusing if it showed that multiple networks can be contained + # in the EndpointsConfig. 
+ # TODO remove once we support multiple networks on container create (see https://github.com/moby/moby/blob/07e6b843594e061f82baa5fa23c2ff7d536c2a05/daemon/create.go#L323) + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + + NetworkSettings: + description: "NetworkSettings exposes the network settings in the API" + type: "object" + properties: + Bridge: + description: Name of the network's bridge (for example, `docker0`). + type: "string" + example: "docker0" + SandboxID: + description: SandboxID uniquely represents a container's network stack. + type: "string" + example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3" + HairpinMode: + description: | + Indicates if hairpin NAT should be enabled on the virtual interface. + type: "boolean" + example: false + LinkLocalIPv6Address: + description: IPv6 unicast address using the link-local prefix. + type: "string" + example: "fe80::42:acff:fe11:1" + LinkLocalIPv6PrefixLen: + description: Prefix length of the IPv6 unicast address. + type: "integer" + example: "64" + Ports: + $ref: "#/definitions/PortMap" + SandboxKey: + description: SandboxKey identifies the sandbox + type: "string" + example: "/var/run/docker/netns/8ab54b426c38" + + # TODO is SecondaryIPAddresses actually used? + SecondaryIPAddresses: + description: "" + type: "array" + items: + $ref: "#/definitions/Address" + x-nullable: true + + # TODO is SecondaryIPv6Addresses actually used? + SecondaryIPv6Addresses: + description: "" + type: "array" + items: + $ref: "#/definitions/Address" + x-nullable: true + + # TODO properties below are part of DefaultNetworkSettings, which is + # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12 + EndpointID: + description: | + EndpointID uniquely represents a service endpoint in a Sandbox. + +
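To make the EndpointsConfig shape concrete, here is a small, hedged Go sketch that builds the NetworkingConfig portion of a container-create request; the network name and addresses simply mirror the example values above.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// EndpointIPAMConfig and EndpointSettings mirror a subset of the schema above.
type EndpointIPAMConfig struct {
	IPv4Address  string   `json:",omitempty"`
	IPv6Address  string   `json:",omitempty"`
	LinkLocalIPs []string `json:",omitempty"`
}

type EndpointSettings struct {
	IPAMConfig *EndpointIPAMConfig `json:",omitempty"`
	Aliases    []string            `json:",omitempty"`
}

func main() {
	cfg := map[string]map[string]EndpointSettings{
		"EndpointsConfig": {
			"isolated_nw": {
				IPAMConfig: &EndpointIPAMConfig{IPv4Address: "172.20.30.33"},
				Aliases:    []string{"server_x"},
			},
		},
	}
	b, _ := json.MarshalIndent(cfg, "", "  ")
	fmt.Println(string(b)) // NetworkingConfig portion of a container-create request
}
```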
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + Gateway: + description: | + Gateway address for the default "bridge" network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "172.17.0.1" + GlobalIPv6Address: + description: | + Global IPv6 address for the default "bridge" network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "2001:db8::5689" + GlobalIPv6PrefixLen: + description: | + Mask length of the global IPv6 address. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "integer" + example: 64 + IPAddress: + description: | + IPv4 address for the default "bridge" network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "172.17.0.4" + IPPrefixLen: + description: | + Mask length of the IPv4 address. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "integer" + example: 16 + IPv6Gateway: + description: | + IPv6 gateway address for this network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "2001:db8:2::100" + MacAddress: + description: | + MAC address for the container on the default "bridge" network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "02:42:ac:11:00:04" + Networks: + description: | + Information about all networks that the container is connected to. + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + + Address: + description: Address represents an IPv4 or IPv6 IP address. + type: "object" + properties: + Addr: + description: IP address. + type: "string" + PrefixLen: + description: Mask length of the IP address. + type: "integer" + + PortMap: + description: | + PortMap describes the mapping of container ports to host ports, using the + container's port-number and protocol as key in the format `/`, + for example, `80/udp`. + + If a container's port is mapped for multiple protocols, separate entries + are added to the mapping table. + type: "object" + additionalProperties: + type: "array" + x-nullable: true + items: + $ref: "#/definitions/PortBinding" + example: + "443/tcp": + - HostIp: "127.0.0.1" + HostPort: "4443" + "80/tcp": + - HostIp: "0.0.0.0" + HostPort: "80" + - HostIp: "0.0.0.0" + HostPort: "8080" + "80/udp": + - HostIp: "0.0.0.0" + HostPort: "80" + "53/udp": + - HostIp: "0.0.0.0" + HostPort: "53" + "2377/tcp": null + + PortBinding: + description: | + PortBinding represents a binding between a host IP address and a host + port. + type: "object" + properties: + HostIp: + description: "Host IP address that the container's port is mapped to." + type: "string" + example: "127.0.0.1" + HostPort: + description: "Host port number that the container's port is mapped to." + type: "string" + example: "4443" + + GraphDriverData: + description: | + Information about the storage driver used to store the container's and + image's filesystem. + type: "object" + required: [Name, Data] + properties: + Name: + description: "Name of the storage driver." + type: "string" + x-nullable: false + example: "overlay2" + Data: + description: | + Low-level storage metadata, provided as key/value pairs. + + This information is driver-specific, and depends on the storage-driver + in use, and should be used for informational purposes only. + type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: { + "MergedDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/merged", + "UpperDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/diff", + "WorkDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work" + } + + ImageInspect: + description: | + Information about an image in the local image cache. + type: "object" + properties: + Id: + description: | + ID is the content-addressable ID of an image. + + This identifier is a content-addressable digest calculated from the + image's configuration (which includes the digests of layers used by + the image). + + Note that this digest differs from the `RepoDigests` below, which + holds digests of image manifests that reference the image. + type: "string" + x-nullable: false + example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" + RepoTags: + description: | + List of image names/tags in the local image cache that reference this + image. 
+ + Multiple image tags can refer to the same image, and this list may be + empty if no tags reference the image, in which case the image is + "untagged", in which case it can still be referenced by its ID. + type: "array" + items: + type: "string" + example: + - "example:1.0" + - "example:latest" + - "example:stable" + - "internal.registry.example.com:5000/example:1.0" + RepoDigests: + description: | + List of content-addressable digests of locally available image manifests + that the image is referenced from. Multiple manifests can refer to the + same image. + + These digests are usually only available if the image was either pulled + from a registry, or if the image was pushed to a registry, which is when + the manifest is generated and its digest calculated. + type: "array" + items: + type: "string" + example: + - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" + - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" + Parent: + description: | + ID of the parent image. + + Depending on how the image was created, this field may be empty and + is only set for images that were built/created locally. This field + is empty if the image was pulled from an image registry. + type: "string" + x-nullable: false + example: "" + Comment: + description: | + Optional message that was set when committing or importing the image. + type: "string" + x-nullable: false + example: "" + Created: + description: | + Date and time at which the image was created, formatted in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + x-nullable: false + example: "2022-02-04T21:20:12.497794809Z" + Container: + description: | + The ID of the container that was used to create the image. + + Depending on how the image was created, this field may be empty. + type: "string" + x-nullable: false + example: "65974bc86f1770ae4bff79f651ebdbce166ae9aada632ee3fa9af3a264911735" + ContainerConfig: + $ref: "#/definitions/ContainerConfig" + DockerVersion: + description: | + The version of Docker that was used to build the image. + + Depending on how the image was created, this field may be empty. + type: "string" + x-nullable: false + example: "20.10.7" + Author: + description: | + Name of the author that was specified when committing the image, or as + specified through MAINTAINER (deprecated) in the Dockerfile. + type: "string" + x-nullable: false + example: "" + Config: + $ref: "#/definitions/ContainerConfig" + Architecture: + description: | + Hardware CPU architecture that the image runs on. + type: "string" + x-nullable: false + example: "arm" + Variant: + description: | + CPU architecture variant (presently ARM-only). + type: "string" + x-nullable: true + example: "v7" + Os: + description: | + Operating System the image is built to run on. + type: "string" + x-nullable: false + example: "linux" + OsVersion: + description: | + Operating System version the image is built to run on (especially + for Windows). + type: "string" + example: "" + x-nullable: true + Size: + description: | + Total size of the image including all layers it is composed of. + type: "integer" + format: "int64" + x-nullable: false + example: 1239828 + VirtualSize: + description: | + Total size of the image including all layers it is composed of. + + In versions of Docker before v1.10, this field was calculated from + the image itself and all of its parent images. 
Docker v1.10 and up + store images self-contained, and no longer use a parent-chain, making + this field an equivalent of the Size field. + + This field is kept for backward compatibility, but may be removed in + a future version of the API. + type: "integer" + format: "int64" + x-nullable: false + example: 1239828 + GraphDriver: + $ref: "#/definitions/GraphDriverData" + RootFS: + description: | + Information about the image's RootFS, including the layer IDs. + type: "object" + required: [Type] + properties: + Type: + type: "string" + x-nullable: false + example: "layers" + Layers: + type: "array" + items: + type: "string" + example: + - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" + - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + Metadata: + description: | + Additional metadata of the image in the local cache. This information + is local to the daemon, and not part of the image itself. + type: "object" + properties: + LastTagTime: + description: | + Date and time at which the image was last tagged in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + + This information is only available if the image was tagged locally, + and omitted otherwise. + type: "string" + format: "dateTime" + example: "2022-02-28T14:40:02.623929178Z" + x-nullable: true + ImageSummary: + type: "object" + required: + - Id + - ParentId + - RepoTags + - RepoDigests + - Created + - Size + - SharedSize + - VirtualSize + - Labels + - Containers + properties: + Id: + description: | + ID is the content-addressable ID of an image. + + This identifier is a content-addressable digest calculated from the + image's configuration (which includes the digests of layers used by + the image). + + Note that this digest differs from the `RepoDigests` below, which + holds digests of image manifests that reference the image. + type: "string" + x-nullable: false + example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" + ParentId: + description: | + ID of the parent image. + + Depending on how the image was created, this field may be empty and + is only set for images that were built/created locally. This field + is empty if the image was pulled from an image registry. + type: "string" + x-nullable: false + example: "" + RepoTags: + description: | + List of image names/tags in the local image cache that reference this + image. + + Multiple image tags can refer to the same image, and this list may be + empty if no tags reference the image, in which case the image is + "untagged", in which case it can still be referenced by its ID. + type: "array" + x-nullable: false + items: + type: "string" + example: + - "example:1.0" + - "example:latest" + - "example:stable" + - "internal.registry.example.com:5000/example:1.0" + RepoDigests: + description: | + List of content-addressable digests of locally available image manifests + that the image is referenced from. Multiple manifests can refer to the + same image. + + These digests are usually only available if the image was either pulled + from a registry, or if the image was pushed to a registry, which is when + the manifest is generated and its digest calculated. 
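As a usage sketch (not part of the specification), the ImageSummary objects above are what the image-list endpoint returns; the following Go snippet decodes a few of those fields over the daemon's usual unix socket. The socket path and the `http://docker` host placeholder are assumptions about a default local setup.

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
)

// ImageSummary declares only the fields this sketch actually reads.
type ImageSummary struct {
	ID       string   `json:"Id"`
	RepoTags []string `json:"RepoTags"`
	Size     int64    `json:"Size"`
	Created  int64    `json:"Created"`
}

func main() {
	// Talk to the local daemon over its default unix socket.
	client := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return net.Dial("unix", "/var/run/docker.sock")
			},
		},
	}
	resp, err := client.Get("http://docker/images/json")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var images []ImageSummary
	if err := json.NewDecoder(resp.Body).Decode(&images); err != nil {
		panic(err)
	}
	for _, img := range images {
		fmt.Println(img.ID, img.RepoTags, img.Size)
	}
}
```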
+ type: "array" + x-nullable: false + items: + type: "string" + example: + - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" + - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" + Created: + description: | + Date and time at which the image was created as a Unix timestamp + (number of seconds sinds EPOCH). + type: "integer" + x-nullable: false + example: "1644009612" + Size: + description: | + Total size of the image including all layers it is composed of. + type: "integer" + format: "int64" + x-nullable: false + example: 172064416 + SharedSize: + description: | + Total size of image layers that are shared between this image and other + images. + + This size is not calculated by default. `-1` indicates that the value + has not been set / calculated. + type: "integer" + format: "int64" + x-nullable: false + example: 1239828 + VirtualSize: + description: | + Total size of the image including all layers it is composed of. + + In versions of Docker before v1.10, this field was calculated from + the image itself and all of its parent images. Docker v1.10 and up + store images self-contained, and no longer use a parent-chain, making + this field an equivalent of the Size field. + + This field is kept for backward compatibility, but may be removed in + a future version of the API. + type: "integer" + format: "int64" + x-nullable: false + example: 172064416 + Labels: + description: "User-defined key/value metadata." + type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Containers: + description: | + Number of containers using this image. Includes both stopped and running + containers. + + This size is not calculated by default, and depends on which API endpoint + is used. `-1` indicates that the value has not been set / calculated. + x-nullable: false + type: "integer" + example: 2 + + AuthConfig: + type: "object" + properties: + username: + type: "string" + password: + type: "string" + email: + type: "string" + serveraddress: + type: "string" + example: + username: "hannibal" + password: "xxxx" + serveraddress: "https://index.docker.io/v1/" + + ProcessConfig: + type: "object" + properties: + privileged: + type: "boolean" + user: + type: "string" + tty: + type: "boolean" + entrypoint: + type: "string" + arguments: + type: "array" + items: + type: "string" + + Volume: + type: "object" + required: [Name, Driver, Mountpoint, Labels, Scope, Options] + properties: + Name: + type: "string" + description: "Name of the volume." + x-nullable: false + example: "tardis" + Driver: + type: "string" + description: "Name of the volume driver used by the volume." + x-nullable: false + example: "custom" + Mountpoint: + type: "string" + description: "Mount path of the volume on the host." + x-nullable: false + example: "/var/lib/docker/volumes/tardis" + CreatedAt: + type: "string" + format: "dateTime" + description: "Date/Time the volume was created." + example: "2016-06-07T20:31:11.853781916Z" + Status: + type: "object" + description: | + Low-level details about the volume, provided by the volume driver. + Details are returned as a map with key/value pairs: + `{"key":"value","key2":"value2"}`. + + The `Status` field is optional, and is omitted if the volume driver + does not support this feature. 
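The AuthConfig object above is normally sent base64url-encoded in the `X-Registry-Auth` header of registry-related requests. A minimal sketch, with placeholder credentials:

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// AuthConfig mirrors the schema above (lower-case JSON field names).
type AuthConfig struct {
	Username      string `json:"username"`
	Password      string `json:"password"`
	Email         string `json:"email,omitempty"`
	ServerAddress string `json:"serveraddress"`
}

func main() {
	auth := AuthConfig{
		Username:      "hannibal",
		Password:      "xxxx",
		ServerAddress: "https://index.docker.io/v1/",
	}
	raw, _ := json.Marshal(auth)
	header := base64.URLEncoding.EncodeToString(raw)
	fmt.Println("X-Registry-Auth:", header)
}
```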
+ additionalProperties: + type: "object" + example: + hello: "world" + Labels: + type: "object" + description: "User-defined key/value metadata." + x-nullable: false + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: + type: "string" + description: | + The level at which the volume exists. Either `global` for cluster-wide, + or `local` for machine level. + default: "local" + x-nullable: false + enum: ["local", "global"] + example: "local" + ClusterVolume: + $ref: "#/definitions/ClusterVolume" + Options: + type: "object" + description: | + The driver specific options used when creating the volume. + additionalProperties: + type: "string" + example: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + UsageData: + type: "object" + x-nullable: true + x-go-name: "UsageData" + required: [Size, RefCount] + description: | + Usage details about the volume. This information is used by the + `GET /system/df` endpoint, and omitted in other endpoints. + properties: + Size: + type: "integer" + format: "int64" + default: -1 + description: | + Amount of disk space used by the volume (in bytes). This information + is only available for volumes created with the `"local"` volume + driver. For volumes created with other volume drivers, this field + is set to `-1` ("not available") + x-nullable: false + RefCount: + type: "integer" + format: "int64" + default: -1 + description: | + The number of containers referencing this volume. This field + is set to `-1` if the reference-count is not available. + x-nullable: false + + VolumeCreateOptions: + description: "Volume configuration" + type: "object" + title: "VolumeConfig" + x-go-name: "CreateOptions" + properties: + Name: + description: | + The new volume's name. If not specified, Docker generates a name. + type: "string" + x-nullable: false + example: "tardis" + Driver: + description: "Name of the volume driver to use." + type: "string" + default: "local" + x-nullable: false + example: "custom" + DriverOpts: + description: | + A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. + type: "object" + additionalProperties: + type: "string" + example: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + ClusterVolumeSpec: + $ref: "#/definitions/ClusterVolumeSpec" + + VolumeListResponse: + type: "object" + title: "VolumeListResponse" + x-go-name: "ListResponse" + description: "Volume list response" + properties: + Volumes: + type: "array" + description: "List of volumes" + items: + $ref: "#/definitions/Volume" + Warnings: + type: "array" + description: | + Warnings that occurred when fetching the list of volumes. 
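As an illustration of VolumeCreateOptions, the sketch below assembles a volume-create body whose driver options mirror the tmpfs example above; all names are placeholders.

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical body for POST /volumes/create, following the schema above.
	body := map[string]interface{}{
		"Name":   "tardis",
		"Driver": "local",
		"DriverOpts": map[string]string{
			"type":   "tmpfs",
			"device": "tmpfs",
			"o":      "size=100m,uid=1000",
		},
		"Labels": map[string]string{"com.example.some-label": "some-value"},
	}
	b, _ := json.MarshalIndent(body, "", "  ")
	fmt.Println(string(b))
}
```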
+ items: + type: "string" + example: [] + + Network: + type: "object" + properties: + Name: + type: "string" + Id: + type: "string" + Created: + type: "string" + format: "dateTime" + Scope: + type: "string" + Driver: + type: "string" + EnableIPv6: + type: "boolean" + IPAM: + $ref: "#/definitions/IPAM" + Internal: + type: "boolean" + Attachable: + type: "boolean" + Ingress: + type: "boolean" + Containers: + type: "object" + additionalProperties: + $ref: "#/definitions/NetworkContainer" + Options: + type: "object" + additionalProperties: + type: "string" + Labels: + type: "object" + additionalProperties: + type: "string" + example: + Name: "net01" + Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" + Created: "2016-10-19T04:33:30.360899459Z" + Scope: "local" + Driver: "bridge" + EnableIPv6: false + IPAM: + Driver: "default" + Config: + - Subnet: "172.19.0.0/16" + Gateway: "172.19.0.1" + Options: + foo: "bar" + Internal: false + Attachable: false + Ingress: false + Containers: + 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: + Name: "test" + EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" + MacAddress: "02:42:ac:13:00:02" + IPv4Address: "172.19.0.2/16" + IPv6Address: "" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + IPAM: + type: "object" + properties: + Driver: + description: "Name of the IPAM driver to use." + type: "string" + default: "default" + Config: + description: | + List of IPAM configuration options, specified as a map: + + ``` + {"Subnet": , "IPRange": , "Gateway": , "AuxAddress": } + ``` + type: "array" + items: + $ref: "#/definitions/IPAMConfig" + Options: + description: "Driver-specific options, specified as a map." + type: "object" + additionalProperties: + type: "string" + + IPAMConfig: + type: "object" + properties: + Subnet: + type: "string" + IPRange: + type: "string" + Gateway: + type: "string" + AuxiliaryAddresses: + type: "object" + additionalProperties: + type: "string" + + NetworkContainer: + type: "object" + properties: + Name: + type: "string" + EndpointID: + type: "string" + MacAddress: + type: "string" + IPv4Address: + type: "string" + IPv6Address: + type: "string" + + BuildInfo: + type: "object" + properties: + id: + type: "string" + stream: + type: "string" + error: + type: "string" + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + aux: + $ref: "#/definitions/ImageID" + + BuildCache: + type: "object" + description: | + BuildCache contains information about a build cache record. + properties: + ID: + type: "string" + description: | + Unique ID of the build cache record. + example: "ndlpt0hhvkqcdfkputsk4cq9c" + Parent: + description: | + ID of the parent build cache record. + + > **Deprecated**: This field is deprecated, and omitted if empty. + type: "string" + x-nullable: true + example: "" + Parents: + description: | + List of parent build cache record IDs. + type: "array" + items: + type: "string" + x-nullable: true + example: ["hw53o5aio51xtltp5xjp8v7fx"] + Type: + type: "string" + description: | + Cache record type. 
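The IPAM block above is easiest to read alongside a concrete request. A minimal sketch of a network-create body, reusing the subnet and gateway from the Network example (names are placeholders):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical body for POST /networks/create with an explicit IPAM config.
	body := map[string]interface{}{
		"Name":   "net01",
		"Driver": "bridge",
		"IPAM": map[string]interface{}{
			"Driver": "default",
			"Config": []map[string]string{
				{"Subnet": "172.19.0.0/16", "Gateway": "172.19.0.1"},
			},
		},
	}
	b, _ := json.MarshalIndent(body, "", "  ")
	fmt.Println(string(b))
}
```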
+ example: "regular" + # see https://github.com/moby/buildkit/blob/fce4a32258dc9d9664f71a4831d5de10f0670677/client/diskusage.go#L75-L84 + enum: + - "internal" + - "frontend" + - "source.local" + - "source.git.checkout" + - "exec.cachemount" + - "regular" + Description: + type: "string" + description: | + Description of the build-step that produced the build cache. + example: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" + InUse: + type: "boolean" + description: | + Indicates if the build cache is in use. + example: false + Shared: + type: "boolean" + description: | + Indicates if the build cache is shared. + example: true + Size: + description: | + Amount of disk space used by the build cache (in bytes). + type: "integer" + example: 51 + CreatedAt: + description: | + Date and time at which the build cache was created in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + LastUsedAt: + description: | + Date and time at which the build cache was last used in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + x-nullable: true + example: "2017-08-09T07:09:37.632105588Z" + UsageCount: + type: "integer" + example: 26 + + ImageID: + type: "object" + description: "Image ID or Digest" + properties: + ID: + type: "string" + example: + ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" + + CreateImageInfo: + type: "object" + properties: + id: + type: "string" + error: + type: "string" + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + PushImageInfo: + type: "object" + properties: + error: + type: "string" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + ErrorDetail: + type: "object" + properties: + code: + type: "integer" + message: + type: "string" + + ProgressDetail: + type: "object" + properties: + current: + type: "integer" + total: + type: "integer" + + ErrorResponse: + description: "Represents an error." + type: "object" + required: ["message"] + properties: + message: + description: "The error message." + type: "string" + x-nullable: false + example: + message: "Something went wrong." + + IdResponse: + description: "Response to an API call that returns just an Id" + type: "object" + required: ["Id"] + properties: + Id: + description: "The id of the newly created object." + type: "string" + x-nullable: false + + EndpointSettings: + description: "Configuration for a network endpoint." + type: "object" + properties: + # Configurations + IPAMConfig: + $ref: "#/definitions/EndpointIPAMConfig" + Links: + type: "array" + items: + type: "string" + example: + - "container_1" + - "container_2" + Aliases: + type: "array" + items: + type: "string" + example: + - "server_x" + - "server_y" + + # Operational data + NetworkID: + description: | + Unique ID of the network. + type: "string" + example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a" + EndpointID: + description: | + Unique ID for the service endpoint in a Sandbox. + type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + Gateway: + description: | + Gateway address for this network. 
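Error bodies across the API use the ErrorResponse shape above. A small, illustrative Go sketch that decodes such a body (the response here is a stand-in, not a real API call):

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// ErrorResponse mirrors the schema above.
type ErrorResponse struct {
	Message string `json:"message"`
}

func main() {
	// Stand-in for the body of a failed API response.
	body := strings.NewReader(`{"message": "Something went wrong."}`)

	var apiErr ErrorResponse
	if err := json.NewDecoder(body).Decode(&apiErr); err != nil {
		panic(err)
	}
	fmt.Println("daemon returned error:", apiErr.Message)
}
```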
+ type: "string" + example: "172.17.0.1" + IPAddress: + description: | + IPv4 address. + type: "string" + example: "172.17.0.4" + IPPrefixLen: + description: | + Mask length of the IPv4 address. + type: "integer" + example: 16 + IPv6Gateway: + description: | + IPv6 gateway address. + type: "string" + example: "2001:db8:2::100" + GlobalIPv6Address: + description: | + Global IPv6 address. + type: "string" + example: "2001:db8::5689" + GlobalIPv6PrefixLen: + description: | + Mask length of the global IPv6 address. + type: "integer" + format: "int64" + example: 64 + MacAddress: + description: | + MAC address for the endpoint on this network. + type: "string" + example: "02:42:ac:11:00:04" + DriverOpts: + description: | + DriverOpts is a mapping of driver options and values. These options + are passed directly to the driver and are driver specific. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + + EndpointIPAMConfig: + description: | + EndpointIPAMConfig represents an endpoint's IPAM configuration. + type: "object" + x-nullable: true + properties: + IPv4Address: + type: "string" + example: "172.20.30.33" + IPv6Address: + type: "string" + example: "2001:db8:abcd::3033" + LinkLocalIPs: + type: "array" + items: + type: "string" + example: + - "169.254.34.68" + - "fe80::3468" + + PluginMount: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Source, Destination, Type, Options] + properties: + Name: + type: "string" + x-nullable: false + example: "some-mount" + Description: + type: "string" + x-nullable: false + example: "This is a mount that's used by the plugin." + Settable: + type: "array" + items: + type: "string" + Source: + type: "string" + example: "/var/lib/docker/plugins/" + Destination: + type: "string" + x-nullable: false + example: "/mnt/state" + Type: + type: "string" + x-nullable: false + example: "bind" + Options: + type: "array" + items: + type: "string" + example: + - "rbind" + - "rw" + + PluginDevice: + type: "object" + required: [Name, Description, Settable, Path] + x-nullable: false + properties: + Name: + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Settable: + type: "array" + items: + type: "string" + Path: + type: "string" + example: "/dev/fuse" + + PluginEnv: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + Description: + x-nullable: false + type: "string" + Settable: + type: "array" + items: + type: "string" + Value: + type: "string" + + PluginInterfaceType: + type: "object" + x-nullable: false + required: [Prefix, Capability, Version] + properties: + Prefix: + type: "string" + x-nullable: false + Capability: + type: "string" + x-nullable: false + Version: + type: "string" + x-nullable: false + + PluginPrivilege: + description: | + Describes a permission the user has to accept upon installing + the plugin. 
+ type: "object" + x-go-name: "PluginPrivilege" + properties: + Name: + type: "string" + example: "network" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - "host" + + Plugin: + description: "A plugin for the Engine API" + type: "object" + required: [Settings, Enabled, Config, Name] + properties: + Id: + type: "string" + example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + Name: + type: "string" + x-nullable: false + example: "tiborvass/sample-volume-plugin" + Enabled: + description: + True if the plugin is running. False if the plugin is not running, + only installed. + type: "boolean" + x-nullable: false + example: true + Settings: + description: "Settings that can be modified by users." + type: "object" + x-nullable: false + required: [Args, Devices, Env, Mounts] + properties: + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + type: "string" + example: + - "DEBUG=0" + Args: + type: "array" + items: + type: "string" + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PluginReference: + description: "plugin remote reference used to push/pull the plugin" + type: "string" + x-nullable: false + example: "localhost:5000/tiborvass/sample-volume-plugin:latest" + Config: + description: "The config of a plugin." + type: "object" + x-nullable: false + required: + - Description + - Documentation + - Interface + - Entrypoint + - WorkDir + - Network + - Linux + - PidHost + - PropagatedMount + - IpcHost + - Mounts + - Env + - Args + properties: + DockerVersion: + description: "Docker Version used to create the plugin" + type: "string" + x-nullable: false + example: "17.06.0-ce" + Description: + type: "string" + x-nullable: false + example: "A sample volume plugin for Docker" + Documentation: + type: "string" + x-nullable: false + example: "https://docs.docker.com/engine/extend/plugins/" + Interface: + description: "The interface between Docker and the plugin" + x-nullable: false + type: "object" + required: [Types, Socket] + properties: + Types: + type: "array" + items: + $ref: "#/definitions/PluginInterfaceType" + example: + - "docker.volumedriver/1.0" + Socket: + type: "string" + x-nullable: false + example: "plugins.sock" + ProtocolScheme: + type: "string" + example: "some.protocol/v1.0" + description: "Protocol to use for clients connecting to the plugin." 
+ enum: + - "" + - "moby.plugins.http/v1" + Entrypoint: + type: "array" + items: + type: "string" + example: + - "/usr/bin/sample-volume-plugin" + - "/data" + WorkDir: + type: "string" + x-nullable: false + example: "/bin/" + User: + type: "object" + x-nullable: false + properties: + UID: + type: "integer" + format: "uint32" + example: 1000 + GID: + type: "integer" + format: "uint32" + example: 1000 + Network: + type: "object" + x-nullable: false + required: [Type] + properties: + Type: + x-nullable: false + type: "string" + example: "host" + Linux: + type: "object" + x-nullable: false + required: [Capabilities, AllowAllDevices, Devices] + properties: + Capabilities: + type: "array" + items: + type: "string" + example: + - "CAP_SYS_ADMIN" + - "CAP_SYSLOG" + AllowAllDevices: + type: "boolean" + x-nullable: false + example: false + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PropagatedMount: + type: "string" + x-nullable: false + example: "/mnt/volumes" + IpcHost: + type: "boolean" + x-nullable: false + example: false + PidHost: + type: "boolean" + x-nullable: false + example: false + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + $ref: "#/definitions/PluginEnv" + example: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "0" + Args: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + example: "args" + Description: + x-nullable: false + type: "string" + example: "command line arguments" + Settable: + type: "array" + items: + type: "string" + Value: + type: "array" + items: + type: "string" + rootfs: + type: "object" + properties: + type: + type: "string" + example: "layers" + diff_ids: + type: "array" + items: + type: "string" + example: + - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887" + - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" + + ObjectVersion: + description: | + The version number of the object such as node, service, etc. This is needed + to avoid conflicting writes. The client must send the version number along + with the modified specification when updating these objects. + + This approach ensures safe concurrency and determinism in that the change + on the object may not be applied if the version number has changed from the + last read. In other words, if two update requests specify the same base + version, only one of the requests can succeed. As a result, two separate + update requests that happen at the same time will not unintentionally + overwrite each other. + type: "object" + properties: + Index: + type: "integer" + format: "uint64" + example: 373531 + + NodeSpec: + type: "object" + properties: + Name: + description: "Name for the node." + type: "string" + example: "my-node" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Role: + description: "Role of the node." + type: "string" + enum: + - "worker" + - "manager" + example: "manager" + Availability: + description: "Availability of the node." 
+ type: "string" + enum: + - "active" + - "pause" + - "drain" + example: "active" + example: + Availability: "active" + Name: "node-name" + Role: "manager" + Labels: + foo: "bar" + + Node: + type: "object" + properties: + ID: + type: "string" + example: "24ifsmvkjbyhk" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the node was added to the swarm in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the node was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/NodeSpec" + Description: + $ref: "#/definitions/NodeDescription" + Status: + $ref: "#/definitions/NodeStatus" + ManagerStatus: + $ref: "#/definitions/ManagerStatus" + + NodeDescription: + description: | + NodeDescription encapsulates the properties of the Node as reported by the + agent. + type: "object" + properties: + Hostname: + type: "string" + example: "bf3067039e47" + Platform: + $ref: "#/definitions/Platform" + Resources: + $ref: "#/definitions/ResourceObject" + Engine: + $ref: "#/definitions/EngineDescription" + TLSInfo: + $ref: "#/definitions/TLSInfo" + + Platform: + description: | + Platform represents the platform (Arch/OS). + type: "object" + properties: + Architecture: + description: | + Architecture represents the hardware architecture (for example, + `x86_64`). + type: "string" + example: "x86_64" + OS: + description: | + OS represents the Operating System (for example, `linux` or `windows`). + type: "string" + example: "linux" + + EngineDescription: + description: "EngineDescription provides information about an engine." + type: "object" + properties: + EngineVersion: + type: "string" + example: "17.06.0" + Labels: + type: "object" + additionalProperties: + type: "string" + example: + foo: "bar" + Plugins: + type: "array" + items: + type: "object" + properties: + Type: + type: "string" + Name: + type: "string" + example: + - Type: "Log" + Name: "awslogs" + - Type: "Log" + Name: "fluentd" + - Type: "Log" + Name: "gcplogs" + - Type: "Log" + Name: "gelf" + - Type: "Log" + Name: "journald" + - Type: "Log" + Name: "json-file" + - Type: "Log" + Name: "logentries" + - Type: "Log" + Name: "splunk" + - Type: "Log" + Name: "syslog" + - Type: "Network" + Name: "bridge" + - Type: "Network" + Name: "host" + - Type: "Network" + Name: "ipvlan" + - Type: "Network" + Name: "macvlan" + - Type: "Network" + Name: "null" + - Type: "Network" + Name: "overlay" + - Type: "Volume" + Name: "local" + - Type: "Volume" + Name: "localhost:5000/vieux/sshfs:latest" + - Type: "Volume" + Name: "vieux/sshfs:latest" + + TLSInfo: + description: | + Information about the issuer of leaf TLS certificates and the trusted root + CA certificate. + type: "object" + properties: + TrustRoot: + description: | + The root CA certificate(s) that are used to validate leaf TLS + certificates. + type: "string" + CertIssuerSubject: + description: + The base64-url-safe-encoded raw subject bytes of the issuer. + type: "string" + CertIssuerPublicKey: + description: | + The base64-url-safe-encoded raw public key bytes of the issuer. 
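ObjectVersion is the optimistic-concurrency token used by the swarm objects above. A minimal sketch of how a client echoes the Index it read back on an update; the node ID and values are taken from the examples and are placeholders:

```go
package main

import "fmt"

// ObjectVersion and Node mirror a subset of the schema above.
type ObjectVersion struct {
	Index uint64
}

type Node struct {
	ID      string
	Version ObjectVersion
	Spec    map[string]interface{}
}

func main() {
	// Normally obtained from GET /nodes/{id}.
	node := Node{ID: "24ifsmvkjbyhk", Version: ObjectVersion{Index: 373531}}

	// The modified spec is sent back together with the version that was read,
	// so the update fails if someone else changed the node in the meantime.
	url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index)
	fmt.Println("POST", url)
}
```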
+ type: "string" + example: + TrustRoot: | + -----BEGIN CERTIFICATE----- + MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw + EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 + MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH + A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf + 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB + Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO + PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz + pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H + -----END CERTIFICATE----- + CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" + CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" + + NodeStatus: + description: | + NodeStatus represents the status of a node. + + It provides the current status of the node, as seen by the manager. + type: "object" + properties: + State: + $ref: "#/definitions/NodeState" + Message: + type: "string" + example: "" + Addr: + description: "IP address of the node." + type: "string" + example: "172.17.0.2" + + NodeState: + description: "NodeState represents the state of a node." + type: "string" + enum: + - "unknown" + - "down" + - "ready" + - "disconnected" + example: "ready" + + ManagerStatus: + description: | + ManagerStatus represents the status of a manager. + + It provides the current status of a node's manager component, if the node + is a manager. + x-nullable: true + type: "object" + properties: + Leader: + type: "boolean" + default: false + example: true + Reachability: + $ref: "#/definitions/Reachability" + Addr: + description: | + The IP address and port at which the manager is reachable. + type: "string" + example: "10.0.0.46:2377" + + Reachability: + description: "Reachability represents the reachability of a node." + type: "string" + enum: + - "unknown" + - "unreachable" + - "reachable" + example: "reachable" + + SwarmSpec: + description: "User modifiable swarm configuration." + type: "object" + properties: + Name: + description: "Name of the swarm." + type: "string" + example: "default" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.corp.type: "production" + com.example.corp.department: "engineering" + Orchestration: + description: "Orchestration configuration." + type: "object" + x-nullable: true + properties: + TaskHistoryRetentionLimit: + description: | + The number of historic tasks to keep per instance or node. If + negative, never remove completed or failed tasks. + type: "integer" + format: "int64" + example: 10 + Raft: + description: "Raft configuration." + type: "object" + properties: + SnapshotInterval: + description: "The number of log entries between snapshots." + type: "integer" + format: "uint64" + example: 10000 + KeepOldSnapshots: + description: | + The number of snapshots to keep beyond the current snapshot. + type: "integer" + format: "uint64" + LogEntriesForSlowFollowers: + description: | + The number of log entries to keep around to sync up slow followers + after a snapshot is created. + type: "integer" + format: "uint64" + example: 500 + ElectionTick: + description: | + The number of ticks that a follower will wait for a message from + the leader before becoming a candidate and starting an election. + `ElectionTick` must be greater than `HeartbeatTick`. 
+ + A tick currently defaults to one second, so these translate + directly to seconds currently, but this is NOT guaranteed. + type: "integer" + example: 3 + HeartbeatTick: + description: | + The number of ticks between heartbeats. Every HeartbeatTick ticks, + the leader will send a heartbeat to the followers. + + A tick currently defaults to one second, so these translate + directly to seconds currently, but this is NOT guaranteed. + type: "integer" + example: 1 + Dispatcher: + description: "Dispatcher configuration." + type: "object" + x-nullable: true + properties: + HeartbeatPeriod: + description: | + The delay for an agent to send a heartbeat to the dispatcher. + type: "integer" + format: "int64" + example: 5000000000 + CAConfig: + description: "CA configuration." + type: "object" + x-nullable: true + properties: + NodeCertExpiry: + description: "The duration node certificates are issued for." + type: "integer" + format: "int64" + example: 7776000000000000 + ExternalCAs: + description: | + Configuration for forwarding signing requests to an external + certificate authority. + type: "array" + items: + type: "object" + properties: + Protocol: + description: | + Protocol for communication with the external CA (currently + only `cfssl` is supported). + type: "string" + enum: + - "cfssl" + default: "cfssl" + URL: + description: | + URL where certificate signing requests should be sent. + type: "string" + Options: + description: | + An object with key/value pairs that are interpreted as + protocol-specific options for the external CA driver. + type: "object" + additionalProperties: + type: "string" + CACert: + description: | + The root CA certificate (in PEM format) this external CA uses + to issue TLS certificates (assumed to be to the current swarm + root CA certificate if not provided). + type: "string" + SigningCACert: + description: | + The desired signing CA certificate for all swarm node TLS leaf + certificates, in PEM format. + type: "string" + SigningCAKey: + description: | + The desired signing CA key for all swarm node TLS leaf certificates, + in PEM format. + type: "string" + ForceRotate: + description: | + An integer whose purpose is to force swarm to generate a new + signing CA certificate and key, if none have been specified in + `SigningCACert` and `SigningCAKey` + format: "uint64" + type: "integer" + EncryptionConfig: + description: "Parameters related to encryption-at-rest." + type: "object" + properties: + AutoLockManagers: + description: | + If set, generate a key and use it to lock data stored on the + managers. + type: "boolean" + example: false + TaskDefaults: + description: "Defaults for creating tasks in this cluster." + type: "object" + properties: + LogDriver: + description: | + The log driver to use for tasks created in the orchestrator if + unspecified by a service. + + Updating this value only affects new tasks. Existing tasks continue + to use their previously configured log driver until recreated. + type: "object" + properties: + Name: + description: | + The log driver to use as a default for new tasks. + type: "string" + example: "json-file" + Options: + description: | + Driver-specific options for the selectd log driver, specified + as key/value pairs. + type: "object" + additionalProperties: + type: "string" + example: + "max-file": "10" + "max-size": "100m" + + # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but + # without `JoinTokens`. 
+ ClusterInfo: + description: | + ClusterInfo represents information about the swarm as is returned by the + "/info" endpoint. Join-tokens are not included. + x-nullable: true + type: "object" + properties: + ID: + description: "The ID of the swarm." + type: "string" + example: "abajmipo7b4xz5ip2nrla6b11" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the swarm was initialised in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the swarm was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/SwarmSpec" + TLSInfo: + $ref: "#/definitions/TLSInfo" + RootRotationInProgress: + description: | + Whether there is currently a root CA rotation in progress for the swarm + type: "boolean" + example: false + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + If no port is set or is set to 0, the default port (4789) is used. + type: "integer" + format: "uint32" + default: 4789 + example: 4789 + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global scope + networks. + type: "array" + items: + type: "string" + format: "CIDR" + example: ["10.10.0.0/16", "20.20.0.0/16"] + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created from the + default subnet pool. + type: "integer" + format: "uint32" + maximum: 29 + default: 24 + example: 24 + + JoinTokens: + description: | + JoinTokens contains the tokens workers and managers need to join the swarm. + type: "object" + properties: + Worker: + description: | + The token workers can use to join the swarm. + type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" + Manager: + description: | + The token managers can use to join the swarm. + type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + + Swarm: + type: "object" + allOf: + - $ref: "#/definitions/ClusterInfo" + - type: "object" + properties: + JoinTokens: + $ref: "#/definitions/JoinTokens" + + TaskSpec: + description: "User modifiable task configuration." + type: "object" + properties: + PluginSpec: + type: "object" + description: | + Plugin spec for the service. *(Experimental release only.)* + +
+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Name: + description: "The name or 'alias' to use for the plugin." + type: "string" + Remote: + description: "The plugin image reference to use." + type: "string" + Disabled: + description: "Disable the plugin once scheduled." + type: "boolean" + PluginPrivilege: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + ContainerSpec: + type: "object" + description: | + Container spec for the service. + +
+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Image: + description: "The image name to use for the container" + type: "string" + Labels: + description: "User-defined key/value data." + type: "object" + additionalProperties: + type: "string" + Command: + description: "The command to be run in the image." + type: "array" + items: + type: "string" + Args: + description: "Arguments to the command." + type: "array" + items: + type: "string" + Hostname: + description: | + The hostname to use for the container, as a valid + [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname. + type: "string" + Env: + description: | + A list of environment variables in the form `VAR=value`. + type: "array" + items: + type: "string" + Dir: + description: "The working directory for commands to run in." + type: "string" + User: + description: "The user inside the container." + type: "string" + Groups: + type: "array" + description: | + A list of additional groups that the container process will run as. + items: + type: "string" + Privileges: + type: "object" + description: "Security options for the container" + properties: + CredentialSpec: + type: "object" + description: "CredentialSpec for managed service account (Windows only)" + properties: + Config: + type: "string" + example: "0bt9dmxjvjiqermk6xrop3ekq" + description: | + Load credential spec from a Swarm Config with the given ID. + The specified config must also be present in the Configs + field with the Runtime property set. + +
+ + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + File: + type: "string" + example: "spec.json" + description: | + Load credential spec from this file. The file is read by + the daemon, and must be present in the `CredentialSpecs` + subdirectory in the docker data directory, which defaults + to `C:\ProgramData\Docker\` on Windows. + + For example, specifying `spec.json` loads + `C:\ProgramData\Docker\CredentialSpecs\spec.json`. + +
+ + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + Registry: + type: "string" + description: | + Load credential spec from this value in the Windows + registry. The specified registry value must be located in: + + `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` + +
+ + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + SELinuxContext: + type: "object" + description: "SELinux labels of the container" + properties: + Disable: + type: "boolean" + description: "Disable SELinux" + User: + type: "string" + description: "SELinux user label" + Role: + type: "string" + description: "SELinux role label" + Type: + type: "string" + description: "SELinux type label" + Level: + type: "string" + description: "SELinux level label" + TTY: + description: "Whether a pseudo-TTY should be allocated." + type: "boolean" + OpenStdin: + description: "Open `stdin`" + type: "boolean" + ReadOnly: + description: "Mount the container's root filesystem as read only." + type: "boolean" + Mounts: + description: | + Specification for mounts to be added to containers created as part + of the service. + type: "array" + items: + $ref: "#/definitions/Mount" + StopSignal: + description: "Signal to stop the container." + type: "string" + StopGracePeriod: + description: | + Amount of time to wait for the container to terminate before + forcefully killing it. + type: "integer" + format: "int64" + HealthCheck: + $ref: "#/definitions/HealthConfig" + Hosts: + type: "array" + description: | + A list of hostname/IP mappings to add to the container's `hosts` + file. The format of extra hosts is specified in the + [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) + man page: + + IP_address canonical_hostname [aliases...] + items: + type: "string" + DNSConfig: + description: | + Specification for DNS related configurations in resolver configuration + file (`resolv.conf`). + type: "object" + properties: + Nameservers: + description: "The IP addresses of the name servers." + type: "array" + items: + type: "string" + Search: + description: "A search list for host-name lookup." + type: "array" + items: + type: "string" + Options: + description: | + A list of internal resolver variables to be modified (e.g., + `debug`, `ndots:3`, etc.). + type: "array" + items: + type: "string" + Secrets: + description: | + Secrets contains references to zero or more secrets that will be + exposed to the service. + type: "array" + items: + type: "object" + properties: + File: + description: | + File represents a specific target that is backed by a file. + type: "object" + properties: + Name: + description: | + Name represents the final filename in the filesystem. + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." + type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + SecretID: + description: | + SecretID represents the ID of the specific secret that we're + referencing. + type: "string" + SecretName: + description: | + SecretName is the name of the secret that this references, + but this is just provided for lookup/display purposes. The + secret in the reference will be identified by its ID. + type: "string" + Configs: + description: | + Configs contains references to zero or more configs that will be + exposed to the service. + type: "array" + items: + type: "object" + properties: + File: + description: | + File represents a specific target that is backed by a file. + +


+ + > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive. + type: "object" + properties: + Name: + description: | + Name represents the final filename in the filesystem. + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." + type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + Runtime: + description: | + Runtime represents a target that is not mounted into the + container but is used by the task. + +


+ + > **Note**: `Configs.File` and `Configs.Runtime` are mutually + > exclusive. + type: "object" + ConfigID: + description: | + ConfigID represents the ID of the specific config that we're + referencing. + type: "string" + ConfigName: + description: | + ConfigName is the name of the config that this references, + but this is just provided for lookup/display purposes. The + config in the reference will be identified by its ID. + type: "string" + Isolation: + type: "string" + description: | + Isolation technology of the containers running the service. + (Windows only) + enum: + - "default" + - "process" + - "hyperv" + Init: + description: | + Run an init inside the container that forwards signals and reaps + processes. This field is omitted if empty, and the default (as + configured on the daemon) is used. + type: "boolean" + x-nullable: true + Sysctls: + description: | + Set kernel namespaced parameters (sysctls) in the container. + The Sysctls option on services accepts the same sysctls as + are supported on containers. Note that while the same sysctls are + supported, no guarantees or checks are made about their + suitability for a clustered environment, and it's up to the user + to determine whether a given sysctl will work properly in a + Service. + type: "object" + additionalProperties: + type: "string" + # This option is not used by Windows containers + CapabilityAdd: + type: "array" + description: | + A list of kernel capabilities to add to the default set + for the container. + items: + type: "string" + example: + - "CAP_NET_RAW" + - "CAP_SYS_ADMIN" + - "CAP_SYS_CHROOT" + - "CAP_SYSLOG" + CapabilityDrop: + type: "array" + description: | + A list of kernel capabilities to drop from the default set + for the container. + items: + type: "string" + example: + - "CAP_NET_RAW" + Ulimits: + description: | + A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}` + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + NetworkAttachmentSpec: + description: | + Read-only spec type for non-swarm containers attached to swarm overlay + networks. + +
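As an aside on how the `ContainerSpec` fields above (`Sysctls`, `CapabilityAdd`, `CapabilityDrop`, `Ulimits`) are used in practice: the short Go sketch below builds a ServiceSpec-shaped JSON payload from those field names and prints it. It is illustrative only and not part of the spec; the image name, sysctl value, capability choices, and the suggestion of posting the result to the service-create endpoint are assumptions for the example.

```go
// Illustrative sketch only: assemble a ServiceSpec-shaped payload using
// the ContainerSpec fields described above and print it as JSON.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	spec := map[string]interface{}{
		"Name": "web",
		"TaskTemplate": map[string]interface{}{
			"ContainerSpec": map[string]interface{}{
				"Image":          "nginx:alpine",
				"Sysctls":        map[string]string{"net.core.somaxconn": "1024"},
				"CapabilityAdd":  []string{"CAP_NET_RAW"},
				"CapabilityDrop": []string{"CAP_SYS_ADMIN"},
				"Ulimits": []map[string]interface{}{
					{"Name": "nofile", "Soft": 1024, "Hard": 2048},
				},
			},
		},
		"Mode": map[string]interface{}{
			"Replicated": map[string]interface{}{"Replicas": 2},
		},
	}
	body, _ := json.MarshalIndent(spec, "", "  ")
	// The resulting JSON body could then be POSTed to the service-create endpoint.
	fmt.Println(string(body))
}
```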


+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + type: "object" + properties: + ContainerID: + description: "ID of the container represented by this task" + type: "string" + Resources: + description: | + Resource requirements which apply to each individual container created + as part of the service. + type: "object" + properties: + Limits: + description: "Define resources limits." + $ref: "#/definitions/Limit" + Reservations: + description: "Define resources reservation." + $ref: "#/definitions/ResourceObject" + RestartPolicy: + description: | + Specification for the restart policy which applies to containers + created as part of this service. + type: "object" + properties: + Condition: + description: "Condition for restart." + type: "string" + enum: + - "none" + - "on-failure" + - "any" + Delay: + description: "Delay between restart attempts." + type: "integer" + format: "int64" + MaxAttempts: + description: | + Maximum attempts to restart a given container before giving up + (default value is 0, which is ignored). + type: "integer" + format: "int64" + default: 0 + Window: + description: | + Windows is the time window used to evaluate the restart policy + (default value is 0, which is unbounded). + type: "integer" + format: "int64" + default: 0 + Placement: + type: "object" + properties: + Constraints: + description: | + An array of constraint expressions to limit the set of nodes where + a task can be scheduled. Constraint expressions can either use a + _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find + nodes that satisfy every expression (AND match). Constraints can + match node or Docker Engine labels as follows: + + node attribute | matches | example + ---------------------|--------------------------------|----------------------------------------------- + `node.id` | Node ID | `node.id==2ivku8v2gvtg4` + `node.hostname` | Node hostname | `node.hostname!=node-2` + `node.role` | Node role (`manager`/`worker`) | `node.role==manager` + `node.platform.os` | Node operating system | `node.platform.os==windows` + `node.platform.arch` | Node architecture | `node.platform.arch==x86_64` + `node.labels` | User-defined node labels | `node.labels.security==high` + `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-14.04` + + `engine.labels` apply to Docker Engine labels like operating system, + drivers, etc. Swarm administrators add `node.labels` for operational + purposes by using the [`node update endpoint`](#operation/NodeUpdate). + + type: "array" + items: + type: "string" + example: + - "node.hostname!=node3.corp.example.com" + - "node.role!=manager" + - "node.labels.type==production" + - "node.platform.os==linux" + - "node.platform.arch==x86_64" + Preferences: + description: | + Preferences provide a way to make the scheduler aware of factors + such as topology. They are provided in order from highest to + lowest precedence. + type: "array" + items: + type: "object" + properties: + Spread: + type: "object" + properties: + SpreadDescriptor: + description: | + label descriptor, such as `engine.labels.az`. 
+ type: "string" + example: + - Spread: + SpreadDescriptor: "node.labels.datacenter" + - Spread: + SpreadDescriptor: "node.labels.rack" + MaxReplicas: + description: | + Maximum number of replicas for per node (default value is 0, which + is unlimited) + type: "integer" + format: "int64" + default: 0 + Platforms: + description: | + Platforms stores all the platforms that the service's image can + run on. This field is used in the platform filter for scheduling. + If empty, then the platform filter is off, meaning there are no + scheduling restrictions. + type: "array" + items: + $ref: "#/definitions/Platform" + ForceUpdate: + description: | + A counter that triggers an update even if no relevant parameters have + been changed. + type: "integer" + Runtime: + description: | + Runtime is the type of runtime specified for the task executor. + type: "string" + Networks: + description: "Specifies which networks the service should attach to." + type: "array" + items: + $ref: "#/definitions/NetworkAttachmentConfig" + LogDriver: + description: | + Specifies the log driver to use for tasks created from this spec. If + not present, the default one for the swarm will be used, finally + falling back to the engine default if not specified. + type: "object" + properties: + Name: + type: "string" + Options: + type: "object" + additionalProperties: + type: "string" + + TaskState: + type: "string" + enum: + - "new" + - "allocated" + - "pending" + - "assigned" + - "accepted" + - "preparing" + - "ready" + - "starting" + - "running" + - "complete" + - "shutdown" + - "failed" + - "rejected" + - "remove" + - "orphaned" + + Task: + type: "object" + properties: + ID: + description: "The ID of the task." + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Name: + description: "Name of the task." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Spec: + $ref: "#/definitions/TaskSpec" + ServiceID: + description: "The ID of the service this task is part of." + type: "string" + Slot: + type: "integer" + NodeID: + description: "The ID of the node that this task is on." + type: "string" + AssignedGenericResources: + $ref: "#/definitions/GenericResources" + Status: + type: "object" + properties: + Timestamp: + type: "string" + format: "dateTime" + State: + $ref: "#/definitions/TaskState" + Message: + type: "string" + Err: + type: "string" + ContainerStatus: + type: "object" + properties: + ContainerID: + type: "string" + PID: + type: "integer" + ExitCode: + type: "integer" + DesiredState: + $ref: "#/definitions/TaskState" + JobIteration: + description: | + If the Service this Task belongs to is a job-mode service, contains + the JobIteration of the Service this Task was created for. Absent if + the Task was created for a Replicated or Global Service. 
+ $ref: "#/definitions/ObjectVersion" + example: + ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + AssignedGenericResources: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + ServiceSpec: + description: "User modifiable configuration for a service." + type: object + properties: + Name: + description: "Name of the service." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + TaskTemplate: + $ref: "#/definitions/TaskSpec" + Mode: + description: "Scheduling mode for the service." + type: "object" + properties: + Replicated: + type: "object" + properties: + Replicas: + type: "integer" + format: "int64" + Global: + type: "object" + ReplicatedJob: + description: | + The mode used for services with a finite number of tasks that run + to a completed state. + type: "object" + properties: + MaxConcurrent: + description: | + The maximum number of replicas to run simultaneously. + type: "integer" + format: "int64" + default: 1 + TotalCompletions: + description: | + The total number of replicas desired to reach the Completed + state. If unset, will default to the value of `MaxConcurrent` + type: "integer" + format: "int64" + GlobalJob: + description: | + The mode used for services which run a task to the completed state + on each valid node. + type: "object" + UpdateConfig: + description: "Specification for the update strategy of the service." + type: "object" + properties: + Parallelism: + description: | + Maximum number of tasks to be updated in one iteration (0 means + unlimited parallelism). + type: "integer" + format: "int64" + Delay: + description: "Amount of time between updates, in nanoseconds." + type: "integer" + format: "int64" + FailureAction: + description: | + Action to take if an updated task fails to run, or stops running + during the update. + type: "string" + enum: + - "continue" + - "pause" + - "rollback" + Monitor: + description: | + Amount of time to monitor each updated task for failures, in + nanoseconds. 
+ type: "integer" + format: "int64" + MaxFailureRatio: + description: | + The fraction of tasks that may fail during an update before the + failure action is invoked, specified as a floating point number + between 0 and 1. + type: "number" + default: 0 + Order: + description: | + The order of operations when rolling out an updated task. Either + the old task is shut down before the new task is started, or the + new task is started before the old task is shut down. + type: "string" + enum: + - "stop-first" + - "start-first" + RollbackConfig: + description: "Specification for the rollback strategy of the service." + type: "object" + properties: + Parallelism: + description: | + Maximum number of tasks to be rolled back in one iteration (0 means + unlimited parallelism). + type: "integer" + format: "int64" + Delay: + description: | + Amount of time between rollback iterations, in nanoseconds. + type: "integer" + format: "int64" + FailureAction: + description: | + Action to take if an rolled back task fails to run, or stops + running during the rollback. + type: "string" + enum: + - "continue" + - "pause" + Monitor: + description: | + Amount of time to monitor each rolled back task for failures, in + nanoseconds. + type: "integer" + format: "int64" + MaxFailureRatio: + description: | + The fraction of tasks that may fail during a rollback before the + failure action is invoked, specified as a floating point number + between 0 and 1. + type: "number" + default: 0 + Order: + description: | + The order of operations when rolling back a task. Either the old + task is shut down before the new task is started, or the new task + is started before the old task is shut down. + type: "string" + enum: + - "stop-first" + - "start-first" + Networks: + description: "Specifies which networks the service should attach to." + type: "array" + items: + $ref: "#/definitions/NetworkAttachmentConfig" + + EndpointSpec: + $ref: "#/definitions/EndpointSpec" + + EndpointPortConfig: + type: "object" + properties: + Name: + type: "string" + Protocol: + type: "string" + enum: + - "tcp" + - "udp" + - "sctp" + TargetPort: + description: "The port inside the container." + type: "integer" + PublishedPort: + description: "The port on the swarm hosts." + type: "integer" + PublishMode: + description: | + The mode in which port is published. + +


+ + - "ingress" makes the target port accessible on every node, + regardless of whether there is a task for the service running on + that node or not. + - "host" bypasses the routing mesh and publish the port directly on + the swarm node where that service is running. + + type: "string" + enum: + - "ingress" + - "host" + default: "ingress" + example: "ingress" + + EndpointSpec: + description: "Properties that can be configured to access and load balance a service." + type: "object" + properties: + Mode: + description: | + The mode of resolution to use for internal load balancing between tasks. + type: "string" + enum: + - "vip" + - "dnsrr" + default: "vip" + Ports: + description: | + List of exposed ports that this service is accessible on from the + outside. Ports can only be provided if `vip` resolution mode is used. + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + + Service: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ServiceSpec" + Endpoint: + type: "object" + properties: + Spec: + $ref: "#/definitions/EndpointSpec" + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + VirtualIPs: + type: "array" + items: + type: "object" + properties: + NetworkID: + type: "string" + Addr: + type: "string" + UpdateStatus: + description: "The status of a service update." + type: "object" + properties: + State: + type: "string" + enum: + - "updating" + - "paused" + - "completed" + StartedAt: + type: "string" + format: "dateTime" + CompletedAt: + type: "string" + format: "dateTime" + Message: + type: "string" + ServiceStatus: + description: | + The status of the service's tasks. Provided only when requested as + part of a ServiceList operation. + type: "object" + properties: + RunningTasks: + description: | + The number of tasks for the service currently in the Running state. + type: "integer" + format: "uint64" + example: 7 + DesiredTasks: + description: | + The number of tasks for the service desired to be running. + For replicated services, this is the replica count from the + service spec. For global services, this is computed by taking + count of all tasks for the service with a Desired State other + than Shutdown. + type: "integer" + format: "uint64" + example: 10 + CompletedTasks: + description: | + The number of tasks for a job that are in the Completed state. + This field must be cross-referenced with the service type, as the + value of 0 may mean the service is not in a job mode, or it may + mean the job-mode service has no tasks yet Completed. + type: "integer" + format: "uint64" + JobStatus: + description: | + The status of the service when it is in one of ReplicatedJob or + GlobalJob modes. Absent on Replicated and Global mode services. The + JobIteration is an ObjectVersion, but unlike the Service's version, + does not need to be sent with an update request. + type: "object" + properties: + JobIteration: + description: | + JobIteration is a value increased each time a Job is executed, + successfully or otherwise. "Executed", in this case, means the + job as a whole has been started, not that an individual Task has + been launched. A job is "Executed" when its ServiceSpec is + updated. JobIteration can be used to disambiguate Tasks belonging + to different executions of a job. 
Though JobIteration will + increase with each subsequent execution, it may not necessarily + increase by 1, and so JobIteration should not be used to + $ref: "#/definitions/ObjectVersion" + LastExecution: + description: | + The last time, as observed by the server, that this job was + started. + type: "string" + format: "dateTime" + example: + ID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Version: + Index: 19 + CreatedAt: "2016-06-07T21:05:51.880065305Z" + UpdatedAt: "2016-06-07T21:07:29.962229872Z" + Spec: + Name: "hopeful_cori" + TaskTemplate: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Endpoint: + Spec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + VirtualIPs: + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.2/16" + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.3/16" + + ImageDeleteResponseItem: + type: "object" + properties: + Untagged: + description: "The image ID of an image that was untagged" + type: "string" + Deleted: + description: "The image ID of an image that was deleted" + type: "string" + + ServiceUpdateResponse: + type: "object" + properties: + Warnings: + description: "Optional warning messages" + type: "array" + items: + type: "string" + example: + Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + + ContainerSummary: + type: "object" + properties: + Id: + description: "The ID of this container" + type: "string" + x-go-name: "ID" + Names: + description: "The names that this container has been given" + type: "array" + items: + type: "string" + Image: + description: "The name of the image used when creating this container" + type: "string" + ImageID: + description: "The ID of the image that this container was created from" + type: "string" + Command: + description: "Command to run when starting the container" + type: "string" + Created: + description: "When the container was created" + type: "integer" + format: "int64" + Ports: + description: "The ports exposed by this container" + type: "array" + items: + $ref: "#/definitions/Port" + SizeRw: + description: "The size of files that have been created or changed by this container" + type: "integer" + format: "int64" + SizeRootFs: + description: "The total size of all the files in this container" + type: "integer" + format: "int64" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + State: + description: "The state of this container (e.g. `Exited`)" + type: "string" + Status: + description: "Additional human-readable status of this container (e.g. 
`Exit 0`)" + type: "string" + HostConfig: + type: "object" + properties: + NetworkMode: + type: "string" + NetworkSettings: + description: "A summary of the container's network settings" + type: "object" + properties: + Networks: + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + Mounts: + type: "array" + items: + $ref: "#/definitions/MountPoint" + + Driver: + description: "Driver represents a driver (network, logging, secrets)." + type: "object" + required: [Name] + properties: + Name: + description: "Name of the driver." + type: "string" + x-nullable: false + example: "some-driver" + Options: + description: "Key/value map of driver-specific options." + type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: + OptionA: "value for driver-specific option A" + OptionB: "value for driver-specific option B" + + SecretSpec: + type: "object" + properties: + Name: + description: "User-defined name of the secret." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Data: + description: | + Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) + data to store as secret. + + This field is only used to _create_ a secret, and is not returned by + other endpoints. + type: "string" + example: "" + Driver: + description: | + Name of the secrets driver used to fetch the secret's value from an + external secret store. + $ref: "#/definitions/Driver" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + + Secret: + type: "object" + properties: + ID: + type: "string" + example: "blt1owaxmitz71s9v5zh81zun" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + Spec: + $ref: "#/definitions/SecretSpec" + + ConfigSpec: + type: "object" + properties: + Name: + description: "User-defined name of the config." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Data: + description: | + Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) + config data. + type: "string" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + + Config: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ConfigSpec" + + ContainerState: + description: | + ContainerState stores container's running state. It's part of ContainerJSONBase + and will be returned by the "inspect" command. + type: "object" + x-nullable: true + properties: + Status: + description: | + String representation of the container state. Can be one of "created", + "running", "paused", "restarting", "removing", "exited", or "dead". 
+ type: "string" + enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] + example: "running" + Running: + description: | + Whether this container is running. + + Note that a running container can be _paused_. The `Running` and `Paused` + booleans are not mutually exclusive: + + When pausing a container (on Linux), the freezer cgroup is used to suspend + all processes in the container. Freezing the process requires the process to + be running. As a result, paused containers are both `Running` _and_ `Paused`. + + Use the `Status` field instead to determine if a container's state is "running". + type: "boolean" + example: true + Paused: + description: "Whether this container is paused." + type: "boolean" + example: false + Restarting: + description: "Whether this container is restarting." + type: "boolean" + example: false + OOMKilled: + description: | + Whether this container has been killed because it ran out of memory. + type: "boolean" + example: false + Dead: + type: "boolean" + example: false + Pid: + description: "The process ID of this container" + type: "integer" + example: 1234 + ExitCode: + description: "The last exit code of this container" + type: "integer" + example: 0 + Error: + type: "string" + StartedAt: + description: "The time when this container was last started." + type: "string" + example: "2020-01-06T09:06:59.461876391Z" + FinishedAt: + description: "The time when this container last exited." + type: "string" + example: "2020-01-06T09:07:59.461876391Z" + Health: + $ref: "#/definitions/Health" + + ContainerCreateResponse: + description: "OK response to ContainerCreate operation" + type: "object" + title: "ContainerCreateResponse" + x-go-name: "CreateResponse" + required: [Id, Warnings] + properties: + Id: + description: "The ID of the created container" + type: "string" + x-nullable: false + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Warnings: + description: "Warnings encountered when creating the container" + type: "array" + x-nullable: false + items: + type: "string" + example: [] + + ContainerWaitResponse: + description: "OK response to ContainerWait operation" + type: "object" + x-go-name: "WaitResponse" + title: "ContainerWaitResponse" + required: [StatusCode] + properties: + StatusCode: + description: "Exit code of the container" + type: "integer" + format: "int64" + x-nullable: false + Error: + $ref: "#/definitions/ContainerWaitExitError" + + ContainerWaitExitError: + description: "container waiting error, if any" + type: "object" + x-go-name: "WaitExitError" + properties: + Message: + description: "Details of an error" + type: "string" + + SystemVersion: + type: "object" + description: | + Response of Engine API: GET "/version" + properties: + Platform: + type: "object" + required: [Name] + properties: + Name: + type: "string" + Components: + type: "array" + description: | + Information about system components + items: + type: "object" + x-go-name: ComponentVersion + required: [Name, Version] + properties: + Name: + description: | + Name of the component + type: "string" + example: "Engine" + Version: + description: | + Version of the component + type: "string" + x-nullable: false + example: "19.03.12" + Details: + description: | + Key/value pairs of strings with additional information about the + component. These values are intended for informational purposes + only, and their content is not defined, and not part of the API + specification. 
+ + These messages can be printed by the client as information to the user. + type: "object" + x-nullable: true + Version: + description: "The version of the daemon" + type: "string" + example: "19.03.12" + ApiVersion: + description: | + The default (and highest) API version that is supported by the daemon + type: "string" + example: "1.40" + MinAPIVersion: + description: | + The minimum API version that is supported by the daemon + type: "string" + example: "1.12" + GitCommit: + description: | + The Git commit of the source code that was used to build the daemon + type: "string" + example: "48a66213fe" + GoVersion: + description: | + The version Go used to compile the daemon, and the version of the Go + runtime in use. + type: "string" + example: "go1.13.14" + Os: + description: | + The operating system that the daemon is running on ("linux" or "windows") + type: "string" + example: "linux" + Arch: + description: | + The architecture that the daemon is running on + type: "string" + example: "amd64" + KernelVersion: + description: | + The kernel version (`uname -r`) that the daemon is running on. + + This field is omitted when empty. + type: "string" + example: "4.19.76-linuxkit" + Experimental: + description: | + Indicates if the daemon is started with experimental features enabled. + + This field is omitted when empty / false. + type: "boolean" + example: true + BuildTime: + description: | + The date and time that the daemon was compiled. + type: "string" + example: "2020-06-22T15:49:27.000000000+00:00" + + SystemInfo: + type: "object" + properties: + ID: + description: | + Unique identifier of the daemon. + +


+ + > **Note**: The format of the ID itself is not part of the API, and + > should not be considered stable. + type: "string" + example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" + Containers: + description: "Total number of containers on the host." + type: "integer" + example: 14 + ContainersRunning: + description: | + Number of containers with status `"running"`. + type: "integer" + example: 3 + ContainersPaused: + description: | + Number of containers with status `"paused"`. + type: "integer" + example: 1 + ContainersStopped: + description: | + Number of containers with status `"stopped"`. + type: "integer" + example: 10 + Images: + description: | + Total number of images on the host. + + Both _tagged_ and _untagged_ (dangling) images are counted. + type: "integer" + example: 508 + Driver: + description: "Name of the storage driver in use." + type: "string" + example: "overlay2" + DriverStatus: + description: | + Information specific to the storage driver, provided as + "label" / "value" pairs. + + This information is provided by the storage driver, and formatted + in a way consistent with the output of `docker info` on the command + line. + +
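As an aside on how these `SystemInfo` fields are consumed: the minimal Go sketch below fetches `GET /info` from a local daemon and prints a few of the fields defined above. It deliberately avoids the official SDK; the unix-socket path and the `v1.41` API-version prefix are assumptions about a typical Linux installation.

```go
// Minimal sketch (not the official Go SDK): query GET /info over the
// default unix socket and print a few SystemInfo fields.
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
)

func main() {
	httpc := http.Client{
		Transport: &http.Transport{
			// Dial the local daemon socket regardless of the URL host.
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return net.Dial("unix", "/var/run/docker.sock")
			},
		},
	}
	resp, err := httpc.Get("http://docker/v1.41/info")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode only the fields we care about; unknown fields are ignored.
	var info struct {
		Containers        int
		ContainersRunning int
		Images            int
		Driver            string
	}
	if err := json.NewDecoder(resp.Body).Decode(&info); err != nil {
		panic(err)
	}
	fmt.Printf("containers=%d (running %d) images=%d driver=%s\n",
		info.Containers, info.ContainersRunning, info.Images, info.Driver)
}
```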


+ + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["Backing Filesystem", "extfs"] + - ["Supports d_type", "true"] + - ["Native Overlay Diff", "true"] + DockerRootDir: + description: | + Root directory of persistent Docker state. + + Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker` + on Windows. + type: "string" + example: "/var/lib/docker" + Plugins: + $ref: "#/definitions/PluginsInfo" + MemoryLimit: + description: "Indicates if the host has memory limit support enabled." + type: "boolean" + example: true + SwapLimit: + description: "Indicates if the host has memory swap limit support enabled." + type: "boolean" + example: true + KernelMemoryTCP: + description: | + Indicates if the host has kernel memory TCP limit support enabled. This + field is omitted if not supported. + + Kernel memory TCP limits are not supported when using cgroups v2, which + does not support the corresponding `memory.kmem.tcp.limit_in_bytes` cgroup. + type: "boolean" + example: true + CpuCfsPeriod: + description: | + Indicates if CPU CFS(Completely Fair Scheduler) period is supported by + the host. + type: "boolean" + example: true + CpuCfsQuota: + description: | + Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by + the host. + type: "boolean" + example: true + CPUShares: + description: | + Indicates if CPU Shares limiting is supported by the host. + type: "boolean" + example: true + CPUSet: + description: | + Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host. + + See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt) + type: "boolean" + example: true + PidsLimit: + description: "Indicates if the host kernel has PID limit support enabled." + type: "boolean" + example: true + OomKillDisable: + description: "Indicates if OOM killer disable is supported on the host." + type: "boolean" + IPv4Forwarding: + description: "Indicates IPv4 forwarding is enabled." + type: "boolean" + example: true + BridgeNfIptables: + description: "Indicates if `bridge-nf-call-iptables` is available on the host." + type: "boolean" + example: true + BridgeNfIp6tables: + description: "Indicates if `bridge-nf-call-ip6tables` is available on the host." + type: "boolean" + example: true + Debug: + description: | + Indicates if the daemon is running in debug-mode / with debug-level + logging enabled. + type: "boolean" + example: true + NFd: + description: | + The total number of file Descriptors in use by the daemon process. + + This information is only returned if debug-mode is enabled. + type: "integer" + example: 64 + NGoroutines: + description: | + The number of goroutines that currently exist. + + This information is only returned if debug-mode is enabled. + type: "integer" + example: 174 + SystemTime: + description: | + Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + format with nano-seconds. + type: "string" + example: "2017-08-08T20:28:29.06202363Z" + LoggingDriver: + description: | + The logging driver to use as a default for new containers. + type: "string" + CgroupDriver: + description: | + The driver to use for managing cgroups. + type: "string" + enum: ["cgroupfs", "systemd", "none"] + default: "cgroupfs" + example: "cgroupfs" + CgroupVersion: + description: | + The version of the cgroup. 
+ type: "string" + enum: ["1", "2"] + default: "1" + example: "1" + NEventsListener: + description: "Number of event listeners subscribed." + type: "integer" + example: 30 + KernelVersion: + description: | + Kernel version of the host. + + On Linux, this information is obtained from `uname`. On Windows this + information is queried from the HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\ + registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. + type: "string" + example: "4.9.38-moby" + OperatingSystem: + description: | + Name of the host's operating system, for example: "Ubuntu 16.04.2 LTS" + or "Windows Server 2016 Datacenter" + type: "string" + example: "Alpine Linux v3.5" + OSVersion: + description: | + Version of the host's operating system + +


+ + > **Note**: The information returned in this field, including its + > very existence, and the formatting of values, should not be considered + > stable, and may change without notice. + type: "string" + example: "16.04" + OSType: + description: | + Generic type of the operating system of the host, as returned by the + Go runtime (`GOOS`). + + Currently returned values are "linux" and "windows". A full list of + possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). + type: "string" + example: "linux" + Architecture: + description: | + Hardware architecture of the host, as returned by the Go runtime + (`GOARCH`). + + A full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). + type: "string" + example: "x86_64" + NCPU: + description: | + The number of logical CPUs usable by the daemon. + + The number of available CPUs is checked by querying the operating + system when the daemon starts. Changes to operating system CPU + allocation after the daemon is started are not reflected. + type: "integer" + example: 4 + MemTotal: + description: | + Total amount of physical memory available on the host, in bytes. + type: "integer" + format: "int64" + example: 2095882240 + + IndexServerAddress: + description: | + Address / URL of the index server that is used for image search, + and as a default for user authentication for Docker Hub and Docker Cloud. + default: "https://index.docker.io/v1/" + type: "string" + example: "https://index.docker.io/v1/" + RegistryConfig: + $ref: "#/definitions/RegistryServiceConfig" + GenericResources: + $ref: "#/definitions/GenericResources" + HttpProxy: + description: | + HTTP-proxy configured for the daemon. This value is obtained from the + [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "http://xxxxx:xxxxx@proxy.corp.example.com:8080" + HttpsProxy: + description: | + HTTPS-proxy configured for the daemon. This value is obtained from the + [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "https://xxxxx:xxxxx@proxy.corp.example.com:4443" + NoProxy: + description: | + Comma-separated list of domain extensions for which no proxy should be + used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) + environment variable. + + Containers do not automatically inherit this configuration. + type: "string" + example: "*.local, 169.254/16" + Name: + description: "Hostname of the host." + type: "string" + example: "node5.corp.example.com" + Labels: + description: | + User-defined labels (key/value metadata) as set on the daemon. + +


+ + > **Note**: When part of a Swarm, nodes can both have _daemon_ labels, + > set through the daemon configuration, and _node_ labels, set from a + > manager node in the Swarm. Node labels are not included in this + > field. Node labels can be retrieved using the `/nodes/(id)` endpoint + > on a manager node in the Swarm. + type: "array" + items: + type: "string" + example: ["storage=ssd", "production"] + ExperimentalBuild: + description: | + Indicates if experimental features are enabled on the daemon. + type: "boolean" + example: true + ServerVersion: + description: | + Version string of the daemon. + + > **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/) + > returns the Swarm version instead of the daemon version, for example + > `swarm/1.2.8`. + type: "string" + example: "17.06.0-ce" + ClusterStore: + description: | + URL of the distributed storage backend. + + + The storage backend is used for multihost networking (to store + network and endpoint information) and by the node discovery mechanism. + +


+ + > **Deprecated**: This field is only propagated when using standalone Swarm + > mode, and overlay networking using an external k/v store. Overlay + > networks with Swarm mode enabled use the built-in raft store, and + > this field will be empty. + type: "string" + example: "consul://consul.corp.example.com:8600/some/path" + ClusterAdvertise: + description: | + The network endpoint that the Engine advertises for the purpose of + node discovery. ClusterAdvertise is a `host:port` combination on which + the daemon is reachable by other hosts. + +


+ + > **Deprecated**: This field is only propagated when using standalone Swarm + > mode, and overlay networking using an external k/v store. Overlay + > networks with Swarm mode enabled use the built-in raft store, and + > this field will be empty. + type: "string" + example: "node5.corp.example.com:8000" + Runtimes: + description: | + List of [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtimes configured on the daemon. Keys hold the "name" used to + reference the runtime. + + The Docker daemon relies on an OCI compliant runtime (invoked via the + `containerd` daemon) as its interface to the Linux kernel namespaces, + cgroups, and SELinux. + + The default runtime is `runc`, and automatically configured. Additional + runtimes can be configured by the user and will be listed here. + type: "object" + additionalProperties: + $ref: "#/definitions/Runtime" + default: + runc: + path: "runc" + example: + runc: + path: "runc" + runc-master: + path: "/go/bin/runc" + custom: + path: "/usr/local/bin/my-oci-runtime" + runtimeArgs: ["--debug", "--systemd-cgroup=false"] + DefaultRuntime: + description: | + Name of the default OCI runtime that is used when starting containers. + + The default can be overridden per-container at create time. + type: "string" + default: "runc" + example: "runc" + Swarm: + $ref: "#/definitions/SwarmInfo" + LiveRestoreEnabled: + description: | + Indicates if live restore is enabled. + + If enabled, containers are kept running when the daemon is shutdown + or upon daemon start if running containers are detected. + type: "boolean" + default: false + example: false + Isolation: + description: | + Represents the isolation technology to use as a default for containers. + The supported values are platform-specific. + + If no isolation value is specified on daemon start, on Windows client, + the default is `hyperv`, and on Windows server, the default is `process`. + + This option is currently not used on other platforms. + default: "default" + type: "string" + enum: + - "default" + - "hyperv" + - "process" + InitBinary: + description: | + Name and, optional, path of the `docker-init` binary. + + If the path is omitted, the daemon searches the host's `$PATH` for the + binary and uses the first result. + type: "string" + example: "docker-init" + ContainerdCommit: + $ref: "#/definitions/Commit" + RuncCommit: + $ref: "#/definitions/Commit" + InitCommit: + $ref: "#/definitions/Commit" + SecurityOptions: + description: | + List of security features that are enabled on the daemon, such as + apparmor, seccomp, SELinux, user-namespaces (userns), and rootless. + + Additional configuration options for each security feature may + be present, and are included as a comma-separated list of key/value + pairs. + type: "array" + items: + type: "string" + example: + - "name=apparmor" + - "name=seccomp,profile=default" + - "name=selinux" + - "name=userns" + - "name=rootless" + ProductLicense: + description: | + Reports a summary of the product license on the daemon. + + If a commercial license has been applied to the daemon, information + such as number of nodes, and expiration are included. + type: "string" + example: "Community Engine" + DefaultAddressPools: + description: | + List of custom default address pools for local networks, which can be + specified in the daemon.json file or dockerd option. + + Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256 + 10.10.[0-255].0/24 address pools. 
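The arithmetic in the `DefaultAddressPools` example above can be checked directly: splitting a /16 Base into Size /24 pools yields 2^(24-16) = 256 subnets. A tiny, purely illustrative Go check of that figure:

```go
// Verify the worked example above: a Base of 10.10.0.0/16 split into
// Size /24 pools yields 2^(24-16) = 256 address pools.
package main

import (
	"fmt"
	"net"
)

func main() {
	_, base, err := net.ParseCIDR("10.10.0.0/16")
	if err != nil {
		panic(err)
	}
	baseBits, _ := base.Mask.Size() // 16
	const size = 24
	fmt.Println(1 << (size - baseBits)) // prints 256
}
```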
+ type: "array" + items: + type: "object" + properties: + Base: + description: "The network address in CIDR format" + type: "string" + example: "10.10.0.0/16" + Size: + description: "The network pool size" + type: "integer" + example: "24" + Warnings: + description: | + List of warnings / informational messages about missing features, or + issues related to the daemon configuration. + + These messages can be printed by the client as information to the user. + type: "array" + items: + type: "string" + example: + - "WARNING: No memory limit support" + - "WARNING: bridge-nf-call-iptables is disabled" + - "WARNING: bridge-nf-call-ip6tables is disabled" + + + # PluginsInfo is a temp struct holding Plugins name + # registered with docker daemon. It is used by Info struct + PluginsInfo: + description: | + Available plugins per type. + +


+ + > **Note**: Only unmanaged (V1) plugins are included in this list. + > V1 plugins are "lazily" loaded, and are not returned in this list + > if there is no resource using the plugin. + type: "object" + properties: + Volume: + description: "Names of available volume-drivers, and network-driver plugins." + type: "array" + items: + type: "string" + example: ["local"] + Network: + description: "Names of available network-drivers, and network-driver plugins." + type: "array" + items: + type: "string" + example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"] + Authorization: + description: "Names of available authorization plugins." + type: "array" + items: + type: "string" + example: ["img-authz-plugin", "hbm"] + Log: + description: "Names of available logging-drivers, and logging-driver plugins." + type: "array" + items: + type: "string" + example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "logentries", "splunk", "syslog"] + + + RegistryServiceConfig: + description: | + RegistryServiceConfig stores daemon registry services configuration. + type: "object" + x-nullable: true + properties: + AllowNondistributableArtifactsCIDRs: + description: | + List of IP ranges to which nondistributable artifacts can be pushed, + using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632). + + Some images (for example, Windows base images) contain artifacts + whose distribution is restricted by license. When these images are + pushed to a registry, restricted artifacts are not included. + + This configuration override this behavior, and enables the daemon to + push nondistributable artifacts to all registries whose resolved IP + address is within the subnet described by the CIDR syntax. + + This option is useful when pushing images containing + nondistributable artifacts to a registry on an air-gapped network so + hosts on that network can pull the images without connecting to + another server. + + > **Warning**: Nondistributable artifacts typically have restrictions + > on how and where they can be distributed and shared. Only use this + > feature to push artifacts to private registries and ensure that you + > are in compliance with any terms that cover redistributing + > nondistributable artifacts. + + type: "array" + items: + type: "string" + example: ["::1/128", "127.0.0.0/8"] + AllowNondistributableArtifactsHostnames: + description: | + List of registry hostnames to which nondistributable artifacts can be + pushed, using the format `[:]` or `[:]`. + + Some images (for example, Windows base images) contain artifacts + whose distribution is restricted by license. When these images are + pushed to a registry, restricted artifacts are not included. + + This configuration override this behavior for the specified + registries. + + This option is useful when pushing images containing + nondistributable artifacts to a registry on an air-gapped network so + hosts on that network can pull the images without connecting to + another server. + + > **Warning**: Nondistributable artifacts typically have restrictions + > on how and where they can be distributed and shared. Only use this + > feature to push artifacts to private registries and ensure that you + > are in compliance with any terms that cover redistributing + > nondistributable artifacts. 
+ type: "array" + items: + type: "string" + example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] + InsecureRegistryCIDRs: + description: | + List of IP ranges of insecure registries, using the CIDR syntax + ([RFC 4632](https://tools.ietf.org/html/4632)). Insecure registries + accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates + from unknown CAs) communication. + + By default, local registries (`127.0.0.0/8`) are configured as + insecure. All other registries are secure. Communicating with an + insecure registry is not possible if the daemon assumes that registry + is secure. + + This configuration override this behavior, insecure communication with + registries whose resolved IP address is within the subnet described by + the CIDR syntax. + + Registries can also be marked insecure by hostname. Those registries + are listed under `IndexConfigs` and have their `Secure` field set to + `false`. + + > **Warning**: Using this option can be useful when running a local + > registry, but introduces security vulnerabilities. This option + > should therefore ONLY be used for testing purposes. For increased + > security, users should add their CA to their system's list of trusted + > CAs instead of enabling this option. + type: "array" + items: + type: "string" + example: ["::1/128", "127.0.0.0/8"] + IndexConfigs: + type: "object" + additionalProperties: + $ref: "#/definitions/IndexInfo" + example: + "127.0.0.1:5000": + "Name": "127.0.0.1:5000" + "Mirrors": [] + "Secure": false + "Official": false + "[2001:db8:a0b:12f0::1]:80": + "Name": "[2001:db8:a0b:12f0::1]:80" + "Mirrors": [] + "Secure": false + "Official": false + "docker.io": + Name: "docker.io" + Mirrors: ["https://hub-mirror.corp.example.com:5000/"] + Secure: true + Official: true + "registry.internal.corp.example.com:3000": + Name: "registry.internal.corp.example.com:3000" + Mirrors: [] + Secure: false + Official: false + Mirrors: + description: | + List of registry URLs that act as a mirror for the official + (`docker.io`) registry. + + type: "array" + items: + type: "string" + example: + - "https://hub-mirror.corp.example.com:5000/" + - "https://[2001:db8:a0b:12f0::1]/" + + IndexInfo: + description: + IndexInfo contains information about a registry. + type: "object" + x-nullable: true + properties: + Name: + description: | + Name of the registry, such as "docker.io". + type: "string" + example: "docker.io" + Mirrors: + description: | + List of mirrors, expressed as URIs. + type: "array" + items: + type: "string" + example: + - "https://hub-mirror.corp.example.com:5000/" + - "https://registry-2.docker.io/" + - "https://registry-3.docker.io/" + Secure: + description: | + Indicates if the registry is part of the list of insecure + registries. + + If `false`, the registry is insecure. Insecure registries accept + un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from + unknown CAs) communication. + + > **Warning**: Insecure registries can be useful when running a local + > registry. However, because its use creates security vulnerabilities + > it should ONLY be enabled for testing purposes. For increased + > security, users should add their CA to their system's list of + > trusted CAs instead of enabling this option. 
+ type: "boolean" + example: true + Official: + description: | + Indicates whether this is an official registry (i.e., Docker Hub / docker.io) + type: "boolean" + example: true + + Runtime: + description: | + Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtime. + + The runtime is invoked by the daemon via the `containerd` daemon. OCI + runtimes act as an interface to the Linux kernel namespaces, cgroups, + and SELinux. + type: "object" + properties: + path: + description: | + Name and, optional, path, of the OCI executable binary. + + If the path is omitted, the daemon searches the host's `$PATH` for the + binary and uses the first result. + type: "string" + example: "/usr/local/bin/my-oci-runtime" + runtimeArgs: + description: | + List of command-line arguments to pass to the runtime when invoked. + type: "array" + x-nullable: true + items: + type: "string" + example: ["--debug", "--systemd-cgroup=false"] + + Commit: + description: | + Commit holds the Git-commit (SHA1) that a binary was built from, as + reported in the version-string of external tools, such as `containerd`, + or `runC`. + type: "object" + properties: + ID: + description: "Actual commit ID of external tool." + type: "string" + example: "cfb82a876ecc11b5ca0977d1733adbe58599088a" + Expected: + description: | + Commit ID of external tool expected by dockerd as set at build time. + type: "string" + example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4" + + SwarmInfo: + description: | + Represents generic information about swarm. + type: "object" + properties: + NodeID: + description: "Unique identifier of for this node in the swarm." + type: "string" + default: "" + example: "k67qz4598weg5unwwffg6z1m1" + NodeAddr: + description: | + IP address at which this node can be reached by other nodes in the + swarm. + type: "string" + default: "" + example: "10.0.0.46" + LocalNodeState: + $ref: "#/definitions/LocalNodeState" + ControlAvailable: + type: "boolean" + default: false + example: true + Error: + type: "string" + default: "" + RemoteManagers: + description: | + List of ID's and addresses of other managers in the swarm. + type: "array" + default: null + x-nullable: true + items: + $ref: "#/definitions/PeerNode" + example: + - NodeID: "71izy0goik036k48jg985xnds" + Addr: "10.0.0.158:2377" + - NodeID: "79y6h1o4gv8n120drcprv5nmc" + Addr: "10.0.0.159:2377" + - NodeID: "k67qz4598weg5unwwffg6z1m1" + Addr: "10.0.0.46:2377" + Nodes: + description: "Total number of nodes in the swarm." + type: "integer" + x-nullable: true + example: 4 + Managers: + description: "Total number of managers in the swarm." + type: "integer" + x-nullable: true + example: 3 + Cluster: + $ref: "#/definitions/ClusterInfo" + + LocalNodeState: + description: "Current local status of this node." + type: "string" + default: "" + enum: + - "" + - "inactive" + - "pending" + - "active" + - "error" + - "locked" + example: "active" + + PeerNode: + description: "Represents a peer-node in the swarm" + type: "object" + properties: + NodeID: + description: "Unique identifier of for this node in the swarm." + type: "string" + Addr: + description: | + IP address and ports at which this node can be reached. + type: "string" + + NetworkAttachmentConfig: + description: | + Specifies how a service should be attached to a particular network. + type: "object" + properties: + Target: + description: | + The target network for attachment. Must be a network name or ID. 
+ type: "string" + Aliases: + description: | + Discoverable alternate names for the service on this network. + type: "array" + items: + type: "string" + DriverOpts: + description: | + Driver attachment options for the network target. + type: "object" + additionalProperties: + type: "string" + + EventActor: + description: | + Actor describes something that generates events, like a container, network, + or a volume. + type: "object" + properties: + ID: + description: "The ID of the object emitting the event" + type: "string" + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Attributes: + description: | + Various key/value attributes of the object, depending on its type. + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-label-value" + image: "alpine:latest" + name: "my-container" + + EventMessage: + description: | + EventMessage represents the information an event contains. + type: "object" + title: "SystemEventsResponse" + properties: + Type: + description: "The type of object emitting the event" + type: "string" + enum: ["builder", "config", "container", "daemon", "image", "network", "node", "plugin", "secret", "service", "volume"] + example: "container" + Action: + description: "The type of event" + type: "string" + example: "create" + Actor: + $ref: "#/definitions/EventActor" + scope: + description: | + Scope of the event. Engine events are `local` scope. Cluster (Swarm) + events are `swarm` scope. + type: "string" + enum: ["local", "swarm"] + time: + description: "Timestamp of event" + type: "integer" + format: "int64" + example: 1629574695 + timeNano: + description: "Timestamp of event, with nanosecond accuracy" + type: "integer" + format: "int64" + example: 1629574695515050031 + + OCIDescriptor: + type: "object" + x-go-name: Descriptor + description: | + A descriptor struct containing digest, media type, and size, as defined in + the [OCI Content Descriptors Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md). + properties: + mediaType: + description: | + The media type of the object this schema refers to. + type: "string" + example: "application/vnd.docker.distribution.manifest.v2+json" + digest: + description: | + The digest of the targeted content. + type: "string" + example: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" + size: + description: | + The size in bytes of the blob. + type: "integer" + format: "int64" + example: 3987495 + # TODO Not yet including these fields for now, as they are nil / omitted in our response. + # urls: + # description: | + # List of URLs from which this object MAY be downloaded. + # type: "array" + # items: + # type: "string" + # format: "uri" + # annotations: + # description: | + # Arbitrary metadata relating to the targeted content. + # type: "object" + # additionalProperties: + # type: "string" + # platform: + # $ref: "#/definitions/OCIPlatform" + + OCIPlatform: + type: "object" + x-go-name: Platform + description: | + Describes the platform which the image in the manifest runs on, as defined + in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md). + properties: + architecture: + description: | + The CPU architecture, for example `amd64` or `ppc64`. + type: "string" + example: "arm" + os: + description: | + The operating system, for example `linux` or `windows`. 
+ type: "string" + example: "windows" + os.version: + description: | + Optional field specifying the operating system version, for example on + Windows `10.0.19041.1165`. + type: "string" + example: "10.0.19041.1165" + os.features: + description: | + Optional field specifying an array of strings, each listing a required + OS feature (for example on Windows `win32k`). + type: "array" + items: + type: "string" + example: + - "win32k" + variant: + description: | + Optional field specifying a variant of the CPU, for example `v7` to + specify ARMv7 when architecture is `arm`. + type: "string" + example: "v7" + + DistributionInspect: + type: "object" + x-go-name: DistributionInspect + title: "DistributionInspectResponse" + required: [Descriptor, Platforms] + description: | + Describes the result obtained from contacting the registry to retrieve + image metadata. + properties: + Descriptor: + $ref: "#/definitions/OCIDescriptor" + Platforms: + type: "array" + description: | + An array containing all platforms supported by the image. + items: + $ref: "#/definitions/OCIPlatform" + + ClusterVolume: + type: "object" + description: | + Options and information specific to, and only present on, Swarm CSI + cluster volumes. + properties: + ID: + type: "string" + description: | + The Swarm ID of this volume. Because cluster volumes are Swarm + objects, they have an ID, unlike non-cluster volumes. This ID can + be used to refer to the Volume instead of the name. + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ClusterVolumeSpec" + Info: + type: "object" + description: | + Information about the global status of the volume. + properties: + CapacityBytes: + type: "integer" + format: "int64" + description: | + The capacity of the volume in bytes. A value of 0 indicates that + the capacity is unknown. + VolumeContext: + type: "object" + description: | + A map of strings to strings returned from the storage plugin when + the volume is created. + additionalProperties: + type: "string" + VolumeID: + type: "string" + description: | + The ID of the volume as returned by the CSI storage plugin. This + is distinct from the volume's ID as provided by Docker. This ID + is never used by the user when communicating with Docker to refer + to this volume. If the ID is blank, then the Volume has not been + successfully created in the plugin yet. + AccessibleTopology: + type: "array" + description: | + The topology this volume is actually accessible from. + items: + $ref: "#/definitions/Topology" + PublishStatus: + type: "array" + description: | + The status of the volume as it pertains to its publishing and use on + specific nodes + items: + type: "object" + properties: + NodeID: + type: "string" + description: | + The ID of the Swarm node the volume is published on. + State: + type: "string" + description: | + The published state of the volume. + * `pending-publish` The volume should be published to this node, but the call to the controller plugin to do so has not yet been successfully completed. + * `published` The volume is published successfully to the node. + * `pending-node-unpublish` The volume should be unpublished from the node, and the manager is awaiting confirmation from the worker that it has done so. + * `pending-controller-unpublish` The volume is successfully unpublished from the node, but has not yet been successfully unpublished on the controller. 
+ enum: + - "pending-publish" + - "published" + - "pending-node-unpublish" + - "pending-controller-unpublish" + PublishContext: + type: "object" + description: | + A map of strings to strings returned by the CSI controller + plugin when a volume is published. + additionalProperties: + type: "string" + + ClusterVolumeSpec: + type: "object" + description: | + Cluster-specific options used to create the volume. + properties: + Group: + type: "string" + description: | + Group defines the volume group of this volume. Volumes belonging to + the same group can be referred to by group name when creating + Services. Referring to a volume by group instructs Swarm to treat + volumes in that group interchangeably for the purpose of scheduling. + Volumes with an empty string for a group technically all belong to + the same, emptystring group. + AccessMode: + type: "object" + description: | + Defines how the volume is used by tasks. + properties: + Scope: + type: "string" + description: | + The set of nodes this volume can be used on at one time. + - `single` The volume may only be scheduled to one node at a time. + - `multi` the volume may be scheduled to any supported number of nodes at a time. + default: "single" + enum: ["single", "multi"] + x-nullable: false + Sharing: + type: "string" + description: | + The number and way that different tasks can use this volume + at one time. + - `none` The volume may only be used by one task at a time. + - `readonly` The volume may be used by any number of tasks, but they all must mount the volume as readonly + - `onewriter` The volume may be used by any number of tasks, but only one may mount it as read/write. + - `all` The volume may have any number of readers and writers. + default: "none" + enum: ["none", "readonly", "onewriter", "all"] + x-nullable: false + MountVolume: + type: "object" + description: | + Options for using this volume as a Mount-type volume. + + Either MountVolume or BlockVolume, but not both, must be + present. + properties: + FsType: + type: "string" + description: | + Specifies the filesystem type for the mount volume. + Optional. + MountFlags: + type: "array" + description: | + Flags to pass when mounting the volume. Optional. + items: + type: "string" + BlockVolume: + type: "object" + description: | + Options for using this volume as a Block-type volume. + Intentionally empty. + Secrets: + type: "array" + description: | + Swarm Secrets that are passed to the CSI storage plugin when + operating on this volume. + items: + type: "object" + description: | + One cluster volume secret entry. Defines a key-value pair that + is passed to the plugin. + properties: + Key: + type: "string" + description: | + Key is the name of the key of the key-value pair passed to + the plugin. + Secret: + type: "string" + description: | + Secret is the swarm Secret object from which to read data. + This can be a Secret name or ID. The Secret data is + retrieved by swarm and used as the value of the key-value + pair passed to the plugin. + AccessibilityRequirements: + type: "object" + description: | + Requirements for the accessible topology of the volume. These + fields are optional. For an in-depth description of what these + fields mean, see the CSI specification. + properties: + Requisite: + type: "array" + description: | + A list of required topologies, at least one of which the + volume must be accessible from. 
+ items: + $ref: "#/definitions/Topology" + Preferred: + type: "array" + description: | + A list of topologies that the volume should attempt to be + provisioned in. + items: + $ref: "#/definitions/Topology" + CapacityRange: + type: "object" + description: | + The desired capacity that the volume should be created with. If + empty, the plugin will decide the capacity. + properties: + RequiredBytes: + type: "integer" + format: "int64" + description: | + The volume must be at least this big. The value of 0 + indicates an unspecified minimum + LimitBytes: + type: "integer" + format: "int64" + description: | + The volume must not be bigger than this. The value of 0 + indicates an unspecified maximum. + Availability: + type: "string" + description: | + The availability of the volume for use in tasks. + - `active` The volume is fully available for scheduling on the cluster + - `pause` No new workloads should use the volume, but existing workloads are not stopped. + - `drain` All workloads using this volume should be stopped and rescheduled, and no new ones should be started. + default: "active" + x-nullable: false + enum: + - "active" + - "pause" + - "drain" + + Topology: + description: | + A map of topological domains to topological segments. For in depth + details, see documentation for the Topology object in the CSI + specification. + type: "object" + additionalProperties: + type: "string" + +paths: + /containers/json: + get: + summary: "List containers" + description: | + Returns a list of containers. For details on the format, see the + [inspect endpoint](#operation/ContainerInspect). + + Note that it uses a different, smaller representation of a container + than inspecting a single container. For example, the list of linked + containers is not propagated . + operationId: "ContainerList" + produces: + - "application/json" + parameters: + - name: "all" + in: "query" + description: | + Return all containers. By default, only running containers are shown. + type: "boolean" + default: false + - name: "limit" + in: "query" + description: | + Return this number of most recently created containers, including + non-running ones. + type: "integer" + - name: "size" + in: "query" + description: | + Return the size of container as fields `SizeRw` and `SizeRootFs`. + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + Filters to process on the container list, encoded as JSON (a + `map[string][]string`). For example, `{"status": ["paused"]}` will + only return paused containers. 
+ 
+ Available filters: 
+ 
+ - `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`) 
+ - `before`=(`<container id>` or `<container name>`) 
+ - `expose`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) 
+ - `exited=<int>` containers with exit code of `<int>` 
+ - `health`=(`starting`|`healthy`|`unhealthy`|`none`) 
+ - `id=<ID>` a container's ID 
+ - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) 
+ - `is-task=`(`true`|`false`) 
+ - `label=key` or `label="key=value"` of a container label 
+ - `name=<name>` a container's name 
+ - `network`=(`<network id>` or `<network name>`) 
+ - `publish`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`) 
+ - `since`=(`<container id>` or `<container name>`) 
+ - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) 
+ - `volume`=(`<volume name>` or `<mount point destination>`) 
+ type: "string" 
+ responses: 
+ 200: 
+ description: "no error" 
+ schema: 
+ type: "array" 
+ items: 
+ $ref: "#/definitions/ContainerSummary" 
+ examples: 
+ application/json: 
+ - Id: "8dfafdbc3a40" 
+ Names: 
+ - "/boring_feynman" 
+ Image: "ubuntu:latest" 
+ ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" 
+ Command: "echo 1" 
+ Created: 1367854155 
+ State: "Exited" 
+ Status: "Exit 0" 
+ Ports: 
+ - PrivatePort: 2222 
+ PublicPort: 3333 
+ Type: "tcp" 
+ Labels: 
+ com.example.vendor: "Acme" 
+ com.example.license: "GPL" 
+ com.example.version: "1.0" 
+ SizeRw: 12288 
+ SizeRootFs: 0 
+ HostConfig: 
+ NetworkMode: "default" 
+ NetworkSettings: 
+ Networks: 
+ bridge: 
+ NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" 
+ EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f" 
+ Gateway: "172.17.0.1" 
+ IPAddress: "172.17.0.2" 
+ IPPrefixLen: 16 
+ IPv6Gateway: "" 
+ GlobalIPv6Address: "" 
+ GlobalIPv6PrefixLen: 0 
+ MacAddress: "02:42:ac:11:00:02" 
+ Mounts: 
+ - Name: "fac362...80535" 
+ Source: "/data" 
+ Destination: "/data" 
+ Driver: "local" 
+ Mode: "ro,Z" 
+ RW: false 
+ Propagation: "" 
+ - Id: "9cd87474be90" 
+ Names: 
+ - "/coolName" 
+ Image: "ubuntu:latest" 
+ ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" 
+ Command: "echo 222222" 
+ Created: 1367854155 
+ State: "Exited" 
+ Status: "Exit 0" 
+ Ports: [] 
+ Labels: {} 
+ SizeRw: 12288 
+ SizeRootFs: 0 
+ HostConfig: 
+ NetworkMode: "default" 
+ NetworkSettings: 
+ Networks: 
+ bridge: 
+ NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" 
+ EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a" 
+ Gateway: "172.17.0.1" 
+ IPAddress: "172.17.0.8" 
+ IPPrefixLen: 16 
+ IPv6Gateway: "" 
+ GlobalIPv6Address: "" 
+ GlobalIPv6PrefixLen: 0 
+ MacAddress: "02:42:ac:11:00:08" 
+ Mounts: [] 
+ - Id: "3176a2479c92" 
+ Names: 
+ - "/sleepy_dog" 
+ Image: "ubuntu:latest" 
+ ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" 
+ Command: "echo 3333333333333333" 
+ Created: 1367854154 
+ State: "Exited" 
+ Status: "Exit 0" 
+ Ports: [] 
+ Labels: {} 
+ SizeRw: 12288 
+ SizeRootFs: 0 
+ HostConfig: 
+ NetworkMode: "default" 
+ NetworkSettings: 
+ Networks: 
+ bridge: 
+ NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" 
+ EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d" 
+ Gateway: "172.17.0.1" 
+ IPAddress: "172.17.0.6" 
+ IPPrefixLen: 16 
+ IPv6Gateway: "" 
+ GlobalIPv6Address: "" 
+ GlobalIPv6PrefixLen: 0 
+ MacAddress: "02:42:ac:11:00:06" 
+ Mounts: [] 
+ - Id: "4cb07b47f9fb" 
+ Names: 
+ - "/running_cat" 
+ Image: "ubuntu:latest" 
+ ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" 
+ Command: "echo 444444444444444444444444444444444" 
+ Created: 1367854152 
+ State: "Exited" 
+ Status: "Exit 0" 
+ Ports: [] 
+ Labels: {} 
+ SizeRw: 12288 
+ SizeRootFs: 0 
+ HostConfig: 
+ 
NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.5" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:05" + Mounts: [] + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/create: + post: + summary: "Create a container" + operationId: "ContainerCreate" + consumes: + - "application/json" + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "name" + in: "query" + description: | + Assign the specified name to the container. Must match + `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`. + type: "string" + pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" + - name: "platform" + in: "query" + description: | + Platform in the format `os[/arch[/variant]]` used for image lookup. + + When specified, the daemon checks if the requested image is present + in the local image cache with the given OS and Architecture, and + otherwise returns a `404` status. + + If the option is not set, the host's native OS and Architecture are + used to look up the image in the image cache. However, if no platform + is passed and the given image does exist in the local image cache, + but its OS or architecture does not match, the container is created + with the available image, and a warning is added to the `Warnings` + field in the response, for example; + + WARNING: The requested image's platform (linux/arm64/v8) does not + match the detected host platform (linux/amd64) and no + specific platform was requested + + type: "string" + default: "" + - name: "body" + in: "body" + description: "Container to create" + schema: + allOf: + - $ref: "#/definitions/ContainerConfig" + - type: "object" + properties: + HostConfig: + $ref: "#/definitions/HostConfig" + NetworkingConfig: + $ref: "#/definitions/NetworkingConfig" + example: + Hostname: "" + Domainname: "" + User: "" + AttachStdin: false + AttachStdout: true + AttachStderr: true + Tty: false + OpenStdin: false + StdinOnce: false + Env: + - "FOO=bar" + - "BAZ=quux" + Cmd: + - "date" + Entrypoint: "" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + Volumes: + /volumes/data: {} + WorkingDir: "" + NetworkDisabled: false + MacAddress: "12:34:56:78:9a:bc" + ExposedPorts: + 22/tcp: {} + StopSignal: "SIGTERM" + StopTimeout: 10 + HostConfig: + Binds: + - "/tmp:/tmp" + Links: + - "redis3:redis" + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + NanoCpus: 500000 + CpuPercent: 80 + CpuShares: 512 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpuQuota: 50000 + CpusetCpus: "0,1" + CpusetMems: "0,1" + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 300 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceWriteIOps: + - {} + DeviceRequests: + - Driver: "nvidia" + Count: -1 + DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] + Capabilities: [["gpu", "nvidia", "compute"]] + Options: + property1: "string" + property2: "string" + MemorySwappiness: 60 + OomKillDisable: false + OomScoreAdj: 500 + PidMode: "" + PidsLimit: 0 + PortBindings: + 22/tcp: + - 
HostPort: "11022" + PublishAllPorts: false + Privileged: false + ReadonlyRootfs: false + Dns: + - "8.8.8.8" + DnsOptions: + - "" + DnsSearch: + - "" + VolumesFrom: + - "parent" + - "other:ro" + CapAdd: + - "NET_ADMIN" + CapDrop: + - "MKNOD" + GroupAdd: + - "newgroup" + RestartPolicy: + Name: "" + MaximumRetryCount: 0 + AutoRemove: true + NetworkMode: "bridge" + Devices: [] + Ulimits: + - {} + LogConfig: + Type: "json-file" + Config: {} + SecurityOpt: [] + StorageOpt: {} + CgroupParent: "" + VolumeDriver: "" + ShmSize: 67108864 + NetworkingConfig: + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + + required: true + responses: + 201: + description: "Container created successfully" + schema: + $ref: "#/definitions/ContainerCreateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/{id}/json: + get: + summary: "Inspect a container" + description: "Return low-level information about a container." + operationId: "ContainerInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "ContainerInspectResponse" + properties: + Id: + description: "The ID of the container" + type: "string" + Created: + description: "The time the container was created" + type: "string" + Path: + description: "The path to the command being run" + type: "string" + Args: + description: "The arguments to the command being run" + type: "array" + items: + type: "string" + State: + $ref: "#/definitions/ContainerState" + Image: + description: "The container's image ID" + type: "string" + ResolvConfPath: + type: "string" + HostnamePath: + type: "string" + HostsPath: + type: "string" + LogPath: + type: "string" + Name: + type: "string" + RestartCount: + type: "integer" + Driver: + type: "string" + Platform: + type: "string" + MountLabel: + type: "string" + ProcessLabel: + type: "string" + AppArmorProfile: + type: "string" + ExecIDs: + description: "IDs of exec instances that are running in the container." + type: "array" + items: + type: "string" + x-nullable: true + HostConfig: + $ref: "#/definitions/HostConfig" + GraphDriver: + $ref: "#/definitions/GraphDriverData" + SizeRw: + description: | + The size of files that have been created or changed by this + container. + type: "integer" + format: "int64" + SizeRootFs: + description: "The total size of all the files in this container." 
+ type: "integer" + format: "int64" + Mounts: + type: "array" + items: + $ref: "#/definitions/MountPoint" + Config: + $ref: "#/definitions/ContainerConfig" + NetworkSettings: + $ref: "#/definitions/NetworkSettings" + examples: + application/json: + AppArmorProfile: "" + Args: + - "-c" + - "exit 9" + Config: + AttachStderr: true + AttachStdin: false + AttachStdout: true + Cmd: + - "/bin/sh" + - "-c" + - "exit 9" + Domainname: "" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Healthcheck: + Test: ["CMD-SHELL", "exit 0"] + Hostname: "ba033ac44011" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + MacAddress: "" + NetworkDisabled: false + OpenStdin: false + StdinOnce: false + Tty: false + User: "" + Volumes: + /volumes/data: {} + WorkingDir: "" + StopSignal: "SIGTERM" + StopTimeout: 10 + Created: "2015-01-06T15:47:31.485331387Z" + Driver: "devicemapper" + ExecIDs: + - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" + - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" + HostConfig: + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 0 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteIOps: + - {} + ContainerIDFile: "" + CpusetCpus: "" + CpusetMems: "" + CpuPercent: 80 + CpuShares: 0 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + Devices: [] + DeviceRequests: + - Driver: "nvidia" + Count: -1 + DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] + Capabilities: [["gpu", "nvidia", "compute"]] + Options: + property1: "string" + property2: "string" + IpcMode: "" + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + OomKillDisable: false + OomScoreAdj: 500 + NetworkMode: "bridge" + PidMode: "" + PortBindings: {} + Privileged: false + ReadonlyRootfs: false + PublishAllPorts: false + RestartPolicy: + MaximumRetryCount: 2 + Name: "on-failure" + LogConfig: + Type: "json-file" + Sysctls: + net.ipv4.ip_forward: "1" + Ulimits: + - {} + VolumeDriver: "" + ShmSize: 67108864 + HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname" + HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts" + LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log" + Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39" + Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2" + MountLabel: "" + Name: "/boring_euclid" + NetworkSettings: + Bridge: "" + SandboxID: "" + HairpinMode: false + LinkLocalIPv6Address: "" + LinkLocalIPv6PrefixLen: 0 + SandboxKey: "" + EndpointID: "" + Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + IPAddress: "" + IPPrefixLen: 0 + IPv6Gateway: "" + MacAddress: "" + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Path: "/bin/sh" + ProcessLabel: "" + ResolvConfPath: 
"/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf" + RestartCount: 1 + State: + Error: "" + ExitCode: 9 + FinishedAt: "2015-01-06T15:47:32.080254511Z" + Health: + Status: "healthy" + FailingStreak: 0 + Log: + - Start: "2019-12-22T10:59:05.6385933Z" + End: "2019-12-22T10:59:05.8078452Z" + ExitCode: 0 + Output: "" + OOMKilled: false + Dead: false + Paused: false + Pid: 0 + Restarting: false + Running: true + StartedAt: "2015-01-06T15:47:32.072697474Z" + Status: "running" + Mounts: + - Name: "fac362...80535" + Source: "/data" + Destination: "/data" + Driver: "local" + Mode: "ro,Z" + RW: false + Propagation: "" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "size" + in: "query" + type: "boolean" + default: false + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" + tags: ["Container"] + /containers/{id}/top: + get: + summary: "List processes running inside a container" + description: | + On Unix systems, this is done by running the `ps` command. This endpoint + is not supported on Windows. + operationId: "ContainerTop" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "ContainerTopResponse" + description: "OK response to ContainerTop operation" + properties: + Titles: + description: "The ps column titles" + type: "array" + items: + type: "string" + Processes: + description: | + Each process running in the container, where each is process + is an array of values corresponding to the titles. + type: "array" + items: + type: "array" + items: + type: "string" + examples: + application/json: + Titles: + - "UID" + - "PID" + - "PPID" + - "C" + - "STIME" + - "TTY" + - "TIME" + - "CMD" + Processes: + - + - "root" + - "13642" + - "882" + - "0" + - "17:03" + - "pts/0" + - "00:00:00" + - "/bin/bash" + - + - "root" + - "13735" + - "13642" + - "0" + - "17:06" + - "pts/0" + - "00:00:00" + - "sleep 10" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "ps_args" + in: "query" + description: "The arguments to pass to `ps`. For example, `aux`" + type: "string" + default: "-ef" + tags: ["Container"] + /containers/{id}/logs: + get: + summary: "Get container logs" + description: | + Get `stdout` and `stderr` logs from a container. + + Note: This endpoint works only for containers with the `json-file` or + `journald` logging driver. + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + operationId: "ContainerLogs" + responses: + 200: + description: | + logs returned as a stream in response body. + For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + Note that unlike the attach endpoint, the logs endpoint does not + upgrade the connection and does not set Content-Type. 
+ schema: + type: "string" + format: "binary" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "follow" + in: "query" + description: "Keep connection after returning logs." + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "until" + in: "query" + description: "Only return logs before this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Container"] + /containers/{id}/changes: + get: + summary: "Get changes on a container’s filesystem" + description: | + Returns which files in a container's filesystem have been added, deleted, + or modified. The `Kind` of modification can be one of: + + - `0`: Modified + - `1`: Added + - `2`: Deleted + operationId: "ContainerChanges" + produces: ["application/json"] + responses: + 200: + description: "The list of changes" + schema: + type: "array" + items: + type: "object" + x-go-name: "ContainerChangeResponseItem" + title: "ContainerChangeResponseItem" + description: "change item in response to ContainerChanges operation" + required: [Path, Kind] + properties: + Path: + description: "Path to file that has changed" + type: "string" + x-nullable: false + Kind: + description: "Kind of change" + type: "integer" + format: "uint8" + enum: [0, 1, 2] + x-nullable: false + examples: + application/json: + - Path: "/dev" + Kind: 0 + - Path: "/dev/kmsg" + Kind: 1 + - Path: "/test" + Kind: 1 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/export: + get: + summary: "Export a container" + description: "Export the contents of a container as a tarball." 
+ operationId: "ContainerExport" + produces: + - "application/octet-stream" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/stats: + get: + summary: "Get container stats based on resource usage" + description: | + This endpoint returns a live stream of a container’s resource usage + statistics. + + The `precpu_stats` is the CPU statistic of the *previous* read, and is + used to calculate the CPU usage percentage. It is not an exact copy + of the `cpu_stats` field. + + If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is + nil then for compatibility with older daemons the length of the + corresponding `cpu_usage.percpu_usage` array should be used. + + On a cgroup v2 host, the following fields are not set + * `blkio_stats`: all fields other than `io_service_bytes_recursive` + * `cpu_stats`: `cpu_usage.percpu_usage` + * `memory_stats`: `max_usage` and `failcnt` + Also, `memory_stats.stats` fields are incompatible with cgroup v1. + + To calculate the values shown by the `stats` command of the docker cli tool + the following formulas can be used: + * used_memory = `memory_stats.usage - memory_stats.stats.cache` + * available_memory = `memory_stats.limit` + * Memory usage % = `(used_memory / available_memory) * 100.0` + * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` + * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` + * number_cpus = `lenght(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` + * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` + operationId: "ContainerStats" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + type: "object" + examples: + application/json: + read: "2015-01-08T22:57:31.547920715Z" + pids_stats: + current: 3 + networks: + eth0: + rx_bytes: 5338 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 36 + tx_bytes: 648 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 8 + eth5: + rx_bytes: 4641 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 26 + tx_bytes: 690 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 9 + memory_stats: + stats: + total_pgmajfault: 0 + cache: 0 + mapped_file: 0 + total_inactive_file: 0 + pgpgout: 414 + rss: 6537216 + total_mapped_file: 0 + writeback: 0 + unevictable: 0 + pgpgin: 477 + total_unevictable: 0 + pgmajfault: 0 + total_rss: 6537216 + total_rss_huge: 6291456 + total_writeback: 0 + total_inactive_anon: 0 + rss_huge: 6291456 + hierarchical_memory_limit: 67108864 + total_pgfault: 964 + total_active_file: 0 + active_anon: 6537216 + total_active_anon: 6537216 + total_pgpgout: 414 + total_cache: 0 + inactive_anon: 0 + active_file: 0 + pgfault: 964 + inactive_file: 0 + total_pgpgin: 477 + max_usage: 6651904 + usage: 6537216 + failcnt: 0 + limit: 67108864 + blkio_stats: {} + cpu_stats: + cpu_usage: + percpu_usage: + - 8646879 + - 24472255 + - 36438778 + - 30657443 + usage_in_usermode: 50000000 + total_usage: 100215355 + usage_in_kernelmode: 30000000 + system_cpu_usage: 739306590000000 + online_cpus: 4 + throttling_data: + periods: 0 + throttled_periods: 0 + throttled_time: 0 + precpu_stats: + cpu_usage: + 
percpu_usage: + - 8646879 + - 24350896 + - 36438778 + - 30657443 + usage_in_usermode: 50000000 + total_usage: 100093996 + usage_in_kernelmode: 30000000 + system_cpu_usage: 9492140000000 + online_cpus: 4 + throttling_data: + periods: 0 + throttled_periods: 0 + throttled_time: 0 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "stream" + in: "query" + description: | + Stream the output. If false, the stats will be output once and then + it will disconnect. + type: "boolean" + default: true + - name: "one-shot" + in: "query" + description: | + Only get a single stat instead of waiting for 2 cycles. Must be used + with `stream=false`. + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/resize: + post: + summary: "Resize a container TTY" + description: "Resize the TTY for a container." + operationId: "ContainerResize" + consumes: + - "application/octet-stream" + produces: + - "text/plain" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "cannot resize container" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "h" + in: "query" + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Container"] + /containers/{id}/start: + post: + summary: "Start a container" + operationId: "ContainerStart" + responses: + 204: + description: "no error" + 304: + description: "container already started" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container. Format is a + single character `[a-Z]` or `ctrl-` where `` is one + of: `a-z`, `@`, `^`, `[`, `,` or `_`. + type: "string" + tags: ["Container"] + /containers/{id}/stop: + post: + summary: "Stop a container" + operationId: "ContainerStop" + responses: + 204: + description: "no error" + 304: + description: "container already stopped" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). 
+ type: "string" 
+ - name: "t" 
+ in: "query" 
+ description: "Number of seconds to wait before killing the container" 
+ type: "integer" 
+ tags: ["Container"] 
+ /containers/{id}/restart: 
+ post: 
+ summary: "Restart a container" 
+ operationId: "ContainerRestart" 
+ responses: 
+ 204: 
+ description: "no error" 
+ 404: 
+ description: "no such container" 
+ schema: 
+ $ref: "#/definitions/ErrorResponse" 
+ examples: 
+ application/json: 
+ message: "No such container: c2ada9df5af8" 
+ 500: 
+ description: "server error" 
+ schema: 
+ $ref: "#/definitions/ErrorResponse" 
+ parameters: 
+ - name: "id" 
+ in: "path" 
+ required: true 
+ description: "ID or name of the container" 
+ type: "string" 
+ - name: "signal" 
+ in: "query" 
+ description: | 
+ Signal to send to the container as an integer or string (e.g. `SIGINT`). 
+ type: "string" 
+ - name: "t" 
+ in: "query" 
+ description: "Number of seconds to wait before killing the container" 
+ type: "integer" 
+ tags: ["Container"] 
+ /containers/{id}/kill: 
+ post: 
+ summary: "Kill a container" 
+ description: | 
+ Send a POSIX signal to a container, defaulting to killing the 
+ container. 
+ operationId: "ContainerKill" 
+ responses: 
+ 204: 
+ description: "no error" 
+ 404: 
+ description: "no such container" 
+ schema: 
+ $ref: "#/definitions/ErrorResponse" 
+ examples: 
+ application/json: 
+ message: "No such container: c2ada9df5af8" 
+ 409: 
+ description: "container is not running" 
+ schema: 
+ $ref: "#/definitions/ErrorResponse" 
+ examples: 
+ application/json: 
+ message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running" 
+ 500: 
+ description: "server error" 
+ schema: 
+ $ref: "#/definitions/ErrorResponse" 
+ parameters: 
+ - name: "id" 
+ in: "path" 
+ required: true 
+ description: "ID or name of the container" 
+ type: "string" 
+ - name: "signal" 
+ in: "query" 
+ description: | 
+ Signal to send to the container as an integer or string (e.g. `SIGINT`). 
+ type: "string" 
+ default: "SIGKILL" 
+ tags: ["Container"] 
+ /containers/{id}/update: 
+ post: 
+ summary: "Update a container" 
+ description: | 
+ Change various configuration options of a container without having to 
+ recreate it. 
+ operationId: "ContainerUpdate" 
+ consumes: ["application/json"] 
+ produces: ["application/json"] 
+ responses: 
+ 200: 
+ description: "The container has been updated." 
+ schema: + type: "object" + title: "ContainerUpdateResponse" + description: "OK response to ContainerUpdate operation" + properties: + Warnings: + type: "array" + items: + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "update" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + example: + BlkioWeight: 300 + CpuShares: 512 + CpuPeriod: 100000 + CpuQuota: 50000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpusetCpus: "0,1" + CpusetMems: "0" + Memory: 314572800 + MemorySwap: 514288000 + MemoryReservation: 209715200 + RestartPolicy: + MaximumRetryCount: 4 + Name: "on-failure" + tags: ["Container"] + /containers/{id}/rename: + post: + summary: "Rename a container" + operationId: "ContainerRename" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "name already in use" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "name" + in: "query" + required: true + description: "New name for the container" + type: "string" + tags: ["Container"] + /containers/{id}/pause: + post: + summary: "Pause a container" + description: | + Use the freezer cgroup to suspend all processes in a container. + + Traditionally, when suspending a process the `SIGSTOP` signal is used, + which is observable by the process being suspended. With the freezer + cgroup the process is unaware, and unable to capture, that it is being + suspended, and subsequently resumed. + operationId: "ContainerPause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/unpause: + post: + summary: "Unpause a container" + description: "Resume a container which has been paused." + operationId: "ContainerUnpause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/attach: + post: + summary: "Attach to a container" + description: | + Attach to a container to read its output or send it input. 
You can attach 
+ to the same container multiple times and you can reattach to containers 
+ that have been detached. 
+ 
+ Either the `stream` or `logs` parameter must be `true` for this endpoint 
+ to do anything. 
+ 
+ See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) 
+ for more details. 
+ 
+ ### Hijacking 
+ 
+ This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, 
+ and `stderr` on the same socket. 
+ 
+ This is the response from the daemon for an attach request: 
+ 
+ ``` 
+ HTTP/1.1 200 OK 
+ Content-Type: application/vnd.docker.raw-stream 
+ 
+ [STREAM] 
+ ``` 
+ 
+ After the headers and two new lines, the TCP connection can now be used 
+ for raw, bidirectional communication between the client and server. 
+ 
+ To hint potential proxies about connection hijacking, the Docker client 
+ can also optionally send connection upgrade headers. 
+ 
+ For example, the client sends this request to upgrade the connection: 
+ 
+ ``` 
+ POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 
+ Upgrade: tcp 
+ Connection: Upgrade 
+ ``` 
+ 
+ The Docker daemon will respond with a `101 UPGRADED` response, and will 
+ similarly follow with the raw stream: 
+ 
+ ``` 
+ HTTP/1.1 101 UPGRADED 
+ Content-Type: application/vnd.docker.raw-stream 
+ Connection: Upgrade 
+ Upgrade: tcp 
+ 
+ [STREAM] 
+ ``` 
+ 
+ ### Stream format 
+ 
+ When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), 
+ the HTTP Content-Type header is set to application/vnd.docker.multiplexed-stream 
+ and the stream over the hijacked connection is multiplexed to separate out 
+ `stdout` and `stderr`. The stream consists of a series of frames, each 
+ containing a header and a payload. 
+ 
+ The header contains the information which the stream writes (`stdout` or 
+ `stderr`). It also contains the size of the associated frame encoded in 
+ the last four bytes (`uint32`). 
+ 
+ It is encoded on the first eight bytes like this: 
+ 
+ ```go 
+ header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} 
+ ``` 
+ 
+ `STREAM_TYPE` can be: 
+ 
+ - 0: `stdin` (is written on `stdout`) 
+ - 1: `stdout` 
+ - 2: `stderr` 
+ 
+ `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size 
+ encoded as big endian. 
+ 
+ Following the header is the payload, which is the specified number of 
+ bytes of `STREAM_TYPE`. 
+ 
+ The simplest way to implement this protocol is the following: 
+ 
+ 1. Read 8 bytes. 
+ 2. Choose `stdout` or `stderr` depending on the first byte. 
+ 3. Extract the frame size from the last four bytes. 
+ 4. Read the extracted size and output it on the correct output. 
+ 5. Goto 1. 
+ 
+ ### Stream format when using a TTY 
+ 
+ When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), 
+ the stream is not multiplexed. The data exchanged over the hijacked 
+ connection is simply the raw data from the process PTY and client's 
+ `stdin`. 
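+ 
+ As an informal sketch (not part of the API contract), a reader of the 
+ multiplexed (non-TTY) stream described above could decode a single frame 
+ in Go roughly as follows, assuming the hijacked connection is available 
+ as an `io.Reader` named `conn`: 
+ 
+ ```go 
+ // readFrame reads one multiplexed frame: an 8-byte header followed by a 
+ // payload whose size is encoded big endian in the header's last four 
+ // bytes. It uses only the standard "encoding/binary" and "io" packages. 
+ func readFrame(conn io.Reader) (streamType byte, payload []byte, err error) { 
+     header := make([]byte, 8) 
+     if _, err = io.ReadFull(conn, header); err != nil { 
+         return 0, nil, err 
+     } 
+     size := binary.BigEndian.Uint32(header[4:8]) 
+     payload = make([]byte, size) 
+     _, err = io.ReadFull(conn, payload) 
+     return header[0], payload, err 
+ } 
+ ``` 
+ 
+ Calling this in a loop and writing each payload to `stdout` or `stderr` 
+ according to `streamType` implements steps 1 through 5 above. 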
+ + operationId: "ContainerAttach" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container.Format is a single + character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, + `@`, `^`, `[`, `,` or `_`. + type: "string" + - name: "logs" + in: "query" + description: | + Replay previous logs from the container. + + This is useful for attaching to a container that has started and you + want to output everything since the container started. + + If `stream` is also enabled, once all the previous output has been + returned, it will seamlessly transition into streaming current + output. + type: "boolean" + default: false + - name: "stream" + in: "query" + description: | + Stream attached streams from the time the request was made onwards. + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/attach/ws: + get: + summary: "Attach to a container via a websocket" + operationId: "ContainerAttachWebsocket" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container.Format is a single + character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, + `@`, `^`, `[`, `,`, or `_`. + type: "string" + - name: "logs" + in: "query" + description: "Return logs" + type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Return stream" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/wait: + post: + summary: "Wait for a container" + description: "Block until a container stops, then returns the exit code." 
+ operationId: "ContainerWait" + produces: ["application/json"] + responses: + 200: + description: "The container has exit." + schema: + $ref: "#/definitions/ContainerWaitResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "condition" + in: "query" + description: | + Wait until a container state reaches the given condition. + + Defaults to `not-running` if omitted or empty. + type: "string" + enum: + - "not-running" + - "next-exit" + - "removed" + default: "not-running" + tags: ["Container"] + /containers/{id}: + delete: + summary: "Remove a container" + operationId: "ContainerDelete" + responses: + 204: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: | + You cannot remove a running container: c2ada9df5af8. Stop the + container before attempting removal or force remove + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "v" + in: "query" + description: "Remove anonymous volumes associated with the container." + type: "boolean" + default: false + - name: "force" + in: "query" + description: "If the container is running, kill it before removing it." + type: "boolean" + default: false + - name: "link" + in: "query" + description: "Remove the specified link associated with the container." + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/archive: + head: + summary: "Get information about files in a container" + description: | + A response header `X-Docker-Container-Path-Stat` is returned, containing + a base64 - encoded JSON object with some filesystem header information + about the path. + operationId: "ContainerArchiveInfo" + responses: + 200: + description: "no error" + headers: + X-Docker-Container-Path-Stat: + type: "string" + description: | + A base64 - encoded JSON object with some filesystem header + information about the path + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + get: + summary: "Get an archive of a filesystem resource in a container" + description: "Get a tar archive of a resource in the filesystem of container id." 
+ operationId: "ContainerArchive" 
+ produces: ["application/x-tar"] 
+ responses: 
+ 200: 
+ description: "no error" 
+ 400: 
+ description: "Bad parameter" 
+ schema: 
+ $ref: "#/definitions/ErrorResponse" 
+ 404: 
+ description: "Container or path does not exist" 
+ schema: 
+ $ref: "#/definitions/ErrorResponse" 
+ examples: 
+ application/json: 
+ message: "No such container: c2ada9df5af8" 
+ 500: 
+ description: "server error" 
+ schema: 
+ $ref: "#/definitions/ErrorResponse" 
+ parameters: 
+ - name: "id" 
+ in: "path" 
+ required: true 
+ description: "ID or name of the container" 
+ type: "string" 
+ - name: "path" 
+ in: "query" 
+ required: true 
+ description: "Resource in the container’s filesystem to archive." 
+ type: "string" 
+ tags: ["Container"] 
+ put: 
+ summary: "Extract an archive of files or folders to a directory in a container" 
+ description: | 
+ Upload a tar archive to be extracted to a path in the filesystem of container id. 
+ The `path` parameter is asserted to be a directory. If it exists as a file, a 400 error 
+ is returned with the message "not a directory". 
+ operationId: "PutContainerArchive" 
+ consumes: ["application/x-tar", "application/octet-stream"] 
+ responses: 
+ 200: 
+ description: "The content was extracted successfully" 
+ 400: 
+ description: "Bad parameter" 
+ schema: 
+ $ref: "#/definitions/ErrorResponse" 
+ examples: 
+ application/json: 
+ message: "not a directory" 
+ 403: 
+ description: "Permission denied, the volume or container rootfs is marked as read-only." 
+ schema: 
+ $ref: "#/definitions/ErrorResponse" 
+ 404: 
+ description: "No such container or path does not exist inside the container" 
+ schema: 
+ $ref: "#/definitions/ErrorResponse" 
+ examples: 
+ application/json: 
+ message: "No such container: c2ada9df5af8" 
+ 500: 
+ description: "Server error" 
+ schema: 
+ $ref: "#/definitions/ErrorResponse" 
+ parameters: 
+ - name: "id" 
+ in: "path" 
+ required: true 
+ description: "ID or name of the container" 
+ type: "string" 
+ - name: "path" 
+ in: "query" 
+ required: true 
+ description: "Path to a directory in the container to extract the archive’s contents into." 
+ type: "string" 
+ - name: "noOverwriteDirNonDir" 
+ in: "query" 
+ description: | 
+ If `1`, `true`, or `True` then it will be an error if unpacking the 
+ given content would cause an existing directory to be replaced with 
+ a non-directory and vice versa. 
+ type: "string" 
+ - name: "copyUIDGID" 
+ in: "query" 
+ description: | 
+ If `1` or `true`, then it will copy UID/GID maps to the destination 
+ file or directory. 
+ type: "string" 
+ - name: "inputStream" 
+ in: "body" 
+ required: true 
+ description: | 
+ The input stream must be a tar archive compressed with one of the 
+ following algorithms: `identity` (no compression), `gzip`, `bzip2`, 
+ or `xz`. 
+ schema: 
+ type: "string" 
+ format: "binary" 
+ tags: ["Container"] 
+ /containers/prune: 
+ post: 
+ summary: "Delete stopped containers" 
+ produces: 
+ - "application/json" 
+ operationId: "ContainerPrune" 
+ parameters: 
+ - name: "filters" 
+ in: "query" 
+ description: | 
+ Filters to process on the prune list, encoded as JSON (a `map[string][]string`). 
+ 
+ Available filters: 
+ - `until=<timestamp>` Prune containers created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. 
+ - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune containers with (or without, in case `label!=...` is used) the specified labels. 
+ type: "string" 
+ responses: 
+ 200: 
+ description: "No error" 
+ schema: 
+ type: "object" 
+ title: "ContainerPruneResponse" 
+ properties: 
+ ContainersDeleted: 
+ description: "Container IDs that were deleted" 
+ type: "array" 
+ items: 
+ type: "string" 
+ SpaceReclaimed: 
+ description: "Disk space reclaimed in bytes" 
+ type: "integer" 
+ format: "int64" 
+ 500: 
+ description: "Server error" 
+ schema: 
+ $ref: "#/definitions/ErrorResponse" 
+ tags: ["Container"] 
+ /images/json: 
+ get: 
+ summary: "List Images" 
+ description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." 
+ operationId: "ImageList" 
+ produces: 
+ - "application/json" 
+ responses: 
+ 200: 
+ description: "Summary image data for the images matching the query" 
+ schema: 
+ type: "array" 
+ items: 
+ $ref: "#/definitions/ImageSummary" 
+ 500: 
+ description: "server error" 
+ schema: 
+ $ref: "#/definitions/ErrorResponse" 
+ parameters: 
+ - name: "all" 
+ in: "query" 
+ description: "Show all images. Only images from a final layer (no children) are shown by default." 
+ type: "boolean" 
+ default: false 
+ - name: "filters" 
+ in: "query" 
+ description: | 
+ A JSON encoded value of the filters (a `map[string][]string`) to 
+ process on the images list. 
+ 
+ Available filters: 
+ 
+ - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) 
+ - `dangling=true` 
+ - `label=key` or `label="key=value"` of an image label 
+ - `reference`=(`<image-name>[:<tag>]`) 
+ - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`) 
+ type: "string" 
+ - name: "shared-size" 
+ in: "query" 
+ description: "Compute and show shared size as a `SharedSize` field on each image." 
+ type: "boolean" 
+ default: false 
+ - name: "digests" 
+ in: "query" 
+ description: "Show digest information as a `RepoDigests` field on each image." 
+ type: "boolean" 
+ default: false 
+ tags: ["Image"] 
+ /build: 
+ post: 
+ summary: "Build an image" 
+ description: | 
+ Build an image from a tar archive with a `Dockerfile` in it. 
+ 
+ The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). 
+ 
+ The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. 
+ 
+ The build is canceled if the client drops the connection by quitting or being killed. 
+ operationId: "ImageBuild" 
+ consumes: 
+ - "application/octet-stream" 
+ produces: 
+ - "application/json" 
+ parameters: 
+ - name: "inputStream" 
+ in: "body" 
+ description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." 
+ schema: 
+ type: "string" 
+ format: "binary" 
+ - name: "dockerfile" 
+ in: "query" 
+ description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." 
+ type: "string" 
+ default: "Dockerfile" 
+ - name: "t" 
+ in: "query" 
+ description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." 
+ type: "string" + - name: "extrahosts" + in: "query" + description: "Extra hosts to add to /etc/hosts" + type: "string" + - name: "remote" + in: "query" + description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." + type: "string" + - name: "q" + in: "query" + description: "Suppress verbose build output." + type: "boolean" + default: false + - name: "nocache" + in: "query" + description: "Do not use the cache when building the image." + type: "boolean" + default: false + - name: "cachefrom" + in: "query" + description: "JSON array of images used for build cache resolution." + type: "string" + - name: "pull" + in: "query" + description: "Attempt to pull the image even if an older image exists locally." + type: "string" + - name: "rm" + in: "query" + description: "Remove intermediate containers after a successful build." + type: "boolean" + default: true + - name: "forcerm" + in: "query" + description: "Always remove intermediate containers, even upon failure." + type: "boolean" + default: false + - name: "memory" + in: "query" + description: "Set memory limit for build." + type: "integer" + - name: "memswap" + in: "query" + description: "Total memory (memory + swap). Set as `-1` to disable swap." + type: "integer" + - name: "cpushares" + in: "query" + description: "CPU shares (relative weight)." + type: "integer" + - name: "cpusetcpus" + in: "query" + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." + type: "string" + - name: "cpuperiod" + in: "query" + description: "The length of a CPU period in microseconds." + type: "integer" + - name: "cpuquota" + in: "query" + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + - name: "buildargs" + in: "query" + description: > + JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker + uses the buildargs as the environment context for commands run via the `Dockerfile` RUN + instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for + passing secret values. + + + For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the + query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded. + + + [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg) + type: "string" + - name: "shmsize" + in: "query" + description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB." + type: "integer" + - name: "squash" + in: "query" + description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" + type: "boolean" + - name: "labels" + in: "query" + description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." + type: "string" + - name: "networkmode" + in: "query" + description: | + Sets the networking mode for the run commands during build. Supported + standard values are: `bridge`, `host`, `none`, and `container:`. 
+ Any other value is taken as a custom network's name or ID to which this
+ container should connect.
+ type: "string"
+ - name: "Content-type"
+ in: "header"
+ type: "string"
+ enum:
+ - "application/x-tar"
+ default: "application/x-tar"
+ - name: "X-Registry-Config"
+ in: "header"
+ description: |
+ This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to.
+
+ The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example:
+
+ ```
+ {
+ "docker.example.com": {
+ "username": "janedoe",
+ "password": "hunter2"
+ },
+ "https://index.docker.io/v1/": {
+ "username": "mobydock",
+ "password": "conta1n3rize14"
+ }
+ }
+ ```
+
+ Only the registry domain name (and port if not the default 443) is required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API.
+ type: "string"
+ - name: "platform"
+ in: "query"
+ description: "Platform in the format os[/arch[/variant]]"
+ type: "string"
+ default: ""
+ - name: "target"
+ in: "query"
+ description: "Target build stage"
+ type: "string"
+ default: ""
+ - name: "outputs"
+ in: "query"
+ description: "BuildKit output configuration"
+ type: "string"
+ default: ""
+ responses:
+ 200:
+ description: "no error"
+ 400:
+ description: "Bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Image"]
+ /build/prune:
+ post:
+ summary: "Delete builder cache"
+ produces:
+ - "application/json"
+ operationId: "BuildPrune"
+ parameters:
+ - name: "keep-storage"
+ in: "query"
+ description: "Amount of disk space in bytes to keep for cache"
+ type: "integer"
+ format: "int64"
+ - name: "all"
+ in: "query"
+ type: "boolean"
+ description: "Remove all types of build cache"
+ - name: "filters"
+ in: "query"
+ type: "string"
+ description: |
+ A JSON encoded value of the filters (a `map[string][]string`) to
+ process on the list of build cache objects.
+
+ Available filters:
+
+ - `until=`: duration relative to daemon's time, during which build cache was not used, in Go's duration format (e.g., '24h')
+ - `id=`
+ - `parent=`
+ - `type=`
+ - `description=`
+ - `inuse`
+ - `shared`
+ - `private`
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ type: "object"
+ title: "BuildPruneResponse"
+ properties:
+ CachesDeleted:
+ type: "array"
+ items:
+ description: "ID of build cache object"
+ type: "string"
+ SpaceReclaimed:
+ description: "Disk space reclaimed in bytes"
+ type: "integer"
+ format: "int64"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Image"]
+ /images/create:
+ post:
+ summary: "Create an image"
+ description: "Create an image by either pulling it from a registry or importing it."
+ operationId: "ImageCreate"
+ consumes:
+ - "text/plain"
+ - "application/octet-stream"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "no error"
+ 404:
+ description: "repository does not exist or no read access"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "fromImage"
+ in: "query"
+ description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image.
The pull is cancelled if the HTTP connection is closed."
+ type: "string"
+ - name: "fromSrc"
+ in: "query"
+ description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image."
+ type: "string"
+ - name: "repo"
+ in: "query"
+ description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image."
+ type: "string"
+ - name: "tag"
+ in: "query"
+ description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled."
+ type: "string"
+ - name: "message"
+ in: "query"
+ description: "Set commit message for imported image."
+ type: "string"
+ - name: "inputImage"
+ in: "body"
+ description: "Image content if the value `-` has been specified in fromSrc query parameter"
+ schema:
+ type: "string"
+ required: false
+ - name: "X-Registry-Auth"
+ in: "header"
+ description: |
+ A base64url-encoded auth configuration.
+
+ Refer to the [authentication section](#section/Authentication) for
+ details.
+ type: "string"
+ - name: "changes"
+ in: "query"
+ description: |
+ Apply `Dockerfile` instructions to the image that is created,
+ for example: `changes=ENV DEBUG=true`.
+ Note that `ENV DEBUG=true` should be URI component encoded.
+
+ Supported `Dockerfile` instructions:
+ `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR`
+ type: "array"
+ items:
+ type: "string"
+ - name: "platform"
+ in: "query"
+ description: |
+ Platform in the format os[/arch[/variant]].
+
+ When used in combination with the `fromImage` option, the daemon checks
+ if the given image is present in the local image cache with the given
+ OS and Architecture, and otherwise attempts to pull the image. If the
+ option is not set, the host's native OS and Architecture are used.
+ If the given image does not exist in the local image cache, the daemon
+ attempts to pull the image with the host's native OS and Architecture.
+ If the given image does exist in the local image cache, but its OS or
+ architecture does not match, a warning is produced.
+
+ When used with the `fromSrc` option to import an image from an archive,
+ this option sets the platform information for the imported image. If
+ the option is not set, the host's native OS and Architecture are used
+ for the imported image.
+ type: "string"
+ default: ""
+ tags: ["Image"]
+ /images/{name}/json:
+ get:
+ summary: "Inspect an image"
+ description: "Return low-level information about an image."
+ operationId: "ImageInspect"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ $ref: "#/definitions/ImageInspect"
+ 404:
+ description: "No such image"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such image: someimage (tag: latest)"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "name"
+ in: "path"
+ description: "Image name or ID"
+ type: "string"
+ required: true
+ tags: ["Image"]
+ /images/{name}/history:
+ get:
+ summary: "Get the history of an image"
+ description: "Return parent layers of an image."
+ operationId: "ImageHistory" + produces: ["application/json"] + responses: + 200: + description: "List of image layers" + schema: + type: "array" + items: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false + examples: + application/json: + - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" + Created: 1398108230 + CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" + Tags: + - "ubuntu:lucid" + - "ubuntu:10.04" + Size: 182964289 + Comment: "" + - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" + Created: 1398108222 + CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" + Tags: [] + Size: 0 + Comment: "" + - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" + Created: 1371157430 + CreatedBy: "" + Tags: + - "scratch12:latest" + - "scratch:latest" + Size: 0 + Comment: "Imported from -" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: ["Image"] + /images/{name}/push: + post: + summary: "Push an image" + description: | + Push an image to a registry. + + If you wish to push an image on to a private registry, that image must + already have a tag which references the registry. For example, + `registry.example.com/myimage:latest`. + + The push is cancelled if the HTTP connection is closed. + operationId: "ImagePush" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID." + type: "string" + required: true + - name: "tag" + in: "query" + description: "The tag to associate with the image on the registry." + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + required: true + tags: ["Image"] + /images/{name}/tag: + post: + summary: "Tag an image" + description: "Tag an image so that it becomes part of a repository." 
+ operationId: "ImageTag" + responses: + 201: + description: "No error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID to tag." + type: "string" + required: true + - name: "repo" + in: "query" + description: "The repository to tag in. For example, `someuser/someimage`." + type: "string" + - name: "tag" + in: "query" + description: "The name of the new tag." + type: "string" + tags: ["Image"] + /images/{name}: + delete: + summary: "Remove an image" + description: | + Remove an image, along with any untagged parent images that were + referenced by that image. + + Images can't be removed if they have descendant images, are being + used by a running container or are being used by a build. + operationId: "ImageDelete" + produces: ["application/json"] + responses: + 200: + description: "The image was deleted successfully" + schema: + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + examples: + application/json: + - Untagged: "3e2f21a89f" + - Deleted: "3e2f21a89f" + - Deleted: "53b4f83ac9" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "force" + in: "query" + description: "Remove the image even if it is being used by stopped containers or has other tags" + type: "boolean" + default: false + - name: "noprune" + in: "query" + description: "Do not delete untagged parent images" + type: "boolean" + default: false + tags: ["Image"] + /images/search: + get: + summary: "Search images" + description: "Search for an image on Docker Hub." + operationId: "ImageSearch" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + type: "object" + title: "ImageSearchResponseItem" + properties: + description: + type: "string" + is_official: + type: "boolean" + is_automated: + type: "boolean" + name: + type: "string" + star_count: + type: "integer" + examples: + application/json: + - description: "" + is_official: false + is_automated: false + name: "wma55/u1210sshd" + star_count: 0 + - description: "" + is_official: false + is_automated: false + name: "jdswinbank/sshd" + star_count: 0 + - description: "" + is_official: false + is_automated: false + name: "vgauthier/sshd" + star_count: 0 + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "term" + in: "query" + description: "Term to search" + type: "string" + required: true + - name: "limit" + in: "query" + description: "Maximum number of results to return" + type: "integer" + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: + + - `is-automated=(true|false)` + - `is-official=(true|false)` + - `stars=` Matches images that has at least 'number' stars. 
+ type: "string" + tags: ["Image"] + /images/prune: + post: + summary: "Delete unused images" + produces: + - "application/json" + operationId: "ImagePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: + + - `dangling=` When set to `true` (or `1`), prune only + unused *and* untagged images. When set to `false` + (or `0`), all unused images are pruned. + - `until=` Prune images created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune images with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ImagePruneResponse" + properties: + ImagesDeleted: + description: "Images that were deleted" + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /auth: + post: + summary: "Check auth configuration" + description: | + Validate credentials for a registry and, if available, get an identity + token for accessing the registry without password. + operationId: "SystemAuth" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "An identity token was generated successfully." + schema: + type: "object" + title: "SystemAuthResponse" + required: [Status] + properties: + Status: + description: "The status of the authentication" + type: "string" + x-nullable: false + IdentityToken: + description: "An opaque token used to authenticate a user after a successful login" + type: "string" + x-nullable: false + examples: + application/json: + Status: "Login Succeeded" + IdentityToken: "9cbaf023786cd7..." + 204: + description: "No error" + 401: + description: "Auth error" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "authConfig" + in: "body" + description: "Authentication to check" + schema: + $ref: "#/definitions/AuthConfig" + tags: ["System"] + /info: + get: + summary: "Get system information" + operationId: "SystemInfo" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/SystemInfo" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /version: + get: + summary: "Get version" + description: "Returns the version of Docker that is running and various information about the system that Docker is running on." + operationId: "SystemVersion" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/SystemVersion" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /_ping: + get: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." 
+ operationId: "SystemPing" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "OK" + headers: + API-Version: + type: "string" + description: "Max API Version the server supports" + Builder-Version: + type: "string" + description: | + Default version of docker image builder + + The default on Linux is version "2" (BuildKit), but the daemon + can be configured to recommend version "1" (classic Builder). + Windows does not yet support BuildKit for native Windows images, + and uses "1" (classic builder) as a default. + + This value is a recommendation as advertised by the daemon, and + it is up to the client to choose which builder to use. + default: "2" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. + default: "inactive" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + headers: + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + tags: ["System"] + head: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." + operationId: "SystemPingHead" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "(empty)" + headers: + API-Version: + type: "string" + description: "Max API Version the server supports" + Builder-Version: + type: "string" + description: "Default version of docker image builder" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. 
+ default: "inactive" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /commit: + post: + summary: "Create a new image from a container" + operationId: "ImageCommit" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "containerConfig" + in: "body" + description: "The container configuration" + schema: + $ref: "#/definitions/ContainerConfig" + - name: "container" + in: "query" + description: "The ID or name of the container to commit" + type: "string" + - name: "repo" + in: "query" + description: "Repository name for the created image" + type: "string" + - name: "tag" + in: "query" + description: "Tag name for the create image" + type: "string" + - name: "comment" + in: "query" + description: "Commit message" + type: "string" + - name: "author" + in: "query" + description: "Author of the image (e.g., `John Hannibal Smith `)" + type: "string" + - name: "pause" + in: "query" + description: "Whether to pause the container before committing" + type: "boolean" + default: true + - name: "changes" + in: "query" + description: "`Dockerfile` instructions to apply while committing" + type: "string" + tags: ["Image"] + /events: + get: + summary: "Monitor events" + description: | + Stream real-time events from the server. + + Various objects within Docker report events when something happens to them. + + Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` + + Images report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` + + Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` + + Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune` + + The Docker daemon reports these events: `reload` + + Services report these events: `create`, `update`, and `remove` + + Nodes report these events: `create`, `update`, and `remove` + + Secrets report these events: `create`, `update`, and `remove` + + Configs report these events: `create`, `update`, and `remove` + + The Builder reports `prune` events + + operationId: "SystemEvents" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/EventMessage" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "since" + in: "query" + description: "Show events created since this timestamp then stream new events." + type: "string" + - name: "until" + in: "query" + description: "Show events created until this timestamp then stop streaming." 
+ type: "string" + - name: "filters" + in: "query" + description: | + A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters: + + - `config=` config name or ID + - `container=` container name or ID + - `daemon=` daemon name or ID + - `event=` event type + - `image=` image name or ID + - `label=` image or container label + - `network=` network name or ID + - `node=` node ID + - `plugin`= plugin name or ID + - `scope`= local or swarm + - `secret=` secret name or ID + - `service=` service name or ID + - `type=` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config` + - `volume=` volume name + type: "string" + tags: ["System"] + /system/df: + get: + summary: "Get data usage information" + operationId: "SystemDataUsage" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "SystemDataUsageResponse" + properties: + LayersSize: + type: "integer" + format: "int64" + Images: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + Containers: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + Volumes: + type: "array" + items: + $ref: "#/definitions/Volume" + BuildCache: + type: "array" + items: + $ref: "#/definitions/BuildCache" + example: + LayersSize: 1092588 + Images: + - + Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + ParentId: "" + RepoTags: + - "busybox:latest" + RepoDigests: + - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" + Created: 1466724217 + Size: 1092588 + SharedSize: 0 + VirtualSize: 1092588 + Labels: {} + Containers: 1 + Containers: + - + Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" + Names: + - "/top" + Image: "busybox" + ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + Command: "top" + Created: 1472592424 + Ports: [] + SizeRootFs: 1092588 + Labels: {} + State: "exited" + Status: "Exited (0) 56 minutes ago" + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + IPAMConfig: null + Links: null + Aliases: null + NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" + EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" + Gateway: "172.18.0.1" + IPAddress: "172.18.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Mounts: [] + Volumes: + - + Name: "my-volume" + Driver: "local" + Mountpoint: "/var/lib/docker/volumes/my-volume/_data" + Labels: null + Scope: "local" + Options: null + UsageData: + Size: 10920104 + RefCount: 2 + BuildCache: + - + ID: "hw53o5aio51xtltp5xjp8v7fx" + Parents: [] + Type: "regular" + Description: "pulled from docker.io/library/debian@sha256:234cb88d3020898631af0ccbbcca9a66ae7306ecd30c9720690858c1b007d2a0" + InUse: false + Shared: true + Size: 0 + CreatedAt: "2021-06-28T13:31:01.474619385Z" + LastUsedAt: "2021-07-07T22:02:32.738075951Z" + UsageCount: 26 + - + ID: "ndlpt0hhvkqcdfkputsk4cq9c" + Parents: ["ndlpt0hhvkqcdfkputsk4cq9c"] + Type: "regular" + Description: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" + InUse: false + Shared: true + Size: 51 + CreatedAt: "2021-06-28T13:31:03.002625487Z" + LastUsedAt: "2021-07-07T22:02:32.773909517Z" + UsageCount: 26 + 500: + description: "server error" + schema: + $ref: 
"#/definitions/ErrorResponse" + parameters: + - name: "type" + in: "query" + description: | + Object types, for which to compute and return data. + type: "array" + collectionFormat: multi + items: + type: "string" + enum: ["container", "image", "volume", "build-cache"] + tags: ["System"] + /images/{name}/get: + get: + summary: "Export an image" + description: | + Get a tarball containing all images and metadata for a repository. + + If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. + + ### Image tarball format + + An image tarball contains one directory per image layer (named using its long ID), each containing these files: + + - `VERSION`: currently `1.0` - the file format version + - `json`: detailed layer information, similar to `docker inspect layer_id` + - `layer.tar`: A tarfile containing the filesystem changes in this layer + + The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. + + If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. + + ```json + { + "hello-world": { + "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" + } + } + ``` + operationId: "ImageGet" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: ["Image"] + /images/get: + get: + summary: "Export several images" + description: | + Get a tarball containing all images and metadata for several image + repositories. + + For each value of the `names` parameter: if it is a specific name and + tag (e.g. `ubuntu:latest`), then only that image (and its parents) are + returned; if it is an image ID, similarly only that image (and its parents) + are returned and there would be no names referenced in the 'repositories' + file for this image ID. + + For details on the format, see the [export image endpoint](#operation/ImageGet). + operationId: "ImageGetAll" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "names" + in: "query" + description: "Image names to filter by" + type: "array" + items: + type: "string" + tags: ["Image"] + /images/load: + post: + summary: "Import images" + description: | + Load a set of images and tags into a repository. + + For details on the format, see the [export image endpoint](#operation/ImageGet). + operationId: "ImageLoad" + consumes: + - "application/x-tar" + produces: + - "application/json" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "imagesTarball" + in: "body" + description: "Tar archive containing images" + schema: + type: "string" + format: "binary" + - name: "quiet" + in: "query" + description: "Suppress progress details during load." 
+ type: "boolean" + default: false + tags: ["Image"] + /containers/{id}/exec: + post: + summary: "Create an exec instance" + description: "Run a command inside a running container." + operationId: "ContainerExec" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is paused" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execConfig" + in: "body" + description: "Exec configuration" + schema: + type: "object" + title: "ExecConfig" + properties: + AttachStdin: + type: "boolean" + description: "Attach to `stdin` of the exec command." + AttachStdout: + type: "boolean" + description: "Attach to `stdout` of the exec command." + AttachStderr: + type: "boolean" + description: "Attach to `stderr` of the exec command." + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array." + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + DetachKeys: + type: "string" + description: | + Override the key sequence for detaching a container. Format is + a single character `[a-Z]` or `ctrl-` where `` + is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + Env: + description: | + A list of environment variables in the form `["VAR=value", ...]`. + type: "array" + items: + type: "string" + Cmd: + type: "array" + description: "Command to run, as a string or array of strings." + items: + type: "string" + Privileged: + type: "boolean" + description: "Runs the exec process with extended privileges." + default: false + User: + type: "string" + description: | + The user, and optionally, group to run the exec process inside + the container. Format is one of: `user`, `user:group`, `uid`, + or `uid:gid`. + WorkingDir: + type: "string" + description: | + The working directory for the exec process inside the container. + example: + AttachStdin: false + AttachStdout: true + AttachStderr: true + DetachKeys: "ctrl-p,ctrl-q" + Tty: false + Cmd: + - "date" + Env: + - "FOO=bar" + - "BAZ=quux" + required: true + - name: "id" + in: "path" + description: "ID or name of container" + type: "string" + required: true + tags: ["Exec"] + /exec/{id}/start: + post: + summary: "Start an exec instance" + description: | + Starts a previously set up exec instance. If detach is true, this endpoint + returns immediately after starting the command. Otherwise, it sets up an + interactive session with the command. + operationId: "ExecStart" + consumes: + - "application/json" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 200: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Container is stopped or paused" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execStartConfig" + in: "body" + schema: + type: "object" + title: "ExecStartConfig" + properties: + Detach: + type: "boolean" + description: "Detach from the command." + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." 
+ ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array." + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + example: + Detach: false + Tty: true + ConsoleSize: [80, 64] + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + /exec/{id}/resize: + post: + summary: "Resize an exec instance" + description: | + Resize the TTY session used by an exec instance. This endpoint only works + if `tty` was specified as part of creating and starting the exec instance. + operationId: "ExecResize" + responses: + 200: + description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + - name: "h" + in: "query" + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Exec"] + /exec/{id}/json: + get: + summary: "Inspect an exec instance" + description: "Return low-level information about an exec instance." + operationId: "ExecInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ExecInspectResponse" + properties: + CanRemove: + type: "boolean" + DetachKeys: + type: "string" + ID: + type: "string" + Running: + type: "boolean" + ExitCode: + type: "integer" + ProcessConfig: + $ref: "#/definitions/ProcessConfig" + OpenStdin: + type: "boolean" + OpenStderr: + type: "boolean" + OpenStdout: + type: "boolean" + ContainerID: + type: "string" + Pid: + type: "integer" + description: "The system process ID for the exec process." + examples: + application/json: + CanRemove: false + ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" + DetachKeys: "" + ExitCode: 2 + ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" + OpenStderr: true + OpenStdin: true + OpenStdout: true + ProcessConfig: + arguments: + - "-c" + - "exit 2" + entrypoint: "sh" + privileged: false + tty: true + user: "1000" + Running: false + Pid: 42000 + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + + /volumes: + get: + summary: "List volumes" + operationId: "VolumeList" + produces: ["application/json"] + responses: + 200: + description: "Summary volume data that matches the query" + schema: + $ref: "#/definitions/VolumeListResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to + process on the volumes list. Available filters: + + - `dangling=` When set to `true` (or `1`), returns all + volumes that are not in use by a container. When set to `false` + (or `0`), only volumes that are in use by one or more + containers are returned. + - `driver=` Matches volumes based on their driver. 
+ - `label=` or `label=:` Matches volumes based on + the presence of a `label` alone or a `label` and a value. + - `name=` Matches all or part of a volume name. + type: "string" + format: "json" + tags: ["Volume"] + + /volumes/create: + post: + summary: "Create a volume" + operationId: "VolumeCreate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 201: + description: "The volume was created successfully" + schema: + $ref: "#/definitions/Volume" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "volumeConfig" + in: "body" + required: true + description: "Volume configuration" + schema: + $ref: "#/definitions/VolumeCreateOptions" + tags: ["Volume"] + + /volumes/{name}: + get: + summary: "Inspect a volume" + operationId: "VolumeInspect" + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Volume" + 404: + description: "No such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + tags: ["Volume"] + + put: + summary: | + "Update a volume. Valid only for Swarm cluster volumes" + operationId: "VolumeUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name or ID of the volume" + type: "string" + required: true + - name: "body" + in: "body" + schema: + # though the schema for is an object that contains only a + # ClusterVolumeSpec, wrapping the ClusterVolumeSpec in this object + # means that if, later on, we support things like changing the + # labels, we can do so without duplicating that information to the + # ClusterVolumeSpec. + type: "object" + description: "Volume configuration" + properties: + Spec: + $ref: "#/definitions/ClusterVolumeSpec" + description: | + The spec of the volume to update. Currently, only Availability may + change. All other fields must remain unchanged. + - name: "version" + in: "query" + description: | + The version number of the volume being updated. This is required to + avoid conflicting writes. Found in the volume's `ClusterVolume` + field. + type: "integer" + format: "int64" + required: true + tags: ["Volume"] + + delete: + summary: "Remove a volume" + description: "Instruct the driver to remove the volume." 
+ operationId: "VolumeDelete" + responses: + 204: + description: "The volume was removed" + 404: + description: "No such volume or volume driver" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Volume is in use and cannot be removed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + - name: "force" + in: "query" + description: "Force the removal of the volume" + type: "boolean" + default: false + tags: ["Volume"] + + /volumes/prune: + post: + summary: "Delete unused volumes" + produces: + - "application/json" + operationId: "VolumePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. + - `all` (`all=true`) - Consider all (local) volumes for pruning and not just anonymous volumes. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "VolumePruneResponse" + properties: + VolumesDeleted: + description: "Volumes that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Volume"] + /networks: + get: + summary: "List networks" + description: | + Returns a list of networks. For details on the format, see the + [network inspect endpoint](#operation/NetworkInspect). + + Note that it uses a different, smaller representation of a network than + inspecting a single network. For example, the list of containers attached + to the network is not propagated in API versions 1.28 and up. 
+ operationId: "NetworkList" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Network" + examples: + application/json: + - Name: "bridge" + Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" + Created: "2016-10-19T06:21:00.416543526Z" + Scope: "local" + Driver: "bridge" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: + - + Subnet: "172.17.0.0/16" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + - Name: "none" + Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "null" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + - Name: "host" + Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "host" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to process + on the networks list. + + Available filters: + + - `dangling=` When set to `true` (or `1`), returns all + networks that are not in use by a container. When set to `false` + (or `0`), only networks that are in use by one or more + containers are returned. + - `driver=` Matches a network's driver. + - `id=` Matches all or part of a network ID. + - `label=` or `label==` of a network label. + - `name=` Matches all or part of a network name. + - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. 
+ type: "string" + tags: ["Network"] + + /networks/{id}: + get: + summary: "Inspect a network" + operationId: "NetworkInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Network" + 404: + description: "Network not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "verbose" + in: "query" + description: "Detailed inspect output for troubleshooting" + type: "boolean" + default: false + - name: "scope" + in: "query" + description: "Filter the network by scope (swarm, global, or local)" + type: "string" + tags: ["Network"] + + delete: + summary: "Remove a network" + operationId: "NetworkDelete" + responses: + 204: + description: "No error" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such network" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + tags: ["Network"] + + /networks/create: + post: + summary: "Create a network" + operationId: "NetworkCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "No error" + schema: + type: "object" + title: "NetworkCreateResponse" + properties: + Id: + description: "The ID of the created network." + type: "string" + Warning: + type: "string" + example: + Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" + Warning: "" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "plugin not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "networkConfig" + in: "body" + description: "Network configuration" + required: true + schema: + type: "object" + title: "NetworkCreateRequest" + required: ["Name"] + properties: + Name: + description: "The network's name." + type: "string" + CheckDuplicate: + description: | + Check for networks with duplicate names. Since Network is + primarily keyed based on a random ID and not on the name, and + network name is strictly a user-friendly alias to the network + which is uniquely identified using ID, there is no guaranteed + way to check for duplicates. CheckDuplicate is there to provide + a best effort checking of any networks which has the same name + but it is not guaranteed to catch all name collisions. + type: "boolean" + Driver: + description: "Name of the network driver plugin to use." + type: "string" + default: "bridge" + Internal: + description: "Restrict external access to the network." + type: "boolean" + Attachable: + description: | + Globally scoped network is manually attachable by regular + containers from workers in swarm mode. + type: "boolean" + Ingress: + description: | + Ingress network is the network which provides the routing-mesh + in swarm mode. + type: "boolean" + IPAM: + description: "Optional custom IP scheme for the network." + $ref: "#/definitions/IPAM" + EnableIPv6: + description: "Enable IPv6 on the network." 
+ type: "boolean" + Options: + description: "Network specific options to be used by the drivers." + type: "object" + additionalProperties: + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + Name: "isolated_nw" + CheckDuplicate: false + Driver: "bridge" + EnableIPv6: true + IPAM: + Driver: "default" + Config: + - Subnet: "172.20.0.0/16" + IPRange: "172.20.10.0/24" + Gateway: "172.20.10.11" + - Subnet: "2001:db8:abcd::/64" + Gateway: "2001:db8:abcd::1011" + Options: + foo: "bar" + Internal: true + Attachable: false + Ingress: false + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + tags: ["Network"] + + /networks/{id}/connect: + post: + summary: "Connect a container to a network" + operationId: "NetworkConnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + title: "NetworkConnectRequest" + properties: + Container: + type: "string" + description: "The ID or name of the container to connect to the network." + EndpointConfig: + $ref: "#/definitions/EndpointSettings" + example: + Container: "3613f73ba0e4" + EndpointConfig: + IPAMConfig: + IPv4Address: "172.24.56.89" + IPv6Address: "2001:db8::5689" + tags: ["Network"] + + /networks/{id}/disconnect: + post: + summary: "Disconnect a container from a network" + operationId: "NetworkDisconnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + title: "NetworkDisconnectRequest" + properties: + Container: + type: "string" + description: | + The ID or name of the container to disconnect from the network. + Force: + type: "boolean" + description: | + Force the container to disconnect from the network. + tags: ["Network"] + /networks/prune: + post: + summary: "Delete unused networks" + produces: + - "application/json" + operationId: "NetworkPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=` Prune networks created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. 
`10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune networks with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "NetworkPruneResponse" + properties: + NetworksDeleted: + description: "Networks that were deleted" + type: "array" + items: + type: "string" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Network"] + /plugins: + get: + summary: "List plugins" + operationId: "PluginList" + description: "Returns information about installed plugins." + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Plugin" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the plugin list. + + Available filters: + + - `capability=` + - `enable=|` + tags: ["Plugin"] + + /plugins/privileges: + get: + summary: "Get plugin privileges" + operationId: "GetPluginPrivileges" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: + - "Plugin" + + /plugins/pull: + post: + summary: "Install a plugin" + operationId: "PluginPull" + description: | + Pulls and installs a plugin. After the plugin is installed, it can be + enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). + produces: + - "application/json" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + Remote reference for plugin to install. + + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "name" + in: "query" + description: | + Local name for the pulled plugin. + + The `:latest` tag is optional, and is used as the default if omitted. + required: false + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details. 
+ type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/{name}/json: + get: + summary: "Inspect a plugin" + operationId: "PluginInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}: + delete: + summary: "Remove a plugin" + operationId: "PluginDelete" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "force" + in: "query" + description: | + Disable the plugin before removing. This may result in issues if the + plugin is in use by a container. + type: "boolean" + default: false + tags: ["Plugin"] + /plugins/{name}/enable: + post: + summary: "Enable a plugin" + operationId: "PluginEnable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "timeout" + in: "query" + description: "Set the HTTP client timeout (in seconds)" + type: "integer" + default: 0 + tags: ["Plugin"] + /plugins/{name}/disable: + post: + summary: "Disable a plugin" + operationId: "PluginDisable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}/upgrade: + post: + summary: "Upgrade a plugin" + operationId: "PluginUpgrade" + responses: + 204: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "remote" + in: "query" + description: | + Remote reference to upgrade to. + + The `:latest` tag is optional, and is used as the default if omitted. 
+ required: true + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + $ref: "#/definitions/PluginPrivilege" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/create: + post: + summary: "Create a plugin" + operationId: "PluginCreate" + consumes: + - "application/x-tar" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "query" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "tarContext" + in: "body" + description: "Path to tar containing plugin rootfs and manifest" + schema: + type: "string" + format: "binary" + tags: ["Plugin"] + /plugins/{name}/push: + post: + summary: "Push a plugin" + operationId: "PluginPush" + description: | + Push a plugin to the registry. + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + responses: + 200: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /plugins/{name}/set: + post: + summary: "Configure a plugin" + operationId: "PluginSet" + consumes: + - "application/json" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + type: "string" + example: ["DEBUG=1"] + responses: + 204: + description: "No error" + 404: + description: "Plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /nodes: + get: + summary: "List nodes" + operationId: "NodeList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Node" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). 
+ + Available filters: + - `id=` + - `label=` + - `membership=`(`accepted`|`pending`)` + - `name=` + - `node.label=` + - `role=`(`manager`|`worker`)` + type: "string" + tags: ["Node"] + /nodes/{id}: + get: + summary: "Inspect a node" + operationId: "NodeInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Node" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + tags: ["Node"] + delete: + summary: "Delete a node" + operationId: "NodeDelete" + responses: + 200: + description: "no error" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + - name: "force" + in: "query" + description: "Force remove a node from the swarm" + default: false + type: "boolean" + tags: ["Node"] + /nodes/{id}/update: + post: + summary: "Update a node" + operationId: "NodeUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID of the node" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/NodeSpec" + - name: "version" + in: "query" + description: | + The version number of the node object being updated. This is required + to avoid conflicting writes. 
+ type: "integer" + format: "int64" + required: true + tags: ["Node"] + /swarm: + get: + summary: "Inspect swarm" + operationId: "SwarmInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Swarm" + 404: + description: "no such swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/init: + post: + summary: "Initialize a new swarm" + operationId: "SwarmInit" + produces: + - "application/json" + - "text/plain" + responses: + 200: + description: "no error" + schema: + description: "The node ID" + type: "string" + example: "7v2t30z9blmxuhnyo6s4cpenp" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmInitRequest" + properties: + ListenAddr: + description: | + Listen address used for inter-manager communication, as well + as determining the networking interface used for the VXLAN + Tunnel Endpoint (VTEP). This can either be an address/port + combination in the form `192.168.1.1:4567`, or an interface + followed by a port number, like `eth0:4567`. If the port number + is omitted, the default swarm listening port is used. + type: "string" + AdvertiseAddr: + description: | + Externally reachable address advertised to other nodes. This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: + ``), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same address + as `AdvertiseAddr` is used. + + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. + type: "string" + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + if no port is set or is set to 0, default port 4789 will be used. + type: "integer" + format: "uint32" + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global + scope networks. + type: "array" + items: + type: "string" + example: ["10.10.0.0/16", "20.20.0.0/16"] + ForceNewCluster: + description: "Force creation of a new swarm." + type: "boolean" + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created + from the default subnet pool. 
+ type: "integer" + format: "uint32" + Spec: + $ref: "#/definitions/SwarmSpec" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + DataPathPort: 4789 + DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"] + SubnetSize: 24 + ForceNewCluster: false + Spec: + Orchestration: {} + Raft: {} + Dispatcher: {} + CAConfig: {} + EncryptionConfig: + AutoLockManagers: false + tags: ["Swarm"] + /swarm/join: + post: + summary: "Join an existing swarm" + operationId: "SwarmJoin" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmJoinRequest" + properties: + ListenAddr: + description: | + Listen address used for inter-manager communication if the node + gets promoted to manager, as well as determining the networking + interface used for the VXLAN Tunnel Endpoint (VTEP). + type: "string" + AdvertiseAddr: + description: | + Externally reachable address advertised to other nodes. This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: + ``), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same address + as `AdvertiseAddr` is used. + + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. + + type: "string" + RemoteAddrs: + description: | + Addresses of manager nodes already participating in the swarm. + type: "array" + items: + type: "string" + JoinToken: + description: "Secret token for joining this swarm." + type: "string" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + RemoteAddrs: + - "node1:2377" + JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + tags: ["Swarm"] + /swarm/leave: + post: + summary: "Leave a swarm" + operationId: "SwarmLeave" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "force" + description: | + Force leave swarm, even if this is the last manager or that it will + break the cluster. 
+ in: "query" + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/update: + post: + summary: "Update a swarm" + operationId: "SwarmUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + $ref: "#/definitions/SwarmSpec" + - name: "version" + in: "query" + description: | + The version number of the swarm object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + - name: "rotateWorkerToken" + in: "query" + description: "Rotate the worker join token." + type: "boolean" + default: false + - name: "rotateManagerToken" + in: "query" + description: "Rotate the manager join token." + type: "boolean" + default: false + - name: "rotateManagerUnlockKey" + in: "query" + description: "Rotate the manager unlock key." + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/unlockkey: + get: + summary: "Get the unlock key" + operationId: "SwarmUnlockkey" + consumes: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "UnlockKeyResponse" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/unlock: + post: + summary: "Unlock a locked manager" + operationId: "SwarmUnlock" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + title: "SwarmUnlockRequest" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /services: + get: + summary: "List services" + operationId: "ServiceList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Service" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the services list. + + Available filters: + + - `id=` + - `label=` + - `mode=["replicated"|"global"]` + - `name=` + - name: "status" + in: "query" + type: "boolean" + description: | + Include service status, with count of running and desired tasks. 
+ tags: ["Service"] + /services/create: + post: + summary: "Create a service" + operationId: "ServiceCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + type: "object" + title: "ServiceCreateResponse" + properties: + ID: + description: "The ID of the created service." + type: "string" + Warning: + description: "Optional warning message" + type: "string" + example: + ID: "ak7w3gjqoa3kuz8xcpnyy0pvl" + Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "network is not eligible for services" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "name conflicts with an existing service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "web" + TaskTemplate: + ContainerSpec: + Image: "nginx:alpine" + Mounts: + - + ReadOnly: true + Source: "web-data" + Target: "/usr/share/nginx/html" + Type: "volume" + VolumeOptions: + DriverConfig: {} + Labels: + com.example.something: "something-value" + Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] + User: "33" + DNSConfig: + Nameservers: ["8.8.8.8"] + Search: ["example.org"] + Options: ["timeout:3"] + Secrets: + - + File: + Name: "www.example.org.key" + UID: "33" + GID: "33" + Mode: 384 + SecretID: "fpjqlhnwb19zds35k8wn80lq9" + SecretName: "example_org_domain_key" + LogDriver: + Name: "json-file" + Options: + max-file: "3" + max-size: "10M" + Placement: {} + Resources: + Limits: + MemoryBytes: 104857600 + Reservations: {} + RestartPolicy: + Condition: "on-failure" + Delay: 10000000000 + MaxAttempts: 10 + Mode: + Replicated: + Replicas: 4 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Ports: + - + Protocol: "tcp" + PublishedPort: 8080 + TargetPort: 80 + Labels: + foo: "bar" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration for pulling from private + registries. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + tags: ["Service"] + /services/{id}: + get: + summary: "Inspect a service" + operationId: "ServiceInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Service" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "insertDefaults" + in: "query" + description: "Fill empty fields with default values." 
+ type: "boolean" + default: false + tags: ["Service"] + delete: + summary: "Delete a service" + operationId: "ServiceDelete" + responses: + 200: + description: "no error" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + tags: ["Service"] + /services/{id}/update: + post: + summary: "Update a service" + operationId: "ServiceUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ServiceUpdateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "top" + TaskTemplate: + ContainerSpec: + Image: "busybox" + Args: + - "top" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + + - name: "version" + in: "query" + description: | + The version number of the service object being updated. This is + required to avoid conflicting writes. + This version number should be the value as currently set on the + service *before* the update. You can find the current version by + calling `GET /services/{id}` + required: true + type: "integer" + - name: "registryAuthFrom" + in: "query" + description: | + If the `X-Registry-Auth` header is not specified, this parameter + indicates where to find registry authorization credentials. + type: "string" + enum: ["spec", "previous-spec"] + default: "spec" + - name: "rollback" + in: "query" + description: | + Set to this parameter to `previous` to cause a server-side rollback + to the previous service spec. The supplied spec will be ignored in + this case. + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration for pulling from private + registries. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + + tags: ["Service"] + /services/{id}/logs: + get: + summary: "Get service logs" + description: | + Get `stdout` and `stderr` logs from a service. See also + [`/containers/{id}/logs`](#operation/ContainerLogs). + + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. 
+ produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + operationId: "ServiceLogs" + responses: + 200: + description: "logs returned as a stream in response body" + schema: + type: "string" + format: "binary" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such service: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the service" + type: "string" + - name: "details" + in: "query" + description: "Show service context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: "Keep connection after returning logs." + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Service"] + /tasks: + get: + summary: "List tasks" + operationId: "TaskList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Task" + example: + - ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + - ID: "1yljwbmlr8er2waf8orvqpwms" + Version: + Index: 30 + CreatedAt: "2016-06-07T21:07:30.019104782Z" + UpdatedAt: "2016-06-07T21:07:30.231958098Z" + Name: "hopeful_cori" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: 
"60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:30.202183143Z" + State: "shutdown" + Message: "shutdown" + ContainerStatus: + ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" + DesiredState: "shutdown" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.5/16" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the tasks list. + + Available filters: + + - `desired-state=(running | shutdown | accepted)` + - `id=` + - `label=key` or `label="key=value"` + - `name=` + - `node=` + - `service=` + tags: ["Task"] + /tasks/{id}: + get: + summary: "Inspect a task" + operationId: "TaskInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Task" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID of the task" + required: true + type: "string" + tags: ["Task"] + /tasks/{id}/logs: + get: + summary: "Get task logs" + description: | + Get `stdout` and `stderr` logs from a task. + See also [`/containers/{id}/logs`](#operation/ContainerLogs). + + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. + operationId: "TaskLogs" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" + responses: + 200: + description: "logs returned as a stream in response body" + schema: + type: "string" + format: "binary" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such task: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID of the task" + type: "string" + - name: "details" + in: "query" + description: "Show task context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: "Keep connection after returning logs." 
+ type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Task"] + /secrets: + get: + summary: "List secrets" + operationId: "SecretList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Secret" + example: + - ID: "blt1owaxmitz71s9v5zh81zun" + Version: + Index: 85 + CreatedAt: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: "2017-07-20T13:55:28.678958722Z" + Spec: + Name: "mysql-passwd" + Labels: + some.label: "some.value" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the secrets list. 
+ + Available filters: + + - `id=` + - `label= or label==value` + - `name=` + - `names=` + tags: ["Secret"] + /secrets/create: + post: + summary: "Create a secret" + operationId: "SecretCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/SecretSpec" + - type: "object" + example: + Name: "app-key.crt" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + tags: ["Secret"] + /secrets/{id}: + get: + summary: "Inspect a secret" + operationId: "SecretInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Secret" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + delete: + summary: "Delete a secret" + operationId: "SecretDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + /secrets/{id}/update: + post: + summary: "Update a Secret" + operationId: "SecretUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such secret" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the secret" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/SecretSpec" + description: | + The spec of the secret to update. Currently, only the Labels field + can be updated. All other fields must remain unchanged from the + [SecretInspect endpoint](#operation/SecretInspect) response values. 
+ - name: "version" + in: "query" + description: | + The version number of the secret object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + tags: ["Secret"] + /configs: + get: + summary: "List configs" + operationId: "ConfigList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Config" + example: + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "server.conf" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the configs list. + + Available filters: + + - `id=` + - `label= or label==value` + - `name=` + - `names=` + tags: ["Config"] + /configs/create: + post: + summary: "Create a config" + operationId: "ConfigCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/ConfigSpec" + - type: "object" + example: + Name: "server.conf" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + tags: ["Config"] + /configs/{id}: + get: + summary: "Inspect a config" + operationId: "ConfigInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Config" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + delete: + summary: "Delete a config" + operationId: "ConfigDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "config not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the config" + tags: ["Config"] + /configs/{id}/update: + post: + summary: "Update a Config" + operationId: "ConfigUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such config" + schema: + 
$ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the config" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/ConfigSpec" + description: | + The spec of the config to update. Currently, only the Labels field + can be updated. All other fields must remain unchanged from the + [ConfigInspect endpoint](#operation/ConfigInspect) response values. + - name: "version" + in: "query" + description: | + The version number of the config object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + tags: ["Config"] + /distribution/{name}/json: + get: + summary: "Get image information from the registry" + description: | + Return image digest and platform information by contacting the registry. + operationId: "DistributionInspect" + produces: + - "application/json" + responses: + 200: + description: "descriptor and platform information" + schema: + $ref: "#/definitions/DistributionInspect" + 401: + description: "Failed authentication or no image found" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + tags: ["Distribution"] + /session: + post: + summary: "Initialize interactive session" + description: | + Start a new interactive session with a server. Session allows server to + call back to the client for advanced capabilities. + + ### Hijacking + + This endpoint hijacks the HTTP connection to HTTP2 transport that allows + the client to expose gPRC services on that connection. + + For example, the client sends this request to upgrade the connection: + + ``` + POST /session HTTP/1.1 + Upgrade: h2c + Connection: Upgrade + ``` + + The Docker daemon responds with a `101 UPGRADED` response follow with + the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Connection: Upgrade + Upgrade: h2c + ``` + operationId: "Session" + produces: + - "application/vnd.docker.raw-stream" + responses: + 101: + description: "no error, hijacking successful" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Session"] diff --git a/vendor/github.com/docker/docker/api/types/auth.go b/vendor/github.com/docker/docker/api/types/auth.go new file mode 100644 index 000000000..ddf15bb18 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/auth.go @@ -0,0 +1,22 @@ +package types // import "github.com/docker/docker/api/types" + +// AuthConfig contains authorization information for connecting to a Registry +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + // Email is an optional value associated with the username. + // This field is deprecated and will be removed in a later + // version of docker. 
+ Email string `json:"email,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. + IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go new file mode 100644 index 000000000..bf3463b90 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go @@ -0,0 +1,23 @@ +package blkiodev // import "github.com/docker/docker/api/types/blkiodev" + +import "fmt" + +// WeightDevice is a structure that holds device:weight pair +type WeightDevice struct { + Path string + Weight uint16 +} + +func (w *WeightDevice) String() string { + return fmt.Sprintf("%s:%d", w.Path, w.Weight) +} + +// ThrottleDevice is a structure that holds device:rate_per_second pair +type ThrottleDevice struct { + Path string + Rate uint64 +} + +func (t *ThrottleDevice) String() string { + return fmt.Sprintf("%s:%d", t.Path, t.Rate) +} diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go new file mode 100644 index 000000000..97aca0230 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/client.go @@ -0,0 +1,443 @@ +package types // import "github.com/docker/docker/api/types" + +import ( + "bufio" + "io" + "net" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + units "github.com/docker/go-units" +) + +// CheckpointCreateOptions holds parameters to create a checkpoint from a container +type CheckpointCreateOptions struct { + CheckpointID string + CheckpointDir string + Exit bool +} + +// CheckpointListOptions holds parameters to list checkpoints for a container +type CheckpointListOptions struct { + CheckpointDir string +} + +// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container +type CheckpointDeleteOptions struct { + CheckpointID string + CheckpointDir string +} + +// ContainerAttachOptions holds parameters to attach to a container. +type ContainerAttachOptions struct { + Stream bool + Stdin bool + Stdout bool + Stderr bool + DetachKeys string + Logs bool +} + +// ContainerCommitOptions holds parameters to commit changes into a container. +type ContainerCommitOptions struct { + Reference string + Comment string + Author string + Changes []string + Pause bool + Config *container.Config +} + +// ContainerExecInspect holds information returned by exec inspect. +type ContainerExecInspect struct { + ExecID string `json:"ID"` + ContainerID string + Running bool + ExitCode int + Pid int +} + +// ContainerListOptions holds parameters to list containers with. +type ContainerListOptions struct { + Size bool + All bool + Latest bool + Since string + Before string + Limit int + Filters filters.Args +} + +// ContainerLogsOptions holds parameters to filter logs with. +type ContainerLogsOptions struct { + ShowStdout bool + ShowStderr bool + Since string + Until string + Timestamps bool + Follow bool + Tail string + Details bool +} + +// ContainerRemoveOptions holds parameters to remove containers. +type ContainerRemoveOptions struct { + RemoveVolumes bool + RemoveLinks bool + Force bool +} + +// ContainerStartOptions holds parameters to start containers. 
+type ContainerStartOptions struct { + CheckpointID string + CheckpointDir string +} + +// CopyToContainerOptions holds information +// about files to copy into a container +type CopyToContainerOptions struct { + AllowOverwriteDirWithFile bool + CopyUIDGID bool +} + +// EventsOptions holds parameters to filter events with. +type EventsOptions struct { + Since string + Until string + Filters filters.Args +} + +// NetworkListOptions holds parameters to filter the list of networks with. +type NetworkListOptions struct { + Filters filters.Args +} + +// NewHijackedResponse intializes a HijackedResponse type +func NewHijackedResponse(conn net.Conn, mediaType string) HijackedResponse { + return HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn), mediaType: mediaType} +} + +// HijackedResponse holds connection information for a hijacked request. +type HijackedResponse struct { + mediaType string + Conn net.Conn + Reader *bufio.Reader +} + +// Close closes the hijacked connection and reader. +func (h *HijackedResponse) Close() { + h.Conn.Close() +} + +// MediaType let client know if HijackedResponse hold a raw or multiplexed stream. +// returns false if HTTP Content-Type is not relevant, and container must be inspected +func (h *HijackedResponse) MediaType() (string, bool) { + if h.mediaType == "" { + return "", false + } + return h.mediaType, true +} + +// CloseWriter is an interface that implements structs +// that close input streams to prevent from writing. +type CloseWriter interface { + CloseWrite() error +} + +// CloseWrite closes a readWriter for writing. +func (h *HijackedResponse) CloseWrite() error { + if conn, ok := h.Conn.(CloseWriter); ok { + return conn.CloseWrite() + } + return nil +} + +// ImageBuildOptions holds the information +// necessary to build images. +type ImageBuildOptions struct { + Tags []string + SuppressOutput bool + RemoteContext string + NoCache bool + Remove bool + ForceRemove bool + PullParent bool + Isolation container.Isolation + CPUSetCPUs string + CPUSetMems string + CPUShares int64 + CPUQuota int64 + CPUPeriod int64 + Memory int64 + MemorySwap int64 + CgroupParent string + NetworkMode string + ShmSize int64 + Dockerfile string + Ulimits []*units.Ulimit + // BuildArgs needs to be a *string instead of just a string so that + // we can tell the difference between "" (empty string) and no value + // at all (nil). See the parsing of buildArgs in + // api/server/router/build/build_routes.go for even more info. + BuildArgs map[string]*string + AuthConfigs map[string]AuthConfig + Context io.Reader + Labels map[string]string + // squash the resulting image's layers to the parent + // preserves the original image and creates a new one from the parent with all + // the changes applied to a single layer + Squash bool + // CacheFrom specifies images that are used for matching cache. Images + // specified here do not need to have a valid parent chain to match cache. + CacheFrom []string + SecurityOpt []string + ExtraHosts []string // List of extra hosts + Target string + SessionID string + Platform string + // Version specifies the version of the unerlying builder to use + Version BuilderVersion + // BuildID is an optional identifier that can be passed together with the + // build request. The same identifier can be used to gracefully cancel the + // build with the cancel request. + BuildID string + // Outputs defines configurations for exporting build results. 
Only supported + // in BuildKit mode + Outputs []ImageBuildOutput +} + +// ImageBuildOutput defines configuration for exporting a build result +type ImageBuildOutput struct { + Type string + Attrs map[string]string +} + +// BuilderVersion sets the version of underlying builder to use +type BuilderVersion string + +const ( + // BuilderV1 is the first generation builder in docker daemon + BuilderV1 BuilderVersion = "1" + // BuilderBuildKit is builder based on moby/buildkit project + BuilderBuildKit BuilderVersion = "2" +) + +// ImageBuildResponse holds information +// returned by a server after building +// an image. +type ImageBuildResponse struct { + Body io.ReadCloser + OSType string +} + +// ImageCreateOptions holds information to create images. +type ImageCreateOptions struct { + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry. + Platform string // Platform is the target platform of the image if it needs to be pulled from the registry. +} + +// ImageImportSource holds source information for ImageImport +type ImageImportSource struct { + Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this. + SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute. +} + +// ImageImportOptions holds information to import images from the client host. +type ImageImportOptions struct { + Tag string // Tag is the name to tag this image with. This attribute is deprecated. + Message string // Message is the message to tag the image with + Changes []string // Changes are the raw changes to apply to this image + Platform string // Platform is the target platform of the image +} + +// ImageListOptions holds parameters to list images with. +type ImageListOptions struct { + // All controls whether all images in the graph are filtered, or just + // the heads. + All bool + + // Filters is a JSON-encoded set of filter arguments. + Filters filters.Args + + // SharedSize indicates whether the shared size of images should be computed. + SharedSize bool + + // ContainerCount indicates whether container count should be computed. + ContainerCount bool +} + +// ImageLoadResponse returns information to the client about a load process. +type ImageLoadResponse struct { + // Body must be closed to avoid a resource leak + Body io.ReadCloser + JSON bool +} + +// ImagePullOptions holds information to pull images. +type ImagePullOptions struct { + All bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + PrivilegeFunc RequestPrivilegeFunc + Platform string +} + +// RequestPrivilegeFunc is a function interface that +// clients can supply to retry operations after +// getting an authorization error. +// This function returns the registry authentication +// header value in base 64 format, or an error +// if the privilege request fails. +type RequestPrivilegeFunc func() (string, error) + +// ImagePushOptions holds information to push images. +type ImagePushOptions ImagePullOptions + +// ImageRemoveOptions holds parameters to remove images. +type ImageRemoveOptions struct { + Force bool + PruneChildren bool +} + +// ImageSearchOptions holds parameters to search images with. +type ImageSearchOptions struct { + RegistryAuth string + PrivilegeFunc RequestPrivilegeFunc + Filters filters.Args + Limit int +} + +// ResizeOptions holds parameters to resize a tty. 
+// It can be used to resize container ttys and +// exec process ttys too. +type ResizeOptions struct { + Height uint + Width uint +} + +// NodeListOptions holds parameters to list nodes with. +type NodeListOptions struct { + Filters filters.Args +} + +// NodeRemoveOptions holds parameters to remove nodes with. +type NodeRemoveOptions struct { + Force bool +} + +// ServiceCreateOptions contains the options to use when creating a service. +type ServiceCreateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. + QueryRegistry bool +} + +// ServiceCreateResponse contains the information returned to a client +// on the creation of a new service. +type ServiceCreateResponse struct { + // ID is the ID of the created service. + ID string + // Warnings is a set of non-fatal warning messages to pass on to the user. + Warnings []string `json:",omitempty"` +} + +// Values for RegistryAuthFrom in ServiceUpdateOptions +const ( + RegistryAuthFromSpec = "spec" + RegistryAuthFromPreviousSpec = "previous-spec" +) + +// ServiceUpdateOptions contains the options to be used for updating services. +type ServiceUpdateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string + + // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate + // into this field. While it does open API users up to racy writes, most + // users may not need that level of consistency in practice. + + // RegistryAuthFrom specifies where to find the registry authorization + // credentials if they are not given in EncodedRegistryAuth. Valid + // values are "spec" and "previous-spec". + RegistryAuthFrom string + + // Rollback indicates whether a server-side rollback should be + // performed. When this is set, the provided spec will be ignored. + // The valid values are "previous" and "none". An empty value is the + // same as "none". + Rollback string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. + QueryRegistry bool +} + +// ServiceListOptions holds parameters to list services with. +type ServiceListOptions struct { + Filters filters.Args + + // Status indicates whether the server should include the service task + // count of running and desired tasks. + Status bool +} + +// ServiceInspectOptions holds parameters related to the "service inspect" +// operation. +type ServiceInspectOptions struct { + InsertDefaults bool +} + +// TaskListOptions holds parameters to list tasks with. +type TaskListOptions struct { + Filters filters.Args +} + +// PluginRemoveOptions holds parameters to remove plugins. +type PluginRemoveOptions struct { + Force bool +} + +// PluginEnableOptions holds parameters to enable plugins. 
+type PluginEnableOptions struct { + Timeout int +} + +// PluginDisableOptions holds parameters to disable plugins. +type PluginDisableOptions struct { + Force bool +} + +// PluginInstallOptions holds parameters to install a plugin. +type PluginInstallOptions struct { + Disabled bool + AcceptAllPermissions bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + RemoteRef string // RemoteRef is the plugin name on the registry + PrivilegeFunc RequestPrivilegeFunc + AcceptPermissionsFunc func(PluginPrivileges) (bool, error) + Args []string +} + +// SwarmUnlockKeyResponse contains the response for Engine API: +// GET /swarm/unlockkey +type SwarmUnlockKeyResponse struct { + // UnlockKey is the unlock key in ASCII-armored format. + UnlockKey string +} + +// PluginCreateOptions hold all options to plugin create. +type PluginCreateOptions struct { + RepoName string +} diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go new file mode 100644 index 000000000..7689f38b3 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/configs.go @@ -0,0 +1,67 @@ +package types // import "github.com/docker/docker/api/types" + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// configs holds structs used for internal communication between the +// frontend (such as an http server) and the backend (such as the +// docker daemon). + +// ContainerCreateConfig is the parameter set to ContainerCreate() +type ContainerCreateConfig struct { + Name string + Config *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig + Platform *specs.Platform + AdjustCPUShares bool +} + +// ContainerRmConfig holds arguments for the container remove +// operation. This struct is used to tell the backend what operations +// to perform. +type ContainerRmConfig struct { + ForceRemove, RemoveVolume, RemoveLink bool +} + +// ExecConfig is a small subset of the Config struct that holds the configuration +// for the exec feature of docker. +type ExecConfig struct { + User string // User that will run the command + Privileged bool // Is the container in privileged mode + Tty bool // Attach standard streams to a tty. + ConsoleSize *[2]uint `json:",omitempty"` // Initial console size [height, width] + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStderr bool // Attach the standard error + AttachStdout bool // Attach the standard output + Detach bool // Execute in detach mode + DetachKeys string // Escape keys for detach + Env []string // Environment variables + WorkingDir string // Working directory + Cmd []string // Execution commands and args +} + +// PluginRmConfig holds arguments for plugin remove. +type PluginRmConfig struct { + ForceRemove bool +} + +// PluginEnableConfig holds arguments for plugin enable +type PluginEnableConfig struct { + Timeout int +} + +// PluginDisableConfig holds arguments for plugin disable. 
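ExecConfig above mirrors the parameters of `docker exec`. A hedged sketch of an interactive exec payload; the ContainerExecCreate call mentioned in the comment belongs to the docker client package and is referenced only for orientation:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	// Roughly what `docker exec -it <ctr> sh -c 'id && pwd'` would send.
	execCfg := types.ExecConfig{
		User:         "1000:1000", // the user:group form is accepted
		Tty:          true,
		AttachStdin:  true,
		AttachStdout: true,
		AttachStderr: true,
		Env:          []string{"TERM=xterm"},
		WorkingDir:   "/srv",
		Cmd:          []string{"sh", "-c", "id && pwd"},
	}

	// In real code this struct is handed to the client's ContainerExecCreate;
	// here we only show the shape of the payload.
	fmt.Printf("%+v\n", execCfg)
}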
+type PluginDisableConfig struct { + ForceDisable bool +} + +// NetworkListConfig stores the options available for listing networks +type NetworkListConfig struct { + // TODO(@cpuguy83): naming is hard, this is pulled from what was being used in the router before moving here + Detailed bool + Verbose bool +} diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go new file mode 100644 index 000000000..077583e66 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/config.go @@ -0,0 +1,96 @@ +package container // import "github.com/docker/docker/api/types/container" + +import ( + "io" + "time" + + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" +) + +// MinimumDuration puts a minimum on user configured duration. +// This is to prevent API error on time unit. For example, API may +// set 3 as healthcheck interval with intention of 3 seconds, but +// Docker interprets it as 3 nanoseconds. +const MinimumDuration = 1 * time.Millisecond + +// StopOptions holds the options to stop or restart a container. +type StopOptions struct { + // Signal (optional) is the signal to send to the container to (gracefully) + // stop it before forcibly terminating the container with SIGKILL after the + // timeout expires. If not value is set, the default (SIGTERM) is used. + Signal string `json:",omitempty"` + + // Timeout (optional) is the timeout (in seconds) to wait for the container + // to stop gracefully before forcibly terminating it with SIGKILL. + // + // - Use nil to use the default timeout (10 seconds). + // - Use '-1' to wait indefinitely. + // - Use '0' to not wait for the container to exit gracefully, and + // immediately proceeds to forcibly terminating the container. + // - Other positive values are used as timeout (in seconds). + Timeout *int `json:",omitempty"` +} + +// HealthConfig holds configuration settings for the HEALTHCHECK feature. +type HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} + +// ExecStartOptions holds the options to start container's exec. +type ExecStartOptions struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer + ConsoleSize *[2]uint `json:",omitempty"` +} + +// Config contains the configuration data about a container. +// It should hold only portable information about the container. +// Here, "portable" means "independent from the host we are running on". +// Non-portable information *should* appear in HostConfig. 
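StopOptions and HealthConfig above are the structs behind `docker stop --timeout` and the HEALTHCHECK instruction. A hedged illustration with typical values; note that the durations are plain time.Duration values and that MinimumDuration is the floor for user-configured durations:

package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// Stop with a 30 second grace period; a nil Timeout means the default
	// (10 seconds), and -1 waits indefinitely.
	grace := 30
	stop := container.StopOptions{
		Signal:  "SIGTERM",
		Timeout: &grace,
	}

	// Rough equivalent of:
	//   HEALTHCHECK --interval=10s --timeout=3s --start-period=5s --retries=3 \
	//     CMD curl -f http://localhost/ || exit 1
	health := container.HealthConfig{
		Test:        []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
		Interval:    10 * time.Second,
		Timeout:     3 * time.Second,
		StartPeriod: 5 * time.Second,
		Retries:     3,
	}

	// Both durations are comfortably above container.MinimumDuration (1ms).
	fmt.Printf("stop=%+v healthcheck=%+v min=%v\n", stop, health, container.MinimumDuration)
}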
+// All fields added to this struct must be marked `omitempty` to keep getting +// predictable hashes from the old `v1Compatibility` configuration. +type Config struct { + Hostname string // Hostname + Domainname string // Domainname + User string // User that will run the command(s) inside the container, also support user:group + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStdout bool // Attach the standard output + AttachStderr bool // Attach the standard error + ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports + Tty bool // Attach standard streams to a tty, including stdin if it is not closed. + OpenStdin bool // Open stdin + StdinOnce bool // If true, close stdin after the 1 attached client disconnects. + Env []string // List of environment variable to set in the container + Cmd strslice.StrSlice // Command to run when starting the container + Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy + ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (meaning treat as a command line) (Windows specific). + Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) + Volumes map[string]struct{} // List of volumes (mounts) used for the container + WorkingDir string // Current directory (PWD) in the command will be launched + Entrypoint strslice.StrSlice // Entrypoint to run when starting the container + NetworkDisabled bool `json:",omitempty"` // Is network disabled + MacAddress string `json:",omitempty"` // Mac Address of the container + OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile + Labels map[string]string // List of labels set to this container + StopSignal string `json:",omitempty"` // Signal to stop a container + StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container + Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT +} diff --git a/vendor/github.com/docker/docker/api/types/container/container_changes.go b/vendor/github.com/docker/docker/api/types/container/container_changes.go new file mode 100644 index 000000000..16dd5019e --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/container_changes.go @@ -0,0 +1,20 @@ +package container // import "github.com/docker/docker/api/types/container" + +// ---------------------------------------------------------------------------- +// Code generated by `swagger generate operation`. DO NOT EDIT. +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerChangeResponseItem change item in response to ContainerChanges operation +// swagger:model ContainerChangeResponseItem +type ContainerChangeResponseItem struct { + + // Kind of change + // Required: true + Kind uint8 `json:"Kind"` + + // Path to file that has changed + // Required: true + Path string `json:"Path"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/container_top.go b/vendor/github.com/docker/docker/api/types/container/container_top.go new file mode 100644 index 000000000..63381da36 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/container_top.go @@ -0,0 +1,22 @@ +package container // import "github.com/docker/docker/api/types/container" + +// ---------------------------------------------------------------------------- +// Code generated by `swagger generate operation`. 
DO NOT EDIT. +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerTopOKBody OK response to ContainerTop operation +// swagger:model ContainerTopOKBody +type ContainerTopOKBody struct { + + // Each process running in the container, where each is process + // is an array of values corresponding to the titles. + // + // Required: true + Processes [][]string `json:"Processes"` + + // The ps column titles + // Required: true + Titles []string `json:"Titles"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/container_update.go b/vendor/github.com/docker/docker/api/types/container/container_update.go new file mode 100644 index 000000000..c10f175ea --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/container_update.go @@ -0,0 +1,16 @@ +package container // import "github.com/docker/docker/api/types/container" + +// ---------------------------------------------------------------------------- +// Code generated by `swagger generate operation`. DO NOT EDIT. +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerUpdateOKBody OK response to ContainerUpdate operation +// swagger:model ContainerUpdateOKBody +type ContainerUpdateOKBody struct { + + // warnings + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/create_response.go b/vendor/github.com/docker/docker/api/types/container/create_response.go new file mode 100644 index 000000000..aa0e7f7d0 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/create_response.go @@ -0,0 +1,19 @@ +package container + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// CreateResponse ContainerCreateResponse +// +// OK response to ContainerCreate operation +// swagger:model CreateResponse +type CreateResponse struct { + + // The ID of the created container + // Required: true + ID string `json:"Id"` + + // Warnings encountered when creating the container + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/deprecated.go b/vendor/github.com/docker/docker/api/types/container/deprecated.go new file mode 100644 index 000000000..0cb70e363 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/deprecated.go @@ -0,0 +1,16 @@ +package container // import "github.com/docker/docker/api/types/container" + +// ContainerCreateCreatedBody OK response to ContainerCreate operation +// +// Deprecated: use CreateResponse +type ContainerCreateCreatedBody = CreateResponse + +// ContainerWaitOKBody OK response to ContainerWait operation +// +// Deprecated: use WaitResponse +type ContainerWaitOKBody = WaitResponse + +// ContainerWaitOKBodyError container waiting error, if any +// +// Deprecated: use WaitExitError +type ContainerWaitOKBodyError = WaitExitError diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go new file mode 100644 index 000000000..100f434ce --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/host_config.go @@ -0,0 +1,465 @@ +package container // import "github.com/docker/docker/api/types/container" + +import ( + "strings" + + "github.com/docker/docker/api/types/blkiodev" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" + units "github.com/docker/go-units" +) + +// CgroupnsMode represents the cgroup namespace mode of the container +type CgroupnsMode string + +// cgroup namespace modes for containers +const ( + CgroupnsModeEmpty CgroupnsMode = "" + CgroupnsModePrivate CgroupnsMode = "private" + CgroupnsModeHost CgroupnsMode = "host" +) + +// IsPrivate indicates whether the container uses its own private cgroup namespace +func (c CgroupnsMode) IsPrivate() bool { + return c == CgroupnsModePrivate +} + +// IsHost indicates whether the container shares the host's cgroup namespace +func (c CgroupnsMode) IsHost() bool { + return c == CgroupnsModeHost +} + +// IsEmpty indicates whether the container cgroup namespace mode is unset +func (c CgroupnsMode) IsEmpty() bool { + return c == CgroupnsModeEmpty +} + +// Valid indicates whether the cgroup namespace mode is valid +func (c CgroupnsMode) Valid() bool { + return c.IsEmpty() || c.IsPrivate() || c.IsHost() +} + +// Isolation represents the isolation technology of a container. The supported +// values are platform specific +type Isolation string + +// Isolation modes for containers +const ( + IsolationEmpty Isolation = "" // IsolationEmpty is unspecified (same behavior as default) + IsolationDefault Isolation = "default" // IsolationDefault is the default isolation mode on current daemon + IsolationProcess Isolation = "process" // IsolationProcess is process isolation mode + IsolationHyperV Isolation = "hyperv" // IsolationHyperV is HyperV isolation mode +) + +// IsDefault indicates the default isolation technology of a container. On Linux this +// is the native driver. On Windows, this is a Windows Server Container. 
+func (i Isolation) IsDefault() bool { + // TODO consider making isolation-mode strict (case-sensitive) + v := Isolation(strings.ToLower(string(i))) + return v == IsolationDefault || v == IsolationEmpty +} + +// IsHyperV indicates the use of a Hyper-V partition for isolation +func (i Isolation) IsHyperV() bool { + // TODO consider making isolation-mode strict (case-sensitive) + return Isolation(strings.ToLower(string(i))) == IsolationHyperV +} + +// IsProcess indicates the use of process isolation +func (i Isolation) IsProcess() bool { + // TODO consider making isolation-mode strict (case-sensitive) + return Isolation(strings.ToLower(string(i))) == IsolationProcess +} + +// IpcMode represents the container ipc stack. +type IpcMode string + +// IpcMode constants +const ( + IPCModeNone IpcMode = "none" + IPCModeHost IpcMode = "host" + IPCModeContainer IpcMode = "container" + IPCModePrivate IpcMode = "private" + IPCModeShareable IpcMode = "shareable" +) + +// IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared. +func (n IpcMode) IsPrivate() bool { + return n == IPCModePrivate +} + +// IsHost indicates whether the container shares the host's ipc namespace. +func (n IpcMode) IsHost() bool { + return n == IPCModeHost +} + +// IsShareable indicates whether the container's ipc namespace can be shared with another container. +func (n IpcMode) IsShareable() bool { + return n == IPCModeShareable +} + +// IsContainer indicates whether the container uses another container's ipc namespace. +func (n IpcMode) IsContainer() bool { + return strings.HasPrefix(string(n), string(IPCModeContainer)+":") +} + +// IsNone indicates whether container IpcMode is set to "none". +func (n IpcMode) IsNone() bool { + return n == IPCModeNone +} + +// IsEmpty indicates whether container IpcMode is empty +func (n IpcMode) IsEmpty() bool { + return n == "" +} + +// Valid indicates whether the ipc mode is valid. +func (n IpcMode) Valid() bool { + return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer() +} + +// Container returns the name of the container ipc stack is going to be used. +func (n IpcMode) Container() string { + if n.IsContainer() { + return strings.TrimPrefix(string(n), string(IPCModeContainer)+":") + } + return "" +} + +// NetworkMode represents the container network stack. +type NetworkMode string + +// IsNone indicates whether container isn't using a network stack. +func (n NetworkMode) IsNone() bool { + return n == "none" +} + +// IsDefault indicates whether container uses the default network stack. +func (n NetworkMode) IsDefault() bool { + return n == "default" +} + +// IsPrivate indicates whether container uses its private network stack. +func (n NetworkMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsContainer indicates whether container uses a container network stack. +func (n NetworkMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// ConnectedContainer is the id of the container which network this container is connected to. +func (n NetworkMode) ConnectedContainer() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// UserDefined indicates user-created network +func (n NetworkMode) UserDefined() string { + if n.IsUserDefined() { + return string(n) + } + return "" +} + +// UsernsMode represents userns mode in the container. 
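Several of the mode types above encode a reference to another container as the string "container:<name-or-id>". A short, hedged sketch of how those helpers behave; the container names are made up:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// Share the IPC namespace of another container.
	ipc := container.IpcMode("container:redis")
	fmt.Println(ipc.Valid(), ipc.IsContainer(), ipc.Container()) // true true redis

	// Join another container's network stack (docker run --network container:web).
	net := container.NetworkMode("container:web")
	fmt.Println(net.IsContainer(), net.ConnectedContainer()) // true web

	// A user-defined network is anything that is not one of the built-in modes.
	custom := container.NetworkMode("my-bridge")
	fmt.Println(custom.UserDefined()) // my-bridge
}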
+type UsernsMode string + +// IsHost indicates whether the container uses the host's userns. +func (n UsernsMode) IsHost() bool { + return n == "host" +} + +// IsPrivate indicates whether the container uses the a private userns. +func (n UsernsMode) IsPrivate() bool { + return !(n.IsHost()) +} + +// Valid indicates whether the userns is valid. +func (n UsernsMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + default: + return false + } + return true +} + +// CgroupSpec represents the cgroup to use for the container. +type CgroupSpec string + +// IsContainer indicates whether the container is using another container cgroup +func (c CgroupSpec) IsContainer() bool { + parts := strings.SplitN(string(c), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the cgroup spec is valid. +func (c CgroupSpec) Valid() bool { + return c.IsContainer() || c == "" +} + +// Container returns the name of the container whose cgroup will be used. +func (c CgroupSpec) Container() string { + parts := strings.SplitN(string(c), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// UTSMode represents the UTS namespace of the container. +type UTSMode string + +// IsPrivate indicates whether the container uses its private UTS namespace. +func (n UTSMode) IsPrivate() bool { + return !(n.IsHost()) +} + +// IsHost indicates whether the container uses the host's UTS namespace. +func (n UTSMode) IsHost() bool { + return n == "host" +} + +// Valid indicates whether the UTS namespace is valid. +func (n UTSMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + default: + return false + } + return true +} + +// PidMode represents the pid namespace of the container. +type PidMode string + +// IsPrivate indicates whether the container uses its own new pid namespace. +func (n PidMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsHost indicates whether the container uses the host's pid namespace. +func (n PidMode) IsHost() bool { + return n == "host" +} + +// IsContainer indicates whether the container uses a container's pid namespace. +func (n PidMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the pid namespace is valid. +func (n PidMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + case "container": + if len(parts) != 2 || parts[1] == "" { + return false + } + default: + return false + } + return true +} + +// Container returns the name of the container whose pid namespace is going to be used. +func (n PidMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// DeviceRequest represents a request for devices from a device driver. +// Used by GPU device drivers. +type DeviceRequest struct { + Driver string // Name of device driver + Count int // Number of devices to request (-1 = All) + DeviceIDs []string // List of device IDs as recognizable by the device driver + Capabilities [][]string // An OR list of AND lists of device capabilities (e.g. "gpu") + Options map[string]string // Options to pass onto the device driver +} + +// DeviceMapping represents the device mapping between the host and the container. 
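DeviceRequest above is the structure behind `docker run --gpus`; requests are placed in HostConfig.Resources.DeviceRequests. A hedged example; the driver name and capability lists depend on the runtime that is actually installed, "nvidia" is only an assumption here:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// Rough equivalent of `docker run --gpus all`.
	allGPUs := container.DeviceRequest{
		Driver:       "nvidia",
		Count:        -1, // -1 means "all available devices"
		Capabilities: [][]string{{"gpu"}},
	}

	// Requesting two specific devices by ID instead of a count.
	twoGPUs := container.DeviceRequest{
		Driver:       "nvidia",
		DeviceIDs:    []string{"0", "1"},
		Capabilities: [][]string{{"gpu", "utility"}},
	}

	fmt.Printf("%+v\n%+v\n", allGPUs, twoGPUs)
}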
+type DeviceMapping struct { + PathOnHost string + PathInContainer string + CgroupPermissions string +} + +// RestartPolicy represents the restart policies of the container. +type RestartPolicy struct { + Name string + MaximumRetryCount int +} + +// IsNone indicates whether the container has the "no" restart policy. +// This means the container will not automatically restart when exiting. +func (rp *RestartPolicy) IsNone() bool { + return rp.Name == "no" || rp.Name == "" +} + +// IsAlways indicates whether the container has the "always" restart policy. +// This means the container will automatically restart regardless of the exit status. +func (rp *RestartPolicy) IsAlways() bool { + return rp.Name == "always" +} + +// IsOnFailure indicates whether the container has the "on-failure" restart policy. +// This means the container will automatically restart of exiting with a non-zero exit status. +func (rp *RestartPolicy) IsOnFailure() bool { + return rp.Name == "on-failure" +} + +// IsUnlessStopped indicates whether the container has the +// "unless-stopped" restart policy. This means the container will +// automatically restart unless user has put it to stopped state. +func (rp *RestartPolicy) IsUnlessStopped() bool { + return rp.Name == "unless-stopped" +} + +// IsSame compares two RestartPolicy to see if they are the same +func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { + return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount +} + +// LogMode is a type to define the available modes for logging +// These modes affect how logs are handled when log messages start piling up. +type LogMode string + +// Available logging modes +const ( + LogModeUnset LogMode = "" + LogModeBlocking LogMode = "blocking" + LogModeNonBlock LogMode = "non-blocking" +) + +// LogConfig represents the logging configuration of the container. +type LogConfig struct { + Type string + Config map[string]string +} + +// Resources contains container's resources (cgroups config, ulimits...) +type Resources struct { + // Applicable to all platforms + CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers) + Memory int64 // Memory limit (in bytes) + NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10-9 CPUs. + + // Applicable to UNIX platforms + CgroupParent string // Parent cgroup. + BlkioWeight uint16 // Block IO weight (relative weight vs. other containers) + BlkioWeightDevice []*blkiodev.WeightDevice + BlkioDeviceReadBps []*blkiodev.ThrottleDevice + BlkioDeviceWriteBps []*blkiodev.ThrottleDevice + BlkioDeviceReadIOps []*blkiodev.ThrottleDevice + BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice + CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period + CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota + CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period + CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime + CpusetCpus string // CpusetCpus 0-2, 0,1 + CpusetMems string // CpusetMems 0-2, 0,1 + Devices []DeviceMapping // List of devices to map inside the container + DeviceCgroupRules []string // List of rule to be added to the device cgroup + DeviceRequests []DeviceRequest // List of device requests for device drivers + + // KernelMemory specifies the kernel memory limit (in bytes) for the container. + // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes. 
+ KernelMemory int64 `json:",omitempty"` + KernelMemoryTCP int64 `json:",omitempty"` // Hard limit for kernel TCP buffer memory (in bytes) + MemoryReservation int64 // Memory soft limit (in bytes) + MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap + MemorySwappiness *int64 // Tuning container memory swappiness behaviour + OomKillDisable *bool // Whether to disable OOM Killer or not + PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change. + Ulimits []*units.Ulimit // List of ulimits to be set in the container + + // Applicable to Windows + CPUCount int64 `json:"CpuCount"` // CPU count + CPUPercent int64 `json:"CpuPercent"` // CPU percent + IOMaximumIOps uint64 // Maximum IOps for the container system drive + IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive +} + +// UpdateConfig holds the mutable attributes of a Container. +// Those attributes can be updated at runtime. +type UpdateConfig struct { + // Contains container's resources (cgroups, ulimits) + Resources + RestartPolicy RestartPolicy +} + +// HostConfig the non-portable Config structure of a container. +// Here, "non-portable" means "dependent of the host we are running on". +// Portable information *should* appear in Config. +type HostConfig struct { + // Applicable to all platforms + Binds []string // List of volume bindings for this container + ContainerIDFile string // File (path) where the containerId is written + LogConfig LogConfig // Configuration of the logs for this container + NetworkMode NetworkMode // Network mode to use for the container + PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host + RestartPolicy RestartPolicy // Restart policy to be used for the container + AutoRemove bool // Automatically remove container when it exits + VolumeDriver string // Name of the volume driver used to mount volumes + VolumesFrom []string // List of volumes to take from other container + ConsoleSize [2]uint // Initial console size (height,width) + + // Applicable to UNIX platforms + CapAdd strslice.StrSlice // List of kernel capabilities to add to the container + CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container + CgroupnsMode CgroupnsMode // Cgroup namespace mode to use for the container + DNS []string `json:"Dns"` // List of DNS server to lookup + DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for + DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for + ExtraHosts []string // List of extra hosts + GroupAdd []string // List of additional groups that the container process will run as + IpcMode IpcMode // IPC namespace to use for the container + Cgroup CgroupSpec // Cgroup to use for the container + Links []string // List of links (in the name:alias form) + OomScoreAdj int // Container preference for OOM-killing + PidMode PidMode // PID namespace to use for the container + Privileged bool // Is the container in privileged mode + PublishAllPorts bool // Should docker publish all exposed port for the container + ReadonlyRootfs bool // Is the container root filesystem in read-only + SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. + StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container. 
+ Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container + UTSMode UTSMode // UTS namespace to use for the container + UsernsMode UsernsMode // The user namespace to use for the container + ShmSize int64 // Total shm memory usage + Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container + Runtime string `json:",omitempty"` // Runtime to use with this container + + // Applicable to Windows + Isolation Isolation // Isolation technology of the container (e.g. default, hyperv) + + // Contains container's resources (cgroups, ulimits) + Resources + + // Mounts specs used by the container + Mounts []mount.Mount `json:",omitempty"` + + // MaskedPaths is the list of paths to be masked inside the container (this overrides the default set of paths) + MaskedPaths []string + + // ReadonlyPaths is the list of paths to be set as read-only inside the container (this overrides the default set of paths) + ReadonlyPaths []string + + // Run a custom init inside the container, if null, use the daemon's configured settings + Init *bool `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go new file mode 100644 index 000000000..24c4fa8d9 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go @@ -0,0 +1,42 @@ +//go:build !windows +// +build !windows + +package container // import "github.com/docker/docker/api/types/container" + +// IsValid indicates if an isolation technology is valid +func (i Isolation) IsValid() bool { + return i.IsDefault() +} + +// NetworkName returns the name of the network stack. +func (n NetworkMode) NetworkName() string { + if n.IsBridge() { + return "bridge" + } else if n.IsHost() { + return "host" + } else if n.IsContainer() { + return "container" + } else if n.IsNone() { + return "none" + } else if n.IsDefault() { + return "default" + } else if n.IsUserDefined() { + return n.UserDefined() + } + return "" +} + +// IsBridge indicates whether container uses the bridge network stack +func (n NetworkMode) IsBridge() bool { + return n == "bridge" +} + +// IsHost indicates whether container uses the host network stack. +func (n NetworkMode) IsHost() bool { + return n == "host" +} + +// IsUserDefined indicates user-created network +func (n NetworkMode) IsUserDefined() bool { + return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() +} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go new file mode 100644 index 000000000..99f803a5b --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go @@ -0,0 +1,40 @@ +package container // import "github.com/docker/docker/api/types/container" + +// IsBridge indicates whether container uses the bridge network stack +// in windows it is given the name NAT +func (n NetworkMode) IsBridge() bool { + return n == "nat" +} + +// IsHost indicates whether container uses the host network stack. 
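HostConfig pulls most of the pieces defined in this file together. A hedged sketch of a host configuration combining network mode, restart policy, log settings, resources and a bind mount; every value is illustrative only:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/mount"
)

func main() {
	hostCfg := container.HostConfig{
		NetworkMode: "bridge",
		RestartPolicy: container.RestartPolicy{
			Name:              "on-failure",
			MaximumRetryCount: 3,
		},
		LogConfig: container.LogConfig{
			Type:   "json-file",
			Config: map[string]string{"max-size": "10m", "max-file": "3"},
		},
		Resources: container.Resources{
			Memory:   512 * 1024 * 1024, // 512 MiB, in bytes
			NanoCPUs: 1_500_000_000,     // 1.5 CPUs, in units of 1e-9 CPUs
		},
		Mounts: []mount.Mount{{
			Type:   mount.TypeBind,
			Source: "/srv/data",
			Target: "/data",
		}},
		ReadonlyRootfs: true,
	}

	fmt.Println(hostCfg.RestartPolicy.IsOnFailure()) // true
	fmt.Printf("%+v\n", hostCfg.LogConfig)
}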
+// returns false as this is not supported by windows +func (n NetworkMode) IsHost() bool { + return false +} + +// IsUserDefined indicates user-created network +func (n NetworkMode) IsUserDefined() bool { + return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer() +} + +// IsValid indicates if an isolation technology is valid +func (i Isolation) IsValid() bool { + return i.IsDefault() || i.IsHyperV() || i.IsProcess() +} + +// NetworkName returns the name of the network stack. +func (n NetworkMode) NetworkName() string { + if n.IsDefault() { + return "default" + } else if n.IsBridge() { + return "nat" + } else if n.IsNone() { + return "none" + } else if n.IsContainer() { + return "container" + } else if n.IsUserDefined() { + return n.UserDefined() + } + + return "" +} diff --git a/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go b/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go new file mode 100644 index 000000000..ab56d4eed --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go @@ -0,0 +1,12 @@ +package container + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// WaitExitError container waiting error, if any +// swagger:model WaitExitError +type WaitExitError struct { + + // Details of an error + Message string `json:"Message,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/wait_response.go b/vendor/github.com/docker/docker/api/types/container/wait_response.go new file mode 100644 index 000000000..84fc6afdd --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/wait_response.go @@ -0,0 +1,18 @@ +package container + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// WaitResponse ContainerWaitResponse +// +// OK response to ContainerWait operation +// swagger:model WaitResponse +type WaitResponse struct { + + // error + Error *WaitExitError `json:"Error,omitempty"` + + // Exit code of the container + // Required: true + StatusCode int64 `json:"StatusCode"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/waitcondition.go b/vendor/github.com/docker/docker/api/types/container/waitcondition.go new file mode 100644 index 000000000..cd8311f99 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/waitcondition.go @@ -0,0 +1,22 @@ +package container // import "github.com/docker/docker/api/types/container" + +// WaitCondition is a type used to specify a container state for which +// to wait. +type WaitCondition string + +// Possible WaitCondition Values. +// +// WaitConditionNotRunning (default) is used to wait for any of the non-running +// states: "created", "exited", "dead", "removing", or "removed". +// +// WaitConditionNextExit is used to wait for the next time the state changes +// to a non-running state. If the state is currently "created" or "exited", +// this would cause Wait() to block until either the container runs and exits +// or is removed. +// +// WaitConditionRemoved is used to wait for the container to be removed. 
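WaitResponse, WaitExitError and WaitCondition (whose constants follow just below) describe the ContainerWait contract. A hedged sketch that assumes the channel-based ContainerWait API of the docker client package, which is outside this vendored excerpt; the container name is made up:

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}

	// Wait until the container reaches a non-running state (the default condition).
	respCh, errCh := cli.ContainerWait(ctx, "my-container", container.WaitConditionNotRunning)

	select {
	case err := <-errCh:
		// Transport or API error before a result was produced.
		panic(err)
	case resp := <-respCh:
		if resp.Error != nil {
			fmt.Println("wait error:", resp.Error.Message)
		}
		fmt.Println("exit code:", resp.StatusCode)
	}
}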
+const ( + WaitConditionNotRunning WaitCondition = "not-running" + WaitConditionNextExit WaitCondition = "next-exit" + WaitConditionRemoved WaitCondition = "removed" +) diff --git a/vendor/github.com/docker/docker/api/types/deprecated.go b/vendor/github.com/docker/docker/api/types/deprecated.go new file mode 100644 index 000000000..216d1df0f --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/deprecated.go @@ -0,0 +1,14 @@ +package types // import "github.com/docker/docker/api/types" + +import "github.com/docker/docker/api/types/volume" + +// Volume volume +// +// Deprecated: use github.com/docker/docker/api/types/volume.Volume +type Volume = volume.Volume + +// VolumeUsageData Usage details about the volume. This information is used by the +// `GET /system/df` endpoint, and omitted in other endpoints. +// +// Deprecated: use github.com/docker/docker/api/types/volume.UsageData +type VolumeUsageData = volume.UsageData diff --git a/vendor/github.com/docker/docker/api/types/error_response.go b/vendor/github.com/docker/docker/api/types/error_response.go new file mode 100644 index 000000000..dc942d9d9 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/error_response.go @@ -0,0 +1,13 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ErrorResponse Represents an error. +// swagger:model ErrorResponse +type ErrorResponse struct { + + // The error message. + // Required: true + Message string `json:"message"` +} diff --git a/vendor/github.com/docker/docker/api/types/error_response_ext.go b/vendor/github.com/docker/docker/api/types/error_response_ext.go new file mode 100644 index 000000000..f84f034cd --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/error_response_ext.go @@ -0,0 +1,6 @@ +package types + +// Error returns the error message +func (e ErrorResponse) Error() string { + return e.Message +} diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/docker/docker/api/types/events/events.go new file mode 100644 index 000000000..9fe07e26f --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/events/events.go @@ -0,0 +1,47 @@ +package events // import "github.com/docker/docker/api/types/events" + +// Type is used for event-types. +type Type = string + +// List of known event types. +const ( + BuilderEventType Type = "builder" // BuilderEventType is the event type that the builder generates. + ConfigEventType Type = "config" // ConfigEventType is the event type that configs generate. + ContainerEventType Type = "container" // ContainerEventType is the event type that containers generate. + DaemonEventType Type = "daemon" // DaemonEventType is the event type that daemon generate. + ImageEventType Type = "image" // ImageEventType is the event type that images generate. + NetworkEventType Type = "network" // NetworkEventType is the event type that networks generate. + NodeEventType Type = "node" // NodeEventType is the event type that nodes generate. + PluginEventType Type = "plugin" // PluginEventType is the event type that plugins generate. + SecretEventType Type = "secret" // SecretEventType is the event type that secrets generate. + ServiceEventType Type = "service" // ServiceEventType is the event type that services generate. + VolumeEventType Type = "volume" // VolumeEventType is the event type that volumes generate. 
+) + +// Actor describes something that generates events, +// like a container, or a network, or a volume. +// It has a defined name and a set of attributes. +// The container attributes are its labels, other actors +// can generate these attributes from other properties. +type Actor struct { + ID string + Attributes map[string]string +} + +// Message represents the information an event contains +type Message struct { + // Deprecated information from JSONMessage. + // With data only in container events. + Status string `json:"status,omitempty"` // Deprecated: use Action instead. + ID string `json:"id,omitempty"` // Deprecated: use Actor.ID instead. + From string `json:"from,omitempty"` // Deprecated: use Actor.Attributes["image"] instead. + + Type Type + Action string + Actor Actor + // Engine events are local scope. Cluster events are swarm scope. + Scope string `json:"scope,omitempty"` + + Time int64 `json:"time,omitempty"` + TimeNano int64 `json:"timeNano,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go new file mode 100644 index 000000000..f8fe79407 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/filters/parse.go @@ -0,0 +1,323 @@ +/* +Package filters provides tools for encoding a mapping of keys to a set of +multiple values. +*/ +package filters // import "github.com/docker/docker/api/types/filters" + +import ( + "encoding/json" + "regexp" + "strings" + + "github.com/docker/docker/api/types/versions" + "github.com/pkg/errors" +) + +// Args stores a mapping of keys to a set of multiple values. +type Args struct { + fields map[string]map[string]bool +} + +// KeyValuePair are used to initialize a new Args +type KeyValuePair struct { + Key string + Value string +} + +// Arg creates a new KeyValuePair for initializing Args +func Arg(key, value string) KeyValuePair { + return KeyValuePair{Key: key, Value: value} +} + +// NewArgs returns a new Args populated with the initial args +func NewArgs(initialArgs ...KeyValuePair) Args { + args := Args{fields: map[string]map[string]bool{}} + for _, arg := range initialArgs { + args.Add(arg.Key, arg.Value) + } + return args +} + +// Keys returns all the keys in list of Args +func (args Args) Keys() []string { + keys := make([]string, 0, len(args.fields)) + for k := range args.fields { + keys = append(keys, k) + } + return keys +} + +// MarshalJSON returns a JSON byte representation of the Args +func (args Args) MarshalJSON() ([]byte, error) { + if len(args.fields) == 0 { + return []byte("{}"), nil + } + return json.Marshal(args.fields) +} + +// ToJSON returns the Args as a JSON encoded string +func ToJSON(a Args) (string, error) { + if a.Len() == 0 { + return "", nil + } + buf, err := json.Marshal(a) + return string(buf), err +} + +// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22 +// then the encoded format will use an older legacy format where the values are a +// list of strings, instead of a set. 
+// +// Deprecated: do not use in any new code; use ToJSON instead +func ToParamWithVersion(version string, a Args) (string, error) { + if a.Len() == 0 { + return "", nil + } + + if version != "" && versions.LessThan(version, "1.22") { + buf, err := json.Marshal(convertArgsToSlice(a.fields)) + return string(buf), err + } + + return ToJSON(a) +} + +// FromJSON decodes a JSON encoded string into Args +func FromJSON(p string) (Args, error) { + args := NewArgs() + + if p == "" { + return args, nil + } + + raw := []byte(p) + err := json.Unmarshal(raw, &args) + if err == nil { + return args, nil + } + + // Fallback to parsing arguments in the legacy slice format + deprecated := map[string][]string{} + if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil { + return args, invalidFilter{errors.Wrap(err, "invalid filter")} + } + + args.fields = deprecatedArgs(deprecated) + return args, nil +} + +// UnmarshalJSON populates the Args from JSON encode bytes +func (args Args) UnmarshalJSON(raw []byte) error { + return json.Unmarshal(raw, &args.fields) +} + +// Get returns the list of values associated with the key +func (args Args) Get(key string) []string { + values := args.fields[key] + if values == nil { + return make([]string, 0) + } + slice := make([]string, 0, len(values)) + for key := range values { + slice = append(slice, key) + } + return slice +} + +// Add a new value to the set of values +func (args Args) Add(key, value string) { + if _, ok := args.fields[key]; ok { + args.fields[key][value] = true + } else { + args.fields[key] = map[string]bool{value: true} + } +} + +// Del removes a value from the set +func (args Args) Del(key, value string) { + if _, ok := args.fields[key]; ok { + delete(args.fields[key], value) + if len(args.fields[key]) == 0 { + delete(args.fields, key) + } + } +} + +// Len returns the number of keys in the mapping +func (args Args) Len() int { + return len(args.fields) +} + +// MatchKVList returns true if all the pairs in sources exist as key=value +// pairs in the mapping at key, or if there are no values at key. +func (args Args) MatchKVList(key string, sources map[string]string) bool { + fieldValues := args.fields[key] + + // do not filter if there is no filter set or cannot determine filter + if len(fieldValues) == 0 { + return true + } + + if len(sources) == 0 { + return false + } + + for value := range fieldValues { + testKV := strings.SplitN(value, "=", 2) + + v, ok := sources[testKV[0]] + if !ok { + return false + } + if len(testKV) == 2 && testKV[1] != v { + return false + } + } + + return true +} + +// Match returns true if any of the values at key match the source string +func (args Args) Match(field, source string) bool { + if args.ExactMatch(field, source) { + return true + } + + fieldValues := args.fields[field] + for name2match := range fieldValues { + match, err := regexp.MatchString(name2match, source) + if err != nil { + continue + } + if match { + return true + } + } + return false +} + +// ExactMatch returns true if the source matches exactly one of the values. +func (args Args) ExactMatch(key, source string) bool { + fieldValues, ok := args.fields[key] + // do not filter if there is no filter set or cannot determine filter + if !ok || len(fieldValues) == 0 { + return true + } + + // try to match full name value to avoid O(N) regular expression matching + return fieldValues[source] +} + +// UniqueExactMatch returns true if there is only one value and the source +// matches exactly the value. 
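The Args helpers above are what populate the Filters field of the various list-option structs defined earlier in this patch. A hedged usage sketch; the filter values and registry reference are illustrative:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
)

func main() {
	// Equivalent of `docker images --filter label=env=prod --filter dangling=false`.
	f := filters.NewArgs(
		filters.Arg("label", "env=prod"),
		filters.Arg("dangling", "false"),
	)
	f.Add("reference", "registry.example.com/*") // a key can hold multiple values

	fmt.Println(f.Keys())                          // e.g. [label dangling reference]
	fmt.Println(f.Get("label"))                    // [env=prod]
	fmt.Println(f.Contains("dangling"))            // true
	fmt.Println(f.ExactMatch("label", "env=prod")) // true

	// The encoded form is what actually travels on the query string.
	encoded, err := filters.ToJSON(f)
	if err != nil {
		panic(err)
	}
	fmt.Println(encoded)

	// Plugged into one of the option structs defined earlier in this patch.
	_ = types.ImageListOptions{Filters: f}
}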
+func (args Args) UniqueExactMatch(key, source string) bool { + fieldValues := args.fields[key] + // do not filter if there is no filter set or cannot determine filter + if len(fieldValues) == 0 { + return true + } + if len(args.fields[key]) != 1 { + return false + } + + // try to match full name value to avoid O(N) regular expression matching + return fieldValues[source] +} + +// FuzzyMatch returns true if the source matches exactly one value, or the +// source has one of the values as a prefix. +func (args Args) FuzzyMatch(key, source string) bool { + if args.ExactMatch(key, source) { + return true + } + + fieldValues := args.fields[key] + for prefix := range fieldValues { + if strings.HasPrefix(source, prefix) { + return true + } + } + return false +} + +// Contains returns true if the key exists in the mapping +func (args Args) Contains(field string) bool { + _, ok := args.fields[field] + return ok +} + +type invalidFilter struct{ error } + +func (e invalidFilter) Error() string { + return e.error.Error() +} + +func (invalidFilter) InvalidParameter() {} + +// Validate compared the set of accepted keys against the keys in the mapping. +// An error is returned if any mapping keys are not in the accepted set. +func (args Args) Validate(accepted map[string]bool) error { + for name := range args.fields { + if !accepted[name] { + return invalidFilter{errors.New("invalid filter '" + name + "'")} + } + } + return nil +} + +// WalkValues iterates over the list of values for a key in the mapping and calls +// op() for each value. If op returns an error the iteration stops and the +// error is returned. +func (args Args) WalkValues(field string, op func(value string) error) error { + if _, ok := args.fields[field]; !ok { + return nil + } + for v := range args.fields[field] { + if err := op(v); err != nil { + return err + } + } + return nil +} + +// Clone returns a copy of args. +func (args Args) Clone() (newArgs Args) { + newArgs.fields = make(map[string]map[string]bool, len(args.fields)) + for k, m := range args.fields { + var mm map[string]bool + if m != nil { + mm = make(map[string]bool, len(m)) + for kk, v := range m { + mm[kk] = v + } + } + newArgs.fields[k] = mm + } + return newArgs +} + +func deprecatedArgs(d map[string][]string) map[string]map[string]bool { + m := map[string]map[string]bool{} + for k, v := range d { + values := map[string]bool{} + for _, vv := range v { + values[vv] = true + } + m[k] = values + } + return m +} + +func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { + m := map[string][]string{} + for k, v := range f { + values := []string{} + for kk := range v { + if v[kk] { + values = append(values, kk) + } + } + m[k] = values + } + return m +} diff --git a/vendor/github.com/docker/docker/api/types/graph_driver_data.go b/vendor/github.com/docker/docker/api/types/graph_driver_data.go new file mode 100644 index 000000000..ce3deb331 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/graph_driver_data.go @@ -0,0 +1,23 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// GraphDriverData Information about the storage driver used to store the container's and +// image's filesystem. +// +// swagger:model GraphDriverData +type GraphDriverData struct { + + // Low-level storage metadata, provided as key/value pairs. 
+ // + // This information is driver-specific, and depends on the storage-driver + // in use, and should be used for informational purposes only. + // + // Required: true + Data map[string]string `json:"Data"` + + // Name of the storage driver. + // Required: true + Name string `json:"Name"` +} diff --git a/vendor/github.com/docker/docker/api/types/id_response.go b/vendor/github.com/docker/docker/api/types/id_response.go new file mode 100644 index 000000000..7592d2f8b --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/id_response.go @@ -0,0 +1,13 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// IDResponse Response to an API call that returns just an Id +// swagger:model IdResponse +type IDResponse struct { + + // The id of the newly created object. + // Required: true + ID string `json:"Id"` +} diff --git a/vendor/github.com/docker/docker/api/types/image/image_history.go b/vendor/github.com/docker/docker/api/types/image/image_history.go new file mode 100644 index 000000000..e302bb0ae --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/image/image_history.go @@ -0,0 +1,36 @@ +package image // import "github.com/docker/docker/api/types/image" + +// ---------------------------------------------------------------------------- +// Code generated by `swagger generate operation`. DO NOT EDIT. +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// HistoryResponseItem individual image layer information in response to ImageHistory operation +// swagger:model HistoryResponseItem +type HistoryResponseItem struct { + + // comment + // Required: true + Comment string `json:"Comment"` + + // created + // Required: true + Created int64 `json:"Created"` + + // created by + // Required: true + CreatedBy string `json:"CreatedBy"` + + // Id + // Required: true + ID string `json:"Id"` + + // size + // Required: true + Size int64 `json:"Size"` + + // tags + // Required: true + Tags []string `json:"Tags"` +} diff --git a/vendor/github.com/docker/docker/api/types/image_delete_response_item.go b/vendor/github.com/docker/docker/api/types/image_delete_response_item.go new file mode 100644 index 000000000..b9a65a0d8 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/image_delete_response_item.go @@ -0,0 +1,15 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ImageDeleteResponseItem image delete response item +// swagger:model ImageDeleteResponseItem +type ImageDeleteResponseItem struct { + + // The image ID of an image that was deleted + Deleted string `json:"Deleted,omitempty"` + + // The image ID of an image that was untagged + Untagged string `json:"Untagged,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/image_summary.go b/vendor/github.com/docker/docker/api/types/image_summary.go new file mode 100644 index 000000000..90b983a25 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/image_summary.go @@ -0,0 +1,97 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ImageSummary image summary +// swagger:model ImageSummary +type ImageSummary struct { + + // Number of containers using this image. Includes both stopped and running + // containers. 
+ // + // This size is not calculated by default, and depends on which API endpoint + // is used. `-1` indicates that the value has not been set / calculated. + // + // Required: true + Containers int64 `json:"Containers"` + + // Date and time at which the image was created as a Unix timestamp + // (number of seconds sinds EPOCH). + // + // Required: true + Created int64 `json:"Created"` + + // ID is the content-addressable ID of an image. + // + // This identifier is a content-addressable digest calculated from the + // image's configuration (which includes the digests of layers used by + // the image). + // + // Note that this digest differs from the `RepoDigests` below, which + // holds digests of image manifests that reference the image. + // + // Required: true + ID string `json:"Id"` + + // User-defined key/value metadata. + // Required: true + Labels map[string]string `json:"Labels"` + + // ID of the parent image. + // + // Depending on how the image was created, this field may be empty and + // is only set for images that were built/created locally. This field + // is empty if the image was pulled from an image registry. + // + // Required: true + ParentID string `json:"ParentId"` + + // List of content-addressable digests of locally available image manifests + // that the image is referenced from. Multiple manifests can refer to the + // same image. + // + // These digests are usually only available if the image was either pulled + // from a registry, or if the image was pushed to a registry, which is when + // the manifest is generated and its digest calculated. + // + // Required: true + RepoDigests []string `json:"RepoDigests"` + + // List of image names/tags in the local image cache that reference this + // image. + // + // Multiple image tags can refer to the same image, and this list may be + // empty if no tags reference the image, in which case the image is + // "untagged", in which case it can still be referenced by its ID. + // + // Required: true + RepoTags []string `json:"RepoTags"` + + // Total size of image layers that are shared between this image and other + // images. + // + // This size is not calculated by default. `-1` indicates that the value + // has not been set / calculated. + // + // Required: true + SharedSize int64 `json:"SharedSize"` + + // Total size of the image including all layers it is composed of. + // + // Required: true + Size int64 `json:"Size"` + + // Total size of the image including all layers it is composed of. + // + // In versions of Docker before v1.10, this field was calculated from + // the image itself and all of its parent images. Docker v1.10 and up + // store images self-contained, and no longer use a parent-chain, making + // this field an equivalent of the Size field. + // + // This field is kept for backward compatibility, but may be removed in + // a future version of the API. + // + // Required: true + VirtualSize int64 `json:"VirtualSize"` +} diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go new file mode 100644 index 000000000..ac4ce6223 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/mount/mount.go @@ -0,0 +1,140 @@ +package mount // import "github.com/docker/docker/api/types/mount" + +import ( + "os" +) + +// Type represents the type of a mount. 
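The mount package, continued below, models the `--mount` flag. A hedged sketch of the three most common mount shapes, using the types defined in the remainder of this file; paths, names and sizes are illustrative:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/mount"
)

func main() {
	mounts := []mount.Mount{
		{
			// --mount type=bind,src=/srv/config,dst=/etc/app,ro
			Type:     mount.TypeBind,
			Source:   "/srv/config",
			Target:   "/etc/app",
			ReadOnly: true,
			BindOptions: &mount.BindOptions{
				Propagation: mount.PropagationRPrivate,
			},
		},
		{
			// --mount type=volume,src=appdata,dst=/var/lib/app
			Type:   mount.TypeVolume,
			Source: "appdata",
			Target: "/var/lib/app",
			VolumeOptions: &mount.VolumeOptions{
				Labels: map[string]string{"env": "prod"},
			},
		},
		{
			// --mount type=tmpfs,dst=/run/cache,tmpfs-size=64m
			Type:   mount.TypeTmpfs,
			Target: "/run/cache",
			TmpfsOptions: &mount.TmpfsOptions{
				SizeBytes: 64 * 1024 * 1024,
				Mode:      0o1777,
			},
		},
	}

	for _, m := range mounts {
		fmt.Printf("%-6s -> %s\n", m.Type, m.Target)
	}
}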
+type Type string + +// Type constants +const ( + // TypeBind is the type for mounting host dir + TypeBind Type = "bind" + // TypeVolume is the type for remote storage volumes + TypeVolume Type = "volume" + // TypeTmpfs is the type for mounting tmpfs + TypeTmpfs Type = "tmpfs" + // TypeNamedPipe is the type for mounting Windows named pipes + TypeNamedPipe Type = "npipe" + // TypeCluster is the type for Swarm Cluster Volumes. + TypeCluster Type = "cluster" +) + +// Mount represents a mount (volume). +type Mount struct { + Type Type `json:",omitempty"` + // Source specifies the name of the mount. Depending on mount type, this + // may be a volume name or a host path, or even ignored. + // Source is not supported for tmpfs (must be an empty value) + Source string `json:",omitempty"` + Target string `json:",omitempty"` + ReadOnly bool `json:",omitempty"` + Consistency Consistency `json:",omitempty"` + + BindOptions *BindOptions `json:",omitempty"` + VolumeOptions *VolumeOptions `json:",omitempty"` + TmpfsOptions *TmpfsOptions `json:",omitempty"` + ClusterOptions *ClusterOptions `json:",omitempty"` +} + +// Propagation represents the propagation of a mount. +type Propagation string + +const ( + // PropagationRPrivate RPRIVATE + PropagationRPrivate Propagation = "rprivate" + // PropagationPrivate PRIVATE + PropagationPrivate Propagation = "private" + // PropagationRShared RSHARED + PropagationRShared Propagation = "rshared" + // PropagationShared SHARED + PropagationShared Propagation = "shared" + // PropagationRSlave RSLAVE + PropagationRSlave Propagation = "rslave" + // PropagationSlave SLAVE + PropagationSlave Propagation = "slave" +) + +// Propagations is the list of all valid mount propagations +var Propagations = []Propagation{ + PropagationRPrivate, + PropagationPrivate, + PropagationRShared, + PropagationShared, + PropagationRSlave, + PropagationSlave, +} + +// Consistency represents the consistency requirements of a mount. +type Consistency string + +const ( + // ConsistencyFull guarantees bind mount-like consistency + ConsistencyFull Consistency = "consistent" + // ConsistencyCached mounts can cache read data and FS structure + ConsistencyCached Consistency = "cached" + // ConsistencyDelegated mounts can cache read and written data and structure + ConsistencyDelegated Consistency = "delegated" + // ConsistencyDefault provides "consistent" behavior unless overridden + ConsistencyDefault Consistency = "default" +) + +// BindOptions defines options specific to mounts of type "bind". +type BindOptions struct { + Propagation Propagation `json:",omitempty"` + NonRecursive bool `json:",omitempty"` + CreateMountpoint bool `json:",omitempty"` +} + +// VolumeOptions represents the options for a mount of type volume. +type VolumeOptions struct { + NoCopy bool `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + DriverConfig *Driver `json:",omitempty"` +} + +// Driver represents a volume driver. +type Driver struct { + Name string `json:",omitempty"` + Options map[string]string `json:",omitempty"` +} + +// TmpfsOptions defines options specific to mounts of type "tmpfs". +type TmpfsOptions struct { + // Size sets the size of the tmpfs, in bytes. + // + // This will be converted to an operating system specific value + // depending on the host. For example, on linux, it will be converted to + // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with + // docker, uses a straight byte value. + // + // Percentages are not supported. 
+ SizeBytes int64 `json:",omitempty"` + // Mode of the tmpfs upon creation + Mode os.FileMode `json:",omitempty"` + + // TODO(stevvooe): There are several more tmpfs flags, specified in the + // daemon, that are accepted. Only the most basic are added for now. + // + // From https://github.com/moby/sys/blob/mount/v0.1.1/mount/flags.go#L47-L56 + // + // var validFlags = map[string]bool{ + // "": true, + // "size": true, X + // "mode": true, X + // "uid": true, + // "gid": true, + // "nr_inodes": true, + // "nr_blocks": true, + // "mpol": true, + // } + // + // Some of these may be straightforward to add, but others, such as + // uid/gid have implications in a clustered system. +} + +// ClusterOptions specifies options for a Cluster volume. +type ClusterOptions struct { + // intentionally empty +} diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go new file mode 100644 index 000000000..437b184c6 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/network/network.go @@ -0,0 +1,126 @@ +package network // import "github.com/docker/docker/api/types/network" +import ( + "github.com/docker/docker/api/types/filters" +) + +// Address represents an IP address +type Address struct { + Addr string + PrefixLen int +} + +// IPAM represents IP Address Management +type IPAM struct { + Driver string + Options map[string]string // Per network IPAM driver options + Config []IPAMConfig +} + +// IPAMConfig represents IPAM configurations +type IPAMConfig struct { + Subnet string `json:",omitempty"` + IPRange string `json:",omitempty"` + Gateway string `json:",omitempty"` + AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` +} + +// EndpointIPAMConfig represents IPAM configurations for the endpoint +type EndpointIPAMConfig struct { + IPv4Address string `json:",omitempty"` + IPv6Address string `json:",omitempty"` + LinkLocalIPs []string `json:",omitempty"` +} + +// Copy makes a copy of the endpoint ipam config +func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig { + cfgCopy := *cfg + cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs)) + cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...) + return &cfgCopy +} + +// PeerInfo represents one peer of an overlay network +type PeerInfo struct { + Name string + IP string +} + +// EndpointSettings stores the network endpoint details +type EndpointSettings struct { + // Configurations + IPAMConfig *EndpointIPAMConfig + Links []string + Aliases []string + // Operational data + NetworkID string + EndpointID string + Gateway string + IPAddress string + IPPrefixLen int + IPv6Gateway string + GlobalIPv6Address string + GlobalIPv6PrefixLen int + MacAddress string + DriverOpts map[string]string +} + +// Task carries the information about one backend task +type Task struct { + Name string + EndpointID string + EndpointIP string + Info map[string]string +} + +// ServiceInfo represents service parameters with the list of service's tasks +type ServiceInfo struct { + VIP string + Ports []string + LocalLBIndex int + Tasks []Task +} + +// Copy makes a deep copy of `EndpointSettings` +func (es *EndpointSettings) Copy() *EndpointSettings { + epCopy := *es + if es.IPAMConfig != nil { + epCopy.IPAMConfig = es.IPAMConfig.Copy() + } + + if es.Links != nil { + links := make([]string, 0, len(es.Links)) + epCopy.Links = append(links, es.Links...) 
+ } + + if es.Aliases != nil { + aliases := make([]string, 0, len(es.Aliases)) + epCopy.Aliases = append(aliases, es.Aliases...) + } + return &epCopy +} + +// NetworkingConfig represents the container's networking configuration for each of its interfaces +// Carries the networking configs specified in the `docker run` and `docker network connect` commands +type NetworkingConfig struct { + EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network +} + +// ConfigReference specifies the source which provides a network's configuration +type ConfigReference struct { + Network string +} + +var acceptedFilters = map[string]bool{ + "dangling": true, + "driver": true, + "id": true, + "label": true, + "name": true, + "scope": true, + "type": true, +} + +// ValidateFilters validates the list of filter args with the available filters. +func ValidateFilters(filter filters.Args) error { + return filter.Validate(acceptedFilters) +} diff --git a/vendor/github.com/docker/docker/api/types/plugin.go b/vendor/github.com/docker/docker/api/types/plugin.go new file mode 100644 index 000000000..abae48b9a --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin.go @@ -0,0 +1,203 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Plugin A plugin for the Engine API +// swagger:model Plugin +type Plugin struct { + + // config + // Required: true + Config PluginConfig `json:"Config"` + + // True if the plugin is running. False if the plugin is not running, only installed. + // Required: true + Enabled bool `json:"Enabled"` + + // Id + ID string `json:"Id,omitempty"` + + // name + // Required: true + Name string `json:"Name"` + + // plugin remote reference used to push/pull the plugin + PluginReference string `json:"PluginReference,omitempty"` + + // settings + // Required: true + Settings PluginSettings `json:"Settings"` +} + +// PluginConfig The config of a plugin. 
+// swagger:model PluginConfig +type PluginConfig struct { + + // args + // Required: true + Args PluginConfigArgs `json:"Args"` + + // description + // Required: true + Description string `json:"Description"` + + // Docker Version used to create the plugin + DockerVersion string `json:"DockerVersion,omitempty"` + + // documentation + // Required: true + Documentation string `json:"Documentation"` + + // entrypoint + // Required: true + Entrypoint []string `json:"Entrypoint"` + + // env + // Required: true + Env []PluginEnv `json:"Env"` + + // interface + // Required: true + Interface PluginConfigInterface `json:"Interface"` + + // ipc host + // Required: true + IpcHost bool `json:"IpcHost"` + + // linux + // Required: true + Linux PluginConfigLinux `json:"Linux"` + + // mounts + // Required: true + Mounts []PluginMount `json:"Mounts"` + + // network + // Required: true + Network PluginConfigNetwork `json:"Network"` + + // pid host + // Required: true + PidHost bool `json:"PidHost"` + + // propagated mount + // Required: true + PropagatedMount string `json:"PropagatedMount"` + + // user + User PluginConfigUser `json:"User,omitempty"` + + // work dir + // Required: true + WorkDir string `json:"WorkDir"` + + // rootfs + Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"` +} + +// PluginConfigArgs plugin config args +// swagger:model PluginConfigArgs +type PluginConfigArgs struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value []string `json:"Value"` +} + +// PluginConfigInterface The interface between Docker and the plugin +// swagger:model PluginConfigInterface +type PluginConfigInterface struct { + + // Protocol to use for clients connecting to the plugin. + ProtocolScheme string `json:"ProtocolScheme,omitempty"` + + // socket + // Required: true + Socket string `json:"Socket"` + + // types + // Required: true + Types []PluginInterfaceType `json:"Types"` +} + +// PluginConfigLinux plugin config linux +// swagger:model PluginConfigLinux +type PluginConfigLinux struct { + + // allow all devices + // Required: true + AllowAllDevices bool `json:"AllowAllDevices"` + + // capabilities + // Required: true + Capabilities []string `json:"Capabilities"` + + // devices + // Required: true + Devices []PluginDevice `json:"Devices"` +} + +// PluginConfigNetwork plugin config network +// swagger:model PluginConfigNetwork +type PluginConfigNetwork struct { + + // type + // Required: true + Type string `json:"Type"` +} + +// PluginConfigRootfs plugin config rootfs +// swagger:model PluginConfigRootfs +type PluginConfigRootfs struct { + + // diff ids + DiffIds []string `json:"diff_ids"` + + // type + Type string `json:"type,omitempty"` +} + +// PluginConfigUser plugin config user +// swagger:model PluginConfigUser +type PluginConfigUser struct { + + // g ID + GID uint32 `json:"GID,omitempty"` + + // UID + UID uint32 `json:"UID,omitempty"` +} + +// PluginSettings Settings that can be modified by users. 
+// swagger:model PluginSettings +type PluginSettings struct { + + // args + // Required: true + Args []string `json:"Args"` + + // devices + // Required: true + Devices []PluginDevice `json:"Devices"` + + // env + // Required: true + Env []string `json:"Env"` + + // mounts + // Required: true + Mounts []PluginMount `json:"Mounts"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_device.go b/vendor/github.com/docker/docker/api/types/plugin_device.go new file mode 100644 index 000000000..569901067 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_device.go @@ -0,0 +1,25 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginDevice plugin device +// swagger:model PluginDevice +type PluginDevice struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // path + // Required: true + Path *string `json:"Path"` + + // settable + // Required: true + Settable []string `json:"Settable"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_env.go b/vendor/github.com/docker/docker/api/types/plugin_env.go new file mode 100644 index 000000000..32962dc2e --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_env.go @@ -0,0 +1,25 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginEnv plugin env +// swagger:model PluginEnv +type PluginEnv struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value *string `json:"Value"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_interface_type.go b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go new file mode 100644 index 000000000..c82f204e8 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go @@ -0,0 +1,21 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginInterfaceType plugin interface type +// swagger:model PluginInterfaceType +type PluginInterfaceType struct { + + // capability + // Required: true + Capability string `json:"Capability"` + + // prefix + // Required: true + Prefix string `json:"Prefix"` + + // version + // Required: true + Version string `json:"Version"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_mount.go b/vendor/github.com/docker/docker/api/types/plugin_mount.go new file mode 100644 index 000000000..5c031cf8b --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_mount.go @@ -0,0 +1,37 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// PluginMount plugin mount +// swagger:model PluginMount +type PluginMount struct { + + // description + // Required: true + Description string `json:"Description"` + + // destination + // Required: true + Destination string `json:"Destination"` + + // name + // Required: true + Name string `json:"Name"` + + // options + // Required: true + Options []string `json:"Options"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // source + // Required: true + Source *string `json:"Source"` + + // type + // Required: true + Type string `json:"Type"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_responses.go b/vendor/github.com/docker/docker/api/types/plugin_responses.go new file mode 100644 index 000000000..60d1fb5ad --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_responses.go @@ -0,0 +1,71 @@ +package types // import "github.com/docker/docker/api/types" + +import ( + "encoding/json" + "fmt" + "sort" +) + +// PluginsListResponse contains the response for the Engine API +type PluginsListResponse []*Plugin + +// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType +func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { + versionIndex := len(p) + prefixIndex := 0 + if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { + return fmt.Errorf("%q is not a plugin interface type", p) + } + p = p[1 : len(p)-1] +loop: + for i, b := range p { + switch b { + case '.': + prefixIndex = i + case '/': + versionIndex = i + break loop + } + } + t.Prefix = string(p[:prefixIndex]) + t.Capability = string(p[prefixIndex+1 : versionIndex]) + if versionIndex < len(p) { + t.Version = string(p[versionIndex+1:]) + } + return nil +} + +// MarshalJSON implements json.Marshaler for PluginInterfaceType +func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) { + return json.Marshal(t.String()) +} + +// String implements fmt.Stringer for PluginInterfaceType +func (t PluginInterfaceType) String() string { + return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +type PluginPrivilege struct { + Name string + Description string + Value []string +} + +// PluginPrivileges is a list of PluginPrivilege +type PluginPrivileges []PluginPrivilege + +func (s PluginPrivileges) Len() int { + return len(s) +} + +func (s PluginPrivileges) Less(i, j int) bool { + return s[i].Name < s[j].Name +} + +func (s PluginPrivileges) Swap(i, j int) { + sort.Strings(s[i].Value) + sort.Strings(s[j].Value) + s[i], s[j] = s[j], s[i] +} diff --git a/vendor/github.com/docker/docker/api/types/port.go b/vendor/github.com/docker/docker/api/types/port.go new file mode 100644 index 000000000..d91234744 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/port.go @@ -0,0 +1,23 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// Port An open port on a container +// swagger:model Port +type Port struct { + + // Host IP address that the container's port is mapped to + IP string `json:"IP,omitempty"` + + // Port on the container + // Required: true + PrivatePort uint16 `json:"PrivatePort"` + + // Port exposed on the host + PublicPort uint16 `json:"PublicPort,omitempty"` + + // type + // Required: true + Type string `json:"Type"` +} diff --git a/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/vendor/github.com/docker/docker/api/types/registry/authenticate.go new file mode 100644 index 000000000..f0a2113e4 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/registry/authenticate.go @@ -0,0 +1,21 @@ +package registry // import "github.com/docker/docker/api/types/registry" + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// AuthenticateOKBody authenticate o k body +// swagger:model AuthenticateOKBody +type AuthenticateOKBody struct { + + // An opaque token used to authenticate a user after a successful login + // Required: true + IdentityToken string `json:"IdentityToken"` + + // The status of the authentication + // Required: true + Status string `json:"Status"` +} diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go new file mode 100644 index 000000000..62a88f5be --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -0,0 +1,120 @@ +package registry // import "github.com/docker/docker/api/types/registry" + +import ( + "encoding/json" + "net" + + v1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ServiceConfig stores daemon registry services configuration. 
+type ServiceConfig struct { + AllowNondistributableArtifactsCIDRs []*NetIPNet + AllowNondistributableArtifactsHostnames []string + InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` + IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` + Mirrors []string +} + +// NetIPNet is the net.IPNet type, which can be marshalled and +// unmarshalled to JSON +type NetIPNet net.IPNet + +// String returns the CIDR notation of ipnet +func (ipnet *NetIPNet) String() string { + return (*net.IPNet)(ipnet).String() +} + +// MarshalJSON returns the JSON representation of the IPNet +func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { + return json.Marshal((*net.IPNet)(ipnet).String()) +} + +// UnmarshalJSON sets the IPNet from a byte array of JSON +func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { + var ipnetStr string + if err = json.Unmarshal(b, &ipnetStr); err == nil { + var cidr *net.IPNet + if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { + *ipnet = NetIPNet(*cidr) + } + } + return +} + +// IndexInfo contains information about a registry +// +// RepositoryInfo Examples: +// +// { +// "Index" : { +// "Name" : "docker.io", +// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], +// "Secure" : true, +// "Official" : true, +// }, +// "RemoteName" : "library/debian", +// "LocalName" : "debian", +// "CanonicalName" : "docker.io/debian" +// "Official" : true, +// } +// +// { +// "Index" : { +// "Name" : "127.0.0.1:5000", +// "Mirrors" : [], +// "Secure" : false, +// "Official" : false, +// }, +// "RemoteName" : "user/repo", +// "LocalName" : "127.0.0.1:5000/user/repo", +// "CanonicalName" : "127.0.0.1:5000/user/repo", +// "Official" : false, +// } +type IndexInfo struct { + // Name is the name of the registry, such as "docker.io" + Name string + // Mirrors is a list of mirrors, expressed as URIs + Mirrors []string + // Secure is set to false if the registry is part of the list of + // insecure registries. Insecure registries accept HTTP and/or accept + // HTTPS with certificates from unknown CAs. + Secure bool + // Official indicates whether this is an official registry + Official bool +} + +// SearchResult describes a search result returned from a registry +type SearchResult struct { + // StarCount indicates the number of stars this repository has + StarCount int `json:"star_count"` + // IsOfficial is true if the result is from an official repository. 
+ IsOfficial bool `json:"is_official"` + // Name is the name of the repository + Name string `json:"name"` + // IsAutomated indicates whether the result is automated + IsAutomated bool `json:"is_automated"` + // Description is a textual description of the repository + Description string `json:"description"` +} + +// SearchResults lists a collection search results returned from a registry +type SearchResults struct { + // Query contains the query string that generated the search results + Query string `json:"query"` + // NumResults indicates the number of results the query returned + NumResults int `json:"num_results"` + // Results is a slice containing the actual results for the search + Results []SearchResult `json:"results"` +} + +// DistributionInspect describes the result obtained from contacting the +// registry to retrieve image metadata +type DistributionInspect struct { + // Descriptor contains information about the manifest, including + // the content addressable digest + Descriptor v1.Descriptor + // Platforms contains the list of platforms supported by the image, + // obtained by parsing the manifest + Platforms []v1.Platform +} diff --git a/vendor/github.com/docker/docker/api/types/service_update_response.go b/vendor/github.com/docker/docker/api/types/service_update_response.go new file mode 100644 index 000000000..74ea64b1b --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/service_update_response.go @@ -0,0 +1,12 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ServiceUpdateResponse service update response +// swagger:model ServiceUpdateResponse +type ServiceUpdateResponse struct { + + // Optional warning messages + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/stats.go b/vendor/github.com/docker/docker/api/types/stats.go new file mode 100644 index 000000000..20daebed1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/stats.go @@ -0,0 +1,181 @@ +// Package types is used for API stability in the types and response to the +// consumers of the API stats endpoint. +package types // import "github.com/docker/docker/api/types" + +import "time" + +// ThrottlingData stores CPU throttling stats of one running container. +// Not used on Windows. +type ThrottlingData struct { + // Number of periods with throttling active + Periods uint64 `json:"periods"` + // Number of periods when the container hits its throttling limit. + ThrottledPeriods uint64 `json:"throttled_periods"` + // Aggregate time the container was throttled for in nanoseconds. + ThrottledTime uint64 `json:"throttled_time"` +} + +// CPUUsage stores All CPU stats aggregated since container inception. +type CPUUsage struct { + // Total CPU time consumed. + // Units: nanoseconds (Linux) + // Units: 100's of nanoseconds (Windows) + TotalUsage uint64 `json:"total_usage"` + + // Total CPU time consumed per core (Linux). Not used on Windows. + // Units: nanoseconds. + PercpuUsage []uint64 `json:"percpu_usage,omitempty"` + + // Time spent by tasks of the cgroup in kernel mode (Linux). + // Time spent by all container processes in kernel mode (Windows). + // Units: nanoseconds (Linux). + // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers. + UsageInKernelmode uint64 `json:"usage_in_kernelmode"` + + // Time spent by tasks of the cgroup in user mode (Linux). + // Time spent by all container processes in user mode (Windows). 
+ // Units: nanoseconds (Linux). + // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers + UsageInUsermode uint64 `json:"usage_in_usermode"` +} + +// CPUStats aggregates and wraps all CPU related info of container +type CPUStats struct { + // CPU Usage. Linux and Windows. + CPUUsage CPUUsage `json:"cpu_usage"` + + // System Usage. Linux only. + SystemUsage uint64 `json:"system_cpu_usage,omitempty"` + + // Online CPUs. Linux only. + OnlineCPUs uint32 `json:"online_cpus,omitempty"` + + // Throttling Data. Linux only. + ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` +} + +// MemoryStats aggregates all memory stats since container inception on Linux. +// Windows returns stats for commit and private working set only. +type MemoryStats struct { + // Linux Memory Stats + + // current res_counter usage for memory + Usage uint64 `json:"usage,omitempty"` + // maximum usage ever recorded. + MaxUsage uint64 `json:"max_usage,omitempty"` + // TODO(vishh): Export these as stronger types. + // all the stats exported via memory.stat. + Stats map[string]uint64 `json:"stats,omitempty"` + // number of times memory usage hits limits. + Failcnt uint64 `json:"failcnt,omitempty"` + Limit uint64 `json:"limit,omitempty"` + + // Windows Memory Stats + // See https://technet.microsoft.com/en-us/magazine/ff382715.aspx + + // committed bytes + Commit uint64 `json:"commitbytes,omitempty"` + // peak committed bytes + CommitPeak uint64 `json:"commitpeakbytes,omitempty"` + // private working set + PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"` +} + +// BlkioStatEntry is one small entity to store a piece of Blkio stats +// Not used on Windows. +type BlkioStatEntry struct { + Major uint64 `json:"major"` + Minor uint64 `json:"minor"` + Op string `json:"op"` + Value uint64 `json:"value"` +} + +// BlkioStats stores All IO service stats for data read and write. +// This is a Linux specific structure as the differences between expressing +// block I/O on Windows and Linux are sufficiently significant to make +// little sense attempting to morph into a combined structure. +type BlkioStats struct { + // number of bytes transferred to and from the block device + IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` + IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` + IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` + IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` + IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` + IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` + IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` + SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` +} + +// StorageStats is the disk I/O stats for read/write on Windows. +type StorageStats struct { + ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"` + ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"` + WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"` + WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"` +} + +// NetworkStats aggregates the network stats of one container +type NetworkStats struct { + // Bytes received. Windows and Linux. + RxBytes uint64 `json:"rx_bytes"` + // Packets received. Windows and Linux. + RxPackets uint64 `json:"rx_packets"` + // Received errors. Not used on Windows. Note that we don't `omitempty` this + // field as it is expected in the >=v1.21 API stats structure. 
+ RxErrors uint64 `json:"rx_errors"` + // Incoming packets dropped. Windows and Linux. + RxDropped uint64 `json:"rx_dropped"` + // Bytes sent. Windows and Linux. + TxBytes uint64 `json:"tx_bytes"` + // Packets sent. Windows and Linux. + TxPackets uint64 `json:"tx_packets"` + // Sent errors. Not used on Windows. Note that we don't `omitempty` this + // field as it is expected in the >=v1.21 API stats structure. + TxErrors uint64 `json:"tx_errors"` + // Outgoing packets dropped. Windows and Linux. + TxDropped uint64 `json:"tx_dropped"` + // Endpoint ID. Not used on Linux. + EndpointID string `json:"endpoint_id,omitempty"` + // Instance ID. Not used on Linux. + InstanceID string `json:"instance_id,omitempty"` +} + +// PidsStats contains the stats of a container's pids +type PidsStats struct { + // Current is the number of pids in the cgroup + Current uint64 `json:"current,omitempty"` + // Limit is the hard limit on the number of pids in the cgroup. + // A "Limit" of 0 means that there is no limit. + Limit uint64 `json:"limit,omitempty"` +} + +// Stats is Ultimate struct aggregating all types of stats of one container +type Stats struct { + // Common stats + Read time.Time `json:"read"` + PreRead time.Time `json:"preread"` + + // Linux specific stats, not populated on Windows. + PidsStats PidsStats `json:"pids_stats,omitempty"` + BlkioStats BlkioStats `json:"blkio_stats,omitempty"` + + // Windows specific stats, not populated on Linux. + NumProcs uint32 `json:"num_procs"` + StorageStats StorageStats `json:"storage_stats,omitempty"` + + // Shared stats + CPUStats CPUStats `json:"cpu_stats,omitempty"` + PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous" + MemoryStats MemoryStats `json:"memory_stats,omitempty"` +} + +// StatsJSON is newly used Networks +type StatsJSON struct { + Stats + + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` + + // Networks request version >=1.21 + Networks map[string]NetworkStats `json:"networks,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/strslice/strslice.go b/vendor/github.com/docker/docker/api/types/strslice/strslice.go new file mode 100644 index 000000000..82921cebc --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/strslice/strslice.go @@ -0,0 +1,30 @@ +package strslice // import "github.com/docker/docker/api/types/strslice" + +import "encoding/json" + +// StrSlice represents a string or an array of strings. +// We need to override the json decoder to accept both options. +type StrSlice []string + +// UnmarshalJSON decodes the byte slice whether it's a string or an array of +// strings. This method is needed to implement json.Unmarshaler. +func (e *StrSlice) UnmarshalJSON(b []byte) error { + if len(b) == 0 { + // With no input, we preserve the existing value by returning nil and + // leaving the target alone. This allows defining default values for + // the type. 
+ return nil + } + + p := make([]string, 0, 1) + if err := json.Unmarshal(b, &p); err != nil { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + p = append(p, s) + } + + *e = p + return nil +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/common.go b/vendor/github.com/docker/docker/api/types/swarm/common.go new file mode 100644 index 000000000..5ded7dba8 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/common.go @@ -0,0 +1,48 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import ( + "strconv" + "time" +) + +// Version represents the internal object version. +type Version struct { + Index uint64 `json:",omitempty"` +} + +// String implements fmt.Stringer interface. +func (v Version) String() string { + return strconv.FormatUint(v.Index, 10) +} + +// Meta is a base object inherited by most of the other once. +type Meta struct { + Version Version `json:",omitempty"` + CreatedAt time.Time `json:",omitempty"` + UpdatedAt time.Time `json:",omitempty"` +} + +// Annotations represents how to describe an object. +type Annotations struct { + Name string `json:",omitempty"` + Labels map[string]string `json:"Labels"` +} + +// Driver represents a driver (network, logging, secrets backend). +type Driver struct { + Name string `json:",omitempty"` + Options map[string]string `json:",omitempty"` +} + +// TLSInfo represents the TLS information about what CA certificate is trusted, +// and who the issuer for a TLS certificate is +type TLSInfo struct { + // TrustRoot is the trusted CA root certificate in PEM format + TrustRoot string `json:",omitempty"` + + // CertIssuer is the raw subject bytes of the issuer + CertIssuerSubject []byte `json:",omitempty"` + + // CertIssuerPublicKey is the raw public key bytes of the issuer + CertIssuerPublicKey []byte `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/config.go b/vendor/github.com/docker/docker/api/types/swarm/config.go new file mode 100644 index 000000000..16202ccce --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/config.go @@ -0,0 +1,40 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import "os" + +// Config represents a config. +type Config struct { + ID string + Meta + Spec ConfigSpec +} + +// ConfigSpec represents a config specification from a config in swarm +type ConfigSpec struct { + Annotations + Data []byte `json:",omitempty"` + + // Templating controls whether and how to evaluate the config payload as + // a template. If it is not set, no templating is used. + Templating *Driver `json:",omitempty"` +} + +// ConfigReferenceFileTarget is a file target in a config reference +type ConfigReferenceFileTarget struct { + Name string + UID string + GID string + Mode os.FileMode +} + +// ConfigReferenceRuntimeTarget is a target for a config specifying that it +// isn't mounted into the container but instead has some other purpose. 
+type ConfigReferenceRuntimeTarget struct{} + +// ConfigReference is a reference to a config in swarm +type ConfigReference struct { + File *ConfigReferenceFileTarget `json:",omitempty"` + Runtime *ConfigReferenceRuntimeTarget `json:",omitempty"` + ConfigID string + ConfigName string +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go new file mode 100644 index 000000000..af5e1c0bc --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/container.go @@ -0,0 +1,80 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import ( + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/mount" + "github.com/docker/go-units" +) + +// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf) +// Detailed documentation is available in: +// http://man7.org/linux/man-pages/man5/resolv.conf.5.html +// `nameserver`, `search`, `options` have been supported. +// TODO: `domain` is not supported yet. +type DNSConfig struct { + // Nameservers specifies the IP addresses of the name servers + Nameservers []string `json:",omitempty"` + // Search specifies the search list for host-name lookup + Search []string `json:",omitempty"` + // Options allows certain internal resolver variables to be modified + Options []string `json:",omitempty"` +} + +// SELinuxContext contains the SELinux labels of the container. +type SELinuxContext struct { + Disable bool + + User string + Role string + Type string + Level string +} + +// CredentialSpec for managed service account (Windows only) +type CredentialSpec struct { + Config string + File string + Registry string +} + +// Privileges defines the security options for the container. +type Privileges struct { + CredentialSpec *CredentialSpec + SELinuxContext *SELinuxContext +} + +// ContainerSpec represents the spec of a container. +type ContainerSpec struct { + Image string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Command []string `json:",omitempty"` + Args []string `json:",omitempty"` + Hostname string `json:",omitempty"` + Env []string `json:",omitempty"` + Dir string `json:",omitempty"` + User string `json:",omitempty"` + Groups []string `json:",omitempty"` + Privileges *Privileges `json:",omitempty"` + Init *bool `json:",omitempty"` + StopSignal string `json:",omitempty"` + TTY bool `json:",omitempty"` + OpenStdin bool `json:",omitempty"` + ReadOnly bool `json:",omitempty"` + Mounts []mount.Mount `json:",omitempty"` + StopGracePeriod *time.Duration `json:",omitempty"` + Healthcheck *container.HealthConfig `json:",omitempty"` + // The format of extra hosts on swarmkit is specified in: + // http://man7.org/linux/man-pages/man5/hosts.5.html + // IP_address canonical_hostname [aliases...] 
+ Hosts []string `json:",omitempty"` + DNSConfig *DNSConfig `json:",omitempty"` + Secrets []*SecretReference `json:",omitempty"` + Configs []*ConfigReference `json:",omitempty"` + Isolation container.Isolation `json:",omitempty"` + Sysctls map[string]string `json:",omitempty"` + CapabilityAdd []string `json:",omitempty"` + CapabilityDrop []string `json:",omitempty"` + Ulimits []*units.Ulimit `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/network.go b/vendor/github.com/docker/docker/api/types/swarm/network.go new file mode 100644 index 000000000..98ef3284d --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/network.go @@ -0,0 +1,121 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import ( + "github.com/docker/docker/api/types/network" +) + +// Endpoint represents an endpoint. +type Endpoint struct { + Spec EndpointSpec `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` + VirtualIPs []EndpointVirtualIP `json:",omitempty"` +} + +// EndpointSpec represents the spec of an endpoint. +type EndpointSpec struct { + Mode ResolutionMode `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` +} + +// ResolutionMode represents a resolution mode. +type ResolutionMode string + +const ( + // ResolutionModeVIP VIP + ResolutionModeVIP ResolutionMode = "vip" + // ResolutionModeDNSRR DNSRR + ResolutionModeDNSRR ResolutionMode = "dnsrr" +) + +// PortConfig represents the config of a port. +type PortConfig struct { + Name string `json:",omitempty"` + Protocol PortConfigProtocol `json:",omitempty"` + // TargetPort is the port inside the container + TargetPort uint32 `json:",omitempty"` + // PublishedPort is the port on the swarm hosts + PublishedPort uint32 `json:",omitempty"` + // PublishMode is the mode in which port is published + PublishMode PortConfigPublishMode `json:",omitempty"` +} + +// PortConfigPublishMode represents the mode in which the port is to +// be published. +type PortConfigPublishMode string + +const ( + // PortConfigPublishModeIngress is used for ports published + // for ingress load balancing using routing mesh. + PortConfigPublishModeIngress PortConfigPublishMode = "ingress" + // PortConfigPublishModeHost is used for ports published + // for direct host level access on the host where the task is running. + PortConfigPublishModeHost PortConfigPublishMode = "host" +) + +// PortConfigProtocol represents the protocol of a port. +type PortConfigProtocol string + +const ( + // TODO(stevvooe): These should be used generally, not just for PortConfig. + + // PortConfigProtocolTCP TCP + PortConfigProtocolTCP PortConfigProtocol = "tcp" + // PortConfigProtocolUDP UDP + PortConfigProtocolUDP PortConfigProtocol = "udp" + // PortConfigProtocolSCTP SCTP + PortConfigProtocolSCTP PortConfigProtocol = "sctp" +) + +// EndpointVirtualIP represents the virtual ip of a port. +type EndpointVirtualIP struct { + NetworkID string `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// Network represents a network. +type Network struct { + ID string + Meta + Spec NetworkSpec `json:",omitempty"` + DriverState Driver `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` +} + +// NetworkSpec represents the spec of a network. 
+type NetworkSpec struct { + Annotations + DriverConfiguration *Driver `json:",omitempty"` + IPv6Enabled bool `json:",omitempty"` + Internal bool `json:",omitempty"` + Attachable bool `json:",omitempty"` + Ingress bool `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` + ConfigFrom *network.ConfigReference `json:",omitempty"` + Scope string `json:",omitempty"` +} + +// NetworkAttachmentConfig represents the configuration of a network attachment. +type NetworkAttachmentConfig struct { + Target string `json:",omitempty"` + Aliases []string `json:",omitempty"` + DriverOpts map[string]string `json:",omitempty"` +} + +// NetworkAttachment represents a network attachment. +type NetworkAttachment struct { + Network Network `json:",omitempty"` + Addresses []string `json:",omitempty"` +} + +// IPAMOptions represents ipam options. +type IPAMOptions struct { + Driver Driver `json:",omitempty"` + Configs []IPAMConfig `json:",omitempty"` +} + +// IPAMConfig represents ipam configuration. +type IPAMConfig struct { + Subnet string `json:",omitempty"` + Range string `json:",omitempty"` + Gateway string `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/docker/docker/api/types/swarm/node.go new file mode 100644 index 000000000..bb98d5eed --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/node.go @@ -0,0 +1,139 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +// Node represents a node. +type Node struct { + ID string + Meta + // Spec defines the desired state of the node as specified by the user. + // The system will honor this and will *never* modify it. + Spec NodeSpec `json:",omitempty"` + // Description encapsulates the properties of the Node as reported by the + // agent. + Description NodeDescription `json:",omitempty"` + // Status provides the current status of the node, as seen by the manager. + Status NodeStatus `json:",omitempty"` + // ManagerStatus provides the current status of the node's manager + // component, if the node is a manager. + ManagerStatus *ManagerStatus `json:",omitempty"` +} + +// NodeSpec represents the spec of a node. +type NodeSpec struct { + Annotations + Role NodeRole `json:",omitempty"` + Availability NodeAvailability `json:",omitempty"` +} + +// NodeRole represents the role of a node. +type NodeRole string + +const ( + // NodeRoleWorker WORKER + NodeRoleWorker NodeRole = "worker" + // NodeRoleManager MANAGER + NodeRoleManager NodeRole = "manager" +) + +// NodeAvailability represents the availability of a node. +type NodeAvailability string + +const ( + // NodeAvailabilityActive ACTIVE + NodeAvailabilityActive NodeAvailability = "active" + // NodeAvailabilityPause PAUSE + NodeAvailabilityPause NodeAvailability = "pause" + // NodeAvailabilityDrain DRAIN + NodeAvailabilityDrain NodeAvailability = "drain" +) + +// NodeDescription represents the description of a node. +type NodeDescription struct { + Hostname string `json:",omitempty"` + Platform Platform `json:",omitempty"` + Resources Resources `json:",omitempty"` + Engine EngineDescription `json:",omitempty"` + TLSInfo TLSInfo `json:",omitempty"` + CSIInfo []NodeCSIInfo `json:",omitempty"` +} + +// Platform represents the platform (Arch/OS). +type Platform struct { + Architecture string `json:",omitempty"` + OS string `json:",omitempty"` +} + +// EngineDescription represents the description of an engine. 
+type EngineDescription struct { + EngineVersion string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Plugins []PluginDescription `json:",omitempty"` +} + +// NodeCSIInfo represents information about a CSI plugin available on the node +type NodeCSIInfo struct { + // PluginName is the name of the CSI plugin. + PluginName string `json:",omitempty"` + // NodeID is the ID of the node as reported by the CSI plugin. This is + // different from the swarm node ID. + NodeID string `json:",omitempty"` + // MaxVolumesPerNode is the maximum number of volumes that may be published + // to this node + MaxVolumesPerNode int64 `json:",omitempty"` + // AccessibleTopology indicates the location of this node in the CSI + // plugin's topology + AccessibleTopology *Topology `json:",omitempty"` +} + +// PluginDescription represents the description of an engine plugin. +type PluginDescription struct { + Type string `json:",omitempty"` + Name string `json:",omitempty"` +} + +// NodeStatus represents the status of a node. +type NodeStatus struct { + State NodeState `json:",omitempty"` + Message string `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// Reachability represents the reachability of a node. +type Reachability string + +const ( + // ReachabilityUnknown UNKNOWN + ReachabilityUnknown Reachability = "unknown" + // ReachabilityUnreachable UNREACHABLE + ReachabilityUnreachable Reachability = "unreachable" + // ReachabilityReachable REACHABLE + ReachabilityReachable Reachability = "reachable" +) + +// ManagerStatus represents the status of a manager. +type ManagerStatus struct { + Leader bool `json:",omitempty"` + Reachability Reachability `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// NodeState represents the state of a node. +type NodeState string + +const ( + // NodeStateUnknown UNKNOWN + NodeStateUnknown NodeState = "unknown" + // NodeStateDown DOWN + NodeStateDown NodeState = "down" + // NodeStateReady READY + NodeStateReady NodeState = "ready" + // NodeStateDisconnected DISCONNECTED + NodeStateDisconnected NodeState = "disconnected" +) + +// Topology defines the CSI topology of this node. This type is a duplicate of +// github.com/docker/docker/api/types.Topology. Because the type definition +// is so simple and to avoid complicated structure or circular imports, we just +// duplicate it here. 
See that type for full documentation +type Topology struct { + Segments map[string]string `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime.go b/vendor/github.com/docker/docker/api/types/swarm/runtime.go new file mode 100644 index 000000000..0c77403cc --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime.go @@ -0,0 +1,27 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +// RuntimeType is the type of runtime used for the TaskSpec +type RuntimeType string + +// RuntimeURL is the proto type url +type RuntimeURL string + +const ( + // RuntimeContainer is the container based runtime + RuntimeContainer RuntimeType = "container" + // RuntimePlugin is the plugin based runtime + RuntimePlugin RuntimeType = "plugin" + // RuntimeNetworkAttachment is the network attachment runtime + RuntimeNetworkAttachment RuntimeType = "attachment" + + // RuntimeURLContainer is the proto url for the container type + RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer" + // RuntimeURLPlugin is the proto url for the plugin type + RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin" +) + +// NetworkAttachmentSpec represents the runtime spec type for network +// attachment tasks +type NetworkAttachmentSpec struct { + ContainerID string +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go new file mode 100644 index 000000000..98c2806c3 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go @@ -0,0 +1,3 @@ +//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto + +package runtime // import "github.com/docker/docker/api/types/swarm/runtime" diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go new file mode 100644 index 000000000..e45045866 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go @@ -0,0 +1,754 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: plugin.proto + +/* + Package runtime is a generated protocol buffer package. + + It is generated from these files: + plugin.proto + + It has these top-level messages: + PluginSpec + PluginPrivilege +*/ +package runtime + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// PluginSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. 
+type PluginSpec struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` + Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"` + Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` + Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` +} + +func (m *PluginSpec) Reset() { *m = PluginSpec{} } +func (m *PluginSpec) String() string { return proto.CompactTextString(m) } +func (*PluginSpec) ProtoMessage() {} +func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } + +func (m *PluginSpec) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PluginSpec) GetRemote() string { + if m != nil { + return m.Remote + } + return "" +} + +func (m *PluginSpec) GetPrivileges() []*PluginPrivilege { + if m != nil { + return m.Privileges + } + return nil +} + +func (m *PluginSpec) GetDisabled() bool { + if m != nil { + return m.Disabled + } + return false +} + +func (m *PluginSpec) GetEnv() []string { + if m != nil { + return m.Env + } + return nil +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +type PluginPrivilege struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"` +} + +func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } +func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } +func (*PluginPrivilege) ProtoMessage() {} +func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } + +func (m *PluginPrivilege) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PluginPrivilege) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *PluginPrivilege) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*PluginSpec)(nil), "PluginSpec") + proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege") +} +func (m *PluginSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Remote) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) + i += copy(dAtA[i:], m.Remote) + } + if len(m.Privileges) > 0 { + for _, msg := range m.Privileges { + dAtA[i] = 0x1a + i++ + i = encodeVarintPlugin(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Disabled { + dAtA[i] = 0x20 + i++ + if m.Disabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Env) > 0 { + for _, s := range m.Env { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *PluginPrivilege) Marshal() 
(dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Description) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description))) + i += copy(dAtA[i:], m.Description) + } + if len(m.Value) > 0 { + for _, s := range m.Value { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *PluginSpec) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + l = len(m.Remote) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + if len(m.Privileges) > 0 { + for _, e := range m.Privileges { + l = e.Size() + n += 1 + l + sovPlugin(uint64(l)) + } + } + if m.Disabled { + n += 2 + } + if len(m.Env) > 0 { + for _, s := range m.Env { + l = len(s) + n += 1 + l + sovPlugin(uint64(l)) + } + } + return n +} + +func (m *PluginPrivilege) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + if len(m.Value) > 0 { + for _, s := range m.Value { + l = len(s) + n += 1 + l + sovPlugin(uint64(l)) + } + } + return n +} + +func sovPlugin(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozPlugin(x uint64) (n int) { + return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *PluginSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Remote = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Privileges = append(m.Privileges, &PluginPrivilege{}) + if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Disabled = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen 
:= int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPlugin(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthPlugin + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipPlugin(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) 
+ } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) } + +var fileDescriptorPlugin = []byte{ + // 256 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x4d, 0x4b, 0xc3, 0x30, + 0x18, 0xc7, 0x89, 0xdd, 0xc6, 0xfa, 0x4c, 0x70, 0x04, 0x91, 0xe2, 0xa1, 0x94, 0x9d, 0x7a, 0x6a, + 0x45, 0x2f, 0x82, 0x37, 0x0f, 0x9e, 0x47, 0xbc, 0x09, 0x1e, 0xd2, 0xf6, 0xa1, 0x06, 0x9b, 0x17, + 0x92, 0xb4, 0xe2, 0x37, 0xf1, 0x23, 0x79, 0xf4, 0x23, 0x48, 0x3f, 0x89, 0x98, 0x75, 0x32, 0x64, + 0xa7, 0xff, 0x4b, 0xc2, 0x9f, 0x1f, 0x0f, 0x9c, 0x9a, 0xae, 0x6f, 0x85, 0x2a, 0x8c, 0xd5, 0x5e, + 0x6f, 0x3e, 0x08, 0xc0, 0x36, 0x14, 0x8f, 0x06, 0x6b, 0x4a, 0x61, 0xa6, 0xb8, 0xc4, 0x84, 0x64, + 0x24, 0x8f, 0x59, 0xf0, 0xf4, 0x02, 0x16, 0x16, 0xa5, 0xf6, 0x98, 0x9c, 0x84, 0x76, 0x4a, 0xf4, + 0x0a, 0xc0, 0x58, 0x31, 0x88, 0x0e, 0x5b, 0x74, 0x49, 0x94, 0x45, 0xf9, 0xea, 0x7a, 0x5d, 0xec, + 0xc6, 0xb6, 0xfb, 0x07, 0x76, 0xf0, 0x87, 0x5e, 0xc2, 0xb2, 0x11, 0x8e, 0x57, 0x1d, 0x36, 0xc9, + 0x2c, 0x23, 0xf9, 0x92, 0xfd, 0x65, 0xba, 0x86, 0x08, 0xd5, 0x90, 0xcc, 0xb3, 0x28, 0x8f, 0xd9, + 0xaf, 0xdd, 0x3c, 0xc3, 0xd9, 0xbf, 0xb1, 0xa3, 0x78, 0x19, 0xac, 0x1a, 0x74, 0xb5, 0x15, 0xc6, + 0x0b, 0xad, 0x26, 0xc6, 0xc3, 0x8a, 0x9e, 0xc3, 0x7c, 0xe0, 0x5d, 0x8f, 0x81, 0x31, 0x66, 0xbb, + 0x70, 0xff, 0xf0, 0x39, 0xa6, 0xe4, 0x6b, 0x4c, 0xc9, 0xf7, 0x98, 0x92, 0xa7, 0xdb, 0x56, 0xf8, + 0x97, 0xbe, 0x2a, 0x6a, 0x2d, 0xcb, 0x46, 0xd7, 0xaf, 0x68, 0xf7, 0xc2, 0x8d, 0x28, 0xfd, 0xbb, + 0x41, 0x57, 0xba, 0x37, 0x6e, 0x65, 0x69, 0x7b, 0xe5, 0x85, 0xc4, 0xbb, 0x49, 0xab, 0x45, 0x38, + 0xe4, 0xcd, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x99, 0xa8, 0xd9, 0x9b, 0x58, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto new file mode 100644 index 000000000..9ef169046 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +option go_package = "github.com/docker/docker/api/types/swarm/runtime;runtime"; + +// PluginSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. +message PluginSpec { + string name = 1; + string remote = 2; + repeated PluginPrivilege privileges = 3; + bool disabled = 4; + repeated string env = 5; +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +message PluginPrivilege { + string name = 1; + string description = 2; + repeated string value = 3; +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/secret.go b/vendor/github.com/docker/docker/api/types/swarm/secret.go new file mode 100644 index 000000000..d5213ec98 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/secret.go @@ -0,0 +1,36 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import "os" + +// Secret represents a secret. 
+type Secret struct { + ID string + Meta + Spec SecretSpec +} + +// SecretSpec represents a secret specification from a secret in swarm +type SecretSpec struct { + Annotations + Data []byte `json:",omitempty"` + Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store + + // Templating controls whether and how to evaluate the secret payload as + // a template. If it is not set, no templating is used. + Templating *Driver `json:",omitempty"` +} + +// SecretReferenceFileTarget is a file target in a secret reference +type SecretReferenceFileTarget struct { + Name string + UID string + GID string + Mode os.FileMode +} + +// SecretReference is a reference to a secret in swarm +type SecretReference struct { + File *SecretReferenceFileTarget + SecretID string + SecretName string +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/docker/docker/api/types/swarm/service.go new file mode 100644 index 000000000..6eb452d24 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/service.go @@ -0,0 +1,202 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import "time" + +// Service represents a service. +type Service struct { + ID string + Meta + Spec ServiceSpec `json:",omitempty"` + PreviousSpec *ServiceSpec `json:",omitempty"` + Endpoint Endpoint `json:",omitempty"` + UpdateStatus *UpdateStatus `json:",omitempty"` + + // ServiceStatus is an optional, extra field indicating the number of + // desired and running tasks. It is provided primarily as a shortcut to + // calculating these values client-side, which otherwise would require + // listing all tasks for a service, an operation that could be + // computation and network expensive. + ServiceStatus *ServiceStatus `json:",omitempty"` + + // JobStatus is the status of a Service which is in one of ReplicatedJob or + // GlobalJob modes. It is absent on Replicated and Global services. + JobStatus *JobStatus `json:",omitempty"` +} + +// ServiceSpec represents the spec of a service. +type ServiceSpec struct { + Annotations + + // TaskTemplate defines how the service should construct new tasks when + // orchestrating this service. + TaskTemplate TaskSpec `json:",omitempty"` + Mode ServiceMode `json:",omitempty"` + UpdateConfig *UpdateConfig `json:",omitempty"` + RollbackConfig *UpdateConfig `json:",omitempty"` + + // Networks field in ServiceSpec is deprecated. The + // same field in TaskSpec should be used instead. + // This field will be removed in a future release. + Networks []NetworkAttachmentConfig `json:",omitempty"` + EndpointSpec *EndpointSpec `json:",omitempty"` +} + +// ServiceMode represents the mode of a service. +type ServiceMode struct { + Replicated *ReplicatedService `json:",omitempty"` + Global *GlobalService `json:",omitempty"` + ReplicatedJob *ReplicatedJob `json:",omitempty"` + GlobalJob *GlobalJob `json:",omitempty"` +} + +// UpdateState is the state of a service update. +type UpdateState string + +const ( + // UpdateStateUpdating is the updating state. + UpdateStateUpdating UpdateState = "updating" + // UpdateStatePaused is the paused state. + UpdateStatePaused UpdateState = "paused" + // UpdateStateCompleted is the completed state. + UpdateStateCompleted UpdateState = "completed" + // UpdateStateRollbackStarted is the state with a rollback in progress. + UpdateStateRollbackStarted UpdateState = "rollback_started" + // UpdateStateRollbackPaused is the state with a rollback in progress. 
+ UpdateStateRollbackPaused UpdateState = "rollback_paused" + // UpdateStateRollbackCompleted is the state with a rollback in progress. + UpdateStateRollbackCompleted UpdateState = "rollback_completed" +) + +// UpdateStatus reports the status of a service update. +type UpdateStatus struct { + State UpdateState `json:",omitempty"` + StartedAt *time.Time `json:",omitempty"` + CompletedAt *time.Time `json:",omitempty"` + Message string `json:",omitempty"` +} + +// ReplicatedService is a kind of ServiceMode. +type ReplicatedService struct { + Replicas *uint64 `json:",omitempty"` +} + +// GlobalService is a kind of ServiceMode. +type GlobalService struct{} + +// ReplicatedJob is the a type of Service which executes a defined Tasks +// in parallel until the specified number of Tasks have succeeded. +type ReplicatedJob struct { + // MaxConcurrent indicates the maximum number of Tasks that should be + // executing simultaneously for this job at any given time. There may be + // fewer Tasks that MaxConcurrent executing simultaneously; for example, if + // there are fewer than MaxConcurrent tasks needed to reach + // TotalCompletions. + // + // If this field is empty, it will default to a max concurrency of 1. + MaxConcurrent *uint64 `json:",omitempty"` + + // TotalCompletions is the total number of Tasks desired to run to + // completion. + // + // If this field is empty, the value of MaxConcurrent will be used. + TotalCompletions *uint64 `json:",omitempty"` +} + +// GlobalJob is the type of a Service which executes a Task on every Node +// matching the Service's placement constraints. These tasks run to completion +// and then exit. +// +// This type is deliberately empty. +type GlobalJob struct{} + +const ( + // UpdateFailureActionPause PAUSE + UpdateFailureActionPause = "pause" + // UpdateFailureActionContinue CONTINUE + UpdateFailureActionContinue = "continue" + // UpdateFailureActionRollback ROLLBACK + UpdateFailureActionRollback = "rollback" + + // UpdateOrderStopFirst STOP_FIRST + UpdateOrderStopFirst = "stop-first" + // UpdateOrderStartFirst START_FIRST + UpdateOrderStartFirst = "start-first" +) + +// UpdateConfig represents the update configuration. +type UpdateConfig struct { + // Maximum number of tasks to be updated in one iteration. + // 0 means unlimited parallelism. + Parallelism uint64 + + // Amount of time between updates. + Delay time.Duration `json:",omitempty"` + + // FailureAction is the action to take when an update failures. + FailureAction string `json:",omitempty"` + + // Monitor indicates how long to monitor a task for failure after it is + // created. If the task fails by ending up in one of the states + // REJECTED, COMPLETED, or FAILED, within Monitor from its creation, + // this counts as a failure. If it fails after Monitor, it does not + // count as a failure. If Monitor is unspecified, a default value will + // be used. + Monitor time.Duration `json:",omitempty"` + + // MaxFailureRatio is the fraction of tasks that may fail during + // an update before the failure action is invoked. Any task created by + // the current update which ends up in one of the states REJECTED, + // COMPLETED or FAILED within Monitor from its creation counts as a + // failure. The number of failures is divided by the number of tasks + // being updated, and if this fraction is greater than + // MaxFailureRatio, the failure action is invoked. + // + // If the failure action is CONTINUE, there is no effect. 
+ // If the failure action is PAUSE, no more tasks will be updated until + // another update is started. + MaxFailureRatio float32 + + // Order indicates the order of operations when rolling out an updated + // task. Either the old task is shut down before the new task is + // started, or the new task is started before the old task is shut down. + Order string +} + +// ServiceStatus represents the number of running tasks in a service and the +// number of tasks desired to be running. +type ServiceStatus struct { + // RunningTasks is the number of tasks for the service actually in the + // Running state + RunningTasks uint64 + + // DesiredTasks is the number of tasks desired to be running by the + // service. For replicated services, this is the replica count. For global + // services, this is computed by taking the number of tasks with desired + // state of not-Shutdown. + DesiredTasks uint64 + + // CompletedTasks is the number of tasks in the state Completed, if this + // service is in ReplicatedJob or GlobalJob mode. This field must be + // cross-referenced with the service type, because the default value of 0 + // may mean that a service is not in a job mode, or it may mean that the + // job has yet to complete any tasks. + CompletedTasks uint64 +} + +// JobStatus is the status of a job-type service. +type JobStatus struct { + // JobIteration is a value increased each time a Job is executed, + // successfully or otherwise. "Executed", in this case, means the job as a + // whole has been started, not that an individual Task has been launched. A + // job is "Executed" when its ServiceSpec is updated. JobIteration can be + // used to disambiguate Tasks belonging to different executions of a job. + // + // Though JobIteration will increase with each subsequent execution, it may + // not necessarily increase by 1, and so JobIteration should not be used to + // keep track of the number of times a job has been executed. + JobIteration Version + + // LastExecution is the time that the job was last executed, as observed by + // Swarm manager. + LastExecution time.Time `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go new file mode 100644 index 000000000..3eae4b9b2 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go @@ -0,0 +1,237 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import ( + "time" +) + +// ClusterInfo represents info about the cluster for outputting in "info" +// it contains the same information as "Swarm", but without the JoinTokens +type ClusterInfo struct { + ID string + Meta + Spec Spec + TLSInfo TLSInfo + RootRotationInProgress bool + DefaultAddrPool []string + SubnetSize uint32 + DataPathPort uint32 +} + +// Swarm represents a swarm. +type Swarm struct { + ClusterInfo + JoinTokens JoinTokens +} + +// JoinTokens contains the tokens workers and managers need to join the swarm. +type JoinTokens struct { + // Worker is the join token workers may use to join the swarm. + Worker string + // Manager is the join token managers may use to join the swarm. + Manager string +} + +// Spec represents the spec of a swarm. 
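To make the rolling-update semantics documented above concrete, here is a small, hypothetical sketch of how a client might fill in these vendored types. It is not part of the patch: the import path simply mirrors the vendored package, and ContainerSpec (with its Image field) is assumed from elsewhere in the same package.

package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/api/types/swarm"
)

// exampleServiceSpec builds a replicated service whose updates roll out one
// task at a time, start the new task before stopping the old one, and roll
// back if more than 20% of updated tasks fail within the monitoring window.
func exampleServiceSpec() swarm.ServiceSpec {
	replicas := uint64(3)
	return swarm.ServiceSpec{
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: &swarm.ContainerSpec{Image: "nginx:alpine"}, // assumed type from the same package
		},
		Mode: swarm.ServiceMode{
			Replicated: &swarm.ReplicatedService{Replicas: &replicas},
		},
		UpdateConfig: &swarm.UpdateConfig{
			Parallelism:     1,                // one task per update iteration
			Delay:           5 * time.Second,  // pause between iterations
			FailureAction:   swarm.UpdateFailureActionRollback,
			Monitor:         30 * time.Second, // window in which a task failure counts
			MaxFailureRatio: 0.2,
			Order:           swarm.UpdateOrderStartFirst,
		},
	}
}

func main() {
	fmt.Printf("%+v\n", exampleServiceSpec())
}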
+type Spec struct { + Annotations + + Orchestration OrchestrationConfig `json:",omitempty"` + Raft RaftConfig `json:",omitempty"` + Dispatcher DispatcherConfig `json:",omitempty"` + CAConfig CAConfig `json:",omitempty"` + TaskDefaults TaskDefaults `json:",omitempty"` + EncryptionConfig EncryptionConfig `json:",omitempty"` +} + +// OrchestrationConfig represents orchestration configuration. +type OrchestrationConfig struct { + // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or + // node. If negative, never remove completed or failed tasks. + TaskHistoryRetentionLimit *int64 `json:",omitempty"` +} + +// TaskDefaults parameterizes cluster-level task creation with default values. +type TaskDefaults struct { + // LogDriver selects the log driver to use for tasks created in the + // orchestrator if unspecified by a service. + // + // Updating this value will only have an affect on new tasks. Old tasks + // will continue use their previously configured log driver until + // recreated. + LogDriver *Driver `json:",omitempty"` +} + +// EncryptionConfig controls at-rest encryption of data and keys. +type EncryptionConfig struct { + // AutoLockManagers specifies whether or not managers TLS keys and raft data + // should be encrypted at rest in such a way that they must be unlocked + // before the manager node starts up again. + AutoLockManagers bool +} + +// RaftConfig represents raft configuration. +type RaftConfig struct { + // SnapshotInterval is the number of log entries between snapshots. + SnapshotInterval uint64 `json:",omitempty"` + + // KeepOldSnapshots is the number of snapshots to keep beyond the + // current snapshot. + KeepOldSnapshots *uint64 `json:",omitempty"` + + // LogEntriesForSlowFollowers is the number of log entries to keep + // around to sync up slow followers after a snapshot is created. + LogEntriesForSlowFollowers uint64 `json:",omitempty"` + + // ElectionTick is the number of ticks that a follower will wait for a message + // from the leader before becoming a candidate and starting an election. + // ElectionTick must be greater than HeartbeatTick. + // + // A tick currently defaults to one second, so these translate directly to + // seconds currently, but this is NOT guaranteed. + ElectionTick int + + // HeartbeatTick is the number of ticks between heartbeats. Every + // HeartbeatTick ticks, the leader will send a heartbeat to the + // followers. + // + // A tick currently defaults to one second, so these translate directly to + // seconds currently, but this is NOT guaranteed. + HeartbeatTick int +} + +// DispatcherConfig represents dispatcher configuration. +type DispatcherConfig struct { + // HeartbeatPeriod defines how often agent should send heartbeats to + // dispatcher. + HeartbeatPeriod time.Duration `json:",omitempty"` +} + +// CAConfig represents CA configuration. +type CAConfig struct { + // NodeCertExpiry is the duration certificates should be issued for + NodeCertExpiry time.Duration `json:",omitempty"` + + // ExternalCAs is a list of CAs to which a manager node will make + // certificate signing requests for node certificates. + ExternalCAs []*ExternalCA `json:",omitempty"` + + // SigningCACert and SigningCAKey specify the desired signing root CA and + // root CA key for the swarm. When inspecting the cluster, the key will + // be redacted. 
+ SigningCACert string `json:",omitempty"` + SigningCAKey string `json:",omitempty"` + + // If this value changes, and there is no specified signing cert and key, + // then the swarm is forced to generate a new root certificate ane key. + ForceRotate uint64 `json:",omitempty"` +} + +// ExternalCAProtocol represents type of external CA. +type ExternalCAProtocol string + +// ExternalCAProtocolCFSSL CFSSL +const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl" + +// ExternalCA defines external CA to be used by the cluster. +type ExternalCA struct { + // Protocol is the protocol used by this external CA. + Protocol ExternalCAProtocol + + // URL is the URL where the external CA can be reached. + URL string + + // Options is a set of additional key/value pairs whose interpretation + // depends on the specified CA type. + Options map[string]string `json:",omitempty"` + + // CACert specifies which root CA is used by this external CA. This certificate must + // be in PEM format. + CACert string +} + +// InitRequest is the request used to init a swarm. +type InitRequest struct { + ListenAddr string + AdvertiseAddr string + DataPathAddr string + DataPathPort uint32 + ForceNewCluster bool + Spec Spec + AutoLockManagers bool + Availability NodeAvailability + DefaultAddrPool []string + SubnetSize uint32 +} + +// JoinRequest is the request used to join a swarm. +type JoinRequest struct { + ListenAddr string + AdvertiseAddr string + DataPathAddr string + RemoteAddrs []string + JoinToken string // accept by secret + Availability NodeAvailability +} + +// UnlockRequest is the request used to unlock a swarm. +type UnlockRequest struct { + // UnlockKey is the unlock key in ASCII-armored format. + UnlockKey string +} + +// LocalNodeState represents the state of the local node. +type LocalNodeState string + +const ( + // LocalNodeStateInactive INACTIVE + LocalNodeStateInactive LocalNodeState = "inactive" + // LocalNodeStatePending PENDING + LocalNodeStatePending LocalNodeState = "pending" + // LocalNodeStateActive ACTIVE + LocalNodeStateActive LocalNodeState = "active" + // LocalNodeStateError ERROR + LocalNodeStateError LocalNodeState = "error" + // LocalNodeStateLocked LOCKED + LocalNodeStateLocked LocalNodeState = "locked" +) + +// Info represents generic information about swarm. +type Info struct { + NodeID string + NodeAddr string + + LocalNodeState LocalNodeState + ControlAvailable bool + Error string + + RemoteManagers []Peer + Nodes int `json:",omitempty"` + Managers int `json:",omitempty"` + + Cluster *ClusterInfo `json:",omitempty"` + + Warnings []string `json:",omitempty"` +} + +// Status provides information about the current swarm status and role, +// obtained from the "Swarm" header in the API response. +type Status struct { + // NodeState represents the state of the node. + NodeState LocalNodeState + + // ControlAvailable indicates if the node is a swarm manager. + ControlAvailable bool +} + +// Peer represents a peer. +type Peer struct { + NodeID string + Addr string +} + +// UpdateFlags contains flags for SwarmUpdate. 
+type UpdateFlags struct { + RotateWorkerToken bool + RotateManagerToken bool + RotateManagerUnlockKey bool +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go new file mode 100644 index 000000000..ad3eeca0b --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/task.go @@ -0,0 +1,225 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import ( + "time" + + "github.com/docker/docker/api/types/swarm/runtime" +) + +// TaskState represents the state of a task. +type TaskState string + +const ( + // TaskStateNew NEW + TaskStateNew TaskState = "new" + // TaskStateAllocated ALLOCATED + TaskStateAllocated TaskState = "allocated" + // TaskStatePending PENDING + TaskStatePending TaskState = "pending" + // TaskStateAssigned ASSIGNED + TaskStateAssigned TaskState = "assigned" + // TaskStateAccepted ACCEPTED + TaskStateAccepted TaskState = "accepted" + // TaskStatePreparing PREPARING + TaskStatePreparing TaskState = "preparing" + // TaskStateReady READY + TaskStateReady TaskState = "ready" + // TaskStateStarting STARTING + TaskStateStarting TaskState = "starting" + // TaskStateRunning RUNNING + TaskStateRunning TaskState = "running" + // TaskStateComplete COMPLETE + TaskStateComplete TaskState = "complete" + // TaskStateShutdown SHUTDOWN + TaskStateShutdown TaskState = "shutdown" + // TaskStateFailed FAILED + TaskStateFailed TaskState = "failed" + // TaskStateRejected REJECTED + TaskStateRejected TaskState = "rejected" + // TaskStateRemove REMOVE + TaskStateRemove TaskState = "remove" + // TaskStateOrphaned ORPHANED + TaskStateOrphaned TaskState = "orphaned" +) + +// Task represents a task. +type Task struct { + ID string + Meta + Annotations + + Spec TaskSpec `json:",omitempty"` + ServiceID string `json:",omitempty"` + Slot int `json:",omitempty"` + NodeID string `json:",omitempty"` + Status TaskStatus `json:",omitempty"` + DesiredState TaskState `json:",omitempty"` + NetworksAttachments []NetworkAttachment `json:",omitempty"` + GenericResources []GenericResource `json:",omitempty"` + + // JobIteration is the JobIteration of the Service that this Task was + // spawned from, if the Service is a ReplicatedJob or GlobalJob. This is + // used to determine which Tasks belong to which run of the job. This field + // is absent if the Service mode is Replicated or Global. + JobIteration *Version `json:",omitempty"` + + // Volumes is the list of VolumeAttachments for this task. It specifies + // which particular volumes are to be used by this particular task, and + // fulfilling what mounts in the spec. + Volumes []VolumeAttachment +} + +// TaskSpec represents the spec of a task. +type TaskSpec struct { + // ContainerSpec, NetworkAttachmentSpec, and PluginSpec are mutually exclusive. + // PluginSpec is only used when the `Runtime` field is set to `plugin` + // NetworkAttachmentSpec is used if the `Runtime` field is set to + // `attachment`. + ContainerSpec *ContainerSpec `json:",omitempty"` + PluginSpec *runtime.PluginSpec `json:",omitempty"` + NetworkAttachmentSpec *NetworkAttachmentSpec `json:",omitempty"` + + Resources *ResourceRequirements `json:",omitempty"` + RestartPolicy *RestartPolicy `json:",omitempty"` + Placement *Placement `json:",omitempty"` + Networks []NetworkAttachmentConfig `json:",omitempty"` + + // LogDriver specifies the LogDriver to use for tasks created from this + // spec. 
If not present, the one on cluster default on swarm.Spec will be + // used, finally falling back to the engine default if not specified. + LogDriver *Driver `json:",omitempty"` + + // ForceUpdate is a counter that triggers an update even if no relevant + // parameters have been changed. + ForceUpdate uint64 + + Runtime RuntimeType `json:",omitempty"` +} + +// Resources represents resources (CPU/Memory) which can be advertised by a +// node and requested to be reserved for a task. +type Resources struct { + NanoCPUs int64 `json:",omitempty"` + MemoryBytes int64 `json:",omitempty"` + GenericResources []GenericResource `json:",omitempty"` +} + +// Limit describes limits on resources which can be requested by a task. +type Limit struct { + NanoCPUs int64 `json:",omitempty"` + MemoryBytes int64 `json:",omitempty"` + Pids int64 `json:",omitempty"` +} + +// GenericResource represents a "user defined" resource which can +// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1) +type GenericResource struct { + NamedResourceSpec *NamedGenericResource `json:",omitempty"` + DiscreteResourceSpec *DiscreteGenericResource `json:",omitempty"` +} + +// NamedGenericResource represents a "user defined" resource which is defined +// as a string. +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...) +type NamedGenericResource struct { + Kind string `json:",omitempty"` + Value string `json:",omitempty"` +} + +// DiscreteGenericResource represents a "user defined" resource which is defined +// as an integer +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to count the resource (SSD=5, HDD=3, ...) +type DiscreteGenericResource struct { + Kind string `json:",omitempty"` + Value int64 `json:",omitempty"` +} + +// ResourceRequirements represents resources requirements. +type ResourceRequirements struct { + Limits *Limit `json:",omitempty"` + Reservations *Resources `json:",omitempty"` +} + +// Placement represents orchestration parameters. +type Placement struct { + Constraints []string `json:",omitempty"` + Preferences []PlacementPreference `json:",omitempty"` + MaxReplicas uint64 `json:",omitempty"` + + // Platforms stores all the platforms that the image can run on. + // This field is used in the platform filter for scheduling. If empty, + // then the platform filter is off, meaning there are no scheduling restrictions. + Platforms []Platform `json:",omitempty"` +} + +// PlacementPreference provides a way to make the scheduler aware of factors +// such as topology. +type PlacementPreference struct { + Spread *SpreadOver +} + +// SpreadOver is a scheduling preference that instructs the scheduler to spread +// tasks evenly over groups of nodes identified by labels. +type SpreadOver struct { + // label descriptor, such as engine.labels.az + SpreadDescriptor string +} + +// RestartPolicy represents the restart policy. +type RestartPolicy struct { + Condition RestartPolicyCondition `json:",omitempty"` + Delay *time.Duration `json:",omitempty"` + MaxAttempts *uint64 `json:",omitempty"` + Window *time.Duration `json:",omitempty"` +} + +// RestartPolicyCondition represents when to restart. 
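The Resources, Placement and RestartPolicy types above parameterize how the orchestrator reserves capacity, places tasks and restarts them. The sketch below is purely illustrative (the values are assumptions, and it uses the RestartPolicyCondition constants declared just after this point).

package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/api/types/swarm"
)

// exampleTaskSpec reserves half a CPU and 256 MiB per task, spreads tasks
// across availability-zone labels, and restarts failed tasks at most three times.
func exampleTaskSpec() swarm.TaskSpec {
	restartDelay := 5 * time.Second
	maxAttempts := uint64(3)
	return swarm.TaskSpec{
		Resources: &swarm.ResourceRequirements{
			Reservations: &swarm.Resources{
				NanoCPUs:    500000000,         // 0.5 CPU, expressed in nano-CPUs
				MemoryBytes: 256 * 1024 * 1024, // 256 MiB
			},
			Limits: &swarm.Limit{Pids: 100},
		},
		Placement: &swarm.Placement{
			Constraints: []string{"node.role==worker"},
			Preferences: []swarm.PlacementPreference{
				{Spread: &swarm.SpreadOver{SpreadDescriptor: "engine.labels.az"}},
			},
		},
		RestartPolicy: &swarm.RestartPolicy{
			Condition:   swarm.RestartPolicyConditionOnFailure,
			Delay:       &restartDelay,
			MaxAttempts: &maxAttempts,
		},
	}
}

func main() {
	fmt.Printf("%+v\n", exampleTaskSpec())
}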
+type RestartPolicyCondition string + +const ( + // RestartPolicyConditionNone NONE + RestartPolicyConditionNone RestartPolicyCondition = "none" + // RestartPolicyConditionOnFailure ON_FAILURE + RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure" + // RestartPolicyConditionAny ANY + RestartPolicyConditionAny RestartPolicyCondition = "any" +) + +// TaskStatus represents the status of a task. +type TaskStatus struct { + Timestamp time.Time `json:",omitempty"` + State TaskState `json:",omitempty"` + Message string `json:",omitempty"` + Err string `json:",omitempty"` + ContainerStatus *ContainerStatus `json:",omitempty"` + PortStatus PortStatus `json:",omitempty"` +} + +// ContainerStatus represents the status of a container. +type ContainerStatus struct { + ContainerID string + PID int + ExitCode int +} + +// PortStatus represents the port status of a task's host ports whose +// service has published host ports +type PortStatus struct { + Ports []PortConfig `json:",omitempty"` +} + +// VolumeAttachment contains the associating a Volume to a Task. +type VolumeAttachment struct { + // ID is the Swarmkit ID of the Volume. This is not the CSI VolumeId. + ID string `json:",omitempty"` + + // Source, together with Target, indicates the Mount, as specified in the + // ContainerSpec, that this volume fulfills. + Source string `json:",omitempty"` + + // Target, together with Source, indicates the Mount, as specified + // in the ContainerSpec, that this volume fulfills. + Target string `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp.go b/vendor/github.com/docker/docker/api/types/time/timestamp.go new file mode 100644 index 000000000..2a74b7a59 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/time/timestamp.go @@ -0,0 +1,131 @@ +package time // import "github.com/docker/docker/api/types/time" + +import ( + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// These are additional predefined layouts for use in Time.Format and Time.Parse +// with --since and --until parameters for `docker logs` and `docker events` +const ( + rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone + rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone + dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00 + dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00 +) + +// GetTimestamp tries to parse given string as golang duration, +// then RFC3339 time and finally as a Unix timestamp. If +// any of these were successful, it returns a Unix timestamp +// as string otherwise returns the given value back. +// In case of duration input, the returned timestamp is computed +// as the given reference time minus the amount of the duration. 
+func GetTimestamp(value string, reference time.Time) (string, error) { + if d, err := time.ParseDuration(value); value != "0" && err == nil { + return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil + } + + var format string + // if the string has a Z or a + or three dashes use parse otherwise use parseinlocation + parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) + + if strings.Contains(value, ".") { + if parseInLocation { + format = rFC3339NanoLocal + } else { + format = time.RFC3339Nano + } + } else if strings.Contains(value, "T") { + // we want the number of colons in the T portion of the timestamp + tcolons := strings.Count(value, ":") + // if parseInLocation is off and we have a +/- zone offset (not Z) then + // there will be an extra colon in the input for the tz offset subtract that + // colon from the tcolons count + if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 { + tcolons-- + } + if parseInLocation { + switch tcolons { + case 0: + format = "2006-01-02T15" + case 1: + format = "2006-01-02T15:04" + default: + format = rFC3339Local + } + } else { + switch tcolons { + case 0: + format = "2006-01-02T15Z07:00" + case 1: + format = "2006-01-02T15:04Z07:00" + default: + format = time.RFC3339 + } + } + } else if parseInLocation { + format = dateLocal + } else { + format = dateWithZone + } + + var t time.Time + var err error + + if parseInLocation { + t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone())) + } else { + t, err = time.Parse(format, value) + } + + if err != nil { + // if there is a `-` then it's an RFC3339 like timestamp + if strings.Contains(value, "-") { + return "", err // was probably an RFC3339 like timestamp but the parser failed with an error + } + if _, _, err := parseTimestamp(value); err != nil { + return "", fmt.Errorf("failed to parse value as time or duration: %q", value) + } + return value, nil // unix timestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server) + } + + return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil +} + +// ParseTimestamps returns seconds and nanoseconds from a timestamp that has the +// format "%d.%09d", time.Unix(), int64(time.Nanosecond())) +// if the incoming nanosecond portion is longer or shorter than 9 digits it is +// converted to nanoseconds. The expectation is that the seconds and +// seconds will be used to create a time variable. 
For example: +// +// seconds, nanoseconds, err := ParseTimestamp("1136073600.000000001",0) +// if err == nil since := time.Unix(seconds, nanoseconds) +// +// returns seconds as def(aultSeconds) if value == "" +func ParseTimestamps(value string, def int64) (int64, int64, error) { + if value == "" { + return def, 0, nil + } + return parseTimestamp(value) +} + +func parseTimestamp(value string) (int64, int64, error) { + sa := strings.SplitN(value, ".", 2) + s, err := strconv.ParseInt(sa[0], 10, 64) + if err != nil { + return s, 0, err + } + if len(sa) != 2 { + return s, 0, nil + } + n, err := strconv.ParseInt(sa[1], 10, 64) + if err != nil { + return s, n, err + } + // should already be in nanoseconds but just in case convert n to nanoseconds + n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1])))) + return s, n, nil +} diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go new file mode 100644 index 000000000..036405299 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -0,0 +1,809 @@ +package types // import "github.com/docker/docker/api/types" + +import ( + "errors" + "fmt" + "io" + "os" + "strings" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/volume" + "github.com/docker/go-connections/nat" +) + +const ( + // MediaTypeRawStream is vendor specific MIME-Type set for raw TTY streams + MediaTypeRawStream = "application/vnd.docker.raw-stream" + + // MediaTypeMultiplexedStream is vendor specific MIME-Type set for stdin/stdout/stderr multiplexed streams + MediaTypeMultiplexedStream = "application/vnd.docker.multiplexed-stream" +) + +// RootFS returns Image's RootFS description including the layer IDs. +type RootFS struct { + Type string `json:",omitempty"` + Layers []string `json:",omitempty"` +} + +// ImageInspect contains response of Engine API: +// GET "/images/{name:.*}/json" +type ImageInspect struct { + // ID is the content-addressable ID of an image. + // + // This identifier is a content-addressable digest calculated from the + // image's configuration (which includes the digests of layers used by + // the image). + // + // Note that this digest differs from the `RepoDigests` below, which + // holds digests of image manifests that reference the image. + ID string `json:"Id"` + + // RepoTags is a list of image names/tags in the local image cache that + // reference this image. + // + // Multiple image tags can refer to the same image, and this list may be + // empty if no tags reference the image, in which case the image is + // "untagged", in which case it can still be referenced by its ID. + RepoTags []string + + // RepoDigests is a list of content-addressable digests of locally available + // image manifests that the image is referenced from. Multiple manifests can + // refer to the same image. + // + // These digests are usually only available if the image was either pulled + // from a registry, or if the image was pushed to a registry, which is when + // the manifest is generated and its digest calculated. + RepoDigests []string + + // Parent is the ID of the parent image. 
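A brief, hypothetical usage sketch of GetTimestamp and ParseTimestamps defined above; the timetypes import alias and the sample inputs are assumptions, not part of the patch.

package main

import (
	"fmt"
	"time"

	timetypes "github.com/docker/docker/api/types/time"
)

func main() {
	now := time.Now()

	// A duration such as "10m" is resolved relative to the reference time
	// and returned as Unix seconds.
	since, err := timetypes.GetTimestamp("10m", now)
	if err != nil {
		panic(err)
	}
	fmt.Println(since)

	// An absolute RFC3339 timestamp is normalized to "<seconds>.<nanoseconds>",
	// which ParseTimestamps splits back into its two components.
	until, err := timetypes.GetTimestamp("2006-01-02T15:04:05Z", now)
	if err != nil {
		panic(err)
	}
	sec, nsec, err := timetypes.ParseTimestamps(until, 0)
	if err != nil {
		panic(err)
	}
	fmt.Println(time.Unix(sec, nsec).UTC())
}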
+ // + // Depending on how the image was created, this field may be empty and + // is only set for images that were built/created locally. This field + // is empty if the image was pulled from an image registry. + Parent string + + // Comment is an optional message that can be set when committing or + // importing the image. + Comment string + + // Created is the date and time at which the image was created, formatted in + // RFC 3339 nano-seconds (time.RFC3339Nano). + Created string + + // Container is the ID of the container that was used to create the image. + // + // Depending on how the image was created, this field may be empty. + Container string + + // ContainerConfig is an optional field containing the configuration of the + // container that was last committed when creating the image. + // + // Previous versions of Docker builder used this field to store build cache, + // and it is not in active use anymore. + ContainerConfig *container.Config + + // DockerVersion is the version of Docker that was used to build the image. + // + // Depending on how the image was created, this field may be empty. + DockerVersion string + + // Author is the name of the author that was specified when committing the + // image, or as specified through MAINTAINER (deprecated) in the Dockerfile. + Author string + Config *container.Config + + // Architecture is the hardware CPU architecture that the image runs on. + Architecture string + + // Variant is the CPU architecture variant (presently ARM-only). + Variant string `json:",omitempty"` + + // OS is the Operating System the image is built to run on. + Os string + + // OsVersion is the version of the Operating System the image is built to + // run on (especially for Windows). + OsVersion string `json:",omitempty"` + + // Size is the total size of the image including all layers it is composed of. + Size int64 + + // VirtualSize is the total size of the image including all layers it is + // composed of. + // + // In versions of Docker before v1.10, this field was calculated from + // the image itself and all of its parent images. Docker v1.10 and up + // store images self-contained, and no longer use a parent-chain, making + // this field an equivalent of the Size field. + // + // This field is kept for backward compatibility, but may be removed in + // a future version of the API. + VirtualSize int64 // TODO(thaJeztah): deprecate this field + + // GraphDriver holds information about the storage driver used to store the + // container's and image's filesystem. + GraphDriver GraphDriverData + + // RootFS contains information about the image's RootFS, including the + // layer IDs. + RootFS RootFS + + // Metadata of the image in the local cache. + // + // This information is local to the daemon, and not part of the image itself. + Metadata ImageMetadata +} + +// ImageMetadata contains engine-local data about the image +type ImageMetadata struct { + // LastTagTime is the date and time at which the image was last tagged. 
+ LastTagTime time.Time `json:",omitempty"` +} + +// Container contains response of Engine API: +// GET "/containers/json" +type Container struct { + ID string `json:"Id"` + Names []string + Image string + ImageID string + Command string + Created int64 + Ports []Port + SizeRw int64 `json:",omitempty"` + SizeRootFs int64 `json:",omitempty"` + Labels map[string]string + State string + Status string + HostConfig struct { + NetworkMode string `json:",omitempty"` + } + NetworkSettings *SummaryNetworkSettings + Mounts []MountPoint +} + +// CopyConfig contains request body of Engine API: +// POST "/containers/"+containerID+"/copy" +type CopyConfig struct { + Resource string +} + +// ContainerPathStat is used to encode the header from +// GET "/containers/{name:.*}/archive" +// "Name" is the file or directory name. +type ContainerPathStat struct { + Name string `json:"name"` + Size int64 `json:"size"` + Mode os.FileMode `json:"mode"` + Mtime time.Time `json:"mtime"` + LinkTarget string `json:"linkTarget"` +} + +// ContainerStats contains response of Engine API: +// GET "/stats" +type ContainerStats struct { + Body io.ReadCloser `json:"body"` + OSType string `json:"ostype"` +} + +// Ping contains response of Engine API: +// GET "/_ping" +type Ping struct { + APIVersion string + OSType string + Experimental bool + BuilderVersion BuilderVersion + + // SwarmStatus provides information about the current swarm status of the + // engine, obtained from the "Swarm" header in the API response. + // + // It can be a nil struct if the API version does not provide this header + // in the ping response, or if an error occurred, in which case the client + // should use other ways to get the current swarm status, such as the /swarm + // endpoint. + SwarmStatus *swarm.Status +} + +// ComponentVersion describes the version information for a specific component. +type ComponentVersion struct { + Name string + Version string + Details map[string]string `json:",omitempty"` +} + +// Version contains response of Engine API: +// GET "/version" +type Version struct { + Platform struct{ Name string } `json:",omitempty"` + Components []ComponentVersion `json:",omitempty"` + + // The following fields are deprecated, they relate to the Engine component and are kept for backwards compatibility + + Version string + APIVersion string `json:"ApiVersion"` + MinAPIVersion string `json:"MinAPIVersion,omitempty"` + GitCommit string + GoVersion string + Os string + Arch string + KernelVersion string `json:",omitempty"` + Experimental bool `json:",omitempty"` + BuildTime string `json:",omitempty"` +} + +// Commit holds the Git-commit (SHA1) that a binary was built from, as reported +// in the version-string of external tools, such as containerd, or runC. +type Commit struct { + ID string // ID is the actual commit ID of external tool. + Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time. +} + +// Info contains response of Engine API: +// GET "/info" +type Info struct { + ID string + Containers int + ContainersRunning int + ContainersPaused int + ContainersStopped int + Images int + Driver string + DriverStatus [][2]string + SystemStatus [][2]string `json:",omitempty"` // SystemStatus is only propagated by the Swarm standalone API + Plugins PluginsInfo + MemoryLimit bool + SwapLimit bool + KernelMemory bool `json:",omitempty"` // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes + KernelMemoryTCP bool `json:",omitempty"` // KernelMemoryTCP is not supported on cgroups v2. 
+ CPUCfsPeriod bool `json:"CpuCfsPeriod"` + CPUCfsQuota bool `json:"CpuCfsQuota"` + CPUShares bool + CPUSet bool + PidsLimit bool + IPv4Forwarding bool + BridgeNfIptables bool + BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` + Debug bool + NFd int + OomKillDisable bool + NGoroutines int + SystemTime string + LoggingDriver string + CgroupDriver string + CgroupVersion string `json:",omitempty"` + NEventsListener int + KernelVersion string + OperatingSystem string + OSVersion string + OSType string + Architecture string + IndexServerAddress string + RegistryConfig *registry.ServiceConfig + NCPU int + MemTotal int64 + GenericResources []swarm.GenericResource + DockerRootDir string + HTTPProxy string `json:"HttpProxy"` + HTTPSProxy string `json:"HttpsProxy"` + NoProxy string + Name string + Labels []string + ExperimentalBuild bool + ServerVersion string + ClusterStore string `json:",omitempty"` // Deprecated: host-discovery and overlay networks with external k/v stores are deprecated + ClusterAdvertise string `json:",omitempty"` // Deprecated: host-discovery and overlay networks with external k/v stores are deprecated + Runtimes map[string]Runtime + DefaultRuntime string + Swarm swarm.Info + // LiveRestoreEnabled determines whether containers should be kept + // running when the daemon is shutdown or upon daemon start if + // running containers are detected + LiveRestoreEnabled bool + Isolation container.Isolation + InitBinary string + ContainerdCommit Commit + RuncCommit Commit + InitCommit Commit + SecurityOptions []string + ProductLicense string `json:",omitempty"` + DefaultAddressPools []NetworkAddressPool `json:",omitempty"` + + // Warnings contains a slice of warnings that occurred while collecting + // system information. These warnings are intended to be informational + // messages for the user, and are not intended to be parsed / used for + // other purposes, as they do not have a fixed format. + Warnings []string +} + +// KeyValue holds a key/value pair +type KeyValue struct { + Key, Value string +} + +// NetworkAddressPool is a temp struct used by Info struct +type NetworkAddressPool struct { + Base string + Size int +} + +// SecurityOpt contains the name and options of a security option +type SecurityOpt struct { + Name string + Options []KeyValue +} + +// DecodeSecurityOptions decodes a security options string slice to a type safe +// SecurityOpt +func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) { + so := []SecurityOpt{} + for _, opt := range opts { + // support output from a < 1.13 docker daemon + if !strings.Contains(opt, "=") { + so = append(so, SecurityOpt{Name: opt}) + continue + } + secopt := SecurityOpt{} + split := strings.Split(opt, ",") + for _, s := range split { + kv := strings.SplitN(s, "=", 2) + if len(kv) != 2 { + return nil, fmt.Errorf("invalid security option %q", s) + } + if kv[0] == "" || kv[1] == "" { + return nil, errors.New("invalid empty security option") + } + if kv[0] == "name" { + secopt.Name = kv[1] + continue + } + secopt.Options = append(secopt.Options, KeyValue{Key: kv[0], Value: kv[1]}) + } + so = append(so, secopt) + } + return so, nil +} + +// PluginsInfo is a temp struct holding Plugins name +// registered with docker daemon. 
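DecodeSecurityOptions above turns the daemon's raw security-option strings into typed values. A short illustrative sketch follows; the sample strings are assumptions written in the documented "name=...,key=value" format.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	raw := []string{
		"name=seccomp,profile=default", // name plus an additional key/value option
		"name=apparmor",                // name only
		"selinux",                      // pre-1.13 style option without "=" is kept as a bare name
	}

	opts, err := types.DecodeSecurityOptions(raw)
	if err != nil {
		panic(err)
	}
	for _, o := range opts {
		fmt.Println(o.Name, o.Options)
	}
}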
It is used by Info struct +type PluginsInfo struct { + // List of Volume plugins registered + Volume []string + // List of Network plugins registered + Network []string + // List of Authorization plugins registered + Authorization []string + // List of Log plugins registered + Log []string +} + +// ExecStartCheck is a temp struct used by execStart +// Config fields is part of ExecConfig in runconfig package +type ExecStartCheck struct { + // ExecStart will first check if it's detached + Detach bool + // Check if there's a tty + Tty bool + // Terminal size [height, width], unused if Tty == false + ConsoleSize *[2]uint `json:",omitempty"` +} + +// HealthcheckResult stores information about a single run of a healthcheck probe +type HealthcheckResult struct { + Start time.Time // Start is the time this check started + End time.Time // End is the time this check ended + ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe + Output string // Output from last check +} + +// Health states +const ( + NoHealthcheck = "none" // Indicates there is no healthcheck + Starting = "starting" // Starting indicates that the container is not yet ready + Healthy = "healthy" // Healthy indicates that the container is running correctly + Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem +) + +// Health stores information about the container's healthcheck results +type Health struct { + Status string // Status is one of Starting, Healthy or Unhealthy + FailingStreak int // FailingStreak is the number of consecutive failures + Log []*HealthcheckResult // Log contains the last few results (oldest first) +} + +// ContainerState stores container's running state +// it's part of ContainerJSONBase and will return by "inspect" command +type ContainerState struct { + Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead" + Running bool + Paused bool + Restarting bool + OOMKilled bool + Dead bool + Pid int + ExitCode int + Error string + StartedAt string + FinishedAt string + Health *Health `json:",omitempty"` +} + +// ContainerNode stores information about the node that a container +// is running on. 
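The Health constants and structs above describe healthcheck results. The following is a minimal sketch of reading them from an inspected container's state; the reportHealth helper is hypothetical.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
)

// reportHealth prints the healthcheck status carried in a container's state,
// falling back gracefully when no healthcheck is configured.
func reportHealth(state *types.ContainerState) {
	if state == nil || state.Health == nil {
		fmt.Println("no healthcheck configured")
		return
	}
	h := state.Health
	fmt.Printf("status=%s failing_streak=%d\n", h.Status, h.FailingStreak)
	if h.Status == types.Unhealthy && len(h.Log) > 0 {
		last := h.Log[len(h.Log)-1] // Log holds the most recent results, oldest first
		fmt.Printf("last probe: exit=%d output=%q\n", last.ExitCode, last.Output)
	}
}

func main() {
	reportHealth(nil)
}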
It's only used by the Docker Swarm standalone API +type ContainerNode struct { + ID string + IPAddress string `json:"IP"` + Addr string + Name string + Cpus int + Memory int64 + Labels map[string]string +} + +// ContainerJSONBase contains response of Engine API: +// GET "/containers/{name:.*}/json" +type ContainerJSONBase struct { + ID string `json:"Id"` + Created string + Path string + Args []string + State *ContainerState + Image string + ResolvConfPath string + HostnamePath string + HostsPath string + LogPath string + Node *ContainerNode `json:",omitempty"` // Node is only propagated by Docker Swarm standalone API + Name string + RestartCount int + Driver string + Platform string + MountLabel string + ProcessLabel string + AppArmorProfile string + ExecIDs []string + HostConfig *container.HostConfig + GraphDriver GraphDriverData + SizeRw *int64 `json:",omitempty"` + SizeRootFs *int64 `json:",omitempty"` +} + +// ContainerJSON is newly used struct along with MountPoint +type ContainerJSON struct { + *ContainerJSONBase + Mounts []MountPoint + Config *container.Config + NetworkSettings *NetworkSettings +} + +// NetworkSettings exposes the network settings in the api +type NetworkSettings struct { + NetworkSettingsBase + DefaultNetworkSettings + Networks map[string]*network.EndpointSettings +} + +// SummaryNetworkSettings provides a summary of container's networks +// in /containers/json +type SummaryNetworkSettings struct { + Networks map[string]*network.EndpointSettings +} + +// NetworkSettingsBase holds basic information about networks +type NetworkSettingsBase struct { + Bridge string // Bridge is the Bridge name the network uses(e.g. `docker0`) + SandboxID string // SandboxID uniquely represents a container's network stack + HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface + LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix + LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address + Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port + SandboxKey string // SandboxKey identifies the sandbox + SecondaryIPAddresses []network.Address + SecondaryIPv6Addresses []network.Address +} + +// DefaultNetworkSettings holds network information +// during the 2 release deprecation period. +// It will be removed in Docker 1.11. +type DefaultNetworkSettings struct { + EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox + Gateway string // Gateway holds the gateway address for the network + GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address + GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address + IPAddress string // IPAddress holds the IPv4 address for the network + IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address + IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 + MacAddress string // MacAddress holds the MAC address for the network +} + +// MountPoint represents a mount point configuration inside the container. +// This is used for reporting the mountpoints in use by a container. +type MountPoint struct { + // Type is the type of mount, see `Type` definitions in + // github.com/docker/docker/api/types/mount.Type + Type mount.Type `json:",omitempty"` + + // Name is the name reference to the underlying data defined by `Source` + // e.g., the volume name. 
+ Name string `json:",omitempty"` + + // Source is the source location of the mount. + // + // For volumes, this contains the storage location of the volume (within + // `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains + // the source (host) part of the bind-mount. For `tmpfs` mount points, this + // field is empty. + Source string + + // Destination is the path relative to the container root (`/`) where the + // Source is mounted inside the container. + Destination string + + // Driver is the volume driver used to create the volume (if it is a volume). + Driver string `json:",omitempty"` + + // Mode is a comma separated list of options supplied by the user when + // creating the bind/volume mount. + // + // The default is platform-specific (`"z"` on Linux, empty on Windows). + Mode string + + // RW indicates whether the mount is mounted writable (read-write). + RW bool + + // Propagation describes how mounts are propagated from the host into the + // mount point, and vice-versa. Refer to the Linux kernel documentation + // for details: + // https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt + // + // This field is not used on Windows. + Propagation mount.Propagation +} + +// NetworkResource is the body of the "get network" http response message +type NetworkResource struct { + Name string // Name is the requested name of the network + ID string `json:"Id"` // ID uniquely identifies a network on a single machine + Created time.Time // Created is the time the network created + Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level) + Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) + EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 + IPAM network.IPAM // IPAM is the network's IP Address Management + Internal bool // Internal represents if the network is used internal only + Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. + Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster. + ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. + ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services. + Containers map[string]EndpointResource // Containers contains endpoints belonging to the network + Options map[string]string // Options holds the network specific options to use for when creating the network + Labels map[string]string // Labels holds metadata specific to the network being created + Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network + Services map[string]network.ServiceInfo `json:",omitempty"` +} + +// EndpointResource contains network resources allocated and used for a container in a network +type EndpointResource struct { + Name string + EndpointID string + MacAddress string + IPv4Address string + IPv6Address string +} + +// NetworkCreate is the expected body of the "create network" http request message +type NetworkCreate struct { + // Check for networks with duplicate names. + // Network is primarily keyed based on a random ID and not on the name. 
+ // Network name is strictly a user-friendly alias to the network + // which is uniquely identified using ID. + // And there is no guaranteed way to check for duplicates. + // Option CheckDuplicate is there to provide a best effort checking of any networks + // which has the same name but it is not guaranteed to catch all name collisions. + CheckDuplicate bool + Driver string + Scope string + EnableIPv6 bool + IPAM *network.IPAM + Internal bool + Attachable bool + Ingress bool + ConfigOnly bool + ConfigFrom *network.ConfigReference + Options map[string]string + Labels map[string]string +} + +// NetworkCreateRequest is the request message sent to the server for network create call. +type NetworkCreateRequest struct { + NetworkCreate + Name string +} + +// NetworkCreateResponse is the response message sent by the server for network create call +type NetworkCreateResponse struct { + ID string `json:"Id"` + Warning string +} + +// NetworkConnect represents the data to be used to connect a container to the network +type NetworkConnect struct { + Container string + EndpointConfig *network.EndpointSettings `json:",omitempty"` +} + +// NetworkDisconnect represents the data to be used to disconnect a container from the network +type NetworkDisconnect struct { + Container string + Force bool +} + +// NetworkInspectOptions holds parameters to inspect network +type NetworkInspectOptions struct { + Scope string + Verbose bool +} + +// Checkpoint represents the details of a checkpoint +type Checkpoint struct { + Name string // Name is the name of the checkpoint +} + +// Runtime describes an OCI runtime +type Runtime struct { + Path string `json:"path"` + Args []string `json:"runtimeArgs,omitempty"` + + // This is exposed here only for internal use + // It is not currently supported to specify custom shim configs + Shim *ShimConfig `json:"-"` +} + +// ShimConfig is used by runtime to configure containerd shims +type ShimConfig struct { + Binary string + Opts interface{} +} + +// DiskUsageObject represents an object type used for disk usage query filtering. +type DiskUsageObject string + +const ( + // ContainerObject represents a container DiskUsageObject. + ContainerObject DiskUsageObject = "container" + // ImageObject represents an image DiskUsageObject. + ImageObject DiskUsageObject = "image" + // VolumeObject represents a volume DiskUsageObject. + VolumeObject DiskUsageObject = "volume" + // BuildCacheObject represents a build-cache DiskUsageObject. + BuildCacheObject DiskUsageObject = "build-cache" +) + +// DiskUsageOptions holds parameters for system disk usage query. +type DiskUsageOptions struct { + // Types specifies what object types to include in the response. If empty, + // all object types are returned. + Types []DiskUsageObject +} + +// DiskUsage contains response of Engine API: +// GET "/system/df" +type DiskUsage struct { + LayersSize int64 + Images []*ImageSummary + Containers []*Container + Volumes []*volume.Volume + BuildCache []*BuildCache + BuilderSize int64 `json:",omitempty"` // Deprecated: deprecated in API 1.38, and no longer used since API 1.40. 
+}
+
+// ContainersPruneReport contains the response for Engine API:
+// POST "/containers/prune"
+type ContainersPruneReport struct {
+	ContainersDeleted []string
+	SpaceReclaimed    uint64
+}
+
+// VolumesPruneReport contains the response for Engine API:
+// POST "/volumes/prune"
+type VolumesPruneReport struct {
+	VolumesDeleted []string
+	SpaceReclaimed uint64
+}
+
+// ImagesPruneReport contains the response for Engine API:
+// POST "/images/prune"
+type ImagesPruneReport struct {
+	ImagesDeleted  []ImageDeleteResponseItem
+	SpaceReclaimed uint64
+}
+
+// BuildCachePruneReport contains the response for Engine API:
+// POST "/build/prune"
+type BuildCachePruneReport struct {
+	CachesDeleted  []string
+	SpaceReclaimed uint64
+}
+
+// NetworksPruneReport contains the response for Engine API:
+// POST "/networks/prune"
+type NetworksPruneReport struct {
+	NetworksDeleted []string
+}
+
+// SecretCreateResponse contains the information returned to a client
+// on the creation of a new secret.
+type SecretCreateResponse struct {
+	// ID is the id of the created secret.
+	ID string
+}
+
+// SecretListOptions holds parameters to list secrets
+type SecretListOptions struct {
+	Filters filters.Args
+}
+
+// ConfigCreateResponse contains the information returned to a client
+// on the creation of a new config.
+type ConfigCreateResponse struct {
+	// ID is the id of the created config.
+	ID string
+}
+
+// ConfigListOptions holds parameters to list configs
+type ConfigListOptions struct {
+	Filters filters.Args
+}
+
+// PushResult contains the tag, manifest digest, and manifest size from the
+// push. It's used to signal this information to the trust code in the client
+// so it can sign the manifest if necessary.
+type PushResult struct {
+	Tag    string
+	Digest string
+	Size   int
+}
+
+// BuildResult contains the image id of a successful build
+type BuildResult struct {
+	ID string
+}
+
+// BuildCache contains information about a build cache record.
+type BuildCache struct {
+	// ID is the unique ID of the build cache record.
+	ID string
+	// Parent is the ID of the parent build cache record.
+	//
+	// Deprecated: deprecated in API v1.42 and up, as it was deprecated in BuildKit; use Parents instead.
+	Parent string `json:"Parent,omitempty"`
+	// Parents is the list of parent build cache record IDs.
+	Parents []string `json:"Parents,omitempty"`
+	// Type is the cache record type.
+	Type string
+	// Description is a description of the build-step that produced the build cache.
+	Description string
+	// InUse indicates if the build cache is in use.
+	InUse bool
+	// Shared indicates if the build cache is shared.
+	Shared bool
+	// Size is the amount of disk space used by the build cache (in bytes).
+	Size int64
+	// CreatedAt is the date and time at which the build cache was created.
+	CreatedAt time.Time
+	// LastUsedAt is the date and time at which the build cache was last used.
+	LastUsedAt *time.Time
+	UsageCount int
+}
+
+// BuildCachePruneOptions holds parameters to prune the build cache
+type BuildCachePruneOptions struct {
+	All         bool
+	KeepStorage int64
+	Filters     filters.Args
+}
diff --git a/vendor/github.com/docker/docker/api/types/versions/README.md b/vendor/github.com/docker/docker/api/types/versions/README.md
new file mode 100644
index 000000000..1ef911edb
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/versions/README.md
@@ -0,0 +1,14 @@
+# Legacy API type versions
+
+This package includes types for legacy API versions. The stable version of the API types lives in `api/types/*.go`.
+
+Consider moving a type here when you need to keep backwards compatibility in the API. These legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal to `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`.
+
+## Package name conventions
+
+The package name convention is to use `v` as a prefix for the version number and `p` (patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention:
+
+1. We cannot use `.` because it's interpreted by the language; think of `v1.20.CallFunction`.
+2. We cannot use `_` because golint complains about it. The code is actually valid, but it arguably looks weirder: `v1_20.CallFunction`.
+
+For instance, if you want to modify a type that was available in version `1.21` of the API but will have different fields in version `1.22`, create a new package under `api/types/versions/v1p21`.
diff --git a/vendor/github.com/docker/docker/api/types/versions/compare.go b/vendor/github.com/docker/docker/api/types/versions/compare.go
new file mode 100644
index 000000000..489e917ee
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/versions/compare.go
@@ -0,0 +1,65 @@
+package versions // import "github.com/docker/docker/api/types/versions"
+
+import (
+	"strconv"
+	"strings"
+)
+
+// compare compares two version strings and
+// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise.
+func compare(v1, v2 string) int {
+	if v1 == v2 {
+		return 0
+	}
+	var (
+		currTab  = strings.Split(v1, ".")
+		otherTab = strings.Split(v2, ".")
+	)
+
+	max := len(currTab)
+	if len(otherTab) > max {
+		max = len(otherTab)
+	}
+	for i := 0; i < max; i++ {
+		var currInt, otherInt int
+
+		if len(currTab) > i {
+			currInt, _ = strconv.Atoi(currTab[i])
+		}
+		if len(otherTab) > i {
+			otherInt, _ = strconv.Atoi(otherTab[i])
+		}
+		if currInt > otherInt {
+			return 1
+		}
+		if otherInt > currInt {
+			return -1
+		}
+	}
+	return 0
+}
+
+// LessThan checks if a version is less than another
+func LessThan(v, other string) bool {
+	return compare(v, other) == -1
+}
+
+// LessThanOrEqualTo checks if a version is less than or equal to another
+func LessThanOrEqualTo(v, other string) bool {
+	return compare(v, other) <= 0
+}
+
+// GreaterThan checks if a version is greater than another
+func GreaterThan(v, other string) bool {
+	return compare(v, other) == 1
+}
+
+// GreaterThanOrEqualTo checks if a version is greater than or equal to another
+func GreaterThanOrEqualTo(v, other string) bool {
+	return compare(v, other) >= 0
+}
+
+// Equal checks if a version is equal to another
+func Equal(v, other string) bool {
+	return compare(v, other) == 0
+}
diff --git a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go
new file mode 100644
index 000000000..55fc5d389
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go
@@ -0,0 +1,420 @@
+package volume
+
+import (
+	"github.com/docker/docker/api/types/swarm"
+)
+
+// ClusterVolume contains options and information specific to, and only present
+// on, Swarm CSI cluster volumes.
+type ClusterVolume struct {
+	// ID is the Swarm ID of the volume. Because cluster volumes are Swarm
+	// objects, they have an ID, unlike non-cluster volumes, which only have a
+	// Name.
This ID can be used to refer to the cluster volume. + ID string + + // Meta is the swarm metadata about this volume. + swarm.Meta + + // Spec is the cluster-specific options from which this volume is derived. + Spec ClusterVolumeSpec + + // PublishStatus contains the status of the volume as it pertains to its + // publishing on Nodes. + PublishStatus []*PublishStatus `json:",omitempty"` + + // Info is information about the global status of the volume. + Info *Info `json:",omitempty"` +} + +// ClusterVolumeSpec contains the spec used to create this volume. +type ClusterVolumeSpec struct { + // Group defines the volume group of this volume. Volumes belonging to the + // same group can be referred to by group name when creating Services. + // Referring to a volume by group instructs swarm to treat volumes in that + // group interchangeably for the purpose of scheduling. Volumes with an + // empty string for a group technically all belong to the same, emptystring + // group. + Group string `json:",omitempty"` + + // AccessMode defines how the volume is used by tasks. + AccessMode *AccessMode `json:",omitempty"` + + // AccessibilityRequirements specifies where in the cluster a volume must + // be accessible from. + // + // This field must be empty if the plugin does not support + // VOLUME_ACCESSIBILITY_CONSTRAINTS capabilities. If it is present but the + // plugin does not support it, volume will not be created. + // + // If AccessibilityRequirements is empty, but the plugin does support + // VOLUME_ACCESSIBILITY_CONSTRAINTS, then Swarmkit will assume the entire + // cluster is a valid target for the volume. + AccessibilityRequirements *TopologyRequirement `json:",omitempty"` + + // CapacityRange defines the desired capacity that the volume should be + // created with. If nil, the plugin will decide the capacity. + CapacityRange *CapacityRange `json:",omitempty"` + + // Secrets defines Swarm Secrets that are passed to the CSI storage plugin + // when operating on this volume. + Secrets []Secret `json:",omitempty"` + + // Availability is the Volume's desired availability. Analogous to Node + // Availability, this allows the user to take volumes offline in order to + // update or delete them. + Availability Availability `json:",omitempty"` +} + +// Availability specifies the availability of the volume. +type Availability string + +const ( + // AvailabilityActive indicates that the volume is active and fully + // schedulable on the cluster. + AvailabilityActive Availability = "active" + + // AvailabilityPause indicates that no new workloads should use the + // volume, but existing workloads can continue to use it. + AvailabilityPause Availability = "pause" + + // AvailabilityDrain indicates that all workloads using this volume + // should be rescheduled, and the volume unpublished from all nodes. + AvailabilityDrain Availability = "drain" +) + +// AccessMode defines the access mode of a volume. +type AccessMode struct { + // Scope defines the set of nodes this volume can be used on at one time. + Scope Scope `json:",omitempty"` + + // Sharing defines the number and way that different tasks can use this + // volume at one time. + Sharing SharingMode `json:",omitempty"` + + // MountVolume defines options for using this volume as a Mount-type + // volume. + // + // Either BlockVolume or MountVolume, but not both, must be present. + MountVolume *TypeMount `json:",omitempty"` + + // BlockVolume defines options for using this volume as a Block-type + // volume. 
+ // + // Either BlockVolume or MountVolume, but not both, must be present. + BlockVolume *TypeBlock `json:",omitempty"` +} + +// Scope defines the Scope of a Cluster Volume. This is how many nodes a +// Volume can be accessed simultaneously on. +type Scope string + +const ( + // ScopeSingleNode indicates the volume can be used on one node at a + // time. + ScopeSingleNode Scope = "single" + + // ScopeMultiNode indicates the volume can be used on many nodes at + // the same time. + ScopeMultiNode Scope = "multi" +) + +// SharingMode defines the Sharing of a Cluster Volume. This is how Tasks using a +// Volume at the same time can use it. +type SharingMode string + +const ( + // SharingNone indicates that only one Task may use the Volume at a + // time. + SharingNone SharingMode = "none" + + // SharingReadOnly indicates that the Volume may be shared by any + // number of Tasks, but they must be read-only. + SharingReadOnly SharingMode = "readonly" + + // SharingOneWriter indicates that the Volume may be shared by any + // number of Tasks, but all after the first must be read-only. + SharingOneWriter SharingMode = "onewriter" + + // SharingAll means that the Volume may be shared by any number of + // Tasks, as readers or writers. + SharingAll SharingMode = "all" +) + +// TypeBlock defines options for using a volume as a block-type volume. +// +// Intentionally empty. +type TypeBlock struct{} + +// TypeMount contains options for using a volume as a Mount-type +// volume. +type TypeMount struct { + // FsType specifies the filesystem type for the mount volume. Optional. + FsType string `json:",omitempty"` + + // MountFlags defines flags to pass when mounting the volume. Optional. + MountFlags []string `json:",omitempty"` +} + +// TopologyRequirement expresses the user's requirements for a volume's +// accessible topology. +type TopologyRequirement struct { + // Requisite specifies a list of Topologies, at least one of which the + // volume must be accessible from. + // + // Taken verbatim from the CSI Spec: + // + // Specifies the list of topologies the provisioned volume MUST be + // accessible from. + // This field is OPTIONAL. If TopologyRequirement is specified either + // requisite or preferred or both MUST be specified. + // + // If requisite is specified, the provisioned volume MUST be + // accessible from at least one of the requisite topologies. + // + // Given + // x = number of topologies provisioned volume is accessible from + // n = number of requisite topologies + // The CO MUST ensure n >= 1. The SP MUST ensure x >= 1 + // If x==n, then the SP MUST make the provisioned volume available to + // all topologies from the list of requisite topologies. If it is + // unable to do so, the SP MUST fail the CreateVolume call. + // For example, if a volume should be accessible from a single zone, + // and requisite = + // {"region": "R1", "zone": "Z2"} + // then the provisioned volume MUST be accessible from the "region" + // "R1" and the "zone" "Z2". + // Similarly, if a volume should be accessible from two zones, and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // then the provisioned volume MUST be accessible from the "region" + // "R1" and both "zone" "Z2" and "zone" "Z3". + // + // If xn, then the SP MUST make the provisioned volume available from + // all topologies from the list of requisite topologies and MAY choose + // the remaining x-n unique topologies from the list of all possible + // topologies. 
If it is unable to do so, the SP MUST fail the + // CreateVolume call. + // For example, if a volume should be accessible from two zones, and + // requisite = + // {"region": "R1", "zone": "Z2"} + // then the provisioned volume MUST be accessible from the "region" + // "R1" and the "zone" "Z2" and the SP may select the second zone + // independently, e.g. "R1/Z4". + Requisite []Topology `json:",omitempty"` + + // Preferred is a list of Topologies that the volume should attempt to be + // provisioned in. + // + // Taken from the CSI spec: + // + // Specifies the list of topologies the CO would prefer the volume to + // be provisioned in. + // + // This field is OPTIONAL. If TopologyRequirement is specified either + // requisite or preferred or both MUST be specified. + // + // An SP MUST attempt to make the provisioned volume available using + // the preferred topologies in order from first to last. + // + // If requisite is specified, all topologies in preferred list MUST + // also be present in the list of requisite topologies. + // + // If the SP is unable to to make the provisioned volume available + // from any of the preferred topologies, the SP MAY choose a topology + // from the list of requisite topologies. + // If the list of requisite topologies is not specified, then the SP + // MAY choose from the list of all possible topologies. + // If the list of requisite topologies is specified and the SP is + // unable to to make the provisioned volume available from any of the + // requisite topologies it MUST fail the CreateVolume call. + // + // Example 1: + // Given a volume should be accessible from a single zone, and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // preferred = + // {"region": "R1", "zone": "Z3"} + // then the the SP SHOULD first attempt to make the provisioned volume + // available from "zone" "Z3" in the "region" "R1" and fall back to + // "zone" "Z2" in the "region" "R1" if that is not possible. + // + // Example 2: + // Given a volume should be accessible from a single zone, and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"}, + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z5"} + // preferred = + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z2"} + // then the the SP SHOULD first attempt to make the provisioned volume + // accessible from "zone" "Z4" in the "region" "R1" and fall back to + // "zone" "Z2" in the "region" "R1" if that is not possible. If that + // is not possible, the SP may choose between either the "zone" + // "Z3" or "Z5" in the "region" "R1". + // + // Example 3: + // Given a volume should be accessible from TWO zones (because an + // opaque parameter in CreateVolumeRequest, for example, specifies + // the volume is accessible from two zones, aka synchronously + // replicated), and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"}, + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z5"} + // preferred = + // {"region": "R1", "zone": "Z5"}, + // {"region": "R1", "zone": "Z3"} + // then the the SP SHOULD first attempt to make the provisioned volume + // accessible from the combination of the two "zones" "Z5" and "Z3" in + // the "region" "R1". If that's not possible, it should fall back to + // a combination of "Z5" and other possibilities from the list of + // requisite. 
If that's not possible, it should fall back to a + // combination of "Z3" and other possibilities from the list of + // requisite. If that's not possible, it should fall back to a + // combination of other possibilities from the list of requisite. + Preferred []Topology `json:",omitempty"` +} + +// Topology is a map of topological domains to topological segments. +// +// This description is taken verbatim from the CSI Spec: +// +// A topological domain is a sub-division of a cluster, like "region", +// "zone", "rack", etc. +// A topological segment is a specific instance of a topological domain, +// like "zone3", "rack3", etc. +// For example {"com.company/zone": "Z1", "com.company/rack": "R3"} +// Valid keys have two segments: an OPTIONAL prefix and name, separated +// by a slash (/), for example: "com.company.example/zone". +// The key name segment is REQUIRED. The prefix is OPTIONAL. +// The key name MUST be 63 characters or less, begin and end with an +// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), +// underscores (_), dots (.), or alphanumerics in between, for example +// "zone". +// The key prefix MUST be 63 characters or less, begin and end with a +// lower-case alphanumeric character ([a-z0-9]), contain only +// dashes (-), dots (.), or lower-case alphanumerics in between, and +// follow domain name notation format +// (https://tools.ietf.org/html/rfc1035#section-2.3.1). +// The key prefix SHOULD include the plugin's host company name and/or +// the plugin name, to minimize the possibility of collisions with keys +// from other plugins. +// If a key prefix is specified, it MUST be identical across all +// topology keys returned by the SP (across all RPCs). +// Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone" +// MUST not both exist. +// Each value (topological segment) MUST contain 1 or more strings. +// Each string MUST be 63 characters or less and begin and end with an +// alphanumeric character with '-', '_', '.', or alphanumerics in +// between. +type Topology struct { + Segments map[string]string `json:",omitempty"` +} + +// CapacityRange describes the minimum and maximum capacity a volume should be +// created with +type CapacityRange struct { + // RequiredBytes specifies that a volume must be at least this big. The + // value of 0 indicates an unspecified minimum. + RequiredBytes int64 + + // LimitBytes specifies that a volume must not be bigger than this. The + // value of 0 indicates an unspecified maximum + LimitBytes int64 +} + +// Secret represents a Swarm Secret value that must be passed to the CSI +// storage plugin when operating on this Volume. It represents one key-value +// pair of possibly many. +type Secret struct { + // Key is the name of the key of the key-value pair passed to the plugin. + Key string + + // Secret is the swarm Secret object from which to read data. This can be a + // Secret name or ID. The Secret data is retrieved by Swarm and used as the + // value of the key-value pair passed to the plugin. + Secret string +} + +// PublishState represents the state of a Volume as it pertains to its +// use on a particular Node. +type PublishState string + +const ( + // StatePending indicates that the volume should be published on + // this node, but the call to ControllerPublishVolume has not been + // successfully completed yet and the result recorded by swarmkit. + StatePending PublishState = "pending-publish" + + // StatePublished means the volume is published successfully to the node. 
+ StatePublished PublishState = "published" + + // StatePendingNodeUnpublish indicates that the Volume should be + // unpublished on the Node, and we're waiting for confirmation that it has + // done so. After the Node has confirmed that the Volume has been + // unpublished, the state will move to StatePendingUnpublish. + StatePendingNodeUnpublish PublishState = "pending-node-unpublish" + + // StatePendingUnpublish means the volume is still published to the node + // by the controller, awaiting the operation to unpublish it. + StatePendingUnpublish PublishState = "pending-controller-unpublish" +) + +// PublishStatus represents the status of the volume as published to an +// individual node +type PublishStatus struct { + // NodeID is the ID of the swarm node this Volume is published to. + NodeID string `json:",omitempty"` + + // State is the publish state of the volume. + State PublishState `json:",omitempty"` + + // PublishContext is the PublishContext returned by the CSI plugin when + // a volume is published. + PublishContext map[string]string `json:",omitempty"` +} + +// Info contains information about the Volume as a whole as provided by +// the CSI storage plugin. +type Info struct { + // CapacityBytes is the capacity of the volume in bytes. A value of 0 + // indicates that the capacity is unknown. + CapacityBytes int64 `json:",omitempty"` + + // VolumeContext is the context originating from the CSI storage plugin + // when the Volume is created. + VolumeContext map[string]string `json:",omitempty"` + + // VolumeID is the ID of the Volume as seen by the CSI storage plugin. This + // is distinct from the Volume's Swarm ID, which is the ID used by all of + // the Docker Engine to refer to the Volume. If this field is blank, then + // the Volume has not been successfully created yet. + VolumeID string `json:",omitempty"` + + // AccessibleTopolgoy is the topology this volume is actually accessible + // from. + AccessibleTopology []Topology `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/volume/create_options.go b/vendor/github.com/docker/docker/api/types/volume/create_options.go new file mode 100644 index 000000000..37c41a609 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/create_options.go @@ -0,0 +1,29 @@ +package volume + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// CreateOptions VolumeConfig +// +// Volume configuration +// swagger:model CreateOptions +type CreateOptions struct { + + // cluster volume spec + ClusterVolumeSpec *ClusterVolumeSpec `json:"ClusterVolumeSpec,omitempty"` + + // Name of the volume driver to use. + Driver string `json:"Driver,omitempty"` + + // A mapping of driver options and values. These options are + // passed directly to the driver and are driver specific. + // + DriverOpts map[string]string `json:"DriverOpts,omitempty"` + + // User-defined key/value metadata. + Labels map[string]string `json:"Labels,omitempty"` + + // The new volume's name. If not specified, Docker generates a name. 
+ // + Name string `json:"Name,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/volume/deprecated.go b/vendor/github.com/docker/docker/api/types/volume/deprecated.go new file mode 100644 index 000000000..ab622d8cc --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/deprecated.go @@ -0,0 +1,11 @@ +package volume // import "github.com/docker/docker/api/types/volume" + +// VolumeCreateBody Volume configuration +// +// Deprecated: use CreateOptions +type VolumeCreateBody = CreateOptions + +// VolumeListOKBody Volume list response +// +// Deprecated: use ListResponse +type VolumeListOKBody = ListResponse diff --git a/vendor/github.com/docker/docker/api/types/volume/list_response.go b/vendor/github.com/docker/docker/api/types/volume/list_response.go new file mode 100644 index 000000000..ca5192a2a --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/list_response.go @@ -0,0 +1,18 @@ +package volume + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ListResponse VolumeListResponse +// +// Volume list response +// swagger:model ListResponse +type ListResponse struct { + + // List of volumes + Volumes []*Volume `json:"Volumes"` + + // Warnings that occurred when fetching the list of volumes. + // + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/volume/options.go b/vendor/github.com/docker/docker/api/types/volume/options.go new file mode 100644 index 000000000..8b0dd1389 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/options.go @@ -0,0 +1,8 @@ +package volume // import "github.com/docker/docker/api/types/volume" + +import "github.com/docker/docker/api/types/filters" + +// ListOptions holds parameters to list volumes. +type ListOptions struct { + Filters filters.Args +} diff --git a/vendor/github.com/docker/docker/api/types/volume/volume.go b/vendor/github.com/docker/docker/api/types/volume/volume.go new file mode 100644 index 000000000..ea7d555e5 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/volume.go @@ -0,0 +1,75 @@ +package volume + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Volume volume +// swagger:model Volume +type Volume struct { + + // cluster volume + ClusterVolume *ClusterVolume `json:"ClusterVolume,omitempty"` + + // Date/Time the volume was created. + CreatedAt string `json:"CreatedAt,omitempty"` + + // Name of the volume driver used by the volume. + // Required: true + Driver string `json:"Driver"` + + // User-defined key/value metadata. + // Required: true + Labels map[string]string `json:"Labels"` + + // Mount path of the volume on the host. + // Required: true + Mountpoint string `json:"Mountpoint"` + + // Name of the volume. + // Required: true + Name string `json:"Name"` + + // The driver specific options used when creating the volume. + // + // Required: true + Options map[string]string `json:"Options"` + + // The level at which the volume exists. Either `global` for cluster-wide, + // or `local` for machine level. + // + // Required: true + Scope string `json:"Scope"` + + // Low-level details about the volume, provided by the volume driver. + // Details are returned as a map with key/value pairs: + // `{"key":"value","key2":"value2"}`. + // + // The `Status` field is optional, and is omitted if the volume driver + // does not support this feature. 
+ // + Status map[string]interface{} `json:"Status,omitempty"` + + // usage data + UsageData *UsageData `json:"UsageData,omitempty"` +} + +// UsageData Usage details about the volume. This information is used by the +// `GET /system/df` endpoint, and omitted in other endpoints. +// +// swagger:model UsageData +type UsageData struct { + + // The number of containers referencing this volume. This field + // is set to `-1` if the reference-count is not available. + // + // Required: true + RefCount int64 `json:"RefCount"` + + // Amount of disk space used by the volume (in bytes). This information + // is only available for volumes created with the `"local"` volume + // driver. For volumes created with other volume drivers, this field + // is set to `-1` ("not available") + // + // Required: true + Size int64 `json:"Size"` +} diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_update.go b/vendor/github.com/docker/docker/api/types/volume/volume_update.go new file mode 100644 index 000000000..f958f80a6 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/volume_update.go @@ -0,0 +1,7 @@ +package volume // import "github.com/docker/docker/api/types/volume" + +// UpdateOptions is configuration to update a Volume with. +type UpdateOptions struct { + // Spec is the ClusterVolumeSpec to update the volume to. + Spec *ClusterVolumeSpec `json:"Spec,omitempty"` +} diff --git a/vendor/github.com/docker/docker/client/README.md b/vendor/github.com/docker/docker/client/README.md new file mode 100644 index 000000000..992f18117 --- /dev/null +++ b/vendor/github.com/docker/docker/client/README.md @@ -0,0 +1,35 @@ +# Go client for the Docker Engine API + +The `docker` command uses this package to communicate with the daemon. It can also be used by your own Go applications to do anything the command-line interface does – running containers, pulling images, managing swarms, etc. + +For example, to list running containers (the equivalent of `docker ps`): + +```go +package main + +import ( + "context" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" +) + +func main() { + cli, err := client.NewClientWithOpts(client.FromEnv) + if err != nil { + panic(err) + } + + containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) + if err != nil { + panic(err) + } + + for _, container := range containers { + fmt.Printf("%s %s\n", container.ID[:10], container.Image) + } +} +``` + +[Full documentation is available on GoDoc.](https://godoc.org/github.com/docker/docker/client) diff --git a/vendor/github.com/docker/docker/client/build_cancel.go b/vendor/github.com/docker/docker/client/build_cancel.go new file mode 100644 index 000000000..b76bf366b --- /dev/null +++ b/vendor/github.com/docker/docker/client/build_cancel.go @@ -0,0 +1,16 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" +) + +// BuildCancel requests the daemon to cancel the ongoing build request. 
+func (cli *Client) BuildCancel(ctx context.Context, id string) error { + query := url.Values{} + query.Set("id", id) + + serverResp, err := cli.post(ctx, "/build/cancel", query, nil, nil) + ensureReaderClosed(serverResp) + return err +} diff --git a/vendor/github.com/docker/docker/client/build_prune.go b/vendor/github.com/docker/docker/client/build_prune.go new file mode 100644 index 000000000..397d67cdc --- /dev/null +++ b/vendor/github.com/docker/docker/client/build_prune.go @@ -0,0 +1,45 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/pkg/errors" +) + +// BuildCachePrune requests the daemon to delete unused cache data +func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) { + if err := cli.NewVersionError("1.31", "build prune"); err != nil { + return nil, err + } + + report := types.BuildCachePruneReport{} + + query := url.Values{} + if opts.All { + query.Set("all", "1") + } + query.Set("keep-storage", fmt.Sprintf("%d", opts.KeepStorage)) + filters, err := filters.ToJSON(opts.Filters) + if err != nil { + return nil, errors.Wrap(err, "prune could not marshal filters option") + } + query.Set("filters", filters) + + serverResp, err := cli.post(ctx, "/build/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) + + if err != nil { + return nil, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return nil, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return &report, nil +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_create.go b/vendor/github.com/docker/docker/client/checkpoint_create.go new file mode 100644 index 000000000..921024fe4 --- /dev/null +++ b/vendor/github.com/docker/docker/client/checkpoint_create.go @@ -0,0 +1,14 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types" +) + +// CheckpointCreate creates a checkpoint from the given container with the given name +func (cli *Client) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error { + resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete.go b/vendor/github.com/docker/docker/client/checkpoint_delete.go new file mode 100644 index 000000000..54f55fa76 --- /dev/null +++ b/vendor/github.com/docker/docker/client/checkpoint_delete.go @@ -0,0 +1,20 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// CheckpointDelete deletes the checkpoint with the given name from the given container +func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options types.CheckpointDeleteOptions) error { + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go new file mode 100644 index 000000000..39cfb959f --- /dev/null +++ 
b/vendor/github.com/docker/docker/client/checkpoint_list.go @@ -0,0 +1,28 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" +) + +// CheckpointList returns the checkpoints of the given container in the docker host +func (cli *Client) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) { + var checkpoints []types.Checkpoint + + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return checkpoints, err + } + + err = json.NewDecoder(resp.body).Decode(&checkpoints) + return checkpoints, err +} diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go new file mode 100644 index 000000000..09ea4851f --- /dev/null +++ b/vendor/github.com/docker/docker/client/client.go @@ -0,0 +1,321 @@ +/* +Package client is a Go client for the Docker Engine API. + +For more information about the Engine API, see the documentation: +https://docs.docker.com/engine/api/ + +# Usage + +You use the library by constructing a client object using [NewClientWithOpts] +and calling methods on it. The client can be configured from environment +variables by passing the [FromEnv] option, or configured manually by passing any +of the other available [Opts]. + +For example, to list running containers (the equivalent of "docker ps"): + + package main + + import ( + "context" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + ) + + func main() { + cli, err := client.NewClientWithOpts(client.FromEnv) + if err != nil { + panic(err) + } + + containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) + if err != nil { + panic(err) + } + + for _, container := range containers { + fmt.Printf("%s %s\n", container.ID[:10], container.Image) + } + } +*/ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net" + "net/http" + "net/url" + "path" + "strings" + + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/docker/go-connections/sockets" + "github.com/pkg/errors" +) + +// ErrRedirect is the error returned by checkRedirect when the request is non-GET. +var ErrRedirect = errors.New("unexpected redirect in response") + +// Client is the API client that performs all operations +// against a docker server. +type Client struct { + // scheme sets the scheme for the client + scheme string + // host holds the server address to connect to + host string + // proto holds the client protocol i.e. unix. + proto string + // addr holds the client address. + addr string + // basePath holds the path to prepend to the requests. + basePath string + // client used to send and receive http requests. + client *http.Client + // version of the server to talk to. + version string + // custom http headers configured by users. + customHTTPHeaders map[string]string + // manualOverride is set to true when the version was set by users. + manualOverride bool + + // negotiateVersion indicates if the client should automatically negotiate + // the API version to use when making requests. 
API version negotiation is + // performed on the first request, after which negotiated is set to "true" + // so that subsequent requests do not re-negotiate. + negotiateVersion bool + + // negotiated indicates that API version negotiation took place + negotiated bool +} + +// CheckRedirect specifies the policy for dealing with redirect responses: +// If the request is non-GET return ErrRedirect, otherwise use the last response. +// +// Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308) +// in the client. The Docker client (and by extension docker API client) can be +// made to send a request like POST /containers//start where what would normally +// be in the name section of the URL is empty. This triggers an HTTP 301 from +// the daemon. +// +// In go 1.8 this 301 will be converted to a GET request, and ends up getting +// a 404 from the daemon. This behavior change manifests in the client in that +// before, the 301 was not followed and the client did not generate an error, +// but now results in a message like Error response from daemon: page not found. +func CheckRedirect(req *http.Request, via []*http.Request) error { + if via[0].Method == http.MethodGet { + return http.ErrUseLastResponse + } + return ErrRedirect +} + +// NewClientWithOpts initializes a new API client with a default HTTPClient, and +// default API host and version. It also initializes the custom HTTP headers to +// add to each request. +// +// It takes an optional list of Opt functional arguments, which are applied in +// the order they're provided, which allows modifying the defaults when creating +// the client. For example, the following initializes a client that configures +// itself with values from environment variables (client.FromEnv), and has +// automatic API version negotiation enabled (client.WithAPIVersionNegotiation()). +// +// cli, err := client.NewClientWithOpts( +// client.FromEnv, +// client.WithAPIVersionNegotiation(), +// ) +func NewClientWithOpts(ops ...Opt) (*Client, error) { + client, err := defaultHTTPClient(DefaultDockerHost) + if err != nil { + return nil, err + } + c := &Client{ + host: DefaultDockerHost, + version: api.DefaultVersion, + client: client, + proto: defaultProto, + addr: defaultAddr, + } + + for _, op := range ops { + if err := op(c); err != nil { + return nil, err + } + } + + if c.scheme == "" { + c.scheme = "http" + + tlsConfig := resolveTLSConfig(c.client.Transport) + if tlsConfig != nil { + // TODO(stevvooe): This isn't really the right way to write clients in Go. + // `NewClient` should probably only take an `*http.Client` and work from there. + // Unfortunately, the model of having a host-ish/url-thingy as the connection + // string has us confusing protocol and transport layers. We continue doing + // this to avoid breaking existing clients but this should be addressed. + c.scheme = "https" + } + } + + return c, nil +} + +func defaultHTTPClient(host string) (*http.Client, error) { + hostURL, err := ParseHostURL(host) + if err != nil { + return nil, err + } + transport := &http.Transport{} + _ = sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host) + return &http.Client{ + Transport: transport, + CheckRedirect: CheckRedirect, + }, nil +} + +// Close the transport used by the client +func (cli *Client) Close() error { + if t, ok := cli.client.Transport.(*http.Transport); ok { + t.CloseIdleConnections() + } + return nil +} + +// getAPIPath returns the versioned request path to call the api. 
+// It appends the query parameters to the path if they are not empty. +func (cli *Client) getAPIPath(ctx context.Context, p string, query url.Values) string { + var apiPath string + if cli.negotiateVersion && !cli.negotiated { + cli.NegotiateAPIVersion(ctx) + } + if cli.version != "" { + v := strings.TrimPrefix(cli.version, "v") + apiPath = path.Join(cli.basePath, "/v"+v, p) + } else { + apiPath = path.Join(cli.basePath, p) + } + return (&url.URL{Path: apiPath, RawQuery: query.Encode()}).String() +} + +// ClientVersion returns the API version used by this client. +func (cli *Client) ClientVersion() string { + return cli.version +} + +// NegotiateAPIVersion queries the API and updates the version to match the API +// version. NegotiateAPIVersion downgrades the client's API version to match the +// APIVersion if the ping version is lower than the default version. If the API +// version reported by the server is higher than the maximum version supported +// by the client, it uses the client's maximum version. +// +// If a manual override is in place, either through the "DOCKER_API_VERSION" +// (EnvOverrideAPIVersion) environment variable, or if the client is initialized +// with a fixed version (WithVersion(xx)), no negotiation is performed. +// +// If the API server's ping response does not contain an API version, or if the +// client did not get a successful ping response, it assumes it is connected with +// an old daemon that does not support API version negotiation, in which case it +// downgrades to the latest version of the API before version negotiation was +// added (1.24). +func (cli *Client) NegotiateAPIVersion(ctx context.Context) { + if !cli.manualOverride { + ping, _ := cli.Ping(ctx) + cli.negotiateAPIVersionPing(ping) + } +} + +// NegotiateAPIVersionPing downgrades the client's API version to match the +// APIVersion in the ping response. If the API version in pingResponse is higher +// than the maximum version supported by the client, it uses the client's maximum +// version. +// +// If a manual override is in place, either through the "DOCKER_API_VERSION" +// (EnvOverrideAPIVersion) environment variable, or if the client is initialized +// with a fixed version (WithVersion(xx)), no negotiation is performed. +// +// If the API server's ping response does not contain an API version, we assume +// we are connected with an old daemon without API version negotiation support, +// and downgrade to the latest version of the API before version negotiation was +// added (1.24). +func (cli *Client) NegotiateAPIVersionPing(pingResponse types.Ping) { + if !cli.manualOverride { + cli.negotiateAPIVersionPing(pingResponse) + } +} + +// negotiateAPIVersionPing queries the API and updates the version to match the +// API version from the ping response. +func (cli *Client) negotiateAPIVersionPing(pingResponse types.Ping) { + // default to the latest version before versioning headers existed + if pingResponse.APIVersion == "" { + pingResponse.APIVersion = "1.24" + } + + // if the client is not initialized with a version, start with the latest supported version + if cli.version == "" { + cli.version = api.DefaultVersion + } + + // if server version is lower than the client version, downgrade + if versions.LessThan(pingResponse.APIVersion, cli.version) { + cli.version = pingResponse.APIVersion + } + + // Store the results, so that automatic API version negotiation (if enabled) + // won't be performed on the next request. 
+ if cli.negotiateVersion { + cli.negotiated = true + } +} + +// DaemonHost returns the host address used by the client +func (cli *Client) DaemonHost() string { + return cli.host +} + +// HTTPClient returns a copy of the HTTP client bound to the server +func (cli *Client) HTTPClient() *http.Client { + c := *cli.client + return &c +} + +// ParseHostURL parses a url string, validates the string is a host url, and +// returns the parsed URL +func ParseHostURL(host string) (*url.URL, error) { + protoAddrParts := strings.SplitN(host, "://", 2) + if len(protoAddrParts) == 1 { + return nil, errors.Errorf("unable to parse docker host `%s`", host) + } + + var basePath string + proto, addr := protoAddrParts[0], protoAddrParts[1] + if proto == "tcp" { + parsed, err := url.Parse("tcp://" + addr) + if err != nil { + return nil, err + } + addr = parsed.Host + basePath = parsed.Path + } + return &url.URL{ + Scheme: proto, + Host: addr, + Path: basePath, + }, nil +} + +// Dialer returns a dialer for a raw stream connection, with an HTTP/1.1 header, +// that can be used for proxying the daemon connection. +// +// Used by `docker dial-stdio` (docker/cli#889). +func (cli *Client) Dialer() func(context.Context) (net.Conn, error) { + return func(ctx context.Context) (net.Conn, error) { + if transport, ok := cli.client.Transport.(*http.Transport); ok { + if transport.DialContext != nil && transport.TLSClientConfig == nil { + return transport.DialContext(ctx, cli.proto, cli.addr) + } + } + return fallbackDial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport)) + } +} diff --git a/vendor/github.com/docker/docker/client/client_deprecated.go b/vendor/github.com/docker/docker/client/client_deprecated.go new file mode 100644 index 000000000..9e366ce20 --- /dev/null +++ b/vendor/github.com/docker/docker/client/client_deprecated.go @@ -0,0 +1,27 @@ +package client + +import "net/http" + +// NewClient initializes a new API client for the given host and API version. +// It uses the given http client as transport. +// It also initializes the custom http headers to add to each request. +// +// It won't send any version information if the version number is empty. It is +// highly recommended that you set a version or your client may break if the +// server is upgraded. +// +// Deprecated: use [NewClientWithOpts] passing the [WithHost], [WithVersion], +// [WithHTTPClient] and [WithHTTPHeaders] options. We recommend enabling API +// version negotiation by passing the [WithAPIVersionNegotiation] option instead +// of WithVersion. +func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) { + return NewClientWithOpts(WithHost(host), WithVersion(version), WithHTTPClient(client), WithHTTPHeaders(httpHeaders)) +} + +// NewEnvClient initializes a new API client based on environment variables. +// See FromEnv for a list of support environment variables. +// +// Deprecated: use [NewClientWithOpts] passing the [FromEnv] option. 
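+//
+// A minimal migration sketch, assuming the [FromEnv] and
+// [WithAPIVersionNegotiation] options described above (version negotiation is
+// optional but recommended):
+//
+//	cli, err := client.NewClientWithOpts(
+//		client.FromEnv,
+//		client.WithAPIVersionNegotiation(),
+//	)
+//	if err != nil {
+//		panic(err)
+//	}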
+func NewEnvClient() (*Client, error) { + return NewClientWithOpts(FromEnv) +} diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go new file mode 100644 index 000000000..f0783f708 --- /dev/null +++ b/vendor/github.com/docker/docker/client/client_unix.go @@ -0,0 +1,11 @@ +//go:build linux || freebsd || openbsd || netbsd || darwin || solaris || illumos || dragonfly +// +build linux freebsd openbsd netbsd darwin solaris illumos dragonfly + +package client // import "github.com/docker/docker/client" + +// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST +// (EnvOverrideHost) environment variable is unset or empty. +const DefaultDockerHost = "unix:///var/run/docker.sock" + +const defaultProto = "unix" +const defaultAddr = "/var/run/docker.sock" diff --git a/vendor/github.com/docker/docker/client/client_windows.go b/vendor/github.com/docker/docker/client/client_windows.go new file mode 100644 index 000000000..5abe60457 --- /dev/null +++ b/vendor/github.com/docker/docker/client/client_windows.go @@ -0,0 +1,8 @@ +package client // import "github.com/docker/docker/client" + +// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST +// (EnvOverrideHost) environment variable is unset or empty. +const DefaultDockerHost = "npipe:////./pipe/docker_engine" + +const defaultProto = "npipe" +const defaultAddr = "//./pipe/docker_engine" diff --git a/vendor/github.com/docker/docker/client/config_create.go b/vendor/github.com/docker/docker/client/config_create.go new file mode 100644 index 000000000..f6b1881fc --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_create.go @@ -0,0 +1,25 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" +) + +// ConfigCreate creates a new config. 
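+//
+// A minimal usage sketch, assuming a swarm.ConfigSpec populated with only a
+// name and raw data (hypothetical values shown; error handling elided):
+//
+//	spec := swarm.ConfigSpec{
+//		Annotations: swarm.Annotations{Name: "app-config"},
+//		Data:        []byte("key=value\n"),
+//	}
+//	resp, err := cli.ConfigCreate(ctx, spec)
+//	// resp.ID holds the ID of the created config when err == nil.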
+func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) { + var response types.ConfigCreateResponse + if err := cli.NewVersionError("1.30", "config create"); err != nil { + return response, err + } + resp, err := cli.post(ctx, "/configs/create", nil, config, nil) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go new file mode 100644 index 000000000..9be7882c3 --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_inspect.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/docker/docker/api/types/swarm" +) + +// ConfigInspectWithRaw returns the config information with raw data +func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) { + if id == "" { + return swarm.Config{}, nil, objectNotFoundError{object: "config", id: id} + } + if err := cli.NewVersionError("1.30", "config inspect"); err != nil { + return swarm.Config{}, nil, err + } + resp, err := cli.get(ctx, "/configs/"+id, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return swarm.Config{}, nil, err + } + + body, err := io.ReadAll(resp.body) + if err != nil { + return swarm.Config{}, nil, err + } + + var config swarm.Config + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&config) + + return config, body, err +} diff --git a/vendor/github.com/docker/docker/client/config_list.go b/vendor/github.com/docker/docker/client/config_list.go new file mode 100644 index 000000000..565acc6e2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_list.go @@ -0,0 +1,38 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" +) + +// ConfigList returns the list of configs. +func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) { + if err := cli.NewVersionError("1.30", "config list"); err != nil { + return nil, err + } + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/configs", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, err + } + + var configs []swarm.Config + err = json.NewDecoder(resp.body).Decode(&configs) + return configs, err +} diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go new file mode 100644 index 000000000..24b94e9c1 --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_remove.go @@ -0,0 +1,13 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// ConfigRemove removes a config. 
+func (cli *Client) ConfigRemove(ctx context.Context, id string) error {
+	if err := cli.NewVersionError("1.30", "config remove"); err != nil {
+		return err
+	}
+	resp, err := cli.delete(ctx, "/configs/"+id, nil, nil)
+	defer ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/github.com/docker/docker/client/config_update.go b/vendor/github.com/docker/docker/client/config_update.go
new file mode 100644
index 000000000..1ac298543
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/config_update.go
@@ -0,0 +1,20 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+	"context"
+	"net/url"
+
+	"github.com/docker/docker/api/types/swarm"
+)
+
+// ConfigUpdate attempts to update a config
+func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error {
+	if err := cli.NewVersionError("1.30", "config update"); err != nil {
+		return err
+	}
+	query := url.Values{}
+	query.Set("version", version.String())
+	resp, err := cli.post(ctx, "/configs/"+id+"/update", query, config, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go
new file mode 100644
index 000000000..ba92117d3
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_attach.go
@@ -0,0 +1,59 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+	"context"
+	"net/url"
+
+	"github.com/docker/docker/api/types"
+)
+
+// ContainerAttach attaches a connection to a container in the server.
+// It returns a types.HijackedResponse with the hijacked connection
+// and a reader to get output. It's up to the caller to close
+// the hijacked connection by calling types.HijackedResponse.Close.
+//
+// The stream format on the response will be in one of two formats:
+//
+// If the container is using a TTY, there is only a single stream (stdout), and
+// data is copied directly from the container output stream, no extra
+// multiplexing or headers.
+//
+// If the container is *not* using a TTY, streams for stdout and stderr are
+// multiplexed.
+// The format of the multiplexed stream is as follows:
+//
+//	[8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT}
+//
+// STREAM_TYPE can be 1 for stdout and 2 for stderr
+//
+// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian.
+// This is the size of OUTPUT.
+//
+// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this
+// stream.
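+//
+// A minimal demultiplexing sketch for the non-TTY case, assuming os.Stdout and
+// os.Stderr as destinations (error handling elided):
+//
+//	resp, err := cli.ContainerAttach(ctx, containerID, options)
+//	if err != nil {
+//		return err
+//	}
+//	defer resp.Close()
+//	_, err = stdcopy.StdCopy(os.Stdout, os.Stderr, resp.Reader)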
+func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) { + query := url.Values{} + if options.Stream { + query.Set("stream", "1") + } + if options.Stdin { + query.Set("stdin", "1") + } + if options.Stdout { + query.Set("stdout", "1") + } + if options.Stderr { + query.Set("stderr", "1") + } + if options.DetachKeys != "" { + query.Set("detachKeys", options.DetachKeys) + } + if options.Logs { + query.Set("logs", "1") + } + + headers := map[string][]string{ + "Content-Type": {"text/plain"}, + } + return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers) +} diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go new file mode 100644 index 000000000..cd7f76346 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_commit.go @@ -0,0 +1,55 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "errors" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ContainerCommit applies changes to a container and creates a new tagged image. +func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) { + var repository, tag string + if options.Reference != "" { + ref, err := reference.ParseNormalizedNamed(options.Reference) + if err != nil { + return types.IDResponse{}, err + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return types.IDResponse{}, errors.New("refusing to create a tag with a digest reference") + } + ref = reference.TagNameOnly(ref) + + if tagged, ok := ref.(reference.Tagged); ok { + tag = tagged.Tag() + } + repository = reference.FamiliarName(ref) + } + + query := url.Values{} + query.Set("container", container) + query.Set("repo", repository) + query.Set("tag", tag) + query.Set("comment", options.Comment) + query.Set("author", options.Author) + for _, change := range options.Changes { + query.Add("changes", change) + } + if !options.Pause { + query.Set("pause", "0") + } + + var response types.IDResponse + resp, err := cli.post(ctx, "/commit", query, options.Config, nil) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/docker/docker/client/container_copy.go new file mode 100644 index 000000000..883be7fa3 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_copy.go @@ -0,0 +1,93 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types" +) + +// ContainerStatPath returns stat information about a path inside the container filesystem. +func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) { + query := url.Values{} + query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. 
+ + urlStr := "/containers/" + containerID + "/archive" + response, err := cli.head(ctx, urlStr, query, nil) + defer ensureReaderClosed(response) + if err != nil { + return types.ContainerPathStat{}, err + } + return getContainerPathStatFromHeader(response.header) +} + +// CopyToContainer copies content into the container filesystem. +// Note that `content` must be a Reader for a TAR archive +func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath string, content io.Reader, options types.CopyToContainerOptions) error { + query := url.Values{} + query.Set("path", filepath.ToSlash(dstPath)) // Normalize the paths used in the API. + // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. + if !options.AllowOverwriteDirWithFile { + query.Set("noOverwriteDirNonDir", "true") + } + + if options.CopyUIDGID { + query.Set("copyUIDGID", "true") + } + + apiPath := "/containers/" + containerID + "/archive" + + response, err := cli.putRaw(ctx, apiPath, query, content, nil) + defer ensureReaderClosed(response) + if err != nil { + return err + } + + return nil +} + +// CopyFromContainer gets the content from the container and returns it as a Reader +// for a TAR archive to manipulate it in the host. It's up to the caller to close the reader. +func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { + query := make(url.Values, 1) + query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. + + apiPath := "/containers/" + containerID + "/archive" + response, err := cli.get(ctx, apiPath, query, nil) + if err != nil { + return nil, types.ContainerPathStat{}, err + } + + // In order to get the copy behavior right, we need to know information + // about both the source and the destination. The response headers include + // stat info about the source that we can use in deciding exactly how to + // copy it locally. Along with the stat info about the local destination, + // we have everything we need to handle the multiple possibilities there + // can be when copying a file/dir from one location to another file/dir. 
+ stat, err := getContainerPathStatFromHeader(response.header) + if err != nil { + return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err) + } + return response.body, stat, err +} + +func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) { + var stat types.ContainerPathStat + + encodedStat := header.Get("X-Docker-Container-Path-Stat") + statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat)) + + err := json.NewDecoder(statDecoder).Decode(&stat) + if err != nil { + err = fmt.Errorf("unable to decode container path stat header: %s", err) + } + + return stat, err +} diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go new file mode 100644 index 000000000..f82420b67 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_create.go @@ -0,0 +1,83 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + "path" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/versions" + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +type configWrapper struct { + *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig +} + +// ContainerCreate creates a new container based on the given configuration. +// It can be associated with a name, but it's not mandatory. +func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string) (container.CreateResponse, error) { + var response container.CreateResponse + + if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil { + return response, err + } + if err := cli.NewVersionError("1.41", "specify container image platform"); platform != nil && err != nil { + return response, err + } + + if hostConfig != nil { + if versions.LessThan(cli.ClientVersion(), "1.25") { + // When using API 1.24 and under, the client is responsible for removing the container + hostConfig.AutoRemove = false + } + if versions.GreaterThanOrEqualTo(cli.ClientVersion(), "1.42") || versions.LessThan(cli.ClientVersion(), "1.40") { + // KernelMemory was added in API 1.40, and deprecated in API 1.42 + hostConfig.KernelMemory = 0 + } + if platform != nil && platform.OS == "linux" && versions.LessThan(cli.ClientVersion(), "1.42") { + // When using API under 1.42, the Linux daemon doesn't respect the ConsoleSize + hostConfig.ConsoleSize = [2]uint{0, 0} + } + } + + query := url.Values{} + if p := formatPlatform(platform); p != "" { + query.Set("platform", p) + } + + if containerName != "" { + query.Set("name", containerName) + } + + body := configWrapper{ + Config: config, + HostConfig: hostConfig, + NetworkingConfig: networkingConfig, + } + + serverResp, err := cli.post(ctx, "/containers/create", query, body, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return response, err + } + + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} + +// formatPlatform returns a formatted string representing platform (e.g. linux/arm/v7). +// +// Similar to containerd's platforms.Format(), but does allow components to be +// omitted (e.g. 
pass "architecture" only, without "os": +// https://github.com/containerd/containerd/blob/v1.5.2/platforms/platforms.go#L243-L263 +func formatPlatform(platform *specs.Platform) string { + if platform == nil { + return "" + } + return path.Join(platform.OS, platform.Architecture, platform.Variant) +} diff --git a/vendor/github.com/docker/docker/client/container_diff.go b/vendor/github.com/docker/docker/client/container_diff.go new file mode 100644 index 000000000..29dac8491 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_diff.go @@ -0,0 +1,23 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/container" +) + +// ContainerDiff shows differences in a container filesystem since it was started. +func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]container.ContainerChangeResponseItem, error) { + var changes []container.ContainerChangeResponseItem + + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return changes, err + } + + err = json.NewDecoder(serverResp.body).Decode(&changes) + return changes, err +} diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go new file mode 100644 index 000000000..6a2cb006f --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_exec.go @@ -0,0 +1,66 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" +) + +// ContainerExecCreate creates a new exec configuration to run an exec process. +func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) { + var response types.IDResponse + + if err := cli.NewVersionError("1.25", "env"); len(config.Env) != 0 && err != nil { + return response, err + } + if versions.LessThan(cli.ClientVersion(), "1.42") { + config.ConsoleSize = nil + } + + resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + err = json.NewDecoder(resp.body).Decode(&response) + return response, err +} + +// ContainerExecStart starts an exec process already created in the docker host. +func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error { + if versions.LessThan(cli.ClientVersion(), "1.42") { + config.ConsoleSize = nil + } + resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil) + ensureReaderClosed(resp) + return err +} + +// ContainerExecAttach attaches a connection to an exec process in the server. +// It returns a types.HijackedConnection with the hijacked connection +// and the a reader to get output. It's up to the called to close +// the hijacked connection by calling types.HijackedResponse.Close. 
+func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) { + if versions.LessThan(cli.ClientVersion(), "1.42") { + config.ConsoleSize = nil + } + headers := map[string][]string{ + "Content-Type": {"application/json"}, + } + return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers) +} + +// ContainerExecInspect returns information about a specific exec process on the docker host. +func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) { + var response types.ContainerExecInspect + resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_export.go b/vendor/github.com/docker/docker/client/container_export.go new file mode 100644 index 000000000..d0c0a5cba --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_export.go @@ -0,0 +1,19 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" +) + +// ContainerExport retrieves the raw contents of a container +// and returns them as an io.ReadCloser. It's up to the caller +// to close the stream. +func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) { + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) + if err != nil { + return nil, err + } + + return serverResp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/container_inspect.go b/vendor/github.com/docker/docker/client/container_inspect.go new file mode 100644 index 000000000..d48f0d3a6 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_inspect.go @@ -0,0 +1,53 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ContainerInspect returns the container information. +func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { + if containerID == "" { + return types.ContainerJSON{}, objectNotFoundError{object: "container", id: containerID} + } + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return types.ContainerJSON{}, err + } + + var response types.ContainerJSON + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} + +// ContainerInspectWithRaw returns the container information and its raw representation. 
+func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) { + if containerID == "" { + return types.ContainerJSON{}, nil, objectNotFoundError{object: "container", id: containerID} + } + query := url.Values{} + if getSize { + query.Set("size", "1") + } + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return types.ContainerJSON{}, nil, err + } + + body, err := io.ReadAll(serverResp.body) + if err != nil { + return types.ContainerJSON{}, nil, err + } + + var response types.ContainerJSON + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/container_kill.go b/vendor/github.com/docker/docker/client/container_kill.go new file mode 100644 index 000000000..7c9529f1e --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_kill.go @@ -0,0 +1,18 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" +) + +// ContainerKill terminates the container process but does not remove the container from the docker host. +func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { + query := url.Values{} + if signal != "" { + query.Set("signal", signal) + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go new file mode 100644 index 000000000..bd491b3db --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_list.go @@ -0,0 +1,57 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// ContainerList returns the list of containers in the docker host. 
+func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { + query := url.Values{} + + if options.All { + query.Set("all", "1") + } + + if options.Limit > 0 { + query.Set("limit", strconv.Itoa(options.Limit)) + } + + if options.Since != "" { + query.Set("since", options.Since) + } + + if options.Before != "" { + query.Set("before", options.Before) + } + + if options.Size { + query.Set("size", "1") + } + + if options.Filters.Len() > 0 { + //nolint:staticcheck // ignore SA1019 for old code + filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) + + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/containers/json", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, err + } + + var containers []types.Container + err = json.NewDecoder(resp.body).Decode(&containers) + return containers, err +} diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go new file mode 100644 index 000000000..9bdf2b0fa --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_logs.go @@ -0,0 +1,80 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "time" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" + "github.com/pkg/errors" +) + +// ContainerLogs returns the logs generated by a container in an io.ReadCloser. +// It's up to the caller to close the stream. +// +// The stream format on the response will be in one of two formats: +// +// If the container is using a TTY, there is only a single stream (stdout), and +// data is copied directly from the container output stream, no extra +// multiplexing or headers. +// +// If the container is *not* using a TTY, streams for stdout and stderr are +// multiplexed. +// The format of the multiplexed stream is as follows: +// +// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} +// +// STREAM_TYPE can be 1 for stdout and 2 for stderr +// +// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian. +// This is the size of OUTPUT. +// +// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this +// stream. 
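A short caller-side sketch (not part of the vendored file) showing how the TTY distinction described above affects log consumption; it assumes the inspected container's Config.Tty field reflects whether a TTY was allocated, and uses a placeholder tail size of 100 lines:

package main

import (
    "context"
    "io"
    "os"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/client"
    "github.com/docker/docker/pkg/stdcopy"
)

// printLogs fetches the last 100 log lines of a container and writes them to
// the local stdout/stderr, demultiplexing only when the container has no TTY.
func printLogs(ctx context.Context, cli *client.Client, containerID string) error {
    logs, err := cli.ContainerLogs(ctx, containerID, types.ContainerLogsOptions{
        ShowStdout: true,
        ShowStderr: true,
        Tail:       "100",
    })
    if err != nil {
        return err
    }
    defer logs.Close()

    info, err := cli.ContainerInspect(ctx, containerID)
    if err != nil {
        return err
    }
    if info.Config.Tty {
        // Single raw stream: copy it through unchanged.
        _, err = io.Copy(os.Stdout, logs)
    } else {
        // Multiplexed stream: split it using the frame headers described above.
        _, err = stdcopy.StdCopy(os.Stdout, os.Stderr, logs)
    }
    return err
}
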
+func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, errors.Wrap(err, `invalid value for "since"`) + } + query.Set("since", ts) + } + + if options.Until != "" { + ts, err := timetypes.GetTimestamp(options.Until, time.Now()) + if err != nil { + return nil, errors.Wrap(err, `invalid value for "until"`) + } + query.Set("until", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/container_pause.go b/vendor/github.com/docker/docker/client/container_pause.go new file mode 100644 index 000000000..5e7271a37 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_pause.go @@ -0,0 +1,10 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// ContainerPause pauses the main process of a given container without terminating it. +func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { + resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_prune.go b/vendor/github.com/docker/docker/client/container_prune.go new file mode 100644 index 000000000..04383deaa --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_prune.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// ContainersPrune requests the daemon to delete unused data +func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) { + var report types.ContainersPruneReport + + if err := cli.NewVersionError("1.25", "container prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return report, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go new file mode 100644 index 000000000..c21de609b --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_remove.go @@ -0,0 +1,27 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ContainerRemove kills and removes a container from the docker host. 
+func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error { + query := url.Values{} + if options.RemoveVolumes { + query.Set("v", "1") + } + if options.RemoveLinks { + query.Set("link", "1") + } + + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) + defer ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_rename.go b/vendor/github.com/docker/docker/client/container_rename.go new file mode 100644 index 000000000..240fdf552 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_rename.go @@ -0,0 +1,15 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" +) + +// ContainerRename changes the name of a given container. +func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error { + query := url.Values{} + query.Set("name", newContainerName) + resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_resize.go b/vendor/github.com/docker/docker/client/container_resize.go new file mode 100644 index 000000000..a9d4c0c79 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_resize.go @@ -0,0 +1,29 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" +) + +// ContainerResize changes the size of the tty for a container. +func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error { + return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width) +} + +// ContainerExecResize changes the size of the tty for an exec process running inside a container. +func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error { + return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width) +} + +func (cli *Client) resize(ctx context.Context, basePath string, height, width uint) error { + query := url.Values{} + query.Set("h", strconv.Itoa(int(height))) + query.Set("w", strconv.Itoa(int(width))) + + resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go new file mode 100644 index 000000000..1e0ad9998 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_restart.go @@ -0,0 +1,26 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions" +) + +// ContainerRestart stops and starts a container again. +// It makes the daemon wait for the container to be up again for +// a specific amount of time, given the timeout. 
+func (cli *Client) ContainerRestart(ctx context.Context, containerID string, options container.StopOptions) error { + query := url.Values{} + if options.Timeout != nil { + query.Set("t", strconv.Itoa(*options.Timeout)) + } + if options.Signal != "" && versions.GreaterThanOrEqualTo(cli.version, "1.42") { + query.Set("signal", options.Signal) + } + resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_start.go b/vendor/github.com/docker/docker/client/container_start.go new file mode 100644 index 000000000..c2e0b15dc --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_start.go @@ -0,0 +1,23 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ContainerStart sends a request to the docker daemon to start a container. +func (cli *Client) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error { + query := url.Values{} + if len(options.CheckpointID) != 0 { + query.Set("checkpoint", options.CheckpointID) + } + if len(options.CheckpointDir) != 0 { + query.Set("checkpoint-dir", options.CheckpointDir) + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go new file mode 100644 index 000000000..0a6488dde --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_stats.go @@ -0,0 +1,42 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ContainerStats returns near realtime stats for a given container. +// It's up to the caller to close the io.ReadCloser returned. +func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { + query := url.Values{} + query.Set("stream", "0") + if stream { + query.Set("stream", "1") + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) + if err != nil { + return types.ContainerStats{}, err + } + + osType := getDockerOS(resp.header.Get("Server")) + return types.ContainerStats{Body: resp.body, OSType: osType}, err +} + +// ContainerStatsOneShot gets a single stat entry from a container. 
+// It differs from `ContainerStats` in that the API should not wait to prime the stats +func (cli *Client) ContainerStatsOneShot(ctx context.Context, containerID string) (types.ContainerStats, error) { + query := url.Values{} + query.Set("stream", "0") + query.Set("one-shot", "1") + + resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) + if err != nil { + return types.ContainerStats{}, err + } + + osType := getDockerOS(resp.header.Get("Server")) + return types.ContainerStats{Body: resp.body, OSType: osType}, err +} diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go new file mode 100644 index 000000000..2a43ce227 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_stop.go @@ -0,0 +1,30 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions" +) + +// ContainerStop stops a container. In case the container fails to stop +// gracefully within a time frame specified by the timeout argument, +// it is forcefully terminated (killed). +// +// If the timeout is nil, the container's StopTimeout value is used, if set, +// otherwise the engine default. A negative timeout value can be specified, +// meaning no timeout, i.e. no forceful termination is performed. +func (cli *Client) ContainerStop(ctx context.Context, containerID string, options container.StopOptions) error { + query := url.Values{} + if options.Timeout != nil { + query.Set("t", strconv.Itoa(*options.Timeout)) + } + if options.Signal != "" && versions.GreaterThanOrEqualTo(cli.version, "1.42") { + query.Set("signal", options.Signal) + } + resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_top.go b/vendor/github.com/docker/docker/client/container_top.go new file mode 100644 index 000000000..a5b78999b --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_top.go @@ -0,0 +1,28 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + "strings" + + "github.com/docker/docker/api/types/container" +) + +// ContainerTop shows process information from within a container. 
+func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (container.ContainerTopOKBody, error) { + var response container.ContainerTopOKBody + query := url.Values{} + if len(arguments) > 0 { + query.Set("ps_args", strings.Join(arguments, " ")) + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_unpause.go b/vendor/github.com/docker/docker/client/container_unpause.go new file mode 100644 index 000000000..1d8f87316 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_unpause.go @@ -0,0 +1,10 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// ContainerUnpause resumes the process execution within a container +func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error { + resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_update.go b/vendor/github.com/docker/docker/client/container_update.go new file mode 100644 index 000000000..bf68a5300 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_update.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types/container" +) + +// ContainerUpdate updates the resources of a container. +func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) { + var response container.ContainerUpdateOKBody + serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return response, err + } + + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go new file mode 100644 index 000000000..2375eb1e8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_wait.go @@ -0,0 +1,104 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "io" + "net/url" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions" +) + +const containerWaitErrorMsgLimit = 2 * 1024 /* Max: 2KiB */ + +// ContainerWait waits until the specified container is in a certain state +// indicated by the given condition, either "not-running" (default), +// "next-exit", or "removed". +// +// If this client's API version is before 1.30, condition is ignored and +// ContainerWait will return immediately with the two channels, as the server +// will wait as if the condition were "not-running". +// +// If this client's API version is at least 1.30, ContainerWait blocks until +// the request has been acknowledged by the server (with a response header), +// then returns two channels on which the caller can wait for the exit status +// of the container or an error if there was a problem either beginning the +// wait request or in getting the response. 
This allows the caller to +// synchronize ContainerWait with other calls, such as specifying a +// "next-exit" condition before issuing a ContainerStart request. +func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) { + if versions.LessThan(cli.ClientVersion(), "1.30") { + return cli.legacyContainerWait(ctx, containerID) + } + + resultC := make(chan container.WaitResponse) + errC := make(chan error, 1) + + query := url.Values{} + if condition != "" { + query.Set("condition", string(condition)) + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil) + if err != nil { + defer ensureReaderClosed(resp) + errC <- err + return resultC, errC + } + + go func() { + defer ensureReaderClosed(resp) + + body := resp.body + responseText := bytes.NewBuffer(nil) + stream := io.TeeReader(body, responseText) + + var res container.WaitResponse + if err := json.NewDecoder(stream).Decode(&res); err != nil { + // NOTE(nicks): The /wait API does not work well with HTTP proxies. + // At any time, the proxy could cut off the response stream. + // + // But because the HTTP status has already been written, the proxy's + // only option is to write a plaintext error message. + // + // If there's a JSON parsing error, read the real error message + // off the body and send it to the client. + _, _ = io.ReadAll(io.LimitReader(stream, containerWaitErrorMsgLimit)) + errC <- errors.New(responseText.String()) + return + } + + resultC <- res + }() + + return resultC, errC +} + +// legacyContainerWait returns immediately and doesn't have an option to wait +// until the container is removed. +func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.WaitResponse, <-chan error) { + resultC := make(chan container.WaitResponse) + errC := make(chan error) + + go func() { + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) + if err != nil { + errC <- err + return + } + defer ensureReaderClosed(resp) + + var res container.WaitResponse + if err := json.NewDecoder(resp.body).Decode(&res); err != nil { + errC <- err + return + } + + resultC <- res + }() + + return resultC, errC +} diff --git a/vendor/github.com/docker/docker/client/disk_usage.go b/vendor/github.com/docker/docker/client/disk_usage.go new file mode 100644 index 000000000..ba0d92e9e --- /dev/null +++ b/vendor/github.com/docker/docker/client/disk_usage.go @@ -0,0 +1,33 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + "github.com/docker/docker/api/types" +) + +// DiskUsage requests the current data usage from the daemon +func (cli *Client) DiskUsage(ctx context.Context, options types.DiskUsageOptions) (types.DiskUsage, error) { + var query url.Values + if len(options.Types) > 0 { + query = url.Values{} + for _, t := range options.Types { + query.Add("type", string(t)) + } + } + + serverResp, err := cli.get(ctx, "/system/df", query, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return types.DiskUsage{}, err + } + + var du types.DiskUsage + if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil { + return types.DiskUsage{}, fmt.Errorf("Error retrieving disk usage: %v", err) + } + return du, nil +} diff --git a/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/docker/docker/client/distribution_inspect.go new file mode 100644 index 
000000000..7f36c99a0 --- /dev/null +++ b/vendor/github.com/docker/docker/client/distribution_inspect.go @@ -0,0 +1,38 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + registrytypes "github.com/docker/docker/api/types/registry" +) + +// DistributionInspect returns the image digest with the full manifest. +func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registrytypes.DistributionInspect, error) { + // Contact the registry to retrieve digest and platform information + var distributionInspect registrytypes.DistributionInspect + if image == "" { + return distributionInspect, objectNotFoundError{object: "distribution", id: image} + } + + if err := cli.NewVersionError("1.30", "distribution inspect"); err != nil { + return distributionInspect, err + } + var headers map[string][]string + + if encodedRegistryAuth != "" { + headers = map[string][]string{ + "X-Registry-Auth": {encodedRegistryAuth}, + } + } + + resp, err := cli.get(ctx, "/distribution/"+image+"/json", url.Values{}, headers) + defer ensureReaderClosed(resp) + if err != nil { + return distributionInspect, err + } + + err = json.NewDecoder(resp.body).Decode(&distributionInspect) + return distributionInspect, err +} diff --git a/vendor/github.com/docker/docker/client/envvars.go b/vendor/github.com/docker/docker/client/envvars.go new file mode 100644 index 000000000..61dd45c1d --- /dev/null +++ b/vendor/github.com/docker/docker/client/envvars.go @@ -0,0 +1,90 @@ +package client // import "github.com/docker/docker/client" + +const ( + // EnvOverrideHost is the name of the environment variable that can be used + // to override the default host to connect to (DefaultDockerHost). + // + // This env-var is read by FromEnv and WithHostFromEnv and when set to a + // non-empty value, takes precedence over the default host (which is platform + // specific), or any host already set. + EnvOverrideHost = "DOCKER_HOST" + + // EnvOverrideAPIVersion is the name of the environment variable that can + // be used to override the API version to use. Value should be + // formatted as MAJOR.MINOR, for example, "1.19". + // + // This env-var is read by FromEnv and WithVersionFromEnv and when set to a + // non-empty value, takes precedence over API version negotiation. + // + // This environment variable should be used for debugging purposes only, as + // it can set the client to use an incompatible (or invalid) API version. + EnvOverrideAPIVersion = "DOCKER_API_VERSION" + + // EnvOverrideCertPath is the name of the environment variable that can be + // used to specify the directory from which to load the TLS certificates + // (ca.pem, cert.pem, key.pem) from. These certificates are used to configure + // the Client for a TCP connection protected by TLS client authentication. + // + // TLS certificate verification is enabled by default if the Client is configured + // to use a TLS connection. Refer to EnvTLSVerify below to learn how to + // disable verification for testing purposes. + // + // WARNING: Access to the remote API is equivalent to root access to the + // host where the daemon runs. Do not expose the API without protection, + // and only if needed. Make sure you are familiar with the "daemon attack + // surface" (https://docs.docker.com/go/attack-surface/). + // + // For local access to the API, it is recommended to connect with the daemon + // using the default local socket connection (on Linux), or the named pipe + // (on Windows). 
+ // + // If you need to access the API of a remote daemon, consider using an SSH + // (ssh://) connection, which is easier to set up, and requires no additional + // configuration if the host is accessible using ssh. + // + // If you cannot use the alternatives above, and you must expose the API over + // a TCP connection, refer to https://docs.docker.com/engine/security/protect-access/ + // to learn how to configure the daemon and client to use a TCP connection + // with TLS client authentication. Make sure you know the differences between + // a regular TLS connection and a TLS connection protected by TLS client + // authentication, and verify that the API cannot be accessed by other clients. + EnvOverrideCertPath = "DOCKER_CERT_PATH" + + // EnvTLSVerify is the name of the environment variable that can be used to + // enable or disable TLS certificate verification. When set to a non-empty + // value, TLS certificate verification is enabled, and the client is configured + // to use a TLS connection, using certificates from the default directories + // (within `~/.docker`); refer to EnvOverrideCertPath above for additional + // details. + // + // WARNING: Access to the remote API is equivalent to root access to the + // host where the daemon runs. Do not expose the API without protection, + // and only if needed. Make sure you are familiar with the "daemon attack + // surface" (https://docs.docker.com/go/attack-surface/). + // + // Before setting up your client and daemon to use a TCP connection with TLS + // client authentication, consider using one of the alternatives mentioned + // in EnvOverrideCertPath above. + // + // Disabling TLS certificate verification (for testing purposes) + // + // TLS certificate verification is enabled by default if the Client is configured + // to use a TLS connection, and it is highly recommended to keep verification + // enabled to prevent machine-in-the-middle attacks. Refer to the documentation + // at https://docs.docker.com/engine/security/protect-access/ and pages linked + // from that page to learn how to configure the daemon and client to use a + // TCP connection with TLS client authentication enabled. + // + // Set the "DOCKER_TLS_VERIFY" environment to an empty string ("") to + // disable TLS certificate verification. Disabling verification is insecure, + // so should only be done for testing purposes. From the Go documentation + // (https://pkg.go.dev/crypto/tls#Config): + // + // InsecureSkipVerify controls whether a client verifies the server's + // certificate chain and host name. If InsecureSkipVerify is true, crypto/tls + // accepts any certificate presented by the server and any host name in that + // certificate. In this mode, TLS is susceptible to machine-in-the-middle + // attacks unless custom verification is used. This should be used only for + // testing or in combination with VerifyConnection or VerifyPeerCertificate. + EnvTLSVerify = "DOCKER_TLS_VERIFY" +) diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go new file mode 100644 index 000000000..e5a8a865f --- /dev/null +++ b/vendor/github.com/docker/docker/client/errors.go @@ -0,0 +1,93 @@ +package client // import "github.com/docker/docker/client" + +import ( + "fmt" + + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/errdefs" + "github.com/pkg/errors" +) + +// errConnectionFailed implements an error returned when connection failed. 
+type errConnectionFailed struct { + host string +} + +// Error returns a string representation of an errConnectionFailed +func (err errConnectionFailed) Error() string { + if err.host == "" { + return "Cannot connect to the Docker daemon. Is the docker daemon running on this host?" + } + return fmt.Sprintf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", err.host) +} + +// IsErrConnectionFailed returns true if the error is caused by connection failed. +func IsErrConnectionFailed(err error) bool { + return errors.As(err, &errConnectionFailed{}) +} + +// ErrorConnectionFailed returns an error with host in the error message when connection to docker daemon failed. +func ErrorConnectionFailed(host string) error { + return errConnectionFailed{host: host} +} + +// Deprecated: use the errdefs.NotFound() interface instead. Kept for backward compatibility +type notFound interface { + error + NotFound() bool +} + +// IsErrNotFound returns true if the error is a NotFound error, which is returned +// by the API when some object is not found. +func IsErrNotFound(err error) bool { + if errdefs.IsNotFound(err) { + return true + } + var e notFound + return errors.As(err, &e) +} + +type objectNotFoundError struct { + object string + id string +} + +func (e objectNotFoundError) NotFound() {} + +func (e objectNotFoundError) Error() string { + return fmt.Sprintf("Error: No such %s: %s", e.object, e.id) +} + +// IsErrUnauthorized returns true if the error is caused +// when a remote registry authentication fails +// +// Deprecated: use errdefs.IsUnauthorized +func IsErrUnauthorized(err error) bool { + return errdefs.IsUnauthorized(err) +} + +type pluginPermissionDenied struct { + name string +} + +func (e pluginPermissionDenied) Error() string { + return "Permission denied while installing plugin " + e.name +} + +// IsErrNotImplemented returns true if the error is a NotImplemented error. +// This is returned by the API when a requested feature has not been +// implemented. +// +// Deprecated: use errdefs.IsNotImplemented +func IsErrNotImplemented(err error) bool { + return errdefs.IsNotImplemented(err) +} + +// NewVersionError returns an error if the APIVersion required +// if less than the current supported version +func (cli *Client) NewVersionError(APIrequired, feature string) error { + if cli.version != "" && versions.LessThan(cli.version, APIrequired) { + return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version) + } + return nil +} diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go new file mode 100644 index 000000000..a9c48a928 --- /dev/null +++ b/vendor/github.com/docker/docker/client/events.go @@ -0,0 +1,101 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + timetypes "github.com/docker/docker/api/types/time" +) + +// Events returns a stream of events in the daemon. It's up to the caller to close the stream +// by cancelling the context. Once the stream has been completely read an io.EOF error will +// be sent over the error channel. If an error is sent all processing will be stopped. It's up +// to the caller to reopen the stream in the event of an error by reinvoking this method. 
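To illustrate the streaming behaviour described above (not part of the vendored file), a minimal consumer might select over the two returned channels, treating io.EOF as a normally completed stream and any other error as a signal to resubscribe; the "type=container" filter is just an example:

package main

import (
    "context"
    "errors"
    "io"
    "log"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/filters"
    "github.com/docker/docker/client"
)

// watchContainerEvents subscribes to container events and logs each one until
// the context is cancelled or the event stream ends.
func watchContainerEvents(ctx context.Context, cli *client.Client) error {
    msgs, errs := cli.Events(ctx, types.EventsOptions{
        Filters: filters.NewArgs(filters.Arg("type", "container")),
    })
    for {
        select {
        case msg := <-msgs:
            log.Printf("container %s: %s", msg.Actor.ID, msg.Action)
        case err := <-errs:
            if errors.Is(err, io.EOF) {
                // Stream read to completion; call Events again to resubscribe.
                return nil
            }
            return err
        case <-ctx.Done():
            return ctx.Err()
        }
    }
}
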
+func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) { + messages := make(chan events.Message) + errs := make(chan error, 1) + + started := make(chan struct{}) + go func() { + defer close(errs) + + query, err := buildEventsQueryParams(cli.version, options) + if err != nil { + close(started) + errs <- err + return + } + + resp, err := cli.get(ctx, "/events", query, nil) + if err != nil { + close(started) + errs <- err + return + } + defer resp.body.Close() + + decoder := json.NewDecoder(resp.body) + + close(started) + for { + select { + case <-ctx.Done(): + errs <- ctx.Err() + return + default: + var event events.Message + if err := decoder.Decode(&event); err != nil { + errs <- err + return + } + + select { + case messages <- event: + case <-ctx.Done(): + errs <- ctx.Err() + return + } + } + } + }() + <-started + + return messages, errs +} + +func buildEventsQueryParams(cliVersion string, options types.EventsOptions) (url.Values, error) { + query := url.Values{} + ref := time.Now() + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, ref) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Until != "" { + ts, err := timetypes.GetTimestamp(options.Until, ref) + if err != nil { + return nil, err + } + query.Set("until", ts) + } + + if options.Filters.Len() > 0 { + //nolint:staticcheck // ignore SA1019 for old code + filterJSON, err := filters.ToParamWithVersion(cliVersion, options.Filters) + if err != nil { + return nil, err + } + query.Set("filters", filterJSON) + } + + return query, nil +} diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go new file mode 100644 index 000000000..6bdacab10 --- /dev/null +++ b/vendor/github.com/docker/docker/client/hijack.go @@ -0,0 +1,153 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bufio" + "context" + "crypto/tls" + "fmt" + "net" + "net/http" + "net/http/httputil" + "net/url" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/docker/go-connections/sockets" + "github.com/pkg/errors" +) + +// postHijacked sends a POST request and hijacks the connection. +func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) { + bodyEncoded, err := encodeData(body) + if err != nil { + return types.HijackedResponse{}, err + } + + apiPath := cli.getAPIPath(ctx, path, query) + req, err := http.NewRequest(http.MethodPost, apiPath, bodyEncoded) + if err != nil { + return types.HijackedResponse{}, err + } + req = cli.addHeaders(req, headers) + + conn, mediaType, err := cli.setupHijackConn(ctx, req, "tcp") + if err != nil { + return types.HijackedResponse{}, err + } + + return types.NewHijackedResponse(conn, mediaType), err +} + +// DialHijack returns a hijacked connection with negotiated protocol proto. +func (cli *Client) DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) { + req, err := http.NewRequest(http.MethodPost, url, nil) + if err != nil { + return nil, err + } + req = cli.addHeaders(req, meta) + + conn, _, err := cli.setupHijackConn(ctx, req, proto) + return conn, err +} + +// fallbackDial is used when WithDialer() was not called. +// See cli.Dialer(). 
+func fallbackDial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { + if tlsConfig != nil && proto != "unix" && proto != "npipe" { + return tls.Dial(proto, addr, tlsConfig) + } + if proto == "npipe" { + return sockets.DialPipe(addr, 32*time.Second) + } + return net.Dial(proto, addr) +} + +func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto string) (net.Conn, string, error) { + req.Host = cli.addr + req.Header.Set("Connection", "Upgrade") + req.Header.Set("Upgrade", proto) + + dialer := cli.Dialer() + conn, err := dialer(ctx) + if err != nil { + return nil, "", errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") + } + + // When we set up a TCP connection for hijack, there could be long periods + // of inactivity (a long running command with no output) that in certain + // network setups may cause ECONNTIMEOUT, leaving the client in an unknown + // state. Setting TCP KeepAlive on the socket connection will prohibit + // ECONNTIMEOUT unless the socket connection truly is broken + if tcpConn, ok := conn.(*net.TCPConn); ok { + tcpConn.SetKeepAlive(true) + tcpConn.SetKeepAlivePeriod(30 * time.Second) + } + + clientconn := httputil.NewClientConn(conn, nil) + defer clientconn.Close() + + // Server hijacks the connection, error 'connection closed' expected + resp, err := clientconn.Do(req) + + //nolint:staticcheck // ignore SA1019 for connecting to old (pre go1.8) daemons + if err != httputil.ErrPersistEOF { + if err != nil { + return nil, "", err + } + if resp.StatusCode != http.StatusSwitchingProtocols { + resp.Body.Close() + return nil, "", fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode) + } + } + + c, br := clientconn.Hijack() + if br.Buffered() > 0 { + // If there is buffered content, wrap the connection. We return an + // object that implements CloseWrite if the underlying connection + // implements it. + if _, ok := c.(types.CloseWriter); ok { + c = &hijackedConnCloseWriter{&hijackedConn{c, br}} + } else { + c = &hijackedConn{c, br} + } + } else { + br.Reset(nil) + } + + var mediaType string + if versions.GreaterThanOrEqualTo(cli.ClientVersion(), "1.42") { + // Prior to 1.42, Content-Type is always set to raw-stream and not relevant + mediaType = resp.Header.Get("Content-Type") + } + + return c, mediaType, nil +} + +// hijackedConn wraps a net.Conn and is returned by setupHijackConn in the case +// that a) there was already buffered data in the http layer when Hijack() was +// called, and b) the underlying net.Conn does *not* implement CloseWrite(). +// hijackedConn does not implement CloseWrite() either. +type hijackedConn struct { + net.Conn + r *bufio.Reader +} + +func (c *hijackedConn) Read(b []byte) (int, error) { + return c.r.Read(b) +} + +// hijackedConnCloseWriter is a hijackedConn which additionally implements +// CloseWrite(). It is returned by setupHijackConn in the case that a) there +// was already buffered data in the http layer when Hijack() was called, and b) +// the underlying net.Conn *does* implement CloseWrite(). 
+type hijackedConnCloseWriter struct {
+	*hijackedConn
+}
+
+var _ types.CloseWriter = &hijackedConnCloseWriter{}
+
+func (c *hijackedConnCloseWriter) CloseWrite() error {
+	conn := c.Conn.(types.CloseWriter)
+	return conn.CloseWrite()
+}
diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go
new file mode 100644
index 000000000..d16e1d8ea
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_build.go
@@ -0,0 +1,146 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
+)
+
+// ImageBuild sends a request to the daemon to build images.
+// The Body in the response implements an io.ReadCloser and it's up to the caller to
+// close it.
+func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
+	query, err := cli.imageBuildOptionsToQuery(options)
+	if err != nil {
+		return types.ImageBuildResponse{}, err
+	}
+
+	headers := http.Header(make(map[string][]string))
+	buf, err := json.Marshal(options.AuthConfigs)
+	if err != nil {
+		return types.ImageBuildResponse{}, err
+	}
+	headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))
+
+	headers.Set("Content-Type", "application/x-tar")
+
+	serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers)
+	if err != nil {
+		return types.ImageBuildResponse{}, err
+	}
+
+	osType := getDockerOS(serverResp.header.Get("Server"))
+
+	return types.ImageBuildResponse{
+		Body:   serverResp.body,
+		OSType: osType,
+	}, nil
+}
+
+func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) {
+	query := url.Values{
+		"t":           options.Tags,
+		"securityopt": options.SecurityOpt,
+		"extrahosts":  options.ExtraHosts,
+	}
+	if options.SuppressOutput {
+		query.Set("q", "1")
+	}
+	if options.RemoteContext != "" {
+		query.Set("remote", options.RemoteContext)
+	}
+	if options.NoCache {
+		query.Set("nocache", "1")
+	}
+	if options.Remove {
+		query.Set("rm", "1")
+	} else {
+		query.Set("rm", "0")
+	}
+
+	if options.ForceRemove {
+		query.Set("forcerm", "1")
+	}
+
+	if options.PullParent {
+		query.Set("pull", "1")
+	}
+
+	if options.Squash {
+		if err := cli.NewVersionError("1.25", "squash"); err != nil {
+			return query, err
+		}
+		query.Set("squash", "1")
+	}
+
+	if !container.Isolation.IsDefault(options.Isolation) {
+		query.Set("isolation", string(options.Isolation))
+	}
+
+	query.Set("cpusetcpus", options.CPUSetCPUs)
+	query.Set("networkmode", options.NetworkMode)
+	query.Set("cpusetmems", options.CPUSetMems)
+	query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10))
+	query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10))
+	query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10))
+	query.Set("memory", strconv.FormatInt(options.Memory, 10))
+	query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10))
+	query.Set("cgroupparent", options.CgroupParent)
+	query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10))
+	query.Set("dockerfile", options.Dockerfile)
+	query.Set("target", options.Target)
+
+	ulimitsJSON, err := json.Marshal(options.Ulimits)
+	if err != nil {
+		return query, err
+	}
+	query.Set("ulimits", string(ulimitsJSON))
+
+	buildArgsJSON, err := json.Marshal(options.BuildArgs)
+	if err != nil {
+		return query, err
+	}
+	query.Set("buildargs", string(buildArgsJSON))
+
+	labelsJSON, err := json.Marshal(options.Labels)
+	if err != nil {
+		return query, err
+	}
+	query.Set("labels", string(labelsJSON))
+
+	cacheFromJSON, err := json.Marshal(options.CacheFrom)
+	if err != nil {
+		return query, err
+	}
+	query.Set("cachefrom", string(cacheFromJSON))
+	if options.SessionID != "" {
+		query.Set("session", options.SessionID)
+	}
+	if options.Platform != "" {
+		if err := cli.NewVersionError("1.32", "platform"); err != nil {
+			return query, err
+		}
+		query.Set("platform", strings.ToLower(options.Platform))
+	}
+	if options.BuildID != "" {
+		query.Set("buildid", options.BuildID)
+	}
+	query.Set("version", string(options.Version))
+
+	if options.Outputs != nil {
+		outputsJSON, err := json.Marshal(options.Outputs)
+		if err != nil {
+			return query, err
+		}
+		query.Set("outputs", string(outputsJSON))
+	}
+	return query, nil
+}
diff --git a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go
new file mode 100644
index 000000000..b1c022777
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_create.go
@@ -0,0 +1,37 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+	"context"
+	"io"
+	"net/url"
+	"strings"
+
+	"github.com/docker/distribution/reference"
+	"github.com/docker/docker/api/types"
+)
+
+// ImageCreate creates a new image based on the parent options.
+// It returns the JSON content in the response body.
+func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) {
+	ref, err := reference.ParseNormalizedNamed(parentReference)
+	if err != nil {
+		return nil, err
+	}
+
+	query := url.Values{}
+	query.Set("fromImage", reference.FamiliarName(ref))
+	query.Set("tag", getAPITagFromNamedRef(ref))
+	if options.Platform != "" {
+		query.Set("platform", strings.ToLower(options.Platform))
+	}
+	resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth)
+	if err != nil {
+		return nil, err
+	}
+	return resp.body, nil
+}
+
+func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) {
+	headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+	return cli.post(ctx, "/images/create", query, nil, headers)
+}
diff --git a/vendor/github.com/docker/docker/client/image_history.go b/vendor/github.com/docker/docker/client/image_history.go
new file mode 100644
index 000000000..b5bea10d8
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/image_history.go
@@ -0,0 +1,22 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+	"context"
+	"encoding/json"
+	"net/url"
+
+	"github.com/docker/docker/api/types/image"
+)
+
+// ImageHistory returns the changes in an image in history format.
+func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]image.HistoryResponseItem, error) { + var history []image.HistoryResponseItem + serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return history, err + } + + err = json.NewDecoder(serverResp.body).Decode(&history) + return history, err +} diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go new file mode 100644 index 000000000..c5de42cb7 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_import.go @@ -0,0 +1,40 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "strings" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ImageImport creates a new image based on the source options. +// It returns the JSON content in the response body. +func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { + if ref != "" { + // Check if the given image name can be resolved + if _, err := reference.ParseNormalizedNamed(ref); err != nil { + return nil, err + } + } + + query := url.Values{} + query.Set("fromSrc", source.SourceName) + query.Set("repo", ref) + query.Set("tag", options.Tag) + query.Set("message", options.Message) + if options.Platform != "" { + query.Set("platform", strings.ToLower(options.Platform)) + } + for _, change := range options.Changes { + query.Add("changes", change) + } + + resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go new file mode 100644 index 000000000..1de10e5a0 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_inspect.go @@ -0,0 +1,32 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/docker/docker/api/types" +) + +// ImageInspectWithRaw returns the image information and its raw representation. +func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) { + if imageID == "" { + return types.ImageInspect{}, nil, objectNotFoundError{object: "image", id: imageID} + } + serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return types.ImageInspect{}, nil, err + } + + body, err := io.ReadAll(serverResp.body) + if err != nil { + return types.ImageInspect{}, nil, err + } + + var response types.ImageInspect + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go new file mode 100644 index 000000000..950d51333 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_list.go @@ -0,0 +1,49 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" +) + +// ImageList returns a list of images in the docker host. 
+func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) { + var images []types.ImageSummary + query := url.Values{} + + optionFilters := options.Filters + referenceFilters := optionFilters.Get("reference") + if versions.LessThan(cli.version, "1.25") && len(referenceFilters) > 0 { + query.Set("filter", referenceFilters[0]) + for _, filterValue := range referenceFilters { + optionFilters.Del("reference", filterValue) + } + } + if optionFilters.Len() > 0 { + //nolint:staticcheck // ignore SA1019 for old code + filterJSON, err := filters.ToParamWithVersion(cli.version, optionFilters) + if err != nil { + return images, err + } + query.Set("filters", filterJSON) + } + if options.All { + query.Set("all", "1") + } + if options.SharedSize && versions.GreaterThanOrEqualTo(cli.version, "1.42") { + query.Set("shared-size", "1") + } + + serverResp, err := cli.get(ctx, "/images/json", query, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return images, err + } + + err = json.NewDecoder(serverResp.body).Decode(&images) + return images, err +} diff --git a/vendor/github.com/docker/docker/client/image_load.go b/vendor/github.com/docker/docker/client/image_load.go new file mode 100644 index 000000000..91016e493 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_load.go @@ -0,0 +1,29 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ImageLoad loads an image in the docker host from the client host. +// It's up to the caller to close the io.ReadCloser in the +// ImageLoadResponse returned by this function. +func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) { + v := url.Values{} + v.Set("quiet", "0") + if quiet { + v.Set("quiet", "1") + } + headers := map[string][]string{"Content-Type": {"application/x-tar"}} + resp, err := cli.postRaw(ctx, "/images/load", v, input, headers) + if err != nil { + return types.ImageLoadResponse{}, err + } + return types.ImageLoadResponse{ + Body: resp.body, + JSON: resp.header.Get("Content-Type") == "application/json", + }, nil +} diff --git a/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/docker/docker/client/image_prune.go new file mode 100644 index 000000000..56af6d7f9 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_prune.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// ImagesPrune requests the daemon to delete unused data +func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (types.ImagesPruneReport, error) { + var report types.ImagesPruneReport + + if err := cli.NewVersionError("1.25", "image prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/images/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return report, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go new file mode 
100644 index 000000000..a23975591 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_pull.go @@ -0,0 +1,64 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "strings" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/errdefs" +) + +// ImagePull requests the docker host to pull an image from a remote registry. +// It executes the privileged function if the operation is unauthorized +// and it tries one more time. +// It's up to the caller to handle the io.ReadCloser and close it properly. +// +// FIXME(vdemeester): there is currently used in a few way in docker/docker +// - if not in trusted content, ref is used to pass the whole reference, and tag is empty +// - if in trusted content, ref is used to pass the reference name, and tag for the digest +func (cli *Client) ImagePull(ctx context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) { + ref, err := reference.ParseNormalizedNamed(refStr) + if err != nil { + return nil, err + } + + query := url.Values{} + query.Set("fromImage", reference.FamiliarName(ref)) + if !options.All { + query.Set("tag", getAPITagFromNamedRef(ref)) + } + if options.Platform != "" { + query.Set("platform", strings.ToLower(options.Platform)) + } + + resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) + if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return nil, privilegeErr + } + resp, err = cli.tryImageCreate(ctx, query, newAuthHeader) + } + if err != nil { + return nil, err + } + return resp.body, nil +} + +// getAPITagFromNamedRef returns a tag from the specified reference. +// This function is necessary as long as the docker "server" api expects +// digests to be sent as tags and makes a distinction between the name +// and tag/digest part of a reference. +func getAPITagFromNamedRef(ref reference.Named) string { + if digested, ok := ref.(reference.Digested); ok { + return digested.Digest().String() + } + ref = reference.TagNameOnly(ref) + if tagged, ok := ref.(reference.Tagged); ok { + return tagged.Tag() + } + return "" +} diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go new file mode 100644 index 000000000..845580d4a --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_push.go @@ -0,0 +1,54 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "errors" + "io" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/errdefs" +) + +// ImagePush requests the docker host to push an image to a remote registry. +// It executes the privileged function if the operation is unauthorized +// and it tries one more time. +// It's up to the caller to handle the io.ReadCloser and close it properly. 
+func (cli *Client) ImagePush(ctx context.Context, image string, options types.ImagePushOptions) (io.ReadCloser, error) { + ref, err := reference.ParseNormalizedNamed(image) + if err != nil { + return nil, err + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return nil, errors.New("cannot push a digest reference") + } + + name := reference.FamiliarName(ref) + query := url.Values{} + if !options.All { + ref = reference.TagNameOnly(ref) + if tagged, ok := ref.(reference.Tagged); ok { + query.Set("tag", tagged.Tag()) + } + } + + resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth) + if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return nil, privilegeErr + } + resp, err = cli.tryImagePush(ctx, name, query, newAuthHeader) + } + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers) +} diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go new file mode 100644 index 000000000..6a9fb3f41 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_remove.go @@ -0,0 +1,31 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ImageRemove removes an image from the docker host. +func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) { + query := url.Values{} + + if options.Force { + query.Set("force", "1") + } + if !options.PruneChildren { + query.Set("noprune", "1") + } + + var dels []types.ImageDeleteResponseItem + resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return dels, err + } + + err = json.NewDecoder(resp.body).Decode(&dels) + return dels, err +} diff --git a/vendor/github.com/docker/docker/client/image_save.go b/vendor/github.com/docker/docker/client/image_save.go new file mode 100644 index 000000000..d1314e4b2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_save.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" +) + +// ImageSave retrieves one or more images from the docker host as an io.ReadCloser. +// It's up to the caller to store the images and close the stream. 
+func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) { + query := url.Values{ + "names": imageIDs, + } + + resp, err := cli.get(ctx, "/images/get", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go new file mode 100644 index 000000000..e69fa3722 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_search.go @@ -0,0 +1,53 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/errdefs" +) + +// ImageSearch makes the docker host search by a term in a remote registry. +// The list of results is not sorted in any fashion. +func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) { + var results []registry.SearchResult + query := url.Values{} + query.Set("term", term) + if options.Limit > 0 { + query.Set("limit", strconv.Itoa(options.Limit)) + } + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + if err != nil { + return results, err + } + query.Set("filters", filterJSON) + } + + resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) + defer ensureReaderClosed(resp) + if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return results, privilegeErr + } + resp, err = cli.tryImageSearch(ctx, query, newAuthHeader) + } + if err != nil { + return results, err + } + + err = json.NewDecoder(resp.body).Decode(&results) + return results, err +} + +func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.get(ctx, "/images/search", query, headers) +} diff --git a/vendor/github.com/docker/docker/client/image_tag.go b/vendor/github.com/docker/docker/client/image_tag.go new file mode 100644 index 000000000..5652bfc25 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_tag.go @@ -0,0 +1,37 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/pkg/errors" +) + +// ImageTag tags an image in the docker host +func (cli *Client) ImageTag(ctx context.Context, source, target string) error { + if _, err := reference.ParseAnyReference(source); err != nil { + return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", source) + } + + ref, err := reference.ParseNormalizedNamed(target) + if err != nil { + return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", target) + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return errors.New("refusing to create a tag with a digest reference") + } + + ref = reference.TagNameOnly(ref) + + query := url.Values{} + query.Set("repo", reference.FamiliarName(ref)) + if tagged, ok := ref.(reference.Tagged); ok { + query.Set("tag", tagged.Tag()) + } + + resp, err := cli.post(ctx, "/images/"+source+"/tag", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git 
a/vendor/github.com/docker/docker/client/info.go b/vendor/github.com/docker/docker/client/info.go new file mode 100644 index 000000000..c856704e2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/info.go @@ -0,0 +1,26 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + "github.com/docker/docker/api/types" +) + +// Info returns information about the docker server. +func (cli *Client) Info(ctx context.Context) (types.Info, error) { + var info types.Info + serverResp, err := cli.get(ctx, "/info", url.Values{}, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return info, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil { + return info, fmt.Errorf("Error reading remote info: %v", err) + } + + return info, nil +} diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go new file mode 100644 index 000000000..e9c1ed722 --- /dev/null +++ b/vendor/github.com/docker/docker/client/interface.go @@ -0,0 +1,201 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net" + "net/http" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/volume" + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// CommonAPIClient is the common methods between stable and experimental versions of APIClient. 
+type CommonAPIClient interface { + ConfigAPIClient + ContainerAPIClient + DistributionAPIClient + ImageAPIClient + NodeAPIClient + NetworkAPIClient + PluginAPIClient + ServiceAPIClient + SwarmAPIClient + SecretAPIClient + SystemAPIClient + VolumeAPIClient + ClientVersion() string + DaemonHost() string + HTTPClient() *http.Client + ServerVersion(ctx context.Context) (types.Version, error) + NegotiateAPIVersion(ctx context.Context) + NegotiateAPIVersionPing(types.Ping) + DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) + Dialer() func(context.Context) (net.Conn, error) + Close() error +} + +// ContainerAPIClient defines API client methods for the containers +type ContainerAPIClient interface { + ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) + ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) + ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string) (container.CreateResponse, error) + ContainerDiff(ctx context.Context, container string) ([]container.ContainerChangeResponseItem, error) + ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) + ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) + ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) + ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error + ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error + ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) + ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error) + ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error) + ContainerKill(ctx context.Context, container, signal string) error + ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) + ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) + ContainerPause(ctx context.Context, container string) error + ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error + ContainerRename(ctx context.Context, container, newContainerName string) error + ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error + ContainerRestart(ctx context.Context, container string, options container.StopOptions) error + ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) + ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error) + ContainerStatsOneShot(ctx context.Context, container string) (types.ContainerStats, error) + ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error + ContainerStop(ctx context.Context, container string, options container.StopOptions) error + ContainerTop(ctx context.Context, container string, arguments []string) (container.ContainerTopOKBody, error) + ContainerUnpause(ctx context.Context, container string) error + ContainerUpdate(ctx context.Context, container string, 
updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) + ContainerWait(ctx context.Context, container string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) + CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) + CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error + ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) +} + +// DistributionAPIClient defines API client methods for the registry +type DistributionAPIClient interface { + DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registry.DistributionInspect, error) +} + +// ImageAPIClient defines API client methods for the images +type ImageAPIClient interface { + ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) + BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) + BuildCancel(ctx context.Context, id string) error + ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) + ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error) + ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) + ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error) + ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) + ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) + ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) + ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) + ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) + ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) + ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) + ImageTag(ctx context.Context, image, ref string) error + ImagesPrune(ctx context.Context, pruneFilter filters.Args) (types.ImagesPruneReport, error) +} + +// NetworkAPIClient defines API client methods for the networks +type NetworkAPIClient interface { + NetworkConnect(ctx context.Context, network, container string, config *network.EndpointSettings) error + NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) + NetworkDisconnect(ctx context.Context, network, container string, force bool) error + NetworkInspect(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, error) + NetworkInspectWithRaw(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) + NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) + NetworkRemove(ctx context.Context, network string) error + NetworksPrune(ctx context.Context, pruneFilter filters.Args) (types.NetworksPruneReport, error) +} + +// NodeAPIClient defines API client methods for the nodes +type NodeAPIClient interface { + NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, 
[]byte, error) + NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) + NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error + NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error +} + +// PluginAPIClient defines API client methods for the plugins +type PluginAPIClient interface { + PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) + PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error + PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error + PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error + PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) + PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) + PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) + PluginSet(ctx context.Context, name string, args []string) error + PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) + PluginCreate(ctx context.Context, createContext io.Reader, options types.PluginCreateOptions) error +} + +// ServiceAPIClient defines API client methods for the services +type ServiceAPIClient interface { + ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) + ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) + ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) + ServiceRemove(ctx context.Context, serviceID string) error + ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) + ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) + TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) + TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) + TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) +} + +// SwarmAPIClient defines API client methods for the swarm +type SwarmAPIClient interface { + SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) + SwarmJoin(ctx context.Context, req swarm.JoinRequest) error + SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) + SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error + SwarmLeave(ctx context.Context, force bool) error + SwarmInspect(ctx context.Context) (swarm.Swarm, error) + SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error +} + +// SystemAPIClient defines API client methods for the system +type SystemAPIClient interface { + Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) + Info(ctx context.Context) (types.Info, error) + RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) + DiskUsage(ctx context.Context, options types.DiskUsageOptions) (types.DiskUsage, error) + Ping(ctx context.Context) (types.Ping, error) +} + +// VolumeAPIClient defines API client methods for the volumes 
+type VolumeAPIClient interface { + VolumeCreate(ctx context.Context, options volume.CreateOptions) (volume.Volume, error) + VolumeInspect(ctx context.Context, volumeID string) (volume.Volume, error) + VolumeInspectWithRaw(ctx context.Context, volumeID string) (volume.Volume, []byte, error) + VolumeList(ctx context.Context, filter filters.Args) (volume.ListResponse, error) + VolumeRemove(ctx context.Context, volumeID string, force bool) error + VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error) + VolumeUpdate(ctx context.Context, volumeID string, version swarm.Version, options volume.UpdateOptions) error +} + +// SecretAPIClient defines API client methods for secrets +type SecretAPIClient interface { + SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) + SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) + SecretRemove(ctx context.Context, id string) error + SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error) + SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error +} + +// ConfigAPIClient defines API client methods for configs +type ConfigAPIClient interface { + ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) + ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) + ConfigRemove(ctx context.Context, id string) error + ConfigInspectWithRaw(ctx context.Context, name string) (swarm.Config, []byte, error) + ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error +} diff --git a/vendor/github.com/docker/docker/client/interface_experimental.go b/vendor/github.com/docker/docker/client/interface_experimental.go new file mode 100644 index 000000000..402ffb512 --- /dev/null +++ b/vendor/github.com/docker/docker/client/interface_experimental.go @@ -0,0 +1,18 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types" +) + +type apiClientExperimental interface { + CheckpointAPIClient +} + +// CheckpointAPIClient defines API client methods for the checkpoints +type CheckpointAPIClient interface { + CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error + CheckpointDelete(ctx context.Context, container string, options types.CheckpointDeleteOptions) error + CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) +} diff --git a/vendor/github.com/docker/docker/client/interface_stable.go b/vendor/github.com/docker/docker/client/interface_stable.go new file mode 100644 index 000000000..5502cd742 --- /dev/null +++ b/vendor/github.com/docker/docker/client/interface_stable.go @@ -0,0 +1,10 @@ +package client // import "github.com/docker/docker/client" + +// APIClient is an interface that clients that talk with a docker server must implement. +type APIClient interface { + CommonAPIClient + apiClientExperimental +} + +// Ensure that Client always implements APIClient. 
+var _ APIClient = &Client{} diff --git a/vendor/github.com/docker/docker/client/login.go b/vendor/github.com/docker/docker/client/login.go new file mode 100644 index 000000000..f05852063 --- /dev/null +++ b/vendor/github.com/docker/docker/client/login.go @@ -0,0 +1,25 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/registry" +) + +// RegistryLogin authenticates the docker server with a given docker registry. +// It returns unauthorizedError when the authentication fails. +func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) { + resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) + defer ensureReaderClosed(resp) + + if err != nil { + return registry.AuthenticateOKBody{}, err + } + + var response registry.AuthenticateOKBody + err = json.NewDecoder(resp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/network_connect.go b/vendor/github.com/docker/docker/client/network_connect.go new file mode 100644 index 000000000..571894613 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_connect.go @@ -0,0 +1,19 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" +) + +// NetworkConnect connects a container to an existent network in the docker host. +func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error { + nc := types.NetworkConnect{ + Container: containerID, + EndpointConfig: config, + } + resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/docker/docker/client/network_create.go new file mode 100644 index 000000000..278d9383a --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_create.go @@ -0,0 +1,25 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" +) + +// NetworkCreate creates a new network in the docker host. +func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) { + networkCreateRequest := types.NetworkCreateRequest{ + NetworkCreate: options, + Name: name, + } + var response types.NetworkCreateResponse + serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return response, err + } + + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/network_disconnect.go b/vendor/github.com/docker/docker/client/network_disconnect.go new file mode 100644 index 000000000..dd1567665 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_disconnect.go @@ -0,0 +1,15 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types" +) + +// NetworkDisconnect disconnects a container from an existent network in the docker host. 
+func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error { + nd := types.NetworkDisconnect{Container: containerID, Force: force} + resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go new file mode 100644 index 000000000..0f90e2bb9 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_inspect.go @@ -0,0 +1,49 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + "net/url" + + "github.com/docker/docker/api/types" +) + +// NetworkInspect returns the information for a specific network configured in the docker host. +func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) { + networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID, options) + return networkResource, err +} + +// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation. +func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) { + if networkID == "" { + return types.NetworkResource{}, nil, objectNotFoundError{object: "network", id: networkID} + } + var ( + networkResource types.NetworkResource + resp serverResponse + err error + ) + query := url.Values{} + if options.Verbose { + query.Set("verbose", "true") + } + if options.Scope != "" { + query.Set("scope", options.Scope) + } + resp, err = cli.get(ctx, "/networks/"+networkID, query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return networkResource, nil, err + } + + body, err := io.ReadAll(resp.body) + if err != nil { + return networkResource, nil, err + } + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&networkResource) + return networkResource, body, err +} diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go new file mode 100644 index 000000000..ed2acb557 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_list.go @@ -0,0 +1,32 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// NetworkList returns the list of networks configured in the docker host. 
+func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { + query := url.Values{} + if options.Filters.Len() > 0 { + //nolint:staticcheck // ignore SA1019 for old code + filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + var networkResources []types.NetworkResource + resp, err := cli.get(ctx, "/networks", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return networkResources, err + } + err = json.NewDecoder(resp.body).Decode(&networkResources) + return networkResources, err +} diff --git a/vendor/github.com/docker/docker/client/network_prune.go b/vendor/github.com/docker/docker/client/network_prune.go new file mode 100644 index 000000000..cebb18821 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_prune.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// NetworksPrune requests the daemon to delete unused networks +func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (types.NetworksPruneReport, error) { + var report types.NetworksPruneReport + + if err := cli.NewVersionError("1.25", "network prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/networks/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return report, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving network prune report: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/network_remove.go b/vendor/github.com/docker/docker/client/network_remove.go new file mode 100644 index 000000000..9d6c6cef0 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_remove.go @@ -0,0 +1,10 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// NetworkRemove removes an existent network from the docker host. +func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { + resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) + defer ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/docker/docker/client/node_inspect.go new file mode 100644 index 000000000..95ab9b1be --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_inspect.go @@ -0,0 +1,32 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/docker/docker/api/types/swarm" +) + +// NodeInspectWithRaw returns the node information. 
+func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) { + if nodeID == "" { + return swarm.Node{}, nil, objectNotFoundError{object: "node", id: nodeID} + } + serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return swarm.Node{}, nil, err + } + + body, err := io.ReadAll(serverResp.body) + if err != nil { + return swarm.Node{}, nil, err + } + + var response swarm.Node + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go new file mode 100644 index 000000000..c212906bc --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_list.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" +) + +// NodeList returns the list of nodes. +func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/nodes", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, err + } + + var nodes []swarm.Node + err = json.NewDecoder(resp.body).Decode(&nodes) + return nodes, err +} diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/docker/docker/client/node_remove.go new file mode 100644 index 000000000..e44436deb --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_remove.go @@ -0,0 +1,20 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// NodeRemove removes a Node. +func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil) + defer ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/node_update.go b/vendor/github.com/docker/docker/client/node_update.go new file mode 100644 index 000000000..0d0fc3b78 --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_update.go @@ -0,0 +1,17 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types/swarm" +) + +// NodeUpdate updates a Node. 
+func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error { + query := url.Values{} + query.Set("version", version.String()) + resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/options.go b/vendor/github.com/docker/docker/client/options.go new file mode 100644 index 000000000..099ad4184 --- /dev/null +++ b/vendor/github.com/docker/docker/client/options.go @@ -0,0 +1,210 @@ +package client + +import ( + "context" + "net" + "net/http" + "os" + "path/filepath" + "time" + + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" + "github.com/pkg/errors" +) + +// Opt is a configuration option to initialize a client +type Opt func(*Client) error + +// FromEnv configures the client with values from environment variables. +// +// FromEnv uses the following environment variables: +// +// DOCKER_HOST (EnvOverrideHost) to set the URL to the docker server. +// +// DOCKER_API_VERSION (EnvOverrideAPIVersion) to set the version of the API to +// use, leave empty for latest. +// +// DOCKER_CERT_PATH (EnvOverrideCertPath) to specify the directory from which to +// load the TLS certificates (ca.pem, cert.pem, key.pem). +// +// DOCKER_TLS_VERIFY (EnvTLSVerify) to enable or disable TLS verification (off by +// default). +func FromEnv(c *Client) error { + ops := []Opt{ + WithTLSClientConfigFromEnv(), + WithHostFromEnv(), + WithVersionFromEnv(), + } + for _, op := range ops { + if err := op(c); err != nil { + return err + } + } + return nil +} + +// WithDialContext applies the dialer to the client transport. This can be +// used to set the Timeout and KeepAlive settings of the client. +func WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) Opt { + return func(c *Client) error { + if transport, ok := c.client.Transport.(*http.Transport); ok { + transport.DialContext = dialContext + return nil + } + return errors.Errorf("cannot apply dialer to transport: %T", c.client.Transport) + } +} + +// WithHost overrides the client host with the specified one. +func WithHost(host string) Opt { + return func(c *Client) error { + hostURL, err := ParseHostURL(host) + if err != nil { + return err + } + c.host = host + c.proto = hostURL.Scheme + c.addr = hostURL.Host + c.basePath = hostURL.Path + if transport, ok := c.client.Transport.(*http.Transport); ok { + return sockets.ConfigureTransport(transport, c.proto, c.addr) + } + return errors.Errorf("cannot apply host to transport: %T", c.client.Transport) + } +} + +// WithHostFromEnv overrides the client host with the host specified in the +// DOCKER_HOST (EnvOverrideHost) environment variable. If DOCKER_HOST is not set, +// or set to an empty value, the host is not modified. 
+func WithHostFromEnv() Opt { + return func(c *Client) error { + if host := os.Getenv(EnvOverrideHost); host != "" { + return WithHost(host)(c) + } + return nil + } +} + +// WithHTTPClient overrides the client http client with the specified one +func WithHTTPClient(client *http.Client) Opt { + return func(c *Client) error { + if client != nil { + c.client = client + } + return nil + } +} + +// WithTimeout configures the time limit for requests made by the HTTP client +func WithTimeout(timeout time.Duration) Opt { + return func(c *Client) error { + c.client.Timeout = timeout + return nil + } +} + +// WithHTTPHeaders overrides the client default http headers +func WithHTTPHeaders(headers map[string]string) Opt { + return func(c *Client) error { + c.customHTTPHeaders = headers + return nil + } +} + +// WithScheme overrides the client scheme with the specified one +func WithScheme(scheme string) Opt { + return func(c *Client) error { + c.scheme = scheme + return nil + } +} + +// WithTLSClientConfig applies a tls config to the client transport. +func WithTLSClientConfig(cacertPath, certPath, keyPath string) Opt { + return func(c *Client) error { + opts := tlsconfig.Options{ + CAFile: cacertPath, + CertFile: certPath, + KeyFile: keyPath, + ExclusiveRootPools: true, + } + config, err := tlsconfig.Client(opts) + if err != nil { + return errors.Wrap(err, "failed to create tls config") + } + if transport, ok := c.client.Transport.(*http.Transport); ok { + transport.TLSClientConfig = config + return nil + } + return errors.Errorf("cannot apply tls config to transport: %T", c.client.Transport) + } +} + +// WithTLSClientConfigFromEnv configures the client's TLS settings with the +// settings in the DOCKER_CERT_PATH and DOCKER_TLS_VERIFY environment variables. +// If DOCKER_CERT_PATH is not set or empty, TLS configuration is not modified. +// +// WithTLSClientConfigFromEnv uses the following environment variables: +// +// DOCKER_CERT_PATH (EnvOverrideCertPath) to specify the directory from which to +// load the TLS certificates (ca.pem, cert.pem, key.pem). +// +// DOCKER_TLS_VERIFY (EnvTLSVerify) to enable or disable TLS verification (off by +// default). +func WithTLSClientConfigFromEnv() Opt { + return func(c *Client) error { + dockerCertPath := os.Getenv(EnvOverrideCertPath) + if dockerCertPath == "" { + return nil + } + options := tlsconfig.Options{ + CAFile: filepath.Join(dockerCertPath, "ca.pem"), + CertFile: filepath.Join(dockerCertPath, "cert.pem"), + KeyFile: filepath.Join(dockerCertPath, "key.pem"), + InsecureSkipVerify: os.Getenv(EnvTLSVerify) == "", + } + tlsc, err := tlsconfig.Client(options) + if err != nil { + return err + } + + c.client = &http.Client{ + Transport: &http.Transport{TLSClientConfig: tlsc}, + CheckRedirect: CheckRedirect, + } + return nil + } +} + +// WithVersion overrides the client version with the specified one. If an empty +// version is specified, the value will be ignored to allow version negotiation. +func WithVersion(version string) Opt { + return func(c *Client) error { + if version != "" { + c.version = version + c.manualOverride = true + } + return nil + } +} + +// WithVersionFromEnv overrides the client version with the version specified in +// the DOCKER_API_VERSION environment variable. If DOCKER_API_VERSION is not set, +// the version is not modified. 
+func WithVersionFromEnv() Opt { + return func(c *Client) error { + return WithVersion(os.Getenv(EnvOverrideAPIVersion))(c) + } +} + +// WithAPIVersionNegotiation enables automatic API version negotiation for the client. +// With this option enabled, the client automatically negotiates the API version +// to use when making requests. API version negotiation is performed on the first +// request; subsequent requests will not re-negotiate. +func WithAPIVersionNegotiation() Opt { + return func(c *Client) error { + c.negotiateVersion = true + return nil + } +} diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go new file mode 100644 index 000000000..27e8695cb --- /dev/null +++ b/vendor/github.com/docker/docker/client/ping.go @@ -0,0 +1,75 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/http" + "path" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/errdefs" +) + +// Ping pings the server and returns the value of the "Docker-Experimental", +// "Builder-Version", "OS-Type" & "API-Version" headers. It attempts to use +// a HEAD request on the endpoint, but falls back to GET if HEAD is not supported +// by the daemon. +func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { + var ping types.Ping + + // Using cli.buildRequest() + cli.doRequest() instead of cli.sendRequest() + // because ping requests are used during API version negotiation, so we want + // to hit the non-versioned /_ping endpoint, not /v1.xx/_ping + req, err := cli.buildRequest(http.MethodHead, path.Join(cli.basePath, "/_ping"), nil, nil) + if err != nil { + return ping, err + } + serverResp, err := cli.doRequest(ctx, req) + if err == nil { + defer ensureReaderClosed(serverResp) + switch serverResp.statusCode { + case http.StatusOK, http.StatusInternalServerError: + // Server handled the request, so parse the response + return parsePingResponse(cli, serverResp) + } + } else if IsErrConnectionFailed(err) { + return ping, err + } + + req, err = cli.buildRequest(http.MethodGet, path.Join(cli.basePath, "/_ping"), nil, nil) + if err != nil { + return ping, err + } + serverResp, err = cli.doRequest(ctx, req) + defer ensureReaderClosed(serverResp) + if err != nil { + return ping, err + } + return parsePingResponse(cli, serverResp) +} + +func parsePingResponse(cli *Client, resp serverResponse) (types.Ping, error) { + var ping types.Ping + if resp.header == nil { + err := cli.checkResponseErr(resp) + return ping, errdefs.FromStatusCode(err, resp.statusCode) + } + ping.APIVersion = resp.header.Get("API-Version") + ping.OSType = resp.header.Get("OSType") + if resp.header.Get("Docker-Experimental") == "true" { + ping.Experimental = true + } + if bv := resp.header.Get("Builder-Version"); bv != "" { + ping.BuilderVersion = types.BuilderVersion(bv) + } + if si := resp.header.Get("Swarm"); si != "" { + parts := strings.SplitN(si, "/", 2) + ping.SwarmStatus = &swarm.Status{ + NodeState: swarm.LocalNodeState(parts[0]), + ControlAvailable: len(parts) == 2 && parts[1] == "manager", + } + } + err := cli.checkResponseErr(resp) + return ping, errdefs.FromStatusCode(err, resp.statusCode) +} diff --git a/vendor/github.com/docker/docker/client/plugin_create.go b/vendor/github.com/docker/docker/client/plugin_create.go new file mode 100644 index 000000000..b95dbaf68 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_create.go @@ -0,0 +1,23 @@ +package client 
// import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" +) + +// PluginCreate creates a plugin +func (cli *Client) PluginCreate(ctx context.Context, createContext io.Reader, createOptions types.PluginCreateOptions) error { + headers := http.Header(make(map[string][]string)) + headers.Set("Content-Type", "application/x-tar") + + query := url.Values{} + query.Set("name", createOptions.RepoName) + + resp, err := cli.postRaw(ctx, "/plugins/create", query, createContext, headers) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_disable.go b/vendor/github.com/docker/docker/client/plugin_disable.go new file mode 100644 index 000000000..01f6574f9 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_disable.go @@ -0,0 +1,19 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// PluginDisable disables a plugin +func (cli *Client) PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + resp, err := cli.post(ctx, "/plugins/"+name+"/disable", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_enable.go b/vendor/github.com/docker/docker/client/plugin_enable.go new file mode 100644 index 000000000..736da48bd --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_enable.go @@ -0,0 +1,19 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" +) + +// PluginEnable enables a plugin +func (cli *Client) PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error { + query := url.Values{} + query.Set("timeout", strconv.Itoa(options.Timeout)) + + resp, err := cli.post(ctx, "/plugins/"+name+"/enable", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go b/vendor/github.com/docker/docker/client/plugin_inspect.go new file mode 100644 index 000000000..f09e46066 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_inspect.go @@ -0,0 +1,31 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/docker/docker/api/types" +) + +// PluginInspectWithRaw inspects an existing plugin +func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) { + if name == "" { + return nil, nil, objectNotFoundError{object: "plugin", id: name} + } + resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, nil, err + } + + body, err := io.ReadAll(resp.body) + if err != nil { + return nil, nil, err + } + var p types.Plugin + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&p) + return &p, body, err +} diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go new file mode 100644 index 000000000..012afe61c --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_install.go @@ -0,0 +1,113 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "io" + "net/url" + + 
"github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/errdefs" + "github.com/pkg/errors" +) + +// PluginInstall installs a plugin +func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { + query := url.Values{} + if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { + return nil, errors.Wrap(err, "invalid remote reference") + } + query.Set("remote", options.RemoteRef) + + privileges, err := cli.checkPluginPermissions(ctx, query, options) + if err != nil { + return nil, err + } + + // set name for plugin pull, if empty should default to remote reference + query.Set("name", name) + + resp, err := cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth) + if err != nil { + return nil, err + } + + name = resp.header.Get("Docker-Plugin-Name") + + pr, pw := io.Pipe() + go func() { // todo: the client should probably be designed more around the actual api + _, err := io.Copy(pw, resp.body) + if err != nil { + pw.CloseWithError(err) + return + } + defer func() { + if err != nil { + delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil) + ensureReaderClosed(delResp) + } + }() + if len(options.Args) > 0 { + if err := cli.PluginSet(ctx, name, options.Args); err != nil { + pw.CloseWithError(err) + return + } + } + + if options.Disabled { + pw.Close() + return + } + + enableErr := cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0}) + pw.CloseWithError(enableErr) + }() + return pr, nil +} + +func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.get(ctx, "/plugins/privileges", query, headers) +} + +func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/plugins/pull", query, privileges, headers) +} + +func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) { + resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) + if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + // todo: do inspect before to check existing name before checking privileges + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + ensureReaderClosed(resp) + return nil, privilegeErr + } + options.RegistryAuth = newAuthHeader + resp, err = cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) + } + if err != nil { + ensureReaderClosed(resp) + return nil, err + } + + var privileges types.PluginPrivileges + if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil { + ensureReaderClosed(resp) + return nil, err + } + ensureReaderClosed(resp) + + if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 { + accept, err := options.AcceptPermissionsFunc(privileges) + if err != nil { + return nil, err + } + if !accept { + return nil, pluginPermissionDenied{options.RemoteRef} + } + } + return privileges, nil +} diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go new file mode 100644 index 000000000..2091a054d --- /dev/null +++ 
b/vendor/github.com/docker/docker/client/plugin_list.go @@ -0,0 +1,33 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// PluginList returns the installed plugins +func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) { + var plugins types.PluginsListResponse + query := url.Values{} + + if filter.Len() > 0 { + //nolint:staticcheck // ignore SA1019 for old code + filterJSON, err := filters.ToParamWithVersion(cli.version, filter) + if err != nil { + return plugins, err + } + query.Set("filters", filterJSON) + } + resp, err := cli.get(ctx, "/plugins", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return plugins, err + } + + err = json.NewDecoder(resp.body).Decode(&plugins) + return plugins, err +} diff --git a/vendor/github.com/docker/docker/client/plugin_push.go b/vendor/github.com/docker/docker/client/plugin_push.go new file mode 100644 index 000000000..d20bfe844 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_push.go @@ -0,0 +1,16 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" +) + +// PluginPush pushes a plugin to a registry +func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, headers) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/docker/docker/client/plugin_remove.go new file mode 100644 index 000000000..4cd66958c --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_remove.go @@ -0,0 +1,20 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// PluginRemove removes a plugin +func (cli *Client) PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/plugins/"+name, query, nil) + defer ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_set.go b/vendor/github.com/docker/docker/client/plugin_set.go new file mode 100644 index 000000000..dcf5752ca --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_set.go @@ -0,0 +1,12 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" +) + +// PluginSet modifies settings for an existing plugin +func (cli *Client) PluginSet(ctx context.Context, name string, args []string) error { + resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, args, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_upgrade.go b/vendor/github.com/docker/docker/client/plugin_upgrade.go new file mode 100644 index 000000000..115cea945 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_upgrade.go @@ -0,0 +1,39 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" +) + +// PluginUpgrade upgrades a plugin +func (cli *Client) 
PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { + if err := cli.NewVersionError("1.26", "plugin upgrade"); err != nil { + return nil, err + } + query := url.Values{} + if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { + return nil, errors.Wrap(err, "invalid remote reference") + } + query.Set("remote", options.RemoteRef) + + privileges, err := cli.checkPluginPermissions(ctx, query, options) + if err != nil { + return nil, err + } + + resp, err := cli.tryPluginUpgrade(ctx, query, privileges, name, options.RegistryAuth) + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, headers) +} diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go new file mode 100644 index 000000000..c799095c1 --- /dev/null +++ b/vendor/github.com/docker/docker/client/request.go @@ -0,0 +1,277 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "net/url" + "os" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/errdefs" + "github.com/pkg/errors" +) + +// serverResponse is a wrapper for http API responses. +type serverResponse struct { + body io.ReadCloser + header http.Header + statusCode int + reqURL *url.URL +} + +// head sends an http request to the docker API using the method HEAD. +func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, http.MethodHead, path, query, nil, headers) +} + +// get sends an http request to the docker API using the method GET with a specific Go context. +func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, http.MethodGet, path, query, nil, headers) +} + +// post sends an http request to the docker API using the method POST with a specific Go context. +func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { + body, headers, err := encodeBody(obj, headers) + if err != nil { + return serverResponse{}, err + } + return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) +} + +func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) +} + +func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { + body, headers, err := encodeBody(obj, headers) + if err != nil { + return serverResponse{}, err + } + return cli.sendRequest(ctx, http.MethodPut, path, query, body, headers) +} + +// putRaw sends an http request to the docker API using the method PUT. 
+func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, http.MethodPut, path, query, body, headers) +} + +// delete sends an http request to the docker API using the method DELETE. +func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, http.MethodDelete, path, query, nil, headers) +} + +type headers map[string][]string + +func encodeBody(obj interface{}, headers headers) (io.Reader, headers, error) { + if obj == nil { + return nil, headers, nil + } + + body, err := encodeData(obj) + if err != nil { + return nil, headers, err + } + if headers == nil { + headers = make(map[string][]string) + } + headers["Content-Type"] = []string{"application/json"} + return body, headers, nil +} + +func (cli *Client) buildRequest(method, path string, body io.Reader, headers headers) (*http.Request, error) { + expectedPayload := (method == http.MethodPost || method == http.MethodPut) + if expectedPayload && body == nil { + body = bytes.NewReader([]byte{}) + } + + req, err := http.NewRequest(method, path, body) + if err != nil { + return nil, err + } + req = cli.addHeaders(req, headers) + + if cli.proto == "unix" || cli.proto == "npipe" { + // For local communications, it doesn't matter what the host is. We just + // need a valid and meaningful host name. (See #189) + req.Host = "docker" + } + + req.URL.Host = cli.addr + req.URL.Scheme = cli.scheme + + if expectedPayload && req.Header.Get("Content-Type") == "" { + req.Header.Set("Content-Type", "text/plain") + } + return req, nil +} + +func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers headers) (serverResponse, error) { + req, err := cli.buildRequest(method, cli.getAPIPath(ctx, path, query), body, headers) + if err != nil { + return serverResponse{}, err + } + + resp, err := cli.doRequest(ctx, req) + switch { + case errors.Is(err, context.Canceled): + return serverResponse{}, errdefs.Cancelled(err) + case errors.Is(err, context.DeadlineExceeded): + return serverResponse{}, errdefs.Deadline(err) + case err == nil: + err = cli.checkResponseErr(resp) + } + return resp, errdefs.FromStatusCode(err, resp.statusCode) +} + +func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) { + serverResp := serverResponse{statusCode: -1, reqURL: req.URL} + + req = req.WithContext(ctx) + resp, err := cli.client.Do(req) + if err != nil { + if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") { + return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err) + } + + if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") { + return serverResp, errors.Wrap(err, "the server probably has client authentication (--tlsverify) enabled; check your TLS client certification settings") + } + + // Don't decorate context sentinel errors; users may be comparing to + // them directly. 
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return serverResp, err + } + + if nErr, ok := err.(*url.Error); ok { + if nErr, ok := nErr.Err.(*net.OpError); ok { + if os.IsPermission(nErr.Err) { + return serverResp, errors.Wrapf(err, "permission denied while trying to connect to the Docker daemon socket at %v", cli.host) + } + } + } + + if err, ok := err.(net.Error); ok { + if err.Timeout() { + return serverResp, ErrorConnectionFailed(cli.host) + } + if strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") { + return serverResp, ErrorConnectionFailed(cli.host) + } + } + + // Although there's not a strongly typed error for this in go-winio, + // lots of people are using the default configuration for the docker + // daemon on Windows where the daemon is listening on a named pipe + // `//./pipe/docker_engine, and the client must be running elevated. + // Give users a clue rather than the not-overly useful message + // such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.26/info: + // open //./pipe/docker_engine: The system cannot find the file specified.`. + // Note we can't string compare "The system cannot find the file specified" as + // this is localised - for example in French the error would be + // `open //./pipe/docker_engine: Le fichier spécifié est introuvable.` + if strings.Contains(err.Error(), `open //./pipe/docker_engine`) { + // Checks if client is running with elevated privileges + if f, elevatedErr := os.Open("\\\\.\\PHYSICALDRIVE0"); elevatedErr == nil { + err = errors.Wrap(err, "in the default daemon configuration on Windows, the docker client must be run with elevated privileges to connect") + } else { + f.Close() + err = errors.Wrap(err, "this error may indicate that the docker daemon is not running") + } + } + + return serverResp, errors.Wrap(err, "error during connect") + } + + if resp != nil { + serverResp.statusCode = resp.StatusCode + serverResp.body = resp.Body + serverResp.header = resp.Header + } + return serverResp, nil +} + +func (cli *Client) checkResponseErr(serverResp serverResponse) error { + if serverResp.statusCode >= 200 && serverResp.statusCode < 400 { + return nil + } + + var body []byte + var err error + if serverResp.body != nil { + bodyMax := 1 * 1024 * 1024 // 1 MiB + bodyR := &io.LimitedReader{ + R: serverResp.body, + N: int64(bodyMax), + } + body, err = io.ReadAll(bodyR) + if err != nil { + return err + } + if bodyR.N == 0 { + return fmt.Errorf("request returned %s with a message (> %d bytes) for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), bodyMax, serverResp.reqURL) + } + } + if len(body) == 0 { + return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL) + } + + var ct string + if serverResp.header != nil { + ct = serverResp.header.Get("Content-Type") + } + + var errorMessage string + if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && ct == "application/json" { + var errorResponse types.ErrorResponse + if err := json.Unmarshal(body, &errorResponse); err != nil { + return errors.Wrap(err, "Error reading JSON") + } + errorMessage = strings.TrimSpace(errorResponse.Message) + } else { + errorMessage = strings.TrimSpace(string(body)) + } + + return errors.Wrap(errors.New(errorMessage), "Error response from daemon") +} + 
+func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request { + // Add CLI Config's HTTP Headers BEFORE we set the Docker headers + // then the user can't change OUR headers + for k, v := range cli.customHTTPHeaders { + if versions.LessThan(cli.version, "1.25") && http.CanonicalHeaderKey(k) == "User-Agent" { + continue + } + req.Header.Set(k, v) + } + + for k, v := range headers { + req.Header[http.CanonicalHeaderKey(k)] = v + } + return req +} + +func encodeData(data interface{}) (*bytes.Buffer, error) { + params := bytes.NewBuffer(nil) + if data != nil { + if err := json.NewEncoder(params).Encode(data); err != nil { + return nil, err + } + } + return params, nil +} + +func ensureReaderClosed(response serverResponse) { + if response.body != nil { + // Drain up to 512 bytes and close the body to let the Transport reuse the connection + io.CopyN(io.Discard, response.body, 512) + response.body.Close() + } +} diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go new file mode 100644 index 000000000..c65d38a19 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_create.go @@ -0,0 +1,25 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" +) + +// SecretCreate creates a new secret. +func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { + var response types.SecretCreateResponse + if err := cli.NewVersionError("1.25", "secret create"); err != nil { + return response, err + } + resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go new file mode 100644 index 000000000..5906874b1 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_inspect.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/docker/docker/api/types/swarm" +) + +// SecretInspectWithRaw returns the secret information with raw data +func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) { + if err := cli.NewVersionError("1.25", "secret inspect"); err != nil { + return swarm.Secret{}, nil, err + } + if id == "" { + return swarm.Secret{}, nil, objectNotFoundError{object: "secret", id: id} + } + resp, err := cli.get(ctx, "/secrets/"+id, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return swarm.Secret{}, nil, err + } + + body, err := io.ReadAll(resp.body) + if err != nil { + return swarm.Secret{}, nil, err + } + + var secret swarm.Secret + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&secret) + + return secret, body, err +} diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go new file mode 100644 index 000000000..a0289c9f4 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_list.go @@ -0,0 +1,38 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + 
"github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" +) + +// SecretList returns the list of secrets. +func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { + if err := cli.NewVersionError("1.25", "secret list"); err != nil { + return nil, err + } + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/secrets", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, err + } + + var secrets []swarm.Secret + err = json.NewDecoder(resp.body).Decode(&secrets) + return secrets, err +} diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go new file mode 100644 index 000000000..f47f68b6e --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_remove.go @@ -0,0 +1,13 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// SecretRemove removes a secret. +func (cli *Client) SecretRemove(ctx context.Context, id string) error { + if err := cli.NewVersionError("1.25", "secret remove"); err != nil { + return err + } + resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) + defer ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/docker/docker/client/secret_update.go new file mode 100644 index 000000000..2e939e8ce --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_update.go @@ -0,0 +1,20 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types/swarm" +) + +// SecretUpdate attempts to update a secret. +func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { + if err := cli.NewVersionError("1.25", "secret update"); err != nil { + return err + } + query := url.Values{} + query.Set("version", version.String()) + resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go new file mode 100644 index 000000000..23024d0f8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_create.go @@ -0,0 +1,178 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// ServiceCreate creates a new service. 
+func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) { + var response types.ServiceCreateResponse + headers := map[string][]string{ + "version": {cli.version}, + } + + if options.EncodedRegistryAuth != "" { + headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth} + } + + // Make sure containerSpec is not nil when no runtime is set or the runtime is set to container + if service.TaskTemplate.ContainerSpec == nil && (service.TaskTemplate.Runtime == "" || service.TaskTemplate.Runtime == swarm.RuntimeContainer) { + service.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{} + } + + if err := validateServiceSpec(service); err != nil { + return response, err + } + + // ensure that the image is tagged + var resolveWarning string + switch { + case service.TaskTemplate.ContainerSpec != nil: + if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { + service.TaskTemplate.ContainerSpec.Image = taggedImg + } + if options.QueryRegistry { + resolveWarning = resolveContainerSpecImage(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) + } + case service.TaskTemplate.PluginSpec != nil: + if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" { + service.TaskTemplate.PluginSpec.Remote = taggedImg + } + if options.QueryRegistry { + resolveWarning = resolvePluginSpecRemote(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) + } + } + + resp, err := cli.post(ctx, "/services/create", nil, service, headers) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + if resolveWarning != "" { + response.Warnings = append(response.Warnings, resolveWarning) + } + + return response, err +} + +func resolveContainerSpecImage(ctx context.Context, cli DistributionAPIClient, taskSpec *swarm.TaskSpec, encodedAuth string) string { + var warning string + if img, imgPlatforms, err := imageDigestAndPlatforms(ctx, cli, taskSpec.ContainerSpec.Image, encodedAuth); err != nil { + warning = digestWarning(taskSpec.ContainerSpec.Image) + } else { + taskSpec.ContainerSpec.Image = img + if len(imgPlatforms) > 0 { + if taskSpec.Placement == nil { + taskSpec.Placement = &swarm.Placement{} + } + taskSpec.Placement.Platforms = imgPlatforms + } + } + return warning +} + +func resolvePluginSpecRemote(ctx context.Context, cli DistributionAPIClient, taskSpec *swarm.TaskSpec, encodedAuth string) string { + var warning string + if img, imgPlatforms, err := imageDigestAndPlatforms(ctx, cli, taskSpec.PluginSpec.Remote, encodedAuth); err != nil { + warning = digestWarning(taskSpec.PluginSpec.Remote) + } else { + taskSpec.PluginSpec.Remote = img + if len(imgPlatforms) > 0 { + if taskSpec.Placement == nil { + taskSpec.Placement = &swarm.Placement{} + } + taskSpec.Placement.Platforms = imgPlatforms + } + } + return warning +} + +func imageDigestAndPlatforms(ctx context.Context, cli DistributionAPIClient, image, encodedAuth string) (string, []swarm.Platform, error) { + distributionInspect, err := cli.DistributionInspect(ctx, image, encodedAuth) + var platforms []swarm.Platform + if err != nil { + return "", nil, err + } + + imageWithDigest := imageWithDigestString(image, distributionInspect.Descriptor.Digest) + + if len(distributionInspect.Platforms) > 0 { + platforms = make([]swarm.Platform, 0, len(distributionInspect.Platforms)) + for _, p := range 
distributionInspect.Platforms { + // clear architecture field for arm. This is a temporary patch to address + // https://github.com/docker/swarmkit/issues/2294. The issue is that while + // image manifests report "arm" as the architecture, the node reports + // something like "armv7l" (includes the variant), which causes arm images + // to stop working with swarm mode. This patch removes the architecture + // constraint for arm images to ensure tasks get scheduled. + arch := p.Architecture + if strings.ToLower(arch) == "arm" { + arch = "" + } + platforms = append(platforms, swarm.Platform{ + Architecture: arch, + OS: p.OS, + }) + } + } + return imageWithDigest, platforms, err +} + +// imageWithDigestString takes an image string and a digest, and updates +// the image string if it didn't originally contain a digest. It returns +// image unmodified in other situations. +func imageWithDigestString(image string, dgst digest.Digest) string { + namedRef, err := reference.ParseNormalizedNamed(image) + if err == nil { + if _, isCanonical := namedRef.(reference.Canonical); !isCanonical { + // ensure that image gets a default tag if none is provided + img, err := reference.WithDigest(namedRef, dgst) + if err == nil { + return reference.FamiliarString(img) + } + } + } + return image +} + +// imageWithTagString takes an image string, and returns a tagged image +// string, adding a 'latest' tag if one was not provided. It returns an +// empty string if a canonical reference was provided +func imageWithTagString(image string) string { + namedRef, err := reference.ParseNormalizedNamed(image) + if err == nil { + return reference.FamiliarString(reference.TagNameOnly(namedRef)) + } + return "" +} + +// digestWarning constructs a formatted warning string using the +// image name that could not be pinned by digest. The formatting +// is hardcoded, but could me made smarter in the future +func digestWarning(image string) string { + return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image) +} + +func validateServiceSpec(s swarm.ServiceSpec) error { + if s.TaskTemplate.ContainerSpec != nil && s.TaskTemplate.PluginSpec != nil { + return errors.New("must not specify both a container spec and a plugin spec in the task template") + } + if s.TaskTemplate.PluginSpec != nil && s.TaskTemplate.Runtime != swarm.RuntimePlugin { + return errors.New("mismatched runtime with plugin spec") + } + if s.TaskTemplate.ContainerSpec != nil && (s.TaskTemplate.Runtime != "" && s.TaskTemplate.Runtime != swarm.RuntimeContainer) { + return errors.New("mismatched runtime with container spec") + } + return nil +} diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go new file mode 100644 index 000000000..cee020c98 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_inspect.go @@ -0,0 +1,37 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" +) + +// ServiceInspectWithRaw returns the service information and the raw data. 
+func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) { + if serviceID == "" { + return swarm.Service{}, nil, objectNotFoundError{object: "service", id: serviceID} + } + query := url.Values{} + query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults)) + serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return swarm.Service{}, nil, err + } + + body, err := io.ReadAll(serverResp.body) + if err != nil { + return swarm.Service{}, nil, err + } + + var response swarm.Service + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/service_list.go b/vendor/github.com/docker/docker/client/service_list.go new file mode 100644 index 000000000..f97ec75a5 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_list.go @@ -0,0 +1,39 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" +) + +// ServiceList returns the list of services. +func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + if options.Status { + query.Set("status", "true") + } + + resp, err := cli.get(ctx, "/services", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, err + } + + var services []swarm.Service + err = json.NewDecoder(resp.body).Decode(&services) + return services, err +} diff --git a/vendor/github.com/docker/docker/client/service_logs.go b/vendor/github.com/docker/docker/client/service_logs.go new file mode 100644 index 000000000..906fd4059 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_logs.go @@ -0,0 +1,52 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "time" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" + "github.com/pkg/errors" +) + +// ServiceLogs returns the logs generated by a service in an io.ReadCloser. +// It's up to the caller to close the stream. 
+func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, errors.Wrap(err, `invalid value for "since"`) + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/services/"+serviceID+"/logs", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/service_remove.go b/vendor/github.com/docker/docker/client/service_remove.go new file mode 100644 index 000000000..2c46326eb --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_remove.go @@ -0,0 +1,10 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// ServiceRemove kills and removes a service. +func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error { + resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) + defer ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go new file mode 100644 index 000000000..8014b8625 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_update.go @@ -0,0 +1,74 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" +) + +// ServiceUpdate updates a Service. The version number is required to avoid conflicting writes. +// It should be the value as set *before* the update. You can find this value in the Meta field +// of swarm.Service, which can be found using ServiceInspectWithRaw. 
+func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { + var ( + query = url.Values{} + response = types.ServiceUpdateResponse{} + ) + + headers := map[string][]string{ + "version": {cli.version}, + } + + if options.EncodedRegistryAuth != "" { + headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth} + } + + if options.RegistryAuthFrom != "" { + query.Set("registryAuthFrom", options.RegistryAuthFrom) + } + + if options.Rollback != "" { + query.Set("rollback", options.Rollback) + } + + query.Set("version", version.String()) + + if err := validateServiceSpec(service); err != nil { + return response, err + } + + // ensure that the image is tagged + var resolveWarning string + switch { + case service.TaskTemplate.ContainerSpec != nil: + if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { + service.TaskTemplate.ContainerSpec.Image = taggedImg + } + if options.QueryRegistry { + resolveWarning = resolveContainerSpecImage(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) + } + case service.TaskTemplate.PluginSpec != nil: + if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" { + service.TaskTemplate.PluginSpec.Remote = taggedImg + } + if options.QueryRegistry { + resolveWarning = resolvePluginSpecRemote(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) + } + } + + resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + if resolveWarning != "" { + response.Warnings = append(response.Warnings, resolveWarning) + } + + return response, err +} diff --git a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go new file mode 100644 index 000000000..19f59dd58 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" +) + +// SwarmGetUnlockKey retrieves the swarm's unlock key. +func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) { + serverResp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return types.SwarmUnlockKeyResponse{}, err + } + + var response types.SwarmUnlockKeyResponse + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/swarm_init.go b/vendor/github.com/docker/docker/client/swarm_init.go new file mode 100644 index 000000000..da3c1637e --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_init.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types/swarm" +) + +// SwarmInit initializes the swarm. 
+func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) { + serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return "", err + } + + var response string + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/swarm_inspect.go b/vendor/github.com/docker/docker/client/swarm_inspect.go new file mode 100644 index 000000000..b52b67a88 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_inspect.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types/swarm" +) + +// SwarmInspect inspects the swarm. +func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) { + serverResp, err := cli.get(ctx, "/swarm", nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return swarm.Swarm{}, err + } + + var response swarm.Swarm + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/swarm_join.go b/vendor/github.com/docker/docker/client/swarm_join.go new file mode 100644 index 000000000..a1cf0455d --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_join.go @@ -0,0 +1,14 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types/swarm" +) + +// SwarmJoin joins the swarm. +func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error { + resp, err := cli.post(ctx, "/swarm/join", nil, req, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/swarm_leave.go b/vendor/github.com/docker/docker/client/swarm_leave.go new file mode 100644 index 000000000..90ca84b36 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_leave.go @@ -0,0 +1,17 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" +) + +// SwarmLeave leaves the swarm. +func (cli *Client) SwarmLeave(ctx context.Context, force bool) error { + query := url.Values{} + if force { + query.Set("force", "1") + } + resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/swarm_unlock.go b/vendor/github.com/docker/docker/client/swarm_unlock.go new file mode 100644 index 000000000..d2412f7d4 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_unlock.go @@ -0,0 +1,14 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types/swarm" +) + +// SwarmUnlock unlocks locked swarm. +func (cli *Client) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error { + serverResp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil) + ensureReaderClosed(serverResp) + return err +} diff --git a/vendor/github.com/docker/docker/client/swarm_update.go b/vendor/github.com/docker/docker/client/swarm_update.go new file mode 100644 index 000000000..9fde7d75e --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_update.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" +) + +// SwarmUpdate updates the swarm. 
+func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error { + query := url.Values{} + query.Set("version", version.String()) + query.Set("rotateWorkerToken", strconv.FormatBool(flags.RotateWorkerToken)) + query.Set("rotateManagerToken", strconv.FormatBool(flags.RotateManagerToken)) + query.Set("rotateManagerUnlockKey", strconv.FormatBool(flags.RotateManagerUnlockKey)) + resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/docker/docker/client/task_inspect.go new file mode 100644 index 000000000..dde1f6c59 --- /dev/null +++ b/vendor/github.com/docker/docker/client/task_inspect.go @@ -0,0 +1,32 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/docker/docker/api/types/swarm" +) + +// TaskInspectWithRaw returns the task information and its raw representation. +func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) { + if taskID == "" { + return swarm.Task{}, nil, objectNotFoundError{object: "task", id: taskID} + } + serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return swarm.Task{}, nil, err + } + + body, err := io.ReadAll(serverResp.body) + if err != nil { + return swarm.Task{}, nil, err + } + + var response swarm.Task + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/task_list.go b/vendor/github.com/docker/docker/client/task_list.go new file mode 100644 index 000000000..4869b4449 --- /dev/null +++ b/vendor/github.com/docker/docker/client/task_list.go @@ -0,0 +1,35 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" +) + +// TaskList returns the list of tasks. +func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/tasks", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, err + } + + var tasks []swarm.Task + err = json.NewDecoder(resp.body).Decode(&tasks) + return tasks, err +} diff --git a/vendor/github.com/docker/docker/client/task_logs.go b/vendor/github.com/docker/docker/client/task_logs.go new file mode 100644 index 000000000..6222fab57 --- /dev/null +++ b/vendor/github.com/docker/docker/client/task_logs.go @@ -0,0 +1,51 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "time" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" +) + +// TaskLogs returns the logs generated by a task in an io.ReadCloser. +// It's up to the caller to close the stream. 
+func (cli *Client) TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/tasks/"+taskID+"/logs", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/transport.go b/vendor/github.com/docker/docker/client/transport.go new file mode 100644 index 000000000..554134436 --- /dev/null +++ b/vendor/github.com/docker/docker/client/transport.go @@ -0,0 +1,17 @@ +package client // import "github.com/docker/docker/client" + +import ( + "crypto/tls" + "net/http" +) + +// resolveTLSConfig attempts to resolve the TLS configuration from the +// RoundTripper. +func resolveTLSConfig(transport http.RoundTripper) *tls.Config { + switch tr := transport.(type) { + case *http.Transport: + return tr.TLSClientConfig + default: + return nil + } +} diff --git a/vendor/github.com/docker/docker/client/utils.go b/vendor/github.com/docker/docker/client/utils.go new file mode 100644 index 000000000..7f3ff44eb --- /dev/null +++ b/vendor/github.com/docker/docker/client/utils.go @@ -0,0 +1,34 @@ +package client // import "github.com/docker/docker/client" + +import ( + "net/url" + "regexp" + + "github.com/docker/docker/api/types/filters" +) + +var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`) + +// getDockerOS returns the operating system based on the server header from the daemon. +func getDockerOS(serverHeader string) string { + var osType string + matches := headerRegexp.FindStringSubmatch(serverHeader) + if len(matches) > 0 { + osType = matches[1] + } + return osType +} + +// getFiltersQuery returns a url query with "filters" query term, based on the +// filters provided. +func getFiltersQuery(f filters.Args) (url.Values, error) { + query := url.Values{} + if f.Len() > 0 { + filterJSON, err := filters.ToJSON(f) + if err != nil { + return query, err + } + query.Set("filters", filterJSON) + } + return query, nil +} diff --git a/vendor/github.com/docker/docker/client/version.go b/vendor/github.com/docker/docker/client/version.go new file mode 100644 index 000000000..8f17ff4e8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/version.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" +) + +// ServerVersion returns information of the docker client and server host. 
+func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) { + resp, err := cli.get(ctx, "/version", nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return types.Version{}, err + } + + var server types.Version + err = json.NewDecoder(resp.body).Decode(&server) + return server, err +} diff --git a/vendor/github.com/docker/docker/client/volume_create.go b/vendor/github.com/docker/docker/client/volume_create.go new file mode 100644 index 000000000..b3b182437 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_create.go @@ -0,0 +1,20 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types/volume" +) + +// VolumeCreate creates a volume in the docker host. +func (cli *Client) VolumeCreate(ctx context.Context, options volume.CreateOptions) (volume.Volume, error) { + var vol volume.Volume + resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) + defer ensureReaderClosed(resp) + if err != nil { + return vol, err + } + err = json.NewDecoder(resp.body).Decode(&vol) + return vol, err +} diff --git a/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/docker/docker/client/volume_inspect.go new file mode 100644 index 000000000..b3ba4e604 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_inspect.go @@ -0,0 +1,38 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io" + + "github.com/docker/docker/api/types/volume" +) + +// VolumeInspect returns the information about a specific volume in the docker host. +func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (volume.Volume, error) { + vol, _, err := cli.VolumeInspectWithRaw(ctx, volumeID) + return vol, err +} + +// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation +func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (volume.Volume, []byte, error) { + if volumeID == "" { + return volume.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID} + } + + var vol volume.Volume + resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return vol, nil, err + } + + body, err := io.ReadAll(resp.body) + if err != nil { + return vol, nil, err + } + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&vol) + return vol, body, err +} diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go new file mode 100644 index 000000000..d8204f8db --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_list.go @@ -0,0 +1,33 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/volume" +) + +// VolumeList returns the volumes configured in the docker host. 
+func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volume.ListResponse, error) { + var volumes volume.ListResponse + query := url.Values{} + + if filter.Len() > 0 { + //nolint:staticcheck // ignore SA1019 for old code + filterJSON, err := filters.ToParamWithVersion(cli.version, filter) + if err != nil { + return volumes, err + } + query.Set("filters", filterJSON) + } + resp, err := cli.get(ctx, "/volumes", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return volumes, err + } + + err = json.NewDecoder(resp.body).Decode(&volumes) + return volumes, err +} diff --git a/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/docker/docker/client/volume_prune.go new file mode 100644 index 000000000..6e324708f --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_prune.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// VolumesPrune requests the daemon to delete unused data +func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (types.VolumesPruneReport, error) { + var report types.VolumesPruneReport + + if err := cli.NewVersionError("1.25", "volume prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/volumes/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return report, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving volume prune report: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go new file mode 100644 index 000000000..1f2643836 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_remove.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types/versions" +) + +// VolumeRemove removes a volume from the docker host. +func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error { + query := url.Values{} + if versions.GreaterThanOrEqualTo(cli.version, "1.25") { + if force { + query.Set("force", "1") + } + } + resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil) + defer ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/volume_update.go b/vendor/github.com/docker/docker/client/volume_update.go new file mode 100644 index 000000000..33bd31e53 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_update.go @@ -0,0 +1,24 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/volume" +) + +// VolumeUpdate updates a volume. This only works for Cluster Volumes, and +// only some fields can be updated. 
+func (cli *Client) VolumeUpdate(ctx context.Context, volumeID string, version swarm.Version, options volume.UpdateOptions) error { + if err := cli.NewVersionError("1.42", "volume update"); err != nil { + return err + } + + query := url.Values{} + query.Set("version", version.String()) + + resp, err := cli.put(ctx, "/volumes/"+volumeID, query, options, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/errdefs/defs.go b/vendor/github.com/docker/docker/errdefs/defs.go new file mode 100644 index 000000000..61e7456b4 --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/defs.go @@ -0,0 +1,69 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +// ErrNotFound signals that the requested object doesn't exist +type ErrNotFound interface { + NotFound() +} + +// ErrInvalidParameter signals that the user input is invalid +type ErrInvalidParameter interface { + InvalidParameter() +} + +// ErrConflict signals that some internal state conflicts with the requested action and can't be performed. +// A change in state should be able to clear this error. +type ErrConflict interface { + Conflict() +} + +// ErrUnauthorized is used to signify that the user is not authorized to perform a specific action +type ErrUnauthorized interface { + Unauthorized() +} + +// ErrUnavailable signals that the requested action/subsystem is not available. +type ErrUnavailable interface { + Unavailable() +} + +// ErrForbidden signals that the requested action cannot be performed under any circumstances. +// When a ErrForbidden is returned, the caller should never retry the action. +type ErrForbidden interface { + Forbidden() +} + +// ErrSystem signals that some internal error occurred. +// An example of this would be a failed mount request. +type ErrSystem interface { + System() +} + +// ErrNotModified signals that an action can't be performed because it's already in the desired state +type ErrNotModified interface { + NotModified() +} + +// ErrNotImplemented signals that the requested action/feature is not implemented on the system as configured. +type ErrNotImplemented interface { + NotImplemented() +} + +// ErrUnknown signals that the kind of error that occurred is not known. +type ErrUnknown interface { + Unknown() +} + +// ErrCancelled signals that the action was cancelled. +type ErrCancelled interface { + Cancelled() +} + +// ErrDeadline signals that the deadline was reached before the action completed. +type ErrDeadline interface { + DeadlineExceeded() +} + +// ErrDataLoss indicates that data was lost or there is data corruption. +type ErrDataLoss interface { + DataLoss() +} diff --git a/vendor/github.com/docker/docker/errdefs/doc.go b/vendor/github.com/docker/docker/errdefs/doc.go new file mode 100644 index 000000000..c211f174f --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/doc.go @@ -0,0 +1,8 @@ +// Package errdefs defines a set of error interfaces that packages should use for communicating classes of errors. +// Errors that cross the package boundary should implement one (and only one) of these interfaces. +// +// Packages should not reference these interfaces directly, only implement them. +// To check if a particular error implements one of these interfaces, there are helper +// functions provided (e.g. `Is`) which can be used rather than asserting the interfaces directly. +// If you must assert on these interfaces, be sure to check the causal chain (`err.Cause()`). 
+package errdefs // import "github.com/docker/docker/errdefs" diff --git a/vendor/github.com/docker/docker/errdefs/helpers.go b/vendor/github.com/docker/docker/errdefs/helpers.go new file mode 100644 index 000000000..fe06fb6f7 --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/helpers.go @@ -0,0 +1,279 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +import "context" + +type errNotFound struct{ error } + +func (errNotFound) NotFound() {} + +func (e errNotFound) Cause() error { + return e.error +} + +func (e errNotFound) Unwrap() error { + return e.error +} + +// NotFound is a helper to create an error of the class with the same name from any error type +func NotFound(err error) error { + if err == nil || IsNotFound(err) { + return err + } + return errNotFound{err} +} + +type errInvalidParameter struct{ error } + +func (errInvalidParameter) InvalidParameter() {} + +func (e errInvalidParameter) Cause() error { + return e.error +} + +func (e errInvalidParameter) Unwrap() error { + return e.error +} + +// InvalidParameter is a helper to create an error of the class with the same name from any error type +func InvalidParameter(err error) error { + if err == nil || IsInvalidParameter(err) { + return err + } + return errInvalidParameter{err} +} + +type errConflict struct{ error } + +func (errConflict) Conflict() {} + +func (e errConflict) Cause() error { + return e.error +} + +func (e errConflict) Unwrap() error { + return e.error +} + +// Conflict is a helper to create an error of the class with the same name from any error type +func Conflict(err error) error { + if err == nil || IsConflict(err) { + return err + } + return errConflict{err} +} + +type errUnauthorized struct{ error } + +func (errUnauthorized) Unauthorized() {} + +func (e errUnauthorized) Cause() error { + return e.error +} + +func (e errUnauthorized) Unwrap() error { + return e.error +} + +// Unauthorized is a helper to create an error of the class with the same name from any error type +func Unauthorized(err error) error { + if err == nil || IsUnauthorized(err) { + return err + } + return errUnauthorized{err} +} + +type errUnavailable struct{ error } + +func (errUnavailable) Unavailable() {} + +func (e errUnavailable) Cause() error { + return e.error +} + +func (e errUnavailable) Unwrap() error { + return e.error +} + +// Unavailable is a helper to create an error of the class with the same name from any error type +func Unavailable(err error) error { + if err == nil || IsUnavailable(err) { + return err + } + return errUnavailable{err} +} + +type errForbidden struct{ error } + +func (errForbidden) Forbidden() {} + +func (e errForbidden) Cause() error { + return e.error +} + +func (e errForbidden) Unwrap() error { + return e.error +} + +// Forbidden is a helper to create an error of the class with the same name from any error type +func Forbidden(err error) error { + if err == nil || IsForbidden(err) { + return err + } + return errForbidden{err} +} + +type errSystem struct{ error } + +func (errSystem) System() {} + +func (e errSystem) Cause() error { + return e.error +} + +func (e errSystem) Unwrap() error { + return e.error +} + +// System is a helper to create an error of the class with the same name from any error type +func System(err error) error { + if err == nil || IsSystem(err) { + return err + } + return errSystem{err} +} + +type errNotModified struct{ error } + +func (errNotModified) NotModified() {} + +func (e errNotModified) Cause() error { + return e.error +} + +func (e errNotModified) 
Unwrap() error { + return e.error +} + +// NotModified is a helper to create an error of the class with the same name from any error type +func NotModified(err error) error { + if err == nil || IsNotModified(err) { + return err + } + return errNotModified{err} +} + +type errNotImplemented struct{ error } + +func (errNotImplemented) NotImplemented() {} + +func (e errNotImplemented) Cause() error { + return e.error +} + +func (e errNotImplemented) Unwrap() error { + return e.error +} + +// NotImplemented is a helper to create an error of the class with the same name from any error type +func NotImplemented(err error) error { + if err == nil || IsNotImplemented(err) { + return err + } + return errNotImplemented{err} +} + +type errUnknown struct{ error } + +func (errUnknown) Unknown() {} + +func (e errUnknown) Cause() error { + return e.error +} + +func (e errUnknown) Unwrap() error { + return e.error +} + +// Unknown is a helper to create an error of the class with the same name from any error type +func Unknown(err error) error { + if err == nil || IsUnknown(err) { + return err + } + return errUnknown{err} +} + +type errCancelled struct{ error } + +func (errCancelled) Cancelled() {} + +func (e errCancelled) Cause() error { + return e.error +} + +func (e errCancelled) Unwrap() error { + return e.error +} + +// Cancelled is a helper to create an error of the class with the same name from any error type +func Cancelled(err error) error { + if err == nil || IsCancelled(err) { + return err + } + return errCancelled{err} +} + +type errDeadline struct{ error } + +func (errDeadline) DeadlineExceeded() {} + +func (e errDeadline) Cause() error { + return e.error +} + +func (e errDeadline) Unwrap() error { + return e.error +} + +// Deadline is a helper to create an error of the class with the same name from any error type +func Deadline(err error) error { + if err == nil || IsDeadline(err) { + return err + } + return errDeadline{err} +} + +type errDataLoss struct{ error } + +func (errDataLoss) DataLoss() {} + +func (e errDataLoss) Cause() error { + return e.error +} + +func (e errDataLoss) Unwrap() error { + return e.error +} + +// DataLoss is a helper to create an error of the class with the same name from any error type +func DataLoss(err error) error { + if err == nil || IsDataLoss(err) { + return err + } + return errDataLoss{err} +} + +// FromContext returns the error class from the passed in context +func FromContext(ctx context.Context) error { + e := ctx.Err() + if e == nil { + return nil + } + + if e == context.Canceled { + return Cancelled(e) + } + if e == context.DeadlineExceeded { + return Deadline(e) + } + return Unknown(e) +} diff --git a/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/docker/docker/errdefs/http_helpers.go new file mode 100644 index 000000000..77bda389d --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/http_helpers.go @@ -0,0 +1,46 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +import ( + "net/http" +) + +// FromStatusCode creates an errdef error, based on the provided HTTP status-code +func FromStatusCode(err error, statusCode int) error { + if err == nil { + return nil + } + switch statusCode { + case http.StatusNotFound: + err = NotFound(err) + case http.StatusBadRequest: + err = InvalidParameter(err) + case http.StatusConflict: + err = Conflict(err) + case http.StatusUnauthorized: + err = Unauthorized(err) + case http.StatusServiceUnavailable: + err = Unavailable(err) + case http.StatusForbidden: + err = 
Forbidden(err) + case http.StatusNotModified: + err = NotModified(err) + case http.StatusNotImplemented: + err = NotImplemented(err) + case http.StatusInternalServerError: + if !IsSystem(err) && !IsUnknown(err) && !IsDataLoss(err) && !IsDeadline(err) && !IsCancelled(err) { + err = System(err) + } + default: + switch { + case statusCode >= 200 && statusCode < 400: + // it's a client error + case statusCode >= 400 && statusCode < 500: + err = InvalidParameter(err) + case statusCode >= 500 && statusCode < 600: + err = System(err) + default: + err = Unknown(err) + } + } + return err +} diff --git a/vendor/github.com/docker/docker/errdefs/is.go b/vendor/github.com/docker/docker/errdefs/is.go new file mode 100644 index 000000000..3abf07d0c --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/is.go @@ -0,0 +1,107 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +type causer interface { + Cause() error +} + +func getImplementer(err error) error { + switch e := err.(type) { + case + ErrNotFound, + ErrInvalidParameter, + ErrConflict, + ErrUnauthorized, + ErrUnavailable, + ErrForbidden, + ErrSystem, + ErrNotModified, + ErrNotImplemented, + ErrCancelled, + ErrDeadline, + ErrDataLoss, + ErrUnknown: + return err + case causer: + return getImplementer(e.Cause()) + default: + return err + } +} + +// IsNotFound returns if the passed in error is an ErrNotFound +func IsNotFound(err error) bool { + _, ok := getImplementer(err).(ErrNotFound) + return ok +} + +// IsInvalidParameter returns if the passed in error is an ErrInvalidParameter +func IsInvalidParameter(err error) bool { + _, ok := getImplementer(err).(ErrInvalidParameter) + return ok +} + +// IsConflict returns if the passed in error is an ErrConflict +func IsConflict(err error) bool { + _, ok := getImplementer(err).(ErrConflict) + return ok +} + +// IsUnauthorized returns if the passed in error is an ErrUnauthorized +func IsUnauthorized(err error) bool { + _, ok := getImplementer(err).(ErrUnauthorized) + return ok +} + +// IsUnavailable returns if the passed in error is an ErrUnavailable +func IsUnavailable(err error) bool { + _, ok := getImplementer(err).(ErrUnavailable) + return ok +} + +// IsForbidden returns if the passed in error is an ErrForbidden +func IsForbidden(err error) bool { + _, ok := getImplementer(err).(ErrForbidden) + return ok +} + +// IsSystem returns if the passed in error is an ErrSystem +func IsSystem(err error) bool { + _, ok := getImplementer(err).(ErrSystem) + return ok +} + +// IsNotModified returns if the passed in error is a NotModified error +func IsNotModified(err error) bool { + _, ok := getImplementer(err).(ErrNotModified) + return ok +} + +// IsNotImplemented returns if the passed in error is an ErrNotImplemented +func IsNotImplemented(err error) bool { + _, ok := getImplementer(err).(ErrNotImplemented) + return ok +} + +// IsUnknown returns if the passed in error is an ErrUnknown +func IsUnknown(err error) bool { + _, ok := getImplementer(err).(ErrUnknown) + return ok +} + +// IsCancelled returns if the passed in error is an ErrCancelled +func IsCancelled(err error) bool { + _, ok := getImplementer(err).(ErrCancelled) + return ok +} + +// IsDeadline returns if the passed in error is an ErrDeadline +func IsDeadline(err error) bool { + _, ok := getImplementer(err).(ErrDeadline) + return ok +} + +// IsDataLoss returns if the passed in error is an ErrDataLoss +func IsDataLoss(err error) bool { + _, ok := getImplementer(err).(ErrDataLoss) + return ok +} diff --git 
a/vendor/github.com/docker/docker/pkg/archive/README.md b/vendor/github.com/docker/docker/pkg/archive/README.md new file mode 100644 index 000000000..7307d9694 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/README.md @@ -0,0 +1 @@ +This code provides helper functions for dealing with archive files. diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go new file mode 100644 index 000000000..3af7c3a65 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive.go @@ -0,0 +1,1474 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "bufio" + "bytes" + "compress/bzip2" + "compress/gzip" + "context" + "encoding/binary" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "syscall" + "time" + + "github.com/containerd/containerd/pkg/userns" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" + "github.com/klauspost/compress/zstd" + "github.com/moby/patternmatcher" + "github.com/moby/sys/sequential" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + exec "golang.org/x/sys/execabs" +) + +// ImpliedDirectoryMode represents the mode (Unix permissions) applied to directories that are implied by files in a +// tar, but that do not have their own header entry. +// +// The permissions mask is stored in a constant instead of locally to ensure that magic numbers do not +// proliferate in the codebase. The default value 0755 has been selected based on the default umask of 0022, and +// a convention of mkdir(1) calling mkdir(2) with permissions of 0777, resulting in a final value of 0755. +// +// This value is currently implementation-defined, and not captured in any cross-runtime specification. Thus, it is +// subject to change in Moby at any time -- image authors who require consistent or known directory permissions +// should explicitly control them by ensuring that header entries exist for any applicable path. +const ImpliedDirectoryMode = 0755 + +type ( + // Compression is the state represents if compressed or not. + Compression int + // WhiteoutFormat is the format of whiteouts unpacked + WhiteoutFormat int + + // TarOptions wraps the tar options. + TarOptions struct { + IncludeFiles []string + ExcludePatterns []string + Compression Compression + NoLchown bool + IDMap idtools.IdentityMapping + ChownOpts *idtools.Identity + IncludeSourceDir bool + // WhiteoutFormat is the expected on disk format for whiteout files. + // This format will be converted to the standard format on pack + // and from the standard format on unpack. + WhiteoutFormat WhiteoutFormat + // When unpacking, specifies whether overwriting a directory with a + // non-directory is allowed and vice versa. + NoOverwriteDirNonDir bool + // For each include when creating an archive, the included name will be + // replaced with the matching name from this map. + RebaseNames map[string]string + InUserNS bool + } +) + +// Archiver implements the Archiver interface and allows the reuse of most utility functions of +// this package with a pluggable Untar function. Also, to facilitate the passing of specific id +// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. 
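+//
+// A small sketch of the intended use (mapping and paths hypothetical, error
+// handling elided):
+//
+//	a := &Archiver{Untar: Untar, IDMapping: myIDMapping}
+//	err := a.CopyWithTar("/var/lib/src", "/var/lib/dst")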
+type Archiver struct { + Untar func(io.Reader, string, *TarOptions) error + IDMapping idtools.IdentityMapping +} + +// NewDefaultArchiver returns a new Archiver without any IdentityMapping +func NewDefaultArchiver() *Archiver { + return &Archiver{Untar: Untar} +} + +// breakoutError is used to differentiate errors related to breaking out +// When testing archive breakout in the unit tests, this error is expected +// in order for the test to pass. +type breakoutError error + +const ( + // Uncompressed represents the uncompressed. + Uncompressed Compression = iota + // Bzip2 is bzip2 compression algorithm. + Bzip2 + // Gzip is gzip compression algorithm. + Gzip + // Xz is xz compression algorithm. + Xz + // Zstd is zstd compression algorithm. + Zstd +) + +const ( + // AUFSWhiteoutFormat is the default format for whiteouts + AUFSWhiteoutFormat WhiteoutFormat = iota + // OverlayWhiteoutFormat formats whiteout according to the overlay + // standard. + OverlayWhiteoutFormat +) + +const ( + modeISDIR = 040000 // Directory + modeISFIFO = 010000 // FIFO + modeISREG = 0100000 // Regular file + modeISLNK = 0120000 // Symbolic link + modeISBLK = 060000 // Block special file + modeISCHR = 020000 // Character special file + modeISSOCK = 0140000 // Socket +) + +// IsArchivePath checks if the (possibly compressed) file at the given path +// starts with a tar file header. +func IsArchivePath(path string) bool { + file, err := os.Open(path) + if err != nil { + return false + } + defer file.Close() + rdr, err := DecompressStream(file) + if err != nil { + return false + } + defer rdr.Close() + r := tar.NewReader(rdr) + _, err = r.Next() + return err == nil +} + +const ( + zstdMagicSkippableStart = 0x184D2A50 + zstdMagicSkippableMask = 0xFFFFFFF0 +) + +var ( + bzip2Magic = []byte{0x42, 0x5A, 0x68} + gzipMagic = []byte{0x1F, 0x8B, 0x08} + xzMagic = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} + zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} +) + +type matcher = func([]byte) bool + +func magicNumberMatcher(m []byte) matcher { + return func(source []byte) bool { + return bytes.HasPrefix(source, m) + } +} + +// zstdMatcher detects zstd compression algorithm. +// Zstandard compressed data is made of one or more frames. +// There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. +// See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details. +func zstdMatcher() matcher { + return func(source []byte) bool { + if bytes.HasPrefix(source, zstdMagic) { + // Zstandard frame + return true + } + // skippable frame + if len(source) < 8 { + return false + } + // magic number from 0x184D2A50 to 0x184D2A5F. + if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart { + return true + } + return false + } +} + +// DetectCompression detects the compression algorithm of the source. 
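+// For instance, a buffer starting with the gzip magic bytes resolves to Gzip,
+// while unrecognised input falls back to Uncompressed:
+//
+//	_ = DetectCompression([]byte{0x1f, 0x8b, 0x08}) // Gzip
+//	_ = DetectCompression([]byte("plain text"))     // Uncompressed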
+func DetectCompression(source []byte) Compression { + compressionMap := map[Compression]matcher{ + Bzip2: magicNumberMatcher(bzip2Magic), + Gzip: magicNumberMatcher(gzipMagic), + Xz: magicNumberMatcher(xzMagic), + Zstd: zstdMatcher(), + } + for _, compression := range []Compression{Bzip2, Gzip, Xz, Zstd} { + fn := compressionMap[compression] + if fn(source) { + return compression + } + } + return Uncompressed +} + +func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { + args := []string{"xz", "-d", "-c", "-q"} + + return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) +} + +func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { + if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { + noPigz, err := strconv.ParseBool(noPigzEnv) + if err != nil { + logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") + } + if noPigz { + logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) + return gzip.NewReader(buf) + } + } + + unpigzPath, err := exec.LookPath("unpigz") + if err != nil { + logrus.Debugf("unpigz binary not found, falling back to go gzip library") + return gzip.NewReader(buf) + } + + logrus.Debugf("Using %s to decompress", unpigzPath) + + return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) +} + +func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { + return ioutils.NewReadCloserWrapper(readBuf, func() error { + cancel() + return readBuf.Close() + }) +} + +// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. +func DecompressStream(archive io.Reader) (io.ReadCloser, error) { + p := pools.BufioReader32KPool + buf := p.Get(archive) + bs, err := buf.Peek(10) + if err != nil && err != io.EOF { + // Note: we'll ignore any io.EOF error because there are some odd + // cases where the layer.tar file will be empty (zero bytes) and + // that results in an io.EOF from the Peek() call. So, in those + // cases we'll just treat it as a non-compressed stream and + // that means just create an empty layer. + // See Issue 18170 + return nil, err + } + + compression := DetectCompression(bs) + switch compression { + case Uncompressed: + readBufWrapper := p.NewReadCloserWrapper(buf, buf) + return readBufWrapper, nil + case Gzip: + ctx, cancel := context.WithCancel(context.Background()) + + gzReader, err := gzDecompress(ctx, buf) + if err != nil { + cancel() + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) + return wrapReadCloser(readBufWrapper, cancel), nil + case Bzip2: + bz2Reader := bzip2.NewReader(buf) + readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) + return readBufWrapper, nil + case Xz: + ctx, cancel := context.WithCancel(context.Background()) + + xzReader, err := xzDecompress(ctx, buf) + if err != nil { + cancel() + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) + return wrapReadCloser(readBufWrapper, cancel), nil + case Zstd: + zstdReader, err := zstd.NewReader(buf) + if err != nil { + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, zstdReader) + return readBufWrapper, nil + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +// CompressStream compresses the dest with specified compression algorithm. 
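+// Only gzip (and the uncompressed pass-through) are supported for writing; a
+// usage sketch, where closing the returned WriteCloser flushes the stream:
+//
+//	wc, err := CompressStream(dst, Gzip)
+//	if err == nil {
+//		_, _ = io.Copy(wc, src)
+//		_ = wc.Close()
+//	}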
+func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { + p := pools.BufioWriter32KPool + buf := p.Get(dest) + switch compression { + case Uncompressed: + writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) + return writeBufWrapper, nil + case Gzip: + gzWriter := gzip.NewWriter(dest) + writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) + return writeBufWrapper, nil + case Bzip2, Xz: + // archive/bzip2 does not support writing, and there is no xz support at all + // However, this is not a problem as docker only currently generates gzipped tars + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to +// modify the contents or header of an entry in the archive. If the file already +// exists in the archive the TarModifierFunc will be called with the Header and +// a reader which will return the files content. If the file does not exist both +// header and content will be nil. +type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) + +// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the +// tar stream are modified if they match any of the keys in mods. +func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + + go func() { + tarReader := tar.NewReader(inputTarStream) + tarWriter := tar.NewWriter(pipeWriter) + defer inputTarStream.Close() + defer tarWriter.Close() + + modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { + header, data, err := modifier(name, original, tarReader) + switch { + case err != nil: + return err + case header == nil: + return nil + } + + if header.Name == "" { + header.Name = name + } + header.Size = int64(len(data)) + if err := tarWriter.WriteHeader(header); err != nil { + return err + } + if len(data) != 0 { + if _, err := tarWriter.Write(data); err != nil { + return err + } + } + return nil + } + + var err error + var originalHeader *tar.Header + for { + originalHeader, err = tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + pipeWriter.CloseWithError(err) + return + } + + modifier, ok := mods[originalHeader.Name] + if !ok { + // No modifiers for this file, copy the header and data + if err := tarWriter.WriteHeader(originalHeader); err != nil { + pipeWriter.CloseWithError(err) + return + } + if _, err := pools.Copy(tarWriter, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + continue + } + delete(mods, originalHeader.Name) + + if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + // Apply the modifiers that haven't matched any files in the archive + for name, modifier := range mods { + if err := modify(name, nil, modifier, nil); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + pipeWriter.Close() + }() + return pipeReader +} + +// Extension returns the extension of a file that uses the specified compression algorithm. 
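+// For example:
+//
+//	c := Gzip
+//	_ = c.Extension() // "tar.gz"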
+func (compression *Compression) Extension() string { + switch *compression { + case Uncompressed: + return "tar" + case Bzip2: + return "tar.bz2" + case Gzip: + return "tar.gz" + case Xz: + return "tar.xz" + case Zstd: + return "tar.zst" + } + return "" +} + +// nosysFileInfo hides the system-dependent info of the wrapped FileInfo to +// prevent tar.FileInfoHeader from introspecting it and potentially calling into +// glibc. +type nosysFileInfo struct { + os.FileInfo +} + +func (fi nosysFileInfo) Sys() interface{} { + // A Sys value of type *tar.Header is safe as it is system-independent. + // The tar.FileInfoHeader function copies the fields into the returned + // header without performing any OS lookups. + if sys, ok := fi.FileInfo.Sys().(*tar.Header); ok { + return sys + } + return nil +} + +// sysStat, if non-nil, populates hdr from system-dependent fields of fi. +var sysStat func(fi os.FileInfo, hdr *tar.Header) error + +// FileInfoHeaderNoLookups creates a partially-populated tar.Header from fi. +// +// Compared to the archive/tar.FileInfoHeader function, this function is safe to +// call from a chrooted process as it does not populate fields which would +// require operating system lookups. It behaves identically to +// tar.FileInfoHeader when fi is a FileInfo value returned from +// tar.Header.FileInfo(). +// +// When fi is a FileInfo for a native file, such as returned from os.Stat() and +// os.Lstat(), the returned Header value differs from one returned from +// tar.FileInfoHeader in the following ways. The Uname and Gname fields are not +// set as OS lookups would be required to populate them. The AccessTime and +// ChangeTime fields are not currently set (not yet implemented) although that +// is subject to change. Callers which require the AccessTime or ChangeTime +// fields to be zeroed should explicitly zero them out in the returned Header +// value to avoid any compatibility issues in the future. +func FileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) { + hdr, err := tar.FileInfoHeader(nosysFileInfo{fi}, link) + if err != nil { + return nil, err + } + if sysStat != nil { + return hdr, sysStat(fi, hdr) + } + return hdr, nil +} + +// FileInfoHeader creates a populated Header from fi. +// +// Compared to the archive/tar package, this function fills in less information +// but is safe to call from a chrooted process. The AccessTime and ChangeTime +// fields are not set in the returned header, ModTime is truncated to one-second +// precision, and the Uname and Gname fields are only set when fi is a FileInfo +// value returned from tar.Header.FileInfo(). Also, regardless of Go version, +// this function fills file type bits (e.g. hdr.Mode |= modeISDIR), which have +// been deleted since Go 1.9 archive/tar. 
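+//
+// Typical call after an Lstat (path hypothetical, error handling elided):
+//
+//	fi, _ := os.Lstat("/etc/hosts")
+//	hdr, _ := FileInfoHeader("etc/hosts", fi, "")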
+func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { + hdr, err := FileInfoHeaderNoLookups(fi, link) + if err != nil { + return nil, err + } + hdr.Format = tar.FormatPAX + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + hdr.AccessTime = time.Time{} + hdr.ChangeTime = time.Time{} + hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) + hdr.Name = canonicalTarName(name, fi.IsDir()) + return hdr, nil +} + +// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar +// https://github.com/golang/go/commit/66b5a2f +func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { + fm := fi.Mode() + switch { + case fm.IsRegular(): + mode |= modeISREG + case fi.IsDir(): + mode |= modeISDIR + case fm&os.ModeSymlink != 0: + mode |= modeISLNK + case fm&os.ModeDevice != 0: + if fm&os.ModeCharDevice != 0 { + mode |= modeISCHR + } else { + mode |= modeISBLK + } + case fm&os.ModeNamedPipe != 0: + mode |= modeISFIFO + case fm&os.ModeSocket != 0: + mode |= modeISSOCK + } + return mode +} + +// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem +// to a tar header +func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { + const ( + // Values based on linux/include/uapi/linux/capability.h + xattrCapsSz2 = 20 + versionOffset = 3 + vfsCapRevision2 = 2 + vfsCapRevision3 = 3 + ) + capability, _ := system.Lgetxattr(path, "security.capability") + if capability != nil { + length := len(capability) + if capability[versionOffset] == vfsCapRevision3 { + // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no + // sense outside the user namespace the archive is built in. + capability[versionOffset] = vfsCapRevision2 + length = xattrCapsSz2 + } + hdr.Xattrs = make(map[string]string) + hdr.Xattrs["security.capability"] = string(capability[:length]) + } + return nil +} + +type tarWhiteoutConverter interface { + ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) + ConvertRead(*tar.Header, string) (bool, error) +} + +type tarAppender struct { + TarWriter *tar.Writer + Buffer *bufio.Writer + + // for hardlink mapping + SeenFiles map[uint64]string + IdentityMapping idtools.IdentityMapping + ChownOpts *idtools.Identity + + // For packing and unpacking whiteout files in the + // non standard format. The whiteout files defined + // by the AUFS standard are used as the tar whiteout + // standard. + WhiteoutConverter tarWhiteoutConverter +} + +func newTarAppender(idMapping idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { + return &tarAppender{ + SeenFiles: make(map[uint64]string), + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + IdentityMapping: idMapping, + ChownOpts: chownOpts, + } +} + +// canonicalTarName provides a platform-independent and consistent posix-style +// path for files and directories to be archived regardless of the platform. 
+func canonicalTarName(name string, isDir bool) string { + name = CanonicalTarNameForPath(name) + + // suffix with '/' for directories + if isDir && !strings.HasSuffix(name, "/") { + name += "/" + } + return name +} + +// addTarFile adds to the tar archive a file from `path` as `name` +func (ta *tarAppender) addTarFile(path, name string) error { + fi, err := os.Lstat(path) + if err != nil { + return err + } + + var link string + if fi.Mode()&os.ModeSymlink != 0 { + var err error + link, err = os.Readlink(path) + if err != nil { + return err + } + } + + hdr, err := FileInfoHeader(name, fi, link) + if err != nil { + return err + } + if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { + return err + } + + // if it's not a directory and has more than 1 link, + // it's hard linked, so set the type flag accordingly + if !fi.IsDir() && hasHardlinks(fi) { + inode, err := getInodeFromStat(fi.Sys()) + if err != nil { + return err + } + // a link should have a name that it links too + // and that linked name should be first in the tar archive + if oldpath, ok := ta.SeenFiles[inode]; ok { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = oldpath + hdr.Size = 0 // This Must be here for the writer math to add up! + } else { + ta.SeenFiles[inode] = name + } + } + + // check whether the file is overlayfs whiteout + // if yes, skip re-mapping container ID mappings. + isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 + + // handle re-mapping container ID mappings back to host ID mappings before + // writing tar headers/files. We skip whiteout files because they were written + // by the kernel and already have proper ownership relative to the host + if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() { + fileIDPair, err := getFileUIDGID(fi.Sys()) + if err != nil { + return err + } + hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair) + if err != nil { + return err + } + } + + // explicitly override with ChownOpts + if ta.ChownOpts != nil { + hdr.Uid = ta.ChownOpts.UID + hdr.Gid = ta.ChownOpts.GID + } + + if ta.WhiteoutConverter != nil { + wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) + if err != nil { + return err + } + + // If a new whiteout file exists, write original hdr, then + // replace hdr with wo to be written after. Whiteouts should + // always be written after the original. Note the original + // hdr may have been updated to be a whiteout with returning + // a whiteout header + if wo != nil { + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + return fmt.Errorf("tar: cannot use whiteout for non-empty file") + } + hdr = wo + } + } + + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + // We use sequential file access to avoid depleting the standby list on + // Windows. On Linux, this equates to a regular os.Open. 
+ file, err := sequential.Open(path) + if err != nil { + return err + } + + ta.Buffer.Reset(ta.TarWriter) + defer ta.Buffer.Reset(nil) + _, err = io.Copy(ta.Buffer, file) + file.Close() + if err != nil { + return err + } + err = ta.Buffer.Flush() + if err != nil { + return err + } + } + + return nil +} + +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error { + // hdr.Mode is in linux format, which we can use for sycalls, + // but for os.Foo() calls we need the mode converted to os.FileMode, + // so use hdrInfo.Mode() (they differ for e.g. setuid bits) + hdrInfo := hdr.FileInfo() + + switch hdr.Typeflag { + case tar.TypeDir: + // Create directory unless it exists as a directory already. + // In that case we just want to merge the two + if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { + if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { + return err + } + } + + case tar.TypeReg: + // Source is regular file. We use sequential file access to avoid depleting + // the standby list on Windows. On Linux, this equates to a regular os.OpenFile. + file, err := sequential.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) + if err != nil { + return err + } + if _, err := io.Copy(file, reader); err != nil { + file.Close() + return err + } + file.Close() + + case tar.TypeBlock, tar.TypeChar: + if inUserns { // cannot create devices in a userns + return nil + } + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeFifo: + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeLink: + // #nosec G305 -- The target path is checked for path traversal. + targetPath := filepath.Join(extractDir, hdr.Linkname) + // check for hardlink breakout + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) + } + if err := os.Link(targetPath, path); err != nil { + return err + } + + case tar.TypeSymlink: + // path -> hdr.Linkname = targetPath + // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file + targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // #nosec G305 -- The target path is checked for path traversal. + + // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because + // that symlink would first have to be created, which would be caught earlier, at this very check: + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) + } + if err := os.Symlink(hdr.Linkname, path); err != nil { + return err + } + + case tar.TypeXGlobalHeader: + logrus.Debug("PAX Global Extended Headers found and ignored") + return nil + + default: + return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) + } + + // Lchown is not supported on Windows. 
+ if Lchown && runtime.GOOS != "windows" { + if chownOpts == nil { + chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} + } + if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { + msg := "failed to Lchown %q for UID %d, GID %d" + if errors.Is(err, syscall.EINVAL) && userns.RunningInUserNS() { + msg += " (try increasing the number of subordinate IDs in /etc/subuid and /etc/subgid)" + } + return errors.Wrapf(err, msg, path, hdr.Uid, hdr.Gid) + } + } + + var errors []string + for key, value := range hdr.Xattrs { + if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { + if err == syscall.ENOTSUP || err == syscall.EPERM { + // We ignore errors here because not all graphdrivers support + // xattrs *cough* old versions of AUFS *cough*. However only + // ENOTSUP should be emitted in that case, otherwise we still + // bail. + // EPERM occurs if modifying xattrs is not allowed. This can + // happen when running in userns with restrictions (ChromeOS). + errors = append(errors, err.Error()) + continue + } + return err + } + } + + if len(errors) > 0 { + logrus.WithFields(logrus.Fields{ + "errors": errors, + }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") + } + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if err := handleLChmod(hdr, path, hdrInfo); err != nil { + return err + } + + aTime := hdr.AccessTime + if aTime.Before(hdr.ModTime) { + // Last access time should never be before last modified time. + aTime = hdr.ModTime + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} + if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + return nil +} + +// Tar creates an archive from the directory at `path`, and returns it as a +// stream of bytes. +func Tar(path string, compression Compression) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{Compression: compression}) +} + +// TarWithOptions creates an archive from the directory at `path`, only including files whose relative +// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. +func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + // Fix the source path to work with long path names. This is a no-op + // on platforms other than Windows. + srcPath = fixVolumePathPrefix(srcPath) + + pm, err := patternmatcher.New(options.ExcludePatterns) + if err != nil { + return nil, err + } + + pipeReader, pipeWriter := io.Pipe() + + compressWriter, err := CompressStream(pipeWriter, options.Compression) + if err != nil { + return nil, err + } + + whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) + if err != nil { + return nil, err + } + + go func() { + ta := newTarAppender( + options.IDMap, + compressWriter, + options.ChownOpts, + ) + ta.WhiteoutConverter = whiteoutConverter + + defer func() { + // Make sure to check the error on Close. 
+ if err := ta.TarWriter.Close(); err != nil { + logrus.Errorf("Can't close tar writer: %s", err) + } + if err := compressWriter.Close(); err != nil { + logrus.Errorf("Can't close compress writer: %s", err) + } + if err := pipeWriter.Close(); err != nil { + logrus.Errorf("Can't close pipe writer: %s", err) + } + }() + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + + stat, err := os.Lstat(srcPath) + if err != nil { + return + } + + if !stat.IsDir() { + // We can't later join a non-dir with any includes because the + // 'walk' will error if "file/." is stat-ed and "file" is not a + // directory. So, we must split the source path and use the + // basename as the include. + if len(options.IncludeFiles) > 0 { + logrus.Warn("Tar: Can't archive a file with includes") + } + + dir, base := SplitPathDirEntry(srcPath) + srcPath = dir + options.IncludeFiles = []string{base} + } + + if len(options.IncludeFiles) == 0 { + options.IncludeFiles = []string{"."} + } + + seen := make(map[string]bool) + + for _, include := range options.IncludeFiles { + rebaseName := options.RebaseNames[include] + + var ( + parentMatchInfo []patternmatcher.MatchInfo + parentDirs []string + ) + + walkRoot := getWalkRoot(srcPath, include) + filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { + if err != nil { + logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) + return nil + } + + relFilePath, err := filepath.Rel(srcPath, filePath) + if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { + // Error getting relative path OR we are looking + // at the source directory path. Skip in both situations. + return nil + } + + if options.IncludeSourceDir && include == "." && relFilePath != "." { + relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) + } + + skip := false + + // If "include" is an exact match for the current file + // then even if there's an "excludePatterns" pattern that + // matches it, don't skip it. IOW, assume an explicit 'include' + // is asking for that file no matter what - which is true + // for some files, like .dockerignore and Dockerfile (sometimes) + if include != relFilePath { + for len(parentDirs) != 0 { + lastParentDir := parentDirs[len(parentDirs)-1] + if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) { + break + } + parentDirs = parentDirs[:len(parentDirs)-1] + parentMatchInfo = parentMatchInfo[:len(parentMatchInfo)-1] + } + + var matchInfo patternmatcher.MatchInfo + if len(parentMatchInfo) != 0 { + skip, matchInfo, err = pm.MatchesUsingParentResults(relFilePath, parentMatchInfo[len(parentMatchInfo)-1]) + } else { + skip, matchInfo, err = pm.MatchesUsingParentResults(relFilePath, patternmatcher.MatchInfo{}) + } + if err != nil { + logrus.Errorf("Error matching %s: %v", relFilePath, err) + return err + } + + if f.IsDir() { + parentDirs = append(parentDirs, relFilePath) + parentMatchInfo = append(parentMatchInfo, matchInfo) + } + } + + if skip { + // If we want to skip this file and its a directory + // then we should first check to see if there's an + // excludes pattern (e.g. !dir/file) that starts with this + // dir. If so then we can't skip this dir. + + // Its not a dir then so we can just return/skip. 
+ if !f.IsDir() { + return nil + } + + // No exceptions (!...) in patterns so just skip dir + if !pm.Exclusions() { + return filepath.SkipDir + } + + dirSlash := relFilePath + string(filepath.Separator) + + for _, pat := range pm.Patterns() { + if !pat.Exclusion() { + continue + } + if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { + // found a match - so can't skip this dir + return nil + } + } + + // No matching exclusion dir so just skip dir + return filepath.SkipDir + } + + if seen[relFilePath] { + return nil + } + seen[relFilePath] = true + + // Rename the base resource. + if rebaseName != "" { + var replacement string + if rebaseName != string(filepath.Separator) { + // Special case the root directory to replace with an + // empty string instead so that we don't end up with + // double slashes in the paths. + replacement = rebaseName + } + + relFilePath = strings.Replace(relFilePath, include, replacement, 1) + } + + if err := ta.addTarFile(filePath, relFilePath); err != nil { + logrus.Errorf("Can't add file %s to tar: %s", filePath, err) + // if pipe is broken, stop writing tar stream to it + if err == io.ErrClosedPipe { + return err + } + } + return nil + }) + } + }() + + return pipeReader, nil +} + +// Unpack unpacks the decompressedArchive to dest with options. +func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { + tr := tar.NewReader(decompressedArchive) + trBuf := pools.BufioReader32KPool.Get(nil) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) + if err != nil { + return err + } + + // Iterate through the files in the archive. +loop: + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return err + } + + // ignore XGlobalHeader early to avoid creating parent directories for them + if hdr.Typeflag == tar.TypeXGlobalHeader { + logrus.Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) + continue + } + + // Normalize name, for safety and for a simple is-root check + // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: + // This keeps "..\" as-is, but normalizes "\..\" to "\". + hdr.Name = filepath.Clean(hdr.Name) + + for _, exclude := range options.ExcludePatterns { + if strings.HasPrefix(hdr.Name, exclude) { + continue loop + } + } + + // Ensure that the parent directory exists. + err = createImpliedDirectories(dest, hdr, options) + if err != nil { + return err + } + + // #nosec G305 -- The joined path is checked for path traversal. + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return err + } + if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + + // If path exits we almost always just want to remove and replace it + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). + if fi, err := os.Lstat(path); err == nil { + if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing directory with a non-directory from the archive. 
+ return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) + } + + if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing non-directory with a directory from the archive. + return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) + } + + if fi.IsDir() && hdr.Name == "." { + continue + } + + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return err + } + } + } + trBuf.Reset(tr) + + if err := remapIDs(options.IDMap, hdr); err != nil { + return err + } + + if whiteoutConverter != nil { + writeFile, err := whiteoutConverter.ConvertRead(hdr, path) + if err != nil { + return err + } + if !writeFile { + continue + } + } + + if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { + return err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + } + + for _, hdr := range dirs { + // #nosec G305 -- The header was checked for path traversal before it was appended to the dirs slice. + path := filepath.Join(dest, hdr.Name) + + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return err + } + } + return nil +} + +// createImpliedDirectories will create all parent directories of the current path with default permissions, if they do +// not already exist. This is possible as the tar format supports 'implicit' directories, where their existence is +// defined by the paths of files in the tar, but there are no header entries for the directories themselves, and thus +// we most both create them and choose metadata like permissions. +// +// The caller should have performed filepath.Clean(hdr.Name), so hdr.Name will now be in the filepath format for the OS +// on which the daemon is running. This precondition is required because this function assumes a OS-specific path +// separator when checking that a path is not the root. +func createImpliedDirectories(dest string, hdr *tar.Header, options *TarOptions) error { + // Not the root directory, ensure that the parent directory exists + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + // RootPair() is confined inside this loop as most cases will not require a call, so we can spend some + // unneeded function calls in the uncommon case to encapsulate logic -- implied directories are a niche + // usage that reduces the portability of an image. + rootIDs := options.IDMap.RootPair() + + err = idtools.MkdirAllAndChownNew(parentPath, ImpliedDirectoryMode, rootIDs) + if err != nil { + return err + } + } + } + + return nil +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +// +// FIXME: specify behavior when target path exists vs. doesn't exist. 
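+//
+// Usage sketch (options may be nil, in which case defaults are applied; path
+// hypothetical, error handling elided):
+//
+//	f, _ := os.Open("layer.tar.gz")
+//	defer f.Close()
+//	err := Untar(f, "/tmp/rootfs", nil)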
+func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. +func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + dest = filepath.Clean(dest) + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + r := tarArchive + if decompress { + decompressedArchive, err := DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return Unpack(r, dest, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func (archiver *Archiver) TarUntar(src, dst string) error { + archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) + if err != nil { + return err + } + defer archive.Close() + options := &TarOptions{ + IDMap: archiver.IDMapping, + } + return archiver.Untar(archive, dst, options) +} + +// UntarPath untar a file from path to a destination, src is the source tar file path. +func (archiver *Archiver) UntarPath(src, dst string) error { + archive, err := os.Open(src) + if err != nil { + return err + } + defer archive.Close() + options := &TarOptions{ + IDMap: archiver.IDMapping, + } + return archiver.Untar(archive, dst, options) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func (archiver *Archiver) CopyWithTar(src, dst string) error { + srcSt, err := os.Stat(src) + if err != nil { + return err + } + if !srcSt.IsDir() { + return archiver.CopyFileWithTar(src, dst) + } + + // if this Archiver is set up with ID mapping we need to create + // the new destination directory with the remapped root UID/GID pair + // as owner + rootIDs := archiver.IDMapping.RootPair() + // Create dst, copy src's content into it + if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { + return err + } + return archiver.TarUntar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { + srcSt, err := os.Stat(src) + if err != nil { + return err + } + + if srcSt.IsDir() { + return fmt.Errorf("Can't copy a directory") + } + + // Clean up the trailing slash. This must be done in an operating + // system specific manner. 
+ if dst[len(dst)-1] == os.PathSeparator { + dst = filepath.Join(dst, filepath.Base(src)) + } + // Create the holding directory if necessary + if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { + return err + } + + r, w := io.Pipe() + errC := make(chan error, 1) + + go func() { + defer close(errC) + + errC <- func() error { + defer w.Close() + + srcF, err := os.Open(src) + if err != nil { + return err + } + defer srcF.Close() + + hdr, err := FileInfoHeaderNoLookups(srcSt, "") + if err != nil { + return err + } + hdr.Format = tar.FormatPAX + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + hdr.AccessTime = time.Time{} + hdr.ChangeTime = time.Time{} + hdr.Name = filepath.Base(dst) + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + + if err := remapIDs(archiver.IDMapping, hdr); err != nil { + return err + } + + tw := tar.NewWriter(w) + defer tw.Close() + if err := tw.WriteHeader(hdr); err != nil { + return err + } + if _, err := io.Copy(tw, srcF); err != nil { + return err + } + return nil + }() + }() + defer func() { + if er := <-errC; err == nil && er != nil { + err = er + } + }() + + err = archiver.Untar(r, filepath.Dir(dst), nil) + if err != nil { + r.CloseWithError(err) + } + return err +} + +// IdentityMapping returns the IdentityMapping of the archiver. +func (archiver *Archiver) IdentityMapping() idtools.IdentityMapping { + return archiver.IDMapping +} + +func remapIDs(idMapping idtools.IdentityMapping, hdr *tar.Header) error { + ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) + hdr.Uid, hdr.Gid = ids.UID, ids.GID + return err +} + +// cmdStream executes a command, and returns its stdout as a stream. +// If the command fails to run or doesn't complete successfully, an error +// will be returned, including anything written on stderr. +func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { + cmd.Stdin = input + pipeR, pipeW := io.Pipe() + cmd.Stdout = pipeW + var errBuf bytes.Buffer + cmd.Stderr = &errBuf + + // Run the command and return the pipe + if err := cmd.Start(); err != nil { + return nil, err + } + + // Ensure the command has exited before we clean anything up + done := make(chan struct{}) + + // Copy stdout to the returned pipe + go func() { + if err := cmd.Wait(); err != nil { + pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) + } else { + pipeW.Close() + } + close(done) + }() + + return ioutils.NewReadCloserWrapper(pipeR, func() error { + // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as + // cmd.Wait waits for any non-file stdout/stderr/stdin to close. + err := pipeR.Close() + <-done + return err + }), nil +} + +// NewTempArchive reads the content of src into a temporary file, and returns the contents +// of that file as an archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { + f, err := os.CreateTemp(dir, "") + if err != nil { + return nil, err + } + if _, err := io.Copy(f, src); err != nil { + return nil, err + } + if _, err := f.Seek(0, 0); err != nil { + return nil, err + } + st, err := f.Stat() + if err != nil { + return nil, err + } + size := st.Size() + return &TempArchive{File: f, Size: size}, nil +} + +// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. 
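+//
+// Lifecycle sketch: the source stream is spooled to a temporary file by
+// NewTempArchive and that file is removed once it has been read to completion:
+//
+//	ta, _ := NewTempArchive(src, "")
+//	_, _ = io.Copy(dst, ta)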
+type TempArchive struct { + *os.File + Size int64 // Pre-computed from Stat().Size() as a convenience + read int64 + closed bool +} + +// Close closes the underlying file if it's still open, or does a no-op +// to allow callers to try to close the TempArchive multiple times safely. +func (archive *TempArchive) Close() error { + if archive.closed { + return nil + } + + archive.closed = true + + return archive.File.Close() +} + +func (archive *TempArchive) Read(data []byte) (int, error) { + n, err := archive.File.Read(data) + archive.read += int64(n) + if err != nil || archive.read == archive.Size { + archive.Close() + os.Remove(archive.File.Name()) + } + return n, err +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go new file mode 100644 index 000000000..76321a35e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go @@ -0,0 +1,100 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) (tarWhiteoutConverter, error) { + if format == OverlayWhiteoutFormat { + if inUserNS { + return nil, errors.New("specifying OverlayWhiteoutFormat is not allowed in userns") + } + return overlayWhiteoutConverter{}, nil + } + return nil, nil +} + +type overlayWhiteoutConverter struct { +} + +func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { + // convert whiteouts to AUFS format + if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { + // we just rename the file and make it normal + dir, filename := filepath.Split(hdr.Name) + hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) + hdr.Mode = 0600 + hdr.Typeflag = tar.TypeReg + hdr.Size = 0 + } + + if fi.Mode()&os.ModeDir != 0 { + // convert opaque dirs to AUFS format by writing an empty file with the prefix + opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") + if err != nil { + return nil, err + } + if len(opaque) == 1 && opaque[0] == 'y' { + if hdr.Xattrs != nil { + delete(hdr.Xattrs, "trusted.overlay.opaque") + } + + // create a header for the whiteout file + // it should inherit some properties from the parent, but be a regular file + wo = &tar.Header{ + Typeflag: tar.TypeReg, + Mode: hdr.Mode & int64(os.ModePerm), + Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), + Size: 0, + Uid: hdr.Uid, + Uname: hdr.Uname, + Gid: hdr.Gid, + Gname: hdr.Gname, + AccessTime: hdr.AccessTime, + ChangeTime: hdr.ChangeTime, + } //#nosec G305 -- An archive is being created, not extracted. 
+ } + } + + return +} + +func (c overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { + base := filepath.Base(path) + dir := filepath.Dir(path) + + // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay + if base == WhiteoutOpaqueDir { + err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0) + if err != nil { + return false, errors.Wrapf(err, "setxattr(%q, trusted.overlay.opaque=y)", dir) + } + // don't write the file itself + return false, err + } + + // if a file was deleted and we are using overlay, we need to create a character device + if strings.HasPrefix(base, WhiteoutPrefix) { + originalBase := base[len(WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + + if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { + return false, errors.Wrapf(err, "failed to mknod(%q, S_IFCHR, 0)", originalPath) + } + if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { + return false, err + } + + // don't write the file itself + return false, nil + } + + return true, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go new file mode 100644 index 000000000..28ae2769c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_other.go @@ -0,0 +1,8 @@ +//go:build !linux +// +build !linux + +package archive // import "github.com/docker/docker/pkg/archive" + +func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) (tarWhiteoutConverter, error) { + return nil, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go new file mode 100644 index 000000000..1a2aea2e6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go @@ -0,0 +1,125 @@ +//go:build !windows +// +build !windows + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "errors" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/containerd/containerd/pkg/userns" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" +) + +func init() { + sysStat = statUnix +} + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return srcPath +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. On Linux, we +// can't use filepath.Join(srcPath,include) because this will clean away +// a trailing "." or "/" which may be important. +func getWalkRoot(srcPath string, include string) string { + return strings.TrimSuffix(srcPath, string(filepath.Separator)) + string(filepath.Separator) + include +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) string { + return p // already unix-style +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. + +func chmodTarEntry(perm os.FileMode) os.FileMode { + return perm // noop for unix as golang APIs provide perm bits correctly +} + +// statUnix populates hdr from system-dependent fields of fi without performing +// any OS lookups. 
+func statUnix(fi os.FileInfo, hdr *tar.Header) error { + s, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return nil + } + + hdr.Uid = int(s.Uid) + hdr.Gid = int(s.Gid) + + if s.Mode&unix.S_IFBLK != 0 || + s.Mode&unix.S_IFCHR != 0 { + hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert + hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert + } + + return nil +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + inode = s.Ino + } + + return +} + +func getFileUIDGID(stat interface{}) (idtools.Identity, error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + return idtools.Identity{}, errors.New("cannot convert stat value to syscall.Stat_t") + } + return idtools.Identity{UID: int(s.Uid), GID: int(s.Gid)}, nil +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + mode := uint32(hdr.Mode & 07777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= unix.S_IFBLK + case tar.TypeChar: + mode |= unix.S_IFCHR + case tar.TypeFifo: + mode |= unix.S_IFIFO + } + + err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))) + if errors.Is(err, syscall.EPERM) && userns.RunningInUserNS() { + // In most cases, cannot create a device if running in user namespace + err = nil + } + return err +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go new file mode 100644 index 000000000..7260174bf --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go @@ -0,0 +1,67 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/longpath" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return longpath.AddPrefix(srcPath) +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. +func getWalkRoot(srcPath string, include string) string { + return filepath.Join(srcPath, include) +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) string { + return filepath.ToSlash(p) +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. +func chmodTarEntry(perm os.FileMode) os.FileMode { + // perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) 
+ permPart := perm & os.ModePerm + noPermPart := perm &^ os.ModePerm + // Add the x bit: make everything +x from windows + permPart |= 0111 + permPart &= 0755 + + return noPermPart | permPart +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + // do nothing. no notion of Rdev, Nlink in stat on Windows + return +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + // do nothing. no notion of Inode in stat on Windows + return +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + return nil +} + +func getFileUIDGID(stat interface{}) (idtools.Identity, error) { + // no notion of file ownership mapping yet on Windows + return idtools.Identity{UID: 0, GID: 0}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go new file mode 100644 index 000000000..7f7242be5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes.go @@ -0,0 +1,442 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" + "syscall" + "time" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +// ChangeType represents the change type. +type ChangeType int + +const ( + // ChangeModify represents the modify operation. + ChangeModify = iota + // ChangeAdd represents the add operation. + ChangeAdd + // ChangeDelete represents the delete operation. + ChangeDelete +) + +func (c ChangeType) String() string { + switch c { + case ChangeModify: + return "C" + case ChangeAdd: + return "A" + case ChangeDelete: + return "D" + } + return "" +} + +// Change represents a change, it wraps the change type and path. +// It describes changes of the files in the path respect to the +// parent layers. The change could be modify, add, delete. +// This is used for layer diff. +type Change struct { + Path string + Kind ChangeType +} + +func (change *Change) String() string { + return fmt.Sprintf("%s %s", change.Kind, change.Path) +} + +// for sort.Sort +type changesByPath []Change + +func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } +func (c changesByPath) Len() int { return len(c) } +func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } + +// Gnu tar doesn't have sub-second mtime precision. The go tar +// writer (1.10+) does when using PAX format, but we round times to seconds +// to ensure archives have the same hashes for backwards compatibility. +// See https://github.com/moby/moby/pull/35739/commits/fb170206ba12752214630b269a40ac7be6115ed4. +// +// Non-sub-second is problematic when we apply changes via tar +// files. 
We handle this by comparing for exact times, *or* same +// second count and either a or b having exactly 0 nanoseconds +func sameFsTime(a, b time.Time) bool { + return a.Equal(b) || + (a.Unix() == b.Unix() && + (a.Nanosecond() == 0 || b.Nanosecond() == 0)) +} + +func sameFsTimeSpec(a, b syscall.Timespec) bool { + return a.Sec == b.Sec && + (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) +} + +// Changes walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func Changes(layers []string, rw string) ([]Change, error) { + return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip) +} + +func aufsMetadataSkip(path string) (skip bool, err error) { + skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path) + if err != nil { + skip = true + } + return +} + +func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { + f := filepath.Base(path) + + // If there is a whiteout, then the file was removed + if strings.HasPrefix(f, WhiteoutPrefix) { + originalFile := f[len(WhiteoutPrefix):] + return filepath.Join(filepath.Dir(path), originalFile), nil + } + + return "", nil +} + +type skipChange func(string) (bool, error) +type deleteChange func(string, string, os.FileInfo) (string, error) + +func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) { + var ( + changes []Change + changedDirs = make(map[string]struct{}) + ) + + err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(rw, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + if sc != nil { + if skip, err := sc(path); skip { + return err + } + } + + change := Change{ + Path: path, + } + + deletedFile, err := dc(rw, path, f) + if err != nil { + return err + } + + // Find out what kind of modification happened + if deletedFile != "" { + change.Path = deletedFile + change.Kind = ChangeDelete + } else { + // Otherwise, the file was added + change.Kind = ChangeAdd + + // ...Unless it already existed in a top layer, in which case, it's a modification + for _, layer := range layers { + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the top layer, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. + // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + change.Kind = ChangeModify + break + } + } + } + + // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. + // This block is here to ensure the change is recorded even if the + // modify time, mode and size of the parent directory in the rw and ro layers are all equal. + // Check https://github.com/docker/docker/pull/13590 for details. 
+ if f.IsDir() { + changedDirs[path] = struct{}{} + } + if change.Kind == ChangeAdd || change.Kind == ChangeDelete { + parent := filepath.Dir(path) + if _, ok := changedDirs[parent]; !ok && parent != "/" { + changes = append(changes, Change{Path: parent, Kind: ChangeModify}) + changedDirs[parent] = struct{}{} + } + } + + // Record change + changes = append(changes, change) + return nil + }) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + return changes, nil +} + +// FileInfo describes the information of a file. +type FileInfo struct { + parent *FileInfo + name string + stat *system.StatT + children map[string]*FileInfo + capability []byte + added bool +} + +// LookUp looks up the file information of a file. +func (info *FileInfo) LookUp(path string) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + parent := info + if path == string(os.PathSeparator) { + return info + } + + pathElements := strings.Split(path, string(os.PathSeparator)) + for _, elem := range pathElements { + if elem != "" { + child := parent.children[elem] + if child == nil { + return nil + } + parent = child + } + } + return parent +} + +func (info *FileInfo) path() string { + if info.parent == nil { + // As this runs on the daemon side, file paths are OS specific. + return string(os.PathSeparator) + } + return filepath.Join(info.parent.path(), info.name) +} + +func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { + sizeAtEntry := len(*changes) + + if oldInfo == nil { + // add + change := Change{ + Path: info.path(), + Kind: ChangeAdd, + } + *changes = append(*changes, change) + info.added = true + } + + // We make a copy so we can modify it to detect additions + // also, we only recurse on the old dir if the new info is a directory + // otherwise any previous delete/change is considered recursive + oldChildren := make(map[string]*FileInfo) + if oldInfo != nil && info.isDir() { + for k, v := range oldInfo.children { + oldChildren[k] = v + } + } + + for name, newChild := range info.children { + oldChild := oldChildren[name] + if oldChild != nil { + // change? + oldStat := oldChild.stat + newStat := newChild.stat + // Note: We can't compare inode or ctime or blocksize here, because these change + // when copying a file into a container. However, that is not generally a problem + // because any content change will change mtime, and any status change should + // be visible when actually comparing the stat fields. The only time this + // breaks down is if some code intentionally hides a change by setting + // back mtime + if statDifferent(oldStat, newStat) || + !bytes.Equal(oldChild.capability, newChild.capability) { + change := Change{ + Path: newChild.path(), + Kind: ChangeModify, + } + *changes = append(*changes, change) + newChild.added = true + } + + // Remove from copy so we can detect deletions + delete(oldChildren, name) + } + + newChild.addChanges(oldChild, changes) + } + for _, oldChild := range oldChildren { + // delete + change := Change{ + Path: oldChild.path(), + Kind: ChangeDelete, + } + *changes = append(*changes, change) + } + + // If there were changes inside this directory, we need to add it, even if the directory + // itself wasn't changed. This is needed to properly save and restore filesystem permissions. + // As this runs on the daemon side, file paths are OS specific. 
+ if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { + change := Change{ + Path: info.path(), + Kind: ChangeModify, + } + // Let's insert the directory entry before the recently added entries located inside this dir + *changes = append(*changes, change) // just to resize the slice, will be overwritten + copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) + (*changes)[sizeAtEntry] = change + } +} + +// Changes add changes to file information. +func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { + var changes []Change + + info.addChanges(oldInfo, &changes) + + return changes +} + +func newRootFileInfo() *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + root := &FileInfo{ + name: string(os.PathSeparator), + children: make(map[string]*FileInfo), + } + return root +} + +// ChangesDirs compares two directories and generates an array of Change objects describing the changes. +// If oldDir is "", then all files in newDir will be Add-Changes. +func ChangesDirs(newDir, oldDir string) ([]Change, error) { + var ( + oldRoot, newRoot *FileInfo + ) + if oldDir == "" { + emptyDir, err := os.MkdirTemp("", "empty") + if err != nil { + return nil, err + } + defer os.Remove(emptyDir) + oldDir = emptyDir + } + oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) + if err != nil { + return nil, err + } + + return newRoot.Changes(oldRoot), nil +} + +// ChangesSize calculates the size in bytes of the provided changes, based on newDir. +func ChangesSize(newDir string, changes []Change) int64 { + var ( + size int64 + sf = make(map[uint64]struct{}) + ) + for _, change := range changes { + if change.Kind == ChangeModify || change.Kind == ChangeAdd { + file := filepath.Join(newDir, change.Path) + fileInfo, err := os.Lstat(file) + if err != nil { + logrus.Errorf("Can not stat %q: %s", file, err) + continue + } + + if fileInfo != nil && !fileInfo.IsDir() { + if hasHardlinks(fileInfo) { + inode := getIno(fileInfo) + if _, ok := sf[inode]; !ok { + size += fileInfo.Size() + sf[inode] = struct{}{} + } + } else { + size += fileInfo.Size() + } + } + } + } + return size +} + +// ExportChanges produces an Archive from the provided changes, relative to dir. +func ExportChanges(dir string, changes []Change, idMap idtools.IdentityMapping) (io.ReadCloser, error) { + reader, writer := io.Pipe() + go func() { + ta := newTarAppender(idMap, writer, nil) + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + sort.Sort(changesByPath(changes)) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + for _, change := range changes { + if change.Kind == ChangeDelete { + whiteOutDir := filepath.Dir(change.Path) + whiteOutBase := filepath.Base(change.Path) + whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) + timestamp := time.Now() + hdr := &tar.Header{ + Name: whiteOut[1:], + Size: 0, + ModTime: timestamp, + AccessTime: timestamp, + ChangeTime: timestamp, + } + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + logrus.Debugf("Can't write whiteout header: %s", err) + } + } else { + path := filepath.Join(dir, change.Path) + if err := ta.addTarFile(path, change.Path[1:]); err != nil { + logrus.Debugf("Can't add file %s to tar: %s", path, err) + } + } + } + + // Make sure to check the error on Close. 
+ if err := ta.TarWriter.Close(); err != nil { + logrus.Debugf("Can't close layer: %s", err) + } + if err := writer.Close(); err != nil { + logrus.Debugf("failed close Changes writer: %s", err) + } + }() + return reader, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go new file mode 100644 index 000000000..f8792b3d4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go @@ -0,0 +1,286 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "sort" + "syscall" + "unsafe" + + "github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" +) + +// walker is used to implement collectFileInfoForChanges on linux. Where this +// method in general returns the entire contents of two directory trees, we +// optimize some FS calls out on linux. In particular, we take advantage of the +// fact that getdents(2) returns the inode of each file in the directory being +// walked, which, when walking two trees in parallel to generate a list of +// changes, can be used to prune subtrees without ever having to lstat(2) them +// directly. Eliminating stat calls in this way can save up to seconds on large +// images. +type walker struct { + dir1 string + dir2 string + root1 *FileInfo + root2 *FileInfo +} + +// collectFileInfoForChanges returns a complete representation of the trees +// rooted at dir1 and dir2, with one important exception: any subtree or +// leaf where the inode and device numbers are an exact match between dir1 +// and dir2 will be pruned from the results. This method is *only* to be used +// to generating a list of changes between the two directories, as it does not +// reflect the full contents. +func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) { + w := &walker{ + dir1: dir1, + dir2: dir2, + root1: newRootFileInfo(), + root2: newRootFileInfo(), + } + + i1, err := os.Lstat(w.dir1) + if err != nil { + return nil, nil, err + } + i2, err := os.Lstat(w.dir2) + if err != nil { + return nil, nil, err + } + + if err := w.walk("/", i1, i2); err != nil { + return nil, nil, err + } + + return w.root1, w.root2, nil +} + +// Given a FileInfo, its path info, and a reference to the root of the tree +// being constructed, register this file with the tree. +func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { + if fi == nil { + return nil + } + parent := root.LookUp(filepath.Dir(path)) + if parent == nil { + return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path) + } + info := &FileInfo{ + name: filepath.Base(path), + children: make(map[string]*FileInfo), + parent: parent, + } + cpath := filepath.Join(dir, path) + stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) + if err != nil { + return err + } + info.stat = stat + info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access + parent.children[info.name] = info + return nil +} + +// Walk a subtree rooted at the same path in both trees being iterated. 
For +// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d +func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { + // Register these nodes with the return trees, unless we're still at the + // (already-created) roots: + if path != "/" { + if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { + return err + } + if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { + return err + } + } + + is1Dir := i1 != nil && i1.IsDir() + is2Dir := i2 != nil && i2.IsDir() + + sameDevice := false + if i1 != nil && i2 != nil { + si1 := i1.Sys().(*syscall.Stat_t) + si2 := i2.Sys().(*syscall.Stat_t) + if si1.Dev == si2.Dev { + sameDevice = true + } + } + + // If these files are both non-existent, or leaves (non-dirs), we are done. + if !is1Dir && !is2Dir { + return nil + } + + // Fetch the names of all the files contained in both directories being walked: + var names1, names2 []nameIno + if is1Dir { + names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access + if err != nil { + return err + } + } + if is2Dir { + names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access + if err != nil { + return err + } + } + + // We have lists of the files contained in both parallel directories, sorted + // in the same order. Walk them in parallel, generating a unique merged list + // of all items present in either or both directories. + var names []string + ix1 := 0 + ix2 := 0 + + for { + if ix1 >= len(names1) { + break + } + if ix2 >= len(names2) { + break + } + + ni1 := names1[ix1] + ni2 := names2[ix2] + + switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { + case -1: // ni1 < ni2 -- advance ni1 + // we will not encounter ni1 in names2 + names = append(names, ni1.name) + ix1++ + case 0: // ni1 == ni2 + if ni1.ino != ni2.ino || !sameDevice { + names = append(names, ni1.name) + } + ix1++ + ix2++ + case 1: // ni1 > ni2 -- advance ni2 + // we will not encounter ni2 in names1 + names = append(names, ni2.name) + ix2++ + } + } + for ix1 < len(names1) { + names = append(names, names1[ix1].name) + ix1++ + } + for ix2 < len(names2) { + names = append(names, names2[ix2].name) + ix2++ + } + + // For each of the names present in either or both of the directories being + // iterated, stat the name under each root, and recurse the pair of them: + for _, name := range names { + fname := filepath.Join(path, name) + var cInfo1, cInfo2 os.FileInfo + if is1Dir { + cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if is2Dir { + cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if err = w.walk(fname, cInfo1, cInfo2); err != nil { + return err + } + } + return nil +} + +// {name,inode} pairs used to support the early-pruning logic of the walker type +type nameIno struct { + name string + ino uint64 +} + +type nameInoSlice []nameIno + +func (s nameInoSlice) Len() int { return len(s) } +func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } + +// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode +// numbers further up the stack when reading directory contents. Unlike +// os.Readdirnames, which returns a list of filenames, this function returns a +// list of {filename,inode} pairs. 
+func readdirnames(dirname string) (names []nameIno, err error) { + var ( + size = 100 + buf = make([]byte, 4096) + nbuf int + bufp int + nb int + ) + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + names = make([]nameIno, 0, size) // Empty with room to grow. + for { + // Refill the buffer if necessary + if bufp >= nbuf { + bufp = 0 + nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux + if nbuf < 0 { + nbuf = 0 + } + if err != nil { + return nil, os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + break // EOF + } + } + + // Drain the buffer + nb, names = parseDirent(buf[bufp:nbuf], names) + bufp += nb + } + + sl := nameInoSlice(names) + sort.Sort(sl) + return sl, nil +} + +// parseDirent is a minor modification of unix.ParseDirent (linux version) +// which returns {name,inode} pairs instead of just names. +func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { + origlen := len(buf) + for len(buf) > 0 { + dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0])) + buf = buf[dirent.Reclen:] + if dirent.Ino == 0 { // File absent in directory. + continue + } + bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) + var name = string(bytes[0:clen(bytes[:])]) + if name == "." || name == ".." { // Useless names + continue + } + names = append(names, nameIno{name, dirent.Ino}) + } + return origlen - len(buf), names +} + +func clen(n []byte) int { + for i := 0; i < len(n); i++ { + if n[i] == 0 { + return i + } + } + return len(n) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go new file mode 100644 index 000000000..0e4399a43 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes_other.go @@ -0,0 +1,98 @@ +//go:build !linux +// +build !linux + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/pkg/system" +) + +func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { + var ( + oldRoot, newRoot *FileInfo + err1, err2 error + errs = make(chan error, 2) + ) + go func() { + oldRoot, err1 = collectFileInfo(oldDir) + errs <- err1 + }() + go func() { + newRoot, err2 = collectFileInfo(newDir) + errs <- err2 + }() + + // block until both routines have returned + for i := 0; i < 2; i++ { + if err := <-errs; err != nil { + return nil, nil, err + } + } + + return oldRoot, newRoot, nil +} + +func collectFileInfo(sourceDir string) (*FileInfo, error) { + root := newRootFileInfo() + + err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + relPath = filepath.Join(string(os.PathSeparator), relPath) + + // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. + // Temporary workaround. If the returned path starts with two backslashes, + // trim it down to a single backslash. Only relevant on Windows. 
+ if runtime.GOOS == "windows" { + if strings.HasPrefix(relPath, `\\`) { + relPath = relPath[1:] + } + } + + if relPath == string(os.PathSeparator) { + return nil + } + + parent := root.LookUp(filepath.Dir(relPath)) + if parent == nil { + return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) + } + + info := &FileInfo{ + name: filepath.Base(relPath), + children: make(map[string]*FileInfo), + parent: parent, + } + + s, err := system.Lstat(path) + if err != nil { + return err + } + info.stat = s + + info.capability, _ = system.Lgetxattr(path, "security.capability") + + parent.children[info.name] = info + + return nil + }) + if err != nil { + return nil, err + } + return root, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go new file mode 100644 index 000000000..54aace970 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go @@ -0,0 +1,44 @@ +//go:build !windows +// +build !windows + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "os" + "syscall" + + "github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" +) + +func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { + // Don't look at size for dirs, its not a good measure of change + if oldStat.Mode() != newStat.Mode() || + oldStat.UID() != newStat.UID() || + oldStat.GID() != newStat.GID() || + oldStat.Rdev() != newStat.Rdev() || + // Don't look at size or modification time for dirs, its not a good + // measure of change. See https://github.com/moby/moby/issues/9874 + // for a description of the issue with modification time, and + // https://github.com/moby/moby/pull/11422 for the change. + // (Note that in the Windows implementation of this function, + // modification time IS taken as a change). See + // https://github.com/moby/moby/pull/37982 for more information. + (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR && + (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0 +} + +func getIno(fi os.FileInfo) uint64 { + return fi.Sys().(*syscall.Stat_t).Ino +} + +func hasHardlinks(fi os.FileInfo) bool { + return fi.Sys().(*syscall.Stat_t).Nlink > 1 +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go new file mode 100644 index 000000000..9906685e4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go @@ -0,0 +1,34 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "os" + + "github.com/docker/docker/pkg/system" +) + +func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { + // Note there is slight difference between the Linux and Windows + // implementations here. Due to https://github.com/moby/moby/issues/9874, + // and the fix at https://github.com/moby/moby/pull/11422, Linux does not + // consider a change to the directory time as a change. Windows on NTFS + // does. See https://github.com/moby/moby/pull/37982 for more information. 
+ + if !sameFsTime(oldStat.Mtim(), newStat.Mtim()) || + oldStat.Mode() != newStat.Mode() || + oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode().IsDir() +} + +func getIno(fi os.FileInfo) (inode uint64) { + return +} + +func hasHardlinks(fi os.FileInfo) bool { + return false +} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go new file mode 100644 index 000000000..f3111b79b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/copy.go @@ -0,0 +1,485 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "errors" + "io" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +// Errors used or returned by this file. +var ( + ErrNotDirectory = errors.New("not a directory") + ErrDirNotExists = errors.New("no such directory") + ErrCannotCopyDir = errors.New("cannot copy directory") + ErrInvalidCopySource = errors.New("invalid copy source content") +) + +// PreserveTrailingDotOrSeparator returns the given cleaned path (after +// processing using any utility functions from the path or filepath stdlib +// packages) and appends a trailing `/.` or `/` if its corresponding original +// path (from before being processed by utility functions from the path or +// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned +// path already ends in a `.` path segment, then another is not added. If the +// clean path already ends in the separator, then another is not added. +func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string { + // Ensure paths are in platform semantics + cleanedPath = strings.ReplaceAll(cleanedPath, "/", string(sep)) + originalPath = strings.ReplaceAll(originalPath, "/", string(sep)) + + if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { + if !hasTrailingPathSeparator(cleanedPath, sep) { + // Add a separator if it doesn't already end with one (a cleaned + // path would only end in a separator if it is the root). + cleanedPath += string(sep) + } + cleanedPath += "." + } + + if !hasTrailingPathSeparator(cleanedPath, sep) && hasTrailingPathSeparator(originalPath, sep) { + cleanedPath += string(sep) + } + + return cleanedPath +} + +// assertsDirectory returns whether the given path is +// asserted to be a directory, i.e., the path ends with +// a trailing '/' or `/.`, assuming a path separator of `/`. +func assertsDirectory(path string, sep byte) bool { + return hasTrailingPathSeparator(path, sep) || specifiesCurrentDir(path) +} + +// hasTrailingPathSeparator returns whether the given +// path ends with the system's path separator character. +func hasTrailingPathSeparator(path string, sep byte) bool { + return len(path) > 0 && path[len(path)-1] == sep +} + +// specifiesCurrentDir returns whether the given path specifies +// a "current directory", i.e., the last path segment is `.`. +func specifiesCurrentDir(path string) bool { + return filepath.Base(path) == "." +} + +// SplitPathDirEntry splits the given path between its directory name and its +// basename by first cleaning the path but preserves a trailing "." if the +// original path specified the current directory. 
+func SplitPathDirEntry(path string) (dir, base string) { + cleanedPath := filepath.Clean(filepath.FromSlash(path)) + + if specifiesCurrentDir(path) { + cleanedPath += string(os.PathSeparator) + "." + } + + return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) +} + +// TarResource archives the resource described by the given CopyInfo to a Tar +// archive. A non-nil error is returned if sourcePath does not exist or is +// asserted to be a directory but exists as another type of file. +// +// This function acts as a convenient wrapper around TarWithOptions, which +// requires a directory as the source path. TarResource accepts either a +// directory or a file path and correctly sets the Tar options. +func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) { + return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) +} + +// TarResourceRebase is like TarResource but renames the first path element of +// items in the resulting tar archive to match the given rebaseName if not "". +func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) { + sourcePath = normalizePath(sourcePath) + if _, err = os.Lstat(sourcePath); err != nil { + // Catches the case where the source does not exist or is not a + // directory if asserted to be a directory, as this also causes an + // error. + return + } + + // Separate the source path between its directory and + // the entry in that directory which we are archiving. + sourceDir, sourceBase := SplitPathDirEntry(sourcePath) + opts := TarResourceRebaseOpts(sourceBase, rebaseName) + + logrus.Debugf("copying %q from %q", sourceBase, sourceDir) + return TarWithOptions(sourceDir, opts) +} + +// TarResourceRebaseOpts does not preform the Tar, but instead just creates the rebase +// parameters to be sent to TarWithOptions (the TarOptions struct) +func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions { + filter := []string{sourceBase} + return &TarOptions{ + Compression: Uncompressed, + IncludeFiles: filter, + IncludeSourceDir: true, + RebaseNames: map[string]string{ + sourceBase: rebaseName, + }, + } +} + +// CopyInfo holds basic info about the source +// or destination path of a copy operation. +type CopyInfo struct { + Path string + Exists bool + IsDir bool + RebaseName string +} + +// CopyInfoSourcePath stats the given path to create a CopyInfo +// struct representing that resource for the source of an archive copy +// operation. The given path should be an absolute local path. A source path +// has all symlinks evaluated that appear before the last path separator ("/" +// on Unix). As it is to be a copy source, the path must exist. +func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) { + // normalize the file path and then evaluate the symbol link + // we will use the target file instead of the symbol link if + // followLink is set + path = normalizePath(path) + + resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink) + if err != nil { + return CopyInfo{}, err + } + + stat, err := os.Lstat(resolvedPath) + if err != nil { + return CopyInfo{}, err + } + + return CopyInfo{ + Path: resolvedPath, + Exists: true, + IsDir: stat.IsDir(), + RebaseName: rebaseName, + }, nil +} + +// CopyInfoDestinationPath stats the given path to create a CopyInfo +// struct representing that resource for the destination of an archive copy +// operation. The given path should be an absolute local path. 
+func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { + maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot. + path = normalizePath(path) + originalPath := path + + stat, err := os.Lstat(path) + + if err == nil && stat.Mode()&os.ModeSymlink == 0 { + // The path exists and is not a symlink. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil + } + + // While the path is a symlink. + for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ { + if n > maxSymlinkIter { + // Don't follow symlinks more than this arbitrary number of times. + return CopyInfo{}, errors.New("too many symlinks in " + originalPath) + } + + // The path is a symbolic link. We need to evaluate it so that the + // destination of the copy operation is the link target and not the + // link itself. This is notably different than CopyInfoSourcePath which + // only evaluates symlinks before the last appearing path separator. + // Also note that it is okay if the last path element is a broken + // symlink as the copy operation should create the target. + var linkTarget string + + linkTarget, err = os.Readlink(path) + if err != nil { + return CopyInfo{}, err + } + + if !system.IsAbs(linkTarget) { + // Join with the parent directory. + dstParent, _ := SplitPathDirEntry(path) + linkTarget = filepath.Join(dstParent, linkTarget) + } + + path = linkTarget + stat, err = os.Lstat(path) + } + + if err != nil { + // It's okay if the destination path doesn't exist. We can still + // continue the copy operation if the parent directory exists. + if !os.IsNotExist(err) { + return CopyInfo{}, err + } + + // Ensure destination parent dir exists. + dstParent, _ := SplitPathDirEntry(path) + + parentDirStat, err := os.Stat(dstParent) + if err != nil { + return CopyInfo{}, err + } + if !parentDirStat.IsDir() { + return CopyInfo{}, ErrNotDirectory + } + + return CopyInfo{Path: path}, nil + } + + // The path exists after resolving symlinks. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil +} + +// PrepareArchiveCopy prepares the given srcContent archive, which should +// contain the archived resource described by srcInfo, to the destination +// described by dstInfo. Returns the possibly modified content archive along +// with the path to the destination directory which it should be extracted to. +func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) { + // Ensure in platform semantics + srcInfo.Path = normalizePath(srcInfo.Path) + dstInfo.Path = normalizePath(dstInfo.Path) + + // Separate the destination path between its directory and base + // components in case the source archive contents need to be rebased. + dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) + _, srcBase := SplitPathDirEntry(srcInfo.Path) + + switch { + case dstInfo.Exists && dstInfo.IsDir: + // The destination exists as a directory. No alteration + // to srcContent is needed as its contents can be + // simply extracted to the destination directory. + return dstInfo.Path, io.NopCloser(srcContent), nil + case dstInfo.Exists && srcInfo.IsDir: + // The destination exists as some type of file and the source + // content is a directory. This is an error condition since + // you cannot copy a directory to an existing file location. + return "", nil, ErrCannotCopyDir + case dstInfo.Exists: + // The destination exists as some type of file and the source content + // is also a file. 
The source content entry will have to be renamed to + // have a basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case srcInfo.IsDir: + // The destination does not exist and the source content is an archive + // of a directory. The archive should be extracted to the parent of + // the destination path instead, and when it is, the directory that is + // created as a result should take the name of the destination path. + // The source content entries will have to be renamed to have a + // basename which matches the destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + case assertsDirectory(dstInfo.Path, os.PathSeparator): + // The destination does not exist and is asserted to be created as a + // directory, but the source content is not a directory. This is an + // error condition since you cannot create a directory from a file + // source. + return "", nil, ErrDirNotExists + default: + // The last remaining case is when the destination does not exist, is + // not asserted to be a directory, and the source content is not an + // archive of a directory. It this case, the destination file will need + // to be created when the archive is extracted and the source content + // entry will have to be renamed to have a basename which matches the + // destination path's basename. + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + } +} + +// RebaseArchiveEntries rewrites the given srcContent archive replacing +// an occurrence of oldBase with newBase at the beginning of entry names. +func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser { + if oldBase == string(os.PathSeparator) { + // If oldBase specifies the root directory, use an empty string as + // oldBase instead so that newBase doesn't replace the path separator + // that all paths will start with. + oldBase = "" + } + + rebased, w := io.Pipe() + + go func() { + srcTar := tar.NewReader(srcContent) + rebasedTar := tar.NewWriter(w) + + for { + hdr, err := srcTar.Next() + if err == io.EOF { + // Signals end of archive. + rebasedTar.Close() + w.Close() + return + } + if err != nil { + w.CloseWithError(err) + return + } + + // srcContent tar stream, as served by TarWithOptions(), is + // definitely in PAX format, but tar.Next() mistakenly guesses it + // as USTAR, which creates a problem: if the newBase is >100 + // characters long, WriteHeader() returns an error like + // "archive/tar: cannot encode header: Format specifies USTAR; and USTAR cannot encode Name=...". + // + // To fix, set the format to PAX here. See docker/for-linux issue #484. + hdr.Format = tar.FormatPAX + hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) + if hdr.Typeflag == tar.TypeLink { + hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1) + } + + if err = rebasedTar.WriteHeader(hdr); err != nil { + w.CloseWithError(err) + return + } + + // Ignoring GoSec G110. See https://github.com/securego/gosec/pull/433 + // and https://cure53.de/pentest-report_opa.pdf, which recommends to + // replace io.Copy with io.CopyN7. The latter allows to specify the + // maximum number of bytes that should be read. 
By properly defining + // the limit, it can be assured that a GZip compression bomb cannot + // easily cause a Denial-of-Service. + // After reviewing with @tonistiigi and @cpuguy83, this should not + // affect us, because here we do not read into memory, hence should + // not be vulnerable to this code consuming memory. + //nolint:gosec // G110: Potential DoS vulnerability via decompression bomb (gosec) + if _, err = io.Copy(rebasedTar, srcTar); err != nil { + w.CloseWithError(err) + return + } + } + }() + + return rebased +} + +// CopyResource performs an archive copy from the given source path to the +// given destination path. The source path MUST exist and the destination +// path's parent directory must exist. +func CopyResource(srcPath, dstPath string, followLink bool) error { + var ( + srcInfo CopyInfo + err error + ) + + // Ensure in platform semantics + srcPath = normalizePath(srcPath) + dstPath = normalizePath(dstPath) + + // Clean the source and destination paths. + srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath, os.PathSeparator) + dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath, os.PathSeparator) + + if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil { + return err + } + + content, err := TarResource(srcInfo) + if err != nil { + return err + } + defer content.Close() + + return CopyTo(content, srcInfo, dstPath) +} + +// CopyTo handles extracting the given content whose +// entries should be sourced from srcInfo to dstPath. +func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error { + // The destination path need not exist, but CopyInfoDestinationPath will + // ensure that at least the parent directory exists. + dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath)) + if err != nil { + return err + } + + dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo) + if err != nil { + return err + } + defer copyArchive.Close() + + options := &TarOptions{ + NoLchown: true, + NoOverwriteDirNonDir: true, + } + + return Untar(copyArchive, dstDir, options) +} + +// ResolveHostSourcePath decides real path need to be copied with parameters such as +// whether to follow symbol link or not, if followLink is true, resolvedPath will return +// link target of any symbol link file, else it will only resolve symlink of directory +// but return symbol link file itself without resolving. +func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) { + if followLink { + resolvedPath, err = filepath.EvalSymlinks(path) + if err != nil { + return + } + + resolvedPath, rebaseName = GetRebaseName(path, resolvedPath) + } else { + dirPath, basePath := filepath.Split(path) + + // if not follow symbol link, then resolve symbol link of parent dir + var resolvedDirPath string + resolvedDirPath, err = filepath.EvalSymlinks(dirPath) + if err != nil { + return + } + // resolvedDirPath will have been cleaned (no trailing path separators) so + // we can manually join it with the base path element. 
+ resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath + if hasTrailingPathSeparator(path, os.PathSeparator) && + filepath.Base(path) != filepath.Base(resolvedPath) { + rebaseName = filepath.Base(path) + } + } + return resolvedPath, rebaseName, nil +} + +// GetRebaseName normalizes and compares path and resolvedPath, +// return completed resolved path and rebased file name +func GetRebaseName(path, resolvedPath string) (string, string) { + // linkTarget will have been cleaned (no trailing path separators and dot) so + // we can manually join it with them + var rebaseName string + if specifiesCurrentDir(path) && + !specifiesCurrentDir(resolvedPath) { + resolvedPath += string(filepath.Separator) + "." + } + + if hasTrailingPathSeparator(path, os.PathSeparator) && + !hasTrailingPathSeparator(resolvedPath, os.PathSeparator) { + resolvedPath += string(filepath.Separator) + } + + if filepath.Base(path) != filepath.Base(resolvedPath) { + // In the case where the path had a trailing separator and a symlink + // evaluation has changed the last path component, we will need to + // rebase the name in the archive that is being copied to match the + // originally requested name. + rebaseName = filepath.Base(path) + } + return resolvedPath, rebaseName +} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go new file mode 100644 index 000000000..2ac7729f4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go @@ -0,0 +1,12 @@ +//go:build !windows +// +build !windows + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.ToSlash(path) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go new file mode 100644 index 000000000..a878d1bac --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go @@ -0,0 +1,9 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.FromSlash(path) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go new file mode 100644 index 000000000..8eeccb608 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/diff.go @@ -0,0 +1,244 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { + tr := tar.NewReader(layer) + trBuf := pools.BufioReader32KPool.Get(tr) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + unpackedPaths := make(map[string]struct{}) + + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + aufsTempdir := "" + aufsHardlinks := make(map[string]*tar.Header) + + // Iterate through the files in the archive. 
+ for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return 0, err + } + + size += hdr.Size + + // Normalize name, for safety and for a simple is-root check + hdr.Name = filepath.Clean(hdr.Name) + + // Windows does not support filenames with colons in them. Ignore + // these files. This is not a problem though (although it might + // appear that it is). Let's suppose a client is running docker pull. + // The daemon it points to is Windows. Would it make sense for the + // client to be doing a docker pull Ubuntu for example (which has files + // with colons in the name under /usr/share/man/man3)? No, absolutely + // not as it would really only make sense that they were pulling a + // Windows image. However, for development, it is necessary to be able + // to pull Linux images which are in the repository. + // + // TODO Windows. Once the registry is aware of what images are Windows- + // specific or Linux-specific, this warning should be changed to an error + // to cater for the situation where someone does manage to upload a Linux + // image but have it tagged as Windows inadvertently. + if runtime.GOOS == "windows" { + if strings.Contains(hdr.Name, ":") { + logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) + continue + } + } + + // Ensure that the parent directory exists. + err = createImpliedDirectories(dest, hdr, options) + if err != nil { + return 0, err + } + + // Skip AUFS metadata dirs + if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) { + // Regular files inside /.wh..wh.plnk can be used as hardlink targets + // We don't want this directory, but we need the files in them so that + // such hardlinks can be resolved. + if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg { + basename := filepath.Base(hdr.Name) + aufsHardlinks[basename] = hdr + if aufsTempdir == "" { + if aufsTempdir, err = os.MkdirTemp("", "dockerplnk"); err != nil { + return 0, err + } + defer os.RemoveAll(aufsTempdir) + } + if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil { + return 0, err + } + } + + if hdr.Name != WhiteoutOpaqueDir { + continue + } + } + //#nosec G305 -- The joined path is guarded against path traversal. + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return 0, err + } + + // Note as these operations are platform specific, so must the slash be. + if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + base := filepath.Base(path) + + if strings.HasPrefix(base, WhiteoutPrefix) { + dir := filepath.Dir(path) + if base == WhiteoutOpaqueDir { + _, err := os.Lstat(dir) + if err != nil { + return 0, err + } + err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + if os.IsNotExist(err) { + err = nil // parent was deleted + } + return err + } + if path == dir { + return nil + } + if _, exists := unpackedPaths[path]; !exists { + err := os.RemoveAll(path) + return err + } + return nil + }) + if err != nil { + return 0, err + } + } else { + originalBase := base[len(WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + if err := os.RemoveAll(originalPath); err != nil { + return 0, err + } + } + } else { + // If path exits we almost always just want to remove and replace it. 
+ // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). + if fi, err := os.Lstat(path); err == nil { + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return 0, err + } + } + } + + trBuf.Reset(tr) + srcData := io.Reader(trBuf) + srcHdr := hdr + + // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so + // we manually retarget these into the temporary files we extracted them into + if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { + linkBasename := filepath.Base(hdr.Linkname) + srcHdr = aufsHardlinks[linkBasename] + if srcHdr == nil { + return 0, fmt.Errorf("Invalid aufs hardlink") + } + tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) + if err != nil { + return 0, err + } + defer tmpFile.Close() + srcData = tmpFile + } + + if err := remapIDs(options.IDMap, srcHdr); err != nil { + return 0, err + } + + if err := createTarFile(path, dest, srcHdr, srcData, !options.NoLchown, nil, options.InUserNS); err != nil { + return 0, err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + unpackedPaths[path] = struct{}{} + } + } + + for _, hdr := range dirs { + //#nosec G305 -- The header was checked for path traversal before it was appended to the dirs slice. + path := filepath.Join(dest, hdr.Name) + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return 0, err + } + } + + return size, nil +} + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer io.Reader) (int64, error) { + return applyLayerHandler(dest, layer, &TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} + +// do the bulk load of ApplyLayer, but allow for not calling DecompressStream +func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) { + dest = filepath.Clean(dest) + + // We need to be able to set any perms + restore := overrideUmask(0) + defer restore() + + if decompress { + decompLayer, err := DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompLayer.Close() + layer = decompLayer + } + return UnpackLayer(dest, layer, options) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff_unix.go b/vendor/github.com/docker/docker/pkg/archive/diff_unix.go new file mode 100644 index 000000000..d7f806445 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/diff_unix.go @@ -0,0 +1,22 @@ +//go:build !windows +// +build !windows + +package archive + +import "golang.org/x/sys/unix" + +// overrideUmask sets current process's file mode creation mask to newmask +// and returns a function to restore it. 
+// +// WARNING for readers stumbling upon this code. Changing umask in a multi- +// threaded environment isn't safe. Don't use this without understanding the +// risks, and don't export this function for others to use (we shouldn't even +// be using this ourself). +// +// FIXME(thaJeztah): we should get rid of these hacks if possible. +func overrideUmask(newMask int) func() { + oldMask := unix.Umask(newMask) + return func() { + unix.Umask(oldMask) + } +} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff_windows.go b/vendor/github.com/docker/docker/pkg/archive/diff_windows.go new file mode 100644 index 000000000..d28f5b2df --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/diff_windows.go @@ -0,0 +1,6 @@ +package archive + +// overrideUmask is a no-op on windows. +func overrideUmask(newmask int) func() { + return func() {} +} diff --git a/vendor/github.com/docker/docker/pkg/archive/time_linux.go b/vendor/github.com/docker/docker/pkg/archive/time_linux.go new file mode 100644 index 000000000..797143ee8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/time_linux.go @@ -0,0 +1,16 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + if time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = (1 << 30) - 2 + return + } + return syscall.NsecToTimespec(time.UnixNano()) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go new file mode 100644 index 000000000..d08779686 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go @@ -0,0 +1,17 @@ +//go:build !linux +// +build !linux + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + nsec := int64(0) + if !time.IsZero() { + nsec = time.UnixNano() + } + return syscall.NsecToTimespec(nsec) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go new file mode 100644 index 000000000..4c072a87e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go @@ -0,0 +1,23 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +// Whiteouts are files with a special meaning for the layered filesystem. +// Docker uses AUFS whiteout files inside exported archives. In other +// filesystems these files are generated/handled on tar creation/extraction. + +// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a +// filename this means that file has been removed from the base layer. +const WhiteoutPrefix = ".wh." + +// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not +// for removing an actual file. Normally these files are excluded from exported +// archives. +const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix + +// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other +// layers. Normally these should not go into exported archives and all changed +// hardlinks should be copied to the top layer. +const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" + +// WhiteoutOpaqueDir file means directory has been made opaque - meaning +// readdir calls to this directory do not follow to lower layers. 
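+//
+// For example (illustrative names), a layer that deletes /etc/passwd carries
+// an entry named "etc/.wh.passwd", while a layer that makes /etc/apt opaque
+// carries "etc/apt/.wh..wh..opq":
+//
+//	etc/.wh.passwd       -> remove etc/passwd from the lower layers
+//	etc/apt/.wh..wh..opq -> hide everything under etc/apt in the lower layers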
+const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq" diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/docker/docker/pkg/archive/wrap.go new file mode 100644 index 000000000..032db82ce --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/wrap.go @@ -0,0 +1,59 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "bytes" + "io" +) + +// Generate generates a new archive from the content provided +// as input. +// +// `files` is a sequence of path/content pairs. A new file is +// added to the archive for each pair. +// If the last pair is incomplete, the file is created with an +// empty content. For example: +// +// Generate("foo.txt", "hello world", "emptyfile") +// +// The above call will return an archive with 2 files: +// - ./foo.txt with content "hello world" +// - ./empty with empty content +// +// FIXME: stream content instead of buffering +// FIXME: specify permissions and other archive metadata +func Generate(input ...string) (io.Reader, error) { + files := parseStringPairs(input...) + buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, file := range files { + name, content := file[0], file[1] + hdr := &tar.Header{ + Name: name, + Size: int64(len(content)), + } + if err := tw.WriteHeader(hdr); err != nil { + return nil, err + } + if _, err := tw.Write([]byte(content)); err != nil { + return nil, err + } + } + if err := tw.Close(); err != nil { + return nil, err + } + return buf, nil +} + +func parseStringPairs(input ...string) (output [][2]string) { + output = make([][2]string, 0, len(input)/2+1) + for i := 0; i < len(input); i += 2 { + var pair [2]string + pair[0] = input[i] + if i+1 < len(input) { + pair[1] = input[i+1] + } + output = append(output, pair) + } + return +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go new file mode 100644 index 000000000..1e0a89004 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools.go @@ -0,0 +1,243 @@ +package idtools // import "github.com/docker/docker/pkg/idtools" + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" +) + +// IDMap contains a single entry for user namespace range remapping. An array +// of IDMap entries represents the structure that will be provided to the Linux +// kernel for creating a user namespace. +type IDMap struct { + ContainerID int `json:"container_id"` + HostID int `json:"host_id"` + Size int `json:"size"` +} + +type subIDRange struct { + Start int + Length int +} + +type ranges []subIDRange + +func (e ranges) Len() int { return len(e) } +func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } +func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } + +const ( + subuidFileName = "/etc/subuid" + subgidFileName = "/etc/subgid" +) + +// MkdirAllAndChown creates a directory (include any along the path) and then modifies +// ownership to the requested uid/gid. If the directory already exists, this +// function will still change ownership and permissions. +func MkdirAllAndChown(path string, mode os.FileMode, owner Identity) error { + return mkdirAs(path, mode, owner, true, true) +} + +// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. +// If the directory already exists, this function still changes ownership and permissions. +// Note that unlike os.Mkdir(), this function does not return IsExist error +// in case path already exists. 
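+//
+// A minimal usage sketch from a caller's point of view (path and IDs are
+// hypothetical):
+//
+//	owner := idtools.Identity{UID: 1000, GID: 1000}
+//	if err := idtools.MkdirAndChown("/var/lib/app/state", 0755, owner); err != nil {
+//		return err
+//	}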
+func MkdirAndChown(path string, mode os.FileMode, owner Identity) error { + return mkdirAs(path, mode, owner, false, true) +} + +// MkdirAllAndChownNew creates a directory (include any along the path) and then modifies +// ownership ONLY of newly created directories to the requested uid/gid. If the +// directories along the path exist, no change of ownership or permissions will be performed +func MkdirAllAndChownNew(path string, mode os.FileMode, owner Identity) error { + return mkdirAs(path, mode, owner, true, false) +} + +// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. +// If the maps are empty, then the root uid/gid will default to "real" 0/0 +func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { + uid, err := toHost(0, uidMap) + if err != nil { + return -1, -1, err + } + gid, err := toHost(0, gidMap) + if err != nil { + return -1, -1, err + } + return uid, gid, nil +} + +// toContainer takes an id mapping, and uses it to translate a +// host ID to the remapped ID. If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id +func toContainer(hostID int, idMap []IDMap) (int, error) { + if idMap == nil { + return hostID, nil + } + for _, m := range idMap { + if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { + contID := m.ContainerID + (hostID - m.HostID) + return contID, nil + } + } + return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) +} + +// toHost takes an id mapping and a remapped ID, and translates the +// ID to the mapped host ID. If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id # +func toHost(contID int, idMap []IDMap) (int, error) { + if idMap == nil { + return contID, nil + } + for _, m := range idMap { + if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) { + hostID := m.HostID + (contID - m.ContainerID) + return hostID, nil + } + } + return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) +} + +// Identity is either a UID and GID pair or a SID (but not both) +type Identity struct { + UID int + GID int + SID string +} + +// Chown changes the numeric uid and gid of the named file to id.UID and id.GID. +func (id Identity) Chown(name string) error { + return os.Chown(name, id.UID, id.GID) +} + +// IdentityMapping contains a mappings of UIDs and GIDs. +// The zero value represents an empty mapping. +type IdentityMapping struct { + UIDMaps []IDMap `json:"UIDMaps"` + GIDMaps []IDMap `json:"GIDMaps"` +} + +// RootPair returns a uid and gid pair for the root user. The error is ignored +// because a root user always exists, and the defaults are correct when the uid +// and gid maps are empty. +func (i IdentityMapping) RootPair() Identity { + uid, gid, _ := GetRootUIDGID(i.UIDMaps, i.GIDMaps) + return Identity{UID: uid, GID: gid} +} + +// ToHost returns the host UID and GID for the container uid, gid. 
+// Remapping is only performed if the ids aren't already the remapped root ids +func (i IdentityMapping) ToHost(pair Identity) (Identity, error) { + var err error + target := i.RootPair() + + if pair.UID != target.UID { + target.UID, err = toHost(pair.UID, i.UIDMaps) + if err != nil { + return target, err + } + } + + if pair.GID != target.GID { + target.GID, err = toHost(pair.GID, i.GIDMaps) + } + return target, err +} + +// ToContainer returns the container UID and GID for the host uid and gid +func (i IdentityMapping) ToContainer(pair Identity) (int, int, error) { + uid, err := toContainer(pair.UID, i.UIDMaps) + if err != nil { + return -1, -1, err + } + gid, err := toContainer(pair.GID, i.GIDMaps) + return uid, gid, err +} + +// Empty returns true if there are no id mappings +func (i IdentityMapping) Empty() bool { + return len(i.UIDMaps) == 0 && len(i.GIDMaps) == 0 +} + +// UIDs returns the mapping for UID. +// +// Deprecated: reference the UIDMaps field directly. +func (i IdentityMapping) UIDs() []IDMap { + return i.UIDMaps +} + +// GIDs returns the mapping for GID. +// +// Deprecated: reference the GIDMaps field directly. +func (i IdentityMapping) GIDs() []IDMap { + return i.GIDMaps +} + +func createIDMap(subidRanges ranges) []IDMap { + idMap := []IDMap{} + + containerID := 0 + for _, idrange := range subidRanges { + idMap = append(idMap, IDMap{ + ContainerID: containerID, + HostID: idrange.Start, + Size: idrange.Length, + }) + containerID = containerID + idrange.Length + } + return idMap +} + +func parseSubuid(username string) (ranges, error) { + return parseSubidFile(subuidFileName, username) +} + +func parseSubgid(username string) (ranges, error) { + return parseSubidFile(subgidFileName, username) +} + +// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid) +// and return all found ranges for a specified username. 
If the special value +// "ALL" is supplied for username, then all ranges in the file will be returned +func parseSubidFile(path, username string) (ranges, error) { + var rangeList ranges + + subidFile, err := os.Open(path) + if err != nil { + return rangeList, err + } + defer subidFile.Close() + + s := bufio.NewScanner(subidFile) + for s.Scan() { + text := strings.TrimSpace(s.Text()) + if text == "" || strings.HasPrefix(text, "#") { + continue + } + parts := strings.Split(text, ":") + if len(parts) != 3 { + return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) + } + if parts[0] == username || username == "ALL" { + startid, err := strconv.Atoi(parts[1]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + length, err := strconv.Atoi(parts[2]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + rangeList = append(rangeList, subIDRange{startid, length}) + } + } + + return rangeList, s.Err() +} + +// CurrentIdentity returns the identity of the current process +func CurrentIdentity() Identity { + return Identity{UID: os.Getuid(), GID: os.Getegid()} +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go new file mode 100644 index 000000000..03846b030 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go @@ -0,0 +1,325 @@ +//go:build !windows +// +build !windows + +package idtools // import "github.com/docker/docker/pkg/idtools" + +import ( + "bytes" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "strconv" + "sync" + "syscall" + + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/runc/libcontainer/user" + "github.com/pkg/errors" +) + +var ( + entOnce sync.Once + getentCmd string +) + +func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { + // make an array containing the original path asked for, plus (for mkAll == true) + // all path components leading up to the complete path that don't exist before we MkdirAll + // so that we can chown all of them properly at the end. 
If chownExisting is false, we won't + // chown the full directory path if it exists + + var paths []string + path, err := filepath.Abs(path) + if err != nil { + return err + } + + stat, err := system.Stat(path) + if err == nil { + if !stat.IsDir() { + return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} + } + if !chownExisting { + return nil + } + + // short-circuit--we were called with an existing directory and chown was requested + return setPermissions(path, mode, owner.UID, owner.GID, stat) + } + + if os.IsNotExist(err) { + paths = []string{path} + } + + if mkAll { + // walk back to "/" looking for directories which do not exist + // and add them to the paths array for chown after creation + dirPath := path + for { + dirPath = filepath.Dir(dirPath) + if dirPath == "/" { + break + } + if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { + paths = append(paths, dirPath) + } + } + if err := system.MkdirAll(path, mode); err != nil { + return err + } + } else { + if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { + return err + } + } + // even if it existed, we will chown the requested path + any subpaths that + // didn't exist when we called MkdirAll + for _, pathComponent := range paths { + if err := setPermissions(pathComponent, mode, owner.UID, owner.GID, nil); err != nil { + return err + } + } + return nil +} + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +func CanAccess(path string, pair Identity) bool { + statInfo, err := system.Stat(path) + if err != nil { + return false + } + fileMode := os.FileMode(statInfo.Mode()) + permBits := fileMode.Perm() + return accessible(statInfo.UID() == uint32(pair.UID), + statInfo.GID() == uint32(pair.GID), permBits) +} + +func accessible(isOwner, isGroup bool, perms os.FileMode) bool { + if isOwner && (perms&0100 == 0100) { + return true + } + if isGroup && (perms&0010 == 0010) { + return true + } + if perms&0001 == 0001 { + return true + } + return false +} + +// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUser(name string) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUser(name) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + usr, err = getentUser(name) + if err != nil { + return user.User{}, err + } + return usr, nil +} + +// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUID(uid int) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUid(uid) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + return getentUser(strconv.Itoa(uid)) +} + +func getentUser(name string) (user.User, error) { + reader, err := callGetent("passwd", name) + if err != nil { + return user.User{}, err + } + users, err := user.ParsePasswd(reader) + if err != nil { + return user.User{}, err + } + if len(users) == 0 { + return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", name) + } + return users[0], nil +} + +// 
LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupGroup(name string) (user.Group, error) { + // first try a local system files lookup using existing capabilities + group, err := user.LookupGroup(name) + if err == nil { + return group, nil + } + // local files lookup failed; attempt to call `getent` to query configured group dbs + return getentGroup(name) +} + +// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupGID(gid int) (user.Group, error) { + // first try a local system files lookup using existing capabilities + group, err := user.LookupGid(gid) + if err == nil { + return group, nil + } + // local files lookup failed; attempt to call `getent` to query configured group dbs + return getentGroup(strconv.Itoa(gid)) +} + +func getentGroup(name string) (user.Group, error) { + reader, err := callGetent("group", name) + if err != nil { + return user.Group{}, err + } + groups, err := user.ParseGroup(reader) + if err != nil { + return user.Group{}, err + } + if len(groups) == 0 { + return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", name) + } + return groups[0], nil +} + +func callGetent(database, key string) (io.Reader, error) { + entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) + // if no `getent` command on host, can't do anything else + if getentCmd == "" { + return nil, fmt.Errorf("unable to find getent command") + } + out, err := execCmd(getentCmd, database, key) + if err != nil { + exitCode, errC := getExitCode(err) + if errC != nil { + return nil, err + } + switch exitCode { + case 1: + return nil, fmt.Errorf("getent reported invalid parameters/database unknown") + case 2: + return nil, fmt.Errorf("getent unable to find entry %q in %s database", key, database) + case 3: + return nil, fmt.Errorf("getent database doesn't support enumeration") + default: + return nil, err + } + } + return bytes.NewReader(out), nil +} + +// getExitCode returns the ExitStatus of the specified error if its type is +// exec.ExitError, returns 0 and an error otherwise. +func getExitCode(err error) (int, error) { + exitCode := 0 + if exiterr, ok := err.(*exec.ExitError); ok { + if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { + return procExit.ExitStatus(), nil + } + } + return exitCode, fmt.Errorf("failed to get exit code") +} + +// setPermissions performs a chown/chmod only if the uid/gid don't match what's requested +// Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the +// dir is on an NFS share, so don't call chown unless we absolutely must. +// Likewise for setting permissions. 
+func setPermissions(p string, mode os.FileMode, uid, gid int, stat *system.StatT) error { + if stat == nil { + var err error + stat, err = system.Stat(p) + if err != nil { + return err + } + } + if os.FileMode(stat.Mode()).Perm() != mode.Perm() { + if err := os.Chmod(p, mode.Perm()); err != nil { + return err + } + } + if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) { + return nil + } + return os.Chown(p, uid, gid) +} + +// NewIdentityMapping takes a requested username and +// using the data from /etc/sub{uid,gid} ranges, creates the +// proper uid and gid remapping ranges for that user/group pair +// +// Deprecated: Use LoadIdentityMapping. +func NewIdentityMapping(name string) (*IdentityMapping, error) { + m, err := LoadIdentityMapping(name) + if err != nil { + return nil, err + } + return &m, err +} + +// LoadIdentityMapping takes a requested username and +// using the data from /etc/sub{uid,gid} ranges, creates the +// proper uid and gid remapping ranges for that user/group pair +func LoadIdentityMapping(name string) (IdentityMapping, error) { + usr, err := LookupUser(name) + if err != nil { + return IdentityMapping{}, fmt.Errorf("Could not get user for username %s: %v", name, err) + } + + subuidRanges, err := lookupSubUIDRanges(usr) + if err != nil { + return IdentityMapping{}, err + } + subgidRanges, err := lookupSubGIDRanges(usr) + if err != nil { + return IdentityMapping{}, err + } + + return IdentityMapping{ + UIDMaps: subuidRanges, + GIDMaps: subgidRanges, + }, nil +} + +func lookupSubUIDRanges(usr user.User) ([]IDMap, error) { + rangeList, err := parseSubuid(strconv.Itoa(usr.Uid)) + if err != nil { + return nil, err + } + if len(rangeList) == 0 { + rangeList, err = parseSubuid(usr.Name) + if err != nil { + return nil, err + } + } + if len(rangeList) == 0 { + return nil, errors.Errorf("no subuid ranges found for user %q", usr.Name) + } + return createIDMap(rangeList), nil +} + +func lookupSubGIDRanges(usr user.User) ([]IDMap, error) { + rangeList, err := parseSubgid(strconv.Itoa(usr.Uid)) + if err != nil { + return nil, err + } + if len(rangeList) == 0 { + rangeList, err = parseSubgid(usr.Name) + if err != nil { + return nil, err + } + } + if len(rangeList) == 0 { + return nil, errors.Errorf("no subgid ranges found for user %q", usr.Name) + } + return createIDMap(rangeList), nil +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go new file mode 100644 index 000000000..0f5aadd49 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go @@ -0,0 +1,34 @@ +package idtools // import "github.com/docker/docker/pkg/idtools" + +import ( + "os" + + "github.com/docker/docker/pkg/system" +) + +const ( + SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege" +) + +const ( + ContainerAdministratorSidString = "S-1-5-93-2-1" + ContainerUserSidString = "S-1-5-93-2-2" +) + +// This is currently a wrapper around MkdirAll, however, since currently +// permissions aren't set through this path, the identity isn't utilized. +// Ownership is handled elsewhere, but in the future could be support here +// too. 
+func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { + if err := system.MkdirAll(path, mode); err != nil { + return err + } + return nil +} + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +// Windows does not require/support this function, so always return true +func CanAccess(path string, identity Identity) bool { + return true +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go new file mode 100644 index 000000000..3ad9255df --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go @@ -0,0 +1,163 @@ +package idtools // import "github.com/docker/docker/pkg/idtools" + +import ( + "fmt" + "regexp" + "sort" + "strconv" + "strings" + "sync" +) + +// add a user and/or group to Linux /etc/passwd, /etc/group using standard +// Linux distribution commands: +// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group +// useradd -r -s /bin/false + +var ( + once sync.Once + userCommand string + idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`) +) + +const ( + // default length for a UID/GID subordinate range + defaultRangeLen = 65536 + defaultRangeStart = 100000 +) + +// AddNamespaceRangesUser takes a username and uses the standard system +// utility to create a system user/group pair used to hold the +// /etc/sub{uid,gid} ranges which will be used for user namespace +// mapping ranges in containers. +func AddNamespaceRangesUser(name string) (int, int, error) { + if err := addUser(name); err != nil { + return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) + } + + // Query the system for the created uid and gid pair + out, err := execCmd("id", name) + if err != nil { + return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err) + } + matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out))) + if len(matches) != 3 { + return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out)) + } + uid, err := strconv.Atoi(matches[1]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err) + } + gid, err := strconv.Atoi(matches[2]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err) + } + + // Now we need to create the subuid/subgid ranges for our new user/group (system users + // do not get auto-created ranges in subuid/subgid) + + if err := createSubordinateRanges(name); err != nil { + return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err) + } + return uid, gid, nil +} + +func addUser(name string) error { + once.Do(func() { + // set up which commands are used for adding users/groups dependent on distro + if _, err := resolveBinary("adduser"); err == nil { + userCommand = "adduser" + } else if _, err := resolveBinary("useradd"); err == nil { + userCommand = "useradd" + } + }) + var args []string + switch userCommand { + case "adduser": + args = []string{"--system", "--shell", "/bin/false", "--no-create-home", "--disabled-login", "--disabled-password", "--group", name} + case "useradd": + args = []string{"-r", "-s", "/bin/false", name} + default: + return fmt.Errorf("cannot add user; no useradd/adduser binary found") + } + + if out, err := execCmd(userCommand, args...); err != nil { + return 
fmt.Errorf("failed to add user with error: %v; output: %q", err, string(out)) + } + return nil +} + +func createSubordinateRanges(name string) error { + // first, we should verify that ranges weren't automatically created + // by the distro tooling + ranges, err := parseSubuid(name) + if err != nil { + return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no UID ranges; let's create one + startID, err := findNextUIDRange() + if err != nil { + return fmt.Errorf("Can't find available subuid range: %v", err) + } + out, err := execCmd("usermod", "-v", fmt.Sprintf("%d-%d", startID, startID+defaultRangeLen-1), name) + if err != nil { + return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err) + } + } + + ranges, err = parseSubgid(name) + if err != nil { + return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no GID ranges; let's create one + startID, err := findNextGIDRange() + if err != nil { + return fmt.Errorf("Can't find available subgid range: %v", err) + } + out, err := execCmd("usermod", "-w", fmt.Sprintf("%d-%d", startID, startID+defaultRangeLen-1), name) + if err != nil { + return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err) + } + } + return nil +} + +func findNextUIDRange() (int, error) { + ranges, err := parseSubuid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err) + } + sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextGIDRange() (int, error) { + ranges, err := parseSubgid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err) + } + sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextRangeStart(rangeList ranges) (int, error) { + startID := defaultRangeStart + for _, arange := range rangeList { + if wouldOverlap(arange, startID) { + startID = arange.Start + arange.Length + } + } + return startID, nil +} + +func wouldOverlap(arange subIDRange, ID int) bool { + low := ID + high := ID + defaultRangeLen + if (low >= arange.Start && low <= arange.Start+arange.Length) || + (high <= arange.Start+arange.Length && high >= arange.Start) { + return true + } + return false +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go new file mode 100644 index 000000000..5e24577e2 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go @@ -0,0 +1,13 @@ +//go:build !linux +// +build !linux + +package idtools // import "github.com/docker/docker/pkg/idtools" + +import "fmt" + +// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair +// and calls the appropriate helper function to add the group and then +// the user to the group in /etc/group and /etc/passwd respectively. 
+func AddNamespaceRangesUser(name string) (int, int, error) { + return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go new file mode 100644 index 000000000..540672af5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go @@ -0,0 +1,32 @@ +//go:build !windows +// +build !windows + +package idtools // import "github.com/docker/docker/pkg/idtools" + +import ( + "fmt" + "os/exec" + "path/filepath" +) + +func resolveBinary(binname string) (string, error) { + binaryPath, err := exec.LookPath(binname) + if err != nil { + return "", err + } + resolvedPath, err := filepath.EvalSymlinks(binaryPath) + if err != nil { + return "", err + } + // only return no error if the final resolved binary basename + // matches what was searched for + if filepath.Base(resolvedPath) == binname { + return resolvedPath, nil + } + return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) +} + +func execCmd(cmd string, arg ...string) ([]byte, error) { + execCmd := exec.Command(cmd, arg...) + return execCmd.CombinedOutput() +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/buffer.go b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go new file mode 100644 index 000000000..466f79294 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go @@ -0,0 +1,51 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "errors" + "io" +) + +var errBufferFull = errors.New("buffer is full") + +type fixedBuffer struct { + buf []byte + pos int + lastRead int +} + +func (b *fixedBuffer) Write(p []byte) (int, error) { + n := copy(b.buf[b.pos:cap(b.buf)], p) + b.pos += n + + if n < len(p) { + if b.pos == cap(b.buf) { + return n, errBufferFull + } + return n, io.ErrShortWrite + } + return n, nil +} + +func (b *fixedBuffer) Read(p []byte) (int, error) { + n := copy(p, b.buf[b.lastRead:b.pos]) + b.lastRead += n + return n, nil +} + +func (b *fixedBuffer) Len() int { + return b.pos - b.lastRead +} + +func (b *fixedBuffer) Cap() int { + return cap(b.buf) +} + +func (b *fixedBuffer) Reset() { + b.pos = 0 + b.lastRead = 0 + b.buf = b.buf[:0] +} + +func (b *fixedBuffer) String() string { + return string(b.buf[b.lastRead:b.pos]) +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go new file mode 100644 index 000000000..c1cfa62fd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go @@ -0,0 +1,187 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "errors" + "io" + "sync" +) + +// maxCap is the highest capacity to use in byte slices that buffer data. +const maxCap = 1e6 + +// minCap is the lowest capacity to use in byte slices that buffer data +const minCap = 64 + +// blockThreshold is the minimum number of bytes in the buffer which will cause +// a write to BytesPipe to block when allocating a new slice. +const blockThreshold = 1e6 + +var ( + // ErrClosed is returned when Write is called on a closed BytesPipe. + ErrClosed = errors.New("write to closed BytesPipe") + + bufPools = make(map[int]*sync.Pool) + bufPoolsLock sync.Mutex +) + +// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). +// All written data may be read at most once. 
Also, BytesPipe allocates +// and releases new byte slices to adjust to current needs, so the buffer +// won't be overgrown after peak loads. +type BytesPipe struct { + mu sync.Mutex + wait *sync.Cond + buf []*fixedBuffer + bufLen int + closeErr error // error to return from next Read. set to nil if not closed. + readBlock bool // check read BytesPipe is Wait() or not +} + +// NewBytesPipe creates new BytesPipe, initialized by specified slice. +// If buf is nil, then it will be initialized with slice which cap is 64. +// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf). +func NewBytesPipe() *BytesPipe { + bp := &BytesPipe{} + bp.buf = append(bp.buf, getBuffer(minCap)) + bp.wait = sync.NewCond(&bp.mu) + return bp +} + +// Write writes p to BytesPipe. +// It can allocate new []byte slices in a process of writing. +func (bp *BytesPipe) Write(p []byte) (int, error) { + bp.mu.Lock() + defer bp.mu.Unlock() + + written := 0 +loop0: + for { + if bp.closeErr != nil { + return written, ErrClosed + } + + if len(bp.buf) == 0 { + bp.buf = append(bp.buf, getBuffer(64)) + } + // get the last buffer + b := bp.buf[len(bp.buf)-1] + + n, err := b.Write(p) + written += n + bp.bufLen += n + + // errBufferFull is an error we expect to get if the buffer is full + if err != nil && err != errBufferFull { + bp.wait.Broadcast() + return written, err + } + + // if there was enough room to write all then break + if len(p) == n { + break + } + + // more data: write to the next slice + p = p[n:] + + // make sure the buffer doesn't grow too big from this write + for bp.bufLen >= blockThreshold { + if bp.readBlock { + bp.wait.Broadcast() + } + bp.wait.Wait() + if bp.closeErr != nil { + continue loop0 + } + } + + // add new byte slice to the buffers slice and continue writing + nextCap := b.Cap() * 2 + if nextCap > maxCap { + nextCap = maxCap + } + bp.buf = append(bp.buf, getBuffer(nextCap)) + } + bp.wait.Broadcast() + return written, nil +} + +// CloseWithError causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) CloseWithError(err error) error { + bp.mu.Lock() + if err != nil { + bp.closeErr = err + } else { + bp.closeErr = io.EOF + } + bp.wait.Broadcast() + bp.mu.Unlock() + return nil +} + +// Close causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) Close() error { + return bp.CloseWithError(nil) +} + +// Read reads bytes from BytesPipe. +// Data could be read only once. 
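+//
+// A minimal usage sketch (illustrative only):
+//
+//	bp := ioutils.NewBytesPipe()
+//	go func() {
+//		bp.Write([]byte("hello"))
+//		bp.Close()
+//	}()
+//	buf := make([]byte, 64)
+//	n, _ := bp.Read(buf) // n == 5, buf[:n] == "hello"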
+func (bp *BytesPipe) Read(p []byte) (n int, err error) { + bp.mu.Lock() + defer bp.mu.Unlock() + if bp.bufLen == 0 { + if bp.closeErr != nil { + return 0, bp.closeErr + } + bp.readBlock = true + bp.wait.Wait() + bp.readBlock = false + if bp.bufLen == 0 && bp.closeErr != nil { + return 0, bp.closeErr + } + } + + for bp.bufLen > 0 { + b := bp.buf[0] + read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error + n += read + bp.bufLen -= read + + if b.Len() == 0 { + // it's empty so return it to the pool and move to the next one + returnBuffer(b) + bp.buf[0] = nil + bp.buf = bp.buf[1:] + } + + if len(p) == read { + break + } + + p = p[read:] + } + + bp.wait.Broadcast() + return +} + +func returnBuffer(b *fixedBuffer) { + b.Reset() + bufPoolsLock.Lock() + pool := bufPools[b.Cap()] + bufPoolsLock.Unlock() + if pool != nil { + pool.Put(b) + } +} + +func getBuffer(size int) *fixedBuffer { + bufPoolsLock.Lock() + pool, ok := bufPools[size] + if !ok { + pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} + bufPools[size] = pool + } + bufPoolsLock.Unlock() + return pool.Get().(*fixedBuffer) +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go new file mode 100644 index 000000000..82671d8cd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go @@ -0,0 +1,161 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "io" + "os" + "path/filepath" +) + +// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a +// temporary file and closing it atomically changes the temporary file to +// destination path. Writing and closing concurrently is not allowed. +func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) { + f, err := os.CreateTemp(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) + if err != nil { + return nil, err + } + + abspath, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + return &atomicFileWriter{ + f: f, + fn: abspath, + perm: perm, + }, nil +} + +// AtomicWriteFile atomically writes data to a file named by filename. +func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := NewAtomicFileWriter(filename, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + f.(*atomicFileWriter).writeErr = err + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +type atomicFileWriter struct { + f *os.File + fn string + writeErr error + perm os.FileMode +} + +func (w *atomicFileWriter) Write(dt []byte) (int, error) { + n, err := w.f.Write(dt) + if err != nil { + w.writeErr = err + } + return n, err +} + +func (w *atomicFileWriter) Close() (retErr error) { + defer func() { + if retErr != nil || w.writeErr != nil { + os.Remove(w.f.Name()) + } + }() + if err := w.f.Sync(); err != nil { + w.f.Close() + return err + } + if err := w.f.Close(); err != nil { + return err + } + if err := os.Chmod(w.f.Name(), w.perm); err != nil { + return err + } + if w.writeErr == nil { + return os.Rename(w.f.Name(), w.fn) + } + return nil +} + +// AtomicWriteSet is used to atomically write a set +// of files and ensure they are visible at the same time. +// Must be committed to a new directory. 
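+//
+// A usage sketch (paths and data are hypothetical):
+//
+//	ws, err := ioutils.NewAtomicWriteSet("")
+//	if err != nil {
+//		return err
+//	}
+//	if err := ws.WriteFile("config.json", data, 0644); err != nil {
+//		ws.Cancel()
+//		return err
+//	}
+//	return ws.Commit("/etc/myapp") // target must not exist yet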
+type AtomicWriteSet struct { + root string +} + +// NewAtomicWriteSet creates a new atomic write set to +// atomically create a set of files. The given directory +// is used as the base directory for storing files before +// commit. If no temporary directory is given the system +// default is used. +func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) { + td, err := os.MkdirTemp(tmpDir, "write-set-") + if err != nil { + return nil, err + } + + return &AtomicWriteSet{ + root: td, + }, nil +} + +// WriteFile writes a file to the set, guaranteeing the file +// has been synced. +func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +type syncFileCloser struct { + *os.File +} + +func (w syncFileCloser) Close() error { + err := w.File.Sync() + if err1 := w.File.Close(); err == nil { + err = err1 + } + return err +} + +// FileWriter opens a file writer inside the set. The file +// should be synced and closed before calling commit. +func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) { + f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm) + if err != nil { + return nil, err + } + return syncFileCloser{f}, nil +} + +// Cancel cancels the set and removes all temporary data +// created in the set. +func (ws *AtomicWriteSet) Cancel() error { + return os.RemoveAll(ws.root) +} + +// Commit moves all created files to the target directory. The +// target directory must not exist and the parent of the target +// directory must exist. +func (ws *AtomicWriteSet) Commit(target string) error { + return os.Rename(ws.root, target) +} + +// String returns the location the set is writing to. +func (ws *AtomicWriteSet) String() string { + return ws.root +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/docker/docker/pkg/ioutils/readers.go new file mode 100644 index 000000000..de00b95e3 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/readers.go @@ -0,0 +1,151 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "context" + "io" + + // make sure crypto.SHA256, crypto.sha512 and crypto.SHA384 are registered + // TODO remove once https://github.com/opencontainers/go-digest/pull/64 is merged. + _ "crypto/sha256" + _ "crypto/sha512" +) + +// ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser +// It calls the given callback function when closed. It should be constructed +// with NewReadCloserWrapper +type ReadCloserWrapper struct { + io.Reader + closer func() error +} + +// Close calls back the passed closer function +func (r *ReadCloserWrapper) Close() error { + return r.closer() +} + +// NewReadCloserWrapper returns a new io.ReadCloser. +func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { + return &ReadCloserWrapper{ + Reader: r, + closer: closer, + } +} + +type readerErrWrapper struct { + reader io.Reader + closer func() +} + +func (r *readerErrWrapper) Read(p []byte) (int, error) { + n, err := r.reader.Read(p) + if err != nil { + r.closer() + } + return n, err +} + +// NewReaderErrWrapper returns a new io.Reader. 
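+//
+// A usage sketch (resp is a hypothetical *http.Response); the callback runs
+// whenever a read returns an error, including io.EOF:
+//
+//	body := ioutils.NewReaderErrWrapper(resp.Body, func() {
+//		resp.Body.Close()
+//	})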
+func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { + return &readerErrWrapper{ + reader: r, + closer: closer, + } +} + +// OnEOFReader wraps an io.ReadCloser and a function +// the function will run at the end of file or close the file. +type OnEOFReader struct { + Rc io.ReadCloser + Fn func() +} + +func (r *OnEOFReader) Read(p []byte) (n int, err error) { + n, err = r.Rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +// Close closes the file and run the function. +func (r *OnEOFReader) Close() error { + err := r.Rc.Close() + r.runFunc() + return err +} + +func (r *OnEOFReader) runFunc() { + if fn := r.Fn; fn != nil { + fn() + r.Fn = nil + } +} + +// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read +// operations. +type cancelReadCloser struct { + cancel func() + pR *io.PipeReader // Stream to read from + pW *io.PipeWriter +} + +// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the +// context is cancelled. The returned io.ReadCloser must be closed when it is +// no longer needed. +func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { + pR, pW := io.Pipe() + + // Create a context used to signal when the pipe is closed + doneCtx, cancel := context.WithCancel(context.Background()) + + p := &cancelReadCloser{ + cancel: cancel, + pR: pR, + pW: pW, + } + + go func() { + _, err := io.Copy(pW, in) + select { + case <-ctx.Done(): + // If the context was closed, p.closeWithError + // was already called. Calling it again would + // change the error that Read returns. + default: + p.closeWithError(err) + } + in.Close() + }() + go func() { + for { + select { + case <-ctx.Done(): + p.closeWithError(ctx.Err()) + case <-doneCtx.Done(): + return + } + } + }() + + return p +} + +// Read wraps the Read method of the pipe that provides data from the wrapped +// ReadCloser. +func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { + return p.pR.Read(buf) +} + +// closeWithError closes the wrapper and its underlying reader. It will +// cause future calls to Read to return err. +func (p *cancelReadCloser) closeWithError(err error) { + p.pW.CloseWithError(err) + p.cancel() +} + +// Close closes the wrapper its underlying reader. It will cause +// future calls to Read to return io.EOF. +func (p *cancelReadCloser) Close() error { + p.closeWithError(io.EOF) + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go new file mode 100644 index 000000000..748912230 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go @@ -0,0 +1,11 @@ +//go:build !windows +// +build !windows + +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import "os" + +// TempDir on Unix systems is equivalent to os.MkdirTemp. +func TempDir(dir, prefix string) (string, error) { + return os.MkdirTemp(dir, prefix) +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go new file mode 100644 index 000000000..a57fd9af6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go @@ -0,0 +1,16 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "os" + + "github.com/docker/docker/pkg/longpath" +) + +// TempDir is the equivalent of os.MkdirTemp, except that the result is in Windows longpath format. 
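+//
+// For example (illustrative output), TempDir("", "docker-") may return
+// something like:
+//
+//	\\?\C:\Users\me\AppData\Local\Temp\docker-2203843503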
+func TempDir(dir, prefix string) (string, error) { + tempDir, err := os.MkdirTemp(dir, prefix) + if err != nil { + return "", err + } + return longpath.AddPrefix(tempDir), nil +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go new file mode 100644 index 000000000..91b8d1826 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go @@ -0,0 +1,92 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "io" + "sync" +) + +// WriteFlusher wraps the Write and Flush operation ensuring that every write +// is a flush. In addition, the Close method can be called to intercept +// Read/Write calls if the targets lifecycle has already ended. +type WriteFlusher struct { + w io.Writer + flusher flusher + flushed chan struct{} + flushedOnce sync.Once + closed chan struct{} + closeLock sync.Mutex +} + +type flusher interface { + Flush() +} + +var errWriteFlusherClosed = io.EOF + +func (wf *WriteFlusher) Write(b []byte) (n int, err error) { + select { + case <-wf.closed: + return 0, errWriteFlusherClosed + default: + } + + n, err = wf.w.Write(b) + wf.Flush() // every write is a flush. + return n, err +} + +// Flush the stream immediately. +func (wf *WriteFlusher) Flush() { + select { + case <-wf.closed: + return + default: + } + + wf.flushedOnce.Do(func() { + close(wf.flushed) + }) + wf.flusher.Flush() +} + +// Flushed returns the state of flushed. +// If it's flushed, return true, or else it return false. +func (wf *WriteFlusher) Flushed() bool { + // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to + // be used to detect whether or a response code has been issued or not. + // Another hook should be used instead. + var flushed bool + select { + case <-wf.flushed: + flushed = true + default: + } + return flushed +} + +// Close closes the write flusher, disallowing any further writes to the +// target. After the flusher is closed, all calls to write or flush will +// result in an error. +func (wf *WriteFlusher) Close() error { + wf.closeLock.Lock() + defer wf.closeLock.Unlock() + + select { + case <-wf.closed: + return errWriteFlusherClosed + default: + close(wf.closed) + } + return nil +} + +// NewWriteFlusher returns a new WriteFlusher. +func NewWriteFlusher(w io.Writer) *WriteFlusher { + var fl flusher + if f, ok := w.(flusher); ok { + fl = f + } else { + fl = &NopFlusher{} + } + return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})} +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/docker/docker/pkg/ioutils/writers.go new file mode 100644 index 000000000..61c679497 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/writers.go @@ -0,0 +1,66 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import "io" + +// NopWriter represents a type which write operation is nop. +type NopWriter struct{} + +func (*NopWriter) Write(buf []byte) (int, error) { + return len(buf), nil +} + +type nopWriteCloser struct { + io.Writer +} + +func (w *nopWriteCloser) Close() error { return nil } + +// NopWriteCloser returns a nopWriteCloser. +func NopWriteCloser(w io.Writer) io.WriteCloser { + return &nopWriteCloser{w} +} + +// NopFlusher represents a type which flush operation is nop. +type NopFlusher struct{} + +// Flush is a nop operation. 
+func (f *NopFlusher) Flush() {} + +type writeCloserWrapper struct { + io.Writer + closer func() error +} + +func (r *writeCloserWrapper) Close() error { + return r.closer() +} + +// NewWriteCloserWrapper returns a new io.WriteCloser. +func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { + return &writeCloserWrapper{ + Writer: r, + closer: closer, + } +} + +// WriteCounter wraps a concrete io.Writer and hold a count of the number +// of bytes written to the writer during a "session". +// This can be convenient when write return is masked +// (e.g., json.Encoder.Encode()) +type WriteCounter struct { + Count int64 + Writer io.Writer +} + +// NewWriteCounter returns a new WriteCounter. +func NewWriteCounter(w io.Writer) *WriteCounter { + return &WriteCounter{ + Writer: w, + } +} + +func (wc *WriteCounter) Write(p []byte) (count int, err error) { + count, err = wc.Writer.Write(p) + wc.Count += int64(count) + return +} diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go new file mode 100644 index 000000000..cf8d04b1b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go @@ -0,0 +1,283 @@ +package jsonmessage // import "github.com/docker/docker/pkg/jsonmessage" + +import ( + "encoding/json" + "fmt" + "io" + "strings" + "time" + + units "github.com/docker/go-units" + "github.com/moby/term" + "github.com/morikuni/aec" +) + +// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to +// ensure the formatted time isalways the same number of characters. +const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" + +// JSONError wraps a concrete Code and Message, `Code` is +// is an integer error code, `Message` is the error message. +type JSONError struct { + Code int `json:"code,omitempty"` + Message string `json:"message,omitempty"` +} + +func (e *JSONError) Error() string { + return e.Message +} + +// JSONProgress describes a Progress. terminalFd is the fd of the current terminal, +// Start is the initial value for the operation. Current is the current status and +// value of the progress made towards Total. Total is the end value describing when +// we made 100% progress for an operation. 
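+//
+// On the wire (illustrative values), progress shows up inside a JSONMessage as:
+//
+//	{"status":"Downloading","progressDetail":{"current":1536,"total":4096},"id":"8dfafdbc3a40"}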
+type JSONProgress struct { + terminalFd uintptr + Current int64 `json:"current,omitempty"` + Total int64 `json:"total,omitempty"` + Start int64 `json:"start,omitempty"` + // If true, don't show xB/yB + HideCounts bool `json:"hidecounts,omitempty"` + Units string `json:"units,omitempty"` + nowFunc func() time.Time + winSize int +} + +func (p *JSONProgress) String() string { + var ( + width = p.width() + pbBox string + numbersBox string + timeLeftBox string + ) + if p.Current <= 0 && p.Total <= 0 { + return "" + } + if p.Total <= 0 { + switch p.Units { + case "": + current := units.HumanSize(float64(p.Current)) + return fmt.Sprintf("%8v", current) + default: + return fmt.Sprintf("%d %s", p.Current, p.Units) + } + } + + percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 + if percentage > 50 { + percentage = 50 + } + if width > 110 { + // this number can't be negative gh#7136 + numSpaces := 0 + if 50-percentage > 0 { + numSpaces = 50 - percentage + } + pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces)) + } + + switch { + case p.HideCounts: + case p.Units == "": // no units, use bytes + current := units.HumanSize(float64(p.Current)) + total := units.HumanSize(float64(p.Total)) + + numbersBox = fmt.Sprintf("%8v/%v", current, total) + + if p.Current > p.Total { + // remove total display if the reported current is wonky. + numbersBox = fmt.Sprintf("%8v", current) + } + default: + numbersBox = fmt.Sprintf("%d/%d %s", p.Current, p.Total, p.Units) + + if p.Current > p.Total { + // remove total display if the reported current is wonky. + numbersBox = fmt.Sprintf("%d %s", p.Current, p.Units) + } + } + + if p.Current > 0 && p.Start > 0 && percentage < 50 { + fromStart := p.now().Sub(time.Unix(p.Start, 0)) + perEntry := fromStart / time.Duration(p.Current) + left := time.Duration(p.Total-p.Current) * perEntry + left = (left / time.Second) * time.Second + + if width > 50 { + timeLeftBox = " " + left.String() + } + } + return pbBox + numbersBox + timeLeftBox +} + +// shim for testing +func (p *JSONProgress) now() time.Time { + if p.nowFunc == nil { + p.nowFunc = func() time.Time { + return time.Now().UTC() + } + } + return p.nowFunc() +} + +// shim for testing +func (p *JSONProgress) width() int { + if p.winSize != 0 { + return p.winSize + } + ws, err := term.GetWinsize(p.terminalFd) + if err == nil { + return int(ws.Width) + } + return 200 +} + +// JSONMessage defines a message struct. It describes +// the created time, where it from, status, ID of the +// message. It's used for docker events. +type JSONMessage struct { + Stream string `json:"stream,omitempty"` + Status string `json:"status,omitempty"` + Progress *JSONProgress `json:"progressDetail,omitempty"` + ProgressMessage string `json:"progress,omitempty"` // deprecated + ID string `json:"id,omitempty"` + From string `json:"from,omitempty"` + Time int64 `json:"time,omitempty"` + TimeNano int64 `json:"timeNano,omitempty"` + Error *JSONError `json:"errorDetail,omitempty"` + ErrorMessage string `json:"error,omitempty"` // deprecated + // Aux contains out-of-band data, such as digests for push signing and image id after building. 
+ Aux *json.RawMessage `json:"aux,omitempty"` +} + +func clearLine(out io.Writer) { + eraseMode := aec.EraseModes.All + cl := aec.EraseLine(eraseMode) + fmt.Fprint(out, cl) +} + +func cursorUp(out io.Writer, l uint) { + fmt.Fprint(out, aec.Up(l)) +} + +func cursorDown(out io.Writer, l uint) { + fmt.Fprint(out, aec.Down(l)) +} + +// Display displays the JSONMessage to `out`. If `isTerminal` is true, it will erase the +// entire current line when displaying the progressbar. +func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { + if jm.Error != nil { + if jm.Error.Code == 401 { + return fmt.Errorf("authentication is required") + } + return jm.Error + } + var endl string + if isTerminal && jm.Stream == "" && jm.Progress != nil { + clearLine(out) + endl = "\r" + fmt.Fprint(out, endl) + } else if jm.Progress != nil && jm.Progress.String() != "" { // disable progressbar in non-terminal + return nil + } + if jm.TimeNano != 0 { + fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed)) + } else if jm.Time != 0 { + fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed)) + } + if jm.ID != "" { + fmt.Fprintf(out, "%s: ", jm.ID) + } + if jm.From != "" { + fmt.Fprintf(out, "(from %s) ", jm.From) + } + if jm.Progress != nil && isTerminal { + fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) + } else if jm.ProgressMessage != "" { // deprecated + fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) + } else if jm.Stream != "" { + fmt.Fprintf(out, "%s%s", jm.Stream, endl) + } else { + fmt.Fprintf(out, "%s%s\n", jm.Status, endl) + } + return nil +} + +// DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal` +// describes if `out` is a terminal. If this is the case, it will print `\n` at the end of +// each line and move the cursor while displaying. +func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(JSONMessage)) error { + var ( + dec = json.NewDecoder(in) + ids = make(map[string]uint) + ) + + for { + var diff uint + var jm JSONMessage + if err := dec.Decode(&jm); err != nil { + if err == io.EOF { + break + } + return err + } + + if jm.Aux != nil { + if auxCallback != nil { + auxCallback(jm) + } + continue + } + + if jm.Progress != nil { + jm.Progress.terminalFd = terminalFd + } + if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { + line, ok := ids[jm.ID] + if !ok { + // NOTE: This approach of using len(id) to + // figure out the number of lines of history + // only works as long as we clear the history + // when we output something that's not + // accounted for in the map, such as a line + // with no ID. + line = uint(len(ids)) + ids[jm.ID] = line + if isTerminal { + fmt.Fprintf(out, "\n") + } + } + diff = uint(len(ids)) - line + if isTerminal { + cursorUp(out, diff) + } + } else { + // When outputting something that isn't progress + // output, clear the history of previous lines. We + // don't want progress entries from some previous + // operation to be updated (for example, pull -a + // with multiple tags). 
+ ids = make(map[string]uint) + } + err := jm.Display(out, isTerminal) + if jm.ID != "" && isTerminal { + cursorDown(out, diff) + } + if err != nil { + return err + } + } + return nil +} + +type stream interface { + io.Writer + FD() uintptr + IsTerminal() bool +} + +// DisplayJSONMessagesToStream prints json messages to the output stream +func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(JSONMessage)) error { + return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback) +} diff --git a/vendor/github.com/docker/docker/pkg/longpath/longpath.go b/vendor/github.com/docker/docker/pkg/longpath/longpath.go new file mode 100644 index 000000000..4177affba --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/longpath/longpath.go @@ -0,0 +1,26 @@ +// longpath introduces some constants and helper functions for handling long paths +// in Windows, which are expected to be prepended with `\\?\` and followed by either +// a drive letter, a UNC server\share, or a volume identifier. + +package longpath // import "github.com/docker/docker/pkg/longpath" + +import ( + "strings" +) + +// Prefix is the longpath prefix for Windows file paths. +const Prefix = `\\?\` + +// AddPrefix will add the Windows long path prefix to the path provided if +// it does not already have it. +func AddPrefix(path string) string { + if !strings.HasPrefix(path, Prefix) { + if strings.HasPrefix(path, `\\`) { + // This is a UNC path, so we need to add 'UNC' to the path as well. + path = Prefix + `UNC` + path[1:] + } else { + path = Prefix + path + } + } + return path +} diff --git a/vendor/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go new file mode 100644 index 000000000..3792c67a9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/pools/pools.go @@ -0,0 +1,137 @@ +// Package pools provides a collection of pools which provide various +// data types with buffers. These can be used to lower the number of +// memory allocations and reuse buffers. +// +// New pools should be added to this package to allow them to be +// shared across packages. +// +// Utility functions which operate on pools should be added to this +// package to allow them to be reused. +package pools // import "github.com/docker/docker/pkg/pools" + +import ( + "bufio" + "io" + "sync" + + "github.com/docker/docker/pkg/ioutils" +) + +const buffer32K = 32 * 1024 + +var ( + // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. + BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) + // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. + BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) + buffer32KPool = newBufferPoolWithSize(buffer32K) +) + +// BufioReaderPool is a bufio reader that uses sync.Pool. +type BufioReaderPool struct { + pool sync.Pool +} + +// newBufioReaderPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioReaderPoolWithSize(size int) *BufioReaderPool { + return &BufioReaderPool{ + pool: sync.Pool{ + New: func() interface{} { return bufio.NewReaderSize(nil, size) }, + }, + } +} + +// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. +func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { + buf := bufPool.pool.Get().(*bufio.Reader) + buf.Reset(r) + return buf +} + +// Put puts the bufio.Reader back into the pool. 
+func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +type bufferPool struct { + pool sync.Pool +} + +func newBufferPoolWithSize(size int) *bufferPool { + return &bufferPool{ + pool: sync.Pool{ + New: func() interface{} { s := make([]byte, size); return &s }, + }, + } +} + +func (bp *bufferPool) Get() *[]byte { + return bp.pool.Get().(*[]byte) +} + +func (bp *bufferPool) Put(b *[]byte) { + bp.pool.Put(b) +} + +// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. +func Copy(dst io.Writer, src io.Reader) (written int64, err error) { + buf := buffer32KPool.Get() + written, err = io.CopyBuffer(dst, src, *buf) + buffer32KPool.Put(buf) + return +} + +// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back +// into the pool and closes the reader if it's an io.ReadCloser. +func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { + return ioutils.NewReadCloserWrapper(r, func() error { + if readCloser, ok := r.(io.ReadCloser); ok { + readCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} + +// BufioWriterPool is a bufio writer that uses sync.Pool. +type BufioWriterPool struct { + pool sync.Pool +} + +// newBufioWriterPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioWriterPoolWithSize(size int) *BufioWriterPool { + return &BufioWriterPool{ + pool: sync.Pool{ + New: func() interface{} { return bufio.NewWriterSize(nil, size) }, + }, + } +} + +// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. +func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { + buf := bufPool.pool.Get().(*bufio.Writer) + buf.Reset(w) + return buf +} + +// Put puts the bufio.Writer back into the pool. +func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back +// into the pool and closes the writer if it's an io.Writecloser. +func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { + return ioutils.NewWriteCloserWrapper(w, func() error { + buf.Flush() + if writeCloser, ok := w.(io.WriteCloser); ok { + writeCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} diff --git a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go new file mode 100644 index 000000000..8f6e0a737 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go @@ -0,0 +1,190 @@ +package stdcopy // import "github.com/docker/docker/pkg/stdcopy" + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "sync" +) + +// StdType is the type of standard stream +// a writer can multiplex to. +type StdType byte + +const ( + // Stdin represents standard input stream type. + Stdin StdType = iota + // Stdout represents standard output stream type. + Stdout + // Stderr represents standard error steam type. + Stderr + // Systemerr represents errors originating from the system that make it + // into the multiplexed stream. + Systemerr + + stdWriterPrefixLen = 8 + stdWriterFdIndex = 0 + stdWriterSizeIndex = 4 + + startingBufLen = 32*1024 + stdWriterPrefixLen + 1 +) + +var bufPool = &sync.Pool{New: func() interface{} { return bytes.NewBuffer(nil) }} + +// stdWriter is wrapper of io.Writer with extra customized info. 
+type stdWriter struct { + io.Writer + prefix byte +} + +// Write sends the buffer to the underneath writer. +// It inserts the prefix header before the buffer, +// so stdcopy.StdCopy knows where to multiplex the output. +// It makes stdWriter to implement io.Writer. +func (w *stdWriter) Write(p []byte) (n int, err error) { + if w == nil || w.Writer == nil { + return 0, errors.New("Writer not instantiated") + } + if p == nil { + return 0, nil + } + + header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix} + binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(p))) + buf := bufPool.Get().(*bytes.Buffer) + buf.Write(header[:]) + buf.Write(p) + + n, err = w.Writer.Write(buf.Bytes()) + n -= stdWriterPrefixLen + if n < 0 { + n = 0 + } + + buf.Reset() + bufPool.Put(buf) + return +} + +// NewStdWriter instantiates a new Writer. +// Everything written to it will be encapsulated using a custom format, +// and written to the underlying `w` stream. +// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection. +// `t` indicates the id of the stream to encapsulate. +// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr. +func NewStdWriter(w io.Writer, t StdType) io.Writer { + return &stdWriter{ + Writer: w, + prefix: byte(t), + } +} + +// StdCopy is a modified version of io.Copy. +// +// StdCopy will demultiplex `src`, assuming that it contains two streams, +// previously multiplexed together using a StdWriter instance. +// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`. +// +// StdCopy will read until it hits EOF on `src`. It will then return a nil error. +// In other words: if `err` is non nil, it indicates a real underlying error. +// +// `written` will hold the total number of bytes written to `dstout` and `dsterr`. +func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) { + var ( + buf = make([]byte, startingBufLen) + bufLen = len(buf) + nr, nw int + er, ew error + out io.Writer + frameSize int + ) + + for { + // Make sure we have at least a full header + for nr < stdWriterPrefixLen { + var nr2 int + nr2, er = src.Read(buf[nr:]) + nr += nr2 + if er == io.EOF { + if nr < stdWriterPrefixLen { + return written, nil + } + break + } + if er != nil { + return 0, er + } + } + + stream := StdType(buf[stdWriterFdIndex]) + // Check the first byte to know where to write + switch stream { + case Stdin: + fallthrough + case Stdout: + // Write on stdout + out = dstout + case Stderr: + // Write on stderr + out = dsterr + case Systemerr: + // If we're on Systemerr, we won't write anywhere. + // NB: if this code changes later, make sure you don't try to write + // to outstream if Systemerr is the stream + out = nil + default: + return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex]) + } + + // Retrieve the size of the frame + frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4])) + + // Check if the buffer is big enough to read the frame. + // Extend it if necessary. + if frameSize+stdWriterPrefixLen > bufLen { + buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...) 
+ bufLen = len(buf) + } + + // While the amount of bytes read is less than the size of the frame + header, we keep reading + for nr < frameSize+stdWriterPrefixLen { + var nr2 int + nr2, er = src.Read(buf[nr:]) + nr += nr2 + if er == io.EOF { + if nr < frameSize+stdWriterPrefixLen { + return written, nil + } + break + } + if er != nil { + return 0, er + } + } + + // we might have an error from the source mixed up in our multiplexed + // stream. if we do, return it. + if stream == Systemerr { + return written, fmt.Errorf("error from daemon in stream: %s", string(buf[stdWriterPrefixLen:frameSize+stdWriterPrefixLen])) + } + + // Write the retrieved frame (without header) + nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen]) + if ew != nil { + return 0, ew + } + + // If the frame has not been fully written: error + if nw != frameSize { + return 0, io.ErrShortWrite + } + written += int64(nw) + + // Move the rest of the buffer to the beginning + copy(buf, buf[frameSize+stdWriterPrefixLen:]) + // Move the index + nr -= frameSize + stdWriterPrefixLen + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/args_windows.go b/vendor/github.com/docker/docker/pkg/system/args_windows.go new file mode 100644 index 000000000..b7c9487a0 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/args_windows.go @@ -0,0 +1,16 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "strings" + + "golang.org/x/sys/windows" +) + +// EscapeArgs makes a Windows-style escaped command line from a set of arguments +func EscapeArgs(args []string) string { + escapedArgs := make([]string, len(args)) + for i, a := range args { + escapedArgs[i] = windows.EscapeArg(a) + } + return strings.Join(escapedArgs, " ") +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes.go b/vendor/github.com/docker/docker/pkg/system/chtimes.go new file mode 100644 index 000000000..c26a4e24b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/chtimes.go @@ -0,0 +1,31 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "os" + "time" +) + +// Chtimes changes the access time and modified time of a file at the given path +func Chtimes(name string, atime time.Time, mtime time.Time) error { + unixMinTime := time.Unix(0, 0) + unixMaxTime := maxTime + + // If the modified time is prior to the Unix Epoch, or after the + // end of Unix Time, os.Chtimes has undefined behavior + // default to Unix Epoch in this case, just in case + + if atime.Before(unixMinTime) || atime.After(unixMaxTime) { + atime = unixMinTime + } + + if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { + mtime = unixMinTime + } + + if err := os.Chtimes(name, atime, mtime); err != nil { + return err + } + + // Take platform specific action for setting create time. + return setCTime(name, mtime) +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go b/vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go new file mode 100644 index 000000000..84ae15705 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go @@ -0,0 +1,15 @@ +//go:build !windows +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "time" +) + +// setCTime will set the create time on a file. On Unix, the create +// time is updated as a side effect of setting the modified time, so +// no action is required. 
+func setCTime(path string, ctime time.Time) error { + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go new file mode 100644 index 000000000..6664b8bca --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go @@ -0,0 +1,26 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "time" + + "golang.org/x/sys/windows" +) + +// setCTime will set the create time on a file. On Windows, this requires +// calling SetFileTime and explicitly including the create time. +func setCTime(path string, ctime time.Time) error { + ctimespec := windows.NsecToTimespec(ctime.UnixNano()) + pathp, e := windows.UTF16PtrFromString(path) + if e != nil { + return e + } + h, e := windows.CreateFile(pathp, + windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil, + windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0) + if e != nil { + return e + } + defer windows.Close(h) + c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec)) + return windows.SetFileTime(h, &c, nil, nil) +} diff --git a/vendor/github.com/docker/docker/pkg/system/errors.go b/vendor/github.com/docker/docker/pkg/system/errors.go new file mode 100644 index 000000000..2573d7162 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/errors.go @@ -0,0 +1,13 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "errors" +) + +var ( + // ErrNotSupportedPlatform means the platform is not supported. + ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") + + // ErrNotSupportedOperatingSystem means the operating system is not supported. + ErrNotSupportedOperatingSystem = errors.New("operating system is not supported") +) diff --git a/vendor/github.com/docker/docker/pkg/system/filesys.go b/vendor/github.com/docker/docker/pkg/system/filesys.go new file mode 100644 index 000000000..ce5990c91 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/filesys.go @@ -0,0 +1,19 @@ +package system + +import ( + "os" + "path/filepath" + "strings" +) + +// IsAbs is a platform-agnostic wrapper for filepath.IsAbs. +// +// On Windows, golang filepath.IsAbs does not consider a path \windows\system32 +// as absolute as it doesn't start with a drive-letter/colon combination. However, +// in docker we need to verify things such as WORKDIR /windows/system32 in +// a Dockerfile (which gets translated to \windows\system32 when being processed +// by the daemon). This SHOULD be treated as absolute from a docker processing +// perspective. +func IsAbs(path string) bool { + return filepath.IsAbs(path) || strings.HasPrefix(path, string(os.PathSeparator)) +} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_deprecated.go b/vendor/github.com/docker/docker/pkg/system/filesys_deprecated.go new file mode 100644 index 000000000..b2ee00631 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/filesys_deprecated.go @@ -0,0 +1,35 @@ +package system + +import ( + "os" + + "github.com/moby/sys/sequential" +) + +// CreateSequential is deprecated. +// +// Deprecated: use os.Create or github.com/moby/sys/sequential.Create() +func CreateSequential(name string) (*os.File, error) { + return sequential.Create(name) +} + +// OpenSequential is deprecated. 
+// +// Deprecated: use os.Open or github.com/moby/sys/sequential.Open +func OpenSequential(name string) (*os.File, error) { + return sequential.Open(name) +} + +// OpenFileSequential is deprecated. +// +// Deprecated: use github.com/moby/sys/sequential.OpenFile() +func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) { + return sequential.OpenFile(name, flag, perm) +} + +// TempFileSequential is deprecated. +// +// Deprecated: use os.CreateTemp or github.com/moby/sys/sequential.CreateTemp +func TempFileSequential(dir, prefix string) (f *os.File, err error) { + return sequential.CreateTemp(dir, prefix) +} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_unix.go b/vendor/github.com/docker/docker/pkg/system/filesys_unix.go new file mode 100644 index 000000000..380112940 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/filesys_unix.go @@ -0,0 +1,17 @@ +//go:build !windows +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import "os" + +// MkdirAllWithACL is a wrapper for os.MkdirAll on unix systems. +func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { + return os.MkdirAll(path, perm) +} + +// MkdirAll creates a directory named path along with any necessary parents, +// with permission specified by attribute perm for all dir created. +func MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go new file mode 100644 index 000000000..e3fa9f731 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go @@ -0,0 +1,118 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "os" + "regexp" + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +const ( + // SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System + SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" +) + +// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory +// with an appropriate SDDL defined ACL. +func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { + return mkdirall(path, true, sddl) +} + +// MkdirAll implementation that is volume path aware for Windows. It can be used +// as a drop-in replacement for os.MkdirAll() +func MkdirAll(path string, _ os.FileMode) error { + return mkdirall(path, false, "") +} + +// mkdirall is a custom version of os.MkdirAll modified for use on Windows +// so that it is both volume path aware, and can create a directory with +// a DACL. +func mkdirall(path string, applyACL bool, sddl string) error { + if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { + return nil + } + + // The rest of this method is largely copied from os.MkdirAll and should be kept + // as-is to ensure compatibility. + + // Fast path: if we can tell whether path is a directory or file, stop with success or error. + dir, err := os.Stat(path) + if err == nil { + if dir.IsDir() { + return nil + } + return &os.PathError{ + Op: "mkdir", + Path: path, + Err: syscall.ENOTDIR, + } + } + + // Slow path: make sure parent exists and then call Mkdir for path. + i := len(path) + for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. + i-- + } + + j := i + for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. 
+		j--
+	}
+
+	if j > 1 {
+		// Create parent
+		err = mkdirall(path[0:j-1], false, sddl)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result.
+	if applyACL {
+		err = mkdirWithACL(path, sddl)
+	} else {
+		err = os.Mkdir(path, 0)
+	}
+
+	if err != nil {
+		// Handle arguments like "foo/." by
+		// double-checking that directory doesn't exist.
+		dir, err1 := os.Lstat(path)
+		if err1 == nil && dir.IsDir() {
+			return nil
+		}
+		return err
+	}
+	return nil
+}
+
+// mkdirWithACL creates a new directory. If there is an error, it will be of
+// type *PathError.
+//
+// This is a modified and combined version of os.Mkdir and windows.Mkdir
+// in golang to cater for creating a directory with an ACL permitting full
+// access, with inheritance, to any subfolder/file for Built-in Administrators
+// and Local System.
+func mkdirWithACL(name string, sddl string) error {
+	sa := windows.SecurityAttributes{Length: 0}
+	sd, err := windows.SecurityDescriptorFromString(sddl)
+	if err != nil {
+		return &os.PathError{Op: "mkdir", Path: name, Err: err}
+	}
+	sa.Length = uint32(unsafe.Sizeof(sa))
+	sa.InheritHandle = 1
+	sa.SecurityDescriptor = sd
+
+	namep, err := windows.UTF16PtrFromString(name)
+	if err != nil {
+		return &os.PathError{Op: "mkdir", Path: name, Err: err}
+	}
+
+	e := windows.CreateDirectory(namep, &sa)
+	if e != nil {
+		return &os.PathError{Op: "mkdir", Path: name, Err: e}
+	}
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/image_os.go b/vendor/github.com/docker/docker/pkg/system/image_os.go
new file mode 100644
index 000000000..e3de86be2
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/image_os.go
@@ -0,0 +1,10 @@
+package system // import "github.com/docker/docker/pkg/system"
+import (
+	"runtime"
+	"strings"
+)
+
+// IsOSSupported determines if an operating system is supported by the host.
+func IsOSSupported(os string) bool {
+	return strings.EqualFold(runtime.GOOS, os)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/init.go b/vendor/github.com/docker/docker/pkg/system/init.go
new file mode 100644
index 000000000..a17597aab
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/init.go
@@ -0,0 +1,22 @@
+package system // import "github.com/docker/docker/pkg/system"
+
+import (
+	"syscall"
+	"time"
+	"unsafe"
+)
+
+// Used by chtimes
+var maxTime time.Time
+
+func init() {
+	// chtimes initialization
+	if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 {
+		// This is a 64 bit timespec
+		// os.Chtimes limits time to the following
+		maxTime = time.Unix(0, 1<<63-1)
+	} else {
+		// This is a 32 bit timespec
+		maxTime = time.Unix(1<<31-1, 0)
+	}
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/init_windows.go b/vendor/github.com/docker/docker/pkg/system/init_windows.go
new file mode 100644
index 000000000..3c2a43ddb
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/init_windows.go
@@ -0,0 +1,18 @@
+package system // import "github.com/docker/docker/pkg/system"
+
+var (
+	// containerdRuntimeSupported determines if containerd should be the runtime.
+	containerdRuntimeSupported = false
+)
+
+// InitContainerdRuntime sets whether to use containerd for runtime on Windows.
+func InitContainerdRuntime(cdPath string) {
+	if len(cdPath) > 0 {
+		containerdRuntimeSupported = true
+	}
+}
+
+// ContainerdRuntimeSupported returns true if the use of containerd runtime is supported.
+func ContainerdRuntimeSupported() bool { + return containerdRuntimeSupported +} diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_unix.go b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go new file mode 100644 index 000000000..654b9f2c9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go @@ -0,0 +1,21 @@ +//go:build !windows +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "os" + "syscall" +) + +// Lstat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Lstat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Lstat(path, s); err != nil { + return nil, &os.PathError{Op: "Lstat", Path: path, Err: err} + } + return fromStatT(s) +} diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go new file mode 100644 index 000000000..359c791d9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go @@ -0,0 +1,14 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "os" + +// Lstat calls os.Lstat to get a fileinfo interface back. +// This is then copied into our own locally defined structure. +func Lstat(path string) (*StatT, error) { + fi, err := os.Lstat(path) + if err != nil { + return nil, err + } + + return fromStatT(&fi) +} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo.go b/vendor/github.com/docker/docker/pkg/system/meminfo.go new file mode 100644 index 000000000..6667eb84d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/meminfo.go @@ -0,0 +1,17 @@ +package system // import "github.com/docker/docker/pkg/system" + +// MemInfo contains memory statistics of the host system. +type MemInfo struct { + // Total usable RAM (i.e. physical RAM minus a few reserved bits and the + // kernel binary code). + MemTotal int64 + + // Amount of free memory. + MemFree int64 + + // Total amount of swap space available. + SwapTotal int64 + + // Amount of swap space that is currently unused. + SwapFree int64 +} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go new file mode 100644 index 000000000..02a7377c1 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go @@ -0,0 +1,69 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "bufio" + "io" + "os" + "strconv" + "strings" +) + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + file, err := os.Open("/proc/meminfo") + if err != nil { + return nil, err + } + defer file.Close() + return parseMemInfo(file) +} + +// parseMemInfo parses the /proc/meminfo file into +// a MemInfo object given an io.Reader to the file. +// Throws error if there are problems reading from the file +func parseMemInfo(reader io.Reader) (*MemInfo, error) { + meminfo := &MemInfo{} + scanner := bufio.NewScanner(reader) + memAvailable := int64(-1) + for scanner.Scan() { + // Expected format: ["MemTotal:", "1234", "kB"] + parts := strings.Fields(scanner.Text()) + + // Sanity checks: Skip malformed entries. + if len(parts) < 3 || parts[2] != "kB" { + continue + } + + // Convert to bytes. 
+ size, err := strconv.Atoi(parts[1]) + if err != nil { + continue + } + // Convert to KiB + bytes := int64(size) * 1024 + + switch parts[0] { + case "MemTotal:": + meminfo.MemTotal = bytes + case "MemFree:": + meminfo.MemFree = bytes + case "MemAvailable:": + memAvailable = bytes + case "SwapTotal:": + meminfo.SwapTotal = bytes + case "SwapFree:": + meminfo.SwapFree = bytes + } + } + if memAvailable != -1 { + meminfo.MemFree = memAvailable + } + + // Handle errors that may have occurred during the reading of the file. + if err := scanner.Err(); err != nil { + return nil, err + } + + return meminfo, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go new file mode 100644 index 000000000..207ee58ee --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go @@ -0,0 +1,9 @@ +//go:build !linux && !windows +// +build !linux,!windows + +package system // import "github.com/docker/docker/pkg/system" + +// ReadMemInfo is not supported on platforms other than linux and windows. +func ReadMemInfo() (*MemInfo, error) { + return nil, ErrNotSupportedPlatform +} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go new file mode 100644 index 000000000..124d2c502 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go @@ -0,0 +1,45 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") +) + +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx +type memorystatusex struct { + dwLength uint32 + dwMemoryLoad uint32 + ullTotalPhys uint64 + ullAvailPhys uint64 + ullTotalPageFile uint64 + ullAvailPageFile uint64 + ullTotalVirtual uint64 + ullAvailVirtual uint64 + ullAvailExtendedVirtual uint64 +} + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + msi := &memorystatusex{ + dwLength: 64, + } + r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) + if r1 == 0 { + return &MemInfo{}, nil + } + return &MemInfo{ + MemTotal: int64(msi.ullTotalPhys), + MemFree: int64(msi.ullAvailPhys), + SwapTotal: int64(msi.ullTotalPageFile), + SwapFree: int64(msi.ullAvailPageFile), + }, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/docker/docker/pkg/system/mknod.go new file mode 100644 index 000000000..d27152c0f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/mknod.go @@ -0,0 +1,17 @@ +//go:build !windows +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "golang.org/x/sys/unix" +) + +// Mkdev is used to build the value of linux devices (in /dev/) which specifies major +// and minor number of the newly created device special file. +// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. +// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, +// then the top 12 bits of the minor. 
+func Mkdev(major int64, minor int64) uint32 { + return uint32(unix.Mkdev(uint32(major), uint32(minor))) +} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go b/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go new file mode 100644 index 000000000..c890be116 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go @@ -0,0 +1,14 @@ +//go:build freebsd +// +build freebsd + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "golang.org/x/sys/unix" +) + +// Mknod creates a filesystem node (file, device special file or named pipe) named path +// with attributes specified by mode and dev. +func Mknod(path string, mode uint32, dev int) error { + return unix.Mknod(path, mode, uint64(dev)) +} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_unix.go b/vendor/github.com/docker/docker/pkg/system/mknod_unix.go new file mode 100644 index 000000000..4586aad19 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/mknod_unix.go @@ -0,0 +1,14 @@ +//go:build !freebsd && !windows +// +build !freebsd,!windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "golang.org/x/sys/unix" +) + +// Mknod creates a filesystem node (file, device special file or named pipe) named path +// with attributes specified by mode and dev. +func Mknod(path string, mode uint32, dev int) error { + return unix.Mknod(path, mode, dev) +} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go new file mode 100644 index 000000000..ec89d7a15 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go @@ -0,0 +1,11 @@ +package system // import "github.com/docker/docker/pkg/system" + +// Mknod is not implemented on Windows. +func Mknod(path string, mode uint32, dev int) error { + return ErrNotSupportedPlatform +} + +// Mkdev is not implemented on Windows. +func Mkdev(major int64, minor int64) uint32 { + panic("Mkdev not implemented on Windows.") +} diff --git a/vendor/github.com/docker/docker/pkg/system/path.go b/vendor/github.com/docker/docker/pkg/system/path.go new file mode 100644 index 000000000..818b20efe --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/path.go @@ -0,0 +1,41 @@ +package system // import "github.com/docker/docker/pkg/system" + +const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + +// DefaultPathEnv is unix style list of directories to search for +// executables. Each directory is separated from the next by a colon +// ':' character . +// For Windows containers, an empty string is returned as the default +// path will be set by the container, and Docker has no context of what the +// default path should be. +func DefaultPathEnv(os string) string { + if os == "windows" { + return "" + } + return defaultUnixPathEnv +} + +// PathVerifier defines the subset of a PathDriver that CheckSystemDriveAndRemoveDriveLetter +// actually uses in order to avoid system depending on containerd/continuity. +type PathVerifier interface { + IsAbs(string) bool +} + +// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, +// is the system drive. +// On Linux: this is a no-op. +// On Windows: this does the following> +// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. +// This is used, for example, when validating a user provided path in docker cp. 
+// If a drive letter is supplied, it must be the system drive. The drive letter +// is always removed. Also, it translates it to OS semantics (IOW / to \). We +// need the path in this syntax so that it can ultimately be concatenated with +// a Windows long-path which doesn't support drive-letters. Examples: +// C: --> Fail +// C:\ --> \ +// a --> a +// /a --> \a +// d:\ --> Fail +func CheckSystemDriveAndRemoveDriveLetter(path string, driver PathVerifier) (string, error) { + return checkSystemDriveAndRemoveDriveLetter(path, driver) +} diff --git a/vendor/github.com/docker/docker/pkg/system/path_unix.go b/vendor/github.com/docker/docker/pkg/system/path_unix.go new file mode 100644 index 000000000..197a37a21 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/path_unix.go @@ -0,0 +1,17 @@ +//go:build !windows +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +// GetLongPathName converts Windows short pathnames to full pathnames. +// For example C:\Users\ADMIN~1 --> C:\Users\Administrator. +// It is a no-op on non-Windows platforms +func GetLongPathName(path string) (string, error) { + return path, nil +} + +// checkSystemDriveAndRemoveDriveLetter is the non-Windows implementation +// of CheckSystemDriveAndRemoveDriveLetter +func checkSystemDriveAndRemoveDriveLetter(path string, driver PathVerifier) (string, error) { + return path, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/path_windows.go b/vendor/github.com/docker/docker/pkg/system/path_windows.go new file mode 100644 index 000000000..7d375b0dd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/path_windows.go @@ -0,0 +1,48 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "fmt" + "path/filepath" + "strings" + + "golang.org/x/sys/windows" +) + +// GetLongPathName converts Windows short pathnames to full pathnames. +// For example C:\Users\ADMIN~1 --> C:\Users\Administrator. 
+// It is a no-op on non-Windows platforms +func GetLongPathName(path string) (string, error) { + // See https://groups.google.com/forum/#!topic/golang-dev/1tufzkruoTg + p, err := windows.UTF16FromString(path) + if err != nil { + return "", err + } + b := p // GetLongPathName says we can reuse buffer + n, err := windows.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + if n > uint32(len(b)) { + b = make([]uint16, n) + _, err = windows.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + } + return windows.UTF16ToString(b), nil +} + +// checkSystemDriveAndRemoveDriveLetter is the Windows implementation +// of CheckSystemDriveAndRemoveDriveLetter +func checkSystemDriveAndRemoveDriveLetter(path string, driver PathVerifier) (string, error) { + if len(path) == 2 && string(path[1]) == ":" { + return "", fmt.Errorf("No relative path specified in %q", path) + } + if !driver.IsAbs(path) || len(path) < 2 { + return filepath.FromSlash(path), nil + } + if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { + return "", fmt.Errorf("The specified path is not on the system drive (C:)") + } + return filepath.FromSlash(path[2:]), nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/process_unix.go b/vendor/github.com/docker/docker/pkg/system/process_unix.go new file mode 100644 index 000000000..1c2c6a309 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/process_unix.go @@ -0,0 +1,46 @@ +//go:build linux || freebsd || darwin +// +build linux freebsd darwin + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "fmt" + "os" + "strings" + "syscall" + + "golang.org/x/sys/unix" +) + +// IsProcessAlive returns true if process with a given pid is running. +func IsProcessAlive(pid int) bool { + err := unix.Kill(pid, syscall.Signal(0)) + if err == nil || err == unix.EPERM { + return true + } + + return false +} + +// KillProcess force-stops a process. +func KillProcess(pid int) { + unix.Kill(pid, unix.SIGKILL) +} + +// IsProcessZombie return true if process has a state with "Z" +// http://man7.org/linux/man-pages/man5/proc.5.html +func IsProcessZombie(pid int) (bool, error) { + statPath := fmt.Sprintf("/proc/%d/stat", pid) + dataBytes, err := os.ReadFile(statPath) + if err != nil { + // TODO(thaJeztah) should we ignore os.IsNotExist() here? ("/proc//stat" will be gone if the process exited) + return false, err + } + data := string(dataBytes) + sdata := strings.SplitN(data, " ", 4) + if len(sdata) >= 3 && sdata[2] == "Z" { + return true, nil + } + + return false, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/process_windows.go b/vendor/github.com/docker/docker/pkg/system/process_windows.go new file mode 100644 index 000000000..09bdfa0ca --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/process_windows.go @@ -0,0 +1,18 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "os" + +// IsProcessAlive returns true if process with a given pid is running. +func IsProcessAlive(pid int) bool { + _, err := os.FindProcess(pid) + + return err == nil +} + +// KillProcess force-stops a process. 
+func KillProcess(pid int) { + p, err := os.FindProcess(pid) + if err == nil { + _ = p.Kill() + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_bsd.go b/vendor/github.com/docker/docker/pkg/system/stat_bsd.go new file mode 100644 index 000000000..8e61d820f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_bsd.go @@ -0,0 +1,16 @@ +//go:build freebsd || netbsd +// +build freebsd netbsd + +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_darwin.go b/vendor/github.com/docker/docker/pkg/system/stat_darwin.go new file mode 100644 index 000000000..c1c0ee9f3 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_darwin.go @@ -0,0 +1,13 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go new file mode 100644 index 000000000..3ac02393f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_linux.go @@ -0,0 +1,20 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: s.Mode, + uid: s.Uid, + gid: s.Gid, + // the type is 32bit on mips + rdev: uint64(s.Rdev), //nolint: unconvert + mtim: s.Mtim}, nil +} + +// FromStatT converts a syscall.Stat_t type to a system.Stat_t type +// This is exposed on Linux as pkg/archive/changes uses it. 
+func FromStatT(s *syscall.Stat_t) (*StatT, error) { + return fromStatT(s) +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go new file mode 100644 index 000000000..756b92d1e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go @@ -0,0 +1,13 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go new file mode 100644 index 000000000..6a51ccd64 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go @@ -0,0 +1,13 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: s.Mode, + uid: s.Uid, + gid: s.Gid, + rdev: s.Rdev, + mtim: s.Mtim}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unix.go b/vendor/github.com/docker/docker/pkg/system/stat_unix.go new file mode 100644 index 000000000..a45ffddf7 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_unix.go @@ -0,0 +1,67 @@ +//go:build !windows +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "os" + "syscall" +) + +// StatT type contains status of a file. It contains metadata +// like permission, owner, group, size, etc about a file. +type StatT struct { + mode uint32 + uid uint32 + gid uint32 + rdev uint64 + size int64 + mtim syscall.Timespec +} + +// Mode returns file's permission mode. +func (s StatT) Mode() uint32 { + return s.mode +} + +// UID returns file's user id of owner. +func (s StatT) UID() uint32 { + return s.uid +} + +// GID returns file's group id of owner. +func (s StatT) GID() uint32 { + return s.gid +} + +// Rdev returns file's device ID (if it's special file). +func (s StatT) Rdev() uint64 { + return s.rdev +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mtim returns file's last modification time. +func (s StatT) Mtim() syscall.Timespec { + return s.mtim +} + +// IsDir reports whether s describes a directory. +func (s StatT) IsDir() bool { + return s.mode&syscall.S_IFDIR != 0 +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, &os.PathError{Op: "Stat", Path: path, Err: err} + } + return fromStatT(s) +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/docker/docker/pkg/system/stat_windows.go new file mode 100644 index 000000000..0ff3af2fa --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_windows.go @@ -0,0 +1,49 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "os" + "time" +) + +// StatT type contains status of a file. It contains metadata +// like permission, size, etc about a file. 
+type StatT struct {
+	mode os.FileMode
+	size int64
+	mtim time.Time
+}
+
+// Size returns file's size.
+func (s StatT) Size() int64 {
+	return s.size
+}
+
+// Mode returns file's permission mode.
+func (s StatT) Mode() os.FileMode {
+	return s.mode
+}
+
+// Mtim returns file's last modification time.
+func (s StatT) Mtim() time.Time {
+	return s.mtim
+}
+
+// Stat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Throws an error if the file does not exist
+func Stat(path string) (*StatT, error) {
+	fi, err := os.Stat(path)
+	if err != nil {
+		return nil, err
+	}
+	return fromStatT(&fi)
+}
+
+// fromStatT converts an os.FileInfo type to a system.StatT type
+func fromStatT(fi *os.FileInfo) (*StatT, error) {
+	return &StatT{
+		size: (*fi).Size(),
+		mode: (*fi).Mode(),
+		mtim: (*fi).ModTime()}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unix.go b/vendor/github.com/docker/docker/pkg/system/utimes_unix.go
new file mode 100644
index 000000000..2768750a0
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/utimes_unix.go
@@ -0,0 +1,25 @@
+//go:build linux || freebsd
+// +build linux freebsd
+
+package system // import "github.com/docker/docker/pkg/system"
+
+import (
+	"syscall"
+
+	"golang.org/x/sys/unix"
+)
+
+// LUtimesNano is used to change access and modification time of the specified path.
+// It's used for symbolic link files because unix.UtimesNano doesn't support a NOFOLLOW flag atm.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+	uts := []unix.Timespec{
+		unix.NsecToTimespec(syscall.TimespecToNsec(ts[0])),
+		unix.NsecToTimespec(syscall.TimespecToNsec(ts[1])),
+	}
+	err := unix.UtimesNanoAt(unix.AT_FDCWD, path, uts, unix.AT_SYMLINK_NOFOLLOW)
+	if err != nil && err != unix.ENOSYS {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go
new file mode 100644
index 000000000..bfed4af03
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go
@@ -0,0 +1,11 @@
+//go:build !linux && !freebsd
+// +build !linux,!freebsd
+
+package system // import "github.com/docker/docker/pkg/system"
+
+import "syscall"
+
+// LUtimesNano is only supported on linux and freebsd.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+	return ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
new file mode 100644
index 000000000..95b609fe7
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
@@ -0,0 +1,37 @@
+package system // import "github.com/docker/docker/pkg/system"
+
+import "golang.org/x/sys/unix"
+
+// Lgetxattr retrieves the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+// It will return a nil slice and nil error if the xattr is not set.
+func Lgetxattr(path string, attr string) ([]byte, error) { + // Start with a 128 length byte array + dest := make([]byte, 128) + sz, errno := unix.Lgetxattr(path, attr, dest) + + for errno == unix.ERANGE { + // Buffer too small, use zero-sized buffer to get the actual size + sz, errno = unix.Lgetxattr(path, attr, []byte{}) + if errno != nil { + return nil, errno + } + dest = make([]byte, sz) + sz, errno = unix.Lgetxattr(path, attr, dest) + } + + switch { + case errno == unix.ENODATA: + return nil, nil + case errno != nil: + return nil, errno + } + + return dest[:sz], nil +} + +// Lsetxattr sets the value of the extended attribute identified by attr +// and associated with the given path in the file system. +func Lsetxattr(path string, attr string, data []byte, flags int) error { + return unix.Lsetxattr(path, attr, data, flags) +} diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go new file mode 100644 index 000000000..b165a5dbf --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go @@ -0,0 +1,14 @@ +//go:build !linux +// +build !linux + +package system // import "github.com/docker/docker/pkg/system" + +// Lgetxattr is not supported on platforms other than linux. +func Lgetxattr(path string, attr string) ([]byte, error) { + return nil, ErrNotSupportedPlatform +} + +// Lsetxattr is not supported on platforms other than linux. +func Lsetxattr(path string, attr string, data []byte, flags int) error { + return ErrNotSupportedPlatform +} diff --git a/vendor/github.com/docker/go-connections/LICENSE b/vendor/github.com/docker/go-connections/LICENSE new file mode 100644 index 000000000..b55b37bc3 --- /dev/null +++ b/vendor/github.com/docker/go-connections/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go new file mode 100644 index 000000000..bb7e4e336 --- /dev/null +++ b/vendor/github.com/docker/go-connections/nat/nat.go @@ -0,0 +1,242 @@ +// Package nat is a convenience package for manipulation of strings describing network ports. +package nat + +import ( + "fmt" + "net" + "strconv" + "strings" +) + +const ( + // portSpecTemplate is the expected format for port specifications + portSpecTemplate = "ip:hostPort:containerPort" +) + +// PortBinding represents a binding between a Host IP address and a Host Port +type PortBinding struct { + // HostIP is the host IP Address + HostIP string `json:"HostIp"` + // HostPort is the host port number + HostPort string +} + +// PortMap is a collection of PortBinding indexed by Port +type PortMap map[Port][]PortBinding + +// PortSet is a collection of structs indexed by Port +type PortSet map[Port]struct{} + +// Port is a string containing port number and protocol in the format "80/tcp" +type Port string + +// NewPort creates a new instance of a Port given a protocol and port number or port range +func NewPort(proto, port string) (Port, error) { + // Check for parsing issues on "port" now so we can avoid having + // to check it later on. 
+ + portStartInt, portEndInt, err := ParsePortRangeToInt(port) + if err != nil { + return "", err + } + + if portStartInt == portEndInt { + return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil + } + return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil +} + +// ParsePort parses the port number string and returns an int +func ParsePort(rawPort string) (int, error) { + if len(rawPort) == 0 { + return 0, nil + } + port, err := strconv.ParseUint(rawPort, 10, 16) + if err != nil { + return 0, err + } + return int(port), nil +} + +// ParsePortRangeToInt parses the port range string and returns start/end ints +func ParsePortRangeToInt(rawPort string) (int, int, error) { + if len(rawPort) == 0 { + return 0, 0, nil + } + start, end, err := ParsePortRange(rawPort) + if err != nil { + return 0, 0, err + } + return int(start), int(end), nil +} + +// Proto returns the protocol of a Port +func (p Port) Proto() string { + proto, _ := SplitProtoPort(string(p)) + return proto +} + +// Port returns the port number of a Port +func (p Port) Port() string { + _, port := SplitProtoPort(string(p)) + return port +} + +// Int returns the port number of a Port as an int +func (p Port) Int() int { + portStr := p.Port() + // We don't need to check for an error because we're going to + // assume that any error would have been found, and reported, in NewPort() + port, _ := ParsePort(portStr) + return port +} + +// Range returns the start/end port numbers of a Port range as ints +func (p Port) Range() (int, int, error) { + return ParsePortRangeToInt(p.Port()) +} + +// SplitProtoPort splits a port in the format of proto/port +func SplitProtoPort(rawPort string) (string, string) { + parts := strings.Split(rawPort, "/") + l := len(parts) + if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 { + return "", "" + } + if l == 1 { + return "tcp", rawPort + } + if len(parts[1]) == 0 { + return "tcp", parts[0] + } + return parts[1], parts[0] +} + +func validateProto(proto string) bool { + for _, availableProto := range []string{"tcp", "udp", "sctp"} { + if availableProto == proto { + return true + } + } + return false +} + +// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses +// these in to the internal types +func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { + var ( + exposedPorts = make(map[Port]struct{}, len(ports)) + bindings = make(map[Port][]PortBinding) + ) + for _, rawPort := range ports { + portMappings, err := ParsePortSpec(rawPort) + if err != nil { + return nil, nil, err + } + + for _, portMapping := range portMappings { + port := portMapping.Port + if _, exists := exposedPorts[port]; !exists { + exposedPorts[port] = struct{}{} + } + bslice, exists := bindings[port] + if !exists { + bslice = []PortBinding{} + } + bindings[port] = append(bslice, portMapping.Binding) + } + } + return exposedPorts, bindings, nil +} + +// PortMapping is a data object mapping a Port to a PortBinding +type PortMapping struct { + Port Port + Binding PortBinding +} + +func splitParts(rawport string) (string, string, string) { + parts := strings.Split(rawport, ":") + n := len(parts) + containerport := parts[n-1] + + switch n { + case 1: + return "", "", containerport + case 2: + return "", parts[0], containerport + case 3: + return parts[0], parts[1], containerport + default: + return strings.Join(parts[:n-2], ":"), parts[n-2], containerport + } +} + +// ParsePortSpec parses a port specification string into a slice of PortMappings +func 
ParsePortSpec(rawPort string) ([]PortMapping, error) { + var proto string + rawIP, hostPort, containerPort := splitParts(rawPort) + proto, containerPort = SplitProtoPort(containerPort) + + // Strip [] from IPV6 addresses + ip, _, err := net.SplitHostPort(rawIP + ":") + if err != nil { + return nil, fmt.Errorf("Invalid ip address %v: %s", rawIP, err) + } + if ip != "" && net.ParseIP(ip) == nil { + return nil, fmt.Errorf("Invalid ip address: %s", ip) + } + if containerPort == "" { + return nil, fmt.Errorf("No port specified: %s", rawPort) + } + + startPort, endPort, err := ParsePortRange(containerPort) + if err != nil { + return nil, fmt.Errorf("Invalid containerPort: %s", containerPort) + } + + var startHostPort, endHostPort uint64 = 0, 0 + if len(hostPort) > 0 { + startHostPort, endHostPort, err = ParsePortRange(hostPort) + if err != nil { + return nil, fmt.Errorf("Invalid hostPort: %s", hostPort) + } + } + + if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) { + // Allow host port range iff containerPort is not a range. + // In this case, use the host port range as the dynamic + // host port range to allocate into. + if endPort != startPort { + return nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) + } + } + + if !validateProto(strings.ToLower(proto)) { + return nil, fmt.Errorf("Invalid proto: %s", proto) + } + + ports := []PortMapping{} + for i := uint64(0); i <= (endPort - startPort); i++ { + containerPort = strconv.FormatUint(startPort+i, 10) + if len(hostPort) > 0 { + hostPort = strconv.FormatUint(startHostPort+i, 10) + } + // Set hostPort to a range only if there is a single container port + // and a dynamic host port. + if startPort == endPort && startHostPort != endHostPort { + hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10)) + } + port, err := NewPort(strings.ToLower(proto), containerPort) + if err != nil { + return nil, err + } + + binding := PortBinding{ + HostIP: ip, + HostPort: hostPort, + } + ports = append(ports, PortMapping{Port: port, Binding: binding}) + } + return ports, nil +} diff --git a/vendor/github.com/docker/go-connections/nat/parse.go b/vendor/github.com/docker/go-connections/nat/parse.go new file mode 100644 index 000000000..892adf8c6 --- /dev/null +++ b/vendor/github.com/docker/go-connections/nat/parse.go @@ -0,0 +1,57 @@ +package nat + +import ( + "fmt" + "strconv" + "strings" +) + +// PartParser parses and validates the specified string (data) using the specified template +// e.g. ip:public:private -> 192.168.0.1:80:8000 +// DEPRECATED: do not use, this function may be removed in a future version +func PartParser(template, data string) (map[string]string, error) { + // ip:public:private + var ( + templateParts = strings.Split(template, ":") + parts = strings.Split(data, ":") + out = make(map[string]string, len(templateParts)) + ) + if len(parts) != len(templateParts) { + return nil, fmt.Errorf("Invalid format to parse. 
%s should match template %s", data, template) + } + + for i, t := range templateParts { + value := "" + if len(parts) > i { + value = parts[i] + } + out[t] = value + } + return out, nil +} + +// ParsePortRange parses and validates the specified string as a port-range (8000-9000) +func ParsePortRange(ports string) (uint64, uint64, error) { + if ports == "" { + return 0, 0, fmt.Errorf("Empty string specified for ports.") + } + if !strings.Contains(ports, "-") { + start, err := strconv.ParseUint(ports, 10, 16) + end := start + return start, end, err + } + + parts := strings.Split(ports, "-") + start, err := strconv.ParseUint(parts[0], 10, 16) + if err != nil { + return 0, 0, err + } + end, err := strconv.ParseUint(parts[1], 10, 16) + if err != nil { + return 0, 0, err + } + if end < start { + return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) + } + return start, end, nil +} diff --git a/vendor/github.com/docker/go-connections/nat/sort.go b/vendor/github.com/docker/go-connections/nat/sort.go new file mode 100644 index 000000000..ce950171e --- /dev/null +++ b/vendor/github.com/docker/go-connections/nat/sort.go @@ -0,0 +1,96 @@ +package nat + +import ( + "sort" + "strings" +) + +type portSorter struct { + ports []Port + by func(i, j Port) bool +} + +func (s *portSorter) Len() int { + return len(s.ports) +} + +func (s *portSorter) Swap(i, j int) { + s.ports[i], s.ports[j] = s.ports[j], s.ports[i] +} + +func (s *portSorter) Less(i, j int) bool { + ip := s.ports[i] + jp := s.ports[j] + + return s.by(ip, jp) +} + +// Sort sorts a list of ports using the provided predicate +// This function should compare `i` and `j`, returning true if `i` is +// considered to be less than `j` +func Sort(ports []Port, predicate func(i, j Port) bool) { + s := &portSorter{ports, predicate} + sort.Sort(s) +} + +type portMapEntry struct { + port Port + binding PortBinding +} + +type portMapSorter []portMapEntry + +func (s portMapSorter) Len() int { return len(s) } +func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// sort the port so that the order is: +// 1. port with larger specified bindings +// 2. larger port +// 3. port with tcp protocol +func (s portMapSorter) Less(i, j int) bool { + pi, pj := s[i].port, s[j].port + hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort) + return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp") +} + +// SortPortMap sorts the list of ports and their respected mapping. The ports +// will explicit HostPort will be placed first. 
+func SortPortMap(ports []Port, bindings PortMap) { + s := portMapSorter{} + for _, p := range ports { + if binding, ok := bindings[p]; ok { + for _, b := range binding { + s = append(s, portMapEntry{port: p, binding: b}) + } + bindings[p] = []PortBinding{} + } else { + s = append(s, portMapEntry{port: p}) + } + } + + sort.Sort(s) + var ( + i int + pm = make(map[Port]struct{}) + ) + // reorder ports + for _, entry := range s { + if _, ok := pm[entry.port]; !ok { + ports[i] = entry.port + pm[entry.port] = struct{}{} + i++ + } + // reorder bindings for this port + if _, ok := bindings[entry.port]; ok { + bindings[entry.port] = append(bindings[entry.port], entry.binding) + } + } +} + +func toInt(s string) uint64 { + i, _, err := ParsePortRange(s) + if err != nil { + i = 0 + } + return i +} diff --git a/vendor/github.com/docker/go-connections/sockets/README.md b/vendor/github.com/docker/go-connections/sockets/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/docker/go-connections/sockets/inmem_socket.go b/vendor/github.com/docker/go-connections/sockets/inmem_socket.go new file mode 100644 index 000000000..99846ffdd --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/inmem_socket.go @@ -0,0 +1,81 @@ +package sockets + +import ( + "errors" + "net" + "sync" +) + +var errClosed = errors.New("use of closed network connection") + +// InmemSocket implements net.Listener using in-memory only connections. +type InmemSocket struct { + chConn chan net.Conn + chClose chan struct{} + addr string + mu sync.Mutex +} + +// dummyAddr is used to satisfy net.Addr for the in-mem socket +// it is just stored as a string and returns the string for all calls +type dummyAddr string + +// NewInmemSocket creates an in-memory only net.Listener +// The addr argument can be any string, but is used to satisfy the `Addr()` part +// of the net.Listener interface +func NewInmemSocket(addr string, bufSize int) *InmemSocket { + return &InmemSocket{ + chConn: make(chan net.Conn, bufSize), + chClose: make(chan struct{}), + addr: addr, + } +} + +// Addr returns the socket's addr string to satisfy net.Listener +func (s *InmemSocket) Addr() net.Addr { + return dummyAddr(s.addr) +} + +// Accept implements the Accept method in the Listener interface; it waits for the next call and returns a generic Conn. +func (s *InmemSocket) Accept() (net.Conn, error) { + select { + case conn := <-s.chConn: + return conn, nil + case <-s.chClose: + return nil, errClosed + } +} + +// Close closes the listener. It will be unavailable for use once closed. 
+func (s *InmemSocket) Close() error { + s.mu.Lock() + defer s.mu.Unlock() + select { + case <-s.chClose: + default: + close(s.chClose) + } + return nil +} + +// Dial is used to establish a connection with the in-mem server +func (s *InmemSocket) Dial(network, addr string) (net.Conn, error) { + srvConn, clientConn := net.Pipe() + select { + case s.chConn <- srvConn: + case <-s.chClose: + return nil, errClosed + } + + return clientConn, nil +} + +// Network returns the addr string, satisfies net.Addr +func (a dummyAddr) Network() string { + return string(a) +} + +// String returns the string form +func (a dummyAddr) String() string { + return string(a) +} diff --git a/vendor/github.com/docker/go-connections/sockets/proxy.go b/vendor/github.com/docker/go-connections/sockets/proxy.go new file mode 100644 index 000000000..98e9a1dc6 --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/proxy.go @@ -0,0 +1,51 @@ +package sockets + +import ( + "net" + "net/url" + "os" + "strings" + + "golang.org/x/net/proxy" +) + +// GetProxyEnv allows access to the uppercase and the lowercase forms of +// proxy-related variables. See the Go specification for details on these +// variables. https://golang.org/pkg/net/http/ +func GetProxyEnv(key string) string { + proxyValue := os.Getenv(strings.ToUpper(key)) + if proxyValue == "" { + return os.Getenv(strings.ToLower(key)) + } + return proxyValue +} + +// DialerFromEnvironment takes in a "direct" *net.Dialer and returns a +// proxy.Dialer which will route the connections through the proxy using the +// given dialer. +func DialerFromEnvironment(direct *net.Dialer) (proxy.Dialer, error) { + allProxy := GetProxyEnv("all_proxy") + if len(allProxy) == 0 { + return direct, nil + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return direct, err + } + + proxyFromURL, err := proxy.FromURL(proxyURL, direct) + if err != nil { + return direct, err + } + + noProxy := GetProxyEnv("no_proxy") + if len(noProxy) == 0 { + return proxyFromURL, nil + } + + perHost := proxy.NewPerHost(proxyFromURL, direct) + perHost.AddFromString(noProxy) + + return perHost, nil +} diff --git a/vendor/github.com/docker/go-connections/sockets/sockets.go b/vendor/github.com/docker/go-connections/sockets/sockets.go new file mode 100644 index 000000000..a1d7beb4d --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/sockets.go @@ -0,0 +1,38 @@ +// Package sockets provides helper functions to create and configure Unix or TCP sockets. +package sockets + +import ( + "errors" + "net" + "net/http" + "time" +) + +// Why 32? See https://github.com/docker/docker/pull/8035. +const defaultTimeout = 32 * time.Second + +// ErrProtocolNotAvailable is returned when a given transport protocol is not provided by the operating system. +var ErrProtocolNotAvailable = errors.New("protocol not available") + +// ConfigureTransport configures the specified Transport according to the +// specified proto and addr. +// If the proto is unix (using a unix socket to communicate) or npipe the +// compression is disabled. 
+func ConfigureTransport(tr *http.Transport, proto, addr string) error { + switch proto { + case "unix": + return configureUnixTransport(tr, proto, addr) + case "npipe": + return configureNpipeTransport(tr, proto, addr) + default: + tr.Proxy = http.ProxyFromEnvironment + dialer, err := DialerFromEnvironment(&net.Dialer{ + Timeout: defaultTimeout, + }) + if err != nil { + return err + } + tr.Dial = dialer.Dial + } + return nil +} diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go new file mode 100644 index 000000000..386cf0dbb --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go @@ -0,0 +1,35 @@ +// +build !windows + +package sockets + +import ( + "fmt" + "net" + "net/http" + "syscall" + "time" +) + +const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path) + +func configureUnixTransport(tr *http.Transport, proto, addr string) error { + if len(addr) > maxUnixSocketPathSize { + return fmt.Errorf("Unix socket path %q is too long", addr) + } + // No need for compression in local communications. + tr.DisableCompression = true + tr.Dial = func(_, _ string) (net.Conn, error) { + return net.DialTimeout(proto, addr, defaultTimeout) + } + return nil +} + +func configureNpipeTransport(tr *http.Transport, proto, addr string) error { + return ErrProtocolNotAvailable +} + +// DialPipe connects to a Windows named pipe. +// This is not supported on other OSes. +func DialPipe(_ string, _ time.Duration) (net.Conn, error) { + return nil, syscall.EAFNOSUPPORT +} diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go new file mode 100644 index 000000000..5c21644e1 --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go @@ -0,0 +1,27 @@ +package sockets + +import ( + "net" + "net/http" + "time" + + "github.com/Microsoft/go-winio" +) + +func configureUnixTransport(tr *http.Transport, proto, addr string) error { + return ErrProtocolNotAvailable +} + +func configureNpipeTransport(tr *http.Transport, proto, addr string) error { + // No need for compression in local communications. + tr.DisableCompression = true + tr.Dial = func(_, _ string) (net.Conn, error) { + return DialPipe(addr, defaultTimeout) + } + return nil +} + +// DialPipe connects to a Windows named pipe. +func DialPipe(addr string, timeout time.Duration) (net.Conn, error) { + return winio.DialPipe(addr, &timeout) +} diff --git a/vendor/github.com/docker/go-connections/sockets/tcp_socket.go b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go new file mode 100644 index 000000000..53cbb6c79 --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go @@ -0,0 +1,22 @@ +// Package sockets provides helper functions to create and configure Unix or TCP sockets. +package sockets + +import ( + "crypto/tls" + "net" +) + +// NewTCPSocket creates a TCP socket listener with the specified address and +// the specified tls configuration. If TLSConfig is set, will encapsulate the +// TCP listener inside a TLS one. 
+func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) { + l, err := net.Listen("tcp", addr) + if err != nil { + return nil, err + } + if tlsConfig != nil { + tlsConfig.NextProtos = []string{"http/1.1"} + l = tls.NewListener(l, tlsConfig) + } + return l, nil +} diff --git a/vendor/github.com/docker/go-connections/sockets/unix_socket.go b/vendor/github.com/docker/go-connections/sockets/unix_socket.go new file mode 100644 index 000000000..a8b5dbb6f --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/unix_socket.go @@ -0,0 +1,32 @@ +// +build !windows + +package sockets + +import ( + "net" + "os" + "syscall" +) + +// NewUnixSocket creates a unix socket with the specified path and group. +func NewUnixSocket(path string, gid int) (net.Listener, error) { + if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) { + return nil, err + } + mask := syscall.Umask(0777) + defer syscall.Umask(mask) + + l, err := net.Listen("unix", path) + if err != nil { + return nil, err + } + if err := os.Chown(path, 0, gid); err != nil { + l.Close() + return nil, err + } + if err := os.Chmod(path, 0660); err != nil { + l.Close() + return nil, err + } + return l, nil +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go new file mode 100644 index 000000000..1ca0965e0 --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go @@ -0,0 +1,18 @@ +// +build go1.7 + +package tlsconfig + +import ( + "crypto/x509" + "runtime" +) + +// SystemCertPool returns a copy of the system cert pool, +// returns an error if failed to load or empty pool on windows. +func SystemCertPool() (*x509.CertPool, error) { + certpool, err := x509.SystemCertPool() + if err != nil && runtime.GOOS == "windows" { + return x509.NewCertPool(), nil + } + return certpool, err +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go new file mode 100644 index 000000000..1ff81c333 --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go @@ -0,0 +1,13 @@ +// +build !go1.7 + +package tlsconfig + +import ( + "crypto/x509" +) + +// SystemCertPool returns an new empty cert pool, +// accessing system cert pool is supported in go 1.7 +func SystemCertPool() (*x509.CertPool, error) { + return x509.NewCertPool(), nil +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go new file mode 100644 index 000000000..0ef3fdcb4 --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go @@ -0,0 +1,254 @@ +// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. +// +// As a reminder from https://golang.org/pkg/crypto/tls/#Config: +// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified. +// A Config may be reused; the tls package will also not modify it. +package tlsconfig + +import ( + "crypto/tls" + "crypto/x509" + "encoding/pem" + "fmt" + "io/ioutil" + "os" + + "github.com/pkg/errors" +) + +// Options represents the information needed to create client and server TLS configurations. 
+type Options struct { + CAFile string + + // If either CertFile or KeyFile is empty, Client() will not load them + // preventing the client from authenticating to the server. + // However, Server() requires them and will error out if they are empty. + CertFile string + KeyFile string + + // client-only option + InsecureSkipVerify bool + // server-only option + ClientAuth tls.ClientAuthType + // If ExclusiveRootPools is set, then if a CA file is provided, the root pool used for TLS + // creds will include exclusively the roots in that CA file. If no CA file is provided, + // the system pool will be used. + ExclusiveRootPools bool + MinVersion uint16 + // If Passphrase is set, it will be used to decrypt a TLS private key + // if the key is encrypted + Passphrase string +} + +// Extra (server-side) accepted CBC cipher suites - will phase out in the future +var acceptedCBCCiphers = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, +} + +// DefaultServerAcceptedCiphers should be uses by code which already has a crypto/tls +// options struct but wants to use a commonly accepted set of TLS cipher suites, with +// known weak algorithms removed. +var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...) + +// allTLSVersions lists all the TLS versions and is used by the code that validates +// a uint16 value as a TLS version. +var allTLSVersions = map[uint16]struct{}{ + tls.VersionSSL30: {}, + tls.VersionTLS10: {}, + tls.VersionTLS11: {}, + tls.VersionTLS12: {}, +} + +// ServerDefault returns a secure-enough TLS configuration for the server TLS configuration. +func ServerDefault(ops ...func(*tls.Config)) *tls.Config { + tlsconfig := &tls.Config{ + // Avoid fallback by default to SSL protocols < TLS1.2 + MinVersion: tls.VersionTLS12, + PreferServerCipherSuites: true, + CipherSuites: DefaultServerAcceptedCiphers, + } + + for _, op := range ops { + op(tlsconfig) + } + + return tlsconfig +} + +// ClientDefault returns a secure-enough TLS configuration for the client TLS configuration. +func ClientDefault(ops ...func(*tls.Config)) *tls.Config { + tlsconfig := &tls.Config{ + // Prefer TLS1.2 as the client minimum + MinVersion: tls.VersionTLS12, + CipherSuites: clientCipherSuites, + } + + for _, op := range ops { + op(tlsconfig) + } + + return tlsconfig +} + +// certPool returns an X.509 certificate pool from `caFile`, the certificate file. +func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) { + // If we should verify the server, we need to load a trusted ca + var ( + certPool *x509.CertPool + err error + ) + if exclusivePool { + certPool = x509.NewCertPool() + } else { + certPool, err = SystemCertPool() + if err != nil { + return nil, fmt.Errorf("failed to read system certificates: %v", err) + } + } + pem, err := ioutil.ReadFile(caFile) + if err != nil { + return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err) + } + if !certPool.AppendCertsFromPEM(pem) { + return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile) + } + return certPool, nil +} + +// isValidMinVersion checks that the input value is a valid tls minimum version +func isValidMinVersion(version uint16) bool { + _, ok := allTLSVersions[version] + return ok +} + +// adjustMinVersion sets the MinVersion on `config`, the input configuration. +// It assumes the current MinVersion on the `config` is the lowest allowed. 
+func adjustMinVersion(options Options, config *tls.Config) error { + if options.MinVersion > 0 { + if !isValidMinVersion(options.MinVersion) { + return fmt.Errorf("Invalid minimum TLS version: %x", options.MinVersion) + } + if options.MinVersion < config.MinVersion { + return fmt.Errorf("Requested minimum TLS version is too low. Should be at-least: %x", config.MinVersion) + } + config.MinVersion = options.MinVersion + } + + return nil +} + +// IsErrEncryptedKey returns true if the 'err' is an error of incorrect +// password when tryin to decrypt a TLS private key +func IsErrEncryptedKey(err error) bool { + return errors.Cause(err) == x509.IncorrectPasswordError +} + +// getPrivateKey returns the private key in 'keyBytes', in PEM-encoded format. +// If the private key is encrypted, 'passphrase' is used to decrypted the +// private key. +func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) { + // this section makes some small changes to code from notary/tuf/utils/x509.go + pemBlock, _ := pem.Decode(keyBytes) + if pemBlock == nil { + return nil, fmt.Errorf("no valid private key found") + } + + var err error + if x509.IsEncryptedPEMBlock(pemBlock) { + keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase)) + if err != nil { + return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it") + } + keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes}) + } + + return keyBytes, nil +} + +// getCert returns a Certificate from the CertFile and KeyFile in 'options', +// if the key is encrypted, the Passphrase in 'options' will be used to +// decrypt it. +func getCert(options Options) ([]tls.Certificate, error) { + if options.CertFile == "" && options.KeyFile == "" { + return nil, nil + } + + errMessage := "Could not load X509 key pair" + + cert, err := ioutil.ReadFile(options.CertFile) + if err != nil { + return nil, errors.Wrap(err, errMessage) + } + + prKeyBytes, err := ioutil.ReadFile(options.KeyFile) + if err != nil { + return nil, errors.Wrap(err, errMessage) + } + + prKeyBytes, err = getPrivateKey(prKeyBytes, options.Passphrase) + if err != nil { + return nil, errors.Wrap(err, errMessage) + } + + tlsCert, err := tls.X509KeyPair(cert, prKeyBytes) + if err != nil { + return nil, errors.Wrap(err, errMessage) + } + + return []tls.Certificate{tlsCert}, nil +} + +// Client returns a TLS configuration meant to be used by a client. +func Client(options Options) (*tls.Config, error) { + tlsConfig := ClientDefault() + tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify + if !options.InsecureSkipVerify && options.CAFile != "" { + CAs, err := certPool(options.CAFile, options.ExclusiveRootPools) + if err != nil { + return nil, err + } + tlsConfig.RootCAs = CAs + } + + tlsCerts, err := getCert(options) + if err != nil { + return nil, err + } + tlsConfig.Certificates = tlsCerts + + if err := adjustMinVersion(options, tlsConfig); err != nil { + return nil, err + } + + return tlsConfig, nil +} + +// Server returns a TLS configuration meant to be used by a server. +func Server(options Options) (*tls.Config, error) { + tlsConfig := ServerDefault() + tlsConfig.ClientAuth = options.ClientAuth + tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) + if err != nil { + if os.IsNotExist(err) { + return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) + } + return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. 
Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err) + } + tlsConfig.Certificates = []tls.Certificate{tlsCert} + if options.ClientAuth >= tls.VerifyClientCertIfGiven && options.CAFile != "" { + CAs, err := certPool(options.CAFile, options.ExclusiveRootPools) + if err != nil { + return nil, err + } + tlsConfig.ClientCAs = CAs + } + + if err := adjustMinVersion(options, tlsConfig); err != nil { + return nil, err + } + + return tlsConfig, nil +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go new file mode 100644 index 000000000..6b4c6a7c0 --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go @@ -0,0 +1,17 @@ +// +build go1.5 + +// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. +// +package tlsconfig + +import ( + "crypto/tls" +) + +// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) +var clientCipherSuites = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go new file mode 100644 index 000000000..ee22df47c --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go @@ -0,0 +1,15 @@ +// +build !go1.5 + +// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. +// +package tlsconfig + +import ( + "crypto/tls" +) + +// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) +var clientCipherSuites = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, +} diff --git a/vendor/github.com/docker/go-units/CONTRIBUTING.md b/vendor/github.com/docker/go-units/CONTRIBUTING.md new file mode 100644 index 000000000..9ea86d784 --- /dev/null +++ b/vendor/github.com/docker/go-units/CONTRIBUTING.md @@ -0,0 +1,67 @@ +# Contributing to go-units + +Want to hack on go-units? Awesome! Here are instructions to get you started. + +go-units is a part of the [Docker](https://www.docker.com) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read Docker's +[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), +[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), +[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and +[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 
+660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/vendor/github.com/docker/go-units/LICENSE b/vendor/github.com/docker/go-units/LICENSE new file mode 100644 index 000000000..b55b37bc3 --- /dev/null +++ b/vendor/github.com/docker/go-units/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/go-units/MAINTAINERS b/vendor/github.com/docker/go-units/MAINTAINERS new file mode 100644 index 000000000..4aac7c741 --- /dev/null +++ b/vendor/github.com/docker/go-units/MAINTAINERS @@ -0,0 +1,46 @@ +# go-units maintainers file +# +# This file describes who runs the docker/go-units project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "akihirosuda", + "dnephin", + "thajeztah", + "vdemeester", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. 
+ + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.akihirosuda] + Name = "Akihiro Suda" + Email = "akihiro.suda.cz@hco.ntt.co.jp" + GitHub = "AkihiroSuda" + + [people.dnephin] + Name = "Daniel Nephin" + Email = "dnephin@gmail.com" + GitHub = "dnephin" + + [people.thajeztah] + Name = "Sebastiaan van Stijn" + Email = "github@gone.nl" + GitHub = "thaJeztah" + + [people.vdemeester] + Name = "Vincent Demeester" + Email = "vincent@sbr.pm" + GitHub = "vdemeester" \ No newline at end of file diff --git a/vendor/github.com/docker/go-units/README.md b/vendor/github.com/docker/go-units/README.md new file mode 100644 index 000000000..4f70a4e13 --- /dev/null +++ b/vendor/github.com/docker/go-units/README.md @@ -0,0 +1,16 @@ +[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) + +# Introduction + +go-units is a library to transform human friendly measurements into machine friendly values. + +## Usage + +See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. + +## Copyright and license + +Copyright © 2015 Docker, Inc. + +go-units is licensed under the Apache License, Version 2.0. +See [LICENSE](LICENSE) for the full text of the license. diff --git a/vendor/github.com/docker/go-units/circle.yml b/vendor/github.com/docker/go-units/circle.yml new file mode 100644 index 000000000..af9d60552 --- /dev/null +++ b/vendor/github.com/docker/go-units/circle.yml @@ -0,0 +1,11 @@ +dependencies: + post: + # install golint + - go get golang.org/x/lint/golint + +test: + pre: + # run analysis before tests + - go vet ./... + - test -z "$(golint ./... | tee /dev/stderr)" + - test -z "$(gofmt -s -l . | tee /dev/stderr)" diff --git a/vendor/github.com/docker/go-units/duration.go b/vendor/github.com/docker/go-units/duration.go new file mode 100644 index 000000000..48dd8744d --- /dev/null +++ b/vendor/github.com/docker/go-units/duration.go @@ -0,0 +1,35 @@ +// Package units provides helper function to parse and print size and time units +// in human-readable format. +package units + +import ( + "fmt" + "time" +) + +// HumanDuration returns a human-readable approximation of a duration +// (eg. "About a minute", "4 hours ago", etc.). 
+func HumanDuration(d time.Duration) string { + if seconds := int(d.Seconds()); seconds < 1 { + return "Less than a second" + } else if seconds == 1 { + return "1 second" + } else if seconds < 60 { + return fmt.Sprintf("%d seconds", seconds) + } else if minutes := int(d.Minutes()); minutes == 1 { + return "About a minute" + } else if minutes < 60 { + return fmt.Sprintf("%d minutes", minutes) + } else if hours := int(d.Hours() + 0.5); hours == 1 { + return "About an hour" + } else if hours < 48 { + return fmt.Sprintf("%d hours", hours) + } else if hours < 24*7*2 { + return fmt.Sprintf("%d days", hours/24) + } else if hours < 24*30*2 { + return fmt.Sprintf("%d weeks", hours/24/7) + } else if hours < 24*365*2 { + return fmt.Sprintf("%d months", hours/24/30) + } + return fmt.Sprintf("%d years", int(d.Hours())/24/365) +} diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go new file mode 100644 index 000000000..c245a8951 --- /dev/null +++ b/vendor/github.com/docker/go-units/size.go @@ -0,0 +1,154 @@ +package units + +import ( + "fmt" + "strconv" + "strings" +) + +// See: http://en.wikipedia.org/wiki/Binary_prefix +const ( + // Decimal + + KB = 1000 + MB = 1000 * KB + GB = 1000 * MB + TB = 1000 * GB + PB = 1000 * TB + + // Binary + + KiB = 1024 + MiB = 1024 * KiB + GiB = 1024 * MiB + TiB = 1024 * GiB + PiB = 1024 * TiB +) + +type unitMap map[byte]int64 + +var ( + decimalMap = unitMap{'k': KB, 'm': MB, 'g': GB, 't': TB, 'p': PB} + binaryMap = unitMap{'k': KiB, 'm': MiB, 'g': GiB, 't': TiB, 'p': PiB} +) + +var ( + decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} + binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} +) + +func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) { + i := 0 + unitsLimit := len(_map) - 1 + for size >= base && i < unitsLimit { + size = size / base + i++ + } + return size, _map[i] +} + +// CustomSize returns a human-readable approximation of a size +// using custom format. +func CustomSize(format string, size float64, base float64, _map []string) string { + size, unit := getSizeAndUnit(size, base, _map) + return fmt.Sprintf(format, size, unit) +} + +// HumanSizeWithPrecision allows the size to be in any precision, +// instead of 4 digit precision used in units.HumanSize. +func HumanSizeWithPrecision(size float64, precision int) string { + size, unit := getSizeAndUnit(size, 1000.0, decimapAbbrs) + return fmt.Sprintf("%.*g%s", precision, size, unit) +} + +// HumanSize returns a human-readable approximation of a size +// capped at 4 valid numbers (eg. "2.746 MB", "796 KB"). +func HumanSize(size float64) string { + return HumanSizeWithPrecision(size, 4) +} + +// BytesSize returns a human-readable size in bytes, kibibytes, +// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). +func BytesSize(size float64) string { + return CustomSize("%.4g%s", size, 1024.0, binaryAbbrs) +} + +// FromHumanSize returns an integer from a human-readable specification of a +// size using SI standard (eg. "44kB", "17MB"). +func FromHumanSize(size string) (int64, error) { + return parseSize(size, decimalMap) +} + +// RAMInBytes parses a human-readable string representing an amount of RAM +// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and +// returns the number of bytes, or -1 if the string is unparseable. +// Units are case-insensitive, and the 'b' suffix is optional. 
+func RAMInBytes(size string) (int64, error) { + return parseSize(size, binaryMap) +} + +// Parses the human-readable size string into the amount it represents. +func parseSize(sizeStr string, uMap unitMap) (int64, error) { + // TODO: rewrite to use strings.Cut if there's a space + // once Go < 1.18 is deprecated. + sep := strings.LastIndexAny(sizeStr, "01234567890. ") + if sep == -1 { + // There should be at least a digit. + return -1, fmt.Errorf("invalid size: '%s'", sizeStr) + } + var num, sfx string + if sizeStr[sep] != ' ' { + num = sizeStr[:sep+1] + sfx = sizeStr[sep+1:] + } else { + // Omit the space separator. + num = sizeStr[:sep] + sfx = sizeStr[sep+1:] + } + + size, err := strconv.ParseFloat(num, 64) + if err != nil { + return -1, err + } + // Backward compatibility: reject negative sizes. + if size < 0 { + return -1, fmt.Errorf("invalid size: '%s'", sizeStr) + } + + if len(sfx) == 0 { + return int64(size), nil + } + + // Process the suffix. + + if len(sfx) > 3 { // Too long. + goto badSuffix + } + sfx = strings.ToLower(sfx) + // Trivial case: b suffix. + if sfx[0] == 'b' { + if len(sfx) > 1 { // no extra characters allowed after b. + goto badSuffix + } + return int64(size), nil + } + // A suffix from the map. + if mul, ok := uMap[sfx[0]]; ok { + size *= float64(mul) + } else { + goto badSuffix + } + + // The suffix may have extra "b" or "ib" (e.g. KiB or MB). + switch { + case len(sfx) == 2 && sfx[1] != 'b': + goto badSuffix + case len(sfx) == 3 && sfx[1:] != "ib": + goto badSuffix + } + + return int64(size), nil + +badSuffix: + return -1, fmt.Errorf("invalid suffix: '%s'", sfx) +} diff --git a/vendor/github.com/docker/go-units/ulimit.go b/vendor/github.com/docker/go-units/ulimit.go new file mode 100644 index 000000000..fca0400cc --- /dev/null +++ b/vendor/github.com/docker/go-units/ulimit.go @@ -0,0 +1,123 @@ +package units + +import ( + "fmt" + "strconv" + "strings" +) + +// Ulimit is a human friendly version of Rlimit. +type Ulimit struct { + Name string + Hard int64 + Soft int64 +} + +// Rlimit specifies the resource limits, such as max open files. +type Rlimit struct { + Type int `json:"type,omitempty"` + Hard uint64 `json:"hard,omitempty"` + Soft uint64 `json:"soft,omitempty"` +} + +const ( + // magic numbers for making the syscall + // some of these are defined in the syscall package, but not all. + // Also since Windows client doesn't get access to the syscall package, need to + // define these here + rlimitAs = 9 + rlimitCore = 4 + rlimitCPU = 0 + rlimitData = 2 + rlimitFsize = 1 + rlimitLocks = 10 + rlimitMemlock = 8 + rlimitMsgqueue = 12 + rlimitNice = 13 + rlimitNofile = 7 + rlimitNproc = 6 + rlimitRss = 5 + rlimitRtprio = 14 + rlimitRttime = 15 + rlimitSigpending = 11 + rlimitStack = 3 +) + +var ulimitNameMapping = map[string]int{ + //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. + "core": rlimitCore, + "cpu": rlimitCPU, + "data": rlimitData, + "fsize": rlimitFsize, + "locks": rlimitLocks, + "memlock": rlimitMemlock, + "msgqueue": rlimitMsgqueue, + "nice": rlimitNice, + "nofile": rlimitNofile, + "nproc": rlimitNproc, + "rss": rlimitRss, + "rtprio": rlimitRtprio, + "rttime": rlimitRttime, + "sigpending": rlimitSigpending, + "stack": rlimitStack, +} + +// ParseUlimit parses and returns a Ulimit from the specified string. 
+func ParseUlimit(val string) (*Ulimit, error) { + parts := strings.SplitN(val, "=", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("invalid ulimit argument: %s", val) + } + + if _, exists := ulimitNameMapping[parts[0]]; !exists { + return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) + } + + var ( + soft int64 + hard = &soft // default to soft in case no hard was set + temp int64 + err error + ) + switch limitVals := strings.Split(parts[1], ":"); len(limitVals) { + case 2: + temp, err = strconv.ParseInt(limitVals[1], 10, 64) + if err != nil { + return nil, err + } + hard = &temp + fallthrough + case 1: + soft, err = strconv.ParseInt(limitVals[0], 10, 64) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) + } + + if *hard != -1 { + if soft == -1 { + return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: soft: -1 (unlimited), hard: %d", *hard) + } + if soft > *hard { + return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard) + } + } + + return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil +} + +// GetRlimit returns the RLimit corresponding to Ulimit. +func (u *Ulimit) GetRlimit() (*Rlimit, error) { + t, exists := ulimitNameMapping[u.Name] + if !exists { + return nil, fmt.Errorf("invalid ulimit name %s", u.Name) + } + + return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil +} + +func (u *Ulimit) String() string { + return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) +} diff --git a/vendor/github.com/gogo/protobuf/AUTHORS b/vendor/github.com/gogo/protobuf/AUTHORS new file mode 100644 index 000000000..3d97fc7a2 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of GoGo authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS file, which +# lists people. For example, employees are listed in CONTRIBUTORS, +# but not in AUTHORS, because the employer holds the copyright. + +# Names should be added to this file as one of +# Organization's name +# Individual's name +# Individual's name + +# Please keep the list sorted. + +Sendgrid, Inc +Vastech SA (PTY) LTD +Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/vendor/github.com/gogo/protobuf/CONTRIBUTORS new file mode 100644 index 000000000..1b4f6c208 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/CONTRIBUTORS @@ -0,0 +1,23 @@ +Anton Povarov +Brian Goff +Clayton Coleman +Denis Smirnov +DongYun Kang +Dwayne Schultz +Georg Apitz +Gustav Paul +Johan Brandhorst +John Shahid +John Tuley +Laurent +Patrick Lee +Peter Edge +Roger Johansson +Sam Nguyen +Sergio Arbeo +Stephen J Day +Tamir Duberstein +Todd Eisenberger +Tormod Erevik Lea +Vyacheslav Kim +Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE new file mode 100644 index 000000000..f57de90da --- /dev/null +++ b/vendor/github.com/gogo/protobuf/LICENSE @@ -0,0 +1,35 @@ +Copyright (c) 2013, The GoGo Authors. All rights reserved. + +Protocol Buffers for Go with Gadgets + +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. 
+https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile new file mode 100644 index 000000000..00d65f327 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/Makefile @@ -0,0 +1,43 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +install: + go install + +test: install generate-test-pbs + go test + + +generate-test-pbs: + make install + make -C test_proto + make -C proto3_proto + make diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go new file mode 100644 index 000000000..a26b046d9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/clone.go @@ -0,0 +1,258 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer deep copy and merge. +// TODO: RawMessage. + +package proto + +import ( + "fmt" + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(src Message) Message { + in := reflect.ValueOf(src) + if in.IsNil() { + return src + } + out := reflect.New(in.Type().Elem()) + dst := out.Interface().(Message) + Merge(dst, src) + return dst +} + +// Merger is the interface representing objects that can merge messages of the same type. +type Merger interface { + // Merge merges src into this message. + // Required and optional fields that are set in src will be set to that value in dst. + // Elements of repeated fields will be appended. + // + // Merge may panic if called with a different argument type than the receiver. + Merge(src Message) +} + +// generatedMerger is the custom merge method that generated protos will have. +// We must add this method since a generate Merge method will conflict with +// many existing protos that have a Merge data field already defined. +type generatedMerger interface { + XXX_Merge(src Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. 
+func Merge(dst, src Message) { + if m, ok := dst.(Merger); ok { + m.Merge(src) + return + } + + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) + } + if in.IsNil() { + return // Merge from nil src is a noop + } + if m, ok := dst.(generatedMerger); ok { + m.XXX_Merge(src) + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) + } + + if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { + emOut := out.Addr().Interface().(extensionsBytes) + bIn := emIn.GetExtensions() + bOut := emOut.GetExtensions() + *bOut = append(*bOut, *bIn...) + } else if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). +// prop is set if this is a struct field (it may be nil). +func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. + if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. + elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. 
+ + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. + if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. + out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go new file mode 100644 index 000000000..24552483c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go @@ -0,0 +1,39 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import "reflect" + +type custom interface { + Marshal() ([]byte, error) + Unmarshal(data []byte) error + Size() int +} + +var customType = reflect.TypeOf((*custom)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go new file mode 100644 index 000000000..63b0f08be --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/decode.go @@ -0,0 +1,427 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. +var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. 
+ return 0, 0 +} + +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. 
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +// Unmarshal implementations should not clear the receiver. +// Any unmarshaled data should be merged into the receiver. +// Callers of Unmarshal that do not want to retain existing data +// should Reset the receiver before calling Unmarshal. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// newUnmarshaler is the interface representing objects that can +// unmarshal themselves. The semantics are identical to Unmarshaler. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newUnmarshaler interface { + XXX_Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. +func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. 
+ // + // See https://github.com/golang/protobuf/issues/424 + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +// StartGroup tag is already consumed. This function consumes +// EndGroup tag. +func (p *Buffer) DecodeGroup(pb Message) error { + b := p.buf[p.index:] + x, y := findEndGroup(b) + if x < 0 { + return io.ErrUnexpectedEOF + } + err := Unmarshal(b[:x], pb) + p.index += y + return err +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(newUnmarshaler); ok { + err := u.XXX_Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + // Slow workaround for messages that aren't Unmarshalers. + // This includes some hand-coded .pb.go files and + // bootstrap protos. + // TODO: fix all of those and then add Unmarshal to + // the Message interface. Then: + // The cast above and code below can be deleted. + // The old unmarshaler can be deleted. + // Clients can call Unmarshal directly (can already do that, actually). + var info InternalMessageInfo + err := info.Unmarshal(pb, p.buf[p.index:]) + p.index = len(p.buf) + return err +} diff --git a/vendor/github.com/gogo/protobuf/proto/deprecated.go b/vendor/github.com/gogo/protobuf/proto/deprecated.go new file mode 100644 index 000000000..35b882c09 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/deprecated.go @@ -0,0 +1,63 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import "errors" + +// Deprecated: do not use. +type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } + +// Deprecated: do not use. +func GetStats() Stats { return Stats{} } + +// Deprecated: do not use. +func MarshalMessageSet(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSet([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func MarshalMessageSetJSON(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSetJSON([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/gogo/protobuf/proto/discard.go b/vendor/github.com/gogo/protobuf/proto/discard.go new file mode 100644 index 000000000..fe1bd7d90 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/discard.go @@ -0,0 +1,350 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2017 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +type generatedDiscarder interface { + XXX_DiscardUnknown() +} + +// DiscardUnknown recursively discards all unknown fields from this message +// and all embedded messages. +// +// When unmarshaling a message with unrecognized fields, the tags and values +// of such fields are preserved in the Message. This allows a later call to +// marshal to be able to produce a message that continues to have those +// unrecognized fields. To avoid this, DiscardUnknown is used to +// explicitly clear the unknown fields after unmarshaling. +// +// For proto2 messages, the unknown fields of message extensions are only +// discarded from messages that have been accessed via GetExtension. +func DiscardUnknown(m Message) { + if m, ok := m.(generatedDiscarder); ok { + m.XXX_DiscardUnknown() + return + } + // TODO: Dynamically populate a InternalMessageInfo for legacy messages, + // but the master branch has no implementation for InternalMessageInfo, + // so it would be more work to replicate that approach. + discardLegacy(m) +} + +// DiscardUnknown recursively discards all unknown fields. +func (a *InternalMessageInfo) DiscardUnknown(m Message) { + di := atomicLoadDiscardInfo(&a.discard) + if di == nil { + di = getDiscardInfo(reflect.TypeOf(m).Elem()) + atomicStoreDiscardInfo(&a.discard, di) + } + di.discard(toPointer(&m)) +} + +type discardInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []discardFieldInfo + unrecognized field +} + +type discardFieldInfo struct { + field field // Offset of field, guaranteed to be valid + discard func(src pointer) +} + +var ( + discardInfoMap = map[reflect.Type]*discardInfo{} + discardInfoLock sync.Mutex +) + +func getDiscardInfo(t reflect.Type) *discardInfo { + discardInfoLock.Lock() + defer discardInfoLock.Unlock() + di := discardInfoMap[t] + if di == nil { + di = &discardInfo{typ: t} + discardInfoMap[t] = di + } + return di +} + +func (di *discardInfo) discard(src pointer) { + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&di.initialized) == 0 { + di.computeDiscardInfo() + } + + for _, fi := range di.fields { + sfp := src.offset(fi.field) + fi.discard(sfp) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { + // Ignore lock since DiscardUnknown is not concurrency safe. 
+ emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + DiscardUnknown(m) + } + } + } + + if di.unrecognized.IsValid() { + *src.offset(di.unrecognized).toBytes() = nil + } +} + +func (di *discardInfo) computeDiscardInfo() { + di.lock.Lock() + defer di.lock.Unlock() + if di.initialized != 0 { + return + } + t := di.typ + n := t.NumField() + + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + dfi := discardFieldInfo{field: toField(&f)} + tf := f.Type + + // Unwrap tf to get its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) + case isSlice: // E.g., []*pb.T + discardInfo := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sps := src.getPointerSlice() + for _, sp := range sps { + if !sp.isNil() { + discardInfo.discard(sp) + } + } + } + default: // E.g., *pb.T + discardInfo := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sp := src.getPointer() + if !sp.isNil() { + discardInfo.discard(sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) + default: // E.g., map[K]V + if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) + dfi.discard = func(src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + DiscardUnknown(val.Interface().(Message)) + } + } + } else { + dfi.discard = func(pointer) {} // Noop + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) + default: // E.g., interface{} + // TODO: Make this faster? + dfi.discard = func(src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + DiscardUnknown(sv.Interface().(Message)) + } + } + } + } + default: + continue + } + di.fields = append(di.fields, dfi) + } + + di.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + di.unrecognized = toField(&f) + } + + atomic.StoreInt32(&di.initialized, 1) +} + +func discardLegacy(m Message) { + v := reflect.ValueOf(m) + if v.Kind() != reflect.Ptr || v.IsNil() { + return + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return + } + t := v.Type() + + for i := 0; i < v.NumField(); i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + vf := v.Field(i) + tf := f.Type + + // Unwrap tf to get its most basic type. 
+ var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) + case isSlice: // E.g., []*pb.T + for j := 0; j < vf.Len(); j++ { + discardLegacy(vf.Index(j).Interface().(Message)) + } + default: // E.g., *pb.T + discardLegacy(vf.Interface().(Message)) + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) + default: // E.g., map[K]V + tv := vf.Type().Elem() + if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) + for _, key := range vf.MapKeys() { + val := vf.MapIndex(key) + discardLegacy(val.Interface().(Message)) + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) + default: // E.g., test_proto.isCommunique_Union interface + if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { + vf = vf.Elem() // E.g., *test_proto.Communique_Msg + if !vf.IsNil() { + vf = vf.Elem() // E.g., test_proto.Communique_Msg + vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value + if vf.Kind() == reflect.Ptr { + discardLegacy(vf.Interface().(Message)) + } + } + } + } + } + } + + if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { + if vf.Type() != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + vf.Set(reflect.ValueOf([]byte(nil))) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(m); err == nil { + // Ignore lock since discardLegacy is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + discardLegacy(m) + } + } + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go new file mode 100644 index 000000000..93464c91c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration.go @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Range of a Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid Duration +// may still be too large to fit into a time.Duration (the range of Duration +// is about 10,000 years, and the range of time.Duration is about 290). +func validateDuration(d *duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %#v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %#v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) + } + return nil +} + +// DurationFromProto converts a Duration to a time.Duration. DurationFromProto +// returns an error if the Duration is invalid or is too large to be +// represented in a time.Duration. +func durationFromProto(p *duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a Duration. +func durationProto(d time.Duration) *duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go new file mode 100644 index 000000000..e748e1730 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go @@ -0,0 +1,49 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() + +type duration struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *duration) Reset() { *m = duration{} } +func (*duration) ProtoMessage() {} +func (*duration) String() string { return "duration" } + +func init() { + RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go new file mode 100644 index 000000000..9581ccd30 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/encode.go @@ -0,0 +1,205 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "reflect" +) + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // errOneofHasNil is the error returned if Marshal is called with + // a struct with a oneof field containing a nil element. + errOneofHasNil = errors.New("proto: oneof field has nil value") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") + + // ErrTooLarge is the error returned if Marshal is called with a + // message that encodes to >2GB. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// EncodeVarint returns the varint encoding of x. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +// SizeVarint returns the varint encoding size of an integer. +func SizeVarint(x uint64) int { + switch { + case x < 1<<7: + return 1 + case x < 1<<14: + return 2 + case x < 1<<21: + return 3 + case x < 1<<28: + return 4 + case x < 1<<35: + return 5 + case x < 1<<42: + return 6 + case x < 1<<49: + return 7 + case x < 1<<56: + return 8 + case x < 1<<63: + return 9 + } + return 10 +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. 
+// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. +func (p *Buffer) EncodeMessage(pb Message) error { + siz := Size(pb) + sizVar := SizeVarint(uint64(siz)) + p.grow(siz + sizVar) + p.EncodeVarint(uint64(siz)) + return p.Marshal(pb) +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go new file mode 100644 index 000000000..0f5fb173e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go @@ -0,0 +1,33 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +func NewRequiredNotSetError(field string) *RequiredNotSetError { + return &RequiredNotSetError{field} +} diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go new file mode 100644 index 000000000..d4db5a1c1 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/equal.go @@ -0,0 +1,300 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extensions sets are equal. + - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. If the message is defined + in a proto3 .proto file, fields are not "set"; specifically, + zero length proto3 "bytes" fields are equal (nil == {}). + - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal. Note a "bytes" field, + although represented by []byte, is not a repeated field and the + rule for the scalar fields described above applies. + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. + - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. + - Two map fields are equal iff their lengths are the same, + and they contain the same set of elements. Zero-length map + fields are equal. + - Every other combination of things are not equal. + +The return value is undefined if a and b are not protocol buffers. 
+*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. +func equalStruct(v1, v2 reflect.Value) bool { + sprop := GetProperties(v1.Type()) + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2, sprop.Prop[i]) { + return false + } + } + + if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_InternalExtensions") + if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + return bytes.Equal(u1, u2) +} + +// v1 and v2 are known to have the same type. +// prop may be nil. +func equalAny(v1, v2 reflect.Value, prop *Properties) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. + n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2, nil) + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2, nil) { + return false + } + } + return true + case reflect.Ptr: + // Maps may have nil values in them, so check for nil. + if v1.IsNil() && v2.IsNil() { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return equalAny(v1.Elem(), v2.Elem(), prop) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value. 
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i), prop) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// x1 and x2 are InternalExtensions. +func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { + em1, _ := x1.extensionsRead() + em2, _ := x2.extensionsRead() + return equalExtMap(base, em1, em2) +} + +func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 == nil && m2 == nil { + // Both have only encoded form. + if bytes.Equal(e1.enc, e2.enc) { + continue + } + // The bytes are different, but the extensions might still be + // equal. We need to decode them to compare. + } + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + // If both have only encoded form and the bytes are the same, + // it is handled above. We get here when the bytes are different. + // We don't know how to decode it, so just compare them as byte + // slices. + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + return false + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. + log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + } + + return true +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go new file mode 100644 index 000000000..341c6f57f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/extensions.go @@ -0,0 +1,605 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "io" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. +// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer generated by the current +// proto compiler that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + extensionsWrite() map[int32]Extension + extensionsRead() (map[int32]Extension, sync.Locker) +} + +// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous +// version of the proto compiler that may be extended. +type extendableProtoV1 interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. +type extensionAdapter struct { + extendableProtoV1 +} + +func (e extensionAdapter) extensionsWrite() map[int32]Extension { + return e.ExtensionMap() +} + +func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + return e.ExtensionMap(), notLocker{} +} + +// notLocker is a sync.Locker whose Lock and Unlock methods are nops. +type notLocker struct{} + +func (n notLocker) Lock() {} +func (n notLocker) Unlock() {} + +// extendable returns the extendableProto interface for the given generated proto message. +// If the proto message has the old extension format, it returns a wrapper that implements +// the extendableProto interface. +func extendable(p interface{}) (extendableProto, error) { + switch p := p.(type) { + case extendableProto: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return p, nil + case extendableProtoV1: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return extensionAdapter{p}, nil + case extensionsBytes: + return slowExtensionAdapter{p}, nil + } + // Don't allocate a specific error containing %T: + // this is the hot path for Clone and MarshalText. 
+ return nil, errNotExtendable +} + +var errNotExtendable = errors.New("proto: not an extendable proto.Message") + +func isNilPtr(x interface{}) bool { + v := reflect.ValueOf(x) + return v.Kind() == reflect.Ptr && v.IsNil() +} + +// XXX_InternalExtensions is an internal representation of proto extensions. +// +// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, +// thus gaining the unexported 'extensions' method, which can be called only from the proto package. +// +// The methods of XXX_InternalExtensions are not concurrency safe in general, +// but calls to logically read-only methods such as has and get may be executed concurrently. +type XXX_InternalExtensions struct { + // The struct must be indirect so that if a user inadvertently copies a + // generated message and its embedded XXX_InternalExtensions, they + // avoid the mayhem of a copied mutex. + // + // The mutex serializes all logically read-only operations to p.extensionMap. + // It is up to the client to ensure that write operations to p.extensionMap are + // mutually exclusive with other accesses. + p *struct { + mu sync.Mutex + extensionMap map[int32]Extension + } +} + +// extensionsWrite returns the extension map, creating it on first use. +func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { + if e.p == nil { + e.p = new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }) + e.p.extensionMap = make(map[int32]Extension) + } + return e.p.extensionMap +} + +// extensionsRead returns the extensions map for read-only use. It may be nil. +// The caller must hold the returned mutex's lock when accessing Elements within the map. +func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { + if e.p == nil { + return nil, nil + } + return e.p.extensionMap, &e.p.mu +} + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. +type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style + Filename string // name of the file in which the extension is defined +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base Message, id int32, b []byte) { + if ebase, ok := base.(extensionsBytes); ok { + clearExtension(base, id) + ext := ebase.GetExtensions() + *ext = append(*ext, b...) + return + } + epb, err := extendable(base) + if err != nil { + return + } + extmap := epb.extensionsWrite() + extmap[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. 
+func isExtensionField(pb extendableProto, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + var pbi interface{} = pb + // Check the extended type. + if ea, ok := pbi.(extensionAdapter); ok { + pbi = ea.extendableProtoV1 + } + if ea, ok := pbi.(slowExtensionAdapter); ok { + pbi = ea.extensionsBytes + } + if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { + return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. +type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// HasExtension returns whether the given extension is present in pb. +func HasExtension(pb Message, extension *ExtensionDesc) bool { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + buf := *ext + o := 0 + for o < len(buf) { + tag, n := DecodeVarint(buf[o:]) + fieldNum := int32(tag >> 3) + if int32(fieldNum) == extension.Field { + return true + } + wireType := int(tag & 0x7) + o += n + l, err := size(buf[o:], wireType) + if err != nil { + return false + } + o += l + } + return false + } + // TODO: Check types, field numbers, etc.? + epb, err := extendable(pb) + if err != nil { + return false + } + extmap, mu := epb.extensionsRead() + if extmap == nil { + return false + } + mu.Lock() + _, ok := extmap[extension.Field] + mu.Unlock() + return ok +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb Message, extension *ExtensionDesc) { + clearExtension(pb, extension.Field) +} + +func clearExtension(pb Message, fieldNum int32) { + if epb, ok := pb.(extensionsBytes); ok { + offset := 0 + for offset != -1 { + offset = deleteExtension(epb, fieldNum, offset) + } + return + } + epb, err := extendable(pb) + if err != nil { + return + } + // TODO: Check types, field numbers, etc.? + extmap := epb.extensionsWrite() + delete(extmap, fieldNum) +} + +// GetExtension retrieves a proto2 extended field from pb. +// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. +// +// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes of the field extension. 
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + return decodeExtensionFromBytes(extension, *ext) + } + + epb, err := extendable(pb) + if err != nil { + return nil, err + } + + if extension.ExtendedType != nil { + // can only check type if this is a complete descriptor + if cerr := checkExtensionTypes(epb, extension); cerr != nil { + return nil, cerr + } + } + + emap, mu := epb.extensionsRead() + if emap == nil { + return defaultExtensionValue(extension) + } + mu.Lock() + defer mu.Unlock() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + if extension.ExtensionType == nil { + // incomplete descriptor + return e.enc, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. + e.value = v + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return e.value, nil +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. +func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + if extension.ExtensionType == nil { + // incomplete descriptor, so no default + return nil, ErrMissingExtension + } + + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. + return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. + // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. +func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + unmarshal := typeUnmarshaler(t, extension.Tag) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate space to store the pointer/slice. 
+ value := reflect.New(t).Elem() + + var err error + for { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + wire := int(x) & 7 + + b, err = unmarshal(b, valToPointer(value.Addr()), wire) + if err != nil { + return nil, err + } + + if len(b) == 0 { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. +func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(epb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. +// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing +// just the Field field, which defines the extension's field number. +func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + registeredExtensions := RegisteredExtensions(pb) + + emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } + mu.Lock() + defer mu.Unlock() + extensions := make([]*ExtensionDesc, 0, len(emap)) + for extid, e := range emap { + desc := e.desc + if desc == nil { + desc = registeredExtensions[extid] + if desc == nil { + desc = &ExtensionDesc{Field: extid} + } + } + + extensions = append(extensions, desc) + } + return extensions, nil +} + +// SetExtension sets the specified extension of pb to the specified value. +func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { + if epb, ok := pb.(extensionsBytes); ok { + ClearExtension(pb, extension) + newb, err := encodeExtension(extension, value) + if err != nil { + return err + } + bb := epb.GetExtensions() + *bb = append(*bb, newb...) + return nil + } + epb, err := extendable(pb) + if err != nil { + return err + } + if err := checkExtensionTypes(epb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + extmap := epb.extensionsWrite() + extmap[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// ClearAllExtensions clears all extensions from pb. +func ClearAllExtensions(pb Message) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + *ext = []byte{} + return + } + epb, err := extendable(pb) + if err != nil { + return + } + m := epb.extensionsWrite() + for k := range m { + delete(m, k) + } +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. 
+ +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. +func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go new file mode 100644 index 000000000..6f1ae120e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go @@ -0,0 +1,389 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
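The extension accessors defined above (HasExtension, GetExtension, SetExtension, ClearExtension) operate on generated messages that declare extension ranges and on *ExtensionDesc values registered through RegisterExtension. A hedged sketch of typical usage; the generated package mypb, its message mypb.Base and its descriptor mypb.E_Count are hypothetical stand-ins for protoc-gen-gogo output and do not appear anywhere in this patch:

	package main

	import (
		"fmt"
		"log"

		proto "github.com/gogo/protobuf/proto"
		mypb "example.com/hypothetical/mypb" // hypothetical generated code
	)

	func main() {
		msg := &mypb.Base{}

		// SetExtension records the descriptor and the value; the value is only
		// encoded into bytes when the message is marshaled.
		if err := proto.SetExtension(msg, mypb.E_Count, proto.Int32(7)); err != nil {
			log.Fatal(err)
		}

		if proto.HasExtension(msg, mypb.E_Count) {
			v, err := proto.GetExtension(msg, mypb.E_Count)
			if err != nil {
				log.Fatal(err)
			}
			fmt.Println(*v.(*int32)) // prints: 7
		}

		// ClearExtension removes the field again.
		proto.ClearExtension(msg, mypb.E_Count)
	}
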
+ +package proto + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strings" + "sync" +) + +type extensionsBytes interface { + Message + ExtensionRangeArray() []ExtensionRange + GetExtensions() *[]byte +} + +type slowExtensionAdapter struct { + extensionsBytes +} + +func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension { + panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.") +} + +func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + b := s.GetExtensions() + m, err := BytesToExtensionsMap(*b) + if err != nil { + panic(err) + } + return m, notLocker{} +} + +func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool { + if reflect.ValueOf(pb).IsNil() { + return ifnotset + } + value, err := GetExtension(pb, extension) + if err != nil { + return ifnotset + } + if value == nil { + return ifnotset + } + if value.(*bool) == nil { + return ifnotset + } + return *(value.(*bool)) +} + +func (this *Extension) Equal(that *Extension) bool { + if err := this.Encode(); err != nil { + return false + } + if err := that.Encode(); err != nil { + return false + } + return bytes.Equal(this.enc, that.enc) +} + +func (this *Extension) Compare(that *Extension) int { + if err := this.Encode(); err != nil { + return 1 + } + if err := that.Encode(); err != nil { + return -1 + } + return bytes.Compare(this.enc, that.enc) +} + +func SizeOfInternalExtension(m extendableProto) (n int) { + info := getMarshalInfo(reflect.TypeOf(m)) + return info.sizeV1Extensions(m.extensionsWrite()) +} + +type sortableMapElem struct { + field int32 + ext Extension +} + +func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions { + s := make(sortableExtensions, 0, len(m)) + for k, v := range m { + s = append(s, &sortableMapElem{field: k, ext: v}) + } + return s +} + +type sortableExtensions []*sortableMapElem + +func (this sortableExtensions) Len() int { return len(this) } + +func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] } + +func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field } + +func (this sortableExtensions) String() string { + sort.Sort(this) + ss := make([]string, len(this)) + for i := range this { + ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext) + } + return "map[" + strings.Join(ss, ",") + "]" +} + +func StringFromInternalExtension(m extendableProto) string { + return StringFromExtensionsMap(m.extensionsWrite()) +} + +func StringFromExtensionsMap(m map[int32]Extension) string { + return newSortableExtensionsFromMap(m).String() +} + +func StringFromExtensionsBytes(ext []byte) string { + m, err := BytesToExtensionsMap(ext) + if err != nil { + panic(err) + } + return StringFromExtensionsMap(m) +} + +func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) { + return EncodeExtensionMap(m.extensionsWrite(), data) +} + +func EncodeInternalExtensionBackwards(m extendableProto, data []byte) (n int, err error) { + return EncodeExtensionMapBackwards(m.extensionsWrite(), data) +} + +func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { + o := 0 + for _, e := range m { + if err := e.Encode(); err != nil { + return 0, err + } + n := copy(data[o:], e.enc) + if n != len(e.enc) { + return 0, io.ErrShortBuffer + } + o += n + } + return o, nil +} + +func EncodeExtensionMapBackwards(m 
map[int32]Extension, data []byte) (n int, err error) { + o := 0 + end := len(data) + for _, e := range m { + if err := e.Encode(); err != nil { + return 0, err + } + n := copy(data[end-len(e.enc):], e.enc) + if n != len(e.enc) { + return 0, io.ErrShortBuffer + } + end -= n + o += n + } + return o, nil +} + +func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { + e := m[id] + if err := e.Encode(); err != nil { + return nil, err + } + return e.enc, nil +} + +func size(buf []byte, wire int) (int, error) { + switch wire { + case WireVarint: + _, n := DecodeVarint(buf) + return n, nil + case WireFixed64: + return 8, nil + case WireBytes: + v, n := DecodeVarint(buf) + return int(v) + n, nil + case WireFixed32: + return 4, nil + case WireStartGroup: + offset := 0 + for { + u, n := DecodeVarint(buf[offset:]) + fwire := int(u & 0x7) + offset += n + if fwire == WireEndGroup { + return offset, nil + } + s, err := size(buf[offset:], wire) + if err != nil { + return 0, err + } + offset += s + } + } + return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire) +} + +func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) { + m := make(map[int32]Extension) + i := 0 + for i < len(buf) { + tag, n := DecodeVarint(buf[i:]) + if n <= 0 { + return nil, fmt.Errorf("unable to decode varint") + } + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + l, err := size(buf[i+n:], wireType) + if err != nil { + return nil, err + } + end := i + int(l) + n + m[int32(fieldNum)] = Extension{enc: buf[i:end]} + i = end + } + return m, nil +} + +func NewExtension(e []byte) Extension { + ee := Extension{enc: make([]byte, len(e))} + copy(ee.enc, e) + return ee +} + +func AppendExtension(e Message, tag int32, buf []byte) { + if ee, eok := e.(extensionsBytes); eok { + ext := ee.GetExtensions() + *ext = append(*ext, buf...) + return + } + if ee, eok := e.(extendableProto); eok { + m := ee.extensionsWrite() + ext := m[int32(tag)] // may be missing + ext.enc = append(ext.enc, buf...) 
+ m[int32(tag)] = ext + } +} + +func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) { + u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType)) + ei := u.getExtElemInfo(extension) + v := value + p := toAddrPointer(&v, ei.isptr) + siz := ei.sizer(p, SizeVarint(ei.wiretag)) + buf := make([]byte, 0, siz) + return ei.marshaler(buf, p, ei.wiretag, false) +} + +func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) { + o := 0 + for o < len(buf) { + tag, n := DecodeVarint((buf)[o:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + if o+n > len(buf) { + return nil, fmt.Errorf("unable to decode extension") + } + l, err := size((buf)[o+n:], wireType) + if err != nil { + return nil, err + } + if int32(fieldNum) == extension.Field { + if o+n+l > len(buf) { + return nil, fmt.Errorf("unable to decode extension") + } + v, err := decodeExtension((buf)[o:o+n+l], extension) + if err != nil { + return nil, err + } + return v, nil + } + o += n + l + } + return defaultExtensionValue(extension) +} + +func (this *Extension) Encode() error { + if this.enc == nil { + var err error + this.enc, err = encodeExtension(this.desc, this.value) + if err != nil { + return err + } + } + return nil +} + +func (this Extension) GoString() string { + if err := this.Encode(); err != nil { + return fmt.Sprintf("error encoding extension: %v", err) + } + return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) +} + +func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return errors.New("proto: bad extension number; not in declared ranges") + } + return SetExtension(pb, desc, value) +} + +func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return nil, fmt.Errorf("unregistered field number %d", fieldNum) + } + return GetExtension(pb, desc) +} + +func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions { + x := &XXX_InternalExtensions{ + p: new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }), + } + x.p.extensionMap = m + return *x +} + +func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { + pb := extendable.(extendableProto) + return pb.extensionsWrite() +} + +func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { + ext := pb.GetExtensions() + for offset < len(*ext) { + tag, n1 := DecodeVarint((*ext)[offset:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + n2, err := size((*ext)[offset+n1:], wireType) + if err != nil { + panic(err) + } + newOffset := offset + n1 + n2 + if fieldNum == theFieldNum { + *ext = append((*ext)[:offset], (*ext)[newOffset:]...) + return offset + } + offset = newOffset + } + return -1 +} diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go new file mode 100644 index 000000000..80db1c155 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/lib.go @@ -0,0 +1,973 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. + - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed by the enclosing message's name, or by the + enum's type name if it is a top-level enum. Enum types have a String + method, and a Enum method to assist in message construction. + - Nested messages, groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. 
HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Enum types do not get an Enum method. + +The simplest way to describe this is to see an example. +Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/gogo/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + return m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m 
*Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + + package main + + import ( + "log" + + "github.com/gogo/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: &pb.Test_Name{"fred"}, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. +// Marshal reports this when a required field is not initialized. +// Unmarshal reports this when a required field is missing from the wire data. +type RequiredNotSetError struct{ field string } + +func (e *RequiredNotSetError) Error() string { + if e.field == "" { + return fmt.Sprintf("proto: required field not set") + } + return fmt.Sprintf("proto: required field %q not set", e.field) +} +func (e *RequiredNotSetError) RequiredNotSet() bool { + return true +} + +type invalidUTF8Error struct{ field string } + +func (e *invalidUTF8Error) Error() string { + if e.field == "" { + return "proto: invalid UTF-8 detected" + } + return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) +} +func (e *invalidUTF8Error) InvalidUTF8() bool { + return true +} + +// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. +// This error should not be exposed to the external API as such errors should +// be recreated with the field information. +var errInvalidUTF8 = &invalidUTF8Error{} + +// isNonFatal reports whether the error is either a RequiredNotSet error +// or a InvalidUTF8 error. +func isNonFatal(err error) bool { + if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { + return true + } + if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { + return true + } + return false +} + +type nonFatal struct{ E error } + +// Merge merges err into nf and reports whether it was successful. +// Otherwise it returns false for any fatal non-nil errors. +func (nf *nonFatal) Merge(err error) (ok bool) { + if err == nil { + return true // not an error + } + if !isNonFatal(err) { + return false // fatal error + } + if nf.E == nil { + nf.E = err // store first instance of non-fatal error + } + return true +} + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. 
It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // read point + + deterministic bool +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. +func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. +func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +// SetDeterministic sets whether to use deterministic serialization. +// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. +// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexographical order. This is an implementation detail and +// subject to change. +func (p *Buffer) SetDeterministic(deterministic bool) { + p.deterministic = deterministic +} + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. 
+func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. +func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + sindex := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = sindex +} + +// SetDefaults 
sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. + switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or T or []*T or []T + switch f.Kind() { + case reflect.Struct: + setDefaults(f, recur, zeros) + + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.Kind() == reflect.Ptr && e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. 
+type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. +func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. +func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Struct: + nestedMessage = true // non-nullable + + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr, reflect.Struct: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. 
+ sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// mapKeys returns a sort.Interface to be used for sorting the map keys. +// Map fields may have key types of non-float scalars, strings and enums. +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{vs: vs} + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. + if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + case reflect.Bool: + s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true + case reflect.String: + s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } + default: + panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. 
+func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false +} + +const ( + // ProtoPackageIsVersion3 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + GoGoProtoPackageIsVersion3 = true + + // ProtoPackageIsVersion2 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + GoGoProtoPackageIsVersion2 = true + + // ProtoPackageIsVersion1 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + GoGoProtoPackageIsVersion1 = true +) + +// InternalMessageInfo is a type used internally by generated .pb.go files. +// This type is not intended to be used by non-generated code. +// This type is not subject to any compatibility guarantee. +type InternalMessageInfo struct { + marshal *marshalInfo + unmarshal *unmarshalInfo + merge *mergeInfo + discard *discardInfo +} diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go new file mode 100644 index 000000000..b3aa39190 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go @@ -0,0 +1,50 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "encoding/json" + "strconv" +) + +type Sizer interface { + Size() int +} + +type ProtoSizer interface { + ProtoSize() int +} + +func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { + s, ok := m[value] + if !ok { + s = strconv.Itoa(int(value)) + } + return json.Marshal(s) +} diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go new file mode 100644 index 000000000..f48a75676 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/message_set.go @@ -0,0 +1,181 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "errors" +) + +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. +var errNoMessageTypeID = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and messageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type messageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. 
+type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *messageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *messageSet) Has(pb Message) bool { + return ms.find(pb) != nil +} + +func (ms *messageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return errNoMessageTypeID + } + return nil // TODO: return error instead? +} + +func (ms *messageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return errNoMessageTypeID + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func unmarshalMessageSet(buf []byte, exts interface{}) error { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m = exts.extensionsWrite() + case map[int32]Extension: + m = exts + default: + return errors.New("proto: not an extension map") + } + + ms := new(messageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. + b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) // join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go new file mode 100644 index 000000000..b6cad9083 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go @@ -0,0 +1,357 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "reflect" + "sync" +) + +const unsafeAllowed = false + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// zeroField is a noop when calling pointer.offset. +var zeroField = field([]int{}) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// The pointer type is for the table-driven decoder. +// The implementation here uses a reflect.Value of pointer type to +// create a generic pointer. In pointer_unsafe.go we use unsafe +// instead of reflect to implement the same (but faster) interface. +type pointer struct { + v reflect.Value +} + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + return pointer{v: reflect.ValueOf(*i)} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + v := reflect.ValueOf(*i) + u := reflect.New(v.Type()) + u.Elem().Set(v) + return pointer{v: u} +} + +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{v: v} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} +} + +func (p pointer) isNil() bool { + return p.v.IsNil() +} + +// grow updates the slice s in place to make it one element longer. +// s must be addressable. +// Returns the (addressable) new element. 
+func grow(s reflect.Value) reflect.Value { + n, m := s.Len(), s.Cap() + if n < m { + s.SetLen(n + 1) + } else { + s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) + } + return s.Index(n) +} + +func (p pointer) toInt64() *int64 { + return p.v.Interface().(*int64) +} +func (p pointer) toInt64Ptr() **int64 { + return p.v.Interface().(**int64) +} +func (p pointer) toInt64Slice() *[]int64 { + return p.v.Interface().(*[]int64) +} + +var int32ptr = reflect.TypeOf((*int32)(nil)) + +func (p pointer) toInt32() *int32 { + return p.v.Convert(int32ptr).Interface().(*int32) +} + +// The toInt32Ptr/Slice methods don't work because of enums. +// Instead, we must use set/get methods for the int32ptr/slice case. +/* + func (p pointer) toInt32Ptr() **int32 { + return p.v.Interface().(**int32) +} + func (p pointer) toInt32Slice() *[]int32 { + return p.v.Interface().(*[]int32) +} +*/ +func (p pointer) getInt32Ptr() *int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().(*int32) + } + // an enum + return p.v.Elem().Convert(int32PtrType).Interface().(*int32) +} +func (p pointer) setInt32Ptr(v int32) { + // Allocate value in a *int32. Possibly convert that to a *enum. + // Then assign it to a **int32 or **enum. + // Note: we can convert *int32 to *enum, but we can't convert + // **int32 to **enum! + p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) +} + +// getInt32Slice copies []int32 from p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getInt32Slice() []int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().([]int32) + } + // an enum + // Allocate a []int32, then assign []enum's values into it. + // Note: we can't convert []enum to []int32. + slice := p.v.Elem() + s := make([]int32, slice.Len()) + for i := 0; i < slice.Len(); i++ { + s[i] = int32(slice.Index(i).Int()) + } + return s +} + +// setInt32Slice copies []int32 into p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setInt32Slice(v []int32) { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + p.v.Elem().Set(reflect.ValueOf(v)) + return + } + // an enum + // Allocate a []enum, then assign []int32's values into it. + // Note: we can't convert []enum to []int32. 
+ slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) + for i, x := range v { + slice.Index(i).SetInt(int64(x)) + } + p.v.Elem().Set(slice) +} +func (p pointer) appendInt32Slice(v int32) { + grow(p.v.Elem()).SetInt(int64(v)) +} + +func (p pointer) toUint64() *uint64 { + return p.v.Interface().(*uint64) +} +func (p pointer) toUint64Ptr() **uint64 { + return p.v.Interface().(**uint64) +} +func (p pointer) toUint64Slice() *[]uint64 { + return p.v.Interface().(*[]uint64) +} +func (p pointer) toUint32() *uint32 { + return p.v.Interface().(*uint32) +} +func (p pointer) toUint32Ptr() **uint32 { + return p.v.Interface().(**uint32) +} +func (p pointer) toUint32Slice() *[]uint32 { + return p.v.Interface().(*[]uint32) +} +func (p pointer) toBool() *bool { + return p.v.Interface().(*bool) +} +func (p pointer) toBoolPtr() **bool { + return p.v.Interface().(**bool) +} +func (p pointer) toBoolSlice() *[]bool { + return p.v.Interface().(*[]bool) +} +func (p pointer) toFloat64() *float64 { + return p.v.Interface().(*float64) +} +func (p pointer) toFloat64Ptr() **float64 { + return p.v.Interface().(**float64) +} +func (p pointer) toFloat64Slice() *[]float64 { + return p.v.Interface().(*[]float64) +} +func (p pointer) toFloat32() *float32 { + return p.v.Interface().(*float32) +} +func (p pointer) toFloat32Ptr() **float32 { + return p.v.Interface().(**float32) +} +func (p pointer) toFloat32Slice() *[]float32 { + return p.v.Interface().(*[]float32) +} +func (p pointer) toString() *string { + return p.v.Interface().(*string) +} +func (p pointer) toStringPtr() **string { + return p.v.Interface().(**string) +} +func (p pointer) toStringSlice() *[]string { + return p.v.Interface().(*[]string) +} +func (p pointer) toBytes() *[]byte { + return p.v.Interface().(*[]byte) +} +func (p pointer) toBytesSlice() *[][]byte { + return p.v.Interface().(*[][]byte) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return p.v.Interface().(*XXX_InternalExtensions) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return p.v.Interface().(*map[int32]Extension) +} +func (p pointer) getPointer() pointer { + return pointer{v: p.v.Elem()} +} +func (p pointer) setPointer(q pointer) { + p.v.Elem().Set(q.v) +} +func (p pointer) appendPointer(q pointer) { + grow(p.v.Elem()).Set(q.v) +} + +// getPointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getPointerSlice() []pointer { + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s +} + +// setPointerSlice copies []pointer into p as a new []*T. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setPointerSlice(v []pointer) { + if v == nil { + p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) + return + } + s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) + for _, p := range v { + s = reflect.Append(s, p.v) + } + p.v.Elem().Set(s) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + if p.v.Elem().IsNil() { + return pointer{v: p.v.Elem()} + } + return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct +} + +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + // TODO: check that p.v.Type().Elem() == t? 
+ return p.v +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} + +var atomicLock sync.Mutex diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go new file mode 100644 index 000000000..7ffd3c29d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go @@ -0,0 +1,59 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "reflect" +) + +// TODO: untested, so probably incorrect. 
+ +func (p pointer) getRef() pointer { + return pointer{v: p.v.Addr()} +} + +func (p pointer) appendRef(v pointer, typ reflect.Type) { + slice := p.getSlice(typ) + elem := v.asPointerTo(typ).Elem() + newSlice := reflect.Append(slice, elem) + slice.Set(newSlice) +} + +func (p pointer) getSlice(typ reflect.Type) reflect.Value { + sliceTyp := reflect.SliceOf(typ) + slice := p.asPointerTo(sliceTyp) + slice = slice.Elem() + return slice +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go new file mode 100644 index 000000000..d55a335d9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,308 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !purego,!appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "sync/atomic" + "unsafe" +) + +const unsafeAllowed = true + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// zeroField is a noop when calling pointer.offset. +const zeroField = field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != invalidField +} + +// The pointer type below is for the new table-driven encoder/decoder. +// The implementation here uses unsafe.Pointer to create a generic pointer. +// In pointer_reflect.go we use reflect instead of unsafe to implement +// the same (but slower) interface. 
+type pointer struct { + p unsafe.Pointer +} + +// size of pointer +var ptrSize = unsafe.Sizeof(uintptr(0)) + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + // Super-tricky - read pointer out of data word of interface value. + // Saves ~25ns over the equivalent: + // return valToPointer(reflect.ValueOf(*i)) + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + // Super-tricky - read or get the address of data word of interface value. + if isptr { + // The interface is of pointer type, thus it is a direct interface. + // The data word is the pointer data itself. We take its address. + return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + } + // The interface is not of pointer type. The data word is the pointer + // to the data. + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + // For safety, we should panic if !f.IsValid, however calling panic causes + // this to no longer be inlineable, which is a serious performance cost. + /* + if !f.IsValid() { + panic("invalid field") + } + */ + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} +} + +func (p pointer) isNil() bool { + return p.p == nil +} + +func (p pointer) toInt64() *int64 { + return (*int64)(p.p) +} +func (p pointer) toInt64Ptr() **int64 { + return (**int64)(p.p) +} +func (p pointer) toInt64Slice() *[]int64 { + return (*[]int64)(p.p) +} +func (p pointer) toInt32() *int32 { + return (*int32)(p.p) +} + +// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. +/* + func (p pointer) toInt32Ptr() **int32 { + return (**int32)(p.p) + } + func (p pointer) toInt32Slice() *[]int32 { + return (*[]int32)(p.p) + } +*/ +func (p pointer) getInt32Ptr() *int32 { + return *(**int32)(p.p) +} +func (p pointer) setInt32Ptr(v int32) { + *(**int32)(p.p) = &v +} + +// getInt32Slice loads a []int32 from p. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getInt32Slice() []int32 { + return *(*[]int32)(p.p) +} + +// setInt32Slice stores a []int32 to p. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setInt32Slice(v []int32) { + *(*[]int32)(p.p) = v +} + +// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? 
+func (p pointer) appendInt32Slice(v int32) { + s := (*[]int32)(p.p) + *s = append(*s, v) +} + +func (p pointer) toUint64() *uint64 { + return (*uint64)(p.p) +} +func (p pointer) toUint64Ptr() **uint64 { + return (**uint64)(p.p) +} +func (p pointer) toUint64Slice() *[]uint64 { + return (*[]uint64)(p.p) +} +func (p pointer) toUint32() *uint32 { + return (*uint32)(p.p) +} +func (p pointer) toUint32Ptr() **uint32 { + return (**uint32)(p.p) +} +func (p pointer) toUint32Slice() *[]uint32 { + return (*[]uint32)(p.p) +} +func (p pointer) toBool() *bool { + return (*bool)(p.p) +} +func (p pointer) toBoolPtr() **bool { + return (**bool)(p.p) +} +func (p pointer) toBoolSlice() *[]bool { + return (*[]bool)(p.p) +} +func (p pointer) toFloat64() *float64 { + return (*float64)(p.p) +} +func (p pointer) toFloat64Ptr() **float64 { + return (**float64)(p.p) +} +func (p pointer) toFloat64Slice() *[]float64 { + return (*[]float64)(p.p) +} +func (p pointer) toFloat32() *float32 { + return (*float32)(p.p) +} +func (p pointer) toFloat32Ptr() **float32 { + return (**float32)(p.p) +} +func (p pointer) toFloat32Slice() *[]float32 { + return (*[]float32)(p.p) +} +func (p pointer) toString() *string { + return (*string)(p.p) +} +func (p pointer) toStringPtr() **string { + return (**string)(p.p) +} +func (p pointer) toStringSlice() *[]string { + return (*[]string)(p.p) +} +func (p pointer) toBytes() *[]byte { + return (*[]byte)(p.p) +} +func (p pointer) toBytesSlice() *[][]byte { + return (*[][]byte)(p.p) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(p.p) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return (*map[int32]Extension)(p.p) +} + +// getPointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getPointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) +} + +// setPointerSlice stores []pointer into p as a []*T. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setPointerSlice(v []pointer) { + // Super-tricky - p should point to a []*T where T is a + // message type. We store it as []pointer. + *(*[]pointer)(p.p) = v +} + +// getPointer loads the pointer at p and returns it. +func (p pointer) getPointer() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} +} + +// setPointer stores the pointer q at p. +func (p pointer) setPointer(q pointer) { + *(*unsafe.Pointer)(p.p) = q.p +} + +// append q to the slice pointed to by p. +func (p pointer) appendPointer(q pointer) { + s := (*[]unsafe.Pointer)(p.p) + *s = append(*s, q.p) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + // Super-tricky - read pointer out of data word of interface value. + return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} +} + +// asPointerTo returns a reflect.Value that is a pointer to an +// object of type t stored at p. 
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go new file mode 100644 index 000000000..aca8eed02 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go @@ -0,0 +1,56 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !purego,!appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. 
+ +package proto + +import ( + "reflect" + "unsafe" +) + +func (p pointer) getRef() pointer { + return pointer{p: (unsafe.Pointer)(&p.p)} +} + +func (p pointer) appendRef(v pointer, typ reflect.Type) { + slice := p.getSlice(typ) + elem := v.asPointerTo(typ).Elem() + newSlice := reflect.Append(slice, elem) + slice.Set(newSlice) +} + +func (p pointer) getSlice(typ reflect.Type) reflect.Value { + sliceTyp := reflect.SliceOf(typ) + slice := p.asPointerTo(sliceTyp) + slice = slice.Elem() + return slice +} diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go new file mode 100644 index 000000000..28da1475f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/properties.go @@ -0,0 +1,610 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "log" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. 
+const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. +type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + JSONName string // name to use for JSON; determined by protoc + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + proto3 bool // whether this is known to be a proto3 field + oneof bool // whether this is a oneof field + + Default string // default value + HasDefault bool // whether an explicit default was provided + CustomType string + CastType string + StdTime bool + StdDuration bool + WktPointer bool + + stype reflect.Type // set for struct types only + ctype reflect.Type // set for custom types only + sprop *StructProperties // set for struct types only + + mtype reflect.Type // set for map types only + MapKeyProp *Properties // set for map types only + MapValProp *Properties // set for map types only +} + +// String formats the properties in the protobuf struct field tag style. 
+func (p *Properties) String() string { + s := p.Wire + s += "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + s += ",name=" + p.OrigName + if p.JSONName != p.OrigName { + s += ",json=" + p.JSONName + } + if p.proto3 { + s += ",proto3" + } + if p.oneof { + s += ",oneof" + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. + if len(fields) < 2 { + log.Printf("proto: tag has too few fields: %q", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + case "fixed32": + p.WireType = WireFixed32 + case "fixed64": + p.WireType = WireFixed64 + case "zigzag32": + p.WireType = WireVarint + case "zigzag64": + p.WireType = WireVarint + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + log.Printf("proto: tag has unknown wire type: %q", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + +outer: + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "json="): + p.JSONName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case f == "oneof": + p.oneof = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break outer + } + case strings.HasPrefix(f, "embedded="): + p.OrigName = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "customtype="): + p.CustomType = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "casttype="): + p.CastType = strings.Split(f, "=")[1] + case f == "stdtime": + p.StdTime = true + case f == "stdduration": + p.StdDuration = true + case f == "wktptr": + p.WktPointer = true + } + } +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// setFieldProps initializes the field properties for submessages and maps. 
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + isMap := typ.Kind() == reflect.Map + if len(p.CustomType) > 0 && !isMap { + p.ctype = typ + p.setTag(lockGetProp) + return + } + if p.StdTime && !isMap { + p.setTag(lockGetProp) + return + } + if p.StdDuration && !isMap { + p.setTag(lockGetProp) + return + } + if p.WktPointer && !isMap { + p.setTag(lockGetProp) + return + } + switch t1 := typ; t1.Kind() { + case reflect.Struct: + p.stype = typ + case reflect.Ptr: + if t1.Elem().Kind() == reflect.Struct { + p.stype = t1.Elem() + } + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + case reflect.Struct: + p.stype = t3 + } + case reflect.Struct: + p.stype = t2 + } + + case reflect.Map: + + p.mtype = t1 + p.MapKeyProp = &Properties{} + p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.MapValProp = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + + p.MapValProp.CustomType = p.CustomType + p.MapValProp.StdDuration = p.StdDuration + p.MapValProp.StdTime = p.StdTime + p.MapValProp.WktPointer = p.WktPointer + p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + p.setTag(lockGetProp) +} + +func (p *Properties) setTag(lockGetProp bool) { + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() +) + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if tag == "" { + return + } + p.Parse(tag) + p.setFieldProps(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +type ( + oneofFuncsIface interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + oneofWrappersIface interface { + XXX_OneofWrappers() []interface{} + } +) + +// getPropertiesLocked requires that propertiesMu is held. +func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + return prop + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. 
+ propertiesMap[t] = prop + + // build properties + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + isOneofMessage := false + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + oneof := f.Tag.Get("protobuf_oneof") // special case + if oneof != "" { + isOneofMessage = true + // Oneof fields don't use the traditional protobuf tag. + p.OrigName = oneof + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + if isOneofMessage { + var oots []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oots = m.XXX_OneofFuncs() + case oneofWrappersIface: + oots = m.XXX_OneofWrappers() + } + if len(oots) > 0 { + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) +var enumStringMaps = make(map[string]map[int32]string) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap + if _, ok := enumStringMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumStringMaps[typeName] = unusedNameMap +} + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). 
+var ( + protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers + protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypedNils[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { + // Generated code always calls RegisterType with nil x. + // This check is just for extra safety. + protoTypedNils[name] = x + } else { + protoTypedNils[name] = reflect.Zero(t).Interface().(Message) + } + revProtoTypes[t] = name +} + +// RegisterMapType is called from generated code and maps from the fully qualified +// proto name to the native map type of the proto map definition. +func RegisterMapType(x interface{}, name string) { + if reflect.TypeOf(x).Kind() != reflect.Map { + panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) + } + if _, ok := protoMapTypes[name]; ok { + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoMapTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} + +// MessageType returns the message type (pointer to struct) for a named message. +// The type is not guaranteed to implement proto.Message if the name refers to a +// map entry. +func MessageType(name string) reflect.Type { + if t, ok := protoTypedNils[name]; ok { + return reflect.TypeOf(t) + } + return protoMapTypes[name] +} + +// A registry of all linked proto files. +var ( + protoFiles = make(map[string][]byte) // file name => fileDescriptor +) + +// RegisterFile is called from generated code and maps from the +// full file name of a .proto file to its compressed FileDescriptorProto. +func RegisterFile(filename string, fileDescriptor []byte) { + protoFiles[filename] = fileDescriptor +} + +// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. +func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go new file mode 100644 index 000000000..40ea3dd93 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go @@ -0,0 +1,36 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" +) + +var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem() +var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go new file mode 100644 index 000000000..5a5fd93f7 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go @@ -0,0 +1,119 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "fmt" + "io" +) + +func Skip(data []byte) (n int, err error) { + l := len(data) + index := 0 + for index < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + index++ + if data[index-1] < 0x80 { + break + } + } + return index, nil + case 1: + index += 8 + return index, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + index += length + return index, nil + case 3: + for { + var innerWire uint64 + var start int = index + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := Skip(data[start:]) + if err != nil { + return 0, err + } + index = start + next + } + return index, nil + case 4: + return index, nil + case 5: + index += 4 + return index, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go new file mode 100644 index 000000000..f8babdefa --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal.go @@ -0,0 +1,3009 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// a sizer takes a pointer to a field and the size of its tag, computes the size of +// the encoded data. +type sizer func(pointer, int) int + +// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), +// marshals the field to the end of the slice, returns the slice and error (if any). +type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) + +// marshalInfo is the information used for marshaling a message. +type marshalInfo struct { + typ reflect.Type + fields []*marshalFieldInfo + unrecognized field // offset of XXX_unrecognized + extensions field // offset of XXX_InternalExtensions + v1extensions field // offset of XXX_extensions + sizecache field // offset of XXX_sizecache + initialized int32 // 0 -- only typ is set, 1 -- fully initialized + messageset bool // uses message set wire format + hasmarshaler bool // has custom marshaler + sync.RWMutex // protect extElems map, also for initialization + extElems map[int32]*marshalElemInfo // info of extension elements + + hassizer bool // has custom sizer + hasprotosizer bool // has custom protosizer + + bytesExtensions field // offset of XXX_extensions where the field type is []byte +} + +// marshalFieldInfo is the information used for marshaling a field of a message. +type marshalFieldInfo struct { + field field + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isPointer bool + required bool // field is required + name string // name of the field, for error reporting + oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements +} + +// marshalElemInfo is the information used for marshaling an extension or oneof element. +type marshalElemInfo struct { + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) +} + +var ( + marshalInfoMap = map[reflect.Type]*marshalInfo{} + marshalInfoLock sync.Mutex + + uint8SliceType = reflect.TypeOf(([]uint8)(nil)).Kind() +) + +// getMarshalInfo returns the information to marshal a given type of message. +// The info it returns may not necessarily initialized. +// t is the type of the message (NOT the pointer to it). +func getMarshalInfo(t reflect.Type) *marshalInfo { + marshalInfoLock.Lock() + u, ok := marshalInfoMap[t] + if !ok { + u = &marshalInfo{typ: t} + marshalInfoMap[t] = u + } + marshalInfoLock.Unlock() + return u +} + +// Size is the entry point from generated code, +// and should be ONLY called by generated code. +// It computes the size of encoded data of msg. +// a is a pointer to a place to store cached marshal info. +func (a *InternalMessageInfo) Size(msg Message) int { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return 0 + } + return u.size(ptr) +} + +// Marshal is the entry point from generated code, +// and should be ONLY called by generated code. +// It marshals msg to the end of b. +// a is a pointer to a place to store cached marshal info. 
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return b, ErrNil + } + return u.marshal(b, ptr, deterministic) +} + +func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { + // u := a.marshal, but atomically. + // We use an atomic here to ensure memory consistency. + u := atomicLoadMarshalInfo(&a.marshal) + if u == nil { + // Get marshal information from type of message. + t := reflect.ValueOf(msg).Type() + if t.Kind() != reflect.Ptr { + panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) + } + u = getMarshalInfo(t.Elem()) + // Store it in the cache for later users. + // a.marshal = u, but atomically. + atomicStoreMarshalInfo(&a.marshal, u) + } + return u +} + +// size is the main function to compute the size of the encoded data of a message. +// ptr is the pointer to the message. +func (u *marshalInfo) size(ptr pointer) int { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if u.hasmarshaler { + // Uses the message's Size method if available + if u.hassizer { + s := ptr.asPointerTo(u.typ).Interface().(Sizer) + return s.Size() + } + // Uses the message's ProtoSize method if available + if u.hasprotosizer { + s := ptr.asPointerTo(u.typ).Interface().(ProtoSizer) + return s.ProtoSize() + } + + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b, _ := m.Marshal() + return len(b) + } + + n := 0 + for _, f := range u.fields { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + n += f.sizer(ptr.offset(f.field), f.tagsize) + } + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + n += u.sizeMessageSet(e) + } else { + n += u.sizeExtensions(e) + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + n += u.sizeV1Extensions(m) + } + if u.bytesExtensions.IsValid() { + s := *ptr.offset(u.bytesExtensions).toBytes() + n += len(s) + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + n += len(s) + } + + // cache the result for use in marshal + if u.sizecache.IsValid() { + atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) + } + return n +} + +// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), +// fall back to compute the size. +func (u *marshalInfo) cachedsize(ptr pointer) int { + if u.sizecache.IsValid() { + return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) + } + return u.size(ptr) +} + +// marshal is the main function to marshal a message. It takes a byte slice and appends +// the encoded data to the end of the slice, returns the slice and error (if any). +// ptr is the pointer to the message. +// If deterministic is true, map is marshaled in deterministic order. +func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
+ if u.hasmarshaler { + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b1, err := m.Marshal() + b = append(b, b1...) + return b, err + } + + var err, errLater error + // The old marshaler encodes extensions at beginning. + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + b, err = u.appendMessageSet(b, e, deterministic) + } else { + b, err = u.appendExtensions(b, e, deterministic) + } + if err != nil { + return b, err + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + b, err = u.appendV1Extensions(b, m, deterministic) + if err != nil { + return b, err + } + } + if u.bytesExtensions.IsValid() { + s := *ptr.offset(u.bytesExtensions).toBytes() + b = append(b, s...) + } + for _, f := range u.fields { + if f.required { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // Required field is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name} + } + continue + } + } + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) + if err != nil { + if err1, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name + "." + err1.field} + } + continue + } + if err == errRepeatedHasNil { + err = errors.New("proto: repeated field " + f.name + " has nil element") + } + if err == errInvalidUTF8 { + if errLater == nil { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + errLater = &invalidUTF8Error{fullName} + } + continue + } + return b, err + } + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + b = append(b, s...) + } + return b, errLater +} + +// computeMarshalInfo initializes the marshal info. +func (u *marshalInfo) computeMarshalInfo() { + u.Lock() + defer u.Unlock() + if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock + return + } + + t := u.typ + u.unrecognized = invalidField + u.extensions = invalidField + u.v1extensions = invalidField + u.bytesExtensions = invalidField + u.sizecache = invalidField + isOneofMessage := false + + if reflect.PtrTo(t).Implements(sizerType) { + u.hassizer = true + } + if reflect.PtrTo(t).Implements(protosizerType) { + u.hasprotosizer = true + } + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
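+	// When the type implements its own Marshaler (checked just below), no
+	// per-field tables are built: size() and marshal() defer directly to the
+	// type's Size/ProtoSize/Marshal methods and append their output.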
+ if reflect.PtrTo(t).Implements(marshalerType) { + u.hasmarshaler = true + atomic.StoreInt32(&u.initialized, 1) + return + } + + n := t.NumField() + + // deal with XXX fields first + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Tag.Get("protobuf_oneof") != "" { + isOneofMessage = true + } + if !strings.HasPrefix(f.Name, "XXX_") { + continue + } + switch f.Name { + case "XXX_sizecache": + u.sizecache = toField(&f) + case "XXX_unrecognized": + u.unrecognized = toField(&f) + case "XXX_InternalExtensions": + u.extensions = toField(&f) + u.messageset = f.Tag.Get("protobuf_messageset") == "1" + case "XXX_extensions": + if f.Type.Kind() == reflect.Map { + u.v1extensions = toField(&f) + } else { + u.bytesExtensions = toField(&f) + } + case "XXX_NoUnkeyedLiteral": + // nothing to do + default: + panic("unknown XXX field: " + f.Name) + } + n-- + } + + // get oneof implementers + var oneofImplementers []interface{} + // gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler + if isOneofMessage { + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + case oneofWrappersIface: + oneofImplementers = m.XXX_OneofWrappers() + } + } + + // normal fields + fields := make([]marshalFieldInfo, n) // batch allocation + u.fields = make([]*marshalFieldInfo, 0, n) + for i, j := 0, 0; i < t.NumField(); i++ { + f := t.Field(i) + + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + field := &fields[j] + j++ + field.name = f.Name + u.fields = append(u.fields, field) + if f.Tag.Get("protobuf_oneof") != "" { + field.computeOneofFieldInfo(&f, oneofImplementers) + continue + } + if f.Tag.Get("protobuf") == "" { + // field has no tag (not in generated message), ignore it + u.fields = u.fields[:len(u.fields)-1] + j-- + continue + } + field.computeMarshalFieldInfo(&f) + } + + // fields are marshaled in tag order on the wire. + sort.Sort(byTag(u.fields)) + + atomic.StoreInt32(&u.initialized, 1) +} + +// helper for sorting fields by tag +type byTag []*marshalFieldInfo + +func (a byTag) Len() int { return len(a) } +func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } + +// getExtElemInfo returns the information to marshal an extension element. +// The info it returns is initialized. +func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { + // get from cache first + u.RLock() + e, ok := u.extElems[desc.Field] + u.RUnlock() + if ok { + return e + } + + t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct + tags := strings.Split(desc.Tag, ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizr, marshalr := typeMarshaler(t, tags, false, false) + e = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizr, + marshaler: marshalr, + isptr: t.Kind() == reflect.Ptr, + } + + // update cache + u.Lock() + if u.extElems == nil { + u.extElems = make(map[int32]*marshalElemInfo) + } + u.extElems[desc.Field] = e + u.Unlock() + return e +} + +// computeMarshalFieldInfo fills up the information to marshal a field. +func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { + // parse protobuf tag of the field. + // tag has format of "bytes,49,opt,name=foo,def=hello!" 
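+	// The comma-separated elements are: the wire encoding ("varint", "bytes",
+	// "fixed32", ...), the field number, the cardinality ("opt", "req" or "rep"),
+	// followed by options such as name=, def=, packed or proto3.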
+ tags := strings.Split(f.Tag.Get("protobuf"), ",") + if tags[0] == "" { + return + } + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + if tags[2] == "req" { + fi.required = true + } + fi.setTag(f, tag, wt) + fi.setMarshaler(f, tags) +} + +func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { + fi.field = toField(f) + fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.isPointer = true + fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) + fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) + + ityp := f.Type // interface type + for _, o := range oneofImplementers { + t := reflect.TypeOf(o) + if !t.Implements(ityp) { + continue + } + sf := t.Elem().Field(0) // oneof implementer is a struct with a single field + tags := strings.Split(sf.Tag.Get("protobuf"), ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizr, marshalr := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value + fi.oneofElems[t.Elem()] = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizr, + marshaler: marshalr, + } + } +} + +// wiretype returns the wire encoding of the type. +func wiretype(encoding string) uint64 { + switch encoding { + case "fixed32": + return WireFixed32 + case "fixed64": + return WireFixed64 + case "varint", "zigzag32", "zigzag64": + return WireVarint + case "bytes": + return WireBytes + case "group": + return WireStartGroup + } + panic("unknown wire type " + encoding) +} + +// setTag fills up the tag (in wire format) and its size in the info of a field. +func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { + fi.field = toField(f) + fi.wiretag = uint64(tag)<<3 | wt + fi.tagsize = SizeVarint(uint64(tag) << 3) +} + +// setMarshaler fills up the sizer and marshaler in the info of a field. +func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { + switch f.Type.Kind() { + case reflect.Map: + // map field + fi.isPointer = true + fi.sizer, fi.marshaler = makeMapMarshaler(f) + return + case reflect.Ptr, reflect.Slice: + fi.isPointer = true + } + fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) +} + +// typeMarshaler returns the sizer and marshaler of a given field. +// t is the type of the field. +// tags is the generated "protobuf" tag of the field. +// If nozero is true, zero value is not marshaled to the wire. +// If oneof is true, it is a oneof field. 
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { + encoding := tags[0] + + pointer := false + slice := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + packed := false + proto3 := false + ctype := false + isTime := false + isDuration := false + isWktPointer := false + validateUTF8 := true + for i := 2; i < len(tags); i++ { + if tags[i] == "packed" { + packed = true + } + if tags[i] == "proto3" { + proto3 = true + } + if strings.HasPrefix(tags[i], "customtype=") { + ctype = true + } + if tags[i] == "stdtime" { + isTime = true + } + if tags[i] == "stdduration" { + isDuration = true + } + if tags[i] == "wktptr" { + isWktPointer = true + } + } + validateUTF8 = validateUTF8 && proto3 + if !proto3 && !pointer && !slice { + nozero = false + } + + if ctype { + if reflect.PtrTo(t).Implements(customType) { + if slice { + return makeMessageRefSliceMarshaler(getMarshalInfo(t)) + } + if pointer { + return makeCustomPtrMarshaler(getMarshalInfo(t)) + } + return makeCustomMarshaler(getMarshalInfo(t)) + } else { + panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) + } + } + + if isTime { + if pointer { + if slice { + return makeTimePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeTimePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeTimeSliceMarshaler(getMarshalInfo(t)) + } + return makeTimeMarshaler(getMarshalInfo(t)) + } + + if isDuration { + if pointer { + if slice { + return makeDurationPtrSliceMarshaler(getMarshalInfo(t)) + } + return makeDurationPtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeDurationSliceMarshaler(getMarshalInfo(t)) + } + return makeDurationMarshaler(getMarshalInfo(t)) + } + + if isWktPointer { + switch t.Kind() { + case reflect.Float64: + if pointer { + if slice { + return makeStdDoubleValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdDoubleValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdDoubleValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdDoubleValueMarshaler(getMarshalInfo(t)) + case reflect.Float32: + if pointer { + if slice { + return makeStdFloatValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdFloatValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdFloatValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdFloatValueMarshaler(getMarshalInfo(t)) + case reflect.Int64: + if pointer { + if slice { + return makeStdInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt64ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdInt64ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt64ValueMarshaler(getMarshalInfo(t)) + case reflect.Uint64: + if pointer { + if slice { + return makeStdUInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt64ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdUInt64ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt64ValueMarshaler(getMarshalInfo(t)) + case reflect.Int32: + if pointer { + if slice { + return makeStdInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt32ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdInt32ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt32ValueMarshaler(getMarshalInfo(t)) + case reflect.Uint32: + if pointer { + if slice { + return 
makeStdUInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt32ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdUInt32ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt32ValueMarshaler(getMarshalInfo(t)) + case reflect.Bool: + if pointer { + if slice { + return makeStdBoolValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBoolValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdBoolValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBoolValueMarshaler(getMarshalInfo(t)) + case reflect.String: + if pointer { + if slice { + return makeStdStringValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdStringValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdStringValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdStringValueMarshaler(getMarshalInfo(t)) + case uint8SliceType: + if pointer { + if slice { + return makeStdBytesValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBytesValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdBytesValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBytesValueMarshaler(getMarshalInfo(t)) + default: + panic(fmt.Sprintf("unknown wktpointer type %#v", t)) + } + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return sizeBoolPtr, appendBoolPtr + } + if slice { + if packed { + return sizeBoolPackedSlice, appendBoolPackedSlice + } + return sizeBoolSlice, appendBoolSlice + } + if nozero { + return sizeBoolValueNoZero, appendBoolValueNoZero + } + return sizeBoolValue, appendBoolValue + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixed32Ptr, appendFixed32Ptr + } + if slice { + if packed { + return sizeFixed32PackedSlice, appendFixed32PackedSlice + } + return sizeFixed32Slice, appendFixed32Slice + } + if nozero { + return sizeFixed32ValueNoZero, appendFixed32ValueNoZero + } + return sizeFixed32Value, appendFixed32Value + case "varint": + if pointer { + return sizeVarint32Ptr, appendVarint32Ptr + } + if slice { + if packed { + return sizeVarint32PackedSlice, appendVarint32PackedSlice + } + return sizeVarint32Slice, appendVarint32Slice + } + if nozero { + return sizeVarint32ValueNoZero, appendVarint32ValueNoZero + } + return sizeVarint32Value, appendVarint32Value + } + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixedS32Ptr, appendFixedS32Ptr + } + if slice { + if packed { + return sizeFixedS32PackedSlice, appendFixedS32PackedSlice + } + return sizeFixedS32Slice, appendFixedS32Slice + } + if nozero { + return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero + } + return sizeFixedS32Value, appendFixedS32Value + case "varint": + if pointer { + return sizeVarintS32Ptr, appendVarintS32Ptr + } + if slice { + if packed { + return sizeVarintS32PackedSlice, appendVarintS32PackedSlice + } + return sizeVarintS32Slice, appendVarintS32Slice + } + if nozero { + return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero + } + return sizeVarintS32Value, appendVarintS32Value + case "zigzag32": + if pointer { + return sizeZigzag32Ptr, appendZigzag32Ptr + } + if slice { + if packed { + return sizeZigzag32PackedSlice, appendZigzag32PackedSlice + } + return sizeZigzag32Slice, appendZigzag32Slice + } + if nozero { + return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero + } + return sizeZigzag32Value, appendZigzag32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixed64Ptr, 
appendFixed64Ptr + } + if slice { + if packed { + return sizeFixed64PackedSlice, appendFixed64PackedSlice + } + return sizeFixed64Slice, appendFixed64Slice + } + if nozero { + return sizeFixed64ValueNoZero, appendFixed64ValueNoZero + } + return sizeFixed64Value, appendFixed64Value + case "varint": + if pointer { + return sizeVarint64Ptr, appendVarint64Ptr + } + if slice { + if packed { + return sizeVarint64PackedSlice, appendVarint64PackedSlice + } + return sizeVarint64Slice, appendVarint64Slice + } + if nozero { + return sizeVarint64ValueNoZero, appendVarint64ValueNoZero + } + return sizeVarint64Value, appendVarint64Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixedS64Ptr, appendFixedS64Ptr + } + if slice { + if packed { + return sizeFixedS64PackedSlice, appendFixedS64PackedSlice + } + return sizeFixedS64Slice, appendFixedS64Slice + } + if nozero { + return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero + } + return sizeFixedS64Value, appendFixedS64Value + case "varint": + if pointer { + return sizeVarintS64Ptr, appendVarintS64Ptr + } + if slice { + if packed { + return sizeVarintS64PackedSlice, appendVarintS64PackedSlice + } + return sizeVarintS64Slice, appendVarintS64Slice + } + if nozero { + return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero + } + return sizeVarintS64Value, appendVarintS64Value + case "zigzag64": + if pointer { + return sizeZigzag64Ptr, appendZigzag64Ptr + } + if slice { + if packed { + return sizeZigzag64PackedSlice, appendZigzag64PackedSlice + } + return sizeZigzag64Slice, appendZigzag64Slice + } + if nozero { + return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero + } + return sizeZigzag64Value, appendZigzag64Value + } + case reflect.Float32: + if pointer { + return sizeFloat32Ptr, appendFloat32Ptr + } + if slice { + if packed { + return sizeFloat32PackedSlice, appendFloat32PackedSlice + } + return sizeFloat32Slice, appendFloat32Slice + } + if nozero { + return sizeFloat32ValueNoZero, appendFloat32ValueNoZero + } + return sizeFloat32Value, appendFloat32Value + case reflect.Float64: + if pointer { + return sizeFloat64Ptr, appendFloat64Ptr + } + if slice { + if packed { + return sizeFloat64PackedSlice, appendFloat64PackedSlice + } + return sizeFloat64Slice, appendFloat64Slice + } + if nozero { + return sizeFloat64ValueNoZero, appendFloat64ValueNoZero + } + return sizeFloat64Value, appendFloat64Value + case reflect.String: + if validateUTF8 { + if pointer { + return sizeStringPtr, appendUTF8StringPtr + } + if slice { + return sizeStringSlice, appendUTF8StringSlice + } + if nozero { + return sizeStringValueNoZero, appendUTF8StringValueNoZero + } + return sizeStringValue, appendUTF8StringValue + } + if pointer { + return sizeStringPtr, appendStringPtr + } + if slice { + return sizeStringSlice, appendStringSlice + } + if nozero { + return sizeStringValueNoZero, appendStringValueNoZero + } + return sizeStringValue, appendStringValue + case reflect.Slice: + if slice { + return sizeBytesSlice, appendBytesSlice + } + if oneof { + // Oneof bytes field may also have "proto3" tag. + // We want to marshal it as a oneof field. Do this + // check before the proto3 check. 
+ return sizeBytesOneof, appendBytesOneof + } + if proto3 { + return sizeBytes3, appendBytes3 + } + return sizeBytes, appendBytes + case reflect.Struct: + switch encoding { + case "group": + if slice { + return makeGroupSliceMarshaler(getMarshalInfo(t)) + } + return makeGroupMarshaler(getMarshalInfo(t)) + case "bytes": + if pointer { + if slice { + return makeMessageSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageMarshaler(getMarshalInfo(t)) + } else { + if slice { + return makeMessageRefSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageRefMarshaler(getMarshalInfo(t)) + } + } + } + panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) +} + +// Below are functions to size/marshal a specific type of a field. +// They are stored in the field's info, and called by function pointers. +// They have type sizer or marshaler. + +func sizeFixed32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixedS32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFloat32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + return (4 + tagsize) * len(s) +} +func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixed64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFixedS64Value(_ pointer, tagsize 
int) int { + return 8 + tagsize +} +func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFloat64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + return (8 + tagsize) * len(s) +} +func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeVarint32Value(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarint32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarint64Value(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + return SizeVarint(v) + tagsize +} +func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return SizeVarint(v) + tagsize +} +func sizeVarint64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return SizeVarint(*p) + tagsize +} +func sizeVarint64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(v) + tagsize + } + return 
n +} +func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize + } + return n +} +func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize + } + return n +} +func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeBoolValue(_ pointer, tagsize int) int { + return 1 + tagsize +} +func sizeBoolValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toBool() + if !v { + return 0 + } + return 1 + tagsize +} +func sizeBoolPtr(ptr pointer, tagsize int) int { + p := *ptr.toBoolPtr() + if p == nil { + return 0 + } + return 1 + tagsize +} +func sizeBoolSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + return (1 + tagsize) * len(s) +} 
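+// Packed repeated scalar fields are encoded as a single length-delimited
+// record: the key (wire type WireBytes), a varint byte length, then the
+// concatenated element encodings. The *PackedSlice sizers therefore add
+// SizeVarint(uint64(n)) for the length prefix on top of the payload size n,
+// and the append*PackedSlice marshalers rewrite the key's wire type with
+// wiretag&^7|WireBytes before writing the length and payload.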
+func sizeBoolPackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return 0 + } + return len(s) + SizeVarint(uint64(len(s))) + tagsize +} +func sizeStringValue(ptr pointer, tagsize int) int { + v := *ptr.toString() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toString() + if v == "" { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringPtr(ptr pointer, tagsize int) int { + p := *ptr.toStringPtr() + if p == nil { + return 0 + } + v := *p + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringSlice(ptr pointer, tagsize int) int { + s := *ptr.toStringSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} +func sizeBytes(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if v == nil { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytes3(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if len(v) == 0 { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesOneof(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesSlice(ptr pointer, tagsize int) int { + s := *ptr.toBytesSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} + +// appendFixed32 appends an encoded fixed32 to b. +func appendFixed32(b []byte, v uint32) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24)) + return b +} + +// appendFixed64 appends an encoded fixed64 to b. +func appendFixed64(b []byte, v uint64) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) + return b +} + +// appendVarint appends an encoded varint to b. +func appendVarint(b []byte, v uint64) []byte { + // TODO: make 1-byte (maybe 2-byte) case inline-able, once we + // have non-leaf inliner. 
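+	// Each output byte carries 7 bits of the value, least-significant group
+	// first; the high bit of every byte except the last is set as a
+	// continuation marker, which is what each size class below emits.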
+ switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte(v&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, *p) + return b, nil +} +func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(*p)) + return b, nil +} +func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(*p)) + return b, nil +} +func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, *p) + return b, nil +} +func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, 
nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(*p)) + return b, nil +} +func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(*p)) + return b, nil +} +func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() 
+ if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, *p) + return b, nil +} +func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + } + return b, nil +} +func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, v) + } + return b, nil +} +func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + if !v { + return b, nil + } + b = appendVarint(b, wiretag) + b = append(b, 1) + return b, nil +} + +func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toBoolPtr() + if p == nil { + 
return b, nil + } + b = appendVarint(b, wiretag) + if *p { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(len(s))) + for _, v := range s { + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if v == "" { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toStringSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} +func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if v == "" { + return b, nil + } + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + s := *ptr.toStringSlice() + for _, v := range s { + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if v == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) 
+ return b, nil +} +func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if len(v) == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBytesSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} + +// makeGroupMarshaler returns the sizer and marshaler for a group. +// u is the marshal info of the underlying message. +func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + return u.size(p) + 2*tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + var err error + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, p, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + return b, err + } +} + +// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. +// u is the marshal info of the underlying message. +func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + n += u.size(v) + 2*tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, v, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMessageMarshaler returns the sizer and marshaler for a message field. +// u is the marshal info of the message. +func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.size(p) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(p) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, p, deterministic) + } +} + +// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. +// u is the marshal info of the message. 
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMapMarshaler returns the sizer and marshaler for a map field. +// f is the pointer to the reflect data structure of the field. +func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { + // figure out key and value type + t := f.Type + keyType := t.Key() + valType := t.Elem() + tags := strings.Split(f.Tag.Get("protobuf"), ",") + keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + stdOptions := false + for _, t := range tags { + if strings.HasPrefix(t, "customtype=") { + valTags = append(valTags, t) + } + if t == "stdtime" { + valTags = append(valTags, t) + stdOptions = true + } + if t == "stdduration" { + valTags = append(valTags, t) + stdOptions = true + } + if t == "wktptr" { + valTags = append(valTags, t) + } + } + keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map + valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map + keyWireTag := 1<<3 | wiretype(keyTags[0]) + valWireTag := 2<<3 | wiretype(valTags[0]) + + // We create an interface to get the addresses of the map key and value. + // If value is pointer-typed, the interface is a direct interface, the + // idata itself is the value. Otherwise, the idata is the pointer to the + // value. + // Key cannot be pointer-typed. + valIsPtr := valType.Kind() == reflect.Ptr + + // If value is a message with nested maps, calling + // valSizer in marshal may be quadratic. We should use + // cached version in marshal (but not in size). + // If value is not message type, we don't have size cache, + // but it cannot be nested either. Just use valSizer. + valCachedSizer := valSizer + if valIsPtr && !stdOptions && valType.Elem().Kind() == reflect.Struct { + u := getMarshalInfo(valType.Elem()) + valCachedSizer = func(ptr pointer, tagsize int) int { + // Same as message sizer, but use cache. 
+ p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.cachedsize(p) + return siz + SizeVarint(uint64(siz)) + tagsize + } + } + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(t).Elem() // the map + n := 0 + for _, k := range m.MapKeys() { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(t).Elem() // the map + var err error + keys := m.MapKeys() + if len(keys) > 1 && deterministic { + sort.Sort(mapKeys(keys)) + } + + var nerr nonFatal + for _, k := range keys { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + b = appendVarint(b, tag) + siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + b = appendVarint(b, uint64(siz)) + b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) + if !nerr.Merge(err) { + return b, err + } + b, err = valMarshaler(b, vaddr, valWireTag, deterministic) + if err != ErrNil && !nerr.Merge(err) { // allow nil value in map + return b, err + } + } + return b, nerr.E + } +} + +// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. +// fi is the marshal info of the field. +// f is the pointer to the reflect data structure of the field. +func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { + // Oneof field is an interface. We need to get the actual data type on the fly. + t := f.Type + return func(ptr pointer, _ int) int { + p := ptr.getInterfacePointer() + if p.isNil() { + return 0 + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + e := fi.oneofElems[telem] + return e.sizer(p, e.tagsize) + }, + func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { + p := ptr.getInterfacePointer() + if p.isNil() { + return b, nil + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { + return b, errOneofHasNil + } + e := fi.oneofElems[telem] + return e.marshaler(b, p, e.wiretag, deterministic) + } +} + +// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. +func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + mu.Unlock() + return n +} + +// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. 
+func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// message set format is: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } + +// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field +// in message set format (above). +func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for id, e := range m { + n += 2 // start group, end group. tag = 1 (size=1) + n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + siz := len(msgWithLen) + n += siz + 1 // message, tag = 3 (size=1) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, 1) // message, tag = 3 (size=1) + } + mu.Unlock() + return n +} + +// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) +// to the end of byte slice b. +func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for id, e := range m { + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. 
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + if !nerr.Merge(err) { + return b, err + } + b = append(b, 1<<3|WireEndGroup) + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, id := range keys { + e := m[int32(id)] + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + b = append(b, 1<<3|WireEndGroup) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// sizeV1Extensions computes the size of encoded data for a V1-API extension field. +func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { + if m == nil { + return 0 + } + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + return n +} + +// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. +func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { + if m == nil { + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + var err error + var nerr nonFatal + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// newMarshaler is the interface representing objects that can marshal themselves. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. 
+type newMarshaler interface { + XXX_Size() int + XXX_Marshal(b []byte, deterministic bool) ([]byte, error) +} + +// Size returns the encoded size of a protocol buffer message. +// This is the main entry point. +func Size(pb Message) int { + if m, ok := pb.(newMarshaler); ok { + return m.XXX_Size() + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, _ := m.Marshal() + return len(b) + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return 0 + } + var info InternalMessageInfo + return info.Size(pb) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, returning the data. +// This is the main entry point. +func Marshal(pb Message) ([]byte, error) { + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + b := make([]byte, 0, siz) + return m.XXX_Marshal(b, false) + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + return m.Marshal() + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return nil, ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + b := make([]byte, 0, siz) + return info.Marshal(b, pb, false) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, writing the result to the +// Buffer. +// This is an alternative entry point. It is not necessary to use +// a Buffer for most applications. +func (p *Buffer) Marshal(pb Message) error { + var err error + if p.deterministic { + if _, ok := pb.(Marshaler); ok { + return fmt.Errorf("proto: deterministic not supported by the Marshal method of %T", pb) + } + } + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + p.grow(siz) // make sure buf has enough capacity + pp := p.buf[len(p.buf) : len(p.buf) : len(p.buf)+siz] + pp, err = m.XXX_Marshal(pp, p.deterministic) + p.buf = append(p.buf, pp...) + return err + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + var b []byte + b, err = m.Marshal() + p.buf = append(p.buf, b...) + return err + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + p.grow(siz) // make sure buf has enough capacity + p.buf, err = info.Marshal(p.buf, pb, p.deterministic) + return err +} + +// grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After grow(n), at least n bytes can be written to the +// buffer without another allocation. +func (p *Buffer) grow(n int) { + need := len(p.buf) + n + if need <= cap(p.buf) { + return + } + newCap := len(p.buf) * 2 + if newCap < need { + newCap = need + } + p.buf = append(make([]byte, 0, newCap), p.buf...) +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go new file mode 100644 index 000000000..997f57c1e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go @@ -0,0 +1,388 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +// makeMessageRefMarshaler differs a bit from makeMessageMarshaler +// It marshal a message T instead of a *T +func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + siz := u.size(ptr) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + b = appendVarint(b, wiretag) + siz := u.cachedsize(ptr) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, ptr, deterministic) + } +} + +// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler +// It marshals a slice of messages []T instead of []*T +func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + e := elem.Interface() + v := toAddrPointer(&e, false) + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + var err, errreq error + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + e := elem.Interface() + v := toAddrPointer(&e, false) + b = appendVarint(b, wiretag) + siz := u.size(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. 
+ if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + + return b, errreq + } +} + +func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) + siz := m.Size() + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) + siz := m.Size() + buf, err := m.Marshal() + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + return b, nil + } +} + +func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(u.typ).Interface().(custom) + siz := m.Size() + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(u.typ).Interface().(custom) + siz := m.Size() + buf, err := m.Marshal() + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + return b, nil + } +} + +func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(time.Time) + ts, err := timestampProto(t) + if err != nil { + return 0 + } + siz := Size(ts) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(time.Time) + ts, err := timestampProto(t) + if err != nil { + return nil, err + } + siz := Size(ts) + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + siz := Size(ts) + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) + dur := durationProto(*d) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) + dur := durationProto(*d) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(time.Duration) + dur := durationProto(d) + siz := Size(dur) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(time.Duration) + dur := durationProto(d) + siz := Size(dur) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_merge.go b/vendor/github.com/gogo/protobuf/proto/table_merge.go new file mode 100644 index 000000000..60dcf70d1 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_merge.go @@ -0,0 +1,676 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +// Merge merges the src message into dst. +// This assumes that dst and src of the same type and are non-nil. +func (a *InternalMessageInfo) Merge(dst, src Message) { + mi := atomicLoadMergeInfo(&a.merge) + if mi == nil { + mi = getMergeInfo(reflect.TypeOf(dst).Elem()) + atomicStoreMergeInfo(&a.merge, mi) + } + mi.merge(toPointer(&dst), toPointer(&src)) +} + +type mergeInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []mergeFieldInfo + unrecognized field // Offset of XXX_unrecognized +} + +type mergeFieldInfo struct { + field field // Offset of field, guaranteed to be valid + + // isPointer reports whether the value in the field is a pointer. + // This is true for the following situations: + // * Pointer to struct + // * Pointer to basic type (proto2 only) + // * Slice (first value in slice header is a pointer) + // * String (first value in string header is a pointer) + isPointer bool + + // basicWidth reports the width of the field assuming that it is directly + // embedded in the struct (as is the case for basic types in proto3). + // The possible values are: + // 0: invalid + // 1: bool + // 4: int32, uint32, float32 + // 8: int64, uint64, float64 + basicWidth int + + // Where dst and src are pointers to the types being merged. + merge func(dst, src pointer) +} + +var ( + mergeInfoMap = map[reflect.Type]*mergeInfo{} + mergeInfoLock sync.Mutex +) + +func getMergeInfo(t reflect.Type) *mergeInfo { + mergeInfoLock.Lock() + defer mergeInfoLock.Unlock() + mi := mergeInfoMap[t] + if mi == nil { + mi = &mergeInfo{typ: t} + mergeInfoMap[t] = mi + } + return mi +} + +// merge merges src into dst assuming they are both of type *mi.typ. +func (mi *mergeInfo) merge(dst, src pointer) { + if dst.isNil() { + panic("proto: nil destination") + } + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&mi.initialized) == 0 { + mi.computeMergeInfo() + } + + for _, fi := range mi.fields { + sfp := src.offset(fi.field) + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string + continue + } + if fi.basicWidth > 0 { + switch { + case fi.basicWidth == 1 && !*sfp.toBool(): + continue + case fi.basicWidth == 4 && *sfp.toUint32() == 0: + continue + case fi.basicWidth == 8 && *sfp.toUint64() == 0: + continue + } + } + } + + dfp := dst.offset(fi.field) + fi.merge(dfp, sfp) + } + + // TODO: Make this faster? 
+ out := dst.asPointerTo(mi.typ).Elem() + in := src.asPointerTo(mi.typ).Elem() + if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + if mi.unrecognized.IsValid() { + if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { + *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) + } + } +} + +func (mi *mergeInfo) computeMergeInfo() { + mi.lock.Lock() + defer mi.lock.Unlock() + if mi.initialized != 0 { + return + } + t := mi.typ + n := t.NumField() + + props := GetProperties(t) + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + mfi := mergeFieldInfo{field: toField(&f)} + tf := f.Type + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + switch tf.Kind() { + case reflect.Ptr, reflect.Slice, reflect.String: + // As a special case, we assume slices and strings are pointers + // since we know that the first field in the SliceSlice or + // StringHeader is a data pointer. + mfi.isPointer = true + case reflect.Bool: + mfi.basicWidth = 1 + case reflect.Int32, reflect.Uint32, reflect.Float32: + mfi.basicWidth = 4 + case reflect.Int64, reflect.Uint64, reflect.Float64: + mfi.basicWidth = 8 + } + } + + // Unwrap tf to get at its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + tf.Name()) + } + + switch tf.Kind() { + case reflect.Int32: + switch { + case isSlice: // E.g., []int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Slice is not defined (see pointer_reflect.go). + /* + sfsp := src.toInt32Slice() + if *sfsp != nil { + dfsp := dst.toInt32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + */ + sfs := src.getInt32Slice() + if sfs != nil { + dfs := dst.getInt32Slice() + dfs = append(dfs, sfs...) + if dfs == nil { + dfs = []int32{} + } + dst.setInt32Slice(dfs) + } + } + case isPointer: // E.g., *int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). + /* + sfpp := src.toInt32Ptr() + if *sfpp != nil { + dfpp := dst.toInt32Ptr() + if *dfpp == nil { + *dfpp = Int32(**sfpp) + } else { + **dfpp = **sfpp + } + } + */ + sfp := src.getInt32Ptr() + if sfp != nil { + dfp := dst.getInt32Ptr() + if dfp == nil { + dst.setInt32Ptr(*sfp) + } else { + *dfp = *sfp + } + } + } + default: // E.g., int32 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt32(); v != 0 { + *dst.toInt32() = v + } + } + } + case reflect.Int64: + switch { + case isSlice: // E.g., []int64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toInt64Slice() + if *sfsp != nil { + dfsp := dst.toInt64Slice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []int64{} + } + } + } + case isPointer: // E.g., *int64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toInt64Ptr() + if *sfpp != nil { + dfpp := dst.toInt64Ptr() + if *dfpp == nil { + *dfpp = Int64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., int64 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt64(); v != 0 { + *dst.toInt64() = v + } + } + } + case reflect.Uint32: + switch { + case isSlice: // E.g., []uint32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint32Slice() + if *sfsp != nil { + dfsp := dst.toUint32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint32{} + } + } + } + case isPointer: // E.g., *uint32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint32Ptr() + if *sfpp != nil { + dfpp := dst.toUint32Ptr() + if *dfpp == nil { + *dfpp = Uint32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint32 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint32(); v != 0 { + *dst.toUint32() = v + } + } + } + case reflect.Uint64: + switch { + case isSlice: // E.g., []uint64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint64Slice() + if *sfsp != nil { + dfsp := dst.toUint64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint64{} + } + } + } + case isPointer: // E.g., *uint64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint64Ptr() + if *sfpp != nil { + dfpp := dst.toUint64Ptr() + if *dfpp == nil { + *dfpp = Uint64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint64 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint64(); v != 0 { + *dst.toUint64() = v + } + } + } + case reflect.Float32: + switch { + case isSlice: // E.g., []float32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat32Slice() + if *sfsp != nil { + dfsp := dst.toFloat32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float32{} + } + } + } + case isPointer: // E.g., *float32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat32Ptr() + if *sfpp != nil { + dfpp := dst.toFloat32Ptr() + if *dfpp == nil { + *dfpp = Float32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float32 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat32(); v != 0 { + *dst.toFloat32() = v + } + } + } + case reflect.Float64: + switch { + case isSlice: // E.g., []float64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat64Slice() + if *sfsp != nil { + dfsp := dst.toFloat64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float64{} + } + } + } + case isPointer: // E.g., *float64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat64Ptr() + if *sfpp != nil { + dfpp := dst.toFloat64Ptr() + if *dfpp == nil { + *dfpp = Float64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float64 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat64(); v != 0 { + *dst.toFloat64() = v + } + } + } + case reflect.Bool: + switch { + case isSlice: // E.g., []bool + mfi.merge = func(dst, src pointer) { + sfsp := src.toBoolSlice() + if *sfsp != nil { + dfsp := dst.toBoolSlice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []bool{} + } + } + } + case isPointer: // E.g., *bool + mfi.merge = func(dst, src pointer) { + sfpp := src.toBoolPtr() + if *sfpp != nil { + dfpp := dst.toBoolPtr() + if *dfpp == nil { + *dfpp = Bool(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., bool + mfi.merge = func(dst, src pointer) { + if v := *src.toBool(); v { + *dst.toBool() = v + } + } + } + case reflect.String: + switch { + case isSlice: // E.g., []string + mfi.merge = func(dst, src pointer) { + sfsp := src.toStringSlice() + if *sfsp != nil { + dfsp := dst.toStringSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []string{} + } + } + } + case isPointer: // E.g., *string + mfi.merge = func(dst, src pointer) { + sfpp := src.toStringPtr() + if *sfpp != nil { + dfpp := dst.toStringPtr() + if *dfpp == nil { + *dfpp = String(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., string + mfi.merge = func(dst, src pointer) { + if v := *src.toString(); v != "" { + *dst.toString() = v + } + } + } + case reflect.Slice: + isProto3 := props.Prop[i].proto3 + switch { + case isPointer: + panic("bad pointer in byte slice case in " + tf.Name()) + case tf.Elem().Kind() != reflect.Uint8: + panic("bad element kind in byte slice case in " + tf.Name()) + case isSlice: // E.g., [][]byte + mfi.merge = func(dst, src pointer) { + sbsp := src.toBytesSlice() + if *sbsp != nil { + dbsp := dst.toBytesSlice() + for _, sb := range *sbsp { + if sb == nil { + *dbsp = append(*dbsp, nil) + } else { + *dbsp = append(*dbsp, append([]byte{}, sb...)) + } + } + if *dbsp == nil { + *dbsp = [][]byte{} + } + } + } + default: // E.g., []byte + mfi.merge = func(dst, src pointer) { + sbp := src.toBytes() + if *sbp != nil { + dbp := dst.toBytes() + if !isProto3 || len(*sbp) > 0 { + *dbp = append([]byte{}, *sbp...) + } + } + } + } + case reflect.Struct: + switch { + case isSlice && !isPointer: // E.g. []pb.T + mergeInfo := getMergeInfo(tf) + zero := reflect.Zero(tf) + mfi.merge = func(dst, src pointer) { + // TODO: Make this faster? 
+ dstsp := dst.asPointerTo(f.Type) + dsts := dstsp.Elem() + srcs := src.asPointerTo(f.Type).Elem() + for i := 0; i < srcs.Len(); i++ { + dsts = reflect.Append(dsts, zero) + srcElement := srcs.Index(i).Addr() + dstElement := dsts.Index(dsts.Len() - 1).Addr() + mergeInfo.merge(valToPointer(dstElement), valToPointer(srcElement)) + } + if dsts.IsNil() { + dsts = reflect.MakeSlice(f.Type, 0, 0) + } + dstsp.Elem().Set(dsts) + } + case !isPointer: + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + mergeInfo.merge(dst, src) + } + case isSlice: // E.g., []*pb.T + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sps := src.getPointerSlice() + if sps != nil { + dps := dst.getPointerSlice() + for _, sp := range sps { + var dp pointer + if !sp.isNil() { + dp = valToPointer(reflect.New(tf)) + mergeInfo.merge(dp, sp) + } + dps = append(dps, dp) + } + if dps == nil { + dps = []pointer{} + } + dst.setPointerSlice(dps) + } + } + default: // E.g., *pb.T + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sp := src.getPointer() + if !sp.isNil() { + dp := dst.getPointer() + if dp.isNil() { + dp = valToPointer(reflect.New(tf)) + dst.setPointer(dp) + } + mergeInfo.merge(dp, sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic("bad pointer or slice in map case in " + tf.Name()) + default: // E.g., map[K]V + mfi.merge = func(dst, src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + dm := dst.asPointerTo(tf).Elem() + if dm.IsNil() { + dm.Set(reflect.MakeMap(tf)) + } + + switch tf.Elem().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(Clone(val.Interface().(Message))) + dm.SetMapIndex(key, val) + } + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + dm.SetMapIndex(key, val) + } + default: // Basic type (e.g., string) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + dm.SetMapIndex(key, val) + } + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic("bad pointer or slice in interface case in " + tf.Name()) + default: // E.g., interface{} + // TODO: Make this faster? + mfi.merge = func(dst, src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + du := dst.asPointerTo(tf).Elem() + typ := su.Elem().Type() + if du.IsNil() || du.Elem().Type() != typ { + du.Set(reflect.New(typ.Elem())) // Initialize interface if empty + } + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + dv := du.Elem().Elem().Field(0) + if dv.Kind() == reflect.Ptr && dv.IsNil() { + dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + Merge(dv.Interface().(Message), sv.Interface().(Message)) + case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) + dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) + default: // Basic type (e.g., string) + dv.Set(sv) + } + } + } + } + default: + panic(fmt.Sprintf("merger not found for type:%s", tf)) + } + mi.fields = append(mi.fields, mfi) + } + + mi.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + mi.unrecognized = toField(&f) + } + + atomic.StoreInt32(&mi.initialized, 1) +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go new file mode 100644 index 000000000..937229386 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go @@ -0,0 +1,2249 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// Unmarshal is the entry point from the generated .pb.go files. +// This function is not intended to be used by non-generated code. +// This function is not subject to any compatibility guarantee. +// msg contains a pointer to a protocol buffer struct. +// b is the data to be unmarshaled into the protocol buffer. +// a is a pointer to a place to store cached unmarshal information. +func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { + // Load the unmarshal information for this message type. + // The atomic load ensures memory consistency. + u := atomicLoadUnmarshalInfo(&a.unmarshal) + if u == nil { + // Slow path: find unmarshal info for msg, update a with it. + u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) + atomicStoreUnmarshalInfo(&a.unmarshal, u) + } + // Then do the unmarshaling. 
+ err := u.unmarshal(toPointer(&msg), b) + return err +} + +type unmarshalInfo struct { + typ reflect.Type // type of the protobuf struct + + // 0 = only typ field is initialized + // 1 = completely initialized + initialized int32 + lock sync.Mutex // prevents double initialization + dense []unmarshalFieldInfo // fields indexed by tag # + sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # + reqFields []string // names of required fields + reqMask uint64 // 1< 0 { + // Read tag and wire type. + // Special case 1 and 2 byte varints. + var x uint64 + if b[0] < 128 { + x = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + x = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + x, n = decodeVarint(b) + if n == 0 { + return io.ErrUnexpectedEOF + } + b = b[n:] + } + tag := x >> 3 + wire := int(x) & 7 + + // Dispatch on the tag to one of the unmarshal* functions below. + var f unmarshalFieldInfo + if tag < uint64(len(u.dense)) { + f = u.dense[tag] + } else { + f = u.sparse[tag] + } + if fn := f.unmarshal; fn != nil { + var err error + b, err = fn(b, m.offset(f.field), wire) + if err == nil { + reqMask |= f.reqMask + continue + } + if r, ok := err.(*RequiredNotSetError); ok { + // Remember this error, but keep parsing. We need to produce + // a full parse even if a required field is missing. + if errLater == nil { + errLater = r + } + reqMask |= f.reqMask + continue + } + if err != errInternalBadWireType { + if err == errInvalidUTF8 { + if errLater == nil { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + errLater = &invalidUTF8Error{fullName} + } + continue + } + return err + } + // Fragments with bad wire type are treated as unknown fields. + } + + // Unknown tag. + if !u.unrecognized.IsValid() { + // Don't keep unrecognized data; just skip it. + var err error + b, err = skipField(b, wire) + if err != nil { + return err + } + continue + } + // Keep unrecognized data around. + // maybe in extensions, maybe in the unrecognized field. + z := m.offset(u.unrecognized).toBytes() + var emap map[int32]Extension + var e Extension + for _, r := range u.extensionRanges { + if uint64(r.Start) <= tag && tag <= uint64(r.End) { + if u.extensions.IsValid() { + mp := m.offset(u.extensions).toExtensions() + emap = mp.extensionsWrite() + e = emap[int32(tag)] + z = &e.enc + break + } + if u.oldExtensions.IsValid() { + p := m.offset(u.oldExtensions).toOldExtensions() + emap = *p + if emap == nil { + emap = map[int32]Extension{} + *p = emap + } + e = emap[int32(tag)] + z = &e.enc + break + } + if u.bytesExtensions.IsValid() { + z = m.offset(u.bytesExtensions).toBytes() + break + } + panic("no extensions field available") + } + } + // Use wire type to skip data. + var err error + b0 := b + b, err = skipField(b, wire) + if err != nil { + return err + } + *z = encodeVarint(*z, tag<<3|uint64(wire)) + *z = append(*z, b0[:len(b0)-len(b)]...) + + if emap != nil { + emap[int32(tag)] = e + } + } + if reqMask != u.reqMask && errLater == nil { + // A required field of this message is missing. + for _, n := range u.reqFields { + if reqMask&1 == 0 { + errLater = &RequiredNotSetError{n} + } + reqMask >>= 1 + } + } + return errLater +} + +// computeUnmarshalInfo fills in u with information for use +// in unmarshaling protocol buffers of type u.typ. 
+func (u *unmarshalInfo) computeUnmarshalInfo() { + u.lock.Lock() + defer u.lock.Unlock() + if u.initialized != 0 { + return + } + t := u.typ + n := t.NumField() + + // Set up the "not found" value for the unrecognized byte buffer. + // This is the default for proto3. + u.unrecognized = invalidField + u.extensions = invalidField + u.oldExtensions = invalidField + u.bytesExtensions = invalidField + + // List of the generated type and offset for each oneof field. + type oneofField struct { + ityp reflect.Type // interface type of oneof field + field field // offset in containing message + } + var oneofFields []oneofField + + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Name == "XXX_unrecognized" { + // The byte slice used to hold unrecognized input is special. + if f.Type != reflect.TypeOf(([]byte)(nil)) { + panic("bad type for XXX_unrecognized field: " + f.Type.Name()) + } + u.unrecognized = toField(&f) + continue + } + if f.Name == "XXX_InternalExtensions" { + // Ditto here. + if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { + panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) + } + u.extensions = toField(&f) + if f.Tag.Get("protobuf_messageset") == "1" { + u.isMessageSet = true + } + continue + } + if f.Name == "XXX_extensions" { + // An older form of the extensions field. + if f.Type == reflect.TypeOf((map[int32]Extension)(nil)) { + u.oldExtensions = toField(&f) + continue + } else if f.Type == reflect.TypeOf(([]byte)(nil)) { + u.bytesExtensions = toField(&f) + continue + } + panic("bad type for XXX_extensions field: " + f.Type.Name()) + } + if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { + continue + } + + oneof := f.Tag.Get("protobuf_oneof") + if oneof != "" { + oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) + // The rest of oneof processing happens below. + continue + } + + tags := f.Tag.Get("protobuf") + tagArray := strings.Split(tags, ",") + if len(tagArray) < 2 { + panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) + } + tag, err := strconv.Atoi(tagArray[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tagArray[1]) + } + + name := "" + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + } + + // Extract unmarshaling function from the field (its type and tags). + unmarshal := fieldUnmarshaler(&f) + + // Required field? + var reqMask uint64 + if tagArray[2] == "req" { + bit := len(u.reqFields) + u.reqFields = append(u.reqFields, name) + reqMask = uint64(1) << uint(bit) + // TODO: if we have more than 64 required fields, we end up + // not verifying that all required fields are present. + // Fix this, perhaps using a count of required fields? + } + + // Store the info in the correct slot in the message. + u.setTag(tag, toField(&f), unmarshal, reqMask, name) + } + + // Find any types associated with oneof fields. 
+ // gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler + if len(oneofFields) > 0 { + var oneofImplementers []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + case oneofWrappersIface: + oneofImplementers = m.XXX_OneofWrappers() + } + for _, v := range oneofImplementers { + tptr := reflect.TypeOf(v) // *Msg_X + typ := tptr.Elem() // Msg_X + + f := typ.Field(0) // oneof implementers have one field + baseUnmarshal := fieldUnmarshaler(&f) + tags := strings.Split(f.Tag.Get("protobuf"), ",") + fieldNum, err := strconv.Atoi(tags[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tags[1]) + } + var name string + for _, tag := range tags { + if strings.HasPrefix(tag, "name=") { + name = strings.TrimPrefix(tag, "name=") + break + } + } + + // Find the oneof field that this struct implements. + // Might take O(n^2) to process all of the oneofs, but who cares. + for _, of := range oneofFields { + if tptr.Implements(of.ityp) { + // We have found the corresponding interface for this struct. + // That lets us know where this struct should be stored + // when we encounter it during unmarshaling. + unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) + u.setTag(fieldNum, of.field, unmarshal, 0, name) + } + } + + } + } + + // Get extension ranges, if any. + fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") + if fn.IsValid() { + if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() { + panic("a message with extensions, but no extensions field in " + t.Name()) + } + u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) + } + + // Explicitly disallow tag 0. This will ensure we flag an error + // when decoding a buffer of all zeros. Without this code, we + // would decode and skip an all-zero buffer of even length. + // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. + u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { + return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) + }, 0, "") + + // Set mask for required field check. + u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? + for len(u.dense) <= tag { + u.dense = append(u.dense, unmarshalFieldInfo{}) + } + u.dense[tag] = i + return + } + if u.sparse == nil { + u.sparse = map[uint64]unmarshalFieldInfo{} + } + u.sparse[uint64(tag)] = i +} + +// fieldUnmarshaler returns an unmarshaler for the given field. +func fieldUnmarshaler(f *reflect.StructField) unmarshaler { + if f.Type.Kind() == reflect.Map { + return makeUnmarshalMap(f) + } + return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) +} + +// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. 
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { + tagArray := strings.Split(tags, ",") + encoding := tagArray[0] + name := "unknown" + ctype := false + isTime := false + isDuration := false + isWktPointer := false + proto3 := false + validateUTF8 := true + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + if tag == "proto3" { + proto3 = true + } + if strings.HasPrefix(tag, "customtype=") { + ctype = true + } + if tag == "stdtime" { + isTime = true + } + if tag == "stdduration" { + isDuration = true + } + if tag == "wktptr" { + isWktPointer = true + } + } + validateUTF8 = validateUTF8 && proto3 + + // Figure out packaging (pointer, slice, or both) + slice := false + pointer := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + if ctype { + if reflect.PtrTo(t).Implements(customType) { + if slice { + return makeUnmarshalCustomSlice(getUnmarshalInfo(t), name) + } + if pointer { + return makeUnmarshalCustomPtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalCustom(getUnmarshalInfo(t), name) + } else { + panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) + } + } + + if isTime { + if pointer { + if slice { + return makeUnmarshalTimePtrSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalTimePtr(getUnmarshalInfo(t), name) + } + if slice { + return makeUnmarshalTimeSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalTime(getUnmarshalInfo(t), name) + } + + if isDuration { + if pointer { + if slice { + return makeUnmarshalDurationPtrSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalDurationPtr(getUnmarshalInfo(t), name) + } + if slice { + return makeUnmarshalDurationSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalDuration(getUnmarshalInfo(t), name) + } + + if isWktPointer { + switch t.Kind() { + case reflect.Float64: + if pointer { + if slice { + return makeStdDoubleValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdDoubleValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdDoubleValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdDoubleValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Float32: + if pointer { + if slice { + return makeStdFloatValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdFloatValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdFloatValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdFloatValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Int64: + if pointer { + if slice { + return makeStdInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt64ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Uint64: + if pointer { + if slice { + return makeStdUInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdUInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt64ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Int32: + if pointer { + if slice { + return makeStdInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return 
makeStdInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt32ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Uint32: + if pointer { + if slice { + return makeStdUInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdUInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt32ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Bool: + if pointer { + if slice { + return makeStdBoolValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBoolValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdBoolValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBoolValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.String: + if pointer { + if slice { + return makeStdStringValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdStringValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdStringValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdStringValueUnmarshaler(getUnmarshalInfo(t), name) + case uint8SliceType: + if pointer { + if slice { + return makeStdBytesValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBytesValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdBytesValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBytesValueUnmarshaler(getUnmarshalInfo(t), name) + default: + panic(fmt.Sprintf("unknown wktpointer type %#v", t)) + } + } + + // We'll never have both pointer and slice for basic types. + if pointer && slice && t.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + t.Name()) + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return unmarshalBoolPtr + } + if slice { + return unmarshalBoolSlice + } + return unmarshalBoolValue + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixedS32Ptr + } + if slice { + return unmarshalFixedS32Slice + } + return unmarshalFixedS32Value + case "varint": + // this could be int32 or enum + if pointer { + return unmarshalInt32Ptr + } + if slice { + return unmarshalInt32Slice + } + return unmarshalInt32Value + case "zigzag32": + if pointer { + return unmarshalSint32Ptr + } + if slice { + return unmarshalSint32Slice + } + return unmarshalSint32Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixedS64Ptr + } + if slice { + return unmarshalFixedS64Slice + } + return unmarshalFixedS64Value + case "varint": + if pointer { + return unmarshalInt64Ptr + } + if slice { + return unmarshalInt64Slice + } + return unmarshalInt64Value + case "zigzag64": + if pointer { + return unmarshalSint64Ptr + } + if slice { + return unmarshalSint64Slice + } + return unmarshalSint64Value + } + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixed32Ptr + } + if slice { + return unmarshalFixed32Slice + } + return unmarshalFixed32Value + case "varint": + if pointer { + return unmarshalUint32Ptr + } + if slice { + return unmarshalUint32Slice + } + return unmarshalUint32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixed64Ptr + } + if slice { + return unmarshalFixed64Slice + } + return unmarshalFixed64Value + 
case "varint": + if pointer { + return unmarshalUint64Ptr + } + if slice { + return unmarshalUint64Slice + } + return unmarshalUint64Value + } + case reflect.Float32: + if pointer { + return unmarshalFloat32Ptr + } + if slice { + return unmarshalFloat32Slice + } + return unmarshalFloat32Value + case reflect.Float64: + if pointer { + return unmarshalFloat64Ptr + } + if slice { + return unmarshalFloat64Slice + } + return unmarshalFloat64Value + case reflect.Map: + panic("map type in typeUnmarshaler in " + t.Name()) + case reflect.Slice: + if pointer { + panic("bad pointer in slice case in " + t.Name()) + } + if slice { + return unmarshalBytesSlice + } + return unmarshalBytesValue + case reflect.String: + if validateUTF8 { + if pointer { + return unmarshalUTF8StringPtr + } + if slice { + return unmarshalUTF8StringSlice + } + return unmarshalUTF8StringValue + } + if pointer { + return unmarshalStringPtr + } + if slice { + return unmarshalStringSlice + } + return unmarshalStringValue + case reflect.Struct: + // message or group field + if !pointer { + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessage(getUnmarshalInfo(t), name) + } + } + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) + case "group": + if slice { + return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) + } + } + panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) +} + +// Below are all the unmarshalers for individual fields of various types. + +func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64() = v + return b, nil +} + +func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64() = v + return b, nil +} + +func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := 
int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64() = v + return b, nil +} + +func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64Ptr() = &v + return b, nil +} + +func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + *f.toInt32() = v + return b, nil +} + +func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, 
io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + *f.toInt32() = v + return b, nil +} + +func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32() = v + return b, nil +} + +func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32Ptr() = &v + return b, nil +} + +func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64() = v + return b[8:], nil +} + +func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, 
io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64() = v + return b[8:], nil +} + +func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32() = v + return b[4:], nil +} + +func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32Ptr() = &v + return b[4:], nil +} + +func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, 
io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + *f.toInt32() = v + return b[4:], nil +} + +func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.setInt32Ptr(v) + return b[4:], nil +} + +func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + return b[4:], nil +} + +func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + // Note: any length varint is allowed, even though any sane + // encoder will use one byte. + // See https://github.com/golang/protobuf/issues/76 + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + // TODO: check if x>1? Tests seem to indicate no. 
+ v := x != 0 + *f.toBool() = v + return b[n:], nil +} + +func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + *f.toBoolPtr() = &v + return b[n:], nil +} + +func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + b = b[n:] + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + return b[n:], nil +} + +func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64() = v + return b[8:], nil +} + +func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64Ptr() = &v + return b[8:], nil +} + +func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32() = v + return b[4:], nil +} + +func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32Ptr() = &v + return b[4:], nil +} + +func unmarshalFloat32Slice(b []byte, 
f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + return b[x:], nil +} + +func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + return b[x:], nil +} + +func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + return b[x:], nil +} + +func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +var emptyBuf [0]byte + +func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // The use of append here is a trick which avoids the zeroing + 
// that would be required if we used a make/copy pair. + // We append to emptyBuf instead of nil because we want + // a non-nil result even when the length is 0. + v := append(emptyBuf[:], b[:x]...) + *f.toBytes() = v + return b[x:], nil +} + +func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := append(emptyBuf[:], b[:x]...) + s := f.toBytesSlice() + *s = append(*s, v) + return b[x:], nil +} + +func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[y:], err + } +} + +func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." 
+ r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[y:], err + } +} + +func makeUnmarshalMap(f *reflect.StructField) unmarshaler { + t := f.Type + kt := t.Key() + vt := t.Elem() + tagArray := strings.Split(f.Tag.Get("protobuf"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + for _, t := range tagArray { + if strings.HasPrefix(t, "customtype=") { + valTags = append(valTags, t) + } + if t == "stdtime" { + valTags = append(valTags, t) + } + if t == "stdduration" { + valTags = append(valTags, t) + } + if t == "wktptr" { + valTags = append(valTags, t) + } + } + unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) + unmarshalVal := typeUnmarshaler(vt, strings.Join(valTags, ",")) + return func(b []byte, f pointer, w int) ([]byte, error) { + // The map entry is a submessage. Figure out how big it is. + if w != WireBytes { + return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + r := b[x:] // unused data to return + b = b[:x] // data for map entry + + // Note: we could use #keys * #values ~= 200 functions + // to do map decoding without reflection. Probably not worth it. + // Maps will be somewhat slow. Oh well. + + // Read key and value from data. + var nerr nonFatal + k := reflect.New(kt) + v := reflect.New(vt) + for len(b) > 0 { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + wire := int(x) & 7 + b = b[n:] + + var err error + switch x >> 3 { + case 1: + b, err = unmarshalKey(b, valToPointer(k), wire) + case 2: + b, err = unmarshalVal(b, valToPointer(v), wire) + default: + err = errInternalBadWireType // skip unknown tag + } + + if nerr.Merge(err) { + continue + } + if err != errInternalBadWireType { + return nil, err + } + + // Skip past unknown fields. + b, err = skipField(b, wire) + if err != nil { + return nil, err + } + } + + // Get map, allocate if needed. + m := f.asPointerTo(t).Elem() // an addressable map[K]T + if m.IsNil() { + m.Set(reflect.MakeMap(t)) + } + + // Insert into map. + m.SetMapIndex(k.Elem(), v.Elem()) + + return r, nerr.E + } +} + +// makeUnmarshalOneof makes an unmarshaler for oneof fields. +// for: +// message Msg { +// oneof F { +// int64 X = 1; +// float64 Y = 2; +// } +// } +// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). +// ityp is the interface type of the oneof field (e.g. isMsg_F). +// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). +// Note that this function will be called once for each case in the oneof. +func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { + sf := typ.Field(0) + field0 := toField(&sf) + return func(b []byte, f pointer, w int) ([]byte, error) { + // Allocate holder for value. + v := reflect.New(typ) + + // Unmarshal data into holder. + // We unmarshal into the first field of the holder object. + var err error + var nerr nonFatal + b, err = unmarshal(b, valToPointer(v).offset(field0), w) + if !nerr.Merge(err) { + return nil, err + } + + // Write pointer to holder into target field. + f.asPointerTo(ityp).Elem().Set(v) + + return b, nerr.E + } +} + +// Error used by decode internally. +var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") + +// skipField skips past a field of type wire and returns the remaining bytes. 
+func skipField(b []byte, wire int) ([]byte, error) { + switch wire { + case WireVarint: + _, k := decodeVarint(b) + if k == 0 { + return b, io.ErrUnexpectedEOF + } + b = b[k:] + case WireFixed32: + if len(b) < 4 { + return b, io.ErrUnexpectedEOF + } + b = b[4:] + case WireFixed64: + if len(b) < 8 { + return b, io.ErrUnexpectedEOF + } + b = b[8:] + case WireBytes: + m, k := decodeVarint(b) + if k == 0 || uint64(len(b)-k) < m { + return b, io.ErrUnexpectedEOF + } + b = b[uint64(k)+m:] + case WireStartGroup: + _, i := findEndGroup(b) + if i == -1 { + return b, io.ErrUnexpectedEOF + } + b = b[i:] + default: + return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) + } + return b, nil +} + +// findEndGroup finds the index of the next EndGroup tag. +// Groups may be nested, so the "next" EndGroup tag is the first +// unpaired EndGroup. +// findEndGroup returns the indexes of the start and end of the EndGroup tag. +// Returns (-1,-1) if it can't find one. +func findEndGroup(b []byte) (int, int) { + depth := 1 + i := 0 + for { + x, n := decodeVarint(b[i:]) + if n == 0 { + return -1, -1 + } + j := i + i += n + switch x & 7 { + case WireVarint: + _, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + case WireFixed32: + if len(b)-4 < i { + return -1, -1 + } + i += 4 + case WireFixed64: + if len(b)-8 < i { + return -1, -1 + } + i += 8 + case WireBytes: + m, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + if uint64(len(b)-i) < m { + return -1, -1 + } + i += int(m) + case WireStartGroup: + depth++ + case WireEndGroup: + depth-- + if depth == 0 { + return j, i + } + default: + return -1, -1 + } + } +} + +// encodeVarint appends a varint-encoded integer to b and returns the result. +func encodeVarint(b []byte, x uint64) []byte { + for x >= 1<<7 { + b = append(b, byte(x&0x7f|0x80)) + x >>= 7 + } + return append(b, byte(x)) +} + +// decodeVarint reads a varint-encoded integer from b. +// Returns the decoded integer and the number of bytes read. +// If there is an error, it returns 0,0. 
+func decodeVarint(b []byte) (uint64, int) { + var x, y uint64 + if len(b) == 0 { + goto bad + } + x = uint64(b[0]) + if x < 0x80 { + return x, 1 + } + x -= 0x80 + + if len(b) <= 1 { + goto bad + } + y = uint64(b[1]) + x += y << 7 + if y < 0x80 { + return x, 2 + } + x -= 0x80 << 7 + + if len(b) <= 2 { + goto bad + } + y = uint64(b[2]) + x += y << 14 + if y < 0x80 { + return x, 3 + } + x -= 0x80 << 14 + + if len(b) <= 3 { + goto bad + } + y = uint64(b[3]) + x += y << 21 + if y < 0x80 { + return x, 4 + } + x -= 0x80 << 21 + + if len(b) <= 4 { + goto bad + } + y = uint64(b[4]) + x += y << 28 + if y < 0x80 { + return x, 5 + } + x -= 0x80 << 28 + + if len(b) <= 5 { + goto bad + } + y = uint64(b[5]) + x += y << 35 + if y < 0x80 { + return x, 6 + } + x -= 0x80 << 35 + + if len(b) <= 6 { + goto bad + } + y = uint64(b[6]) + x += y << 42 + if y < 0x80 { + return x, 7 + } + x -= 0x80 << 42 + + if len(b) <= 7 { + goto bad + } + y = uint64(b[7]) + x += y << 49 + if y < 0x80 { + return x, 8 + } + x -= 0x80 << 49 + + if len(b) <= 8 { + goto bad + } + y = uint64(b[8]) + x += y << 56 + if y < 0x80 { + return x, 9 + } + x -= 0x80 << 56 + + if len(b) <= 9 { + goto bad + } + y = uint64(b[9]) + x += y << 63 + if y < 2 { + return x, 10 + } + +bad: + return 0, 0 +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go new file mode 100644 index 000000000..00d6c7ad9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go @@ -0,0 +1,385 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "io" + "reflect" +) + +func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. 
Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f // gogo: changed from v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.New(sub.typ)) + m := s.Interface().(custom) + if err := m.Unmarshal(b[:x]); err != nil { + return nil, err + } + return b[x:], nil + } +} + +func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := reflect.New(sub.typ) + c := m.Interface().(custom) + if err := c.Unmarshal(b[:x]); err != nil { + return nil, err + } + v := valToPointer(m) + f.appendRef(v, sub.typ) + return b[x:], nil + } +} + +func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + + m := f.asPointerTo(sub.typ).Interface().(custom) + if err := m.Unmarshal(b[:x]); err != nil { + return nil, err + } + return b[x:], nil + } +} + +func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &timestamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(t)) + return b[x:], nil + } +} + +func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 {
+ return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &timestamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&t)) + return b[x:], nil + } +} + +func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &timestamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&t)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &timestamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(t)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&d)) + return b[x:], nil + } +} + +func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(d)) + return b[x:], nil + } +} + +func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&d)) +
slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(d)) + slice.Set(newSlice) + return b[x:], nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go new file mode 100644 index 000000000..87416afe9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text.go @@ -0,0 +1,930 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. + +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" + "sync" + "time" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. 
+type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. + return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Print("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. 
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return err + } + } + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if name == "XXX_NoUnkeyedLiteral" { + continue + } + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, v, props); err != nil { + return err + } + } else if err := tm.writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. 
+ keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, val, props.MapValProp); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. + if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, fv, props); err != nil { + return err + } + } else if err := tm.writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv + if pv.CanAddr() { + pv = sv.Addr() + } else { + pv = reflect.New(sv.Type()) + pv.Elem().Set(sv) + } + if _, err := extendable(pv.Interface()); err == nil { + if err := tm.writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + +// writeAny writes an arbitrary field. 
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + if props != nil { + if len(props.CustomType) > 0 { + custom, ok := v.Interface().(Marshaler) + if ok { + data, err := custom.Marshal() + if err != nil { + return err + } + if err := writeString(w, string(data)); err != nil { + return err + } + return nil + } + } else if len(props.CastType) > 0 { + if _, ok := v.Interface().(interface { + String() string + }); ok { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + _, err := fmt.Fprintf(w, "%d", v.Interface()) + return err + } + } + } else if props.StdTime { + t, ok := v.Interface().(time.Time) + if !ok { + return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface()) + } + tproto, err := timestampProto(t) + if err != nil { + return err + } + propsCopy := *props // Make a copy so that this is goroutine-safe + propsCopy.StdTime = false + err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy) + return err + } else if props.StdDuration { + d, ok := v.Interface().(time.Duration) + if !ok { + return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface()) + } + dproto := durationProto(d) + propsCopy := *props // Make a copy so that this is goroutine-safe + propsCopy.StdDuration = false + err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy) + return err + } + } + + // Floats have special cases. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. + switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Bytes())); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. + var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if v.CanAddr() { + // Calling v.Interface on a struct causes the reflect package to + // copy the entire struct. This is racy with the new Marshaler + // since we atomically update the XXX_sizecache. + // + // Thus, we retrieve a pointer to the struct if possible to avoid + // a race since v.Interface on the pointer doesn't copy the struct. + // + // If v is not addressable, then we are not worried about a race + // since it implies that the binary Marshaler cannot possibly be + // mutating this value. 
+ v = v.Addr() + } + if v.Type().Implements(textMarshalerType) { + text, err := v.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if err := tm.writeStruct(w, v); err != nil { + return err + } + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. + switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, ferr := fmt.Fprintf(w, "/* %v */\n", err) + return ferr + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, werr := w.Write(endBraceNewline); werr != nil { + return werr + } + continue + } + if _, ferr := fmt.Fprint(w, tag); ferr != nil { + return ferr + } + if wire != WireStartGroup { + if err = w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err = w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { 
return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + e := pv.Interface().(Message) + + var m map[int32]Extension + var mu sync.Locker + if em, ok := e.(extensionsBytes); ok { + eb := em.GetExtensions() + var err error + m, err = BytesToExtensionsMap(*eb) + if err != nil { + return err + } + mu = notLocker{} + } else if _, ok := e.(extendableProto); ok { + ep, _ := extendable(e) + m, mu = ep.extensionsRead() + if m == nil { + return nil + } + } + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + + mu.Lock() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + mu.Unlock() + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(e, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. + if !desc.repeated() { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes a given protocol buffer in text format. +// The only errors returned are from w. +func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: tm.Compact, + } + + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. 
+ v := reflect.Indirect(val) + if err := tm.writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// Text is the same as Marshal, but returns the string directly. +func (tm *TextMarshaler) Text(pb Message) string { + var buf bytes.Buffer + tm.Marshal(&buf, pb) + return buf.String() +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// TODO: consider removing some of the Marshal functions below. + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go new file mode 100644 index 000000000..1d6c6aa0e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text_gogo.go @@ -0,0 +1,57 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "fmt" + "reflect" +) + +func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error { + m, ok := enumStringMaps[props.Enum] + if !ok { + if err := tm.writeAny(w, v, props); err != nil { + return err + } + } + key := int32(0) + if v.Kind() == reflect.Ptr { + key = int32(v.Elem().Int()) + } else { + key = int32(v.Int()) + } + s, ok := m[key] + if !ok { + if err := tm.writeAny(w, v, props); err != nil { + return err + } + } + _, err := fmt.Fprint(w, s) + return err +} diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go new file mode 100644 index 000000000..f85c0cc81 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go @@ -0,0 +1,1018 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. 
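As a rough usage sketch of the text-format entry points vendored above and below — MarshalTextString and CompactTextString from text.go, UnmarshalText from text_parser.go — note that the pb.Example message type, its Name field, and its import path are hypothetical stand-ins for a generated protobuf type and are not part of this patch:

	package main

	import (
		"fmt"

		"github.com/gogo/protobuf/proto"

		pb "example.com/internal/examplepb" // hypothetical generated package
	)

	func main() {
		msg := &pb.Example{Name: "foo"} // hypothetical generated message

		// Multi-line text form, e.g. `name: "foo"` followed by a newline.
		text := proto.MarshalTextString(msg)
		fmt.Print(text)

		// One-line form produced by the compact marshaler.
		fmt.Println(proto.CompactTextString(msg))

		// Parse the text form back into a message; UnmarshalText resets out first.
		var out pb.Example
		if err := proto.UnmarshalText(text, &out); err != nil {
			fmt.Println("parse error:", err)
		}
	}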
+ +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += 
len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + ss := string(r) + s[:2] + s = s[2:] + i, err := strconv.ParseUint(ss, 8, 8) + if err != nil { + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) + } + return string([]byte{byte(i)}), s, nil + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) + } + ss := s[:n] + s = s[n:] + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(rune(i)), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. 
+ cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. +func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + sprops := GetProperties(st) + reqCount := sprops.reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension or an Any. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + extName, err := p.consumeExtName() + if err != nil { + return err + } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. 
+ messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue + } + + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. + for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == extName { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", extName) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. + var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(Message) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. 
+ if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. See b/28924776 for a time + // this went wrong. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.MapKeyProp); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + case "value": + if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.MapValProp); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + default: + p.back() + return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. + if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. 
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "" {
+ return p.errorf("unexpected EOF")
+ }
+ if len(props.CustomType) > 0 {
+ if props.Repeated {
+ t := reflect.TypeOf(v.Interface())
+ if t.Kind() == reflect.Slice {
+ tc := reflect.TypeOf(new(Marshaler))
+ ok := t.Elem().Implements(tc.Elem())
+ if ok {
+ fv := v
+ flen := fv.Len()
+ if flen == fv.Cap() {
+ nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1)
+ reflect.Copy(nav, fv)
+ fv.Set(nav)
+ }
+ fv.SetLen(flen + 1)
+
+ // Read one.
+ p.back()
+ return p.readAny(fv.Index(flen), props)
+ }
+ }
+ }
+ if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+ custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler)
+ err := custom.Unmarshal([]byte(tok.unquoted))
+ if err != nil {
+ return p.errorf("%v %v: %v", err, v.Type(), tok.value)
+ }
+ v.Set(reflect.ValueOf(custom))
+ } else {
+ custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler)
+ err := custom.Unmarshal([]byte(tok.unquoted))
+ if err != nil {
+ return p.errorf("%v %v: %v", err, v.Type(), tok.value)
+ }
+ v.Set(reflect.Indirect(reflect.ValueOf(custom)))
+ }
+ return nil
+ }
+ if props.StdTime {
+ fv := v
+ p.back()
+ props.StdTime = false
+ tproto := &timestamp{}
+ err := p.readAny(reflect.ValueOf(tproto).Elem(), props)
+ props.StdTime = true
+ if err != nil {
+ return err
+ }
+ tim, err := timestampFromProto(tproto)
+ if err != nil {
+ return err
+ }
+ if props.Repeated {
+ t := reflect.TypeOf(v.Interface())
+ if t.Kind() == reflect.Slice {
+ if t.Elem().Kind() == reflect.Ptr {
+ ts := fv.Interface().([]*time.Time)
+ ts = append(ts, &tim)
+ fv.Set(reflect.ValueOf(ts))
+ return nil
+ } else {
+ ts := fv.Interface().([]time.Time)
+ ts = append(ts, tim)
+ fv.Set(reflect.ValueOf(ts))
+ return nil
+ }
+ }
+ }
+ if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+ v.Set(reflect.ValueOf(&tim))
+ } else {
+ v.Set(reflect.Indirect(reflect.ValueOf(&tim)))
+ }
+ return nil
+ }
+ if props.StdDuration {
+ fv := v
+ p.back()
+ props.StdDuration = false
+ dproto := &duration{}
+ err := p.readAny(reflect.ValueOf(dproto).Elem(), props)
+ props.StdDuration = true
+ if err != nil {
+ return err
+ }
+ dur, err := durationFromProto(dproto)
+ if err != nil {
+ return err
+ }
+ if props.Repeated {
+ t := reflect.TypeOf(v.Interface())
+ if t.Kind() == reflect.Slice {
+ if t.Elem().Kind() == reflect.Ptr {
+ ds := fv.Interface().([]*time.Duration)
+ ds = append(ds, &dur)
+ fv.Set(reflect.ValueOf(ds))
+ return nil
+ } else {
+ ds := fv.Interface().([]time.Duration)
+ ds = append(ds, dur)
+ fv.Set(reflect.ValueOf(ds))
+ return nil
+ }
+ }
+ }
+ if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+ v.Set(reflect.ValueOf(&dur))
+ } else {
+ v.Set(reflect.Indirect(reflect.ValueOf(&dur)))
+ }
+ return nil
+ }
+ switch fv := v; fv.Kind() {
+ case reflect.Slice:
+ at := v.Type()
+ if at.Elem().Kind() == reflect.Uint8 {
+ // Special case for []byte
+ if tok.value[0] != '"' && tok.value[0] != '\'' {
+ // Deliberately written out here, as the error after
+ // this switch statement would write "invalid []byte: ...",
+ // which is not as user-friendly.
+ return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + err := p.readAny(fv.Index(fv.Len()-1), props) + if err != nil { + return err + } + ntok := p.next() + if ntok.err != nil { + return ntok.err + } + if ntok.value == "]" { + break + } + if ntok.value != "," { + return p.errorf("Expected ']' or ',' found %q", ntok.value) + } + } + return nil + } + // One value of the repeated field. + p.back() + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + return p.readAny(fv.Index(fv.Len()-1), props) + case reflect.Bool: + // true/1/t/True or false/f/0/False. + switch tok.value { + case "true", "1", "t", "True": + fv.SetBool(true) + return nil + case "false", "0", "f", "False": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". + if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int8: + if x, err := strconv.ParseInt(tok.value, 0, 8); err == nil { + fv.SetInt(x) + return nil + } + case reflect.Int16: + if x, err := strconv.ParseInt(tok.value, 0, 16); err == nil { + fv.SetInt(x) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint8: + if x, err := strconv.ParseUint(tok.value, 0, 8); err == nil { + fv.SetUint(x) + return nil + } + case reflect.Uint16: + if x, err := strconv.ParseUint(tok.value, 0, 16); err == nil { + fv.SetUint(x) + return nil + } + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(uint64(x)) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. 
+func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + return um.UnmarshalText([]byte(s)) + } + pb.Reset() + v := reflect.ValueOf(pb) + return newTextParser(s).readStruct(v.Elem(), "") +} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go new file mode 100644 index 000000000..9324f6542 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/timestamp.go @@ -0,0 +1,113 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. 
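For intuition about the range validateTimestamp enforces and the Seconds/Nanos conversion the helpers below perform, here is a small standalone sketch; it uses only the standard library and does not call the unexported helpers themselves:

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		// The bounds documented above, reconstructed from time.Date.
		minValidSeconds := time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix()     // -62135596800
		maxValidSeconds := time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix() // 253402300800
		fmt.Println(minValidSeconds, maxValidSeconds)

		// Round trip time.Time -> (seconds, nanos) -> time.Time, mirroring
		// what timestampProto and timestampFromProto do internally.
		t := time.Now().UTC()
		seconds := t.Unix()
		nanos := int32(t.Sub(time.Unix(seconds, 0)))
		back := time.Unix(seconds, int64(nanos)).UTC()
		fmt.Println(t.Equal(back)) // true
	}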
+func validateTimestamp(ts *timestamp) error {
+ if ts == nil {
+ return errors.New("timestamp: nil Timestamp")
+ }
+ if ts.Seconds < minValidSeconds {
+ return fmt.Errorf("timestamp: %#v before 0001-01-01", ts)
+ }
+ if ts.Seconds >= maxValidSeconds {
+ return fmt.Errorf("timestamp: %#v after 10000-01-01", ts)
+ }
+ if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+ return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts)
+ }
+ return nil
+}
+
+// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if Timestamp returns an error, the first return value
+// is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+func timestampFromProto(ts *timestamp) (time.Time, error) {
+ // Don't return the zero value on error, because corresponds to a valid
+ // timestamp. Instead return whatever time.Unix gives us.
+ var t time.Time
+ if ts == nil {
+ t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+ } else {
+ t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+ }
+ return t, validateTimestamp(ts)
+}
+
+// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+func timestampProto(t time.Time) (*timestamp, error) {
+ seconds := t.Unix()
+ nanos := int32(t.Sub(time.Unix(seconds, 0)))
+ ts := &timestamp{
+ Seconds: seconds,
+ Nanos: nanos,
+ }
+ if err := validateTimestamp(ts); err != nil {
+ return nil, err
+ }
+ return ts, nil
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
new file mode 100644
index 000000000..38439fa99
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
@@ -0,0 +1,49 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2016, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() + +type timestamp struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *timestamp) Reset() { *m = timestamp{} } +func (*timestamp) ProtoMessage() {} +func (*timestamp) String() string { return "timestamp" } + +func init() { + RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp") +} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers.go b/vendor/github.com/gogo/protobuf/proto/wrappers.go new file mode 100644 index 000000000..b175d1b64 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/wrappers.go @@ -0,0 +1,1888 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "io" + "reflect" +) + +func makeStdDoubleValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*float64) + v := &float64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdDoubleValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) + v := &float64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdDoubleValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float64) + v := &float64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float64) + v := &float64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdDoubleValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdDoubleValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdDoubleValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdDoubleValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdDoubleValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdFloatValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*float32) + v := &float32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdFloatValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) + v := &float32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdFloatValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float32) + v := &float32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float32) + v := &float32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdFloatValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdFloatValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdFloatValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdFloatValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdFloatValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*int64) + v := &int64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) + v := &int64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int64) + v := &int64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int64) + v := &int64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*uint64) + v := &uint64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdUInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) + v := &uint64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdUInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint64) + v := &uint64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint64) + v := &uint64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdUInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdUInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdUInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdUInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*int32) + v := &int32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) + v := &int32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int32) + v := &int32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int32) + v := &int32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*uint32) + v := &uint32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdUInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) + v := &uint32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdUInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint32) + v := &uint32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint32) + v := &uint32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdUInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdUInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdUInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdUInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBoolValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*bool) + v := &boolValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdBoolValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) + v := &boolValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdBoolValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(bool) + v := &boolValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(bool) + v := &boolValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdBoolValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdBoolValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdBoolValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdBoolValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBoolValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdStringValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*string) + v := &stringValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdStringValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) + v := &stringValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdStringValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(string) + v := &stringValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(string) + v := &stringValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdStringValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdStringValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdStringValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdStringValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdStringValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBytesValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*[]byte) + v := &bytesValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdBytesValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) + v := &bytesValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdBytesValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().([]byte) + v := &bytesValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().([]byte) + v := &bytesValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdBytesValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdBytesValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdBytesValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdBytesValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBytesValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go new file mode 100644 index 000000000..c1cf7bf85 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go @@ -0,0 +1,113 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +type float64Value struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *float64Value) Reset() { *m = float64Value{} } +func (*float64Value) ProtoMessage() {} +func (*float64Value) String() string { return "float64" } + +type float32Value struct { + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *float32Value) Reset() { *m = float32Value{} } +func (*float32Value) ProtoMessage() {} +func (*float32Value) String() string { return "float32" } + +type int64Value struct { + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *int64Value) Reset() { *m = int64Value{} } +func (*int64Value) ProtoMessage() {} +func (*int64Value) String() string { return "int64" } + +type uint64Value struct { + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *uint64Value) Reset() { *m = uint64Value{} } +func (*uint64Value) ProtoMessage() {} +func (*uint64Value) String() string { return "uint64" } + +type int32Value struct { + Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *int32Value) Reset() { *m = int32Value{} } +func (*int32Value) ProtoMessage() {} +func (*int32Value) String() string { return "int32" } + +type uint32Value struct { + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *uint32Value) Reset() { *m = uint32Value{} } +func (*uint32Value) ProtoMessage() {} +func (*uint32Value) String() string { return "uint32" } + +type boolValue struct { + Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *boolValue) Reset() { *m = boolValue{} } +func (*boolValue) ProtoMessage() {} +func (*boolValue) String() string { return "bool" } + +type stringValue struct { + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *stringValue) Reset() { *m = stringValue{} } +func (*stringValue) ProtoMessage() {} +func (*stringValue) String() string { return "string" } + +type bytesValue struct { + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *bytesValue) Reset() { *m = bytesValue{} } +func (*bytesValue) ProtoMessage() {} +func (*bytesValue) String() string { return "[]byte" } + +func init() { + RegisterType((*float64Value)(nil), "gogo.protobuf.proto.DoubleValue") + RegisterType((*float32Value)(nil), "gogo.protobuf.proto.FloatValue") + RegisterType((*int64Value)(nil), "gogo.protobuf.proto.Int64Value") + RegisterType((*uint64Value)(nil), "gogo.protobuf.proto.UInt64Value") + RegisterType((*int32Value)(nil), "gogo.protobuf.proto.Int32Value") + RegisterType((*uint32Value)(nil), "gogo.protobuf.proto.UInt32Value") + RegisterType((*boolValue)(nil), "gogo.protobuf.proto.BoolValue") + RegisterType((*stringValue)(nil), "gogo.protobuf.proto.StringValue") + RegisterType((*bytesValue)(nil), 
"gogo.protobuf.proto.BytesValue") +} diff --git a/vendor/golang.org/x/sync/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS similarity index 100% rename from vendor/golang.org/x/sync/AUTHORS rename to vendor/github.com/golang/protobuf/AUTHORS diff --git a/vendor/golang.org/x/sync/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS similarity index 100% rename from vendor/golang.org/x/sync/CONTRIBUTORS rename to vendor/github.com/golang/protobuf/CONTRIBUTORS diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE new file mode 100644 index 000000000..0f646931a --- /dev/null +++ b/vendor/github.com/golang/protobuf/LICENSE @@ -0,0 +1,28 @@ +Copyright 2010 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/golang/protobuf/proto/buffer.go b/vendor/github.com/golang/protobuf/proto/buffer.go new file mode 100644 index 000000000..e810e6fea --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/buffer.go @@ -0,0 +1,324 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "errors" + "fmt" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + WireVarint = 0 + WireFixed32 = 5 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 +) + +// EncodeVarint returns the varint encoded bytes of v. +func EncodeVarint(v uint64) []byte { + return protowire.AppendVarint(nil, v) +} + +// SizeVarint returns the length of the varint encoded bytes of v. +// This is equal to len(EncodeVarint(v)). +func SizeVarint(v uint64) int { + return protowire.SizeVarint(v) +} + +// DecodeVarint parses a varint encoded integer from b, +// returning the integer value and the length of the varint. +// It returns (0, 0) if there is a parse error. 
+func DecodeVarint(b []byte) (uint64, int) { + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, 0 + } + return v, n +} + +// Buffer is a buffer for encoding and decoding the protobuf wire format. +// It may be reused between invocations to reduce memory usage. +type Buffer struct { + buf []byte + idx int + deterministic bool +} + +// NewBuffer allocates a new Buffer initialized with buf, +// where the contents of buf are considered the unread portion of the buffer. +func NewBuffer(buf []byte) *Buffer { + return &Buffer{buf: buf} +} + +// SetDeterministic specifies whether to use deterministic serialization. +// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. +// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexographical order. This is an implementation detail and +// subject to change. +func (b *Buffer) SetDeterministic(deterministic bool) { + b.deterministic = deterministic +} + +// SetBuf sets buf as the internal buffer, +// where the contents of buf are considered the unread portion of the buffer. +func (b *Buffer) SetBuf(buf []byte) { + b.buf = buf + b.idx = 0 +} + +// Reset clears the internal buffer of all written and unread data. +func (b *Buffer) Reset() { + b.buf = b.buf[:0] + b.idx = 0 +} + +// Bytes returns the internal buffer. +func (b *Buffer) Bytes() []byte { + return b.buf +} + +// Unread returns the unread portion of the buffer. +func (b *Buffer) Unread() []byte { + return b.buf[b.idx:] +} + +// Marshal appends the wire-format encoding of m to the buffer. +func (b *Buffer) Marshal(m Message) error { + var err error + b.buf, err = marshalAppend(b.buf, m, b.deterministic) + return err +} + +// Unmarshal parses the wire-format message in the buffer and +// places the decoded results in m. +// It does not reset m before unmarshaling. +func (b *Buffer) Unmarshal(m Message) error { + err := UnmarshalMerge(b.Unread(), m) + b.idx = len(b.buf) + return err +} + +type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields } + +func (m *unknownFields) String() string { panic("not implemented") } +func (m *unknownFields) Reset() { panic("not implemented") } +func (m *unknownFields) ProtoMessage() { panic("not implemented") } + +// DebugPrint dumps the encoded bytes of b with a header and footer including s +// to stdout. This is only intended for debugging. +func (*Buffer) DebugPrint(s string, b []byte) { + m := MessageReflect(new(unknownFields)) + m.SetUnknown(b) + b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface()) + fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s) +} + +// EncodeVarint appends an unsigned varint encoding to the buffer. 
+func (b *Buffer) EncodeVarint(v uint64) error { + b.buf = protowire.AppendVarint(b.buf, v) + return nil +} + +// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer. +func (b *Buffer) EncodeZigzag32(v uint64) error { + return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) +} + +// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer. +func (b *Buffer) EncodeZigzag64(v uint64) error { + return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63)))) +} + +// EncodeFixed32 appends a 32-bit little-endian integer to the buffer. +func (b *Buffer) EncodeFixed32(v uint64) error { + b.buf = protowire.AppendFixed32(b.buf, uint32(v)) + return nil +} + +// EncodeFixed64 appends a 64-bit little-endian integer to the buffer. +func (b *Buffer) EncodeFixed64(v uint64) error { + b.buf = protowire.AppendFixed64(b.buf, uint64(v)) + return nil +} + +// EncodeRawBytes appends a length-prefixed raw bytes to the buffer. +func (b *Buffer) EncodeRawBytes(v []byte) error { + b.buf = protowire.AppendBytes(b.buf, v) + return nil +} + +// EncodeStringBytes appends a length-prefixed raw bytes to the buffer. +// It does not validate whether v contains valid UTF-8. +func (b *Buffer) EncodeStringBytes(v string) error { + b.buf = protowire.AppendString(b.buf, v) + return nil +} + +// EncodeMessage appends a length-prefixed encoded message to the buffer. +func (b *Buffer) EncodeMessage(m Message) error { + var err error + b.buf = protowire.AppendVarint(b.buf, uint64(Size(m))) + b.buf, err = marshalAppend(b.buf, m, b.deterministic) + return err +} + +// DecodeVarint consumes an encoded unsigned varint from the buffer. +func (b *Buffer) DecodeVarint() (uint64, error) { + v, n := protowire.ConsumeVarint(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer. +func (b *Buffer) DecodeZigzag32() (uint64, error) { + v, err := b.DecodeVarint() + if err != nil { + return 0, err + } + return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil +} + +// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer. +func (b *Buffer) DecodeZigzag64() (uint64, error) { + v, err := b.DecodeVarint() + if err != nil { + return 0, err + } + return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil +} + +// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer. +func (b *Buffer) DecodeFixed32() (uint64, error) { + v, n := protowire.ConsumeFixed32(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer. +func (b *Buffer) DecodeFixed64() (uint64, error) { + v, n := protowire.ConsumeFixed64(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer. +// If alloc is specified, it returns a copy the raw bytes +// rather than a sub-slice of the buffer. +func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) { + v, n := protowire.ConsumeBytes(b.buf[b.idx:]) + if n < 0 { + return nil, protowire.ParseError(n) + } + b.idx += n + if alloc { + v = append([]byte(nil), v...) + } + return v, nil +} + +// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer. +// It does not validate whether the raw bytes contain valid UTF-8. 
+func (b *Buffer) DecodeStringBytes() (string, error) { + v, n := protowire.ConsumeString(b.buf[b.idx:]) + if n < 0 { + return "", protowire.ParseError(n) + } + b.idx += n + return v, nil +} + +// DecodeMessage consumes a length-prefixed message from the buffer. +// It does not reset m before unmarshaling. +func (b *Buffer) DecodeMessage(m Message) error { + v, err := b.DecodeRawBytes(false) + if err != nil { + return err + } + return UnmarshalMerge(v, m) +} + +// DecodeGroup consumes a message group from the buffer. +// It assumes that the start group marker has already been consumed and +// consumes all bytes until (and including the end group marker). +// It does not reset m before unmarshaling. +func (b *Buffer) DecodeGroup(m Message) error { + v, n, err := consumeGroup(b.buf[b.idx:]) + if err != nil { + return err + } + b.idx += n + return UnmarshalMerge(v, m) +} + +// consumeGroup parses b until it finds an end group marker, returning +// the raw bytes of the message (excluding the end group marker) and the +// the total length of the message (including the end group marker). +func consumeGroup(b []byte) ([]byte, int, error) { + b0 := b + depth := 1 // assume this follows a start group marker + for { + _, wtyp, tagLen := protowire.ConsumeTag(b) + if tagLen < 0 { + return nil, 0, protowire.ParseError(tagLen) + } + b = b[tagLen:] + + var valLen int + switch wtyp { + case protowire.VarintType: + _, valLen = protowire.ConsumeVarint(b) + case protowire.Fixed32Type: + _, valLen = protowire.ConsumeFixed32(b) + case protowire.Fixed64Type: + _, valLen = protowire.ConsumeFixed64(b) + case protowire.BytesType: + _, valLen = protowire.ConsumeBytes(b) + case protowire.StartGroupType: + depth++ + case protowire.EndGroupType: + depth-- + default: + return nil, 0, errors.New("proto: cannot parse reserved wire type") + } + if valLen < 0 { + return nil, 0, protowire.ParseError(valLen) + } + b = b[valLen:] + + if depth == 0 { + return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/defaults.go b/vendor/github.com/golang/protobuf/proto/defaults.go new file mode 100644 index 000000000..d399bf069 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/defaults.go @@ -0,0 +1,63 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" +) + +// SetDefaults sets unpopulated scalar fields to their default values. +// Fields within a oneof are not set even if they have a default value. +// SetDefaults is recursively called upon any populated message fields. +func SetDefaults(m Message) { + if m != nil { + setDefaults(MessageReflect(m)) + } +} + +func setDefaults(m protoreflect.Message) { + fds := m.Descriptor().Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if !m.Has(fd) { + if fd.HasDefault() && fd.ContainingOneof() == nil { + v := fd.Default() + if fd.Kind() == protoreflect.BytesKind { + v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes + } + m.Set(fd, v) + } + continue + } + } + + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + switch { + // Handle singular message. + case fd.Cardinality() != protoreflect.Repeated: + if fd.Message() != nil { + setDefaults(m.Get(fd).Message()) + } + // Handle list of messages. 
+ case fd.IsList(): + if fd.Message() != nil { + ls := m.Get(fd).List() + for i := 0; i < ls.Len(); i++ { + setDefaults(ls.Get(i).Message()) + } + } + // Handle map of messages. + case fd.IsMap(): + if fd.MapValue().Message() != nil { + ms := m.Get(fd).Map() + ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { + setDefaults(v.Message()) + return true + }) + } + } + return true + }) +} diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go new file mode 100644 index 000000000..e8db57e09 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/deprecated.go @@ -0,0 +1,113 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "encoding/json" + "errors" + "fmt" + "strconv" + + protoV2 "google.golang.org/protobuf/proto" +) + +var ( + // Deprecated: No longer returned. + ErrNil = errors.New("proto: Marshal called with nil") + + // Deprecated: No longer returned. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") + + // Deprecated: No longer returned. + ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") +) + +// Deprecated: Do not use. +type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } + +// Deprecated: Do not use. +func GetStats() Stats { return Stats{} } + +// Deprecated: Do not use. +func MarshalMessageSet(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: Do not use. +func UnmarshalMessageSet([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: Do not use. +func MarshalMessageSetJSON(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: Do not use. +func UnmarshalMessageSetJSON([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: Do not use. +func RegisterMessageSetType(Message, int32, string) {} + +// Deprecated: Do not use. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// Deprecated: Do not use. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// Deprecated: Do not use; this type existed for intenal-use only. +type InternalMessageInfo struct{} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) DiscardUnknown(m Message) { + DiscardUnknown(m) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) { + return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m)) +} + +// Deprecated: Do not use; this method existed for intenal-use only. 
+func (*InternalMessageInfo) Merge(dst, src Message) { + protoV2.Merge(MessageV2(dst), MessageV2(src)) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Size(m Message) int { + return protoV2.Size(MessageV2(m)) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error { + return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m)) +} diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go new file mode 100644 index 000000000..2187e877f --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/discard.go @@ -0,0 +1,58 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" +) + +// DiscardUnknown recursively discards all unknown fields from this message +// and all embedded messages. +// +// When unmarshaling a message with unrecognized fields, the tags and values +// of such fields are preserved in the Message. This allows a later call to +// marshal to be able to produce a message that continues to have those +// unrecognized fields. To avoid this, DiscardUnknown is used to +// explicitly clear the unknown fields after unmarshaling. +func DiscardUnknown(m Message) { + if m != nil { + discardUnknown(MessageReflect(m)) + } +} + +func discardUnknown(m protoreflect.Message) { + m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool { + switch { + // Handle singular message. + case fd.Cardinality() != protoreflect.Repeated: + if fd.Message() != nil { + discardUnknown(m.Get(fd).Message()) + } + // Handle list of messages. + case fd.IsList(): + if fd.Message() != nil { + ls := m.Get(fd).List() + for i := 0; i < ls.Len(); i++ { + discardUnknown(ls.Get(i).Message()) + } + } + // Handle map of messages. + case fd.IsMap(): + if fd.MapValue().Message() != nil { + ms := m.Get(fd).Map() + ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { + discardUnknown(v.Message()) + return true + }) + } + } + return true + }) + + // Discard unknown fields. + if len(m.GetUnknown()) > 0 { + m.SetUnknown(nil) + } +} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go new file mode 100644 index 000000000..42fc120c9 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -0,0 +1,356 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "errors" + "fmt" + "reflect" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) + +type ( + // ExtensionDesc represents an extension descriptor and + // is used to interact with an extension field in a message. + // + // Variables of this type are generated in code by protoc-gen-go. + ExtensionDesc = protoimpl.ExtensionInfo + + // ExtensionRange represents a range of message extensions. + // Used in code generated by protoc-gen-go. 
+ ExtensionRange = protoiface.ExtensionRangeV1 + + // Deprecated: Do not use; this is an internal type. + Extension = protoimpl.ExtensionFieldV1 + + // Deprecated: Do not use; this is an internal type. + XXX_InternalExtensions = protoimpl.ExtensionFields +) + +// ErrMissingExtension reports whether the extension was not present. +var ErrMissingExtension = errors.New("proto: missing extension") + +var errNotExtendable = errors.New("proto: not an extendable proto.Message") + +// HasExtension reports whether the extension field is present in m +// either as an explicitly populated field or as an unknown field. +func HasExtension(m Message, xt *ExtensionDesc) (has bool) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return false + } + + // Check whether any populated known field matches the field number. + xtd := xt.TypeDescriptor() + if isValidExtension(mr.Descriptor(), xtd) { + has = mr.Has(xtd) + } else { + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + has = int32(fd.Number()) == xt.Field + return !has + }) + } + + // Check whether any unknown field matches the field number. + for b := mr.GetUnknown(); !has && len(b) > 0; { + num, _, n := protowire.ConsumeField(b) + has = int32(num) == xt.Field + b = b[n:] + } + return has +} + +// ClearExtension removes the extension field from m +// either as an explicitly populated field or as an unknown field. +func ClearExtension(m Message, xt *ExtensionDesc) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return + } + + xtd := xt.TypeDescriptor() + if isValidExtension(mr.Descriptor(), xtd) { + mr.Clear(xtd) + } else { + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + if int32(fd.Number()) == xt.Field { + mr.Clear(fd) + return false + } + return true + }) + } + clearUnknown(mr, fieldNum(xt.Field)) +} + +// ClearAllExtensions clears all extensions from m. +// This includes populated fields and unknown fields in the extension range. +func ClearAllExtensions(m Message) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return + } + + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + if fd.IsExtension() { + mr.Clear(fd) + } + return true + }) + clearUnknown(mr, mr.Descriptor().ExtensionRanges()) +} + +// GetExtension retrieves a proto2 extended field from m. +// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. +// +// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes for the extension field. +func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return nil, errNotExtendable + } + + // Retrieve the unknown fields for this extension field. + var bo protoreflect.RawFields + for bi := mr.GetUnknown(); len(bi) > 0; { + num, _, n := protowire.ConsumeField(bi) + if int32(num) == xt.Field { + bo = append(bo, bi[:n]...) + } + bi = bi[n:] + } + + // For type incomplete descriptors, only retrieve the unknown fields. + if xt.ExtensionType == nil { + return []byte(bo), nil + } + + // If the extension field only exists as unknown fields, unmarshal it. 
+ // This is rarely done since proto.Unmarshal eagerly unmarshals extensions. + xtd := xt.TypeDescriptor() + if !isValidExtension(mr.Descriptor(), xtd) { + return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) + } + if !mr.Has(xtd) && len(bo) > 0 { + m2 := mr.New() + if err := (proto.UnmarshalOptions{ + Resolver: extensionResolver{xt}, + }.Unmarshal(bo, m2.Interface())); err != nil { + return nil, err + } + if m2.Has(xtd) { + mr.Set(xtd, m2.Get(xtd)) + clearUnknown(mr, fieldNum(xt.Field)) + } + } + + // Check whether the message has the extension field set or a default. + var pv protoreflect.Value + switch { + case mr.Has(xtd): + pv = mr.Get(xtd) + case xtd.HasDefault(): + pv = xtd.Default() + default: + return nil, ErrMissingExtension + } + + v := xt.InterfaceOf(pv) + rv := reflect.ValueOf(v) + if isScalarKind(rv.Kind()) { + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() + } + return v, nil +} + +// extensionResolver is a custom extension resolver that stores a single +// extension type that takes precedence over the global registry. +type extensionResolver struct{ xt protoreflect.ExtensionType } + +func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field { + return r.xt, nil + } + return protoregistry.GlobalTypes.FindExtensionByName(field) +} + +func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field { + return r.xt, nil + } + return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) +} + +// GetExtensions returns a list of the extensions values present in m, +// corresponding with the provided list of extension descriptors, xts. +// If an extension is missing in m, the corresponding value is nil. +func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return nil, errNotExtendable + } + + vs := make([]interface{}, len(xts)) + for i, xt := range xts { + v, err := GetExtension(m, xt) + if err != nil { + if err == ErrMissingExtension { + continue + } + return vs, err + } + vs[i] = v + } + return vs, nil +} + +// SetExtension sets an extension field in m to the provided value. +func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return errNotExtendable + } + + rv := reflect.ValueOf(v) + if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) { + return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType) + } + if rv.Kind() == reflect.Ptr { + if rv.IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", v) + } + if isScalarKind(rv.Elem().Kind()) { + v = rv.Elem().Interface() + } + } + + xtd := xt.TypeDescriptor() + if !isValidExtension(mr.Descriptor(), xtd) { + return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) + } + mr.Set(xtd, xt.ValueOf(v)) + clearUnknown(mr, fieldNum(xt.Field)) + return nil +} + +// SetRawExtension inserts b into the unknown fields of m. +// +// Deprecated: Use Message.ProtoReflect.SetUnknown instead. 
+func SetRawExtension(m Message, fnum int32, b []byte) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return + } + + // Verify that the raw field is valid. + for b0 := b; len(b0) > 0; { + num, _, n := protowire.ConsumeField(b0) + if int32(num) != fnum { + panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum)) + } + b0 = b0[n:] + } + + ClearExtension(m, &ExtensionDesc{Field: fnum}) + mr.SetUnknown(append(mr.GetUnknown(), b...)) +} + +// ExtensionDescs returns a list of extension descriptors found in m, +// containing descriptors for both populated extension fields in m and +// also unknown fields of m that are in the extension range. +// For the later case, an type incomplete descriptor is provided where only +// the ExtensionDesc.Field field is populated. +// The order of the extension descriptors is undefined. +func ExtensionDescs(m Message) ([]*ExtensionDesc, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return nil, errNotExtendable + } + + // Collect a set of known extension descriptors. + extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc) + mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + xt := fd.(protoreflect.ExtensionTypeDescriptor) + if xd, ok := xt.Type().(*ExtensionDesc); ok { + extDescs[fd.Number()] = xd + } + } + return true + }) + + // Collect a set of unknown extension descriptors. + extRanges := mr.Descriptor().ExtensionRanges() + for b := mr.GetUnknown(); len(b) > 0; { + num, _, n := protowire.ConsumeField(b) + if extRanges.Has(num) && extDescs[num] == nil { + extDescs[num] = nil + } + b = b[n:] + } + + // Transpose the set of descriptors into a list. + var xts []*ExtensionDesc + for num, xt := range extDescs { + if xt == nil { + xt = &ExtensionDesc{Field: int32(num)} + } + xts = append(xts, xt) + } + return xts, nil +} + +// isValidExtension reports whether xtd is a valid extension descriptor for md. +func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool { + return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number()) +} + +// isScalarKind reports whether k is a protobuf scalar kind (except bytes). +// This function exists for historical reasons since the representation of +// scalars differs between v1 and v2, where v1 uses *T and v2 uses T. +func isScalarKind(k reflect.Kind) bool { + switch k { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + return true + default: + return false + } +} + +// clearUnknown removes unknown fields from m where remover.Has reports true. +func clearUnknown(m protoreflect.Message, remover interface { + Has(protoreflect.FieldNumber) bool +}) { + var bo protoreflect.RawFields + for bi := m.GetUnknown(); len(bi) > 0; { + num, _, n := protowire.ConsumeField(bi) + if !remover.Has(num) { + bo = append(bo, bi[:n]...) 
+ } + bi = bi[n:] + } + if bi := m.GetUnknown(); len(bi) != len(bo) { + m.SetUnknown(bo) + } +} + +type fieldNum protoreflect.FieldNumber + +func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool { + return protoreflect.FieldNumber(n1) == n2 +} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go new file mode 100644 index 000000000..dcdc2202f --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -0,0 +1,306 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "sync" + + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// StructProperties represents protocol buffer type information for a +// generated protobuf message in the open-struct API. +// +// Deprecated: Do not use. +type StructProperties struct { + // Prop are the properties for each field. + // + // Fields belonging to a oneof are stored in OneofTypes instead, with a + // single Properties representing the parent oneof held here. + // + // The order of Prop matches the order of fields in the Go struct. + // Struct fields that are not related to protobufs have a "XXX_" prefix + // in the Properties.Name and must be ignored by the user. + Prop []*Properties + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the protobuf field name. + OneofTypes map[string]*OneofProperties +} + +// Properties represents the type information for a protobuf message field. +// +// Deprecated: Do not use. +type Properties struct { + // Name is a placeholder name with little meaningful semantic value. + // If the name has an "XXX_" prefix, the entire Properties must be ignored. + Name string + // OrigName is the protobuf field name or oneof name. + OrigName string + // JSONName is the JSON name for the protobuf field. + JSONName string + // Enum is a placeholder name for enums. + // For historical reasons, this is neither the Go name for the enum, + // nor the protobuf name for the enum. + Enum string // Deprecated: Do not use. + // Weak contains the full name of the weakly referenced message. + Weak string + // Wire is a string representation of the wire type. + Wire string + // WireType is the protobuf wire type for the field. + WireType int + // Tag is the protobuf field number. + Tag int + // Required reports whether this is a required field. + Required bool + // Optional reports whether this is a optional field. + Optional bool + // Repeated reports whether this is a repeated field. + Repeated bool + // Packed reports whether this is a packed repeated field of scalars. + Packed bool + // Proto3 reports whether this field operates under the proto3 syntax. + Proto3 bool + // Oneof reports whether this field belongs within a oneof. + Oneof bool + + // Default is the default value in string form. + Default string + // HasDefault reports whether the field has a default value. + HasDefault bool + + // MapKeyProp is the properties for the key field for a map field. + MapKeyProp *Properties + // MapValProp is the properties for the value field for a map field. + MapValProp *Properties +} + +// OneofProperties represents the type information for a protobuf oneof. +// +// Deprecated: Do not use. 
+type OneofProperties struct { + // Type is a pointer to the generated wrapper type for the field value. + // This is nil for messages that are not in the open-struct API. + Type reflect.Type + // Field is the index into StructProperties.Prop for the containing oneof. + Field int + // Prop is the properties for the field. + Prop *Properties +} + +// String formats the properties in the protobuf struct field tag style. +func (p *Properties) String() string { + s := p.Wire + s += "," + strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + s += ",name=" + p.OrigName + if p.JSONName != "" { + s += ",json=" + p.JSONName + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if len(p.Weak) > 0 { + s += ",weak=" + p.Weak + } + if p.Proto3 { + s += ",proto3" + } + if p.Oneof { + s += ",oneof" + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(tag string) { + // For example: "bytes,49,opt,name=foo,def=hello!" + for len(tag) > 0 { + i := strings.IndexByte(tag, ',') + if i < 0 { + i = len(tag) + } + switch s := tag[:i]; { + case strings.HasPrefix(s, "name="): + p.OrigName = s[len("name="):] + case strings.HasPrefix(s, "json="): + p.JSONName = s[len("json="):] + case strings.HasPrefix(s, "enum="): + p.Enum = s[len("enum="):] + case strings.HasPrefix(s, "weak="): + p.Weak = s[len("weak="):] + case strings.Trim(s, "0123456789") == "": + n, _ := strconv.ParseUint(s, 10, 32) + p.Tag = int(n) + case s == "opt": + p.Optional = true + case s == "req": + p.Required = true + case s == "rep": + p.Repeated = true + case s == "varint" || s == "zigzag32" || s == "zigzag64": + p.Wire = s + p.WireType = WireVarint + case s == "fixed32": + p.Wire = s + p.WireType = WireFixed32 + case s == "fixed64": + p.Wire = s + p.WireType = WireFixed64 + case s == "bytes": + p.Wire = s + p.WireType = WireBytes + case s == "group": + p.Wire = s + p.WireType = WireStartGroup + case s == "packed": + p.Packed = true + case s == "proto3": + p.Proto3 = true + case s == "oneof": + p.Oneof = true + case strings.HasPrefix(s, "def="): + // The default tag is special in that everything afterwards is the + // default regardless of the presence of commas. + p.HasDefault = true + p.Default, i = tag[len("def="):], len(tag) + } + tag = strings.TrimPrefix(tag[i:], ",") + } +} + +// Init populates the properties from a protocol buffer struct tag. +// +// Deprecated: Do not use. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.Name = name + p.OrigName = name + if tag == "" { + return + } + p.Parse(tag) + + if typ != nil && typ.Kind() == reflect.Map { + p.MapKeyProp = new(Properties) + p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil) + p.MapValProp = new(Properties) + p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil) + } +} + +var propertiesCache sync.Map // map[reflect.Type]*StructProperties + +// GetProperties returns the list of properties for the type represented by t, +// which must be a generated protocol buffer message in the open-struct API, +// where protobuf message fields are represented by exported Go struct fields. +// +// Deprecated: Use protobuf reflection instead. 
+func GetProperties(t reflect.Type) *StructProperties { + if p, ok := propertiesCache.Load(t); ok { + return p.(*StructProperties) + } + p, _ := propertiesCache.LoadOrStore(t, newProperties(t)) + return p.(*StructProperties) +} + +func newProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) + } + + var hasOneof bool + prop := new(StructProperties) + + // Construct a list of properties for each field in the struct. + for i := 0; i < t.NumField(); i++ { + p := new(Properties) + f := t.Field(i) + tagField := f.Tag.Get("protobuf") + p.Init(f.Type, f.Name, tagField, &f) + + tagOneof := f.Tag.Get("protobuf_oneof") + if tagOneof != "" { + hasOneof = true + p.OrigName = tagOneof + } + + // Rename unrelated struct fields with the "XXX_" prefix since so much + // user code simply checks for this to exclude special fields. + if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") { + p.Name = "XXX_" + p.Name + p.OrigName = "XXX_" + p.OrigName + } else if p.Weak != "" { + p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field + } + + prop.Prop = append(prop.Prop, p) + } + + // Construct a mapping of oneof field names to properties. + if hasOneof { + var oneofWrappers []interface{} + if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok { + oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{}) + } + if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok { + oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{}) + } + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok { + if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok { + oneofWrappers = m.ProtoMessageInfo().OneofWrappers + } + } + + prop.OneofTypes = make(map[string]*OneofProperties) + for _, wrapper := range oneofWrappers { + p := &OneofProperties{ + Type: reflect.ValueOf(wrapper).Type(), // *T + Prop: new(Properties), + } + f := p.Type.Elem().Field(0) + p.Prop.Name = f.Name + p.Prop.Parse(f.Tag.Get("protobuf")) + + // Determine the struct field that contains this oneof. + // Each wrapper is assignable to exactly one parent field. + var foundOneof bool + for i := 0; i < t.NumField() && !foundOneof; i++ { + if p.Type.AssignableTo(t.Field(i).Type) { + p.Field = i + foundOneof = true + } + } + if !foundOneof { + panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) + } + prop.OneofTypes[p.Prop.OrigName] = p + } + } + + return prop +} + +func (sp *StructProperties) Len() int { return len(sp.Prop) } +func (sp *StructProperties) Less(i, j int) bool { return false } +func (sp *StructProperties) Swap(i, j int) { return } diff --git a/vendor/github.com/golang/protobuf/proto/proto.go b/vendor/github.com/golang/protobuf/proto/proto.go new file mode 100644 index 000000000..5aee89c32 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/proto.go @@ -0,0 +1,167 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package proto provides functionality for handling protocol buffer messages. +// In particular, it provides marshaling and unmarshaling between a protobuf +// message and the binary wire format. 
+// +// See https://developers.google.com/protocol-buffers/docs/gotutorial for +// more information. +// +// Deprecated: Use the "google.golang.org/protobuf/proto" package instead. +package proto + +import ( + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + ProtoPackageIsVersion1 = true + ProtoPackageIsVersion2 = true + ProtoPackageIsVersion3 = true + ProtoPackageIsVersion4 = true +) + +// GeneratedEnum is any enum type generated by protoc-gen-go +// which is a named int32 kind. +// This type exists for documentation purposes. +type GeneratedEnum interface{} + +// GeneratedMessage is any message type generated by protoc-gen-go +// which is a pointer to a named struct kind. +// This type exists for documentation purposes. +type GeneratedMessage interface{} + +// Message is a protocol buffer message. +// +// This is the v1 version of the message interface and is marginally better +// than an empty interface as it lacks any method to programatically interact +// with the contents of the message. +// +// A v2 message is declared in "google.golang.org/protobuf/proto".Message and +// exposes protobuf reflection as a first-class feature of the interface. +// +// To convert a v1 message to a v2 message, use the MessageV2 function. +// To convert a v2 message to a v1 message, use the MessageV1 function. +type Message = protoiface.MessageV1 + +// MessageV1 converts either a v1 or v2 message to a v1 message. +// It returns nil if m is nil. +func MessageV1(m GeneratedMessage) protoiface.MessageV1 { + return protoimpl.X.ProtoMessageV1Of(m) +} + +// MessageV2 converts either a v1 or v2 message to a v2 message. +// It returns nil if m is nil. +func MessageV2(m GeneratedMessage) protoV2.Message { + return protoimpl.X.ProtoMessageV2Of(m) +} + +// MessageReflect returns a reflective view for a message. +// It returns nil if m is nil. +func MessageReflect(m Message) protoreflect.Message { + return protoimpl.X.MessageOf(m) +} + +// Marshaler is implemented by messages that can marshal themselves. +// This interface is used by the following functions: Size, Marshal, +// Buffer.Marshal, and Buffer.EncodeMessage. +// +// Deprecated: Do not implement. +type Marshaler interface { + // Marshal formats the encoded bytes of the message. + // It should be deterministic and emit valid protobuf wire data. + // The caller takes ownership of the returned buffer. + Marshal() ([]byte, error) +} + +// Unmarshaler is implemented by messages that can unmarshal themselves. +// This interface is used by the following functions: Unmarshal, UnmarshalMerge, +// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup. +// +// Deprecated: Do not implement. +type Unmarshaler interface { + // Unmarshal parses the encoded bytes of the protobuf wire input. + // The provided buffer is only valid for during method call. + // It should not reset the receiver message. + Unmarshal([]byte) error +} + +// Merger is implemented by messages that can merge themselves. +// This interface is used by the following functions: Clone and Merge. +// +// Deprecated: Do not implement. +type Merger interface { + // Merge merges the contents of src into the receiver message. + // It clones all data structures in src such that it aliases no mutable + // memory referenced by src. 
+ Merge(src Message) +} + +// RequiredNotSetError is an error type returned when +// marshaling or unmarshaling a message with missing required fields. +type RequiredNotSetError struct { + err error +} + +func (e *RequiredNotSetError) Error() string { + if e.err != nil { + return e.err.Error() + } + return "proto: required field not set" +} +func (e *RequiredNotSetError) RequiredNotSet() bool { + return true +} + +func checkRequiredNotSet(m protoV2.Message) error { + if err := protoV2.CheckInitialized(m); err != nil { + return &RequiredNotSetError{err: err} + } + return nil +} + +// Clone returns a deep copy of src. +func Clone(src Message) Message { + return MessageV1(protoV2.Clone(MessageV2(src))) +} + +// Merge merges src into dst, which must be messages of the same type. +// +// Populated scalar fields in src are copied to dst, while populated +// singular messages in src are merged into dst by recursively calling Merge. +// The elements of every list field in src is appended to the corresponded +// list fields in dst. The entries of every map field in src is copied into +// the corresponding map field in dst, possibly replacing existing entries. +// The unknown fields of src are appended to the unknown fields of dst. +func Merge(dst, src Message) { + protoV2.Merge(MessageV2(dst), MessageV2(src)) +} + +// Equal reports whether two messages are equal. +// If two messages marshal to the same bytes under deterministic serialization, +// then Equal is guaranteed to report true. +// +// Two messages are equal if they are the same protobuf message type, +// have the same set of populated known and extension field values, +// and the same set of unknown fields values. +// +// Scalar values are compared with the equivalent of the == operator in Go, +// except bytes values which are compared using bytes.Equal and +// floating point values which specially treat NaNs as equal. +// Message values are compared by recursively calling Equal. +// Lists are equal if each element value is also equal. +// Maps are equal if they have the same set of keys, where the pair of values +// for each key is also equal. +func Equal(x, y Message) bool { + return protoV2.Equal(MessageV2(x), MessageV2(y)) +} + +func isMessageSet(md protoreflect.MessageDescriptor) bool { + ms, ok := md.(interface{ IsMessageSet() bool }) + return ok && ms.IsMessageSet() +} diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go new file mode 100644 index 000000000..066b4323b --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/registry.go @@ -0,0 +1,317 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + "reflect" + "strings" + "sync" + + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// filePath is the path to the proto source file. +type filePath = string // e.g., "google/protobuf/descriptor.proto" + +// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto. +type fileDescGZIP = []byte + +var fileCache sync.Map // map[filePath]fileDescGZIP + +// RegisterFile is called from generated code to register the compressed +// FileDescriptorProto with the file path for a proto source file. 
+// +// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead. +func RegisterFile(s filePath, d fileDescGZIP) { + // Decompress the descriptor. + zr, err := gzip.NewReader(bytes.NewReader(d)) + if err != nil { + panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) + } + b, err := ioutil.ReadAll(zr) + if err != nil { + panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) + } + + // Construct a protoreflect.FileDescriptor from the raw descriptor. + // Note that DescBuilder.Build automatically registers the constructed + // file descriptor with the v2 registry. + protoimpl.DescBuilder{RawDescriptor: b}.Build() + + // Locally cache the raw descriptor form for the file. + fileCache.Store(s, d) +} + +// FileDescriptor returns the compressed FileDescriptorProto given the file path +// for a proto source file. It returns nil if not found. +// +// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead. +func FileDescriptor(s filePath) fileDescGZIP { + if v, ok := fileCache.Load(s); ok { + return v.(fileDescGZIP) + } + + // Find the descriptor in the v2 registry. + var b []byte + if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil { + b, _ = Marshal(protodesc.ToFileDescriptorProto(fd)) + } + + // Locally cache the raw descriptor form for the file. + if len(b) > 0 { + v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b)) + return v.(fileDescGZIP) + } + return nil +} + +// enumName is the name of an enum. For historical reasons, the enum name is +// neither the full Go name nor the full protobuf name of the enum. +// The name is the dot-separated combination of just the proto package that the +// enum is declared within followed by the Go type name of the generated enum. +type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum" + +// enumsByName maps enum values by name to their numeric counterpart. +type enumsByName = map[string]int32 + +// enumsByNumber maps enum values by number to their name counterpart. +type enumsByNumber = map[int32]string + +var enumCache sync.Map // map[enumName]enumsByName +var numFilesCache sync.Map // map[protoreflect.FullName]int + +// RegisterEnum is called from the generated code to register the mapping of +// enum value names to enum numbers for the enum identified by s. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead. +func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) { + if _, ok := enumCache.Load(s); ok { + panic("proto: duplicate enum registered: " + s) + } + enumCache.Store(s, m) + + // This does not forward registration to the v2 registry since this API + // lacks sufficient information to construct a complete v2 enum descriptor. +} + +// EnumValueMap returns the mapping from enum value names to enum numbers for +// the enum of the given name. It returns nil if not found. +// +// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead. +func EnumValueMap(s enumName) enumsByName { + if v, ok := enumCache.Load(s); ok { + return v.(enumsByName) + } + + // Check whether the cache is stale. If the number of files in the current + // package differs, then it means that some enums may have been recently + // registered upstream that we do not know about. 
+ var protoPkg protoreflect.FullName + if i := strings.LastIndexByte(s, '.'); i >= 0 { + protoPkg = protoreflect.FullName(s[:i]) + } + v, _ := numFilesCache.Load(protoPkg) + numFiles, _ := v.(int) + if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles { + return nil // cache is up-to-date; was not found earlier + } + + // Update the enum cache for all enums declared in the given proto package. + numFiles = 0 + protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool { + walkEnums(fd, func(ed protoreflect.EnumDescriptor) { + name := protoimpl.X.LegacyEnumName(ed) + if _, ok := enumCache.Load(name); !ok { + m := make(enumsByName) + evs := ed.Values() + for i := evs.Len() - 1; i >= 0; i-- { + ev := evs.Get(i) + m[string(ev.Name())] = int32(ev.Number()) + } + enumCache.LoadOrStore(name, m) + } + }) + numFiles++ + return true + }) + numFilesCache.Store(protoPkg, numFiles) + + // Check cache again for enum map. + if v, ok := enumCache.Load(s); ok { + return v.(enumsByName) + } + return nil +} + +// walkEnums recursively walks all enums declared in d. +func walkEnums(d interface { + Enums() protoreflect.EnumDescriptors + Messages() protoreflect.MessageDescriptors +}, f func(protoreflect.EnumDescriptor)) { + eds := d.Enums() + for i := eds.Len() - 1; i >= 0; i-- { + f(eds.Get(i)) + } + mds := d.Messages() + for i := mds.Len() - 1; i >= 0; i-- { + walkEnums(mds.Get(i), f) + } +} + +// messageName is the full name of protobuf message. +type messageName = string + +var messageTypeCache sync.Map // map[messageName]reflect.Type + +// RegisterType is called from generated code to register the message Go type +// for a message of the given name. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead. +func RegisterType(m Message, s messageName) { + mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s)) + if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil { + panic(err) + } + messageTypeCache.Store(s, reflect.TypeOf(m)) +} + +// RegisterMapType is called from generated code to register the Go map type +// for a protobuf message representing a map entry. +// +// Deprecated: Do not use. +func RegisterMapType(m interface{}, s messageName) { + t := reflect.TypeOf(m) + if t.Kind() != reflect.Map { + panic(fmt.Sprintf("invalid map kind: %v", t)) + } + if _, ok := messageTypeCache.Load(s); ok { + panic(fmt.Errorf("proto: duplicate proto message registered: %s", s)) + } + messageTypeCache.Store(s, t) +} + +// MessageType returns the message type for a named message. +// It returns nil if not found. +// +// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead. +func MessageType(s messageName) reflect.Type { + if v, ok := messageTypeCache.Load(s); ok { + return v.(reflect.Type) + } + + // Derive the message type from the v2 registry. + var t reflect.Type + if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil { + t = messageGoType(mt) + } + + // If we could not get a concrete type, it is possible that it is a + // pseudo-message for a map entry. + if t == nil { + d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s)) + if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() { + kt := goTypeForField(md.Fields().ByNumber(1)) + vt := goTypeForField(md.Fields().ByNumber(2)) + t = reflect.MapOf(kt, vt) + } + } + + // Locally cache the message type for the given name. 
+ if t != nil { + v, _ := messageTypeCache.LoadOrStore(s, t) + return v.(reflect.Type) + } + return nil +} + +func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type { + switch k := fd.Kind(); k { + case protoreflect.EnumKind: + if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil { + return enumGoType(et) + } + return reflect.TypeOf(protoreflect.EnumNumber(0)) + case protoreflect.MessageKind, protoreflect.GroupKind: + if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil { + return messageGoType(mt) + } + return reflect.TypeOf((*protoreflect.Message)(nil)).Elem() + default: + return reflect.TypeOf(fd.Default().Interface()) + } +} + +func enumGoType(et protoreflect.EnumType) reflect.Type { + return reflect.TypeOf(et.New(0)) +} + +func messageGoType(mt protoreflect.MessageType) reflect.Type { + return reflect.TypeOf(MessageV1(mt.Zero().Interface())) +} + +// MessageName returns the full protobuf name for the given message type. +// +// Deprecated: Use protoreflect.MessageDescriptor.FullName instead. +func MessageName(m Message) messageName { + if m == nil { + return "" + } + if m, ok := m.(interface{ XXX_MessageName() messageName }); ok { + return m.XXX_MessageName() + } + return messageName(protoimpl.X.MessageDescriptorOf(m).FullName()) +} + +// RegisterExtension is called from the generated code to register +// the extension descriptor. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead. +func RegisterExtension(d *ExtensionDesc) { + if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil { + panic(err) + } +} + +type extensionsByNumber = map[int32]*ExtensionDesc + +var extensionCache sync.Map // map[messageName]extensionsByNumber + +// RegisteredExtensions returns a map of the registered extensions for the +// provided protobuf message, indexed by the extension field number. +// +// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead. +func RegisteredExtensions(m Message) extensionsByNumber { + // Check whether the cache is stale. If the number of extensions for + // the given message differs, then it means that some extensions were + // recently registered upstream that we do not know about. + s := MessageName(m) + v, _ := extensionCache.Load(s) + xs, _ := v.(extensionsByNumber) + if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) { + return xs // cache is up-to-date + } + + // Cache is stale, re-compute the extensions map. + xs = make(extensionsByNumber) + protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool { + if xd, ok := xt.(*ExtensionDesc); ok { + xs[int32(xt.TypeDescriptor().Number())] = xd + } else { + // TODO: This implies that the protoreflect.ExtensionType is a + // custom type not generated by protoc-gen-go. We could try and + // convert the type to an ExtensionDesc. + } + return true + }) + extensionCache.Store(s, xs) + return xs +} diff --git a/vendor/github.com/golang/protobuf/proto/text_decode.go b/vendor/github.com/golang/protobuf/proto/text_decode.go new file mode 100644 index 000000000..47eb3e445 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_decode.go @@ -0,0 +1,801 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package proto + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/prototext" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapTextUnmarshalV2 = false + +// ParseError is returned by UnmarshalText. +type ParseError struct { + Message string + + // Deprecated: Do not use. + Line, Offset int +} + +func (e *ParseError) Error() string { + if wrapTextUnmarshalV2 { + return e.Message + } + if e.Line == 1 { + return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message) + } + return fmt.Sprintf("line %d: %v", e.Line, e.Message) +} + +// UnmarshalText parses a proto text formatted string into m. +func UnmarshalText(s string, m Message) error { + if u, ok := m.(encoding.TextUnmarshaler); ok { + return u.UnmarshalText([]byte(s)) + } + + m.Reset() + mi := MessageV2(m) + + if wrapTextUnmarshalV2 { + err := prototext.UnmarshalOptions{ + AllowPartial: true, + }.Unmarshal([]byte(s), mi) + if err != nil { + return &ParseError{Message: err.Error()} + } + return checkRequiredNotSet(mi) + } else { + if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil { + return err + } + return checkRequiredNotSet(mi) + } +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) { + md := m.Descriptor() + fds := md.Fields() + + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + seen := make(map[protoreflect.FieldNumber]bool) + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + if err := p.unmarshalExtensionOrAny(m, seen); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. 
+ name := protoreflect.Name(tok.value) + fd := fds.ByName(name) + switch { + case fd == nil: + gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name)))) + if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name { + fd = gd + } + case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name: + fd = nil + case fd.IsWeak() && fd.Message().IsPlaceholder(): + fd = nil + } + if fd == nil { + typeName := string(md.FullName()) + if m, ok := m.Interface().(Message); ok { + t := reflect.TypeOf(m) + if t.Kind() == reflect.Ptr { + typeName = t.Elem().String() + } + } + return p.errorf("unknown field name %q in %v", name, typeName) + } + if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name()) + } + if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] { + return p.errorf("non-repeated field %q was repeated", fd.Name()) + } + seen[fd.Number()] = true + + // Consume any colon. + if err := p.checkForColon(fd); err != nil { + return err + } + + // Parse into the field. + v := m.Get(fd) + if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { + v = m.Mutable(fd) + } + if v, err = p.unmarshalValue(v, fd); err != nil { + return err + } + m.Set(fd, v) + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + } + return nil +} + +func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error { + name, err := p.consumeExtensionOrAnyName() + if err != nil { + return err + } + + // If it contains a slash, it's an Any type URL. + if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 { + tok := p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + + mt, err := protoregistry.GlobalTypes.FindMessageByURL(name) + if err != nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):]) + } + m2 := mt.New() + if err := p.unmarshalMessage(m2, terminator); err != nil { + return err + } + b, err := protoV2.Marshal(m2.Interface()) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err) + } + + urlFD := m.Descriptor().Fields().ByName("type_url") + valFD := m.Descriptor().Fields().ByName("value") + if seen[urlFD.Number()] { + return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name()) + } + if seen[valFD.Number()] { + return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name()) + } + m.Set(urlFD, protoreflect.ValueOfString(name)) + m.Set(valFD, protoreflect.ValueOfBytes(b)) + seen[urlFD.Number()] = true + seen[valFD.Number()] = true + return nil + } + + xname := protoreflect.FullName(name) + xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) + if xt == nil && isMessageSet(m.Descriptor()) { + xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) + } + if xt == nil { + return p.errorf("unrecognized extension %q", name) + } + fd := xt.TypeDescriptor() + if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { + return p.errorf("extension field %q does not extend message %q", 
name, m.Descriptor().FullName()) + } + + if err := p.checkForColon(fd); err != nil { + return err + } + + v := m.Get(fd) + if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { + v = m.Mutable(fd) + } + v, err = p.unmarshalValue(v, fd) + if err != nil { + return err + } + m.Set(fd, v) + return p.consumeOptionalSeparator() +} + +func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "" { + return v, p.errorf("unexpected EOF") + } + + switch { + case fd.IsList(): + lv := v.List() + var err error + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + vv := lv.NewElement() + vv, err = p.unmarshalSingularValue(vv, fd) + if err != nil { + return v, err + } + lv.Append(vv) + + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "]" { + break + } + if tok.value != "," { + return v, p.errorf("Expected ']' or ',' found %q", tok.value) + } + } + return v, nil + } + + // One value of the repeated field. + p.back() + vv := lv.NewElement() + vv, err = p.unmarshalSingularValue(vv, fd) + if err != nil { + return v, err + } + lv.Append(vv) + return v, nil + case fd.IsMap(): + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return v, p.errorf("expected '{' or '<', found %q", tok.value) + } + + keyFD := fd.MapKey() + valFD := fd.MapValue() + + mv := v.Map() + kv := keyFD.Default() + vv := mv.NewValue() + for { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == terminator { + break + } + var err error + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return v, err + } + if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil { + return v, err + } + if err := p.consumeOptionalSeparator(); err != nil { + return v, err + } + case "value": + if err := p.checkForColon(valFD); err != nil { + return v, err + } + if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil { + return v, err + } + if err := p.consumeOptionalSeparator(); err != nil { + return v, err + } + default: + p.back() + return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + mv.Set(kv.MapKey(), vv) + return v, nil + default: + p.back() + return p.unmarshalSingularValue(v, fd) + } +} + +func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "" { + return v, p.errorf("unexpected EOF") + } + + switch fd.Kind() { + case protoreflect.BoolKind: + switch tok.value { + case "true", "1", "t", "True": + return protoreflect.ValueOfBool(true), nil + case "false", "0", "f", "False": + return protoreflect.ValueOfBool(false), nil + } + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfInt32(int32(x)), nil + } + + // The C++ parser accepts large positive hex numbers that uses + // two's complement arithmetic to represent negative numbers. 
+ // This feature is here for backwards compatibility with C++. + if strings.HasPrefix(tok.value, "0x") { + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil + } + } + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfInt64(int64(x)), nil + } + + // The C++ parser accepts large positive hex numbers that uses + // two's complement arithmetic to represent negative numbers. + // This feature is here for backwards compatibility with C++. + if strings.HasPrefix(tok.value, "0x") { + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil + } + } + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfUint32(uint32(x)), nil + } + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfUint64(uint64(x)), nil + } + case protoreflect.FloatKind: + // Ignore 'f' for compatibility with output generated by C++, + // but don't remove 'f' when the value is "-inf" or "inf". + v := tok.value + if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { + v = v[:len(v)-len("f")] + } + if x, err := strconv.ParseFloat(v, 32); err == nil { + return protoreflect.ValueOfFloat32(float32(x)), nil + } + case protoreflect.DoubleKind: + // Ignore 'f' for compatibility with output generated by C++, + // but don't remove 'f' when the value is "-inf" or "inf". + v := tok.value + if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { + v = v[:len(v)-len("f")] + } + if x, err := strconv.ParseFloat(v, 64); err == nil { + return protoreflect.ValueOfFloat64(float64(x)), nil + } + case protoreflect.StringKind: + if isQuote(tok.value[0]) { + return protoreflect.ValueOfString(tok.unquoted), nil + } + case protoreflect.BytesKind: + if isQuote(tok.value[0]) { + return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil + } + case protoreflect.EnumKind: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil + } + vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value)) + if vd != nil { + return protoreflect.ValueOfEnum(vd.Number()), nil + } + case protoreflect.MessageKind, protoreflect.GroupKind: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return v, p.errorf("expected '{' or '<', found %q", tok.value) + } + err := p.unmarshalMessage(v.Message(), terminator) + return v, err + default: + panic(fmt.Sprintf("invalid kind %v", fd.Kind())) + } + return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value) +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + if fd.Message() == nil { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +// consumeExtensionOrAnyName consumes an extension name or an Any type URL and +// the following ']'. It returns the name or URL consumed. 
+func (p *textParser) consumeExtensionOrAnyName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in unmarshalMessage to provide backward compatibility. +func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. 
+ cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +var errBadUTF8 = errors.New("proto: bad UTF-8") + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + ss := string(r) + s[:2] + s = s[2:] + i, err := strconv.ParseUint(ss, 8, 8) + if err != nil { + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) + } + return string([]byte{byte(i)}), s, nil + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) + } + ss := s[:n] + s = s[n:] + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(rune(i)), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} diff --git a/vendor/github.com/golang/protobuf/proto/text_encode.go b/vendor/github.com/golang/protobuf/proto/text_encode.go 
new file mode 100644 index 000000000..a31134eeb --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_encode.go @@ -0,0 +1,560 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "bytes" + "encoding" + "fmt" + "io" + "math" + "sort" + "strings" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapTextMarshalV2 = false + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line) + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes the proto text format of m to w. +func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error { + b, err := tm.marshal(m) + if len(b) > 0 { + if _, err := w.Write(b); err != nil { + return err + } + } + return err +} + +// Text returns a proto text formatted string of m. +func (tm *TextMarshaler) Text(m Message) string { + b, _ := tm.marshal(m) + return string(b) +} + +func (tm *TextMarshaler) marshal(m Message) ([]byte, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return []byte(""), nil + } + + if wrapTextMarshalV2 { + if m, ok := m.(encoding.TextMarshaler); ok { + return m.MarshalText() + } + + opts := prototext.MarshalOptions{ + AllowPartial: true, + EmitUnknown: true, + } + if !tm.Compact { + opts.Indent = " " + } + if !tm.ExpandAny { + opts.Resolver = (*protoregistry.Types)(nil) + } + return opts.Marshal(mr.Interface()) + } else { + w := &textWriter{ + compact: tm.Compact, + expandAny: tm.ExpandAny, + complete: true, + } + + if m, ok := m.(encoding.TextMarshaler); ok { + b, err := m.MarshalText() + if err != nil { + return nil, err + } + w.Write(b) + return w.buf, nil + } + + err := w.writeMessage(mr) + return w.buf, err + } +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// MarshalText writes the proto text format of m to w. +func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) } + +// MarshalTextString returns a proto text formatted string of m. +func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) } + +// CompactText writes the compact proto text format of m to w. +func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) } + +// CompactTextString returns a compact proto text formatted string of m. +func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) } + +var ( + newline = []byte("\n") + endBraceNewline = []byte("}\n") + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + compact bool // same as TextMarshaler.Compact + expandAny bool // same as TextMarshaler.ExpandAny + complete bool // whether the current position is a complete line + indent int // indentation level; never negative + buf []byte +} + +func (w *textWriter) Write(p []byte) (n int, _ error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + w.buf = append(w.buf, p...) 
+ w.complete = false + return len(p), nil + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + w.buf = append(w.buf, ' ') + n++ + } + w.buf = append(w.buf, frag...) + n += len(frag) + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + w.buf = append(w.buf, frag...) + n += len(frag) + if i+1 < len(frags) { + w.buf = append(w.buf, '\n') + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + w.buf = append(w.buf, c) + w.complete = c == '\n' + return nil +} + +func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + + if fd.Kind() != protoreflect.GroupKind { + w.buf = append(w.buf, fd.Name()...) + w.WriteByte(':') + } else { + // Use message type name for group field name. + w.buf = append(w.buf, fd.Message().Name()...) + } + + if !w.compact { + w.WriteByte(' ') + } +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. 
+func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) { + md := m.Descriptor() + fdURL := md.Fields().ByName("type_url") + fdVal := md.Fields().ByName("value") + + url := m.Get(fdURL).String() + mt, err := protoregistry.GlobalTypes.FindMessageByURL(url) + if err != nil { + return false, nil + } + + b := m.Get(fdVal).Bytes() + m2 := mt.New() + if err := proto.Unmarshal(b, m2.Interface()); err != nil { + return false, nil + } + w.Write([]byte("[")) + if requiresQuotes(url) { + w.writeQuotedString(url) + } else { + w.Write([]byte(url)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.indent++ + } + if err := w.writeMessage(m2); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.indent-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (w *textWriter) writeMessage(m protoreflect.Message) error { + md := m.Descriptor() + if w.expandAny && md.FullName() == "google.protobuf.Any" { + if canExpand, err := w.writeProto3Any(m); canExpand { + return err + } + } + + fds := md.Fields() + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil { + fd = m.WhichOneof(od) + i += od.Fields().Len() + } else { + i++ + } + if fd == nil || !m.Has(fd) { + continue + } + + switch { + case fd.IsList(): + lv := m.Get(fd).List() + for j := 0; j < lv.Len(); j++ { + w.writeName(fd) + v := lv.Get(j) + if err := w.writeSingularValue(v, fd); err != nil { + return err + } + w.WriteByte('\n') + } + case fd.IsMap(): + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := m.Get(fd).Map() + + type entry struct{ key, val protoreflect.Value } + var entries []entry + mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + entries = append(entries, entry{k.Value(), v}) + return true + }) + sort.Slice(entries, func(i, j int) bool { + switch kfd.Kind() { + case protoreflect.BoolKind: + return !entries[i].key.Bool() && entries[j].key.Bool() + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return entries[i].key.Int() < entries[j].key.Int() + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return entries[i].key.Uint() < entries[j].key.Uint() + case protoreflect.StringKind: + return entries[i].key.String() < entries[j].key.String() + default: + panic("invalid kind") + } + }) + for _, entry := range entries { + w.writeName(fd) + w.WriteByte('<') + if !w.compact { + w.WriteByte('\n') + } + w.indent++ + w.writeName(kfd) + if err := w.writeSingularValue(entry.key, kfd); err != nil { + return err + } + w.WriteByte('\n') + w.writeName(vfd) + if err := w.writeSingularValue(entry.val, vfd); err != nil { + return err + } + w.WriteByte('\n') + w.indent-- + w.WriteByte('>') + w.WriteByte('\n') + } + default: + w.writeName(fd) + if err := w.writeSingularValue(m.Get(fd), fd); err != nil { + return err + } + w.WriteByte('\n') + } + } + + if b := m.GetUnknown(); len(b) > 0 { + w.writeUnknownFields(b) + } + return w.writeExtensions(m) +} + +func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error { + switch fd.Kind() { + case protoreflect.FloatKind, protoreflect.DoubleKind: + switch vf := v.Float(); { + case math.IsInf(vf, +1): + w.Write(posInf) + case math.IsInf(vf, -1): + w.Write(negInf) + case math.IsNaN(vf): + w.Write(nan) + default: + fmt.Fprint(w, v.Interface()) + } + case 
protoreflect.StringKind: + // NOTE: This does not validate UTF-8 for historical reasons. + w.writeQuotedString(string(v.String())) + case protoreflect.BytesKind: + w.writeQuotedString(string(v.Bytes())) + case protoreflect.MessageKind, protoreflect.GroupKind: + var bra, ket byte = '<', '>' + if fd.Kind() == protoreflect.GroupKind { + bra, ket = '{', '}' + } + w.WriteByte(bra) + if !w.compact { + w.WriteByte('\n') + } + w.indent++ + m := v.Message() + if m2, ok := m.Interface().(encoding.TextMarshaler); ok { + b, err := m2.MarshalText() + if err != nil { + return err + } + w.Write(b) + } else { + w.writeMessage(m) + } + w.indent-- + w.WriteByte(ket) + case protoreflect.EnumKind: + if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil { + fmt.Fprint(w, ev.Name()) + } else { + fmt.Fprint(w, v.Enum()) + } + default: + fmt.Fprint(w, v.Interface()) + } + return nil +} + +// writeQuotedString writes a quoted string in the protocol buffer text format. +func (w *textWriter) writeQuotedString(s string) { + w.WriteByte('"') + for i := 0; i < len(s); i++ { + switch c := s[i]; c { + case '\n': + w.buf = append(w.buf, `\n`...) + case '\r': + w.buf = append(w.buf, `\r`...) + case '\t': + w.buf = append(w.buf, `\t`...) + case '"': + w.buf = append(w.buf, `\"`...) + case '\\': + w.buf = append(w.buf, `\\`...) + default: + if isPrint := c >= 0x20 && c < 0x7f; isPrint { + w.buf = append(w.buf, c) + } else { + w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...) + } + } + } + w.WriteByte('"') +} + +func (w *textWriter) writeUnknownFields(b []byte) { + if !w.compact { + fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b)) + } + + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return + } + b = b[n:] + + if wtyp == protowire.EndGroupType { + w.indent-- + w.Write(endBraceNewline) + continue + } + fmt.Fprint(w, num) + if wtyp != protowire.StartGroupType { + w.WriteByte(':') + } + if !w.compact || wtyp == protowire.StartGroupType { + w.WriteByte(' ') + } + switch wtyp { + case protowire.VarintType: + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.Fixed32Type: + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.Fixed64Type: + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.BytesType: + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprintf(w, "%q", v) + case protowire.StartGroupType: + w.WriteByte('{') + w.indent++ + default: + fmt.Fprintf(w, "/* unknown wire type %d */", wtyp) + } + w.WriteByte('\n') + } +} + +// writeExtensions writes all the extensions in m. +func (w *textWriter) writeExtensions(m protoreflect.Message) error { + md := m.Descriptor() + if md.ExtensionRanges().Len() == 0 { + return nil + } + + type ext struct { + desc protoreflect.FieldDescriptor + val protoreflect.Value + } + var exts []ext + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + exts = append(exts, ext{fd, v}) + } + return true + }) + sort.Slice(exts, func(i, j int) bool { + return exts[i].desc.Number() < exts[j].desc.Number() + }) + + for _, ext := range exts { + // For message set, use the name of the message as the extension name. 
+ name := string(ext.desc.FullName()) + if isMessageSet(ext.desc.ContainingMessage()) { + name = strings.TrimSuffix(name, ".message_set_extension") + } + + if !ext.desc.IsList() { + if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil { + return err + } + } else { + lv := ext.val.List() + for i := 0; i < lv.Len(); i++ { + if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil { + return err + } + } + } + } + return nil +} + +func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error { + fmt.Fprintf(w, "[%s]:", name) + if !w.compact { + w.WriteByte(' ') + } + if err := w.writeSingularValue(v, fd); err != nil { + return err + } + w.WriteByte('\n') + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + for i := 0; i < w.indent*2; i++ { + w.buf = append(w.buf, ' ') + } + w.complete = false +} diff --git a/vendor/github.com/golang/protobuf/proto/wire.go b/vendor/github.com/golang/protobuf/proto/wire.go new file mode 100644 index 000000000..d7c28da5a --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/wire.go @@ -0,0 +1,78 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/runtime/protoiface" +) + +// Size returns the size in bytes of the wire-format encoding of m. +func Size(m Message) int { + if m == nil { + return 0 + } + mi := MessageV2(m) + return protoV2.Size(mi) +} + +// Marshal returns the wire-format encoding of m. +func Marshal(m Message) ([]byte, error) { + b, err := marshalAppend(nil, m, false) + if b == nil { + b = zeroBytes + } + return b, err +} + +var zeroBytes = make([]byte, 0, 0) + +func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) { + if m == nil { + return nil, ErrNil + } + mi := MessageV2(m) + nbuf, err := protoV2.MarshalOptions{ + Deterministic: deterministic, + AllowPartial: true, + }.MarshalAppend(buf, mi) + if err != nil { + return buf, err + } + if len(buf) == len(nbuf) { + if !mi.ProtoReflect().IsValid() { + return buf, ErrNil + } + } + return nbuf, checkRequiredNotSet(mi) +} + +// Unmarshal parses a wire-format message in b and places the decoded results in m. +// +// Unmarshal resets m before starting to unmarshal, so any existing data in m is always +// removed. Use UnmarshalMerge to preserve and append to existing data. +func Unmarshal(b []byte, m Message) error { + m.Reset() + return UnmarshalMerge(b, m) +} + +// UnmarshalMerge parses a wire-format message in b and places the decoded results in m. +func UnmarshalMerge(b []byte, m Message) error { + mi := MessageV2(m) + out, err := protoV2.UnmarshalOptions{ + AllowPartial: true, + Merge: true, + }.UnmarshalState(protoiface.UnmarshalInput{ + Buf: b, + Message: mi.ProtoReflect(), + }) + if err != nil { + return err + } + if out.Flags&protoiface.UnmarshalInitialized > 0 { + return nil + } + return checkRequiredNotSet(mi) +} diff --git a/vendor/github.com/golang/protobuf/proto/wrappers.go b/vendor/github.com/golang/protobuf/proto/wrappers.go new file mode 100644 index 000000000..398e34859 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/wrappers.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package proto + +// Bool stores v in a new bool value and returns a pointer to it. +func Bool(v bool) *bool { return &v } + +// Int stores v in a new int32 value and returns a pointer to it. +// +// Deprecated: Use Int32 instead. +func Int(v int) *int32 { return Int32(int32(v)) } + +// Int32 stores v in a new int32 value and returns a pointer to it. +func Int32(v int32) *int32 { return &v } + +// Int64 stores v in a new int64 value and returns a pointer to it. +func Int64(v int64) *int64 { return &v } + +// Uint32 stores v in a new uint32 value and returns a pointer to it. +func Uint32(v uint32) *uint32 { return &v } + +// Uint64 stores v in a new uint64 value and returns a pointer to it. +func Uint64(v uint64) *uint64 { return &v } + +// Float32 stores v in a new float32 value and returns a pointer to it. +func Float32(v float32) *float32 { return &v } + +// Float64 stores v in a new float64 value and returns a pointer to it. +func Float64(v float64) *float64 { return &v } + +// String stores v in a new string value and returns a pointer to it. +func String(v string) *string { return &v } diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go new file mode 100644 index 000000000..85f9f5736 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -0,0 +1,179 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ptypes + +import ( + "fmt" + "strings" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + anypb "github.com/golang/protobuf/ptypes/any" +) + +const urlPrefix = "type.googleapis.com/" + +// AnyMessageName returns the message name contained in an anypb.Any message. +// Most type assertions should use the Is function instead. +// +// Deprecated: Call the any.MessageName method instead. +func AnyMessageName(any *anypb.Any) (string, error) { + name, err := anyMessageName(any) + return string(name), err +} +func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) { + if any == nil { + return "", fmt.Errorf("message is nil") + } + name := protoreflect.FullName(any.TypeUrl) + if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 { + name = name[i+len("/"):] + } + if !name.IsValid() { + return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) + } + return name, nil +} + +// MarshalAny marshals the given message m into an anypb.Any message. +// +// Deprecated: Call the anypb.New function instead. +func MarshalAny(m proto.Message) (*anypb.Any, error) { + switch dm := m.(type) { + case DynamicAny: + m = dm.Message + case *DynamicAny: + if dm == nil { + return nil, proto.ErrNil + } + m = dm.Message + } + b, err := proto.Marshal(m) + if err != nil { + return nil, err + } + return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil +} + +// Empty returns a new message of the type specified in an anypb.Any message. +// It returns protoregistry.NotFound if the corresponding message type could not +// be resolved in the global registry. +// +// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead +// to resolve the message name and create a new instance of it. 
+func Empty(any *anypb.Any) (proto.Message, error) { + name, err := anyMessageName(any) + if err != nil { + return nil, err + } + mt, err := protoregistry.GlobalTypes.FindMessageByName(name) + if err != nil { + return nil, err + } + return proto.MessageV1(mt.New().Interface()), nil +} + +// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message +// into the provided message m. It returns an error if the target message +// does not match the type in the Any message or if an unmarshal error occurs. +// +// The target message m may be a *DynamicAny message. If the underlying message +// type could not be resolved, then this returns protoregistry.NotFound. +// +// Deprecated: Call the any.UnmarshalTo method instead. +func UnmarshalAny(any *anypb.Any, m proto.Message) error { + if dm, ok := m.(*DynamicAny); ok { + if dm.Message == nil { + var err error + dm.Message, err = Empty(any) + if err != nil { + return err + } + } + m = dm.Message + } + + anyName, err := AnyMessageName(any) + if err != nil { + return err + } + msgName := proto.MessageName(m) + if anyName != msgName { + return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName) + } + return proto.Unmarshal(any.Value, m) +} + +// Is reports whether the Any message contains a message of the specified type. +// +// Deprecated: Call the any.MessageIs method instead. +func Is(any *anypb.Any, m proto.Message) bool { + if any == nil || m == nil { + return false + } + name := proto.MessageName(m) + if !strings.HasSuffix(any.TypeUrl, name) { + return false + } + return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/' +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in an anypb.Any message. +// The allocated message is stored in the embedded proto.Message. +// +// Example: +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) +// +// Deprecated: Use the any.UnmarshalNew method instead to unmarshal +// the any message contents into a new instance of the underlying message. 
+type DynamicAny struct{ proto.Message } + +func (m DynamicAny) String() string { + if m.Message == nil { + return "" + } + return m.Message.String() +} +func (m DynamicAny) Reset() { + if m.Message == nil { + return + } + m.Message.Reset() +} +func (m DynamicAny) ProtoMessage() { + return +} +func (m DynamicAny) ProtoReflect() protoreflect.Message { + if m.Message == nil { + return nil + } + return dynamicAny{proto.MessageReflect(m.Message)} +} + +type dynamicAny struct{ protoreflect.Message } + +func (m dynamicAny) Type() protoreflect.MessageType { + return dynamicAnyType{m.Message.Type()} +} +func (m dynamicAny) New() protoreflect.Message { + return dynamicAnyType{m.Message.Type()}.New() +} +func (m dynamicAny) Interface() protoreflect.ProtoMessage { + return DynamicAny{proto.MessageV1(m.Message.Interface())} +} + +type dynamicAnyType struct{ protoreflect.MessageType } + +func (t dynamicAnyType) New() protoreflect.Message { + return dynamicAny{t.MessageType.New()} +} +func (t dynamicAnyType) Zero() protoreflect.Message { + return dynamicAny{t.MessageType.Zero()} +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go new file mode 100644 index 000000000..0ef27d33d --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -0,0 +1,62 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/ptypes/any/any.proto + +package any + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/any.proto. + +type Any = anypb.Any + +var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{ + 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, + 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() } +func file_github_com_golang_protobuf_ptypes_any_any_proto_init() { + if File_github_com_golang_protobuf_ptypes_any_any_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + 
RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_any_any_proto = out.File + file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go new file mode 100644 index 000000000..d3c33259d --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -0,0 +1,10 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ptypes provides functionality for interacting with well-known types. +// +// Deprecated: Well-known types have specialized functionality directly +// injected into the generated packages for each message type. +// See the deprecation notice for each function for the suggested alternative. +package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go new file mode 100644 index 000000000..b2b55dd85 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -0,0 +1,76 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ptypes + +import ( + "errors" + "fmt" + "time" + + durationpb "github.com/golang/protobuf/ptypes/duration" +) + +// Range of google.protobuf.Duration as specified in duration.proto. +// This is about 10,000 years in seconds. +const ( + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// Duration converts a durationpb.Duration to a time.Duration. +// Duration returns an error if dur is invalid or overflows a time.Duration. +// +// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead. +func Duration(dur *durationpb.Duration) (time.Duration, error) { + if err := validateDuration(dur); err != nil { + return 0, err + } + d := time.Duration(dur.Seconds) * time.Second + if int64(d/time.Second) != dur.Seconds { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) + } + if dur.Nanos != 0 { + d += time.Duration(dur.Nanos) * time.Nanosecond + if (d < 0) != (dur.Nanos < 0) { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a durationpb.Duration. +// +// Deprecated: Call the durationpb.New function instead. +func DurationProto(d time.Duration) *durationpb.Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &durationpb.Duration{ + Seconds: int64(secs), + Nanos: int32(nanos), + } +} + +// validateDuration determines whether the durationpb.Duration is valid +// according to the definition in google/protobuf/duration.proto. +// A valid durpb.Duration may still be too large to fit into a time.Duration +// Note that the range of durationpb.Duration is about 10,000 years, +// while the range of time.Duration is about 290 years. 
+func validateDuration(dur *durationpb.Duration) error { + if dur == nil { + return errors.New("duration: nil Duration") + } + if dur.Seconds < minSeconds || dur.Seconds > maxSeconds { + return fmt.Errorf("duration: %v: seconds out of range", dur) + } + if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 { + return fmt.Errorf("duration: %v: nanos out of range", dur) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) { + return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur) + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go new file mode 100644 index 000000000..d0079ee3e --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -0,0 +1,63 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/ptypes/duration/duration.proto + +package duration + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/duration.proto. + +type Duration = durationpb.Duration + +var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{ + 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() } +func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() { + if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: 
file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes,
+		DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs,
+	}.Build()
+	File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File
+	file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil
+	file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil
+	file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
new file mode 100644
index 000000000..8368a3f70
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
@@ -0,0 +1,112 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ptypes
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	timestamppb "github.com/golang/protobuf/ptypes/timestamp"
+)
+
+// Range of google.protobuf.Duration as specified in timestamp.proto.
+const (
+	// Seconds field of the earliest valid Timestamp.
+	// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	minValidSeconds = -62135596800
+	// Seconds field just after the latest valid Timestamp.
+	// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	maxValidSeconds = 253402300800
+)
+
+// Timestamp converts a timestamppb.Timestamp to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if Timestamp returns an error, the first return
+// value is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+//
+// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead.
+func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
+	// Don't return the zero value on error, because corresponds to a valid
+	// timestamp. Instead return whatever time.Unix gives us.
+	var t time.Time
+	if ts == nil {
+		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+	} else {
+		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+	}
+	return t, validateTimestamp(ts)
+}
+
+// TimestampNow returns a google.protobuf.Timestamp for the current time.
+//
+// Deprecated: Call the timestamppb.Now function instead.
+func TimestampNow() *timestamppb.Timestamp {
+	ts, err := TimestampProto(time.Now())
+	if err != nil {
+		panic("ptypes: time.Now() out of Timestamp range")
+	}
+	return ts
+}
+
+// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+//
+// Deprecated: Call the timestamppb.New function instead.
+func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
+	ts := &timestamppb.Timestamp{
+		Seconds: t.Unix(),
+		Nanos:   int32(t.Nanosecond()),
+	}
+	if err := validateTimestamp(ts); err != nil {
+		return nil, err
+	}
+	return ts, nil
+}
+
+// TimestampString returns the RFC 3339 string for valid Timestamps.
+// For invalid Timestamps, it returns an error message in parentheses.
+//
+// Deprecated: Call the ts.AsTime method instead,
+// followed by a call to the Format method on the time.Time value.
+func TimestampString(ts *timestamppb.Timestamp) string { + t, err := Timestamp(ts) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return t.Format(time.RFC3339Nano) +} + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01) +// and has a Nanos field in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes the problem. +// +// Every valid Timestamp can be represented by a time.Time, +// but the converse is not true. +func validateTimestamp(ts *timestamppb.Timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go new file mode 100644 index 000000000..a76f80760 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -0,0 +1,64 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto + +package timestamp + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/timestamp.proto. 
+ +type Timestamp = timestamppb.Timestamp + +var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{ + 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37, + 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() } +func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() { + if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil +} diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go index e102849b0..e54a76c7e 100644 --- a/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go +++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go @@ -1,17 +1,17 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. // Package cmpopts provides common options for the cmp package. 
package cmpopts import ( + "errors" "math" "reflect" "time" "github.com/google/go-cmp/cmp" - "golang.org/x/xerrors" ) func equateAlways(_, _ interface{}) bool { return true } @@ -42,6 +42,7 @@ func isEmpty(x, y interface{}) bool { // The fraction and margin must be non-negative. // // The mathematical expression used is equivalent to: +// // |x-y| ≤ max(fraction*min(|x|, |y|), margin) // // EquateApprox can be used in conjunction with EquateNaNs. @@ -112,7 +113,7 @@ type timeApproximator struct { func (a timeApproximator) compare(x, y time.Time) bool { // Avoid subtracting times to avoid overflow when the - // difference is larger than the largest representible duration. + // difference is larger than the largest representable duration. if x.After(y) { // Ensure x is always before y x, y = y, x @@ -151,6 +152,5 @@ func areConcreteErrors(x, y interface{}) bool { func compareErrors(x, y interface{}) bool { xe := x.(error) ye := y.(error) - // TODO: Use errors.Is when go1.13 is the minimally supported version of Go. - return xerrors.Is(xe, ye) || xerrors.Is(ye, xe) + return errors.Is(xe, ye) || errors.Is(ye, xe) } diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go index ff8e785d4..80c60617e 100644 --- a/vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go +++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package cmpopts @@ -14,14 +14,13 @@ import ( "github.com/google/go-cmp/cmp/internal/function" ) -// IgnoreFields returns an Option that ignores exported fields of the -// given names on a single struct type. +// IgnoreFields returns an Option that ignores fields of the +// given names on a single struct type. It respects the names of exported fields +// that are forwarded due to struct embedding. // The struct type is specified by passing in a value of that type. // // The name may be a dot-delimited string (e.g., "Foo.Bar") to ignore a // specific sub-field that is embedded or nested within the parent struct. -// -// This does not handle unexported fields; use IgnoreUnexported instead. func IgnoreFields(typ interface{}, names ...string) cmp.Option { sf := newStructFilter(typ, names...) return cmp.FilterPath(sf.filter, cmp.Ignore()) @@ -129,7 +128,7 @@ func newUnexportedFilter(typs ...interface{}) unexportedFilter { for _, typ := range typs { t := reflect.TypeOf(typ) if t == nil || t.Kind() != reflect.Struct { - panic(fmt.Sprintf("invalid struct type: %T", typ)) + panic(fmt.Sprintf("%T must be a non-pointer struct", typ)) } ux.m[t] = true } diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go index 3a4804621..0eb2a758c 100644 --- a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go +++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package cmpopts @@ -18,9 +18,9 @@ import ( // sort any slice with element type V that is assignable to T. 
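(Editor's note, not part of the patch: a usage sketch for two of the options touched above — the |x-y| ≤ max(fraction*min(|x|, |y|), margin) tolerance rule, and error equality, which after this change goes through the standard library's errors.Is.)

package main

import (
	"fmt"
	"io/fs"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	// |1.01-1.02| = 0.01 <= max(0.02*min(1.01, 1.02), 0) = 0.0202
	fmt.Println(cmp.Equal(1.01, 1.02, cmpopts.EquateApprox(0.02, 0))) // true

	// EquateErrors reports a wrapped error equal to its target whenever errors.Is matches.
	err := fmt.Errorf("open config: %w", fs.ErrNotExist)
	fmt.Println(cmp.Equal(err, fs.ErrNotExist, cmpopts.EquateErrors())) // true
}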
// // The less function must be: -// • Deterministic: less(x, y) == less(x, y) -// • Irreflexive: !less(x, x) -// • Transitive: if !less(x, y) and !less(y, z), then !less(x, z) +// - Deterministic: less(x, y) == less(x, y) +// - Irreflexive: !less(x, x) +// - Transitive: if !less(x, y) and !less(y, z), then !less(x, z) // // The less function does not have to be "total". That is, if !less(x, y) and // !less(y, x) for two elements x and y, their relative order is maintained. @@ -91,10 +91,10 @@ func (ss sliceSorter) less(v reflect.Value, i, j int) bool { // use Comparers on K or the K.Equal method if it exists. // // The less function must be: -// • Deterministic: less(x, y) == less(x, y) -// • Irreflexive: !less(x, x) -// • Transitive: if !less(x, y) and !less(y, z), then !less(x, z) -// • Total: if x != y, then either less(x, y) or less(y, x) +// - Deterministic: less(x, y) == less(x, y) +// - Irreflexive: !less(x, x) +// - Transitive: if !less(x, y) and !less(y, z), then !less(x, z) +// - Total: if x != y, then either less(x, y) or less(y, x) // // SortMaps can be used in conjunction with EquateEmpty. func SortMaps(lessFunc interface{}) cmp.Option { diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go index 97f707983..ca11a4024 100644 --- a/vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go +++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package cmpopts @@ -42,7 +42,7 @@ func newStructFilter(typ interface{}, names ...string) structFilter { t := reflect.TypeOf(typ) if t == nil || t.Kind() != reflect.Struct { - panic(fmt.Sprintf("%T must be a struct", typ)) + panic(fmt.Sprintf("%T must be a non-pointer struct", typ)) } var ft fieldTree for _, name := range names { @@ -67,12 +67,14 @@ func (sf structFilter) filter(p cmp.Path) bool { // fieldTree represents a set of dot-separated identifiers. // // For example, inserting the following selectors: +// // Foo // Foo.Bar.Baz // Foo.Buzz // Nuka.Cola.Quantum // // Results in a tree of the form: +// // {sub: { // "Foo": {ok: true, sub: { // "Bar": {sub: { @@ -160,14 +162,19 @@ func canonicalName(t reflect.Type, sel string) ([]string, error) { // Find the canonical name for this current field name. // If the field exists in an embedded struct, then it will be expanded. + sf, _ := t.FieldByName(name) if !isExported(name) { - // Disallow unexported fields: - // * To discourage people from actually touching unexported fields - // * FieldByName is buggy (https://golang.org/issue/4876) - return []string{name}, fmt.Errorf("name must be exported") + // Avoid using reflect.Type.FieldByName for unexported fields due to + // buggy behavior with regard to embeddeding and unexported fields. + // See https://golang.org/issue/4876 for details. 
+ sf = reflect.StructField{} + for i := 0; i < t.NumField() && sf.Name == ""; i++ { + if t.Field(i).Name == name { + sf = t.Field(i) + } + } } - sf, ok := t.FieldByName(name) - if !ok { + if sf.Name == "" { return []string{name}, fmt.Errorf("does not exist") } var ss []string diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go index 9d651553d..8812443a2 100644 --- a/vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go +++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go @@ -1,6 +1,6 @@ // Copyright 2018, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package cmpopts @@ -23,6 +23,7 @@ func (xf xformFilter) filter(p cmp.Path) bool { // that the transformer cannot be recursively applied upon its own output. // // An example use case is a transformer that splits a string by lines: +// // AcyclicTransformer("SplitLines", func(s string) []string{ // return strings.Split(s, "\n") // }) diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go index c9a63ceda..087320da7 100644 --- a/vendor/github.com/google/go-cmp/cmp/compare.go +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -1,29 +1,33 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. // Package cmp determines equality of values. // // This package is intended to be a more powerful and safer alternative to // reflect.DeepEqual for comparing whether two values are semantically equal. +// It is intended to only be used in tests, as performance is not a goal and +// it may panic if it cannot compare the values. Its propensity towards +// panicking means that its unsuitable for production environments where a +// spurious panic may be fatal. // // The primary features of cmp are: // -// • When the default behavior of equality does not suit the needs of the test, -// custom equality functions can override the equality operation. -// For example, an equality function may report floats as equal so long as they -// are within some tolerance of each other. +// - When the default behavior of equality does not suit the test's needs, +// custom equality functions can override the equality operation. +// For example, an equality function may report floats as equal so long as +// they are within some tolerance of each other. // -// • Types that have an Equal method may use that method to determine equality. -// This allows package authors to determine the equality operation for the types -// that they define. +// - Types with an Equal method may use that method to determine equality. +// This allows package authors to determine the equality operation +// for the types that they define. // -// • If no custom equality functions are used and no Equal method is defined, -// equality is determined by recursively comparing the primitive kinds on both -// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported -// fields are not compared by default; they result in panics unless suppressed -// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly -// compared using the Exporter option. 
+// - If no custom equality functions are used and no Equal method is defined, +// equality is determined by recursively comparing the primitive kinds on +// both values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, +// unexported fields are not compared by default; they result in panics +// unless suppressed by using an Ignore option (see cmpopts.IgnoreUnexported) +// or explicitly compared using the Exporter option. package cmp import ( @@ -32,33 +36,34 @@ import ( "strings" "github.com/google/go-cmp/cmp/internal/diff" - "github.com/google/go-cmp/cmp/internal/flags" "github.com/google/go-cmp/cmp/internal/function" "github.com/google/go-cmp/cmp/internal/value" ) +// TODO(≥go1.18): Use any instead of interface{}. + // Equal reports whether x and y are equal by recursively applying the // following rules in the given order to x and y and all of their sub-values: // -// • Let S be the set of all Ignore, Transformer, and Comparer options that -// remain after applying all path filters, value filters, and type filters. -// If at least one Ignore exists in S, then the comparison is ignored. -// If the number of Transformer and Comparer options in S is greater than one, -// then Equal panics because it is ambiguous which option to use. -// If S contains a single Transformer, then use that to transform the current -// values and recursively call Equal on the output values. -// If S contains a single Comparer, then use that to compare the current values. -// Otherwise, evaluation proceeds to the next rule. +// - Let S be the set of all Ignore, Transformer, and Comparer options that +// remain after applying all path filters, value filters, and type filters. +// If at least one Ignore exists in S, then the comparison is ignored. +// If the number of Transformer and Comparer options in S is non-zero, +// then Equal panics because it is ambiguous which option to use. +// If S contains a single Transformer, then use that to transform +// the current values and recursively call Equal on the output values. +// If S contains a single Comparer, then use that to compare the current values. +// Otherwise, evaluation proceeds to the next rule. // -// • If the values have an Equal method of the form "(T) Equal(T) bool" or -// "(T) Equal(I) bool" where T is assignable to I, then use the result of -// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and -// evaluation proceeds to the next rule. +// - If the values have an Equal method of the form "(T) Equal(T) bool" or +// "(T) Equal(I) bool" where T is assignable to I, then use the result of +// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and +// evaluation proceeds to the next rule. // -// • Lastly, try to compare x and y based on their basic kinds. -// Simple kinds like booleans, integers, floats, complex numbers, strings, and -// channels are compared using the equivalent of the == operator in Go. -// Functions are only equal if they are both nil, otherwise they are unequal. +// - Lastly, try to compare x and y based on their basic kinds. +// Simple kinds like booleans, integers, floats, complex numbers, strings, +// and channels are compared using the equivalent of the == operator in Go. +// Functions are only equal if they are both nil, otherwise they are unequal. // // Structs are equal if recursively calling Equal on all fields report equal. 
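(Editor's note, not part of the patch: a short sketch of the unexported-field behavior described in the rewritten package comment above; the counter type is hypothetical.)

package main

import (
	"fmt"
	"reflect"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

type counter struct {
	Label string
	n     int // unexported: Equal panics on it unless ignored or explicitly exported
}

func main() {
	x := counter{Label: "hits", n: 1}
	y := counter{Label: "hits", n: 2}

	// cmp.Equal(x, y) would panic here, suggesting a Comparer, Exporter, or IgnoreUnexported.
	fmt.Println(cmp.Equal(x, y, cmpopts.IgnoreUnexported(counter{})))                   // true: n is skipped
	fmt.Println(cmp.Equal(x, y, cmp.Exporter(func(reflect.Type) bool { return true }))) // false: n differs
}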
// If a struct contains unexported fields, Equal panics unless an Ignore option @@ -86,6 +91,52 @@ import ( // If there is a cycle, then the pointed at values are considered equal // only if both addresses were previously visited in the same path step. func Equal(x, y interface{}, opts ...Option) bool { + s := newState(opts) + s.compareAny(rootStep(x, y)) + return s.result.Equal() +} + +// Diff returns a human-readable report of the differences between two values: +// y - x. It returns an empty string if and only if Equal returns true for the +// same input values and options. +// +// The output is displayed as a literal in pseudo-Go syntax. +// At the start of each line, a "-" prefix indicates an element removed from x, +// a "+" prefix to indicates an element added from y, and the lack of a prefix +// indicates an element common to both x and y. If possible, the output +// uses fmt.Stringer.String or error.Error methods to produce more humanly +// readable outputs. In such cases, the string is prefixed with either an +// 's' or 'e' character, respectively, to indicate that the method was called. +// +// Do not depend on this output being stable. If you need the ability to +// programmatically interpret the difference, consider using a custom Reporter. +func Diff(x, y interface{}, opts ...Option) string { + s := newState(opts) + + // Optimization: If there are no other reporters, we can optimize for the + // common case where the result is equal (and thus no reported difference). + // This avoids the expensive construction of a difference tree. + if len(s.reporters) == 0 { + s.compareAny(rootStep(x, y)) + if s.result.Equal() { + return "" + } + s.result = diff.Result{} // Reset results + } + + r := new(defaultReporter) + s.reporters = append(s.reporters, reporter{r}) + s.compareAny(rootStep(x, y)) + d := r.String() + if (d == "") != s.result.Equal() { + panic("inconsistent difference and equality results") + } + return d +} + +// rootStep constructs the first path step. If x and y have differing types, +// then they are stored within an empty interface type. +func rootStep(x, y interface{}) PathStep { vx := reflect.ValueOf(x) vy := reflect.ValueOf(y) @@ -93,7 +144,7 @@ func Equal(x, y interface{}, opts ...Option) bool { // so that they have the same parent type. var t reflect.Type if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() { - t = reflect.TypeOf((*interface{})(nil)).Elem() + t = anyType if vx.IsValid() { vvx := reflect.New(t).Elem() vvx.Set(vx) @@ -108,33 +159,7 @@ func Equal(x, y interface{}, opts ...Option) bool { t = vx.Type() } - s := newState(opts) - s.compareAny(&pathStep{t, vx, vy}) - return s.result.Equal() -} - -// Diff returns a human-readable report of the differences between two values. -// It returns an empty string if and only if Equal returns true for the same -// input values and options. -// -// The output is displayed as a literal in pseudo-Go syntax. -// At the start of each line, a "-" prefix indicates an element removed from x, -// a "+" prefix to indicates an element added to y, and the lack of a prefix -// indicates an element common to both x and y. If possible, the output -// uses fmt.Stringer.String or error.Error methods to produce more humanly -// readable outputs. In such cases, the string is prefixed with either an -// 's' or 'e' character, respectively, to indicate that the method was called. -// -// Do not depend on this output being stable. 
If you need the ability to -// programmatically interpret the difference, consider using a custom Reporter. -func Diff(x, y interface{}, opts ...Option) string { - r := new(defaultReporter) - eq := Equal(x, y, Options(opts), Reporter(r)) - d := r.String() - if (d == "") != eq { - panic("inconsistent difference and equality results") - } - return d + return &pathStep{t, vx, vy} } type state struct { @@ -295,7 +320,6 @@ func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool { } func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { - v = sanitizeValue(v, f.Type().In(0)) if !s.dynChecker.Next() { return f.Call([]reflect.Value{v})[0] } @@ -319,8 +343,6 @@ func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { } func (s *state) callTTBFunc(f, x, y reflect.Value) bool { - x = sanitizeValue(x, f.Type().In(0)) - y = sanitizeValue(y, f.Type().In(1)) if !s.dynChecker.Next() { return f.Call([]reflect.Value{x, y})[0].Bool() } @@ -348,20 +370,8 @@ func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { ret = f.Call(vs)[0] } -// sanitizeValue converts nil interfaces of type T to those of type R, -// assuming that T is assignable to R. -// Otherwise, it returns the input value as is. -func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { - // TODO(dsnet): Workaround for reflect bug (https://golang.org/issue/22143). - if !flags.AtLeastGo110 { - if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t { - return reflect.New(t).Elem() - } - } - return v -} - func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { + var addr bool var vax, vay reflect.Value // Addressable versions of vx and vy var mayForce, mayForceInit bool @@ -383,6 +393,7 @@ func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { // For retrieveUnexportedField to work, the parent struct must // be addressable. Create a new copy of the values if // necessary to make them addressable. + addr = vx.CanAddr() || vy.CanAddr() vax = makeAddressable(vx) vay = makeAddressable(vy) } @@ -393,6 +404,7 @@ func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { mayForceInit = true } step.mayForce = mayForce + step.paddr = addr step.pvx = vax step.pvy = vay step.field = t.Field(i) @@ -627,7 +639,9 @@ type dynChecker struct{ curr, next int } // Next increments the state and reports whether a check should be performed. // // Checks occur every Nth function call, where N is a triangular number: +// // 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ... +// // See https://en.wikipedia.org/wiki/Triangular_number // // This sequence ensures that the cost of checks drops significantly as diff --git a/vendor/github.com/google/go-cmp/cmp/export_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go index dd032354f..ae851fe53 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_panic.go +++ b/vendor/github.com/google/go-cmp/cmp/export_panic.go @@ -1,7 +1,8 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. 
+//go:build purego // +build purego package cmp @@ -10,6 +11,6 @@ import "reflect" const supportExporters = false -func retrieveUnexportedField(reflect.Value, reflect.StructField) reflect.Value { +func retrieveUnexportedField(reflect.Value, reflect.StructField, bool) reflect.Value { panic("no support for forcibly accessing unexported fields") } diff --git a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go index 57020e26c..e2c0f74e8 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go @@ -1,7 +1,8 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. +//go:build !purego // +build !purego package cmp @@ -17,9 +18,19 @@ const supportExporters = true // a struct such that the value has read-write permissions. // // The parent struct, v, must be addressable, while f must be a StructField -// describing the field to retrieve. -func retrieveUnexportedField(v reflect.Value, f reflect.StructField) reflect.Value { - // See https://github.com/google/go-cmp/issues/167 for discussion of the - // following expression. - return reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem() +// describing the field to retrieve. If addr is false, +// then the returned value will be shallowed copied to be non-addressable. +func retrieveUnexportedField(v reflect.Value, f reflect.StructField, addr bool) reflect.Value { + ve := reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem() + if !addr { + // A field is addressable if and only if the struct is addressable. + // If the original parent value was not addressable, shallow copy the + // value to make it non-addressable to avoid leaking an implementation + // detail of how forcibly exporting a field works. + if ve.Kind() == reflect.Interface && ve.IsNil() { + return reflect.Zero(f.Type) + } + return reflect.ValueOf(ve.Interface()).Convert(f.Type) + } + return ve } diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go index fe98dcc67..36062a604 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go @@ -1,7 +1,8 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. +//go:build !cmp_debug // +build !cmp_debug package diff diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go index 597b6ae56..a3b97a1ad 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go @@ -1,7 +1,8 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. 
+//go:build cmp_debug // +build cmp_debug package diff diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go index 3d2e42662..a248e5436 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. // Package diff implements an algorithm for producing edit-scripts. // The edit-script is a sequence of operations needed to transform one list @@ -12,6 +12,13 @@ // is more important than obtaining a minimal Levenshtein distance. package diff +import ( + "math/rand" + "time" + + "github.com/google/go-cmp/cmp/internal/flags" +) + // EditType represents a single operation within an edit-script. type EditType uint8 @@ -112,15 +119,17 @@ func (r Result) Similar() bool { return r.NumSame+1 >= r.NumDiff } +var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 + // Difference reports whether two lists of lengths nx and ny are equal // given the definition of equality provided as f. // // This function returns an edit-script, which is a sequence of operations // needed to convert one list into the other. The following invariants for // the edit-script are maintained: -// • eq == (es.Dist()==0) -// • nx == es.LenX() -// • ny == es.LenY() +// - eq == (es.Dist()==0) +// - nx == es.LenX() +// - ny == es.LenY() // // This algorithm is not guaranteed to be an optimal solution (i.e., one that // produces an edit-script with a minimal Levenshtein distance). This algorithm @@ -160,12 +169,13 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { // A diagonal edge is equivalent to a matching symbol between both X and Y. // Invariants: - // • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx - // • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny + // - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx + // - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny // // In general: - // • fwdFrontier.X < revFrontier.X - // • fwdFrontier.Y < revFrontier.Y + // - fwdFrontier.X < revFrontier.X + // - fwdFrontier.Y < revFrontier.Y + // // Unless, it is time for the algorithm to terminate. fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)} revPath := path{-1, point{nx, ny}, make(EditScript, 0)} @@ -177,37 +187,50 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { // approximately the square-root of the search budget. searchBudget := 4 * (nx + ny) // O(n) + // Running the tests with the "cmp_debug" build tag prints a visualization + // of the algorithm running in real-time. This is educational for + // understanding how the algorithm works. See debug_enable.go. + f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es) + // The algorithm below is a greedy, meet-in-the-middle algorithm for // computing sub-optimal edit-scripts between two lists. // // The algorithm is approximately as follows: - // • Searching for differences switches back-and-forth between - // a search that starts at the beginning (the top-left corner), and - // a search that starts at the end (the bottom-right corner). The goal of - // the search is connect with the search from the opposite corner. 
- // • As we search, we build a path in a greedy manner, where the first - // match seen is added to the path (this is sub-optimal, but provides a - // decent result in practice). When matches are found, we try the next pair - // of symbols in the lists and follow all matches as far as possible. - // • When searching for matches, we search along a diagonal going through - // through the "frontier" point. If no matches are found, we advance the - // frontier towards the opposite corner. - // • This algorithm terminates when either the X coordinates or the - // Y coordinates of the forward and reverse frontier points ever intersect. - // + // - Searching for differences switches back-and-forth between + // a search that starts at the beginning (the top-left corner), and + // a search that starts at the end (the bottom-right corner). + // The goal of the search is connect with the search + // from the opposite corner. + // - As we search, we build a path in a greedy manner, + // where the first match seen is added to the path (this is sub-optimal, + // but provides a decent result in practice). When matches are found, + // we try the next pair of symbols in the lists and follow all matches + // as far as possible. + // - When searching for matches, we search along a diagonal going through + // through the "frontier" point. If no matches are found, + // we advance the frontier towards the opposite corner. + // - This algorithm terminates when either the X coordinates or the + // Y coordinates of the forward and reverse frontier points ever intersect. + // This algorithm is correct even if searching only in the forward direction // or in the reverse direction. We do both because it is commonly observed // that two lists commonly differ because elements were added to the front // or end of the other list. // - // Running the tests with the "cmp_debug" build tag prints a visualization - // of the algorithm running in real-time. This is educational for - // understanding how the algorithm works. See debug_enable.go. - f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es) - for { + // Non-deterministically start with either the forward or reverse direction + // to introduce some deliberate instability so that we have the flexibility + // to change this algorithm in the future. + if flags.Deterministic || randBool { + goto forwardSearch + } else { + goto reverseSearch + } + +forwardSearch: + { // Forward search from the beginning. if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { - break + goto finishSearch } for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { // Search in a diagonal pattern for a match. @@ -242,10 +265,14 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { } else { fwdFrontier.Y++ } + goto reverseSearch + } +reverseSearch: + { // Reverse search from the end. if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { - break + goto finishSearch } for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { // Search in a diagonal pattern for a match. @@ -280,8 +307,10 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { } else { revFrontier.Y-- } + goto forwardSearch } +finishSearch: // Join the forward and reverse paths and then append the reverse path. 
fwdPath.connect(revPath.point, f) for i := len(revPath.es) - 1; i >= 0; i-- { @@ -363,6 +392,7 @@ type point struct{ X, Y int } func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy } // zigzag maps a consecutive sequence of integers to a zig-zag sequence. +// // [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...] func zigzag(x int) int { if x&1 != 0 { diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go index a9e7fc0b5..d8e459c9b 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go @@ -1,6 +1,6 @@ // Copyright 2019, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package flags diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go deleted file mode 100644 index 01aed0a15..000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -// +build !go1.10 - -package flags - -// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. -const AtLeastGo110 = false diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go deleted file mode 100644 index c0b667f58..000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -// +build go1.10 - -package flags - -// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. -const AtLeastGo110 = true diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go index ace1dbe86..d127d4362 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. // Package function provides functionality for identifying function types. package function diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go new file mode 100644 index 000000000..7b498bb2c --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go @@ -0,0 +1,164 @@ +// Copyright 2020, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package value + +import ( + "reflect" + "strconv" +) + +var anyType = reflect.TypeOf((*interface{})(nil)).Elem() + +// TypeString is nearly identical to reflect.Type.String, +// but has an additional option to specify that full type names be used. 
+func TypeString(t reflect.Type, qualified bool) string { + return string(appendTypeName(nil, t, qualified, false)) +} + +func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte { + // BUG: Go reflection provides no way to disambiguate two named types + // of the same name and within the same package, + // but declared within the namespace of different functions. + + // Use the "any" alias instead of "interface{}" for better readability. + if t == anyType { + return append(b, "any"...) + } + + // Named type. + if t.Name() != "" { + if qualified && t.PkgPath() != "" { + b = append(b, '"') + b = append(b, t.PkgPath()...) + b = append(b, '"') + b = append(b, '.') + b = append(b, t.Name()...) + } else { + b = append(b, t.String()...) + } + return b + } + + // Unnamed type. + switch k := t.Kind(); k { + case reflect.Bool, reflect.String, reflect.UnsafePointer, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + b = append(b, k.String()...) + case reflect.Chan: + if t.ChanDir() == reflect.RecvDir { + b = append(b, "<-"...) + } + b = append(b, "chan"...) + if t.ChanDir() == reflect.SendDir { + b = append(b, "<-"...) + } + b = append(b, ' ') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Func: + if !elideFunc { + b = append(b, "func"...) + } + b = append(b, '(') + for i := 0; i < t.NumIn(); i++ { + if i > 0 { + b = append(b, ", "...) + } + if i == t.NumIn()-1 && t.IsVariadic() { + b = append(b, "..."...) + b = appendTypeName(b, t.In(i).Elem(), qualified, false) + } else { + b = appendTypeName(b, t.In(i), qualified, false) + } + } + b = append(b, ')') + switch t.NumOut() { + case 0: + // Do nothing + case 1: + b = append(b, ' ') + b = appendTypeName(b, t.Out(0), qualified, false) + default: + b = append(b, " ("...) + for i := 0; i < t.NumOut(); i++ { + if i > 0 { + b = append(b, ", "...) + } + b = appendTypeName(b, t.Out(i), qualified, false) + } + b = append(b, ')') + } + case reflect.Struct: + b = append(b, "struct{ "...) + for i := 0; i < t.NumField(); i++ { + if i > 0 { + b = append(b, "; "...) + } + sf := t.Field(i) + if !sf.Anonymous { + if qualified && sf.PkgPath != "" { + b = append(b, '"') + b = append(b, sf.PkgPath...) + b = append(b, '"') + b = append(b, '.') + } + b = append(b, sf.Name...) + b = append(b, ' ') + } + b = appendTypeName(b, sf.Type, qualified, false) + if sf.Tag != "" { + b = append(b, ' ') + b = strconv.AppendQuote(b, string(sf.Tag)) + } + } + if b[len(b)-1] == ' ' { + b = b[:len(b)-1] + } else { + b = append(b, ' ') + } + b = append(b, '}') + case reflect.Slice, reflect.Array: + b = append(b, '[') + if k == reflect.Array { + b = strconv.AppendUint(b, uint64(t.Len()), 10) + } + b = append(b, ']') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Map: + b = append(b, "map["...) + b = appendTypeName(b, t.Key(), qualified, false) + b = append(b, ']') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Ptr: + b = append(b, '*') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Interface: + b = append(b, "interface{ "...) + for i := 0; i < t.NumMethod(); i++ { + if i > 0 { + b = append(b, "; "...) + } + m := t.Method(i) + if qualified && m.PkgPath != "" { + b = append(b, '"') + b = append(b, m.PkgPath...) + b = append(b, '"') + b = append(b, '.') + } + b = append(b, m.Name...) 
+ b = appendTypeName(b, m.Type, qualified, true) + } + if b[len(b)-1] == ' ' { + b = b[:len(b)-1] + } else { + b = append(b, ' ') + } + b = append(b, '}') + default: + panic("invalid kind: " + k.String()) + } + return b +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go index 0a01c4796..1a71bfcbd 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go @@ -1,7 +1,8 @@ // Copyright 2018, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. +//go:build purego // +build purego package value @@ -21,3 +22,13 @@ func PointerOf(v reflect.Value) Pointer { // assumes that the GC implementation does not use a moving collector. return Pointer{v.Pointer(), v.Type()} } + +// IsNil reports whether the pointer is nil. +func (p Pointer) IsNil() bool { + return p.p == 0 +} + +// Uintptr returns the pointer as a uintptr. +func (p Pointer) Uintptr() uintptr { + return p.p +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go index da134ae2a..16e6860af 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go @@ -1,7 +1,8 @@ // Copyright 2018, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. +//go:build !purego // +build !purego package value @@ -24,3 +25,13 @@ func PointerOf(v reflect.Value) Pointer { // which is necessary if the GC ever uses a moving collector. return Pointer{unsafe.Pointer(v.Pointer()), v.Type()} } + +// IsNil reports whether the pointer is nil. +func (p Pointer) IsNil() bool { + return p.p == nil +} + +// Uintptr returns the pointer as a uintptr. +func (p Pointer) Uintptr() uintptr { + return uintptr(p.p) +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go index 24fbae6e3..98533b036 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package value diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go deleted file mode 100644 index 06a8ffd03..000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. - -package value - -import ( - "math" - "reflect" -) - -// IsZero reports whether v is the zero value. -// This does not rely on Interface and so can be used on unexported fields. 
-func IsZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return v.Bool() == false - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return math.Float64bits(v.Float()) == 0 - case reflect.Complex64, reflect.Complex128: - return math.Float64bits(real(v.Complex())) == 0 && math.Float64bits(imag(v.Complex())) == 0 - case reflect.String: - return v.String() == "" - case reflect.UnsafePointer: - return v.Pointer() == 0 - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - return v.IsNil() - case reflect.Array: - for i := 0; i < v.Len(); i++ { - if !IsZero(v.Index(i)) { - return false - } - } - return true - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - if !IsZero(v.Field(i)) { - return false - } - } - return true - } - return false -} diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go index abbd2a63b..1f9ca9c48 100644 --- a/vendor/github.com/google/go-cmp/cmp/options.go +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package cmp @@ -33,6 +33,7 @@ type Option interface { } // applicableOption represents the following types: +// // Fundamental: ignore | validator | *comparer | *transformer // Grouping: Options type applicableOption interface { @@ -43,6 +44,7 @@ type applicableOption interface { } // coreOption represents the following types: +// // Fundamental: ignore | validator | *comparer | *transformer // Filters: *pathFilter | *valuesFilter type coreOption interface { @@ -225,11 +227,14 @@ func (validator) apply(s *state, vx, vy reflect.Value) { // Unable to Interface implies unexported field without visibility access. if !vx.CanInterface() || !vy.CanInterface() { - const help = "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported" + help := "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported" var name string if t := s.curPath.Index(-2).Type(); t.Name() != "" { // Named type with unexported fields. name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType + if _, ok := reflect.New(t).Interface().(error); ok { + help = "consider using cmpopts.EquateErrors to compare error values" + } } else { // Unnamed type with unexported fields. Derive PkgPath from field. var pkgPath string @@ -333,9 +338,9 @@ func (tr transformer) String() string { // both implement T. 
// // The equality function must be: -// • Symmetric: equal(x, y) == equal(y, x) -// • Deterministic: equal(x, y) == equal(x, y) -// • Pure: equal(x, y) does not modify x or y +// - Symmetric: equal(x, y) == equal(y, x) +// - Deterministic: equal(x, y) == equal(x, y) +// - Pure: equal(x, y) does not modify x or y func Comparer(f interface{}) Option { v := reflect.ValueOf(f) if !function.IsType(v.Type(), function.Equal) || v.IsNil() { @@ -427,7 +432,7 @@ func AllowUnexported(types ...interface{}) Option { } // Result represents the comparison result for a single node and -// is provided by cmp when calling Result (see Reporter). +// is provided by cmp when calling Report (see Reporter). type Result struct { _ [0]func() // Make Result incomparable flags resultFlags diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go index 509d6b852..a0a588502 100644 --- a/vendor/github.com/google/go-cmp/cmp/path.go +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package cmp @@ -41,13 +41,13 @@ type PathStep interface { // The type of each valid value is guaranteed to be identical to Type. // // In some cases, one or both may be invalid or have restrictions: - // • For StructField, both are not interface-able if the current field - // is unexported and the struct type is not explicitly permitted by - // an Exporter to traverse unexported fields. - // • For SliceIndex, one may be invalid if an element is missing from - // either the x or y slice. - // • For MapIndex, one may be invalid if an entry is missing from - // either the x or y map. + // - For StructField, both are not interface-able if the current field + // is unexported and the struct type is not explicitly permitted by + // an Exporter to traverse unexported fields. + // - For SliceIndex, one may be invalid if an element is missing from + // either the x or y slice. + // - For MapIndex, one may be invalid if an entry is missing from + // either the x or y map. // // The provided values must not be mutated. Values() (vx, vy reflect.Value) @@ -94,6 +94,7 @@ func (pa Path) Index(i int) PathStep { // The simplified path only contains struct field accesses. // // For example: +// // MyMap.MySlices.MyField func (pa Path) String() string { var ss []string @@ -108,6 +109,7 @@ func (pa Path) String() string { // GoString returns the path to a specific node using Go syntax. // // For example: +// // (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField func (pa Path) GoString() string { var ssPre, ssPost []string @@ -159,7 +161,7 @@ func (ps pathStep) String() string { if ps.typ == nil { return "" } - s := ps.typ.String() + s := value.TypeString(ps.typ, false) if s == "" || strings.ContainsAny(s, "{}\n") { return "root" // Type too simple or complex to print } @@ -177,7 +179,8 @@ type structField struct { // pvx, pvy, and field are only valid if unexported is true. unexported bool mayForce bool // Forcibly allow visibility - pvx, pvy reflect.Value // Parent values + paddr bool // Was parent addressable? + pvx, pvy reflect.Value // Parent values (always addressable) field reflect.StructField // Field information } @@ -189,8 +192,8 @@ func (sf StructField) Values() (vx, vy reflect.Value) { // Forcibly obtain read-write access to an unexported struct field. 
if sf.mayForce { - vx = retrieveUnexportedField(sf.pvx, sf.field) - vy = retrieveUnexportedField(sf.pvy, sf.field) + vx = retrieveUnexportedField(sf.pvx, sf.field, sf.paddr) + vy = retrieveUnexportedField(sf.pvy, sf.field, sf.paddr) return vx, vy // CanInterface reports true } return sf.vx, sf.vy // CanInterface reports false @@ -281,7 +284,7 @@ type typeAssertion struct { func (ta TypeAssertion) Type() reflect.Type { return ta.typ } func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy } -func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) } +func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) } // Transform is a transformation from the parent type to the current type. type Transform struct{ *transform } @@ -314,7 +317,7 @@ func (tf Transform) Option() Option { return tf.trans } // pops the address from the stack. Thus, when traversing into a pointer from // reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles // by checking whether the pointer has already been visited. The cycle detection -// uses a seperate stack for the x and y values. +// uses a separate stack for the x and y values. // // If a cycle is detected we need to determine whether the two pointers // should be considered equal. The definition of equality chosen by Equal diff --git a/vendor/github.com/google/go-cmp/cmp/report.go b/vendor/github.com/google/go-cmp/cmp/report.go index 6ddf29993..f43cd12eb 100644 --- a/vendor/github.com/google/go-cmp/cmp/report.go +++ b/vendor/github.com/google/go-cmp/cmp/report.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package cmp @@ -41,7 +41,10 @@ func (r *defaultReporter) String() string { if r.root.NumDiff == 0 { return "" } - return formatOptions{}.FormatDiff(r.root).String() + ptrs := new(pointerReferences) + text := formatOptions{}.FormatDiff(r.root, ptrs) + resolveReferences(text) + return text.String() } func assert(ok bool) { diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go index 17a05eede..2050bf6b4 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_compare.go +++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -1,24 +1,14 @@ // Copyright 2019, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package cmp import ( "fmt" "reflect" - - "github.com/google/go-cmp/cmp/internal/value" ) -// TODO: Enforce limits? -// * Enforce maximum number of records to print per node? -// * Enforce maximum size in bytes allowed? -// * As a heuristic, use less verbosity for equal nodes than unequal nodes. -// TODO: Enforce unique outputs? -// * Avoid Stringer methods if it results in same output? -// * Print pointer address if outputs still equal? - // numContextRecords is the number of surrounding equal records to print. 
const numContextRecords = 2 @@ -71,24 +61,69 @@ func (opts formatOptions) WithTypeMode(t typeMode) formatOptions { opts.TypeMode = t return opts } +func (opts formatOptions) WithVerbosity(level int) formatOptions { + opts.VerbosityLevel = level + opts.LimitVerbosity = true + return opts +} +func (opts formatOptions) verbosity() uint { + switch { + case opts.VerbosityLevel < 0: + return 0 + case opts.VerbosityLevel > 16: + return 16 // some reasonable maximum to avoid shift overflow + default: + return uint(opts.VerbosityLevel) + } +} + +const maxVerbosityPreset = 6 + +// verbosityPreset modifies the verbosity settings given an index +// between 0 and maxVerbosityPreset, inclusive. +func verbosityPreset(opts formatOptions, i int) formatOptions { + opts.VerbosityLevel = int(opts.verbosity()) + 2*i + if i > 0 { + opts.AvoidStringer = true + } + if i >= maxVerbosityPreset { + opts.PrintAddresses = true + opts.QualifiedNames = true + } + return opts +} // FormatDiff converts a valueNode tree into a textNode tree, where the later // is a textual representation of the differences detected in the former. -func (opts formatOptions) FormatDiff(v *valueNode) textNode { +func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) { + if opts.DiffMode == diffIdentical { + opts = opts.WithVerbosity(1) + } else if opts.verbosity() < 3 { + opts = opts.WithVerbosity(3) + } + // Check whether we have specialized formatting for this node. // This is not necessary, but helpful for producing more readable outputs. if opts.CanFormatDiffSlice(v) { return opts.FormatDiffSlice(v) } + var parentKind reflect.Kind + if v.parent != nil && v.parent.TransformerName == "" { + parentKind = v.parent.Type.Kind() + } + // For leaf nodes, format the value based on the reflect.Values alone. - if v.MaxDepth == 0 { + // As a special case, treat equal []byte as a leaf nodes. + isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == byteType + isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0 + if v.MaxDepth == 0 || isEqualBytes { switch opts.DiffMode { case diffUnknown, diffIdentical: // Format Equal. if v.NumDiff == 0 { - outx := opts.FormatValue(v.ValueX, visitedPointers{}) - outy := opts.FormatValue(v.ValueY, visitedPointers{}) + outx := opts.FormatValue(v.ValueX, parentKind, ptrs) + outy := opts.FormatValue(v.ValueY, parentKind, ptrs) if v.NumIgnored > 0 && v.NumSame == 0 { return textEllipsis } else if outx.Len() < outy.Len() { @@ -101,8 +136,13 @@ func (opts formatOptions) FormatDiff(v *valueNode) textNode { // Format unequal. 
assert(opts.DiffMode == diffUnknown) var list textList - outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, visitedPointers{}) - outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, visitedPointers{}) + outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, parentKind, ptrs) + outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, parentKind, ptrs) + for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ { + opts2 := verbosityPreset(opts, i).WithTypeMode(elideType) + outx = opts2.FormatValue(v.ValueX, parentKind, ptrs) + outy = opts2.FormatValue(v.ValueY, parentKind, ptrs) + } if outx != nil { list = append(list, textRecord{Diff: '-', Value: outx}) } @@ -111,34 +151,57 @@ func (opts formatOptions) FormatDiff(v *valueNode) textNode { } return opts.WithTypeMode(emitType).FormatType(v.Type, list) case diffRemoved: - return opts.FormatValue(v.ValueX, visitedPointers{}) + return opts.FormatValue(v.ValueX, parentKind, ptrs) case diffInserted: - return opts.FormatValue(v.ValueY, visitedPointers{}) + return opts.FormatValue(v.ValueY, parentKind, ptrs) default: panic("invalid diff mode") } } + // Register slice element to support cycle detection. + if parentKind == reflect.Slice { + ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, true) + defer ptrs.Pop() + defer func() { out = wrapTrunkReferences(ptrRefs, out) }() + } + // Descend into the child value node. if v.TransformerName != "" { - out := opts.WithTypeMode(emitType).FormatDiff(v.Value) - out = textWrap{"Inverse(" + v.TransformerName + ", ", out, ")"} + out := opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs) + out = &textWrap{Prefix: "Inverse(" + v.TransformerName + ", ", Value: out, Suffix: ")"} return opts.FormatType(v.Type, out) } else { switch k := v.Type.Kind(); k { - case reflect.Struct, reflect.Array, reflect.Slice, reflect.Map: - return opts.FormatType(v.Type, opts.formatDiffList(v.Records, k)) + case reflect.Struct, reflect.Array, reflect.Slice: + out = opts.formatDiffList(v.Records, k, ptrs) + out = opts.FormatType(v.Type, out) + case reflect.Map: + // Register map to support cycle detection. + ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false) + defer ptrs.Pop() + + out = opts.formatDiffList(v.Records, k, ptrs) + out = wrapTrunkReferences(ptrRefs, out) + out = opts.FormatType(v.Type, out) case reflect.Ptr: - return textWrap{"&", opts.FormatDiff(v.Value), ""} + // Register pointer to support cycle detection. + ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false) + defer ptrs.Pop() + + out = opts.FormatDiff(v.Value, ptrs) + out = wrapTrunkReferences(ptrRefs, out) + out = &textWrap{Prefix: "&", Value: out} case reflect.Interface: - return opts.WithTypeMode(emitType).FormatDiff(v.Value) + out = opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs) default: panic(fmt.Sprintf("%v cannot have children", k)) } + return out } } -func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) textNode { +func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, ptrs *pointerReferences) textNode { // Derive record name based on the data structure kind. 
var name string var formatKey func(reflect.Value) string @@ -154,7 +217,17 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te case reflect.Map: name = "entry" opts = opts.WithTypeMode(elideType) - formatKey = formatMapKey + formatKey = func(v reflect.Value) string { return formatMapKey(v, false, ptrs) } + } + + maxLen := -1 + if opts.LimitVerbosity { + if opts.DiffMode == diffIdentical { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + } else { + maxLen = (1 << opts.verbosity()) << 1 // 2, 4, 8, 16, 32, 64, etc... + } + opts.VerbosityLevel-- } // Handle unification. @@ -163,16 +236,21 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te var list textList var deferredEllipsis bool // Add final "..." to indicate records were dropped for _, r := range recs { + if len(list) == maxLen { + deferredEllipsis = true + break + } + // Elide struct fields that are zero value. if k == reflect.Struct { var isZero bool switch opts.DiffMode { case diffIdentical: - isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueY) + isZero = r.Value.ValueX.IsZero() || r.Value.ValueY.IsZero() case diffRemoved: - isZero = value.IsZero(r.Value.ValueX) + isZero = r.Value.ValueX.IsZero() case diffInserted: - isZero = value.IsZero(r.Value.ValueY) + isZero = r.Value.ValueY.IsZero() } if isZero { continue @@ -186,23 +264,31 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te } continue } - if out := opts.FormatDiff(r.Value); out != nil { + if out := opts.FormatDiff(r.Value, ptrs); out != nil { list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) } } if deferredEllipsis { list.AppendEllipsis(diffStats{}) } - return textWrap{"{", list, "}"} + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} case diffUnknown: default: panic("invalid diff mode") } // Handle differencing. + var numDiffs int var list textList + var keys []reflect.Value // invariant: len(list) == len(keys) groups := coalesceAdjacentRecords(name, recs) + maxGroup := diffStats{Name: name} for i, ds := range groups { + if maxLen >= 0 && numDiffs >= maxLen { + maxGroup = maxGroup.Append(ds) + continue + } + // Handle equal records. if ds.NumDiff() == 0 { // Compute the number of leading and trailing records to print. @@ -226,16 +312,21 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te // Format the equal values. 
for _, r := range recs[:numLo] { - out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value) + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs) list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) } if numEqual > numLo+numHi { ds.NumIdentical -= numLo + numHi list.AppendEllipsis(ds) + for len(keys) < len(list) { + keys = append(keys, reflect.Value{}) + } } for _, r := range recs[numEqual-numHi : numEqual] { - out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value) + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs) list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) } recs = recs[numEqual:] continue @@ -247,24 +338,70 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te case opts.CanFormatDiffSlice(r.Value): out := opts.FormatDiffSlice(r.Value) list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) case r.Value.NumChildren == r.Value.MaxDepth: - outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value) - outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value) + outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs) + outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs) + for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ { + opts2 := verbosityPreset(opts, i) + outx = opts2.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs) + outy = opts2.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs) + } if outx != nil { list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx}) + keys = append(keys, r.Key) } if outy != nil { list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy}) + keys = append(keys, r.Key) } default: - out := opts.FormatDiff(r.Value) + out := opts.FormatDiff(r.Value, ptrs) list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) } } recs = recs[ds.NumDiff():] + numDiffs += ds.NumDiff() + } + if maxGroup.IsZero() { + assert(len(recs) == 0) + } else { + list.AppendEllipsis(maxGroup) + for len(keys) < len(list) { + keys = append(keys, reflect.Value{}) + } } - assert(len(recs) == 0) - return textWrap{"{", list, "}"} + assert(len(list) == len(keys)) + + // For maps, the default formatting logic uses fmt.Stringer which may + // produce ambiguous output. Avoid calling String to disambiguate. + if k == reflect.Map { + var ambiguous bool + seenKeys := map[string]reflect.Value{} + for i, currKey := range keys { + if currKey.IsValid() { + strKey := list[i].Key + prevKey, seen := seenKeys[strKey] + if seen && prevKey.CanInterface() && currKey.CanInterface() { + ambiguous = prevKey.Interface() != currKey.Interface() + if ambiguous { + break + } + } + seenKeys[strKey] = currKey + } + } + if ambiguous { + for i, k := range keys { + if k.IsValid() { + list[i].Key = formatMapKey(k, true, ptrs) + } + } + } + } + + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} } // coalesceAdjacentRecords coalesces the list of records into groups of diff --git a/vendor/github.com/google/go-cmp/cmp/report_references.go b/vendor/github.com/google/go-cmp/cmp/report_references.go new file mode 100644 index 000000000..be31b33a9 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_references.go @@ -0,0 +1,264 @@ +// Copyright 2020, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +import ( + "fmt" + "reflect" + "strings" + + "github.com/google/go-cmp/cmp/internal/flags" + "github.com/google/go-cmp/cmp/internal/value" +) + +const ( + pointerDelimPrefix = "⟪" + pointerDelimSuffix = "⟫" +) + +// formatPointer prints the address of the pointer. +func formatPointer(p value.Pointer, withDelims bool) string { + v := p.Uintptr() + if flags.Deterministic { + v = 0xdeadf00f // Only used for stable testing purposes + } + if withDelims { + return pointerDelimPrefix + formatHex(uint64(v)) + pointerDelimSuffix + } + return formatHex(uint64(v)) +} + +// pointerReferences is a stack of pointers visited so far. +type pointerReferences [][2]value.Pointer + +func (ps *pointerReferences) PushPair(vx, vy reflect.Value, d diffMode, deref bool) (pp [2]value.Pointer) { + if deref && vx.IsValid() { + vx = vx.Addr() + } + if deref && vy.IsValid() { + vy = vy.Addr() + } + switch d { + case diffUnknown, diffIdentical: + pp = [2]value.Pointer{value.PointerOf(vx), value.PointerOf(vy)} + case diffRemoved: + pp = [2]value.Pointer{value.PointerOf(vx), value.Pointer{}} + case diffInserted: + pp = [2]value.Pointer{value.Pointer{}, value.PointerOf(vy)} + } + *ps = append(*ps, pp) + return pp +} + +func (ps *pointerReferences) Push(v reflect.Value) (p value.Pointer, seen bool) { + p = value.PointerOf(v) + for _, pp := range *ps { + if p == pp[0] || p == pp[1] { + return p, true + } + } + *ps = append(*ps, [2]value.Pointer{p, p}) + return p, false +} + +func (ps *pointerReferences) Pop() { + *ps = (*ps)[:len(*ps)-1] +} + +// trunkReferences is metadata for a textNode indicating that the sub-tree +// represents the value for either pointer in a pair of references. +type trunkReferences struct{ pp [2]value.Pointer } + +// trunkReference is metadata for a textNode indicating that the sub-tree +// represents the value for the given pointer reference. +type trunkReference struct{ p value.Pointer } + +// leafReference is metadata for a textNode indicating that the value is +// truncated as it refers to another part of the tree (i.e., a trunk). +type leafReference struct{ p value.Pointer } + +func wrapTrunkReferences(pp [2]value.Pointer, s textNode) textNode { + switch { + case pp[0].IsNil(): + return &textWrap{Value: s, Metadata: trunkReference{pp[1]}} + case pp[1].IsNil(): + return &textWrap{Value: s, Metadata: trunkReference{pp[0]}} + case pp[0] == pp[1]: + return &textWrap{Value: s, Metadata: trunkReference{pp[0]}} + default: + return &textWrap{Value: s, Metadata: trunkReferences{pp}} + } +} +func wrapTrunkReference(p value.Pointer, printAddress bool, s textNode) textNode { + var prefix string + if printAddress { + prefix = formatPointer(p, true) + } + return &textWrap{Prefix: prefix, Value: s, Metadata: trunkReference{p}} +} +func makeLeafReference(p value.Pointer, printAddress bool) textNode { + out := &textWrap{Prefix: "(", Value: textEllipsis, Suffix: ")"} + var prefix string + if printAddress { + prefix = formatPointer(p, true) + } + return &textWrap{Prefix: prefix, Value: out, Metadata: leafReference{p}} +} + +// resolveReferences walks the textNode tree searching for any leaf reference +// metadata and resolves each against the corresponding trunk references. +// Since pointer addresses in memory are not particularly readable to the user, +// it replaces each pointer value with an arbitrary and unique reference ID. 
+func resolveReferences(s textNode) { + var walkNodes func(textNode, func(textNode)) + walkNodes = func(s textNode, f func(textNode)) { + f(s) + switch s := s.(type) { + case *textWrap: + walkNodes(s.Value, f) + case textList: + for _, r := range s { + walkNodes(r.Value, f) + } + } + } + + // Collect all trunks and leaves with reference metadata. + var trunks, leaves []*textWrap + walkNodes(s, func(s textNode) { + if s, ok := s.(*textWrap); ok { + switch s.Metadata.(type) { + case leafReference: + leaves = append(leaves, s) + case trunkReference, trunkReferences: + trunks = append(trunks, s) + } + } + }) + + // No leaf references to resolve. + if len(leaves) == 0 { + return + } + + // Collect the set of all leaf references to resolve. + leafPtrs := make(map[value.Pointer]bool) + for _, leaf := range leaves { + leafPtrs[leaf.Metadata.(leafReference).p] = true + } + + // Collect the set of trunk pointers that are always paired together. + // This allows us to assign a single ID to both pointers for brevity. + // If a pointer in a pair ever occurs by itself or as a different pair, + // then the pair is broken. + pairedTrunkPtrs := make(map[value.Pointer]value.Pointer) + unpair := func(p value.Pointer) { + if !pairedTrunkPtrs[p].IsNil() { + pairedTrunkPtrs[pairedTrunkPtrs[p]] = value.Pointer{} // invalidate other half + } + pairedTrunkPtrs[p] = value.Pointer{} // invalidate this half + } + for _, trunk := range trunks { + switch p := trunk.Metadata.(type) { + case trunkReference: + unpair(p.p) // standalone pointer cannot be part of a pair + case trunkReferences: + p0, ok0 := pairedTrunkPtrs[p.pp[0]] + p1, ok1 := pairedTrunkPtrs[p.pp[1]] + switch { + case !ok0 && !ok1: + // Register the newly seen pair. + pairedTrunkPtrs[p.pp[0]] = p.pp[1] + pairedTrunkPtrs[p.pp[1]] = p.pp[0] + case ok0 && ok1 && p0 == p.pp[1] && p1 == p.pp[0]: + // Exact pair already seen; do nothing. + default: + // Pair conflicts with some other pair; break all pairs. + unpair(p.pp[0]) + unpair(p.pp[1]) + } + } + } + + // Correlate each pointer referenced by leaves to a unique identifier, + // and print the IDs for each trunk that matches those pointers. 
+ var nextID uint + ptrIDs := make(map[value.Pointer]uint) + newID := func() uint { + id := nextID + nextID++ + return id + } + for _, trunk := range trunks { + switch p := trunk.Metadata.(type) { + case trunkReference: + if print := leafPtrs[p.p]; print { + id, ok := ptrIDs[p.p] + if !ok { + id = newID() + ptrIDs[p.p] = id + } + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id)) + } + case trunkReferences: + print0 := leafPtrs[p.pp[0]] + print1 := leafPtrs[p.pp[1]] + if print0 || print1 { + id0, ok0 := ptrIDs[p.pp[0]] + id1, ok1 := ptrIDs[p.pp[1]] + isPair := pairedTrunkPtrs[p.pp[0]] == p.pp[1] && pairedTrunkPtrs[p.pp[1]] == p.pp[0] + if isPair { + var id uint + assert(ok0 == ok1) // must be seen together or not at all + if ok0 { + assert(id0 == id1) // must have the same ID + id = id0 + } else { + id = newID() + ptrIDs[p.pp[0]] = id + ptrIDs[p.pp[1]] = id + } + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id)) + } else { + if print0 && !ok0 { + id0 = newID() + ptrIDs[p.pp[0]] = id0 + } + if print1 && !ok1 { + id1 = newID() + ptrIDs[p.pp[1]] = id1 + } + switch { + case print0 && print1: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)+","+formatReference(id1)) + case print0: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)) + case print1: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id1)) + } + } + } + } + } + + // Update all leaf references with the unique identifier. + for _, leaf := range leaves { + if id, ok := ptrIDs[leaf.Metadata.(leafReference).p]; ok { + leaf.Prefix = updateReferencePrefix(leaf.Prefix, formatReference(id)) + } + } +} + +func formatReference(id uint) string { + return fmt.Sprintf("ref#%d", id) +} + +func updateReferencePrefix(prefix, ref string) string { + if prefix == "" { + return pointerDelimPrefix + ref + pointerDelimSuffix + } + suffix := strings.TrimPrefix(prefix, pointerDelimPrefix) + return pointerDelimPrefix + ref + ": " + suffix +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go index 2761b6289..2ab41fad3 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_reflect.go +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -1,33 +1,49 @@ // Copyright 2019, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package cmp import ( + "bytes" "fmt" "reflect" "strconv" "strings" "unicode" + "unicode/utf8" - "github.com/google/go-cmp/cmp/internal/flags" "github.com/google/go-cmp/cmp/internal/value" ) +var ( + anyType = reflect.TypeOf((*interface{})(nil)).Elem() + stringType = reflect.TypeOf((*string)(nil)).Elem() + bytesType = reflect.TypeOf((*[]byte)(nil)).Elem() + byteType = reflect.TypeOf((*byte)(nil)).Elem() +) + type formatValueOptions struct { // AvoidStringer controls whether to avoid calling custom stringer // methods like error.Error or fmt.Stringer.String. AvoidStringer bool - // ShallowPointers controls whether to avoid descending into pointers. - // Useful when printing map keys, where pointer comparison is performed - // on the pointer address rather than the pointed-at value. - ShallowPointers bool - // PrintAddresses controls whether to print the address of all pointers, // slice elements, and maps. 
PrintAddresses bool + + // QualifiedNames controls whether FormatType uses the fully qualified name + // (including the full package path as opposed to just the package name). + QualifiedNames bool + + // VerbosityLevel controls the amount of output to produce. + // A higher value produces more output. A value of zero or lower produces + // no output (represented using an ellipsis). + // If LimitVerbosity is false, then the level is treated as infinite. + VerbosityLevel int + + // LimitVerbosity specifies that formatting should respect VerbosityLevel. + LimitVerbosity bool } // FormatType prints the type as if it were wrapping s. @@ -44,12 +60,15 @@ func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode { default: return s } + if opts.DiffMode == diffIdentical { + return s // elide type for identical nodes + } case elideType: return s } // Determine the type label, applying special handling for unnamed types. - typeName := t.String() + typeName := value.TypeString(t, opts.QualifiedNames) if t.Name() == "" { // According to Go grammar, certain type literals contain symbols that // do not strongly bind to the next lexicographical token (e.g., *T). @@ -57,39 +76,77 @@ func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode { case reflect.Chan, reflect.Func, reflect.Ptr: typeName = "(" + typeName + ")" } - typeName = strings.Replace(typeName, "struct {", "struct{", -1) - typeName = strings.Replace(typeName, "interface {", "interface{", -1) } + return &textWrap{Prefix: typeName, Value: wrapParens(s)} +} + +// wrapParens wraps s with a set of parenthesis, but avoids it if the +// wrapped node itself is already surrounded by a pair of parenthesis or braces. +// It handles unwrapping one level of pointer-reference nodes. +func wrapParens(s textNode) textNode { + var refNode *textWrap + if s2, ok := s.(*textWrap); ok { + // Unwrap a single pointer reference node. + switch s2.Metadata.(type) { + case leafReference, trunkReference, trunkReferences: + refNode = s2 + if s3, ok := refNode.Value.(*textWrap); ok { + s2 = s3 + } + } - // Avoid wrap the value in parenthesis if unnecessary. - if s, ok := s.(textWrap); ok { - hasParens := strings.HasPrefix(s.Prefix, "(") && strings.HasSuffix(s.Suffix, ")") - hasBraces := strings.HasPrefix(s.Prefix, "{") && strings.HasSuffix(s.Suffix, "}") + // Already has delimiters that make parenthesis unnecessary. + hasParens := strings.HasPrefix(s2.Prefix, "(") && strings.HasSuffix(s2.Suffix, ")") + hasBraces := strings.HasPrefix(s2.Prefix, "{") && strings.HasSuffix(s2.Suffix, "}") if hasParens || hasBraces { - return textWrap{typeName, s, ""} + return s } } - return textWrap{typeName + "(", s, ")"} + if refNode != nil { + refNode.Value = &textWrap{Prefix: "(", Value: refNode.Value, Suffix: ")"} + return s + } + return &textWrap{Prefix: "(", Value: s, Suffix: ")"} } // FormatValue prints the reflect.Value, taking extra care to avoid descending -// into pointers already in m. As pointers are visited, m is also updated. -func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out textNode) { +// into pointers already in ptrs. As pointers are visited, ptrs is also updated. +func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, ptrs *pointerReferences) (out textNode) { if !v.IsValid() { return nil } t := v.Type() + // Check slice element for cycles. 
+ if parentKind == reflect.Slice { + ptrRef, visited := ptrs.Push(v.Addr()) + if visited { + return makeLeafReference(ptrRef, false) + } + defer ptrs.Pop() + defer func() { out = wrapTrunkReference(ptrRef, false, out) }() + } + // Check whether there is an Error or String method to call. if !opts.AvoidStringer && v.CanInterface() { // Avoid calling Error or String methods on nil receivers since many // implementations crash when doing so. if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() { - switch v := v.Interface().(type) { - case error: - return textLine("e" + formatString(v.Error())) - case fmt.Stringer: - return textLine("s" + formatString(v.String())) + var prefix, strVal string + func() { + // Swallow and ignore any panics from String or Error. + defer func() { recover() }() + switch v := v.Interface().(type) { + case error: + strVal = v.Error() + prefix = "e" + case fmt.Stringer: + strVal = v.String() + prefix = "s" + } + }() + if prefix != "" { + return opts.formatString(prefix, strVal) } } } @@ -102,114 +159,213 @@ func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out t } }() - var ptr string switch t.Kind() { case reflect.Bool: return textLine(fmt.Sprint(v.Bool())) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return textLine(fmt.Sprint(v.Int())) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - // Unnamed uints are usually bytes or words, so use hexadecimal. - if t.PkgPath() == "" || t.Kind() == reflect.Uintptr { + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return textLine(fmt.Sprint(v.Uint())) + case reflect.Uint8: + if parentKind == reflect.Slice || parentKind == reflect.Array { return textLine(formatHex(v.Uint())) } return textLine(fmt.Sprint(v.Uint())) + case reflect.Uintptr: + return textLine(formatHex(v.Uint())) case reflect.Float32, reflect.Float64: return textLine(fmt.Sprint(v.Float())) case reflect.Complex64, reflect.Complex128: return textLine(fmt.Sprint(v.Complex())) case reflect.String: - return textLine(formatString(v.String())) + return opts.formatString("", v.String()) case reflect.UnsafePointer, reflect.Chan, reflect.Func: - return textLine(formatPointer(v)) + return textLine(formatPointer(value.PointerOf(v), true)) case reflect.Struct: var list textList + v := makeAddressable(v) // needed for retrieveUnexportedField + maxLen := v.NumField() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + opts.VerbosityLevel-- + } for i := 0; i < v.NumField(); i++ { vv := v.Field(i) - if value.IsZero(vv) { + if vv.IsZero() { continue // Elide fields with zero values } - s := opts.WithTypeMode(autoType).FormatValue(vv, m) - list = append(list, textRecord{Key: t.Field(i).Name, Value: s}) + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break + } + sf := t.Field(i) + if supportExporters && !isExported(sf.Name) { + vv = retrieveUnexportedField(v, sf, true) + } + s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs) + list = append(list, textRecord{Key: sf.Name, Value: s}) } - return textWrap{"{", list, "}"} + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} case reflect.Slice: if v.IsNil() { return textNil } - if opts.PrintAddresses { - ptr = formatPointer(v) + + // Check whether this is a []byte of text data. 
+ if t.Elem() == byteType { + b := v.Bytes() + isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) } + if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 { + out = opts.formatString("", string(b)) + skipType = true + return opts.FormatType(t, out) + } } + fallthrough case reflect.Array: + maxLen := v.Len() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + opts.VerbosityLevel-- + } var list textList for i := 0; i < v.Len(); i++ { - vi := v.Index(i) - if vi.CanAddr() { // Check for cyclic elements - p := vi.Addr() - if m.Visit(p) { - var out textNode - out = textLine(formatPointer(p)) - out = opts.WithTypeMode(emitType).FormatType(p.Type(), out) - out = textWrap{"*", out, ""} - list = append(list, textRecord{Value: out}) - continue - } + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break } - s := opts.WithTypeMode(elideType).FormatValue(vi, m) + s := opts.WithTypeMode(elideType).FormatValue(v.Index(i), t.Kind(), ptrs) list = append(list, textRecord{Value: s}) } - return textWrap{ptr + "{", list, "}"} + + out = &textWrap{Prefix: "{", Value: list, Suffix: "}"} + if t.Kind() == reflect.Slice && opts.PrintAddresses { + header := fmt.Sprintf("ptr:%v, len:%d, cap:%d", formatPointer(value.PointerOf(v), false), v.Len(), v.Cap()) + out = &textWrap{Prefix: pointerDelimPrefix + header + pointerDelimSuffix, Value: out} + } + return out case reflect.Map: if v.IsNil() { return textNil } - if m.Visit(v) { - return textLine(formatPointer(v)) + + // Check pointer for cycles. + ptrRef, visited := ptrs.Push(v) + if visited { + return makeLeafReference(ptrRef, opts.PrintAddresses) } + defer ptrs.Pop() + maxLen := v.Len() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + opts.VerbosityLevel-- + } var list textList for _, k := range value.SortKeys(v.MapKeys()) { - sk := formatMapKey(k) - sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), m) + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break + } + sk := formatMapKey(k, false, ptrs) + sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), t.Kind(), ptrs) list = append(list, textRecord{Key: sk, Value: sv}) } - if opts.PrintAddresses { - ptr = formatPointer(v) - } - return textWrap{ptr + "{", list, "}"} + + out = &textWrap{Prefix: "{", Value: list, Suffix: "}"} + out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) + return out case reflect.Ptr: if v.IsNil() { return textNil } - if m.Visit(v) || opts.ShallowPointers { - return textLine(formatPointer(v)) + + // Check pointer for cycles. + ptrRef, visited := ptrs.Push(v) + if visited { + out = makeLeafReference(ptrRef, opts.PrintAddresses) + return &textWrap{Prefix: "&", Value: out} } - if opts.PrintAddresses { - ptr = formatPointer(v) + defer ptrs.Pop() + + // Skip the name only if this is an unnamed pointer type. + // Otherwise taking the address of a value does not reproduce + // the named pointer type. 
+ if v.Type().Name() == "" { + skipType = true // Let the underlying value print the type instead } - skipType = true // Let the underlying value print the type instead - return textWrap{"&" + ptr, opts.FormatValue(v.Elem(), m), ""} + out = opts.FormatValue(v.Elem(), t.Kind(), ptrs) + out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) + out = &textWrap{Prefix: "&", Value: out} + return out case reflect.Interface: if v.IsNil() { return textNil } // Interfaces accept different concrete types, // so configure the underlying value to explicitly print the type. - skipType = true // Print the concrete type instead - return opts.WithTypeMode(emitType).FormatValue(v.Elem(), m) + return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs) default: panic(fmt.Sprintf("%v kind not handled", v.Kind())) } } +func (opts formatOptions) formatString(prefix, s string) textNode { + maxLen := len(s) + maxLines := strings.Count(s, "\n") + 1 + if opts.LimitVerbosity { + maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc... + maxLines = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc... + } + + // For multiline strings, use the triple-quote syntax, + // but only use it when printing removed or inserted nodes since + // we only want the extra verbosity for those cases. + lines := strings.Split(strings.TrimSuffix(s, "\n"), "\n") + isTripleQuoted := len(lines) >= 4 && (opts.DiffMode == '-' || opts.DiffMode == '+') + for i := 0; i < len(lines) && isTripleQuoted; i++ { + lines[i] = strings.TrimPrefix(strings.TrimSuffix(lines[i], "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support + isPrintable := func(r rune) bool { + return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable + } + line := lines[i] + isTripleQuoted = !strings.HasPrefix(strings.TrimPrefix(line, prefix), `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" && len(line) <= maxLen + } + if isTripleQuoted { + var list textList + list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true}) + for i, line := range lines { + if numElided := len(lines) - i; i == maxLines-1 && numElided > 1 { + comment := commentString(fmt.Sprintf("%d elided lines", numElided)) + list = append(list, textRecord{Diff: opts.DiffMode, Value: textEllipsis, ElideComma: true, Comment: comment}) + break + } + list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(line), ElideComma: true}) + } + list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true}) + return &textWrap{Prefix: "(", Value: list, Suffix: ")"} + } + + // Format the string as a single-line quoted string. + if len(s) > maxLen+len(textEllipsis) { + return textLine(prefix + formatString(s[:maxLen]) + string(textEllipsis)) + } + return textLine(prefix + formatString(s)) +} + // formatMapKey formats v as if it were a map key. // The result is guaranteed to be a single line. 
-func formatMapKey(v reflect.Value) string { +func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) string { var opts formatOptions + opts.DiffMode = diffIdentical opts.TypeMode = elideType - opts.ShallowPointers = true - s := opts.FormatValue(v, visitedPointers{}).String() + opts.PrintAddresses = disambiguate + opts.AvoidStringer = disambiguate + opts.QualifiedNames = disambiguate + opts.VerbosityLevel = maxVerbosityPreset + opts.LimitVerbosity = true + s := opts.FormatValue(v, reflect.Map, ptrs).String() return strings.TrimSpace(s) } @@ -227,7 +383,7 @@ func formatString(s string) string { rawInvalid := func(r rune) bool { return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t') } - if strings.IndexFunc(s, rawInvalid) < 0 { + if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 { return "`" + s + "`" } return qs @@ -256,23 +412,3 @@ func formatHex(u uint64) string { } return fmt.Sprintf(f, u) } - -// formatPointer prints the address of the pointer. -func formatPointer(v reflect.Value) string { - p := v.Pointer() - if flags.Deterministic { - p = 0xdeadf00f // Only used for stable testing purposes - } - return fmt.Sprintf("⟪0x%x⟫", p) -} - -type visitedPointers map[value.Pointer]struct{} - -// Visit inserts pointer v into the visited map and reports whether it had -// already been visited before. -func (m visitedPointers) Visit(v reflect.Value) bool { - p := value.PointerOf(v) - _, visited := m[p] - m[p] = struct{}{} - return visited -} diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go index eafcf2e4c..23e444f62 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_slices.go +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -1,13 +1,15 @@ // Copyright 2019, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package cmp import ( "bytes" "fmt" + "math" "reflect" + "strconv" "strings" "unicode" "unicode/utf8" @@ -23,14 +25,35 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { return false // Must be formatting in diff mode case v.NumDiff == 0: return false // No differences detected - case v.NumIgnored+v.NumCompared+v.NumTransformed > 0: - // TODO: Handle the case where someone uses bytes.Equal on a large slice. - return false // Some custom option was used to determined equality case !v.ValueX.IsValid() || !v.ValueY.IsValid(): return false // Both values must be valid + case v.NumIgnored > 0: + return false // Some ignore option was used + case v.NumTransformed > 0: + return false // Some transform option was used + case v.NumCompared > 1: + return false // More than one comparison was used + case v.NumCompared == 1 && v.Type.Name() != "": + // The need for cmp to check applicability of options on every element + // in a slice is a significant performance detriment for large []byte. + // The workaround is to specify Comparer(bytes.Equal), + // which enables cmp to compare []byte more efficiently. + // If they differ, we still want to provide batched diffing. + // The logic disallows named types since they tend to have their own + // String method, with nicer formatting than what this provides. + return false + } + + // Check whether this is an interface with the same concrete types. 
+ t := v.Type + vx, vy := v.ValueX, v.ValueY + if t.Kind() == reflect.Interface && !vx.IsNil() && !vy.IsNil() && vx.Elem().Type() == vy.Elem().Type() { + vx, vy = vx.Elem(), vy.Elem() + t = vx.Type() } - switch t := v.Type; t.Kind() { + // Check whether we provide specialized diffing for this type. + switch t.Kind() { case reflect.String: case reflect.Array, reflect.Slice: // Only slices of primitive types have specialized handling. @@ -42,6 +65,11 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { return false } + // Both slice values have to be non-empty. + if t.Kind() == reflect.Slice && (vx.Len() == 0 || vy.Len() == 0) { + return false + } + // If a sufficient number of elements already differ, // use specialized formatting even if length requirement is not met. if v.NumDiff > v.NumSame { @@ -52,8 +80,8 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { } // Use specialized string diffing for longer slices or strings. - const minLength = 64 - return v.ValueX.Len() >= minLength && v.ValueY.Len() >= minLength + const minLength = 32 + return vx.Len() >= minLength && vy.Len() >= minLength } // FormatDiffSlice prints a diff for the slices (or strings) represented by v. @@ -62,17 +90,23 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { assert(opts.DiffMode == diffUnknown) t, vx, vy := v.Type, v.ValueX, v.ValueY + if t.Kind() == reflect.Interface { + vx, vy = vx.Elem(), vy.Elem() + t = vx.Type() + opts = opts.WithTypeMode(emitType) + } // Auto-detect the type of the data. - var isLinedText, isText, isBinary bool var sx, sy string + var ssx, ssy []string + var isString, isMostlyText, isPureLinedText, isBinary bool switch { case t.Kind() == reflect.String: sx, sy = vx.String(), vy.String() - isText = true // Initial estimate, verify later - case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)): + isString = true + case t.Kind() == reflect.Slice && t.Elem() == byteType: sx, sy = string(vx.Bytes()), string(vy.Bytes()) - isBinary = true // Initial estimate, verify later + isString = true case t.Kind() == reflect.Array: // Arrays need to be addressable for slice operations to work. vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem() @@ -80,13 +114,12 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { vy2.Set(vy) vx, vy = vx2, vy2 } - if isText || isBinary { - var numLines, lastLineIdx, maxLineLen int - isBinary = false + if isString { + var numTotalRunes, numValidRunes, numLines, lastLineIdx, maxLineLen int for i, r := range sx + sy { - if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError { - isBinary = true - break + numTotalRunes++ + if (unicode.IsPrint(r) || unicode.IsSpace(r)) && r != utf8.RuneError { + numValidRunes++ } if r == '\n' { if maxLineLen < i-lastLineIdx { @@ -96,8 +129,29 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { numLines++ } } - isText = !isBinary - isLinedText = isText && numLines >= 4 && maxLineLen <= 256 + isPureText := numValidRunes == numTotalRunes + isMostlyText = float64(numValidRunes) > math.Floor(0.90*float64(numTotalRunes)) + isPureLinedText = isPureText && numLines >= 4 && maxLineLen <= 1024 + isBinary = !isMostlyText + + // Avoid diffing by lines if it produces a significantly more complex + // edit script than diffing by bytes. 
+ if isPureLinedText { + ssx = strings.Split(sx, "\n") + ssy = strings.Split(sy, "\n") + esLines := diff.Difference(len(ssx), len(ssy), func(ix, iy int) diff.Result { + return diff.BoolResult(ssx[ix] == ssy[iy]) + }) + esBytes := diff.Difference(len(sx), len(sy), func(ix, iy int) diff.Result { + return diff.BoolResult(sx[ix] == sy[iy]) + }) + efficiencyLines := float64(esLines.Dist()) / float64(len(esLines)) + efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes)) + quotedLength := len(strconv.Quote(sx + sy)) + unquotedLength := len(sx) + len(sy) + escapeExpansionRatio := float64(quotedLength) / float64(unquotedLength) + isPureLinedText = efficiencyLines < 4*efficiencyBytes || escapeExpansionRatio > 1.1 + } } // Format the string into printable records. @@ -106,9 +160,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { switch { // If the text appears to be multi-lined text, // then perform differencing across individual lines. - case isLinedText: - ssx := strings.Split(sx, "\n") - ssy := strings.Split(sy, "\n") + case isPureLinedText: list = opts.formatDiffSlice( reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line", func(v reflect.Value, d diffMode) textRecord { @@ -117,10 +169,88 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { }, ) delim = "\n" + + // If possible, use a custom triple-quote (""") syntax for printing + // differences in a string literal. This format is more readable, + // but has edge-cases where differences are visually indistinguishable. + // This format is avoided under the following conditions: + // - A line starts with `"""` + // - A line starts with "..." + // - A line contains non-printable characters + // - Adjacent different lines differ only by whitespace + // + // For example: + // + // """ + // ... 
// 3 identical lines + // foo + // bar + // - baz + // + BAZ + // """ + isTripleQuoted := true + prevRemoveLines := map[string]bool{} + prevInsertLines := map[string]bool{} + var list2 textList + list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true}) + for _, r := range list { + if !r.Value.Equal(textEllipsis) { + line, _ := strconv.Unquote(string(r.Value.(textLine))) + line = strings.TrimPrefix(strings.TrimSuffix(line, "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support + normLine := strings.Map(func(r rune) rune { + if unicode.IsSpace(r) { + return -1 // drop whitespace to avoid visually indistinguishable output + } + return r + }, line) + isPrintable := func(r rune) bool { + return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable + } + isTripleQuoted = !strings.HasPrefix(line, `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" + switch r.Diff { + case diffRemoved: + isTripleQuoted = isTripleQuoted && !prevInsertLines[normLine] + prevRemoveLines[normLine] = true + case diffInserted: + isTripleQuoted = isTripleQuoted && !prevRemoveLines[normLine] + prevInsertLines[normLine] = true + } + if !isTripleQuoted { + break + } + r.Value = textLine(line) + r.ElideComma = true + } + if !(r.Diff == diffRemoved || r.Diff == diffInserted) { // start a new non-adjacent difference group + prevRemoveLines = map[string]bool{} + prevInsertLines = map[string]bool{} + } + list2 = append(list2, r) + } + if r := list2[len(list2)-1]; r.Diff == diffIdentical && len(r.Value.(textLine)) == 0 { + list2 = list2[:len(list2)-1] // elide single empty line at the end + } + list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true}) + if isTripleQuoted { + var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"} + switch t.Kind() { + case reflect.String: + if t != stringType { + out = opts.FormatType(t, out) + } + case reflect.Slice: + // Always emit type for slices since the triple-quote syntax + // looks like a string (not a slice). + opts = opts.WithTypeMode(emitType) + out = opts.FormatType(t, out) + } + return out + } + // If the text appears to be single-lined text, // then perform differencing in approximately fixed-sized chunks. // The output is printed as quoted strings. - case isText: + case isMostlyText: list = opts.formatDiffSlice( reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte", func(v reflect.Value, d diffMode) textRecord { @@ -128,7 +258,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { return textRecord{Diff: d, Value: textLine(s)} }, ) - delim = "" + // If the text appears to be binary data, // then perform differencing in approximately fixed-sized chunks. // The output is inspired by hexdump. @@ -145,6 +275,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { return textRecord{Diff: d, Value: textLine(s), Comment: comment} }, ) + // For all other slices of primitive types, // then perform differencing in approximately fixed-sized chunks. // The size of each chunk depends on the width of the element kind. 
@@ -172,7 +303,9 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { switch t.Elem().Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: ss = append(ss, fmt.Sprint(v.Index(i).Int())) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + ss = append(ss, fmt.Sprint(v.Index(i).Uint())) + case reflect.Uint8, reflect.Uintptr: ss = append(ss, formatHex(v.Index(i).Uint())) case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: ss = append(ss, fmt.Sprint(v.Index(i).Interface())) @@ -185,8 +318,8 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { } // Wrap the output with appropriate type information. - var out textNode = textWrap{"{", list, "}"} - if !isText { + var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"} + if !isMostlyText { // The "{...}" byte-sequence literal is not valid Go syntax for strings. // Emit the type for extra clarity (e.g. "string{...}"). if t.Kind() == reflect.String { @@ -196,13 +329,13 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { } switch t.Kind() { case reflect.String: - out = textWrap{"strings.Join(", out, fmt.Sprintf(", %q)", delim)} - if t != reflect.TypeOf(string("")) { + out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} + if t != stringType { out = opts.FormatType(t, out) } case reflect.Slice: - out = textWrap{"bytes.Join(", out, fmt.Sprintf(", %q)", delim)} - if t != reflect.TypeOf([]byte(nil)) { + out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} + if t != bytesType { out = opts.FormatType(t, out) } } @@ -225,8 +358,11 @@ func (opts formatOptions) formatDiffSlice( vx, vy reflect.Value, chunkSize int, name string, makeRec func(reflect.Value, diffMode) textRecord, ) (list textList) { - es := diff.Difference(vx.Len(), vy.Len(), func(ix int, iy int) diff.Result { - return diff.BoolResult(vx.Index(ix).Interface() == vy.Index(iy).Interface()) + eq := func(ix, iy int) bool { + return vx.Index(ix).Interface() == vy.Index(iy).Interface() + } + es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result { + return diff.BoolResult(eq(ix, iy)) }) appendChunks := func(v reflect.Value, d diffMode) int { @@ -242,9 +378,23 @@ func (opts formatOptions) formatDiffSlice( return n0 - v.Len() } + var numDiffs int + maxLen := -1 + if opts.LimitVerbosity { + maxLen = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc... + opts.VerbosityLevel-- + } + groups := coalesceAdjacentEdits(name, es) groups = coalesceInterveningIdentical(groups, chunkSize/4) + groups = cleanupSurroundingIdentical(groups, eq) + maxGroup := diffStats{Name: name} for i, ds := range groups { + if maxLen >= 0 && numDiffs >= maxLen { + maxGroup = maxGroup.Append(ds) + continue + } + // Print equal. if ds.NumDiff() == 0 { // Compute the number of leading and trailing equal bytes to print. @@ -273,36 +423,52 @@ func (opts formatOptions) formatDiffSlice( } // Print unequal. 
+ len0 := len(list) nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved) vx = vx.Slice(nx, vx.Len()) ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted) vy = vy.Slice(ny, vy.Len()) + numDiffs += len(list) - len0 + } + if maxGroup.IsZero() { + assert(vx.Len() == 0 && vy.Len() == 0) + } else { + list.AppendEllipsis(maxGroup) } - assert(vx.Len() == 0 && vy.Len() == 0) return list } // coalesceAdjacentEdits coalesces the list of edits into groups of adjacent // equal or unequal counts. +// +// Example: +// +// Input: "..XXY...Y" +// Output: [ +// {NumIdentical: 2}, +// {NumRemoved: 2, NumInserted 1}, +// {NumIdentical: 3}, +// {NumInserted: 1}, +// ] func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) { - var prevCase int // Arbitrary index into which case last occurred - lastStats := func(i int) *diffStats { - if prevCase != i { + var prevMode byte + lastStats := func(mode byte) *diffStats { + if prevMode != mode { groups = append(groups, diffStats{Name: name}) - prevCase = i + prevMode = mode } return &groups[len(groups)-1] } for _, e := range es { switch e { case diff.Identity: - lastStats(1).NumIdentical++ + lastStats('=').NumIdentical++ case diff.UniqueX: - lastStats(2).NumRemoved++ + lastStats('!').NumRemoved++ case diff.UniqueY: - lastStats(2).NumInserted++ + lastStats('!').NumInserted++ case diff.Modified: - lastStats(2).NumModified++ + lastStats('!').NumModified++ } } return groups @@ -312,6 +478,34 @@ func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) // equal groups into adjacent unequal groups that currently result in a // dual inserted/removed printout. This acts as a high-pass filter to smooth // out high-frequency changes within the windowSize. +// +// Example: +// +// WindowSize: 16, +// Input: [ +// {NumIdentical: 61}, // group 0 +// {NumRemoved: 3, NumInserted: 1}, // group 1 +// {NumIdentical: 6}, // ├── coalesce +// {NumInserted: 2}, // ├── coalesce +// {NumIdentical: 1}, // ├── coalesce +// {NumRemoved: 9}, // └── coalesce +// {NumIdentical: 64}, // group 2 +// {NumRemoved: 3, NumInserted: 1}, // group 3 +// {NumIdentical: 6}, // ├── coalesce +// {NumInserted: 2}, // ├── coalesce +// {NumIdentical: 1}, // ├── coalesce +// {NumRemoved: 7}, // ├── coalesce +// {NumIdentical: 1}, // ├── coalesce +// {NumRemoved: 2}, // └── coalesce +// {NumIdentical: 63}, // group 4 +// ] +// Output: [ +// {NumIdentical: 61}, +// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, +// {NumIdentical: 64}, +// {NumIdentical: 8, NumRemoved: 12, NumInserted: 3}, +// {NumIdentical: 63}, +// ] func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats { groups, groupsOrig := groups[:0], groups for i, ds := range groupsOrig { @@ -331,3 +525,90 @@ func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStat } return groups } + +// cleanupSurroundingIdentical scans through all unequal groups, and +// moves any leading sequence of equal elements to the preceding equal group and +// moves and trailing sequence of equal elements to the succeeding equal group. +// +// This is necessary since coalesceInterveningIdentical may coalesce edit groups +// together such that leading/trailing spans of equal elements becomes possible. +// Note that this can occur even with an optimal diffing algorithm. 
+// +// Example: +// +// Input: [ +// {NumIdentical: 61}, +// {NumIdentical: 1 , NumRemoved: 11, NumInserted: 2}, // assume 3 leading identical elements +// {NumIdentical: 67}, +// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, // assume 10 trailing identical elements +// {NumIdentical: 54}, +// ] +// Output: [ +// {NumIdentical: 64}, // incremented by 3 +// {NumRemoved: 9}, +// {NumIdentical: 67}, +// {NumRemoved: 9}, +// {NumIdentical: 64}, // incremented by 10 +// ] +func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats { + var ix, iy int // indexes into sequence x and y + for i, ds := range groups { + // Handle equal group. + if ds.NumDiff() == 0 { + ix += ds.NumIdentical + iy += ds.NumIdentical + continue + } + + // Handle unequal group. + nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified + ny := ds.NumIdentical + ds.NumInserted + ds.NumModified + var numLeadingIdentical, numTrailingIdentical int + for j := 0; j < nx && j < ny && eq(ix+j, iy+j); j++ { + numLeadingIdentical++ + } + for j := 0; j < nx && j < ny && eq(ix+nx-1-j, iy+ny-1-j); j++ { + numTrailingIdentical++ + } + if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 { + if numLeadingIdentical > 0 { + // Remove leading identical span from this group and + // insert it into the preceding group. + if i-1 >= 0 { + groups[i-1].NumIdentical += numLeadingIdentical + } else { + // No preceding group exists, so prepend a new group, + // but do so after we finish iterating over all groups. + defer func() { + groups = append([]diffStats{{Name: groups[0].Name, NumIdentical: numLeadingIdentical}}, groups...) + }() + } + // Increment indexes since the preceding group would have handled this. + ix += numLeadingIdentical + iy += numLeadingIdentical + } + if numTrailingIdentical > 0 { + // Remove trailing identical span from this group and + // insert it into the succeeding group. + if i+1 < len(groups) { + groups[i+1].NumIdentical += numTrailingIdentical + } else { + // No succeeding group exists, so append a new group, + // but do so after we finish iterating over all groups. + defer func() { + groups = append(groups, diffStats{Name: groups[len(groups)-1].Name, NumIdentical: numTrailingIdentical}) + }() + } + // Do not increment indexes since the succeeding group will handle this. + } + + // Update this group since some identical elements were removed. + nx -= numIdentical + ny -= numIdentical + groups[i] = diffStats{Name: ds.Name, NumRemoved: nx, NumInserted: ny} + } + ix += nx + iy += ny + } + return groups +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go index 8b8fcab7b..388fcf571 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_text.go +++ b/vendor/github.com/google/go-cmp/cmp/report_text.go @@ -1,6 +1,6 @@ // Copyright 2019, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package cmp @@ -10,12 +10,15 @@ import ( "math/rand" "strings" "time" + "unicode/utf8" "github.com/google/go-cmp/cmp/internal/flags" ) var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 +const maxColumnLength = 80 + type indentMode int func (n indentMode) appendIndent(b []byte, d diffMode) []byte { @@ -91,21 +94,22 @@ type textNode interface { // textWrap is a wrapper that concatenates a prefix and/or a suffix // to the underlying node. 
type textWrap struct { - Prefix string // e.g., "bytes.Buffer{" - Value textNode // textWrap | textList | textLine - Suffix string // e.g., "}" + Prefix string // e.g., "bytes.Buffer{" + Value textNode // textWrap | textList | textLine + Suffix string // e.g., "}" + Metadata interface{} // arbitrary metadata; has no effect on formatting } -func (s textWrap) Len() int { +func (s *textWrap) Len() int { return len(s.Prefix) + s.Value.Len() + len(s.Suffix) } -func (s1 textWrap) Equal(s2 textNode) bool { - if s2, ok := s2.(textWrap); ok { +func (s1 *textWrap) Equal(s2 textNode) bool { + if s2, ok := s2.(*textWrap); ok { return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix } return false } -func (s textWrap) String() string { +func (s *textWrap) String() string { var d diffMode var n indentMode _, s2 := s.formatCompactTo(nil, d) @@ -114,7 +118,7 @@ func (s textWrap) String() string { b = append(b, '\n') // Trailing newline return string(b) } -func (s textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { +func (s *textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { n0 := len(b) // Original buffer length b = append(b, s.Prefix...) b, s.Value = s.Value.formatCompactTo(b, d) @@ -124,7 +128,7 @@ func (s textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { } return b, s } -func (s textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { +func (s *textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { b = append(b, s.Prefix...) b = s.Value.formatExpandedTo(b, d, n) b = append(b, s.Suffix...) @@ -136,22 +140,23 @@ func (s textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { // of the textList.formatCompactTo method. type textList []textRecord type textRecord struct { - Diff diffMode // e.g., 0 or '-' or '+' - Key string // e.g., "MyField" - Value textNode // textWrap | textLine - Comment fmt.Stringer // e.g., "6 identical fields" + Diff diffMode // e.g., 0 or '-' or '+' + Key string // e.g., "MyField" + Value textNode // textWrap | textLine + ElideComma bool // avoid trailing comma + Comment fmt.Stringer // e.g., "6 identical fields" } // AppendEllipsis appends a new ellipsis node to the list if none already // exists at the end. If cs is non-zero it coalesces the statistics with the // previous diffStats. func (s *textList) AppendEllipsis(ds diffStats) { - hasStats := ds != diffStats{} + hasStats := !ds.IsZero() if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) { if hasStats { - *s = append(*s, textRecord{Value: textEllipsis, Comment: ds}) + *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true, Comment: ds}) } else { - *s = append(*s, textRecord{Value: textEllipsis}) + *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true}) } return } @@ -191,7 +196,7 @@ func (s1 textList) Equal(s2 textNode) bool { } func (s textList) String() string { - return textWrap{"{", s, "}"}.String() + return (&textWrap{Prefix: "{", Value: s, Suffix: "}"}).String() } func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { @@ -221,7 +226,7 @@ func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { } // Force multi-lined output when printing a removed/inserted node that // is sufficiently long. 
- if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > 80 { + if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > maxColumnLength { multiLine = true } if !multiLine { @@ -236,16 +241,50 @@ func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { _, isLine := r.Value.(textLine) return r.Key == "" || !isLine }, - func(r textRecord) int { return len(r.Key) }, + func(r textRecord) int { return utf8.RuneCountInString(r.Key) }, ) alignValueLens := s.alignLens( func(r textRecord) bool { _, isLine := r.Value.(textLine) return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil }, - func(r textRecord) int { return len(r.Value.(textLine)) }, + func(r textRecord) int { return utf8.RuneCount(r.Value.(textLine)) }, ) + // Format lists of simple lists in a batched form. + // If the list is sequence of only textLine values, + // then batch multiple values on a single line. + var isSimple bool + for _, r := range s { + _, isLine := r.Value.(textLine) + isSimple = r.Diff == 0 && r.Key == "" && isLine && r.Comment == nil + if !isSimple { + break + } + } + if isSimple { + n++ + var batch []byte + emitBatch := func() { + if len(batch) > 0 { + b = n.appendIndent(append(b, '\n'), d) + b = append(b, bytes.TrimRight(batch, " ")...) + batch = batch[:0] + } + } + for _, r := range s { + line := r.Value.(textLine) + if len(batch)+len(line)+len(", ") > maxColumnLength { + emitBatch() + } + batch = append(batch, line...) + batch = append(batch, ", "...) + } + emitBatch() + n-- + return n.appendIndent(append(b, '\n'), d) + } + // Format the list as a multi-lined output. n++ for i, r := range s { @@ -256,7 +295,7 @@ func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { b = alignKeyLens[i].appendChar(b, ' ') b = r.Value.formatExpandedTo(b, d|r.Diff, n) - if !r.Value.Equal(textEllipsis) { + if !r.ElideComma { b = append(b, ',') } b = alignValueLens[i].appendChar(b, ' ') @@ -332,6 +371,11 @@ type diffStats struct { NumModified int } +func (s diffStats) IsZero() bool { + s.Name = "" + return s == diffStats{} +} + func (s diffStats) NumDiff() int { return s.NumRemoved + s.NumInserted + s.NumModified } @@ -349,6 +393,7 @@ func (s diffStats) Append(ds diffStats) diffStats { // String prints a humanly-readable summary of coalesced records. // // Example: +// // diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields" func (s diffStats) String() string { var ss []string diff --git a/vendor/github.com/google/go-cmp/cmp/report_value.go b/vendor/github.com/google/go-cmp/cmp/report_value.go index 83031a7f5..668d470fd 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_value.go +++ b/vendor/github.com/google/go-cmp/cmp/report_value.go @@ -1,6 +1,6 @@ // Copyright 2019, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package cmp diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml new file mode 100644 index 000000000..d8156a60b --- /dev/null +++ b/vendor/github.com/google/uuid/.travis.yml @@ -0,0 +1,9 @@ +language: go + +go: + - 1.4.3 + - 1.5.3 + - tip + +script: + - go test -v ./... 
diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md new file mode 100644 index 000000000..04fdf09f1 --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -0,0 +1,10 @@ +# How to contribute + +We definitely welcome patches and contribution to this project! + +### Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). + +You may have already signed it for other Google projects. diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS new file mode 100644 index 000000000..b4bb97f6b --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTORS @@ -0,0 +1,9 @@ +Paul Borman +bmatsuo +shawnps +theory +jboverfelt +dsymonds +cd1 +wallclockbuilder +dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE new file mode 100644 index 000000000..5dc68268d --- /dev/null +++ b/vendor/github.com/google/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md new file mode 100644 index 000000000..f765a46f9 --- /dev/null +++ b/vendor/github.com/google/uuid/README.md @@ -0,0 +1,19 @@ +# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) +The uuid package generates and inspects UUIDs based on +[RFC 4122](http://tools.ietf.org/html/rfc4122) +and DCE 1.1: Authentication and Security Services. + +This package is based on the github.com/pborman/uuid package (previously named +code.google.com/p/go-uuid). It differs from these earlier packages in that +a UUID is a 16 byte array rather than a byte slice. One loss due to this +change is the ability to represent an invalid UUID (vs a NIL UUID). 
+ +###### Install +`go get github.com/google/uuid` + +###### Documentation +[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) + +Full `go doc` style documentation for the package can be viewed online without +installing this package by using the GoDoc site here: +http://pkg.go.dev/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go new file mode 100644 index 000000000..fa820b9d3 --- /dev/null +++ b/vendor/github.com/google/uuid/dce.go @@ -0,0 +1,80 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. +// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. +// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) (UUID, error) { + uuid, err := NewUUID() + if err == nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid, err +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. +// +// NewDCESecurity(Person, uint32(os.Getuid())) +func NewDCEPerson() (UUID, error) { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCESecurity(Group, uint32(os.Getgid())) +func NewDCEGroup() (UUID, error) { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID. Domains are only defined +// for Version 2 UUIDs. +func (uuid UUID) Domain() Domain { + return Domain(uuid[9]) +} + +// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 +// UUIDs. +func (uuid UUID) ID() uint32 { + return binary.BigEndian.Uint32(uuid[0:4]) +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go new file mode 100644 index 000000000..5b8a4b9af --- /dev/null +++ b/vendor/github.com/google/uuid/doc.go @@ -0,0 +1,12 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uuid generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security +// Services. +// +// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to +// maps or compared directly. 
+package uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go new file mode 100644 index 000000000..b404f4bec --- /dev/null +++ b/vendor/github.com/google/uuid/hash.go @@ -0,0 +1,53 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known namespace IDs and UUIDs +var ( + NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) + Nil UUID // empty UUID, all zeros +) + +// NewHash returns a new UUID derived from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space[:]) //nolint:errcheck + h.Write(data) //nolint:errcheck + s := h.Sum(nil) + var uuid UUID + copy(uuid[:], s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go new file mode 100644 index 000000000..14bd34072 --- /dev/null +++ b/vendor/github.com/google/uuid/marshal.go @@ -0,0 +1,38 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "fmt" + +// MarshalText implements encoding.TextMarshaler. +func (uuid UUID) MarshalText() ([]byte, error) { + var js [36]byte + encodeHex(js[:], uuid) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (uuid *UUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err != nil { + return err + } + *uuid = id + return nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (uuid UUID) MarshalBinary() ([]byte, error) { + return uuid[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (uuid *UUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(uuid[:], data) + return nil +} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go new file mode 100644 index 000000000..d651a2b06 --- /dev/null +++ b/vendor/github.com/google/uuid/node.go @@ -0,0 +1,90 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import ( + "sync" +) + +var ( + nodeMu sync.Mutex + ifname string // name of interface being used + nodeID [6]byte // hardware for version 1 UUIDs + zeroID [6]byte // nodeID with only 0's +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + defer nodeMu.Unlock() + nodeMu.Lock() + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". +func SetNodeInterface(name string) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + return setNodeInterface(name) +} + +func setNodeInterface(name string) bool { + iname, addr := getHardwareInterface(name) // null implementation for js + if iname != "" && addr != nil { + ifname = iname + copy(nodeID[:], addr) + return true + } + + // We found no interfaces with a valid hardware address. If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + ifname = "random" + randomBits(nodeID[:]) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + defer nodeMu.Unlock() + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nid := nodeID + return nid[:] +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. +func SetNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + defer nodeMu.Unlock() + nodeMu.Lock() + copy(nodeID[:], id) + ifname = "user" + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) NodeID() []byte { + var node [6]byte + copy(node[:], uuid[10:]) + return node[:] +} diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go new file mode 100644 index 000000000..24b78edc9 --- /dev/null +++ b/vendor/github.com/google/uuid/node_js.go @@ -0,0 +1,12 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build js + +package uuid + +// getHardwareInterface returns nil values for the JS version of the code. +// This remvoves the "net" dependency, because it is not used in the browser. +// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. +func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go new file mode 100644 index 000000000..0cbbcddbd --- /dev/null +++ b/vendor/github.com/google/uuid/node_net.go @@ -0,0 +1,33 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !js + +package uuid + +import "net" + +var interfaces []net.Interface // cached list of interfaces + +// getHardwareInterface returns the name and hardware address of interface name. 
+// If name is "" then the name and hardware address of one of the system's +// interfaces is returned. If no interfaces are found (name does not exist or +// there are no interfaces) then "", nil is returned. +// +// Only addresses of at least 6 bytes are returned. +func getHardwareInterface(name string) (string, []byte) { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil { + return "", nil + } + } + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + return ifs.Name, ifs.HardwareAddr + } + } + return "", nil +} diff --git a/vendor/github.com/google/uuid/null.go b/vendor/github.com/google/uuid/null.go new file mode 100644 index 000000000..d7fcbf286 --- /dev/null +++ b/vendor/github.com/google/uuid/null.go @@ -0,0 +1,118 @@ +// Copyright 2021 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "database/sql/driver" + "encoding/json" + "fmt" +) + +var jsonNull = []byte("null") + +// NullUUID represents a UUID that may be null. +// NullUUID implements the SQL driver.Scanner interface so +// it can be used as a scan destination: +// +// var u uuid.NullUUID +// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u) +// ... +// if u.Valid { +// // use u.UUID +// } else { +// // NULL value +// } +// +type NullUUID struct { + UUID UUID + Valid bool // Valid is true if UUID is not NULL +} + +// Scan implements the SQL driver.Scanner interface. +func (nu *NullUUID) Scan(value interface{}) error { + if value == nil { + nu.UUID, nu.Valid = Nil, false + return nil + } + + err := nu.UUID.Scan(value) + if err != nil { + nu.Valid = false + return err + } + + nu.Valid = true + return nil +} + +// Value implements the driver Valuer interface. +func (nu NullUUID) Value() (driver.Value, error) { + if !nu.Valid { + return nil, nil + } + // Delegate to UUID Value function + return nu.UUID.Value() +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (nu NullUUID) MarshalBinary() ([]byte, error) { + if nu.Valid { + return nu.UUID[:], nil + } + + return []byte(nil), nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (nu *NullUUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(nu.UUID[:], data) + nu.Valid = true + return nil +} + +// MarshalText implements encoding.TextMarshaler. +func (nu NullUUID) MarshalText() ([]byte, error) { + if nu.Valid { + return nu.UUID.MarshalText() + } + + return jsonNull, nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (nu *NullUUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err != nil { + nu.Valid = false + return err + } + nu.UUID = id + nu.Valid = true + return nil +} + +// MarshalJSON implements json.Marshaler. +func (nu NullUUID) MarshalJSON() ([]byte, error) { + if nu.Valid { + return json.Marshal(nu.UUID) + } + + return jsonNull, nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (nu *NullUUID) UnmarshalJSON(data []byte) error { + if bytes.Equal(data, jsonNull) { + *nu = NullUUID{} + return nil // valid null UUID + } + err := json.Unmarshal(data, &nu.UUID) + nu.Valid = err == nil + return err +} diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go new file mode 100644 index 000000000..2e02ec06c --- /dev/null +++ b/vendor/github.com/google/uuid/sql.go @@ -0,0 +1,59 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently. +// Currently, database types that map to string and []byte are supported. Please +// consult database-specific driver documentation for matching types. +func (uuid *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case nil: + return nil + + case string: + // if an empty UUID comes from a table, we return a null UUID + if src == "" { + return nil + } + + // see Parse for required string format + u, err := Parse(src) + if err != nil { + return fmt.Errorf("Scan: %v", err) + } + + *uuid = u + + case []byte: + // if an empty UUID comes from a table, we return a null UUID + if len(src) == 0 { + return nil + } + + // assumes a simple slice of bytes if 16 bytes + // otherwise attempts to parse + if len(src) != 16 { + return uuid.Scan(string(src)) + } + copy((*uuid)[:], src) + + default: + return fmt.Errorf("Scan: unable to scan type %T into UUID", src) + } + + return nil +} + +// Value implements sql.Valuer so that UUIDs can be written to databases +// transparently. Currently, UUIDs map to strings. Please consult +// database-specific driver documentation for matching types. +func (uuid UUID) Value() (driver.Value, error) { + return uuid.String(), nil +} diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go new file mode 100644 index 000000000..e6ef06cdc --- /dev/null +++ b/vendor/github.com/google/uuid/time.go @@ -0,0 +1,123 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "sync" + "time" +) + +// A Time represents a time as the number of 100's of nanoseconds since 15 Oct +// 1582. +type Time int64 + +const ( + lillian = 2299160 // Julian day of 15 Oct 1582 + unix = 2440587 // Julian day of 1 Jan 1970 + epoch = unix - lillian // Days between epochs + g1582 = epoch * 86400 // seconds between epochs + g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs +) + +var ( + timeMu sync.Mutex + lasttime uint64 // last time we returned + clockSeq uint16 // clock sequence for this run + + timeNow = time.Now // for testing +) + +// UnixTime converts t the number of seconds and nanoseconds using the Unix +// epoch of 1 Jan 1970. +func (t Time) UnixTime() (sec, nsec int64) { + sec = int64(t - g1582ns100) + nsec = (sec % 10000000) * 100 + sec /= 10000000 + return sec, nsec +} + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// clock sequence as well as adjusting the clock sequence as needed. An error +// is returned if the current time cannot be determined. 
+func GetTime() (Time, uint16, error) { + defer timeMu.Unlock() + timeMu.Lock() + return getTime() +} + +func getTime() (Time, uint16, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. + if clockSeq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), clockSeq, nil +} + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. Unless SetClockSequence is used, a new +// random clock sequence is generated the first time a clock sequence is +// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) +func ClockSequence() int { + defer timeMu.Unlock() + timeMu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clockSeq == 0 { + setClockSequence(-1) + } + return int(clockSeq & 0x3fff) +} + +// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer timeMu.Unlock() + timeMu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + oldSeq := clockSeq + clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if oldSeq != clockSeq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. The time is only defined for version 1 and 2 UUIDs. +func (uuid UUID) Time() Time { + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + return Time(time) +} + +// ClockSequence returns the clock sequence encoded in uuid. +// The clock sequence is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) ClockSequence() int { + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff +} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go new file mode 100644 index 000000000..5ea6c7378 --- /dev/null +++ b/vendor/github.com/google/uuid/util.go @@ -0,0 +1,43 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. +func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. 
+var xvalues = [256]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts hex characters x1 and x2 into a byte. +func xtob(x1, x2 byte) (byte, bool) { + b1 := xvalues[x1] + b2 := xvalues[x2] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go new file mode 100644 index 000000000..a57207aeb --- /dev/null +++ b/vendor/github.com/google/uuid/uuid.go @@ -0,0 +1,294 @@ +// Copyright 2018 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" + "sync" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID [16]byte + +// A Version represents a UUID's version. +type Version byte + +// A Variant represents a UUID's variant. +type Variant byte + +// Constants returned by Variant. +const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. +) + +const randPoolSize = 16 * 16 + +var ( + rander = rand.Reader // random function + poolEnabled = false + poolMu sync.Mutex + poolPos = randPoolSize // protected with poolMu + pool [randPoolSize]byte // protected with poolMu +) + +type invalidLengthError struct{ len int } + +func (err invalidLengthError) Error() string { + return fmt.Sprintf("invalid UUID length: %d", err.len) +} + +// IsInvalidLengthError is matcher function for custom error invalidLengthError +func IsInvalidLengthError(err error) bool { + _, ok := err.(invalidLengthError) + return ok +} + +// Parse decodes s into a UUID or returns an error. Both the standard UUID +// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the +// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex +// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. 
+func Parse(s string) (UUID, error) { + var uuid UUID + switch len(s) { + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36: + + // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: + if strings.ToLower(s[:9]) != "urn:uuid:" { + return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + case 36 + 2: + s = s[1:] + + // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + case 32: + var ok bool + for i := range uuid { + uuid[i], ok = xtob(s[i*2], s[i*2+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, invalidLengthError{len(s)} + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(s[x], s[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. +func ParseBytes(b []byte) (UUID, error) { + var uuid UUID + switch len(b) { + case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) + } + b = b[9:] + case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + b = b[1:] + case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + var ok bool + for i := 0; i < 32; i += 2 { + uuid[i/2], ok = xtob(b[i], b[i+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, invalidLengthError{len(b)} + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(b[x], b[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// MustParse is like Parse but panics if the string cannot be parsed. +// It simplifies safe initialization of global variables holding compiled UUIDs. +func MustParse(s string) UUID { + uuid, err := Parse(s) + if err != nil { + panic(`uuid: Parse(` + s + `): ` + err.Error()) + } + return uuid +} + +// FromBytes creates a new UUID from a byte slice. Returns an error if the slice +// does not have a length of 16. The bytes are copied from the slice. +func FromBytes(b []byte) (uuid UUID, err error) { + err = uuid.UnmarshalBinary(b) + return uuid, err +} + +// Must returns uuid if err is nil and panics otherwise. +func Must(uuid UUID, err error) UUID { + if err != nil { + panic(err) + } + return uuid +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. +func (uuid UUID) String() string { + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. 
+func (uuid UUID) URN() string { + var buf [36 + 9]byte + copy(buf[:], "urn:uuid:") + encodeHex(buf[9:], uuid) + return string(buf[:]) +} + +func encodeHex(dst []byte, uuid UUID) { + hex.Encode(dst, uuid[:4]) + dst[8] = '-' + hex.Encode(dst[9:13], uuid[4:6]) + dst[13] = '-' + hex.Encode(dst[14:18], uuid[6:8]) + dst[18] = '-' + hex.Encode(dst[19:23], uuid[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], uuid[10:]) +} + +// Variant returns the variant encoded in uuid. +func (uuid UUID) Variant() Variant { + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } +} + +// Version returns the version of uuid. +func (uuid UUID) Version() Version { + return Version(uuid[6] >> 4) +} + +func (v Version) String() string { + if v > 15 { + return fmt.Sprintf("BAD_VERSION_%d", v) + } + return fmt.Sprintf("VERSION_%d", v) +} + +func (v Variant) String() string { + switch v { + case RFC4122: + return "RFC4122" + case Reserved: + return "Reserved" + case Microsoft: + return "Microsoft" + case Future: + return "Future" + case Invalid: + return "Invalid" + } + return fmt.Sprintf("BadVariant%d", int(v)) +} + +// SetRand sets the random number generator to r, which implements io.Reader. +// If r.Read returns an error when the package requests random data then +// a panic will be issued. +// +// Calling SetRand with nil sets the random number generator to the default +// generator. +func SetRand(r io.Reader) { + if r == nil { + rander = rand.Reader + return + } + rander = r +} + +// EnableRandPool enables internal randomness pool used for Random +// (Version 4) UUID generation. The pool contains random bytes read from +// the random number generator on demand in batches. Enabling the pool +// may improve the UUID generation throughput significantly. +// +// Since the pool is stored on the Go heap, this feature may be a bad fit +// for security sensitive applications. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. +func EnableRandPool() { + poolEnabled = true +} + +// DisableRandPool disables the randomness pool if it was previously +// enabled with EnableRandPool. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. +func DisableRandPool() { + poolEnabled = false + defer poolMu.Unlock() + poolMu.Lock() + poolPos = randPoolSize +} diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go new file mode 100644 index 000000000..463109629 --- /dev/null +++ b/vendor/github.com/google/uuid/version1.go @@ -0,0 +1,44 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" +) + +// NewUUID returns a Version 1 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewUUID returns nil. If clock sequence has not been set by +// SetClockSequence then it will be set automatically. 
If GetTime fails to +// return the current NewUUID returns nil and an error. +// +// In most cases, New should be used. +func NewUUID() (UUID, error) { + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + timeLow := uint32(now & 0xffffffff) + timeMid := uint16((now >> 32) & 0xffff) + timeHi := uint16((now >> 48) & 0x0fff) + timeHi |= 0x1000 // Version 1 + + binary.BigEndian.PutUint32(uuid[0:], timeLow) + binary.BigEndian.PutUint16(uuid[4:], timeMid) + binary.BigEndian.PutUint16(uuid[6:], timeHi) + binary.BigEndian.PutUint16(uuid[8:], seq) + + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + copy(uuid[10:], nodeID[:]) + nodeMu.Unlock() + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go new file mode 100644 index 000000000..7697802e4 --- /dev/null +++ b/vendor/github.com/google/uuid/version4.go @@ -0,0 +1,76 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "io" + +// New creates a new random UUID or panics. New is equivalent to +// the expression +// +// uuid.Must(uuid.NewRandom()) +func New() UUID { + return Must(NewRandom()) +} + +// NewString creates a new random UUID and returns it as a string or panics. +// NewString is equivalent to the expression +// +// uuid.New().String() +func NewString() string { + return Must(NewRandom()).String() +} + +// NewRandom returns a Random (Version 4) UUID. +// +// The strength of the UUIDs is based on the strength of the crypto/rand +// package. +// +// Uses the randomness pool if it was enabled with EnableRandPool. +// +// A note about uniqueness derived from the UUID Wikipedia entry: +// +// Randomly generated UUIDs have 122 random bits. One's annual risk of being +// hit by a meteorite is estimated to be one chance in 17 billion, that +// means the probability is about 0.00000000006 (6 × 10−11), +// equivalent to the odds of creating a few tens of trillions of UUIDs in a +// year and having one duplicate. +func NewRandom() (UUID, error) { + if !poolEnabled { + return NewRandomFromReader(rander) + } + return newRandomFromPool() +} + +// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader. +func NewRandomFromReader(r io.Reader) (UUID, error) { + var uuid UUID + _, err := io.ReadFull(r, uuid[:]) + if err != nil { + return Nil, err + } + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} + +func newRandomFromPool() (UUID, error) { + var uuid UUID + poolMu.Lock() + if poolPos == randPoolSize { + _, err := io.ReadFull(rander, pool[:]) + if err != nil { + poolMu.Unlock() + return Nil, err + } + poolPos = 0 + } + copy(uuid[:], pool[poolPos:(poolPos+16)]) + poolPos += 16 + poolMu.Unlock() + + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go index a733bef18..44e368e56 100644 --- a/vendor/github.com/hashicorp/errwrap/errwrap.go +++ b/vendor/github.com/hashicorp/errwrap/errwrap.go @@ -44,6 +44,8 @@ func Wrap(outer, inner error) error { // // format is the format of the error message. The string '{{err}}' will // be replaced with the original error message. 
+// +// Deprecated: Use fmt.Errorf() func Wrapf(format string, err error) error { outerMsg := "" if err != nil { @@ -148,6 +150,9 @@ func Walk(err error, cb WalkFunc) { for _, err := range e.WrappedErrors() { Walk(err, cb) } + case interface{ Unwrap() error }: + cb(err) + Walk(e.Unwrap(), cb) default: cb(err) } @@ -167,3 +172,7 @@ func (w *wrappedError) Error() string { func (w *wrappedError) WrappedErrors() []error { return []error{w.Outer, w.Inner} } + +func (w *wrappedError) Unwrap() error { + return w.Inner +} diff --git a/vendor/github.com/hashicorp/go-multierror/.travis.yml b/vendor/github.com/hashicorp/go-multierror/.travis.yml deleted file mode 100644 index 304a83595..000000000 --- a/vendor/github.com/hashicorp/go-multierror/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -sudo: false - -language: go - -go: - - 1.x - -branches: - only: - - master - -script: make test testrace diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md index ead5830f7..71dd308ed 100644 --- a/vendor/github.com/hashicorp/go-multierror/README.md +++ b/vendor/github.com/hashicorp/go-multierror/README.md @@ -1,10 +1,11 @@ # go-multierror -[![Build Status](http://img.shields.io/travis/hashicorp/go-multierror.svg?style=flat-square)][travis] -[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] +[![CircleCI](https://img.shields.io/circleci/build/github/hashicorp/go-multierror/master)](https://circleci.com/gh/hashicorp/go-multierror) +[![Go Reference](https://pkg.go.dev/badge/github.com/hashicorp/go-multierror.svg)](https://pkg.go.dev/github.com/hashicorp/go-multierror) +![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/hashicorp/go-multierror) -[travis]: https://travis-ci.org/hashicorp/go-multierror -[godocs]: https://godoc.org/github.com/hashicorp/go-multierror +[circleci]: https://app.circleci.com/pipelines/github/hashicorp/go-multierror +[godocs]: https://pkg.go.dev/github.com/hashicorp/go-multierror `go-multierror` is a package for Go that provides a mechanism for representing a list of `error` values as a single `error`. @@ -14,16 +15,35 @@ be a list of errors. If the caller knows this, they can unwrap the list and access the errors. If the caller doesn't know, the error formats to a nice human-readable format. -`go-multierror` implements the -[errwrap](https://github.com/hashicorp/errwrap) interface so that it can -be used with that library, as well. +`go-multierror` is fully compatible with the Go standard library +[errors](https://golang.org/pkg/errors/) package, including the +functions `As`, `Is`, and `Unwrap`. This provides a standardized approach +for introspecting on error values. ## Installation and Docs Install using `go get github.com/hashicorp/go-multierror`. Full documentation is available at -http://godoc.org/github.com/hashicorp/go-multierror +https://pkg.go.dev/github.com/hashicorp/go-multierror + +### Requires go version 1.13 or newer + +`go-multierror` requires go version 1.13 or newer. Go 1.13 introduced +[error wrapping](https://golang.org/doc/go1.13#error_wrapping), which +this library takes advantage of. + +If you need to use an earlier version of go, you can use the +[v1.0.0](https://github.com/hashicorp/go-multierror/tree/v1.0.0) +tag, which doesn't rely on features in go 1.13. 
+ +If you see compile errors that look like the below, it's likely that +you're on an older version of go: + +``` +/go/src/github.com/hashicorp/go-multierror/multierror.go:112:9: undefined: errors.As +/go/src/github.com/hashicorp/go-multierror/multierror.go:117:9: undefined: errors.Is +``` ## Usage @@ -81,6 +101,39 @@ if err := something(); err != nil { } ``` +You can also use the standard [`errors.Unwrap`](https://golang.org/pkg/errors/#Unwrap) +function. This will continue to unwrap into subsequent errors until none exist. + +**Extracting an error** + +The standard library [`errors.As`](https://golang.org/pkg/errors/#As) +function can be used directly with a multierror to extract a specific error: + +```go +// Assume err is a multierror value +err := somefunc() + +// We want to know if "err" has a "RichErrorType" in it and extract it. +var errRich RichErrorType +if errors.As(err, &errRich) { + // It has it, and now errRich is populated. +} +``` + +**Checking for an exact error value** + +Some errors are returned as exact errors such as the [`ErrNotExist`](https://golang.org/pkg/os/#pkg-variables) +error in the `os` package. You can check if this error is present by using +the standard [`errors.Is`](https://golang.org/pkg/errors/#Is) function. + +```go +// Assume err is a multierror value +err := somefunc() +if errors.Is(err, os.ErrNotExist) { + // err contains os.ErrNotExist +} +``` + **Returning a multierror only if there are errors** If you build a `multierror.Error`, you can use the `ErrorOrNil` function diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go index 775b6e753..3e2589bfd 100644 --- a/vendor/github.com/hashicorp/go-multierror/append.go +++ b/vendor/github.com/hashicorp/go-multierror/append.go @@ -6,6 +6,8 @@ package multierror // If err is not a multierror.Error, then it will be turned into // one. If any of the errs are multierr.Error, they will be flattened // one level into err. +// Any nil errors within errs will be ignored. If err is nil, a new +// *Error will be returned. func Append(err error, errs ...error) *Error { switch err := err.(type) { case *Error: diff --git a/vendor/github.com/hashicorp/go-multierror/group.go b/vendor/github.com/hashicorp/go-multierror/group.go new file mode 100644 index 000000000..9c29efb7f --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/group.go @@ -0,0 +1,38 @@ +package multierror + +import "sync" + +// Group is a collection of goroutines which return errors that need to be +// coalesced. +type Group struct { + mutex sync.Mutex + err *Error + wg sync.WaitGroup +} + +// Go calls the given function in a new goroutine. +// +// If the function returns an error it is added to the group multierror which +// is returned by Wait. +func (g *Group) Go(f func() error) { + g.wg.Add(1) + + go func() { + defer g.wg.Done() + + if err := f(); err != nil { + g.mutex.Lock() + g.err = Append(g.err, err) + g.mutex.Unlock() + } + }() +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the multierror. 
+func (g *Group) Wait() *Error { + g.wg.Wait() + g.mutex.Lock() + defer g.mutex.Unlock() + return g.err +} diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go index 89b1422d1..f54574326 100644 --- a/vendor/github.com/hashicorp/go-multierror/multierror.go +++ b/vendor/github.com/hashicorp/go-multierror/multierror.go @@ -1,6 +1,7 @@ package multierror import ( + "errors" "fmt" ) @@ -39,13 +40,82 @@ func (e *Error) GoString() string { return fmt.Sprintf("*%#v", *e) } -// WrappedErrors returns the list of errors that this Error is wrapping. -// It is an implementation of the errwrap.Wrapper interface so that -// multierror.Error can be used with that library. +// WrappedErrors returns the list of errors that this Error is wrapping. It is +// an implementation of the errwrap.Wrapper interface so that multierror.Error +// can be used with that library. // -// This method is not safe to be called concurrently and is no different -// than accessing the Errors field directly. It is implemented only to -// satisfy the errwrap.Wrapper interface. +// This method is not safe to be called concurrently. Unlike accessing the +// Errors field directly, this function also checks if the multierror is nil to +// prevent a null-pointer panic. It satisfies the errwrap.Wrapper interface. func (e *Error) WrappedErrors() []error { + if e == nil { + return nil + } return e.Errors } + +// Unwrap returns an error from Error (or nil if there are no errors). +// This error returned will further support Unwrap to get the next error, +// etc. The order will match the order of Errors in the multierror.Error +// at the time of calling. +// +// The resulting error supports errors.As/Is/Unwrap so you can continue +// to use the stdlib errors package to introspect further. +// +// This will perform a shallow copy of the errors slice. Any errors appended +// to this error after calling Unwrap will not be available until a new +// Unwrap is called on the multierror.Error. +func (e *Error) Unwrap() error { + // If we have no errors then we do nothing + if e == nil || len(e.Errors) == 0 { + return nil + } + + // If we have exactly one error, we can just return that directly. + if len(e.Errors) == 1 { + return e.Errors[0] + } + + // Shallow copy the slice + errs := make([]error, len(e.Errors)) + copy(errs, e.Errors) + return chain(errs) +} + +// chain implements the interfaces necessary for errors.Is/As/Unwrap to +// work in a deterministic way with multierror. A chain tracks a list of +// errors while accounting for the current represented error. This lets +// Is/As be meaningful. +// +// Unwrap returns the next error. In the cleanest form, Unwrap would return +// the wrapped error here but we can't do that if we want to properly +// get access to all the errors. Instead, users are recommended to use +// Is/As to get the correct error type out. +// +// Precondition: []error is non-empty (len > 0) +type chain []error + +// Error implements the error interface +func (e chain) Error() string { + return e[0].Error() +} + +// Unwrap implements errors.Unwrap by returning the next error in the +// chain or nil if there are no more errors. +func (e chain) Unwrap() error { + if len(e) == 1 { + return nil + } + + return e[1:] +} + +// As implements errors.As by attempting to map to the current value. +func (e chain) As(target interface{}) bool { + return errors.As(e[0], target) +} + +// Is implements errors.Is by comparing the current value directly. 
+func (e chain) Is(target error) bool { + return errors.Is(e[0], target) +} diff --git a/vendor/github.com/imdario/mergo/.deepsource.toml b/vendor/github.com/imdario/mergo/.deepsource.toml new file mode 100644 index 000000000..8a0681af8 --- /dev/null +++ b/vendor/github.com/imdario/mergo/.deepsource.toml @@ -0,0 +1,12 @@ +version = 1 + +test_patterns = [ + "*_test.go" +] + +[[analyzers]] +name = "go" +enabled = true + + [analyzers.meta] + import_path = "github.com/imdario/mergo" \ No newline at end of file diff --git a/vendor/github.com/imdario/mergo/.gitignore b/vendor/github.com/imdario/mergo/.gitignore new file mode 100644 index 000000000..529c3412b --- /dev/null +++ b/vendor/github.com/imdario/mergo/.gitignore @@ -0,0 +1,33 @@ +#### joe made this: http://goel.io/joe + +#### go #### +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ + +#### vim #### +# Swap +[._]*.s[a-v][a-z] +[._]*.sw[a-p] +[._]s[a-v][a-z] +[._]sw[a-p] + +# Session +Session.vim + +# Temporary +.netrwhist +*~ +# Auto-generated tag files +tags diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml new file mode 100644 index 000000000..d324c43ba --- /dev/null +++ b/vendor/github.com/imdario/mergo/.travis.yml @@ -0,0 +1,12 @@ +language: go +arch: + - amd64 + - ppc64le +install: + - go get -t + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls +script: + - go test -race -v ./... +after_script: + - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN diff --git a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..469b44907 --- /dev/null +++ b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md @@ -0,0 +1,46 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. 
+ +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/imdario/mergo/CONTRIBUTING.md b/vendor/github.com/imdario/mergo/CONTRIBUTING.md new file mode 100644 index 000000000..0a1ff9f94 --- /dev/null +++ b/vendor/github.com/imdario/mergo/CONTRIBUTING.md @@ -0,0 +1,112 @@ + +# Contributing to mergo + +First off, thanks for taking the time to contribute! ❤️ + +All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉 + +> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about: +> - Star the project +> - Tweet about it +> - Refer this project in your project's readme +> - Mention the project at local meetups and tell your friends/colleagues + + +## Table of Contents + +- [Code of Conduct](#code-of-conduct) +- [I Have a Question](#i-have-a-question) +- [I Want To Contribute](#i-want-to-contribute) +- [Reporting Bugs](#reporting-bugs) +- [Suggesting Enhancements](#suggesting-enhancements) + +## Code of Conduct + +This project and everyone participating in it is governed by the +[mergo Code of Conduct](https://github.com/imdario/mergoblob/master/CODE_OF_CONDUCT.md). +By participating, you are expected to uphold this code. Please report unacceptable behavior +to <>. + + +## I Have a Question + +> If you want to ask a question, we assume that you have read the available [Documentation](https://pkg.go.dev/github.com/imdario/mergo). 
+ +Before you ask a question, it is best to search for existing [Issues](https://github.com/imdario/mergo/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first. + +If you then still feel the need to ask a question and need clarification, we recommend the following: + +- Open an [Issue](https://github.com/imdario/mergo/issues/new). +- Provide as much context as you can about what you're running into. +- Provide project and platform versions (nodejs, npm, etc), depending on what seems relevant. + +We will then take care of the issue as soon as possible. + +## I Want To Contribute + +> ### Legal Notice +> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license. + +### Reporting Bugs + + +#### Before Submitting a Bug Report + +A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible. + +- Make sure that you are using the latest version. +- Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](). If you are looking for support, you might want to check [this section](#i-have-a-question)). +- To see if other users have experienced (and potentially already solved) the same issue you are having, check if there is not already a bug report existing for your bug or error in the [bug tracker](https://github.com/imdario/mergoissues?q=label%3Abug). +- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue. +- Collect information about the bug: +- Stack trace (Traceback) +- OS, Platform and Version (Windows, Linux, macOS, x86, ARM) +- Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant. +- Possibly your input and the output +- Can you reliably reproduce the issue? And can you also reproduce it with older versions? + + +#### How Do I Submit a Good Bug Report? + +> You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead sensitive bugs must be sent by email to . + + +We use GitHub issues to track bugs and errors. If you run into an issue with the project: + +- Open an [Issue](https://github.com/imdario/mergo/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.) +- Explain the behavior you would expect and the actual behavior. +- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case. +- Provide the information you collected in the previous section. + +Once it's filed: + +- The project team will label the issue accordingly. +- A team member will try to reproduce the issue with your provided steps. 
If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced. +- If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be implemented by someone. + +### Suggesting Enhancements + +This section guides you through submitting an enhancement suggestion for mergo, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions. + + +#### Before Submitting an Enhancement + +- Make sure that you are using the latest version. +- Read the [documentation]() carefully and find out if the functionality is already covered, maybe by an individual configuration. +- Perform a [search](https://github.com/imdario/mergo/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one. +- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library. + + +#### How Do I Submit a Good Enhancement Suggestion? + +Enhancement suggestions are tracked as [GitHub issues](https://github.com/imdario/mergo/issues). + +- Use a **clear and descriptive title** for the issue to identify the suggestion. +- Provide a **step-by-step description of the suggested enhancement** in as many details as possible. +- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you. +- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. +- **Explain why this enhancement would be useful** to most mergo users. You may also want to point out the other projects that solved it better and which could serve as inspiration. + + +## Attribution +This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)! diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/github.com/imdario/mergo/LICENSE new file mode 100644 index 000000000..686680298 --- /dev/null +++ b/vendor/github.com/imdario/mergo/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2013 Dario Castañé. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md new file mode 100644 index 000000000..4f0287498 --- /dev/null +++ b/vendor/github.com/imdario/mergo/README.md @@ -0,0 +1,236 @@ +# Mergo + +[![GoDoc][3]][4] +[![GitHub release][5]][6] +[![GoCard][7]][8] +[![Build Status][1]][2] +[![Coverage Status][9]][10] +[![Sourcegraph][11]][12] +[![FOSSA Status][13]][14] +[![Become my sponsor][15]][16] +[![Tidelift][17]][18] + +[1]: https://travis-ci.org/imdario/mergo.png +[2]: https://travis-ci.org/imdario/mergo +[3]: https://godoc.org/github.com/imdario/mergo?status.svg +[4]: https://godoc.org/github.com/imdario/mergo +[5]: https://img.shields.io/github/release/imdario/mergo.svg +[6]: https://github.com/imdario/mergo/releases +[7]: https://goreportcard.com/badge/imdario/mergo +[8]: https://goreportcard.com/report/github.com/imdario/mergo +[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master +[10]: https://coveralls.io/github/imdario/mergo?branch=master +[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg +[12]: https://sourcegraph.com/github.com/imdario/mergo?badge +[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield +[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield +[15]: https://img.shields.io/github/sponsors/imdario +[16]: https://github.com/sponsors/imdario +[17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo +[18]: https://tidelift.com/subscription/pkg/go-github.com-imdario-mergo + +A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. + +Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). + +Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. + +## Status + +It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild). 
+ +### Important note + +Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules. + +Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code. + +If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). + +### Donations + +If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes: + +Buy Me a Coffee at ko-fi.com +Donate using Liberapay +Become my sponsor + +### Mergo in the wild + +- [moby/moby](https://github.com/moby/moby) +- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) +- [vmware/dispatch](https://github.com/vmware/dispatch) +- [Shopify/themekit](https://github.com/Shopify/themekit) +- [imdario/zas](https://github.com/imdario/zas) +- [matcornic/hermes](https://github.com/matcornic/hermes) +- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go) +- [kataras/iris](https://github.com/kataras/iris) +- [michaelsauter/crane](https://github.com/michaelsauter/crane) +- [go-task/task](https://github.com/go-task/task) +- [sensu/uchiwa](https://github.com/sensu/uchiwa) +- [ory/hydra](https://github.com/ory/hydra) +- [sisatech/vcli](https://github.com/sisatech/vcli) +- [dairycart/dairycart](https://github.com/dairycart/dairycart) +- [projectcalico/felix](https://github.com/projectcalico/felix) +- [resin-os/balena](https://github.com/resin-os/balena) +- [go-kivik/kivik](https://github.com/go-kivik/kivik) +- [Telefonica/govice](https://github.com/Telefonica/govice) +- [supergiant/supergiant](supergiant/supergiant) +- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce) +- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy) +- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel) +- [EagerIO/Stout](https://github.com/EagerIO/Stout) +- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api) +- [russross/canvasassignments](https://github.com/russross/canvasassignments) +- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api) +- [casualjim/exeggutor](https://github.com/casualjim/exeggutor) +- [divshot/gitling](https://github.com/divshot/gitling) +- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl) +- [andrerocker/deploy42](https://github.com/andrerocker/deploy42) +- [elwinar/rambler](https://github.com/elwinar/rambler) +- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman) +- [jfbus/impressionist](https://github.com/jfbus/impressionist) +- [Jmeyering/zealot](https://github.com/Jmeyering/zealot) +- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host) +- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go) +- [thoas/picfit](https://github.com/thoas/picfit) +- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) +- 
[jnuthong/item_search](https://github.com/jnuthong/item_search) +- [bukalapak/snowboard](https://github.com/bukalapak/snowboard) +- [containerssh/containerssh](https://github.com/containerssh/containerssh) +- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser) +- [tjpnz/structbot](https://github.com/tjpnz/structbot) + +## Install + + go get github.com/imdario/mergo + + // use in your .go code + import ( + "github.com/imdario/mergo" + ) + +## Usage + +You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). + +```go +if err := mergo.Merge(&dst, src); err != nil { + // ... +} +``` + +Also, you can merge overwriting values using the transformer `WithOverride`. + +```go +if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { + // ... +} +``` + +Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field. + +```go +if err := mergo.Map(&dst, srcMap); err != nil { + // ... +} +``` + +Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values. + +Here is a nice example: + +```go +package main + +import ( + "fmt" + "github.com/imdario/mergo" +) + +type Foo struct { + A string + B int64 +} + +func main() { + src := Foo{ + A: "one", + B: 2, + } + dest := Foo{ + A: "two", + } + mergo.Merge(&dest, src) + fmt.Println(dest) + // Will print + // {two 2} +} +``` + +Note: if test are failing due missing package, please execute: + + go get gopkg.in/yaml.v3 + +### Transformers + +Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`? + +```go +package main + +import ( + "fmt" + "github.com/imdario/mergo" + "reflect" + "time" +) + +type timeTransformer struct { +} + +func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { + if typ == reflect.TypeOf(time.Time{}) { + return func(dst, src reflect.Value) error { + if dst.CanSet() { + isZero := dst.MethodByName("IsZero") + result := isZero.Call([]reflect.Value{}) + if result[0].Bool() { + dst.Set(src) + } + } + return nil + } + } + return nil +} + +type Snapshot struct { + Time time.Time + // ... +} + +func main() { + src := Snapshot{time.Now()} + dest := Snapshot{} + mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) + fmt.Println(dest) + // Will print + // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } +} +``` + +## Contact me + +If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) + +## About + +Written by [Dario Castañé](http://dario.im). 
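+
+As a small addition to the Usage section above, the following sketch shows the struct-from-map direction of `Map` together with `WithOverride`. Only `mergo.Map` and `mergo.WithOverride` come from the library; the `AppConfig` type, its field names and the values are made up for illustration.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/imdario/mergo"
+)
+
+// AppConfig is an illustrative type, not part of mergo.
+type AppConfig struct {
+	Name string
+	Port int
+}
+
+func main() {
+	dst := AppConfig{Name: "server"} // Port is still its zero value
+	src := map[string]interface{}{
+		"name": "fallback", // "name" is capitalized to match the exported field Name
+		"port": 8080,
+	}
+
+	// Default behavior: only zero-value fields of dst are filled from src.
+	if err := mergo.Map(&dst, src); err != nil {
+		panic(err)
+	}
+	fmt.Println(dst) // {server 8080}: Name was already set and is kept
+
+	// With WithOverride, non-empty dst fields are replaced as well.
+	if err := mergo.Map(&dst, src, mergo.WithOverride); err != nil {
+		panic(err)
+	}
+	fmt.Println(dst) // {fallback 8080}
+}
+```
+
+Note that the map values must have the same kind as the target fields (an `int` value cannot be mapped into an `int64` field, for example), otherwise `Map` returns a type mismatch error.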
+ +## License + +[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). + + +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large) diff --git a/vendor/github.com/imdario/mergo/SECURITY.md b/vendor/github.com/imdario/mergo/SECURITY.md new file mode 100644 index 000000000..a5de61f77 --- /dev/null +++ b/vendor/github.com/imdario/mergo/SECURITY.md @@ -0,0 +1,14 @@ +# Security Policy + +## Supported Versions + +| Version | Supported | +| ------- | ------------------ | +| 0.3.x | :white_check_mark: | +| < 0.3 | :x: | + +## Security contact information + +To report a security vulnerability, please use the +[Tidelift security contact](https://tidelift.com/security). +Tidelift will coordinate the fix and disclosure. diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go new file mode 100644 index 000000000..fcd985f99 --- /dev/null +++ b/vendor/github.com/imdario/mergo/doc.go @@ -0,0 +1,143 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. + +Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). + +Status + +It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc. + +Important note + +Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. . Also, this version adds suppot for go modules. + +Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code. + +If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). + +Install + +Do your usual installation procedure: + + go get github.com/imdario/mergo + + // use in your .go code + import ( + "github.com/imdario/mergo" + ) + +Usage + +You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). + + if err := mergo.Merge(&dst, src); err != nil { + // ... + } + +Also, you can merge overwriting values using the transformer WithOverride. + + if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { + // ... + } + +Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). 
Keys are capitalized to find each corresponding exported field. + + if err := mergo.Map(&dst, srcMap); err != nil { + // ... + } + +Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values. + +Here is a nice example: + + package main + + import ( + "fmt" + "github.com/imdario/mergo" + ) + + type Foo struct { + A string + B int64 + } + + func main() { + src := Foo{ + A: "one", + B: 2, + } + dest := Foo{ + A: "two", + } + mergo.Merge(&dest, src) + fmt.Println(dest) + // Will print + // {two 2} + } + +Transformers + +Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time? + + package main + + import ( + "fmt" + "github.com/imdario/mergo" + "reflect" + "time" + ) + + type timeTransformer struct { + } + + func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { + if typ == reflect.TypeOf(time.Time{}) { + return func(dst, src reflect.Value) error { + if dst.CanSet() { + isZero := dst.MethodByName("IsZero") + result := isZero.Call([]reflect.Value{}) + if result[0].Bool() { + dst.Set(src) + } + } + return nil + } + } + return nil + } + + type Snapshot struct { + Time time.Time + // ... + } + + func main() { + src := Snapshot{time.Now()} + dest := Snapshot{} + mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) + fmt.Println(dest) + // Will print + // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } + } + +Contact me + +If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario + +About + +Written by Dario Castañé: https://da.rio.hn + +License + +BSD 3-Clause license, as Go language. + +*/ +package mergo diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go new file mode 100644 index 000000000..b50d5c2a4 --- /dev/null +++ b/vendor/github.com/imdario/mergo/map.go @@ -0,0 +1,178 @@ +// Copyright 2014 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "fmt" + "reflect" + "unicode" + "unicode/utf8" +) + +func changeInitialCase(s string, mapper func(rune) rune) string { + if s == "" { + return s + } + r, n := utf8.DecodeRuneInString(s) + return string(mapper(r)) + s[n:] +} + +func isExported(field reflect.StructField) bool { + r, _ := utf8.DecodeRuneInString(field.Name) + return r >= 'A' && r <= 'Z' +} + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { + overwrite := config.Overwrite + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... 
+ visited[h] = &visit{typ, seen, addr} + } + zeroValue := reflect.Value{} + switch dst.Kind() { + case reflect.Map: + dstMap := dst.Interface().(map[string]interface{}) + for i, n := 0, src.NumField(); i < n; i++ { + srcType := src.Type() + field := srcType.Field(i) + if !isExported(field) { + continue + } + fieldName := field.Name + fieldName = changeInitialCase(fieldName, unicode.ToLower) + if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) { + dstMap[fieldName] = src.Field(i).Interface() + } + } + case reflect.Ptr: + if dst.IsNil() { + v := reflect.New(dst.Type().Elem()) + dst.Set(v) + } + dst = dst.Elem() + fallthrough + case reflect.Struct: + srcMap := src.Interface().(map[string]interface{}) + for key := range srcMap { + config.overwriteWithEmptyValue = true + srcValue := srcMap[key] + fieldName := changeInitialCase(key, unicode.ToUpper) + dstElement := dst.FieldByName(fieldName) + if dstElement == zeroValue { + // We discard it because the field doesn't exist. + continue + } + srcElement := reflect.ValueOf(srcValue) + dstKind := dstElement.Kind() + srcKind := srcElement.Kind() + if srcKind == reflect.Ptr && dstKind != reflect.Ptr { + srcElement = srcElement.Elem() + srcKind = reflect.TypeOf(srcElement.Interface()).Kind() + } else if dstKind == reflect.Ptr { + // Can this work? I guess it can't. + if srcKind != reflect.Ptr && srcElement.CanAddr() { + srcPtr := srcElement.Addr() + srcElement = reflect.ValueOf(srcPtr) + srcKind = reflect.Ptr + } + } + + if !srcElement.IsValid() { + continue + } + if srcKind == dstKind { + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface { + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } else if srcKind == reflect.Map { + if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } else { + return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) + } + } + } + return +} + +// Map sets fields' values in dst from src. +// src can be a map with string keys or a struct. dst must be the opposite: +// if src is a map, dst must be a valid pointer to struct. If src is a struct, +// dst must be map[string]interface{}. +// It won't merge unexported (private) fields and will do recursively +// any exported field. +// If dst is a map, keys will be src fields' names in lower camel case. +// Missing key in src that doesn't match a field in dst will be skipped. This +// doesn't apply if dst is a map. +// This is separated method from Merge because it is cleaner and it keeps sane +// semantics: merging equal types, mapping different (restricted) types. +func Map(dst, src interface{}, opts ...func(*Config)) error { + return _map(dst, src, opts...) +} + +// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by +// non-empty src attribute values. +// Deprecated: Use Map(…) with WithOverride +func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { + return _map(dst, src, append(opts, WithOverride)...) 
+} + +func _map(dst, src interface{}, opts ...func(*Config)) error { + if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { + return ErrNonPointerArgument + } + var ( + vDst, vSrc reflect.Value + err error + ) + config := &Config{} + + for _, opt := range opts { + opt(config) + } + + if vDst, vSrc, err = resolveValues(dst, src); err != nil { + return err + } + // To be friction-less, we redirect equal-type arguments + // to deepMerge. Only because arguments can be anything. + if vSrc.Kind() == vDst.Kind() { + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) + } + switch vSrc.Kind() { + case reflect.Struct: + if vDst.Kind() != reflect.Map { + return ErrExpectedMapAsDestination + } + case reflect.Map: + if vDst.Kind() != reflect.Struct { + return ErrExpectedStructAsDestination + } + default: + return ErrNotSupported + } + return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config) +} diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go new file mode 100644 index 000000000..0ef9b2138 --- /dev/null +++ b/vendor/github.com/imdario/mergo/merge.go @@ -0,0 +1,409 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "fmt" + "reflect" +) + +func hasMergeableFields(dst reflect.Value) (exported bool) { + for i, n := 0, dst.NumField(); i < n; i++ { + field := dst.Type().Field(i) + if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { + exported = exported || hasMergeableFields(dst.Field(i)) + } else if isExportedComponent(&field) { + exported = exported || len(field.PkgPath) == 0 + } + } + return +} + +func isExportedComponent(field *reflect.StructField) bool { + pkgPath := field.PkgPath + if len(pkgPath) > 0 { + return false + } + c := field.Name[0] + if 'a' <= c && c <= 'z' || c == '_' { + return false + } + return true +} + +type Config struct { + Transformers Transformers + Overwrite bool + ShouldNotDereference bool + AppendSlice bool + TypeCheck bool + overwriteWithEmptyValue bool + overwriteSliceWithEmptyValue bool + sliceDeepCopy bool + debug bool +} + +type Transformers interface { + Transformer(reflect.Type) func(dst, src reflect.Value) error +} + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { + overwrite := config.Overwrite + typeCheck := config.TypeCheck + overwriteWithEmptySrc := config.overwriteWithEmptyValue + overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue + sliceDeepCopy := config.sliceDeepCopy + + if !src.IsValid() { + return + } + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... 
+ visited[h] = &visit{typ, seen, addr} + } + + if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() { + if fn := config.Transformers.Transformer(dst.Type()); fn != nil { + err = fn(dst, src) + return + } + } + + switch dst.Kind() { + case reflect.Struct: + if hasMergeableFields(dst) { + for i, n := 0, dst.NumField(); i < n; i++ { + if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { + return + } + } + } else { + if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) { + dst.Set(src) + } + } + case reflect.Map: + if dst.IsNil() && !src.IsNil() { + if dst.CanSet() { + dst.Set(reflect.MakeMap(dst.Type())) + } else { + dst = src + return + } + } + + if src.Kind() != reflect.Map { + if overwrite && dst.CanSet() { + dst.Set(src) + } + return + } + + for _, key := range src.MapKeys() { + srcElement := src.MapIndex(key) + if !srcElement.IsValid() { + continue + } + dstElement := dst.MapIndex(key) + switch srcElement.Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: + if srcElement.IsNil() { + if overwrite { + dst.SetMapIndex(key, srcElement) + } + continue + } + fallthrough + default: + if !srcElement.CanInterface() { + continue + } + switch reflect.TypeOf(srcElement.Interface()).Kind() { + case reflect.Struct: + fallthrough + case reflect.Ptr: + fallthrough + case reflect.Map: + srcMapElm := srcElement + dstMapElm := dstElement + if srcMapElm.CanInterface() { + srcMapElm = reflect.ValueOf(srcMapElm.Interface()) + if dstMapElm.IsValid() { + dstMapElm = reflect.ValueOf(dstMapElm.Interface()) + } + } + if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil { + return + } + case reflect.Slice: + srcSlice := reflect.ValueOf(srcElement.Interface()) + + var dstSlice reflect.Value + if !dstElement.IsValid() || dstElement.IsNil() { + dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len()) + } else { + dstSlice = reflect.ValueOf(dstElement.Interface()) + } + + if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { + if typeCheck && srcSlice.Type() != dstSlice.Type() { + return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) + } + dstSlice = srcSlice + } else if config.AppendSlice { + if srcSlice.Type() != dstSlice.Type() { + return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) + } + dstSlice = reflect.AppendSlice(dstSlice, srcSlice) + } else if sliceDeepCopy { + i := 0 + for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ { + srcElement := srcSlice.Index(i) + dstElement := dstSlice.Index(i) + + if srcElement.CanInterface() { + srcElement = reflect.ValueOf(srcElement.Interface()) + } + if dstElement.CanInterface() { + dstElement = reflect.ValueOf(dstElement.Interface()) + } + + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } + + } + dst.SetMapIndex(key, dstSlice) + } + } + + if dstElement.IsValid() && !isEmptyValue(dstElement, !config.ShouldNotDereference) { + if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice { + continue + } + if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map && reflect.TypeOf(dstElement.Interface()).Kind() == reflect.Map { + continue + } + } 
+ + if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement, !config.ShouldNotDereference)) { + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + dst.SetMapIndex(key, srcElement) + } + } + + // Ensure that all keys in dst are deleted if they are not in src. + if overwriteWithEmptySrc { + for _, key := range dst.MapKeys() { + srcElement := src.MapIndex(key) + if !srcElement.IsValid() { + dst.SetMapIndex(key, reflect.Value{}) + } + } + } + case reflect.Slice: + if !dst.CanSet() { + break + } + if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { + dst.Set(src) + } else if config.AppendSlice { + if src.Type() != dst.Type() { + return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) + } + dst.Set(reflect.AppendSlice(dst, src)) + } else if sliceDeepCopy { + for i := 0; i < src.Len() && i < dst.Len(); i++ { + srcElement := src.Index(i) + dstElement := dst.Index(i) + if srcElement.CanInterface() { + srcElement = reflect.ValueOf(srcElement.Interface()) + } + if dstElement.CanInterface() { + dstElement = reflect.ValueOf(dstElement.Interface()) + } + + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } + } + case reflect.Ptr: + fallthrough + case reflect.Interface: + if isReflectNil(src) { + if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } + break + } + + if src.Kind() != reflect.Interface { + if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { + if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { + dst.Set(src) + } + } else if src.Kind() == reflect.Ptr { + if !config.ShouldNotDereference { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + } else { + if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() { + dst.Set(src) + } + } + } else if dst.Elem().Type() == src.Type() { + if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { + return + } + } else { + return ErrDifferentArgumentsTypes + } + break + } + + if dst.IsNil() || overwrite { + if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { + dst.Set(src) + } + break + } + + if dst.Elem().Kind() == src.Elem().Kind() { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + break + } + default: + mustSet := (isEmptyValue(dst, !config.ShouldNotDereference) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) + if mustSet { + if dst.CanSet() { + dst.Set(src) + } else { + dst = src + } + } + } + + return +} + +// Merge will fill any empty for value type attributes on the dst struct using corresponding +// src attributes if they themselves are not empty. dst and src must be valid same-type structs +// and dst must be a pointer to struct. +// It won't merge unexported (private) fields and will do recursively any exported field. +func Merge(dst, src interface{}, opts ...func(*Config)) error { + return merge(dst, src, opts...) +} + +// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by +// non-empty src attribute values. 
+// Deprecated: use Merge(…) with WithOverride +func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { + return merge(dst, src, append(opts, WithOverride)...) +} + +// WithTransformers adds transformers to merge, allowing to customize the merging of some types. +func WithTransformers(transformers Transformers) func(*Config) { + return func(config *Config) { + config.Transformers = transformers + } +} + +// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values. +func WithOverride(config *Config) { + config.Overwrite = true +} + +// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values. +func WithOverwriteWithEmptyValue(config *Config) { + config.Overwrite = true + config.overwriteWithEmptyValue = true +} + +// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice. +func WithOverrideEmptySlice(config *Config) { + config.overwriteSliceWithEmptyValue = true +} + +// WithoutDereference prevents dereferencing pointers when evaluating whether they are empty +// (i.e. a non-nil pointer is never considered empty). +func WithoutDereference(config *Config) { + config.ShouldNotDereference = true +} + +// WithAppendSlice will make merge append slices instead of overwriting it. +func WithAppendSlice(config *Config) { + config.AppendSlice = true +} + +// WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride). +func WithTypeCheck(config *Config) { + config.TypeCheck = true +} + +// WithSliceDeepCopy will merge slice element one by one with Overwrite flag. +func WithSliceDeepCopy(config *Config) { + config.sliceDeepCopy = true + config.Overwrite = true +} + +func merge(dst, src interface{}, opts ...func(*Config)) error { + if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { + return ErrNonPointerArgument + } + var ( + vDst, vSrc reflect.Value + err error + ) + + config := &Config{} + + for _, opt := range opts { + opt(config) + } + + if vDst, vSrc, err = resolveValues(dst, src); err != nil { + return err + } + if vDst.Type() != vSrc.Type() { + return ErrDifferentArgumentsTypes + } + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) +} + +// IsReflectNil is the reflect value provided nil +func isReflectNil(v reflect.Value) bool { + k := v.Kind() + switch k { + case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr: + // Both interface and slice are nil if first word is 0. + // Both are always bigger than a word; assume flagIndir. + return v.IsNil() + default: + return false + } +} diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go new file mode 100644 index 000000000..0a721e2d8 --- /dev/null +++ b/vendor/github.com/imdario/mergo/mergo.go @@ -0,0 +1,81 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "errors" + "reflect" +) + +// Errors reported by Mergo when it finds invalid arguments. 
+var ( + ErrNilArguments = errors.New("src and dst must not be nil") + ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") + ErrNotSupported = errors.New("only structs, maps, and slices are supported") + ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") + ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") + ErrNonPointerArgument = errors.New("dst must be a pointer") +) + +// During deepMerge, must keep track of checks that are +// in progress. The comparison algorithm assumes that all +// checks in progress are true when it reencounters them. +// Visited are stored in a map indexed by 17 * a1 + a2; +type visit struct { + typ reflect.Type + next *visit + ptr uintptr +} + +// From src/pkg/encoding/json/encode.go. +func isEmptyValue(v reflect.Value, shouldDereference bool) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + if v.IsNil() { + return true + } + if shouldDereference { + return isEmptyValue(v.Elem(), shouldDereference) + } + return false + case reflect.Func: + return v.IsNil() + case reflect.Invalid: + return true + } + return false +} + +func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { + if dst == nil || src == nil { + err = ErrNilArguments + return + } + vDst = reflect.ValueOf(dst).Elem() + if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice { + err = ErrNotSupported + return + } + vSrc = reflect.ValueOf(src) + // We check if vSrc is a pointer to dereference it. + if vSrc.Kind() == reflect.Ptr { + vSrc = vSrc.Elem() + } + return +} diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE new file mode 100644 index 000000000..1eb75ef68 --- /dev/null +++ b/vendor/github.com/klauspost/compress/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md new file mode 100644 index 000000000..ea7324da6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/README.md @@ -0,0 +1,79 @@ +# Finite State Entropy + +This package provides Finite State Entropy encoding and decoding. + +Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS)) +encoding provides a fast near-optimal symbol encoding/decoding +for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd). + +This can be used for compressing input with a lot of similar input values to the smallest number of bytes. +This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, +but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. + +* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse) + +## News + + * Feb 2018: First implementation released. Consider this beta software for now. + +# Usage + +This package provides a low level interface that allows to compress single independent blocks. + +Each block is separate, and there is no built in integrity checks. +This means that the caller should keep track of block sizes and also do checksums if needed. + +Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function. +You must provide input and will receive the output and maybe an error. + +These error values can be returned: + +| Error | Description | +|---------------------|-----------------------------------------------------------------------------| +| `` | Everything ok, output is returned | +| `ErrIncompressible` | Returned when input is judged to be too hard to compress | +| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | +| `(error)` | An internal error occurred. | + +As can be seen above there are errors that will be returned even under normal operation so it is important to handle these. + +To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object +that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same +object can be used for both. + +Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this +you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. + +Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function. +You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back +your input was likely corrupted. 
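+
+A rough, illustrative round trip of the API described above: only `Compress`, `Decompress`, `Scratch`, `ErrIncompressible` and `ErrUseRLE` are taken from this package's documentation. The exact `Decompress` signature is not shown in this diff and is assumed here to mirror `Compress`; the sample input and printed messages are made up.
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/klauspost/compress/fse"
+)
+
+func main() {
+	// Repetitive input with a skewed symbol distribution compresses well
+	// with pure entropy coding.
+	in := bytes.Repeat([]byte("abcabdabe"), 1000)
+
+	var s fse.Scratch // re-usable; the compressed output is kept in s.Out
+	comp, err := fse.Compress(in, &s)
+	switch err {
+	case fse.ErrIncompressible:
+		fmt.Println("input judged too hard to compress, store it as-is")
+		return
+	case fse.ErrUseRLE:
+		fmt.Println("input is a single repeated byte, use RLE instead")
+		return
+	case nil:
+	default:
+		panic(err)
+	}
+	fmt.Printf("compressed %d bytes to %d bytes\n", len(in), len(comp))
+
+	// We still hold comp (which aliases s.Out), so clear Out before re-using
+	// the scratch, as described above. There are no built-in integrity checks,
+	// so the caller must track sizes and checksums separately.
+	s.Out = nil
+	out, err := fse.Decompress(comp, &s)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println("round trip ok:", bytes.Equal(in, out))
+}
+```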
+ +It is important to note that a successful decoding does *not* mean your output matches your original input. +There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. + +For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples). + +# Performance + +A lot of factors are affecting speed. Block sizes and compressibility of the material are primary factors. +All compression functions are currently only running on the calling goroutine so only one core will be used per block. + +The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input +is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be +beneficial to transpose all your input values down by 64. + +With moderate block sizes around 64k speed are typically 200MB/s per core for compression and +around 300MB/s decompression speed. + +The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s. + +# Plans + +At one point, more internals will be exposed to facilitate more "expert" usage of the components. + +A streaming interface is also likely to be implemented. Likely compatible with [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261). + +# Contributing + +Contributions are always welcome. Be aware that adding public functions will require good justification and breaking +changes will likely not be accepted. If in doubt open an issue before writing the PR. \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go new file mode 100644 index 000000000..f65eb3909 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitreader.go @@ -0,0 +1,122 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "encoding/binary" + "errors" + "io" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. +func (b *bitReader) getBits(n uint8) uint16 { + if n == 0 || b.bitsRead >= 64 { + return 0 + } + return b.getBitsFast(n) +} + +// getBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. 
+func (b *bitReader) getBitsFast(n uint8) uint16 { + const regMask = 64 - 1 + v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + // 2 bounds checks. + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.bitsRead >= 64 && b.off == 0 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go new file mode 100644 index 000000000..43e463611 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go @@ -0,0 +1,168 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16ZeroNC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +// This is fastest if bits can be zero. 
+func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { + if bits == 0 { + return + } + value <<= (16 - bits) & 15 + value >>= (16 - bits) & 15 + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. +// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. +func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.bitContainer >>= v << 3 + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() + return nil +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go new file mode 100644 index 000000000..abade2d60 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bytereader.go @@ -0,0 +1,47 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. 
+type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// Uint32 returns a little endian uint32 starting at current offset. +func (b byteReader) Uint32() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go new file mode 100644 index 000000000..0d31f5ebc --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -0,0 +1,684 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "errors" + "fmt" +) + +// Compress the input bytes. Input must be < 2GB. +// Provide a Scratch buffer to avoid memory allocations. +// Note that the output is also kept in the scratch buffer. +// If input is too hard to compress, ErrIncompressible is returned. +// If input is a single byte value repeated ErrUseRLE is returned. +func Compress(in []byte, s *Scratch) ([]byte, error) { + if len(in) <= 1 { + return nil, ErrIncompressible + } + if len(in) > (2<<30)-1 { + return nil, errors.New("input too big, must be < 2GB") + } + s, err := s.prepare(in) + if err != nil { + return nil, err + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + if maxCount == 0 { + maxCount = s.countSimple(in) + } + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount == len(in) { + // One symbol, use RLE + return nil, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, ErrIncompressible + } + s.optimalTableLog() + err = s.normalizeCount() + if err != nil { + return nil, err + } + err = s.writeCount() + if err != nil { + return nil, err + } + + if false { + err = s.validateNorm() + if err != nil { + return nil, err + } + } + + err = s.buildCTable() + if err != nil { + return nil, err + } + err = s.compress(in) + if err != nil { + return nil, err + } + s.Out = s.bw.out + // Check if we compressed. + if len(s.Out) >= len(in) { + return nil, ErrIncompressible + } + return s.Out, nil +} + +// cState contains the compression state of a stream. +type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + first.deltaFindState + c.state = c.stateTable[lu] + return +} + +// encode the output symbol provided and write it to the bitstream. 
+func (c *cState) encode(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16NC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encodeZero(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) + c.bw.flush() +} + +// compress is the main compression loop that will encode the input from the last byte to the first. +func (s *Scratch) compress(src []byte) error { + if len(src) <= 2 { + return errors.New("compress: src too small") + } + tt := s.ct.symbolTT[:256] + s.bw.reset(s.Out) + + // Our two states each encodes every second byte. + // Last byte encoded (first byte decoded) will always be encoded by c1. + var c1, c2 cState + + // Encode so remaining size is divisible by 4. + ip := len(src) + if ip&1 == 1 { + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + c1.encodeZero(tt[src[ip-3]]) + ip -= 3 + } else { + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + ip -= 2 + } + if ip&2 != 0 { + c2.encodeZero(tt[src[ip-1]]) + c1.encodeZero(tt[src[ip-2]]) + ip -= 2 + } + + // Main compression loop. + switch { + case !s.zeroBits && s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush. + // We do not need to check if any output is 0 bits. + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + c2.encode(tt[v2]) + c1.encode(tt[v3]) + ip -= 4 + } + case !s.zeroBits: + // We do not need to check if any output is 0 bits. + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + s.bw.flush32() + c2.encode(tt[v2]) + c1.encode(tt[v3]) + ip -= 4 + } + case s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + ip -= 4 + } + default: + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + s.bw.flush32() + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + ip -= 4 + } + } + + // Flush final state. + // Used to initialize state when decoding. + c2.flush(s.actualTableLog) + c1.flush(s.actualTableLog) + + return s.bw.close() +} + +// writeCount will write the normalized histogram count to header. +// This is read back by readNCount. 
+func (s *Scratch) writeCount() error { + var ( + tableLog = s.actualTableLog + tableSize = 1 << tableLog + previous0 bool + charnum uint16 + + maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + + // Write Table Size + bitStream = uint32(tableLog - minTablelog) + bitCount = uint(4) + remaining = int16(tableSize + 1) /* +1 for extra accuracy */ + threshold = int16(tableSize) + nbBits = uint(tableLog + 1) + ) + if cap(s.Out) < maxHeaderSize { + s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize) + } + outP := uint(0) + out := s.Out[:maxHeaderSize] + + // stops at 1 + for remaining > 1 { + if previous0 { + start := charnum + for s.norm[charnum] == 0 { + charnum++ + } + for charnum >= start+24 { + start += 24 + bitStream += uint32(0xFFFF) << bitCount + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + } + for charnum >= start+3 { + start += 3 + bitStream += 3 << bitCount + bitCount += 2 + } + bitStream += uint32(charnum-start) << bitCount + bitCount += 2 + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + count := s.norm[charnum] + charnum++ + max := (2*threshold - 1) - remaining + if count < 0 { + remaining += count + } else { + remaining -= count + } + count++ // +1 for extra accuracy + if count >= threshold { + count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ + } + bitStream += uint32(count) << bitCount + bitCount += nbBits + if count < max { + bitCount-- + } + + previous0 = count == 1 + if remaining < 1 { + return errors.New("internal error: remaining<1") + } + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += (bitCount + 7) / 8 + + if charnum > s.symbolLen { + return errors.New("internal error: charnum > s.symbolLen") + } + s.Out = out[:outP] + return nil +} + +// symbolTransform contains the state transform for a symbol. +type symbolTransform struct { + deltaFindState int32 + deltaNbBits uint32 +} + +// String prints values as a human readable string. +func (s symbolTransform) String() string { + return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState) +} + +// cTable contains tables used for compression. +type cTable struct { + tableSymbol []byte + stateTable []uint16 + symbolTT []symbolTransform +} + +// allocCtable will allocate tables needed for compression. +// If existing tables a re big enough, they are simply re-used. +func (s *Scratch) allocCtable() { + tableSize := 1 << s.actualTableLog + // get tableSymbol that is big enough. + if cap(s.ct.tableSymbol) < tableSize { + s.ct.tableSymbol = make([]byte, tableSize) + } + s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] + + ctSize := tableSize + if cap(s.ct.stateTable) < ctSize { + s.ct.stateTable = make([]uint16, ctSize) + } + s.ct.stateTable = s.ct.stateTable[:ctSize] + + if cap(s.ct.symbolTT) < 256 { + s.ct.symbolTT = make([]symbolTransform, 256) + } + s.ct.symbolTT = s.ct.symbolTT[:256] +} + +// buildCTable will populate the compression table so it is ready to be used. 
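+// It computes cumulative symbol start positions, spreads the symbols across the
+// state table with a fixed step, and finally derives each symbol's
+// (deltaNbBits, deltaFindState) transform used by encode and encodeZero.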
+func (s *Scratch) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [maxSymbolValue + 2]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. + largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = int32(total - 1) + total++ + default: + maxBitsOut := uint32(tableLog) - highBits(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = int32(total - v) + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int) { + for _, v := range in { + s.count[v]++ + } + m := uint32(0) + for i, v := range s.count[:] { + if v > m { + m = v + } + if v > 0 { + s.symbolLen = uint16(i) + 1 + } + } + return int(m) +} + +// minTableLog provides the minimum logSize to safely represent a distribution. 
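+// For example, 4096 bytes of input over 60 distinct symbols gives
+// min(highBits(4095)+1, highBits(59)+2) = min(12, 7) = 7.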
+func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 + minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > maxTableLog { + tableLog = maxTableLog + } + s.actualTableLog = tableLog +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +func (s *Scratch) normalizeCount() error { + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(s.br.remain()) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(s.br.remain() >> tableLog) + ) + + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + return s.normalizeCount2() + } + s.norm[largest] += stillToDistribute + return nil +} + +// Secondary normalization method. +// To be used when primary method fails. 
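+// It is selected when the primary method would have to remove at least half of
+// the largest symbol's normalized count to make the distribution sum to the table size.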
+func (s *Scratch) normalizeCount2() error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(s.br.remain()) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// validateNorm validates the normalized histogram table. 
+func (s *Scratch) validateNorm() (err error) { + var total int + for _, v := range s.norm[:s.symbolLen] { + if v >= 0 { + total += int(v) + } else { + total -= int(v) + } + } + defer func() { + if err == nil { + return + } + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) + for i, v := range s.norm[:s.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) + } + }() + if total != (1 << s.actualTableLog) { + return fmt.Errorf("warning: Total == %d != %d", total, 1< tablelogAbsoluteMax { + return errors.New("tableLog too large") + } + bitStream >>= 4 + bitCount := uint(4) + + s.actualTableLog = uint8(nbBits) + remaining := int32((1 << nbBits) + 1) + threshold := int32(1 << nbBits) + gotTotal := int32(0) + nbBits++ + + for remaining > 1 { + if previous0 { + n0 := charnum + for (bitStream & 0xFFFF) == 0xFFFF { + n0 += 24 + if b.off < iend-5 { + b.advance(2) + bitStream = b.Uint32() >> bitCount + } else { + bitStream >>= 16 + bitCount += 16 + } + } + for (bitStream & 3) == 3 { + n0 += 3 + bitStream >>= 2 + bitCount += 2 + } + n0 += uint16(bitStream & 3) + bitCount += 2 + if n0 > maxSymbolValue { + return errors.New("maxSymbolValue too small") + } + for charnum < n0 { + s.norm[charnum&0xff] = 0 + charnum++ + } + + if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { + b.advance(bitCount >> 3) + bitCount &= 7 + bitStream = b.Uint32() >> bitCount + } else { + bitStream >>= 2 + } + } + + max := (2*(threshold) - 1) - (remaining) + var count int32 + + if (int32(bitStream) & (threshold - 1)) < max { + count = int32(bitStream) & (threshold - 1) + bitCount += nbBits - 1 + } else { + count = int32(bitStream) & (2*threshold - 1) + if count >= threshold { + count -= max + } + bitCount += nbBits + } + + count-- // extra accuracy + if count < 0 { + // -1 means +1 + remaining += count + gotTotal -= count + } else { + remaining -= count + gotTotal += count + } + s.norm[charnum&0xff] = int16(count) + charnum++ + previous0 = count == 0 + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { + b.advance(bitCount >> 3) + bitCount &= 7 + } else { + bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) + b.off = len(b.b) - 4 + } + bitStream = b.Uint32() >> (bitCount & 31) + } + s.symbolLen = charnum + + if s.symbolLen <= 1 { + return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) + } + if s.symbolLen > maxSymbolValue+1 { + return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) + } + if remaining != 1 { + return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) + } + if bitCount > 32 { + return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) + } + if gotTotal != 1<> 3) + return nil +} + +// decSymbol contains information about a state entry, +// Including the state offset base, the output symbol and +// the number of bits to read for the low part of the destination state. +type decSymbol struct { + newState uint16 + symbol uint8 + nbBits uint8 +} + +// allocDtable will allocate decoding tables if they are not big enough. 
+func (s *Scratch) allocDtable() { + tableSize := 1 << s.actualTableLog + if cap(s.decTable) < tableSize { + s.decTable = make([]decSymbol, tableSize) + } + s.decTable = s.decTable[:tableSize] + + if cap(s.ct.tableSymbol) < 256 { + s.ct.tableSymbol = make([]byte, 256) + } + s.ct.tableSymbol = s.ct.tableSymbol[:256] + + if cap(s.ct.stateTable) < 256 { + s.ct.stateTable = make([]uint16, 256) + } + s.ct.stateTable = s.ct.stateTable[:256] +} + +// buildDtable will build the decoding table. +func (s *Scratch) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + s.allocDtable() + symbolNext := s.ct.stateTable[:256] + + // Init, lay down lowprob symbols + s.zeroBits = false + { + largeLimit := int16(1 << (s.actualTableLog - 1)) + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.decTable[highThreshold].symbol = uint8(i) + highThreshold-- + symbolNext[i] = 1 + } else { + if v >= largeLimit { + s.zeroBits = true + } + symbolNext[i] = uint16(v) + } + } + } + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.decTable[position].symbol = uint8(ss) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.decTable { + symbol := v.symbol + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.decTable[u].nbBits = nBits + newState := (nextState << nBits) - tableSize + if newState >= tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.decTable[u].newState = newState + } + } + return nil +} + +// decompress will decompress the bitstream. +// If the buffer is over-read an error is returned. +func (s *Scratch) decompress() error { + br := &s.bits + br.init(s.br.unread()) + + var s1, s2 decoder + // Initialize and decode first state and symbol. + s1.init(br, s.decTable, s.actualTableLog) + s2.init(br, s.decTable, s.actualTableLog) + + // Use temp table to avoid bound checks/append penalty. + var tmp = s.ct.tableSymbol[:256] + var off uint8 + + // Main part + if !s.zeroBits { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.nextFast() + tmp[off+1] = s2.nextFast() + br.fillFast() + tmp[off+2] = s1.nextFast() + tmp[off+3] = s2.nextFast() + off += 4 + // When off is 0, we have overflowed and should write. + if off == 0 { + s.Out = append(s.Out, tmp...) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } else { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.next() + tmp[off+1] = s2.next() + br.fillFast() + tmp[off+2] = s1.next() + tmp[off+3] = s2.next() + off += 4 + if off == 0 { + s.Out = append(s.Out, tmp...) + // When off is 0, we have overflowed and should write. 
+ if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } + s.Out = append(s.Out, tmp[:off]...) + + // Final bits, a bit more expensive check + for { + if s1.finished() { + s.Out = append(s.Out, s1.final(), s2.final()) + break + } + br.fill() + s.Out = append(s.Out, s1.next()) + if s2.finished() { + s.Out = append(s.Out, s2.final(), s1.final()) + break + } + s.Out = append(s.Out, s2.next()) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + return br.close() +} + +// decoder keeps track of the current state and updates it from the bitstream. +type decoder struct { + state uint16 + br *bitReader + dt []decSymbol +} + +// init will initialize the decoder and read the first state from the stream. +func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { + d.dt = dt + d.br = in + d.state = in.getBits(tableLog) +} + +// next returns the next symbol and sets the next state. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) next() uint8 { + n := &d.dt[d.state] + lowBits := d.br.getBits(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} + +// finished returns true if all bits have been read from the bitstream +// and the next state would require reading bits from the input. +func (d *decoder) finished() bool { + return d.br.finished() && d.dt[d.state].nbBits > 0 +} + +// final returns the current state symbol without decoding the next. +func (d *decoder) final() uint8 { + return d.dt[d.state].symbol +} + +// nextFast returns the next symbol and sets the next state. +// This can only be used if no symbols are 0 bits. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) nextFast() uint8 { + n := d.dt[d.state] + lowBits := d.br.getBitsFast(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go new file mode 100644 index 000000000..535cbadfd --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fse.go @@ -0,0 +1,144 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +// Package fse provides Finite State Entropy encoding and decoding. +// +// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding +// for byte blocks as implemented in zstd. +// +// See https://github.com/klauspost/compress/tree/master/fse for more information. +package fse + +import ( + "errors" + "fmt" + "math/bits" +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = 14 + defaultMemoryUsage = 13 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + defaultTablelog = defaultMemoryUsage - 2 + minTablelog = 5 + maxSymbolValue = 255 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. 
+ ErrIncompressible = errors.New("input is not compressible") + + // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. + ErrUseRLE = errors.New("input is single value repeated") +) + +// Scratch provides temporary storage for compression and decompression. +type Scratch struct { + // Private + count [maxSymbolValue + 1]uint32 + norm [maxSymbolValue + 1]int16 + br byteReader + bits bitReader + bw bitWriter + ct cTable // Compression tables. + decTable []decSymbol // Decompression table. + maxCount int // count of the most probable symbol + + // Per block parameters. + // These can be used to override compression parameters of the block. + // Do not touch, unless you know what you are doing. + + // Out is output buffer. + // If the scratch is re-used before the caller is done processing the output, + // set this field to nil. + // Otherwise the output buffer will be re-used for next Compression/Decompression step + // and allocation will be avoided. + Out []byte + + // DecompressLimit limits the maximum decoded size acceptable. + // If > 0 decompression will stop when approximately this many bytes + // has been decoded. + // If 0, maximum size will be 2GB. + DecompressLimit int + + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + zeroBits bool // no bits has prob > 50%. + clearCount bool // clear count + + // MaxSymbolValue will override the maximum symbol value of the next block. + MaxSymbolValue uint8 + + // TableLog will attempt to override the tablelog for the next block. + TableLog uint8 +} + +// Histogram allows to populate the histogram and skip that step in the compression, +// It otherwise allows to inspect the histogram when compression is done. +// To indicate that you have populated the histogram call HistogramFinished +// with the value of the highest populated symbol, as well as the number of entries +// in the most populated entry. These are accepted at face value. +// The returned slice will always be length 256. +func (s *Scratch) Histogram() []uint32 { + return s.count[:] +} + +// HistogramFinished can be called to indicate that the histogram has been populated. +// maxSymbol is the index of the highest set symbol of the next data segment. +// maxCount is the number of entries in the most populated entry. +// These are accepted at face value. +func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) { + s.maxCount = maxCount + s.symbolLen = uint16(maxSymbol) + 1 + s.clearCount = maxCount != 0 +} + +// prepare will prepare and allocate scratch tables used for both compression and decompression. +func (s *Scratch) prepare(in []byte) (*Scratch, error) { + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = 255 + } + if s.TableLog == 0 { + s.TableLog = defaultTablelog + } + if s.TableLog > maxTableLog { + return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog) + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + s.br.init(in) + if s.DecompressLimit == 0 { + // Max size 2GB. + s.DecompressLimit = (2 << 30) - 1 + } + + return s, nil +} + +// tableStep returns the next table index. 
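+// The step (tableSize/2 + tableSize/8 + 3) is odd for the table sizes used here,
+// and therefore co-prime with the power-of-two table size, so the symbol-spreading
+// loops can cycle through every table position.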
+func tableStep(tableSize uint32) uint32 {
+	return (tableSize >> 1) + (tableSize >> 3) + 3
+}
+
+func highBits(val uint32) (n uint32) {
+	return uint32(bits.Len32(val) - 1)
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/.gitignore b/vendor/github.com/klauspost/compress/huff0/.gitignore
new file mode 100644
index 000000000..b3d262958
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/.gitignore
@@ -0,0 +1 @@
+/huff0-fuzz.zip
diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md
new file mode 100644
index 000000000..8b6e5c663
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/README.md
@@ -0,0 +1,89 @@
+# Huff0 entropy compression
+
+This package provides Huff0 encoding and decoding as used in zstd.
+
+[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders) is
+a Huffman codec designed for modern CPUs, featuring OoO (Out of Order) operations on multiple ALUs
+(Arithmetic Logic Units), achieving extremely fast compression and decompression speeds.
+
+This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
+This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders,
+but it can be used as a secondary step to compressors (like Snappy) that do not do entropy encoding.
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0)
+
+## News
+
+This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package.
+
+This ensures that most functionality is well tested.
+
+# Usage
+
+This package provides a low level interface that allows you to compress single independent blocks.
+
+Each block is separate, and there are no built-in integrity checks.
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and
+[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions.
+You must provide input and will receive the output and maybe an error.
+
+These error values can be returned:
+
+| Error | Description |
+|---------------------|-----------------------------------------------------------------------------|
+| `<nil>` | Everything ok, output is returned |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress |
+| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated |
+| `ErrTooBig` | Returned if the input block exceeds the maximum allowed size (128 KiB) |
+| `(error)` | An internal error occurred. |
+
+
+As can be seen above, some of these errors will be returned even under normal operation, so it is important to handle these.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using it
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
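+
+As an illustration, a minimal single-block round trip could look roughly like the sketch below
+(the `roundTrip` helper and the `data` variable are illustrative placeholders, not part of this package):
+
+```go
+import "github.com/klauspost/compress/huff0"
+
+// roundTrip compresses a block and immediately decodes it again.
+func roundTrip(data []byte) ([]byte, error) {
+	var s huff0.Scratch
+	comp, _, err := huff0.Compress1X(data, &s)
+	if err == huff0.ErrIncompressible || err == huff0.ErrUseRLE {
+		// Store the block uncompressed (or as RLE) instead.
+		return data, nil
+	}
+	if err != nil {
+		return nil, err
+	}
+	// comp holds the table followed by the compressed data.
+	// Decoding: read the table first, then decompress the remaining bytes.
+	var d huff0.Scratch
+	dec, remain, err := huff0.ReadTable(comp, &d)
+	if err != nil {
+		return nil, err
+	}
+	return dec.Decompress1X(remain)
+}
+```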
+ +The `Scratch` object will retain state that allows to re-use previous tables for encoding and decoding. + +## Tables and re-use + +Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results. + +The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy) +that controls this behaviour. See the documentation for details. This can be altered between each block. + +Do however note that this information is *not* stored in the output block and it is up to the users of the package to +record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called, +based on the boolean reported back from the CompressXX call. + +If you want to store the table separate from the data, you can access them as `OutData` and `OutTable` on the +[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object. + +## Decompressing + +The first part of decoding is to initialize the decoding table through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable). +This will initialize the decoding tables. +You can supply the complete block to `ReadTable` and it will return the data part of the block +which can be given to the decompressor. + +Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X) +or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function. + +For concurrently decompressing content with a fixed table a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested which will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size. + +You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back +your input was likely corrupted. + +It is important to note that a successful decoding does *not* mean your output matches your original input. +There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. + +# Contributing + +Contributions are always welcome. Be aware that adding public functions will require good justification and breaking +changes will likely not be accepted. If in doubt open an issue before writing the PR. diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go new file mode 100644 index 000000000..a4979e886 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go @@ -0,0 +1,329 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +import ( + "encoding/binary" + "errors" + "io" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. 
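+// The stream is read back to front: the highest set bit of the final byte marks
+// the end of the stream, and init skips past that marker so reading starts at the
+// first data bit.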
+func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBit32(uint32(v))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) peekBitsFast(n uint8) uint16 { + const regMask = 64 - 1 + v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +func (b *bitReader) advance(n uint8) { + b.bitsRead += n +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReaderBytes struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReaderBytes) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. 
+func (b *bitReaderBytes) peekByteFast() uint8 { + got := uint8(b.value >> 56) + return got +} + +func (b *bitReaderBytes) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderBytes) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. +func (b *bitReaderBytes) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderBytes) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReaderBytes) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderBytes) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +// bitReaderShifted reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReaderShifted struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReaderShifted) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { + return uint16(b.value >> ((64 - n) & 63)) +} + +func (b *bitReaderShifted) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderShifted) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. +func (b *bitReaderShifted) fillFastStart() { + // Do single re-slice to avoid bounds checks. 
+ b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderShifted) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReaderShifted) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderShifted) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go new file mode 100644 index 000000000..6bce4e87d --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -0,0 +1,210 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// encSymbol will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encSymbol(ct cTable, symbol byte) { + enc := ct[symbol] + b.bitContainer |= uint64(enc.val) << (b.nBits & 63) + if false { + if enc.nBits == 0 { + panic("nbits 0") + } + } + b.nBits += enc.nBits +} + +// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. 
+func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { + encA := ct[av] + encB := ct[bv] + sh := b.nBits & 63 + combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63)) + b.bitContainer |= combined << sh + if false { + if encA.nBits == 0 { + panic("nbitsA 0") + } + if encB.nBits == 0 { + panic("nbitsB 0") + } + } + b.nBits += encA.nBits + encB.nBits +} + +// addBits16ZeroNC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +// This is fastest if bits can be zero. +func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { + if bits == 0 { + return + } + value <<= (16 - bits) & 15 + value >>= (16 - bits) & 15 + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. +// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. +func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + return + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + b.bitContainer >>= 1 << 3 + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + b.bitContainer >>= 2 << 3 + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + b.bitContainer >>= 3 << 3 + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + b.bitContainer >>= 4 << 3 + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + b.bitContainer >>= 5 << 3 + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + b.bitContainer >>= 6 << 3 + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + b.bitContainer >>= 7 << 3 + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + b.bitContainer = 0 + b.nBits = 0 + return + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. 
+ b.flushAlign() + return nil +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go new file mode 100644 index 000000000..50bcdf6ea --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bytereader.go @@ -0,0 +1,54 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// Int32 returns a little endian int32 starting at current offset. +func (b byteReader) Int32() int32 { + v3 := int32(b.b[b.off+3]) + v2 := int32(b.b[b.off+2]) + v1 := int32(b.b[b.off+1]) + v0 := int32(b.b[b.off]) + return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 +} + +// Uint32 returns a little endian uint32 starting at current offset. +func (b byteReader) Uint32() uint32 { + v3 := uint32(b.b[b.off+3]) + v2 := uint32(b.b[b.off+2]) + v1 := uint32(b.b[b.off+1]) + v0 := uint32(b.b[b.off]) + return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go new file mode 100644 index 000000000..dea115b33 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -0,0 +1,657 @@ +package huff0 + +import ( + "fmt" + "runtime" + "sync" +) + +// Compress1X will compress the input. +// The output can be decoded using Decompress1X. +// Supply a Scratch object. The scratch object contains state about re-use, +// So when sharing across independent encodes, be sure to set the re-use policy. +func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + return compress(in, s, s.compress1X) +} + +// Compress4X will compress the input. The input is split into 4 independent blocks +// and compressed similar to Compress1X. +// The output can be decoded using Decompress4X. +// Supply a Scratch object. The scratch object contains state about re-use, +// So when sharing across independent encodes, be sure to set the re-use policy. +func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + if false { + // TODO: compress4Xp only slightly faster. 
+ const parallelThreshold = 8 << 10 + if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 { + return compress(in, s, s.compress4X) + } + return compress(in, s, s.compress4Xp) + } + return compress(in, s, s.compress4X) +} + +func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) { + // Nuke previous table if we cannot reuse anyway. + if s.Reuse == ReusePolicyNone { + s.prevTable = s.prevTable[:0] + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return nil, false, ErrIncompressible + } + // One symbol, use RLE + return nil, false, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, false, ErrIncompressible + } + if s.Reuse == ReusePolicyMust && !canReuse { + // We must reuse, but we can't. + return nil, false, ErrIncompressible + } + if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse { + keepTable := s.cTable + keepTL := s.actualTableLog + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + s.cTable = keepTable + s.actualTableLog = keepTL + if err == nil && len(s.Out) < wantSize { + s.OutData = s.Out + return s.Out, true, nil + } + if s.Reuse == ReusePolicyMust { + return nil, false, ErrIncompressible + } + // Do not attempt to re-use later. + s.prevTable = s.prevTable[:0] + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return nil, false, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + if s.Reuse == ReusePolicyAllow && canReuse { + hSize := len(s.Out) + oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen]) + newSize := s.cTable.estimateSize(s.count[:s.symbolLen]) + if oldSize <= hSize+newSize || hSize+12 >= wantSize { + // Retain cTable even if we re-use. + keepTable := s.cTable + keepTL := s.actualTableLog + + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + + // Restore ctable. + s.cTable = keepTable + s.actualTableLog = keepTL + if err != nil { + return nil, false, err + } + if len(s.Out) >= wantSize { + return nil, false, ErrIncompressible + } + s.OutData = s.Out + return s.Out, true, nil + } + } + + // Use new table + err = s.cTable.write(s) + if err != nil { + s.OutTable = nil + return nil, false, err + } + s.OutTable = s.Out + + // Compress using new table + s.Out, err = compressor(in) + if err != nil { + s.OutTable = nil + return nil, false, err + } + if len(s.Out) >= wantSize { + s.OutTable = nil + return nil, false, ErrIncompressible + } + // Move current table into previous. 
+ s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0] + s.OutData = s.Out[len(s.OutTable):] + return s.Out, false, nil +} + +func (s *Scratch) compress1X(src []byte) ([]byte, error) { + return s.compress1xDo(s.Out, src) +} + +func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) { + var bw = bitWriter{out: dst} + + // N is length divisible by 4. + n := len(src) + n -= n & 3 + cTable := s.cTable[:256] + + // Encode last bytes. + for i := len(src) & 3; i > 0; i-- { + bw.encSymbol(cTable, src[n+i-1]) + } + n -= 4 + if s.actualTableLog <= 8 { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + } + } else { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) + bw.flush32() + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + } + } + err := bw.close() + return bw.out, err +} + +var sixZeros [6]byte + +func (s *Scratch) compress4X(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + segmentSize := (len(src) + 3) / 4 + + // Add placeholder for output length + offsetIdx := len(s.Out) + s.Out = append(s.Out, sixZeros[:]...) + + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + var err error + idx := len(s.Out) + s.Out, err = s.compress1xDo(s.Out, toDo) + if err != nil { + return nil, err + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + length := len(s.Out) - idx + s.Out[i*2+offsetIdx] = byte(length) + s.Out[i*2+offsetIdx+1] = byte(length >> 8) + } + } + + return s.Out, nil +} + +// compress4Xp will compress 4 streams using separate goroutines. +func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + // Add placeholder for output length + s.Out = s.Out[:6] + + segmentSize := (len(src) + 3) / 4 + var wg sync.WaitGroup + var errs [4]error + wg.Add(4) + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + // Separate goroutine for each block. + go func(i int) { + s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) + wg.Done() + }(i) + } + wg.Wait() + for i := 0; i < 4; i++ { + if errs[i] != nil { + return nil, errs[i] + } + o := s.tmpOut[i] + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + s.Out[i*2] = byte(len(o)) + s.Out[i*2+1] = byte(len(o) >> 8) + } + + // Write output. + s.Out = append(s.Out, o...) + } + return s.Out, nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. 
+func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { + reuse = true + for _, v := range in { + s.count[v]++ + } + m := uint32(0) + if len(s.prevTable) > 0 { + for i, v := range s.count[:] { + if v > m { + m = v + } + if v > 0 { + s.symbolLen = uint16(i) + 1 + if i >= len(s.prevTable) { + reuse = false + } else { + if s.prevTable[i].nBits == 0 { + reuse = false + } + } + } + } + return int(m), reuse + } + for i, v := range s.count[:] { + if v > m { + m = v + } + if v > 0 { + s.symbolLen = uint16(i) + 1 + } + } + return int(m), false +} + +func (s *Scratch) canUseTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 && c[i].nBits == 0 { + return false + } + } + return true +} + +func (s *Scratch) validateTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 { + if c[i].nBits == 0 { + return false + } + if c[i].nBits > s.actualTableLog { + return false + } + } + } + return true +} + +// minTableLog provides the minimum logSize to safely represent a distribution. +func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBit32(uint32(s.br.remain())) + 1 + minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > tableLogMax { + tableLog = tableLogMax + } + s.actualTableLog = tableLog +} + +type cTableEntry struct { + val uint16 + nBits uint8 + // We have 8 bits extra +} + +const huffNodesMask = huffNodesLen - 1 + +func (s *Scratch) buildCTable() error { + s.optimalTableLog() + s.huffSort() + if cap(s.cTable) < maxSymbolValue+1 { + s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1) + } else { + s.cTable = s.cTable[:s.symbolLen] + for i := range s.cTable { + s.cTable[i] = cTableEntry{} + } + } + + var startNode = int16(s.symbolLen) + nonNullRank := s.symbolLen - 1 + + nodeNb := startNode + huffNode := s.nodes[1 : huffNodesLen+1] + + // This overlays the slice above, but allows "-1" index lookups. + // Different from reference implementation. 
+ huffNode0 := s.nodes[0 : huffNodesLen+1] + + for huffNode[nonNullRank].count == 0 { + nonNullRank-- + } + + lowS := int16(nonNullRank) + nodeRoot := nodeNb + lowS - 1 + lowN := nodeNb + huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count + huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb) + nodeNb++ + lowS -= 2 + for n := nodeNb; n <= nodeRoot; n++ { + huffNode[n].count = 1 << 30 + } + // fake entry, strong barrier + huffNode0[0].count = 1 << 31 + + // create parents + for nodeNb <= nodeRoot { + var n1, n2 int16 + if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + n1 = lowS + lowS-- + } else { + n1 = lowN + lowN++ + } + if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + n2 = lowS + lowS-- + } else { + n2 = lowN + lowN++ + } + + huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count + huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb) + nodeNb++ + } + + // distribute weights (unlimited tree height) + huffNode[nodeRoot].nbBits = 0 + for n := nodeRoot - 1; n >= startNode; n-- { + huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + } + for n := uint16(0); n <= nonNullRank; n++ { + huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + } + s.actualTableLog = s.setMaxHeight(int(nonNullRank)) + maxNbBits := s.actualTableLog + + // fill result into tree (val, nbBits) + if maxNbBits > tableLogMax { + return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax) + } + var nbPerRank [tableLogMax + 1]uint16 + var valPerRank [16]uint16 + for _, v := range huffNode[:nonNullRank+1] { + nbPerRank[v.nbBits]++ + } + // determine stating value per rank + { + min := uint16(0) + for n := maxNbBits; n > 0; n-- { + // get starting value within each rank + valPerRank[n] = min + min += nbPerRank[n] + min >>= 1 + } + } + + // push nbBits per symbol, symbol order + for _, v := range huffNode[:nonNullRank+1] { + s.cTable[v.symbol].nBits = v.nbBits + } + + // assign value within rank, symbol order + t := s.cTable[:s.symbolLen] + for n, val := range t { + nbits := val.nBits & 15 + v := valPerRank[nbits] + t[n].val = v + valPerRank[nbits] = v + 1 + } + + return nil +} + +// huffSort will sort symbols, decreasing order. +func (s *Scratch) huffSort() { + type rankPos struct { + base uint32 + current uint32 + } + + // Clear nodes + nodes := s.nodes[:huffNodesLen+1] + s.nodes = nodes + nodes = nodes[1 : huffNodesLen+1] + + // Sort into buckets based on length of symbol count. 
+ var rank [32]rankPos + for _, v := range s.count[:s.symbolLen] { + r := highBit32(v+1) & 31 + rank[r].base++ + } + // maxBitLength is log2(BlockSizeMax) + 1 + const maxBitLength = 18 + 1 + for n := maxBitLength; n > 0; n-- { + rank[n-1].base += rank[n].base + } + for n := range rank[:maxBitLength] { + rank[n].current = rank[n].base + } + for n, c := range s.count[:s.symbolLen] { + r := (highBit32(c+1) + 1) & 31 + pos := rank[r].current + rank[r].current++ + prev := nodes[(pos-1)&huffNodesMask] + for pos > rank[r].base && c > prev.count { + nodes[pos&huffNodesMask] = prev + pos-- + prev = nodes[(pos-1)&huffNodesMask] + } + nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)} + } + return +} + +func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { + maxNbBits := s.actualTableLog + huffNode := s.nodes[1 : huffNodesLen+1] + //huffNode = huffNode[: huffNodesLen] + + largestBits := huffNode[lastNonNull].nbBits + + // early exit : no elt > maxNbBits + if largestBits <= maxNbBits { + return largestBits + } + totalCost := int(0) + baseCost := int(1) << (largestBits - maxNbBits) + n := uint32(lastNonNull) + + for huffNode[n].nbBits > maxNbBits { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)) + huffNode[n].nbBits = maxNbBits + n-- + } + // n stops at huffNode[n].nbBits <= maxNbBits + + for huffNode[n].nbBits == maxNbBits { + n-- + } + // n end at index of smallest symbol using < maxNbBits + + // renorm totalCost + totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */ + + // repay normalized cost + { + const noSymbol = 0xF0F0F0F0 + var rankLast [tableLogMax + 2]uint32 + + for i := range rankLast[:] { + rankLast[i] = noSymbol + } + + // Get pos of last (smallest) symbol per rank + { + currentNbBits := maxNbBits + for pos := int(n); pos >= 0; pos-- { + if huffNode[pos].nbBits >= currentNbBits { + continue + } + currentNbBits = huffNode[pos].nbBits // < maxNbBits + rankLast[maxNbBits-currentNbBits] = uint32(pos) + } + } + + for totalCost > 0 { + nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1 + + for ; nBitsToDecrease > 1; nBitsToDecrease-- { + highPos := rankLast[nBitsToDecrease] + lowPos := rankLast[nBitsToDecrease-1] + if highPos == noSymbol { + continue + } + if lowPos == noSymbol { + break + } + highTotal := huffNode[highPos].count + lowTotal := 2 * huffNode[lowPos].count + if highTotal <= lowTotal { + break + } + } + // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
+ // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary + // FIXME: try to remove + for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) { + nBitsToDecrease++ + } + totalCost -= 1 << (nBitsToDecrease - 1) + if rankLast[nBitsToDecrease-1] == noSymbol { + // this rank is no longer empty + rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] + } + huffNode[rankLast[nBitsToDecrease]].nbBits++ + if rankLast[nBitsToDecrease] == 0 { + /* special case, reached largest symbol */ + rankLast[nBitsToDecrease] = noSymbol + } else { + rankLast[nBitsToDecrease]-- + if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease { + rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ + } + } + } + + for totalCost < 0 { /* Sometimes, cost correction overshoot */ + if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ + for huffNode[n].nbBits == maxNbBits { + n-- + } + huffNode[n+1].nbBits-- + rankLast[1] = n + 1 + totalCost++ + continue + } + huffNode[rankLast[1]+1].nbBits-- + rankLast[1]++ + totalCost++ + } + } + return maxNbBits +} + +type nodeElt struct { + count uint32 + parent uint16 + symbol byte + nbBits uint8 +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go new file mode 100644 index 000000000..41703bba4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -0,0 +1,1164 @@ +package huff0 + +import ( + "errors" + "fmt" + "io" + + "github.com/klauspost/compress/fse" +) + +type dTable struct { + single []dEntrySingle + double []dEntryDouble +} + +// single-symbols decoding +type dEntrySingle struct { + entry uint16 +} + +// double-symbols decoding +type dEntryDouble struct { + seq uint16 + nBits uint8 + len uint8 +} + +// Uses special code for all tables that are < 8 bits. +const use8BitTables = true + +// ReadTable will read a table from the input. +// The size of the input may be larger than the table definition. +// Any content remaining after the table definition will be returned. +// If no Scratch is provided a new one is allocated. +// The returned Scratch can be used for encoding or decoding input using this table. 
+func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { + s, err = s.prepare(in) + if err != nil { + return s, nil, err + } + if len(in) <= 1 { + return s, nil, errors.New("input too small for table") + } + iSize := in[0] + in = in[1:] + if iSize >= 128 { + // Uncompressed + oSize := iSize - 127 + iSize = (oSize + 1) / 2 + if int(iSize) > len(in) { + return s, nil, errors.New("input too small for table") + } + for n := uint8(0); n < oSize; n += 2 { + v := in[n/2] + s.huffWeight[n] = v >> 4 + s.huffWeight[n+1] = v & 15 + } + s.symbolLen = uint16(oSize) + in = in[iSize:] + } else { + if len(in) < int(iSize) { + return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in)) + } + // FSE compressed weights + s.fse.DecompressLimit = 255 + hw := s.huffWeight[:] + s.fse.Out = hw + b, err := fse.Decompress(in[:iSize], s.fse) + s.fse.Out = nil + if err != nil { + return s, nil, err + } + if len(b) > 255 { + return s, nil, errors.New("corrupt input: output table too large") + } + s.symbolLen = uint16(len(b)) + in = in[iSize:] + } + + // collect weight stats + var rankStats [16]uint32 + weightTotal := uint32(0) + for _, v := range s.huffWeight[:s.symbolLen] { + if v > tableLogMax { + return s, nil, errors.New("corrupt input: weight too large") + } + v2 := v & 15 + rankStats[v2]++ + // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0. + weightTotal += (1 << v2) >> 1 + } + if weightTotal == 0 { + return s, nil, errors.New("corrupt input: weights zero") + } + + // get last non-null symbol weight (implied, total must be 2^n) + { + tableLog := highBit32(weightTotal) + 1 + if tableLog > tableLogMax { + return s, nil, errors.New("corrupt input: tableLog too big") + } + s.actualTableLog = uint8(tableLog) + // determine last weight + { + total := uint32(1) << tableLog + rest := total - weightTotal + verif := uint32(1) << highBit32(rest) + lastWeight := highBit32(rest) + 1 + if verif != rest { + // last value must be a clean power of 2 + return s, nil, errors.New("corrupt input: last value not power of two") + } + s.huffWeight[s.symbolLen] = uint8(lastWeight) + s.symbolLen++ + rankStats[lastWeight]++ + } + } + + if (rankStats[1] < 2) || (rankStats[1]&1 != 0) { + // by construction : at least 2 elts of rank 1, must be even + return s, nil, errors.New("corrupt input: min elt size, even check failed ") + } + + // TODO: Choose between single/double symbol decoding + + // Calculate starting value for each rank + { + var nextRankStart uint32 + for n := uint8(1); n < s.actualTableLog+1; n++ { + current := nextRankStart + nextRankStart += rankStats[n] << (n - 1) + rankStats[n] = current + } + } + + // fill DTable (always full size) + tSize := 1 << tableLogMax + if len(s.dt.single) != tSize { + s.dt.single = make([]dEntrySingle, tSize) + } + cTable := s.prevTable + if cap(cTable) < maxSymbolValue+1 { + cTable = make([]cTableEntry, 0, maxSymbolValue+1) + } + cTable = cTable[:maxSymbolValue+1] + s.prevTable = cTable[:s.symbolLen] + s.prevTableLog = s.actualTableLog + + for n, w := range s.huffWeight[:s.symbolLen] { + if w == 0 { + cTable[n] = cTableEntry{ + val: 0, + nBits: 0, + } + continue + } + length := (uint32(1) << w) >> 1 + d := dEntrySingle{ + entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8), + } + + rank := &rankStats[w] + cTable[n] = cTableEntry{ + val: uint16(*rank >> (w - 1)), + nBits: uint8(d.entry), + } + + single := s.dt.single[*rank : *rank+length] + for i := range single { + single[i] = d + } + *rank += length + } + + 
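+ // At this point s.prevTable mirrors the decoded weights as a cTable and
+ // s.dt.single is a full 1<<tableLogMax lookup table whose entries pack the
+ // symbol in the high byte and the number of bits to consume in the low byte.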
return s, in, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { + if cap(s.Out) < s.MaxDecodedSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:s.MaxDecodedSize] + s.Out, err = s.Decoder().Decompress1X(s.Out, in) + return s.Out, err +} + +// Decompress4X will decompress a 4X encoded stream. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// The length of the supplied input must match the end of a block exactly. +// The destination size of the uncompressed data must be known and provided. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { + if dstSize > s.MaxDecodedSize { + return nil, ErrMaxDecodedSizeExceeded + } + if cap(s.Out) < dstSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:dstSize] + s.Out, err = s.Decoder().Decompress4X(s.Out, in) + return s.Out, err +} + +// Decoder will return a stateless decoder that can be used by multiple +// decompressors concurrently. +// Before this is called, the table must be initialized with ReadTable. +// The Decoder is still linked to the scratch buffer so that cannot be reused. +// However, it is safe to discard the scratch. +func (s *Scratch) Decoder() *Decoder { + return &Decoder{ + dt: s.dt, + actualTableLog: s.actualTableLog, + } +} + +// Decoder provides stateless decoding. +type Decoder struct { + dt dTable + actualTableLog uint8 +} + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress1X8Bit(dst, src) + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + dt := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + + for br.off >= 8 { + br.fillFast() + v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + // Refill + br.fillFast() + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) 
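+ // The fast loop above stops once fewer than 8 bytes remain in the bit
+ // reader; the tail loop below drains the remaining bits one symbol at a
+ // time, still bounded by maxDecodedSize.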
+ + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if false && br.bitsRead >= 32 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value = (br.value << 32) | uint64(low) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value = (br.value << 8) | uint64(br.in[br.off-1]) + br.bitsRead -= 8 + br.off-- + } + } + } + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress1X8BitExactly(dst, src) + } + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + + shift := (8 - d.actualTableLog) & 7 + + //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) + for br.off >= 4 { + br.fillFast() + v := dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()>>shift] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. 
+ var buf [256]byte + var off uint8 + + const shift = 0 + + //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) + for br.off >= 4 { + br.fillFast() + v := dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekByteFast()>>shift] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()>>shift] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + var decoded int + + // Decode 2 values from each decoder/loop. 
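+ // Layout: the input carries a 6-byte header holding the little-endian byte
+ // lengths of the first three streams (the fourth runs to the end), and the
+ // output is split into four regions starting dstEvery bytes apart. Each loop
+ // iteration below decodes two symbols per stream into buf, which holds
+ // bufoff bytes per stream and is flushed to the four output regions when full.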
+ const bufoff = 256 / 4 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + br[stream].advance(uint8(v.entry)) + buf[off+bufoff*stream] = uint8(v.entry >> 8) + + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v2 := single[val2&tlMask] + br[stream2].advance(uint8(v2.entry)) + buf[off+bufoff*stream2] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + br[stream].advance(uint8(v.entry)) + buf[off+bufoff*stream+1] = uint8(v.entry >> 8) + + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v2 = single[val2&tlMask] + br[stream2].advance(uint8(v2.entry)) + buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + br[stream].advance(uint8(v.entry)) + buf[off+bufoff*stream] = uint8(v.entry >> 8) + + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v2 := single[val2&tlMask] + br[stream2].advance(uint8(v2.entry)) + buf[off+bufoff*stream2] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + br[stream].advance(uint8(v.entry)) + buf[off+bufoff*stream+1] = uint8(v.entry >> 8) + + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v2 = single[val2&tlMask] + br[stream2].advance(uint8(v2.entry)) + buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) + } + + off += 2 + + if off == bufoff { + if bufoff > dstEvery { + return nil, errors.New("corruption detected: stream overrun 1") + } + copy(out, buf[:bufoff]) + copy(out[dstEvery:], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) + off = 0 + out = out[bufoff:] + decoded += 256 + // There must at least be 3 buffers left. + if len(out) < dstEvery*3 { + return nil, errors.New("corruption detected: stream overrun 2") + } + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[:off]) + copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + for i := range br { + offset := dstEvery * i + br := &br[i] + bitsLeft := br.off*8 + uint(64-br.bitsRead) + for bitsLeft > 0 { + br.fill() + if false && br.bitsRead >= 32 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value = (br.value << 32) | uint64(low) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value = (br.value << 8) | uint64(br.in[br.off-1]) + br.bitsRead -= 8 + br.off-- + } + } + } + // end inline... + if offset >= len(out) { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. 
+ val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress4X8bitExactly(dst, src) + } + + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + shift := (8 - d.actualTableLog) & 7 + + const tlSize = 1 << 8 + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 / 4 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + v := single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 := single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+1] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+2] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+3] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + v := single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 := single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+1] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = 
single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+2] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+3] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + } + + off += 4 + + if off == bufoff { + if bufoff > dstEvery { + return nil, errors.New("corruption detected: stream overrun 1") + } + copy(out, buf[:bufoff]) + copy(out[dstEvery:], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) + off = 0 + out = out[bufoff:] + decoded += 256 + // There must at least be 3 buffers left. + if len(out) < dstEvery*3 { + return nil, errors.New("corruption detected: stream overrun 2") + } + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[:off]) + copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) + copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) + copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + for i := range br { + offset := dstEvery * i + br := &br[i] + bitsLeft := int(br.off*8) + int(64-br.bitsRead) + for bitsLeft > 0 { + if br.finished() { + return nil, io.ErrUnexpectedEOF + } + if br.bitsRead >= 56 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value |= uint64(low) << (br.bitsRead - 32) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + } + // end inline... + if offset >= len(out) { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + v := single[br.peekByteFast()>>shift].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= int(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. 
+func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const shift = 0 + const tlSize = 1 << 8 + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + var buf [256]byte + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 / 4 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + v := single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 := single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+1] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+2] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+3] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + v := single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 := single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+1] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+2] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + + v = single[br[stream].peekByteFast()>>shift].entry + buf[off+bufoff*stream+3] = uint8(v >> 8) + br[stream].advance(uint8(v)) + + v2 = single[br[stream2].peekByteFast()>>shift].entry + buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + br[stream2].advance(uint8(v2)) + } + + off += 4 + + if off == bufoff { + if bufoff > dstEvery { + return nil, errors.New("corruption detected: stream overrun 1") + } + copy(out, buf[:bufoff]) + copy(out[dstEvery:], buf[bufoff:bufoff*2]) + 
copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
+ copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
+ off = 0
+ out = out[bufoff:]
+ decoded += 256
+ // There must at least be 3 buffers left.
+ if len(out) < dstEvery*3 {
+ return nil, errors.New("corruption detected: stream overrun 2")
+ }
+ }
+ }
+ if off > 0 {
+ ioff := int(off)
+ if len(out) < dstEvery*3+ioff {
+ return nil, errors.New("corruption detected: stream overrun 3")
+ }
+ copy(out, buf[:off])
+ copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
+ copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
+ copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
+ decoded += int(off) * 4
+ out = out[off:]
+ }
+
+ // Decode remaining.
+ for i := range br {
+ offset := dstEvery * i
+ br := &br[i]
+ bitsLeft := int(br.off*8) + int(64-br.bitsRead)
+ for bitsLeft > 0 {
+ if br.finished() {
+ return nil, io.ErrUnexpectedEOF
+ }
+ if br.bitsRead >= 56 {
+ if br.off >= 4 {
+ v := br.in[br.off-4:]
+ v = v[:4]
+ low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ br.value |= uint64(low) << (br.bitsRead - 32)
+ br.bitsRead -= 32
+ br.off -= 4
+ } else {
+ for br.off > 0 {
+ br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+ br.bitsRead -= 8
+ br.off--
+ }
+ }
+ }
+ // end inline...
+ if offset >= len(out) {
+ return nil, errors.New("corruption detected: stream overrun 4")
+ }
+
+ // Read value and increment offset.
+ v := single[br.peekByteFast()>>shift].entry
+ nBits := uint8(v)
+ br.advance(nBits)
+ bitsLeft -= int(nBits)
+ out[offset] = uint8(v >> 8)
+ offset++
+ }
+ decoded += offset - dstEvery*i
+ err = br.close()
+ if err != nil {
+ return nil, err
+ }
+ }
+ if dstSize != decoded {
+ return nil, errors.New("corruption detected: short output block")
+ }
+ return dst, nil
+}
+
+// matches will compare a decoding table to a coding table.
+// Errors are written to the writer.
+// Nothing will be written if table is ok.
+func (s *Scratch) matches(ct cTable, w io.Writer) {
+ if s == nil || len(s.dt.single) == 0 {
+ return
+ }
+ dt := s.dt.single[:1<<s.actualTableLog]
+ tablelog := s.actualTableLog
+ ok := 0
+ broken := 0
+ for sym, enc := range ct {
+ errs := 0
+ broken++
+ if enc.nBits == 0 {
+ for _, dec := range dt {
+ if uint8(dec.entry>>8) == byte(sym) {
+ fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym)
+ errs++
+ break
+ }
+ }
+ if errs == 0 {
+ broken--
+ }
+ continue
+ }
+ // Unused bits in input
+ ub := tablelog - enc.nBits
+ top := enc.val << ub
+ // decoder looks at top bits.
+ dec := dt[top]
+ if uint8(dec.entry) != enc.nBits {
+ fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry))
+ errs++
+ }
+ if uint8(dec.entry>>8) != uint8(sym) {
+ fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8))
+ errs++
+ }
+ if errs > 0 {
+ fmt.Fprintf(w, "%d errros in base, stopping\n", errs)
+ continue
+ }
+ // Ensure that all combinations are covered.
+ for i := uint16(0); i < (1 << ub); i++ { + vval := top | i + dec := dt[vval] + if uint8(dec.entry) != enc.nBits { + fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry)) + errs++ + } + if uint8(dec.entry>>8) != uint8(sym) { + fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8)) + errs++ + } + if errs > 20 { + fmt.Fprintf(w, "%d errros, stopping\n", errs) + break + } + } + if errs == 0 { + ok++ + broken-- + } + } + if broken > 0 { + fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok) + } +} diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go new file mode 100644 index 000000000..7ec2022b6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -0,0 +1,273 @@ +// Package huff0 provides fast huffman encoding as used in zstd. +// +// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details. +package huff0 + +import ( + "errors" + "fmt" + "math" + "math/bits" + + "github.com/klauspost/compress/fse" +) + +const ( + maxSymbolValue = 255 + + // zstandard limits tablelog to 11, see: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description + tableLogMax = 11 + tableLogDefault = 11 + minTablelog = 5 + huffNodesLen = 512 + + // BlockSizeMax is maximum input size for a single block uncompressed. + BlockSizeMax = 1<<18 - 1 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. + ErrIncompressible = errors.New("input is not compressible") + + // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. + ErrUseRLE = errors.New("input is single value repeated") + + // ErrTooBig is return if input is too large for a single block. + ErrTooBig = errors.New("input too big") + + // ErrMaxDecodedSizeExceeded is return if input is too large for a single block. + ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded") +) + +type ReusePolicy uint8 + +const ( + // ReusePolicyAllow will allow reuse if it produces smaller output. + ReusePolicyAllow ReusePolicy = iota + + // ReusePolicyPrefer will re-use aggressively if possible. + // This will not check if a new table will produce smaller output, + // except if the current table is impossible to use or + // compressed output is bigger than input. + ReusePolicyPrefer + + // ReusePolicyNone will disable re-use of tables. + // This is slightly faster than ReusePolicyAllow but may produce larger output. + ReusePolicyNone + + // ReusePolicyMust must allow reuse and produce smaller output. + ReusePolicyMust +) + +type Scratch struct { + count [maxSymbolValue + 1]uint32 + + // Per block parameters. + // These can be used to override compression parameters of the block. + // Do not touch, unless you know what you are doing. + + // Out is output buffer. + // If the scratch is re-used before the caller is done processing the output, + // set this field to nil. + // Otherwise the output buffer will be re-used for next Compression/Decompression step + // and allocation will be avoided. + Out []byte + + // OutTable will contain the table data only, if a new table has been generated. + // Slice of the returned data. + OutTable []byte + + // OutData will contain the compressed data. + // Slice of the returned data. + OutData []byte + + // MaxDecodedSize will set the maximum allowed output size. 
+ // This value will automatically be set to BlockSizeMax if not set. + // Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded. + MaxDecodedSize int + + br byteReader + + // MaxSymbolValue will override the maximum symbol value of the next block. + MaxSymbolValue uint8 + + // TableLog will attempt to override the tablelog for the next block. + // Must be <= 11 and >= 5. + TableLog uint8 + + // Reuse will specify the reuse policy + Reuse ReusePolicy + + // WantLogLess allows to specify a log 2 reduction that should at least be achieved, + // otherwise the block will be returned as incompressible. + // The reduction should then at least be (input size >> WantLogLess) + // If WantLogLess == 0 any improvement will do. + WantLogLess uint8 + + symbolLen uint16 // Length of active part of the symbol table. + maxCount int // count of the most probable symbol + clearCount bool // clear count + actualTableLog uint8 // Selected tablelog. + prevTableLog uint8 // Tablelog for previous table + prevTable cTable // Table used for previous compression. + cTable cTable // compression table + dt dTable // decompression table + nodes []nodeElt + tmpOut [4][]byte + fse *fse.Scratch + huffWeight [maxSymbolValue + 1]byte +} + +// TransferCTable will transfer the previously used compression table. +func (s *Scratch) TransferCTable(src *Scratch) { + if cap(s.prevTable) < len(src.prevTable) { + s.prevTable = make(cTable, 0, maxSymbolValue+1) + } + s.prevTable = s.prevTable[:len(src.prevTable)] + copy(s.prevTable, src.prevTable) + s.prevTableLog = src.prevTableLog +} + +func (s *Scratch) prepare(in []byte) (*Scratch, error) { + if len(in) > BlockSizeMax { + return nil, ErrTooBig + } + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = maxSymbolValue + } + if s.TableLog == 0 { + s.TableLog = tableLogDefault + } + if s.TableLog > tableLogMax || s.TableLog < minTablelog { + return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax) + } + if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax { + s.MaxDecodedSize = BlockSizeMax + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + s.Out = s.Out[:0] + + s.OutTable = nil + s.OutData = nil + if cap(s.nodes) < huffNodesLen+1 { + s.nodes = make([]nodeElt, 0, huffNodesLen+1) + } + s.nodes = s.nodes[:0] + if s.fse == nil { + s.fse = &fse.Scratch{} + } + s.br.init(in) + + return s, nil +} + +type cTable []cTableEntry + +func (c cTable) write(s *Scratch) error { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. 
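+ // Two encodings are attempted below: the weights are FSE-compressed and
+ // kept if the result is shorter than half the symbol count (length written
+ // as a single leading byte < 128); otherwise they are packed raw at 4 bits
+ // per weight behind a header byte >= 128 that encodes the weight count.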
+ if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + s.Out = append(s.Out, uint8(len(b))) + s.Out = append(s.Out, b...) + return nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return ErrIncompressible + } + op := s.Out + // special case, pack weights 4 bits/weight. + op = append(op, 128|(maxSymbolValue-1)) + // be sure it doesn't cause msan issue in final combination + huffWeight[maxSymbolValue] = 0 + for n := uint16(0); n < uint16(maxSymbolValue); n += 2 { + op = append(op, (huffWeight[n]<<4)|huffWeight[n+1]) + } + s.Out = op + return nil +} + +// estimateSize returns the estimated size in bytes of the input represented in the +// histogram supplied. +func (c cTable) estimateSize(hist []uint32) int { + nbBits := uint32(7) + for i, v := range c[:len(hist)] { + nbBits += uint32(v.nBits) * hist[i] + } + return int(nbBits >> 3) +} + +// minSize returns the minimum possible size considering the shannon limit. +func (s *Scratch) minSize(total int) int { + nbBits := float64(7) + fTotal := float64(total) + for _, v := range s.count[:s.symbolLen] { + n := float64(v) + if n > 0 { + nbBits += math.Log2(fTotal/n) * n + } + } + return int(nbBits) >> 3 +} + +func highBit32(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/snappy/.gitignore b/vendor/github.com/klauspost/compress/snappy/.gitignore new file mode 100644 index 000000000..042091d9b --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/.gitignore @@ -0,0 +1,16 @@ +cmd/snappytool/snappytool +testdata/bench + +# These explicitly listed benchmark data files are for an obsolete version of +# snappy_test.go. +testdata/alice29.txt +testdata/asyoulik.txt +testdata/fireworks.jpeg +testdata/geo.protodata +testdata/html +testdata/html_x_4 +testdata/kppkn.gtb +testdata/lcet10.txt +testdata/paper-100k.pdf +testdata/plrabn12.txt +testdata/urls.10K diff --git a/vendor/github.com/klauspost/compress/snappy/AUTHORS b/vendor/github.com/klauspost/compress/snappy/AUTHORS new file mode 100644 index 000000000..bcfa19520 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of Snappy-Go authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Damian Gryski +Google Inc. +Jan Mercl <0xjnml@gmail.com> +Rodolfo Carvalho +Sebastien Binet diff --git a/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS b/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS new file mode 100644 index 000000000..931ae3160 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS @@ -0,0 +1,37 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the Snappy-Go repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. 
For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name + +# Please keep the list sorted. + +Damian Gryski +Jan Mercl <0xjnml@gmail.com> +Kai Backman +Marc-Antoine Ruel +Nigel Tao +Rob Pike +Rodolfo Carvalho +Russ Cox +Sebastien Binet diff --git a/vendor/github.com/klauspost/compress/snappy/LICENSE b/vendor/github.com/klauspost/compress/snappy/LICENSE new file mode 100644 index 000000000..6050c10f4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/compress/snappy/README b/vendor/github.com/klauspost/compress/snappy/README new file mode 100644 index 000000000..cea12879a --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/README @@ -0,0 +1,107 @@ +The Snappy compression format in the Go programming language. + +To download and install from source: +$ go get github.com/golang/snappy + +Unless otherwise noted, the Snappy-Go source files are distributed +under the BSD-style license found in the LICENSE file. + + + +Benchmarks. 
+ +The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten +or so files, the same set used by the C++ Snappy code (github.com/google/snappy +and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ +3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: + +"go test -test.bench=." + +_UFlat0-8 2.19GB/s ± 0% html +_UFlat1-8 1.41GB/s ± 0% urls +_UFlat2-8 23.5GB/s ± 2% jpg +_UFlat3-8 1.91GB/s ± 0% jpg_200 +_UFlat4-8 14.0GB/s ± 1% pdf +_UFlat5-8 1.97GB/s ± 0% html4 +_UFlat6-8 814MB/s ± 0% txt1 +_UFlat7-8 785MB/s ± 0% txt2 +_UFlat8-8 857MB/s ± 0% txt3 +_UFlat9-8 719MB/s ± 1% txt4 +_UFlat10-8 2.84GB/s ± 0% pb +_UFlat11-8 1.05GB/s ± 0% gaviota + +_ZFlat0-8 1.04GB/s ± 0% html +_ZFlat1-8 534MB/s ± 0% urls +_ZFlat2-8 15.7GB/s ± 1% jpg +_ZFlat3-8 740MB/s ± 3% jpg_200 +_ZFlat4-8 9.20GB/s ± 1% pdf +_ZFlat5-8 991MB/s ± 0% html4 +_ZFlat6-8 379MB/s ± 0% txt1 +_ZFlat7-8 352MB/s ± 0% txt2 +_ZFlat8-8 396MB/s ± 1% txt3 +_ZFlat9-8 327MB/s ± 1% txt4 +_ZFlat10-8 1.33GB/s ± 1% pb +_ZFlat11-8 605MB/s ± 1% gaviota + + + +"go test -test.bench=. -tags=noasm" + +_UFlat0-8 621MB/s ± 2% html +_UFlat1-8 494MB/s ± 1% urls +_UFlat2-8 23.2GB/s ± 1% jpg +_UFlat3-8 1.12GB/s ± 1% jpg_200 +_UFlat4-8 4.35GB/s ± 1% pdf +_UFlat5-8 609MB/s ± 0% html4 +_UFlat6-8 296MB/s ± 0% txt1 +_UFlat7-8 288MB/s ± 0% txt2 +_UFlat8-8 309MB/s ± 1% txt3 +_UFlat9-8 280MB/s ± 1% txt4 +_UFlat10-8 753MB/s ± 0% pb +_UFlat11-8 400MB/s ± 0% gaviota + +_ZFlat0-8 409MB/s ± 1% html +_ZFlat1-8 250MB/s ± 1% urls +_ZFlat2-8 12.3GB/s ± 1% jpg +_ZFlat3-8 132MB/s ± 0% jpg_200 +_ZFlat4-8 2.92GB/s ± 0% pdf +_ZFlat5-8 405MB/s ± 1% html4 +_ZFlat6-8 179MB/s ± 1% txt1 +_ZFlat7-8 170MB/s ± 1% txt2 +_ZFlat8-8 189MB/s ± 1% txt3 +_ZFlat9-8 164MB/s ± 1% txt4 +_ZFlat10-8 479MB/s ± 1% pb +_ZFlat11-8 270MB/s ± 1% gaviota + + + +For comparison (Go's encoded output is byte-for-byte identical to C++'s), here +are the numbers from C++ Snappy's + +make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log + +BM_UFlat/0 2.4GB/s html +BM_UFlat/1 1.4GB/s urls +BM_UFlat/2 21.8GB/s jpg +BM_UFlat/3 1.5GB/s jpg_200 +BM_UFlat/4 13.3GB/s pdf +BM_UFlat/5 2.1GB/s html4 +BM_UFlat/6 1.0GB/s txt1 +BM_UFlat/7 959.4MB/s txt2 +BM_UFlat/8 1.0GB/s txt3 +BM_UFlat/9 864.5MB/s txt4 +BM_UFlat/10 2.9GB/s pb +BM_UFlat/11 1.2GB/s gaviota + +BM_ZFlat/0 944.3MB/s html (22.31 %) +BM_ZFlat/1 501.6MB/s urls (47.78 %) +BM_ZFlat/2 14.3GB/s jpg (99.95 %) +BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) +BM_ZFlat/4 8.3GB/s pdf (83.30 %) +BM_ZFlat/5 903.5MB/s html4 (22.52 %) +BM_ZFlat/6 336.0MB/s txt1 (57.88 %) +BM_ZFlat/7 312.3MB/s txt2 (61.91 %) +BM_ZFlat/8 353.1MB/s txt3 (54.99 %) +BM_ZFlat/9 289.9MB/s txt4 (66.26 %) +BM_ZFlat/10 1.2GB/s pb (19.68 %) +BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/klauspost/compress/snappy/decode.go b/vendor/github.com/klauspost/compress/snappy/decode.go new file mode 100644 index 000000000..72efb0353 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/decode.go @@ -0,0 +1,237 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. 
+ ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. +func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + for { + if r.i < r.j { + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil + } + if !r.readFull(r.buf[:4], true) { + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return 0, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). 
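+ // A compressed-data chunk is a little-endian CRC of the decoded bytes
+ // (checksumSize bytes) followed by a Snappy block; both the decoded size
+ // and the checksum are verified before the data is handed to the caller.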
+ if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.decoded[:n], false) { + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return 0, r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return 0, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return 0, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return 0, r.err + } + } +} diff --git a/vendor/github.com/klauspost/compress/snappy/decode_amd64.go b/vendor/github.com/klauspost/compress/snappy/decode_amd64.go new file mode 100644 index 000000000..fcd192b84 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/decode_amd64.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// decode has the same semantics as in decode_other.go. +// +//go:noescape +func decode(dst, src []byte) int diff --git a/vendor/github.com/klauspost/compress/snappy/decode_amd64.s b/vendor/github.com/klauspost/compress/snappy/decode_amd64.s new file mode 100644 index 000000000..1c66e3723 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/decode_amd64.s @@ -0,0 +1,482 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. 
The register allocation: +// - AX scratch +// - BX scratch +// - CX length or x +// - DX offset +// - SI &src[s] +// - DI &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. +// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. +TEXT ·decode(SB), NOSPLIT, $48-56 + // Initialize SI, DI and R8-R13. + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, DI + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, SI + MOVQ R11, R13 + ADDQ R12, R13 + +loop: + // for s < len(src) + CMPQ SI, R13 + JEQ end + + // CX = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (SI), CX + MOVL CX, BX + ANDL $3, BX + CMPL BX, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, CX + CMPL CX, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ SI + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that CX == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // CX can hold 64 bits, so the increment cannot overflow. + INCQ CX + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // AX = len(dst) - d + // BX = len(src) - s + MOVQ R10, AX + SUBQ DI, AX + MOVQ R13, BX + SUBQ SI, BX + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMPQ CX, $16 + JGT callMemmove + CMPQ AX, $16 + JLT callMemmove + CMPQ BX, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. 
+ MOVOU 0(SI), X0 + MOVOU X0, 0(DI) + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ CX, AX + JGT errCorrupt + CMPQ CX, BX + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // DI, SI and CX as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, R13 + ADDQ R12, R13 + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. + ADDQ CX, SI + SUBQ $58, SI + CMPQ SI, R13 + JA errCorrupt + + // case x == 60: + CMPL CX, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(SI), CX + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(SI), CX + JMP doLit + +tagLit62Plus: + CMPL CX, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVWLZX -3(SI), CX + MOVBLZX -1(SI), BX + SHLL $16, BX + ORL BX, CX + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(SI), CX + JMP doLit + +// The code above handles literal tags. +// ---------------------------------------- +// The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, SI + + // if uint(s) > uint(len(src)) { etc } + CMPQ SI, R13 + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(SI), DX + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, SI + + // if uint(s) > uint(len(src)) { etc } + CMPQ SI, R13 + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(SI), DX + JMP doCopy + +tagCopy: + // We have a copy tag. We assume that: + // - BX == src[s] & 0x03 + // - CX == src[s] + CMPQ BX, $2 + JEQ tagCopy2 + JA tagCopy4 + + // case tagCopy1: + // s += 2 + ADDQ $2, SI + + // if uint(s) > uint(len(src)) { etc } + CMPQ SI, R13 + JA errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVQ CX, DX + ANDQ $0xe0, DX + SHLQ $3, DX + MOVBQZX -1(SI), BX + ORQ BX, DX + + // length = 4 + int(src[s-2])>>2&0x7 + SHRQ $2, CX + ANDQ $7, CX + ADDQ $4, CX + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. 
+ //
+ // We assume that:
+ // - CX == length && CX > 0
+ // - DX == offset
+
+ // if offset <= 0 { etc }
+ CMPQ DX, $0
+ JLE errCorrupt
+
+ // if d < offset { etc }
+ MOVQ DI, BX
+ SUBQ R8, BX
+ CMPQ BX, DX
+ JLT errCorrupt
+
+ // if length > len(dst)-d { etc }
+ MOVQ R10, BX
+ SUBQ DI, BX
+ CMPQ CX, BX
+ JGT errCorrupt
+
+ // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+ //
+ // Set:
+ // - R14 = len(dst)-d
+ // - R15 = &dst[d-offset]
+ MOVQ R10, R14
+ SUBQ DI, R14
+ MOVQ DI, R15
+ SUBQ DX, R15
+
+ // !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+ //
+ // First, try using two 8-byte load/stores, similar to the doLit technique
+ // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+ // still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+ // and not one 16-byte load/store, and the first store has to be before the
+ // second load, due to the overlap if offset is in the range [8, 16).
+ //
+ // if length > 16 || offset < 8 || len(dst)-d < 16 {
+ // goto slowForwardCopy
+ // }
+ // copy 16 bytes
+ // d += length
+ CMPQ CX, $16
+ JGT slowForwardCopy
+ CMPQ DX, $8
+ JLT slowForwardCopy
+ CMPQ R14, $16
+ JLT slowForwardCopy
+ MOVQ 0(R15), AX
+ MOVQ AX, 0(DI)
+ MOVQ 8(R15), BX
+ MOVQ BX, 8(DI)
+ ADDQ CX, DI
+ JMP loop
+
+slowForwardCopy:
+ // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+ // can still try 8-byte load stores, provided we can overrun up to 10 extra
+ // bytes. As above, the overrun will be fixed up by subsequent iterations
+ // of the outermost loop.
+ //
+ // The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+ // commentary says:
+ //
+ // ----
+ //
+ // The main part of this loop is a simple copy of eight bytes at a time
+ // until we've copied (at least) the requested amount of bytes. However,
+ // if d and d-offset are less than eight bytes apart (indicating a
+ // repeating pattern of length < 8), we first need to expand the pattern in
+ // order to get the correct results. For instance, if the buffer looks like
+ // this, with the eight-byte <d-offset> and <d> patterns marked as
+ // intervals:
+ //
+ // abxxxxxxxxxxxx
+ // [------] d-offset
+ // [------] d
+ //
+ // a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+ // once, after which we can move <d> two bytes without moving <d-offset>:
+ //
+ // ababxxxxxxxxxx
+ // [------] d-offset
+ // [------] d
+ //
+ // and repeat the exercise until the two no longer overlap.
+ //
+ // This allows us to do very well in the special case of one single byte
+ // repeated many times, without taking a big hit for more general cases.
+ //
+ // The worst case of extra writing past the end of the match occurs when
+ // offset == 1 and length == 1; the last copy will read from byte positions
+ // [0..7] and write to [4..11], whereas it was only supposed to write to
+ // position 1. Thus, ten excess bytes.
+ //
+ // ----
+ //
+ // That "10 byte overrun" worst case is confirmed by Go's
+ // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+ // and finishSlowForwardCopy algorithm.
+ //
+ // if length > len(dst)-d-10 {
+ // goto verySlowForwardCopy
+ // }
+ SUBQ $10, R14
+ CMPQ CX, R14
+ JGT verySlowForwardCopy
+
+makeOffsetAtLeast8:
+ // !!! As above, expand the pattern so that offset >= 8 and we can use
+ // 8-byte load/stores.
+ // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R15, is unchanged. + // } + CMPQ DX, $8 + JGE fixUpSlowForwardCopy + MOVQ (R15), BX + MOVQ BX, (DI) + SUBQ DX, CX + ADDQ DX, DI + ADDQ DX, DX + JMP makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by DI being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save DI to AX so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVQ DI, AX + ADDQ CX, DI + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + CMPQ CX, $0 + JLE loop + MOVQ (R15), BX + MOVQ BX, (AX) + ADDQ $8, R15 + ADDQ $8, AX + SUBQ $8, CX + JMP finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R15), BX + MOVB BX, (DI) + INCQ R15 + INCQ DI + DECQ CX + JNZ verySlowForwardCopy + JMP loop + +// The code above handles copy tags. +// ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMPQ DI, R10 + JNE errCorrupt + + // return 0 + MOVQ $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVQ $1, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/snappy/decode_other.go b/vendor/github.com/klauspost/compress/snappy/decode_other.go new file mode 100644 index 000000000..94a96c5d7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/decode_other.go @@ -0,0 +1,115 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset > length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/klauspost/compress/snappy/encode.go b/vendor/github.com/klauspost/compress/snappy/encode.go new file mode 100644 index 000000000..8d393e904 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/encode.go @@ -0,0 +1,285 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. 
+ d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. 
+ // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. 
+ n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. + w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. +func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/klauspost/compress/snappy/encode_amd64.go b/vendor/github.com/klauspost/compress/snappy/encode_amd64.go new file mode 100644 index 000000000..150d91bc8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/encode_amd64.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// emitLiteral has the same semantics as in encode_other.go. +// +//go:noescape +func emitLiteral(dst, lit []byte) int + +// emitCopy has the same semantics as in encode_other.go. +// +//go:noescape +func emitCopy(dst []byte, offset, length int) int + +// extendMatch has the same semantics as in encode_other.go. +// +//go:noescape +func extendMatch(src []byte, i, j int) int + +// encodeBlock has the same semantics as in encode_other.go. 
+// +//go:noescape +func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/klauspost/compress/snappy/encode_amd64.s b/vendor/github.com/klauspost/compress/snappy/encode_amd64.s new file mode 100644 index 000000000..adfd979fe --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/encode_amd64.s @@ -0,0 +1,730 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a +// Go toolchain regression. See https://github.com/golang/go/issues/15426 and +// https://github.com/golang/snappy/issues/29 +// +// As a workaround, the package was built with a known good assembler, and +// those instructions were disassembled by "objdump -d" to yield the +// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 +// style comments, in AT&T asm syntax. Note that rsp here is a physical +// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). +// The instructions were then encoded as "BYTE $0x.." sequences, which assemble +// fine on Go 1.6. + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". + +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - AX len(lit) +// - BX n +// - DX return value +// - DI &dst[i] +// - R10 &lit[0] +// +// The 24 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. +TEXT ·emitLiteral(SB), NOSPLIT, $24-56 + MOVQ dst_base+0(FP), DI + MOVQ lit_base+24(FP), R10 + MOVQ lit_len+32(FP), AX + MOVQ AX, DX + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT oneByte + CMPL BX, $256 + JLT twoBytes + +threeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + ADDQ $3, DX + JMP memmove + +twoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + ADDQ $2, DX + JMP memmove + +oneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + ADDQ $1, DX + +memmove: + MOVQ DX, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - AX length +// - SI &dst[0] +// - DI &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), DI + MOVQ DI, SI + MOVQ offset+24(FP), R11 + MOVQ length+32(FP), AX + +loop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. 
+ MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP loop0 + +step1: + // if length > 64 { etc } + CMPL AX, $64 + JLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMPL AX, $12 + JGE step3 + CMPL R11, $2048 + JGE step3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - DX &src[0] +// - SI &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVQ src_base+0(FP), DX + MOVQ src_len+8(FP), R14 + MOVQ i+24(FP), R15 + MOVQ j+32(FP), SI + ADDQ DX, R14 + ADDQ DX, R15 + ADDQ DX, SI + MOVQ R14, R13 + SUBQ $8, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA cmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, R15 + ADDQ $8, SI + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE extendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE extendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - AX . . +// - BX . . +// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). +// - DX 64 &src[0], tableSize +// - SI 72 &src[s] +// - DI 80 &dst[d] +// - R9 88 sLimit +// - R10 . &src[nextEmit] +// - R11 96 prevHash, currHash, nextHash, offset +// - R12 104 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 112 candidate +// +// The second column (56, 64, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. 
+// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An +// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. +TEXT ·encodeBlock(SB), 0, $32888-56 + MOVQ dst_base+0(FP), DI + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVQ $24, CX + MOVQ $256, DX + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + CMPQ DX, $16384 + JGE varTable + CMPQ DX, R14 + JGE varTable + SUBQ $1, CX + SHLQ $1, DX + JMP calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU + // writes 16 bytes, so we can do only tableSize/8 writes instead of the + // 2048 writes that would zero-initialize all of table's 32768 bytes. + SHRQ $3, DX + LEAQ table-32768(SP), BX + PXOR X0, X0 + +memclr: + MOVOU X0, 0(BX) + ADDQ $16, BX + SUBQ $1, DX + JNZ memclr + + // !!! DX = &src[0] + MOVQ SI, DX + + // sLimit := len(src) - inputMargin + MOVQ R14, R9 + SUBQ $15, R9 + + // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't + // change for the rest of the function. + MOVQ CX, 56(SP) + MOVQ DX, 64(SP) + MOVQ R9, 88(SP) + + // nextEmit := 0 + MOVQ DX, R10 + + // s := 1 + ADDQ $1, SI + + // nextHash := hash(load32(src, s), shift) + MOVL 0(SI), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + +outer: + // for { etc } + + // skip := 32 + MOVQ $32, R12 + + // nextS := s + MOVQ SI, R13 + + // candidate := 0 + MOVQ $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVQ R13, SI + + // bytesBetweenHashLookups := skip >> 5 + MOVQ R12, R14 + SHRQ $5, R14 + + // nextS = s + bytesBetweenHashLookups + ADDQ R14, R13 + + // skip += bytesBetweenHashLookups + ADDQ R14, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVQ R13, AX + SUBQ DX, AX + CMPQ AX, R9 + JA emitRemainder + + // candidate = int(table[nextHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[nextHash] = uint16(s) + MOVQ SI, AX + SUBQ DX, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // nextHash = hash(load32(src, nextS), shift) + MOVL 0(R13), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVL 0(SI), AX + MOVL (DX)(R15*1), BX + CMPL AX, BX + JNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVQ SI, AX + SUBQ R10, AX + CMPQ AX, $16 + JLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. 
+ // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT inlineEmitLiteralOneByte + CMPL BX, $256 + JLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVQ SI, 72(SP) + MOVQ DI, 80(SP) + MOVQ R15, 112(SP) + CALL runtime·memmove(SB) + MOVQ 56(SP), CX + MOVQ 64(SP), DX + MOVQ 72(SP), SI + MOVQ 80(SP), DI + MOVQ 88(SP), R9 + MOVQ 112(SP), R15 + JMP inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB AX, BX + SUBB $1, BX + SHLB $2, BX + MOVB BX, (DI) + ADDQ $1, DI + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(R10), X0 + MOVOU X0, 0(DI) + ADDQ AX, DI + +inner1: + // for { etc } + + // base := s + MOVQ SI, R12 + + // !!! offset := base - candidate + MOVQ R12, R11 + SUBQ R15, R11 + SUBQ DX, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVQ src_len+32(FP), R14 + ADDQ DX, R14 + + // !!! R13 = &src[len(src) - 8] + MOVQ R14, R13 + SUBQ $8, R13 + + // !!! R15 = &src[candidate + 4] + ADDQ $4, R15 + ADDQ DX, R15 + + // !!! s += 4 + ADDQ $4, SI + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA inlineExtendMatchCmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE inlineExtendMatchBSF + ADDQ $8, R15 + ADDQ $8, SI + JMP inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + JMP inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE inlineExtendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE inlineExtendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. 
+ // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVQ SI, AX + SUBQ R12, AX + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + CMPL AX, $64 + JLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + CMPL AX, $12 + JGE inlineEmitCopyStep3 + CMPL R11, $2048 + JGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + JMP inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVQ SI, R10 + + // if s >= sLimit { goto emitRemainder } + MOVQ SI, AX + SUBQ DX, AX + CMPQ AX, R9 + JAE emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. + + // x := load64(src, s-1) + MOVQ -1(SI), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // table[prevHash] = uint16(s-1) + MOVQ SI, AX + SUBQ DX, AX + SUBQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // currHash := hash(uint32(x>>8), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // candidate = int(table[currHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[currHash] = uint16(s) + ADDQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVL (DX)(R15*1), BX + CMPL R14, BX + JEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // s++ + ADDQ $1, SI + + // break out of the inner1 for loop, i.e. continue the outer loop. + JMP outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVQ src_len+32(FP), AX + ADDQ DX, AX + CMPQ R10, AX + JEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVQ DI, 0(SP) + MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ R10, 24(SP) + SUBQ R10, AX + MOVQ AX, 32(SP) + MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. 
+ MOVQ DI, 80(SP) + CALL ·emitLiteral(SB) + MOVQ 80(SP), DI + + // Finish the "d +=" part of "d += emitLiteral(etc)". + ADDQ 48(SP), DI + +encodeBlockEnd: + MOVQ dst_base+0(FP), AX + SUBQ AX, DI + MOVQ DI, d+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/snappy/encode_other.go b/vendor/github.com/klauspost/compress/snappy/encode_other.go new file mode 100644 index 000000000..dbcae905e --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/encode_other.go @@ -0,0 +1,238 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= len(lit) && len(lit) <= 65536 +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + default: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + } + return i + copy(dst[i:], lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= 65535 +// 4 <= length && length <= 65535 +func emitCopy(dst []byte, offset, length int) int { + i := 0 + // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The + // threshold for this loop is a little higher (at 68 = 64 + 4), and the + // length emitted down below is is a little lower (at 60 = 64 - 4), because + // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed + // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as + // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as + // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a + // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an + // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. + for length >= 68 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[i+0] = 63<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 64 + } + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + dst[i+0] = 59<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 60 + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + return i + 3 + } + // Emit the remaining copy, encoded as 2 bytes. 
+ dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +// extendMatch returns the largest k such that k <= len(src) and that +// src[i:i+k-j] and src[j:k] have the same contents. +// +// It assumes that: +// 0 <= i && i < j && j <= len(src) +func extendMatch(src []byte, i, j int) int { + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. 
But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/klauspost/compress/snappy/runbench.cmd b/vendor/github.com/klauspost/compress/snappy/runbench.cmd new file mode 100644 index 000000000..d24eb4b47 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/runbench.cmd @@ -0,0 +1,2 @@ +del old.txt +go test -bench=. >>old.txt && go test -bench=. >>old.txt && go test -bench=. >>old.txt && benchstat -delta-test=ttest old.txt new.txt diff --git a/vendor/github.com/klauspost/compress/snappy/snappy.go b/vendor/github.com/klauspost/compress/snappy/snappy.go new file mode 100644 index 000000000..ea58ced88 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. 
+// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snappy + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return c>>15 | c<<17 + 0xa282ead8 +} diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md new file mode 100644 index 000000000..7680bfe1d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -0,0 +1,417 @@ +# zstd + +[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios. 
+It offers a very wide range of compression / speed trade-off, while being backed by a very fast decoder. +A high performance compression algorithm is implemented. For now focused on speed. + +This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content. + +This package is pure Go and without use of "unsafe". + +The `zstd` package is provided as open source software using a Go standard license. + +Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors. + +## Installation + +Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`. + +Godoc Documentation: https://godoc.org/github.com/klauspost/compress/zstd + + +## Compressor + +### Status: + +STABLE - there may always be subtle bugs, a wide variety of content has been tested and the library is actively +used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates. + +There may still be specific combinations of data types/size/settings that could lead to edge cases, +so as always, testing is recommended. + +For now, a high speed (fastest) and medium-fast (default) compressor has been implemented. + +* The "Fastest" compression ratio is roughly equivalent to zstd level 1. +* The "Default" compression ratio is roughly equivalent to zstd level 3 (default). +* The "Better" compression ratio is roughly equivalent to zstd level 7. +* The "Best" compression ratio is roughly equivalent to zstd level 11. + +In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode. +The compression ratio compared to stdlib is around level 3, but usually 3x as fast. + + +### Usage + +An Encoder can be used for either compressing a stream via the +`io.WriteCloser` interface supported by the Encoder or as multiple independent +tasks via the `EncodeAll` function. +Smaller encodes are encouraged to use the EncodeAll function. +Use `NewWriter` to create a new instance that can be used for both. + +To create a writer with default options, do like this: + +```Go +// Compress input to output. +func Compress(in io.Reader, out io.Writer) error { + enc, err := zstd.NewWriter(out) + if err != nil { + return err + } + _, err = io.Copy(enc, in) + if err != nil { + enc.Close() + return err + } + return enc.Close() +} +``` + +Now you can encode by writing data to `enc`. The output will be finished writing when `Close()` is called. +Even if your encode fails, you should still call `Close()` to release any resources that may be held up. + +The above is fine for big encodes. However, whenever possible try to *reuse* the writer. + +To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output. +This will allow the encoder to reuse all resources and avoid wasteful allocations. + +Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part +of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is likely to change +in the future. So if you want to limit concurrency for future updates, specify the concurrency +you would like. + +You can specify your desired compression level using `WithEncoderLevel()` option. Currently only pre-defined +compression settings can be specified. + +#### Future Compatibility Guarantees + +This will be an evolving project. 
When using this package it is important to note that both the compression efficiency and speed may change.
+
+The goal will be to keep the default efficiency at the default zstd (level 3).
+However, the encoding should never be assumed to remain the same,
+and you should not use hashes of compressed output for similarity checks.
+
+The Encoder can be assumed to produce the same output from the exact same code version.
+However, there may be modes in the future that break this,
+although they will not be enabled without an explicit option.
+
+This encoder is not designed to (and will probably never) output the exact same bitstream as the reference encoder.
+
+Also note that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59),
+[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43)
+and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames).
+
+#### Blocks
+
+For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
+
+`EncodeAll` will encode all input in src and append it to dst.
+This function can be called concurrently, but each call will only run on a single goroutine.
+
+Encoded blocks can be concatenated and the result will be the combined input stream.
+Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
+
+Especially when encoding blocks, take special care to reuse the encoder.
+This will effectively make it run without allocations after a warmup period.
+To make it run completely without allocations, supply a destination buffer with space for all content.
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a writer that caches compressors.
+// For this operation type we supply a nil Writer.
+var encoder, _ = zstd.NewWriter(nil)
+
+// Compress a buffer.
+// If you have a destination buffer, the allocation in the call can also be eliminated.
+func Compress(src []byte) []byte {
+	return encoder.EncodeAll(src, make([]byte, 0, len(src)))
+}
+```
+
+You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)`
+option when creating the writer.
+
+Using the Encoder for both a stream and individual blocks concurrently is safe.
+
+### Performance
+
+I have collected some examples comparing speed and compression against other compressors.
+
+* `file` is the input file.
+* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library.
+* `level` is the compression level used. For `zskp`, level 1 is "fastest", level 2 is "default", level 3 is "better" and level 4 is "best".
+* `insize`/`outsize` is the input/output size.
+* `millis` is the number of milliseconds used for compression.
+* `mb/s` is megabytes (2^20 bytes) per second.
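+
+Before the numbers, here is a minimal sketch tying the options mentioned above together
+(`WithEncoderLevel`, `WithEncoderConcurrency` and `EncodeAll`). The helper name and the chosen
+settings are illustrative assumptions only, not a recommended configuration; `SpeedBetterCompression`
+is assumed to be the level constant matching the "better" setting described above:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// newBlockCompressor returns an encoder intended for many small EncodeAll calls.
+func newBlockCompressor() (*zstd.Encoder, error) {
+	return zstd.NewWriter(nil, // nil Writer: only EncodeAll will be used
+		zstd.WithEncoderLevel(zstd.SpeedBetterCompression), // roughly zstd level 7
+		zstd.WithEncoderConcurrency(4),                     // cap concurrent encodes
+	)
+}
+```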
+ +``` +Silesia Corpus: +http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip + +This package: +file out level insize outsize millis mb/s +silesia.tar zskp 1 211947520 73101992 643 313.87 +silesia.tar zskp 2 211947520 67504318 969 208.38 +silesia.tar zskp 3 211947520 65177448 1899 106.44 +silesia.tar zskp 4 211947520 61381950 8115 24.91 + +cgo zstd: +silesia.tar zstd 1 211947520 73605392 543 371.56 +silesia.tar zstd 3 211947520 66793289 864 233.68 +silesia.tar zstd 6 211947520 62916450 1913 105.66 +silesia.tar zstd 9 211947520 60212393 5063 39.92 + +gzip, stdlib/this package: +silesia.tar gzstd 1 211947520 80007735 1654 122.21 +silesia.tar gzkp 1 211947520 80369488 1168 173.06 + +GOB stream of binary data. Highly compressible. +https://files.klauspost.com/compress/gob-stream.7z + +file out level insize outsize millis mb/s +gob-stream zskp 1 1911399616 235022249 3088 590.30 +gob-stream zskp 2 1911399616 205669791 3786 481.34 +gob-stream zskp 3 1911399616 185792019 9324 195.48 +gob-stream zskp 4 1911399616 171537212 32113 56.76 +gob-stream zstd 1 1911399616 249810424 2637 691.26 +gob-stream zstd 3 1911399616 208192146 3490 522.31 +gob-stream zstd 6 1911399616 193632038 6687 272.56 +gob-stream zstd 9 1911399616 177620386 16175 112.70 +gob-stream gzstd 1 1911399616 357382641 10251 177.82 +gob-stream gzkp 1 1911399616 362156523 5695 320.08 + +The test data for the Large Text Compression Benchmark is the first +10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. +http://mattmahoney.net/dc/textdata.html + +file out level insize outsize millis mb/s +enwik9 zskp 1 1000000000 343848582 3609 264.18 +enwik9 zskp 2 1000000000 317276632 5746 165.97 +enwik9 zskp 3 1000000000 294540704 11725 81.34 +enwik9 zskp 4 1000000000 276609671 44029 21.66 +enwik9 zstd 1 1000000000 358072021 3110 306.65 +enwik9 zstd 3 1000000000 313734672 4784 199.35 +enwik9 zstd 6 1000000000 295138875 10290 92.68 +enwik9 zstd 9 1000000000 278348700 28549 33.40 +enwik9 gzstd 1 1000000000 382578136 9604 99.30 +enwik9 gzkp 1 1000000000 383825945 6544 145.73 + +Highly compressible JSON file. 
+https://files.klauspost.com/compress/github-june-2days-2019.json.zst
+
+file out level insize outsize millis mb/s
+github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40
+github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96
+github-june-2days-2019.json zskp 3 6273951764 537511906 29252 204.54
+github-june-2days-2019.json zskp 4 6273951764 512796117 97791 61.18
+github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00
+github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57
+github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18
+github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16
+github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79
+github-june-2days-2019.json gzkp 1 6273951764 1128755542 19236 311.03
+
+VM Image, Linux mint with a few installed applications:
+https://files.klauspost.com/compress/rawstudio-mint14.7z
+
+file out level insize outsize millis mb/s
+rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84
+rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07
+rawstudio-mint14.tar zskp 3 8558382592 3224594213 71751 113.75
+rawstudio-mint14.tar zskp 4 8558382592 3027332295 486243 16.79
+rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27
+rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92
+rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77
+rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91
+rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40
+rawstudio-mint14.tar gzkp 1 8558382592 3970463184 41749 195.49
+
+CSV data:
+https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst
+
+file out level insize outsize millis mb/s
+nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35
+nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44
+nyc-taxi-data-10M.csv zskp 3 3325605752 538490114 19880 159.53
+nyc-taxi-data-10M.csv zskp 4 3325605752 495986829 89368 35.49
+nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18
+nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07
+nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27
+nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12
+nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83
+nyc-taxi-data-10M.csv gzkp 1 3325605752 924718719 16388 193.53
+```
+
+## Decompressor
+
+Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
+
+This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
+with fuzzing kindly supplied by [fuzzit.dev](https://fuzzit.dev/).
+The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder,
+or run it past its limits with ANY input provided.
+
+### Usage
+
+The package has been designed for two main use cases: big streams of data and smaller in-memory buffers.
+Both of them are accessed by creating a `Decoder`.
+
+For streaming use, a simple setup could look like this:
+
+```Go
+import (
+	"io"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+func Decompress(in io.Reader, out io.Writer) error {
+	d, err := zstd.NewReader(in)
+	if err != nil {
+		return err
+	}
+	defer d.Close()
+
+	// Copy content...
+	_, err = io.Copy(out, d)
+	return err
+}
+```
+
+It is important to call the "Close" function when you no longer need the Reader, to stop running goroutines.
+See "Allocation-less operation" below.
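+
+Since the Reader keeps goroutines running until it is closed, a common pattern (described under
+"Allocation-less operation" below) is to keep one long-lived `Decoder` and point it at each new
+stream with `Reset`. A minimal sketch of that pattern; the helper name is illustrative only:
+
+```Go
+import (
+	"io"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+// decompressMany streams every input through a single reusable Decoder.
+func decompressMany(inputs []io.Reader, out io.Writer) error {
+	d, err := zstd.NewReader(nil) // no stream yet; Reset attaches one below
+	if err != nil {
+		return err
+	}
+	defer d.Close() // stops the decoder goroutines when completely done
+
+	for _, in := range inputs {
+		if err := d.Reset(in); err != nil {
+			return err
+		}
+		if _, err := io.Copy(out, d); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+```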
+
+For decoding buffers, it could look something like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a reader that caches decompressors.
+// For this operation type we supply a nil Reader.
+var decoder, _ = zstd.NewReader(nil)
+
+// Decompress a buffer. We don't supply a destination buffer,
+// so it will be allocated by the decoder.
+func Decompress(src []byte) ([]byte, error) {
+	return decoder.DecodeAll(src, nil)
+}
+```
+
+Both of these cases should provide the functionality needed.
+The decoder can be used for *concurrent* decompression of multiple buffers.
+It will only allow a certain number of concurrent operations to run.
+To tweak that yourself, use the `WithDecoderConcurrency(n)` option when creating the decoder.
+
+### Dictionaries
+
+Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed.
+
+Dictionaries are added individually to Decoders.
+Dictionaries are generated by the `zstd --train` command and contain an initial state for the decoder.
+To add a dictionary, use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data.
+Several dictionaries can be added at once.
+
+The dictionary will be used automatically for the data that specifies it.
+A re-used Decoder will still contain the dictionaries registered.
+
+When registering multiple dictionaries with the same ID, the last one will be used.
+
+It is possible to use dictionaries when compressing data.
+
+To enable a dictionary, use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used,
+and it will likely be used even if it doesn't improve compression.
+
+The used dictionary must be used to decompress the content.
+
+For any real gains, the dictionary should be built with similar data.
+If an unsuitable dictionary is used, the output may be slightly larger than with no dictionary.
+Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data.
+For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression).
+
+For now, there is a fixed startup performance penalty for compressing content with dictionaries.
+This will likely be improved over time. Just be sure to test performance when implementing.
+
+### Allocation-less operation
+
+The decoder has been designed to operate without allocations after a warmup.
+
+This means that you should *store* the decoder for best performance.
+To re-use a stream decoder, use the `Reset(r io.Reader) error` to switch to another stream.
+A decoder can safely be re-used even if the previous stream failed.
+
+To release the resources, you must call the `Close()` function on a decoder.
+After this it can *no longer be reused*, but all running goroutines will be stopped.
+So you *must* call it when you no longer need the Reader.
+
+For decompressing smaller buffers, a single decoder can be used.
+When decoding buffers, you can supply a destination slice with length 0 and your expected capacity.
+In this case no unneeded allocations should be made.
+
+### Concurrency
+
+The buffer decoder does everything on the same goroutine and does nothing concurrently.
+It can, however, decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that.
+
+The stream decoder operates as follows:
+
+* One goroutine reads input and splits the input to several block decoders.
+* A number of decoders will decode blocks.
+* A goroutine coordinates these blocks and sends history from one to the next. + +So effectively this also means the decoder will "read ahead" and prepare data to always be available for output. + +Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency. + +In practice this means that concurrency is often limited to utilizing about 2 cores effectively. + + +### Benchmarks + +These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd). + +The first two are streaming decodes and the last are smaller inputs. + +``` +BenchmarkDecoderSilesia-8 3 385000067 ns/op 550.51 MB/s 5498 B/op 8 allocs/op +BenchmarkDecoderSilesiaCgo-8 6 197666567 ns/op 1072.25 MB/s 270672 B/op 8 allocs/op + +BenchmarkDecoderEnwik9-8 1 2027001600 ns/op 493.34 MB/s 10496 B/op 18 allocs/op +BenchmarkDecoderEnwik9Cgo-8 2 979499200 ns/op 1020.93 MB/s 270672 B/op 8 allocs/op + +Concurrent performance: + +BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16 28915 42469 ns/op 4340.07 MB/s 114 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16 116505 9965 ns/op 11900.16 MB/s 16 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16 8952 134272 ns/op 3588.70 MB/s 915 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16 11820 102538 ns/op 4161.90 MB/s 594 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16 34782 34184 ns/op 3661.88 MB/s 60 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16 27712 43447 ns/op 3500.58 MB/s 99 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16 62826 18750 ns/op 21845.10 MB/s 104 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16 631545 1794 ns/op 57078.74 MB/s 2 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16 1690140 712 ns/op 172938.13 MB/s 1 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16 10432 113593 ns/op 6180.73 MB/s 1143 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html.zst-16 113206 10671 ns/op 9596.27 MB/s 15 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16 1530615 779 ns/op 5229.49 MB/s 0 B/op 0 allocs/op + +BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16 65217 16192 ns/op 11383.34 MB/s 46 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16 292671 4039 ns/op 29363.19 MB/s 6 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16 26314 46021 ns/op 10470.43 MB/s 293 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16 33897 34900 ns/op 12227.96 MB/s 205 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16 104348 11433 ns/op 10949.01 MB/s 20 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16 75949 15510 ns/op 9805.60 MB/s 32 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16 173910 6756 ns/op 60624.29 MB/s 37 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16 923076 1339 ns/op 76474.87 MB/s 1 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16 922920 1351 ns/op 91102.57 MB/s 2 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16 27649 43618 ns/op 16096.19 MB/s 407 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16 279073 4160 ns/op 24614.18 MB/s 6 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16 749938 1579 ns/op 
2581.71 MB/s 0 B/op 0 allocs/op +``` + +This reflects the performance around May 2020, but this may be out of date. + +# Contributions + +Contributions are always welcome. +For new features/fixes, remember to add tests and for performance enhancements include benchmarks. + +For sending files for reproducing errors use a service like [goobox](https://goobox.io/#/upload) or similar to share your files. + +For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan). + +This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare. diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go new file mode 100644 index 000000000..854458537 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -0,0 +1,136 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "io" + "math/bits" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 // Maybe use [16]byte, but shifting is awkward. + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. +func (b *bitReader) getBits(n uint8) int { + if n == 0 /*|| b.bitsRead >= 64 */ { + return 0 + } + return b.getBitsFast(n) +} + +// getBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) getBitsFast(n uint8) int { + const regMask = 64 - 1 + v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return int(v) +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + // 2 bounds checks. + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. 
+func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off >= 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// overread returns true if more bits have been requested than is on the stream. +func (b *bitReader) overread() bool { + return b.bitsRead > 64 +} + +// remain returns the number of bits remaining. +func (b *bitReader) remain() uint { + return b.off*8 + 64 - uint(b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +func highBits(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go new file mode 100644 index 000000000..303ae90f9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -0,0 +1,169 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +var bitMask32 = [32]uint32{ + 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, + 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, + 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, + 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, +} // up to 32 bits + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits32NC will add up to 32 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits32NC(value uint32, bits uint8) { + b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. +// There will be at least 56 bits available for writing when this has been called. 
+// Using flush32 is faster, but leaves less space for writing. +func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.bitContainer >>= v << 3 + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() + return nil +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go new file mode 100644 index 000000000..b51d922bd --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -0,0 +1,739 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
+ +package zstd + +import ( + "errors" + "fmt" + "io" + "sync" + + "github.com/klauspost/compress/huff0" + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type blockType uint8 + +//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex + +const ( + blockTypeRaw blockType = iota + blockTypeRLE + blockTypeCompressed + blockTypeReserved +) + +type literalsBlockType uint8 + +const ( + literalsBlockRaw literalsBlockType = iota + literalsBlockRLE + literalsBlockCompressed + literalsBlockTreeless +) + +const ( + // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) + maxCompressedBlockSize = 128 << 10 + + // Maximum possible block size (all Raw+Uncompressed). + maxBlockSize = (1 << 21) - 1 + + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header + maxCompressedLiteralSize = 1 << 18 + maxRLELiteralSize = 1 << 20 + maxMatchLen = 131074 + maxSequences = 0x7f00 + 0xffff + + // We support slightly less than the reference decoder to be able to + // use ints on 32 bit archs. + maxOffsetBits = 30 +) + +var ( + huffDecoderPool = sync.Pool{New: func() interface{} { + return &huff0.Scratch{} + }} + + fseDecoderPool = sync.Pool{New: func() interface{} { + return &fseDecoder{} + }} +) + +type blockDec struct { + // Raw source data of the block. + data []byte + dataStorage []byte + + // Destination of the decoded data. + dst []byte + + // Buffer for literals data. + literalBuf []byte + + // Window size of the block. + WindowSize uint64 + + history chan *history + input chan struct{} + result chan decodeOutput + sequenceBuf []seq + err error + decWG sync.WaitGroup + + // Frame to use for singlethreaded decoding. + // Should not be used by the decoder itself since parent may be another frame. + localFrame *frameDec + + // Block is RLE, this is the size. + RLESize uint32 + tmp [4]byte + + Type blockType + + // Is this the last block of a frame? + Last bool + + // Use less memory + lowMem bool +} + +func (b *blockDec) String() string { + if b == nil { + return "" + } + return fmt.Sprintf("Steam Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize) +} + +func newBlockDec(lowMem bool) *blockDec { + b := blockDec{ + lowMem: lowMem, + result: make(chan decodeOutput, 1), + input: make(chan struct{}, 1), + history: make(chan *history, 1), + } + b.decWG.Add(1) + go b.startDecoder() + return &b +} + +// reset will reset the block. +// Input must be a start of a block and will be at the end of the block when returned. +func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { + b.WindowSize = windowSize + tmp := br.readSmall(3) + if tmp == nil { + if debug { + println("Reading block header:", io.ErrUnexpectedEOF) + } + return io.ErrUnexpectedEOF + } + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + b.Last = bh&1 != 0 + b.Type = blockType((bh >> 1) & 3) + // find size. 
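+	// The remaining bits of the 3-byte block header hold the size field: the stored size for
+	// raw blocks, the compressed size for compressed blocks and the regenerated size for RLE blocks.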
+ cSize := int(bh >> 3) + maxSize := maxBlockSize + switch b.Type { + case blockTypeReserved: + return ErrReservedBlockType + case blockTypeRLE: + b.RLESize = uint32(cSize) + if b.lowMem { + maxSize = cSize + } + cSize = 1 + case blockTypeCompressed: + if debug { + println("Data size on stream:", cSize) + } + b.RLESize = 0 + maxSize = maxCompressedBlockSize + if windowSize < maxCompressedBlockSize && b.lowMem { + maxSize = int(windowSize) + } + if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { + if debug { + printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrCompressedSizeTooBig + } + case blockTypeRaw: + b.RLESize = 0 + // We do not need a destination for raw blocks. + maxSize = -1 + default: + panic("Invalid block type") + } + + // Read block data. + if cap(b.dataStorage) < cSize { + if b.lowMem { + b.dataStorage = make([]byte, 0, cSize) + } else { + b.dataStorage = make([]byte, 0, maxBlockSize) + } + } + if cap(b.dst) <= maxSize { + b.dst = make([]byte, 0, maxSize+1) + } + var err error + b.data, err = br.readBig(cSize, b.dataStorage) + if err != nil { + if debug { + println("Reading block:", err, "(", cSize, ")", len(b.data)) + printf("%T", br) + } + return err + } + return nil +} + +// sendEOF will make the decoder send EOF on this frame. +func (b *blockDec) sendErr(err error) { + b.Last = true + b.Type = blockTypeReserved + b.err = err + b.input <- struct{}{} +} + +// Close will release resources. +// Closed blockDec cannot be reset. +func (b *blockDec) Close() { + close(b.input) + close(b.history) + close(b.result) + b.decWG.Wait() +} + +// decodeAsync will prepare decoding the block when it receives input. +// This will separate output and history. +func (b *blockDec) startDecoder() { + defer b.decWG.Done() + for range b.input { + //println("blockDec: Got block input") + switch b.Type { + case blockTypeRLE: + if cap(b.dst) < int(b.RLESize) { + if b.lowMem { + b.dst = make([]byte, b.RLESize) + } else { + b.dst = make([]byte, maxBlockSize) + } + } + o := decodeOutput{ + d: b, + b: b.dst[:b.RLESize], + err: nil, + } + v := b.data[0] + for i := range o.b { + o.b[i] = v + } + hist := <-b.history + hist.append(o.b) + b.result <- o + case blockTypeRaw: + o := decodeOutput{ + d: b, + b: b.data, + err: nil, + } + hist := <-b.history + hist.append(o.b) + b.result <- o + case blockTypeCompressed: + b.dst = b.dst[:0] + err := b.decodeCompressed(nil) + o := decodeOutput{ + d: b, + b: b.dst, + err: err, + } + if debug { + println("Decompressed to", len(b.dst), "bytes, error:", err) + } + b.result <- o + case blockTypeReserved: + // Used for returning errors. + <-b.history + b.result <- decodeOutput{ + d: b, + b: nil, + err: b.err, + } + default: + panic("Invalid block type") + } + if debug { + println("blockDec: Finished block") + } + } +} + +// decodeAsync will prepare decoding the block when it receives the history. +// If history is provided, it will not fetch it from the channel. 
+func (b *blockDec) decodeBuf(hist *history) error { + switch b.Type { + case blockTypeRLE: + if cap(b.dst) < int(b.RLESize) { + if b.lowMem { + b.dst = make([]byte, b.RLESize) + } else { + b.dst = make([]byte, maxBlockSize) + } + } + b.dst = b.dst[:b.RLESize] + v := b.data[0] + for i := range b.dst { + b.dst[i] = v + } + hist.appendKeep(b.dst) + return nil + case blockTypeRaw: + hist.appendKeep(b.data) + return nil + case blockTypeCompressed: + saved := b.dst + b.dst = hist.b + hist.b = nil + err := b.decodeCompressed(hist) + if debug { + println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) + } + hist.b = b.dst + b.dst = saved + return err + case blockTypeReserved: + // Used for returning errors. + return b.err + default: + panic("Invalid block type") + } +} + +// decodeCompressed will start decompressing a block. +// If no history is supplied the decoder will decodeAsync as much as possible +// before fetching from blockDec.history +func (b *blockDec) decodeCompressed(hist *history) error { + in := b.data + delayedHistory := hist == nil + + if delayedHistory { + // We must always grab history. + defer func() { + if hist == nil { + <-b.history + } + }() + } + // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header + if len(in) < 2 { + return ErrBlockTooSmall + } + litType := literalsBlockType(in[0] & 3) + var litRegenSize int + var litCompSize int + sizeFormat := (in[0] >> 2) & 3 + var fourStreams bool + switch litType { + case literalsBlockRaw, literalsBlockRLE: + switch sizeFormat { + case 0, 2: + // Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte. + litRegenSize = int(in[0] >> 3) + in = in[1:] + case 1: + // Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes. + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + in = in[2:] + case 3: + // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. + if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return ErrBlockTooSmall + } + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) + in = in[3:] + } + case literalsBlockCompressed, literalsBlockTreeless: + switch sizeFormat { + case 0, 1: + // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
+ if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + litRegenSize = int(n & 1023) + litCompSize = int(n >> 10) + fourStreams = sizeFormat == 1 + in = in[3:] + case 2: + fourStreams = true + if len(in) < 4 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + litRegenSize = int(n & 16383) + litCompSize = int(n >> 14) + in = in[4:] + case 3: + fourStreams = true + if len(in) < 5 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) + litRegenSize = int(n & 262143) + litCompSize = int(n >> 18) + in = in[5:] + } + } + if debug { + println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) + } + var literals []byte + var huff *huff0.Scratch + switch litType { + case literalsBlockRaw: + if len(in) < litRegenSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) + return ErrBlockTooSmall + } + literals = in[:litRegenSize] + in = in[litRegenSize:] + //printf("Found %d uncompressed literals\n", litRegenSize) + case literalsBlockRLE: + if len(in) < 1 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) + return ErrBlockTooSmall + } + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, litRegenSize) + } else { + if litRegenSize > maxCompressedLiteralSize { + // Exceptional + b.literalBuf = make([]byte, litRegenSize) + } else { + b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize) + + } + } + } + literals = b.literalBuf[:litRegenSize] + v := in[0] + for i := range literals { + literals[i] = v + } + in = in[1:] + if debug { + printf("Found %d RLE compressed literals\n", litRegenSize) + } + case literalsBlockTreeless: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return ErrBlockTooSmall + } + // Store compressed literals, so we defer decoding until we get history. + literals = in[:litCompSize] + in = in[litCompSize:] + if debug { + printf("Found %d compressed literals\n", litCompSize) + } + case literalsBlockCompressed: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return ErrBlockTooSmall + } + literals = in[:litCompSize] + in = in[litCompSize:] + huff = huffDecoderPool.Get().(*huff0.Scratch) + var err error + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize) + } else { + b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) + } + } + if huff == nil { + huff = &huff0.Scratch{} + } + huff, literals, err = huff0.ReadTable(literals, huff) + if err != nil { + println("reading huffman table:", err) + return err + } + // Use our out buffer. 
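+		// Compressed literals are stored as either one or four Huffman streams;
+		// four streams are decoded independently via Decompress4X, a single stream via Decompress1X.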
+ if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + if err != nil { + println("decoding compressed literals:", err) + return err + } + // Make sure we don't leak our literals buffer + if len(literals) != litRegenSize { + return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + if debug { + printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) + } + } + + // Decode Sequences + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section + if len(in) < 1 { + return ErrBlockTooSmall + } + seqHeader := in[0] + nSeqs := 0 + switch { + case seqHeader == 0: + in = in[1:] + case seqHeader < 128: + nSeqs = int(seqHeader) + in = in[1:] + case seqHeader < 255: + if len(in) < 2 { + return ErrBlockTooSmall + } + nSeqs = int(seqHeader-128)<<8 | int(in[1]) + in = in[2:] + case seqHeader == 255: + if len(in) < 3 { + return ErrBlockTooSmall + } + nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) + in = in[3:] + } + // Allocate sequences + if cap(b.sequenceBuf) < nSeqs { + if b.lowMem { + b.sequenceBuf = make([]seq, nSeqs) + } else { + // Allocate max + b.sequenceBuf = make([]seq, nSeqs, maxSequences) + } + } else { + // Reuse buffer + b.sequenceBuf = b.sequenceBuf[:nSeqs] + } + var seqs = &sequenceDecs{} + if nSeqs > 0 { + if len(in) < 1 { + return ErrBlockTooSmall + } + br := byteReader{b: in, off: 0} + compMode := br.Uint8() + br.advance(1) + if debug { + printf("Compression modes: 0b%b", compMode) + } + for i := uint(0); i < 3; i++ { + mode := seqCompMode((compMode >> (6 - i*2)) & 3) + if debug { + println("Table", tableIndex(i), "is", mode) + } + var seq *sequenceDec + switch tableIndex(i) { + case tableLiteralLengths: + seq = &seqs.litLengths + case tableOffsets: + seq = &seqs.offsets + case tableMatchLengths: + seq = &seqs.matchLengths + default: + panic("unknown table") + } + switch mode { + case compModePredefined: + seq.fse = &fsePredef[i] + case compModeRLE: + if br.remain() < 1 { + return ErrBlockTooSmall + } + v := br.Uint8() + br.advance(1) + dec := fseDecoderPool.Get().(*fseDecoder) + symb, err := decSymbolValue(v, symbolTableX[i]) + if err != nil { + printf("RLE Transform table (%v) error: %v", tableIndex(i), err) + return err + } + dec.setRLE(symb) + seq.fse = dec + if debug { + printf("RLE set to %+v, code: %v", symb, v) + } + case compModeFSE: + println("Reading table for", tableIndex(i)) + dec := fseDecoderPool.Get().(*fseDecoder) + err := dec.readNCount(&br, uint16(maxTableSymbol[i])) + if err != nil { + println("Read table error:", err) + return err + } + err = dec.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debug { + println("Read table ok", "symbolLen:", dec.symbolLen) + } + seq.fse = dec + case compModeRepeat: + seq.repeat = true + } + if br.overread() { + return io.ErrUnexpectedEOF + } + } + in = br.unread() + } + + // Wait for history. + // All time spent after this is critical since it is strictly sequential. + if hist == nil { + hist = <-b.history + if hist.error { + return ErrDecoderClosed + } + } + + // Decode treeless literal block. + if litType == literalsBlockTreeless { + // TODO: We could send the history early WITHOUT the stream history. + // This would allow decoding treeless literals before the byte history is available. 
+ // Silencia stats: Treeless 4393, with: 32775, total: 37168, 11% treeless. + // So not much obvious gain here. + + if hist.huffTree == nil { + return errors.New("literal block was treeless, but no history was defined") + } + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize) + } else { + b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) + } + } + var err error + // Use our out buffer. + huff = hist.huffTree + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + // Make sure we don't leak our literals buffer + if err != nil { + println("decompressing literals:", err) + return err + } + if len(literals) != litRegenSize { + return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + } else { + if hist.huffTree != nil && huff != nil { + if hist.dict == nil || hist.dict.litEnc != hist.huffTree { + huffDecoderPool.Put(hist.huffTree) + } + hist.huffTree = nil + } + } + if huff != nil { + hist.huffTree = huff + } + if debug { + println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.") + } + + if nSeqs == 0 { + // Decompressed content is defined entirely as Literals Section content. + b.dst = append(b.dst, literals...) + if delayedHistory { + hist.append(literals) + } + return nil + } + + seqs, err := seqs.mergeHistory(&hist.decoders) + if err != nil { + return err + } + if debug { + println("History merged ok") + } + br := &bitReader{} + if err := br.init(in); err != nil { + return err + } + + // TODO: Investigate if sending history without decoders are faster. + // This would allow the sequences to be decoded async and only have to construct stream history. + // If only recent offsets were not transferred, this would be an obvious win. + // Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded. + + hbytes := hist.b + if len(hbytes) > hist.windowSize { + hbytes = hbytes[len(hbytes)-hist.windowSize:] + // We do not need history any more. + if hist.dict != nil { + hist.dict.content = nil + } + } + + if err := seqs.initialize(br, hist, literals, b.dst); err != nil { + println("initializing sequences:", err) + return err + } + + err = seqs.decode(nSeqs, br, hbytes) + if err != nil { + return err + } + if !br.finished() { + return fmt.Errorf("%d extra bits on block, should be 0", br.remain()) + } + + err = br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + if len(b.data) > maxCompressedBlockSize { + return fmt.Errorf("compressed block size too large (%d)", len(b.data)) + } + // Set output and release references. + b.dst = seqs.out + seqs.out, seqs.literals, seqs.hist = nil, nil, nil + + if !delayedHistory { + // If we don't have delayed history, no need to update. + hist.recentOffsets = seqs.prevOffset + return nil + } + if b.Last { + // if last block we don't care about history. 
+ println("Last block, no history returned") + hist.b = hist.b[:0] + return nil + } + hist.append(b.dst) + hist.recentOffsets = seqs.prevOffset + if debug { + println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.") + } + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go new file mode 100644 index 000000000..9647c64e5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -0,0 +1,871 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math" + "math/bits" + + "github.com/klauspost/compress/huff0" +) + +type blockEnc struct { + size int + literals []byte + sequences []seq + coders seqCoders + litEnc *huff0.Scratch + dictLitEnc *huff0.Scratch + wr bitWriter + + extraLits int + output []byte + recentOffsets [3]uint32 + prevRecentOffsets [3]uint32 + + last bool + lowMem bool +} + +// init should be used once the block has been created. +// If called more than once, the effect is the same as calling reset. +func (b *blockEnc) init() { + if b.lowMem { + // 1K literals + if cap(b.literals) < 1<<10 { + b.literals = make([]byte, 0, 1<<10) + } + const defSeqs = 20 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + // 1K + if cap(b.output) < 1<<10 { + b.output = make([]byte, 0, 1<<10) + } + } else { + if cap(b.literals) < maxCompressedBlockSize { + b.literals = make([]byte, 0, maxCompressedBlockSize) + } + const defSeqs = 200 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + if cap(b.output) < maxCompressedBlockSize { + b.output = make([]byte, 0, maxCompressedBlockSize) + } + } + + if b.coders.mlEnc == nil { + b.coders.mlEnc = &fseEncoder{} + b.coders.mlPrev = &fseEncoder{} + b.coders.ofEnc = &fseEncoder{} + b.coders.ofPrev = &fseEncoder{} + b.coders.llEnc = &fseEncoder{} + b.coders.llPrev = &fseEncoder{} + } + b.litEnc = &huff0.Scratch{WantLogLess: 4} + b.reset(nil) +} + +// initNewEncode can be used to reset offsets and encoders to the initial state. +func (b *blockEnc) initNewEncode() { + b.recentOffsets = [3]uint32{1, 4, 8} + b.litEnc.Reuse = huff0.ReusePolicyNone + b.coders.setPrev(nil, nil, nil) +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) reset(prev *blockEnc) { + b.extraLits = 0 + b.literals = b.literals[:0] + b.size = 0 + b.sequences = b.sequences[:0] + b.output = b.output[:0] + b.last = false + if prev != nil { + b.recentOffsets = prev.prevRecentOffsets + } + b.dictLitEnc = nil +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) swapEncoders(prev *blockEnc) { + b.coders.swap(&prev.coders) + b.litEnc, prev.litEnc = prev.litEnc, b.litEnc +} + +// blockHeader contains the information for a block header. +type blockHeader uint32 + +// setLast sets the 'last' indicator on a block. 
+func (h *blockHeader) setLast(b bool) { + if b { + *h = *h | 1 + } else { + const mask = (1 << 24) - 2 + *h = *h & mask + } +} + +// setSize will store the compressed size of a block. +func (h *blockHeader) setSize(v uint32) { + const mask = 7 + *h = (*h)&mask | blockHeader(v<<3) +} + +// setType sets the block type. +func (h *blockHeader) setType(t blockType) { + const mask = 1 | (((1 << 24) - 1) ^ 7) + *h = (*h & mask) | blockHeader(t<<1) +} + +// appendTo will append the block header to a slice. +func (h blockHeader) appendTo(b []byte) []byte { + return append(b, uint8(h), uint8(h>>8), uint8(h>>16)) +} + +// String returns a string representation of the block. +func (h blockHeader) String() string { + return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1) +} + +// literalsHeader contains literals header information. +type literalsHeader uint64 + +// setType can be used to set the type of literal block. +func (h *literalsHeader) setType(t literalsBlockType) { + const mask = math.MaxUint64 - 3 + *h = (*h & mask) | literalsHeader(t) +} + +// setSize can be used to set a single size, for uncompressed and RLE content. +func (h *literalsHeader) setSize(regenLen int) { + inBits := bits.Len32(uint32(regenLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case inBits < 5: + lh |= (uint64(regenLen) << 3) | (1 << 60) + if debug { + got := int(lh>>3) & 0xff + if got != regenLen { + panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) + } + } + case inBits < 12: + lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60) + case inBits < 20: + lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60) + default: + panic(fmt.Errorf("internal error: block too big (%d)", regenLen)) + } + *h = literalsHeader(lh) +} + +// setSizes will set the size of a compressed literals section and the input length. +func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { + compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case compBits <= 10 && inBits <= 10: + if !single { + lh |= 1 << 2 + } + lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) + if debug { + const mmask = (1 << 24) - 1 + n := (lh >> 4) & mmask + if int(n&1023) != inLen { + panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits)) + } + if int(n>>10) != compLen { + panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits)) + } + } + case compBits <= 14 && inBits <= 14: + lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + case compBits <= 18 && inBits <= 18: + lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + default: + panic("internal error: block too big") + } + *h = literalsHeader(lh) +} + +// appendTo will append the literals header to a byte slice. 
+func (h literalsHeader) appendTo(b []byte) []byte { + size := uint8(h >> 60) + switch size { + case 1: + b = append(b, uint8(h)) + case 2: + b = append(b, uint8(h), uint8(h>>8)) + case 3: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16)) + case 4: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24)) + case 5: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32)) + default: + panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size)) + } + return b +} + +// size returns the output size with currently set values. +func (h literalsHeader) size() int { + return int(h >> 60) +} + +func (h literalsHeader) String() string { + return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60) +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) pushOffsets() { + b.prevRecentOffsets = b.recentOffsets +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) popOffsets() { + b.recentOffsets = b.prevRecentOffsets +} + +// matchOffset will adjust recent offsets and return the adjusted one, +// if it matches a previous offset. +func (b *blockEnc) matchOffset(offset, lits uint32) uint32 { + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. + if true { + if lits > 0 { + switch offset { + case b.recentOffsets[0]: + offset = 1 + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } else { + switch offset { + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 1 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[0] - 1: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } + } else { + offset += 3 + } + return offset +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRaw(a []byte) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(a))) + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output[:0]) + b.output = append(b.output, a...) + if debug { + println("Adding RAW block, length", len(a), "last:", b.last) + } +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRawTo(dst, src []byte) []byte { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(src))) + bh.setType(blockTypeRaw) + dst = bh.appendTo(dst) + dst = append(dst, src...) + if debug { + println("Adding RAW block, length", len(src), "last:", b.last) + } + return dst +} + +// encodeLits can be used if the block is only litLen. 
+func (b *blockEnc) encodeLits(lits []byte, raw bool) error { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(lits))) + + // Don't compress extremely small blocks + if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { + if debug { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + } + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(lits) >= 1024 { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(lits, b.litEnc) + } else if len(lits) > 32 { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(lits, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + switch err { + case huff0.ErrIncompressible: + if debug { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + case huff0.ErrUseRLE: + if debug { + println("Adding RLE block, length", len(lits)) + } + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits[0]) + return nil + default: + return err + case nil: + } + // Compressed... + // Now, allow reuse + b.litEnc.Reuse = huff0.ReusePolicyAllow + bh.setType(blockTypeCompressed) + var lh literalsHeader + if reUsed { + if debug { + println("Reused tree, compressed to", len(out)) + } + lh.setType(literalsBlockTreeless) + } else { + if debug { + println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + } + // Set sizes + lh.setSizes(len(out), len(lits), single) + bh.setSize(uint32(len(out) + lh.size() + 1)) + + // Write block headers. + b.output = bh.appendTo(b.output) + b.output = lh.appendTo(b.output) + // Add compressed data. + b.output = append(b.output, out...) + // No sequences. + b.output = append(b.output, 0) + return nil +} + +// fuzzFseEncoder can be used to fuzz the FSE encoder. +func fuzzFseEncoder(data []byte) int { + if len(data) > maxSequences || len(data) < 2 { + return 0 + } + enc := fseEncoder{} + hist := enc.Histogram()[:256] + maxSym := uint8(0) + for i, v := range data { + v = v & 63 + data[i] = v + hist[v]++ + if v > maxSym { + maxSym = v + } + } + if maxSym == 0 { + // All 0 + return 0 + } + maxCount := func(a []uint32) int { + var max uint32 + for _, v := range a { + if v > max { + max = v + } + } + return int(max) + } + cnt := maxCount(hist[:maxSym]) + if cnt == len(data) { + // RLE + return 0 + } + enc.HistogramFinished(maxSym, cnt) + err := enc.normalizeCount(len(data)) + if err != nil { + return 0 + } + _, err = enc.writeCount(nil) + if err != nil { + panic(err) + } + return 1 +} + +// encode will encode the block and append the output in b.output. +// Previous offset codes must be pushed if more blocks are expected. +func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { + if len(b.sequences) == 0 { + return b.encodeLits(b.literals, rawAllLits) + } + // We want some difference to at least account for the headers. 
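+	// saved estimates the bytes covered by matches (input size minus literal bytes),
+	// less about 3% of the block (size>>5) as slack.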
+ saved := b.size - len(b.literals) - (b.size >> 5) + if saved < 16 { + if org == nil { + return errIncompressible + } + b.popOffsets() + return b.encodeLits(org, rawAllLits) + } + + var bh blockHeader + var lh literalsHeader + bh.setLast(b.last) + bh.setType(blockTypeCompressed) + // Store offset of the block header. Needed when we know the size. + bhOffset := len(b.output) + b.output = bh.appendTo(b.output) + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(b.literals) >= 1024 && !raw { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) + } else if len(b.literals) > 32 && !raw { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + switch err { + case huff0.ErrIncompressible: + lh.setType(literalsBlockRaw) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals...) + if debug { + println("Adding literals RAW, length", len(b.literals)) + } + case huff0.ErrUseRLE: + lh.setType(literalsBlockRLE) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals[0]) + if debug { + println("Adding literals RLE") + } + default: + if debug { + println("Adding literals ERROR:", err) + } + return err + case nil: + // Compressed litLen... + if reUsed { + if debug { + println("reused tree") + } + lh.setType(literalsBlockTreeless) + } else { + if debug { + println("new tree, size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + if debug { + _, _, err := huff0.ReadTable(out, nil) + if err != nil { + panic(err) + } + } + } + lh.setSizes(len(out), len(b.literals), single) + if debug { + printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) + println("Adding literal header:", lh) + } + b.output = lh.appendTo(b.output) + b.output = append(b.output, out...) + b.litEnc.Reuse = huff0.ReusePolicyAllow + if debug { + println("Adding literals compressed") + } + } + // Sequence compression + + // Write the number of sequences + switch { + case len(b.sequences) < 128: + b.output = append(b.output, uint8(len(b.sequences))) + case len(b.sequences) < 0x7f00: // TODO: this could be wrong + n := len(b.sequences) + b.output = append(b.output, 128+uint8(n>>8), uint8(n)) + default: + n := len(b.sequences) - 0x7f00 + b.output = append(b.output, 255, uint8(n), uint8(n>>8)) + } + if debug { + println("Encoding", len(b.sequences), "sequences") + } + b.genCodes() + llEnc := b.coders.llEnc + ofEnc := b.coders.ofEnc + mlEnc := b.coders.mlEnc + err = llEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = ofEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = mlEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + + // Choose the best compression mode for each type. + // Will evaluate the new vs predefined and previous. + chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) { + // See if predefined/previous is better + hist := cur.count[:cur.symbolLen] + nSize := cur.approxSize(hist) + cur.maxHeaderSize() + predefSize := preDef.approxSize(hist) + prevSize := prev.approxSize(hist) + + // Add a small penalty for new encoders. + // Don't bother with extremely small (<2 byte gains). 
+ nSize = nSize + (nSize+2*8*16)>>4 + switch { + case predefSize <= prevSize && predefSize <= nSize || forcePreDef: + if debug { + println("Using predefined", predefSize>>3, "<=", nSize>>3) + } + return preDef, compModePredefined + case prevSize <= nSize: + if debug { + println("Using previous", prevSize>>3, "<=", nSize>>3) + } + return prev, compModeRepeat + default: + if debug { + println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") + println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) + } + return cur, compModeFSE + } + } + + // Write compression mode + var mode uint8 + if llEnc.useRLE { + mode |= uint8(compModeRLE) << 6 + llEnc.setRLE(b.sequences[0].llCode) + if debug { + println("llEnc.useRLE") + } + } else { + var m seqCompMode + llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths]) + mode |= uint8(m) << 6 + } + if ofEnc.useRLE { + mode |= uint8(compModeRLE) << 4 + ofEnc.setRLE(b.sequences[0].ofCode) + if debug { + println("ofEnc.useRLE") + } + } else { + var m seqCompMode + ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets]) + mode |= uint8(m) << 4 + } + + if mlEnc.useRLE { + mode |= uint8(compModeRLE) << 2 + mlEnc.setRLE(b.sequences[0].mlCode) + if debug { + println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) + } + } else { + var m seqCompMode + mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths]) + mode |= uint8(m) << 2 + } + b.output = append(b.output, mode) + if debug { + printf("Compression modes: 0b%b", mode) + } + b.output, err = llEnc.writeCount(b.output) + if err != nil { + return err + } + start := len(b.output) + b.output, err = ofEnc.writeCount(b.output) + if err != nil { + return err + } + if false { + println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount) + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen) + for i, v := range ofEnc.norm[:ofEnc.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v) + } + } + b.output, err = mlEnc.writeCount(b.output) + if err != nil { + return err + } + + // Maybe in block? + wr := &b.wr + wr.reset(b.output) + + var ll, of, ml cState + + // Current sequence + seq := len(b.sequences) - 1 + s := b.sequences[seq] + llEnc.setBits(llBitsTable[:]) + mlEnc.setBits(mlBitsTable[:]) + ofEnc.setBits(nil) + + llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256] + + // We have 3 bounds checks here (and in the loop). + // Since we are iterating backwards it is kinda hard to avoid. + llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] + ll.init(wr, &llEnc.ct, llB) + of.init(wr, &ofEnc.ct, ofB) + wr.flush32() + ml.init(wr, &mlEnc.ct, mlB) + + // Each of these lookups also generates a bounds check. 
+ wr.addBits32NC(s.litLen, llB.outBits) + wr.addBits32NC(s.matchLen, mlB.outBits) + wr.flush32() + wr.addBits32NC(s.offset, ofB.outBits) + if debugSequences { + println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) + } + seq-- + if llEnc.maxBits+mlEnc.maxBits+ofEnc.maxBits <= 32 { + // No need to flush (common) + for seq >= 0 { + s = b.sequences[seq] + wr.flush32() + llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] + // tabelog max is 8 for all. + of.encode(ofB) + ml.encode(mlB) + ll.encode(llB) + wr.flush32() + + // We checked that all can stay within 32 bits + wr.addBits32NC(s.litLen, llB.outBits) + wr.addBits32NC(s.matchLen, mlB.outBits) + wr.addBits32NC(s.offset, ofB.outBits) + + if debugSequences { + println("Encoded seq", seq, s) + } + + seq-- + } + } else { + for seq >= 0 { + s = b.sequences[seq] + wr.flush32() + llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] + // tabelog max is below 8 for each. + of.encode(ofB) + ml.encode(mlB) + ll.encode(llB) + wr.flush32() + + // ml+ll = max 32 bits total + wr.addBits32NC(s.litLen, llB.outBits) + wr.addBits32NC(s.matchLen, mlB.outBits) + wr.flush32() + wr.addBits32NC(s.offset, ofB.outBits) + + if debugSequences { + println("Encoded seq", seq, s) + } + + seq-- + } + } + ml.flush(mlEnc.actualTableLog) + of.flush(ofEnc.actualTableLog) + ll.flush(llEnc.actualTableLog) + err = wr.close() + if err != nil { + return err + } + b.output = wr.out + + if len(b.output)-3-bhOffset >= b.size { + // Maybe even add a bigger margin. + b.litEnc.Reuse = huff0.ReusePolicyNone + return errIncompressible + } + + // Size is output minus block header. + bh.setSize(uint32(len(b.output)-bhOffset) - 3) + if debug { + println("Rewriting block header", bh) + } + _ = bh.appendTo(b.output[bhOffset:bhOffset]) + b.coders.setPrev(llEnc, mlEnc, ofEnc) + return nil +} + +var errIncompressible = errors.New("incompressible") + +func (b *blockEnc) genCodes() { + if len(b.sequences) == 0 { + // nothing to do + return + } + + if len(b.sequences) > math.MaxUint16 { + panic("can only encode up to 64K sequences") + } + // No bounds checks after here: + llH := b.coders.llEnc.Histogram()[:256] + ofH := b.coders.ofEnc.Histogram()[:256] + mlH := b.coders.mlEnc.Histogram()[:256] + for i := range llH { + llH[i] = 0 + } + for i := range ofH { + ofH[i] = 0 + } + for i := range mlH { + mlH[i] = 0 + } + + var llMax, ofMax, mlMax uint8 + for i, seq := range b.sequences { + v := llCode(seq.litLen) + seq.llCode = v + llH[v]++ + if v > llMax { + llMax = v + } + + v = ofCode(seq.offset) + seq.ofCode = v + ofH[v]++ + if v > ofMax { + ofMax = v + } + + v = mlCode(seq.matchLen) + seq.mlCode = v + mlH[v]++ + if v > mlMax { + mlMax = v + if debugAsserts && mlMax > maxMatchLengthSymbol { + panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen)) + } + } + b.sequences[i] = seq + } + maxCount := func(a []uint32) int { + var max uint32 + for _, v := range a { + if v > max { + max = v + } + } + return int(max) + } + if debugAsserts && mlMax > maxMatchLengthSymbol { + panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax)) + } + if debugAsserts && ofMax > maxOffsetBits { + panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax)) + } + if debugAsserts && llMax > maxLiteralLengthSymbol { + panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax)) + } + + b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1])) + b.coders.ofEnc.HistogramFinished(ofMax, 
maxCount(ofH[:ofMax+1])) + b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1])) +} diff --git a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go new file mode 100644 index 000000000..01a01e486 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go @@ -0,0 +1,85 @@ +// Code generated by "stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT. + +package zstd + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[blockTypeRaw-0] + _ = x[blockTypeRLE-1] + _ = x[blockTypeCompressed-2] + _ = x[blockTypeReserved-3] +} + +const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved" + +var _blockType_index = [...]uint8{0, 12, 24, 43, 60} + +func (i blockType) String() string { + if i >= blockType(len(_blockType_index)-1) { + return "blockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _blockType_name[_blockType_index[i]:_blockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[literalsBlockRaw-0] + _ = x[literalsBlockRLE-1] + _ = x[literalsBlockCompressed-2] + _ = x[literalsBlockTreeless-3] +} + +const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless" + +var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76} + +func (i literalsBlockType) String() string { + if i >= literalsBlockType(len(_literalsBlockType_index)-1) { + return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[compModePredefined-0] + _ = x[compModeRLE-1] + _ = x[compModeFSE-2] + _ = x[compModeRepeat-3] +} + +const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat" + +var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54} + +func (i seqCompMode) String() string { + if i >= seqCompMode(len(_seqCompMode_index)-1) { + return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[tableLiteralLengths-0] + _ = x[tableOffsets-1] + _ = x[tableMatchLengths-2] +} + +const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths" + +var _tableIndex_index = [...]uint8{0, 19, 31, 48} + +func (i tableIndex) String() string { + if i >= tableIndex(len(_tableIndex_index)-1) { + return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]] +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go new file mode 100644 index 000000000..658ef7838 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -0,0 +1,127 @@ +// Copyright 2019+ Klaus Post. 
All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "io" + "io/ioutil" +) + +type byteBuffer interface { + // Read up to 8 bytes. + // Returns nil if no more input is available. + readSmall(n int) []byte + + // Read >8 bytes. + // MAY use the destination slice. + readBig(n int, dst []byte) ([]byte, error) + + // Read a single byte. + readByte() (byte, error) + + // Skip n bytes. + skipN(n int) error +} + +// in-memory buffer +type byteBuf []byte + +func (b *byteBuf) readSmall(n int) []byte { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + bb := *b + if len(bb) < n { + return nil + } + r := bb[:n] + *b = bb[n:] + return r +} + +func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) remain() []byte { + return *b +} + +func (b *byteBuf) readByte() (byte, error) { + bb := *b + if len(bb) < 1 { + return 0, nil + } + r := bb[0] + *b = bb[1:] + return r, nil +} + +func (b *byteBuf) skipN(n int) error { + bb := *b + if len(bb) < n { + return io.ErrUnexpectedEOF + } + *b = bb[n:] + return nil +} + +// wrapper around a reader. +type readerWrapper struct { + r io.Reader + tmp [8]byte +} + +func (r *readerWrapper) readSmall(n int) []byte { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + n2, err := io.ReadFull(r.r, r.tmp[:n]) + // We only really care about the actual bytes read. + if n2 != n { + if debug { + println("readSmall: got", n2, "want", n, "err", err) + } + return nil + } + return r.tmp[:n] +} + +func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { + if cap(dst) < n { + dst = make([]byte, n) + } + n2, err := io.ReadFull(r.r, dst[:n]) + if err == io.EOF && n > 0 { + err = io.ErrUnexpectedEOF + } + return dst[:n2], err +} + +func (r *readerWrapper) readByte() (byte, error) { + n2, err := r.r.Read(r.tmp[:1]) + if err != nil { + return 0, err + } + if n2 != 1 { + return 0, io.ErrUnexpectedEOF + } + return r.tmp[0], nil +} + +func (r *readerWrapper) skipN(n int) error { + n2, err := io.CopyN(ioutil.Discard, r.r, int64(n)) + if n2 != int64(n) { + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go new file mode 100644 index 000000000..2c4fca17f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go @@ -0,0 +1,88 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// overread returns whether we have advanced too far. +func (b *byteReader) overread() bool { + return b.off > len(b.b) +} + +// Int32 returns a little endian int32 starting at current offset. 
+func (b byteReader) Int32() int32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := int32(b2[3]) + v2 := int32(b2[2]) + v1 := int32(b2[1]) + v0 := int32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint8 returns the next byte +func (b *byteReader) Uint8() uint8 { + v := b.b[b.off] + return v +} + +// Uint32 returns a little endian uint32 starting at current offset. +func (b byteReader) Uint32() uint32 { + if r := b.remain(); r < 4 { + // Very rare + v := uint32(0) + for i := 1; i <= r; i++ { + v = (v << 8) | uint32(b.b[len(b.b)-i]) + } + return v + } + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint32NC returns a little endian uint32 starting at current offset. +// The caller must be sure if there are at least 4 bytes left. +func (b byteReader) Uint32NC() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go new file mode 100644 index 000000000..87896c5ea --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -0,0 +1,202 @@ +// Copyright 2020+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "bytes" + "errors" + "io" +) + +// HeaderMaxSize is the maximum size of a Frame and Block Header. +// If less is sent to Header.Decode it *may* still contain enough information. +const HeaderMaxSize = 14 + 3 + +// Header contains information about the first frame and block within that. +type Header struct { + // Window Size the window of data to keep while decoding. + // Will only be set if HasFCS is false. + WindowSize uint64 + + // Frame content size. + // Expected size of the entire frame. + FrameContentSize uint64 + + // Dictionary ID. + // If 0, no dictionary. + DictionaryID uint32 + + // First block information. + FirstBlock struct { + // OK will be set if first block could be decoded. + OK bool + + // Is this the last block of a frame? + Last bool + + // Is the data compressed? + // If true CompressedSize will be populated. + // Unfortunately DecompressedSize cannot be determined + // without decoding the blocks. + Compressed bool + + // DecompressedSize is the expected decompressed size of the block. + // Will be 0 if it cannot be determined. + DecompressedSize int + + // CompressedSize of the data in the block. + // Does not include the block header. + // Will be equal to DecompressedSize if not Compressed. + CompressedSize int + } + + // Skippable will be true if the frame is meant to be skipped. + // No other information will be populated. + Skippable bool + + // If set there is a checksum present for the block content. + HasCheckSum bool + + // If this is true FrameContentSize will have a valid value + HasFCS bool + + SingleSegment bool +} + +// Decode the header from the beginning of the stream. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. 
+// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. +func (h *Header) Decode(in []byte) error { + if len(in) < 4 { + return io.ErrUnexpectedEOF + } + b, in := in[:4], in[4:] + if !bytes.Equal(b, frameMagic) { + if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 { + return ErrMagicMismatch + } + *h = Header{Skippable: true} + return nil + } + if len(in) < 1 { + return io.ErrUnexpectedEOF + } + + // Clear output + *h = Header{} + fhd, in := in[0], in[1:] + h.SingleSegment = fhd&(1<<5) != 0 + h.HasCheckSum = fhd&(1<<2) != 0 + + if fhd&(1<<3) != 0 { + return errors.New("Reserved bit set on frame header") + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + if !h.SingleSegment { + if len(in) < 1 { + return io.ErrUnexpectedEOF + } + var wd byte + wd, in = in[0], in[1:] + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + h.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + if len(in) < int(size) { + return io.ErrUnexpectedEOF + } + b, in = in[:size], in[size:] + if b == nil { + return io.ErrUnexpectedEOF + } + switch size { + case 1: + h.DictionaryID = uint32(b[0]) + case 2: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if h.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + + if fcsSize > 0 { + h.HasFCS = true + if len(in) < fcsSize { + return io.ErrUnexpectedEOF + } + b, in = in[:fcsSize], in[fcsSize:] + if b == nil { + return io.ErrUnexpectedEOF + } + switch fcsSize { + case 1: + h.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + h.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + } + + // Frame Header done, we will not fail from now on. + if len(in) < 3 { + return nil + } + tmp, in := in[:3], in[3:] + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + h.FirstBlock.Last = bh&1 != 0 + blockType := blockType((bh >> 1) & 3) + // find size. 
+ cSize := int(bh >> 3) + switch blockType { + case blockTypeReserved: + return nil + case blockTypeRLE: + h.FirstBlock.Compressed = true + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = 1 + case blockTypeCompressed: + h.FirstBlock.Compressed = true + h.FirstBlock.CompressedSize = cSize + case blockTypeRaw: + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = cSize + default: + panic("Invalid block type") + } + + h.FirstBlock.OK = true + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go new file mode 100644 index 000000000..1d41c25d2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -0,0 +1,561 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "io" + "sync" +) + +// Decoder provides decoding of zstandard streams. +// The decoder has been designed to operate without allocations after a warmup. +// This means that you should store the decoder for best performance. +// To re-use a stream decoder, use the Reset(r io.Reader) error to switch to another stream. +// A decoder can safely be re-used even if the previous stream failed. +// To release the resources, you must call the Close() function on a decoder. +type Decoder struct { + o decoderOptions + + // Unreferenced decoders, ready for use. + decoders chan *blockDec + + // Streams ready to be decoded. + stream chan decodeStream + + // Current read position used for Reader functionality. + current decoderState + + // Custom dictionaries. + // Always uses copies. + dicts map[uint32]dict + + // streamWg is the waitgroup for all streams + streamWg sync.WaitGroup +} + +// decoderState is used for maintaining state when the decoder +// is used for streaming. +type decoderState struct { + // current block being written to stream. + decodeOutput + + // output in order to be written to stream. + output chan decodeOutput + + // cancel remaining output. + cancel chan struct{} + + flushed bool +} + +var ( + // Check the interfaces we want to support. + _ = io.WriterTo(&Decoder{}) + _ = io.Reader(&Decoder{}) +) + +// NewReader creates a new decoder. +// A nil Reader can be provided in which case Reset can be used to start a decode. +// +// A Decoder can be used in two modes: +// +// 1) As a stream, or +// 2) For stateless decoding using DecodeAll. +// +// Only a single stream can be decoded concurrently, but the same decoder +// can run multiple concurrent stateless decodes. It is even possible to +// use stateless decodes while a stream is being decoded. +// +// The Reset function can be used to initiate a new stream, which is will considerably +// reduce the allocations normally caused by NewReader. +func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { + initPredefined() + var d Decoder + d.o.setDefault() + for _, o := range opts { + err := o(&d.o) + if err != nil { + return nil, err + } + } + d.current.output = make(chan decodeOutput, d.o.concurrent) + d.current.flushed = true + + if r == nil { + d.current.err = ErrDecoderNilInput + } + + // Transfer option dicts. 
+ d.dicts = make(map[uint32]dict, len(d.o.dicts)) + for _, dc := range d.o.dicts { + d.dicts[dc.id] = dc + } + d.o.dicts = nil + + // Create decoders + d.decoders = make(chan *blockDec, d.o.concurrent) + for i := 0; i < d.o.concurrent; i++ { + dec := newBlockDec(d.o.lowMem) + dec.localFrame = newFrameDec(d.o) + d.decoders <- dec + } + + if r == nil { + return &d, nil + } + return &d, d.Reset(r) +} + +// Read bytes from the decompressed stream into p. +// Returns the number of bytes written and any error that occurred. +// When the stream is done, io.EOF will be returned. +func (d *Decoder) Read(p []byte) (int, error) { + if d.stream == nil { + return 0, ErrDecoderNilInput + } + var n int + for { + if len(d.current.b) > 0 { + filled := copy(p, d.current.b) + p = p[filled:] + d.current.b = d.current.b[filled:] + n += filled + } + if len(p) == 0 { + break + } + if len(d.current.b) == 0 { + // We have an error and no more data + if d.current.err != nil { + break + } + if !d.nextBlock(n == 0) { + return n, nil + } + } + } + if len(d.current.b) > 0 { + if debug { + println("returning", n, "still bytes left:", len(d.current.b)) + } + // Only return error at end of block + return n, nil + } + if d.current.err != nil { + d.drainOutput() + } + if debug { + println("returning", n, d.current.err, len(d.decoders)) + } + return n, d.current.err +} + +// Reset will reset the decoder the supplied stream after the current has finished processing. +// Note that this functionality cannot be used after Close has been called. +// Reset can be called with a nil reader to release references to the previous reader. +// After being called with a nil reader, no other operations than Reset or DecodeAll or Close +// should be used. +func (d *Decoder) Reset(r io.Reader) error { + if d.current.err == ErrDecoderClosed { + return d.current.err + } + + d.drainOutput() + + if r == nil { + d.current.err = ErrDecoderNilInput + d.current.flushed = true + return nil + } + + if d.stream == nil { + d.stream = make(chan decodeStream, 1) + d.streamWg.Add(1) + go d.startStreamDecoder(d.stream) + } + + // If bytes buffer and < 1MB, do sync decoding anyway. + if bb, ok := r.(byter); ok && bb.Len() < 1<<20 { + var bb2 byter + bb2 = bb + if debug { + println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) + } + b := bb2.Bytes() + var dst []byte + if cap(d.current.b) > 0 { + dst = d.current.b + } + + dst, err := d.DecodeAll(b, dst[:0]) + if err == nil { + err = io.EOF + } + d.current.b = dst + d.current.err = err + d.current.flushed = true + if debug { + println("sync decode to", len(dst), "bytes, err:", err) + } + return nil + } + + // Remove current block. + d.current.decodeOutput = decodeOutput{} + d.current.err = nil + d.current.cancel = make(chan struct{}) + d.current.flushed = false + d.current.d = nil + + d.stream <- decodeStream{ + r: r, + output: d.current.output, + cancel: d.current.cancel, + } + return nil +} + +// drainOutput will drain the output until errEndOfStream is sent. 
+func (d *Decoder) drainOutput() { + if d.current.cancel != nil { + println("cancelling current") + close(d.current.cancel) + d.current.cancel = nil + } + if d.current.d != nil { + if debug { + printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) + } + d.decoders <- d.current.d + d.current.d = nil + d.current.b = nil + } + if d.current.output == nil || d.current.flushed { + println("current already flushed") + return + } + for { + select { + case v := <-d.current.output: + if v.d != nil { + if debug { + printf("re-adding decoder %p", v.d) + } + d.decoders <- v.d + } + if v.err == errEndOfStream { + println("current flushed") + d.current.flushed = true + return + } + } + } +} + +// WriteTo writes data to w until there's no more data to write or when an error occurs. +// The return value n is the number of bytes written. +// Any error encountered during the write is also returned. +func (d *Decoder) WriteTo(w io.Writer) (int64, error) { + if d.stream == nil { + return 0, ErrDecoderNilInput + } + var n int64 + for { + if len(d.current.b) > 0 { + n2, err2 := w.Write(d.current.b) + n += int64(n2) + if err2 != nil && d.current.err == nil { + d.current.err = err2 + break + } + } + if d.current.err != nil { + break + } + d.nextBlock(true) + } + err := d.current.err + if err != nil { + d.drainOutput() + } + if err == io.EOF { + err = nil + } + return n, err +} + +// DecodeAll allows stateless decoding of a blob of bytes. +// Output will be appended to dst, so if the destination size is known +// you can pre-allocate the destination slice to avoid allocations. +// DecodeAll can be used concurrently. +// The Decoder concurrency limits will be respected. +func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { + if d.current.err == ErrDecoderClosed { + return dst, ErrDecoderClosed + } + + // Grab a block decoder and frame decoder. + block := <-d.decoders + frame := block.localFrame + defer func() { + if debug { + printf("re-adding decoder: %p", block) + } + frame.rawInput = nil + frame.bBuf = nil + d.decoders <- block + }() + frame.bBuf = input + + for { + frame.history.reset() + err := frame.reset(&frame.bBuf) + if err == io.EOF { + if debug { + println("frame reset return EOF") + } + return dst, nil + } + if frame.DictionaryID != nil { + dict, ok := d.dicts[*frame.DictionaryID] + if !ok { + return nil, ErrUnknownDictionary + } + frame.history.setDict(&dict) + } + if err != nil { + return dst, err + } + if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) { + return dst, ErrDecoderSizeExceeded + } + if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 { + // Never preallocate moe than 1 GB up front. + if cap(dst)-len(dst) < int(frame.FrameContentSize) { + dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)) + copy(dst2, dst) + dst = dst2 + } + } + if cap(dst) == 0 { + // Allocate len(input) * 2 by default if nothing is provided + // and we didn't get frame content size. + size := len(input) * 2 + // Cap to 1 MB. + if size > 1<<20 { + size = 1 << 20 + } + if uint64(size) > d.o.maxDecodedSize { + size = int(d.o.maxDecodedSize) + } + dst = make([]byte, 0, size) + } + + dst, err = frame.runDecoder(dst, block) + if err != nil { + return dst, err + } + if len(frame.bBuf) == 0 { + if debug { + println("frame dbuf empty") + } + break + } + } + return dst, nil +} + +// nextBlock returns the next block. +// If an error occurs d.err will be set. +// Optionally the function can block for new output. 
+// If non-blocking mode is used the returned boolean will be false +// if no data was available without blocking. +func (d *Decoder) nextBlock(blocking bool) (ok bool) { + if d.current.d != nil { + if debug { + printf("re-adding current decoder %p", d.current.d) + } + d.decoders <- d.current.d + d.current.d = nil + } + if d.current.err != nil { + // Keep error state. + return blocking + } + + if blocking { + d.current.decodeOutput = <-d.current.output + } else { + select { + case d.current.decodeOutput = <-d.current.output: + default: + return false + } + } + if debug { + println("got", len(d.current.b), "bytes, error:", d.current.err) + } + return true +} + +// Close will release all resources. +// It is NOT possible to reuse the decoder after this. +func (d *Decoder) Close() { + if d.current.err == ErrDecoderClosed { + return + } + d.drainOutput() + if d.stream != nil { + close(d.stream) + d.streamWg.Wait() + d.stream = nil + } + if d.decoders != nil { + close(d.decoders) + for dec := range d.decoders { + dec.Close() + } + d.decoders = nil + } + if d.current.d != nil { + d.current.d.Close() + d.current.d = nil + } + d.current.err = ErrDecoderClosed +} + +// IOReadCloser returns the decoder as an io.ReadCloser for convenience. +// Any changes to the decoder will be reflected, so the returned ReadCloser +// can be reused along with the decoder. +// io.WriterTo is also supported by the returned ReadCloser. +func (d *Decoder) IOReadCloser() io.ReadCloser { + return closeWrapper{d: d} +} + +// closeWrapper wraps a function call as a closer. +type closeWrapper struct { + d *Decoder +} + +// WriteTo forwards WriteTo calls to the decoder. +func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) { + return c.d.WriteTo(w) +} + +// Read forwards read calls to the decoder. +func (c closeWrapper) Read(p []byte) (n int, err error) { + return c.d.Read(p) +} + +// Close closes the decoder. +func (c closeWrapper) Close() error { + c.d.Close() + return nil +} + +type decodeOutput struct { + d *blockDec + b []byte + err error +} + +type decodeStream struct { + r io.Reader + + // Blocks ready to be written to output. + output chan decodeOutput + + // cancel reading from the input + cancel chan struct{} +} + +// errEndOfStream indicates that everything from the stream was read. +var errEndOfStream = errors.New("end-of-stream") + +// Create Decoder: +// Spawn n block decoders. These accept tasks to decode a block. +// Create goroutine that handles stream processing, this will send history to decoders as they are available. +// Decoders update the history as they decode. +// When a block is returned: +// a) history is sent to the next decoder, +// b) content written to CRC. +// c) return data to WRITER. +// d) wait for next block to return data. +// Once WRITTEN, the decoders reused by the writer frame decoder for re-use. 
+func (d *Decoder) startStreamDecoder(inStream chan decodeStream) { + defer d.streamWg.Done() + frame := newFrameDec(d.o) + for stream := range inStream { + if debug { + println("got new stream") + } + br := readerWrapper{r: stream.r} + decodeStream: + for { + frame.history.reset() + err := frame.reset(&br) + if debug && err != nil { + println("Frame decoder returned", err) + } + if err == nil && frame.DictionaryID != nil { + dict, ok := d.dicts[*frame.DictionaryID] + if !ok { + err = ErrUnknownDictionary + } else { + frame.history.setDict(&dict) + } + } + if err != nil { + stream.output <- decodeOutput{ + err: err, + } + break + } + if debug { + println("starting frame decoder") + } + + // This goroutine will forward history between frames. + frame.frameDone.Add(1) + frame.initAsync() + + go frame.startDecoder(stream.output) + decodeFrame: + // Go through all blocks of the frame. + for { + dec := <-d.decoders + select { + case <-stream.cancel: + if !frame.sendErr(dec, io.EOF) { + // To not let the decoder dangle, send it back. + stream.output <- decodeOutput{d: dec} + } + break decodeStream + default: + } + err := frame.next(dec) + switch err { + case io.EOF: + // End of current frame, no error + println("EOF on next block") + break decodeFrame + case nil: + continue + default: + println("block decoder returned", err) + break decodeStream + } + } + // All blocks have started decoding, check if there are more frames. + println("waiting for done") + frame.frameDone.Wait() + println("done waiting...") + } + frame.frameDone.Wait() + println("Sending EOS") + stream.output <- decodeOutput{err: errEndOfStream} + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go new file mode 100644 index 000000000..284d38449 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -0,0 +1,84 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "runtime" +) + +// DOption is an option for creating a decoder. +type DOption func(*decoderOptions) error + +// options retains accumulated state of multiple options. +type decoderOptions struct { + lowMem bool + concurrent int + maxDecodedSize uint64 + dicts []dict +} + +func (o *decoderOptions) setDefault() { + *o = decoderOptions{ + // use less ram: true for now, but may change. + lowMem: true, + concurrent: runtime.GOMAXPROCS(0), + } + o.maxDecodedSize = 1 << 63 +} + +// WithDecoderLowmem will set whether to use a lower amount of memory, +// but possibly have to allocate more while running. +func WithDecoderLowmem(b bool) DOption { + return func(o *decoderOptions) error { o.lowMem = b; return nil } +} + +// WithDecoderConcurrency will set the concurrency, +// meaning the maximum number of decoders to run concurrently. +// The value supplied must be at least 1. +// By default this will be set to GOMAXPROCS. +func WithDecoderConcurrency(n int) DOption { + return func(o *decoderOptions) error { + if n <= 0 { + return fmt.Errorf("Concurrency must be at least 1") + } + o.concurrent = n + return nil + } +} + +// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory +// non-streaming operations or maximum window size for streaming operations. +// This can be used to control memory usage of potentially hostile content. 
+// For streaming operations, the maximum window size is capped at 1<<30 bytes. +// Maximum and default is 1 << 63 bytes. +func WithDecoderMaxMemory(n uint64) DOption { + return func(o *decoderOptions) error { + if n == 0 { + return errors.New("WithDecoderMaxMemory must be at least 1") + } + if n > 1<<63 { + return fmt.Errorf("WithDecoderMaxmemory must be less than 1 << 63") + } + o.maxDecodedSize = n + return nil + } +} + +// WithDecoderDicts allows to register one or more dictionaries for the decoder. +// If several dictionaries with the same ID is provided the last one will be used. +func WithDecoderDicts(dicts ...[]byte) DOption { + return func(o *decoderOptions) error { + for _, b := range dicts { + d, err := loadDict(b) + if err != nil { + return err + } + o.dicts = append(o.dicts, *d) + } + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go new file mode 100644 index 000000000..fa25a18d8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -0,0 +1,122 @@ +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + + "github.com/klauspost/compress/huff0" +) + +type dict struct { + id uint32 + + litEnc *huff0.Scratch + llDec, ofDec, mlDec sequenceDec + //llEnc, ofEnc, mlEnc []*fseEncoder + offsets [3]int + content []byte +} + +var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec} + +// ID returns the dictionary id or 0 if d is nil. +func (d *dict) ID() uint32 { + if d == nil { + return 0 + } + return d.id +} + +// DictContentSize returns the dictionary content size or 0 if d is nil. +func (d *dict) DictContentSize() int { + if d == nil { + return 0 + } + return len(d.content) +} + +// Load a dictionary as described in +// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format +func loadDict(b []byte) (*dict, error) { + // Check static field size. + if len(b) <= 8+(3*4) { + return nil, io.ErrUnexpectedEOF + } + d := dict{ + llDec: sequenceDec{fse: &fseDecoder{}}, + ofDec: sequenceDec{fse: &fseDecoder{}}, + mlDec: sequenceDec{fse: &fseDecoder{}}, + } + if !bytes.Equal(b[:4], dictMagic[:]) { + return nil, ErrMagicMismatch + } + d.id = binary.LittleEndian.Uint32(b[4:8]) + if d.id == 0 { + return nil, errors.New("dictionaries cannot have ID 0") + } + + // Read literal table + var err error + d.litEnc, b, err = huff0.ReadTable(b[8:], nil) + if err != nil { + return nil, err + } + d.litEnc.Reuse = huff0.ReusePolicyMust + + br := byteReader{ + b: b, + off: 0, + } + readDec := func(i tableIndex, dec *fseDecoder) error { + if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil { + return err + } + if br.overread() { + return io.ErrUnexpectedEOF + } + err = dec.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debug { + println("Read table ok", "symbolLen:", dec.symbolLen) + } + // Set decoders as predefined so they aren't reused. 
+ dec.preDefined = true + return nil + } + + if err := readDec(tableOffsets, d.ofDec.fse); err != nil { + return nil, err + } + if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil { + return nil, err + } + if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil { + return nil, err + } + if br.remain() < 12 { + return nil, io.ErrUnexpectedEOF + } + + d.offsets[0] = int(br.Uint32()) + br.advance(4) + d.offsets[1] = int(br.Uint32()) + br.advance(4) + d.offsets[2] = int(br.Uint32()) + br.advance(4) + if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 { + return nil, errors.New("invalid offset in dictionary") + } + d.content = make([]byte, br.remain()) + copy(d.content, br.unread()) + if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) { + return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets) + } + + return &d, nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go new file mode 100644 index 000000000..2d4d893eb --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -0,0 +1,177 @@ +package zstd + +import ( + "fmt" + "math/bits" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +const ( + dictShardBits = 6 +) + +type fastBase struct { + // cur is the offset at the start of hist + cur int32 + // maximum offset. Should be at least 2x block size. + maxMatchOff int32 + hist []byte + crc *xxhash.Digest + tmp [8]byte + blk *blockEnc + lastDictID uint32 + lowMem bool +} + +// CRC returns the underlying CRC writer. +func (e *fastBase) CRC() *xxhash.Digest { + return e.crc +} + +// AppendCRC will append the CRC to the destination slice and return it. +func (e *fastBase) AppendCRC(dst []byte) []byte { + crc := e.crc.Sum(e.tmp[:0]) + dst = append(dst, crc[7], crc[6], crc[5], crc[4]) + return dst +} + +// WindowSize returns the window size of the encoder, +// or a window size small enough to contain the input size, if > 0. +func (e *fastBase) WindowSize(size int) int32 { + if size > 0 && size < int(e.maxMatchOff) { + b := int32(1) << uint(bits.Len(uint(size))) + // Keep minimum window. + if b < 1024 { + b = 1024 + } + return b + } + return e.maxMatchOff +} + +// Block returns the current block. +func (e *fastBase) Block() *blockEnc { + return e.blk +} + +func (e *fastBase) addBlock(src []byte) int32 { + if debugAsserts && e.cur > bufferReset { + panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset)) + } + // check if we have space already + if len(e.hist)+len(src) > cap(e.hist) { + if cap(e.hist) == 0 { + e.ensureHist(len(src)) + } else { + if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) { + panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff)) + } + // Move down + offset := int32(len(e.hist)) - e.maxMatchOff + copy(e.hist[0:e.maxMatchOff], e.hist[offset:]) + e.cur += offset + e.hist = e.hist[:e.maxMatchOff] + } + } + s := int32(len(e.hist)) + e.hist = append(e.hist, src...) + return s +} + +// ensureHist will ensure that history can keep at least this many bytes. 
+func (e *fastBase) ensureHist(n int) { + if cap(e.hist) >= n { + return + } + l := e.maxMatchOff + if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize { + l += maxCompressedBlockSize + } else { + l += e.maxMatchOff + } + // Make it at least 1MB. + if l < 1<<20 && !e.lowMem { + l = 1 << 20 + } + // Make it at least the requested size. + if l < int32(n) { + l = int32(n) + } + e.hist = make([]byte, 0, l) +} + +// useBlock will replace the block with the provided one, +// but transfer recent offsets from the previous. +func (e *fastBase) UseBlock(enc *blockEnc) { + enc.reset(e.blk) + e.blk = enc +} + +func (e *fastBase) matchlenNoHist(s, t int32, src []byte) int32 { + // Extend the match to be as long as possible. + return int32(matchLen(src[s:], src[t:])) +} + +func (e *fastBase) matchlen(s, t int32, src []byte) int32 { + if debugAsserts { + if s < 0 { + err := fmt.Sprintf("s (%d) < 0", s) + panic(err) + } + if t < 0 { + err := fmt.Sprintf("s (%d) < 0", s) + panic(err) + } + if s-t > e.maxMatchOff { + err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff) + panic(err) + } + if len(src)-int(s) > maxCompressedBlockSize { + panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) + } + } + + // Extend the match to be as long as possible. + return int32(matchLen(src[s:], src[t:])) +} + +// Reset the encoding table. +func (e *fastBase) resetBase(d *dict, singleBlock bool) { + if e.blk == nil { + e.blk = &blockEnc{lowMem: e.lowMem} + e.blk.init() + } else { + e.blk.reset(nil) + } + e.blk.initNewEncode() + if e.crc == nil { + e.crc = xxhash.New() + } else { + e.crc.Reset() + } + if (!singleBlock || d.DictContentSize() > 0) && cap(e.hist) < int(e.maxMatchOff*2)+d.DictContentSize() { + l := e.maxMatchOff*2 + int32(d.DictContentSize()) + // Make it at least 1MB. + if l < 1<<20 { + l = 1 << 20 + } + e.hist = make([]byte, 0, l) + } + // We offset current position so everything will be out of reach. + // If above reset line, history will be purged. + if e.cur < bufferReset { + e.cur += e.maxMatchOff + int32(len(e.hist)) + } + e.hist = e.hist[:0] + if d != nil { + // Set offsets (currently not used) + for i, off := range d.offsets { + e.blk.recentOffsets[i] = uint32(off) + e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i] + } + // Transfer litenc. + e.blk.dictLitEnc = d.litEnc + e.hist = append(e.hist, d.content...) + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go new file mode 100644 index 000000000..fe3625c5f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -0,0 +1,487 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "math/bits" +) + +const ( + bestLongTableBits = 20 // Bits used in the long match table + bestLongTableSize = 1 << bestLongTableBits // Size of the table + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. 
+ bestShortTableBits = 16 // Bits used in the short match table + bestShortTableSize = 1 << bestShortTableBits // Size of the table +) + +// bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type bestFastEncoder struct { + fastBase + table [bestShortTableSize]prevEntry + longTable [bestLongTableSize]prevEntry + dictTable []prevEntry + dictLongTable []prevEntry +} + +// Encode improves compression... +func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 4 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = prevEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + v2 := e.table[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.table[i] = prevEntry{ + offset: v, + prev: v2, + } + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + const kSearchStrength = 10 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + offset3 := int32(blk.recentOffsets[2]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) 
+ s.litLen = uint32(until - nextEmit) + } + _ = addLiterals + + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + type match struct { + offset int32 + s int32 + length int32 + rep int32 + } + matchAt := func(offset int32, s int32, first uint32, rep int32) match { + if s-offset >= e.maxMatchOff || load3232(src, offset) != first { + return match{offset: offset, s: s} + } + return match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep} + } + + bestOf := func(a, b match) match { + aScore := b.s - a.s + a.length + bScore := a.s - b.s + b.length + if a.rep < 0 { + aScore = aScore - int32(bits.Len32(uint32(a.offset)))/8 + } + if b.rep < 0 { + bScore = bScore - int32(bits.Len32(uint32(b.offset)))/8 + } + if aScore >= bScore { + return a + } + return b + } + const goodEnough = 100 + + nextHashL := hash8(cv, bestLongTableBits) + nextHashS := hash4x64(cv, bestShortTableBits) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)) + if canRepeat && best.length < goodEnough { + best = bestOf(best, matchAt(s-offset1+1, s+1, uint32(cv>>8), 1)) + best = bestOf(best, matchAt(s-offset2+1, s+1, uint32(cv>>8), 2)) + best = bestOf(best, matchAt(s-offset3+1, s+1, uint32(cv>>8), 3)) + if best.length > 0 { + best = bestOf(best, matchAt(s-offset1+3, s+3, uint32(cv>>24), 1)) + best = bestOf(best, matchAt(s-offset2+3, s+3, uint32(cv>>24), 2)) + best = bestOf(best, matchAt(s-offset3+3, s+3, uint32(cv>>24), 3)) + } + } + // Load next and check... + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} + e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} + + // Look far ahead, unless we have a really long match already... + if best.length < goodEnough { + // No match found, move forward on input, no need to check forward... + if best.length < 4 { + s += 1 + (s-nextEmit)>>(kSearchStrength-1) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + continue + } + + s++ + candidateS = e.table[hash4x64(cv>>8, bestShortTableBits)] + cv = load6432(src, s) + cv2 := load6432(src, s+1) + candidateL = e.longTable[hash8(cv, bestLongTableBits)] + candidateL2 := e.longTable[hash8(cv2, bestLongTableBits)] + + best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)) + best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)) + } + + // We have a match, we can store the forward value + if best.rep > 0 { + s = best.s + var seq seq + seq.matchLen = uint32(best.length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := best.s + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + repIndex := best.offset + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = uint32(best.rep) + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + s = best.s + best.length + + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, best.length) + + } + break encodeLoop + } + // Index skipped... + off := index0 + e.cur + for index0 < s-1 { + cv0 := load6432(src, index0) + h0 := hash8(cv0, bestLongTableBits) + h1 := hash4x64(cv0, bestShortTableBits) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + off++ + index0++ + } + switch best.rep { + case 2: + offset1, offset2 = offset2, offset1 + case 3: + offset1, offset2, offset3 = offset3, offset1, offset2 + } + cv = load6432(src, s) + continue + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + s = best.s + t := best.offset + offset1, offset2, offset3 = s-t, offset1, offset2 + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := best.length + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + // every entry + for index0 < s-1 { + cv0 := load6432(src, index0) + h0 := hash8(cv0, bestLongTableBits) + h1 := hash4x64(cv0, bestShortTableBits) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + index0++ + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hash4x64(cv, bestShortTableBits) + nextHashL := hash8(cv, bestLongTableBits) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. 
+ offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + blk.recentOffsets[2] = uint32(offset3) + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// ResetDict will reset and set a dictionary if not nil +func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]prevEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = bestShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hash4x64(cv, hashLog) // 0 -> 4 + nextHash1 := hash4x64(cv>>8, hashLog) // 1 -> 5 + nextHash2 := hash4x64(cv>>16, hashLog) // 2 -> 6 + nextHash3 := hash4x64(cv>>24, hashLog) // 3 -> 7 + e.dictTable[nextHash] = prevEntry{ + prev: e.dictTable[nextHash].offset, + offset: i, + } + e.dictTable[nextHash1] = prevEntry{ + prev: e.dictTable[nextHash1].offset, + offset: i + 1, + } + e.dictTable[nextHash2] = prevEntry{ + prev: e.dictTable[nextHash2].offset, + offset: i + 2, + } + e.dictTable[nextHash3] = prevEntry{ + prev: e.dictTable[nextHash3].offset, + offset: i + 3, + } + } + e.lastDictID = d.id + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hash8(cv, bestLongTableBits) + e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hash8(cv, bestLongTableBits) + e.dictLongTable[h] = prevEntry{ + offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + } + // Reset table to initial state + copy(e.longTable[:], e.dictLongTable) + + e.cur = e.maxMatchOff + // Reset table to initial state + copy(e.table[:], e.dictTable) +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go new file mode 100644 index 000000000..c2ce4a2ba --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -0,0 +1,1170 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
+ +package zstd + +import "fmt" + +const ( + betterLongTableBits = 19 // Bits used in the long match table + betterLongTableSize = 1 << betterLongTableBits // Size of the table + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. + betterShortTableBits = 13 // Bits used in the short match table + betterShortTableSize = 1 << betterShortTableBits // Size of the table + + betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table + betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard + + betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table + betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard +) + +type prevEntry struct { + offset int32 + prev int32 +} + +// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type betterFastEncoder struct { + fastBase + table [betterShortTableSize]tableEntry + longTable [betterLongTableSize]prevEntry +} + +type betterFastEncoderDict struct { + betterFastEncoder + dictTable []tableEntry + dictLongTable []prevEntry + shortTableShardDirty [betterShortTableShardCnt]bool + longTableShardDirty [betterLongTableShardCnt]bool + allDirty bool +} + +// Encode improves compression... +func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. 
+ nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashS := hash5(cv, betterShortTableBits) + nextHashL := hash8(cv, betterLongTableBits) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + repOff + s += lenght + repOff + + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hash8(cv0, betterLongTableBits) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + index0 := s + repOff2 + s += lenght + repOff2 + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hash8(cv0, betterLongTableBits) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hash8(cv, betterLongTableBits) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... 
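+ // The long table also stores the previous entry for the same hash, giving a chain of length 2.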
+ coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hash8(cv0, betterLongTableBits) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hash5(cv, betterShortTableBits) + nextHashL := hash8(cv, betterLongTableBits) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Encode improves compression... +func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + e.allDirty = true + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.allDirty = true + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashS := hash5(cv, betterShortTableBits) + nextHashL := hash8(cv, betterLongTableBits) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. 
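+ // The 4 bytes at s+repOff match at distance offset1, so emit a repeat-offset sequence.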
+ var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + repOff + s += lenght + repOff + + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hash8(cv0, betterLongTableBits) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hash5(cv1, betterShortTableBits) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + index0 := s + repOff2 + s += lenght + repOff2 + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hash8(cv0, betterLongTableBits) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hash5(cv1, betterShortTableBits) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. 
+ matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hash8(cv, betterLongTableBits) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. 
+ l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hash8(cv0, betterLongTableBits) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hash5(cv1, betterShortTableBits) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hash5(cv, betterShortTableBits) + nextHashL := hash8(cv, betterLongTableBits) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d != nil { + panic("betterFastEncoder: Reset with dict") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]tableEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = betterShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hash5(cv, hashLog) // 0 -> 4 + nextHash1 := hash5(cv>>8, hashLog) // 1 -> 5 + nextHash2 := hash5(cv>>16, hashLog) // 2 -> 6 + nextHash3 := hash5(cv>>24, hashLog) // 3 -> 7 + e.dictTable[nextHash] = tableEntry{ + val: uint32(cv), + offset: i, + } + e.dictTable[nextHash1] = tableEntry{ + val: uint32(cv >> 8), + offset: i + 1, + } + e.dictTable[nextHash2] = tableEntry{ + val: uint32(cv >> 16), + offset: i + 2, + } + e.dictTable[nextHash3] = tableEntry{ + val: uint32(cv >> 24), + offset: i + 3, + } + } + e.lastDictID = d.id + e.allDirty = true + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hash8(cv, betterLongTableBits) + e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hash8(cv, betterLongTableBits) + e.dictLongTable[h] = prevEntry{ + offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + e.allDirty = true + } + + // Reset table to initial state + { + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.shortTableShardDirty { + if e.shortTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + const shardCnt = betterShortTableShardCnt + const shardSize = betterShortTableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.table[:], e.dictTable) + for i := range e.shortTableShardDirty { + e.shortTableShardDirty[i] = false + } + } else { + for i := range e.shortTableShardDirty { + if !e.shortTableShardDirty[i] { + continue + } + + copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + e.shortTableShardDirty[i] = false + } + } + } + { + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.shortTableShardDirty { + if e.shortTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + const shardCnt = betterLongTableShardCnt + const shardSize = betterLongTableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.longTable[:], e.dictLongTable) + for i := range e.longTableShardDirty { + e.longTableShardDirty[i] = false + } + } else { + for i := range e.longTableShardDirty { + if !e.longTableShardDirty[i] { + continue + } + + 
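+ // Only shards written to since the last reset are restored from the dictionary snapshot.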
copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize]) + e.longTableShardDirty[i] = false + } + } + } + e.cur = e.maxMatchOff + e.allDirty = false +} + +func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) { + e.longTableShardDirty[entryNum/betterLongTableShardSize] = true +} + +func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) { + e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go new file mode 100644 index 000000000..8629d43d8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -0,0 +1,1121 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + dFastLongTableBits = 17 // Bits used in the long match table + dFastLongTableSize = 1 << dFastLongTableBits // Size of the table + dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + + dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table + dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard + + dFastShortTableBits = tableBits // Bits used in the short match table + dFastShortTableSize = 1 << dFastShortTableBits // Size of the table + dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. +) + +type doubleFastEncoder struct { + fastEncoder + longTable [dFastLongTableSize]tableEntry +} + +type doubleFastEncoderDict struct { + fastEncoderDict + longTable [dFastLongTableSize]tableEntry + dictLongTable []tableEntry + longTableShardDirty [dLongTableShardCnt]bool +} + +// Encode mimmics functionality in zstd_dfast.c +func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. 
+ nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashS := hash5(cv, dFastShortTableBits) + nextHashL := hash8(cv, dFastLongTableBits) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += lenght + repOff + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hash8(cv, dFastLongTableBits) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. 
+ t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hash8(cv0, dFastLongTableBits)] = te0 + e.longTable[hash8(cv1, dFastLongTableBits)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hash5(cv0, dFastShortTableBits)] = te0 + e.table[hash5(cv1, dFastShortTableBits)] = te1 + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hash5(cv, dFastShortTableBits) + nextHashL := hash8(cv, dFastLongTableBits) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + if e.cur >= bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + for { + + nextHashS := hash5(cv, dFastShortTableBits) + nextHashL := hash8(cv, dFastLongTableBits) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if len(blk.sequences) > 2 { + if load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:])) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. 
+ // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hash8(cv, dFastLongTableBits) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + // Extend the 4-byte match as long as possible. + //l := e.matchlen(s+4, t+4, src) + 4 + l := int32(matchLen(src[s+4:], src[t+4:])) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hash8(cv0, dFastLongTableBits)] = te0 + e.longTable[hash8(cv1, dFastLongTableBits)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hash5(cv0, dFastShortTableBits)] = te0 + e.table[hash5(cv1, dFastShortTableBits)] = te1 + + cv = load6432(src, s) + + if len(blk.sequences) <= 2 { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hash5(cv1>>8, dFastShortTableBits) + nextHashL := hash8(cv, dFastLongTableBits) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlen(s+4, o2+4, src) + l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < bufferReset { + e.cur += int32(len(src)) + } +} + +// Encode will encode the content, with a dictionary if initialized for it. +func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
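+ // Entries older than minOff are cleared; the rest are rebased so they stay valid after e.cur is reset to maxMatchOff.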
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashS := hash5(cv, dFastShortTableBits) + nextHashL := hash8(cv, dFastLongTableBits) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += lenght + repOff + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. 
+ t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hash8(cv, dFastLongTableBits) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + e.markLongShardDirty(nextHashL) + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + longHash1 := hash8(cv0, dFastLongTableBits) + longHash2 := hash8(cv0, dFastLongTableBits) + e.longTable[longHash1] = te0 + e.longTable[longHash2] = te1 + e.markLongShardDirty(longHash1) + e.markLongShardDirty(longHash2) + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + hashVal1 := hash5(cv0, dFastShortTableBits) + hashVal2 := hash5(cv1, dFastShortTableBits) + e.table[hashVal1] = te0 + e.markShardDirty(hashVal1) + e.table[hashVal2] = te1 + e.markShardDirty(hashVal2) + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hash5(cv, dFastShortTableBits) + nextHashL := hash8(cv, dFastLongTableBits) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // If we encoded more than 64K mark all dirty. 
+ if len(src) > 64<<10 { + e.markAllShardsDirty() + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { + e.fastEncoder.Reset(d, singleBlock) + if d != nil { + panic("doubleFastEncoder: Reset with dict not supported") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { + allDirty := e.allDirty + e.fastEncoderDict.Reset(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]tableEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{ + val: uint32(cv), + offset: e.maxMatchOff, + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56) + e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{ + val: uint32(cv), + offset: i, + } + } + } + e.lastDictID = d.id + e.allDirty = true + } + // Reset table to initial state + e.cur = e.maxMatchOff + + dirtyShardCnt := 0 + if !allDirty { + for i := range e.longTableShardDirty { + if e.longTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + if allDirty || dirtyShardCnt > dLongTableShardCnt/2 { + copy(e.longTable[:], e.dictLongTable) + for i := range e.longTableShardDirty { + e.longTableShardDirty[i] = false + } + return + } + for i := range e.longTableShardDirty { + if !e.longTableShardDirty[i] { + continue + } + + copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) + e.longTableShardDirty[i] = false + } +} + +func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) { + e.longTableShardDirty[entryNum/dLongTableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go new file mode 100644 index 000000000..ba4a17e10 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -0,0 +1,1018 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "math" + "math/bits" +) + +const ( + tableBits = 15 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table + tableShardSize = tableSize / tableShardCnt // Size of an individual shard + tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + maxMatchLength = 131074 +) + +type tableEntry struct { + val uint32 + offset int32 +} + +type fastEncoder struct { + fastBase + table [tableSize]tableEntry +} + +type fastEncoderDict struct { + fastEncoder + dictTable []tableEntry + tableShardDirty [tableShardCnt]bool + allDirty bool +} + +// Encode mimmics functionality in zstd_fast.c +func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. 
+ for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 7 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hash6(cv, hashLog) + nextHash2 := hash6(cv>>8, hashLog) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + var length int32 + // length = 4 + e.matchlen(s+6, repIndex+4, src) + { + a := src[s+6:] + b := src[repIndex+4:] + endI := len(a) & (math.MaxInt32 - 7) + length = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + length = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + //l := e.matchlen(s+4, t+4, src) + 4 + var l int32 + { + a := src[s+4:] + b := src[t+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlen(s+4, o2+4, src) + var l int32 + { + a := src[s+4:] + b := src[o2+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Store this, since we have it. + nextHash := hash6(cv, hashLog) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. 
+ seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if debug { + if len(src) > maxBlockSize { + panic("src too big") + } + } + + // Protect against e.cur wraparound. + if e.cur >= bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + + for { + nextHash := hash6(cv, hashLog) + nextHash2 := hash6(cv>>8, hashLog) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + // length := 4 + e.matchlen(s+6, repIndex+4, src) + // length := 4 + int32(matchLen(src[s+6:], src[repIndex+4:])) + var length int32 + { + a := src[s+6:] + b := src[repIndex+4:] + endI := len(a) & (math.MaxInt32 - 7) + length = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + length = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff)) + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0 ", t)) + } + // Extend the 4-byte match as long as possible. + //l := e.matchlenNoHist(s+4, t+4, src) + 4 + // l := int32(matchLen(src[s+4:], src[t+4:])) + 4 + var l int32 + { + a := src[s+4:] + b := src[t+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlenNoHist(s+4, o2+4, src) + // l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) + var l int32 + { + a := src[s+4:] + b := src[o2+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Store this, since we have it. 
+ nextHash := hash6(cv, hashLog) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < bufferReset { + e.cur += int32(len(src)) + } +} + +// Encode will encode the content, with a dictionary if initialized for it. +func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if e.allDirty || len(src) > 32<<10 { + e.fastEncoder.Encode(blk, src) + e.allDirty = true + return + } + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 7 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debug { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. 
+ // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hash6(cv, hashLog) + nextHash2 := hash6(cv>>8, hashLog) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + e.markShardDirty(nextHash2) + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + var length int32 + // length = 4 + e.matchlen(s+6, repIndex+4, src) + { + a := src[s+6:] + b := src[repIndex+4:] + endI := len(a) & (math.MaxInt32 - 7) + length = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + length = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debug { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. 
+ //l := e.matchlen(s+4, t+4, src) + 4 + var l int32 + { + a := src[s+4:] + b := src[t+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlen(s+4, o2+4, src) + var l int32 + { + a := src[s+4:] + b := src[o2+4:] + endI := len(a) & (math.MaxInt32 - 7) + l = int32(endI) + 4 + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 + break + } + } + } + + // Store this, since we have it. + nextHash := hash6(cv, hashLog) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debug { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d != nil { + panic("fastEncoder: Reset with dict") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]tableEntry, len(e.table)) + } + if true { + end := e.maxMatchOff + int32(len(d.content)) - 8 + for i := e.maxMatchOff; i < end; i += 3 { + const hashLog = tableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hash6(cv, hashLog) // 0 -> 5 + nextHash1 := hash6(cv>>8, hashLog) // 1 -> 6 + nextHash2 := hash6(cv>>16, hashLog) // 2 -> 7 + e.dictTable[nextHash] = tableEntry{ + val: uint32(cv), + offset: i, + } + e.dictTable[nextHash1] = tableEntry{ + val: uint32(cv >> 8), + offset: i + 1, + } + e.dictTable[nextHash2] = tableEntry{ + val: uint32(cv >> 16), + offset: i + 2, + } + } + } + e.lastDictID = d.id + e.allDirty = true + } + + e.cur = e.maxMatchOff + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.tableShardDirty { + if e.tableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + const shardCnt = tableShardCnt + const shardSize = tableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.table[:], e.dictTable) + for i := range e.tableShardDirty { + e.tableShardDirty[i] = false + } + e.allDirty = false + return + } + for i := range e.tableShardDirty { + if !e.tableShardDirty[i] { + continue + } + + copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + e.tableShardDirty[i] = false + } + e.allDirty = false +} + +func (e *fastEncoderDict) markAllShardsDirty() { + e.allDirty = true +} + +func (e *fastEncoderDict) markShardDirty(entryNum uint32) { + e.tableShardDirty[entryNum/tableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go new file mode 100644 index 000000000..6f0265099 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -0,0 +1,576 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "crypto/rand" + "fmt" + "io" + rdebug "runtime/debug" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Encoder provides encoding to Zstandard. +// An Encoder can be used for either compressing a stream via the +// io.WriteCloser interface supported by the Encoder or as multiple independent +// tasks via the EncodeAll function. +// Smaller encodes are encouraged to use the EncodeAll function. +// Use NewWriter to create a new instance. 
+type Encoder struct { + o encoderOptions + encoders chan encoder + state encoderState + init sync.Once +} + +type encoder interface { + Encode(blk *blockEnc, src []byte) + EncodeNoHist(blk *blockEnc, src []byte) + Block() *blockEnc + CRC() *xxhash.Digest + AppendCRC([]byte) []byte + WindowSize(size int) int32 + UseBlock(*blockEnc) + Reset(d *dict, singleBlock bool) +} + +type encoderState struct { + w io.Writer + filling []byte + current []byte + previous []byte + encoder encoder + writing *blockEnc + err error + writeErr error + nWritten int64 + headerWritten bool + eofWritten bool + fullFrameWritten bool + + // This waitgroup indicates an encode is running. + wg sync.WaitGroup + // This waitgroup indicates we have a block encoding/writing. + wWg sync.WaitGroup +} + +// NewWriter will create a new Zstandard encoder. +// If the encoder will be used for encoding blocks a nil writer can be used. +func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) { + initPredefined() + var e Encoder + e.o.setDefault() + for _, o := range opts { + err := o(&e.o) + if err != nil { + return nil, err + } + } + if w != nil { + e.Reset(w) + } + return &e, nil +} + +func (e *Encoder) initialize() { + if e.o.concurrent == 0 { + e.o.setDefault() + } + e.encoders = make(chan encoder, e.o.concurrent) + for i := 0; i < e.o.concurrent; i++ { + enc := e.o.encoder() + e.encoders <- enc + } +} + +// Reset will re-initialize the writer and new writes will encode to the supplied writer +// as a new, independent stream. +func (e *Encoder) Reset(w io.Writer) { + s := &e.state + s.wg.Wait() + s.wWg.Wait() + if cap(s.filling) == 0 { + s.filling = make([]byte, 0, e.o.blockSize) + } + if cap(s.current) == 0 { + s.current = make([]byte, 0, e.o.blockSize) + } + if cap(s.previous) == 0 { + s.previous = make([]byte, 0, e.o.blockSize) + } + if s.encoder == nil { + s.encoder = e.o.encoder() + } + if s.writing == nil { + s.writing = &blockEnc{lowMem: e.o.lowMem} + s.writing.init() + } + s.writing.initNewEncode() + s.filling = s.filling[:0] + s.current = s.current[:0] + s.previous = s.previous[:0] + s.encoder.Reset(e.o.dict, false) + s.headerWritten = false + s.eofWritten = false + s.fullFrameWritten = false + s.w = w + s.err = nil + s.nWritten = 0 + s.writeErr = nil +} + +// Write data to the encoder. +// Input data will be buffered and as the buffer fills up +// content will be compressed and written to the output. +// When done writing, use Close to flush the remaining output +// and write CRC if requested. +func (e *Encoder) Write(p []byte) (n int, err error) { + s := &e.state + for len(p) > 0 { + if len(p)+len(s.filling) < e.o.blockSize { + if e.o.crc { + _, _ = s.encoder.CRC().Write(p) + } + s.filling = append(s.filling, p...) + return n + len(p), nil + } + add := p + if len(p)+len(s.filling) > e.o.blockSize { + add = add[:e.o.blockSize-len(s.filling)] + } + if e.o.crc { + _, _ = s.encoder.CRC().Write(add) + } + s.filling = append(s.filling, add...) + p = p[len(add):] + n += len(add) + if len(s.filling) < e.o.blockSize { + return n, nil + } + err := e.nextBlock(false) + if err != nil { + return n, err + } + if debugAsserts && len(s.filling) > 0 { + panic(len(s.filling)) + } + } + return n, nil +} + +// nextBlock will synchronize and start compressing input in e.state.filling. +// If an error has occurred during encoding it will be returned. +func (e *Encoder) nextBlock(final bool) error { + s := &e.state + // Wait for current block. 
+ s.wg.Wait() + if s.err != nil { + return s.err + } + if len(s.filling) > e.o.blockSize { + return fmt.Errorf("block > maxStoreBlockSize") + } + if !s.headerWritten { + // If we have a single block encode, do a sync compression. + if final && len(s.filling) == 0 && !e.o.fullZero { + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + if final && len(s.filling) > 0 { + s.current = e.EncodeAll(s.filling, s.current[:0]) + var n2 int + n2, s.err = s.w.Write(s.current) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + s.current = s.current[:0] + s.filling = s.filling[:0] + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + + var tmp [maxHeaderSize]byte + fh := frameHeader{ + ContentSize: 0, + WindowSize: uint32(s.encoder.WindowSize(0)), + SingleSegment: false, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + dst, err := fh.appendTo(tmp[:0]) + if err != nil { + return err + } + s.headerWritten = true + s.wWg.Wait() + var n2 int + n2, s.err = s.w.Write(dst) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + } + if s.eofWritten { + // Ensure we only write it once. + final = false + } + + if len(s.filling) == 0 { + // Final block, but no data. + if final { + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + blk.last = true + blk.encodeRaw(nil) + s.wWg.Wait() + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.eofWritten = true + } + return s.err + } + + // Move blocks forward. + s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current + s.wg.Add(1) + go func(src []byte) { + if debug { + println("Adding block,", len(src), "bytes, final:", final) + } + defer func() { + if r := recover(); r != nil { + s.err = fmt.Errorf("panic while encoding: %v", r) + rdebug.PrintStack() + } + s.wg.Done() + }() + enc := s.encoder + blk := enc.Block() + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + // Wait for pending writes. + s.wWg.Wait() + if s.writeErr != nil { + s.err = s.writeErr + return + } + // Transfer encoders from previous write block. + blk.swapEncoders(s.writing) + // Transfer recent offsets to next. + enc.UseBlock(s.writing) + s.writing = blk + s.wWg.Add(1) + go func() { + defer func() { + if r := recover(); r != nil { + s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r) + rdebug.PrintStack() + } + s.wWg.Done() + }() + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + if len(src) != len(blk.literals) || len(src) != e.o.blockSize { + err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + } + switch err { + case errIncompressible: + if debug { + println("Storing incompressible block as raw") + } + blk.encodeRaw(src) + // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. + case nil: + default: + s.writeErr = err + return + } + _, s.writeErr = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + }() + }(s.current) + return nil +} + +// ReadFrom reads data from r until EOF or error. +// The return value n is the number of bytes read. +// Any error except io.EOF encountered during the read is also returned. +// +// The Copy function uses ReaderFrom if available. +func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { + if debug { + println("Using ReadFrom") + } + + // Flush any current writes. 
+ if len(e.state.filling) > 0 { + if err := e.nextBlock(false); err != nil { + return 0, err + } + } + e.state.filling = e.state.filling[:e.o.blockSize] + src := e.state.filling + for { + n2, err := r.Read(src) + if e.o.crc { + _, _ = e.state.encoder.CRC().Write(src[:n2]) + } + // src is now the unfilled part... + src = src[n2:] + n += int64(n2) + switch err { + case io.EOF: + e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] + if debug { + println("ReadFrom: got EOF final block:", len(e.state.filling)) + } + return n, nil + default: + if debug { + println("ReadFrom: got error:", err) + } + e.state.err = err + return n, err + case nil: + } + if len(src) > 0 { + if debug { + println("ReadFrom: got space left in source:", len(src)) + } + continue + } + err = e.nextBlock(false) + if err != nil { + return n, err + } + e.state.filling = e.state.filling[:e.o.blockSize] + src = e.state.filling + } +} + +// Flush will send the currently written data to output +// and block until everything has been written. +// This should only be used on rare occasions where pushing the currently queued data is critical. +func (e *Encoder) Flush() error { + s := &e.state + if len(s.filling) > 0 { + err := e.nextBlock(false) + if err != nil { + return err + } + } + s.wg.Wait() + s.wWg.Wait() + if s.err != nil { + return s.err + } + return s.writeErr +} + +// Close will flush the final output and close the stream. +// The function will block until everything has been written. +// The Encoder can still be re-used after calling this. +func (e *Encoder) Close() error { + s := &e.state + if s.encoder == nil { + return nil + } + err := e.nextBlock(true) + if err != nil { + return err + } + if e.state.fullFrameWritten { + return s.err + } + s.wg.Wait() + s.wWg.Wait() + + if s.err != nil { + return s.err + } + if s.writeErr != nil { + return s.writeErr + } + + // Write CRC + if e.o.crc && s.err == nil { + // heap alloc. + var tmp [4]byte + _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0])) + s.nWritten += 4 + } + + // Add padding with content from crypto/rand.Reader + if s.err == nil && e.o.pad > 0 { + add := calcSkippableFrame(s.nWritten, int64(e.o.pad)) + frame, err := skippableFrame(s.filling[:0], add, rand.Reader) + if err != nil { + return err + } + _, s.err = s.w.Write(frame) + } + return s.err +} + +// EncodeAll will encode all input in src and append it to dst. +// This function can be called concurrently, but each call will only run on a single goroutine. +// If empty input is given, nothing is returned, unless WithZeroFrames is specified. +// Encoded blocks can be concatenated and the result will be the combined input stream. +// Data compressed with EncodeAll can be decoded with the Decoder, +// using either a stream or DecodeAll. +func (e *Encoder) EncodeAll(src, dst []byte) []byte { + if len(src) == 0 { + if e.o.fullZero { + // Add frame header. + fh := frameHeader{ + ContentSize: 0, + WindowSize: MinWindowSize, + SingleSegment: true, + // Adding a checksum would be a waste of space. + Checksum: false, + DictID: 0, + } + dst, _ = fh.appendTo(dst) + + // Write raw block as last one only. + var blk blockHeader + blk.setSize(0) + blk.setType(blockTypeRaw) + blk.setLast(true) + dst = blk.appendTo(dst) + } + return dst + } + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + // Release encoder reference to last block. + // If a non-single block is needed the encoder will reset again. + e.encoders <- enc + }() + // Use single segments when above minimum window and below 1MB. 
+ single := len(src) < 1<<20 && len(src) > MinWindowSize + if e.o.single != nil { + single = *e.o.single + } + fh := frameHeader{ + ContentSize: uint64(len(src)), + WindowSize: uint32(enc.WindowSize(len(src))), + SingleSegment: single, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + // If less than 1MB, allocate a buffer up front. + if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem { + dst = make([]byte, 0, len(src)) + } + dst, err := fh.appendTo(dst) + if err != nil { + panic(err) + } + + // If we can do everything in one block, prefer that. + if len(src) <= maxCompressedBlockSize { + enc.Reset(e.o.dict, true) + // Slightly faster with no history and everything in one block. + if e.o.crc { + _, _ = enc.CRC().Write(src) + } + blk := enc.Block() + blk.last = true + if e.o.dict == nil { + enc.EncodeNoHist(blk, src) + } else { + enc.Encode(blk, src) + } + + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + err := errIncompressible + oldout := blk.output + if len(blk.literals) != len(src) || len(src) != e.o.blockSize { + // Output directly to dst + blk.output = dst + err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + } + + switch err { + case errIncompressible: + if debug { + println("Storing incompressible block as raw") + } + dst = blk.encodeRawTo(dst, src) + case nil: + dst = blk.output + default: + panic(err) + } + blk.output = oldout + } else { + enc.Reset(e.o.dict, false) + blk := enc.Block() + for len(src) > 0 { + todo := src + if len(todo) > e.o.blockSize { + todo = todo[:e.o.blockSize] + } + src = src[len(todo):] + if e.o.crc { + _, _ = enc.CRC().Write(todo) + } + blk.pushOffsets() + enc.Encode(blk, todo) + if len(src) == 0 { + blk.last = true + } + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize { + err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) + } + + switch err { + case errIncompressible: + if debug { + println("Storing incompressible block as raw") + } + dst = blk.encodeRawTo(dst, todo) + blk.popOffsets() + case nil: + dst = append(dst, blk.output...) + default: + panic(err) + } + blk.reset(nil) + } + } + if e.o.crc { + dst = enc.AppendCRC(dst) + } + // Add padding with content from crypto/rand.Reader + if e.o.pad > 0 { + add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad)) + dst, err = skippableFrame(dst, add, rand.Reader) + if err != nil { + panic(err) + } + } + return dst +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go new file mode 100644 index 000000000..18a47eb03 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -0,0 +1,312 @@ +package zstd + +import ( + "errors" + "fmt" + "runtime" + "strings" +) + +// EOption is an option for creating a encoder. +type EOption func(*encoderOptions) error + +// options retains accumulated state of multiple options. 
+type encoderOptions struct { + concurrent int + level EncoderLevel + single *bool + pad int + blockSize int + windowSize int + crc bool + fullZero bool + noEntropy bool + allLitEntropy bool + customWindow bool + customALEntropy bool + lowMem bool + dict *dict +} + +func (o *encoderOptions) setDefault() { + *o = encoderOptions{ + concurrent: runtime.GOMAXPROCS(0), + crc: true, + single: nil, + blockSize: 1 << 16, + windowSize: 8 << 20, + level: SpeedDefault, + allLitEntropy: true, + lowMem: false, + } +} + +// encoder returns an encoder with the selected options. +func (o encoderOptions) encoder() encoder { + switch o.level { + case SpeedFastest: + if o.dict != nil { + return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} + } + return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} + + case SpeedDefault: + if o.dict != nil { + return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}} + } + return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} + case SpeedBetterCompression: + if o.dict != nil { + return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} + } + return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} + case SpeedBestCompression: + return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} + } + panic("unknown compression level") +} + +// WithEncoderCRC will add CRC value to output. +// Output will be 4 bytes larger. +func WithEncoderCRC(b bool) EOption { + return func(o *encoderOptions) error { o.crc = b; return nil } +} + +// WithEncoderConcurrency will set the concurrency, +// meaning the maximum number of decoders to run concurrently. +// The value supplied must be at least 1. +// By default this will be set to GOMAXPROCS. +func WithEncoderConcurrency(n int) EOption { + return func(o *encoderOptions) error { + if n <= 0 { + return fmt.Errorf("concurrency must be at least 1") + } + o.concurrent = n + return nil + } +} + +// WithWindowSize will set the maximum allowed back-reference distance. +// The value must be a power of two between MinWindowSize and MaxWindowSize. +// A larger value will enable better compression but allocate more memory and, +// for above-default values, take considerably longer. +// The default value is determined by the compression level. +func WithWindowSize(n int) EOption { + return func(o *encoderOptions) error { + switch { + case n < MinWindowSize: + return fmt.Errorf("window size must be at least %d", MinWindowSize) + case n > MaxWindowSize: + return fmt.Errorf("window size must be at most %d", MaxWindowSize) + case (n & (n - 1)) != 0: + return errors.New("window size must be a power of 2") + } + + o.windowSize = n + o.customWindow = true + if o.blockSize > o.windowSize { + o.blockSize = o.windowSize + } + return nil + } +} + +// WithEncoderPadding will add padding to all output so the size will be a multiple of n. +// This can be used to obfuscate the exact output size or make blocks of a certain size. +// The contents will be a skippable frame, so it will be invisible by the decoder. +// n must be > 0 and <= 1GB, 1<<30 bytes. +// The padded area will be filled with data from crypto/rand.Reader. 
+// If `EncodeAll` is used with data already in the destination, the total size will be multiple of this. +func WithEncoderPadding(n int) EOption { + return func(o *encoderOptions) error { + if n <= 0 { + return fmt.Errorf("padding must be at least 1") + } + // No need to waste our time. + if n == 1 { + o.pad = 0 + } + if n > 1<<30 { + return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ") + } + o.pad = n + return nil + } +} + +// EncoderLevel predefines encoder compression levels. +// Only use the constants made available, since the actual mapping +// of these values are very likely to change and your compression could change +// unpredictably when upgrading the library. +type EncoderLevel int + +const ( + speedNotSet EncoderLevel = iota + + // SpeedFastest will choose the fastest reasonable compression. + // This is roughly equivalent to the fastest Zstandard mode. + SpeedFastest + + // SpeedDefault is the default "pretty fast" compression option. + // This is roughly equivalent to the default Zstandard mode (level 3). + SpeedDefault + + // SpeedBetterCompression will yield better compression than the default. + // Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage. + // By using this, notice that CPU usage may go up in the future. + SpeedBetterCompression + + // SpeedBestCompression will choose the best available compression option. + // This will offer the best compression no matter the CPU cost. + SpeedBestCompression + + // speedLast should be kept as the last actual compression option. + // The is not for external usage, but is used to keep track of the valid options. + speedLast +) + +// EncoderLevelFromString will convert a string representation of an encoding level back +// to a compression level. The compare is not case sensitive. +// If the string wasn't recognized, (false, SpeedDefault) will be returned. +func EncoderLevelFromString(s string) (bool, EncoderLevel) { + for l := speedNotSet + 1; l < speedLast; l++ { + if strings.EqualFold(s, l.String()) { + return true, l + } + } + return false, SpeedDefault +} + +// EncoderLevelFromZstd will return an encoder level that closest matches the compression +// ratio of a specific zstd compression level. +// Many input values will provide the same compression level. +func EncoderLevelFromZstd(level int) EncoderLevel { + switch { + case level < 3: + return SpeedFastest + case level >= 3 && level < 6: + return SpeedDefault + case level >= 6 && level < 10: + return SpeedBetterCompression + case level >= 10: + return SpeedBetterCompression + } + return SpeedDefault +} + +// String provides a string representation of the compression level. +func (e EncoderLevel) String() string { + switch e { + case SpeedFastest: + return "fastest" + case SpeedDefault: + return "default" + case SpeedBetterCompression: + return "better" + case SpeedBestCompression: + return "best" + default: + return "invalid" + } +} + +// WithEncoderLevel specifies a predefined compression level. 
+func WithEncoderLevel(l EncoderLevel) EOption { + return func(o *encoderOptions) error { + switch { + case l <= speedNotSet || l >= speedLast: + return fmt.Errorf("unknown encoder level") + } + o.level = l + if !o.customWindow { + switch o.level { + case SpeedFastest: + o.windowSize = 4 << 20 + case SpeedDefault: + o.windowSize = 8 << 20 + case SpeedBetterCompression: + o.windowSize = 16 << 20 + case SpeedBestCompression: + o.windowSize = 32 << 20 + } + } + if !o.customALEntropy { + o.allLitEntropy = l > SpeedFastest + } + + return nil + } +} + +// WithZeroFrames will encode 0 length input as full frames. +// This can be needed for compatibility with zstandard usage, +// but is not needed for this package. +func WithZeroFrames(b bool) EOption { + return func(o *encoderOptions) error { + o.fullZero = b + return nil + } +} + +// WithAllLitEntropyCompression will apply entropy compression if no matches are found. +// Disabling this will skip incompressible data faster, but in cases with no matches but +// skewed character distribution compression is lost. +// Default value depends on the compression level selected. +func WithAllLitEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.customALEntropy = true + o.allLitEntropy = b + return nil + } +} + +// WithNoEntropyCompression will always skip entropy compression of literals. +// This can be useful if content has matches, but unlikely to benefit from entropy +// compression. Usually the slight speed improvement is not worth enabling this. +func WithNoEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.noEntropy = b + return nil + } +} + +// WithSingleSegment will set the "single segment" flag when EncodeAll is used. +// If this flag is set, data must be regenerated within a single continuous memory segment. +// In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present. +// As a consequence, the decoder must allocate a memory segment of size equal or larger than size of your content. +// In order to preserve the decoder from unreasonable memory requirements, +// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range. +// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB. +// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations. +// If this is not specified, block encodes will automatically choose this based on the input size. +// This setting has no effect on streamed encodes. +func WithSingleSegment(b bool) EOption { + return func(o *encoderOptions) error { + o.single = &b + return nil + } +} + +// WithLowerEncoderMem will trade in some memory cases trade less memory usage for +// slower encoding speed. +// This will not change the window size which is the primary function for reducing +// memory usage. See WithWindowSize. +func WithLowerEncoderMem(b bool) EOption { + return func(o *encoderOptions) error { + o.lowMem = b + return nil + } +} + +// WithEncoderDict allows to register a dictionary that will be used for the encode. +// The encoder *may* choose to use no dictionary instead for certain payloads. 
+func WithEncoderDict(dict []byte) EOption { + return func(o *encoderOptions) error { + d, err := loadDict(dict) + if err != nil { + return err + } + o.dict = d + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go new file mode 100644 index 000000000..fc4a566d3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -0,0 +1,494 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "bytes" + "encoding/hex" + "errors" + "hash" + "io" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type frameDec struct { + o decoderOptions + crc hash.Hash64 + offset int64 + + WindowSize uint64 + + // maxWindowSize is the maximum windows size to support. + // should never be bigger than max-int. + maxWindowSize uint64 + + // In order queue of blocks being decoded. + decoding chan *blockDec + + // Frame history passed between blocks + history history + + rawInput byteBuffer + + // Byte buffer that can be reused for small input blocks. + bBuf byteBuf + + FrameContentSize uint64 + frameDone sync.WaitGroup + + DictionaryID *uint32 + HasCheckSum bool + SingleSegment bool + + // asyncRunning indicates whether the async routine processes input on 'decoding'. + asyncRunningMu sync.Mutex + asyncRunning bool +} + +const ( + // The minimum Window_Size is 1 KB. + MinWindowSize = 1 << 10 + MaxWindowSize = 1 << 29 +) + +var ( + frameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} + skippableFrameMagic = []byte{0x2a, 0x4d, 0x18} +) + +func newFrameDec(o decoderOptions) *frameDec { + d := frameDec{ + o: o, + maxWindowSize: MaxWindowSize, + } + if d.maxWindowSize > o.maxDecodedSize { + d.maxWindowSize = o.maxDecodedSize + } + return &d +} + +// reset will read the frame header and prepare for block decoding. +// If nothing can be read from the input, io.EOF will be returned. +// Any other error indicated that the stream contained data, but +// there was a problem. +func (d *frameDec) reset(br byteBuffer) error { + d.HasCheckSum = false + d.WindowSize = 0 + var b []byte + for { + b = br.readSmall(4) + if b == nil { + return io.EOF + } + if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 { + if debug { + println("Not skippable", hex.EncodeToString(b), hex.EncodeToString(skippableFrameMagic)) + } + // Break if not skippable frame. 
+ break + } + // Read size to skip + b = br.readSmall(4) + if b == nil { + println("Reading Frame Size EOF") + return io.ErrUnexpectedEOF + } + n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + println("Skipping frame with", n, "bytes.") + err := br.skipN(int(n)) + if err != nil { + if debug { + println("Reading discarded frame", err) + } + return err + } + } + if !bytes.Equal(b, frameMagic) { + println("Got magic numbers: ", b, "want:", frameMagic) + return ErrMagicMismatch + } + + // Read Frame_Header_Descriptor + fhd, err := br.readByte() + if err != nil { + println("Reading Frame_Header_Descriptor", err) + return err + } + d.SingleSegment = fhd&(1<<5) != 0 + + if fhd&(1<<3) != 0 { + return errors.New("Reserved bit set on frame header") + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + d.WindowSize = 0 + if !d.SingleSegment { + wd, err := br.readByte() + if err != nil { + println("Reading Window_Descriptor", err) + return err + } + printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + d.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + d.DictionaryID = nil + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + b = br.readSmall(int(size)) + if b == nil { + if debug { + println("Reading Dictionary_ID", io.ErrUnexpectedEOF) + } + return io.ErrUnexpectedEOF + } + var id uint32 + switch size { + case 1: + id = uint32(b[0]) + case 2: + id = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + if debug { + println("Dict size", size, "ID:", id) + } + if id > 0 { + // ID 0 means "sorry, no dictionary anyway". + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format + d.DictionaryID = &id + } + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if d.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + d.FrameContentSize = 0 + if fcsSize > 0 { + b := br.readSmall(fcsSize) + if b == nil { + println("Reading Frame content", io.ErrUnexpectedEOF) + return io.ErrUnexpectedEOF + } + switch fcsSize { + case 1: + d.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + if debug { + println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize) + } + } + // Move this to shared. + d.HasCheckSum = fhd&(1<<2) != 0 + if d.HasCheckSum { + if d.crc == nil { + d.crc = xxhash.New() + } + d.crc.Reset() + } + + if d.WindowSize == 0 && d.SingleSegment { + // We may not need window in this case. 
+ d.WindowSize = d.FrameContentSize + if d.WindowSize < MinWindowSize { + d.WindowSize = MinWindowSize + } + } + + if d.WindowSize > d.maxWindowSize { + printf("window size %d > max %d\n", d.WindowSize, d.maxWindowSize) + return ErrWindowSizeExceeded + } + // The minimum Window_Size is 1 KB. + if d.WindowSize < MinWindowSize { + println("got window size: ", d.WindowSize) + return ErrWindowSizeTooSmall + } + d.history.windowSize = int(d.WindowSize) + if d.o.lowMem && d.history.windowSize < maxBlockSize { + d.history.maxSize = d.history.windowSize * 2 + } else { + d.history.maxSize = d.history.windowSize + maxBlockSize + } + // history contains input - maybe we do something + d.rawInput = br + return nil +} + +// next will start decoding the next block from stream. +func (d *frameDec) next(block *blockDec) error { + if debug { + printf("decoding new block %p:%p", block, block.data) + } + err := block.reset(d.rawInput, d.WindowSize) + if err != nil { + println("block error:", err) + // Signal the frame decoder we have a problem. + d.sendErr(block, err) + return err + } + block.input <- struct{}{} + if debug { + println("next block:", block) + } + d.asyncRunningMu.Lock() + defer d.asyncRunningMu.Unlock() + if !d.asyncRunning { + return nil + } + if block.Last { + // We indicate the frame is done by sending io.EOF + d.decoding <- block + return io.EOF + } + d.decoding <- block + return nil +} + +// sendEOF will queue an error block on the frame. +// This will cause the frame decoder to return when it encounters the block. +// Returns true if the decoder was added. +func (d *frameDec) sendErr(block *blockDec, err error) bool { + d.asyncRunningMu.Lock() + defer d.asyncRunningMu.Unlock() + if !d.asyncRunning { + return false + } + + println("sending error", err.Error()) + block.sendErr(err) + d.decoding <- block + return true +} + +// checkCRC will check the checksum if the frame has one. +// Will return ErrCRCMismatch if crc check failed, otherwise nil. +func (d *frameDec) checkCRC() error { + if !d.HasCheckSum { + return nil + } + var tmp [4]byte + got := d.crc.Sum64() + // Flip to match file order. + tmp[0] = byte(got >> 0) + tmp[1] = byte(got >> 8) + tmp[2] = byte(got >> 16) + tmp[3] = byte(got >> 24) + + // We can overwrite upper tmp now + want := d.rawInput.readSmall(4) + if want == nil { + println("CRC missing?") + return io.ErrUnexpectedEOF + } + + if !bytes.Equal(tmp[:], want) { + if debug { + println("CRC Check Failed:", tmp[:], "!=", want) + } + return ErrCRCMismatch + } + if debug { + println("CRC ok", tmp[:]) + } + return nil +} + +func (d *frameDec) initAsync() { + if !d.o.lowMem && !d.SingleSegment { + // set max extra size history to 10MB. + d.history.maxSize = d.history.windowSize + maxBlockSize*5 + } + // re-alloc if more than one extra block size. + if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize { + d.history.b = make([]byte, 0, d.history.maxSize) + } + if cap(d.history.b) < d.history.maxSize { + d.history.b = make([]byte, 0, d.history.maxSize) + } + if cap(d.decoding) < d.o.concurrent { + d.decoding = make(chan *blockDec, d.o.concurrent) + } + if debug { + h := d.history + printf("history init. len: %d, cap: %d", len(h.b), cap(h.b)) + } + d.asyncRunningMu.Lock() + d.asyncRunning = true + d.asyncRunningMu.Unlock() +} + +// startDecoder will start decoding blocks and write them to the writer. +// The decoder will stop as soon as an error occurs or at end of frame. 
+// When the frame has finished decoding the *bufio.Reader +// containing the remaining input will be sent on frameDec.frameDone. +func (d *frameDec) startDecoder(output chan decodeOutput) { + written := int64(0) + + defer func() { + d.asyncRunningMu.Lock() + d.asyncRunning = false + d.asyncRunningMu.Unlock() + + // Drain the currently decoding. + d.history.error = true + flushdone: + for { + select { + case b := <-d.decoding: + b.history <- &d.history + output <- <-b.result + default: + break flushdone + } + } + println("frame decoder done, signalling done") + d.frameDone.Done() + }() + // Get decoder for first block. + block := <-d.decoding + block.history <- &d.history + for { + var next *blockDec + // Get result + r := <-block.result + if r.err != nil { + println("Result contained error", r.err) + output <- r + return + } + if debug { + println("got result, from ", d.offset, "to", d.offset+int64(len(r.b))) + d.offset += int64(len(r.b)) + } + if !block.Last { + // Send history to next block + select { + case next = <-d.decoding: + if debug { + println("Sending ", len(d.history.b), "bytes as history") + } + next.history <- &d.history + default: + // Wait until we have sent the block, so + // other decoders can potentially get the decoder. + next = nil + } + } + + // Add checksum, async to decoding. + if d.HasCheckSum { + n, err := d.crc.Write(r.b) + if err != nil { + r.err = err + if n != len(r.b) { + r.err = io.ErrShortWrite + } + output <- r + return + } + } + written += int64(len(r.b)) + if d.SingleSegment && uint64(written) > d.FrameContentSize { + println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize) + r.err = ErrFrameSizeExceeded + output <- r + return + } + if block.Last { + r.err = d.checkCRC() + output <- r + return + } + output <- r + if next == nil { + // There was no decoder available, we wait for one now that we have sent to the writer. + if debug { + println("Sending ", len(d.history.b), " bytes as history") + } + next = <-d.decoding + next.history <- &d.history + } + block = next + } +} + +// runDecoder will create a sync decoder that will decode a block of data. +func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { + saved := d.history.b + + // We use the history for output to avoid copying it. + d.history.b = dst + // Store input length, so we only check new data. + crcStart := len(dst) + var err error + for { + err = dec.reset(d.rawInput, d.WindowSize) + if err != nil { + break + } + if debug { + println("next block:", dec) + } + err = dec.decodeBuf(&d.history) + if err != nil || dec.Last { + break + } + if uint64(len(d.history.b)) > d.o.maxDecodedSize { + err = ErrDecoderSizeExceeded + break + } + if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize { + println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize) + err = ErrFrameSizeExceeded + break + } + } + dst = d.history.b + if err == nil { + if d.HasCheckSum { + var n int + n, err = d.crc.Write(dst[crcStart:]) + if err == nil { + if n != len(dst)-crcStart { + err = io.ErrShortWrite + } else { + err = d.checkCRC() + } + } + } + } + d.history.b = saved + return dst, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go new file mode 100644 index 000000000..4ef7f5a3e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go @@ -0,0 +1,137 @@ +// Copyright 2019+ Klaus Post. All rights reserved. 
+// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "fmt" + "io" + "math" + "math/bits" +) + +type frameHeader struct { + ContentSize uint64 + WindowSize uint32 + SingleSegment bool + Checksum bool + DictID uint32 +} + +const maxHeaderSize = 14 + +func (f frameHeader) appendTo(dst []byte) ([]byte, error) { + dst = append(dst, frameMagic...) + var fhd uint8 + if f.Checksum { + fhd |= 1 << 2 + } + if f.SingleSegment { + fhd |= 1 << 5 + } + + var dictIDContent []byte + if f.DictID > 0 { + var tmp [4]byte + if f.DictID < 256 { + fhd |= 1 + tmp[0] = uint8(f.DictID) + dictIDContent = tmp[:1] + } else if f.DictID < 1<<16 { + fhd |= 2 + binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID)) + dictIDContent = tmp[:2] + } else { + fhd |= 3 + binary.LittleEndian.PutUint32(tmp[:4], f.DictID) + dictIDContent = tmp[:4] + } + } + var fcs uint8 + if f.ContentSize >= 256 { + fcs++ + } + if f.ContentSize >= 65536+256 { + fcs++ + } + if f.ContentSize >= 0xffffffff { + fcs++ + } + + fhd |= fcs << 6 + + dst = append(dst, fhd) + if !f.SingleSegment { + const winLogMin = 10 + windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3 + dst = append(dst, uint8(windowLog)) + } + if f.DictID > 0 { + dst = append(dst, dictIDContent...) + } + switch fcs { + case 0: + if f.SingleSegment { + dst = append(dst, uint8(f.ContentSize)) + } + // Unless SingleSegment is set, framessizes < 256 are nto stored. + case 1: + f.ContentSize -= 256 + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8)) + case 2: + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24)) + case 3: + dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24), + uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56)) + default: + panic("invalid fcs") + } + return dst, nil +} + +const skippableFrameHeader = 4 + 4 + +// calcSkippableFrame will return a total size to be added for written +// to be divisible by multiple. +// The value will always be > skippableFrameHeader. +// The function will panic if written < 0 or wantMultiple <= 0. +func calcSkippableFrame(written, wantMultiple int64) int { + if wantMultiple <= 0 { + panic("wantMultiple <= 0") + } + if written < 0 { + panic("written < 0") + } + leftOver := written % wantMultiple + if leftOver == 0 { + return 0 + } + toAdd := wantMultiple - leftOver + for toAdd < skippableFrameHeader { + toAdd += wantMultiple + } + return int(toAdd) +} + +// skippableFrame will add a skippable frame with a total size of bytes. +// total should be >= skippableFrameHeader and < math.MaxUint32. +func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) { + if total == 0 { + return dst, nil + } + if total < skippableFrameHeader { + return dst, fmt.Errorf("requested skippable frame (%d) < 8", total) + } + if int64(total) > math.MaxUint32 { + return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total) + } + dst = append(dst, 0x50, 0x2a, 0x4d, 0x18) + f := uint32(total - skippableFrameHeader) + dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)) + start := len(dst) + dst = append(dst, make([]byte, f)...) 
+ _, err := io.ReadFull(r, dst[start:]) + return dst, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go new file mode 100644 index 000000000..e6d3d49b3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go @@ -0,0 +1,385 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" +) + +const ( + tablelogAbsoluteMax = 9 +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = tablelogAbsoluteMax + 2 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + maxTableMask = (1 << maxTableLog) - 1 + minTablelog = 5 + maxSymbolValue = 255 +) + +// fseDecoder provides temporary storage for compression and decompression. +type fseDecoder struct { + dt [maxTablesize]decSymbol // Decompression table. + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + maxBits uint8 // Maximum number of additional bits + + // used for table creation to avoid allocations. + stateTable [256]uint16 + norm [maxSymbolValue + 1]int16 + preDefined bool +} + +// tableStep returns the next table index. +func tableStep(tableSize uint32) uint32 { + return (tableSize >> 1) + (tableSize >> 3) + 3 +} + +// readNCount will read the symbol distribution so decoding tables can be constructed. 
+func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error {
+ var (
+ charnum uint16
+ previous0 bool
+ )
+ if b.remain() < 4 {
+ return errors.New("input too small")
+ }
+ bitStream := b.Uint32NC()
+ nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog
+ if nbBits > tablelogAbsoluteMax {
+ println("Invalid tablelog:", nbBits)
+ return errors.New("tableLog too large")
+ }
+ bitStream >>= 4
+ bitCount := uint(4)
+
+ s.actualTableLog = uint8(nbBits)
+ remaining := int32((1 << nbBits) + 1)
+ threshold := int32(1 << nbBits)
+ gotTotal := int32(0)
+ nbBits++
+
+ for remaining > 1 && charnum <= maxSymbol {
+ if previous0 {
+ //println("prev0")
+ n0 := charnum
+ for (bitStream & 0xFFFF) == 0xFFFF {
+ //println("24 x 0")
+ n0 += 24
+ if r := b.remain(); r > 5 {
+ b.advance(2)
+ // The check above should make sure we can read 32 bits
+ bitStream = b.Uint32NC() >> bitCount
+ } else {
+ // end of bit stream
+ bitStream >>= 16
+ bitCount += 16
+ }
+ }
+ //printf("bitstream: %d, 0b%b", bitStream&3, bitStream)
+ for (bitStream & 3) == 3 {
+ n0 += 3
+ bitStream >>= 2
+ bitCount += 2
+ }
+ n0 += uint16(bitStream & 3)
+ bitCount += 2
+
+ if n0 > maxSymbolValue {
+ return errors.New("maxSymbolValue too small")
+ }
+ //println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0)
+ for charnum < n0 {
+ s.norm[uint8(charnum)] = 0
+ charnum++
+ }
+
+ if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
+ b.advance(bitCount >> 3)
+ bitCount &= 7
+ // The check above should make sure we can read 32 bits
+ bitStream = b.Uint32NC() >> bitCount
+ } else {
+ bitStream >>= 2
+ }
+ }
+
+ max := (2*threshold - 1) - remaining
+ var count int32
+
+ if int32(bitStream)&(threshold-1) < max {
+ count = int32(bitStream) & (threshold - 1)
+ if debugAsserts && nbBits < 1 {
+ panic("nbBits underflow")
+ }
+ bitCount += nbBits - 1
+ } else {
+ count = int32(bitStream) & (2*threshold - 1)
+ if count >= threshold {
+ count -= max
+ }
+ bitCount += nbBits
+ }
+
+ // extra accuracy
+ count--
+ if count < 0 {
+ // -1 means +1
+ remaining += count
+ gotTotal -= count
+ } else {
+ remaining -= count
+ gotTotal += count
+ }
+ s.norm[charnum&0xff] = int16(count)
+ charnum++
+ previous0 = count == 0
+ for remaining < threshold {
+ nbBits--
+ threshold >>= 1
+ }
+
+ if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
+ b.advance(bitCount >> 3)
+ bitCount &= 7
+ // The check above should make sure we can read 32 bits
+ bitStream = b.Uint32NC() >> (bitCount & 31)
+ } else {
+ bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
+ b.off = len(b.b) - 4
+ bitStream = b.Uint32() >> (bitCount & 31)
+ }
+ }
+ s.symbolLen = charnum
+ if s.symbolLen <= 1 {
+ return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
+ }
+ if s.symbolLen > maxSymbolValue+1 {
+ return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
+ }
+ if remaining != 1 {
+ return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
+ }
+ if bitCount > 32 {
+ return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
+ }
+ if gotTotal != 1<<s.actualTableLog {
+ return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
+ }
+ b.advance((bitCount + 7) >> 3)
+ // println(s.norm[:s.symbolLen], s.symbolLen)
+ return s.buildDtable()
+}
+
+// decSymbol contains information about a state entry,
+// Including the state offset base, the output symbol and
+// the number of bits to read for the low part of the destination state.
+// Using a composite uint64 is faster than a struct with separate members.
+type decSymbol uint64 + +func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol { + return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) +} + +func (d decSymbol) nbBits() uint8 { + return uint8(d) +} + +func (d decSymbol) addBits() uint8 { + return uint8(d >> 8) +} + +func (d decSymbol) newState() uint16 { + return uint16(d >> 16) +} + +func (d decSymbol) baseline() uint32 { + return uint32(d >> 32) +} + +func (d decSymbol) baselineInt() int { + return int(d >> 32) +} + +func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) { + *d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) +} + +func (d *decSymbol) setNBits(nBits uint8) { + const mask = 0xffffffffffffff00 + *d = (*d & mask) | decSymbol(nBits) +} + +func (d *decSymbol) setAddBits(addBits uint8) { + const mask = 0xffffffffffff00ff + *d = (*d & mask) | (decSymbol(addBits) << 8) +} + +func (d *decSymbol) setNewState(state uint16) { + const mask = 0xffffffff0000ffff + *d = (*d & mask) | decSymbol(state)<<16 +} + +func (d *decSymbol) setBaseline(baseline uint32) { + const mask = 0xffffffff + *d = (*d & mask) | decSymbol(baseline)<<32 +} + +func (d *decSymbol) setExt(addBits uint8, baseline uint32) { + const mask = 0xffff00ff + *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32) +} + +// decSymbolValue returns the transformed decSymbol for the given symbol. +func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) { + if int(symb) >= len(t) { + return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t)) + } + lu := t[symb] + return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil +} + +// setRLE will set the decoder til RLE mode. +func (s *fseDecoder) setRLE(symbol decSymbol) { + s.actualTableLog = 0 + s.maxBits = symbol.addBits() + s.dt[0] = symbol +} + +// buildDtable will build the decoding table. +func (s *fseDecoder) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + symbolNext := s.stateTable[:256] + + // Init, lay down lowprob symbols + { + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.dt[highThreshold].setAddBits(uint8(i)) + highThreshold-- + symbolNext[i] = 1 + } else { + symbolNext[i] = uint16(v) + } + } + } + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.dt[position].setAddBits(uint8(ss)) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.dt[:tableSize] { + symbol := v.addBits() + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.dt[u&maxTableMask].setNBits(nBits) + newState := (nextState << nBits) - tableSize + if newState > tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. 
+ return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.dt[u&maxTableMask].setNewState(newState) + } + } + return nil +} + +// transform will transform the decoder table into a table usable for +// decoding without having to apply the transformation while decoding. +// The state will contain the base value and the number of bits to read. +func (s *fseDecoder) transform(t []baseOffset) error { + tableSize := uint16(1 << s.actualTableLog) + s.maxBits = 0 + for i, v := range s.dt[:tableSize] { + add := v.addBits() + if int(add) >= len(t) { + return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t)) + } + lu := t[add] + if lu.addBits > s.maxBits { + s.maxBits = lu.addBits + } + v.setExt(lu.addBits, lu.baseLine) + s.dt[i] = v + } + return nil +} + +type fseState struct { + dt []decSymbol + state decSymbol +} + +// Initialize and decodeAsync first state and symbol. +func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { + s.dt = dt + br.fill() + s.state = dt[br.getBits(tableLog)] +} + +// next returns the current symbol and sets the next state. +// At least tablelog bits must be available in the bit reader. +func (s *fseState) next(br *bitReader) { + lowBits := uint16(br.getBits(s.state.nbBits())) + s.state = s.dt[s.state.newState()+lowBits] +} + +// finished returns true if all bits have been read from the bitstream +// and the next state would require reading bits from the input. +func (s *fseState) finished(br *bitReader) bool { + return br.finished() && s.state.nbBits() > 0 +} + +// final returns the current state symbol without decoding the next. +func (s *fseState) final() (int, uint8) { + return s.state.baselineInt(), s.state.addBits() +} + +// final returns the current state symbol without decoding the next. +func (s decSymbol) final() (int, uint8) { + return s.baselineInt(), s.addBits() +} + +// nextFast returns the next symbol and sets the next state. +// This can only be used if no symbols are 0 bits. +// At least tablelog bits must be available in the bit reader. +func (s *fseState) nextFast(br *bitReader) (uint32, uint8) { + lowBits := uint16(br.getBitsFast(s.state.nbBits())) + s.state = s.dt[s.state.newState()+lowBits] + return s.state.baseline(), s.state.addBits() +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go new file mode 100644 index 000000000..b80709d5e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go @@ -0,0 +1,726 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math" +) + +const ( + // For encoding we only support up to + maxEncTableLog = 8 + maxEncTablesize = 1 << maxTableLog + maxEncTableMask = (1 << maxTableLog) - 1 + minEncTablelog = 5 + maxEncSymbolValue = maxMatchLengthSymbol +) + +// Scratch provides temporary storage for compression and decompression. +type fseEncoder struct { + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + ct cTable // Compression tables. + maxCount int // count of the most probable symbol + zeroBits bool // no bits has prob > 50%. + clearCount bool // clear count + useRLE bool // This encoder is for RLE + preDefined bool // This encoder is predefined. 
+ reUsed bool // Set to know when the encoder has been reused. + rleVal uint8 // RLE Symbol + maxBits uint8 // Maximum output bits after transform. + + // TODO: Technically zstd should be fine with 64 bytes. + count [256]uint32 + norm [256]int16 +} + +// cTable contains tables used for compression. +type cTable struct { + tableSymbol []byte + stateTable []uint16 + symbolTT []symbolTransform +} + +// symbolTransform contains the state transform for a symbol. +type symbolTransform struct { + deltaNbBits uint32 + deltaFindState int16 + outBits uint8 +} + +// String prints values as a human readable string. +func (s symbolTransform) String() string { + return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits) +} + +// Histogram allows to populate the histogram and skip that step in the compression, +// It otherwise allows to inspect the histogram when compression is done. +// To indicate that you have populated the histogram call HistogramFinished +// with the value of the highest populated symbol, as well as the number of entries +// in the most populated entry. These are accepted at face value. +// The returned slice will always be length 256. +func (s *fseEncoder) Histogram() []uint32 { + return s.count[:] +} + +// HistogramFinished can be called to indicate that the histogram has been populated. +// maxSymbol is the index of the highest set symbol of the next data segment. +// maxCount is the number of entries in the most populated entry. +// These are accepted at face value. +func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { + s.maxCount = maxCount + s.symbolLen = uint16(maxSymbol) + 1 + s.clearCount = maxCount != 0 +} + +// prepare will prepare and allocate scratch tables used for both compression and decompression. +func (s *fseEncoder) prepare() (*fseEncoder, error) { + if s == nil { + s = &fseEncoder{} + } + s.useRLE = false + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + return s, nil +} + +// allocCtable will allocate tables needed for compression. +// If existing tables a re big enough, they are simply re-used. +func (s *fseEncoder) allocCtable() { + tableSize := 1 << s.actualTableLog + // get tableSymbol that is big enough. + if cap(s.ct.tableSymbol) < tableSize { + s.ct.tableSymbol = make([]byte, tableSize) + } + s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] + + ctSize := tableSize + if cap(s.ct.stateTable) < ctSize { + s.ct.stateTable = make([]uint16, ctSize) + } + s.ct.stateTable = s.ct.stateTable[:ctSize] + + if cap(s.ct.symbolTT) < 256 { + s.ct.symbolTT = make([]symbolTransform, 256) + } + s.ct.symbolTT = s.ct.symbolTT[:256] +} + +// buildCTable will populate the compression table so it is ready to be used. 
+func (s *fseEncoder) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [256]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. + largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = total - 1 + total++ + default: + maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = total - v + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +func (s *fseEncoder) setRLE(val byte) { + s.allocCtable() + s.actualTableLog = 0 + s.ct.stateTable = s.ct.stateTable[:1] + s.ct.symbolTT[val] = symbolTransform{ + deltaFindState: 0, + deltaNbBits: 0, + } + if debug { + println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) + } + s.rleVal = val + s.useRLE = true +} + +// setBits will set output bits for the transform. +// if nil is provided, the number of bits is equal to the index. 
+func (s *fseEncoder) setBits(transform []byte) { + if s.reUsed || s.preDefined { + return + } + if s.useRLE { + if transform == nil { + s.ct.symbolTT[s.rleVal].outBits = s.rleVal + s.maxBits = s.rleVal + return + } + s.maxBits = transform[s.rleVal] + s.ct.symbolTT[s.rleVal].outBits = s.maxBits + return + } + if transform == nil { + for i := range s.ct.symbolTT[:s.symbolLen] { + s.ct.symbolTT[i].outBits = uint8(i) + } + s.maxBits = uint8(s.symbolLen - 1) + return + } + s.maxBits = 0 + for i, v := range transform[:s.symbolLen] { + s.ct.symbolTT[i].outBits = v + if v > s.maxBits { + // We could assume bits always going up, but we play safe. + s.maxBits = v + } + } +} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +// If successful, compression tables will also be made ready. +func (s *fseEncoder) normalizeCount(length int) error { + if s.reUsed { + return nil + } + s.optimalTableLog(length) + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(length) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(length >> tableLog) + ) + if s.maxCount == length { + s.useRLE = true + return nil + } + s.useRLE = false + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + err := s.normalizeCount2(length) + if err != nil { + return err + } + if debugAsserts { + err = s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() + } + s.norm[largest] += stillToDistribute + if debugAsserts { + err := s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() +} + +// Secondary normalization method. +// To be used when primary method fails. 
+func (s *fseEncoder) normalizeCount2(length int) error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(length) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *fseEncoder) optimalTableLog(length int) { + tableLog := uint8(maxEncTableLog) + minBitsSrc := highBit(uint32(length)) + 1 + minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2 + minBits := uint8(minBitsSymbols) + if minBitsSrc < minBitsSymbols { + minBits = uint8(minBitsSrc) + } + + maxBitsSrc := uint8(highBit(uint32(length-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minEncTablelog { + tableLog = minEncTablelog + } + if tableLog > maxEncTableLog { + tableLog = maxEncTableLog + } + s.actualTableLog = tableLog +} + +// validateNorm validates the normalized histogram table. 
+func (s *fseEncoder) validateNorm() (err error) {
+ var total int
+ for _, v := range s.norm[:s.symbolLen] {
+ if v >= 0 {
+ total += int(v)
+ } else {
+ total -= int(v)
+ }
+ }
+ defer func() {
+ if err == nil {
+ return
+ }
+ fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
+ for i, v := range s.norm[:s.symbolLen] {
+ fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
+ }
+ }()
+ if total != (1 << s.actualTableLog) {
+ return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
+ }
+ return nil
+}
+
+// writeCount will write the normalized histogram count to header.
+// This is read back by readNCount.
+func (s *fseEncoder) writeCount(out []byte) ([]byte, error) {
+ if s.useRLE {
+ return append(out, s.rleVal), nil
+ }
+ if s.preDefined || s.reUsed {
+ // Never write predefined.
+ return out, nil
+ }
+
+ var (
+ tableLog = s.actualTableLog
+ tableSize = 1 << tableLog
+ previous0 bool
+ charnum uint16
+
+ maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + 2
+
+ // Write Table Size
+ bitStream = uint32(tableLog - minEncTablelog)
+ bitCount = uint(4)
+ remaining = int16(tableSize + 1) /* +1 for extra accuracy */
+ threshold = int16(tableSize)
+ nbBits = uint(tableLog + 1)
+ outP = len(out)
+ )
+ if cap(out) < outP+maxHeaderSize {
+ out = append(out, make([]byte, maxHeaderSize*3)...)
+ out = out[:len(out)-maxHeaderSize*3]
+ }
+ out = out[:outP+maxHeaderSize]
+
+ // stops at 1
+ for remaining > 1 {
+ if previous0 {
+ start := charnum
+ for s.norm[charnum] == 0 {
+ charnum++
+ }
+ for charnum >= start+24 {
+ start += 24
+ bitStream += uint32(0xFFFF) << bitCount
+ out[outP] = byte(bitStream)
+ out[outP+1] = byte(bitStream >> 8)
+ outP += 2
+ bitStream >>= 16
+ }
+ for charnum >= start+3 {
+ start += 3
+ bitStream += 3 << bitCount
+ bitCount += 2
+ }
+ bitStream += uint32(charnum-start) << bitCount
+ bitCount += 2
+ if bitCount > 16 {
+ out[outP] = byte(bitStream)
+ out[outP+1] = byte(bitStream >> 8)
+ outP += 2
+ bitStream >>= 16
+ bitCount -= 16
+ }
+ }
+
+ count := s.norm[charnum]
+ charnum++
+ max := (2*threshold - 1) - remaining
+ if count < 0 {
+ remaining += count
+ } else {
+ remaining -= count
+ }
+ count++ // +1 for extra accuracy
+ if count >= threshold {
+ count += max // [0..max[ [max..threshold[ (...)
[threshold+max 2*threshold[ + } + bitStream += uint32(count) << bitCount + bitCount += nbBits + if count < max { + bitCount-- + } + + previous0 = count == 1 + if remaining < 1 { + return nil, errors.New("internal error: remaining < 1") + } + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + if outP+2 > len(out) { + return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen]) + } + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += int((bitCount + 7) / 8) + + if charnum > s.symbolLen { + return nil, errors.New("internal error: charnum > s.symbolLen") + } + return out[:outP], nil +} + +// Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) +// note 1 : assume symbolValue is valid (<= maxSymbolValue) +// note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits * +func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 { + minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16 + threshold := (minNbBits + 1) << 16 + if debugAsserts { + if !(s.actualTableLog < 16) { + panic("!s.actualTableLog < 16") + } + // ensure enough room for renormalization double shift + if !(uint8(accuracyLog) < 31-s.actualTableLog) { + panic("!uint8(accuracyLog) < 31-s.actualTableLog") + } + } + tableSize := uint32(1) << s.actualTableLog + deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize) + // linear interpolation (very approximate) + normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog + bitMultiplier := uint32(1) << accuracyLog + if debugAsserts { + if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold { + panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold") + } + if normalizedDeltaFromThreshold > bitMultiplier { + panic("normalizedDeltaFromThreshold > bitMultiplier") + } + } + return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold +} + +// Returns the cost in bits of encoding the distribution in count using ctable. +// Histogram should only be up to the last non-zero symbol. +// Returns an -1 if ctable cannot represent all the symbols in count. +func (s *fseEncoder) approxSize(hist []uint32) uint32 { + if int(s.symbolLen) < len(hist) { + // More symbols than we have. + return math.MaxUint32 + } + if s.useRLE { + // We will never reuse RLE encoders. + return math.MaxUint32 + } + const kAccuracyLog = 8 + badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog + var cost uint32 + for i, v := range hist { + if v == 0 { + continue + } + if s.norm[i] == 0 { + return math.MaxUint32 + } + bitCost := s.bitCost(uint8(i), kAccuracyLog) + if bitCost > badCost { + return math.MaxUint32 + } + cost += v * bitCost + } + return cost >> kAccuracyLog +} + +// maxHeaderSize returns the maximum header size in bits. +// This is not exact size, but we want a penalty for new tables anyway. +func (s *fseEncoder) maxHeaderSize() uint32 { + if s.preDefined { + return 0 + } + if s.useRLE { + return 8 + } + return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8 +} + +// cState contains the compression state of a stream. 
+type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + if len(c.stateTable) == 1 { + // RLE + c.stateTable[0] = uint16(0) + c.state = 0 + return + } + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + int32(first.deltaFindState) + c.state = c.stateTable[lu] + return +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encode(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + int32(symbolTT.deltaFindState) + c.bw.addBits16NC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go new file mode 100644 index 000000000..6c17dc17f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go @@ -0,0 +1,158 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "math" + "sync" +) + +var ( + // fsePredef are the predefined fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredef [3]fseDecoder + + // fsePredefEnc are the predefined encoder based on fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredefEnc [3]fseEncoder + + // symbolTableX contain the transformations needed for each type as defined in + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + symbolTableX [3][]baseOffset + + // maxTableSymbol is the biggest supported symbol for each table type + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol} + + // bitTables is the bits table for each table. + bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]} +) + +type tableIndex uint8 + +const ( + // indexes for fsePredef and symbolTableX + tableLiteralLengths tableIndex = 0 + tableOffsets tableIndex = 1 + tableMatchLengths tableIndex = 2 + + maxLiteralLengthSymbol = 35 + maxOffsetLengthSymbol = 30 + maxMatchLengthSymbol = 52 +) + +// baseOffset is used for calculating transformations. +type baseOffset struct { + baseLine uint32 + addBits uint8 +} + +// fillBase will precalculate base offsets with the given bit distributions. 
+func fillBase(dst []baseOffset, base uint32, bits ...uint8) { + if len(bits) != len(dst) { + panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits))) + } + for i, bit := range bits { + if base > math.MaxInt32 { + panic(fmt.Sprintf("invalid decoding table, base overflows int32")) + } + + dst[i] = baseOffset{ + baseLine: base, + addBits: bit, + } + base += 1 << bit + } +} + +var predef sync.Once + +func initPredefined() { + predef.Do(func() { + // Literals length codes + tmp := make([]baseOffset, 36) + for i := range tmp[:16] { + tmp[i] = baseOffset{ + baseLine: uint32(i), + addBits: 0, + } + } + fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableLiteralLengths] = tmp + + // Match length codes + tmp = make([]baseOffset, 53) + for i := range tmp[:32] { + tmp[i] = baseOffset{ + // The transformation adds the 3 length. + baseLine: uint32(i) + 3, + addBits: 0, + } + } + fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableMatchLengths] = tmp + + // Offset codes + tmp = make([]baseOffset, maxOffsetBits+1) + tmp[1] = baseOffset{ + baseLine: 1, + addBits: 1, + } + fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30) + symbolTableX[tableOffsets] = tmp + + // Fill predefined tables and transform them. + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + for i := range fsePredef[:] { + f := &fsePredef[i] + switch tableIndex(i) { + case tableLiteralLengths: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243 + f.actualTableLog = 6 + copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, + -1, -1, -1, -1}) + f.symbolLen = 36 + case tableOffsets: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281 + f.actualTableLog = 5 + copy(f.norm[:], []int16{ + 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}) + f.symbolLen = 29 + case tableMatchLengths: + //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304 + f.actualTableLog = 6 + copy(f.norm[:], []int16{ + 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, + -1, -1, -1, -1, -1}) + f.symbolLen = 53 + } + if err := f.buildDtable(); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + if err := f.transform(symbolTableX[i]); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + f.preDefined = true + + // Create encoder as well + enc := &fsePredefEnc[i] + copy(enc.norm[:], f.norm[:]) + enc.symbolLen = f.symbolLen + enc.actualTableLog = f.actualTableLog + if err := enc.buildCTable(); err != nil { + panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err)) + } + enc.setBits(bitTables[i]) + enc.preDefined = true + } + }) +} diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go new file mode 100644 index 000000000..4a752067f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/hash.go @@ -0,0 +1,77 @@ +// Copyright 2019+ Klaus Post. 
All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +const ( + prime3bytes = 506832829 + prime4bytes = 2654435761 + prime5bytes = 889523592379 + prime6bytes = 227718039650203 + prime7bytes = 58295818150454627 + prime8bytes = 0xcf1bbcdcb7a56463 +) + +// hashLen returns a hash of the lowest l bytes of u for a size size of h bytes. +// l must be >=4 and <=8. Any other value will return hash for 4 bytes. +// h should always be <32. +// Preferably h and l should be a constant. +// FIXME: This does NOT get resolved, if 'mls' is constant, +// so this cannot be used. +func hashLen(u uint64, hashLog, mls uint8) uint32 { + switch mls { + case 5: + return hash5(u, hashLog) + case 6: + return hash6(u, hashLog) + case 7: + return hash7(u, hashLog) + case 8: + return hash8(u, hashLog) + default: + return hash4x64(u, hashLog) + } +} + +// hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <32. +func hash3(u uint32, h uint8) uint32 { + return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31) +} + +// hash4 returns the hash of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <32. +func hash4(u uint32, h uint8) uint32 { + return (u * prime4bytes) >> ((32 - h) & 31) +} + +// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <32. +func hash4x64(u uint64, h uint8) uint32 { + return (uint32(u) * prime4bytes) >> ((32 - h) & 31) +} + +// hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash5(u uint64, h uint8) uint32 { + return uint32(((u << (64 - 40)) * prime5bytes) >> ((64 - h) & 63)) +} + +// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash6(u uint64, h uint8) uint32 { + return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63)) +} + +// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash7(u uint64, h uint8) uint32 { + return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63)) +} + +// hash8 returns the hash of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash8(u uint64, h uint8) uint32 { + return uint32((u * prime8bytes) >> ((64 - h) & 63)) +} diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go new file mode 100644 index 000000000..f783e32d2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/history.go @@ -0,0 +1,89 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "github.com/klauspost/compress/huff0" +) + +// history contains the information transferred between blocks. +type history struct { + b []byte + huffTree *huff0.Scratch + recentOffsets [3]int + decoders sequenceDecs + windowSize int + maxSize int + error bool + dict *dict +} + +// reset will reset the history to initial state of a frame. 
+// The history must already have been initialized to the desired size. +func (h *history) reset() { + h.b = h.b[:0] + h.error = false + h.recentOffsets = [3]int{1, 4, 8} + if f := h.decoders.litLengths.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + } + if f := h.decoders.offsets.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + } + if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + } + h.decoders = sequenceDecs{} + if h.huffTree != nil { + if h.dict == nil || h.dict.litEnc != h.huffTree { + huffDecoderPool.Put(h.huffTree) + } + } + h.huffTree = nil + h.dict = nil + //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) +} + +func (h *history) setDict(dict *dict) { + if dict == nil { + return + } + h.dict = dict + h.decoders.litLengths = dict.llDec + h.decoders.offsets = dict.ofDec + h.decoders.matchLengths = dict.mlDec + h.recentOffsets = dict.offsets + h.huffTree = dict.litEnc +} + +// append bytes to history. +// This function will make sure there is space for it, +// if the buffer has been allocated with enough extra space. +func (h *history) append(b []byte) { + if len(b) >= h.windowSize { + // Discard all history by simply overwriting + h.b = h.b[:h.windowSize] + copy(h.b, b[len(b)-h.windowSize:]) + return + } + + // If there is space, append it. + if len(b) < cap(h.b)-len(h.b) { + h.b = append(h.b, b...) + return + } + + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. + discard := len(b) + len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] + copy(h.b[h.windowSize-len(b):], b) +} + +// append bytes to history without ever discarding anything. +func (h *history) appendKeep(b []byte) { + h.b = append(h.b, b...) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt new file mode 100644 index 000000000..24b53065f --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md new file mode 100644 index 000000000..69aa3bb58 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md @@ -0,0 +1,58 @@ +# xxhash + +VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. + + +[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) +[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) + +xxhash is a Go implementation of the 64-bit +[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +This implementation provides a fast pure-Go implementation and an even faster +assembly implementation for amd64. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. + +| input size | purego | asm | +| --- | --- | --- | +| 5 B | 979.66 MB/s | 1291.17 MB/s | +| 100 B | 7475.26 MB/s | 7973.40 MB/s | +| 4 KB | 17573.46 MB/s | 17602.65 MB/s | +| 10 MB | 17131.46 MB/s | 17142.16 MB/s | + +These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using +the following commands under Go 1.11.2: + +``` +$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' +$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [FreeCache](https://github.com/coocood/freecache) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go new file mode 100644 index 000000000..426b9cac7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go @@ -0,0 +1,238 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package. + +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where +// possible in the Go code is worth a small (but measurable) performance boost +// by avoiding some MOVQs. Vars are needed for the asm and also are useful for +// convenience in the Go code in a few places where we need to intentionally +// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the +// result overflows a uint64). +var ( + prime1v = prime1 + prime2v = prime2 + prime3v = prime3 + prime4v = prime4 + prime5v = prime5 +) + +// Digest implements hash.Hash64. 
+type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = prime1v + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -prime1v + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(d.mem[d.n:], b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. + copy(d.mem[d.n:], b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[32-d.n:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. +func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + i, end := 0, d.n + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(d.mem[i:i+8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(d.mem[i:i+4])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for i < end { + h ^= uint64(d.mem[i]) * prime5 + h = rol11(h) * prime1 + i++ + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. 
+func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + b = b[len(d.mem):] + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) +} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go new file mode 100644 index 000000000..35318d7c4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go @@ -0,0 +1,13 @@ +// +build !appengine +// +build gc +// +build !purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(*Digest, []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s new file mode 100644 index 000000000..2c9c5357a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s @@ -0,0 +1,215 @@ +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Register allocation: +// AX h +// CX pointer to advance through b +// DX n +// BX loop end +// R8 v1, k1 +// R9 v2 +// R10 v3 +// R11 v4 +// R12 tmp +// R13 prime1v +// R14 prime2v +// R15 prime4v + +// round reads from and advances the buffer pointer in CX. +// It assumes that R13 has prime1v and R14 has prime2v. +#define round(r) \ + MOVQ (CX), R12 \ + ADDQ $8, CX \ + IMULQ R14, R12 \ + ADDQ R12, r \ + ROLQ $31, r \ + IMULQ R13, r + +// mergeRound applies a merge round on the two registers acc and val. +// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. +#define mergeRound(acc, val) \ + IMULQ R14, val \ + ROLQ $31, val \ + IMULQ R13, val \ + XORQ val, acc \ + IMULQ R13, acc \ + ADDQ R15, acc + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT, $0-32 + // Load fixed primes. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + MOVQ ·prime4v(SB), R15 + + // Load slice. 
+ MOVQ b_base+0(FP), CX + MOVQ b_len+8(FP), DX + LEAQ (CX)(DX*1), BX + + // The first loop limit will be len(b)-32. + SUBQ $32, BX + + // Check whether we have at least one block. + CMPQ DX, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). + MOVQ R13, R8 + ADDQ R14, R8 + MOVQ R14, R9 + XORQ R10, R10 + XORQ R11, R11 + SUBQ R13, R11 + + // Loop until CX > BX. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + MOVQ R8, AX + ROLQ $1, AX + MOVQ R9, R12 + ROLQ $7, R12 + ADDQ R12, AX + MOVQ R10, R12 + ROLQ $12, R12 + ADDQ R12, AX + MOVQ R11, R12 + ROLQ $18, R12 + ADDQ R12, AX + + mergeRound(AX, R8) + mergeRound(AX, R9) + mergeRound(AX, R10) + mergeRound(AX, R11) + + JMP afterBlocks + +noBlocks: + MOVQ ·prime5v(SB), AX + +afterBlocks: + ADDQ DX, AX + + // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. + ADDQ $24, BX + + CMPQ CX, BX + JG fourByte + +wordLoop: + // Calculate k1. + MOVQ (CX), R8 + ADDQ $8, CX + IMULQ R14, R8 + ROLQ $31, R8 + IMULQ R13, R8 + + XORQ R8, AX + ROLQ $27, AX + IMULQ R13, AX + ADDQ R15, AX + + CMPQ CX, BX + JLE wordLoop + +fourByte: + ADDQ $4, BX + CMPQ CX, BX + JG singles + + MOVL (CX), R8 + ADDQ $4, CX + IMULQ R13, R8 + XORQ R8, AX + + ROLQ $23, AX + IMULQ R14, AX + ADDQ ·prime3v(SB), AX + +singles: + ADDQ $4, BX + CMPQ CX, BX + JGE finalize + +singlesLoop: + MOVBQZX (CX), R12 + ADDQ $1, CX + IMULQ ·prime5v(SB), R12 + XORQ R12, AX + + ROLQ $11, AX + IMULQ R13, AX + + CMPQ CX, BX + JL singlesLoop + +finalize: + MOVQ AX, R12 + SHRQ $33, R12 + XORQ R12, AX + IMULQ R14, AX + MOVQ AX, R12 + SHRQ $29, R12 + XORQ R12, AX + IMULQ ·prime3v(SB), AX + MOVQ AX, R12 + SHRQ $32, R12 + XORQ R12, AX + + MOVQ AX, ret+24(FP) + RET + +// writeBlocks uses the same registers as above except that it uses AX to store +// the d pointer. + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT, $0-40 + // Load fixed primes needed for round. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + + // Load slice. + MOVQ arg1_base+8(FP), CX + MOVQ arg1_len+16(FP), DX + LEAQ (CX)(DX*1), BX + SUBQ $32, BX + + // Load vN from d. + MOVQ arg+0(FP), AX + MOVQ 0(AX), R8 // v1 + MOVQ 8(AX), R9 // v2 + MOVQ 16(AX), R10 // v3 + MOVQ 24(AX), R11 // v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + // Copy vN back to d. + MOVQ R8, 0(AX) + MOVQ R9, 8(AX) + MOVQ R10, 16(AX) + MOVQ R11, 24(AX) + + // The number of bytes written is CX minus the old base pointer. + SUBQ arg1_base+8(FP), CX + MOVQ CX, ret+32(FP) + + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go new file mode 100644 index 000000000..4a5a82160 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -0,0 +1,76 @@ +// +build !amd64 appengine !gc purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. 
+ + n := len(b) + var h uint64 + + if n >= 32 { + v1 := prime1v + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -prime1v + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + i, end := 0, len(b) + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(b[i:i+8:len(b)])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for ; i < end; i++ { + h ^= uint64(b[i]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go new file mode 100644 index 000000000..6f3b0cb10 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go @@ -0,0 +1,11 @@ +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. +func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go new file mode 100644 index 000000000..1dd39e63b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -0,0 +1,492 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "io" +) + +type seq struct { + litLen uint32 + matchLen uint32 + offset uint32 + + // Codes are stored here for the encoder + // so they only have to be looked up once. + llCode, mlCode, ofCode uint8 +} + +func (s seq) String() string { + if s.offset <= 3 { + if s.offset == 0 { + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)") +} + +type seqCompMode uint8 + +const ( + compModePredefined seqCompMode = iota + compModeRLE + compModeFSE + compModeRepeat +) + +type sequenceDec struct { + // decoder keeps track of the current state and updates it from the bitstream. + fse *fseDecoder + state fseState + repeat bool +} + +// init the state of the decoder with input from stream. 
+func (s *sequenceDec) init(br *bitReader) error { + if s.fse == nil { + return errors.New("sequence decoder not defined") + } + s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1<= 0; i-- { + if br.overread() { + printf("reading sequence %d, exceeded available data\n", seqs-i) + return io.ErrUnexpectedEOF + } + var ll, mo, ml int + if br.off > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + + if ll > len(s.literals) { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) + } + size := ll + ml + len(s.out) + if size-startSize > maxBlockSize { + return fmt.Errorf("output (%d) bigger than max block size", size) + } + if size > cap(s.out) { + // Not enough size, which can happen under high volume block streaming conditions + // but could be if destination slice is too small for sync operations. + // over-allocating here can create a large amount of GC pressure so we try to keep + // it as contained as possible + used := len(s.out) - startSize + addBytes := 256 + ll + ml + used>>2 + // Clamp to max block size. + if used+addBytes > maxBlockSize { + addBytes = maxBlockSize - used + } + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + + // Add literals + s.out = append(s.out, s.literals[:ll]...) + s.literals = s.literals[ll:] + out := s.out + + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + + if mo > len(s.out)+len(hist) || mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) + } + + // we may be in dictionary. 
+ dictO := len(s.dict) - (mo - (len(s.out) + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) + } + end := dictO + ml + if end > len(s.dict) { + out = append(out, s.dict[dictO:]...) + mo -= len(s.dict) - dictO + ml -= len(s.dict) - dictO + } else { + out = append(out, s.dict[dictO:end]...) + mo = 0 + ml = 0 + } + } + + // Copy from history. + // TODO: Blocks without history could be made to ignore this completely. + if v := mo - len(s.out); v > 0 { + // v is the start position in history from end. + start := len(s.hist) - v + if ml > v { + // Some goes into current block. + // Copy remainder of history + out = append(out, s.hist[start:]...) + mo -= v + ml -= v + } else { + out = append(out, s.hist[start:start+ml]...) + ml = 0 + } + } + // We must be in current buffer now + if ml > 0 { + start := len(s.out) - mo + if ml <= len(s.out)-start { + // No overlap + out = append(out, s.out[start:start+ml]...) + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + out = out[:len(out)+ml] + src := out[start : start+ml] + // Destination is the space we just added. + dst := out[len(out)-ml:] + dst = dst[:len(src)] + for i := range src { + dst[i] = src[i] + } + } + } + s.out = out + if i == 0 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.getBitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + + // Add final literals + s.out = append(s.out, s.literals...) + return nil +} + +// update states, at least 27 bits must be available. +func (s *sequenceDecs) update(br *bitReader) { + // Max 8 bits + s.litLengths.state.next(br) + // Max 9 bits + s.matchLengths.state.next(br) + // Max 8 bits + s.offsets.state.next(br) +} + +var bitMask [16]uint16 + +func init() { + for i := range bitMask[:] { + bitMask[i] = uint16((1 << uint(i)) - 1) + } +} + +// update states, at least 27 bits must be available. +func (s *sequenceDecs) updateAlt(br *bitReader) { + // Update all 3 states at once. Approx 20% faster. 
+ a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + + nBits := a.nbBits() + b.nbBits() + c.nbBits() + if nBits == 0 { + s.litLengths.state.state = s.litLengths.state.dt[a.newState()] + s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()] + s.offsets.state.state = s.offsets.state.dt[c.newState()] + return + } + bits := br.getBitsFast(nBits) + lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31)) + s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits] + + lowBits = uint16(bits >> (c.nbBits() & 31)) + lowBits &= bitMask[b.nbBits()&15] + s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits] + + lowBits = uint16(bits) & bitMask[c.nbBits()&15] + s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits] +} + +// nextFast will return new states when there are at least 4 unused bytes left on the stream when done. +func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { + // Final will not read from stream. + ll, llB := llState.final() + ml, mlB := mlState.final() + mo, moB := ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + return + } + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + return + } + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + return +} + +func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { + // Final will not read from stream. + ll, llB := llState.final() + ml, mlB := mlState.final() + mo, moB := ofState.final() + + // extra bits are stored in reverse order. + br.fill() + if s.maxBits <= 32 { + mo += br.getBits(moB) + ml += br.getBits(mlB) + ll += br.getBits(llB) + } else { + mo += br.getBits(moB) + br.fill() + // matchlength+literal length, max 32 bits + ml += br.getBits(mlB) + ll += br.getBits(llB) + + } + mo = s.adjustOffset(mo, ll, moB) + return +} + +func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { + if offsetB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = offset + return offset + } + + if litLen == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
+ offset++ + } + + if offset == 0 { + return s.prevOffset[0] + } + var temp int + if offset == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[offset] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("temp was 0") + temp = 1 + } + + if offset != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + return temp +} + +// mergeHistory will merge history. +func (s *sequenceDecs) mergeHistory(hist *sequenceDecs) (*sequenceDecs, error) { + for i := uint(0); i < 3; i++ { + var sNew, sHist *sequenceDec + switch i { + default: + // same as "case 0": + sNew = &s.litLengths + sHist = &hist.litLengths + case 1: + sNew = &s.offsets + sHist = &hist.offsets + case 2: + sNew = &s.matchLengths + sHist = &hist.matchLengths + } + if sNew.repeat { + if sHist.fse == nil { + return nil, fmt.Errorf("sequence stream %d, repeat requested, but no history", i) + } + continue + } + if sNew.fse == nil { + return nil, fmt.Errorf("sequence stream %d, no fse found", i) + } + if sHist.fse != nil && !sHist.fse.preDefined { + fseDecoderPool.Put(sHist.fse) + } + sHist.fse = sNew.fse + } + return hist, nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go new file mode 100644 index 000000000..36bcc3cc0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go @@ -0,0 +1,115 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "math/bits" + +type seqCoders struct { + llEnc, ofEnc, mlEnc *fseEncoder + llPrev, ofPrev, mlPrev *fseEncoder +} + +// swap coders with another (block). +func (s *seqCoders) swap(other *seqCoders) { + *s, *other = *other, *s +} + +// setPrev will update the previous encoders to the actually used ones +// and make sure a fresh one is in the main slot. +func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { + compareSwap := func(used *fseEncoder, current, prev **fseEncoder) { + // We used the new one, more current to history and reuse the previous history + if *current == used { + *prev, *current = *current, *prev + c := *current + p := *prev + c.reUsed = false + p.reUsed = true + return + } + if used == *prev { + return + } + // Ensure we cannot reuse by accident + prevEnc := *prev + prevEnc.symbolLen = 0 + return + } + compareSwap(ll, &s.llEnc, &s.llPrev) + compareSwap(ml, &s.mlEnc, &s.mlPrev) + compareSwap(of, &s.ofEnc, &s.ofPrev) +} + +func highBit(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} + +var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 16, 17, 17, 18, 18, 19, 19, + 20, 20, 20, 20, 21, 21, 21, 21, + 22, 22, 22, 22, 22, 22, 22, 22, + 23, 23, 23, 23, 23, 23, 23, 23, + 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24} + +// Up to 6 bits +const maxLLCode = 35 + +// llBitsTable translates from ll code to number of bits. +var llBitsTable = [maxLLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16} + +// llCode returns the code that represents the literal length requested. 
+func llCode(litLength uint32) uint8 { + const llDeltaCode = 19 + if litLength <= 63 { + // Compiler insists on bounds check (Go 1.12) + return llCodeTable[litLength&63] + } + return uint8(highBit(litLength)) + llDeltaCode +} + +var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, + 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, + 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, + 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} + +// Up to 6 bits +const maxMLCode = 52 + +// mlBitsTable translates from ml code to number of bits. +var mlBitsTable = [maxMLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16} + +// note : mlBase = matchLength - MINMATCH; +// because it's the format it's stored in seqStore->sequences +func mlCode(mlBase uint32) uint8 { + const mlDeltaCode = 36 + if mlBase <= 127 { + // Compiler insists on bounds check (Go 1.12) + return mlCodeTable[mlBase&127] + } + return uint8(highBit(mlBase)) + mlDeltaCode +} + +func ofCode(offset uint32) uint8 { + // A valid offset will always be > 0. + return uint8(bits.Len32(offset) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go new file mode 100644 index 000000000..c95fe5111 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -0,0 +1,436 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "hash/crc32" + "io" + + "github.com/klauspost/compress/huff0" + "github.com/klauspost/compress/snappy" +) + +const ( + snappyTagLiteral = 0x00 + snappyTagCopy1 = 0x01 + snappyTagCopy2 = 0x02 + snappyTagCopy4 = 0x03 +) + +const ( + snappyChecksumSize = 4 + snappyMagicBody = "sNaPpY" + + // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + snappyMaxBlockSize = 65536 + + // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + snappyMaxEncodedLenOfMaxBlockSize = 76490 +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var ( + // ErrSnappyCorrupt reports that the input is invalid. + ErrSnappyCorrupt = errors.New("snappy: corrupt input") + // ErrSnappyTooLarge reports that the uncompressed length is too large. 
+ ErrSnappyTooLarge = errors.New("snappy: decoded block is too large") + // ErrSnappyUnsupported reports that the input isn't supported. + ErrSnappyUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// SnappyConverter can read SnappyConverter-compressed streams and convert them to zstd. +// Conversion is done by converting the stream directly from Snappy without intermediate +// full decoding. +// Therefore the compression ratio is much less than what can be done by a full decompression +// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without +// any errors being generated. +// No CRC value is being generated and not all CRC values of the Snappy stream are checked. +// However, it provides really fast recompression of Snappy streams. +// The converter can be reused to avoid allocations, even after errors. +type SnappyConverter struct { + r io.Reader + err error + buf []byte + block *blockEnc +} + +// Convert the Snappy stream supplied in 'in' and write the zStandard stream to 'w'. +// If any error is detected on the Snappy stream it is returned. +// The number of bytes written is returned. +func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { + initPredefined() + r.err = nil + r.r = in + if r.block == nil { + r.block = &blockEnc{} + r.block.init() + } + r.block.initNewEncode() + if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize { + r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize) + } + r.block.litEnc.Reuse = huff0.ReusePolicyNone + var written int64 + var readHeader bool + { + var header []byte + var n int + header, r.err = frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) + + n, r.err = w.Write(header) + if r.err != nil { + return written, r.err + } + written += int64(n) + } + + for { + if !r.readFull(r.buf[:4], true) { + // Add empty last block + r.block.reset(nil) + r.block.last = true + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, err := w.Write(r.block.output) + if err != nil { + return written, err + } + written += int64(n) + + return written, r.err + } + chunkType := r.buf[0] + if !readHeader { + if chunkType != chunkTypeStreamIdentifier { + println("chunkType != chunkTypeStreamIdentifier", chunkType) + r.err = ErrSnappyCorrupt + return written, r.err + } + readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + println("chunkLen > len(r.buf)", chunkType) + r.err = ErrSnappyUnsupported + return written, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). 
+ if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return written, r.err + } + //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[snappyChecksumSize:] + + n, hdr, err := snappyDecodedLen(buf) + if err != nil { + r.err = err + return written, r.err + } + buf = buf[hdr:] + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + r.block.pushOffsets() + if err := decodeSnappy(r.block, buf); err != nil { + r.err = err + return written, r.err + } + if r.block.size+r.block.extraLits != n { + printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits) + r.err = ErrSnappyCorrupt + return written, r.err + } + err = r.block.encode(nil, false, false) + switch err { + case errIncompressible: + r.block.popOffsets() + r.block.reset(nil) + r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) + if err != nil { + println("snappy.Decode:", err) + return written, err + } + err = r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + case nil: + default: + return written, err + } + + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + case chunkTypeUncompressedData: + if debug { + println("Uncompressed, chunklen", chunkLen) + } + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + buf := r.buf[:snappyChecksumSize] + if !r.readFull(buf, false) { + return written, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - snappyChecksumSize + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.literals = r.block.literals[:n] + if !r.readFull(r.block.literals, false) { + return written, r.err + } + if snappyCRC(r.block.literals) != checksum { + println("literals crc mismatch") + r.err = ErrSnappyCorrupt + return written, r.err + } + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + + case chunkTypeStreamIdentifier: + if debug { + println("stream id", chunkLen, len(snappyMagicBody)) + } + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(snappyMagicBody) { + println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody)) + r.err = ErrSnappyCorrupt + return written, r.err + } + if !r.readFull(r.buf[:len(snappyMagicBody)], false) { + return written, r.err + } + for i := 0; i < len(snappyMagicBody); i++ { + if r.buf[i] != snappyMagicBody[i] { + println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) + r.err = ErrSnappyCorrupt + return written, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). 
+ println("chunkType <= 0x7f") + r.err = ErrSnappyUnsupported + return written, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return written, r.err + } + } +} + +// decodeSnappy writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read. +func decodeSnappy(blk *blockEnc, src []byte) error { + //decodeRef(make([]byte, snappyMaxBlockSize), src) + var s, length int + lits := blk.extraLits + var offset uint32 + for s < len(src) { + switch src[s] & 0x03 { + case snappyTagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + if x > snappyMaxBlockSize { + println("x > snappyMaxBlockSize", x, snappyMaxBlockSize) + return ErrSnappyCorrupt + } + length = int(x) + 1 + if length <= 0 { + println("length <= 0 ", length) + + return errUnsupportedLiteralLength + } + //if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s { + // return ErrSnappyCorrupt + //} + + blk.literals = append(blk.literals, src[s:s+length]...) + //println(length, "litLen") + lits += length + s += length + continue + + case snappyTagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]) + + case snappyTagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = uint32(src[s-2]) | uint32(src[s-1])<<8 + + case snappyTagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + + if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ { + println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits) + + return ErrSnappyCorrupt + } + + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. 
+ if false { + offset = blk.matchOffset(offset, uint32(lits)) + } else { + offset += 3 + } + + blk.sequences = append(blk.sequences, seq{ + litLen: uint32(lits), + offset: offset, + matchLen: uint32(length) - zstdMinMatch, + }) + blk.size += length + lits + lits = 0 + } + blk.extraLits = lits + return nil +} + +func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrSnappyCorrupt + } + return false + } + return true +} + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func snappyCRC(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return c>>15 | c<<17 + 0xa282ead8 +} + +// snappyDecodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrSnappyCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrSnappyTooLarge + } + return int(v), n, nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go new file mode 100644 index 000000000..9056beef2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -0,0 +1,156 @@ +// Package zstd provides decompression of zstandard files. +// +// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd +package zstd + +import ( + "bytes" + "errors" + "log" + "math" + "math/bits" +) + +// enable debug printing +const debug = false + +// Enable extra assertions. +const debugAsserts = debug || false + +// print sequence details +const debugSequences = false + +// print detailed matching information +const debugMatches = false + +// force encoder to use predefined tables. +const forcePreDef = false + +// zstdMinMatch is the minimum zstd match length. +const zstdMinMatch = 3 + +// Reset the buffer offset when reaching this. +const bufferReset = math.MaxInt32 - MaxWindowSize + +var ( + // ErrReservedBlockType is returned when a reserved block type is found. + // Typically this indicates wrong or corrupted input. + ErrReservedBlockType = errors.New("invalid input: reserved block type encountered") + + // ErrCompressedSizeTooBig is returned when a block is bigger than allowed. + // Typically this indicates wrong or corrupted input. + ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big") + + // ErrBlockTooSmall is returned when a block is too small to be decoded. + // Typically returned on invalid input. + ErrBlockTooSmall = errors.New("block too small") + + // ErrMagicMismatch is returned when a "magic" number isn't what is expected. + // Typically this indicates wrong or corrupted input. + ErrMagicMismatch = errors.New("invalid input: magic number mismatch") + + // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeExceeded = errors.New("window size exceeded") + + // ErrWindowSizeTooSmall is returned when no window size is specified. + // Typically this indicates wrong or corrupted input. 
+ ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small") + + // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit. + ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") + + // ErrUnknownDictionary is returned if the dictionary ID is unknown. + // For the time being dictionaries are not supported. + ErrUnknownDictionary = errors.New("unknown dictionary") + + // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeExceeded = errors.New("frame size exceeded") + + // ErrCRCMismatch is returned if CRC mismatches. + ErrCRCMismatch = errors.New("CRC check failed") + + // ErrDecoderClosed will be returned if the Decoder was used after + // Close has been called. + ErrDecoderClosed = errors.New("decoder used after Close") + + // ErrDecoderNilInput is returned when a nil Reader was provided + // and an operation other than Reset/DecodeAll/Close was attempted. + ErrDecoderNilInput = errors.New("nil input provided as reader") +) + +func println(a ...interface{}) { + if debug { + log.Println(a...) + } +} + +func printf(format string, a ...interface{}) { + if debug { + log.Printf(format, a...) + } +} + +// matchLenFast does matching, but will not match the last up to 7 bytes. +func matchLenFast(a, b []byte) int { + endI := len(a) & (math.MaxInt32 - 7) + for i := 0; i < endI; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + return i + bits.TrailingZeros64(diff)>>3 + } + } + return endI +} + +// matchLen returns the maximum length. +// a must be the shortest of the two. +// The function also returns whether all bytes matched. +func matchLen(a, b []byte) int { + b = b[:len(a)] + for i := 0; i < len(a)-7; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + return i + (bits.TrailingZeros64(diff) >> 3) + } + } + + checked := (len(a) >> 3) << 3 + a = a[checked:] + b = b[checked:] + for i := range a { + if a[i] != b[i] { + return i + checked + } + } + return len(a) + checked +} + +func load3232(b []byte, i int32) uint32 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. + b = b[i:] + b = b[:4] + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load6432(b []byte, i int32) uint64 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. + b = b[i:] + b = b[:8] + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func load64(b []byte, i int) uint64 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. 
+ b = b[i:] + b = b[:8] + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +type byter interface { + Bytes() []byte + Len() int +} + +var _ byter = &bytes.Buffer{} diff --git a/vendor/github.com/magiconair/properties/.gitignore b/vendor/github.com/magiconair/properties/.gitignore new file mode 100644 index 000000000..e7081ff52 --- /dev/null +++ b/vendor/github.com/magiconair/properties/.gitignore @@ -0,0 +1,6 @@ +*.sublime-project +*.sublime-workspace +*.un~ +*.swp +.idea/ +*.iml diff --git a/vendor/github.com/magiconair/properties/CHANGELOG.md b/vendor/github.com/magiconair/properties/CHANGELOG.md new file mode 100644 index 000000000..842e8e24f --- /dev/null +++ b/vendor/github.com/magiconair/properties/CHANGELOG.md @@ -0,0 +1,205 @@ +## Changelog + +### [1.8.7](https://github.com/magiconair/properties/tree/v1.8.7) - 08 Dec 2022 + + * [PR #65](https://github.com/magiconair/properties/pull/65): Speedup Merge + + Thanks to [@AdityaVallabh](https://github.com/AdityaVallabh) for the patch. + + * [PR #66](https://github.com/magiconair/properties/pull/66): use github actions + +### [1.8.6](https://github.com/magiconair/properties/tree/v1.8.6) - 23 Feb 2022 + + * [PR #57](https://github.com/magiconair/properties/pull/57):Fix "unreachable code" lint error + + Thanks to [@ellie](https://github.com/ellie) for the patch. + + * [PR #63](https://github.com/magiconair/properties/pull/63): Make TestMustGetParsedDuration backwards compatible + + This patch ensures that the `TestMustGetParsedDuration` still works with `go1.3` to make the + author happy until it affects real users. + + Thanks to [@maage](https://github.com/maage) for the patch. + +### [1.8.5](https://github.com/magiconair/properties/tree/v1.8.5) - 24 Mar 2021 + + * [PR #55](https://github.com/magiconair/properties/pull/55): Fix: Encoding Bug in Comments + + When reading comments \ are loaded correctly, but when writing they are then + replaced by \\. This leads to wrong comments when writing and reading multiple times. + + Thanks to [@doxsch](https://github.com/doxsch) for the patch. + +### [1.8.4](https://github.com/magiconair/properties/tree/v1.8.4) - 23 Sep 2020 + + * [PR #50](https://github.com/magiconair/properties/pull/50): enhance error message for circular references + + Thanks to [@sriv](https://github.com/sriv) for the patch. + +### [1.8.3](https://github.com/magiconair/properties/tree/v1.8.3) - 14 Sep 2020 + + * [PR #49](https://github.com/magiconair/properties/pull/49): Include the key in error message causing the circular reference + + The change is include the key in the error message which is causing the circular + reference when parsing/loading the properties files. + + Thanks to [@haroon-sheikh](https://github.com/haroon-sheikh) for the patch. + +### [1.8.2](https://github.com/magiconair/properties/tree/v1.8.2) - 25 Aug 2020 + + * [PR #36](https://github.com/magiconair/properties/pull/36): Escape backslash on write + + This patch ensures that backslashes are escaped on write. Existing applications which + rely on the old behavior may need to be updated. + + Thanks to [@apesternikov](https://github.com/apesternikov) for the patch. + + * [PR #42](https://github.com/magiconair/properties/pull/42): Made Content-Type check whitespace agnostic in LoadURL() + + Thanks to [@aliras1](https://github.com/aliras1) for the patch. 
+ + * [PR #41](https://github.com/magiconair/properties/pull/41): Make key/value separator configurable on Write() + + Thanks to [@mkjor](https://github.com/mkjor) for the patch. + + * [PR #40](https://github.com/magiconair/properties/pull/40): Add method to return a sorted list of keys + + Thanks to [@mkjor](https://github.com/mkjor) for the patch. + +### [1.8.1](https://github.com/magiconair/properties/tree/v1.8.1) - 10 May 2019 + + * [PR #35](https://github.com/magiconair/properties/pull/35): Close body always after request + + This patch ensures that in `LoadURL` the response body is always closed. + + Thanks to [@liubog2008](https://github.com/liubog2008) for the patch. + +### [1.8](https://github.com/magiconair/properties/tree/v1.8) - 15 May 2018 + + * [PR #26](https://github.com/magiconair/properties/pull/26): Disable expansion during loading + + This adds the option to disable property expansion during loading. + + Thanks to [@kmala](https://github.com/kmala) for the patch. + +### [1.7.6](https://github.com/magiconair/properties/tree/v1.7.6) - 14 Feb 2018 + + * [PR #29](https://github.com/magiconair/properties/pull/29): Reworked expansion logic to handle more complex cases. + + See PR for an example. + + Thanks to [@yobert](https://github.com/yobert) for the fix. + +### [1.7.5](https://github.com/magiconair/properties/tree/v1.7.5) - 13 Feb 2018 + + * [PR #28](https://github.com/magiconair/properties/pull/28): Support duplicate expansions in the same value + + Values which expand the same key multiple times (e.g. `key=${a} ${a}`) will no longer fail + with a `circular reference error`. + + Thanks to [@yobert](https://github.com/yobert) for the fix. + +### [1.7.4](https://github.com/magiconair/properties/tree/v1.7.4) - 31 Oct 2017 + + * [Issue #23](https://github.com/magiconair/properties/issues/23): Ignore blank lines with whitespaces + + * [PR #24](https://github.com/magiconair/properties/pull/24): Update keys when DisableExpansion is enabled + + Thanks to [@mgurov](https://github.com/mgurov) for the fix. 
+ +### [1.7.3](https://github.com/magiconair/properties/tree/v1.7.3) - 10 Jul 2017 + + * [Issue #17](https://github.com/magiconair/properties/issues/17): Add [SetValue()](http://godoc.org/github.com/magiconair/properties#Properties.SetValue) method to set values generically + * [Issue #22](https://github.com/magiconair/properties/issues/22): Add [LoadMap()](http://godoc.org/github.com/magiconair/properties#LoadMap) function to load properties from a string map + +### [1.7.2](https://github.com/magiconair/properties/tree/v1.7.2) - 20 Mar 2017 + + * [Issue #15](https://github.com/magiconair/properties/issues/15): Drop gocheck dependency + * [PR #21](https://github.com/magiconair/properties/pull/21): Add [Map()](http://godoc.org/github.com/magiconair/properties#Properties.Map) and [FilterFunc()](http://godoc.org/github.com/magiconair/properties#Properties.FilterFunc) + +### [1.7.1](https://github.com/magiconair/properties/tree/v1.7.1) - 13 Jan 2017 + + * [Issue #14](https://github.com/magiconair/properties/issues/14): Decouple TestLoadExpandedFile from `$USER` + * [PR #12](https://github.com/magiconair/properties/pull/12): Load from files and URLs + * [PR #16](https://github.com/magiconair/properties/pull/16): Keep gofmt happy + * [PR #18](https://github.com/magiconair/properties/pull/18): Fix Delete() function + +### [1.7.0](https://github.com/magiconair/properties/tree/v1.7.0) - 20 Mar 2016 + + * [Issue #10](https://github.com/magiconair/properties/issues/10): Add [LoadURL,LoadURLs,MustLoadURL,MustLoadURLs](http://godoc.org/github.com/magiconair/properties#LoadURL) method to load properties from a URL. + * [Issue #11](https://github.com/magiconair/properties/issues/11): Add [LoadString,MustLoadString](http://godoc.org/github.com/magiconair/properties#LoadString) method to load properties from an UTF8 string. + * [PR #8](https://github.com/magiconair/properties/pull/8): Add [MustFlag](http://godoc.org/github.com/magiconair/properties#Properties.MustFlag) method to provide overrides via command line flags. (@pascaldekloe) + +### [1.6.0](https://github.com/magiconair/properties/tree/v1.6.0) - 11 Dec 2015 + + * Add [Decode](http://godoc.org/github.com/magiconair/properties#Properties.Decode) method to populate struct from properties via tags. + +### [1.5.6](https://github.com/magiconair/properties/tree/v1.5.6) - 18 Oct 2015 + + * Vendored in gopkg.in/check.v1 + +### [1.5.5](https://github.com/magiconair/properties/tree/v1.5.5) - 31 Jul 2015 + + * [PR #6](https://github.com/magiconair/properties/pull/6): Add [Delete](http://godoc.org/github.com/magiconair/properties#Properties.Delete) method to remove keys including comments. (@gerbenjacobs) + +### [1.5.4](https://github.com/magiconair/properties/tree/v1.5.4) - 23 Jun 2015 + + * [Issue #5](https://github.com/magiconair/properties/issues/5): Allow disabling of property expansion [DisableExpansion](http://godoc.org/github.com/magiconair/properties#Properties.DisableExpansion). When property expansion is disabled Properties become a simple key/value store and don't check for circular references. 
+
+### [1.5.3](https://github.com/magiconair/properties/tree/v1.5.3) - 02 Jun 2015
+
+ * [Issue #4](https://github.com/magiconair/properties/issues/4): Maintain key order in [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) and [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp)
+
+### [1.5.2](https://github.com/magiconair/properties/tree/v1.5.2) - 10 Apr 2015
+
+ * [Issue #3](https://github.com/magiconair/properties/issues/3): Don't print comments in [WriteComment()](http://godoc.org/github.com/magiconair/properties#Properties.WriteComment) if they are all empty
+ * Add clickable links to README
+
+### [1.5.1](https://github.com/magiconair/properties/tree/v1.5.1) - 08 Dec 2014
+
+ * Added [GetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.GetParsedDuration) and [MustGetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.MustGetParsedDuration) for values specified compatible with
+ [time.ParseDuration()](http://golang.org/pkg/time/#ParseDuration).
+
+### [1.5.0](https://github.com/magiconair/properties/tree/v1.5.0) - 18 Nov 2014
+
+ * Added support for single and multi-line comments (reading, writing and updating)
+ * The order of keys is now preserved
+ * Calling [Set()](http://godoc.org/github.com/magiconair/properties#Properties.Set) with an empty key now silently ignores the call and does not create a new entry
+ * Added a [MustSet()](http://godoc.org/github.com/magiconair/properties#Properties.MustSet) method
+ * Migrated test library from launchpad.net/gocheck to [gopkg.in/check.v1](http://gopkg.in/check.v1)
+
+### [1.4.2](https://github.com/magiconair/properties/tree/v1.4.2) - 15 Nov 2014
+
+ * [Issue #2](https://github.com/magiconair/properties/issues/2): Fixed goroutine leak in parser which created two lexers but cleaned up only one
+
+### [1.4.1](https://github.com/magiconair/properties/tree/v1.4.1) - 13 Nov 2014
+
+ * [Issue #1](https://github.com/magiconair/properties/issues/1): Fixed bug in Keys() method which returned an empty string
+
+### [1.4.0](https://github.com/magiconair/properties/tree/v1.4.0) - 23 Sep 2014
+
+ * Added [Keys()](http://godoc.org/github.com/magiconair/properties#Properties.Keys) to get the keys
+ * Added [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp) and [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) to get a subset of the properties
+
+### [1.3.0](https://github.com/magiconair/properties/tree/v1.3.0) - 18 Mar 2014
+
+* Added support for time.Duration
+* Made MustXXX() failure behavior configurable (log.Fatal, panic, custom)
+* Changed default of MustXXX() failure from panic to log.Fatal
+
+### [1.2.0](https://github.com/magiconair/properties/tree/v1.2.0) - 05 Mar 2014
+
+* Added MustGet...
functions +* Added support for int and uint with range checks on 32 bit platforms + +### [1.1.0](https://github.com/magiconair/properties/tree/v1.1.0) - 20 Jan 2014 + +* Renamed from goproperties to properties +* Added support for expansion of environment vars in + filenames and value expressions +* Fixed bug where value expressions were not at the + start of the string + +### [1.0.0](https://github.com/magiconair/properties/tree/v1.0.0) - 7 Jan 2014 + +* Initial release diff --git a/vendor/github.com/magiconair/properties/LICENSE.md b/vendor/github.com/magiconair/properties/LICENSE.md new file mode 100644 index 000000000..79c87e3e6 --- /dev/null +++ b/vendor/github.com/magiconair/properties/LICENSE.md @@ -0,0 +1,24 @@ +Copyright (c) 2013-2020, Frank Schroeder + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/magiconair/properties/README.md b/vendor/github.com/magiconair/properties/README.md new file mode 100644 index 000000000..e2edda025 --- /dev/null +++ b/vendor/github.com/magiconair/properties/README.md @@ -0,0 +1,128 @@ +[![](https://img.shields.io/github/tag/magiconair/properties.svg?style=flat-square&label=release)](https://github.com/magiconair/properties/releases) +[![Travis CI Status](https://img.shields.io/travis/magiconair/properties.svg?branch=master&style=flat-square&label=travis)](https://travis-ci.org/magiconair/properties) +[![License](https://img.shields.io/badge/License-BSD%202--Clause-orange.svg?style=flat-square)](https://raw.githubusercontent.com/magiconair/properties/master/LICENSE) +[![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties) + +# Overview + +#### Please run `git pull --tags` to update the tags. See [below](#updated-git-tags) why. + +properties is a Go library for reading and writing properties files. + +It supports reading from multiple files or URLs and Spring style recursive +property expansion of expressions like `${key}` to their corresponding value. +Value expressions can refer to other keys like in `${key}` or to environment +variables like in `${USER}`. Filenames can also contain environment variables +like in `/home/${USER}/myapp.properties`. 
+ +Properties can be decoded into structs, maps, arrays and values through +struct tags. + +Comments and the order of keys are preserved. Comments can be modified +and can be written to the output. + +The properties library supports both ISO-8859-1 and UTF-8 encoded data. + +Starting from version 1.3.0 the behavior of the MustXXX() functions is +configurable by providing a custom `ErrorHandler` function. The default has +changed from `panic` to `log.Fatal` but this is configurable and custom +error handling functions can be provided. See the package documentation for +details. + +Read the full documentation on [![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties) + +## Getting Started + +```go +import ( + "flag" + "github.com/magiconair/properties" +) + +func main() { + // init from a file + p := properties.MustLoadFile("${HOME}/config.properties", properties.UTF8) + + // or multiple files + p = properties.MustLoadFiles([]string{ + "${HOME}/config.properties", + "${HOME}/config-${USER}.properties", + }, properties.UTF8, true) + + // or from a map + p = properties.LoadMap(map[string]string{"key": "value", "abc": "def"}) + + // or from a string + p = properties.MustLoadString("key=value\nabc=def") + + // or from a URL + p = properties.MustLoadURL("http://host/path") + + // or from multiple URLs + p = properties.MustLoadURL([]string{ + "http://host/config", + "http://host/config-${USER}", + }, true) + + // or from flags + p.MustFlag(flag.CommandLine) + + // get values through getters + host := p.MustGetString("host") + port := p.GetInt("port", 8080) + + // or through Decode + type Config struct { + Host string `properties:"host"` + Port int `properties:"port,default=9000"` + Accept []string `properties:"accept,default=image/png;image;gif"` + Timeout time.Duration `properties:"timeout,default=5s"` + } + var cfg Config + if err := p.Decode(&cfg); err != nil { + log.Fatal(err) + } +} + +``` + +## Installation and Upgrade + +``` +$ go get -u github.com/magiconair/properties +``` + +## License + +2 clause BSD license. See [LICENSE](https://github.com/magiconair/properties/blob/master/LICENSE) file for details. + +## ToDo + +* Dump contents with passwords and secrets obscured + +## Updated Git tags + +#### 13 Feb 2018 + +I realized that all of the git tags I had pushed before v1.7.5 were lightweight tags +and I've only recently learned that this doesn't play well with `git describe` 😞 + +I have replaced all lightweight tags with signed tags using this script which should +retain the commit date, name and email address. Please run `git pull --tags` to update them. + +Worst case you have to reclone the repo. + +```shell +#!/bin/bash +tag=$1 +echo "Updating $tag" +date=$(git show ${tag}^0 --format=%aD | head -1) +email=$(git show ${tag}^0 --format=%aE | head -1) +name=$(git show ${tag}^0 --format=%aN | head -1) +GIT_COMMITTER_DATE="$date" GIT_COMMITTER_NAME="$name" GIT_COMMITTER_EMAIL="$email" git tag -s -f ${tag} ${tag}^0 -m ${tag} +``` + +I apologize for the inconvenience. + +Frank + diff --git a/vendor/github.com/magiconair/properties/decode.go b/vendor/github.com/magiconair/properties/decode.go new file mode 100644 index 000000000..8e6aa441d --- /dev/null +++ b/vendor/github.com/magiconair/properties/decode.go @@ -0,0 +1,289 @@ +// Copyright 2013-2022 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package properties + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "time" +) + +// Decode assigns property values to exported fields of a struct. +// +// Decode traverses v recursively and returns an error if a value cannot be +// converted to the field type or a required value is missing for a field. +// +// The following type dependent decodings are used: +// +// String, boolean, numeric fields have the value of the property key assigned. +// The property key name is the name of the field. A different key and a default +// value can be set in the field's tag. Fields without default value are +// required. If the value cannot be converted to the field type an error is +// returned. +// +// time.Duration fields have the result of time.ParseDuration() assigned. +// +// time.Time fields have the vaule of time.Parse() assigned. The default layout +// is time.RFC3339 but can be set in the field's tag. +// +// Arrays and slices of string, boolean, numeric, time.Duration and time.Time +// fields have the value interpreted as a comma separated list of values. The +// individual values are trimmed of whitespace and empty values are ignored. A +// default value can be provided as a semicolon separated list in the field's +// tag. +// +// Struct fields are decoded recursively using the field name plus "." as +// prefix. The prefix (without dot) can be overridden in the field's tag. +// Default values are not supported in the field's tag. Specify them on the +// fields of the inner struct instead. +// +// Map fields must have a key of type string and are decoded recursively by +// using the field's name plus ".' as prefix and the next element of the key +// name as map key. The prefix (without dot) can be overridden in the field's +// tag. Default values are not supported. +// +// Examples: +// +// // Field is ignored. +// Field int `properties:"-"` +// +// // Field is assigned value of 'Field'. +// Field int +// +// // Field is assigned value of 'myName'. +// Field int `properties:"myName"` +// +// // Field is assigned value of key 'myName' and has a default +// // value 15 if the key does not exist. +// Field int `properties:"myName,default=15"` +// +// // Field is assigned value of key 'Field' and has a default +// // value 15 if the key does not exist. +// Field int `properties:",default=15"` +// +// // Field is assigned value of key 'date' and the date +// // is in format 2006-01-02 +// Field time.Time `properties:"date,layout=2006-01-02"` +// +// // Field is assigned the non-empty and whitespace trimmed +// // values of key 'Field' split by commas. +// Field []string +// +// // Field is assigned the non-empty and whitespace trimmed +// // values of key 'Field' split by commas and has a default +// // value ["a", "b", "c"] if the key does not exist. +// Field []string `properties:",default=a;b;c"` +// +// // Field is decoded recursively with "Field." as key prefix. +// Field SomeStruct +// +// // Field is decoded recursively with "myName." as key prefix. +// Field SomeStruct `properties:"myName"` +// +// // Field is decoded recursively with "Field." as key prefix +// // and the next dotted element of the key as map key. +// Field map[string]string +// +// // Field is decoded recursively with "myName." as key prefix +// // and the next dotted element of the key as map key. 
+// Field map[string]string `properties:"myName"` +func (p *Properties) Decode(x interface{}) error { + t, v := reflect.TypeOf(x), reflect.ValueOf(x) + if t.Kind() != reflect.Ptr || v.Elem().Type().Kind() != reflect.Struct { + return fmt.Errorf("not a pointer to struct: %s", t) + } + if err := dec(p, "", nil, nil, v); err != nil { + return err + } + return nil +} + +func dec(p *Properties, key string, def *string, opts map[string]string, v reflect.Value) error { + t := v.Type() + + // value returns the property value for key or the default if provided. + value := func() (string, error) { + if val, ok := p.Get(key); ok { + return val, nil + } + if def != nil { + return *def, nil + } + return "", fmt.Errorf("missing required key %s", key) + } + + // conv converts a string to a value of the given type. + conv := func(s string, t reflect.Type) (val reflect.Value, err error) { + var v interface{} + + switch { + case isDuration(t): + v, err = time.ParseDuration(s) + + case isTime(t): + layout := opts["layout"] + if layout == "" { + layout = time.RFC3339 + } + v, err = time.Parse(layout, s) + + case isBool(t): + v, err = boolVal(s), nil + + case isString(t): + v, err = s, nil + + case isFloat(t): + v, err = strconv.ParseFloat(s, 64) + + case isInt(t): + v, err = strconv.ParseInt(s, 10, 64) + + case isUint(t): + v, err = strconv.ParseUint(s, 10, 64) + + default: + return reflect.Zero(t), fmt.Errorf("unsupported type %s", t) + } + if err != nil { + return reflect.Zero(t), err + } + return reflect.ValueOf(v).Convert(t), nil + } + + // keydef returns the property key and the default value based on the + // name of the struct field and the options in the tag. + keydef := func(f reflect.StructField) (string, *string, map[string]string) { + _key, _opts := parseTag(f.Tag.Get("properties")) + + var _def *string + if d, ok := _opts["default"]; ok { + _def = &d + } + if _key != "" { + return _key, _def, _opts + } + return f.Name, _def, _opts + } + + switch { + case isDuration(t) || isTime(t) || isBool(t) || isString(t) || isFloat(t) || isInt(t) || isUint(t): + s, err := value() + if err != nil { + return err + } + val, err := conv(s, t) + if err != nil { + return err + } + v.Set(val) + + case isPtr(t): + return dec(p, key, def, opts, v.Elem()) + + case isStruct(t): + for i := 0; i < v.NumField(); i++ { + fv := v.Field(i) + fk, def, opts := keydef(t.Field(i)) + if !fv.CanSet() { + return fmt.Errorf("cannot set %s", t.Field(i).Name) + } + if fk == "-" { + continue + } + if key != "" { + fk = key + "." 
+ fk + } + if err := dec(p, fk, def, opts, fv); err != nil { + return err + } + } + return nil + + case isArray(t): + val, err := value() + if err != nil { + return err + } + vals := split(val, ";") + a := reflect.MakeSlice(t, 0, len(vals)) + for _, s := range vals { + val, err := conv(s, t.Elem()) + if err != nil { + return err + } + a = reflect.Append(a, val) + } + v.Set(a) + + case isMap(t): + valT := t.Elem() + m := reflect.MakeMap(t) + for postfix := range p.FilterStripPrefix(key + ".").m { + pp := strings.SplitN(postfix, ".", 2) + mk, mv := pp[0], reflect.New(valT) + if err := dec(p, key+"."+mk, nil, nil, mv); err != nil { + return err + } + m.SetMapIndex(reflect.ValueOf(mk), mv.Elem()) + } + v.Set(m) + + default: + return fmt.Errorf("unsupported type %s", t) + } + return nil +} + +// split splits a string on sep, trims whitespace of elements +// and omits empty elements +func split(s string, sep string) []string { + var a []string + for _, v := range strings.Split(s, sep) { + if v = strings.TrimSpace(v); v != "" { + a = append(a, v) + } + } + return a +} + +// parseTag parses a "key,k=v,k=v,..." +func parseTag(tag string) (key string, opts map[string]string) { + opts = map[string]string{} + for i, s := range strings.Split(tag, ",") { + if i == 0 { + key = s + continue + } + + pp := strings.SplitN(s, "=", 2) + if len(pp) == 1 { + opts[pp[0]] = "" + } else { + opts[pp[0]] = pp[1] + } + } + return key, opts +} + +func isArray(t reflect.Type) bool { return t.Kind() == reflect.Array || t.Kind() == reflect.Slice } +func isBool(t reflect.Type) bool { return t.Kind() == reflect.Bool } +func isDuration(t reflect.Type) bool { return t == reflect.TypeOf(time.Second) } +func isMap(t reflect.Type) bool { return t.Kind() == reflect.Map } +func isPtr(t reflect.Type) bool { return t.Kind() == reflect.Ptr } +func isString(t reflect.Type) bool { return t.Kind() == reflect.String } +func isStruct(t reflect.Type) bool { return t.Kind() == reflect.Struct } +func isTime(t reflect.Type) bool { return t == reflect.TypeOf(time.Time{}) } +func isFloat(t reflect.Type) bool { + return t.Kind() == reflect.Float32 || t.Kind() == reflect.Float64 +} +func isInt(t reflect.Type) bool { + return t.Kind() == reflect.Int || t.Kind() == reflect.Int8 || t.Kind() == reflect.Int16 || t.Kind() == reflect.Int32 || t.Kind() == reflect.Int64 +} +func isUint(t reflect.Type) bool { + return t.Kind() == reflect.Uint || t.Kind() == reflect.Uint8 || t.Kind() == reflect.Uint16 || t.Kind() == reflect.Uint32 || t.Kind() == reflect.Uint64 +} diff --git a/vendor/github.com/magiconair/properties/doc.go b/vendor/github.com/magiconair/properties/doc.go new file mode 100644 index 000000000..7c7979315 --- /dev/null +++ b/vendor/github.com/magiconair/properties/doc.go @@ -0,0 +1,155 @@ +// Copyright 2013-2022 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package properties provides functions for reading and writing +// ISO-8859-1 and UTF-8 encoded .properties files and has +// support for recursive property expansion. +// +// Java properties files are ISO-8859-1 encoded and use Unicode +// literals for characters outside the ISO character set. Unicode +// literals can be used in UTF-8 encoded properties files but +// aren't necessary. 
+// +// To load a single properties file use MustLoadFile(): +// +// p := properties.MustLoadFile(filename, properties.UTF8) +// +// To load multiple properties files use MustLoadFiles() +// which loads the files in the given order and merges the +// result. Missing properties files can be ignored if the +// 'ignoreMissing' flag is set to true. +// +// Filenames can contain environment variables which are expanded +// before loading. +// +// f1 := "/etc/myapp/myapp.conf" +// f2 := "/home/${USER}/myapp.conf" +// p := MustLoadFiles([]string{f1, f2}, properties.UTF8, true) +// +// All of the different key/value delimiters ' ', ':' and '=' are +// supported as well as the comment characters '!' and '#' and +// multi-line values. +// +// ! this is a comment +// # and so is this +// +// # the following expressions are equal +// key value +// key=value +// key:value +// key = value +// key : value +// key = val\ +// ue +// +// Properties stores all comments preceding a key and provides +// GetComments() and SetComments() methods to retrieve and +// update them. The convenience functions GetComment() and +// SetComment() allow access to the last comment. The +// WriteComment() method writes properties files including +// the comments and with the keys in the original order. +// This can be used for sanitizing properties files. +// +// Property expansion is recursive and circular references +// and malformed expressions are not allowed and cause an +// error. Expansion of environment variables is supported. +// +// # standard property +// key = value +// +// # property expansion: key2 = value +// key2 = ${key} +// +// # recursive expansion: key3 = value +// key3 = ${key2} +// +// # circular reference (error) +// key = ${key} +// +// # malformed expression (error) +// key = ${ke +// +// # refers to the users' home dir +// home = ${HOME} +// +// # local key takes precedence over env var: u = foo +// USER = foo +// u = ${USER} +// +// The default property expansion format is ${key} but can be +// changed by setting different pre- and postfix values on the +// Properties object. +// +// p := properties.NewProperties() +// p.Prefix = "#[" +// p.Postfix = "]#" +// +// Properties provides convenience functions for getting typed +// values with default values if the key does not exist or the +// type conversion failed. +// +// # Returns true if the value is either "1", "on", "yes" or "true" +// # Returns false for every other value and the default value if +// # the key does not exist. +// v = p.GetBool("key", false) +// +// # Returns the value if the key exists and the format conversion +// # was successful. Otherwise, the default value is returned. +// v = p.GetInt64("key", 999) +// v = p.GetUint64("key", 999) +// v = p.GetFloat64("key", 123.0) +// v = p.GetString("key", "def") +// v = p.GetDuration("key", 999) +// +// As an alternative properties may be applied with the standard +// library's flag implementation at any time. +// +// # Standard configuration +// v = flag.Int("key", 999, "help message") +// flag.Parse() +// +// # Merge p into the flag set +// p.MustFlag(flag.CommandLine) +// +// Properties provides several MustXXX() convenience functions +// which will terminate the app if an error occurs. The behavior +// of the failure is configurable and the default is to call +// log.Fatal(err). To have the MustXXX() functions panic instead +// of logging the error set a different ErrorHandler before +// you use the Properties package. 
+// +// properties.ErrorHandler = properties.PanicHandler +// +// # Will panic instead of logging an error +// p := properties.MustLoadFile("config.properties") +// +// You can also provide your own ErrorHandler function. The only requirement +// is that the error handler function must exit after handling the error. +// +// properties.ErrorHandler = func(err error) { +// fmt.Println(err) +// os.Exit(1) +// } +// +// # Will write to stdout and then exit +// p := properties.MustLoadFile("config.properties") +// +// Properties can also be loaded into a struct via the `Decode` +// method, e.g. +// +// type S struct { +// A string `properties:"a,default=foo"` +// D time.Duration `properties:"timeout,default=5s"` +// E time.Time `properties:"expires,layout=2006-01-02,default=2015-01-01"` +// } +// +// See `Decode()` method for the full documentation. +// +// The following documents provide a description of the properties +// file format. +// +// http://en.wikipedia.org/wiki/.properties +// +// http://docs.oracle.com/javase/7/docs/api/java/util/Properties.html#load%28java.io.Reader%29 +package properties diff --git a/vendor/github.com/magiconair/properties/integrate.go b/vendor/github.com/magiconair/properties/integrate.go new file mode 100644 index 000000000..35d0ae97b --- /dev/null +++ b/vendor/github.com/magiconair/properties/integrate.go @@ -0,0 +1,35 @@ +// Copyright 2013-2022 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package properties + +import "flag" + +// MustFlag sets flags that are skipped by dst.Parse when p contains +// the respective key for flag.Flag.Name. +// +// It's use is recommended with command line arguments as in: +// +// flag.Parse() +// p.MustFlag(flag.CommandLine) +func (p *Properties) MustFlag(dst *flag.FlagSet) { + m := make(map[string]*flag.Flag) + dst.VisitAll(func(f *flag.Flag) { + m[f.Name] = f + }) + dst.Visit(func(f *flag.Flag) { + delete(m, f.Name) // overridden + }) + + for name, f := range m { + v, ok := p.Get(name) + if !ok { + continue + } + + if err := f.Value.Set(v); err != nil { + ErrorHandler(err) + } + } +} diff --git a/vendor/github.com/magiconair/properties/lex.go b/vendor/github.com/magiconair/properties/lex.go new file mode 100644 index 000000000..3d15a1f6e --- /dev/null +++ b/vendor/github.com/magiconair/properties/lex.go @@ -0,0 +1,395 @@ +// Copyright 2013-2022 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// Parts of the lexer are from the template/text/parser package +// For these parts the following applies: +// +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file of the go 1.2 +// distribution. + +package properties + +import ( + "fmt" + "strconv" + "strings" + "unicode/utf8" +) + +// item represents a token or text string returned from the scanner. +type item struct { + typ itemType // The type of this item. + pos int // The starting position, in bytes, of this item in the input string. + val string // The value of this item. +} + +func (i item) String() string { + switch { + case i.typ == itemEOF: + return "EOF" + case i.typ == itemError: + return i.val + case len(i.val) > 10: + return fmt.Sprintf("%.10q...", i.val) + } + return fmt.Sprintf("%q", i.val) +} + +// itemType identifies the type of lex items. 
+type itemType int + +const ( + itemError itemType = iota // error occurred; value is text of error + itemEOF + itemKey // a key + itemValue // a value + itemComment // a comment +) + +// defines a constant for EOF +const eof = -1 + +// permitted whitespace characters space, FF and TAB +const whitespace = " \f\t" + +// stateFn represents the state of the scanner as a function that returns the next state. +type stateFn func(*lexer) stateFn + +// lexer holds the state of the scanner. +type lexer struct { + input string // the string being scanned + state stateFn // the next lexing function to enter + pos int // current position in the input + start int // start position of this item + width int // width of last rune read from input + lastPos int // position of most recent item returned by nextItem + runes []rune // scanned runes for this item + items chan item // channel of scanned items +} + +// next returns the next rune in the input. +func (l *lexer) next() rune { + if l.pos >= len(l.input) { + l.width = 0 + return eof + } + r, w := utf8.DecodeRuneInString(l.input[l.pos:]) + l.width = w + l.pos += l.width + return r +} + +// peek returns but does not consume the next rune in the input. +func (l *lexer) peek() rune { + r := l.next() + l.backup() + return r +} + +// backup steps back one rune. Can only be called once per call of next. +func (l *lexer) backup() { + l.pos -= l.width +} + +// emit passes an item back to the client. +func (l *lexer) emit(t itemType) { + i := item{t, l.start, string(l.runes)} + l.items <- i + l.start = l.pos + l.runes = l.runes[:0] +} + +// ignore skips over the pending input before this point. +func (l *lexer) ignore() { + l.start = l.pos +} + +// appends the rune to the current value +func (l *lexer) appendRune(r rune) { + l.runes = append(l.runes, r) +} + +// accept consumes the next rune if it's from the valid set. +func (l *lexer) accept(valid string) bool { + if strings.ContainsRune(valid, l.next()) { + return true + } + l.backup() + return false +} + +// acceptRun consumes a run of runes from the valid set. +func (l *lexer) acceptRun(valid string) { + for strings.ContainsRune(valid, l.next()) { + } + l.backup() +} + +// lineNumber reports which line we're on, based on the position of +// the previous item returned by nextItem. Doing it this way +// means we don't have to worry about peek double counting. +func (l *lexer) lineNumber() int { + return 1 + strings.Count(l.input[:l.lastPos], "\n") +} + +// errorf returns an error token and terminates the scan by passing +// back a nil pointer that will be the next state, terminating l.nextItem. +func (l *lexer) errorf(format string, args ...interface{}) stateFn { + l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)} + return nil +} + +// nextItem returns the next item from the input. +func (l *lexer) nextItem() item { + i := <-l.items + l.lastPos = i.pos + return i +} + +// lex creates a new scanner for the input string. +func lex(input string) *lexer { + l := &lexer{ + input: input, + items: make(chan item), + runes: make([]rune, 0, 32), + } + go l.run() + return l +} + +// run runs the state machine for the lexer. +func (l *lexer) run() { + for l.state = lexBeforeKey(l); l.state != nil; { + l.state = l.state(l) + } +} + +// state functions + +// lexBeforeKey scans until a key begins. 
+func lexBeforeKey(l *lexer) stateFn { + switch r := l.next(); { + case isEOF(r): + l.emit(itemEOF) + return nil + + case isEOL(r): + l.ignore() + return lexBeforeKey + + case isComment(r): + return lexComment + + case isWhitespace(r): + l.ignore() + return lexBeforeKey + + default: + l.backup() + return lexKey + } +} + +// lexComment scans a comment line. The comment character has already been scanned. +func lexComment(l *lexer) stateFn { + l.acceptRun(whitespace) + l.ignore() + for { + switch r := l.next(); { + case isEOF(r): + l.ignore() + l.emit(itemEOF) + return nil + case isEOL(r): + l.emit(itemComment) + return lexBeforeKey + default: + l.appendRune(r) + } + } +} + +// lexKey scans the key up to a delimiter +func lexKey(l *lexer) stateFn { + var r rune + +Loop: + for { + switch r = l.next(); { + + case isEscape(r): + err := l.scanEscapeSequence() + if err != nil { + return l.errorf(err.Error()) + } + + case isEndOfKey(r): + l.backup() + break Loop + + case isEOF(r): + break Loop + + default: + l.appendRune(r) + } + } + + if len(l.runes) > 0 { + l.emit(itemKey) + } + + if isEOF(r) { + l.emit(itemEOF) + return nil + } + + return lexBeforeValue +} + +// lexBeforeValue scans the delimiter between key and value. +// Leading and trailing whitespace is ignored. +// We expect to be just after the key. +func lexBeforeValue(l *lexer) stateFn { + l.acceptRun(whitespace) + l.accept(":=") + l.acceptRun(whitespace) + l.ignore() + return lexValue +} + +// lexValue scans text until the end of the line. We expect to be just after the delimiter. +func lexValue(l *lexer) stateFn { + for { + switch r := l.next(); { + case isEscape(r): + if isEOL(l.peek()) { + l.next() + l.acceptRun(whitespace) + } else { + err := l.scanEscapeSequence() + if err != nil { + return l.errorf(err.Error()) + } + } + + case isEOL(r): + l.emit(itemValue) + l.ignore() + return lexBeforeKey + + case isEOF(r): + l.emit(itemValue) + l.emit(itemEOF) + return nil + + default: + l.appendRune(r) + } + } +} + +// scanEscapeSequence scans either one of the escaped characters +// or a unicode literal. We expect to be after the escape character. +func (l *lexer) scanEscapeSequence() error { + switch r := l.next(); { + + case isEscapedCharacter(r): + l.appendRune(decodeEscapedCharacter(r)) + return nil + + case atUnicodeLiteral(r): + return l.scanUnicodeLiteral() + + case isEOF(r): + return fmt.Errorf("premature EOF") + + // silently drop the escape character and append the rune as is + default: + l.appendRune(r) + return nil + } +} + +// scans a unicode literal in the form \uXXXX. We expect to be after the \u. +func (l *lexer) scanUnicodeLiteral() error { + // scan the digits + d := make([]rune, 4) + for i := 0; i < 4; i++ { + d[i] = l.next() + if d[i] == eof || !strings.ContainsRune("0123456789abcdefABCDEF", d[i]) { + return fmt.Errorf("invalid unicode literal") + } + } + + // decode the digits into a rune + r, err := strconv.ParseInt(string(d), 16, 0) + if err != nil { + return err + } + + l.appendRune(rune(r)) + return nil +} + +// decodeEscapedCharacter returns the unescaped rune. We expect to be after the escape character. +func decodeEscapedCharacter(r rune) rune { + switch r { + case 'f': + return '\f' + case 'n': + return '\n' + case 'r': + return '\r' + case 't': + return '\t' + default: + return r + } +} + +// atUnicodeLiteral reports whether we are at a unicode literal. +// The escape character has already been consumed. 
+func atUnicodeLiteral(r rune) bool { + return r == 'u' +} + +// isComment reports whether we are at the start of a comment. +func isComment(r rune) bool { + return r == '#' || r == '!' +} + +// isEndOfKey reports whether the rune terminates the current key. +func isEndOfKey(r rune) bool { + return strings.ContainsRune(" \f\t\r\n:=", r) +} + +// isEOF reports whether we are at EOF. +func isEOF(r rune) bool { + return r == eof +} + +// isEOL reports whether we are at a new line character. +func isEOL(r rune) bool { + return r == '\n' || r == '\r' +} + +// isEscape reports whether the rune is the escape character which +// prefixes unicode literals and other escaped characters. +func isEscape(r rune) bool { + return r == '\\' +} + +// isEscapedCharacter reports whether we are at one of the characters that need escaping. +// The escape character has already been consumed. +func isEscapedCharacter(r rune) bool { + return strings.ContainsRune(" :=fnrt", r) +} + +// isWhitespace reports whether the rune is a whitespace character. +func isWhitespace(r rune) bool { + return strings.ContainsRune(whitespace, r) +} diff --git a/vendor/github.com/magiconair/properties/load.go b/vendor/github.com/magiconair/properties/load.go new file mode 100644 index 000000000..635368dc8 --- /dev/null +++ b/vendor/github.com/magiconair/properties/load.go @@ -0,0 +1,293 @@ +// Copyright 2013-2022 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package properties + +import ( + "fmt" + "io/ioutil" + "net/http" + "os" + "strings" +) + +// Encoding specifies encoding of the input data. +type Encoding uint + +const ( + // utf8Default is a private placeholder for the zero value of Encoding to + // ensure that it has the correct meaning. UTF8 is the default encoding but + // was assigned a non-zero value which cannot be changed without breaking + // existing code. Clients should continue to use the public constants. + utf8Default Encoding = iota + + // UTF8 interprets the input data as UTF-8. + UTF8 + + // ISO_8859_1 interprets the input data as ISO-8859-1. + ISO_8859_1 +) + +type Loader struct { + // Encoding determines how the data from files and byte buffers + // is interpreted. For URLs the Content-Type header is used + // to determine the encoding of the data. + Encoding Encoding + + // DisableExpansion configures the property expansion of the + // returned property object. When set to true, the property values + // will not be expanded and the Property object will not be checked + // for invalid expansion expressions. + DisableExpansion bool + + // IgnoreMissing configures whether missing files or URLs which return + // 404 are reported as errors. When set to true, missing files and 404 + // status codes are not reported as errors. + IgnoreMissing bool +} + +// Load reads a buffer into a Properties struct. +func (l *Loader) LoadBytes(buf []byte) (*Properties, error) { + return l.loadBytes(buf, l.Encoding) +} + +// LoadAll reads the content of multiple URLs or files in the given order into +// a Properties struct. If IgnoreMissing is true then a 404 status code or +// missing file will not be reported as error. Encoding sets the encoding for +// files. For the URLs see LoadURL for the Content-Type header and the +// encoding. 
+func (l *Loader) LoadAll(names []string) (*Properties, error) { + all := NewProperties() + for _, name := range names { + n, err := expandName(name) + if err != nil { + return nil, err + } + + var p *Properties + switch { + case strings.HasPrefix(n, "http://"): + p, err = l.LoadURL(n) + case strings.HasPrefix(n, "https://"): + p, err = l.LoadURL(n) + default: + p, err = l.LoadFile(n) + } + if err != nil { + return nil, err + } + all.Merge(p) + } + + all.DisableExpansion = l.DisableExpansion + if all.DisableExpansion { + return all, nil + } + return all, all.check() +} + +// LoadFile reads a file into a Properties struct. +// If IgnoreMissing is true then a missing file will not be +// reported as error. +func (l *Loader) LoadFile(filename string) (*Properties, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + if l.IgnoreMissing && os.IsNotExist(err) { + LogPrintf("properties: %s not found. skipping", filename) + return NewProperties(), nil + } + return nil, err + } + return l.loadBytes(data, l.Encoding) +} + +// LoadURL reads the content of the URL into a Properties struct. +// +// The encoding is determined via the Content-Type header which +// should be set to 'text/plain'. If the 'charset' parameter is +// missing, 'iso-8859-1' or 'latin1' the encoding is set to +// ISO-8859-1. If the 'charset' parameter is set to 'utf-8' the +// encoding is set to UTF-8. A missing content type header is +// interpreted as 'text/plain; charset=utf-8'. +func (l *Loader) LoadURL(url string) (*Properties, error) { + resp, err := http.Get(url) + if err != nil { + return nil, fmt.Errorf("properties: error fetching %q. %s", url, err) + } + defer resp.Body.Close() + + if resp.StatusCode == 404 && l.IgnoreMissing { + LogPrintf("properties: %s returned %d. skipping", url, resp.StatusCode) + return NewProperties(), nil + } + + if resp.StatusCode != 200 { + return nil, fmt.Errorf("properties: %s returned %d", url, resp.StatusCode) + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("properties: %s error reading response. %s", url, err) + } + + ct := resp.Header.Get("Content-Type") + ct = strings.Join(strings.Fields(ct), "") + var enc Encoding + switch strings.ToLower(ct) { + case "text/plain", "text/plain;charset=iso-8859-1", "text/plain;charset=latin1": + enc = ISO_8859_1 + case "", "text/plain;charset=utf-8": + enc = UTF8 + default: + return nil, fmt.Errorf("properties: invalid content type %s", ct) + } + + return l.loadBytes(body, enc) +} + +func (l *Loader) loadBytes(buf []byte, enc Encoding) (*Properties, error) { + p, err := parse(convert(buf, enc)) + if err != nil { + return nil, err + } + p.DisableExpansion = l.DisableExpansion + if p.DisableExpansion { + return p, nil + } + return p, p.check() +} + +// Load reads a buffer into a Properties struct. +func Load(buf []byte, enc Encoding) (*Properties, error) { + l := &Loader{Encoding: enc} + return l.LoadBytes(buf) +} + +// LoadString reads an UTF8 string into a properties struct. +func LoadString(s string) (*Properties, error) { + l := &Loader{Encoding: UTF8} + return l.LoadBytes([]byte(s)) +} + +// LoadMap creates a new Properties struct from a string map. +func LoadMap(m map[string]string) *Properties { + p := NewProperties() + for k, v := range m { + p.Set(k, v) + } + return p +} + +// LoadFile reads a file into a Properties struct. 
+func LoadFile(filename string, enc Encoding) (*Properties, error) { + l := &Loader{Encoding: enc} + return l.LoadAll([]string{filename}) +} + +// LoadFiles reads multiple files in the given order into +// a Properties struct. If 'ignoreMissing' is true then +// non-existent files will not be reported as error. +func LoadFiles(filenames []string, enc Encoding, ignoreMissing bool) (*Properties, error) { + l := &Loader{Encoding: enc, IgnoreMissing: ignoreMissing} + return l.LoadAll(filenames) +} + +// LoadURL reads the content of the URL into a Properties struct. +// See Loader#LoadURL for details. +func LoadURL(url string) (*Properties, error) { + l := &Loader{Encoding: UTF8} + return l.LoadAll([]string{url}) +} + +// LoadURLs reads the content of multiple URLs in the given order into a +// Properties struct. If IgnoreMissing is true then a 404 status code will +// not be reported as error. See Loader#LoadURL for the Content-Type header +// and the encoding. +func LoadURLs(urls []string, ignoreMissing bool) (*Properties, error) { + l := &Loader{Encoding: UTF8, IgnoreMissing: ignoreMissing} + return l.LoadAll(urls) +} + +// LoadAll reads the content of multiple URLs or files in the given order into a +// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will +// not be reported as error. Encoding sets the encoding for files. For the URLs please see +// LoadURL for the Content-Type header and the encoding. +func LoadAll(names []string, enc Encoding, ignoreMissing bool) (*Properties, error) { + l := &Loader{Encoding: enc, IgnoreMissing: ignoreMissing} + return l.LoadAll(names) +} + +// MustLoadString reads an UTF8 string into a Properties struct and +// panics on error. +func MustLoadString(s string) *Properties { + return must(LoadString(s)) +} + +// MustLoadFile reads a file into a Properties struct and +// panics on error. +func MustLoadFile(filename string, enc Encoding) *Properties { + return must(LoadFile(filename, enc)) +} + +// MustLoadFiles reads multiple files in the given order into +// a Properties struct and panics on error. If 'ignoreMissing' +// is true then non-existent files will not be reported as error. +func MustLoadFiles(filenames []string, enc Encoding, ignoreMissing bool) *Properties { + return must(LoadFiles(filenames, enc, ignoreMissing)) +} + +// MustLoadURL reads the content of a URL into a Properties struct and +// panics on error. +func MustLoadURL(url string) *Properties { + return must(LoadURL(url)) +} + +// MustLoadURLs reads the content of multiple URLs in the given order into a +// Properties struct and panics on error. If 'ignoreMissing' is true then a 404 +// status code will not be reported as error. +func MustLoadURLs(urls []string, ignoreMissing bool) *Properties { + return must(LoadURLs(urls, ignoreMissing)) +} + +// MustLoadAll reads the content of multiple URLs or files in the given order into a +// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will +// not be reported as error. Encoding sets the encoding for files. For the URLs please see +// LoadURL for the Content-Type header and the encoding. It panics on error. +func MustLoadAll(names []string, enc Encoding, ignoreMissing bool) *Properties { + return must(LoadAll(names, enc, ignoreMissing)) +} + +func must(p *Properties, err error) *Properties { + if err != nil { + ErrorHandler(err) + } + return p +} + +// expandName expands ${ENV_VAR} expressions in a name. 
+// If the environment variable does not exist then it will be replaced +// with an empty string. Malformed expressions like "${ENV_VAR" will +// be reported as error. +func expandName(name string) (string, error) { + return expand(name, []string{}, "${", "}", make(map[string]string)) +} + +// Interprets a byte buffer either as an ISO-8859-1 or UTF-8 encoded string. +// For ISO-8859-1 we can convert each byte straight into a rune since the +// first 256 unicode code points cover ISO-8859-1. +func convert(buf []byte, enc Encoding) string { + switch enc { + case utf8Default, UTF8: + return string(buf) + case ISO_8859_1: + runes := make([]rune, len(buf)) + for i, b := range buf { + runes[i] = rune(b) + } + return string(runes) + default: + ErrorHandler(fmt.Errorf("unsupported encoding %v", enc)) + } + panic("ErrorHandler should exit") +} diff --git a/vendor/github.com/magiconair/properties/parser.go b/vendor/github.com/magiconair/properties/parser.go new file mode 100644 index 000000000..fccfd39f6 --- /dev/null +++ b/vendor/github.com/magiconair/properties/parser.go @@ -0,0 +1,86 @@ +// Copyright 2013-2022 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package properties + +import ( + "fmt" + "runtime" +) + +type parser struct { + lex *lexer +} + +func parse(input string) (properties *Properties, err error) { + p := &parser{lex: lex(input)} + defer p.recover(&err) + + properties = NewProperties() + key := "" + comments := []string{} + + for { + token := p.expectOneOf(itemComment, itemKey, itemEOF) + switch token.typ { + case itemEOF: + goto done + case itemComment: + comments = append(comments, token.val) + continue + case itemKey: + key = token.val + if _, ok := properties.m[key]; !ok { + properties.k = append(properties.k, key) + } + } + + token = p.expectOneOf(itemValue, itemEOF) + if len(comments) > 0 { + properties.c[key] = comments + comments = []string{} + } + switch token.typ { + case itemEOF: + properties.m[key] = "" + goto done + case itemValue: + properties.m[key] = token.val + } + } + +done: + return properties, nil +} + +func (p *parser) errorf(format string, args ...interface{}) { + format = fmt.Sprintf("properties: Line %d: %s", p.lex.lineNumber(), format) + panic(fmt.Errorf(format, args...)) +} + +func (p *parser) expectOneOf(expected ...itemType) (token item) { + token = p.lex.nextItem() + for _, v := range expected { + if token.typ == v { + return token + } + } + p.unexpected(token) + panic("unexpected token") +} + +func (p *parser) unexpected(token item) { + p.errorf(token.String()) +} + +// recover is the handler that turns panics into returns from the top level of Parse. +func (p *parser) recover(errp *error) { + e := recover() + if e != nil { + if _, ok := e.(runtime.Error); ok { + panic(e) + } + *errp = e.(error) + } +} diff --git a/vendor/github.com/magiconair/properties/properties.go b/vendor/github.com/magiconair/properties/properties.go new file mode 100644 index 000000000..fb2f7b404 --- /dev/null +++ b/vendor/github.com/magiconair/properties/properties.go @@ -0,0 +1,848 @@ +// Copyright 2013-2022 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package properties + +// BUG(frank): Set() does not check for invalid unicode literals since this is currently handled by the lexer. +// BUG(frank): Write() does not allow to configure the newline character. 
Therefore, on Windows LF is used. + +import ( + "bytes" + "fmt" + "io" + "log" + "os" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +const maxExpansionDepth = 64 + +// ErrorHandlerFunc defines the type of function which handles failures +// of the MustXXX() functions. An error handler function must exit +// the application after handling the error. +type ErrorHandlerFunc func(error) + +// ErrorHandler is the function which handles failures of the MustXXX() +// functions. The default is LogFatalHandler. +var ErrorHandler ErrorHandlerFunc = LogFatalHandler + +// LogHandlerFunc defines the function prototype for logging errors. +type LogHandlerFunc func(fmt string, args ...interface{}) + +// LogPrintf defines a log handler which uses log.Printf. +var LogPrintf LogHandlerFunc = log.Printf + +// LogFatalHandler handles the error by logging a fatal error and exiting. +func LogFatalHandler(err error) { + log.Fatal(err) +} + +// PanicHandler handles the error by panicking. +func PanicHandler(err error) { + panic(err) +} + +// ----------------------------------------------------------------------------- + +// A Properties contains the key/value pairs from the properties input. +// All values are stored in unexpanded form and are expanded at runtime +type Properties struct { + // Pre-/Postfix for property expansion. + Prefix string + Postfix string + + // DisableExpansion controls the expansion of properties on Get() + // and the check for circular references on Set(). When set to + // true Properties behaves like a simple key/value store and does + // not check for circular references on Get() or on Set(). + DisableExpansion bool + + // Stores the key/value pairs + m map[string]string + + // Stores the comments per key. + c map[string][]string + + // Stores the keys in order of appearance. + k []string + + // WriteSeparator specifies the separator of key and value while writing the properties. + WriteSeparator string +} + +// NewProperties creates a new Properties struct with the default +// configuration for "${key}" expressions. +func NewProperties() *Properties { + return &Properties{ + Prefix: "${", + Postfix: "}", + m: map[string]string{}, + c: map[string][]string{}, + k: []string{}, + } +} + +// Load reads a buffer into the given Properties struct. +func (p *Properties) Load(buf []byte, enc Encoding) error { + l := &Loader{Encoding: enc, DisableExpansion: p.DisableExpansion} + newProperties, err := l.LoadBytes(buf) + if err != nil { + return err + } + p.Merge(newProperties) + return nil +} + +// Get returns the expanded value for the given key if exists. +// Otherwise, ok is false. +func (p *Properties) Get(key string) (value string, ok bool) { + v, ok := p.m[key] + if p.DisableExpansion { + return v, ok + } + if !ok { + return "", false + } + + expanded, err := p.expand(key, v) + + // we guarantee that the expanded value is free of + // circular references and malformed expressions + // so we panic if we still get an error here. + if err != nil { + ErrorHandler(err) + } + + return expanded, true +} + +// MustGet returns the expanded value for the given key if exists. +// Otherwise, it panics. +func (p *Properties) MustGet(key string) string { + if v, ok := p.Get(key); ok { + return v + } + ErrorHandler(invalidKeyError(key)) + panic("ErrorHandler should exit") +} + +// ---------------------------------------------------------------------------- + +// ClearComments removes the comments for all keys. 
+func (p *Properties) ClearComments() { + p.c = map[string][]string{} +} + +// ---------------------------------------------------------------------------- + +// GetComment returns the last comment before the given key or an empty string. +func (p *Properties) GetComment(key string) string { + comments, ok := p.c[key] + if !ok || len(comments) == 0 { + return "" + } + return comments[len(comments)-1] +} + +// ---------------------------------------------------------------------------- + +// GetComments returns all comments that appeared before the given key or nil. +func (p *Properties) GetComments(key string) []string { + if comments, ok := p.c[key]; ok { + return comments + } + return nil +} + +// ---------------------------------------------------------------------------- + +// SetComment sets the comment for the key. +func (p *Properties) SetComment(key, comment string) { + p.c[key] = []string{comment} +} + +// ---------------------------------------------------------------------------- + +// SetComments sets the comments for the key. If the comments are nil then +// all comments for this key are deleted. +func (p *Properties) SetComments(key string, comments []string) { + if comments == nil { + delete(p.c, key) + return + } + p.c[key] = comments +} + +// ---------------------------------------------------------------------------- + +// GetBool checks if the expanded value is one of '1', 'yes', +// 'true' or 'on' if the key exists. The comparison is case-insensitive. +// If the key does not exist the default value is returned. +func (p *Properties) GetBool(key string, def bool) bool { + v, err := p.getBool(key) + if err != nil { + return def + } + return v +} + +// MustGetBool checks if the expanded value is one of '1', 'yes', +// 'true' or 'on' if the key exists. The comparison is case-insensitive. +// If the key does not exist the function panics. +func (p *Properties) MustGetBool(key string) bool { + v, err := p.getBool(key) + if err != nil { + ErrorHandler(err) + } + return v +} + +func (p *Properties) getBool(key string) (value bool, err error) { + if v, ok := p.Get(key); ok { + return boolVal(v), nil + } + return false, invalidKeyError(key) +} + +func boolVal(v string) bool { + v = strings.ToLower(v) + return v == "1" || v == "true" || v == "yes" || v == "on" +} + +// ---------------------------------------------------------------------------- + +// GetDuration parses the expanded value as an time.Duration (in ns) if the +// key exists. If key does not exist or the value cannot be parsed the default +// value is returned. In almost all cases you want to use GetParsedDuration(). +func (p *Properties) GetDuration(key string, def time.Duration) time.Duration { + v, err := p.getInt64(key) + if err != nil { + return def + } + return time.Duration(v) +} + +// MustGetDuration parses the expanded value as an time.Duration (in ns) if +// the key exists. If key does not exist or the value cannot be parsed the +// function panics. In almost all cases you want to use MustGetParsedDuration(). +func (p *Properties) MustGetDuration(key string) time.Duration { + v, err := p.getInt64(key) + if err != nil { + ErrorHandler(err) + } + return time.Duration(v) +} + +// ---------------------------------------------------------------------------- + +// GetParsedDuration parses the expanded value with time.ParseDuration() if the key exists. +// If key does not exist or the value cannot be parsed the default +// value is returned. 
+func (p *Properties) GetParsedDuration(key string, def time.Duration) time.Duration { + s, ok := p.Get(key) + if !ok { + return def + } + v, err := time.ParseDuration(s) + if err != nil { + return def + } + return v +} + +// MustGetParsedDuration parses the expanded value with time.ParseDuration() if the key exists. +// If key does not exist or the value cannot be parsed the function panics. +func (p *Properties) MustGetParsedDuration(key string) time.Duration { + s, ok := p.Get(key) + if !ok { + ErrorHandler(invalidKeyError(key)) + } + v, err := time.ParseDuration(s) + if err != nil { + ErrorHandler(err) + } + return v +} + +// ---------------------------------------------------------------------------- + +// GetFloat64 parses the expanded value as a float64 if the key exists. +// If key does not exist or the value cannot be parsed the default +// value is returned. +func (p *Properties) GetFloat64(key string, def float64) float64 { + v, err := p.getFloat64(key) + if err != nil { + return def + } + return v +} + +// MustGetFloat64 parses the expanded value as a float64 if the key exists. +// If key does not exist or the value cannot be parsed the function panics. +func (p *Properties) MustGetFloat64(key string) float64 { + v, err := p.getFloat64(key) + if err != nil { + ErrorHandler(err) + } + return v +} + +func (p *Properties) getFloat64(key string) (value float64, err error) { + if v, ok := p.Get(key); ok { + value, err = strconv.ParseFloat(v, 64) + if err != nil { + return 0, err + } + return value, nil + } + return 0, invalidKeyError(key) +} + +// ---------------------------------------------------------------------------- + +// GetInt parses the expanded value as an int if the key exists. +// If key does not exist or the value cannot be parsed the default +// value is returned. If the value does not fit into an int the +// function panics with an out of range error. +func (p *Properties) GetInt(key string, def int) int { + v, err := p.getInt64(key) + if err != nil { + return def + } + return intRangeCheck(key, v) +} + +// MustGetInt parses the expanded value as an int if the key exists. +// If key does not exist or the value cannot be parsed the function panics. +// If the value does not fit into an int the function panics with +// an out of range error. +func (p *Properties) MustGetInt(key string) int { + v, err := p.getInt64(key) + if err != nil { + ErrorHandler(err) + } + return intRangeCheck(key, v) +} + +// ---------------------------------------------------------------------------- + +// GetInt64 parses the expanded value as an int64 if the key exists. +// If key does not exist or the value cannot be parsed the default +// value is returned. +func (p *Properties) GetInt64(key string, def int64) int64 { + v, err := p.getInt64(key) + if err != nil { + return def + } + return v +} + +// MustGetInt64 parses the expanded value as an int if the key exists. +// If key does not exist or the value cannot be parsed the function panics. +func (p *Properties) MustGetInt64(key string) int64 { + v, err := p.getInt64(key) + if err != nil { + ErrorHandler(err) + } + return v +} + +func (p *Properties) getInt64(key string) (value int64, err error) { + if v, ok := p.Get(key); ok { + value, err = strconv.ParseInt(v, 10, 64) + if err != nil { + return 0, err + } + return value, nil + } + return 0, invalidKeyError(key) +} + +// ---------------------------------------------------------------------------- + +// GetUint parses the expanded value as an uint if the key exists. 
+// If key does not exist or the value cannot be parsed the default +// value is returned. If the value does not fit into an int the +// function panics with an out of range error. +func (p *Properties) GetUint(key string, def uint) uint { + v, err := p.getUint64(key) + if err != nil { + return def + } + return uintRangeCheck(key, v) +} + +// MustGetUint parses the expanded value as an int if the key exists. +// If key does not exist or the value cannot be parsed the function panics. +// If the value does not fit into an int the function panics with +// an out of range error. +func (p *Properties) MustGetUint(key string) uint { + v, err := p.getUint64(key) + if err != nil { + ErrorHandler(err) + } + return uintRangeCheck(key, v) +} + +// ---------------------------------------------------------------------------- + +// GetUint64 parses the expanded value as an uint64 if the key exists. +// If key does not exist or the value cannot be parsed the default +// value is returned. +func (p *Properties) GetUint64(key string, def uint64) uint64 { + v, err := p.getUint64(key) + if err != nil { + return def + } + return v +} + +// MustGetUint64 parses the expanded value as an int if the key exists. +// If key does not exist or the value cannot be parsed the function panics. +func (p *Properties) MustGetUint64(key string) uint64 { + v, err := p.getUint64(key) + if err != nil { + ErrorHandler(err) + } + return v +} + +func (p *Properties) getUint64(key string) (value uint64, err error) { + if v, ok := p.Get(key); ok { + value, err = strconv.ParseUint(v, 10, 64) + if err != nil { + return 0, err + } + return value, nil + } + return 0, invalidKeyError(key) +} + +// ---------------------------------------------------------------------------- + +// GetString returns the expanded value for the given key if exists or +// the default value otherwise. +func (p *Properties) GetString(key, def string) string { + if v, ok := p.Get(key); ok { + return v + } + return def +} + +// MustGetString returns the expanded value for the given key if exists or +// panics otherwise. +func (p *Properties) MustGetString(key string) string { + if v, ok := p.Get(key); ok { + return v + } + ErrorHandler(invalidKeyError(key)) + panic("ErrorHandler should exit") +} + +// ---------------------------------------------------------------------------- + +// Filter returns a new properties object which contains all properties +// for which the key matches the pattern. +func (p *Properties) Filter(pattern string) (*Properties, error) { + re, err := regexp.Compile(pattern) + if err != nil { + return nil, err + } + + return p.FilterRegexp(re), nil +} + +// FilterRegexp returns a new properties object which contains all properties +// for which the key matches the regular expression. +func (p *Properties) FilterRegexp(re *regexp.Regexp) *Properties { + pp := NewProperties() + for _, k := range p.k { + if re.MatchString(k) { + // TODO(fs): we are ignoring the error which flags a circular reference. + // TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed) + pp.Set(k, p.m[k]) + } + } + return pp +} + +// FilterPrefix returns a new properties object with a subset of all keys +// with the given prefix. +func (p *Properties) FilterPrefix(prefix string) *Properties { + pp := NewProperties() + for _, k := range p.k { + if strings.HasPrefix(k, prefix) { + // TODO(fs): we are ignoring the error which flags a circular reference. 
+ // TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed) + pp.Set(k, p.m[k]) + } + } + return pp +} + +// FilterStripPrefix returns a new properties object with a subset of all keys +// with the given prefix and the prefix removed from the keys. +func (p *Properties) FilterStripPrefix(prefix string) *Properties { + pp := NewProperties() + n := len(prefix) + for _, k := range p.k { + if len(k) > len(prefix) && strings.HasPrefix(k, prefix) { + // TODO(fs): we are ignoring the error which flags a circular reference. + // TODO(fs): since we are modifying keys I am not entirely sure whether we can create a circular reference + // TODO(fs): this function should probably return an error but the signature is fixed + pp.Set(k[n:], p.m[k]) + } + } + return pp +} + +// Len returns the number of keys. +func (p *Properties) Len() int { + return len(p.m) +} + +// Keys returns all keys in the same order as in the input. +func (p *Properties) Keys() []string { + keys := make([]string, len(p.k)) + copy(keys, p.k) + return keys +} + +// Set sets the property key to the corresponding value. +// If a value for key existed before then ok is true and prev +// contains the previous value. If the value contains a +// circular reference or a malformed expression then +// an error is returned. +// An empty key is silently ignored. +func (p *Properties) Set(key, value string) (prev string, ok bool, err error) { + if key == "" { + return "", false, nil + } + + // if expansion is disabled we allow circular references + if p.DisableExpansion { + prev, ok = p.Get(key) + p.m[key] = value + if !ok { + p.k = append(p.k, key) + } + return prev, ok, nil + } + + // to check for a circular reference we temporarily need + // to set the new value. If there is an error then revert + // to the previous state. Only if all tests are successful + // then we add the key to the p.k list. + prev, ok = p.Get(key) + p.m[key] = value + + // now check for a circular reference + _, err = p.expand(key, value) + if err != nil { + + // revert to the previous state + if ok { + p.m[key] = prev + } else { + delete(p.m, key) + } + + return "", false, err + } + + if !ok { + p.k = append(p.k, key) + } + + return prev, ok, nil +} + +// SetValue sets property key to the default string value +// as defined by fmt.Sprintf("%v"). +func (p *Properties) SetValue(key string, value interface{}) error { + _, _, err := p.Set(key, fmt.Sprintf("%v", value)) + return err +} + +// MustSet sets the property key to the corresponding value. +// If a value for key existed before then ok is true and prev +// contains the previous value. An empty key is silently ignored. +func (p *Properties) MustSet(key, value string) (prev string, ok bool) { + prev, ok, err := p.Set(key, value) + if err != nil { + ErrorHandler(err) + } + return prev, ok +} + +// String returns a string of all expanded 'key = value' pairs. +func (p *Properties) String() string { + var s string + for _, key := range p.k { + value, _ := p.Get(key) + s = fmt.Sprintf("%s%s = %s\n", s, key, value) + } + return s +} + +// Sort sorts the properties keys in alphabetical order. +// This is helpfully before writing the properties. +func (p *Properties) Sort() { + sort.Strings(p.k) +} + +// Write writes all unexpanded 'key = value' pairs to the given writer. +// Write returns the number of bytes written and any write error encountered. 
+func (p *Properties) Write(w io.Writer, enc Encoding) (n int, err error) { + return p.WriteComment(w, "", enc) +} + +// WriteComment writes all unexpanced 'key = value' pairs to the given writer. +// If prefix is not empty then comments are written with a blank line and the +// given prefix. The prefix should be either "# " or "! " to be compatible with +// the properties file format. Otherwise, the properties parser will not be +// able to read the file back in. It returns the number of bytes written and +// any write error encountered. +func (p *Properties) WriteComment(w io.Writer, prefix string, enc Encoding) (n int, err error) { + var x int + + for _, key := range p.k { + value := p.m[key] + + if prefix != "" { + if comments, ok := p.c[key]; ok { + // don't print comments if they are all empty + allEmpty := true + for _, c := range comments { + if c != "" { + allEmpty = false + break + } + } + + if !allEmpty { + // add a blank line between entries but not at the top + if len(comments) > 0 && n > 0 { + x, err = fmt.Fprintln(w) + if err != nil { + return + } + n += x + } + + for _, c := range comments { + x, err = fmt.Fprintf(w, "%s%s\n", prefix, c) + if err != nil { + return + } + n += x + } + } + } + } + sep := " = " + if p.WriteSeparator != "" { + sep = p.WriteSeparator + } + x, err = fmt.Fprintf(w, "%s%s%s\n", encode(key, " :", enc), sep, encode(value, "", enc)) + if err != nil { + return + } + n += x + } + return +} + +// Map returns a copy of the properties as a map. +func (p *Properties) Map() map[string]string { + m := make(map[string]string) + for k, v := range p.m { + m[k] = v + } + return m +} + +// FilterFunc returns a copy of the properties which includes the values which passed all filters. +func (p *Properties) FilterFunc(filters ...func(k, v string) bool) *Properties { + pp := NewProperties() +outer: + for k, v := range p.m { + for _, f := range filters { + if !f(k, v) { + continue outer + } + pp.Set(k, v) + } + } + return pp +} + +// ---------------------------------------------------------------------------- + +// Delete removes the key and its comments. +func (p *Properties) Delete(key string) { + delete(p.m, key) + delete(p.c, key) + newKeys := []string{} + for _, k := range p.k { + if k != key { + newKeys = append(newKeys, k) + } + } + p.k = newKeys +} + +// Merge merges properties, comments and keys from other *Properties into p +func (p *Properties) Merge(other *Properties) { + for _, k := range other.k { + if _, ok := p.m[k]; !ok { + p.k = append(p.k, k) + } + } + for k, v := range other.m { + p.m[k] = v + } + for k, v := range other.c { + p.c[k] = v + } +} + +// ---------------------------------------------------------------------------- + +// check expands all values and returns an error if a circular reference or +// a malformed expression was found. +func (p *Properties) check() error { + for key, value := range p.m { + if _, err := p.expand(key, value); err != nil { + return err + } + } + return nil +} + +func (p *Properties) expand(key, input string) (string, error) { + // no pre/postfix -> nothing to expand + if p.Prefix == "" && p.Postfix == "" { + return input, nil + } + + return expand(input, []string{key}, p.Prefix, p.Postfix, p.m) +} + +// expand recursively expands expressions of '(prefix)key(postfix)' to their corresponding values. +// The function keeps track of the keys that were already expanded and stops if it +// detects a circular reference or a malformed expression of the form '(prefix)key'. 
+func expand(s string, keys []string, prefix, postfix string, values map[string]string) (string, error) { + if len(keys) > maxExpansionDepth { + return "", fmt.Errorf("expansion too deep") + } + + for { + start := strings.Index(s, prefix) + if start == -1 { + return s, nil + } + + keyStart := start + len(prefix) + keyLen := strings.Index(s[keyStart:], postfix) + if keyLen == -1 { + return "", fmt.Errorf("malformed expression") + } + + end := keyStart + keyLen + len(postfix) - 1 + key := s[keyStart : keyStart+keyLen] + + // fmt.Printf("s:%q pp:%q start:%d end:%d keyStart:%d keyLen:%d key:%q\n", s, prefix + "..." + postfix, start, end, keyStart, keyLen, key) + + for _, k := range keys { + if key == k { + var b bytes.Buffer + b.WriteString("circular reference in:\n") + for _, k1 := range keys { + fmt.Fprintf(&b, "%s=%s\n", k1, values[k1]) + } + return "", fmt.Errorf(b.String()) + } + } + + val, ok := values[key] + if !ok { + val = os.Getenv(key) + } + new_val, err := expand(val, append(keys, key), prefix, postfix, values) + if err != nil { + return "", err + } + s = s[:start] + new_val + s[end+1:] + } +} + +// encode encodes a UTF-8 string to ISO-8859-1 and escapes some characters. +func encode(s string, special string, enc Encoding) string { + switch enc { + case UTF8: + return encodeUtf8(s, special) + case ISO_8859_1: + return encodeIso(s, special) + default: + panic(fmt.Sprintf("unsupported encoding %v", enc)) + } +} + +func encodeUtf8(s string, special string) string { + v := "" + for pos := 0; pos < len(s); { + r, w := utf8.DecodeRuneInString(s[pos:]) + pos += w + v += escape(r, special) + } + return v +} + +func encodeIso(s string, special string) string { + var r rune + var w int + var v string + for pos := 0; pos < len(s); { + switch r, w = utf8.DecodeRuneInString(s[pos:]); { + case r < 1<<8: // single byte rune -> escape special chars only + v += escape(r, special) + case r < 1<<16: // two byte rune -> unicode literal + v += fmt.Sprintf("\\u%04x", r) + default: // more than two bytes per rune -> can't encode + v += "?" + } + pos += w + } + return v +} + +func escape(r rune, special string) string { + switch r { + case '\f': + return "\\f" + case '\n': + return "\\n" + case '\r': + return "\\r" + case '\t': + return "\\t" + case '\\': + return "\\\\" + default: + if strings.ContainsRune(special, r) { + return "\\" + string(r) + } + return string(r) + } +} + +func invalidKeyError(key string) error { + return fmt.Errorf("unknown property: %s", key) +} diff --git a/vendor/github.com/magiconair/properties/rangecheck.go b/vendor/github.com/magiconair/properties/rangecheck.go new file mode 100644 index 000000000..dbd60b36e --- /dev/null +++ b/vendor/github.com/magiconair/properties/rangecheck.go @@ -0,0 +1,31 @@ +// Copyright 2013-2022 Frank Schroeder. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package properties + +import ( + "fmt" + "math" +) + +// make this a var to overwrite it in a test +var is32Bit = ^uint(0) == math.MaxUint32 + +// intRangeCheck checks if the value fits into the int type and +// panics if it does not. +func intRangeCheck(key string, v int64) int { + if is32Bit && (v < math.MinInt32 || v > math.MaxInt32) { + panic(fmt.Sprintf("Value %d for key %s out of range", v, key)) + } + return int(v) +} + +// uintRangeCheck checks if the value fits into the uint type and +// panics if it does not. 
+func uintRangeCheck(key string, v uint64) uint { + if is32Bit && v > math.MaxUint32 { + panic(fmt.Sprintf("Value %d for key %s out of range", v, key)) + } + return uint(v) +} diff --git a/vendor/github.com/moby/patternmatcher/LICENSE b/vendor/github.com/moby/patternmatcher/LICENSE new file mode 100644 index 000000000..6d8d58fb6 --- /dev/null +++ b/vendor/github.com/moby/patternmatcher/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2018 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/moby/patternmatcher/NOTICE b/vendor/github.com/moby/patternmatcher/NOTICE new file mode 100644 index 000000000..e5154640f --- /dev/null +++ b/vendor/github.com/moby/patternmatcher/NOTICE @@ -0,0 +1,16 @@ +Docker +Copyright 2012-2017 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/moby/patternmatcher/patternmatcher.go b/vendor/github.com/moby/patternmatcher/patternmatcher.go new file mode 100644 index 000000000..37a1a59ac --- /dev/null +++ b/vendor/github.com/moby/patternmatcher/patternmatcher.go @@ -0,0 +1,474 @@ +package patternmatcher + +import ( + "errors" + "os" + "path/filepath" + "regexp" + "strings" + "text/scanner" + "unicode/utf8" +) + +// escapeBytes is a bitmap used to check whether a character should be escaped when creating the regex. +var escapeBytes [8]byte + +// shouldEscape reports whether a rune should be escaped as part of the regex. +// +// This only includes characters that require escaping in regex but are also NOT valid filepath pattern characters. +// Additionally, '\' is not excluded because there is specific logic to properly handle this, as it's a path separator +// on Windows. +// +// Adapted from regexp::QuoteMeta in go stdlib. +// See https://cs.opensource.google/go/go/+/refs/tags/go1.17.2:src/regexp/regexp.go;l=703-715;drc=refs%2Ftags%2Fgo1.17.2 +func shouldEscape(b rune) bool { + return b < utf8.RuneSelf && escapeBytes[b%8]&(1<<(b/8)) != 0 +} + +func init() { + for _, b := range []byte(`.+()|{}$`) { + escapeBytes[b%8] |= 1 << (b / 8) + } +} + +// PatternMatcher allows checking paths against a list of patterns +type PatternMatcher struct { + patterns []*Pattern + exclusions bool +} + +// New creates a new matcher object for specific patterns that can +// be used later to match against patterns against paths +func New(patterns []string) (*PatternMatcher, error) { + pm := &PatternMatcher{ + patterns: make([]*Pattern, 0, len(patterns)), + } + for _, p := range patterns { + // Eliminate leading and trailing whitespace. + p = strings.TrimSpace(p) + if p == "" { + continue + } + p = filepath.Clean(p) + newp := &Pattern{} + if p[0] == '!' { + if len(p) == 1 { + return nil, errors.New("illegal exclusion pattern: \"!\"") + } + newp.exclusion = true + p = p[1:] + pm.exclusions = true + } + // Do some syntax checking on the pattern. + // filepath's Match() has some really weird rules that are inconsistent + // so instead of trying to dup their logic, just call Match() for its + // error state and if there is an error in the pattern return it. 
+ // If this becomes an issue we can remove this since its really only + // needed in the error (syntax) case - which isn't really critical. + if _, err := filepath.Match(p, "."); err != nil { + return nil, err + } + newp.cleanedPattern = p + newp.dirs = strings.Split(p, string(os.PathSeparator)) + pm.patterns = append(pm.patterns, newp) + } + return pm, nil +} + +// Matches returns true if "file" matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +// +// The "file" argument should be a slash-delimited path. +// +// Matches is not safe to call concurrently. +// +// Deprecated: This implementation is buggy (it only checks a single parent dir +// against the pattern) and will be removed soon. Use either +// MatchesOrParentMatches or MatchesUsingParentResults instead. +func (pm *PatternMatcher) Matches(file string) (bool, error) { + matched := false + file = filepath.FromSlash(file) + parentPath := filepath.Dir(file) + parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) + + for _, pattern := range pm.patterns { + // Skip evaluation if this is an inclusion and the filename + // already matched the pattern, or it's an exclusion and it has + // not matched the pattern yet. + if pattern.exclusion != matched { + continue + } + + match, err := pattern.match(file) + if err != nil { + return false, err + } + + if !match && parentPath != "." { + // Check to see if the pattern matches one of our parent dirs. + if len(pattern.dirs) <= len(parentPathDirs) { + match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator))) + } + } + + if match { + matched = !pattern.exclusion + } + } + + return matched, nil +} + +// MatchesOrParentMatches returns true if "file" matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +// +// The "file" argument should be a slash-delimited path. +// +// Matches is not safe to call concurrently. +func (pm *PatternMatcher) MatchesOrParentMatches(file string) (bool, error) { + matched := false + file = filepath.FromSlash(file) + parentPath := filepath.Dir(file) + parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) + + for _, pattern := range pm.patterns { + // Skip evaluation if this is an inclusion and the filename + // already matched the pattern, or it's an exclusion and it has + // not matched the pattern yet. + if pattern.exclusion != matched { + continue + } + + match, err := pattern.match(file) + if err != nil { + return false, err + } + + if !match && parentPath != "." { + // Check to see if the pattern matches one of our parent dirs. + for i := range parentPathDirs { + match, _ = pattern.match(strings.Join(parentPathDirs[:i+1], string(os.PathSeparator))) + if match { + break + } + } + } + + if match { + matched = !pattern.exclusion + } + } + + return matched, nil +} + +// MatchesUsingParentResult returns true if "file" matches any of the patterns +// and isn't excluded by any of the subsequent patterns. The functionality is +// the same as Matches, but as an optimization, the caller keeps track of +// whether the parent directory matched. +// +// The "file" argument should be a slash-delimited path. +// +// MatchesUsingParentResult is not safe to call concurrently. +// +// Deprecated: this function does behave correctly in some cases (see +// https://github.com/docker/buildx/issues/850). +// +// Use MatchesUsingParentResults instead. 
+func (pm *PatternMatcher) MatchesUsingParentResult(file string, parentMatched bool) (bool, error) { + matched := parentMatched + file = filepath.FromSlash(file) + + for _, pattern := range pm.patterns { + // Skip evaluation if this is an inclusion and the filename + // already matched the pattern, or it's an exclusion and it has + // not matched the pattern yet. + if pattern.exclusion != matched { + continue + } + + match, err := pattern.match(file) + if err != nil { + return false, err + } + + if match { + matched = !pattern.exclusion + } + } + return matched, nil +} + +// MatchInfo tracks information about parent dir matches while traversing a +// filesystem. +type MatchInfo struct { + parentMatched []bool +} + +// MatchesUsingParentResults returns true if "file" matches any of the patterns +// and isn't excluded by any of the subsequent patterns. The functionality is +// the same as Matches, but as an optimization, the caller passes in +// intermediate results from matching the parent directory. +// +// The "file" argument should be a slash-delimited path. +// +// MatchesUsingParentResults is not safe to call concurrently. +func (pm *PatternMatcher) MatchesUsingParentResults(file string, parentMatchInfo MatchInfo) (bool, MatchInfo, error) { + parentMatched := parentMatchInfo.parentMatched + if len(parentMatched) != 0 && len(parentMatched) != len(pm.patterns) { + return false, MatchInfo{}, errors.New("wrong number of values in parentMatched") + } + + file = filepath.FromSlash(file) + matched := false + + matchInfo := MatchInfo{ + parentMatched: make([]bool, len(pm.patterns)), + } + for i, pattern := range pm.patterns { + match := false + // If the parent matched this pattern, we don't need to recheck. + if len(parentMatched) != 0 { + match = parentMatched[i] + } + + if !match { + // Skip evaluation if this is an inclusion and the filename + // already matched the pattern, or it's an exclusion and it has + // not matched the pattern yet. + if pattern.exclusion != matched { + continue + } + + var err error + match, err = pattern.match(file) + if err != nil { + return false, matchInfo, err + } + + // If the zero value of MatchInfo was passed in, we don't have + // any information about the parent dir's match results, and we + // apply the same logic as MatchesOrParentMatches. + if !match && len(parentMatched) == 0 { + if parentPath := filepath.Dir(file); parentPath != "." { + parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) + // Check to see if the pattern matches one of our parent dirs. + for i := range parentPathDirs { + match, _ = pattern.match(strings.Join(parentPathDirs[:i+1], string(os.PathSeparator))) + if match { + break + } + } + } + } + } + matchInfo.parentMatched[i] = match + + if match { + matched = !pattern.exclusion + } + } + return matched, matchInfo, nil +} + +// Exclusions returns true if any of the patterns define exclusions +func (pm *PatternMatcher) Exclusions() bool { + return pm.exclusions +} + +// Patterns returns array of active patterns +func (pm *PatternMatcher) Patterns() []*Pattern { + return pm.patterns +} + +// Pattern defines a single regexp used to filter file paths. 
+type Pattern struct { + matchType matchType + cleanedPattern string + dirs []string + regexp *regexp.Regexp + exclusion bool +} + +type matchType int + +const ( + unknownMatch matchType = iota + exactMatch + prefixMatch + suffixMatch + regexpMatch +) + +func (p *Pattern) String() string { + return p.cleanedPattern +} + +// Exclusion returns true if this pattern defines exclusion +func (p *Pattern) Exclusion() bool { + return p.exclusion +} + +func (p *Pattern) match(path string) (bool, error) { + if p.matchType == unknownMatch { + if err := p.compile(string(os.PathSeparator)); err != nil { + return false, filepath.ErrBadPattern + } + } + + switch p.matchType { + case exactMatch: + return path == p.cleanedPattern, nil + case prefixMatch: + // strip trailing ** + return strings.HasPrefix(path, p.cleanedPattern[:len(p.cleanedPattern)-2]), nil + case suffixMatch: + // strip leading ** + suffix := p.cleanedPattern[2:] + if strings.HasSuffix(path, suffix) { + return true, nil + } + // **/foo matches "foo" + return suffix[0] == os.PathSeparator && path == suffix[1:], nil + case regexpMatch: + return p.regexp.MatchString(path), nil + } + + return false, nil +} + +func (p *Pattern) compile(sl string) error { + regStr := "^" + pattern := p.cleanedPattern + // Go through the pattern and convert it to a regexp. + // We use a scanner so we can support utf-8 chars. + var scan scanner.Scanner + scan.Init(strings.NewReader(pattern)) + + escSL := sl + if sl == `\` { + escSL += `\` + } + + p.matchType = exactMatch + for i := 0; scan.Peek() != scanner.EOF; i++ { + ch := scan.Next() + + if ch == '*' { + if scan.Peek() == '*' { + // is some flavor of "**" + scan.Next() + + // Treat **/ as ** so eat the "/" + if string(scan.Peek()) == sl { + scan.Next() + } + + if scan.Peek() == scanner.EOF { + // is "**EOF" - to align with .gitignore just accept all + if p.matchType == exactMatch { + p.matchType = prefixMatch + } else { + regStr += ".*" + p.matchType = regexpMatch + } + } else { + // is "**" + // Note that this allows for any # of /'s (even 0) because + // the .* will eat everything, even /'s + regStr += "(.*" + escSL + ")?" + p.matchType = regexpMatch + } + + if i == 0 { + p.matchType = suffixMatch + } + } else { + // is "*" so map it to anything but "/" + regStr += "[^" + escSL + "]*" + p.matchType = regexpMatch + } + } else if ch == '?' { + // "?" is any char except "/" + regStr += "[^" + escSL + "]" + p.matchType = regexpMatch + } else if shouldEscape(ch) { + // Escape some regexp special chars that have no meaning + // in golang's filepath.Match + regStr += `\` + string(ch) + } else if ch == '\\' { + // escape next char. Note that a trailing \ in the pattern + // will be left alone (but need to escape it) + if sl == `\` { + // On windows map "\" to "\\", meaning an escaped backslash, + // and then just continue because filepath.Match on + // Windows doesn't allow escaping at all + regStr += escSL + continue + } + if scan.Peek() != scanner.EOF { + regStr += `\` + string(scan.Next()) + p.matchType = regexpMatch + } else { + regStr += `\` + } + } else if ch == '[' || ch == ']' { + regStr += string(ch) + p.matchType = regexpMatch + } else { + regStr += string(ch) + } + } + + if p.matchType != regexpMatch { + return nil + } + + regStr += "$" + + re, err := regexp.Compile(regStr) + if err != nil { + return err + } + + p.regexp = re + p.matchType = regexpMatch + return nil +} + +// Matches returns true if file matches any of the patterns +// and isn't excluded by any of the subsequent patterns. 
+// +// This implementation is buggy (it only checks a single parent dir against the +// pattern) and will be removed soon. Use MatchesOrParentMatches instead. +func Matches(file string, patterns []string) (bool, error) { + pm, err := New(patterns) + if err != nil { + return false, err + } + file = filepath.Clean(file) + + if file == "." { + // Don't let them exclude everything, kind of silly. + return false, nil + } + + return pm.Matches(file) +} + +// MatchesOrParentMatches returns true if file matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +func MatchesOrParentMatches(file string, patterns []string) (bool, error) { + pm, err := New(patterns) + if err != nil { + return false, err + } + file = filepath.Clean(file) + + if file == "." { + // Don't let them exclude everything, kind of silly. + return false, nil + } + + return pm.MatchesOrParentMatches(file) +} diff --git a/vendor/github.com/moby/sys/sequential/LICENSE b/vendor/github.com/moby/sys/sequential/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/moby/sys/sequential/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/moby/sys/sequential/doc.go b/vendor/github.com/moby/sys/sequential/doc.go new file mode 100644 index 000000000..af2817504 --- /dev/null +++ b/vendor/github.com/moby/sys/sequential/doc.go @@ -0,0 +1,15 @@ +// Package sequential provides a set of functions for managing sequential +// files on Windows. +// +// The origin of these functions are the golang OS and windows packages, +// slightly modified to only cope with files, not directories due to the +// specific use case. +// +// The alteration is to allow a file on Windows to be opened with +// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating +// the standby list, particularly when accessing large files such as layer.tar. +// +// For non-Windows platforms, the package provides wrappers for the equivalents +// in the os packages. They are passthrough on Unix platforms, and only relevant +// on Windows. 
+package sequential diff --git a/vendor/github.com/moby/sys/sequential/sequential_unix.go b/vendor/github.com/moby/sys/sequential/sequential_unix.go new file mode 100644 index 000000000..a3c7340e3 --- /dev/null +++ b/vendor/github.com/moby/sys/sequential/sequential_unix.go @@ -0,0 +1,45 @@ +//go:build !windows +// +build !windows + +package sequential + +import "os" + +// Create creates the named file with mode 0666 (before umask), truncating +// it if it already exists. If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. +// If there is an error, it will be of type *PathError. +func Create(name string) (*os.File, error) { + return os.Create(name) +} + +// Open opens the named file for reading. If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func Open(name string) (*os.File, error) { + return os.Open(name) +} + +// OpenFile is the generalized open call; most users will use Open +// or Create instead. It opens the named file with specified flag +// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, +// methods on the returned File can be used for I/O. +// If there is an error, it will be of type *PathError. +func OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) { + return os.OpenFile(name, flag, perm) +} + +// CreateTemp creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *os.File. +// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. +func CreateTemp(dir, prefix string) (f *os.File, err error) { + return os.CreateTemp(dir, prefix) +} diff --git a/vendor/github.com/moby/sys/sequential/sequential_windows.go b/vendor/github.com/moby/sys/sequential/sequential_windows.go new file mode 100644 index 000000000..3f7f0d83e --- /dev/null +++ b/vendor/github.com/moby/sys/sequential/sequential_windows.go @@ -0,0 +1,161 @@ +package sequential + +import ( + "os" + "path/filepath" + "strconv" + "sync" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/windows" +) + +// Create creates the named file with mode 0666 (before umask), truncating +// it if it already exists. If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. +// If there is an error, it will be of type *PathError. +func Create(name string) (*os.File, error) { + return OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0) +} + +// Open opens the named file for reading. If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func Open(name string) (*os.File, error) { + return OpenFile(name, os.O_RDONLY, 0) +} + +// OpenFile is the generalized open call; most users will use Open +// or Create instead. +// If there is an error, it will be of type *PathError. 
+func OpenFile(name string, flag int, _ os.FileMode) (*os.File, error) { + if name == "" { + return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} + } + r, err := openFileSequential(name, flag, 0) + if err == nil { + return r, nil + } + return nil, &os.PathError{Op: "open", Path: name, Err: err} +} + +func openFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { + r, e := openSequential(name, flag|windows.O_CLOEXEC, 0) + if e != nil { + return nil, e + } + return os.NewFile(uintptr(r), name), nil +} + +func makeInheritSa() *windows.SecurityAttributes { + var sa windows.SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + return &sa +} + +func openSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { + if len(path) == 0 { + return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND + } + pathp, err := windows.UTF16PtrFromString(path) + if err != nil { + return windows.InvalidHandle, err + } + var access uint32 + switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) { + case windows.O_RDONLY: + access = windows.GENERIC_READ + case windows.O_WRONLY: + access = windows.GENERIC_WRITE + case windows.O_RDWR: + access = windows.GENERIC_READ | windows.GENERIC_WRITE + } + if mode&windows.O_CREAT != 0 { + access |= windows.GENERIC_WRITE + } + if mode&windows.O_APPEND != 0 { + access &^= windows.GENERIC_WRITE + access |= windows.FILE_APPEND_DATA + } + sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE) + var sa *windows.SecurityAttributes + if mode&windows.O_CLOEXEC == 0 { + sa = makeInheritSa() + } + var createmode uint32 + switch { + case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL): + createmode = windows.CREATE_NEW + case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC): + createmode = windows.CREATE_ALWAYS + case mode&windows.O_CREAT == windows.O_CREAT: + createmode = windows.OPEN_ALWAYS + case mode&windows.O_TRUNC == windows.O_TRUNC: + createmode = windows.TRUNCATE_EXISTING + default: + createmode = windows.OPEN_EXISTING + } + // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. + // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx + const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN + h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) + return h, e +} + +// Helpers for CreateTemp +var rand uint32 +var randmu sync.Mutex + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} + +func nextSuffix() string { + randmu.Lock() + r := rand + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + rand = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} + +// CreateTemp is a copy of os.CreateTemp, modified to use sequential +// file access. Below is the original comment from golang: +// TempFile creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *os.File. +// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. 
It is the caller's responsibility +// to remove the file when no longer needed. +func CreateTemp(dir, prefix string) (f *os.File, err error) { + if dir == "" { + dir = os.TempDir() + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+nextSuffix()) + f, err = OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o600) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + break + } + return +} diff --git a/vendor/github.com/moby/term/.gitignore b/vendor/github.com/moby/term/.gitignore new file mode 100644 index 000000000..b0747ff01 --- /dev/null +++ b/vendor/github.com/moby/term/.gitignore @@ -0,0 +1,8 @@ +# if you want to ignore files created by your editor/tools, consider using a +# global .gitignore or .git/info/exclude see https://help.github.com/articles/ignoring-files +.* +!.github +!.gitignore +profile.out +# support running go modules in vendor mode for local development +vendor/ diff --git a/vendor/github.com/moby/term/LICENSE b/vendor/github.com/moby/term/LICENSE new file mode 100644 index 000000000..6d8d58fb6 --- /dev/null +++ b/vendor/github.com/moby/term/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2018 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/moby/term/README.md b/vendor/github.com/moby/term/README.md new file mode 100644 index 000000000..0ce92cc33 --- /dev/null +++ b/vendor/github.com/moby/term/README.md @@ -0,0 +1,36 @@ +# term - utilities for dealing with terminals + +![Test](https://github.com/moby/term/workflows/Test/badge.svg) [![GoDoc](https://godoc.org/github.com/moby/term?status.svg)](https://godoc.org/github.com/moby/term) [![Go Report Card](https://goreportcard.com/badge/github.com/moby/term)](https://goreportcard.com/report/github.com/moby/term) + +term provides structures and helper functions to work with terminal (state, sizes). + +#### Using term + +```go +package main + +import ( + "log" + "os" + + "github.com/moby/term" +) + +func main() { + fd := os.Stdin.Fd() + if term.IsTerminal(fd) { + ws, err := term.GetWinsize(fd) + if err != nil { + log.Fatalf("term.GetWinsize: %s", err) + } + log.Printf("%d:%d\n", ws.Height, ws.Width) + } +} +``` + +## Contributing + +Want to hack on term? [Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md) apply. + +## Copyright and license +Code and documentation copyright 2015 Docker, inc. Code released under the Apache 2.0 license. Docs released under Creative commons. 
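The README example above only queries the window size; the vendored package also exposes raw-mode helpers — `GetFdInfo`, `SetRawTerminal` and `RestoreTerminal`, declared in `term.go` later in this patch. A minimal sketch of the usual save/restore pattern, assuming stdin is attached to a terminal:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/moby/term"
)

func main() {
	// GetFdInfo returns the file descriptor and whether it refers to a terminal.
	fd, isTerm := term.GetFdInfo(os.Stdin)
	if !isTerm {
		log.Fatal("stdin is not a terminal")
	}

	// Switch the terminal to raw mode, keeping the previous state so it can
	// be restored on exit.
	prev, err := term.SetRawTerminal(fd)
	if err != nil {
		log.Fatalf("term.SetRawTerminal: %s", err)
	}
	defer term.RestoreTerminal(fd, prev)

	// In raw mode there is no line buffering or echo; read a single byte.
	buf := make([]byte, 1)
	if _, err := os.Stdin.Read(buf); err != nil {
		log.Fatalf("read: %s", err)
	}
	fmt.Printf("\r\nread byte: %q\r\n", buf[0])
}
```

The deferred `RestoreTerminal` call puts the console back into its saved state even if the function returns early; the Windows implementation in this package additionally registers an interrupt handler (`restoreAtInterrupt`) for the same reason.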
diff --git a/vendor/github.com/moby/term/ascii.go b/vendor/github.com/moby/term/ascii.go new file mode 100644 index 000000000..55873c055 --- /dev/null +++ b/vendor/github.com/moby/term/ascii.go @@ -0,0 +1,66 @@ +package term + +import ( + "fmt" + "strings" +) + +// ASCII list the possible supported ASCII key sequence +var ASCII = []string{ + "ctrl-@", + "ctrl-a", + "ctrl-b", + "ctrl-c", + "ctrl-d", + "ctrl-e", + "ctrl-f", + "ctrl-g", + "ctrl-h", + "ctrl-i", + "ctrl-j", + "ctrl-k", + "ctrl-l", + "ctrl-m", + "ctrl-n", + "ctrl-o", + "ctrl-p", + "ctrl-q", + "ctrl-r", + "ctrl-s", + "ctrl-t", + "ctrl-u", + "ctrl-v", + "ctrl-w", + "ctrl-x", + "ctrl-y", + "ctrl-z", + "ctrl-[", + "ctrl-\\", + "ctrl-]", + "ctrl-^", + "ctrl-_", +} + +// ToBytes converts a string representing a suite of key-sequence to the corresponding ASCII code. +func ToBytes(keys string) ([]byte, error) { + codes := []byte{} +next: + for _, key := range strings.Split(keys, ",") { + if len(key) != 1 { + for code, ctrl := range ASCII { + if ctrl == key { + codes = append(codes, byte(code)) + continue next + } + } + if key == "DEL" { + codes = append(codes, 127) + } else { + return nil, fmt.Errorf("Unknown character: '%s'", key) + } + } else { + codes = append(codes, key[0]) + } + } + return codes, nil +} diff --git a/vendor/github.com/moby/term/doc.go b/vendor/github.com/moby/term/doc.go new file mode 100644 index 000000000..c9bc03244 --- /dev/null +++ b/vendor/github.com/moby/term/doc.go @@ -0,0 +1,3 @@ +// Package term provides structures and helper functions to work with +// terminal (state, sizes). +package term diff --git a/vendor/github.com/moby/term/proxy.go b/vendor/github.com/moby/term/proxy.go new file mode 100644 index 000000000..c47756b89 --- /dev/null +++ b/vendor/github.com/moby/term/proxy.go @@ -0,0 +1,88 @@ +package term + +import ( + "io" +) + +// EscapeError is special error which returned by a TTY proxy reader's Read() +// method in case its detach escape sequence is read. +type EscapeError struct{} + +func (EscapeError) Error() string { + return "read escape sequence" +} + +// escapeProxy is used only for attaches with a TTY. It is used to proxy +// stdin keypresses from the underlying reader and look for the passed in +// escape key sequence to signal a detach. +type escapeProxy struct { + escapeKeys []byte + escapeKeyPos int + r io.Reader + buf []byte +} + +// NewEscapeProxy returns a new TTY proxy reader which wraps the given reader +// and detects when the specified escape keys are read, in which case the Read +// method will return an error of type EscapeError. +func NewEscapeProxy(r io.Reader, escapeKeys []byte) io.Reader { + return &escapeProxy{ + escapeKeys: escapeKeys, + r: r, + } +} + +func (r *escapeProxy) Read(buf []byte) (n int, err error) { + if len(r.escapeKeys) > 0 && r.escapeKeyPos == len(r.escapeKeys) { + return 0, EscapeError{} + } + + if len(r.buf) > 0 { + n = copy(buf, r.buf) + r.buf = r.buf[n:] + } + + nr, err := r.r.Read(buf[n:]) + n += nr + if len(r.escapeKeys) == 0 { + return n, err + } + + for i := 0; i < n; i++ { + if buf[i] == r.escapeKeys[r.escapeKeyPos] { + r.escapeKeyPos++ + + // Check if the full escape sequence is matched. + if r.escapeKeyPos == len(r.escapeKeys) { + n = i + 1 - r.escapeKeyPos + if n < 0 { + n = 0 + } + return n, EscapeError{} + } + continue + } + + // If we need to prepend a partial escape sequence from the previous + // read, make sure the new buffer size doesn't exceed len(buf). + // Otherwise, preserve any extra data in a buffer for the next read. 
+ if i < r.escapeKeyPos { + preserve := make([]byte, 0, r.escapeKeyPos+n) + preserve = append(preserve, r.escapeKeys[:r.escapeKeyPos]...) + preserve = append(preserve, buf[:n]...) + n = copy(buf, preserve) + i += r.escapeKeyPos + r.buf = append(r.buf, preserve[n:]...) + } + r.escapeKeyPos = 0 + } + + // If we're in the middle of reading an escape sequence, make sure we don't + // let the caller read it. If later on we find that this is not the escape + // sequence, we'll prepend it back to buf. + n -= r.escapeKeyPos + if n < 0 { + n = 0 + } + return n, err +} diff --git a/vendor/github.com/moby/term/term.go b/vendor/github.com/moby/term/term.go new file mode 100644 index 000000000..f9d8988ef --- /dev/null +++ b/vendor/github.com/moby/term/term.go @@ -0,0 +1,85 @@ +package term + +import "io" + +// State holds the platform-specific state / console mode for the terminal. +type State terminalState + +// Winsize represents the size of the terminal window. +type Winsize struct { + Height uint16 + Width uint16 + + // Only used on Unix + x uint16 + y uint16 +} + +// StdStreams returns the standard streams (stdin, stdout, stderr). +// +// On Windows, it attempts to turn on VT handling on all std handles if +// supported, or falls back to terminal emulation. On Unix, this returns +// the standard [os.Stdin], [os.Stdout] and [os.Stderr]. +func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + return stdStreams() +} + +// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. +func GetFdInfo(in interface{}) (fd uintptr, isTerminal bool) { + return getFdInfo(in) +} + +// GetWinsize returns the window size based on the specified file descriptor. +func GetWinsize(fd uintptr) (*Winsize, error) { + return getWinsize(fd) +} + +// SetWinsize tries to set the specified window size for the specified file +// descriptor. It is only implemented on Unix, and returns an error on Windows. +func SetWinsize(fd uintptr, ws *Winsize) error { + return setWinsize(fd, ws) +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + return isTerminal(fd) +} + +// RestoreTerminal restores the terminal connected to the given file descriptor +// to a previous state. +func RestoreTerminal(fd uintptr, state *State) error { + return restoreTerminal(fd, state) +} + +// SaveState saves the state of the terminal connected to the given file descriptor. +func SaveState(fd uintptr) (*State, error) { + return saveState(fd) +} + +// DisableEcho applies the specified state to the terminal connected to the file +// descriptor, with echo disabled. +func DisableEcho(fd uintptr, state *State) error { + return disableEcho(fd, state) +} + +// SetRawTerminal puts the terminal connected to the given file descriptor into +// raw mode and returns the previous state. On UNIX, this is the equivalent of +// [MakeRaw], and puts both the input and output into raw mode. On Windows, it +// only puts the input into raw mode. +func SetRawTerminal(fd uintptr) (previousState *State, err error) { + return setRawTerminal(fd) +} + +// SetRawTerminalOutput puts the output of terminal connected to the given file +// descriptor into raw mode. On UNIX, this does nothing and returns nil for the +// state. On Windows, it disables LF -> CRLF translation. 
+func SetRawTerminalOutput(fd uintptr) (previousState *State, err error) { + return setRawTerminalOutput(fd) +} + +// MakeRaw puts the terminal (Windows Console) connected to the +// given file descriptor into raw mode and returns the previous state of +// the terminal so that it can be restored. +func MakeRaw(fd uintptr) (previousState *State, err error) { + return makeRaw(fd) +} diff --git a/vendor/github.com/moby/term/term_unix.go b/vendor/github.com/moby/term/term_unix.go new file mode 100644 index 000000000..2ec7706a1 --- /dev/null +++ b/vendor/github.com/moby/term/term_unix.go @@ -0,0 +1,98 @@ +//go:build !windows +// +build !windows + +package term + +import ( + "errors" + "io" + "os" + + "golang.org/x/sys/unix" +) + +// ErrInvalidState is returned if the state of the terminal is invalid. +// +// Deprecated: ErrInvalidState is no longer used. +var ErrInvalidState = errors.New("Invalid terminal state") + +// terminalState holds the platform-specific state / console mode for the terminal. +type terminalState struct { + termios unix.Termios +} + +func stdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + return os.Stdin, os.Stdout, os.Stderr +} + +func getFdInfo(in interface{}) (uintptr, bool) { + var inFd uintptr + var isTerminalIn bool + if file, ok := in.(*os.File); ok { + inFd = file.Fd() + isTerminalIn = isTerminal(inFd) + } + return inFd, isTerminalIn +} + +func getWinsize(fd uintptr) (*Winsize, error) { + uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ) + ws := &Winsize{Height: uws.Row, Width: uws.Col, x: uws.Xpixel, y: uws.Ypixel} + return ws, err +} + +func setWinsize(fd uintptr, ws *Winsize) error { + return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, &unix.Winsize{ + Row: ws.Height, + Col: ws.Width, + Xpixel: ws.x, + Ypixel: ws.y, + }) +} + +func isTerminal(fd uintptr) bool { + _, err := tcget(fd) + return err == nil +} + +func restoreTerminal(fd uintptr, state *State) error { + if state == nil { + return errors.New("invalid terminal state") + } + return tcset(fd, &state.termios) +} + +func saveState(fd uintptr) (*State, error) { + termios, err := tcget(fd) + if err != nil { + return nil, err + } + return &State{termios: *termios}, nil +} + +func disableEcho(fd uintptr, state *State) error { + newState := state.termios + newState.Lflag &^= unix.ECHO + + return tcset(fd, &newState) +} + +func setRawTerminal(fd uintptr) (*State, error) { + return makeRaw(fd) +} + +func setRawTerminalOutput(fd uintptr) (*State, error) { + return nil, nil +} + +func tcget(fd uintptr) (*unix.Termios, error) { + p, err := unix.IoctlGetTermios(int(fd), getTermios) + if err != nil { + return nil, err + } + return p, nil +} + +func tcset(fd uintptr, p *unix.Termios) error { + return unix.IoctlSetTermios(int(fd), setTermios, p) +} diff --git a/vendor/github.com/moby/term/term_windows.go b/vendor/github.com/moby/term/term_windows.go new file mode 100644 index 000000000..81ccff042 --- /dev/null +++ b/vendor/github.com/moby/term/term_windows.go @@ -0,0 +1,176 @@ +package term + +import ( + "fmt" + "io" + "os" + "os/signal" + + windowsconsole "github.com/moby/term/windows" + "golang.org/x/sys/windows" +) + +// terminalState holds the platform-specific state / console mode for the terminal. 
+type terminalState struct { + mode uint32 +} + +// vtInputSupported is true if winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported by the console +var vtInputSupported bool + +func stdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + // Turn on VT handling on all std handles, if possible. This might + // fail, in which case we will fall back to terminal emulation. + var ( + emulateStdin, emulateStdout, emulateStderr bool + + mode uint32 + ) + + fd := windows.Handle(os.Stdin.Fd()) + if err := windows.GetConsoleMode(fd, &mode); err == nil { + // Validate that winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it. + if err = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_INPUT); err != nil { + emulateStdin = true + } else { + vtInputSupported = true + } + // Unconditionally set the console mode back even on failure because SetConsoleMode + // remembers invalid bits on input handles. + _ = windows.SetConsoleMode(fd, mode) + } + + fd = windows.Handle(os.Stdout.Fd()) + if err := windows.GetConsoleMode(fd, &mode); err == nil { + // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it. + if err = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING|windows.DISABLE_NEWLINE_AUTO_RETURN); err != nil { + emulateStdout = true + } else { + _ = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING) + } + } + + fd = windows.Handle(os.Stderr.Fd()) + if err := windows.GetConsoleMode(fd, &mode); err == nil { + // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it. + if err = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING|windows.DISABLE_NEWLINE_AUTO_RETURN); err != nil { + emulateStderr = true + } else { + _ = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING) + } + } + + if emulateStdin { + h := uint32(windows.STD_INPUT_HANDLE) + stdIn = windowsconsole.NewAnsiReader(int(h)) + } else { + stdIn = os.Stdin + } + + if emulateStdout { + h := uint32(windows.STD_OUTPUT_HANDLE) + stdOut = windowsconsole.NewAnsiWriter(int(h)) + } else { + stdOut = os.Stdout + } + + if emulateStderr { + h := uint32(windows.STD_ERROR_HANDLE) + stdErr = windowsconsole.NewAnsiWriter(int(h)) + } else { + stdErr = os.Stderr + } + + return stdIn, stdOut, stdErr +} + +func getFdInfo(in interface{}) (uintptr, bool) { + return windowsconsole.GetHandleInfo(in) +} + +func getWinsize(fd uintptr) (*Winsize, error) { + var info windows.ConsoleScreenBufferInfo + if err := windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info); err != nil { + return nil, err + } + + winsize := &Winsize{ + Width: uint16(info.Window.Right - info.Window.Left + 1), + Height: uint16(info.Window.Bottom - info.Window.Top + 1), + } + + return winsize, nil +} + +func setWinsize(fd uintptr, ws *Winsize) error { + return fmt.Errorf("not implemented on Windows") +} + +func isTerminal(fd uintptr) bool { + var mode uint32 + err := windows.GetConsoleMode(windows.Handle(fd), &mode) + return err == nil +} + +func restoreTerminal(fd uintptr, state *State) error { + return windows.SetConsoleMode(windows.Handle(fd), state.mode) +} + +func saveState(fd uintptr) (*State, error) { + var mode uint32 + + if err := windows.GetConsoleMode(windows.Handle(fd), &mode); err != nil { + return nil, err + } + + return &State{mode: mode}, nil +} + +func disableEcho(fd uintptr, state *State) error { + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx + mode := state.mode 
+ mode &^= windows.ENABLE_ECHO_INPUT + mode |= windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT + err := windows.SetConsoleMode(windows.Handle(fd), mode) + if err != nil { + return err + } + + // Register an interrupt handler to catch and restore prior state + restoreAtInterrupt(fd, state) + return nil +} + +func setRawTerminal(fd uintptr) (*State, error) { + oldState, err := MakeRaw(fd) + if err != nil { + return nil, err + } + + // Register an interrupt handler to catch and restore prior state + restoreAtInterrupt(fd, oldState) + return oldState, err +} + +func setRawTerminalOutput(fd uintptr) (*State, error) { + oldState, err := saveState(fd) + if err != nil { + return nil, err + } + + // Ignore failures, since winterm.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this + // version of Windows. + _ = windows.SetConsoleMode(windows.Handle(fd), oldState.mode|windows.DISABLE_NEWLINE_AUTO_RETURN) + return oldState, err +} + +func restoreAtInterrupt(fd uintptr, state *State) { + sigchan := make(chan os.Signal, 1) + signal.Notify(sigchan, os.Interrupt) + + go func() { + _ = <-sigchan + _ = RestoreTerminal(fd, state) + os.Exit(0) + }() +} diff --git a/vendor/github.com/moby/term/termios_bsd.go b/vendor/github.com/moby/term/termios_bsd.go new file mode 100644 index 000000000..45f77e03c --- /dev/null +++ b/vendor/github.com/moby/term/termios_bsd.go @@ -0,0 +1,13 @@ +//go:build darwin || freebsd || openbsd || netbsd +// +build darwin freebsd openbsd netbsd + +package term + +import ( + "golang.org/x/sys/unix" +) + +const ( + getTermios = unix.TIOCGETA + setTermios = unix.TIOCSETA +) diff --git a/vendor/github.com/moby/term/termios_nonbsd.go b/vendor/github.com/moby/term/termios_nonbsd.go new file mode 100644 index 000000000..88b7b2156 --- /dev/null +++ b/vendor/github.com/moby/term/termios_nonbsd.go @@ -0,0 +1,13 @@ +//go:build !darwin && !freebsd && !netbsd && !openbsd && !windows +// +build !darwin,!freebsd,!netbsd,!openbsd,!windows + +package term + +import ( + "golang.org/x/sys/unix" +) + +const ( + getTermios = unix.TCGETS + setTermios = unix.TCSETS +) diff --git a/vendor/github.com/moby/term/termios_unix.go b/vendor/github.com/moby/term/termios_unix.go new file mode 100644 index 000000000..60c823783 --- /dev/null +++ b/vendor/github.com/moby/term/termios_unix.go @@ -0,0 +1,35 @@ +//go:build !windows +// +build !windows + +package term + +import ( + "golang.org/x/sys/unix" +) + +// Termios is the Unix API for terminal I/O. +// +// Deprecated: use [unix.Termios]. 
+type Termios = unix.Termios + +func makeRaw(fd uintptr) (*State, error) { + termios, err := tcget(fd) + if err != nil { + return nil, err + } + + oldState := State{termios: *termios} + + termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON + termios.Oflag &^= unix.OPOST + termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN + termios.Cflag &^= unix.CSIZE | unix.PARENB + termios.Cflag |= unix.CS8 + termios.Cc[unix.VMIN] = 1 + termios.Cc[unix.VTIME] = 0 + + if err := tcset(fd, termios); err != nil { + return nil, err + } + return &oldState, nil +} diff --git a/vendor/github.com/moby/term/termios_windows.go b/vendor/github.com/moby/term/termios_windows.go new file mode 100644 index 000000000..5be4e7601 --- /dev/null +++ b/vendor/github.com/moby/term/termios_windows.go @@ -0,0 +1,37 @@ +package term + +import "golang.org/x/sys/windows" + +func makeRaw(fd uintptr) (*State, error) { + state, err := SaveState(fd) + if err != nil { + return nil, err + } + + mode := state.mode + + // See + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx + + // Disable these modes + mode &^= windows.ENABLE_ECHO_INPUT + mode &^= windows.ENABLE_LINE_INPUT + mode &^= windows.ENABLE_MOUSE_INPUT + mode &^= windows.ENABLE_WINDOW_INPUT + mode &^= windows.ENABLE_PROCESSED_INPUT + + // Enable these modes + mode |= windows.ENABLE_EXTENDED_FLAGS + mode |= windows.ENABLE_INSERT_MODE + mode |= windows.ENABLE_QUICK_EDIT_MODE + if vtInputSupported { + mode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT + } + + err = windows.SetConsoleMode(windows.Handle(fd), mode) + if err != nil { + return nil, err + } + return state, nil +} diff --git a/vendor/github.com/moby/term/windows/ansi_reader.go b/vendor/github.com/moby/term/windows/ansi_reader.go new file mode 100644 index 000000000..fb34c547a --- /dev/null +++ b/vendor/github.com/moby/term/windows/ansi_reader.go @@ -0,0 +1,252 @@ +//go:build windows +// +build windows + +package windowsconsole + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "strings" + "unsafe" + + ansiterm "github.com/Azure/go-ansiterm" + "github.com/Azure/go-ansiterm/winterm" +) + +const ( + escapeSequence = ansiterm.KEY_ESC_CSI +) + +// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation. +type ansiReader struct { + file *os.File + fd uintptr + buffer []byte + cbBuffer int + command []byte +} + +// NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a +// Windows console input handle. +func NewAnsiReader(nFile int) io.ReadCloser { + file, fd := winterm.GetStdFile(nFile) + return &ansiReader{ + file: file, + fd: fd, + command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), + buffer: make([]byte, 0), + } +} + +// Close closes the wrapped file. +func (ar *ansiReader) Close() (err error) { + return ar.file.Close() +} + +// Fd returns the file descriptor of the wrapped file. +func (ar *ansiReader) Fd() uintptr { + return ar.fd +} + +// Read reads up to len(p) bytes of translated input events into p. 
+func (ar *ansiReader) Read(p []byte) (int, error) { + if len(p) == 0 { + return 0, nil + } + + // Previously read bytes exist, read as much as we can and return + if len(ar.buffer) > 0 { + originalLength := len(ar.buffer) + copiedLength := copy(p, ar.buffer) + + if copiedLength == originalLength { + ar.buffer = make([]byte, 0, len(p)) + } else { + ar.buffer = ar.buffer[copiedLength:] + } + + return copiedLength, nil + } + + // Read and translate key events + events, err := readInputEvents(ar, len(p)) + if err != nil { + return 0, err + } else if len(events) == 0 { + return 0, nil + } + + keyBytes := translateKeyEvents(events, []byte(escapeSequence)) + + // Save excess bytes and right-size keyBytes + if len(keyBytes) > len(p) { + ar.buffer = keyBytes[len(p):] + keyBytes = keyBytes[:len(p)] + } else if len(keyBytes) == 0 { + return 0, nil + } + + copiedLength := copy(p, keyBytes) + if copiedLength != len(keyBytes) { + return 0, errors.New("unexpected copy length encountered") + } + + return copiedLength, nil +} + +// readInputEvents polls until at least one event is available. +func readInputEvents(ar *ansiReader, maxBytes int) ([]winterm.INPUT_RECORD, error) { + // Determine the maximum number of records to retrieve + // -- Cast around the type system to obtain the size of a single INPUT_RECORD. + // unsafe.Sizeof requires an expression vs. a type-reference; the casting + // tricks the type system into believing it has such an expression. + recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes))))) + countRecords := maxBytes / recordSize + if countRecords > ansiterm.MAX_INPUT_EVENTS { + countRecords = ansiterm.MAX_INPUT_EVENTS + } else if countRecords == 0 { + countRecords = 1 + } + + // Wait for and read input events + events := make([]winterm.INPUT_RECORD, countRecords) + nEvents := uint32(0) + eventsExist, err := winterm.WaitForSingleObject(ar.fd, winterm.WAIT_INFINITE) + if err != nil { + return nil, err + } + + if eventsExist { + err = winterm.ReadConsoleInput(ar.fd, events, &nEvents) + if err != nil { + return nil, err + } + } + + // Return a slice restricted to the number of returned records + return events[:nEvents], nil +} + +// KeyEvent Translation Helpers + +var arrowKeyMapPrefix = map[uint16]string{ + winterm.VK_UP: "%s%sA", + winterm.VK_DOWN: "%s%sB", + winterm.VK_RIGHT: "%s%sC", + winterm.VK_LEFT: "%s%sD", +} + +var keyMapPrefix = map[uint16]string{ + winterm.VK_UP: "\x1B[%sA", + winterm.VK_DOWN: "\x1B[%sB", + winterm.VK_RIGHT: "\x1B[%sC", + winterm.VK_LEFT: "\x1B[%sD", + winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 + winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4 + winterm.VK_INSERT: "\x1B[2%s~", + winterm.VK_DELETE: "\x1B[3%s~", + winterm.VK_PRIOR: "\x1B[5%s~", + winterm.VK_NEXT: "\x1B[6%s~", + winterm.VK_F1: "", + winterm.VK_F2: "", + winterm.VK_F3: "\x1B[13%s~", + winterm.VK_F4: "\x1B[14%s~", + winterm.VK_F5: "\x1B[15%s~", + winterm.VK_F6: "\x1B[17%s~", + winterm.VK_F7: "\x1B[18%s~", + winterm.VK_F8: "\x1B[19%s~", + winterm.VK_F9: "\x1B[20%s~", + winterm.VK_F10: "\x1B[21%s~", + winterm.VK_F11: "\x1B[23%s~", + winterm.VK_F12: "\x1B[24%s~", +} + +// translateKeyEvents converts the input events into the appropriate ANSI string. 
+func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte { + var buffer bytes.Buffer + for _, event := range events { + if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 { + buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence)) + } + } + + return buffer.Bytes() +} + +// keyToString maps the given input event record to the corresponding string. +func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string { + if keyEvent.UnicodeChar == 0 { + return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence) + } + + _, alt, control := getControlKeys(keyEvent.ControlKeyState) + if control { + // TODO(azlinux): Implement following control sequences + // -D Signals the end of input from the keyboard; also exits current shell. + // -H Deletes the first character to the left of the cursor. Also called the ERASE key. + // -Q Restarts printing after it has been stopped with -s. + // -S Suspends printing on the screen (does not stop the program). + // -U Deletes all characters on the current line. Also called the KILL key. + // -E Quits current command and creates a core + } + + // +Key generates ESC N Key + if !control && alt { + return ansiterm.KEY_ESC_N + strings.ToLower(string(rune(keyEvent.UnicodeChar))) + } + + return string(rune(keyEvent.UnicodeChar)) +} + +// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string. +func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string { + shift, alt, control := getControlKeys(controlState) + modifier := getControlKeysModifier(shift, alt, control) + + if format, ok := arrowKeyMapPrefix[key]; ok { + return fmt.Sprintf(format, escapeSequence, modifier) + } + + if format, ok := keyMapPrefix[key]; ok { + return fmt.Sprintf(format, modifier) + } + + return "" +} + +// getControlKeys extracts the shift, alt, and ctrl key states. +func getControlKeys(controlState uint32) (shift, alt, control bool) { + shift = 0 != (controlState & winterm.SHIFT_PRESSED) + alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED)) + control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED)) + return shift, alt, control +} + +// getControlKeysModifier returns the ANSI modifier for the given combination of control keys. +func getControlKeysModifier(shift, alt, control bool) string { + if shift && alt && control { + return ansiterm.KEY_CONTROL_PARAM_8 + } + if alt && control { + return ansiterm.KEY_CONTROL_PARAM_7 + } + if shift && control { + return ansiterm.KEY_CONTROL_PARAM_6 + } + if control { + return ansiterm.KEY_CONTROL_PARAM_5 + } + if shift && alt { + return ansiterm.KEY_CONTROL_PARAM_4 + } + if alt { + return ansiterm.KEY_CONTROL_PARAM_3 + } + if shift { + return ansiterm.KEY_CONTROL_PARAM_2 + } + return "" +} diff --git a/vendor/github.com/moby/term/windows/ansi_writer.go b/vendor/github.com/moby/term/windows/ansi_writer.go new file mode 100644 index 000000000..4243307fd --- /dev/null +++ b/vendor/github.com/moby/term/windows/ansi_writer.go @@ -0,0 +1,57 @@ +//go:build windows +// +build windows + +package windowsconsole + +import ( + "io" + "os" + + ansiterm "github.com/Azure/go-ansiterm" + "github.com/Azure/go-ansiterm/winterm" +) + +// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation. 
+type ansiWriter struct { + file *os.File + fd uintptr + infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO + command []byte + escapeSequence []byte + inAnsiSequence bool + parser *ansiterm.AnsiParser +} + +// NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a +// Windows console output handle. +func NewAnsiWriter(nFile int) io.Writer { + file, fd := winterm.GetStdFile(nFile) + info, err := winterm.GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil + } + + parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file)) + + return &ansiWriter{ + file: file, + fd: fd, + infoReset: info, + command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), + escapeSequence: []byte(ansiterm.KEY_ESC_CSI), + parser: parser, + } +} + +func (aw *ansiWriter) Fd() uintptr { + return aw.fd +} + +// Write writes len(p) bytes from p to the underlying data stream. +func (aw *ansiWriter) Write(p []byte) (total int, err error) { + if len(p) == 0 { + return 0, nil + } + + return aw.parser.Parse(p) +} diff --git a/vendor/github.com/moby/term/windows/console.go b/vendor/github.com/moby/term/windows/console.go new file mode 100644 index 000000000..21e57bd52 --- /dev/null +++ b/vendor/github.com/moby/term/windows/console.go @@ -0,0 +1,43 @@ +//go:build windows +// +build windows + +package windowsconsole + +import ( + "os" + + "golang.org/x/sys/windows" +) + +// GetHandleInfo returns file descriptor and bool indicating whether the file is a console. +func GetHandleInfo(in interface{}) (uintptr, bool) { + switch t := in.(type) { + case *ansiReader: + return t.Fd(), true + case *ansiWriter: + return t.Fd(), true + } + + var inFd uintptr + var isTerminal bool + + if file, ok := in.(*os.File); ok { + inFd = file.Fd() + isTerminal = isConsole(inFd) + } + return inFd, isTerminal +} + +// IsConsole returns true if the given file descriptor is a Windows Console. +// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. +// +// Deprecated: use [windows.GetConsoleMode] or [golang.org/x/term.IsTerminal]. +func IsConsole(fd uintptr) bool { + return isConsole(fd) +} + +func isConsole(fd uintptr) bool { + var mode uint32 + err := windows.GetConsoleMode(windows.Handle(fd), &mode) + return err == nil +} diff --git a/vendor/github.com/moby/term/windows/doc.go b/vendor/github.com/moby/term/windows/doc.go new file mode 100644 index 000000000..54265fffa --- /dev/null +++ b/vendor/github.com/moby/term/windows/doc.go @@ -0,0 +1,5 @@ +// These files implement ANSI-aware input and output streams for use by the Docker Windows client. +// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create +// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. 
+ +package windowsconsole diff --git a/vendor/github.com/morikuni/aec/LICENSE b/vendor/github.com/morikuni/aec/LICENSE new file mode 100644 index 000000000..1c2640164 --- /dev/null +++ b/vendor/github.com/morikuni/aec/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Taihei Morikuni + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/morikuni/aec/README.md b/vendor/github.com/morikuni/aec/README.md new file mode 100644 index 000000000..3cbc4343e --- /dev/null +++ b/vendor/github.com/morikuni/aec/README.md @@ -0,0 +1,178 @@ +# aec + +[![GoDoc](https://godoc.org/github.com/morikuni/aec?status.svg)](https://godoc.org/github.com/morikuni/aec) + +Go wrapper for ANSI escape code. + +## Install + +```bash +go get github.com/morikuni/aec +``` + +## Features + +ANSI escape codes depend on terminal environment. +Some of these features may not work. +Check supported Font-Style/Font-Color features with [checkansi](./checkansi). + +[Wikipedia](https://en.wikipedia.org/wiki/ANSI_escape_code) for more detail. + +### Cursor + +- `Up(n)` +- `Down(n)` +- `Right(n)` +- `Left(n)` +- `NextLine(n)` +- `PreviousLine(n)` +- `Column(col)` +- `Position(row, col)` +- `Save` +- `Restore` +- `Hide` +- `Show` +- `Report` + +### Erase + +- `EraseDisplay(mode)` +- `EraseLine(mode)` + +### Scroll + +- `ScrollUp(n)` +- `ScrollDown(n)` + +### Font Style + +- `Bold` +- `Faint` +- `Italic` +- `Underline` +- `BlinkSlow` +- `BlinkRapid` +- `Inverse` +- `Conceal` +- `CrossOut` +- `Frame` +- `Encircle` +- `Overline` + +### Font Color + +Foreground color. + +- `DefaultF` +- `BlackF` +- `RedF` +- `GreenF` +- `YellowF` +- `BlueF` +- `MagentaF` +- `CyanF` +- `WhiteF` +- `LightBlackF` +- `LightRedF` +- `LightGreenF` +- `LightYellowF` +- `LightBlueF` +- `LightMagentaF` +- `LightCyanF` +- `LightWhiteF` +- `Color3BitF(color)` +- `Color8BitF(color)` +- `FullColorF(r, g, b)` + +Background color. + +- `DefaultB` +- `BlackB` +- `RedB` +- `GreenB` +- `YellowB` +- `BlueB` +- `MagentaB` +- `CyanB` +- `WhiteB` +- `LightBlackB` +- `LightRedB` +- `LightGreenB` +- `LightYellowB` +- `LightBlueB` +- `LightMagentaB` +- `LightCyanB` +- `LightWhiteB` +- `Color3BitB(color)` +- `Color8BitB(color)` +- `FullColorB(r, g, b)` + +### Color Converter + +24bit RGB color to ANSI color. + +- `NewRGB3Bit(r, g, b)` +- `NewRGB8Bit(r, g, b)` + +### Builder + +To mix these features. + +```go +custom := aec.EmptyBuilder.Right(2).RGB8BitF(128, 255, 64).RedB().ANSI +custom.Apply("Hello World") +``` + +## Usage + +1. 
Create ANSI by `aec.XXX().With(aec.YYY())` or `aec.EmptyBuilder.XXX().YYY().ANSI` +2. Print ANSI by `fmt.Print(ansi, "some string", aec.Reset)` or `fmt.Print(ansi.Apply("some string"))` + +`aec.Reset` should be added when using font style or font color features. + +## Example + +Simple progressbar. + +![sample](./sample.gif) + +```go +package main + +import ( + "fmt" + "strings" + "time" + + "github.com/morikuni/aec" +) + +func main() { + const n = 20 + builder := aec.EmptyBuilder + + up2 := aec.Up(2) + col := aec.Column(n + 2) + bar := aec.Color8BitF(aec.NewRGB8Bit(64, 255, 64)) + label := builder.LightRedF().Underline().With(col).Right(1).ANSI + + // for up2 + fmt.Println() + fmt.Println() + + for i := 0; i <= n; i++ { + fmt.Print(up2) + fmt.Println(label.Apply(fmt.Sprint(i, "/", n))) + fmt.Print("[") + fmt.Print(bar.Apply(strings.Repeat("=", i))) + fmt.Println(col.Apply("]")) + time.Sleep(100 * time.Millisecond) + } +} +``` + +## License + +[MIT](./LICENSE) + + diff --git a/vendor/github.com/morikuni/aec/aec.go b/vendor/github.com/morikuni/aec/aec.go new file mode 100644 index 000000000..566be6eb1 --- /dev/null +++ b/vendor/github.com/morikuni/aec/aec.go @@ -0,0 +1,137 @@ +package aec + +import "fmt" + +// EraseMode is listed in a variable EraseModes. +type EraseMode uint + +var ( + // EraseModes is a list of EraseMode. + EraseModes struct { + // All erase all. + All EraseMode + + // Head erase to head. + Head EraseMode + + // Tail erase to tail. + Tail EraseMode + } + + // Save saves the cursor position. + Save ANSI + + // Restore restores the cursor position. + Restore ANSI + + // Hide hides the cursor. + Hide ANSI + + // Show shows the cursor. + Show ANSI + + // Report reports the cursor position. + Report ANSI +) + +// Up moves up the cursor. +func Up(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dA", n)) +} + +// Down moves down the cursor. +func Down(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dB", n)) +} + +// Right moves right the cursor. +func Right(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dC", n)) +} + +// Left moves left the cursor. +func Left(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dD", n)) +} + +// NextLine moves down the cursor to head of a line. +func NextLine(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dE", n)) +} + +// PreviousLine moves up the cursor to head of a line. +func PreviousLine(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dF", n)) +} + +// Column set the cursor position to a given column. +func Column(col uint) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dG", col)) +} + +// Position set the cursor position to a given absolute position. +func Position(row, col uint) ANSI { + return newAnsi(fmt.Sprintf(esc+"%d;%dH", row, col)) +} + +// EraseDisplay erases display by given EraseMode. +func EraseDisplay(m EraseMode) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dJ", m)) +} + +// EraseLine erases lines by given EraseMode. +func EraseLine(m EraseMode) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dK", m)) +} + +// ScrollUp scrolls up the page. +func ScrollUp(n int) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dS", n)) +} + +// ScrollDown scrolls down the page. 
+func ScrollDown(n int) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dT", n)) +} + +func init() { + EraseModes = struct { + All EraseMode + Head EraseMode + Tail EraseMode + }{ + Tail: 0, + Head: 1, + All: 2, + } + + Save = newAnsi(esc + "s") + Restore = newAnsi(esc + "u") + Hide = newAnsi(esc + "?25l") + Show = newAnsi(esc + "?25h") + Report = newAnsi(esc + "6n") +} diff --git a/vendor/github.com/morikuni/aec/ansi.go b/vendor/github.com/morikuni/aec/ansi.go new file mode 100644 index 000000000..e60722e6e --- /dev/null +++ b/vendor/github.com/morikuni/aec/ansi.go @@ -0,0 +1,59 @@ +package aec + +import ( + "fmt" + "strings" +) + +const esc = "\x1b[" + +// Reset resets SGR effect. +const Reset string = "\x1b[0m" + +var empty = newAnsi("") + +// ANSI represents ANSI escape code. +type ANSI interface { + fmt.Stringer + + // With adapts given ANSIs. + With(...ANSI) ANSI + + // Apply wraps given string in ANSI. + Apply(string) string +} + +type ansiImpl string + +func newAnsi(s string) *ansiImpl { + r := ansiImpl(s) + return &r +} + +func (a *ansiImpl) With(ansi ...ANSI) ANSI { + return concat(append([]ANSI{a}, ansi...)) +} + +func (a *ansiImpl) Apply(s string) string { + return a.String() + s + Reset +} + +func (a *ansiImpl) String() string { + return string(*a) +} + +// Apply wraps given string in ANSIs. +func Apply(s string, ansi ...ANSI) string { + if len(ansi) == 0 { + return s + } + return concat(ansi).Apply(s) +} + +func concat(ansi []ANSI) ANSI { + strs := make([]string, 0, len(ansi)) + for _, p := range ansi { + strs = append(strs, p.String()) + } + return newAnsi(strings.Join(strs, "")) +} diff --git a/vendor/github.com/morikuni/aec/builder.go b/vendor/github.com/morikuni/aec/builder.go new file mode 100644 index 000000000..13bd002d4 --- /dev/null +++ b/vendor/github.com/morikuni/aec/builder.go @@ -0,0 +1,388 @@ +package aec + +// Builder is a lightweight syntax to construct customized ANSI. +type Builder struct { + ANSI ANSI +} + +// EmptyBuilder is an initialized Builder. +var EmptyBuilder *Builder + +// NewBuilder creates a Builder from existing ANSI. +func NewBuilder(a ...ANSI) *Builder { + return &Builder{concat(a)} +} + +// With is a syntax for With. +func (builder *Builder) With(a ...ANSI) *Builder { + return NewBuilder(builder.ANSI.With(a...)) +} + +// Up is a syntax for Up. +func (builder *Builder) Up(n uint) *Builder { + return builder.With(Up(n)) +} + +// Down is a syntax for Down. +func (builder *Builder) Down(n uint) *Builder { + return builder.With(Down(n)) +} + +// Right is a syntax for Right. +func (builder *Builder) Right(n uint) *Builder { + return builder.With(Right(n)) +} + +// Left is a syntax for Left. +func (builder *Builder) Left(n uint) *Builder { + return builder.With(Left(n)) +} + +// NextLine is a syntax for NextLine. +func (builder *Builder) NextLine(n uint) *Builder { + return builder.With(NextLine(n)) +} + +// PreviousLine is a syntax for PreviousLine. +func (builder *Builder) PreviousLine(n uint) *Builder { + return builder.With(PreviousLine(n)) +} + +// Column is a syntax for Column. +func (builder *Builder) Column(col uint) *Builder { + return builder.With(Column(col)) +} + +// Position is a syntax for Position. +func (builder *Builder) Position(row, col uint) *Builder { + return builder.With(Position(row, col)) +} + +// EraseDisplay is a syntax for EraseDisplay. +func (builder *Builder) EraseDisplay(m EraseMode) *Builder { + return builder.With(EraseDisplay(m)) +} + +// EraseLine is a syntax for EraseLine. 
+func (builder *Builder) EraseLine(m EraseMode) *Builder { + return builder.With(EraseLine(m)) +} + +// ScrollUp is a syntax for ScrollUp. +func (builder *Builder) ScrollUp(n int) *Builder { + return builder.With(ScrollUp(n)) +} + +// ScrollDown is a syntax for ScrollDown. +func (builder *Builder) ScrollDown(n int) *Builder { + return builder.With(ScrollDown(n)) +} + +// Save is a syntax for Save. +func (builder *Builder) Save() *Builder { + return builder.With(Save) +} + +// Restore is a syntax for Restore. +func (builder *Builder) Restore() *Builder { + return builder.With(Restore) +} + +// Hide is a syntax for Hide. +func (builder *Builder) Hide() *Builder { + return builder.With(Hide) +} + +// Show is a syntax for Show. +func (builder *Builder) Show() *Builder { + return builder.With(Show) +} + +// Report is a syntax for Report. +func (builder *Builder) Report() *Builder { + return builder.With(Report) +} + +// Bold is a syntax for Bold. +func (builder *Builder) Bold() *Builder { + return builder.With(Bold) +} + +// Faint is a syntax for Faint. +func (builder *Builder) Faint() *Builder { + return builder.With(Faint) +} + +// Italic is a syntax for Italic. +func (builder *Builder) Italic() *Builder { + return builder.With(Italic) +} + +// Underline is a syntax for Underline. +func (builder *Builder) Underline() *Builder { + return builder.With(Underline) +} + +// BlinkSlow is a syntax for BlinkSlow. +func (builder *Builder) BlinkSlow() *Builder { + return builder.With(BlinkSlow) +} + +// BlinkRapid is a syntax for BlinkRapid. +func (builder *Builder) BlinkRapid() *Builder { + return builder.With(BlinkRapid) +} + +// Inverse is a syntax for Inverse. +func (builder *Builder) Inverse() *Builder { + return builder.With(Inverse) +} + +// Conceal is a syntax for Conceal. +func (builder *Builder) Conceal() *Builder { + return builder.With(Conceal) +} + +// CrossOut is a syntax for CrossOut. +func (builder *Builder) CrossOut() *Builder { + return builder.With(CrossOut) +} + +// BlackF is a syntax for BlackF. +func (builder *Builder) BlackF() *Builder { + return builder.With(BlackF) +} + +// RedF is a syntax for RedF. +func (builder *Builder) RedF() *Builder { + return builder.With(RedF) +} + +// GreenF is a syntax for GreenF. +func (builder *Builder) GreenF() *Builder { + return builder.With(GreenF) +} + +// YellowF is a syntax for YellowF. +func (builder *Builder) YellowF() *Builder { + return builder.With(YellowF) +} + +// BlueF is a syntax for BlueF. +func (builder *Builder) BlueF() *Builder { + return builder.With(BlueF) +} + +// MagentaF is a syntax for MagentaF. +func (builder *Builder) MagentaF() *Builder { + return builder.With(MagentaF) +} + +// CyanF is a syntax for CyanF. +func (builder *Builder) CyanF() *Builder { + return builder.With(CyanF) +} + +// WhiteF is a syntax for WhiteF. +func (builder *Builder) WhiteF() *Builder { + return builder.With(WhiteF) +} + +// DefaultF is a syntax for DefaultF. +func (builder *Builder) DefaultF() *Builder { + return builder.With(DefaultF) +} + +// BlackB is a syntax for BlackB. +func (builder *Builder) BlackB() *Builder { + return builder.With(BlackB) +} + +// RedB is a syntax for RedB. +func (builder *Builder) RedB() *Builder { + return builder.With(RedB) +} + +// GreenB is a syntax for GreenB. +func (builder *Builder) GreenB() *Builder { + return builder.With(GreenB) +} + +// YellowB is a syntax for YellowB. +func (builder *Builder) YellowB() *Builder { + return builder.With(YellowB) +} + +// BlueB is a syntax for BlueB. 
+func (builder *Builder) BlueB() *Builder { + return builder.With(BlueB) +} + +// MagentaB is a syntax for MagentaB. +func (builder *Builder) MagentaB() *Builder { + return builder.With(MagentaB) +} + +// CyanB is a syntax for CyanB. +func (builder *Builder) CyanB() *Builder { + return builder.With(CyanB) +} + +// WhiteB is a syntax for WhiteB. +func (builder *Builder) WhiteB() *Builder { + return builder.With(WhiteB) +} + +// DefaultB is a syntax for DefaultB. +func (builder *Builder) DefaultB() *Builder { + return builder.With(DefaultB) +} + +// Frame is a syntax for Frame. +func (builder *Builder) Frame() *Builder { + return builder.With(Frame) +} + +// Encircle is a syntax for Encircle. +func (builder *Builder) Encircle() *Builder { + return builder.With(Encircle) +} + +// Overline is a syntax for Overline. +func (builder *Builder) Overline() *Builder { + return builder.With(Overline) +} + +// LightBlackF is a syntax for LightBlueF. +func (builder *Builder) LightBlackF() *Builder { + return builder.With(LightBlackF) +} + +// LightRedF is a syntax for LightRedF. +func (builder *Builder) LightRedF() *Builder { + return builder.With(LightRedF) +} + +// LightGreenF is a syntax for LightGreenF. +func (builder *Builder) LightGreenF() *Builder { + return builder.With(LightGreenF) +} + +// LightYellowF is a syntax for LightYellowF. +func (builder *Builder) LightYellowF() *Builder { + return builder.With(LightYellowF) +} + +// LightBlueF is a syntax for LightBlueF. +func (builder *Builder) LightBlueF() *Builder { + return builder.With(LightBlueF) +} + +// LightMagentaF is a syntax for LightMagentaF. +func (builder *Builder) LightMagentaF() *Builder { + return builder.With(LightMagentaF) +} + +// LightCyanF is a syntax for LightCyanF. +func (builder *Builder) LightCyanF() *Builder { + return builder.With(LightCyanF) +} + +// LightWhiteF is a syntax for LightWhiteF. +func (builder *Builder) LightWhiteF() *Builder { + return builder.With(LightWhiteF) +} + +// LightBlackB is a syntax for LightBlackB. +func (builder *Builder) LightBlackB() *Builder { + return builder.With(LightBlackB) +} + +// LightRedB is a syntax for LightRedB. +func (builder *Builder) LightRedB() *Builder { + return builder.With(LightRedB) +} + +// LightGreenB is a syntax for LightGreenB. +func (builder *Builder) LightGreenB() *Builder { + return builder.With(LightGreenB) +} + +// LightYellowB is a syntax for LightYellowB. +func (builder *Builder) LightYellowB() *Builder { + return builder.With(LightYellowB) +} + +// LightBlueB is a syntax for LightBlueB. +func (builder *Builder) LightBlueB() *Builder { + return builder.With(LightBlueB) +} + +// LightMagentaB is a syntax for LightMagentaB. +func (builder *Builder) LightMagentaB() *Builder { + return builder.With(LightMagentaB) +} + +// LightCyanB is a syntax for LightCyanB. +func (builder *Builder) LightCyanB() *Builder { + return builder.With(LightCyanB) +} + +// LightWhiteB is a syntax for LightWhiteB. +func (builder *Builder) LightWhiteB() *Builder { + return builder.With(LightWhiteB) +} + +// Color3BitF is a syntax for Color3BitF. +func (builder *Builder) Color3BitF(c RGB3Bit) *Builder { + return builder.With(Color3BitF(c)) +} + +// Color3BitB is a syntax for Color3BitB. +func (builder *Builder) Color3BitB(c RGB3Bit) *Builder { + return builder.With(Color3BitB(c)) +} + +// Color8BitF is a syntax for Color8BitF. +func (builder *Builder) Color8BitF(c RGB8Bit) *Builder { + return builder.With(Color8BitF(c)) +} + +// Color8BitB is a syntax for Color8BitB. 
+func (builder *Builder) Color8BitB(c RGB8Bit) *Builder { + return builder.With(Color8BitB(c)) +} + +// FullColorF is a syntax for FullColorF. +func (builder *Builder) FullColorF(r, g, b uint8) *Builder { + return builder.With(FullColorF(r, g, b)) +} + +// FullColorB is a syntax for FullColorB. +func (builder *Builder) FullColorB(r, g, b uint8) *Builder { + return builder.With(FullColorB(r, g, b)) +} + +// RGB3BitF is a syntax for Color3BitF with NewRGB3Bit. +func (builder *Builder) RGB3BitF(r, g, b uint8) *Builder { + return builder.Color3BitF(NewRGB3Bit(r, g, b)) +} + +// RGB3BitB is a syntax for Color3BitB with NewRGB3Bit. +func (builder *Builder) RGB3BitB(r, g, b uint8) *Builder { + return builder.Color3BitB(NewRGB3Bit(r, g, b)) +} + +// RGB8BitF is a syntax for Color8BitF with NewRGB8Bit. +func (builder *Builder) RGB8BitF(r, g, b uint8) *Builder { + return builder.Color8BitF(NewRGB8Bit(r, g, b)) +} + +// RGB8BitB is a syntax for Color8BitB with NewRGB8Bit. +func (builder *Builder) RGB8BitB(r, g, b uint8) *Builder { + return builder.Color8BitB(NewRGB8Bit(r, g, b)) +} + +func init() { + EmptyBuilder = &Builder{empty} +} diff --git a/vendor/github.com/morikuni/aec/sample.gif b/vendor/github.com/morikuni/aec/sample.gif new file mode 100644 index 0000000000000000000000000000000000000000..c6c613bb70645efa4e184726060ea1ff981eeceb GIT binary patch literal 12548 zcmeHtXHe7ox^?KiNed7{8X;7XCP*=%29P2G0wRPS6cCY(5D}@N_aaD@UX-rVRGLyn ziUk#vP5`li6zlsB?&3b@?0xn-bLW1#I^&ms!?1qqS><^Q4E0qHIxs9l8jiG4D5=G zkc%wJQx+903onrs@|6>>krR&H55KY>eoh`;asZm7fND|@8CDR!sE7_%MAs^cwkwL% zD8UkxCDWAAxhk0ZDx%L+gi8*hnhqkK92Ch^mCRSgl&MNwQ$;^kh1^y{4XcSwtBI7U zOIE2%HL9a~)WsLnMc33J&6<)AG*RQ4$Z0M3oEDOz1$(b0YH(Pw?XYC`VaYpKsV=P4 z7FJXVhd0D24dNsp;UpHd#UJTNZt5VXbS0+rBajp4Phz6_~W&Tz}DWZtdlR_FHdX zbw;p>nRc|ipYKnXb{y$w-B=ziF)lQ{-S%;9vg!1zk=yOtZ)bYq9mUM%UMfA9n94_HLw{y+a(h(q-H)YoF^N>j>XMNzW=%^r(B zo(sGdWe;Xm(&m&J4NpP=hLMdqme=}W3IoesvgnH&z-y5>+3elk1COcaV|B0R4H$9Y zV*2>z@q}O<@LD9v%r8`##56I&r^e>X2z1>hQ07gn4Oe)0x)57)f?6Z+T3DWpO+>(w z>MQt9F%RMD^U($GTVz$+w>n>xyGT1aY;RqzrjI&k+M=7B6Ttxi%Q#QpELdhJQ~WMw zrd9O}%uo?|yP&=kswoRaNCawq{q$jH<8$z(&I>QTF$-N~5~0(e?+vAYzwBrOT3w!r zIIv#MgcGuVLgREB?jH?3m{G>GkIoQ(H43Z^ycVgIdj(kkQGrfjQfKIB7>WB`iZeTV zDFP?^gs~(6!zhWus|6oSv9)GywvA@u-yUUr%Yidbb%azIq|wNzEo9j{_eP}B_#TW- zr16?9#`Q1WA7vD=qshlQS__p!U07Yq&z@YW^3YK)59q?W-}e|55j$k54LvS^H!pPh z>M0R?L0_s0XR*@INk(%=>L+B7Gm$#t_D_0q>8waq&>YTY$&~b)Z5}jE4d2Dfyh!xc zqvg6veBEv^D18mIao_Qpn%-4O0~-fstT}^Is@kX{Nw;&9USun7v>{JYox4d{U~N5X zYUTY3c1q1@)ZJD|mncx`v+Rjm43R#Q!eOs+67*;_)hVXq0E*zp`ncvfcK40?+b&LW zuF##nVk`4s{20GLStcc=Qo3nPM&-L9hw*yXoa~pPnrK@qnC`Jz-I{b}<=Z?^4?m~x z*QbxFck?DmxZQ}6BRz~$@PFN3$guW!E!ZHi<@ zMhcql2!%1dB+rs1iuusFx^h7aQ7BGTc)D4^zJY zH=l4Jr6WOWbx-BYUssFVYg@lLuciL>#^*bq(9Lv*+upVDp6OWY7rgQLL)JmFM;oI| za$n6K9sj=lQQNKV;;Sj9#jo2JqPZUJEG&C}Gow^o+Ww?<<88?FyKU6>FXx^-9Zx!hv)_14rhhu5DCU4NsxrVV%*@?Lx>=@Sb;o*5Cyd;CimDp(W!r0-= z{HeGa4(&)phc3(U{%(P`cTw^QPHYUg9^vQj$l)nhIoZN{pig|GvFxE-DHp-LqMQxU zdXHv#&SvWh$-O6=B+T-9gZ03s@1w2!XZby|^<;e-;uXIjz{gweL6RCG-87KG_bT-n z%Nr7XT#=B^+xOMRk0u59JBn^p8fbiYf8oLxBWTrb%qQ{ji!nMIg+vEJKl+S9s284(QkX=Nod_q1+A_ULu0)}7Y7rh6OE zx-99*MnLOiNdZ)?GrXBd)wr#t6AL46eeUF%4*DhZ9J z9~ShtozIU{i47IouQjo~A^$?v?%}sF^af>qXmi8i&ezMEv#(y6DIEU(9kI(a6DrCA zK8ngm0b0jmp*?evQJarN0?;~mRGJ3SF;X}H!xIf9Pjbf~vSPSn(dDY#acnf>{qgem zcbs8T7JbFq%nFq*;>xDyU9`zZ5DO`~tU>tX@vkxF*grJyFVR8(Xqo;4v=}id*gu?! 
zVFE*B?^uste|Y+D&LmRkb$wn>T3K}No-^5Wr^}}bLD4a;@ZvpZ@{urtW)PVfQ!K5f zI+HZ7nN9+arh6~dnaJuwWTpXUa$JU6(`yw#OArIr#D8Li9@!~j%VK8*oJrs;S<_y` zxS%7jM^w|A)f{jpKCv3vO#V<4J1-V3(iNiIgEr+y+?90g1tu?+B8M7Juf06^A+bQF zCWuWeX^eTOqBmVSdJ?1%8^NKwzfkL<;ls73{N}F;g4wp-K5vb`^Jixg|7yPPqFG_> zmz|IQ!I{K8Vnnj5^b-&)`pmrWTuY);R1UrpZs@H#HyfSol{QO8U0H~Z5ieIAk408f ziuGmgV37I;Mu_ve%KChU5*k|+WOBw-AR&ocuX`XBj}9tDX$#|B)6Ef^t{Ik`N^Y5! z4zc1Hc5?K1B!Mi6&JOe1b-c{FzB7L%GkZns3X71!K%V~-wPm=^7L$bb&&EdqfK=-r zK>CN-+|0f@2+<_n_Xq!GHuyb)Svx&Sj+U4ez-T|0R&Z60JLHY<@Y10#( z(aCa{W$u#z0iUwq?()?GX7gB^gho8{_OZBvyQRl!pq{#1sgLVj&d{H;1I#9CJORT@ z>qeaF467MEcE1NJcpmuCg*#fkAS!PouewZxZ>%*OjBH&YDQvy1=6&69eOB~mv&qmp zbeHAsx2L;1>;G;xKRo>%(jA`PA)VF#3DST)NGr#vkRtQ|NK149NL_S(h7|lKNNK5% zvI3Brm>Mo7T05w@WKqd`nLyrkgW0Y2m1(Pgt8^E4dw$-XO5Wu>|G^-9?#J7)|A@TW zASRF;&4K?iYWb`vV>+Ne$U8s|PzliZ73ntD#?+GGAF)C~*Cbbi8Y}#XqMF8*0@PS> zELrKyJM&Leq^&QKSmgOJPXO}1)F6dQTQvZq_G7kxbZ4#fQ-HjJSD%FWuB}c>Z)4~> zJa$>3Uz%mRIwE`+bQ7dfUQ~baFI^(6nE>UYoC@(yu*V- zs5xNN9t)Ojzv&&-d}i%s{)Jm-p54Cm=UDM}Wc1*nLI>qT{;NO7iqt*wE_j2h9P6~k zcDG?H-dD^ng`jtK_TN0)d-r>Z$@Zc|kwFMZbtX)J9}Ej;*DfVQa39(vMDn4PoODzD zi-A~i3U3(o;w1A?R9*&P6vd5bivtj~%+B$07R)XQ+#`5ogbJ09d*lt9qSz;~x=qfe zW->p6q*D2qZmz$(05?+TL;Z=5763}n|AtbW#U^p8CAI(FAK}kVwR$t~*YJmG)i;Z0 zP(3Gym;Vv|$iXGi5u?=bXL727GENPDwIS)?!;*(2XNbHzy-=WazjyZ(+ zMc2JwE=g2+rOL#(xmLJ;xbpo~&=xiPHT)Q{RaVu@F?YU>e|$InZ{aWeF(Z;*08c>B zpj81DDp3d#IT*~Wj<>4wY*ez>zS&5K{z5caluQ|o7KtMwv{40>#W=}YiZe)-I#`vS z5Rr))9Ylu&>R?UAGVYe34rTSZq{5-GsAwZONvCvEH1nc%sFL$yrj4BtK4ZqDYBb>> z*_P$g%6x10Qx19fF-0DSf%As0*&-R{U{i2$fra;)Q zB-Hi-F_jghV~+O!$o6a#lpLfsfll6m8MH^jBsKFxFdI>x42MxD<>|L?2G+CW>0GX=472}$wC?^CsDP76MjH4FauEC~M zCuJY7B7Ql|p2acCIRx*XBU%uN&i=CCv6 zYK$-mUo^}S_wyX~1Cag(`iIWeAX#lQ24a}@pzlYElLq5Xlm8g(x!2+x6tSNi*a&A& zmGz~zIK=x11QU3##W_SGsRAv|su4dz$Odq>(t5I_yg;DEk?Eoh-b|1my6J6IO3G-< zH$IQCtomhR^|q8n*UP)1YNliJ;y{bTw@rx+YiV$af`0@D9%=OhDLKiW*0-mjEIkZ$ zT*ZOf{9hi-F{$x+VQV(}xPG8pllxCCjY|siKb-9s_5P!sv`DE>ORb4 zqh<3w5KE|_7Hta6CCj?Fa`_A(e0(f7WMFtqSvgAeKKv$r7>t1#TRk;=fG37R!{A4R zZH#aEJZ>R1@;&JEkDk68nmi|8wQM*5595^%X^pIh?q?G)E_i*G0uiw~GSYwrW^FV9 z%FEpnwvyLc2za0D2uv5_k=SbmUt6!e3cvo5uk68v1BK;`IQw-IjdyP=8KTy@>^jt@ z8y#8sT%9dKH`Y9v*1GoD63jGjHN^LC8%>vH=b&>F9N+RAK>E&&G!{C3>`o98qx|lD z@VPGZ0+^XFKK{xgL*{=i+9nAi3H}tP=vX2i4$sXXI>+Y3s5*%xHB;u0xuul|a!9W# zGEwCv(J4`36%S9=vSuy?gw%N<6)$XPn9|GvkTGwMj7HrHUZxJage*Iiez(g^AuR5p z&I*8#63eU5SHcojY;)N7a~AS4Tb>E!u>wL`5WLm83=gm%d;CdA(*ST^{yn%ej7YaPB-oQ{i%6 z?(^}R-u;@XQ!hQuJ)^LB{w18&gqFmQm;4;nJhG+mEvO zV%KiLHzwM5^99_8>wSN3QD0T=eBN1GJ05fQ@W9u9ZBZM6z{0K1?1bQWV$hEUyT>@8 z`NyAIIpNa-3@}Ug_-CQ*;-sPv%u-zePgH8! 
z7raf1QFotf`WsD_#UKmQm6Mt5z%0$O4`_9RS&u|5YO8)Ax{EsvDwhMZbZ#lvw=D>N z1Bi=8@$Wn>qKjVOH;`ds0-fNaQ3Xf_?j-*fh0?vMoG&{p9*SVo z=^QIr9$|?gZ0a7c!QvqWB(e;?GQHwq4mFV4`D*i*s`9pBttcw`E7XDS>r-ZtMeY=I2XRW?uo!|+6$Y_-vjX??a+1lR>g{uJGTwin0v&C zN&Z^eNYW&GAT#PPZg=y&YvBcY0WeQ6V?MR=eD@e$>5X1fQ!M_3^wt%r63Nq?7#YTtC&V{MwJ?dT7+o6ija;!0|pOs$%dxN-md_@nXHh zg5w3qv6}&Q9m?Y#BdoLYxP0_>GDq{7yPIhCiF%sQwaa(zaawVoJuUYL8;F7&#B`+L zWHP#8`#Vl(Thmv##tTnoC=^d@jV79-bc5b5Q-yO`ynu)8FDV-_es z4>!gx$ghirX zqE6#O8oQX)`w?}ni<>QGW}53y9JwxRb<&RaZ_h-3zX*HY)2mtCL01Y@p~HN@1~WLsd0W zw_6NaIlBt8rUmlns4=t{A|o$Ayc+b5I9`T}2SoSrj6gYSo&I&8@iV>7(M1w9Rwwr{_QZQck(H&=D+e#q&4>oZ?@`?Eprcn#AViEqK5w%&fO-S2q# z%(rhJ8HK2tla=gcm4(?dmC6+?~fhi z0cUC*i;;n$k_ysw1pN1U*(BcTJoX&S>^wI;GSd`cedib|pI&ky1!vLi zVyyKE(9t6*E2#7|$bdzf#VG@~%!!k+Wqx)(lJ=Kn03FSC%~ErRIh>+jGQ#d#aToV= zZ+15TGRbm&;PW7S9{9n)(Vw{YmvD~*aM%6~+`q1z_q)#&N5^!tTP;z>6w7t)Q^QK6 z>ea#iB$jg>d9ki#04eut<*@L=g*6Qz^u&7Cg`_;7bx_7Iu_Z9rkq}?Zx%>NRjGIn9 zXw|FkN#(g2Cz{((V-OR(87V$=b6uy2`(y%Y=4S15gs-T3J>6!l|bz8u* zN9>6+Hy?-~yrKy1+fcdp=R*--)^PB^m6i?iT`P*@yi-xUI%$}2aYS?W3}pY?$op3} z#av`9GC&B)_a@Lw;L_z}k zoe)Bt0Or(KiiU4JAtPmO0+?gU`}4)*2UT4XRSp@rB&c!Ta!Ky}nhjvi=edxoXF*%4 zt33x`ZmusWo{=UabTr+%jqH|H;XP31;SeHen=J`oo71W2p92b|0?=R7ejJLvV>%00Sv5(_*k7*+BrLn|@2`p`2;-!IKho9cG7xeie*kHuz|OO=Gy;Si&MAby6VU&eOXVB!OHAajo_;w4zX5di zkUBZksyOIr0%4-s3S&JxLFe9Ho3DOLw!2cDzChXNR^9P-#sSW$wOj9Al`|G9fMuT6 z&zFR<%!%W78rj`u=23^ICG{!QcYXHpP{vE%0J} z8?{?+$?aTQ-%KD*z7aj~(vER~P9GKrlLdo0Z?}a0R{NdrtLcf)6{}lT5Bxys%l*?e z!InzGhqZ@={aQZSx%3!9XwJ^Iok$X}VL|5d5n*RTly+c|`%Eb(B0241M99KR_njTtPhefJN|+>WJU8 zviQK&tOU$KG61Cbkc1>}?bw8K*n?cQeJwzB?QH)@V2+3*5Q{l}VTO^iJV$l9+JA=db5wOxd;!kv4QS8HFDoo5Sg`B4$d zk#>dxJqY0(S+6lRu!i*e*~hiqW9Ey)hKtmF95A!aANe-%8{;N-!j!P?XwBI~d z$-)Faa<<``_lp`{f1DF{0#xyCdYy`CI_!vMZ#rF{cYWv4mg7ZJaG9^SUW4>+y42^< zm0a+n;n*t+LwA2Y3klKA0M0@TJYT&As-xAhSARMS**Pkbpo?*ej8&Zk!*g9Loya*h z{Wc;=kr*JJ0=bnKk`aiAld8?OjYGFhf)f;+^^sOU&pV%_v5JpPIHKALQBI4 z3i;22Rwo*${Q`fz?Ez>Rc{Np?r0xDblMDsv1FlbYx%_3mh4&p;u z*KH#;HdQ^1&%b^VVX+Bg=|m3atKt%#hz5VC?NTNs6^-pq(9ZS=atH=*9OKI3VzcSp zZ53$rAg8fdJei0f(5aUmuCWG_V2d)0^H*$5UJy=)Wf%({sp-S6zoPMBdOFctL{4E1 zeJbFin=CjF7s6D?PU+!a(O+NVk-vE39Vl>9VC2io{5RVO_<44|@Q#jCAMAl5u+f%_ zWBYA8!oK*NDbtslvwh|?GQ;n1G@##f)g?g>a6bu}F?`h9p6IBbZ+j@VA_=9H&T+BX zlLGMEUW`jZPD{VFND`1rk-3kENT-m^9b}3&r|t0+E#YSZX-4DsN^JlpFJ@R88OFw# zodO{K(f<)_UDI9N49jiY3v#TkoI>A2TZAg@ty_d*vJKt&-7f^_)XWSA*VgA(mkea)AWycwY#2v*f5OUJSUN>8Y~HHb1Rk*_GP$xDMW=rLjtX z%{JCqL|~@Gqxab@&Itv}>S)QSc9uoCJUjk6#|RRy-{X>*y4kQW9-tarte@{?0;<8; z9r=ZDk&oa?uN?)P5^2!y+nOyQs-VQsz@*ZGEBsN2F;gI~aKP)wOlE8EJwN++wlR_B z`C-4_b=?|l1e`A^%Q(_hv~BZ`yRN|(@&VLF5b+Mn;%)aZ(4PJJW%Yoqcu; z^rD@48c|v!5kJaqa(+hC-h96{1XJh_(QawL_Ap(bA7V5H%!Uj@TZ!~tIV zl0G(?Fp(=G4QyAg6T{RrSVbp`Rwkb~Ix=CUTL`aY67t zqYJA=)-TgC}e>hXU zhfQ(#9nGa5Cw3zq6VwyCA}9-b6R6>}!9Sna@!#m`k3l76Gshr#8VBNJYO`755J22a zP||7~h?gH(=u1>v?RQB&upWa*fkNH_TUU;iR#ar-!)TKKFi9@Yae%Vf2EU%}>z&CH)it%uW9T z=6K%-Ix=upDf6QT`q2X>#=>;=<(Ws#--~2;>6_Pd)GUhz;i4VvoQBkoJaH(5J0>Ez z0Im>${t;j#xX3g8)Iw0ZWbVi*^Ir==_zmfPzi^LaJ@KBLY|<;Y&gM1rnZ5yE8ta)o-#<<3w9mD;?+DF2 zES0URSfg`al4y|umsWkG;Q?p6@%qs_n%8h_zNgZj8A3~6M^T%>(K~yQrjPHz-Mg2@ zcE3u@gq~gT`2DOB3=G#F`q=_(SbpedrTOprc~$pU{bW80=w}Oef`WqXyytKFX?Xl# zfWXiC$-*$OfHT#yHP5i%)Gf3BNk8?o_Vjbf>ks{`_<6W$10ZVsD~JpN_>}sYZU98Y zUJ8xZ5kRriEYIch?$gm8CM?h8#S3ae{KNJwa;D0&sbHQhgkibFKf*BJxw#>b+71QB z(}Wp7E4-%&S=1*LKq&`R$sKb&;heeHSJ6sPCX&t=kao zSPyvys>#$=JS=jn+73mhY>&#u5u?)}p9I{hqPwtJz6|3*!h}R9IL^tZWyqhvSx5KS zj;B)ZVd60S;MAB4!)q+Gh>n=AgPjo=tb}Lv_mHr-ej3thtNgow_$dx!X79NxMRD-U~8KZm@LAKgE$Ll1{C#o(5BI7i+Vv2*c7*|S?05=O%_UI~9s<-^y0f%V4+@&lKGB*Zz!stBk& 
zdMQd#n9+E}6+hMmkK*{jXO!Gz2PQl+!}^qDSr}xs{8GT`il_LMZel;GH0zX9Rdi&W Y(?6j)4M_L*quqaULH)mVw5$IA0FB)@mjD0& literal 0 HcmV?d00001 diff --git a/vendor/github.com/morikuni/aec/sgr.go b/vendor/github.com/morikuni/aec/sgr.go new file mode 100644 index 000000000..0ba3464e6 --- /dev/null +++ b/vendor/github.com/morikuni/aec/sgr.go @@ -0,0 +1,202 @@ +package aec + +import ( + "fmt" +) + +// RGB3Bit is a 3bit RGB color. +type RGB3Bit uint8 + +// RGB8Bit is a 8bit RGB color. +type RGB8Bit uint8 + +func newSGR(n uint) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dm", n)) +} + +// NewRGB3Bit create a RGB3Bit from given RGB. +func NewRGB3Bit(r, g, b uint8) RGB3Bit { + return RGB3Bit((r >> 7) | ((g >> 6) & 0x2) | ((b >> 5) & 0x4)) +} + +// NewRGB8Bit create a RGB8Bit from given RGB. +func NewRGB8Bit(r, g, b uint8) RGB8Bit { + return RGB8Bit(16 + 36*(r/43) + 6*(g/43) + b/43) +} + +// Color3BitF set the foreground color of text. +func Color3BitF(c RGB3Bit) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dm", c+30)) +} + +// Color3BitB set the background color of text. +func Color3BitB(c RGB3Bit) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dm", c+40)) +} + +// Color8BitF set the foreground color of text. +func Color8BitF(c RGB8Bit) ANSI { + return newAnsi(fmt.Sprintf(esc+"38;5;%dm", c)) +} + +// Color8BitB set the background color of text. +func Color8BitB(c RGB8Bit) ANSI { + return newAnsi(fmt.Sprintf(esc+"48;5;%dm", c)) +} + +// FullColorF set the foreground color of text. +func FullColorF(r, g, b uint8) ANSI { + return newAnsi(fmt.Sprintf(esc+"38;2;%d;%d;%dm", r, g, b)) +} + +// FullColorB set the foreground color of text. +func FullColorB(r, g, b uint8) ANSI { + return newAnsi(fmt.Sprintf(esc+"48;2;%d;%d;%dm", r, g, b)) +} + +// Style +var ( + // Bold set the text style to bold or increased intensity. + Bold ANSI + + // Faint set the text style to faint. + Faint ANSI + + // Italic set the text style to italic. + Italic ANSI + + // Underline set the text style to underline. + Underline ANSI + + // BlinkSlow set the text style to slow blink. + BlinkSlow ANSI + + // BlinkRapid set the text style to rapid blink. + BlinkRapid ANSI + + // Inverse swap the foreground color and background color. + Inverse ANSI + + // Conceal set the text style to conceal. + Conceal ANSI + + // CrossOut set the text style to crossed out. + CrossOut ANSI + + // Frame set the text style to framed. + Frame ANSI + + // Encircle set the text style to encircled. + Encircle ANSI + + // Overline set the text style to overlined. + Overline ANSI +) + +// Foreground color of text. +var ( + // DefaultF is the default color of foreground. + DefaultF ANSI + + // Normal color + BlackF ANSI + RedF ANSI + GreenF ANSI + YellowF ANSI + BlueF ANSI + MagentaF ANSI + CyanF ANSI + WhiteF ANSI + + // Light color + LightBlackF ANSI + LightRedF ANSI + LightGreenF ANSI + LightYellowF ANSI + LightBlueF ANSI + LightMagentaF ANSI + LightCyanF ANSI + LightWhiteF ANSI +) + +// Background color of text. +var ( + // DefaultB is the default color of background. 
+ DefaultB ANSI + + // Normal color + BlackB ANSI + RedB ANSI + GreenB ANSI + YellowB ANSI + BlueB ANSI + MagentaB ANSI + CyanB ANSI + WhiteB ANSI + + // Light color + LightBlackB ANSI + LightRedB ANSI + LightGreenB ANSI + LightYellowB ANSI + LightBlueB ANSI + LightMagentaB ANSI + LightCyanB ANSI + LightWhiteB ANSI +) + +func init() { + Bold = newSGR(1) + Faint = newSGR(2) + Italic = newSGR(3) + Underline = newSGR(4) + BlinkSlow = newSGR(5) + BlinkRapid = newSGR(6) + Inverse = newSGR(7) + Conceal = newSGR(8) + CrossOut = newSGR(9) + + BlackF = newSGR(30) + RedF = newSGR(31) + GreenF = newSGR(32) + YellowF = newSGR(33) + BlueF = newSGR(34) + MagentaF = newSGR(35) + CyanF = newSGR(36) + WhiteF = newSGR(37) + + DefaultF = newSGR(39) + + BlackB = newSGR(40) + RedB = newSGR(41) + GreenB = newSGR(42) + YellowB = newSGR(43) + BlueB = newSGR(44) + MagentaB = newSGR(45) + CyanB = newSGR(46) + WhiteB = newSGR(47) + + DefaultB = newSGR(49) + + Frame = newSGR(51) + Encircle = newSGR(52) + Overline = newSGR(53) + + LightBlackF = newSGR(90) + LightRedF = newSGR(91) + LightGreenF = newSGR(92) + LightYellowF = newSGR(93) + LightBlueF = newSGR(94) + LightMagentaF = newSGR(95) + LightCyanF = newSGR(96) + LightWhiteF = newSGR(97) + + LightBlackB = newSGR(100) + LightRedB = newSGR(101) + LightGreenB = newSGR(102) + LightYellowB = newSGR(103) + LightBlueB = newSGR(104) + LightMagentaB = newSGR(105) + LightCyanB = newSGR(106) + LightWhiteB = newSGR(107) +} diff --git a/vendor/github.com/opencontainers/go-digest/.mailmap b/vendor/github.com/opencontainers/go-digest/.mailmap new file mode 100644 index 000000000..eaf8b2f9e --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/.mailmap @@ -0,0 +1,4 @@ +Aaron Lehmann +Derek McGowan +Stephen J Day +Haibing Zhou diff --git a/vendor/github.com/opencontainers/go-digest/.pullapprove.yml b/vendor/github.com/opencontainers/go-digest/.pullapprove.yml new file mode 100644 index 000000000..b6165f83c --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/.pullapprove.yml @@ -0,0 +1,28 @@ +version: 2 + +requirements: + signed_off_by: + required: true + +always_pending: + title_regex: '^WIP' + explanation: 'Work in progress...' + +group_defaults: + required: 2 + approve_by_comment: + enabled: true + approve_regex: '^LGTM' + reject_regex: '^Rejected' + reset_on_push: + enabled: true + author_approval: + ignored: true + conditions: + branches: + - master + +groups: + go-digest: + teams: + - go-digest-maintainers diff --git a/vendor/github.com/opencontainers/go-digest/.travis.yml b/vendor/github.com/opencontainers/go-digest/.travis.yml new file mode 100644 index 000000000..5775f885c --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/.travis.yml @@ -0,0 +1,5 @@ +language: go +go: + - 1.12.x + - 1.13.x + - master diff --git a/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md b/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md new file mode 100644 index 000000000..e4d962ac1 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md @@ -0,0 +1,72 @@ +# Contributing to Docker open source projects + +Want to hack on this project? Awesome! Here are instructions to get you started. + +This project is a part of the [Docker](https://www.docker.com) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. 
+ +Otherwise, go read Docker's +[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), +[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), +[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and +[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). + +For an in-depth description of our contribution process, visit the +contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/) + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +1 Letterman Drive +Suite D4700 +San Francisco, CA, 94129 + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/vendor/github.com/opencontainers/go-digest/LICENSE b/vendor/github.com/opencontainers/go-digest/LICENSE new file mode 100644 index 000000000..3ac8ab648 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/LICENSE @@ -0,0 +1,192 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2019, 2020 OCI Contributors + Copyright 2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opencontainers/go-digest/LICENSE.docs b/vendor/github.com/opencontainers/go-digest/LICENSE.docs new file mode 100644 index 000000000..e26cd4fc8 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/LICENSE.docs @@ -0,0 +1,425 @@ +Attribution-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. 
Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. 
Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. 
Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. 
If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + + including for purposes of Section 3(b); and + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. 
THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. 
Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public licenses. +Notwithstanding, Creative Commons may elect to apply one of its public +licenses to material it publishes and in those instances will be +considered the "Licensor." Except for the limited purpose of indicating +that material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the public +licenses. + +Creative Commons may be contacted at creativecommons.org. diff --git a/vendor/github.com/opencontainers/go-digest/MAINTAINERS b/vendor/github.com/opencontainers/go-digest/MAINTAINERS new file mode 100644 index 000000000..843b1b206 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/MAINTAINERS @@ -0,0 +1,5 @@ +Derek McGowan (@dmcgowan) +Stephen Day (@stevvooe) +Vincent Batts (@vbatts) +Akihiro Suda (@AkihiroSuda) +Sebastiaan van Stijn (@thaJeztah) diff --git a/vendor/github.com/opencontainers/go-digest/README.md b/vendor/github.com/opencontainers/go-digest/README.md new file mode 100644 index 000000000..a11287207 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/README.md @@ -0,0 +1,96 @@ +# go-digest + +[![GoDoc](https://godoc.org/github.com/opencontainers/go-digest?status.svg)](https://godoc.org/github.com/opencontainers/go-digest) [![Go Report Card](https://goreportcard.com/badge/github.com/opencontainers/go-digest)](https://goreportcard.com/report/github.com/opencontainers/go-digest) [![Build Status](https://travis-ci.org/opencontainers/go-digest.svg?branch=master)](https://travis-ci.org/opencontainers/go-digest) + +Common digest package used across the container ecosystem. + +Please see the [godoc](https://godoc.org/github.com/opencontainers/go-digest) for more information. + +# What is a digest? + +A digest is just a [hash](https://en.wikipedia.org/wiki/Hash_function). + +The most common use case for a digest is to create a content identifier for use in [Content Addressable Storage](https://en.wikipedia.org/wiki/Content-addressable_storage) systems: + +```go +id := digest.FromBytes([]byte("my content")) +``` + +In the example above, the id can be used to uniquely identify the byte slice "my content". +This allows two disparate applications to agree on a verifiable identifier without having to trust one another. 
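+
+As an illustrative aside (a minimal sketch, not part of the upstream README), the two halves of a digest string can be read back with `Algorithm` and `Encoded`, and untrusted strings can be checked with `Parse`; this assumes the package is imported as `digest` and a hash implementation such as `crypto/sha256` is linked in, as described under Usage below:
+
+```go
+id := digest.FromBytes([]byte("my content"))
+
+// A digest is an "<algorithm>:<encoded>" string,
+// e.g. "sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc".
+fmt.Println(id.Algorithm()) // "sha256"
+fmt.Println(id.Encoded())   // lower-case hex encoding of the hash
+
+// Digest strings from untrusted sources should be validated with Parse.
+if _, err := digest.Parse(id.String()); err != nil {
+	return err
+}
+```
+
+`Parse` returns an error on malformed input, whereas the accessors panic on an invalid digest, so `Parse` is the safer entry point for user-supplied strings.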
+ +An identifying digest can be verified, as follows: + +```go +if id != digest.FromBytes([]byte("my content")) { + return errors.New("the content has changed!") +} +``` + +A `Verifier` type can be used to handle cases where an `io.Reader` makes more sense: + +```go +rd := getContent() +verifier := id.Verifier() +io.Copy(verifier, rd) + +if !verifier.Verified() { + return errors.New("the content has changed!") +} +``` + +Using [Merkle DAGs](https://en.wikipedia.org/wiki/Merkle_tree), this can power a rich, safe, content distribution system. + +# Usage + +While the [godoc](https://godoc.org/github.com/opencontainers/go-digest) is considered the best resource, a few important items need to be called out when using this package. + +1. Make sure to import the hash implementations into your application or the package will panic. + You should have something like the following in the main (or other entrypoint) of your application: + + ```go + import ( + _ "crypto/sha256" + _ "crypto/sha512" + ) + ``` + This may seem inconvenient but it allows you replace the hash + implementations with others, such as https://github.com/stevvooe/resumable. + +2. Even though `digest.Digest` may be assemblable as a string, _always_ verify your input with `digest.Parse` or use `Digest.Validate` when accepting untrusted input. + While there are measures to avoid common problems, this will ensure you have valid digests in the rest of your application. + +3. While alternative encodings of hash values (digests) are possible (for example, base64), this package deals exclusively with hex-encoded digests. + +# Stability + +The Go API, at this stage, is considered stable, unless otherwise noted. + +As always, before using a package export, read the [godoc](https://godoc.org/github.com/opencontainers/go-digest). + +# Contributing + +This package is considered fairly complete. +It has been in production in thousands (millions?) of deployments and is fairly battle-hardened. +New additions will be met with skepticism. +If you think there is a missing feature, please file a bug clearly describing the problem and the alternatives you tried before submitting a PR. + +## Code of Conduct + +Participation in the OpenContainers community is governed by [OpenContainer's Code of Conduct][code-of-conduct]. + +## Security + +If you find an issue, please follow the [security][security] protocol to report it. + +# Copyright and license + +Copyright © 2019, 2020 OCI Contributors +Copyright © 2016 Docker, Inc. +All rights reserved, except as follows. +Code is released under the [Apache 2.0 license](LICENSE). +This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs). +You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/. + +[security]: https://github.com/opencontainers/org/blob/master/security +[code-of-conduct]: https://github.com/opencontainers/org/blob/master/CODE_OF_CONDUCT.md diff --git a/vendor/github.com/opencontainers/go-digest/algorithm.go b/vendor/github.com/opencontainers/go-digest/algorithm.go new file mode 100644 index 000000000..490951dc3 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/algorithm.go @@ -0,0 +1,193 @@ +// Copyright 2019, 2020 OCI Contributors +// Copyright 2017 Docker, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package digest + +import ( + "crypto" + "fmt" + "hash" + "io" + "regexp" +) + +// Algorithm identifies and implementation of a digester by an identifier. +// Note the that this defines both the hash algorithm used and the string +// encoding. +type Algorithm string + +// supported digest types +const ( + SHA256 Algorithm = "sha256" // sha256 with hex encoding (lower case only) + SHA384 Algorithm = "sha384" // sha384 with hex encoding (lower case only) + SHA512 Algorithm = "sha512" // sha512 with hex encoding (lower case only) + + // Canonical is the primary digest algorithm used with the distribution + // project. Other digests may be used but this one is the primary storage + // digest. + Canonical = SHA256 +) + +var ( + // TODO(stevvooe): Follow the pattern of the standard crypto package for + // registration of digests. Effectively, we are a registerable set and + // common symbol access. + + // algorithms maps values to hash.Hash implementations. Other algorithms + // may be available but they cannot be calculated by the digest package. + algorithms = map[Algorithm]crypto.Hash{ + SHA256: crypto.SHA256, + SHA384: crypto.SHA384, + SHA512: crypto.SHA512, + } + + // anchoredEncodedRegexps contains anchored regular expressions for hex-encoded digests. + // Note that /A-F/ disallowed. + anchoredEncodedRegexps = map[Algorithm]*regexp.Regexp{ + SHA256: regexp.MustCompile(`^[a-f0-9]{64}$`), + SHA384: regexp.MustCompile(`^[a-f0-9]{96}$`), + SHA512: regexp.MustCompile(`^[a-f0-9]{128}$`), + } +) + +// Available returns true if the digest type is available for use. If this +// returns false, Digester and Hash will return nil. +func (a Algorithm) Available() bool { + h, ok := algorithms[a] + if !ok { + return false + } + + // check availability of the hash, as well + return h.Available() +} + +func (a Algorithm) String() string { + return string(a) +} + +// Size returns number of bytes returned by the hash. +func (a Algorithm) Size() int { + h, ok := algorithms[a] + if !ok { + return 0 + } + return h.Size() +} + +// Set implemented to allow use of Algorithm as a command line flag. +func (a *Algorithm) Set(value string) error { + if value == "" { + *a = Canonical + } else { + // just do a type conversion, support is queried with Available. + *a = Algorithm(value) + } + + if !a.Available() { + return ErrDigestUnsupported + } + + return nil +} + +// Digester returns a new digester for the specified algorithm. If the algorithm +// does not have a digester implementation, nil will be returned. This can be +// checked by calling Available before calling Digester. +func (a Algorithm) Digester() Digester { + return &digester{ + alg: a, + hash: a.Hash(), + } +} + +// Hash returns a new hash as used by the algorithm. If not available, the +// method will panic. Check Algorithm.Available() before calling. 
+func (a Algorithm) Hash() hash.Hash { + if !a.Available() { + // Empty algorithm string is invalid + if a == "" { + panic(fmt.Sprintf("empty digest algorithm, validate before calling Algorithm.Hash()")) + } + + // NOTE(stevvooe): A missing hash is usually a programming error that + // must be resolved at compile time. We don't import in the digest + // package to allow users to choose their hash implementation (such as + // when using stevvooe/resumable or a hardware accelerated package). + // + // Applications that may want to resolve the hash at runtime should + // call Algorithm.Available before call Algorithm.Hash(). + panic(fmt.Sprintf("%v not available (make sure it is imported)", a)) + } + + return algorithms[a].New() +} + +// Encode encodes the raw bytes of a digest, typically from a hash.Hash, into +// the encoded portion of the digest. +func (a Algorithm) Encode(d []byte) string { + // TODO(stevvooe): Currently, all algorithms use a hex encoding. When we + // add support for back registration, we can modify this accordingly. + return fmt.Sprintf("%x", d) +} + +// FromReader returns the digest of the reader using the algorithm. +func (a Algorithm) FromReader(rd io.Reader) (Digest, error) { + digester := a.Digester() + + if _, err := io.Copy(digester.Hash(), rd); err != nil { + return "", err + } + + return digester.Digest(), nil +} + +// FromBytes digests the input and returns a Digest. +func (a Algorithm) FromBytes(p []byte) Digest { + digester := a.Digester() + + if _, err := digester.Hash().Write(p); err != nil { + // Writes to a Hash should never fail. None of the existing + // hash implementations in the stdlib or hashes vendored + // here can return errors from Write. Having a panic in this + // condition instead of having FromBytes return an error value + // avoids unnecessary error handling paths in all callers. + panic("write to hash function returned error: " + err.Error()) + } + + return digester.Digest() +} + +// FromString digests the string input and returns a Digest. +func (a Algorithm) FromString(s string) Digest { + return a.FromBytes([]byte(s)) +} + +// Validate validates the encoded portion string +func (a Algorithm) Validate(encoded string) error { + r, ok := anchoredEncodedRegexps[a] + if !ok { + return ErrDigestUnsupported + } + // Digests much always be hex-encoded, ensuring that their hex portion will + // always be size*2 + if a.Size()*2 != len(encoded) { + return ErrDigestInvalidLength + } + if r.MatchString(encoded) { + return nil + } + return ErrDigestInvalidFormat +} diff --git a/vendor/github.com/opencontainers/go-digest/digest.go b/vendor/github.com/opencontainers/go-digest/digest.go new file mode 100644 index 000000000..518b5e715 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/digest.go @@ -0,0 +1,157 @@ +// Copyright 2019, 2020 OCI Contributors +// Copyright 2017 Docker, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package digest + +import ( + "fmt" + "hash" + "io" + "regexp" + "strings" +) + +// Digest allows simple protection of hex formatted digest strings, prefixed +// by their algorithm. Strings of type Digest have some guarantee of being in +// the correct format and it provides quick access to the components of a +// digest string. +// +// The following is an example of the contents of Digest types: +// +// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc +// +// This allows to abstract the digest behind this type and work only in those +// terms. +type Digest string + +// NewDigest returns a Digest from alg and a hash.Hash object. +func NewDigest(alg Algorithm, h hash.Hash) Digest { + return NewDigestFromBytes(alg, h.Sum(nil)) +} + +// NewDigestFromBytes returns a new digest from the byte contents of p. +// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...) +// functions. This is also useful for rebuilding digests from binary +// serializations. +func NewDigestFromBytes(alg Algorithm, p []byte) Digest { + return NewDigestFromEncoded(alg, alg.Encode(p)) +} + +// NewDigestFromHex is deprecated. Please use NewDigestFromEncoded. +func NewDigestFromHex(alg, hex string) Digest { + return NewDigestFromEncoded(Algorithm(alg), hex) +} + +// NewDigestFromEncoded returns a Digest from alg and the encoded digest. +func NewDigestFromEncoded(alg Algorithm, encoded string) Digest { + return Digest(fmt.Sprintf("%s:%s", alg, encoded)) +} + +// DigestRegexp matches valid digest types. +var DigestRegexp = regexp.MustCompile(`[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+`) + +// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match. +var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`) + +var ( + // ErrDigestInvalidFormat returned when digest format invalid. + ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format") + + // ErrDigestInvalidLength returned when digest has invalid length. + ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length") + + // ErrDigestUnsupported returned when the digest algorithm is unsupported. + ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm") +) + +// Parse parses s and returns the validated digest object. An error will +// be returned if the format is invalid. +func Parse(s string) (Digest, error) { + d := Digest(s) + return d, d.Validate() +} + +// FromReader consumes the content of rd until io.EOF, returning canonical digest. +func FromReader(rd io.Reader) (Digest, error) { + return Canonical.FromReader(rd) +} + +// FromBytes digests the input and returns a Digest. +func FromBytes(p []byte) Digest { + return Canonical.FromBytes(p) +} + +// FromString digests the input and returns a Digest. +func FromString(s string) Digest { + return Canonical.FromString(s) +} + +// Validate checks that the contents of d is a valid digest, returning an +// error if not. +func (d Digest) Validate() error { + s := string(d) + i := strings.Index(s, ":") + if i <= 0 || i+1 == len(s) { + return ErrDigestInvalidFormat + } + algorithm, encoded := Algorithm(s[:i]), s[i+1:] + if !algorithm.Available() { + if !DigestRegexpAnchored.MatchString(s) { + return ErrDigestInvalidFormat + } + return ErrDigestUnsupported + } + return algorithm.Validate(encoded) +} + +// Algorithm returns the algorithm portion of the digest. This will panic if +// the underlying digest is not in a valid format. 
+func (d Digest) Algorithm() Algorithm { + return Algorithm(d[:d.sepIndex()]) +} + +// Verifier returns a writer object that can be used to verify a stream of +// content against the digest. If the digest is invalid, the method will panic. +func (d Digest) Verifier() Verifier { + return hashVerifier{ + hash: d.Algorithm().Hash(), + digest: d, + } +} + +// Encoded returns the encoded portion of the digest. This will panic if the +// underlying digest is not in a valid format. +func (d Digest) Encoded() string { + return string(d[d.sepIndex()+1:]) +} + +// Hex is deprecated. Please use Digest.Encoded. +func (d Digest) Hex() string { + return d.Encoded() +} + +func (d Digest) String() string { + return string(d) +} + +func (d Digest) sepIndex() int { + i := strings.Index(string(d), ":") + + if i < 0 { + panic(fmt.Sprintf("no ':' separator in digest %q", d)) + } + + return i +} diff --git a/vendor/github.com/opencontainers/go-digest/digester.go b/vendor/github.com/opencontainers/go-digest/digester.go new file mode 100644 index 000000000..ede907757 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/digester.go @@ -0,0 +1,40 @@ +// Copyright 2019, 2020 OCI Contributors +// Copyright 2017 Docker, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package digest + +import "hash" + +// Digester calculates the digest of written data. Writes should go directly +// to the return value of Hash, while calling Digest will return the current +// value of the digest. +type Digester interface { + Hash() hash.Hash // provides direct access to underlying hash instance. + Digest() Digest +} + +// digester provides a simple digester definition that embeds a hasher. +type digester struct { + alg Algorithm + hash hash.Hash +} + +func (d *digester) Hash() hash.Hash { + return d.hash +} + +func (d *digester) Digest() Digest { + return NewDigest(d.alg, d.hash) +} diff --git a/vendor/github.com/opencontainers/go-digest/doc.go b/vendor/github.com/opencontainers/go-digest/doc.go new file mode 100644 index 000000000..83d3a936c --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/doc.go @@ -0,0 +1,62 @@ +// Copyright 2019, 2020 OCI Contributors +// Copyright 2017 Docker, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package digest provides a generalized type to opaquely represent message +// digests and their operations within the registry. The Digest type is +// designed to serve as a flexible identifier in a content-addressable system. 
+// More importantly, it provides tools and wrappers to work with +// hash.Hash-based digests with little effort. +// +// Basics +// +// The format of a digest is simply a string with two parts, dubbed the +// "algorithm" and the "digest", separated by a colon: +// +// : +// +// An example of a sha256 digest representation follows: +// +// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc +// +// The "algorithm" portion defines both the hashing algorithm used to calculate +// the digest and the encoding of the resulting digest, which defaults to "hex" +// if not otherwise specified. Currently, all supported algorithms have their +// digests encoded in hex strings. +// +// In the example above, the string "sha256" is the algorithm and the hex bytes +// are the "digest". +// +// Because the Digest type is simply a string, once a valid Digest is +// obtained, comparisons are cheap, quick and simple to express with the +// standard equality operator. +// +// Verification +// +// The main benefit of using the Digest type is simple verification against a +// given digest. The Verifier interface, modeled after the stdlib hash.Hash +// interface, provides a common write sink for digest verification. After +// writing is complete, calling the Verifier.Verified method will indicate +// whether or not the stream of bytes matches the target digest. +// +// Missing Features +// +// In addition to the above, we intend to add the following features to this +// package: +// +// 1. A Digester type that supports write sink digest calculation. +// +// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry. +// +package digest diff --git a/vendor/github.com/opencontainers/go-digest/verifiers.go b/vendor/github.com/opencontainers/go-digest/verifiers.go new file mode 100644 index 000000000..afef506f4 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/verifiers.go @@ -0,0 +1,46 @@ +// Copyright 2019, 2020 OCI Contributors +// Copyright 2017 Docker, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package digest + +import ( + "hash" + "io" +) + +// Verifier presents a general verification interface to be used with message +// digests and other byte stream verifications. Users instantiate a Verifier +// from one of the various methods, write the data under test to it then check +// the result with the Verified method. +type Verifier interface { + io.Writer + + // Verified will return true if the content written to Verifier matches + // the digest. 
+ Verified() bool +} + +type hashVerifier struct { + digest Digest + hash hash.Hash +} + +func (hv hashVerifier) Write(p []byte) (n int, err error) { + return hv.hash.Write(p) +} + +func (hv hashVerifier) Verified() bool { + return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash) +} diff --git a/vendor/github.com/opencontainers/image-spec/LICENSE b/vendor/github.com/opencontainers/image-spec/LICENSE new file mode 100644 index 000000000..9fdc20fdb --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2016 The Linux Foundation. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go new file mode 100644 index 000000000..6f9e6fd3a --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go @@ -0,0 +1,71 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +const ( + // AnnotationCreated is the annotation key for the date and time on which the image was built (date-time string as defined by RFC 3339). + AnnotationCreated = "org.opencontainers.image.created" + + // AnnotationAuthors is the annotation key for the contact details of the people or organization responsible for the image (freeform string). + AnnotationAuthors = "org.opencontainers.image.authors" + + // AnnotationURL is the annotation key for the URL to find more information on the image. + AnnotationURL = "org.opencontainers.image.url" + + // AnnotationDocumentation is the annotation key for the URL to get documentation on the image. + AnnotationDocumentation = "org.opencontainers.image.documentation" + + // AnnotationSource is the annotation key for the URL to get source code for building the image. + AnnotationSource = "org.opencontainers.image.source" + + // AnnotationVersion is the annotation key for the version of the packaged software. + // The version MAY match a label or tag in the source code repository. + // The version MAY be Semantic versioning-compatible. + AnnotationVersion = "org.opencontainers.image.version" + + // AnnotationRevision is the annotation key for the source control revision identifier for the packaged software. + AnnotationRevision = "org.opencontainers.image.revision" + + // AnnotationVendor is the annotation key for the name of the distributing entity, organization or individual. + AnnotationVendor = "org.opencontainers.image.vendor" + + // AnnotationLicenses is the annotation key for the license(s) under which contained software is distributed as an SPDX License Expression. + AnnotationLicenses = "org.opencontainers.image.licenses" + + // AnnotationRefName is the annotation key for the name of the reference for a target. + // SHOULD only be considered valid when on descriptors on `index.json` within image layout. + AnnotationRefName = "org.opencontainers.image.ref.name" + + // AnnotationTitle is the annotation key for the human-readable title of the image. 
+ AnnotationTitle = "org.opencontainers.image.title" + + // AnnotationDescription is the annotation key for the human-readable description of the software packaged in the image. + AnnotationDescription = "org.opencontainers.image.description" + + // AnnotationBaseImageDigest is the annotation key for the digest of the image's base image. + AnnotationBaseImageDigest = "org.opencontainers.image.base.digest" + + // AnnotationBaseImageName is the annotation key for the image reference of the image's base image. + AnnotationBaseImageName = "org.opencontainers.image.base.name" + + // AnnotationArtifactCreated is the annotation key for the date and time on which the artifact was built, conforming to RFC 3339. + AnnotationArtifactCreated = "org.opencontainers.artifact.created" + + // AnnotationArtifactDescription is the annotation key for the human readable description for the artifact. + AnnotationArtifactDescription = "org.opencontainers.artifact.description" + + // AnnotationReferrersFiltersApplied is the annotation key for the comma separated list of filters applied by the registry in the referrers listing. + AnnotationReferrersFiltersApplied = "org.opencontainers.referrers.filtersApplied" +) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go new file mode 100644 index 000000000..03d76ce43 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go @@ -0,0 +1,34 @@ +// Copyright 2022 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +// Artifact describes an artifact manifest. +// This structure provides `application/vnd.oci.artifact.manifest.v1+json` mediatype when marshalled to JSON. +type Artifact struct { + // MediaType is the media type of the object this schema refers to. + MediaType string `json:"mediaType"` + + // ArtifactType is the IANA media type of the artifact this schema refers to. + ArtifactType string `json:"artifactType"` + + // Blobs is a collection of blobs referenced by this manifest. + Blobs []Descriptor `json:"blobs,omitempty"` + + // Subject (reference) is an optional link from the artifact to another manifest forming an association between the artifact and the other manifest. + Subject *Descriptor `json:"subject,omitempty"` + + // Annotations contains arbitrary metadata for the artifact manifest. + Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go new file mode 100644 index 000000000..ffff4b6d1 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go @@ -0,0 +1,114 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "time" + + digest "github.com/opencontainers/go-digest" +) + +// ImageConfig defines the execution parameters which should be used as a base when running a container using an image. +type ImageConfig struct { + // User defines the username or UID which the process in the container should run as. + User string `json:"User,omitempty"` + + // ExposedPorts a set of ports to expose from a container running this image. + ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"` + + // Env is a list of environment variables to be used in a container. + Env []string `json:"Env,omitempty"` + + // Entrypoint defines a list of arguments to use as the command to execute when the container starts. + Entrypoint []string `json:"Entrypoint,omitempty"` + + // Cmd defines the default arguments to the entrypoint of the container. + Cmd []string `json:"Cmd,omitempty"` + + // Volumes is a set of directories describing where the process is likely write data specific to a container instance. + Volumes map[string]struct{} `json:"Volumes,omitempty"` + + // WorkingDir sets the current working directory of the entrypoint process in the container. + WorkingDir string `json:"WorkingDir,omitempty"` + + // Labels contains arbitrary metadata for the container. + Labels map[string]string `json:"Labels,omitempty"` + + // StopSignal contains the system call signal that will be sent to the container to exit. + StopSignal string `json:"StopSignal,omitempty"` +} + +// RootFS describes a layer content addresses +type RootFS struct { + // Type is the type of the rootfs. + Type string `json:"type"` + + // DiffIDs is an array of layer content hashes (DiffIDs), in order from bottom-most to top-most. + DiffIDs []digest.Digest `json:"diff_ids"` +} + +// History describes the history of a layer. +type History struct { + // Created is the combined date and time at which the layer was created, formatted as defined by RFC 3339, section 5.6. + Created *time.Time `json:"created,omitempty"` + + // CreatedBy is the command which created the layer. + CreatedBy string `json:"created_by,omitempty"` + + // Author is the author of the build point. + Author string `json:"author,omitempty"` + + // Comment is a custom message set when creating the layer. + Comment string `json:"comment,omitempty"` + + // EmptyLayer is used to mark if the history item created a filesystem diff. + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// Image is the JSON structure which describes some basic information about the image. +// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON. +type Image struct { + // Created is the combined date and time at which the image was created, formatted as defined by RFC 3339, section 5.6. + Created *time.Time `json:"created,omitempty"` + + // Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image. + Author string `json:"author,omitempty"` + + // Architecture is the CPU architecture which the binaries in this image are built to run on. 
+ Architecture string `json:"architecture"` + + // Variant is the variant of the specified CPU architecture which image binaries are intended to run on. + Variant string `json:"variant,omitempty"` + + // OS is the name of the operating system which the image is built to run on. + OS string `json:"os"` + + // OSVersion is an optional field specifying the operating system + // version, for example on Windows `10.0.14393.1066`. + OSVersion string `json:"os.version,omitempty"` + + // OSFeatures is an optional field specifying an array of strings, + // each listing a required OS feature (for example on Windows `win32k`). + OSFeatures []string `json:"os.features,omitempty"` + + // Config defines the execution parameters which should be used as a base when running a container using the image. + Config ImageConfig `json:"config,omitempty"` + + // RootFS references the layer content addresses used by the image. + RootFS RootFS `json:"rootfs"` + + // History describes the history of each layer. + History []History `json:"history,omitempty"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go new file mode 100644 index 000000000..9654aa5af --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go @@ -0,0 +1,72 @@ +// Copyright 2016-2022 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import digest "github.com/opencontainers/go-digest" + +// Descriptor describes the disposition of targeted content. +// This structure provides `application/vnd.oci.descriptor.v1+json` mediatype +// when marshalled to JSON. +type Descriptor struct { + // MediaType is the media type of the object this schema refers to. + MediaType string `json:"mediaType,omitempty"` + + // Digest is the digest of the targeted content. + Digest digest.Digest `json:"digest"` + + // Size specifies the size in bytes of the blob. + Size int64 `json:"size"` + + // URLs specifies a list of URLs from which this object MAY be downloaded + URLs []string `json:"urls,omitempty"` + + // Annotations contains arbitrary metadata relating to the targeted content. + Annotations map[string]string `json:"annotations,omitempty"` + + // Data is an embedding of the targeted content. This is encoded as a base64 + // string when marshalled to JSON (automatically, by encoding/json). If + // present, Data can be used directly to avoid fetching the targeted content. + Data []byte `json:"data,omitempty"` + + // Platform describes the platform which the image in the manifest runs on. + // + // This should only be used when referring to a manifest. + Platform *Platform `json:"platform,omitempty"` + + // ArtifactType is the IANA media type of this artifact. + ArtifactType string `json:"artifactType,omitempty"` +} + +// Platform describes the platform which the image in the manifest runs on. 
+type Platform struct { + // Architecture field specifies the CPU architecture, for example + // `amd64` or `ppc64`. + Architecture string `json:"architecture"` + + // OS specifies the operating system, for example `linux` or `windows`. + OS string `json:"os"` + + // OSVersion is an optional field specifying the operating system + // version, for example on Windows `10.0.14393.1066`. + OSVersion string `json:"os.version,omitempty"` + + // OSFeatures is an optional field specifying an array of strings, + // each listing a required OS feature (for example on Windows `win32k`). + OSFeatures []string `json:"os.features,omitempty"` + + // Variant is an optional field specifying a variant of the CPU, for + // example `v7` to specify ARMv7 when architecture is `arm`. + Variant string `json:"variant,omitempty"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go new file mode 100644 index 000000000..ed4a56e59 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go @@ -0,0 +1,32 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import "github.com/opencontainers/image-spec/specs-go" + +// Index references manifests for various platforms. +// This structure provides `application/vnd.oci.image.index.v1+json` mediatype when marshalled to JSON. +type Index struct { + specs.Versioned + + // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.index.v1+json` + MediaType string `json:"mediaType,omitempty"` + + // Manifests references platform specific manifests. + Manifests []Descriptor `json:"manifests"` + + // Annotations contains arbitrary metadata for the image index. + Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go new file mode 100644 index 000000000..fc79e9e0d --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go @@ -0,0 +1,28 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v1 + +const ( + // ImageLayoutFile is the file name of oci image layout file + ImageLayoutFile = "oci-layout" + // ImageLayoutVersion is the version of ImageLayout + ImageLayoutVersion = "1.0.0" +) + +// ImageLayout is the structure in the "oci-layout" file, found in the root +// of an OCI Image-layout directory. +type ImageLayout struct { + Version string `json:"imageLayoutVersion"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go new file mode 100644 index 000000000..730a09359 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go @@ -0,0 +1,38 @@ +// Copyright 2016-2022 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import "github.com/opencontainers/image-spec/specs-go" + +// Manifest provides `application/vnd.oci.image.manifest.v1+json` mediatype structure when marshalled to JSON. +type Manifest struct { + specs.Versioned + + // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json` + MediaType string `json:"mediaType,omitempty"` + + // Config references a configuration object for a container, by digest. + // The referenced configuration object is a JSON blob that the runtime uses to set up the container. + Config Descriptor `json:"config"` + + // Layers is an indexed list of layers referenced by the manifest. + Layers []Descriptor `json:"layers"` + + // Subject is an optional link from the image manifest to another manifest forming an association between the image manifest and the other manifest. + Subject *Descriptor `json:"subject,omitempty"` + + // Annotations contains arbitrary metadata for the image manifest. + Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go new file mode 100644 index 000000000..935b481e3 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go @@ -0,0 +1,60 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +const ( + // MediaTypeDescriptor specifies the media type for a content descriptor. + MediaTypeDescriptor = "application/vnd.oci.descriptor.v1+json" + + // MediaTypeLayoutHeader specifies the media type for the oci-layout. 
+ MediaTypeLayoutHeader = "application/vnd.oci.layout.header.v1+json" + + // MediaTypeImageManifest specifies the media type for an image manifest. + MediaTypeImageManifest = "application/vnd.oci.image.manifest.v1+json" + + // MediaTypeImageIndex specifies the media type for an image index. + MediaTypeImageIndex = "application/vnd.oci.image.index.v1+json" + + // MediaTypeImageLayer is the media type used for layers referenced by the manifest. + MediaTypeImageLayer = "application/vnd.oci.image.layer.v1.tar" + + // MediaTypeImageLayerGzip is the media type used for gzipped layers + // referenced by the manifest. + MediaTypeImageLayerGzip = "application/vnd.oci.image.layer.v1.tar+gzip" + + // MediaTypeImageLayerZstd is the media type used for zstd compressed + // layers referenced by the manifest. + MediaTypeImageLayerZstd = "application/vnd.oci.image.layer.v1.tar+zstd" + + // MediaTypeImageLayerNonDistributable is the media type for layers referenced by + // the manifest but with distribution restrictions. + MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar" + + // MediaTypeImageLayerNonDistributableGzip is the media type for + // gzipped layers referenced by the manifest but with distribution + // restrictions. + MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" + + // MediaTypeImageLayerNonDistributableZstd is the media type for zstd + // compressed layers referenced by the manifest but with distribution + // restrictions. + MediaTypeImageLayerNonDistributableZstd = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd" + + // MediaTypeImageConfig specifies the media type for the image configuration. + MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" + + // MediaTypeArtifactManifest specifies the media type for a content descriptor. + MediaTypeArtifactManifest = "application/vnd.oci.artifact.manifest.v1+json" +) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go new file mode 100644 index 000000000..d27903579 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go @@ -0,0 +1,32 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package specs + +import "fmt" + +const ( + // VersionMajor is for an API incompatible changes + VersionMajor = 1 + // VersionMinor is for functionality in a backwards-compatible manner + VersionMinor = 1 + // VersionPatch is for backwards-compatible bug fixes + VersionPatch = 0 + + // VersionDev indicates development branch. Releases will be empty string. + VersionDev = "-rc2" +) + +// Version is the specification version that the package types support. 
+var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go b/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go new file mode 100644 index 000000000..58a1510f3 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go @@ -0,0 +1,23 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package specs + +// Versioned provides a struct with the manifest schemaVersion and mediaType. +// Incoming content with unknown schema version can be decoded against this +// struct to check the version. +type Versioned struct { + // SchemaVersion is the image manifest schema that this image follows + SchemaVersion int `json:"schemaVersion"` +} diff --git a/vendor/github.com/opencontainers/runc/LICENSE b/vendor/github.com/opencontainers/runc/LICENSE new file mode 100644 index 000000000..27448585a --- /dev/null +++ b/vendor/github.com/opencontainers/runc/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opencontainers/runc/NOTICE b/vendor/github.com/opencontainers/runc/NOTICE new file mode 100644 index 000000000..5c97abce4 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/NOTICE @@ -0,0 +1,17 @@ +runc + +Copyright 2012-2015 Docker, Inc. + +This product includes software developed at Docker, Inc. (http://www.docker.com). + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see http://www.bis.doc.gov + +See also http://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go new file mode 100644 index 000000000..f95c1409f --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go @@ -0,0 +1,157 @@ +//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package user + +import ( + "io" + "os" + "strconv" + + "golang.org/x/sys/unix" +) + +// Unix-specific path to the passwd and group formatted files. +const ( + unixPasswdPath = "/etc/passwd" + unixGroupPath = "/etc/group" +) + +// LookupUser looks up a user by their username in /etc/passwd. 
If the user +// cannot be found (or there is no /etc/passwd file on the filesystem), then +// LookupUser returns an error. +func LookupUser(username string) (User, error) { + return lookupUserFunc(func(u User) bool { + return u.Name == username + }) +} + +// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot +// be found (or there is no /etc/passwd file on the filesystem), then LookupId +// returns an error. +func LookupUid(uid int) (User, error) { + return lookupUserFunc(func(u User) bool { + return u.Uid == uid + }) +} + +func lookupUserFunc(filter func(u User) bool) (User, error) { + // Get operating system-specific passwd reader-closer. + passwd, err := GetPasswd() + if err != nil { + return User{}, err + } + defer passwd.Close() + + // Get the users. + users, err := ParsePasswdFilter(passwd, filter) + if err != nil { + return User{}, err + } + + // No user entries found. + if len(users) == 0 { + return User{}, ErrNoPasswdEntries + } + + // Assume the first entry is the "correct" one. + return users[0], nil +} + +// LookupGroup looks up a group by its name in /etc/group. If the group cannot +// be found (or there is no /etc/group file on the filesystem), then LookupGroup +// returns an error. +func LookupGroup(groupname string) (Group, error) { + return lookupGroupFunc(func(g Group) bool { + return g.Name == groupname + }) +} + +// LookupGid looks up a group by its group id in /etc/group. If the group cannot +// be found (or there is no /etc/group file on the filesystem), then LookupGid +// returns an error. +func LookupGid(gid int) (Group, error) { + return lookupGroupFunc(func(g Group) bool { + return g.Gid == gid + }) +} + +func lookupGroupFunc(filter func(g Group) bool) (Group, error) { + // Get operating system-specific group reader-closer. + group, err := GetGroup() + if err != nil { + return Group{}, err + } + defer group.Close() + + // Get the users. + groups, err := ParseGroupFilter(group, filter) + if err != nil { + return Group{}, err + } + + // No user entries found. + if len(groups) == 0 { + return Group{}, ErrNoGroupEntries + } + + // Assume the first entry is the "correct" one. + return groups[0], nil +} + +func GetPasswdPath() (string, error) { + return unixPasswdPath, nil +} + +func GetPasswd() (io.ReadCloser, error) { + return os.Open(unixPasswdPath) +} + +func GetGroupPath() (string, error) { + return unixGroupPath, nil +} + +func GetGroup() (io.ReadCloser, error) { + return os.Open(unixGroupPath) +} + +// CurrentUser looks up the current user by their user id in /etc/passwd. If the +// user cannot be found (or there is no /etc/passwd file on the filesystem), +// then CurrentUser returns an error. +func CurrentUser() (User, error) { + return LookupUid(unix.Getuid()) +} + +// CurrentGroup looks up the current user's group by their primary group id's +// entry in /etc/passwd. If the group cannot be found (or there is no +// /etc/group file on the filesystem), then CurrentGroup returns an error. 
+func CurrentGroup() (Group, error) { + return LookupGid(unix.Getgid()) +} + +func currentUserSubIDs(fileName string) ([]SubID, error) { + u, err := CurrentUser() + if err != nil { + return nil, err + } + filter := func(entry SubID) bool { + return entry.Name == u.Name || entry.Name == strconv.Itoa(u.Uid) + } + return ParseSubIDFileFilter(fileName, filter) +} + +func CurrentUserSubUIDs() ([]SubID, error) { + return currentUserSubIDs("/etc/subuid") +} + +func CurrentUserSubGIDs() ([]SubID, error) { + return currentUserSubIDs("/etc/subgid") +} + +func CurrentProcessUIDMap() ([]IDMap, error) { + return ParseIDMapFile("/proc/self/uid_map") +} + +func CurrentProcessGIDMap() ([]IDMap, error) { + return ParseIDMapFile("/proc/self/gid_map") +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go new file mode 100644 index 000000000..2473c5ead --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go @@ -0,0 +1,605 @@ +package user + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +const ( + minID = 0 + maxID = 1<<31 - 1 // for 32-bit systems compatibility +) + +var ( + // ErrNoPasswdEntries is returned if no matching entries were found in /etc/group. + ErrNoPasswdEntries = errors.New("no matching entries in passwd file") + // ErrNoGroupEntries is returned if no matching entries were found in /etc/passwd. + ErrNoGroupEntries = errors.New("no matching entries in group file") + // ErrRange is returned if a UID or GID is outside of the valid range. + ErrRange = fmt.Errorf("uids and gids must be in range %d-%d", minID, maxID) +) + +type User struct { + Name string + Pass string + Uid int + Gid int + Gecos string + Home string + Shell string +} + +type Group struct { + Name string + Pass string + Gid int + List []string +} + +// SubID represents an entry in /etc/sub{u,g}id +type SubID struct { + Name string + SubID int64 + Count int64 +} + +// IDMap represents an entry in /proc/PID/{u,g}id_map +type IDMap struct { + ID int64 + ParentID int64 + Count int64 +} + +func parseLine(line []byte, v ...interface{}) { + parseParts(bytes.Split(line, []byte(":")), v...) +} + +func parseParts(parts [][]byte, v ...interface{}) { + if len(parts) == 0 { + return + } + + for i, p := range parts { + // Ignore cases where we don't have enough fields to populate the arguments. + // Some configuration files like to misbehave. + if len(v) <= i { + break + } + + // Use the type of the argument to figure out how to parse it, scanf() style. + // This is legit. + switch e := v[i].(type) { + case *string: + *e = string(p) + case *int: + // "numbers", with conversion errors ignored because of some misbehaving configuration files. + *e, _ = strconv.Atoi(string(p)) + case *int64: + *e, _ = strconv.ParseInt(string(p), 10, 64) + case *[]string: + // Comma-separated lists. + if len(p) != 0 { + *e = strings.Split(string(p), ",") + } else { + *e = []string{} + } + default: + // Someone goof'd when writing code using this function. Scream so they can hear us. + panic(fmt.Sprintf("parseLine only accepts {*string, *int, *int64, *[]string} as arguments! 
%#v is not a pointer!", e)) + } + } +} + +func ParsePasswdFile(path string) ([]User, error) { + passwd, err := os.Open(path) + if err != nil { + return nil, err + } + defer passwd.Close() + return ParsePasswd(passwd) +} + +func ParsePasswd(passwd io.Reader) ([]User, error) { + return ParsePasswdFilter(passwd, nil) +} + +func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) { + passwd, err := os.Open(path) + if err != nil { + return nil, err + } + defer passwd.Close() + return ParsePasswdFilter(passwd, filter) +} + +func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) { + if r == nil { + return nil, errors.New("nil source for passwd-formatted data") + } + + var ( + s = bufio.NewScanner(r) + out = []User{} + ) + + for s.Scan() { + line := bytes.TrimSpace(s.Bytes()) + if len(line) == 0 { + continue + } + + // see: man 5 passwd + // name:password:UID:GID:GECOS:directory:shell + // Name:Pass:Uid:Gid:Gecos:Home:Shell + // root:x:0:0:root:/root:/bin/bash + // adm:x:3:4:adm:/var/adm:/bin/false + p := User{} + parseLine(line, &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + if err := s.Err(); err != nil { + return nil, err + } + + return out, nil +} + +func ParseGroupFile(path string) ([]Group, error) { + group, err := os.Open(path) + if err != nil { + return nil, err + } + + defer group.Close() + return ParseGroup(group) +} + +func ParseGroup(group io.Reader) ([]Group, error) { + return ParseGroupFilter(group, nil) +} + +func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) { + group, err := os.Open(path) + if err != nil { + return nil, err + } + defer group.Close() + return ParseGroupFilter(group, filter) +} + +func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { + if r == nil { + return nil, errors.New("nil source for group-formatted data") + } + rd := bufio.NewReader(r) + out := []Group{} + + // Read the file line-by-line. + for { + var ( + isPrefix bool + wholeLine []byte + err error + ) + + // Read the next line. We do so in chunks (as much as reader's + // buffer is able to keep), check if we read enough columns + // already on each step and store final result in wholeLine. + for { + var line []byte + line, isPrefix, err = rd.ReadLine() + + if err != nil { + // We should return no error if EOF is reached + // without a match. + if err == io.EOF { //nolint:errorlint // comparison with io.EOF is legit, https://github.com/polyfloyd/go-errorlint/pull/12 + err = nil + } + return out, err + } + + // Simple common case: line is short enough to fit in a + // single reader's buffer. + if !isPrefix && len(wholeLine) == 0 { + wholeLine = line + break + } + + wholeLine = append(wholeLine, line...) + + // Check if we read the whole line already. + if !isPrefix { + break + } + } + + // There's no spec for /etc/passwd or /etc/group, but we try to follow + // the same rules as the glibc parser, which allows comments and blank + // space at the beginning of a line. 
+ wholeLine = bytes.TrimSpace(wholeLine) + if len(wholeLine) == 0 || wholeLine[0] == '#' { + continue + } + + // see: man 5 group + // group_name:password:GID:user_list + // Name:Pass:Gid:List + // root:x:0:root + // adm:x:4:root,adm,daemon + p := Group{} + parseLine(wholeLine, &p.Name, &p.Pass, &p.Gid, &p.List) + + if filter == nil || filter(p) { + out = append(out, p) + } + } +} + +type ExecUser struct { + Uid int + Gid int + Sgids []int + Home string +} + +// GetExecUserPath is a wrapper for GetExecUser. It reads data from each of the +// given file paths and uses that data as the arguments to GetExecUser. If the +// files cannot be opened for any reason, the error is ignored and a nil +// io.Reader is passed instead. +func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) { + var passwd, group io.Reader + + if passwdFile, err := os.Open(passwdPath); err == nil { + passwd = passwdFile + defer passwdFile.Close() + } + + if groupFile, err := os.Open(groupPath); err == nil { + group = groupFile + defer groupFile.Close() + } + + return GetExecUser(userSpec, defaults, passwd, group) +} + +// GetExecUser parses a user specification string (using the passwd and group +// readers as sources for /etc/passwd and /etc/group data, respectively). In +// the case of blank fields or missing data from the sources, the values in +// defaults is used. +// +// GetExecUser will return an error if a user or group literal could not be +// found in any entry in passwd and group respectively. +// +// Examples of valid user specifications are: +// * "" +// * "user" +// * "uid" +// * "user:group" +// * "uid:gid +// * "user:gid" +// * "uid:group" +// +// It should be noted that if you specify a numeric user or group id, they will +// not be evaluated as usernames (only the metadata will be filled). So attempting +// to parse a user with user.Name = "1337" will produce the user with a UID of +// 1337. +func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) { + if defaults == nil { + defaults = new(ExecUser) + } + + // Copy over defaults. + user := &ExecUser{ + Uid: defaults.Uid, + Gid: defaults.Gid, + Sgids: defaults.Sgids, + Home: defaults.Home, + } + + // Sgids slice *cannot* be nil. + if user.Sgids == nil { + user.Sgids = []int{} + } + + // Allow for userArg to have either "user" syntax, or optionally "user:group" syntax + var userArg, groupArg string + parseLine([]byte(userSpec), &userArg, &groupArg) + + // Convert userArg and groupArg to be numeric, so we don't have to execute + // Atoi *twice* for each iteration over lines. + uidArg, uidErr := strconv.Atoi(userArg) + gidArg, gidErr := strconv.Atoi(groupArg) + + // Find the matching user. + users, err := ParsePasswdFilter(passwd, func(u User) bool { + if userArg == "" { + // Default to current state of the user. + return u.Uid == user.Uid + } + + if uidErr == nil { + // If the userArg is numeric, always treat it as a UID. + return uidArg == u.Uid + } + + return u.Name == userArg + }) + + // If we can't find the user, we have to bail. + if err != nil && passwd != nil { + if userArg == "" { + userArg = strconv.Itoa(user.Uid) + } + return nil, fmt.Errorf("unable to find user %s: %w", userArg, err) + } + + var matchedUserName string + if len(users) > 0 { + // First match wins, even if there's more than one matching entry. 
+ matchedUserName = users[0].Name + user.Uid = users[0].Uid + user.Gid = users[0].Gid + user.Home = users[0].Home + } else if userArg != "" { + // If we can't find a user with the given username, the only other valid + // option is if it's a numeric username with no associated entry in passwd. + + if uidErr != nil { + // Not numeric. + return nil, fmt.Errorf("unable to find user %s: %w", userArg, ErrNoPasswdEntries) + } + user.Uid = uidArg + + // Must be inside valid uid range. + if user.Uid < minID || user.Uid > maxID { + return nil, ErrRange + } + + // Okay, so it's numeric. We can just roll with this. + } + + // On to the groups. If we matched a username, we need to do this because of + // the supplementary group IDs. + if groupArg != "" || matchedUserName != "" { + groups, err := ParseGroupFilter(group, func(g Group) bool { + // If the group argument isn't explicit, we'll just search for it. + if groupArg == "" { + // Check if user is a member of this group. + for _, u := range g.List { + if u == matchedUserName { + return true + } + } + return false + } + + if gidErr == nil { + // If the groupArg is numeric, always treat it as a GID. + return gidArg == g.Gid + } + + return g.Name == groupArg + }) + if err != nil && group != nil { + return nil, fmt.Errorf("unable to find groups for spec %v: %w", matchedUserName, err) + } + + // Only start modifying user.Gid if it is in explicit form. + if groupArg != "" { + if len(groups) > 0 { + // First match wins, even if there's more than one matching entry. + user.Gid = groups[0].Gid + } else { + // If we can't find a group with the given name, the only other valid + // option is if it's a numeric group name with no associated entry in group. + + if gidErr != nil { + // Not numeric. + return nil, fmt.Errorf("unable to find group %s: %w", groupArg, ErrNoGroupEntries) + } + user.Gid = gidArg + + // Must be inside valid gid range. + if user.Gid < minID || user.Gid > maxID { + return nil, ErrRange + } + + // Okay, so it's numeric. We can just roll with this. + } + } else if len(groups) > 0 { + // Supplementary group ids only make sense if in the implicit form. + user.Sgids = make([]int, len(groups)) + for i, group := range groups { + user.Sgids[i] = group.Gid + } + } + } + + return user, nil +} + +// GetAdditionalGroups looks up a list of groups by name or group id +// against the given /etc/group formatted data. If a group name cannot +// be found, an error will be returned. If a group id cannot be found, +// or the given group data is nil, the id will be returned as-is +// provided it is in the legal range. +func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) { + groups := []Group{} + if group != nil { + var err error + groups, err = ParseGroupFilter(group, func(g Group) bool { + for _, ag := range additionalGroups { + if g.Name == ag || strconv.Itoa(g.Gid) == ag { + return true + } + } + return false + }) + if err != nil { + return nil, fmt.Errorf("Unable to find additional groups %v: %w", additionalGroups, err) + } + } + + gidMap := make(map[int]struct{}) + for _, ag := range additionalGroups { + var found bool + for _, g := range groups { + // if we found a matched group either by name or gid, take the + // first matched as correct + if g.Name == ag || strconv.Itoa(g.Gid) == ag { + if _, ok := gidMap[g.Gid]; !ok { + gidMap[g.Gid] = struct{}{} + found = true + break + } + } + } + // we asked for a group but didn't find it. 
let's check to see + // if we wanted a numeric group + if !found { + gid, err := strconv.ParseInt(ag, 10, 64) + if err != nil { + // Not a numeric ID either. + return nil, fmt.Errorf("Unable to find group %s: %w", ag, ErrNoGroupEntries) + } + // Ensure gid is inside gid range. + if gid < minID || gid > maxID { + return nil, ErrRange + } + gidMap[int(gid)] = struct{}{} + } + } + gids := []int{} + for gid := range gidMap { + gids = append(gids, gid) + } + return gids, nil +} + +// GetAdditionalGroupsPath is a wrapper around GetAdditionalGroups +// that opens the groupPath given and gives it as an argument to +// GetAdditionalGroups. +func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) { + var group io.Reader + + if groupFile, err := os.Open(groupPath); err == nil { + group = groupFile + defer groupFile.Close() + } + return GetAdditionalGroups(additionalGroups, group) +} + +func ParseSubIDFile(path string) ([]SubID, error) { + subid, err := os.Open(path) + if err != nil { + return nil, err + } + defer subid.Close() + return ParseSubID(subid) +} + +func ParseSubID(subid io.Reader) ([]SubID, error) { + return ParseSubIDFilter(subid, nil) +} + +func ParseSubIDFileFilter(path string, filter func(SubID) bool) ([]SubID, error) { + subid, err := os.Open(path) + if err != nil { + return nil, err + } + defer subid.Close() + return ParseSubIDFilter(subid, filter) +} + +func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) { + if r == nil { + return nil, errors.New("nil source for subid-formatted data") + } + + var ( + s = bufio.NewScanner(r) + out = []SubID{} + ) + + for s.Scan() { + line := bytes.TrimSpace(s.Bytes()) + if len(line) == 0 { + continue + } + + // see: man 5 subuid + p := SubID{} + parseLine(line, &p.Name, &p.SubID, &p.Count) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + if err := s.Err(); err != nil { + return nil, err + } + + return out, nil +} + +func ParseIDMapFile(path string) ([]IDMap, error) { + r, err := os.Open(path) + if err != nil { + return nil, err + } + defer r.Close() + return ParseIDMap(r) +} + +func ParseIDMap(r io.Reader) ([]IDMap, error) { + return ParseIDMapFilter(r, nil) +} + +func ParseIDMapFileFilter(path string, filter func(IDMap) bool) ([]IDMap, error) { + r, err := os.Open(path) + if err != nil { + return nil, err + } + defer r.Close() + return ParseIDMapFilter(r, filter) +} + +func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) { + if r == nil { + return nil, errors.New("nil source for idmap-formatted data") + } + + var ( + s = bufio.NewScanner(r) + out = []IDMap{} + ) + + for s.Scan() { + line := bytes.TrimSpace(s.Bytes()) + if len(line) == 0 { + continue + } + + // see: man 7 user_namespaces + p := IDMap{} + parseParts(bytes.Fields(line), &p.ID, &p.ParentID, &p.Count) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + if err := s.Err(); err != nil { + return nil, err + } + + return out, nil +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go new file mode 100644 index 000000000..e018eae61 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go @@ -0,0 +1,43 @@ +//go:build gofuzz +// +build gofuzz + +package user + +import ( + "io" + "strings" +) + +func IsDivisbleBy(n int, divisibleby int) bool { + return (n % divisibleby) == 0 +} + +func FuzzUser(data []byte) int { + if len(data) == 0 { + return 
-1 + } + if !IsDivisbleBy(len(data), 5) { + return -1 + } + + var divided [][]byte + + chunkSize := len(data) / 5 + + for i := 0; i < len(data); i += chunkSize { + end := i + chunkSize + + divided = append(divided, data[i:end]) + } + + _, _ = ParsePasswdFilter(strings.NewReader(string(divided[0])), nil) + + var passwd, group io.Reader + + group = strings.NewReader(string(divided[1])) + _, _ = GetAdditionalGroups([]string{string(divided[2])}, group) + + passwd = strings.NewReader(string(divided[3])) + _, _ = GetExecUser(string(divided[4]), nil, passwd, group) + return 1 +} diff --git a/vendor/github.com/sirupsen/logrus/.gitignore b/vendor/github.com/sirupsen/logrus/.gitignore new file mode 100644 index 000000000..1fb13abeb --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/.gitignore @@ -0,0 +1,4 @@ +logrus +vendor + +.idea/ diff --git a/vendor/github.com/sirupsen/logrus/.golangci.yml b/vendor/github.com/sirupsen/logrus/.golangci.yml new file mode 100644 index 000000000..65dc28503 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/.golangci.yml @@ -0,0 +1,40 @@ +run: + # do not run on test files yet + tests: false + +# all available settings of specific linters +linters-settings: + errcheck: + # report about not checking of errors in type assetions: `a := b.(MyStruct)`; + # default is false: such cases aren't reported by default. + check-type-assertions: false + + # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; + # default is false: such cases aren't reported by default. + check-blank: false + + lll: + line-length: 100 + tab-width: 4 + + prealloc: + simple: false + range-loops: false + for-loops: false + + whitespace: + multi-if: false # Enforces newlines (or comments) after every multi-line if statement + multi-func: false # Enforces newlines (or comments) after every multi-line function signature + +linters: + enable: + - megacheck + - govet + disable: + - maligned + - prealloc + disable-all: false + presets: + - bugs + - unused + fast: false diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml new file mode 100644 index 000000000..c1dbd5a3a --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/.travis.yml @@ -0,0 +1,15 @@ +language: go +go_import_path: github.com/sirupsen/logrus +git: + depth: 1 +env: + - GO111MODULE=on +go: 1.15.x +os: linux +install: + - ./travis/install.sh +script: + - cd ci + - go run mage.go -v -w ../ crossBuild + - go run mage.go -v -w ../ lint + - go run mage.go -v -w ../ test diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md new file mode 100644 index 000000000..7567f6128 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md @@ -0,0 +1,259 @@ +# 1.8.1 +Code quality: + * move magefile in its own subdir/submodule to remove magefile dependency on logrus consumer + * improve timestamp format documentation + +Fixes: + * fix race condition on logger hooks + + +# 1.8.0 + +Correct versioning number replacing v1.7.1. + +# 1.7.1 + +Beware this release has introduced a new public API and its semver is therefore incorrect. 
+ +Code quality: + * use go 1.15 in travis + * use magefile as task runner + +Fixes: + * small fixes about new go 1.13 error formatting system + * Fix for long time race condiction with mutating data hooks + +Features: + * build support for zos + +# 1.7.0 +Fixes: + * the dependency toward a windows terminal library has been removed + +Features: + * a new buffer pool management API has been added + * a set of `Fn()` functions have been added + +# 1.6.0 +Fixes: + * end of line cleanup + * revert the entry concurrency bug fix whic leads to deadlock under some circumstances + * update dependency on go-windows-terminal-sequences to fix a crash with go 1.14 + +Features: + * add an option to the `TextFormatter` to completely disable fields quoting + +# 1.5.0 +Code quality: + * add golangci linter run on travis + +Fixes: + * add mutex for hooks concurrent access on `Entry` data + * caller function field for go1.14 + * fix build issue for gopherjs target + +Feature: + * add an hooks/writer sub-package whose goal is to split output on different stream depending on the trace level + * add a `DisableHTMLEscape` option in the `JSONFormatter` + * add `ForceQuote` and `PadLevelText` options in the `TextFormatter` + +# 1.4.2 + * Fixes build break for plan9, nacl, solaris +# 1.4.1 +This new release introduces: + * Enhance TextFormatter to not print caller information when they are empty (#944) + * Remove dependency on golang.org/x/crypto (#932, #943) + +Fixes: + * Fix Entry.WithContext method to return a copy of the initial entry (#941) + +# 1.4.0 +This new release introduces: + * Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848). + * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter` (#909, #911) + * Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919). + +Fixes: + * Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893). + * Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903) + * Fix infinite recursion on unknown `Level.String()` (#907) + * Fix race condition in `getCaller` (#916). + + +# 1.3.0 +This new release introduces: + * Log, Logf, Logln functions for Logger and Entry that take a Level + +Fixes: + * Building prometheus node_exporter on AIX (#840) + * Race condition in TextFormatter (#468) + * Travis CI import path (#868) + * Remove coloured output on Windows (#862) + * Pointer to func as field in JSONFormatter (#870) + * Properly marshal Levels (#873) + +# 1.2.0 +This new release introduces: + * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued + * A new trace level named `Trace` whose level is below `Debug` + * A configurable exit function to be called upon a Fatal trace + * The `Level` object now implements `encoding.TextUnmarshaler` interface + +# 1.1.1 +This is a bug fix release. 
+ * fix the build break on Solaris + * don't drop a whole trace in JSONFormatter when a field param is a function pointer which can not be serialized + +# 1.1.0 +This new release introduces: + * several fixes: + * a fix for a race condition on entry formatting + * proper cleanup of previously used entries before putting them back in the pool + * the extra new line at the end of message in text formatter has been removed + * a new global public API to check if a level is activated: IsLevelEnabled + * the following methods have been added to the Logger object + * IsLevelEnabled + * SetFormatter + * SetOutput + * ReplaceHooks + * introduction of go module + * an indent configuration for the json formatter + * output colour support for windows + * the field sort function is now configurable for text formatter + * the CLICOLOR and CLICOLOR\_FORCE environment variable support in text formater + +# 1.0.6 + +This new release introduces: + * a new api WithTime which allows to easily force the time of the log entry + which is mostly useful for logger wrapper + * a fix reverting the immutability of the entry given as parameter to the hooks + a new configuration field of the json formatter in order to put all the fields + in a nested dictionnary + * a new SetOutput method in the Logger + * a new configuration of the textformatter to configure the name of the default keys + * a new configuration of the text formatter to disable the level truncation + +# 1.0.5 + +* Fix hooks race (#707) +* Fix panic deadlock (#695) + +# 1.0.4 + +* Fix race when adding hooks (#612) +* Fix terminal check in AppEngine (#635) + +# 1.0.3 + +* Replace example files with testable examples + +# 1.0.2 + +* bug: quote non-string values in text formatter (#583) +* Make (*Logger) SetLevel a public method + +# 1.0.1 + +* bug: fix escaping in text formatter (#575) + +# 1.0.0 + +* Officially changed name to lower-case +* bug: colors on Windows 10 (#541) +* bug: fix race in accessing level (#512) + +# 0.11.5 + +* feature: add writer and writerlevel to entry (#372) + +# 0.11.4 + +* bug: fix undefined variable on solaris (#493) + +# 0.11.3 + +* formatter: configure quoting of empty values (#484) +* formatter: configure quoting character (default is `"`) (#484) +* bug: fix not importing io correctly in non-linux environments (#481) + +# 0.11.2 + +* bug: fix windows terminal detection (#476) + +# 0.11.1 + +* bug: fix tty detection with custom out (#471) + +# 0.11.0 + +* performance: Use bufferpool to allocate (#370) +* terminal: terminal detection for app-engine (#343) +* feature: exit handler (#375) + +# 0.10.0 + +* feature: Add a test hook (#180) +* feature: `ParseLevel` is now case-insensitive (#326) +* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308) +* performance: avoid re-allocations on `WithFields` (#335) + +# 0.9.0 + +* logrus/text_formatter: don't emit empty msg +* logrus/hooks/airbrake: move out of main repository +* logrus/hooks/sentry: move out of main repository +* logrus/hooks/papertrail: move out of main repository +* logrus/hooks/bugsnag: move out of main repository +* logrus/core: run tests with `-race` +* logrus/core: detect TTY based on `stderr` +* logrus/core: support `WithError` on logger +* logrus/core: Solaris support + +# 0.8.7 + +* logrus/core: fix possible race (#216) +* logrus/doc: small typo fixes and doc improvements + + +# 0.8.6 + +* hooks/raven: allow passing an initialized client + +# 0.8.5 + +* logrus/core: revert #208 + +# 0.8.4 + +* formatter/text: fix data race (#218) + +# 
0.8.3 + +* logrus/core: fix entry log level (#208) +* logrus/core: improve performance of text formatter by 40% +* logrus/core: expose `LevelHooks` type +* logrus/core: add support for DragonflyBSD and NetBSD +* formatter/text: print structs more verbosely + +# 0.8.2 + +* logrus: fix more Fatal family functions + +# 0.8.1 + +* logrus: fix not exiting on `Fatalf` and `Fatalln` + +# 0.8.0 + +* logrus: defaults to stderr instead of stdout +* hooks/sentry: add special field for `*http.Request` +* formatter/text: ignore Windows for colors + +# 0.7.3 + +* formatter/\*: allow configuration of timestamp layout + +# 0.7.2 + +* formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/sirupsen/logrus/LICENSE b/vendor/github.com/sirupsen/logrus/LICENSE new file mode 100644 index 000000000..f090cb42f --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md new file mode 100644 index 000000000..b042c896f --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/README.md @@ -0,0 +1,513 @@ +# Logrus :walrus: [![Build Status](https://github.com/sirupsen/logrus/workflows/CI/badge.svg)](https://github.com/sirupsen/logrus/actions?query=workflow%3ACI) [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![Go Reference](https://pkg.go.dev/badge/github.com/sirupsen/logrus.svg)](https://pkg.go.dev/github.com/sirupsen/logrus) + +Logrus is a structured logger for Go (golang), completely API compatible with +the standard library logger. + +**Logrus is in maintenance-mode.** We will not be introducing new features. It's +simply too hard to do in a way that won't break many people's projects, which is +the last thing you want from your Logging library (again...). + +This does not mean Logrus is dead. Logrus will continue to be maintained for +security, (backwards compatible) bug fixes, and performance (where we are +limited by the interface). + +I believe Logrus' biggest contribution is to have played a part in today's +widespread use of structured logging in Golang. There doesn't seem to be a +reason to do a major, breaking iteration into Logrus V2, since the fantastic Go +community has built those independently. Many fantastic alternatives have sprung +up. 
Logrus would look like those, had it been re-designed with what we know +about structured logging in Go today. Check out, for example, +[Zerolog][zerolog], [Zap][zap], and [Apex][apex]. + +[zerolog]: https://github.com/rs/zerolog +[zap]: https://github.com/uber-go/zap +[apex]: https://github.com/apex/log + +**Seeing weird case-sensitive problems?** It's in the past been possible to +import Logrus as both upper- and lower-case. Due to the Go package environment, +this caused issues in the community and we needed a standard. Some environments +experienced problems with the upper-case variant, so the lower-case was decided. +Everything using `logrus` will need to use the lower-case: +`github.com/sirupsen/logrus`. Any package that isn't, should be changed. + +To fix Glide, see [these +comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437). +For an in-depth explanation of the casing issue, see [this +comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276). + +Nicely color-coded in development (when a TTY is attached, otherwise just +plain text): + +![Colored](http://i.imgur.com/PY7qMwd.png) + +With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash +or Splunk: + +```json +{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the +ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} + +{"level":"warning","msg":"The group's number increased tremendously!", +"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"A giant walrus appears!", +"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", +"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} + +{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, +"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} +``` + +With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not +attached, the output is compatible with the +[logfmt](http://godoc.org/github.com/kr/logfmt) format: + +```text +time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 +time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 +time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true +time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 +time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 +time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" 
err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true +``` +To ensure this behaviour even if a TTY is attached, set your formatter as follows: + +```go + log.SetFormatter(&log.TextFormatter{ + DisableColors: true, + FullTimestamp: true, + }) +``` + +#### Logging Method Name + +If you wish to add the calling method as a field, instruct the logger via: +```go +log.SetReportCaller(true) +``` +This adds the caller as 'method' like so: + +```json +{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by", +"time":"2014-03-10 19:57:38.562543129 -0400 EDT"} +``` + +```text +time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin +``` +Note that this does add measurable overhead - the cost will depend on the version of Go, but is +between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your +environment via benchmarks: +``` +go test -bench=.*CallerTracing +``` + + +#### Case-sensitivity + +The organization's name was changed to lower-case--and this will not be changed +back. If you are getting import conflicts due to case sensitivity, please use +the lower-case import: `github.com/sirupsen/logrus`. + +#### Example + +The simplest way to use Logrus is simply the package-level exported logger: + +```go +package main + +import ( + log "github.com/sirupsen/logrus" +) + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + }).Info("A walrus appears") +} +``` + +Note that it's completely api-compatible with the stdlib logger, so you can +replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"` +and you'll now have the flexibility of Logrus. You can customize it all you +want: + +```go +package main + +import ( + "os" + log "github.com/sirupsen/logrus" +) + +func init() { + // Log as JSON instead of the default ASCII formatter. + log.SetFormatter(&log.JSONFormatter{}) + + // Output to stdout instead of the default stderr + // Can be any io.Writer, see below for File example + log.SetOutput(os.Stdout) + + // Only log the warning severity or above. + log.SetLevel(log.WarnLevel) +} + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(log.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(log.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") + + // A common pattern is to re-use fields between logging statements by re-using + // the logrus.Entry returned from WithFields() + contextLogger := log.WithFields(log.Fields{ + "common": "this is a common field", + "other": "I also should be logged always", + }) + + contextLogger.Info("I'll be logged with common and other field") + contextLogger.Info("Me too") +} +``` + +For more advanced usage such as logging to multiple locations from the same +application, you can also create an instance of the `logrus` Logger: + +```go +package main + +import ( + "os" + "github.com/sirupsen/logrus" +) + +// Create a new instance of the logger. You can have any number of instances. +var log = logrus.New() + +func main() { + // The API for setting attributes is a little different than the package level + // exported logger. See Godoc. 
+ log.Out = os.Stdout + + // You could set this to any `io.Writer` such as a file + // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) + // if err == nil { + // log.Out = file + // } else { + // log.Info("Failed to log to file, using default stderr") + // } + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") +} +``` + +#### Fields + +Logrus encourages careful, structured logging through logging fields instead of +long, unparseable error messages. For example, instead of: `log.Fatalf("Failed +to send event %s to topic %s with key %d")`, you should log the much more +discoverable: + +```go +log.WithFields(log.Fields{ + "event": event, + "topic": topic, + "key": key, +}).Fatal("Failed to send event") +``` + +We've found this API forces you to think about logging in a way that produces +much more useful logging messages. We've been in countless situations where just +a single added field to a log statement that was already there would've saved us +hours. The `WithFields` call is optional. + +In general, with Logrus using any of the `printf`-family functions should be +seen as a hint you should add a field, however, you can still use the +`printf`-family functions with Logrus. + +#### Default Fields + +Often it's helpful to have fields _always_ attached to log statements in an +application or parts of one. For example, you may want to always log the +`request_id` and `user_ip` in the context of a request. Instead of writing +`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on +every line, you can create a `logrus.Entry` to pass around instead: + +```go +requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip}) +requestLogger.Info("something happened on that request") # will log request_id and user_ip +requestLogger.Warn("something not great happened") +``` + +#### Hooks + +You can add hooks for logging levels. For example to send errors to an exception +tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to +multiple places simultaneously, e.g. syslog. + +Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in +`init`: + +```go +import ( + log "github.com/sirupsen/logrus" + "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake" + logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" + "log/syslog" +) + +func init() { + + // Use the Airbrake hook to report errors that have Error severity or above to + // an exception tracker. You can create custom hooks, see the Hooks section. + log.AddHook(airbrake.NewHook(123, "xyz", "production")) + + hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + if err != nil { + log.Error("Unable to connect to local syslog daemon") + } else { + log.AddHook(hook) + } +} +``` +Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). + +A list of currently known service hooks can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks) + + +#### Level logging + +Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic. 
+ +```go +log.Trace("Something very low level.") +log.Debug("Useful debugging information.") +log.Info("Something noteworthy happened!") +log.Warn("You should probably take a look at this.") +log.Error("Something failed but I'm not quitting.") +// Calls os.Exit(1) after logging +log.Fatal("Bye.") +// Calls panic() after logging +log.Panic("I'm bailing.") +``` + +You can set the logging level on a `Logger`, then it will only log entries with +that severity or anything above it: + +```go +// Will log anything that is info or above (warn, error, fatal, panic). Default. +log.SetLevel(log.InfoLevel) +``` + +It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose +environment if your application has that. + +#### Entries + +Besides the fields added with `WithField` or `WithFields` some fields are +automatically added to all logging events: + +1. `time`. The timestamp when the entry was created. +2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after + the `AddFields` call. E.g. `Failed to send event.` +3. `level`. The logging level. E.g. `info`. + +#### Environments + +Logrus has no notion of environment. + +If you wish for hooks and formatters to only be used in specific environments, +you should handle that yourself. For example, if your application has a global +variable `Environment`, which is a string representation of the environment you +could do: + +```go +import ( + log "github.com/sirupsen/logrus" +) + +func init() { + // do something here to set environment depending on an environment variable + // or command-line flag + if Environment == "production" { + log.SetFormatter(&log.JSONFormatter{}) + } else { + // The TextFormatter is default, you don't actually have to do this. + log.SetFormatter(&log.TextFormatter{}) + } +} +``` + +This configuration is how `logrus` was intended to be used, but JSON in +production is mostly only useful if you do log aggregation with tools like +Splunk or Logstash. + +#### Formatters + +The built-in logging formatters are: + +* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise + without colors. + * *Note:* to force colored output when there is no TTY, set the `ForceColors` + field to `true`. To force no colored output even if there is a TTY set the + `DisableColors` field to `true`. For Windows, see + [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable). + * When colors are enabled, levels are truncated to 4 characters by default. To disable + truncation set the `DisableLevelTruncation` field to `true`. + * When outputting to a TTY, it's often helpful to visually scan down a column where all the levels are the same width. Setting the `PadLevelText` field to `true` enables this behavior, by adding padding to the level text. + * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). +* `logrus.JSONFormatter`. Logs fields as JSON. + * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). + +Third party logging formatters: + +* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine. +* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html). +* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. 
+* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. +* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the Power of Zalgo. +* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure. +* [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). get fileName, log's line number and the latest function's name when print log; Sava log to files. +* [`caption-json-formatter`](https://github.com/nolleh/caption_json_formatter). logrus's message json formatter with human-readable caption added. + +You can define your formatter by implementing the `Formatter` interface, +requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a +`Fields` type (`map[string]interface{}`) with all your fields as well as the +default ones (see Entries section above): + +```go +type MyJSONFormatter struct { +} + +log.SetFormatter(new(MyJSONFormatter)) + +func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { + // Note this doesn't include Time, Level and Message which are available on + // the Entry. Consult `godoc` on information about those fields or read the + // source of the official loggers. + serialized, err := json.Marshal(entry.Data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %w", err) + } + return append(serialized, '\n'), nil +} +``` + +#### Logger as an `io.Writer` + +Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. + +```go +w := logger.Writer() +defer w.Close() + +srv := http.Server{ + // create a stdlib log.Logger that writes to + // logrus.Logger. + ErrorLog: log.New(w, "", 0), +} +``` + +Each line written to that writer will be printed the usual way, using formatters +and hooks. The level for those entries is `info`. + +This means that we can override the standard library logger easily: + +```go +logger := logrus.New() +logger.Formatter = &logrus.JSONFormatter{} + +// Use logrus for standard log output +// Note that `log` here references stdlib's log +// Not logrus imported under the name `log`. +log.SetOutput(logger.Writer()) +``` + +#### Rotation + +Log rotation is not provided with Logrus. Log rotation should be done by an +external program (like `logrotate(8)`) that can compress and delete old log +entries. It should not be a feature of the application-level logger. + +#### Tools + +| Tool | Description | +| ---- | ----------- | +|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will be generated with different configs in different environments.| +|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | + +#### Testing + +Logrus has a built in facility for asserting the presence of log messages. 
This is implemented through the `test` hook and provides: + +* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just adds the `test` hook +* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): + +```go +import( + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestSomething(t*testing.T){ + logger, hook := test.NewNullLogger() + logger.Error("Helloerror") + + assert.Equal(t, 1, len(hook.Entries)) + assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level) + assert.Equal(t, "Helloerror", hook.LastEntry().Message) + + hook.Reset() + assert.Nil(t, hook.LastEntry()) +} +``` + +#### Fatal handlers + +Logrus can register one or more functions that will be called when any `fatal` +level message is logged. The registered handlers will be executed before +logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need +to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted. + +``` +... +handler := func() { + // gracefully shutdown something... +} +logrus.RegisterExitHandler(handler) +... +``` + +#### Thread safety + +By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs. +If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking. + +Situation when locking is not needed includes: + +* You have no hooks registered, or hooks calling is already thread-safe. + +* Writing to logger.Out is already thread-safe, for example: + + 1) logger.Out is protected by locks. + + 2) logger.Out is an os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing) + + (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/) diff --git a/vendor/github.com/sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go new file mode 100644 index 000000000..8fd189e1c --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/alt_exit.go @@ -0,0 +1,76 @@ +package logrus + +// The following code was sourced and modified from the +// https://github.com/tebeka/atexit package governed by the following license: +// +// Copyright (c) 2012 Miki Tebeka . +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software is furnished to do so, +// subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +import ( + "fmt" + "os" +) + +var handlers = []func(){} + +func runHandler(handler func()) { + defer func() { + if err := recover(); err != nil { + fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err) + } + }() + + handler() +} + +func runHandlers() { + for _, handler := range handlers { + runHandler(handler) + } +} + +// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code) +func Exit(code int) { + runHandlers() + os.Exit(code) +} + +// RegisterExitHandler appends a Logrus Exit handler to the list of handlers, +// call logrus.Exit to invoke all handlers. The handlers will also be invoked when +// any Fatal log entry is made. +// +// This method is useful when a caller wishes to use logrus to log a fatal +// message but also needs to gracefully shutdown. An example usecase could be +// closing database connections, or sending a alert that the application is +// closing. +func RegisterExitHandler(handler func()) { + handlers = append(handlers, handler) +} + +// DeferExitHandler prepends a Logrus Exit handler to the list of handlers, +// call logrus.Exit to invoke all handlers. The handlers will also be invoked when +// any Fatal log entry is made. +// +// This method is useful when a caller wishes to use logrus to log a fatal +// message but also needs to gracefully shutdown. An example usecase could be +// closing database connections, or sending a alert that the application is +// closing. +func DeferExitHandler(handler func()) { + handlers = append([]func(){handler}, handlers...) +} diff --git a/vendor/github.com/sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml new file mode 100644 index 000000000..df9d65c3a --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/appveyor.yml @@ -0,0 +1,14 @@ +version: "{build}" +platform: x64 +clone_folder: c:\gopath\src\github.com\sirupsen\logrus +environment: + GOPATH: c:\gopath +branches: + only: + - master +install: + - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% + - go version +build_script: + - go get -t + - go test diff --git a/vendor/github.com/sirupsen/logrus/buffer_pool.go b/vendor/github.com/sirupsen/logrus/buffer_pool.go new file mode 100644 index 000000000..c7787f77c --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/buffer_pool.go @@ -0,0 +1,43 @@ +package logrus + +import ( + "bytes" + "sync" +) + +var ( + bufferPool BufferPool +) + +type BufferPool interface { + Put(*bytes.Buffer) + Get() *bytes.Buffer +} + +type defaultPool struct { + pool *sync.Pool +} + +func (p *defaultPool) Put(buf *bytes.Buffer) { + p.pool.Put(buf) +} + +func (p *defaultPool) Get() *bytes.Buffer { + return p.pool.Get().(*bytes.Buffer) +} + +// SetBufferPool allows to replace the default logrus buffer pool +// to better meets the specific needs of an application. +func SetBufferPool(bp BufferPool) { + bufferPool = bp +} + +func init() { + SetBufferPool(&defaultPool{ + pool: &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + }, + }) +} diff --git a/vendor/github.com/sirupsen/logrus/doc.go b/vendor/github.com/sirupsen/logrus/doc.go new file mode 100644 index 000000000..da67aba06 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/doc.go @@ -0,0 +1,26 @@ +/* +Package logrus is a structured logger for Go, completely API compatible with the standard library logger. 
+ + +The simplest way to use Logrus is simply the package-level exported logger: + + package main + + import ( + log "github.com/sirupsen/logrus" + ) + + func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "number": 1, + "size": 10, + }).Info("A walrus appears") + } + +Output: + time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 + +For a full guide visit https://github.com/sirupsen/logrus +*/ +package logrus diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go new file mode 100644 index 000000000..71cdbbc35 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/entry.go @@ -0,0 +1,442 @@ +package logrus + +import ( + "bytes" + "context" + "fmt" + "os" + "reflect" + "runtime" + "strings" + "sync" + "time" +) + +var ( + + // qualified package name, cached at first use + logrusPackage string + + // Positions in the call stack when tracing to report the calling method + minimumCallerDepth int + + // Used for caller information initialisation + callerInitOnce sync.Once +) + +const ( + maximumCallerDepth int = 25 + knownLogrusFrames int = 4 +) + +func init() { + // start at the bottom of the stack before the package-name cache is primed + minimumCallerDepth = 1 +} + +// Defines the key when adding errors using WithError. +var ErrorKey = "error" + +// An entry is the final or intermediate Logrus logging entry. It contains all +// the fields passed with WithField{,s}. It's finally logged when Trace, Debug, +// Info, Warn, Error, Fatal or Panic is called on it. These objects can be +// reused and passed around as much as you wish to avoid field duplication. +type Entry struct { + Logger *Logger + + // Contains all the fields set by the user. + Data Fields + + // Time at which the log entry was created + Time time.Time + + // Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic + // This field will be set on entry firing and the value will be equal to the one in Logger struct field. + Level Level + + // Calling method, with package name + Caller *runtime.Frame + + // Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic + Message string + + // When formatter is called in entry.log(), a Buffer may be set to entry + Buffer *bytes.Buffer + + // Contains the context set by the user. Useful for hook processing etc. + Context context.Context + + // err may contain a field formatting error + err string +} + +func NewEntry(logger *Logger) *Entry { + return &Entry{ + Logger: logger, + // Default is three fields, plus one optional. Give a little extra room. + Data: make(Fields, 6), + } +} + +func (entry *Entry) Dup() *Entry { + data := make(Fields, len(entry.Data)) + for k, v := range entry.Data { + data[k] = v + } + return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, Context: entry.Context, err: entry.err} +} + +// Returns the bytes representation of this entry from the formatter. +func (entry *Entry) Bytes() ([]byte, error) { + return entry.Logger.Formatter.Format(entry) +} + +// Returns the string representation from the reader and ultimately the +// formatter. +func (entry *Entry) String() (string, error) { + serialized, err := entry.Bytes() + if err != nil { + return "", err + } + str := string(serialized) + return str, nil +} + +// Add an error as single field (using the key defined in ErrorKey) to the Entry. +func (entry *Entry) WithError(err error) *Entry { + return entry.WithField(ErrorKey, err) +} + +// Add a context to the Entry. 
+func (entry *Entry) WithContext(ctx context.Context) *Entry { + dataCopy := make(Fields, len(entry.Data)) + for k, v := range entry.Data { + dataCopy[k] = v + } + return &Entry{Logger: entry.Logger, Data: dataCopy, Time: entry.Time, err: entry.err, Context: ctx} +} + +// Add a single field to the Entry. +func (entry *Entry) WithField(key string, value interface{}) *Entry { + return entry.WithFields(Fields{key: value}) +} + +// Add a map of fields to the Entry. +func (entry *Entry) WithFields(fields Fields) *Entry { + data := make(Fields, len(entry.Data)+len(fields)) + for k, v := range entry.Data { + data[k] = v + } + fieldErr := entry.err + for k, v := range fields { + isErrField := false + if t := reflect.TypeOf(v); t != nil { + switch { + case t.Kind() == reflect.Func, t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Func: + isErrField = true + } + } + if isErrField { + tmp := fmt.Sprintf("can not add field %q", k) + if fieldErr != "" { + fieldErr = entry.err + ", " + tmp + } else { + fieldErr = tmp + } + } else { + data[k] = v + } + } + return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context} +} + +// Overrides the time of the Entry. +func (entry *Entry) WithTime(t time.Time) *Entry { + dataCopy := make(Fields, len(entry.Data)) + for k, v := range entry.Data { + dataCopy[k] = v + } + return &Entry{Logger: entry.Logger, Data: dataCopy, Time: t, err: entry.err, Context: entry.Context} +} + +// getPackageName reduces a fully qualified function name to the package name +// There really ought to be to be a better way... +func getPackageName(f string) string { + for { + lastPeriod := strings.LastIndex(f, ".") + lastSlash := strings.LastIndex(f, "/") + if lastPeriod > lastSlash { + f = f[:lastPeriod] + } else { + break + } + } + + return f +} + +// getCaller retrieves the name of the first non-logrus calling function +func getCaller() *runtime.Frame { + // cache this package's fully-qualified name + callerInitOnce.Do(func() { + pcs := make([]uintptr, maximumCallerDepth) + _ = runtime.Callers(0, pcs) + + // dynamic get the package name and the minimum caller depth + for i := 0; i < maximumCallerDepth; i++ { + funcName := runtime.FuncForPC(pcs[i]).Name() + if strings.Contains(funcName, "getCaller") { + logrusPackage = getPackageName(funcName) + break + } + } + + minimumCallerDepth = knownLogrusFrames + }) + + // Restrict the lookback frames to avoid runaway lookups + pcs := make([]uintptr, maximumCallerDepth) + depth := runtime.Callers(minimumCallerDepth, pcs) + frames := runtime.CallersFrames(pcs[:depth]) + + for f, again := frames.Next(); again; f, again = frames.Next() { + pkg := getPackageName(f.Function) + + // If the caller isn't part of this package, we're done + if pkg != logrusPackage { + return &f //nolint:scopelint + } + } + + // if we got here, we failed to find the caller's context + return nil +} + +func (entry Entry) HasCaller() (has bool) { + return entry.Logger != nil && + entry.Logger.ReportCaller && + entry.Caller != nil +} + +func (entry *Entry) log(level Level, msg string) { + var buffer *bytes.Buffer + + newEntry := entry.Dup() + + if newEntry.Time.IsZero() { + newEntry.Time = time.Now() + } + + newEntry.Level = level + newEntry.Message = msg + + newEntry.Logger.mu.Lock() + reportCaller := newEntry.Logger.ReportCaller + bufPool := newEntry.getBufferPool() + newEntry.Logger.mu.Unlock() + + if reportCaller { + newEntry.Caller = getCaller() + } + + newEntry.fireHooks() + buffer = bufPool.Get() + defer func() { + 
newEntry.Buffer = nil + buffer.Reset() + bufPool.Put(buffer) + }() + buffer.Reset() + newEntry.Buffer = buffer + + newEntry.write() + + newEntry.Buffer = nil + + // To avoid Entry#log() returning a value that only would make sense for + // panic() to use in Entry#Panic(), we avoid the allocation by checking + // directly here. + if level <= PanicLevel { + panic(newEntry) + } +} + +func (entry *Entry) getBufferPool() (pool BufferPool) { + if entry.Logger.BufferPool != nil { + return entry.Logger.BufferPool + } + return bufferPool +} + +func (entry *Entry) fireHooks() { + var tmpHooks LevelHooks + entry.Logger.mu.Lock() + tmpHooks = make(LevelHooks, len(entry.Logger.Hooks)) + for k, v := range entry.Logger.Hooks { + tmpHooks[k] = v + } + entry.Logger.mu.Unlock() + + err := tmpHooks.Fire(entry.Level, entry) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) + } +} + +func (entry *Entry) write() { + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + serialized, err := entry.Logger.Formatter.Format(entry) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) + return + } + if _, err := entry.Logger.Out.Write(serialized); err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) + } +} + +// Log will log a message at the level given as parameter. +// Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit. +// For this behaviour Entry.Panic or Entry.Fatal should be used instead. +func (entry *Entry) Log(level Level, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.log(level, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Trace(args ...interface{}) { + entry.Log(TraceLevel, args...) +} + +func (entry *Entry) Debug(args ...interface{}) { + entry.Log(DebugLevel, args...) +} + +func (entry *Entry) Print(args ...interface{}) { + entry.Info(args...) +} + +func (entry *Entry) Info(args ...interface{}) { + entry.Log(InfoLevel, args...) +} + +func (entry *Entry) Warn(args ...interface{}) { + entry.Log(WarnLevel, args...) +} + +func (entry *Entry) Warning(args ...interface{}) { + entry.Warn(args...) +} + +func (entry *Entry) Error(args ...interface{}) { + entry.Log(ErrorLevel, args...) +} + +func (entry *Entry) Fatal(args ...interface{}) { + entry.Log(FatalLevel, args...) + entry.Logger.Exit(1) +} + +func (entry *Entry) Panic(args ...interface{}) { + entry.Log(PanicLevel, args...) +} + +// Entry Printf family functions + +func (entry *Entry) Logf(level Level, format string, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.Log(level, fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Tracef(format string, args ...interface{}) { + entry.Logf(TraceLevel, format, args...) +} + +func (entry *Entry) Debugf(format string, args ...interface{}) { + entry.Logf(DebugLevel, format, args...) +} + +func (entry *Entry) Infof(format string, args ...interface{}) { + entry.Logf(InfoLevel, format, args...) +} + +func (entry *Entry) Printf(format string, args ...interface{}) { + entry.Infof(format, args...) +} + +func (entry *Entry) Warnf(format string, args ...interface{}) { + entry.Logf(WarnLevel, format, args...) +} + +func (entry *Entry) Warningf(format string, args ...interface{}) { + entry.Warnf(format, args...) +} + +func (entry *Entry) Errorf(format string, args ...interface{}) { + entry.Logf(ErrorLevel, format, args...) +} + +func (entry *Entry) Fatalf(format string, args ...interface{}) { + entry.Logf(FatalLevel, format, args...) 
+ entry.Logger.Exit(1) +} + +func (entry *Entry) Panicf(format string, args ...interface{}) { + entry.Logf(PanicLevel, format, args...) +} + +// Entry Println family functions + +func (entry *Entry) Logln(level Level, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.Log(level, entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Traceln(args ...interface{}) { + entry.Logln(TraceLevel, args...) +} + +func (entry *Entry) Debugln(args ...interface{}) { + entry.Logln(DebugLevel, args...) +} + +func (entry *Entry) Infoln(args ...interface{}) { + entry.Logln(InfoLevel, args...) +} + +func (entry *Entry) Println(args ...interface{}) { + entry.Infoln(args...) +} + +func (entry *Entry) Warnln(args ...interface{}) { + entry.Logln(WarnLevel, args...) +} + +func (entry *Entry) Warningln(args ...interface{}) { + entry.Warnln(args...) +} + +func (entry *Entry) Errorln(args ...interface{}) { + entry.Logln(ErrorLevel, args...) +} + +func (entry *Entry) Fatalln(args ...interface{}) { + entry.Logln(FatalLevel, args...) + entry.Logger.Exit(1) +} + +func (entry *Entry) Panicln(args ...interface{}) { + entry.Logln(PanicLevel, args...) +} + +// Sprintlnn => Sprint no newline. This is to get the behavior of how +// fmt.Sprintln where spaces are always added between operands, regardless of +// their type. Instead of vendoring the Sprintln implementation to spare a +// string allocation, we do the simplest thing. +func (entry *Entry) sprintlnn(args ...interface{}) string { + msg := fmt.Sprintln(args...) + return msg[:len(msg)-1] +} diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go new file mode 100644 index 000000000..017c30ce6 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/exported.go @@ -0,0 +1,270 @@ +package logrus + +import ( + "context" + "io" + "time" +) + +var ( + // std is the name of the standard logger in stdlib `log` + std = New() +) + +func StandardLogger() *Logger { + return std +} + +// SetOutput sets the standard logger output. +func SetOutput(out io.Writer) { + std.SetOutput(out) +} + +// SetFormatter sets the standard logger formatter. +func SetFormatter(formatter Formatter) { + std.SetFormatter(formatter) +} + +// SetReportCaller sets whether the standard logger will include the calling +// method as a field. +func SetReportCaller(include bool) { + std.SetReportCaller(include) +} + +// SetLevel sets the standard logger level. +func SetLevel(level Level) { + std.SetLevel(level) +} + +// GetLevel returns the standard logger level. +func GetLevel() Level { + return std.GetLevel() +} + +// IsLevelEnabled checks if the log level of the standard logger is greater than the level param +func IsLevelEnabled(level Level) bool { + return std.IsLevelEnabled(level) +} + +// AddHook adds a hook to the standard logger hooks. +func AddHook(hook Hook) { + std.AddHook(hook) +} + +// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. +func WithError(err error) *Entry { + return std.WithField(ErrorKey, err) +} + +// WithContext creates an entry from the standard logger and adds a context to it. +func WithContext(ctx context.Context) *Entry { + return std.WithContext(ctx) +} + +// WithField creates an entry from the standard logger and adds a field to +// it. If you want multiple fields, use `WithFields`. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. 
+func WithField(key string, value interface{}) *Entry { + return std.WithField(key, value) +} + +// WithFields creates an entry from the standard logger and adds multiple +// fields to it. This is simply a helper for `WithField`, invoking it +// once for each field. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithFields(fields Fields) *Entry { + return std.WithFields(fields) +} + +// WithTime creates an entry from the standard logger and overrides the time of +// logs generated with it. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithTime(t time.Time) *Entry { + return std.WithTime(t) +} + +// Trace logs a message at level Trace on the standard logger. +func Trace(args ...interface{}) { + std.Trace(args...) +} + +// Debug logs a message at level Debug on the standard logger. +func Debug(args ...interface{}) { + std.Debug(args...) +} + +// Print logs a message at level Info on the standard logger. +func Print(args ...interface{}) { + std.Print(args...) +} + +// Info logs a message at level Info on the standard logger. +func Info(args ...interface{}) { + std.Info(args...) +} + +// Warn logs a message at level Warn on the standard logger. +func Warn(args ...interface{}) { + std.Warn(args...) +} + +// Warning logs a message at level Warn on the standard logger. +func Warning(args ...interface{}) { + std.Warning(args...) +} + +// Error logs a message at level Error on the standard logger. +func Error(args ...interface{}) { + std.Error(args...) +} + +// Panic logs a message at level Panic on the standard logger. +func Panic(args ...interface{}) { + std.Panic(args...) +} + +// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1. +func Fatal(args ...interface{}) { + std.Fatal(args...) +} + +// TraceFn logs a message from a func at level Trace on the standard logger. +func TraceFn(fn LogFunction) { + std.TraceFn(fn) +} + +// DebugFn logs a message from a func at level Debug on the standard logger. +func DebugFn(fn LogFunction) { + std.DebugFn(fn) +} + +// PrintFn logs a message from a func at level Info on the standard logger. +func PrintFn(fn LogFunction) { + std.PrintFn(fn) +} + +// InfoFn logs a message from a func at level Info on the standard logger. +func InfoFn(fn LogFunction) { + std.InfoFn(fn) +} + +// WarnFn logs a message from a func at level Warn on the standard logger. +func WarnFn(fn LogFunction) { + std.WarnFn(fn) +} + +// WarningFn logs a message from a func at level Warn on the standard logger. +func WarningFn(fn LogFunction) { + std.WarningFn(fn) +} + +// ErrorFn logs a message from a func at level Error on the standard logger. +func ErrorFn(fn LogFunction) { + std.ErrorFn(fn) +} + +// PanicFn logs a message from a func at level Panic on the standard logger. +func PanicFn(fn LogFunction) { + std.PanicFn(fn) +} + +// FatalFn logs a message from a func at level Fatal on the standard logger then the process will exit with status set to 1. +func FatalFn(fn LogFunction) { + std.FatalFn(fn) +} + +// Tracef logs a message at level Trace on the standard logger. +func Tracef(format string, args ...interface{}) { + std.Tracef(format, args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func Debugf(format string, args ...interface{}) { + std.Debugf(format, args...) +} + +// Printf logs a message at level Info on the standard logger. 
+func Printf(format string, args ...interface{}) { + std.Printf(format, args...) +} + +// Infof logs a message at level Info on the standard logger. +func Infof(format string, args ...interface{}) { + std.Infof(format, args...) +} + +// Warnf logs a message at level Warn on the standard logger. +func Warnf(format string, args ...interface{}) { + std.Warnf(format, args...) +} + +// Warningf logs a message at level Warn on the standard logger. +func Warningf(format string, args ...interface{}) { + std.Warningf(format, args...) +} + +// Errorf logs a message at level Error on the standard logger. +func Errorf(format string, args ...interface{}) { + std.Errorf(format, args...) +} + +// Panicf logs a message at level Panic on the standard logger. +func Panicf(format string, args ...interface{}) { + std.Panicf(format, args...) +} + +// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1. +func Fatalf(format string, args ...interface{}) { + std.Fatalf(format, args...) +} + +// Traceln logs a message at level Trace on the standard logger. +func Traceln(args ...interface{}) { + std.Traceln(args...) +} + +// Debugln logs a message at level Debug on the standard logger. +func Debugln(args ...interface{}) { + std.Debugln(args...) +} + +// Println logs a message at level Info on the standard logger. +func Println(args ...interface{}) { + std.Println(args...) +} + +// Infoln logs a message at level Info on the standard logger. +func Infoln(args ...interface{}) { + std.Infoln(args...) +} + +// Warnln logs a message at level Warn on the standard logger. +func Warnln(args ...interface{}) { + std.Warnln(args...) +} + +// Warningln logs a message at level Warn on the standard logger. +func Warningln(args ...interface{}) { + std.Warningln(args...) +} + +// Errorln logs a message at level Error on the standard logger. +func Errorln(args ...interface{}) { + std.Errorln(args...) +} + +// Panicln logs a message at level Panic on the standard logger. +func Panicln(args ...interface{}) { + std.Panicln(args...) +} + +// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1. +func Fatalln(args ...interface{}) { + std.Fatalln(args...) +} diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go new file mode 100644 index 000000000..408883773 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/formatter.go @@ -0,0 +1,78 @@ +package logrus + +import "time" + +// Default key names for the default fields +const ( + defaultTimestampFormat = time.RFC3339 + FieldKeyMsg = "msg" + FieldKeyLevel = "level" + FieldKeyTime = "time" + FieldKeyLogrusError = "logrus_error" + FieldKeyFunc = "func" + FieldKeyFile = "file" +) + +// The Formatter interface is used to implement a custom Formatter. It takes an +// `Entry`. It exposes all the fields, including the default ones: +// +// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. +// * `entry.Data["time"]`. The timestamp. +// * `entry.Data["level"]. The level the entry was logged at. +// +// Any additional fields added with `WithField` or `WithFields` are also in +// `entry.Data`. Format is expected to return an array of bytes which are then +// logged to `logger.Out`. +type Formatter interface { + Format(*Entry) ([]byte, error) +} + +// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when +// dumping it. 
If this code wasn't there doing: +// +// logrus.WithField("level", 1).Info("hello") +// +// Would just silently drop the user provided level. Instead with this code +// it'll logged as: +// +// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} +// +// It's not exported because it's still using Data in an opinionated way. It's to +// avoid code duplication between the two default formatters. +func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) { + timeKey := fieldMap.resolve(FieldKeyTime) + if t, ok := data[timeKey]; ok { + data["fields."+timeKey] = t + delete(data, timeKey) + } + + msgKey := fieldMap.resolve(FieldKeyMsg) + if m, ok := data[msgKey]; ok { + data["fields."+msgKey] = m + delete(data, msgKey) + } + + levelKey := fieldMap.resolve(FieldKeyLevel) + if l, ok := data[levelKey]; ok { + data["fields."+levelKey] = l + delete(data, levelKey) + } + + logrusErrKey := fieldMap.resolve(FieldKeyLogrusError) + if l, ok := data[logrusErrKey]; ok { + data["fields."+logrusErrKey] = l + delete(data, logrusErrKey) + } + + // If reportCaller is not set, 'func' will not conflict. + if reportCaller { + funcKey := fieldMap.resolve(FieldKeyFunc) + if l, ok := data[funcKey]; ok { + data["fields."+funcKey] = l + } + fileKey := fieldMap.resolve(FieldKeyFile) + if l, ok := data[fileKey]; ok { + data["fields."+fileKey] = l + } + } +} diff --git a/vendor/github.com/sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go new file mode 100644 index 000000000..3f151cdc3 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/hooks.go @@ -0,0 +1,34 @@ +package logrus + +// A hook to be fired when logging on the logging levels returned from +// `Levels()` on your implementation of the interface. Note that this is not +// fired in a goroutine or a channel with workers, you should handle such +// functionality yourself if your call is non-blocking and you don't wish for +// the logging calls for levels returned from `Levels()` to block. +type Hook interface { + Levels() []Level + Fire(*Entry) error +} + +// Internal type for storing the hooks on a logger instance. +type LevelHooks map[Level][]Hook + +// Add a hook to an instance of logger. This is called with +// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. +func (hooks LevelHooks) Add(hook Hook) { + for _, level := range hook.Levels() { + hooks[level] = append(hooks[level], hook) + } +} + +// Fire all the hooks for the passed level. Used by `entry.log` to fire +// appropriate hooks for a log entry. +func (hooks LevelHooks) Fire(level Level, entry *Entry) error { + for _, hook := range hooks[level] { + if err := hook.Fire(entry); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go new file mode 100644 index 000000000..c96dc5636 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/json_formatter.go @@ -0,0 +1,128 @@ +package logrus + +import ( + "bytes" + "encoding/json" + "fmt" + "runtime" +) + +type fieldKey string + +// FieldMap allows customization of the key names for default fields. +type FieldMap map[fieldKey]string + +func (f FieldMap) resolve(key fieldKey) string { + if k, ok := f[key]; ok { + return k + } + + return string(key) +} + +// JSONFormatter formats logs into parsable json +type JSONFormatter struct { + // TimestampFormat sets the format used for marshaling timestamps. 
+ // The format to use is the same than for time.Format or time.Parse from the standard + // library. + // The standard Library already provides a set of predefined format. + TimestampFormat string + + // DisableTimestamp allows disabling automatic timestamps in output + DisableTimestamp bool + + // DisableHTMLEscape allows disabling html escaping in output + DisableHTMLEscape bool + + // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key. + DataKey string + + // FieldMap allows users to customize the names of keys for default fields. + // As an example: + // formatter := &JSONFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message", + // FieldKeyFunc: "@caller", + // }, + // } + FieldMap FieldMap + + // CallerPrettyfier can be set by the user to modify the content + // of the function and file keys in the json data when ReportCaller is + // activated. If any of the returned value is the empty string the + // corresponding key will be removed from json fields. + CallerPrettyfier func(*runtime.Frame) (function string, file string) + + // PrettyPrint will indent all json logs + PrettyPrint bool +} + +// Format renders a single log entry +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields, len(entry.Data)+4) + for k, v := range entry.Data { + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/sirupsen/logrus/issues/137 + data[k] = v.Error() + default: + data[k] = v + } + } + + if f.DataKey != "" { + newData := make(Fields, 4) + newData[f.DataKey] = data + data = newData + } + + prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = defaultTimestampFormat + } + + if entry.err != "" { + data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err + } + if !f.DisableTimestamp { + data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) + } + data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message + data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() + if entry.HasCaller() { + funcVal := entry.Caller.Function + fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } + if funcVal != "" { + data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal + } + if fileVal != "" { + data[f.FieldMap.resolve(FieldKeyFile)] = fileVal + } + } + + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + encoder := json.NewEncoder(b) + encoder.SetEscapeHTML(!f.DisableHTMLEscape) + if f.PrettyPrint { + encoder.SetIndent("", " ") + } + if err := encoder.Encode(data); err != nil { + return nil, fmt.Errorf("failed to marshal fields to JSON, %w", err) + } + + return b.Bytes(), nil +} diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go new file mode 100644 index 000000000..5ff0aef6d --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/logger.go @@ -0,0 +1,417 @@ +package logrus + +import ( + "context" + "io" + "os" + "sync" + "sync/atomic" + "time" +) + +// LogFunction For big messages, it can be more efficient to pass a function +// and only call it if the log level is actually enables rather than +// generating the log message and then checking if the level is enabled +type LogFunction func() 
[]interface{} + +type Logger struct { + // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a + // file, or leave it default which is `os.Stderr`. You can also set this to + // something more adventurous, such as logging to Kafka. + Out io.Writer + // Hooks for the logger instance. These allow firing events based on logging + // levels and log entries. For example, to send errors to an error tracking + // service, log to StatsD or dump the core on fatal errors. + Hooks LevelHooks + // All log entries pass through the formatter before logged to Out. The + // included formatters are `TextFormatter` and `JSONFormatter` for which + // TextFormatter is the default. In development (when a TTY is attached) it + // logs with colors, but to a file it wouldn't. You can easily implement your + // own that implements the `Formatter` interface, see the `README` or included + // formatters for examples. + Formatter Formatter + + // Flag for whether to log caller info (off by default) + ReportCaller bool + + // The logging level the logger should log at. This is typically (and defaults + // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be + // logged. + Level Level + // Used to sync writing to the log. Locking is enabled by Default + mu MutexWrap + // Reusable empty entry + entryPool sync.Pool + // Function to exit the application, defaults to `os.Exit()` + ExitFunc exitFunc + // The buffer pool used to format the log. If it is nil, the default global + // buffer pool will be used. + BufferPool BufferPool +} + +type exitFunc func(int) + +type MutexWrap struct { + lock sync.Mutex + disabled bool +} + +func (mw *MutexWrap) Lock() { + if !mw.disabled { + mw.lock.Lock() + } +} + +func (mw *MutexWrap) Unlock() { + if !mw.disabled { + mw.lock.Unlock() + } +} + +func (mw *MutexWrap) Disable() { + mw.disabled = true +} + +// Creates a new logger. Configuration should be set by changing `Formatter`, +// `Out` and `Hooks` directly on the default logger instance. You can also just +// instantiate your own: +// +// var log = &logrus.Logger{ +// Out: os.Stderr, +// Formatter: new(logrus.TextFormatter), +// Hooks: make(logrus.LevelHooks), +// Level: logrus.DebugLevel, +// } +// +// It's recommended to make this a global instance called `log`. +func New() *Logger { + return &Logger{ + Out: os.Stderr, + Formatter: new(TextFormatter), + Hooks: make(LevelHooks), + Level: InfoLevel, + ExitFunc: os.Exit, + ReportCaller: false, + } +} + +func (logger *Logger) newEntry() *Entry { + entry, ok := logger.entryPool.Get().(*Entry) + if ok { + return entry + } + return NewEntry(logger) +} + +func (logger *Logger) releaseEntry(entry *Entry) { + entry.Data = map[string]interface{}{} + logger.entryPool.Put(entry) +} + +// WithField allocates a new entry and adds a field to it. +// Debug, Print, Info, Warn, Error, Fatal or Panic must be then applied to +// this new returned entry. +// If you want multiple fields, use `WithFields`. +func (logger *Logger) WithField(key string, value interface{}) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithField(key, value) +} + +// Adds a struct of fields to the log entry. All it does is call `WithField` for +// each `Field`. +func (logger *Logger) WithFields(fields Fields) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithFields(fields) +} + +// Add an error as single field to the log entry. All it does is call +// `WithError` for the given `error`. 
+func (logger *Logger) WithError(err error) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithError(err) +} + +// Add a context to the log entry. +func (logger *Logger) WithContext(ctx context.Context) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithContext(ctx) +} + +// Overrides the time of the log entry. +func (logger *Logger) WithTime(t time.Time) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithTime(t) +} + +func (logger *Logger) Logf(level Level, format string, args ...interface{}) { + if logger.IsLevelEnabled(level) { + entry := logger.newEntry() + entry.Logf(level, format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Tracef(format string, args ...interface{}) { + logger.Logf(TraceLevel, format, args...) +} + +func (logger *Logger) Debugf(format string, args ...interface{}) { + logger.Logf(DebugLevel, format, args...) +} + +func (logger *Logger) Infof(format string, args ...interface{}) { + logger.Logf(InfoLevel, format, args...) +} + +func (logger *Logger) Printf(format string, args ...interface{}) { + entry := logger.newEntry() + entry.Printf(format, args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnf(format string, args ...interface{}) { + logger.Logf(WarnLevel, format, args...) +} + +func (logger *Logger) Warningf(format string, args ...interface{}) { + logger.Warnf(format, args...) +} + +func (logger *Logger) Errorf(format string, args ...interface{}) { + logger.Logf(ErrorLevel, format, args...) +} + +func (logger *Logger) Fatalf(format string, args ...interface{}) { + logger.Logf(FatalLevel, format, args...) + logger.Exit(1) +} + +func (logger *Logger) Panicf(format string, args ...interface{}) { + logger.Logf(PanicLevel, format, args...) +} + +// Log will log a message at the level given as parameter. +// Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit. +// For this behaviour Logger.Panic or Logger.Fatal should be used instead. +func (logger *Logger) Log(level Level, args ...interface{}) { + if logger.IsLevelEnabled(level) { + entry := logger.newEntry() + entry.Log(level, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) LogFn(level Level, fn LogFunction) { + if logger.IsLevelEnabled(level) { + entry := logger.newEntry() + entry.Log(level, fn()...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Trace(args ...interface{}) { + logger.Log(TraceLevel, args...) +} + +func (logger *Logger) Debug(args ...interface{}) { + logger.Log(DebugLevel, args...) +} + +func (logger *Logger) Info(args ...interface{}) { + logger.Log(InfoLevel, args...) +} + +func (logger *Logger) Print(args ...interface{}) { + entry := logger.newEntry() + entry.Print(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warn(args ...interface{}) { + logger.Log(WarnLevel, args...) +} + +func (logger *Logger) Warning(args ...interface{}) { + logger.Warn(args...) +} + +func (logger *Logger) Error(args ...interface{}) { + logger.Log(ErrorLevel, args...) +} + +func (logger *Logger) Fatal(args ...interface{}) { + logger.Log(FatalLevel, args...) + logger.Exit(1) +} + +func (logger *Logger) Panic(args ...interface{}) { + logger.Log(PanicLevel, args...) 
+} + +func (logger *Logger) TraceFn(fn LogFunction) { + logger.LogFn(TraceLevel, fn) +} + +func (logger *Logger) DebugFn(fn LogFunction) { + logger.LogFn(DebugLevel, fn) +} + +func (logger *Logger) InfoFn(fn LogFunction) { + logger.LogFn(InfoLevel, fn) +} + +func (logger *Logger) PrintFn(fn LogFunction) { + entry := logger.newEntry() + entry.Print(fn()...) + logger.releaseEntry(entry) +} + +func (logger *Logger) WarnFn(fn LogFunction) { + logger.LogFn(WarnLevel, fn) +} + +func (logger *Logger) WarningFn(fn LogFunction) { + logger.WarnFn(fn) +} + +func (logger *Logger) ErrorFn(fn LogFunction) { + logger.LogFn(ErrorLevel, fn) +} + +func (logger *Logger) FatalFn(fn LogFunction) { + logger.LogFn(FatalLevel, fn) + logger.Exit(1) +} + +func (logger *Logger) PanicFn(fn LogFunction) { + logger.LogFn(PanicLevel, fn) +} + +func (logger *Logger) Logln(level Level, args ...interface{}) { + if logger.IsLevelEnabled(level) { + entry := logger.newEntry() + entry.Logln(level, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Traceln(args ...interface{}) { + logger.Logln(TraceLevel, args...) +} + +func (logger *Logger) Debugln(args ...interface{}) { + logger.Logln(DebugLevel, args...) +} + +func (logger *Logger) Infoln(args ...interface{}) { + logger.Logln(InfoLevel, args...) +} + +func (logger *Logger) Println(args ...interface{}) { + entry := logger.newEntry() + entry.Println(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnln(args ...interface{}) { + logger.Logln(WarnLevel, args...) +} + +func (logger *Logger) Warningln(args ...interface{}) { + logger.Warnln(args...) +} + +func (logger *Logger) Errorln(args ...interface{}) { + logger.Logln(ErrorLevel, args...) +} + +func (logger *Logger) Fatalln(args ...interface{}) { + logger.Logln(FatalLevel, args...) + logger.Exit(1) +} + +func (logger *Logger) Panicln(args ...interface{}) { + logger.Logln(PanicLevel, args...) +} + +func (logger *Logger) Exit(code int) { + runHandlers() + if logger.ExitFunc == nil { + logger.ExitFunc = os.Exit + } + logger.ExitFunc(code) +} + +//When file is opened with appending mode, it's safe to +//write concurrently to a file (within 4k message on Linux). +//In these cases user can choose to disable the lock. +func (logger *Logger) SetNoLock() { + logger.mu.Disable() +} + +func (logger *Logger) level() Level { + return Level(atomic.LoadUint32((*uint32)(&logger.Level))) +} + +// SetLevel sets the logger level. +func (logger *Logger) SetLevel(level Level) { + atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) +} + +// GetLevel returns the logger level. +func (logger *Logger) GetLevel() Level { + return logger.level() +} + +// AddHook adds a hook to the logger hooks. +func (logger *Logger) AddHook(hook Hook) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Hooks.Add(hook) +} + +// IsLevelEnabled checks if the log level of the logger is greater than the level param +func (logger *Logger) IsLevelEnabled(level Level) bool { + return logger.level() >= level +} + +// SetFormatter sets the logger formatter. +func (logger *Logger) SetFormatter(formatter Formatter) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Formatter = formatter +} + +// SetOutput sets the logger output. 
+func (logger *Logger) SetOutput(output io.Writer) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Out = output +} + +func (logger *Logger) SetReportCaller(reportCaller bool) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.ReportCaller = reportCaller +} + +// ReplaceHooks replaces the logger hooks and returns the old ones +func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks { + logger.mu.Lock() + oldHooks := logger.Hooks + logger.Hooks = hooks + logger.mu.Unlock() + return oldHooks +} + +// SetBufferPool sets the logger buffer pool. +func (logger *Logger) SetBufferPool(pool BufferPool) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.BufferPool = pool +} diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go new file mode 100644 index 000000000..2f16224cb --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/logrus.go @@ -0,0 +1,186 @@ +package logrus + +import ( + "fmt" + "log" + "strings" +) + +// Fields type, used to pass to `WithFields`. +type Fields map[string]interface{} + +// Level type +type Level uint32 + +// Convert the Level to a string. E.g. PanicLevel becomes "panic". +func (level Level) String() string { + if b, err := level.MarshalText(); err == nil { + return string(b) + } else { + return "unknown" + } +} + +// ParseLevel takes a string level and returns the Logrus log level constant. +func ParseLevel(lvl string) (Level, error) { + switch strings.ToLower(lvl) { + case "panic": + return PanicLevel, nil + case "fatal": + return FatalLevel, nil + case "error": + return ErrorLevel, nil + case "warn", "warning": + return WarnLevel, nil + case "info": + return InfoLevel, nil + case "debug": + return DebugLevel, nil + case "trace": + return TraceLevel, nil + } + + var l Level + return l, fmt.Errorf("not a valid logrus Level: %q", lvl) +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (level *Level) UnmarshalText(text []byte) error { + l, err := ParseLevel(string(text)) + if err != nil { + return err + } + + *level = l + + return nil +} + +func (level Level) MarshalText() ([]byte, error) { + switch level { + case TraceLevel: + return []byte("trace"), nil + case DebugLevel: + return []byte("debug"), nil + case InfoLevel: + return []byte("info"), nil + case WarnLevel: + return []byte("warning"), nil + case ErrorLevel: + return []byte("error"), nil + case FatalLevel: + return []byte("fatal"), nil + case PanicLevel: + return []byte("panic"), nil + } + + return nil, fmt.Errorf("not a valid logrus level %d", level) +} + +// A constant exposing all logging levels +var AllLevels = []Level{ + PanicLevel, + FatalLevel, + ErrorLevel, + WarnLevel, + InfoLevel, + DebugLevel, + TraceLevel, +} + +// These are the different logging levels. You can set the logging level to log +// on your instance of logger, obtained with `logrus.New()`. +const ( + // PanicLevel level, highest level of severity. Logs and then calls panic with the + // message passed to Debug, Info, ... + PanicLevel Level = iota + // FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the + // logging level is set to Panic. + FatalLevel + // ErrorLevel level. Logs. Used for errors that should definitely be noted. + // Commonly used for hooks to send errors to an error tracking service. + ErrorLevel + // WarnLevel level. Non-critical entries that deserve eyes. + WarnLevel + // InfoLevel level. General operational entries about what's going on inside the + // application. + InfoLevel + // DebugLevel level. 
Usually only enabled when debugging. Very verbose logging. + DebugLevel + // TraceLevel level. Designates finer-grained informational events than the Debug. + TraceLevel +) + +// Won't compile if StdLogger can't be realized by a log.Logger +var ( + _ StdLogger = &log.Logger{} + _ StdLogger = &Entry{} + _ StdLogger = &Logger{} +) + +// StdLogger is what your logrus-enabled library should take, that way +// it'll accept a stdlib logger and a logrus logger. There's no standard +// interface, this is the closest we get, unfortunately. +type StdLogger interface { + Print(...interface{}) + Printf(string, ...interface{}) + Println(...interface{}) + + Fatal(...interface{}) + Fatalf(string, ...interface{}) + Fatalln(...interface{}) + + Panic(...interface{}) + Panicf(string, ...interface{}) + Panicln(...interface{}) +} + +// The FieldLogger interface generalizes the Entry and Logger types +type FieldLogger interface { + WithField(key string, value interface{}) *Entry + WithFields(fields Fields) *Entry + WithError(err error) *Entry + + Debugf(format string, args ...interface{}) + Infof(format string, args ...interface{}) + Printf(format string, args ...interface{}) + Warnf(format string, args ...interface{}) + Warningf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) + Fatalf(format string, args ...interface{}) + Panicf(format string, args ...interface{}) + + Debug(args ...interface{}) + Info(args ...interface{}) + Print(args ...interface{}) + Warn(args ...interface{}) + Warning(args ...interface{}) + Error(args ...interface{}) + Fatal(args ...interface{}) + Panic(args ...interface{}) + + Debugln(args ...interface{}) + Infoln(args ...interface{}) + Println(args ...interface{}) + Warnln(args ...interface{}) + Warningln(args ...interface{}) + Errorln(args ...interface{}) + Fatalln(args ...interface{}) + Panicln(args ...interface{}) + + // IsDebugEnabled() bool + // IsInfoEnabled() bool + // IsWarnEnabled() bool + // IsErrorEnabled() bool + // IsFatalEnabled() bool + // IsPanicEnabled() bool +} + +// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is +// here for consistancy. Do not use. Use Logger or Entry instead. 
+type Ext1FieldLogger interface { + FieldLogger + Tracef(format string, args ...interface{}) + Trace(args ...interface{}) + Traceln(args ...interface{}) +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go new file mode 100644 index 000000000..2403de981 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go @@ -0,0 +1,11 @@ +// +build appengine + +package logrus + +import ( + "io" +) + +func checkIfTerminal(w io.Writer) bool { + return true +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go new file mode 100644 index 000000000..499789984 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go @@ -0,0 +1,13 @@ +// +build darwin dragonfly freebsd netbsd openbsd +// +build !js + +package logrus + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TIOCGETA + +func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == nil +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_js.go b/vendor/github.com/sirupsen/logrus/terminal_check_js.go new file mode 100644 index 000000000..ebdae3ec6 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_js.go @@ -0,0 +1,7 @@ +// +build js + +package logrus + +func isTerminal(fd int) bool { + return false +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go new file mode 100644 index 000000000..97af92c68 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go @@ -0,0 +1,11 @@ +// +build js nacl plan9 + +package logrus + +import ( + "io" +) + +func checkIfTerminal(w io.Writer) bool { + return false +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go new file mode 100644 index 000000000..3293fb3ca --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go @@ -0,0 +1,17 @@ +// +build !appengine,!js,!windows,!nacl,!plan9 + +package logrus + +import ( + "io" + "os" +) + +func checkIfTerminal(w io.Writer) bool { + switch v := w.(type) { + case *os.File: + return isTerminal(int(v.Fd())) + default: + return false + } +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go new file mode 100644 index 000000000..f6710b3bd --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go @@ -0,0 +1,11 @@ +package logrus + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. 
+func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermio(fd, unix.TCGETA) + return err == nil +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go new file mode 100644 index 000000000..04748b851 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go @@ -0,0 +1,13 @@ +// +build linux aix zos +// +build !js + +package logrus + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TCGETS + +func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == nil +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go new file mode 100644 index 000000000..2879eb50e --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go @@ -0,0 +1,27 @@ +// +build !appengine,!js,windows + +package logrus + +import ( + "io" + "os" + + "golang.org/x/sys/windows" +) + +func checkIfTerminal(w io.Writer) bool { + switch v := w.(type) { + case *os.File: + handle := windows.Handle(v.Fd()) + var mode uint32 + if err := windows.GetConsoleMode(handle, &mode); err != nil { + return false + } + mode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING + if err := windows.SetConsoleMode(handle, mode); err != nil { + return false + } + return true + } + return false +} diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go new file mode 100644 index 000000000..be2c6efe5 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/text_formatter.go @@ -0,0 +1,339 @@ +package logrus + +import ( + "bytes" + "fmt" + "os" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "time" + "unicode/utf8" +) + +const ( + red = 31 + yellow = 33 + blue = 36 + gray = 37 +) + +var baseTimestamp time.Time + +func init() { + baseTimestamp = time.Now() +} + +// TextFormatter formats logs into text +type TextFormatter struct { + // Set to true to bypass checking for a TTY before outputting colors. + ForceColors bool + + // Force disabling colors. + DisableColors bool + + // Force quoting of all values + ForceQuote bool + + // DisableQuote disables quoting for all values. + // DisableQuote will have a lower priority than ForceQuote. + // If both of them are set to true, quote will be forced on all values. + DisableQuote bool + + // Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/ + EnvironmentOverrideColors bool + + // Disable timestamp logging. useful when output is redirected to logging + // system that already adds timestamps. + DisableTimestamp bool + + // Enable logging the full timestamp when a TTY is attached instead of just + // the time passed since beginning of execution. + FullTimestamp bool + + // TimestampFormat to use for display when a full timestamp is printed. + // The format to use is the same than for time.Format or time.Parse from the standard + // library. + // The standard Library already provides a set of predefined format. + TimestampFormat string + + // The fields are sorted by default for a consistent output. For applications + // that log extremely frequently and don't use the JSON formatter this may not + // be desired. + DisableSorting bool + + // The keys sorting function, when uninitialized it uses sort.Strings. + SortingFunc func([]string) + + // Disables the truncation of the level text to 4 characters. 
+ DisableLevelTruncation bool + + // PadLevelText Adds padding the level text so that all the levels output at the same length + // PadLevelText is a superset of the DisableLevelTruncation option + PadLevelText bool + + // QuoteEmptyFields will wrap empty fields in quotes if true + QuoteEmptyFields bool + + // Whether the logger's out is to a terminal + isTerminal bool + + // FieldMap allows users to customize the names of keys for default fields. + // As an example: + // formatter := &TextFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message"}} + FieldMap FieldMap + + // CallerPrettyfier can be set by the user to modify the content + // of the function and file keys in the data when ReportCaller is + // activated. If any of the returned value is the empty string the + // corresponding key will be removed from fields. + CallerPrettyfier func(*runtime.Frame) (function string, file string) + + terminalInitOnce sync.Once + + // The max length of the level text, generated dynamically on init + levelTextMaxLength int +} + +func (f *TextFormatter) init(entry *Entry) { + if entry.Logger != nil { + f.isTerminal = checkIfTerminal(entry.Logger.Out) + } + // Get the max length of the level text + for _, level := range AllLevels { + levelTextLength := utf8.RuneCount([]byte(level.String())) + if levelTextLength > f.levelTextMaxLength { + f.levelTextMaxLength = levelTextLength + } + } +} + +func (f *TextFormatter) isColored() bool { + isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows")) + + if f.EnvironmentOverrideColors { + switch force, ok := os.LookupEnv("CLICOLOR_FORCE"); { + case ok && force != "0": + isColored = true + case ok && force == "0", os.Getenv("CLICOLOR") == "0": + isColored = false + } + } + + return isColored && !f.DisableColors +} + +// Format renders a single log entry +func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields) + for k, v := range entry.Data { + data[k] = v + } + prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) + keys := make([]string, 0, len(data)) + for k := range data { + keys = append(keys, k) + } + + var funcVal, fileVal string + + fixedKeys := make([]string, 0, 4+len(data)) + if !f.DisableTimestamp { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime)) + } + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel)) + if entry.Message != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg)) + } + if entry.err != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError)) + } + if entry.HasCaller() { + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } else { + funcVal = entry.Caller.Function + fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + } + + if funcVal != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFunc)) + } + if fileVal != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFile)) + } + } + + if !f.DisableSorting { + if f.SortingFunc == nil { + sort.Strings(keys) + fixedKeys = append(fixedKeys, keys...) + } else { + if !f.isColored() { + fixedKeys = append(fixedKeys, keys...) + f.SortingFunc(fixedKeys) + } else { + f.SortingFunc(keys) + } + } + } else { + fixedKeys = append(fixedKeys, keys...) 
+ } + + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + f.terminalInitOnce.Do(func() { f.init(entry) }) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = defaultTimestampFormat + } + if f.isColored() { + f.printColored(b, entry, keys, data, timestampFormat) + } else { + + for _, key := range fixedKeys { + var value interface{} + switch { + case key == f.FieldMap.resolve(FieldKeyTime): + value = entry.Time.Format(timestampFormat) + case key == f.FieldMap.resolve(FieldKeyLevel): + value = entry.Level.String() + case key == f.FieldMap.resolve(FieldKeyMsg): + value = entry.Message + case key == f.FieldMap.resolve(FieldKeyLogrusError): + value = entry.err + case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller(): + value = funcVal + case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller(): + value = fileVal + default: + value = data[key] + } + f.appendKeyValue(b, key, value) + } + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) { + var levelColor int + switch entry.Level { + case DebugLevel, TraceLevel: + levelColor = gray + case WarnLevel: + levelColor = yellow + case ErrorLevel, FatalLevel, PanicLevel: + levelColor = red + case InfoLevel: + levelColor = blue + default: + levelColor = blue + } + + levelText := strings.ToUpper(entry.Level.String()) + if !f.DisableLevelTruncation && !f.PadLevelText { + levelText = levelText[0:4] + } + if f.PadLevelText { + // Generates the format string used in the next line, for example "%-6s" or "%-7s". + // Based on the max level text length. + formatString := "%-" + strconv.Itoa(f.levelTextMaxLength) + "s" + // Formats the level text by appending spaces up to the max length, for example: + // - "INFO " + // - "WARNING" + levelText = fmt.Sprintf(formatString, levelText) + } + + // Remove a single newline if it already exists in the message to keep + // the behavior of logrus text_formatter the same as the stdlib log package + entry.Message = strings.TrimSuffix(entry.Message, "\n") + + caller := "" + if entry.HasCaller() { + funcVal := fmt.Sprintf("%s()", entry.Caller.Function) + fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } + + if fileVal == "" { + caller = funcVal + } else if funcVal == "" { + caller = fileVal + } else { + caller = fileVal + " " + funcVal + } + } + + switch { + case f.DisableTimestamp: + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message) + case !f.FullTimestamp: + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message) + default: + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message) + } + for _, k := range keys { + v := data[k] + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) + f.appendValue(b, v) + } +} + +func (f *TextFormatter) needsQuoting(text string) bool { + if f.ForceQuote { + return true + } + if f.QuoteEmptyFields && len(text) == 0 { + return true + } + if f.DisableQuote { + return false + } + for _, ch := range text { + if !((ch >= 'a' && ch <= 'z') || + (ch >= 'A' && ch <= 'Z') || + (ch >= '0' && ch <= '9') || + ch == '-' || ch == '.' 
|| ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') { + return true + } + } + return false +} + +func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { + if b.Len() > 0 { + b.WriteByte(' ') + } + b.WriteString(key) + b.WriteByte('=') + f.appendValue(b, value) +} + +func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { + stringVal, ok := value.(string) + if !ok { + stringVal = fmt.Sprint(value) + } + + if !f.needsQuoting(stringVal) { + b.WriteString(stringVal) + } else { + b.WriteString(fmt.Sprintf("%q", stringVal)) + } +} diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go new file mode 100644 index 000000000..72e8e3a1b --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/writer.go @@ -0,0 +1,70 @@ +package logrus + +import ( + "bufio" + "io" + "runtime" +) + +// Writer at INFO level. See WriterLevel for details. +func (logger *Logger) Writer() *io.PipeWriter { + return logger.WriterLevel(InfoLevel) +} + +// WriterLevel returns an io.Writer that can be used to write arbitrary text to +// the logger at the given log level. Each line written to the writer will be +// printed in the usual way using formatters and hooks. The writer is part of an +// io.Pipe and it is the callers responsibility to close the writer when done. +// This can be used to override the standard library logger easily. +func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { + return NewEntry(logger).WriterLevel(level) +} + +func (entry *Entry) Writer() *io.PipeWriter { + return entry.WriterLevel(InfoLevel) +} + +func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { + reader, writer := io.Pipe() + + var printFunc func(args ...interface{}) + + switch level { + case TraceLevel: + printFunc = entry.Trace + case DebugLevel: + printFunc = entry.Debug + case InfoLevel: + printFunc = entry.Info + case WarnLevel: + printFunc = entry.Warn + case ErrorLevel: + printFunc = entry.Error + case FatalLevel: + printFunc = entry.Fatal + case PanicLevel: + printFunc = entry.Panic + default: + printFunc = entry.Print + } + + go entry.writerScanner(reader, printFunc) + runtime.SetFinalizer(writer, writerFinalizer) + + return writer +} + +func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + printFunc(scanner.Text()) + } + if err := scanner.Err(); err != nil { + entry.Errorf("Error while reading from Writer: %s", err) + } + reader.Close() +} + +func writerFinalizer(writer *io.PipeWriter) { + writer.Close() +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/.gitignore b/vendor/github.com/testcontainers/testcontainers-go/.gitignore new file mode 100644 index 000000000..4d27765dd --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/.gitignore @@ -0,0 +1,13 @@ +# Generated by golang tooling +debug.test +vendor + +# Generated docs +site/ +.direnv/ +src/mkdocs-codeinclude-plugin +src/pip-delete-this-directory.txt +.idea/ +.DS_Store + +TEST-*.xml diff --git a/vendor/github.com/testcontainers/testcontainers-go/.golangci.yml b/vendor/github.com/testcontainers/testcontainers-go/.golangci.yml new file mode 100644 index 000000000..89bf14cc4 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/.golangci.yml @@ -0,0 +1,16 @@ +linters: + enable: + - gci + - gofmt + - misspell + + +linters-settings: + gci: + sections: + - standard + - default + - 
prefix(github.com/testcontainters) + +run: + timeout: 3m diff --git a/vendor/github.com/testcontainers/testcontainers-go/CONTRIBUTING.md b/vendor/github.com/testcontainers/testcontainers-go/CONTRIBUTING.md new file mode 100644 index 000000000..c8194c275 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/CONTRIBUTING.md @@ -0,0 +1,13 @@ +# Contributing + +Please see the [main contributing guidelines](./docs/contributing.md). + +There are additional docs describing [contributing documentation changes](./docs/contributing_docs.md). + +### GitHub Sponsorship + +Testcontainers is [in the GitHub Sponsors program](https://github.com/sponsors/testcontainers)! + +This repository is supported by our sponsors, meaning that issues are eligible to have a 'bounty' attached to them by sponsors. + +Please see [the bounty policy page](https://golang.testcontainers.org/bounty) if you are interested, either as a sponsor or as a contributor. diff --git a/vendor/github.com/testcontainers/testcontainers-go/LICENSE b/vendor/github.com/testcontainers/testcontainers-go/LICENSE new file mode 100644 index 000000000..607a9c3c4 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2017-2019 Gianluca Arbezzano + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/testcontainers/testcontainers-go/Makefile b/vendor/github.com/testcontainers/testcontainers-go/Makefile new file mode 100644 index 000000000..cb9d2f49a --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/Makefile @@ -0,0 +1,9 @@ +include ./commons-test.mk + +.PHONY: test-all +test-all: tools test-unit + +.PHONY: test-examples +test-examples: + @echo "Running example tests..." 
+ make -C examples test diff --git a/vendor/github.com/testcontainers/testcontainers-go/Pipfile b/vendor/github.com/testcontainers/testcontainers-go/Pipfile new file mode 100644 index 000000000..2236de86d --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/Pipfile @@ -0,0 +1,15 @@ +[[source]] +name = "pypi" +url = "https://pypi.org/simple" +verify_ssl = true + +[dev-packages] + +[packages] +mkdocs = "*" +mkdocs-codeinclude-plugin = "*" +mkdocs-material = "*" +mkdocs-markdownextradata-plugin = "*" + +[requires] +python_version = "3.7" diff --git a/vendor/github.com/testcontainers/testcontainers-go/Pipfile.lock b/vendor/github.com/testcontainers/testcontainers-go/Pipfile.lock new file mode 100644 index 000000000..c208f40e2 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/Pipfile.lock @@ -0,0 +1,298 @@ +{ + "_meta": { + "hash": { + "sha256": "d3830267ea93391a077a2ba5b93aa8218c0a38002694b404b56df11498da3e56" + }, + "pipfile-spec": 6, + "requires": { + "python_version": "3.7" + }, + "sources": [ + { + "name": "pypi", + "url": "https://pypi.org/simple", + "verify_ssl": true + } + ] + }, + "default": { + "click": { + "hashes": [ + "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e", + "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48" + ], + "markers": "python_version >= '3.7'", + "version": "==8.1.3" + }, + "ghp-import": { + "hashes": [ + "sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619", + "sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343" + ], + "version": "==2.1.0" + }, + "importlib-metadata": { + "hashes": [ + "sha256:43dd286a2cd8995d5eaef7fee2066340423b818ed3fd70adf0bad5f1fac53fed", + "sha256:92501cdf9cc66ebd3e612f1b4f0c0765dfa42f0fa38ffb319b6bd84dd675d705" + ], + "markers": "python_version >= '3.7'", + "version": "==6.6.0" + }, + "jinja2": { + "hashes": [ + "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852", + "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61" + ], + "markers": "python_version >= '3.7'", + "version": "==3.1.2" + }, + "markdown": { + "hashes": [ + "sha256:065fd4df22da73a625f14890dd77eb8040edcbd68794bcd35943be14490608b2", + "sha256:8bf101198e004dc93e84a12a7395e31aac6a9c9942848ae1d99b9d72cf9b3520" + ], + "markers": "python_version >= '3.7'", + "version": "==3.4.3" + }, + "markupsafe": { + "hashes": [ + "sha256:0576fe974b40a400449768941d5d0858cc624e3249dfd1e0c33674e5c7ca7aed", + "sha256:085fd3201e7b12809f9e6e9bc1e5c96a368c8523fad5afb02afe3c051ae4afcc", + "sha256:090376d812fb6ac5f171e5938e82e7f2d7adc2b629101cec0db8b267815c85e2", + "sha256:0b462104ba25f1ac006fdab8b6a01ebbfbce9ed37fd37fd4acd70c67c973e460", + "sha256:137678c63c977754abe9086a3ec011e8fd985ab90631145dfb9294ad09c102a7", + "sha256:1bea30e9bf331f3fef67e0a3877b2288593c98a21ccb2cf29b74c581a4eb3af0", + "sha256:22152d00bf4a9c7c83960521fc558f55a1adbc0631fbb00a9471e097b19d72e1", + "sha256:22731d79ed2eb25059ae3df1dfc9cb1546691cc41f4e3130fe6bfbc3ecbbecfa", + "sha256:2298c859cfc5463f1b64bd55cb3e602528db6fa0f3cfd568d3605c50678f8f03", + "sha256:28057e985dace2f478e042eaa15606c7efccb700797660629da387eb289b9323", + "sha256:2e7821bffe00aa6bd07a23913b7f4e01328c3d5cc0b40b36c0bd81d362faeb65", + "sha256:2ec4f2d48ae59bbb9d1f9d7efb9236ab81429a764dedca114f5fdabbc3788013", + "sha256:340bea174e9761308703ae988e982005aedf427de816d1afe98147668cc03036", + "sha256:40627dcf047dadb22cd25ea7ecfe9cbf3bbbad0482ee5920b582f3809c97654f", + 
"sha256:40dfd3fefbef579ee058f139733ac336312663c6706d1163b82b3003fb1925c4", + "sha256:4cf06cdc1dda95223e9d2d3c58d3b178aa5dacb35ee7e3bbac10e4e1faacb419", + "sha256:50c42830a633fa0cf9e7d27664637532791bfc31c731a87b202d2d8ac40c3ea2", + "sha256:55f44b440d491028addb3b88f72207d71eeebfb7b5dbf0643f7c023ae1fba619", + "sha256:608e7073dfa9e38a85d38474c082d4281f4ce276ac0010224eaba11e929dd53a", + "sha256:63ba06c9941e46fa389d389644e2d8225e0e3e5ebcc4ff1ea8506dce646f8c8a", + "sha256:65608c35bfb8a76763f37036547f7adfd09270fbdbf96608be2bead319728fcd", + "sha256:665a36ae6f8f20a4676b53224e33d456a6f5a72657d9c83c2aa00765072f31f7", + "sha256:6d6607f98fcf17e534162f0709aaad3ab7a96032723d8ac8750ffe17ae5a0666", + "sha256:7313ce6a199651c4ed9d7e4cfb4aa56fe923b1adf9af3b420ee14e6d9a73df65", + "sha256:7668b52e102d0ed87cb082380a7e2e1e78737ddecdde129acadb0eccc5423859", + "sha256:7df70907e00c970c60b9ef2938d894a9381f38e6b9db73c5be35e59d92e06625", + "sha256:7e007132af78ea9df29495dbf7b5824cb71648d7133cf7848a2a5dd00d36f9ff", + "sha256:835fb5e38fd89328e9c81067fd642b3593c33e1e17e2fdbf77f5676abb14a156", + "sha256:8bca7e26c1dd751236cfb0c6c72d4ad61d986e9a41bbf76cb445f69488b2a2bd", + "sha256:8db032bf0ce9022a8e41a22598eefc802314e81b879ae093f36ce9ddf39ab1ba", + "sha256:99625a92da8229df6d44335e6fcc558a5037dd0a760e11d84be2260e6f37002f", + "sha256:9cad97ab29dfc3f0249b483412c85c8ef4766d96cdf9dcf5a1e3caa3f3661cf1", + "sha256:a4abaec6ca3ad8660690236d11bfe28dfd707778e2442b45addd2f086d6ef094", + "sha256:a6e40afa7f45939ca356f348c8e23048e02cb109ced1eb8420961b2f40fb373a", + "sha256:a6f2fcca746e8d5910e18782f976489939d54a91f9411c32051b4aab2bd7c513", + "sha256:a806db027852538d2ad7555b203300173dd1b77ba116de92da9afbc3a3be3eed", + "sha256:abcabc8c2b26036d62d4c746381a6f7cf60aafcc653198ad678306986b09450d", + "sha256:b8526c6d437855442cdd3d87eede9c425c4445ea011ca38d937db299382e6fa3", + "sha256:bb06feb762bade6bf3c8b844462274db0c76acc95c52abe8dbed28ae3d44a147", + "sha256:c0a33bc9f02c2b17c3ea382f91b4db0e6cde90b63b296422a939886a7a80de1c", + "sha256:c4a549890a45f57f1ebf99c067a4ad0cb423a05544accaf2b065246827ed9603", + "sha256:ca244fa73f50a800cf8c3ebf7fd93149ec37f5cb9596aa8873ae2c1d23498601", + "sha256:cf877ab4ed6e302ec1d04952ca358b381a882fbd9d1b07cccbfd61783561f98a", + "sha256:d9d971ec1e79906046aa3ca266de79eac42f1dbf3612a05dc9368125952bd1a1", + "sha256:da25303d91526aac3672ee6d49a2f3db2d9502a4a60b55519feb1a4c7714e07d", + "sha256:e55e40ff0cc8cc5c07996915ad367fa47da6b3fc091fdadca7f5403239c5fec3", + "sha256:f03a532d7dee1bed20bc4884194a16160a2de9ffc6354b3878ec9682bb623c54", + "sha256:f1cd098434e83e656abf198f103a8207a8187c0fc110306691a2e94a78d0abb2", + "sha256:f2bfb563d0211ce16b63c7cb9395d2c682a23187f54c3d79bfec33e6705473c6", + "sha256:f8ffb705ffcf5ddd0e80b65ddf7bed7ee4f5a441ea7d3419e861a12eaf41af58" + ], + "markers": "python_version >= '3.7'", + "version": "==2.1.2" + }, + "mergedeep": { + "hashes": [ + "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8", + "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307" + ], + "markers": "python_version >= '3.6'", + "version": "==1.3.4" + }, + "mkdocs": { + "hashes": [ + "sha256:89f5a094764381cda656af4298727c9f53dc3e602983087e1fe96ea1df24f4c1", + "sha256:a1fa8c2d0c1305d7fc2b9d9f607c71778572a8b110fb26642aa00296c9e6d072" + ], + "index": "pypi", + "version": "==1.2.3" + }, + "mkdocs-codeinclude-plugin": { + "hashes": [ + "sha256:172a917c9b257fa62850b669336151f85d3cd40312b2b52520cbcceab557ea6c", + "sha256:305387f67a885f0e36ec1cf977324fe1fe50d31301147194b63631d0864601b1" + ], + "index": 
"pypi", + "version": "==0.2.1" + }, + "mkdocs-markdownextradata-plugin": { + "hashes": [ + "sha256:64d1c966b288d653f51f7531c03204eb988d0d77e56055c9d703d99105259a36" + ], + "index": "pypi", + "version": "==0.0.5" + }, + "mkdocs-material": { + "hashes": [ + "sha256:524debb6ee8ee89cee08886f2a67c3c3875c0ee9579c598d7448cbd2607cd3b7", + "sha256:62ae84082fa9f077c86b7db63e7bedf392005041b451defc850f8d0887a11e91" + ], + "index": "pypi", + "version": "==3.2.0" + }, + "packaging": { + "hashes": [ + "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61", + "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f" + ], + "markers": "python_version >= '3.7'", + "version": "==23.1" + }, + "pygments": { + "hashes": [ + "sha256:8ace4d3c1dd481894b2005f560ead0f9f19ee64fe983366be1a21e171d12775c", + "sha256:db2db3deb4b4179f399a09054b023b6a586b76499d36965813c71aa8ed7b5fd1" + ], + "markers": "python_version >= '3.7'", + "version": "==2.15.1" + }, + "pymdown-extensions": { + "hashes": [ + "sha256:9a77955e63528c2ee98073a1fb3207c1a45607bc74a34ef21acd098f46c3aa8a", + "sha256:e6cbe8ace7d8feda30bc4fd6a21a073893a9a0e90c373e92d69ce5b653051f55" + ], + "index": "pypi", + "version": "==10.0" + }, + "python-dateutil": { + "hashes": [ + "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86", + "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==2.8.2" + }, + "pyyaml": { + "hashes": [ + "sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf", + "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293", + "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b", + "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57", + "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b", + "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4", + "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07", + "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba", + "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9", + "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287", + "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513", + "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0", + "sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782", + "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0", + "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92", + "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f", + "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2", + "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc", + "sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1", + "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c", + "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86", + "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4", + "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c", + "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34", + "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b", + 
"sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d", + "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c", + "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb", + "sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7", + "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737", + "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3", + "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d", + "sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358", + "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53", + "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78", + "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803", + "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a", + "sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f", + "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174", + "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5" + ], + "markers": "python_version >= '3.6'", + "version": "==6.0" + }, + "pyyaml-env-tag": { + "hashes": [ + "sha256:70092675bda14fdec33b31ba77e7543de9ddc88f2e5b99160396572d11525bdb", + "sha256:af31106dec8a4d68c60207c1886031cbf839b68aa7abccdb19868200532c2069" + ], + "markers": "python_version >= '3.6'", + "version": "==0.1" + }, + "six": { + "hashes": [ + "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926", + "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==1.16.0" + }, + "typing-extensions": { + "hashes": [ + "sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb", + "sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4" + ], + "markers": "python_version < '3.8'", + "version": "==4.5.0" + }, + "watchdog": { + "hashes": [ + "sha256:0e06ab8858a76e1219e68c7573dfeba9dd1c0219476c5a44d5333b01d7e1743a", + "sha256:13bbbb462ee42ec3c5723e1205be8ced776f05b100e4737518c67c8325cf6100", + "sha256:233b5817932685d39a7896b1090353fc8efc1ef99c9c054e46c8002561252fb8", + "sha256:25f70b4aa53bd743729c7475d7ec41093a580528b100e9a8c5b5efe8899592fc", + "sha256:2b57a1e730af3156d13b7fdddfc23dea6487fceca29fc75c5a868beed29177ae", + "sha256:336adfc6f5cc4e037d52db31194f7581ff744b67382eb6021c868322e32eef41", + "sha256:3aa7f6a12e831ddfe78cdd4f8996af9cf334fd6346531b16cec61c3b3c0d8da0", + "sha256:3ed7c71a9dccfe838c2f0b6314ed0d9b22e77d268c67e015450a29036a81f60f", + "sha256:4c9956d27be0bb08fc5f30d9d0179a855436e655f046d288e2bcc11adfae893c", + "sha256:4d98a320595da7a7c5a18fc48cb633c2e73cda78f93cac2ef42d42bf609a33f9", + "sha256:4f94069eb16657d2c6faada4624c39464f65c05606af50bb7902e036e3219be3", + "sha256:5113334cf8cf0ac8cd45e1f8309a603291b614191c9add34d33075727a967709", + "sha256:51f90f73b4697bac9c9a78394c3acbbd331ccd3655c11be1a15ae6fe289a8c83", + "sha256:5d9f3a10e02d7371cd929b5d8f11e87d4bad890212ed3901f9b4d68767bee759", + "sha256:7ade88d0d778b1b222adebcc0927428f883db07017618a5e684fd03b83342bd9", + "sha256:7c5f84b5194c24dd573fa6472685b2a27cc5a17fe5f7b6fd40345378ca6812e3", + "sha256:7e447d172af52ad204d19982739aa2346245cc5ba6f579d16dac4bfec226d2e7", + "sha256:8ae9cda41fa114e28faf86cb137d751a17ffd0316d1c34ccf2235e8a84365c7f", + "sha256:8f3ceecd20d71067c7fd4c9e832d4e22584318983cabc013dbf3f70ea95de346", + 
"sha256:9fac43a7466eb73e64a9940ac9ed6369baa39b3bf221ae23493a9ec4d0022674", + "sha256:a70a8dcde91be523c35b2bf96196edc5730edb347e374c7de7cd20c43ed95397", + "sha256:adfdeab2da79ea2f76f87eb42a3ab1966a5313e5a69a0213a3cc06ef692b0e96", + "sha256:ba07e92756c97e3aca0912b5cbc4e5ad802f4557212788e72a72a47ff376950d", + "sha256:c07253088265c363d1ddf4b3cdb808d59a0468ecd017770ed716991620b8f77a", + "sha256:c9d8c8ec7efb887333cf71e328e39cffbf771d8f8f95d308ea4125bf5f90ba64", + "sha256:d00e6be486affb5781468457b21a6cbe848c33ef43f9ea4a73b4882e5f188a44", + "sha256:d429c2430c93b7903914e4db9a966c7f2b068dd2ebdd2fa9b9ce094c7d459f33" + ], + "markers": "python_version >= '3.7'", + "version": "==3.0.0" + }, + "zipp": { + "hashes": [ + "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b", + "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556" + ], + "markers": "python_version >= '3.7'", + "version": "==3.15.0" + } + }, + "develop": {} +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/README.md b/vendor/github.com/testcontainers/testcontainers-go/README.md new file mode 100644 index 000000000..01ecc7334 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/README.md @@ -0,0 +1,17 @@ +# Testcontainers + +[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://github.com/codespaces/new?hide_repo_select=true&ref=main&repo=141451032&machine=standardLinux32gb&devcontainer_path=.devcontainer%2Fdevcontainer.json&location=EastUs) + +[![Main pipeline](https://github.com/testcontainers/testcontainers-go/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/testcontainers/testcontainers-go/actions/workflows/ci.yml) +[![Go Report Card](https://goreportcard.com/badge/github.com/testcontainers/testcontainers-go)](https://goreportcard.com/report/github.com/testcontainers/testcontainers-go) +[![GoDoc Reference](https://camo.githubusercontent.com/8609cfcb531fa0f5598a3d4353596fae9336cce3/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f79616e6777656e6d61692f686f772d746f2d6164642d62616467652d696e2d6769746875622d726561646d653f7374617475732e737667)](https://pkg.go.dev/github.com/testcontainers/testcontainers-go) + +_Testcontainers for Go_ is a Go package that makes it simple to create and clean up container-based dependencies for +automated integration/smoke tests. The clean, easy-to-use API enables developers to programmatically define containers +that should be run as part of a test and clean up those resources when the test is done. + +You can find more information about _Testcontainers for Go_ at [golang.testcontainers.org](https://golang.testcontainers.org), which is rendered from the [./docs](./docs) directory. + +## Using _Testcontainers for Go_ + +Please visit [the quickstart guide](https://golang.testcontainers.org/quickstart) to understand how to add the dependency to your Go project. diff --git a/vendor/github.com/testcontainers/testcontainers-go/RELEASING.md b/vendor/github.com/testcontainers/testcontainers-go/RELEASING.md new file mode 100644 index 000000000..3990216f7 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/RELEASING.md @@ -0,0 +1,203 @@ +# Releasing Testcontainers for Go + +In order to create a release, we have added a shell script that performs all the tasks for you, allowing a dry-run mode for checking it before creating the release. We are going to explain how to use it in this document. 
+
+## Prerequisites
+
+First, it's really important that you check that the [version.go](./internal/version.go) file is up-to-date, containing the right version you want to create. That file will be used by the automation to perform the release.
+Once the version file is correct in the repository, continue with the next step.
+
+Second, check that the git remote for `origin` is pointing to `github.com/testcontainers/testcontainers-go`. You can check it by running:
+
+```shell
+git remote -v
+```
+
+## Prepare the release
+
+Once the remote is properly set, please follow these steps:
+
+- Run the [pre-release.sh](./scripts/pre-release.sh) shell script, which runs in dry-run mode by default.
+- You can use the `DRY_RUN` variable to enable or disable the dry-run mode. By default, it's enabled.
+- To prepare for a release, updating the _Testcontainers for Go_ dependency for all the modules and examples without performing any Git operation, run:
+
+  DRY_RUN="false" ./scripts/pre-release.sh
+
+- The script will update the [mkdocs.yml](./mkdocs.yml) file, updating the `latest_version` field to the current version.
+- The script will update the `go.mod` files for each Go module and example module under the examples and modules directories, updating the version of the testcontainers-go dependency to the recently created tag.
+- The script will modify the docs for each Go module **that was not released yet**, updating the version of _Testcontainers for Go_ where it was added to the recently created tag.
+
+An example execution, with dry-run mode enabled:
+
+```shell
+sed "s/latest_version: .*/latest_version: v0.20.1/g" /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/mkdocs.yml > /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/mkdocs.yml.tmp
+mv /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/mkdocs.yml.tmp /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/mkdocs.yml
+sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" bigtable/go.mod > bigtable/go.mod.tmp
+mv bigtable/go.mod.tmp bigtable/go.mod
+sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" cockroachdb/go.mod > cockroachdb/go.mod.tmp
+mv cockroachdb/go.mod.tmp cockroachdb/go.mod
+sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" consul/go.mod > consul/go.mod.tmp
+mv consul/go.mod.tmp consul/go.mod
+sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" datastore/go.mod > datastore/go.mod.tmp
+mv datastore/go.mod.tmp datastore/go.mod
+sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" firestore/go.mod > firestore/go.mod.tmp
+mv firestore/go.mod.tmp firestore/go.mod
+sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" mongodb/go.mod > mongodb/go.mod.tmp
+mv mongodb/go.mod.tmp mongodb/go.mod
+sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" nginx/go.mod > nginx/go.mod.tmp
+mv nginx/go.mod.tmp nginx/go.mod
+sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" pubsub/go.mod > pubsub/go.mod.tmp
+mv pubsub/go.mod.tmp pubsub/go.mod
+sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" spanner/go.mod > spanner/go.mod.tmp
+mv spanner/go.mod.tmp spanner/go.mod
+sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" toxiproxy/go.mod > toxiproxy/go.mod.tmp
+mv toxiproxy/go.mod.tmp toxiproxy/go.mod
+go mod tidy
+go mod tidy
+go mod tidy
+go mod tidy
+go mod tidy
+go mod tidy
+go mod tidy
+go mod tidy
+go mod tidy
+go mod tidy
+go mod tidy
+sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" compose/go.mod > 
compose/go.mod.tmp +mv compose/go.mod.tmp compose/go.mod +sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" couchbase/go.mod > couchbase/go.mod.tmp +mv couchbase/go.mod.tmp couchbase/go.mod +sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" localstack/go.mod > localstack/go.mod.tmp +mv localstack/go.mod.tmp localstack/go.mod +sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" mysql/go.mod > mysql/go.mod.tmp +mv mysql/go.mod.tmp mysql/go.mod +sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" neo4j/go.mod > neo4j/go.mod.tmp +mv neo4j/go.mod.tmp neo4j/go.mod +sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" postgres/go.mod > postgres/go.mod.tmp +mv postgres/go.mod.tmp postgres/go.mod +sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" pulsar/go.mod > pulsar/go.mod.tmp +mv pulsar/go.mod.tmp pulsar/go.mod +sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" redis/go.mod > redis/go.mod.tmp +mv redis/go.mod.tmp redis/go.mod +sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" redpanda/go.mod > redpanda/go.mod.tmp +mv redpanda/go.mod.tmp redpanda/go.mod +sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" vault/go.mod > vault/go.mod.tmp +mv vault/go.mod.tmp vault/go.mod +go mod tidy +go mod tidy +go mod tidy +go mod tidy +go mod tidy +go mod tidy +go mod tidy +go mod tidy +go mod tidy +go mod tidy +sed "s/Not available until the next release of testcontainers-go :material-tag: main<\/span><\/a>/Since testcontainers-go :material-tag: v0.20.1<\/span><\/a>/g" couchbase.md > couchbase.md.tmp +mv couchbase.md.tmp couchbase.md +sed "s/Not available until the next release of testcontainers-go :material-tag: main<\/span><\/a>/Since testcontainers-go :material-tag: v0.20.1<\/span><\/a>/g" localstack.md > localstack.md.tmp +mv localstack.md.tmp localstack.md +sed "s/Not available until the next release of testcontainers-go :material-tag: main<\/span><\/a>/Since testcontainers-go :material-tag: v0.20.1<\/span><\/a>/g" mysql.md > mysql.md.tmp +mv mysql.md.tmp mysql.md +sed "s/Not available until the next release of testcontainers-go :material-tag: main<\/span><\/a>/Since testcontainers-go :material-tag: v0.20.1<\/span><\/a>/g" neo4j.md > neo4j.md.tmp +mv neo4j.md.tmp neo4j.md +sed "s/Not available until the next release of testcontainers-go :material-tag: main<\/span><\/a>/Since testcontainers-go :material-tag: v0.20.1<\/span><\/a>/g" postgres.md > postgres.md.tmp +mv postgres.md.tmp postgres.md +sed "s/Not available until the next release of testcontainers-go :material-tag: main<\/span><\/a>/Since testcontainers-go :material-tag: v0.20.1<\/span><\/a>/g" pulsar.md > pulsar.md.tmp +mv pulsar.md.tmp pulsar.md +sed "s/Not available until the next release of testcontainers-go :material-tag: main<\/span><\/a>/Since testcontainers-go :material-tag: v0.20.1<\/span><\/a>/g" redis.md > redis.md.tmp +mv redis.md.tmp redis.md +sed "s/Not available until the next release of testcontainers-go :material-tag: main<\/span><\/a>/Since testcontainers-go :material-tag: v0.20.1<\/span><\/a>/g" redpanda.md > redpanda.md.tmp +mv redpanda.md.tmp redpanda.md +sed "s/Not available until the next release of testcontainers-go :material-tag: main<\/span><\/a>/Since testcontainers-go :material-tag: v0.20.1<\/span><\/a>/g" vault.md > vault.md.tmp +mv vault.md.tmp vault.md +``` + +## Performing a release + +Once you are satisfied with the modified files in the git state: + +- Run the [release.sh](./scripts/release.sh) shell script to create the release in dry-run mode. 
+- You can use the `DRY_RUN` variable to enable or disable the dry-run mode. By default, it's enabled.
+
+  DRY_RUN="false" ./scripts/release.sh
+
+- You can define the bump type using the `BUMP_TYPE` environment variable. The default value is `minor`, but you can also use `major` or `patch` (the script will fail if the value is not one of these three):
+
+  BUMP_TYPE="major" ./scripts/release.sh
+
+- The script will commit the current state of the git repository, if the `DRY_RUN` variable is set to `false`. The modified files are the ones modified by the `pre-release.sh` script.
+- The script will create a git tag with the current value of the [version.go](./internal/version.go) file, starting with `v`: e.g. `v0.18.0`, for the following Go modules:
+  - the root module, representing the Testcontainers for Go library.
+  - all the Go modules living in both the `examples` and `modules` directories. The git tag value for these Go modules will be created using this naming convention:
+
+    "${directory}/${module_name}/${version}", e.g. "examples/mysql/v0.18.0", "modules/compose/v0.18.0"
+
+- The script will update the [version.go](./internal/version.go) file, bumping the next development version according to the `BUMP_TYPE` environment variable; a minimal sketch of this bump logic follows the list. For example, if the current version is `v0.18.0` and the bump type is `minor`, the script will update the [version.go](./internal/version.go) file with the next development version `v0.19.0`.
+- The script will create a commit in the **main** branch if the `DRY_RUN` variable is set to `false`.
+- The script will push the main branch including the tags to the upstream repository, https://github.com/testcontainers/testcontainers-go, if the `DRY_RUN` variable is set to `false`.
+- Finally, the script will trigger the Golang proxy to update the modules in https://proxy.golang.org/, if the `DRY_RUN` variable is set to `false`. 
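+
+The version bump itself is performed by the shell scripts referenced above; the snippet below is only a hypothetical Go sketch of the semver bump rules described in this list, shown to make the `BUMP_TYPE` behaviour concrete. The `nextVersion` helper does not exist in this repository.
+
+```go
+package main
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// nextVersion is an illustrative helper (not part of scripts/release.sh): it
+// applies a "major", "minor" or "patch" bump to a "vX.Y.Z" version string and
+// fails on any other bump type or version format.
+func nextVersion(current, bumpType string) (string, error) {
+	parts := strings.Split(strings.TrimPrefix(current, "v"), ".")
+	if len(parts) != 3 {
+		return "", fmt.Errorf("unexpected version format: %s", current)
+	}
+	nums := make([]int, 3)
+	for i, p := range parts {
+		n, err := strconv.Atoi(p)
+		if err != nil {
+			return "", fmt.Errorf("invalid version component %q: %w", p, err)
+		}
+		nums[i] = n
+	}
+	switch bumpType {
+	case "major":
+		nums[0], nums[1], nums[2] = nums[0]+1, 0, 0
+	case "minor":
+		nums[1], nums[2] = nums[1]+1, 0
+	case "patch":
+		nums[2]++
+	default:
+		return "", fmt.Errorf("unsupported BUMP_TYPE: %s", bumpType)
+	}
+	return fmt.Sprintf("v%d.%d.%d", nums[0], nums[1], nums[2]), nil
+}
+
+func main() {
+	next, err := nextVersion("v0.18.0", "minor")
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(next) // prints "v0.19.0", matching the example above
+}
+```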
+ +An example execution, with dry-run mode enabled: + +``` +$ ./scripts/release.sh +Current version: v0.20.1 +git add /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/internal/version.go +git add /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/mkdocs.yml +git add examples/**/go.* +git add modules/**/go.* +git commit -m chore: use new version (v0.20.1) in modules and examples +git tag v0.20.1 +git tag examples/bigtable/v0.20.1 +git tag examples/cockroachdb/v0.20.1 +git tag examples/consul/v0.20.1 +git tag examples/datastore/v0.20.1 +git tag examples/firestore/v0.20.1 +git tag examples/mongodb/v0.20.1 +git tag examples/nginx/v0.20.1 +git tag examples/pubsub/v0.20.1 +git tag examples/spanner/v0.20.1 +git tag examples/toxiproxy/v0.20.1 +git tag modules/compose/v0.20.1 +git tag modules/couchbase/v0.20.1 +git tag modules/localstack/v0.20.1 +git tag modules/mysql/v0.20.1 +git tag modules/neo4j/v0.20.1 +git tag modules/postgres/v0.20.1 +git tag modules/pulsar/v0.20.1 +git tag modules/redis/v0.20.1 +git tag modules/redpanda/v0.20.1 +git tag modules/vault/v0.20.1 +WARNING: The requested image's platform (linux/amd64) does not match the detected host platform (linux/arm64/v8) and no specific platform was requested +Producing a minor bump of the version, from 0.20.1 to 0.21.0 +sed "s/const Version = ".*"/const Version = "0.21.0"/g" /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/internal/version.go > /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/internal/version.go.tmp +mv /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/internal/version.go.tmp /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/internal/version.go +git add /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/internal/version.go +git commit -m chore: prepare for next minor development cycle (0.21.0) +git push origin main --tags +curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/@v/v0.20.1.info +curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/examples/bigtable/@v/v0.20.1.info +curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/examples/cockroachdb/@v/v0.20.1.info +curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/examples/consul/@v/v0.20.1.info +curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/examples/datastore/@v/v0.20.1.info +curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/examples/firestore/@v/v0.20.1.info +curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/examples/mongodb/@v/v0.20.1.info +curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/examples/nginx/@v/v0.20.1.info +curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/examples/pubsub/@v/v0.20.1.info +curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/examples/spanner/@v/v0.20.1.info +curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/examples/toxiproxy/@v/v0.20.1.info +curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/modules/compose/@v/v0.20.1.info +curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/modules/couchbase/@v/v0.20.1.info +curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/modules/localstack/@v/v0.20.1.info +curl 
https://proxy.golang.org/github.com/testcontainers/testcontainers-go/modules/mysql/@v/v0.20.1.info +curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/modules/neo4j/@v/v0.20.1.info +curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/modules/postgres/@v/v0.20.1.info +curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/modules/pulsar/@v/v0.20.1.info +curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/modules/redis/@v/v0.20.1.info +curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/modules/redpanda/@v/v0.20.1.info +curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/modules/vault/@v/v0.20.1.info +``` + +Right after that, you have to: +- Verify that the commits are in the upstream repository, otherwise, update it with the current state of the main branch. diff --git a/vendor/github.com/testcontainers/testcontainers-go/commons-test.mk b/vendor/github.com/testcontainers/testcontainers-go/commons-test.mk new file mode 100644 index 000000000..4a1213d96 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/commons-test.mk @@ -0,0 +1,21 @@ +.PHONY: dependencies-scan +dependencies-scan: + @echo ">> Scanning dependencies in $(CURDIR)..." + go list -json -m all | docker run --rm -i sonatypecommunity/nancy:latest sleuth --skip-update-check + +.PHONY: test-% +test-%: + @echo "Running $* tests..." + go run gotest.tools/gotestsum \ + --format short-verbose \ + --rerun-fails=5 \ + --packages="./..." \ + --junitfile TEST-$*.xml + +.PHONY: tools +tools: + go mod download + +.PHONY: tools-tidy +tools-tidy: + go mod tidy diff --git a/vendor/github.com/testcontainers/testcontainers-go/config.go b/vendor/github.com/testcontainers/testcontainers-go/config.go new file mode 100644 index 000000000..91a333107 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/config.go @@ -0,0 +1,29 @@ +package testcontainers + +import ( + "github.com/testcontainers/testcontainers-go/internal/config" +) + +// TestcontainersConfig represents the configuration for Testcontainers +type TestcontainersConfig struct { + Host string `properties:"docker.host,default="` // Deprecated: use Config.Host instead + TLSVerify int `properties:"docker.tls.verify,default=0"` // Deprecated: use Config.TLSVerify instead + CertPath string `properties:"docker.cert.path,default="` // Deprecated: use Config.CertPath instead + RyukDisabled bool `properties:"ryuk.disabled,default=false"` // Deprecated: use Config.RyukDisabled instead + RyukPrivileged bool `properties:"ryuk.container.privileged,default=false"` // Deprecated: use Config.RyukPrivileged instead + Config config.Config +} + +// ReadConfig reads from testcontainers properties file, storing the result in a singleton instance +// of the TestcontainersConfig struct +func ReadConfig() TestcontainersConfig { + cfg := config.Read() + return TestcontainersConfig{ + Host: cfg.Host, + TLSVerify: cfg.TLSVerify, + CertPath: cfg.CertPath, + RyukDisabled: cfg.RyukDisabled, + RyukPrivileged: cfg.RyukPrivileged, + Config: cfg, + } +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/container.go b/vendor/github.com/testcontainers/testcontainers-go/container.go new file mode 100644 index 000000000..77ce49e32 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/container.go @@ -0,0 +1,263 @@ +package testcontainers + +import ( + "context" + "errors" + "fmt" + "io" + "path/filepath" + "time" + + 
"github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/pkg/archive" + "github.com/docker/go-connections/nat" + tcexec "github.com/testcontainers/testcontainers-go/exec" + "github.com/testcontainers/testcontainers-go/internal/testcontainersdocker" + "github.com/testcontainers/testcontainers-go/wait" +) + +// DeprecatedContainer shows methods that were supported before, but are now deprecated +// Deprecated: Use Container +type DeprecatedContainer interface { + GetHostEndpoint(ctx context.Context, port string) (string, string, error) + GetIPAddress(ctx context.Context) (string, error) + LivenessCheckPorts(ctx context.Context) (nat.PortSet, error) + Terminate(ctx context.Context) error +} + +// Container allows getting info about and controlling a single container instance +type Container interface { + GetContainerID() string // get the container id from the provider + Endpoint(context.Context, string) (string, error) // get proto://ip:port string for the first exposed port + PortEndpoint(context.Context, nat.Port, string) (string, error) // get proto://ip:port string for the given exposed port + Host(context.Context) (string, error) // get host where the container port is exposed + MappedPort(context.Context, nat.Port) (nat.Port, error) // get externally mapped port for a container port + Ports(context.Context) (nat.PortMap, error) // get all exposed ports + SessionID() string // get session id + IsRunning() bool + Start(context.Context) error // start the container + Stop(context.Context, *time.Duration) error // stop the container + Terminate(context.Context) error // terminate the container + Logs(context.Context) (io.ReadCloser, error) // Get logs of the container + FollowOutput(LogConsumer) + StartLogProducer(context.Context) error + StopLogProducer() error + Name(context.Context) (string, error) // get container name + State(context.Context) (*types.ContainerState, error) // returns container's running state + Networks(context.Context) ([]string, error) // get container networks + NetworkAliases(context.Context) (map[string][]string, error) // get container network aliases for a network + Exec(ctx context.Context, cmd []string, options ...tcexec.ProcessOption) (int, io.Reader, error) + ContainerIP(context.Context) (string, error) // get container ip + ContainerIPs(context.Context) ([]string, error) // get all container IPs + CopyToContainer(ctx context.Context, fileContent []byte, containerFilePath string, fileMode int64) error + CopyDirToContainer(ctx context.Context, hostDirPath string, containerParentPath string, fileMode int64) error + CopyFileToContainer(ctx context.Context, hostFilePath string, containerFilePath string, fileMode int64) error + CopyFileFromContainer(ctx context.Context, filePath string) (io.ReadCloser, error) +} + +// ImageBuildInfo defines what is needed to build an image +type ImageBuildInfo interface { + GetContext() (io.Reader, error) // the path to the build context + GetDockerfile() string // the relative path to the Dockerfile, including the fileitself + ShouldPrintBuildLog() bool // allow build log to be printed to stdout + ShouldBuildImage() bool // return true if the image needs to be built + GetBuildArgs() map[string]*string // return the environment args used to build the from Dockerfile + GetAuthConfigs() map[string]types.AuthConfig // return the auth configs to be able to pull from an authenticated docker registry +} + +// FromDockerfile 
represents the parameters needed to build an image from a Dockerfile +// rather than using a pre-built one +type FromDockerfile struct { + Context string // the path to the context of of the docker build + ContextArchive io.Reader // the tar archive file to send to docker that contains the build context + Dockerfile string // the path from the context to the Dockerfile for the image, defaults to "Dockerfile" + BuildArgs map[string]*string // enable user to pass build args to docker daemon + PrintBuildLog bool // enable user to print build log + AuthConfigs map[string]types.AuthConfig // Deprecated. Testcontainers will detect registry credentials automatically. Enable auth configs to be able to pull from an authenticated docker registry +} + +type ContainerFile struct { + HostFilePath string + ContainerFilePath string + FileMode int64 +} + +// ContainerRequest represents the parameters used to get a running container +type ContainerRequest struct { + FromDockerfile + Image string + Entrypoint []string + Env map[string]string + ExposedPorts []string // allow specifying protocol info + Cmd []string + Labels map[string]string + Mounts ContainerMounts + Tmpfs map[string]string + RegistryCred string // Deprecated: Testcontainers will detect registry credentials automatically + WaitingFor wait.Strategy + Name string // for specifying container name + Hostname string + ExtraHosts []string // Deprecated: Use HostConfigModifier instead + Privileged bool // For starting privileged container + Networks []string // for specifying network names + NetworkAliases map[string][]string // for specifying network aliases + NetworkMode container.NetworkMode // Deprecated: Use HostConfigModifier instead + Resources container.Resources // Deprecated: Use HostConfigModifier instead + Files []ContainerFile // files which will be copied when container starts + User string // for specifying uid:gid + SkipReaper bool // Deprecated: The reaper is globally controlled by the .testcontainers.properties file or the TESTCONTAINERS_RYUK_DISABLED environment variable + ReaperImage string // Deprecated: use WithImageName ContainerOption instead. Alternative reaper image + ReaperOptions []ContainerOption // options for the reaper + AutoRemove bool // Deprecated: Use HostConfigModifier instead. If set to true, the container will be removed from the host when stopped + AlwaysPullImage bool // Always pull image + ImagePlatform string // ImagePlatform describes the platform which the image runs on. + Binds []string // Deprecated: Use HostConfigModifier instead + ShmSize int64 // Amount of memory shared with the host (in bytes) + CapAdd []string // Deprecated: Use HostConfigModifier instead. Add Linux capabilities + CapDrop []string // Deprecated: Use HostConfigModifier instead. 
Drop Linux capabilities + ConfigModifier func(*container.Config) // Modifier for the config before container creation + HostConfigModifier func(*container.HostConfig) // Modifier for the host config before container creation + EnpointSettingsModifier func(map[string]*network.EndpointSettings) // Modifier for the network settings before container creation + LifecycleHooks []ContainerLifecycleHooks // define hooks to be executed during container lifecycle +} + +// containerOptions functional options for a container +type containerOptions struct { + ImageName string + RegistryCredentials string // Deprecated: Testcontainers will detect registry credentials automatically +} + +// functional option for setting the reaper image +type ContainerOption func(*containerOptions) + +// WithImageName sets the reaper image name +func WithImageName(imageName string) ContainerOption { + return func(o *containerOptions) { + o.ImageName = imageName + } +} + +// Deprecated: Testcontainers will detect registry credentials automatically +// WithRegistryCredentials sets the reaper registry credentials +func WithRegistryCredentials(registryCredentials string) ContainerOption { + return func(o *containerOptions) { + o.RegistryCredentials = registryCredentials + } +} + +// Validate ensures that the ContainerRequest does not have invalid parameters configured to it +// ex. make sure you are not specifying both an image as well as a context +func (c *ContainerRequest) Validate() error { + validationMethods := []func() error{ + c.validateContextAndImage, + c.validateContextOrImageIsSpecified, + c.validateMounts, + } + + var err error + for _, validationMethod := range validationMethods { + err = validationMethod() + if err != nil { + return err + } + } + + return nil +} + +// GetContext retrieve the build context for the request +func (c *ContainerRequest) GetContext() (io.Reader, error) { + if c.ContextArchive != nil { + return c.ContextArchive, nil + } + + // always pass context as absolute path + abs, err := filepath.Abs(c.Context) + if err != nil { + return nil, fmt.Errorf("error getting absolute path: %w", err) + } + c.Context = abs + + buildContext, err := archive.TarWithOptions(c.Context, &archive.TarOptions{}) + if err != nil { + return nil, err + } + + return buildContext, nil +} + +// GetBuildArgs returns the env args to be used when creating from Dockerfile +func (c *ContainerRequest) GetBuildArgs() map[string]*string { + return c.FromDockerfile.BuildArgs +} + +// GetDockerfile returns the Dockerfile from the ContainerRequest, defaults to "Dockerfile" +func (c *ContainerRequest) GetDockerfile() string { + f := c.FromDockerfile.Dockerfile + if f == "" { + return "Dockerfile" + } + + return f +} + +// GetAuthConfigs returns the auth configs to be able to pull from an authenticated docker registry +func (c *ContainerRequest) GetAuthConfigs() map[string]types.AuthConfig { + images, err := testcontainersdocker.ExtractImagesFromDockerfile(filepath.Join(c.Context, c.GetDockerfile()), c.GetBuildArgs()) + if err != nil { + return map[string]types.AuthConfig{} + } + + authConfigs := map[string]types.AuthConfig{} + for _, image := range images { + registry, authConfig, err := DockerImageAuth(context.Background(), image) + if err != nil { + continue + } + + authConfigs[registry] = authConfig + } + + return authConfigs +} + +func (c *ContainerRequest) ShouldBuildImage() bool { + return c.FromDockerfile.Context != "" || c.FromDockerfile.ContextArchive != nil +} + +func (c *ContainerRequest) ShouldPrintBuildLog() bool { + 
return c.FromDockerfile.PrintBuildLog +} + +func (c *ContainerRequest) validateContextAndImage() error { + if c.FromDockerfile.Context != "" && c.Image != "" { + return errors.New("you cannot specify both an Image and Context in a ContainerRequest") + } + + return nil +} + +func (c *ContainerRequest) validateContextOrImageIsSpecified() error { + if c.FromDockerfile.Context == "" && c.FromDockerfile.ContextArchive == nil && c.Image == "" { + return errors.New("you must specify either a build context or an image") + } + + return nil +} + +func (c *ContainerRequest) validateMounts() error { + targets := make(map[string]bool, len(c.Mounts)) + + for idx := range c.Mounts { + m := c.Mounts[idx] + targetPath := m.Target.Target() + if targets[targetPath] { + return fmt.Errorf("%w: %s", ErrDuplicateMountTarget, targetPath) + } else { + targets[targetPath] = true + } + } + return nil +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/docker.go b/vendor/github.com/testcontainers/testcontainers-go/docker.go new file mode 100644 index 000000000..5e935c678 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/docker.go @@ -0,0 +1,1404 @@ +package testcontainers + +import ( + "archive/tar" + "bufio" + "bytes" + "context" + "encoding/base64" + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "io" + "net/url" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/cenkalti/backoff/v4" + "github.com/containerd/containerd/platforms" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/client" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/go-connections/nat" + "github.com/google/uuid" + "github.com/moby/term" + specs "github.com/opencontainers/image-spec/specs-go/v1" + tcexec "github.com/testcontainers/testcontainers-go/exec" + "github.com/testcontainers/testcontainers-go/internal" + "github.com/testcontainers/testcontainers-go/internal/testcontainersdocker" + "github.com/testcontainers/testcontainers-go/internal/testcontainerssession" + "github.com/testcontainers/testcontainers-go/wait" +) + +var ( + // Implement interfaces + _ Container = (*DockerContainer)(nil) + + logOnce sync.Once + ErrDuplicateMountTarget = errors.New("duplicate mount target detected") +) + +const ( + Bridge = "bridge" // Bridge network name (as well as driver) + Podman = "podman" + ReaperDefault = "reaper_default" // Default network name when bridge is not available + packagePath = "github.com/testcontainers/testcontainers-go" + + logStoppedForOutOfSyncMessage = "Stopping log consumer: Headers out of sync" +) + +// DockerContainer represents a container started using Docker +type DockerContainer struct { + // Container ID from Docker + ID string + WaitingFor wait.Strategy + Image string + + isRunning bool + imageWasBuilt bool + provider *DockerProvider + sessionID uuid.UUID + terminationSignal chan bool + consumers []LogConsumer + raw *types.ContainerJSON + stopProducer chan bool + logger Logging + lifecycleHooks []ContainerLifecycleHooks +} + +// SetLogger sets the logger for the container +func (c *DockerContainer) SetLogger(logger Logging) { + c.logger = logger +} + +// SetProvider sets the provider for the container +func (c *DockerContainer) SetProvider(provider *DockerProvider) { + c.provider = provider +} + +func (c *DockerContainer) GetContainerID() string { + 
return c.ID +} + +func (c *DockerContainer) IsRunning() bool { + return c.isRunning +} + +// Endpoint gets proto://host:port string for the first exposed port +// Will returns just host:port if proto is "" +func (c *DockerContainer) Endpoint(ctx context.Context, proto string) (string, error) { + ports, err := c.Ports(ctx) + if err != nil { + return "", err + } + + // get first port + var firstPort nat.Port + for p := range ports { + firstPort = p + break + } + + return c.PortEndpoint(ctx, firstPort, proto) +} + +// PortEndpoint gets proto://host:port string for the given exposed port +// Will returns just host:port if proto is "" +func (c *DockerContainer) PortEndpoint(ctx context.Context, port nat.Port, proto string) (string, error) { + host, err := c.Host(ctx) + if err != nil { + return "", err + } + + outerPort, err := c.MappedPort(ctx, port) + if err != nil { + return "", err + } + + protoFull := "" + if proto != "" { + protoFull = fmt.Sprintf("%s://", proto) + } + + return fmt.Sprintf("%s%s:%s", protoFull, host, outerPort.Port()), nil +} + +// Host gets host (ip or name) of the docker daemon where the container port is exposed +// Warning: this is based on your Docker host setting. Will fail if using an SSH tunnel +// You can use the "TC_HOST" env variable to set this yourself +func (c *DockerContainer) Host(ctx context.Context) (string, error) { + host, err := c.provider.DaemonHost(ctx) + if err != nil { + return "", err + } + return host, nil +} + +// MappedPort gets externally mapped port for a container port +func (c *DockerContainer) MappedPort(ctx context.Context, port nat.Port) (nat.Port, error) { + inspect, err := c.inspectContainer(ctx) + if err != nil { + return "", err + } + if inspect.ContainerJSONBase.HostConfig.NetworkMode == "host" { + return port, nil + } + ports, err := c.Ports(ctx) + if err != nil { + return "", err + } + + for k, p := range ports { + if k.Port() != port.Port() { + continue + } + if port.Proto() != "" && k.Proto() != port.Proto() { + continue + } + if len(p) == 0 { + continue + } + return nat.NewPort(k.Proto(), p[0].HostPort) + } + + return "", errors.New("port not found") +} + +// Ports gets the exposed ports for the container. +func (c *DockerContainer) Ports(ctx context.Context) (nat.PortMap, error) { + inspect, err := c.inspectContainer(ctx) + if err != nil { + return nil, err + } + return inspect.NetworkSettings.Ports, nil +} + +// SessionID gets the current session id +func (c *DockerContainer) SessionID() string { + return c.sessionID.String() +} + +// Start will start an already created container +func (c *DockerContainer) Start(ctx context.Context) error { + err := c.startingHook(ctx) + if err != nil { + return err + } + + if err := c.provider.client.ContainerStart(ctx, c.ID, types.ContainerStartOptions{}); err != nil { + return err + } + defer c.provider.Close() + + err = c.startedHook(ctx) + if err != nil { + return err + } + + return nil +} + +// Stop will stop an already started container +// +// In case the container fails to stop +// gracefully within a time frame specified by the timeout argument, +// it is forcefully terminated (killed). +// +// If the timeout is nil, the container's StopTimeout value is used, if set, +// otherwise the engine default. A negative timeout value can be specified, +// meaning no timeout, i.e. no forceful termination is performed. 
+func (c *DockerContainer) Stop(ctx context.Context, timeout *time.Duration) error { + err := c.stoppingHook(ctx) + if err != nil { + return err + } + + var options container.StopOptions + + if timeout != nil { + timeoutSeconds := int(timeout.Seconds()) + options.Timeout = &timeoutSeconds + } + + if err := c.provider.client.ContainerStop(ctx, c.ID, options); err != nil { + return err + } + defer c.provider.Close() + + c.isRunning = false + + err = c.stoppedHook(ctx) + if err != nil { + return err + } + + return nil +} + +// Terminate is used to kill the container. It is usually triggered by as defer function. +func (c *DockerContainer) Terminate(ctx context.Context) error { + err := c.terminatingHook(ctx) + if err != nil { + return err + } + + err = c.StopLogProducer() + if err != nil { + return err + } + + select { + // close reaper if it was created + case c.terminationSignal <- true: + default: + } + err = c.provider.client.ContainerRemove(ctx, c.GetContainerID(), types.ContainerRemoveOptions{ + RemoveVolumes: true, + Force: true, + }) + if err != nil { + return err + } + + err = c.terminatedHook(ctx) + if err != nil { + return err + } + + if c.imageWasBuilt { + _, err := c.provider.client.ImageRemove(ctx, c.Image, types.ImageRemoveOptions{ + Force: true, + PruneChildren: true, + }) + if err != nil { + return err + } + } + + if err := c.provider.client.Close(); err != nil { + return err + } + + c.sessionID = uuid.UUID{} + c.isRunning = false + return nil +} + +// update container raw info +func (c *DockerContainer) inspectRawContainer(ctx context.Context) (*types.ContainerJSON, error) { + inspect, err := c.provider.client.ContainerInspect(ctx, c.ID) + if err != nil { + return nil, err + } + defer c.provider.Close() + + c.raw = &inspect + return c.raw, nil +} + +func (c *DockerContainer) inspectContainer(ctx context.Context) (*types.ContainerJSON, error) { + inspect, err := c.provider.client.ContainerInspect(ctx, c.ID) + if err != nil { + return nil, err + } + defer c.provider.Close() + + return &inspect, nil +} + +// Logs will fetch both STDOUT and STDERR from the current container. Returns a +// ReadCloser and leaves it up to the caller to extract what it wants. +func (c *DockerContainer) Logs(ctx context.Context) (io.ReadCloser, error) { + + const streamHeaderSize = 8 + + options := types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + } + + rc, err := c.provider.client.ContainerLogs(ctx, c.ID, options) + if err != nil { + return nil, err + } + defer c.provider.Close() + + pr, pw := io.Pipe() + r := bufio.NewReader(rc) + + go func() { + var lineStarted = true + for err == nil { + line, isPrefix, err := r.ReadLine() + + if lineStarted && len(line) >= streamHeaderSize { + line = line[streamHeaderSize:] // trim stream header + lineStarted = false + } + if !isPrefix { + lineStarted = true + } + + _, errW := pw.Write(line) + if errW != nil { + return + } + + if !isPrefix { + _, errW := pw.Write([]byte("\n")) + if errW != nil { + return + } + } + + if err != nil { + _ = pw.CloseWithError(err) + return + } + } + }() + + return pr, nil +} + +// FollowOutput adds a LogConsumer to be sent logs from the container's +// STDOUT and STDERR +func (c *DockerContainer) FollowOutput(consumer LogConsumer) { + c.consumers = append(c.consumers, consumer) +} + +// Name gets the name of the container. 
+func (c *DockerContainer) Name(ctx context.Context) (string, error) { + inspect, err := c.inspectContainer(ctx) + if err != nil { + return "", err + } + return inspect.Name, nil +} + +// State returns container's running state +func (c *DockerContainer) State(ctx context.Context) (*types.ContainerState, error) { + inspect, err := c.inspectRawContainer(ctx) + if err != nil { + if c.raw != nil { + return c.raw.State, err + } + return nil, err + } + return inspect.State, nil +} + +// Networks gets the names of the networks the container is attached to. +func (c *DockerContainer) Networks(ctx context.Context) ([]string, error) { + inspect, err := c.inspectContainer(ctx) + if err != nil { + return []string{}, err + } + + networks := inspect.NetworkSettings.Networks + + n := []string{} + + for k := range networks { + n = append(n, k) + } + + return n, nil +} + +// ContainerIP gets the IP address of the primary network within the container. +func (c *DockerContainer) ContainerIP(ctx context.Context) (string, error) { + inspect, err := c.inspectContainer(ctx) + if err != nil { + return "", err + } + + ip := inspect.NetworkSettings.IPAddress + if ip == "" { + // use IP from "Networks" if only single network defined + networks := inspect.NetworkSettings.Networks + if len(networks) == 1 { + for _, v := range networks { + ip = v.IPAddress + } + } + } + + return ip, nil +} + +// ContainerIPs gets the IP addresses of all the networks within the container. +func (c *DockerContainer) ContainerIPs(ctx context.Context) ([]string, error) { + ips := make([]string, 0) + + inspect, err := c.inspectContainer(ctx) + if err != nil { + return nil, err + } + + networks := inspect.NetworkSettings.Networks + for _, nw := range networks { + ips = append(ips, nw.IPAddress) + } + + return ips, nil +} + +// NetworkAliases gets the aliases of the container for the networks it is attached to. 
+func (c *DockerContainer) NetworkAliases(ctx context.Context) (map[string][]string, error) { + inspect, err := c.inspectContainer(ctx) + if err != nil { + return map[string][]string{}, err + } + + networks := inspect.NetworkSettings.Networks + + a := map[string][]string{} + + for k := range networks { + a[k] = networks[k].Aliases + } + + return a, nil +} + +func (c *DockerContainer) Exec(ctx context.Context, cmd []string, options ...tcexec.ProcessOption) (int, io.Reader, error) { + cli := c.provider.client + response, err := cli.ContainerExecCreate(ctx, c.ID, types.ExecConfig{ + Cmd: cmd, + Detach: false, + AttachStdout: true, + AttachStderr: true, + }) + if err != nil { + return 0, nil, err + } + + hijack, err := cli.ContainerExecAttach(ctx, response.ID, types.ExecStartCheck{}) + if err != nil { + return 0, nil, err + } + + opt := &tcexec.ProcessOptions{ + Reader: hijack.Reader, + } + + for _, o := range options { + o.Apply(opt) + } + + var exitCode int + for { + execResp, err := cli.ContainerExecInspect(ctx, response.ID) + if err != nil { + return 0, nil, err + } + + if !execResp.Running { + exitCode = execResp.ExitCode + break + } + + time.Sleep(100 * time.Millisecond) + } + + return exitCode, opt.Reader, nil +} + +type FileFromContainer struct { + underlying *io.ReadCloser + tarreader *tar.Reader +} + +func (fc *FileFromContainer) Read(b []byte) (int, error) { + return (*fc.tarreader).Read(b) +} + +func (fc *FileFromContainer) Close() error { + return (*fc.underlying).Close() +} + +func (c *DockerContainer) CopyFileFromContainer(ctx context.Context, filePath string) (io.ReadCloser, error) { + r, _, err := c.provider.client.CopyFromContainer(ctx, c.ID, filePath) + if err != nil { + return nil, err + } + defer c.provider.Close() + + tarReader := tar.NewReader(r) + + // if we got here we have exactly one file in the TAR-stream + // so we advance the index by one so the next call to Read will start reading it + _, err = tarReader.Next() + if err != nil { + return nil, err + } + + ret := &FileFromContainer{ + underlying: &r, + tarreader: tarReader, + } + + return ret, nil +} + +// CopyDirToContainer copies the contents of a directory to a parent path in the container. 
This parent path must exist in the container first +// as we cannot create it +func (c *DockerContainer) CopyDirToContainer(ctx context.Context, hostDirPath string, containerParentPath string, fileMode int64) error { + dir, err := isDir(hostDirPath) + if err != nil { + return err + } + + if !dir { + // it's not a dir: let the consumer to handle an error + return fmt.Errorf("path %s is not a directory", hostDirPath) + } + + buff, err := tarDir(hostDirPath, fileMode) + if err != nil { + return err + } + + // create the directory under its parent + parent := filepath.Dir(containerParentPath) + + err = c.provider.client.CopyToContainer(ctx, c.ID, parent, buff, types.CopyToContainerOptions{}) + if err != nil { + return err + } + defer c.provider.Close() + + return nil +} + +func (c *DockerContainer) CopyFileToContainer(ctx context.Context, hostFilePath string, containerFilePath string, fileMode int64) error { + dir, err := isDir(hostFilePath) + if err != nil { + return err + } + + if dir { + return c.CopyDirToContainer(ctx, hostFilePath, containerFilePath, fileMode) + } + + fileContent, err := os.ReadFile(hostFilePath) + if err != nil { + return err + } + return c.CopyToContainer(ctx, fileContent, containerFilePath, fileMode) +} + +// CopyToContainer copies fileContent data to a file in container +func (c *DockerContainer) CopyToContainer(ctx context.Context, fileContent []byte, containerFilePath string, fileMode int64) error { + buffer, err := tarFile(fileContent, containerFilePath, fileMode) + if err != nil { + return err + } + + err = c.provider.client.CopyToContainer(ctx, c.ID, filepath.Dir(containerFilePath), buffer, types.CopyToContainerOptions{}) + if err != nil { + return err + } + defer c.provider.Close() + + return nil +} + +// StartLogProducer will start a concurrent process that will continuously read logs +// from the container and will send them to each added LogConsumer +func (c *DockerContainer) StartLogProducer(ctx context.Context) error { + if c.stopProducer != nil { + return errors.New("log producer already started") + } + + c.stopProducer = make(chan bool) + + go func(stop <-chan bool) { + since := "" + // if the socket is closed we will make additional logs request with updated Since timestamp + BEGIN: + options := types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Follow: true, + Since: since, + } + + ctx, cancel := context.WithTimeout(ctx, time.Second*5) + defer cancel() + + r, err := c.provider.client.ContainerLogs(ctx, c.GetContainerID(), options) + if err != nil { + // if we can't get the logs, panic, we can't return an error to anything + // from within this goroutine + panic(err) + } + defer c.provider.Close() + + for { + select { + case <-stop: + err := r.Close() + if err != nil { + // we can't close the read closer, this should never happen + panic(err) + } + return + default: + h := make([]byte, 8) + _, err := io.ReadFull(r, h) + if err != nil { + // proper type matching requires https://go-review.googlesource.com/c/go/+/250357/ (go 1.16) + if strings.Contains(err.Error(), "use of closed network connection") { + now := time.Now() + since = fmt.Sprintf("%d.%09d", now.Unix(), int64(now.Nanosecond())) + goto BEGIN + } + if errors.Is(err, context.DeadlineExceeded) { + // Probably safe to continue here + continue + } + _, _ = fmt.Fprintf(os.Stderr, "container log error: %+v. %s", err, logStoppedForOutOfSyncMessage) + // if we would continue here, the next header-read will result into random data... 
+ return + } + + count := binary.BigEndian.Uint32(h[4:]) + if count == 0 { + continue + } + logType := h[0] + if logType > 2 { + _, _ = fmt.Fprintf(os.Stderr, "received invalid log type: %d", logType) + // sometimes docker returns logType = 3 which is an undocumented log type, so treat it as stdout + logType = 1 + } + + // a map of the log type --> int representation in the header, notice the first is blank, this is stdin, but the go docker client doesn't allow following that in logs + logTypes := []string{"", StdoutLog, StderrLog} + + b := make([]byte, count) + _, err = io.ReadFull(r, b) + if err != nil { + // TODO: add-logger: use logger to log out this error + _, _ = fmt.Fprintf(os.Stderr, "error occurred reading log with known length %s", err.Error()) + if errors.Is(err, context.DeadlineExceeded) { + // Probably safe to continue here + continue + } + // we can not continue here as the next read most likely will not be the next header + _, _ = fmt.Fprintln(os.Stderr, logStoppedForOutOfSyncMessage) + return + } + for _, c := range c.consumers { + c.Accept(Log{ + LogType: logTypes[logType], + Content: b, + }) + } + } + } + }(c.stopProducer) + + return nil +} + +// StopLogProducer will stop the concurrent process that is reading logs +// and sending them to each added LogConsumer +func (c *DockerContainer) StopLogProducer() error { + if c.stopProducer != nil { + c.stopProducer <- true + c.stopProducer = nil + } + return nil +} + +// DockerNetwork represents a network started using Docker +type DockerNetwork struct { + ID string // Network ID from Docker + Driver string + Name string + provider *DockerProvider + terminationSignal chan bool +} + +// Remove is used to remove the network. It is usually triggered by as defer function. +func (n *DockerNetwork) Remove(ctx context.Context) error { + select { + // close reaper if it was created + case n.terminationSignal <- true: + default: + } + + err := n.provider.client.NetworkRemove(ctx, n.ID) + if err != nil { + return err + } + defer n.provider.Close() + + return nil +} + +// DockerProvider implements the ContainerProvider interface +type DockerProvider struct { + *DockerProviderOptions + client client.APIClient + host string + hostCache string + config TestcontainersConfig +} + +// Client gets the docker client used by the provider +func (p *DockerProvider) Client() client.APIClient { + return p.client +} + +// Close closes the docker client used by the provider +func (p *DockerProvider) Close() error { + if p.client == nil { + return nil + } + + return p.client.Close() +} + +// SetClient sets the docker client to be used by the provider +func (p *DockerProvider) SetClient(c client.APIClient) { + p.client = c +} + +var _ ContainerProvider = (*DockerProvider)(nil) + +func NewDockerClient() (cli *client.Client, err error) { + return testcontainersdocker.NewClient(context.Background()) +} + +// BuildImage will build and image from context and Dockerfile, then return the tag +func (p *DockerProvider) BuildImage(ctx context.Context, img ImageBuildInfo) (string, error) { + repo := uuid.New() + tag := uuid.New() + + repoTag := fmt.Sprintf("%s:%s", repo, tag) + + buildContext, err := img.GetContext() + if err != nil { + return "", err + } + + buildOptions := types.ImageBuildOptions{ + BuildArgs: img.GetBuildArgs(), + Dockerfile: img.GetDockerfile(), + AuthConfigs: img.GetAuthConfigs(), + Context: buildContext, + Tags: []string{repoTag}, + Remove: true, + ForceRemove: true, + } + + var resp types.ImageBuildResponse + err = backoff.Retry(func() error 
{ + resp, err = p.client.ImageBuild(ctx, buildContext, buildOptions) + if err != nil { + if _, ok := err.(errdefs.ErrNotFound); ok { + return backoff.Permanent(err) + } + Logger.Printf("Failed to build image: %s, will retry", err) + return err + } + defer p.Close() + + return nil + }, backoff.WithContext(backoff.NewExponentialBackOff(), ctx)) + if err != nil { + return "", err + } + + if img.ShouldPrintBuildLog() { + termFd, isTerm := term.GetFdInfo(os.Stderr) + err = jsonmessage.DisplayJSONMessagesStream(resp.Body, os.Stderr, termFd, isTerm, nil) + if err != nil { + return "", err + } + } + + // need to read the response from Docker, I think otherwise the image + // might not finish building before continuing to execute here + buf := new(bytes.Buffer) + _, err = buf.ReadFrom(resp.Body) + if err != nil { + return "", err + } + + _ = resp.Body.Close() + + return repoTag, nil +} + +// CreateContainer fulfills a request for a container without starting it +func (p *DockerProvider) CreateContainer(ctx context.Context, req ContainerRequest) (Container, error) { + var err error + + // defer the close of the Docker client connection the soonest + defer p.Close() + + // Make sure that bridge network exists + // In case it is disabled we will create reaper_default network + if p.DefaultNetwork == "" { + p.DefaultNetwork, err = p.getDefaultNetwork(ctx, p.client) + if err != nil { + return nil, err + } + } + + // If default network is not bridge make sure it is attached to the request + // as container won't be attached to it automatically + // in case of Podman the bridge network is called 'podman' as 'bridge' would conflict + if p.DefaultNetwork != p.defaultBridgeNetworkName { + isAttached := false + for _, net := range req.Networks { + if net == p.DefaultNetwork { + isAttached = true + break + } + } + + if !isAttached { + req.Networks = append(req.Networks, p.DefaultNetwork) + } + } + + env := []string{} + for envKey, envVar := range req.Env { + env = append(env, envKey+"="+envVar) + } + + if req.Labels == nil { + req.Labels = make(map[string]string) + } + + reaperOpts := containerOptions{ + ImageName: req.ReaperImage, + } + for _, opt := range req.ReaperOptions { + opt(&reaperOpts) + } + + tcConfig := p.Config().Config + + var termSignal chan bool + // the reaper does not need to start a reaper for itself + isReaperContainer := strings.EqualFold(req.Image, reaperImage(reaperOpts.ImageName)) + if !tcConfig.RyukDisabled && !isReaperContainer { + r, err := reuseOrCreateReaper(context.WithValue(ctx, testcontainersdocker.DockerHostContextKey, p.host), testcontainerssession.String(), p, req.ReaperOptions...) 
+ if err != nil { + return nil, fmt.Errorf("%w: creating reaper failed", err) + } + termSignal, err = r.Connect() + if err != nil { + return nil, fmt.Errorf("%w: connecting to reaper failed", err) + } + for k, v := range r.Labels() { + if _, ok := req.Labels[k]; !ok { + req.Labels[k] = v + } + } + } + + if err = req.Validate(); err != nil { + return nil, err + } + + var tag string + var platform *specs.Platform + + if req.ShouldBuildImage() { + tag, err = p.BuildImage(ctx, &req) + if err != nil { + return nil, err + } + } else { + tag = req.Image + + if req.ImagePlatform != "" { + p, err := platforms.Parse(req.ImagePlatform) + if err != nil { + return nil, fmt.Errorf("invalid platform %s: %w", req.ImagePlatform, err) + } + platform = &p + } + + var shouldPullImage bool + + if req.AlwaysPullImage { + shouldPullImage = true // If requested always attempt to pull image + } else { + image, _, err := p.client.ImageInspectWithRaw(ctx, tag) + if err != nil { + if client.IsErrNotFound(err) { + shouldPullImage = true + } else { + return nil, err + } + } + if platform != nil && (image.Architecture != platform.Architecture || image.Os != platform.OS) { + shouldPullImage = true + } + } + + if shouldPullImage { + pullOpt := types.ImagePullOptions{ + Platform: req.ImagePlatform, // may be empty + } + + registry, imageAuth, err := DockerImageAuth(ctx, req.Image) + if err != nil { + p.Logger.Printf("Failed to get image auth for %s. Setting empty credentials for the image: %s. Error is:%s", registry, req.Image, err) + } else { + // see https://github.com/docker/docs/blob/e8e1204f914767128814dca0ea008644709c117f/engine/api/sdk/examples.md?plain=1#L649-L657 + encodedJSON, err := json.Marshal(imageAuth) + if err != nil { + p.Logger.Printf("Failed to marshal image auth. Setting empty credentials for the image: %s. Error is:%s", req.Image, err) + } else { + pullOpt.RegistryAuth = base64.URLEncoding.EncodeToString(encodedJSON) + } + } + + if err := p.attemptToPullImage(ctx, tag, pullOpt); err != nil { + return nil, err + } + } + } + + dockerInput := &container.Config{ + Entrypoint: req.Entrypoint, + Image: tag, + Env: env, + Labels: req.Labels, + Cmd: req.Cmd, + Hostname: req.Hostname, + User: req.User, + } + + hostConfig := &container.HostConfig{ + Privileged: req.Privileged, + ShmSize: req.ShmSize, + Tmpfs: req.Tmpfs, + } + + networkingConfig := &network.NetworkingConfig{} + + // default hooks include logger hook and pre-create hook + defaultHooks := []ContainerLifecycleHooks{ + DefaultLoggingHook(p.Logger), + { + PreCreates: []ContainerRequestHook{ + func(ctx context.Context, req ContainerRequest) error { + return p.preCreateContainerHook(ctx, req, dockerInput, hostConfig, networkingConfig) + }, + }, + PostCreates: []ContainerHook{ + // copy files to container after it's created + func(ctx context.Context, c Container) error { + for _, f := range req.Files { + err := c.CopyFileToContainer(ctx, f.HostFilePath, f.ContainerFilePath, f.FileMode) + if err != nil { + return fmt.Errorf("can't copy %s to container: %w", f.HostFilePath, err) + } + } + + return nil + }, + }, + PostStarts: []ContainerHook{ + // first post-start hook is to wait for the container to be ready + func(ctx context.Context, c Container) error { + dockerContainer := c.(*DockerContainer) + + // if a Wait Strategy has been specified, wait before returning + if dockerContainer.WaitingFor != nil { + dockerContainer.logger.Printf( + "🚧 Waiting for container id %s image: %s. 
Waiting for: %+v", + dockerContainer.ID[:12], dockerContainer.Image, dockerContainer.WaitingFor, + ) + if err := dockerContainer.WaitingFor.WaitUntilReady(ctx, c); err != nil { + return err + } + } + + dockerContainer.isRunning = true + + return nil + }, + }, + }, + } + + // always prepend default lifecycle hooks to user-defined hooks + req.LifecycleHooks = append(defaultHooks, req.LifecycleHooks...) + + err = req.creatingHook(ctx) + if err != nil { + return nil, err + } + + resp, err := p.client.ContainerCreate(ctx, dockerInput, hostConfig, networkingConfig, platform, req.Name) + if err != nil { + return nil, err + } + + // #248: If there is more than one network specified in the request attach newly created container to them one by one + if len(req.Networks) > 1 { + for _, n := range req.Networks[1:] { + nw, err := p.GetNetwork(ctx, NetworkRequest{ + Name: n, + }) + if err == nil { + endpointSetting := network.EndpointSettings{ + Aliases: req.NetworkAliases[n], + } + err = p.client.NetworkConnect(ctx, nw.ID, resp.ID, &endpointSetting) + if err != nil { + return nil, err + } + } + } + } + + c := &DockerContainer{ + ID: resp.ID, + WaitingFor: req.WaitingFor, + Image: tag, + imageWasBuilt: req.ShouldBuildImage(), + sessionID: testcontainerssession.ID(), + provider: p, + terminationSignal: termSignal, + stopProducer: nil, + logger: p.Logger, + lifecycleHooks: req.LifecycleHooks, + } + + err = c.createdHook(ctx) + if err != nil { + return nil, err + } + + return c, nil +} + +func (p *DockerProvider) findContainerByName(ctx context.Context, name string) (*types.Container, error) { + if name == "" { + return nil, nil + } + + // Note that, 'name' filter will use regex to find the containers + filter := filters.NewArgs(filters.Arg("name", fmt.Sprintf("^%s$", name))) + containers, err := p.client.ContainerList(ctx, types.ContainerListOptions{Filters: filter}) + if err != nil { + return nil, err + } + defer p.Close() + + if len(containers) > 0 { + return &containers[0], nil + } + return nil, nil +} + +func (p *DockerProvider) ReuseOrCreateContainer(ctx context.Context, req ContainerRequest) (Container, error) { + c, err := p.findContainerByName(ctx, req.Name) + if err != nil { + return nil, err + } + if c == nil { + return p.CreateContainer(ctx, req) + } + + tcConfig := p.Config().Config + + var termSignal chan bool + if !tcConfig.RyukDisabled { + r, err := reuseOrCreateReaper(context.WithValue(ctx, testcontainersdocker.DockerHostContextKey, p.host), testcontainerssession.String(), p, req.ReaperOptions...) + if err != nil { + return nil, fmt.Errorf("%w: creating reaper failed", err) + } + termSignal, err = r.Connect() + if err != nil { + return nil, fmt.Errorf("%w: connecting to reaper failed", err) + } + } + + dc := &DockerContainer{ + ID: c.ID, + WaitingFor: req.WaitingFor, + Image: c.Image, + sessionID: testcontainerssession.ID(), + provider: p, + terminationSignal: termSignal, + stopProducer: nil, + logger: p.Logger, + isRunning: c.State == "running", + } + + return dc, nil +} + +// attemptToPullImage tries to pull the image while respecting the ctx cancellations. +// Besides, if the image cannot be pulled due to ErrorNotFound then no need to retry but terminate immediately. 
+func (p *DockerProvider) attemptToPullImage(ctx context.Context, tag string, pullOpt types.ImagePullOptions) error { + var ( + err error + pull io.ReadCloser + ) + err = backoff.Retry(func() error { + pull, err = p.client.ImagePull(ctx, tag, pullOpt) + if err != nil { + if _, ok := err.(errdefs.ErrNotFound); ok { + return backoff.Permanent(err) + } + Logger.Printf("Failed to pull image: %s, will retry", err) + return err + } + defer p.Close() + + return nil + }, backoff.WithContext(backoff.NewExponentialBackOff(), ctx)) + if err != nil { + return err + } + defer pull.Close() + + // download of docker image finishes at EOF of the pull request + _, err = io.ReadAll(pull) + return err +} + +// Health measure the healthiness of the provider. Right now we leverage the +// docker-client ping endpoint to see if the daemon is reachable. +func (p *DockerProvider) Health(ctx context.Context) (err error) { + _, err = p.client.Ping(ctx) + defer p.Close() + + return err +} + +// RunContainer takes a RequestContainer as input and it runs a container via the docker sdk +func (p *DockerProvider) RunContainer(ctx context.Context, req ContainerRequest) (Container, error) { + c, err := p.CreateContainer(ctx, req) + if err != nil { + return nil, err + } + + if err := c.Start(ctx); err != nil { + return c, fmt.Errorf("%w: could not start container", err) + } + + return c, nil +} + +// Config provides the TestcontainersConfig read from $HOME/.testcontainers.properties or +// the environment variables +func (p *DockerProvider) Config() TestcontainersConfig { + return p.config +} + +// DaemonHost gets the host or ip of the Docker daemon where ports are exposed on +// Warning: this is based on your Docker host setting. Will fail if using an SSH tunnel +// You can use the "TC_HOST" env variable to set this yourself +func (p *DockerProvider) DaemonHost(ctx context.Context) (string, error) { + return daemonHost(ctx, p) +} + +func daemonHost(ctx context.Context, p *DockerProvider) (string, error) { + if p.hostCache != "" { + return p.hostCache, nil + } + + host, exists := os.LookupEnv("TC_HOST") + if exists { + p.hostCache = host + return p.hostCache, nil + } + + // infer from Docker host + url, err := url.Parse(p.client.DaemonHost()) + if err != nil { + return "", err + } + defer p.Close() + + switch url.Scheme { + case "http", "https", "tcp": + p.hostCache = url.Hostname() + case "unix", "npipe": + if testcontainersdocker.InAContainer() { + ip, err := p.GetGatewayIP(ctx) + if err != nil { + ip, err = testcontainersdocker.DefaultGatewayIP() + if err != nil { + ip = "localhost" + } + } + p.hostCache = ip + } else { + p.hostCache = "localhost" + } + default: + return "", errors.New("could not determine host through env or docker host") + } + + return p.hostCache, nil +} + +// CreateNetwork returns the object representing a new network identified by its name +func (p *DockerProvider) CreateNetwork(ctx context.Context, req NetworkRequest) (Network, error) { + var err error + + // defer the close of the Docker client connection the soonest + defer p.Close() + + // Make sure that bridge network exists + // In case it is disabled we will create reaper_default network + if p.DefaultNetwork == "" { + if p.DefaultNetwork, err = p.getDefaultNetwork(ctx, p.client); err != nil { + return nil, err + } + } + + if req.Labels == nil { + req.Labels = make(map[string]string) + } + + tcConfig := p.Config().Config + + nc := types.NetworkCreate{ + Driver: req.Driver, + CheckDuplicate: req.CheckDuplicate, + Internal: req.Internal, + 
EnableIPv6: req.EnableIPv6, + Attachable: req.Attachable, + Labels: req.Labels, + IPAM: req.IPAM, + } + + var termSignal chan bool + if !tcConfig.RyukDisabled { + r, err := reuseOrCreateReaper(context.WithValue(ctx, testcontainersdocker.DockerHostContextKey, p.host), testcontainerssession.String(), p, req.ReaperOptions...) + if err != nil { + return nil, fmt.Errorf("%w: creating network reaper failed", err) + } + termSignal, err = r.Connect() + if err != nil { + return nil, fmt.Errorf("%w: connecting to network reaper failed", err) + } + for k, v := range r.Labels() { + if _, ok := req.Labels[k]; !ok { + req.Labels[k] = v + } + } + } + + response, err := p.client.NetworkCreate(ctx, req.Name, nc) + if err != nil { + return &DockerNetwork{}, err + } + + n := &DockerNetwork{ + ID: response.ID, + Driver: req.Driver, + Name: req.Name, + terminationSignal: termSignal, + provider: p, + } + + return n, nil +} + +// GetNetwork returns the object representing the network identified by its name +func (p *DockerProvider) GetNetwork(ctx context.Context, req NetworkRequest) (types.NetworkResource, error) { + networkResource, err := p.client.NetworkInspect(ctx, req.Name, types.NetworkInspectOptions{ + Verbose: true, + }) + if err != nil { + return types.NetworkResource{}, err + } + + return networkResource, err +} + +func (p *DockerProvider) GetGatewayIP(ctx context.Context) (string, error) { + // Use a default network as defined in the DockerProvider + if p.DefaultNetwork == "" { + var err error + p.DefaultNetwork, err = p.getDefaultNetwork(ctx, p.client) + if err != nil { + return "", err + } + } + nw, err := p.GetNetwork(ctx, NetworkRequest{Name: p.DefaultNetwork}) + if err != nil { + return "", err + } + + var ip string + for _, config := range nw.IPAM.Config { + if config.Gateway != "" { + ip = config.Gateway + break + } + } + if ip == "" { + return "", errors.New("Failed to get gateway IP from network settings") + } + + return ip, nil +} + +func (p *DockerProvider) getDefaultNetwork(ctx context.Context, cli client.APIClient) (string, error) { + // Get list of available networks + networkResources, err := cli.NetworkList(ctx, types.NetworkListOptions{}) + if err != nil { + return "", err + } + + reaperNetwork := ReaperDefault + + reaperNetworkExists := false + + for _, net := range networkResources { + if net.Name == p.defaultBridgeNetworkName { + return p.defaultBridgeNetworkName, nil + } + + if net.Name == reaperNetwork { + reaperNetworkExists = true + } + } + + // Create a bridge network for the container communications + if !reaperNetworkExists { + _, err = cli.NetworkCreate(ctx, reaperNetwork, types.NetworkCreate{ + Driver: Bridge, + Attachable: true, + Labels: map[string]string{ + TestcontainerLabel: "true", + testcontainersdocker.LabelLang: "go", + testcontainersdocker.LabelVersion: internal.Version, + }, + }) + + if err != nil { + return "", err + } + } + + return reaperNetwork, nil +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/docker_auth.go b/vendor/github.com/testcontainers/testcontainers-go/docker_auth.go new file mode 100644 index 000000000..c95f848c0 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/docker_auth.go @@ -0,0 +1,108 @@ +package testcontainers + +import ( + "context" + "encoding/base64" + "encoding/json" + "os" + + "github.com/cpuguy83/dockercfg" + "github.com/docker/docker/api/types" + "github.com/testcontainers/testcontainers-go/internal/testcontainersdocker" +) + +// DockerImageAuth returns the auth config for the given Docker 
image, extracting first its Docker registry. +// Finally, it will use the credential helpers to extract the information from the docker config file +// for that registry, if it exists. +func DockerImageAuth(ctx context.Context, image string) (string, types.AuthConfig, error) { + defaultRegistry := defaultRegistry(ctx) + registry := testcontainersdocker.ExtractRegistry(image, defaultRegistry) + + cfgs, err := getDockerAuthConfigs() + if err != nil { + return registry, types.AuthConfig{}, err + } + + if cfg, ok := cfgs[registry]; ok { + return registry, cfg, nil + } + + return registry, types.AuthConfig{}, dockercfg.ErrCredentialsNotFound +} + +// defaultRegistry returns the default registry to use when pulling images +// It will use the docker daemon to get the default registry, returning "https://index.docker.io/v1/" if +// it fails to get the information from the daemon +func defaultRegistry(ctx context.Context) string { + client, err := testcontainersdocker.NewClient(ctx) + if err != nil { + return testcontainersdocker.IndexDockerIO + } + defer client.Close() + + info, err := client.Info(ctx) + if err != nil { + return testcontainersdocker.IndexDockerIO + } + + return info.IndexServerAddress +} + +// getDockerAuthConfigs returns a map with the auth configs from the docker config file +// using the registry as the key +func getDockerAuthConfigs() (map[string]types.AuthConfig, error) { + cfg, err := getDockerConfig() + if err != nil { + return nil, err + } + + cfgs := map[string]types.AuthConfig{} + for k, v := range cfg.AuthConfigs { + ac := types.AuthConfig{ + Auth: v.Auth, + Email: v.Email, + IdentityToken: v.IdentityToken, + Password: v.Password, + RegistryToken: v.RegistryToken, + ServerAddress: v.ServerAddress, + Username: v.Username, + } + + if v.Username == "" && v.Password == "" { + u, p, _ := dockercfg.GetRegistryCredentials(k) + ac.Username = u + ac.Password = p + } + + if v.Auth == "" { + ac.Auth = base64.StdEncoding.EncodeToString([]byte(ac.Username + ":" + ac.Password)) + } + + cfgs[k] = ac + } + + return cfgs, nil +} + +// getDockerConfig returns the docker config file. It will internally check, in this particular order: +// 1. the DOCKER_AUTH_CONFIG environment variable, unmarshalling it into a dockercfg.Config +// 2. the DOCKER_CONFIG environment variable, as the path to the config file +// 3. 
else it will load the default config file, which is ~/.docker/config.json +func getDockerConfig() (dockercfg.Config, error) { + dockerAuthConfig := os.Getenv("DOCKER_AUTH_CONFIG") + if dockerAuthConfig != "" { + cfg := dockercfg.Config{} + err := json.Unmarshal([]byte(dockerAuthConfig), &cfg) + if err == nil { + return cfg, nil + } + + } + + cfg, err := dockercfg.LoadDefaultConfig() + if err != nil { + return cfg, err + } + + return cfg, nil +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/docker_mounts.go b/vendor/github.com/testcontainers/testcontainers-go/docker_mounts.go new file mode 100644 index 000000000..bb63dc047 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/docker_mounts.go @@ -0,0 +1,116 @@ +package testcontainers + +import "github.com/docker/docker/api/types/mount" + +var ( + mountTypeMapping = map[MountType]mount.Type{ + MountTypeBind: mount.TypeBind, + MountTypeVolume: mount.TypeVolume, + MountTypeTmpfs: mount.TypeTmpfs, + MountTypePipe: mount.TypeNamedPipe, + } +) + +// BindMounter can optionally be implemented by mount sources +// to support advanced scenarios based on mount.BindOptions +type BindMounter interface { + GetBindOptions() *mount.BindOptions +} + +// VolumeMounter can optionally be implemented by mount sources +// to support advanced scenarios based on mount.VolumeOptions +type VolumeMounter interface { + GetVolumeOptions() *mount.VolumeOptions +} + +// TmpfsMounter can optionally be implemented by mount sources +// to support advanced scenarios based on mount.TmpfsOptions +type TmpfsMounter interface { + GetTmpfsOptions() *mount.TmpfsOptions +} + +type DockerBindMountSource struct { + *mount.BindOptions + + // HostPath is the path mounted into the container + // the same host path might be mounted to multiple locations within a single container + HostPath string +} + +func (s DockerBindMountSource) Source() string { + return s.HostPath +} + +func (DockerBindMountSource) Type() MountType { + return MountTypeBind +} + +func (s DockerBindMountSource) GetBindOptions() *mount.BindOptions { + return s.BindOptions +} + +type DockerVolumeMountSource struct { + *mount.VolumeOptions + + // Name refers to the name of the volume to be mounted + // the same volume might be mounted to multiple locations within a single container + Name string +} + +func (s DockerVolumeMountSource) Source() string { + return s.Name +} + +func (DockerVolumeMountSource) Type() MountType { + return MountTypeVolume +} + +func (s DockerVolumeMountSource) GetVolumeOptions() *mount.VolumeOptions { + return s.VolumeOptions +} + +type DockerTmpfsMountSource struct { + GenericTmpfsMountSource + *mount.TmpfsOptions +} + +func (s DockerTmpfsMountSource) GetTmpfsOptions() *mount.TmpfsOptions { + return s.TmpfsOptions +} + +// mapToDockerMounts maps the given []ContainerMount to the corresponding +// []mount.Mount for further processing +func mapToDockerMounts(containerMounts ContainerMounts) []mount.Mount { + mounts := make([]mount.Mount, 0, len(containerMounts)) + + for idx := range containerMounts { + m := containerMounts[idx] + + var mountType mount.Type + if mt, ok := mountTypeMapping[m.Source.Type()]; ok { + mountType = mt + } else { + continue + } + + containerMount := mount.Mount{ + Type: mountType, + Source: m.Source.Source(), + ReadOnly: m.ReadOnly, + Target: m.Target.Target(), + } + + switch typedMounter := m.Source.(type) { + case BindMounter: + containerMount.BindOptions = typedMounter.GetBindOptions() + case VolumeMounter: + 
containerMount.VolumeOptions = typedMounter.GetVolumeOptions() + case TmpfsMounter: + containerMount.TmpfsOptions = typedMounter.GetTmpfsOptions() + } + + mounts = append(mounts, containerMount) + } + + return mounts +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/exec/processor.go b/vendor/github.com/testcontainers/testcontainers-go/exec/processor.go new file mode 100644 index 000000000..63987e461 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/exec/processor.go @@ -0,0 +1,44 @@ +package exec + +import ( + "bytes" + "io" + + "github.com/docker/docker/pkg/stdcopy" +) + +// ProcessOptions defines options applicable to the reader processor +type ProcessOptions struct { + Reader io.Reader +} + +// ProcessOption defines a common interface to modify the reader processor +// These options can be passed to the Exec function in a variadic way to customize the returned Reader instance +type ProcessOption interface { + Apply(opts *ProcessOptions) +} + +type ProcessOptionFunc func(opts *ProcessOptions) + +func (fn ProcessOptionFunc) Apply(opts *ProcessOptions) { + fn(opts) +} + +func Multiplexed() ProcessOption { + return ProcessOptionFunc(func(opts *ProcessOptions) { + done := make(chan struct{}) + + var outBuff bytes.Buffer + var errBuff bytes.Buffer + go func() { + if _, err := stdcopy.StdCopy(&outBuff, &errBuff, opts.Reader); err != nil { + return + } + close(done) + }() + + <-done + + opts.Reader = &outBuff + }) +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/file.go b/vendor/github.com/testcontainers/testcontainers-go/file.go new file mode 100644 index 000000000..509da643d --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/file.go @@ -0,0 +1,141 @@ +package testcontainers + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "fmt" + "io" + "os" + "path/filepath" + "strings" +) + +func isDir(path string) (bool, error) { + file, err := os.Open(path) + if err != nil { + return false, err + } + defer file.Close() + + fileInfo, err := file.Stat() + if err != nil { + return false, err + } + + if fileInfo.IsDir() { + return true, nil + } + + return false, nil +} + +// tarDir compress a directory using tar + gzip algorithms +func tarDir(src string, fileMode int64) (*bytes.Buffer, error) { + // always pass src as absolute path + abs, err := filepath.Abs(src) + if err != nil { + return &bytes.Buffer{}, fmt.Errorf("error getting absolute path: %w", err) + } + src = abs + + buffer := &bytes.Buffer{} + + fmt.Printf(">> creating TAR file from directory: %s\n", src) + + // tar > gzip > buffer + zr := gzip.NewWriter(buffer) + tw := tar.NewWriter(zr) + + _, baseDir := filepath.Split(src) + // keep the path relative to the parent directory + index := strings.LastIndex(src, baseDir) + + // walk through every file in the folder + err = filepath.Walk(src, func(file string, fi os.FileInfo, errFn error) error { + if errFn != nil { + return fmt.Errorf("error traversing the file system: %w", errFn) + } + + // if a symlink, skip file + if fi.Mode().Type() == os.ModeSymlink { + fmt.Printf(">> skipping symlink: %s\n", file) + return nil + } + + // generate tar header + header, err := tar.FileInfoHeader(fi, file) + if err != nil { + return fmt.Errorf("error getting file info header: %w", err) + } + + // see https://pkg.go.dev/archive/tar#FileInfoHeader: + // Since fs.FileInfo's Name method only returns the base name of the file it describes, + // it may be necessary to modify Header.Name to provide the full path name of the file. 
+ header.Name = filepath.ToSlash(file[index:]) + header.Mode = fileMode + + // write header + if err := tw.WriteHeader(header); err != nil { + return fmt.Errorf("error writing header: %w", err) + } + + // if not a dir, write file content + if !fi.IsDir() { + data, err := os.Open(file) + if err != nil { + return fmt.Errorf("error opening file: %w", err) + } + defer data.Close() + if _, err := io.Copy(tw, data); err != nil { + return fmt.Errorf("error compressing file: %w", err) + } + } + return nil + }) + if err != nil { + return buffer, err + } + + // produce tar + if err := tw.Close(); err != nil { + return buffer, fmt.Errorf("error closing tar file: %w", err) + } + // produce gzip + if err := zr.Close(); err != nil { + return buffer, fmt.Errorf("error closing gzip file: %w", err) + } + + return buffer, nil +} + +// tarFile compress a single file using tar + gzip algorithms +func tarFile(fileContent []byte, basePath string, fileMode int64) (*bytes.Buffer, error) { + buffer := &bytes.Buffer{} + + zr := gzip.NewWriter(buffer) + tw := tar.NewWriter(zr) + + hdr := &tar.Header{ + Name: filepath.Base(basePath), + Mode: fileMode, + Size: int64(len(fileContent)), + } + if err := tw.WriteHeader(hdr); err != nil { + return buffer, err + } + if _, err := tw.Write(fileContent); err != nil { + return buffer, err + } + + // produce tar + if err := tw.Close(); err != nil { + return buffer, fmt.Errorf("error closing tar file: %w", err) + } + // produce gzip + if err := zr.Close(); err != nil { + return buffer, fmt.Errorf("error closing gzip file: %w", err) + } + + return buffer, nil +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/generic.go b/vendor/github.com/testcontainers/testcontainers-go/generic.go new file mode 100644 index 000000000..86a280be3 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/generic.go @@ -0,0 +1,157 @@ +package testcontainers + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/imdario/mergo" + "github.com/testcontainers/testcontainers-go/wait" +) + +var ( + reuseContainerMx sync.Mutex + ErrReuseEmptyName = errors.New("with reuse option a container name mustn't be empty") +) + +// GenericContainerRequest represents parameters to a generic container +type GenericContainerRequest struct { + ContainerRequest // embedded request for provider + Started bool // whether to auto-start the container + ProviderType ProviderType // which provider to use, Docker if empty + Logger Logging // provide a container specific Logging - use default global logger if empty + Reuse bool // reuse an existing container if it exists or create a new one. a container name mustn't be empty +} + +// ContainerCustomizer is an interface that can be used to configure the Testcontainers container +// request. The passed request will be merged with the default one. +type ContainerCustomizer interface { + Customize(req *GenericContainerRequest) +} + +// CustomizeRequestOption is a type that can be used to configure the Testcontainers container request. +// The passed request will be merged with the default one. +type CustomizeRequestOption func(req *GenericContainerRequest) + +func (opt CustomizeRequestOption) Customize(req *GenericContainerRequest) { + opt(req) +} + +// CustomizeRequest returns a function that can be used to merge the passed container request with the one that is used by the container. +// Slices and Maps will be appended. 
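The customizer helpers defined just below (CustomizeRequest, WithImage, WithWaitStrategyAndDeadline, and friends) are typically applied to a GenericContainerRequest before it is handed to a provider. A hedged usage sketch against the public API of this vendored version; the image names and wait log line are illustrative only.

```go
package main

import (
	"fmt"
	"time"

	"github.com/testcontainers/testcontainers-go"
	"github.com/testcontainers/testcontainers-go/wait"
)

func main() {
	// Start from a base request, then apply customizers the same way a
	// module's Run function would.
	req := testcontainers.GenericContainerRequest{
		ContainerRequest: testcontainers.ContainerRequest{
			Image: "redis:5",
		},
		Started: true,
	}

	opts := []testcontainers.ContainerCustomizer{
		testcontainers.WithImage("redis:7"),
		testcontainers.WithWaitStrategyAndDeadline(30*time.Second, wait.ForLog("Ready to accept connections")),
		testcontainers.CustomizeRequest(testcontainers.GenericContainerRequest{
			ContainerRequest: testcontainers.ContainerRequest{
				// maps and slices are merged into the base request
				Env: map[string]string{"TZ": "UTC"},
			},
		}),
	}

	for _, opt := range opts {
		opt.Customize(&req)
	}

	fmt.Println(req.Image, req.Env)
}
```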
+func CustomizeRequest(src GenericContainerRequest) CustomizeRequestOption { + return func(req *GenericContainerRequest) { + if err := mergo.Merge(req, &src, mergo.WithOverride, mergo.WithAppendSlice); err != nil { + fmt.Printf("error merging container request, keeping the original one. Error: %v", err) + return + } + } +} + +// WithImage sets the image for a container +func WithImage(image string) CustomizeRequestOption { + return func(req *GenericContainerRequest) { + req.Image = image + } +} + +// WithConfigModifier allows to override the default container config +func WithConfigModifier(modifier func(config *container.Config)) CustomizeRequestOption { + return func(req *GenericContainerRequest) { + req.ConfigModifier = modifier + } +} + +// WithEndpointSettingsModifier allows to override the default endpoint settings +func WithEndpointSettingsModifier(modifier func(settings map[string]*network.EndpointSettings)) CustomizeRequestOption { + return func(req *GenericContainerRequest) { + req.EnpointSettingsModifier = modifier + } +} + +// WithHostConfigModifier allows to override the default host config +func WithHostConfigModifier(modifier func(hostConfig *container.HostConfig)) CustomizeRequestOption { + return func(req *GenericContainerRequest) { + req.HostConfigModifier = modifier + } +} + +// WithWaitStrategy sets the wait strategy for a container, using 60 seconds as deadline +func WithWaitStrategy(strategies ...wait.Strategy) CustomizeRequestOption { + return WithWaitStrategyAndDeadline(60*time.Second, strategies...) +} + +// WithWaitStrategyAndDeadline sets the wait strategy for a container, including deadline +func WithWaitStrategyAndDeadline(deadline time.Duration, strategies ...wait.Strategy) CustomizeRequestOption { + return func(req *GenericContainerRequest) { + req.WaitingFor = wait.ForAll(strategies...).WithDeadline(deadline) + } +} + +// GenericNetworkRequest represents parameters to a generic network +type GenericNetworkRequest struct { + NetworkRequest // embedded request for provider + ProviderType ProviderType // which provider to use, Docker if empty +} + +// GenericNetwork creates a generic network with parameters +func GenericNetwork(ctx context.Context, req GenericNetworkRequest) (Network, error) { + provider, err := req.ProviderType.GetProvider() + if err != nil { + return nil, err + } + network, err := provider.CreateNetwork(ctx, req.NetworkRequest) + if err != nil { + return nil, fmt.Errorf("%w: failed to create network", err) + } + + return network, nil +} + +// GenericContainer creates a generic container with parameters +func GenericContainer(ctx context.Context, req GenericContainerRequest) (Container, error) { + if req.Reuse && req.Name == "" { + return nil, ErrReuseEmptyName + } + + logging := req.Logger + if logging == nil { + logging = Logger + } + provider, err := req.ProviderType.GetProvider(WithLogger(logging)) + if err != nil { + return nil, err + } + + var c Container + if req.Reuse { + // we must protect the reusability of the container in the case it's invoked + // in a parallel execution, via ParallelContainers or t.Parallel() + reuseContainerMx.Lock() + defer reuseContainerMx.Unlock() + + c, err = provider.ReuseOrCreateContainer(ctx, req.ContainerRequest) + } else { + c, err = provider.CreateContainer(ctx, req.ContainerRequest) + } + if err != nil { + return nil, fmt.Errorf("%w: failed to create container", err) + } + + if req.Started && !c.IsRunning() { + if err := c.Start(ctx); err != nil { + return c, fmt.Errorf("%w: failed to start 
container", err) + } + } + return c, nil +} + +// GenericProvider represents an abstraction for container and network providers +type GenericProvider interface { + ContainerProvider + NetworkProvider +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/config/config.go b/vendor/github.com/testcontainers/testcontainers-go/internal/config/config.go new file mode 100644 index 000000000..5460d6d67 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/internal/config/config.go @@ -0,0 +1,100 @@ +package config + +import ( + "fmt" + "os" + "path/filepath" + "strconv" + "sync" + + "github.com/magiconair/properties" +) + +var tcConfig Config +var tcConfigOnce *sync.Once = new(sync.Once) + +// Config represents the configuration for Testcontainers +// testcontainersConfig { +type Config struct { + Host string `properties:"docker.host,default="` + TLSVerify int `properties:"docker.tls.verify,default=0"` + CertPath string `properties:"docker.cert.path,default="` + RyukDisabled bool `properties:"ryuk.disabled,default=false"` + RyukPrivileged bool `properties:"ryuk.container.privileged,default=false"` + TestcontainersHost string `properties:"tc.host,default="` +} + +// } + +// Read reads from testcontainers properties file, if it exists +// it is possible that certain values get overridden when set as environment variables +func Read() Config { + tcConfigOnce.Do(func() { + tcConfig = read() + + if tcConfig.RyukDisabled { + ryukDisabledMessage := ` +********************************************************************************************** +Ryuk has been disabled for the current execution. This can cause unexpected behavior in your environment. +More on this: https://golang.testcontainers.org/features/garbage_collector/ +**********************************************************************************************` + fmt.Println(ryukDisabledMessage) + } + }) + + return tcConfig +} + +// Reset resets the singleton instance of the Config struct, +// allowing to read the configuration again. 
+// Handy for testing, so do not use it in production code +// This function is not thread-safe +func Reset() { + tcConfigOnce = new(sync.Once) +} + +func read() Config { + config := Config{} + + applyEnvironmentConfiguration := func(config Config) Config { + ryukDisabledEnv := os.Getenv("TESTCONTAINERS_RYUK_DISABLED") + if parseBool(ryukDisabledEnv) { + config.RyukDisabled = ryukDisabledEnv == "true" + } + + ryukPrivilegedEnv := os.Getenv("TESTCONTAINERS_RYUK_CONTAINER_PRIVILEGED") + if parseBool(ryukPrivilegedEnv) { + config.RyukPrivileged = ryukPrivilegedEnv == "true" + } + + return config + } + + home, err := os.UserHomeDir() + if err != nil { + return applyEnvironmentConfiguration(config) + } + + tcProp := filepath.Join(home, ".testcontainers.properties") + // init from a file + properties, err := properties.LoadFile(tcProp, properties.UTF8) + if err != nil { + return applyEnvironmentConfiguration(config) + } + + if err := properties.Decode(&config); err != nil { + fmt.Printf("invalid testcontainers properties file, returning an empty Testcontainers configuration: %v\n", err) + return applyEnvironmentConfiguration(config) + } + + fmt.Printf("Testcontainers properties file has been found: %s\n", tcProp) + + return applyEnvironmentConfiguration(config) +} + +func parseBool(input string) bool { + if _, err := strconv.ParseBool(input); err == nil { + return true + } + return false +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/client.go b/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/client.go new file mode 100644 index 000000000..ec10d34d1 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/client.go @@ -0,0 +1,65 @@ +package testcontainersdocker + +import ( + "context" + "path/filepath" + + "github.com/docker/docker/client" + "github.com/testcontainers/testcontainers-go/internal/config" + "github.com/testcontainers/testcontainers-go/internal/testcontainerssession" +) + +// NewClient returns a new docker client extracting the docker host from the different alternatives +func NewClient(ctx context.Context, ops ...client.Opt) (*client.Client, error) { + tcConfig := config.Read() + + dockerHost := ExtractDockerHost(ctx) + + opts := []client.Opt{client.FromEnv, client.WithAPIVersionNegotiation()} + if dockerHost != "" { + opts = append(opts, client.WithHost(dockerHost)) + + // for further information, read https://docs.docker.com/engine/security/protect-access/ + if tcConfig.TLSVerify == 1 { + cacertPath := filepath.Join(tcConfig.CertPath, "ca.pem") + certPath := filepath.Join(tcConfig.CertPath, "cert.pem") + keyPath := filepath.Join(tcConfig.CertPath, "key.pem") + + opts = append(opts, client.WithTLSClientConfig(cacertPath, certPath, keyPath)) + } + } + + opts = append(opts, client.WithHTTPHeaders( + map[string]string{ + "x-tc-sid": testcontainerssession.String(), + }), + ) + + // passed options have priority over the default ones + opts = append(opts, ops...) + + cli, err := client.NewClientWithOpts(opts...) + if err != nil { + return nil, err + } + + if _, err = cli.Ping(context.Background()); err != nil { + // Fallback to environment, including the original options + cli, err = defaultClient(context.Background(), ops...) 
+ if err != nil { + return nil, err + } + } + defer cli.Close() + + return cli, nil +} + +// defaultClient returns a plain, new docker client with the default options +func defaultClient(ctx context.Context, ops ...client.Opt) (*client.Client, error) { + if len(ops) == 0 { + ops = []client.Opt{client.FromEnv, client.WithAPIVersionNegotiation()} + } + + return client.NewClientWithOpts(ops...) +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/docker_host.go b/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/docker_host.go new file mode 100644 index 000000000..c625c6cd5 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/docker_host.go @@ -0,0 +1,264 @@ +package testcontainersdocker + +import ( + "context" + "errors" + "fmt" + "os" + "os/exec" + "strings" + "sync" + + "github.com/docker/docker/client" + "github.com/testcontainers/testcontainers-go/internal/config" +) + +type dockerHostContext string + +var DockerHostContextKey = dockerHostContext("docker_host") + +var ( + ErrDockerHostNotSet = errors.New("DOCKER_HOST is not set") + ErrDockerSocketOverrideNotSet = errors.New("TESTCONTAINERS_DOCKER_SOCKET_OVERRIDE is not set") + ErrDockerSocketNotSetInContext = errors.New("socket not set in context") + ErrDockerSocketNotSetInProperties = errors.New("socket not set in ~/.testcontainers.properties") + ErrNoUnixSchema = errors.New("URL schema is not unix") + ErrSocketNotFound = errors.New("socket not found") + ErrSocketNotFoundInPath = errors.New("docker socket not found in " + DockerSocketPath) + // ErrTestcontainersHostNotSetInProperties this error is specific to Testcontainers + ErrTestcontainersHostNotSetInProperties = errors.New("tc.host not set in ~/.testcontainers.properties") +) + +var dockerHostCache string +var dockerHostOnce sync.Once + +var dockerSocketPathCache string +var dockerSocketPathOnce sync.Once + +// deprecated +// see https://github.com/testcontainers/testcontainers-java/blob/main/core/src/main/java/org/testcontainers/dockerclient/DockerClientConfigUtils.java#L46 +func DefaultGatewayIP() (string, error) { + // see https://github.com/testcontainers/testcontainers-java/blob/3ad8d80e2484864e554744a4800a81f6b7982168/core/src/main/java/org/testcontainers/dockerclient/DockerClientConfigUtils.java#L27 + cmd := exec.Command("sh", "-c", "ip route|awk '/default/ { print $3 }'") + stdout, err := cmd.Output() + if err != nil { + return "", errors.New("failed to detect docker host") + } + ip := strings.TrimSpace(string(stdout)) + if len(ip) == 0 { + return "", errors.New("failed to parse default gateway IP") + } + return ip, nil +} + +// ExtractDockerHost Extracts the docker host from the different alternatives, caching the result to avoid unnecessary +// calculations. Use this function to get the actual Docker host. This function does not consider Windows containers at the moment. +// The possible alternatives are: +// +// 1. Docker host from the "tc.host" property in the ~/.testcontainers.properties file. +// 2. DOCKER_HOST environment variable. +// 3. Docker host from context. +// 4. Docker host from the default docker socket path, without the unix schema. +// 5. Docker host from the "docker.host" property in the ~/.testcontainers.properties file. +// 6. Rootless docker socket path. +// 7. Else, the default Docker socket including schema will be returned. 
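The precedence list above is implemented as a chain of resolver functions that are tried in order, with each failure wrapped into an accumulated error before falling back to the default socket. A self-contained sketch of that lookup pattern follows; the names, error messages, and fallback value are illustrative, not the library's exact ones.

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

var errHostNotFound = errors.New("docker host not found")

// resolveDockerHost tries each source in priority order and returns the first
// hit; errors from rejected sources are accumulated so the caller can still
// see why every alternative failed.
func resolveDockerHost(sources ...func() (string, error)) string {
	outerErr := errHostNotFound
	for _, source := range sources {
		host, err := source()
		if err != nil {
			outerErr = fmt.Errorf("%w: %v", outerErr, err)
			continue
		}
		return host
	}
	fmt.Println("falling back to default socket:", outerErr)
	return "unix:///var/run/docker.sock"
}

func main() {
	host := resolveDockerHost(
		func() (string, error) { return "", errors.New("tc.host not set") },
		func() (string, error) {
			if v := os.Getenv("DOCKER_HOST"); v != "" {
				return v, nil
			}
			return "", errors.New("DOCKER_HOST is not set")
		},
	)
	fmt.Println("resolved host:", host)
}
```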
+func ExtractDockerHost(ctx context.Context) string { + dockerHostOnce.Do(func() { + dockerHostCache = extractDockerHost(ctx) + }) + + return dockerHostCache +} + +// ExtractDockerSocket Extracts the docker socket from the different alternatives, removing the socket schema and +// caching the result to avoid unnecessary calculations. Use this function to get the docker socket path, +// not the host (e.g. mounting the socket in a container). This function does not consider Windows containers at the moment. +// The possible alternatives are: +// +// 1. Docker host from the "tc.host" property in the ~/.testcontainers.properties file. +// 2. The TESTCONTAINERS_DOCKER_SOCKET_OVERRIDE environment variable. +// 3. Using a Docker client, check if the Info().OperativeSystem is "Docker Desktop" and return the default docker socket path for rootless docker. +// 4. Else, Get the current Docker Host from the existing strategies: see ExtractDockerHost. +// 5. If the socket contains the unix schema, the schema is removed (e.g. unix:///var/run/docker.sock -> /var/run/docker.sock) +// 6. Else, the default location of the docker socket is used (/var/run/docker.sock) +// +// In any case, if the docker socket schema is "tcp://", the default docker socket path will be returned. +func ExtractDockerSocket(ctx context.Context) string { + dockerSocketPathOnce.Do(func() { + dockerSocketPathCache = extractDockerSocket(ctx) + }) + + return dockerSocketPathCache +} + +// extractDockerHost Extracts the docker host from the different alternatives, without caching the result. +// This internal method is handy for testing purposes. +func extractDockerHost(ctx context.Context) string { + dockerHostFns := []func(context.Context) (string, error){ + testcontainersHostFromProperties, + dockerHostFromEnv, + dockerHostFromContext, + dockerSocketPath, + dockerHostFromProperties, + rootlessDockerSocketPath, + } + + outerErr := ErrSocketNotFound + for _, dockerHostFn := range dockerHostFns { + dockerHost, err := dockerHostFn(ctx) + if err != nil { + outerErr = fmt.Errorf("%w: %v", outerErr, err) + continue + } + + return dockerHost + } + + // We are not supporting Windows containers at the moment + return DockerSocketPathWithSchema +} + +// extractDockerHost Extracts the docker socket from the different alternatives, without caching the result. +// It will internally use the default Docker client, calling the internal method extractDockerSocketFromClient with it. +// This internal method is handy for testing purposes. +// If a Docker client cannot be created, the program will panic. +func extractDockerSocket(ctx context.Context) string { + cli, err := NewClient(ctx) + if err != nil { + panic(err) // a Docker client is required to get the Docker info + } + + return extractDockerSocketFromClient(ctx, cli) +} + +// extractDockerSocketFromClient Extracts the docker socket from the different alternatives, without caching the result, +// and receiving an instance of the Docker API client interface. +// This internal method is handy for testing purposes, passing a mock type simulating the desired behaviour. 
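The socket extraction described above normalizes whatever host string wins the lookup: tcp:// hosts fall back to the default socket path, and the unix:// schema is stripped. A small standalone sketch of that normalization; the constants are local to the example rather than the package's exported values.

```go
package main

import (
	"fmt"
	"strings"
)

const (
	dockerSocketPath   = "/var/run/docker.sock"
	dockerSocketSchema = "unix://"
	tcpSchema          = "tcp://"
)

// normalizeSocket mirrors the schema handling described above: tcp hosts fall
// back to the default socket path, unix hosts have the schema stripped, and
// anything else is returned untouched.
func normalizeSocket(socket string) string {
	if strings.HasPrefix(socket, tcpSchema) {
		return dockerSocketPath
	}
	if strings.HasPrefix(socket, dockerSocketSchema) {
		return strings.Replace(socket, dockerSocketSchema, "", 1)
	}
	return socket
}

func main() {
	for _, s := range []string{
		"unix:///var/run/docker.sock",
		"tcp://127.0.0.1:2375",
		"/run/user/1000/docker.sock",
	} {
		fmt.Printf("%-35s -> %s\n", s, normalizeSocket(s))
	}
}
```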
+func extractDockerSocketFromClient(ctx context.Context, cli client.APIClient) string { + // check that the socket is not a tcp or unix socket + checkDockerSocketFn := func(socket string) string { + // this use case will cover the case when the docker host is a tcp socket + if strings.HasPrefix(socket, TCPSchema) { + return DockerSocketPath + } + + if strings.HasPrefix(socket, DockerSocketSchema) { + return strings.Replace(socket, DockerSocketSchema, "", 1) + } + + return socket + } + + tcHost, err := testcontainersHostFromProperties(ctx) + if err == nil { + return checkDockerSocketFn(tcHost) + } + + testcontainersDockerSocket, err := dockerSocketOverridePath(ctx) + if err == nil { + return checkDockerSocketFn(testcontainersDockerSocket) + } + + info, err := cli.Info(ctx) + if err != nil { + panic(err) // Docker Info is required to get the Operating System + } + + // Because Docker Desktop runs in a VM, we need to use the default docker path for rootless docker + if info.OperatingSystem == "Docker Desktop" { + return DockerSocketPath + } + + dockerHost := extractDockerHost(ctx) + + return checkDockerSocketFn(dockerHost) +} + +// dockerHostFromEnv returns the docker host from the DOCKER_HOST environment variable, if it's not empty +func dockerHostFromEnv(ctx context.Context) (string, error) { + if dockerHostPath := os.Getenv("DOCKER_HOST"); dockerHostPath != "" { + return dockerHostPath, nil + } + + return "", ErrDockerHostNotSet +} + +// dockerHostFromContext returns the docker host from the Go context, if it's not empty +func dockerHostFromContext(ctx context.Context) (string, error) { + if socketPath, ok := ctx.Value(DockerHostContextKey).(string); ok && socketPath != "" { + parsed, err := parseURL(socketPath) + if err != nil { + return "", err + } + + return parsed, nil + } + + return "", ErrDockerSocketNotSetInContext +} + +// dockerHostFromProperties returns the docker host from the ~/.testcontainers.properties file, if it's not empty +func dockerHostFromProperties(ctx context.Context) (string, error) { + cfg := config.Read() + socketPath := cfg.Host + if socketPath != "" { + parsed, err := parseURL(socketPath) + if err != nil { + return "", err + } + + return parsed, nil + } + + return "", ErrDockerSocketNotSetInProperties +} + +// dockerSocketOverridePath returns the docker socket from the TESTCONTAINERS_DOCKER_SOCKET_OVERRIDE environment variable, +// if it's not empty +func dockerSocketOverridePath(ctx context.Context) (string, error) { + if dockerHostPath, exists := os.LookupEnv("TESTCONTAINERS_DOCKER_SOCKET_OVERRIDE"); exists { + return dockerHostPath, nil + } + + return "", ErrDockerSocketOverrideNotSet +} + +// dockerSocketPath returns the docker socket from the default docker socket path, if it's not empty +// and the socket exists +func dockerSocketPath(ctx context.Context) (string, error) { + if fileExists(DockerSocketPath) { + return DockerSocketPathWithSchema, nil + } + + return "", ErrSocketNotFoundInPath +} + +// testcontainersHostFromProperties returns the testcontainers host from the ~/.testcontainers.properties file, if it's not empty +func testcontainersHostFromProperties(ctx context.Context) (string, error) { + cfg := config.Read() + testcontainersHost := cfg.TestcontainersHost + if testcontainersHost != "" { + parsed, err := parseURL(testcontainersHost) + if err != nil { + return "", err + } + + return parsed, nil + } + + return "", ErrTestcontainersHostNotSetInProperties +} + +// InAContainer returns true if the code is running inside a container +// See 
https://github.com/docker/docker/blob/a9fa38b1edf30b23cae3eade0be48b3d4b1de14b/daemon/initlayer/setup_unix.go#L25
+func InAContainer() bool {
+	return inAContainer("/.dockerenv")
+}
+
+func inAContainer(path string) bool {
+	// see https://github.com/testcontainers/testcontainers-java/blob/3ad8d80e2484864e554744a4800a81f6b7982168/core/src/main/java/org/testcontainers/dockerclient/DockerClientConfigUtils.java#L15
+	if _, err := os.Stat(path); err == nil {
+		return true
+	}
+	return false
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/docker_rootless.go b/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/docker_rootless.go
new file mode 100644
index 000000000..43ff4637d
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/docker_rootless.go
@@ -0,0 +1,141 @@
+package testcontainersdocker
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/url"
+	"os"
+	"path/filepath"
+	"runtime"
+)
+
+var (
+	ErrRootlessDockerNotFound               = errors.New("rootless Docker not found")
+	ErrRootlessDockerNotFoundHomeDesktopDir = errors.New("checked path: ~/.docker/desktop/docker.sock")
+	ErrRootlessDockerNotFoundHomeRunDir     = errors.New("checked path: ~/.docker/run/docker.sock")
+	ErrRootlessDockerNotFoundRunDir         = errors.New("checked path: /run/user/${uid}/docker.sock")
+	ErrRootlessDockerNotFoundXDGRuntimeDir  = errors.New("checked path: $XDG_RUNTIME_DIR")
+	ErrRootlessDockerNotSupportedWindows    = errors.New("rootless Docker is not supported on Windows")
+	ErrXDGRuntimeDirNotSet                  = errors.New("XDG_RUNTIME_DIR is not set")
+)
+
+// baseRunDir is the base directory for the "/run/user/${uid}" directory.
+// It is a variable so it can be modified for testing.
+var baseRunDir = "/run"
+
+// rootlessDockerSocketPath returns the path to the rootless Docker socket, if it exists.
+// The rootless socket path is determined by the following order:
+//
+// 1. XDG_RUNTIME_DIR environment variable.
+// 2. ~/.docker/run/docker.sock file.
+// 3. ~/.docker/desktop/docker.sock file.
+// 4. /run/user/${uid}/docker.sock file.
+// 5. Else, return ErrRootlessDockerNotFound, wrapping specific errors for each of the above paths.
+//
+// It should include the Docker socket schema (unix://) in the returned path.
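A standalone sketch of the lookup order documented above: probe each candidate socket location and return the first one that exists, prefixed with the unix:// schema. The candidate paths and helper names below are illustrative only.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// firstExistingSocket returns the first candidate path that exists on disk,
// prefixed with the unix:// schema, mirroring the lookup order above.
func firstExistingSocket(candidates ...string) (string, bool) {
	for _, c := range candidates {
		if _, err := os.Stat(c); err == nil {
			return "unix://" + c, true
		}
	}
	return "", false
}

func main() {
	home, _ := os.UserHomeDir()
	candidates := []string{
		filepath.Join(os.Getenv("XDG_RUNTIME_DIR"), "docker.sock"),
		filepath.Join(home, ".docker", "run", "docker.sock"),
		filepath.Join(home, ".docker", "desktop", "docker.sock"),
		fmt.Sprintf("/run/user/%d/docker.sock", os.Getuid()),
	}
	if s, ok := firstExistingSocket(candidates...); ok {
		fmt.Println("rootless socket:", s)
		return
	}
	fmt.Println("rootless Docker not found")
}
```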
+func rootlessDockerSocketPath(_ context.Context) (string, error) { + // adding a manner to test it on non-windows machines, setting the GOOS env var to windows + // This is needed because runtime.GOOS is a constant that returns the OS of the machine running the test + if os.Getenv("GOOS") == "windows" || runtime.GOOS == "windows" { + return "", ErrRootlessDockerNotSupportedWindows + } + + socketPathFns := []func() (string, error){ + rootlessSocketPathFromEnv, + rootlessSocketPathFromHomeRunDir, + rootlessSocketPathFromHomeDesktopDir, + rootlessSocketPathFromRunDir, + } + + outerErr := ErrRootlessDockerNotFound + for _, socketPathFn := range socketPathFns { + s, err := socketPathFn() + if err != nil { + outerErr = fmt.Errorf("%w: %v", outerErr, err) + continue + } + + return DockerSocketSchema + s, nil + } + + return "", outerErr +} + +func fileExists(f string) bool { + _, err := os.Stat(f) + return err == nil +} + +func parseURL(s string) (string, error) { + var hostURL *url.URL + if u, err := url.Parse(s); err != nil { + return "", err + } else { + hostURL = u + } + + switch hostURL.Scheme { + case "unix", "npipe": + return hostURL.Path, nil + case "tcp": + // return the original URL, as it is a valid TCP URL + return s, nil + default: + return "", ErrNoUnixSchema + } +} + +// rootlessSocketPathFromEnv returns the path to the rootless Docker socket from the XDG_RUNTIME_DIR environment variable. +// It should include the Docker socket schema (unix://) in the returned path. +func rootlessSocketPathFromEnv() (string, error) { + xdgRuntimeDir, exists := os.LookupEnv("XDG_RUNTIME_DIR") + if exists { + f := filepath.Join(xdgRuntimeDir, "docker.sock") + if fileExists(f) { + return f, nil + } + + return "", ErrRootlessDockerNotFoundXDGRuntimeDir + } + + return "", ErrXDGRuntimeDirNotSet +} + +// rootlessSocketPathFromHomeRunDir returns the path to the rootless Docker socket from the ~/.docker/run/docker.sock file. +func rootlessSocketPathFromHomeRunDir() (string, error) { + home, err := os.UserHomeDir() + if err != nil { + return "", err + } + + f := filepath.Join(home, ".docker", "run", "docker.sock") + if fileExists(f) { + return f, nil + } + return "", ErrRootlessDockerNotFoundHomeRunDir +} + +// rootlessSocketPathFromHomeDesktopDir returns the path to the rootless Docker socket from the ~/.docker/desktop/docker.sock file. +func rootlessSocketPathFromHomeDesktopDir() (string, error) { + home, err := os.UserHomeDir() + if err != nil { + return "", err + } + + f := filepath.Join(home, ".docker", "desktop", "docker.sock") + if fileExists(f) { + return f, nil + } + return "", ErrRootlessDockerNotFoundHomeDesktopDir +} + +// rootlessSocketPathFromRunDir returns the path to the rootless Docker socket from the /run/user//docker.sock file. 
+func rootlessSocketPathFromRunDir() (string, error) { + uid := os.Getuid() + f := filepath.Join(baseRunDir, "user", fmt.Sprintf("%d", uid), "docker.sock") + if fileExists(f) { + return f, nil + } + return "", ErrRootlessDockerNotFoundRunDir +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/docker_socket.go b/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/docker_socket.go new file mode 100644 index 000000000..42414e94b --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/docker_socket.go @@ -0,0 +1,46 @@ +package testcontainersdocker + +import ( + "net/url" + "strings" + + "github.com/docker/docker/client" +) + +// DockerSocketSchema is the unix schema. +var DockerSocketSchema = "unix://" + +// DockerSocketPath is the path to the docker socket under unix systems. +var DockerSocketPath = "/var/run/docker.sock" + +// DockerSocketPathWithSchema is the path to the docker socket under unix systems with the unix schema. +var DockerSocketPathWithSchema = DockerSocketSchema + DockerSocketPath + +// TCPSchema is the tcp schema. +var TCPSchema = "tcp://" + +func init() { + const DefaultDockerHost = client.DefaultDockerHost + + u, err := url.Parse(DefaultDockerHost) + if err != nil { + // unsupported default host specified by the docker client package, + // so revert to the default unix docker socket path + return + } + + switch u.Scheme { + case "unix", "npipe": + DockerSocketSchema = u.Scheme + "://" + DockerSocketPath = u.Path + if !strings.HasPrefix(DockerSocketPath, "/") { + // seeing as the code in this module depends on DockerSocketPath having + // a slash (`/`) prefix, we add it here if it is missing. + // for the known environments, we do not foresee how the socket-path + // should miss the slash, however this extra if-condition is worth to + // save future pain from innocent users. 
+ DockerSocketPath = "/" + DockerSocketPath + } + DockerSocketPathWithSchema = DockerSocketSchema + DockerSocketPath + } +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/images.go b/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/images.go new file mode 100644 index 000000000..3dc10c2a4 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/images.go @@ -0,0 +1,126 @@ +package testcontainersdocker + +import ( + "bufio" + "net/url" + "os" + "regexp" + "strings" + "unicode/utf8" +) + +const ( + IndexDockerIO = "https://index.docker.io/v1/" + maxURLRuneCount = 2083 + minURLRuneCount = 3 + URLSchema = `((ftp|tcp|udp|wss?|https?):\/\/)` + URLUsername = `(\S+(:\S*)?@)` + URLIP = `([1-9]\d?|1\d\d|2[01]\d|22[0-3]|24\d|25[0-5])(\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-5]))` + IP = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))` + URLSubdomain = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))` + URLPath = `((\/|\?|#)[^\s]*)` + URLPort = `(:(\d{1,5}))` + URL = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$` +) + +var rxURL = regexp.MustCompile(URL) + +func ExtractImagesFromDockerfile(dockerfile string, buildArgs map[string]*string) ([]string, error) { + var images []string + + file, err := os.Open(dockerfile) + if err != nil { + return nil, err + } + defer file.Close() + + var lines []string + scanner := bufio.NewScanner(file) + for scanner.Scan() { + lines = append(lines, scanner.Text()) + } + if scanner.Err() != nil { + return nil, scanner.Err() + } + + // extract images from dockerfile + for _, line := range lines { + line = strings.TrimSpace(line) + if !strings.HasPrefix(strings.ToUpper(line), "FROM") { + continue + } + + // remove FROM + line = strings.TrimPrefix(line, "FROM") + parts := strings.Split(strings.TrimSpace(line), " ") + if len(parts) == 0 { + continue + } + + // interpolate build args + for k, v := range buildArgs { + if v != nil { + parts[0] = strings.Replace(parts[0], "${"+k+"}", *v, -1) + } + } + images = append(images, parts[0]) + } + + return images, nil +} + +// ExtractRegistry extracts the registry from the image name, using a regular expression to extract the registry from the image name. 
+// regular expression to extract the registry from the image name
+// the regular expression is based on the grammar defined in
+// - image:tag
+// - image
+// - repository/image:tag
+// - repository/image
+// - registry/image:tag
+// - registry/image
+// - registry/repository/image:tag
+// - registry/repository/image
+// - registry:port/repository/image:tag
+// - registry:port/repository/image
+// - registry:port/image:tag
+// - registry:port/image
+// Once extracted the registry, it is validated to check if it is a valid URL or an IP address.
+func ExtractRegistry(image string, fallback string) string {
+	exp := regexp.MustCompile(`^(?:(?P<registry>(https?://)?[^/]+)(?::(?P<port>\d+))?/)?(?:(?P<repository>[^/]+)/)?(?P<image>[^:]+)(?::(?P<tag>.+))?$`).FindStringSubmatch(image)
+	if len(exp) == 0 {
+		return ""
+	}
+
+	registry := exp[1]
+
+	if IsURL(registry) {
+		return registry
+	}
+
+	return fallback
+}
+
+// IsURL checks if the string is an URL.
+// Extracted from https://github.com/asaskevich/govalidator/blob/f21760c49a8d/validator.go#L104
+func IsURL(str string) bool {
+	if str == "" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, ".") {
+		return false
+	}
+	strTemp := str
+	if strings.Contains(str, ":") && !strings.Contains(str, "://") {
+		// support no indicated urlscheme but with colon for port number
+		// http:// is appended so url.Parse will succeed, strTemp used so it does not impact rxURL.MatchString
+		strTemp = "http://" + str
+	}
+	u, err := url.Parse(strTemp)
+	if err != nil {
+		return false
+	}
+	if strings.HasPrefix(u.Host, ".") {
+		return false
+	}
+	if u.Host == "" && (u.Path != "" && !strings.Contains(u.Path, ".")) {
+		return false
+	}
+	return rxURL.MatchString(str)
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/labels.go b/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/labels.go
new file mode 100644
index 000000000..12742769b
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/labels.go
@@ -0,0 +1,9 @@
+package testcontainersdocker
+
+const (
+	LabelBase      = "org.testcontainers"
+	LabelLang      = LabelBase + ".lang"
+	LabelReaper    = LabelBase + ".reaper"
+	LabelSessionID = LabelBase + ".sessionId"
+	LabelVersion   = LabelBase + ".version"
+)
diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainerssession/session.go b/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainerssession/session.go
new file mode 100644
index 000000000..21bd1f980
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainerssession/session.go
@@ -0,0 +1,22 @@
+package testcontainerssession
+
+import (
+	"sync"
+
+	"github.com/google/uuid"
+)
+
+var id uuid.UUID
+var idOnce sync.Once
+
+func ID() uuid.UUID {
+	idOnce.Do(func() {
+		id = uuid.New()
+	})
+
+	return id
+}
+
+func String() string {
+	return ID().String()
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/version.go b/vendor/github.com/testcontainers/testcontainers-go/internal/version.go
new file mode 100644
index 000000000..1177c175a
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/internal/version.go
@@ -0,0 +1,4 @@
+package internal
+
+// Version is the next development version of the application
+const Version = "0.21.0"
diff --git a/vendor/github.com/testcontainers/testcontainers-go/lifecycle.go b/vendor/github.com/testcontainers/testcontainers-go/lifecycle.go
new file mode 100644 index 000000000..55dd48cf7 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/lifecycle.go @@ -0,0 +1,379 @@ +package testcontainers + +import ( + "context" + "io" + "strings" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/docker/go-connections/nat" + "golang.org/x/exp/slices" +) + +// ContainerRequestHook is a hook that will be called before a container is created. +// It can be used to modify container configuration before it is created, +// using the different lifecycle hooks that are available: +// - Creating +// For that, it will receive a ContainerRequest, modify it and return an error if needed. +type ContainerRequestHook func(ctx context.Context, req ContainerRequest) error + +// ContainerHook is a hook that will be called after a container is created +// It can be used to modify the state of the container after it is created, +// using the different lifecycle hooks that are available: +// - Created +// - Starting +// - Started +// - Stopping +// - Stopped +// - Terminating +// - Terminated +// For that, it will receive a Container, modify it and return an error if needed. +type ContainerHook func(ctx context.Context, container Container) error + +// ContainerLifecycleHooks is a struct that contains all the hooks that can be used +// to modify the container lifecycle. All the container lifecycle hooks except the PreCreates hooks +// will be passed to the container once it's created +type ContainerLifecycleHooks struct { + PreCreates []ContainerRequestHook + PostCreates []ContainerHook + PreStarts []ContainerHook + PostStarts []ContainerHook + PreStops []ContainerHook + PostStops []ContainerHook + PreTerminates []ContainerHook + PostTerminates []ContainerHook +} + +var DefaultLoggingHook = func(logger Logging) ContainerLifecycleHooks { + shortContainerID := func(c Container) string { + return c.GetContainerID()[:12] + } + + return ContainerLifecycleHooks{ + PreCreates: []ContainerRequestHook{ + func(ctx context.Context, req ContainerRequest) error { + logger.Printf("🐳 Creating container for image %s", req.Image) + return nil + }, + }, + PostCreates: []ContainerHook{ + func(ctx context.Context, c Container) error { + logger.Printf("✅ Container created: %s", shortContainerID(c)) + return nil + }, + }, + PreStarts: []ContainerHook{ + func(ctx context.Context, c Container) error { + logger.Printf("🐳 Starting container: %s", shortContainerID(c)) + return nil + }, + }, + PostStarts: []ContainerHook{ + func(ctx context.Context, c Container) error { + logger.Printf("✅ Container started: %s", shortContainerID(c)) + return nil + }, + }, + PreStops: []ContainerHook{ + func(ctx context.Context, c Container) error { + logger.Printf("🐳 Stopping container: %s", shortContainerID(c)) + return nil + }, + }, + PostStops: []ContainerHook{ + func(ctx context.Context, c Container) error { + logger.Printf("✋ Container stopped: %s", shortContainerID(c)) + return nil + }, + }, + PreTerminates: []ContainerHook{ + func(ctx context.Context, c Container) error { + logger.Printf("🐳 Terminating container: %s", shortContainerID(c)) + return nil + }, + }, + PostTerminates: []ContainerHook{ + func(ctx context.Context, c Container) error { + logger.Printf("🚫 Container terminated: %s", shortContainerID(c)) + return nil + }, + }, + } +} + +// creatingHook is a hook that will be called before a container is created. 
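+// As a minimal, illustrative sketch (the image name and hook body are examples,
+// not prescriptions), lifecycle hooks such as the ones above are supplied on
+// the container request:
+//
+//	req := ContainerRequest{
+//		Image: "nginx:alpine",
+//		LifecycleHooks: []ContainerLifecycleHooks{
+//			DefaultLoggingHook(Logger),
+//			{
+//				PostStarts: []ContainerHook{
+//					func(ctx context.Context, c Container) error {
+//						Logger.Printf("container %s is ready", c.GetContainerID())
+//						return nil
+//					},
+//				},
+//			},
+//		},
+//	}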
+func (req ContainerRequest) creatingHook(ctx context.Context) error { + for _, lifecycleHooks := range req.LifecycleHooks { + err := lifecycleHooks.Creating(ctx)(req) + if err != nil { + return err + } + } + + return nil +} + +// createdHook is a hook that will be called after a container is created +func (c *DockerContainer) createdHook(ctx context.Context) error { + for _, lifecycleHooks := range c.lifecycleHooks { + err := containerHookFn(ctx, lifecycleHooks.PostCreates)(c) + if err != nil { + return err + } + } + + return nil +} + +// startingHook is a hook that will be called before a container is started +func (c *DockerContainer) startingHook(ctx context.Context) error { + for _, lifecycleHooks := range c.lifecycleHooks { + err := containerHookFn(ctx, lifecycleHooks.PreStarts)(c) + if err != nil { + c.printLogs(ctx) + return err + } + } + + return nil +} + +// startedHook is a hook that will be called after a container is started +func (c *DockerContainer) startedHook(ctx context.Context) error { + for _, lifecycleHooks := range c.lifecycleHooks { + err := containerHookFn(ctx, lifecycleHooks.PostStarts)(c) + if err != nil { + c.printLogs(ctx) + return err + } + } + + return nil +} + +// printLogs is a helper function that will print the logs of a Docker container +// We are going to use this helper function to inform the user of the logs when an error occurs +func (c *DockerContainer) printLogs(ctx context.Context) { + reader, err := c.Logs(ctx) + if err != nil { + c.logger.Printf("failed accessing container logs: %w\n", err) + return + } + + b, err := io.ReadAll(reader) + if err != nil { + c.logger.Printf("failed reading container logs: %w\n", err) + return + } + + c.logger.Printf("container logs:\n%s", b) +} + +// stoppingHook is a hook that will be called before a container is stopped +func (c *DockerContainer) stoppingHook(ctx context.Context) error { + for _, lifecycleHooks := range c.lifecycleHooks { + err := containerHookFn(ctx, lifecycleHooks.PreStops)(c) + if err != nil { + return err + } + } + + return nil +} + +// stoppedHook is a hook that will be called after a container is stopped +func (c *DockerContainer) stoppedHook(ctx context.Context) error { + for _, lifecycleHooks := range c.lifecycleHooks { + err := containerHookFn(ctx, lifecycleHooks.PostStops)(c) + if err != nil { + return err + } + } + + return nil +} + +// terminatingHook is a hook that will be called before a container is terminated +func (c *DockerContainer) terminatingHook(ctx context.Context) error { + for _, lifecycleHooks := range c.lifecycleHooks { + err := containerHookFn(ctx, lifecycleHooks.PreTerminates)(c) + if err != nil { + return err + } + } + + return nil +} + +// terminatedHook is a hook that will be called after a container is terminated +func (c *DockerContainer) terminatedHook(ctx context.Context) error { + for _, lifecycleHooks := range c.lifecycleHooks { + err := containerHookFn(ctx, lifecycleHooks.PostTerminates)(c) + if err != nil { + return err + } + } + + return nil +} + +// Creating is a hook that will be called before a container is created. +func (c ContainerLifecycleHooks) Creating(ctx context.Context) func(req ContainerRequest) error { + return func(req ContainerRequest) error { + for _, hook := range c.PreCreates { + if err := hook(ctx, req); err != nil { + return err + } + } + + return nil + } +} + +// containerHookFn is a helper function that will create a function to be returned by all the different +// container lifecycle hooks. 
The created function will iterate over all the hooks and call them one by one. +func containerHookFn(ctx context.Context, containerHook []ContainerHook) func(container Container) error { + return func(container Container) error { + for _, hook := range containerHook { + if err := hook(ctx, container); err != nil { + return err + } + } + + return nil + } +} + +// Created is a hook that will be called after a container is created +func (c ContainerLifecycleHooks) Created(ctx context.Context) func(container Container) error { + return containerHookFn(ctx, c.PostCreates) +} + +// Starting is a hook that will be called before a container is started +func (c ContainerLifecycleHooks) Starting(ctx context.Context) func(container Container) error { + return containerHookFn(ctx, c.PreStarts) +} + +// Started is a hook that will be called after a container is started +func (c ContainerLifecycleHooks) Started(ctx context.Context) func(container Container) error { + return containerHookFn(ctx, c.PostStarts) +} + +// Stopping is a hook that will be called before a container is stopped +func (c ContainerLifecycleHooks) Stopping(ctx context.Context) func(container Container) error { + return containerHookFn(ctx, c.PreStops) +} + +// Stopped is a hook that will be called after a container is stopped +func (c ContainerLifecycleHooks) Stopped(ctx context.Context) func(container Container) error { + return containerHookFn(ctx, c.PostStops) +} + +// Terminating is a hook that will be called before a container is terminated +func (c ContainerLifecycleHooks) Terminating(ctx context.Context) func(container Container) error { + return containerHookFn(ctx, c.PreTerminates) +} + +// Terminated is a hook that will be called after a container is terminated +func (c ContainerLifecycleHooks) Terminated(ctx context.Context) func(container Container) error { + return containerHookFn(ctx, c.PostTerminates) +} + +func (p *DockerProvider) preCreateContainerHook(ctx context.Context, req ContainerRequest, dockerInput *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig) error { + // prepare mounts + hostConfig.Mounts = mapToDockerMounts(req.Mounts) + + endpointSettings := map[string]*network.EndpointSettings{} + + // #248: Docker allows only one network to be specified during container creation + // If there is more than one network specified in the request container should be attached to them + // once it is created. 
We will take a first network if any specified in the request and use it to create container + if len(req.Networks) > 0 { + attachContainerTo := req.Networks[0] + + nw, err := p.GetNetwork(ctx, NetworkRequest{ + Name: attachContainerTo, + }) + if err == nil { + aliases := []string{} + if _, ok := req.NetworkAliases[attachContainerTo]; ok { + aliases = req.NetworkAliases[attachContainerTo] + } + endpointSetting := network.EndpointSettings{ + Aliases: aliases, + NetworkID: nw.ID, + } + endpointSettings[attachContainerTo] = &endpointSetting + } + } + + if req.ConfigModifier != nil { + req.ConfigModifier(dockerInput) + } + + if req.HostConfigModifier == nil { + req.HostConfigModifier = defaultHostConfigModifier(req) + } + req.HostConfigModifier(hostConfig) + + if req.EnpointSettingsModifier != nil { + req.EnpointSettingsModifier(endpointSettings) + } + + networkingConfig.EndpointsConfig = endpointSettings + + exposedPorts := req.ExposedPorts + // this check must be done after the pre-creation Modifiers are called, so the network mode is already set + if len(exposedPorts) == 0 && !hostConfig.NetworkMode.IsContainer() { + image, _, err := p.client.ImageInspectWithRaw(ctx, dockerInput.Image) + if err != nil { + return err + } + for p := range image.ContainerConfig.ExposedPorts { + exposedPorts = append(exposedPorts, string(p)) + } + } + + exposedPortSet, exposedPortMap, err := nat.ParsePortSpecs(exposedPorts) + if err != nil { + return err + } + + dockerInput.ExposedPorts = exposedPortSet + + // only exposing those ports automatically if the container request exposes zero ports and the container does not run in a container network + if len(exposedPorts) == 0 && !hostConfig.NetworkMode.IsContainer() { + hostConfig.PortBindings = exposedPortMap + } else { + hostConfig.PortBindings = mergePortBindings(hostConfig.PortBindings, exposedPortMap, req.ExposedPorts) + } + + return nil +} + +func mergePortBindings(configPortMap, exposedPortMap nat.PortMap, exposedPorts []string) nat.PortMap { + if exposedPortMap == nil { + exposedPortMap = make(map[nat.Port][]nat.PortBinding) + } + + for k, v := range configPortMap { + if slices.Contains(exposedPorts, strings.Split(string(k), "/")[0]) { + exposedPortMap[k] = v + } + } + return exposedPortMap +} + +// defaultHostConfigModifier provides a default modifier including the deprecated fields +func defaultHostConfigModifier(req ContainerRequest) func(hostConfig *container.HostConfig) { + return func(hostConfig *container.HostConfig) { + hostConfig.AutoRemove = req.AutoRemove + hostConfig.CapAdd = req.CapAdd + hostConfig.CapDrop = req.CapDrop + hostConfig.Binds = req.Binds + hostConfig.ExtraHosts = req.ExtraHosts + hostConfig.NetworkMode = req.NetworkMode + hostConfig.Resources = req.Resources + } +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/logconsumer.go b/vendor/github.com/testcontainers/testcontainers-go/logconsumer.go new file mode 100644 index 000000000..c5a2e29cf --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/logconsumer.go @@ -0,0 +1,22 @@ +package testcontainers + +// StdoutLog is the log type for STDOUT +const StdoutLog = "STDOUT" + +// StderrLog is the log type for STDERR +const StderrLog = "STDERR" + +// Log represents a message that was created by a process, +// LogType is either "STDOUT" or "STDERR", +// Content is the byte contents of the message itself +type Log struct { + LogType string + Content []byte +} + +// LogConsumer represents any object that can +// handle a Log, it is up to the LogConsumer 
instance +// what to do with the log +type LogConsumer interface { + Accept(Log) +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/logger.go b/vendor/github.com/testcontainers/testcontainers-go/logger.go new file mode 100644 index 000000000..002ab19bb --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/logger.go @@ -0,0 +1,75 @@ +package testcontainers + +import ( + "context" + "log" + "os" + "testing" + + "github.com/docker/docker/client" +) + +// Logger is the default log instance +var Logger Logging = log.New(os.Stderr, "", log.LstdFlags) + +// Logging defines the Logger interface +type Logging interface { + Printf(format string, v ...interface{}) +} + +// LogDockerServerInfo logs the docker server info using the provided logger and Docker client +func LogDockerServerInfo(ctx context.Context, client client.APIClient, logger Logging) { + infoMessage := `%v - Connected to docker: + Server Version: %v + API Version: %v + Operating System: %v + Total Memory: %v MB +` + + info, err := client.Info(ctx) + if err != nil { + logger.Printf("failed getting information about docker server: %s", err) + return + } + + logger.Printf(infoMessage, packagePath, + info.ServerVersion, client.ClientVersion(), + info.OperatingSystem, info.MemTotal/1024/1024) +} + +// TestLogger returns a Logging implementation for testing.TB +// This way logs from testcontainers are part of the test output of a test suite or test case +func TestLogger(tb testing.TB) Logging { + tb.Helper() + return testLogger{TB: tb} +} + +// WithLogger is a generic option that implements GenericProviderOption, DockerProviderOption +// It replaces the global Logging implementation with a user defined one e.g. to aggregate logs from testcontainers +// with the logs of specific test case +func WithLogger(logger Logging) LoggerOption { + return LoggerOption{ + logger: logger, + } +} + +type LoggerOption struct { + logger Logging +} + +func (o LoggerOption) ApplyGenericTo(opts *GenericProviderOptions) { + opts.Logger = o.logger +} + +func (o LoggerOption) ApplyDockerTo(opts *DockerProviderOptions) { + opts.Logger = o.logger +} + +type testLogger struct { + testing.TB +} + +func (t testLogger) Printf(format string, v ...interface{}) { + t.Helper() + t.Logf(format, v...) 
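+	// Illustrative sketch (the test name is an example): route testcontainers
+	// logs through the test's own logger so they show up in `go test` output:
+	//
+	//	func TestWithContainers(t *testing.T) {
+	//		provider, err := ProviderDocker.GetProvider(WithLogger(TestLogger(t)))
+	//		if err != nil {
+	//			t.Skipf("docker provider unavailable: %s", err)
+	//		}
+	//		if err := provider.Health(context.Background()); err != nil {
+	//			t.Skipf("docker provider unhealthy: %s", err)
+	//		}
+	//	}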
+} diff --git a/vendor/github.com/testcontainers/testcontainers-go/mkdocs.yml b/vendor/github.com/testcontainers/testcontainers-go/mkdocs.yml new file mode 100644 index 000000000..91383709d --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/mkdocs.yml @@ -0,0 +1,106 @@ +site_name: Testcontainers for Go +site_url: https://golang.testcontainers.org +plugins: + - search + - codeinclude + - markdownextradata +theme: + name: material + custom_dir: docs/theme + palette: + scheme: testcontainers + font: + text: Roboto + code: Roboto Mono + logo: logo.svg + favicon: favicon.ico +extra_css: + - css/extra.css +repo_name: testcontainers-go +repo_url: https://github.com/testcontainers/testcontainers-go +markdown_extensions: + - admonition + - codehilite: + linenums: false + - pymdownx.superfences + - pymdownx.tabbed: + alternate_style: true + - pymdownx.snippets + - toc: + permalink: true + - attr_list + - pymdownx.emoji: + emoji_index: !!python/name:materialx.emoji.twemoji + emoji_generator: !!python/name:materialx.emoji.to_svg +nav: + - Home: index.md + - Quickstart: quickstart.md + - Features: + - features/creating_container.md + - features/files_and_mounts.md + - features/configuration.md + - features/networking.md + - features/garbage_collector.md + - features/build_from_dockerfile.md + - features/docker_auth.md + - features/docker_compose.md + - features/follow_logs.md + - features/override_container_command.md + - features/copy_file.md + - Wait Strategies: + - Introduction: features/wait/introduction.md + - Exec: features/wait/exec.md + - Exit: features/wait/exit.md + - Health: features/wait/health.md + - HostPort: features/wait/host_port.md + - HTTP: features/wait/http.md + - Log: features/wait/log.md + - Multi: features/wait/multi.md + - SQL: features/wait/sql.md + - Modules: + - modules/index.md + - modules/couchbase.md + - modules/k3s.md + - modules/localstack.md + - modules/mysql.md + - modules/neo4j.md + - modules/postgres.md + - modules/pulsar.md + - modules/redis.md + - modules/redpanda.md + - modules/vault.md + - Examples: + - examples/index.md + - examples/bigtable.md + - examples/cockroachdb.md + - examples/consul.md + - examples/datastore.md + - examples/firestore.md + - examples/mongodb.md + - examples/nats.md + - examples/nginx.md + - examples/pubsub.md + - examples/spanner.md + - examples/toxiproxy.md + - System Requirements: + - system_requirements/index.md + - system_requirements/docker.md + - Continuous Integration: + - system_requirements/ci/aws_codebuild.md + - system_requirements/ci/bitbucket_pipelines.md + - system_requirements/ci/circle_ci.md + - system_requirements/ci/concourse_ci.md + - system_requirements/ci/dind_patterns.md + - system_requirements/ci/drone.md + - system_requirements/ci/gitlab_ci.md + - system_requirements/ci/tekton.md + - system_requirements/ci/travis.md + - system_requirements/using_colima.md + - system_requirements/using_podman.md + - Contributing: + - contributing.md + - contributing_docs.md + - Getting help: getting_help.md +edit_uri: edit/main/docs/ +extra: + latest_version: v0.21.0 diff --git a/vendor/github.com/testcontainers/testcontainers-go/mounts.go b/vendor/github.com/testcontainers/testcontainers-go/mounts.go new file mode 100644 index 000000000..c4b6fc916 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/mounts.go @@ -0,0 +1,116 @@ +package testcontainers + +const ( + MountTypeBind MountType = iota + MountTypeVolume + MountTypeTmpfs + MountTypePipe +) + +var ( + _ ContainerMountSource = 
(*GenericBindMountSource)(nil) + _ ContainerMountSource = (*GenericVolumeMountSource)(nil) + _ ContainerMountSource = (*GenericTmpfsMountSource)(nil) +) + +type ( + // ContainerMounts represents a collection of mounts for a container + ContainerMounts []ContainerMount + MountType uint +) + +// ContainerMountSource is the base for all mount sources +type ContainerMountSource interface { + // Source will be used as Source field in the final mount + // this might either be a volume name, a host path or might be empty e.g. for Tmpfs + Source() string + + // Type determines the final mount type + // possible options are limited by the Docker API + Type() MountType +} + +// GenericBindMountSource implements ContainerMountSource and represents a bind mount +// Optionally mount.BindOptions might be added for advanced scenarios +type GenericBindMountSource struct { + // HostPath is the path mounted into the container + // the same host path might be mounted to multiple locations within a single container + HostPath string +} + +func (s GenericBindMountSource) Source() string { + return s.HostPath +} + +func (GenericBindMountSource) Type() MountType { + return MountTypeBind +} + +// GenericVolumeMountSource implements ContainerMountSource and represents a volume mount +type GenericVolumeMountSource struct { + // Name refers to the name of the volume to be mounted + // the same volume might be mounted to multiple locations within a single container + Name string +} + +func (s GenericVolumeMountSource) Source() string { + return s.Name +} + +func (GenericVolumeMountSource) Type() MountType { + return MountTypeVolume +} + +// GenericTmpfsMountSource implements ContainerMountSource and represents a TmpFS mount +// Optionally mount.TmpfsOptions might be added for advanced scenarios +type GenericTmpfsMountSource struct { +} + +func (s GenericTmpfsMountSource) Source() string { + return "" +} + +func (GenericTmpfsMountSource) Type() MountType { + return MountTypeTmpfs +} + +// ContainerMountTarget represents the target path within a container where the mount will be available +// Note that mount targets must be unique. It's not supported to mount different sources to the same target. +type ContainerMountTarget string + +func (t ContainerMountTarget) Target() string { + return string(t) +} + +// BindMount returns a new ContainerMount with a GenericBindMountSource as source +// This is a convenience method to cover typical use cases. +func BindMount(hostPath string, mountTarget ContainerMountTarget) ContainerMount { + return ContainerMount{ + Source: GenericBindMountSource{HostPath: hostPath}, + Target: mountTarget, + } +} + +// VolumeMount returns a new ContainerMount with a GenericVolumeMountSource as source +// This is a convenience method to cover typical use cases. 
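+// A short, illustrative sketch of composing mounts for a request (the host
+// path and volume name below are examples):
+//
+//	mounts := Mounts(
+//		BindMount("/host/config", "/etc/app"),
+//		VolumeMount("app-cache", "/var/cache/app"),
+//	)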
+func VolumeMount(volumeName string, mountTarget ContainerMountTarget) ContainerMount { + return ContainerMount{ + Source: GenericVolumeMountSource{Name: volumeName}, + Target: mountTarget, + } +} + +// Mounts returns a ContainerMounts to support a more fluent API +func Mounts(mounts ...ContainerMount) ContainerMounts { + return mounts +} + +// ContainerMount models a mount into a container +type ContainerMount struct { + // Source is typically either a GenericBindMountSource or a GenericVolumeMountSource + Source ContainerMountSource + // Target is the path where the mount should be mounted within the container + Target ContainerMountTarget + // ReadOnly determines if the mount should be read-only + ReadOnly bool +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/network.go b/vendor/github.com/testcontainers/testcontainers-go/network.go new file mode 100644 index 000000000..c96fc11c9 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/network.go @@ -0,0 +1,45 @@ +package testcontainers + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" +) + +// NetworkProvider allows the creation of networks on an arbitrary system +type NetworkProvider interface { + CreateNetwork(context.Context, NetworkRequest) (Network, error) // create a network + GetNetwork(context.Context, NetworkRequest) (types.NetworkResource, error) // get a network +} + +// Network allows getting info about a single network instance +type Network interface { + Remove(context.Context) error // removes the network +} + +type DefaultNetwork string + +func (n DefaultNetwork) ApplyGenericTo(opts *GenericProviderOptions) { + opts.DefaultNetwork = string(n) +} + +func (n DefaultNetwork) ApplyDockerTo(opts *DockerProviderOptions) { + opts.DefaultNetwork = string(n) +} + +// NetworkRequest represents the parameters used to get a network +type NetworkRequest struct { + Driver string + CheckDuplicate bool + Internal bool + EnableIPv6 bool + Name string + Labels map[string]string + Attachable bool + IPAM *network.IPAM + + SkipReaper bool // Deprecated: The reaper is globally controlled by the .testcontainers.properties file or the TESTCONTAINERS_RYUK_DISABLED environment variable + ReaperImage string // Deprecated: use WithImageName ContainerOption instead. Alternative reaper registry + ReaperOptions []ContainerOption // Reaper options to use for this network +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/parallel.go b/vendor/github.com/testcontainers/testcontainers-go/parallel.go new file mode 100644 index 000000000..297ab9d6d --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/parallel.go @@ -0,0 +1,122 @@ +package testcontainers + +import ( + "context" + "fmt" + "sync" +) + +const ( + defaultWorkersCount = 8 +) + +type ParallelContainerRequest []GenericContainerRequest + +// ParallelContainersOptions represents additional options for parallel running +type ParallelContainersOptions struct { + WorkersCount int // count of parallel workers. 
If field empty(zero), default value will be 'defaultWorkersCount' +} + +// ParallelContainersRequestError represents error from parallel request +type ParallelContainersRequestError struct { + Request GenericContainerRequest + Error error +} + +type ParallelContainersError struct { + Errors []ParallelContainersRequestError +} + +func (gpe ParallelContainersError) Error() string { + return fmt.Sprintf("%v", gpe.Errors) +} + +func parallelContainersRunner( + ctx context.Context, + requests <-chan GenericContainerRequest, + errors chan<- ParallelContainersRequestError, + containers chan<- Container, + wg *sync.WaitGroup) { + + for req := range requests { + c, err := GenericContainer(ctx, req) + if err != nil { + errors <- ParallelContainersRequestError{ + Request: req, + Error: err, + } + continue + } + containers <- c + } + wg.Done() +} + +// ParallelContainers creates a generic containers with parameters and run it in parallel mode +func ParallelContainers(ctx context.Context, reqs ParallelContainerRequest, opt ParallelContainersOptions) ([]Container, error) { + if opt.WorkersCount == 0 { + opt.WorkersCount = defaultWorkersCount + } + + tasksChanSize := opt.WorkersCount + if tasksChanSize > len(reqs) { + tasksChanSize = len(reqs) + } + + tasksChan := make(chan GenericContainerRequest, tasksChanSize) + errsChan := make(chan ParallelContainersRequestError) + resChan := make(chan Container) + waitRes := make(chan struct{}) + + containers := make([]Container, 0) + errors := make([]ParallelContainersRequestError, 0) + + wg := sync.WaitGroup{} + wg.Add(tasksChanSize) + + // run workers + for i := 0; i < tasksChanSize; i++ { + go parallelContainersRunner(ctx, tasksChan, errsChan, resChan, &wg) + } + + go func() { + for { + select { + case c, ok := <-resChan: + if !ok { + resChan = nil + } else { + containers = append(containers, c) + } + case e, ok := <-errsChan: + if !ok { + errsChan = nil + } else { + errors = append(errors, e) + } + } + + if resChan == nil && errsChan == nil { + waitRes <- struct{}{} + break + } + } + + }() + + for _, req := range reqs { + tasksChan <- req + } + close(tasksChan) + wg.Wait() + close(resChan) + close(errsChan) + + <-waitRes + + if len(errors) != 0 { + return containers, ParallelContainersError{Errors: errors} + } + + return containers, nil +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/provider.go b/vendor/github.com/testcontainers/testcontainers-go/provider.go new file mode 100644 index 000000000..d6868f8d1 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/provider.go @@ -0,0 +1,163 @@ +package testcontainers + +import ( + "context" + "errors" + "fmt" + "os" + "strings" + + "github.com/testcontainers/testcontainers-go/internal/testcontainersdocker" +) + +// possible provider types +const ( + ProviderDefault ProviderType = iota // default will auto-detect provider from DOCKER_HOST environment variable + ProviderDocker + ProviderPodman +) + +type ( + // ProviderType is an enum for the possible providers + ProviderType int + + // GenericProviderOptions defines options applicable to all providers + GenericProviderOptions struct { + Logger Logging + DefaultNetwork string + } + + // GenericProviderOption defines a common interface to modify GenericProviderOptions + // These options can be passed to GetProvider in a variadic way to customize the returned GenericProvider instance + GenericProviderOption interface { + ApplyGenericTo(opts *GenericProviderOptions) + } + + // GenericProviderOptionFunc is a shorthand to implement the 
GenericProviderOption interface + GenericProviderOptionFunc func(opts *GenericProviderOptions) + + // DockerProviderOptions defines options applicable to DockerProvider + DockerProviderOptions struct { + defaultBridgeNetworkName string + *GenericProviderOptions + } + + // DockerProviderOption defines a common interface to modify DockerProviderOptions + // These can be passed to NewDockerProvider in a variadic way to customize the returned DockerProvider instance + DockerProviderOption interface { + ApplyDockerTo(opts *DockerProviderOptions) + } + + // DockerProviderOptionFunc is a shorthand to implement the DockerProviderOption interface + DockerProviderOptionFunc func(opts *DockerProviderOptions) +) + +func (f DockerProviderOptionFunc) ApplyDockerTo(opts *DockerProviderOptions) { + f(opts) +} + +func Generic2DockerOptions(opts ...GenericProviderOption) []DockerProviderOption { + converted := make([]DockerProviderOption, 0, len(opts)) + for _, o := range opts { + switch c := o.(type) { + case DockerProviderOption: + converted = append(converted, c) + default: + converted = append(converted, DockerProviderOptionFunc(func(opts *DockerProviderOptions) { + o.ApplyGenericTo(opts.GenericProviderOptions) + })) + } + } + + return converted +} + +func WithDefaultBridgeNetwork(bridgeNetworkName string) DockerProviderOption { + return DockerProviderOptionFunc(func(opts *DockerProviderOptions) { + opts.defaultBridgeNetworkName = bridgeNetworkName + }) +} + +func (f GenericProviderOptionFunc) ApplyGenericTo(opts *GenericProviderOptions) { + f(opts) +} + +// ContainerProvider allows the creation of containers on an arbitrary system +type ContainerProvider interface { + Close() error // close the provider + CreateContainer(context.Context, ContainerRequest) (Container, error) // create a container without starting it + ReuseOrCreateContainer(context.Context, ContainerRequest) (Container, error) // reuses a container if it exists or creates a container without starting + RunContainer(context.Context, ContainerRequest) (Container, error) // create a container and start it + Health(context.Context) error + Config() TestcontainersConfig +} + +// GetProvider provides the provider implementation for a certain type +func (t ProviderType) GetProvider(opts ...GenericProviderOption) (GenericProvider, error) { + opt := &GenericProviderOptions{ + Logger: Logger, + } + + for _, o := range opts { + o.ApplyGenericTo(opt) + } + + pt := t + if pt == ProviderDefault && strings.Contains(os.Getenv("DOCKER_HOST"), "podman.sock") { + pt = ProviderPodman + } + + switch pt { + case ProviderDefault, ProviderDocker: + providerOptions := append(Generic2DockerOptions(opts...), WithDefaultBridgeNetwork(Bridge)) + provider, err := NewDockerProvider(providerOptions...) + if err != nil { + return nil, fmt.Errorf("%w, failed to create Docker provider", err) + } + return provider, nil + case ProviderPodman: + providerOptions := append(Generic2DockerOptions(opts...), WithDefaultBridgeNetwork(Podman)) + provider, err := NewDockerProvider(providerOptions...) 
+ if err != nil { + return nil, fmt.Errorf("%w, failed to create Docker provider", err) + } + return provider, nil + } + return nil, errors.New("unknown provider") +} + +// NewDockerProvider creates a Docker provider with the EnvClient +func NewDockerProvider(provOpts ...DockerProviderOption) (*DockerProvider, error) { + o := &DockerProviderOptions{ + GenericProviderOptions: &GenericProviderOptions{ + Logger: Logger, + }, + } + + for idx := range provOpts { + provOpts[idx].ApplyDockerTo(o) + } + + c, err := NewDockerClient() + if err != nil { + return nil, err + } + + tcConfig := ReadConfig() + + dockerHost := testcontainersdocker.ExtractDockerHost(context.Background()) + + p := &DockerProvider{ + DockerProviderOptions: o, + host: dockerHost, + client: c, + config: tcConfig, + } + + // log docker server info only once + logOnce.Do(func() { + LogDockerServerInfo(context.Background(), p.client, p.Logger) + }) + + return p, nil +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/reaper.go b/vendor/github.com/testcontainers/testcontainers-go/reaper.go new file mode 100644 index 000000000..b72a9bec5 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/reaper.go @@ -0,0 +1,212 @@ +package testcontainers + +import ( + "bufio" + "context" + "fmt" + "net" + "strings" + "sync" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/go-connections/nat" + "github.com/testcontainers/testcontainers-go/internal" + "github.com/testcontainers/testcontainers-go/internal/testcontainersdocker" + "github.com/testcontainers/testcontainers-go/wait" +) + +const ( + // Deprecated: it has been replaced by the internal testcontainersdocker.LabelLang + TestcontainerLabel = "org.testcontainers.golang" + // Deprecated: it has been replaced by the internal testcontainersdocker.LabelSessionID + TestcontainerLabelSessionID = TestcontainerLabel + ".sessionId" + // Deprecated: it has been replaced by the internal testcontainersdocker.LabelReaper + TestcontainerLabelIsReaper = TestcontainerLabel + ".reaper" + + ReaperDefaultImage = "docker.io/testcontainers/ryuk:0.5.1" +) + +var ( + reaperInstance *Reaper // We would like to create reaper only once + mutex sync.Mutex +) + +// ReaperProvider represents a provider for the reaper to run itself with +// The ContainerProvider interface should usually satisfy this as well, so it is pluggable +type ReaperProvider interface { + RunContainer(ctx context.Context, req ContainerRequest) (Container, error) + Config() TestcontainersConfig +} + +// NewReaper creates a Reaper with a sessionID to identify containers and a provider to use +// Deprecated: it's not possible to create a reaper anymore. +func NewReaper(ctx context.Context, sessionID string, provider ReaperProvider, reaperImageName string) (*Reaper, error) { + return reuseOrCreateReaper(ctx, sessionID, provider, WithImageName(reaperImageName)) +} + +// reuseOrCreateReaper returns an existing Reaper instance if it exists and is running. Otherwise, a new Reaper instance +// will be created with a sessionID to identify containers and a provider to use +func reuseOrCreateReaper(ctx context.Context, sessionID string, provider ReaperProvider, opts ...ContainerOption) (*Reaper, error) { + mutex.Lock() + defer mutex.Unlock() + // If reaper already exists and healthy, re-use it + if reaperInstance != nil { + // Verify this instance is still running by checking state. 
+ // Can't use Container.IsRunning because the bool is not updated when Reaper is terminated + state, err := reaperInstance.container.State(ctx) + if err == nil && state.Running { + return reaperInstance, nil + } + } + + r, err := newReaper(ctx, sessionID, provider, opts...) + if err != nil { + return nil, err + } + + reaperInstance = r + return reaperInstance, nil +} + +// newReaper creates a Reaper with a sessionID to identify containers and a provider to use +// Should only be used internally and instead use reuseOrCreateReaper to prefer reusing an existing Reaper instance +func newReaper(ctx context.Context, sessionID string, provider ReaperProvider, opts ...ContainerOption) (*Reaper, error) { + dockerHostMount := testcontainersdocker.ExtractDockerSocket(ctx) + + reaper := &Reaper{ + Provider: provider, + SessionID: sessionID, + } + + listeningPort := nat.Port("8080/tcp") + + tcConfig := provider.Config().Config + + reaperOpts := containerOptions{} + + for _, opt := range opts { + opt(&reaperOpts) + } + + req := ContainerRequest{ + Image: reaperImage(reaperOpts.ImageName), + ExposedPorts: []string{string(listeningPort)}, + Labels: map[string]string{ + TestcontainerLabelIsReaper: "true", + testcontainersdocker.LabelReaper: "true", + }, + Mounts: Mounts(BindMount(dockerHostMount, "/var/run/docker.sock")), + Privileged: tcConfig.RyukPrivileged, + WaitingFor: wait.ForListeningPort(listeningPort), + ReaperOptions: opts, + HostConfigModifier: func(hc *container.HostConfig) { + hc.AutoRemove = true + hc.NetworkMode = Bridge + }, + } + + // keep backwards compatibility + req.ReaperImage = req.Image + + // include reaper-specific labels to the reaper container + for k, v := range reaper.Labels() { + if k == TestcontainerLabelSessionID || k == testcontainersdocker.LabelSessionID { + continue + } + req.Labels[k] = v + } + + // Attach reaper container to a requested network if it is specified + if p, ok := provider.(*DockerProvider); ok { + req.Networks = append(req.Networks, p.DefaultNetwork) + } + + c, err := provider.RunContainer(ctx, req) + if err != nil { + return nil, err + } + reaper.container = c + + endpoint, err := c.PortEndpoint(ctx, "8080", "") + if err != nil { + return nil, err + } + reaper.Endpoint = endpoint + + return reaper, nil +} + +// Reaper is used to start a sidecar container that cleans up resources +type Reaper struct { + Provider ReaperProvider + SessionID string + Endpoint string + container Container +} + +// Connect runs a goroutine which can be terminated by sending true into the returned channel +func (r *Reaper) Connect() (chan bool, error) { + conn, err := net.DialTimeout("tcp", r.Endpoint, 10*time.Second) + if err != nil { + return nil, fmt.Errorf("%w: Connecting to Ryuk on %s failed", err, r.Endpoint) + } + + terminationSignal := make(chan bool) + go func(conn net.Conn) { + sock := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)) + defer conn.Close() + + labelFilters := []string{} + for l, v := range r.Labels() { + labelFilters = append(labelFilters, fmt.Sprintf("label=%s=%s", l, v)) + } + + retryLimit := 3 + for retryLimit > 0 { + retryLimit-- + + if _, err := sock.WriteString(strings.Join(labelFilters, "&")); err != nil { + continue + } + + if _, err := sock.WriteString("\n"); err != nil { + continue + } + + if err := sock.Flush(); err != nil { + continue + } + + resp, err := sock.ReadString('\n') + if err != nil { + continue + } + + if resp == "ACK\n" { + break + } + } + + <-terminationSignal + }(conn) + return terminationSignal, nil +} + +// 
Labels returns the container labels to use so that this Reaper cleans them up +func (r *Reaper) Labels() map[string]string { + return map[string]string{ + TestcontainerLabel: "true", + TestcontainerLabelSessionID: r.SessionID, + testcontainersdocker.LabelLang: "go", + testcontainersdocker.LabelVersion: internal.Version, + testcontainersdocker.LabelSessionID: r.SessionID, + } +} + +func reaperImage(reaperImageName string) string { + if reaperImageName == "" { + return ReaperDefaultImage + } + return reaperImageName +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/requirements.txt b/vendor/github.com/testcontainers/testcontainers-go/requirements.txt new file mode 100644 index 000000000..f77d057b5 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/requirements.txt @@ -0,0 +1,4 @@ +mkdocs==1.3.0 +mkdocs-codeinclude-plugin==0.2.0 +mkdocs-material==8.1.3 +mkdocs-markdownextradata-plugin==0.2.5 diff --git a/vendor/github.com/testcontainers/testcontainers-go/runtime.txt b/vendor/github.com/testcontainers/testcontainers-go/runtime.txt new file mode 100644 index 000000000..cc1923a40 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/runtime.txt @@ -0,0 +1 @@ +3.8 diff --git a/vendor/github.com/testcontainers/testcontainers-go/testcontainer.go b/vendor/github.com/testcontainers/testcontainers-go/testcontainer.go new file mode 100644 index 000000000..6c42b00d4 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/testcontainer.go @@ -0,0 +1 @@ +package testcontainers diff --git a/vendor/github.com/testcontainers/testcontainers-go/testing.go b/vendor/github.com/testcontainers/testcontainers-go/testing.go new file mode 100644 index 000000000..cdbd68d72 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/testing.go @@ -0,0 +1,42 @@ +package testcontainers + +import ( + "context" + "testing" + + "github.com/testcontainers/testcontainers-go/internal/testcontainersdocker" +) + +// SkipIfProviderIsNotHealthy is a utility function capable of skipping tests +// if the provider is not healthy, or running at all. +// This is a function designed to be used in your test, when Docker is not mandatory for CI/CD. +// In this way tests that depend on Testcontainers won't run if the provider is provisioned correctly. +func SkipIfProviderIsNotHealthy(t *testing.T) { + ctx := context.Background() + provider, err := ProviderDocker.GetProvider() + if err != nil { + t.Skipf("Docker is not running. TestContainers can't perform is work without it: %s", err) + } + err = provider.Health(ctx) + if err != nil { + t.Skipf("Docker is not running. TestContainers can't perform is work without it: %s", err) + } +} + +// SkipIfDockerDesktop is a utility function capable of skipping tests +// if tests are run using Docker Desktop. 
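+// A hypothetical usage sketch combining both skip helpers (the test name is
+// illustrative):
+//
+//	func TestNeedsHostDocker(t *testing.T) {
+//		ctx := context.Background()
+//		SkipIfProviderIsNotHealthy(t)
+//		SkipIfDockerDesktop(t, ctx)
+//		// ... the rest of the test can assume a usable, non-Desktop daemon
+//	}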
+func SkipIfDockerDesktop(t *testing.T, ctx context.Context) { + cli, err := testcontainersdocker.NewClient(ctx) + if err != nil { + t.Fatalf("failed to create docker client: %s", err) + } + + info, err := cli.Info(ctx) + if err != nil { + t.Fatalf("failed to get docker info: %s", err) + } + + if info.OperatingSystem == "Docker Desktop" { + t.Skip("Skipping test that requires host network access when running in Docker Desktop") + } +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/all.go b/vendor/github.com/testcontainers/testcontainers-go/wait/all.go new file mode 100644 index 000000000..18531cbf5 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/wait/all.go @@ -0,0 +1,80 @@ +package wait + +import ( + "context" + "fmt" + "time" +) + +// Implement interface +var _ Strategy = (*MultiStrategy)(nil) +var _ StrategyTimeout = (*MultiStrategy)(nil) + +type MultiStrategy struct { + // all Strategies should have a startupTimeout to avoid waiting infinitely + timeout *time.Duration + deadline *time.Duration + + // additional properties + Strategies []Strategy +} + +// WithStartupTimeoutDefault sets the default timeout for all inner wait strategies +func (ms *MultiStrategy) WithStartupTimeoutDefault(timeout time.Duration) *MultiStrategy { + ms.timeout = &timeout + return ms +} + +// WithStartupTimeout sets a time.Duration which limits all wait strategies +// +// Deprecated: use WithDeadline +func (ms *MultiStrategy) WithStartupTimeout(timeout time.Duration) Strategy { + return ms.WithDeadline(timeout) +} + +// WithDeadline sets a time.Duration which limits all wait strategies +func (ms *MultiStrategy) WithDeadline(deadline time.Duration) *MultiStrategy { + ms.deadline = &deadline + return ms +} + +func ForAll(strategies ...Strategy) *MultiStrategy { + return &MultiStrategy{ + Strategies: strategies, + } +} + +func (ms *MultiStrategy) Timeout() *time.Duration { + return ms.timeout +} + +func (ms *MultiStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget) error { + var cancel context.CancelFunc + if ms.deadline != nil { + ctx, cancel = context.WithTimeout(ctx, *ms.deadline) + defer cancel() + } + + if len(ms.Strategies) == 0 { + return fmt.Errorf("no wait strategy supplied") + } + + for _, strategy := range ms.Strategies { + strategyCtx := ctx + + // Set default Timeout when strategy implements StrategyTimeout + if st, ok := strategy.(StrategyTimeout); ok { + if ms.Timeout() != nil && st.Timeout() == nil { + strategyCtx, cancel = context.WithTimeout(ctx, *ms.Timeout()) + defer cancel() + } + } + + err := strategy.WaitUntilReady(strategyCtx, target) + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/errors.go b/vendor/github.com/testcontainers/testcontainers-go/wait/errors.go new file mode 100644 index 000000000..f8a02a59b --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/wait/errors.go @@ -0,0 +1,10 @@ +//go:build !windows +// +build !windows + +package wait + +import "syscall" + +func isConnRefusedErr(err error) bool { + return err == syscall.ECONNREFUSED +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/errors_windows.go b/vendor/github.com/testcontainers/testcontainers-go/wait/errors_windows.go new file mode 100644 index 000000000..3ae346d8a --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/wait/errors_windows.go @@ -0,0 +1,9 @@ +package wait + +import ( + "golang.org/x/sys/windows" +) + +func 
isConnRefusedErr(err error) bool { + return err == windows.WSAECONNREFUSED +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/exec.go b/vendor/github.com/testcontainers/testcontainers-go/wait/exec.go new file mode 100644 index 000000000..82f15c609 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/wait/exec.go @@ -0,0 +1,99 @@ +package wait + +import ( + "context" + "io" + "time" + + tcexec "github.com/testcontainers/testcontainers-go/exec" +) + +// Implement interface +var _ Strategy = (*ExecStrategy)(nil) +var _ StrategyTimeout = (*ExecStrategy)(nil) + +type ExecStrategy struct { + // all Strategies should have a startupTimeout to avoid waiting infinitely + timeout *time.Duration + cmd []string + + // additional properties + ExitCodeMatcher func(exitCode int) bool + ResponseMatcher func(body io.Reader) bool + PollInterval time.Duration +} + +// NewExecStrategy constructs an Exec strategy ... +func NewExecStrategy(cmd []string) *ExecStrategy { + return &ExecStrategy{ + cmd: cmd, + ExitCodeMatcher: defaultExitCodeMatcher, + ResponseMatcher: func(body io.Reader) bool { return true }, + PollInterval: defaultPollInterval(), + } +} + +func defaultExitCodeMatcher(exitCode int) bool { + return exitCode == 0 +} + +// WithStartupTimeout can be used to change the default startup timeout +func (ws *ExecStrategy) WithStartupTimeout(startupTimeout time.Duration) *ExecStrategy { + ws.timeout = &startupTimeout + return ws +} + +func (ws *ExecStrategy) WithExitCodeMatcher(exitCodeMatcher func(exitCode int) bool) *ExecStrategy { + ws.ExitCodeMatcher = exitCodeMatcher + return ws +} + +func (ws *ExecStrategy) WithResponseMatcher(matcher func(body io.Reader) bool) *ExecStrategy { + ws.ResponseMatcher = matcher + return ws +} + +// WithPollInterval can be used to override the default polling interval of 100 milliseconds +func (ws *ExecStrategy) WithPollInterval(pollInterval time.Duration) *ExecStrategy { + ws.PollInterval = pollInterval + return ws +} + +// ForExec is a convenience method to assign ExecStrategy +func ForExec(cmd []string) *ExecStrategy { + return NewExecStrategy(cmd) +} + +func (ws *ExecStrategy) Timeout() *time.Duration { + return ws.timeout +} + +func (ws *ExecStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget) error { + timeout := defaultStartupTimeout() + if ws.timeout != nil { + timeout = *ws.timeout + } + + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(ws.PollInterval): + exitCode, resp, err := target.Exec(ctx, ws.cmd, tcexec.Multiplexed()) + if err != nil { + return err + } + if !ws.ExitCodeMatcher(exitCode) { + continue + } + if ws.ResponseMatcher != nil && !ws.ResponseMatcher(resp) { + continue + } + + return nil + } + } +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/exit.go b/vendor/github.com/testcontainers/testcontainers-go/wait/exit.go new file mode 100644 index 000000000..8ff59fe0a --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/wait/exit.go @@ -0,0 +1,89 @@ +package wait + +import ( + "context" + "strings" + "time" +) + +// Implement interface +var _ Strategy = (*ExitStrategy)(nil) +var _ StrategyTimeout = (*ExitStrategy)(nil) + +// ExitStrategy will wait until container exit +type ExitStrategy struct { + // all Strategies should have a timeout to avoid waiting infinitely + timeout *time.Duration + + // additional properties + PollInterval time.Duration +} + +// 
NewExitStrategy constructs with polling interval of 100 milliseconds without timeout by default +func NewExitStrategy() *ExitStrategy { + return &ExitStrategy{ + PollInterval: defaultPollInterval(), + } + +} + +// fluent builders for each property +// since go has neither covariance nor generics, the return type must be the type of the concrete implementation +// this is true for all properties, even the "shared" ones + +// WithExitTimeout can be used to change the default exit timeout +func (ws *ExitStrategy) WithExitTimeout(exitTimeout time.Duration) *ExitStrategy { + ws.timeout = &exitTimeout + return ws +} + +// WithPollInterval can be used to override the default polling interval of 100 milliseconds +func (ws *ExitStrategy) WithPollInterval(pollInterval time.Duration) *ExitStrategy { + ws.PollInterval = pollInterval + return ws +} + +// ForExit is the default construction for the fluid interface. +// +// For Example: +// +// wait. +// ForExit(). +// WithPollInterval(1 * time.Second) +func ForExit() *ExitStrategy { + return NewExitStrategy() +} + +func (ws *ExitStrategy) Timeout() *time.Duration { + return ws.timeout +} + +// WaitUntilReady implements Strategy.WaitUntilReady +func (ws *ExitStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget) (err error) { + if ws.timeout != nil { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, *ws.timeout) + defer cancel() + } + + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + state, err := target.State(ctx) + if err != nil { + if !strings.Contains(err.Error(), "No such container") { + return err + } else { + return nil + } + } + if state.Running { + time.Sleep(ws.PollInterval) + continue + } + return nil + } + } +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/health.go b/vendor/github.com/testcontainers/testcontainers-go/wait/health.go new file mode 100644 index 000000000..b0821c8ae --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/wait/health.go @@ -0,0 +1,91 @@ +package wait + +import ( + "context" + "time" + + "github.com/docker/docker/api/types" +) + +// Implement interface +var _ Strategy = (*HealthStrategy)(nil) +var _ StrategyTimeout = (*HealthStrategy)(nil) + +// HealthStrategy will wait until the container becomes healthy +type HealthStrategy struct { + // all Strategies should have a startupTimeout to avoid waiting infinitely + timeout *time.Duration + + // additional properties + PollInterval time.Duration +} + +// NewHealthStrategy constructs with polling interval of 100 milliseconds and startup timeout of 60 seconds by default +func NewHealthStrategy() *HealthStrategy { + return &HealthStrategy{ + PollInterval: defaultPollInterval(), + } + +} + +// fluent builders for each property +// since go has neither covariance nor generics, the return type must be the type of the concrete implementation +// this is true for all properties, even the "shared" ones like startupTimeout + +// WithStartupTimeout can be used to change the default startup timeout +func (ws *HealthStrategy) WithStartupTimeout(startupTimeout time.Duration) *HealthStrategy { + ws.timeout = &startupTimeout + return ws +} + +// WithPollInterval can be used to override the default polling interval of 100 milliseconds +func (ws *HealthStrategy) WithPollInterval(pollInterval time.Duration) *HealthStrategy { + ws.PollInterval = pollInterval + return ws +} + +// ForHealthCheck is the default construction for the fluid interface. +// +// For Example: +// +// wait. 
+// ForHealthCheck(). +// WithPollInterval(1 * time.Second) +func ForHealthCheck() *HealthStrategy { + return NewHealthStrategy() +} + +func (ws *HealthStrategy) Timeout() *time.Duration { + return ws.timeout +} + +// WaitUntilReady implements Strategy.WaitUntilReady +func (ws *HealthStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget) (err error) { + timeout := defaultStartupTimeout() + if ws.timeout != nil { + timeout = *ws.timeout + } + + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + state, err := target.State(ctx) + if err != nil { + return err + } + if err := checkState(state); err != nil { + return err + } + if state.Health == nil || state.Health.Status != types.Healthy { + time.Sleep(ws.PollInterval) + continue + } + return nil + } + } +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/host_port.go b/vendor/github.com/testcontainers/testcontainers-go/wait/host_port.go new file mode 100644 index 000000000..59fe7c113 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/wait/host_port.go @@ -0,0 +1,204 @@ +package wait + +import ( + "context" + "errors" + "fmt" + "log" + "net" + "os" + "strconv" + "time" + + "github.com/docker/go-connections/nat" +) + +// Implement interface +var _ Strategy = (*HostPortStrategy)(nil) +var _ StrategyTimeout = (*HostPortStrategy)(nil) + +var errShellNotExecutable = errors.New("/bin/sh command not executable") + +type HostPortStrategy struct { + // Port is a string containing port number and protocol in the format "80/tcp" + // which + Port nat.Port + // all WaitStrategies should have a startupTimeout to avoid waiting infinitely + timeout *time.Duration + PollInterval time.Duration +} + +// NewHostPortStrategy constructs a default host port strategy +func NewHostPortStrategy(port nat.Port) *HostPortStrategy { + return &HostPortStrategy{ + Port: port, + PollInterval: defaultPollInterval(), + } +} + +// fluent builders for each property +// since go has neither covariance nor generics, the return type must be the type of the concrete implementation +// this is true for all properties, even the "shared" ones like startupTimeout + +// ForListeningPort is a helper similar to those in Wait.java +// https://github.com/testcontainers/testcontainers-java/blob/1d85a3834bd937f80aad3a4cec249c027f31aeb4/core/src/main/java/org/testcontainers/containers/wait/strategy/Wait.java +func ForListeningPort(port nat.Port) *HostPortStrategy { + return NewHostPortStrategy(port) +} + +// ForExposedPort constructs an exposed port strategy. Alias for `NewHostPortStrategy("")`. +// This strategy waits for the first port exposed in the Docker container. 
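+// An illustrative sketch combining strategies from this package (the port and
+// durations are examples):
+//
+//	strategy := ForAll(
+//		ForListeningPort("80/tcp"),
+//		ForHealthCheck().WithPollInterval(500*time.Millisecond),
+//	).WithDeadline(60 * time.Second)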
+func ForExposedPort() *HostPortStrategy { + return NewHostPortStrategy("") +} + +// WithStartupTimeout can be used to change the default startup timeout +func (hp *HostPortStrategy) WithStartupTimeout(startupTimeout time.Duration) *HostPortStrategy { + hp.timeout = &startupTimeout + return hp +} + +// WithPollInterval can be used to override the default polling interval of 100 milliseconds +func (hp *HostPortStrategy) WithPollInterval(pollInterval time.Duration) *HostPortStrategy { + hp.PollInterval = pollInterval + return hp +} + +func (hp *HostPortStrategy) Timeout() *time.Duration { + return hp.timeout +} + +// WaitUntilReady implements Strategy.WaitUntilReady +func (hp *HostPortStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget) (err error) { + timeout := defaultStartupTimeout() + if hp.timeout != nil { + timeout = *hp.timeout + } + + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + ipAddress, err := target.Host(ctx) + if err != nil { + return + } + + var waitInterval = hp.PollInterval + + internalPort := hp.Port + if internalPort == "" { + var ports nat.PortMap + ports, err = target.Ports(ctx) + if err != nil { + return + } + if len(ports) > 0 { + for p := range ports { + internalPort = p + break + } + } + } + + if internalPort == "" { + err = fmt.Errorf("no port to wait for") + return + } + + var port nat.Port + port, err = target.MappedPort(ctx, internalPort) + var i = 0 + + for port == "" { + i++ + + select { + case <-ctx.Done(): + return fmt.Errorf("%s:%w", ctx.Err(), err) + case <-time.After(waitInterval): + if err := checkTarget(ctx, target); err != nil { + return err + } + port, err = target.MappedPort(ctx, internalPort) + if err != nil { + fmt.Printf("(%d) [%s] %s\n", i, port, err) + } + } + } + + if err := externalCheck(ctx, ipAddress, port, target, waitInterval); err != nil { + return err + } + + err = internalCheck(ctx, internalPort, target) + if err != nil && errors.Is(errShellNotExecutable, err) { + log.Println("Shell not executable in container, only external port check will be performed") + } else { + return err + } + + return nil +} + +func externalCheck(ctx context.Context, ipAddress string, port nat.Port, target StrategyTarget, waitInterval time.Duration) error { + proto := port.Proto() + portNumber := port.Int() + portString := strconv.Itoa(portNumber) + + dialer := net.Dialer{} + address := net.JoinHostPort(ipAddress, portString) + for { + if err := checkTarget(ctx, target); err != nil { + return err + } + conn, err := dialer.DialContext(ctx, proto, address) + if err != nil { + if v, ok := err.(*net.OpError); ok { + if v2, ok := (v.Err).(*os.SyscallError); ok { + if isConnRefusedErr(v2.Err) { + time.Sleep(waitInterval) + continue + } + } + } + return err + } else { + _ = conn.Close() + break + } + } + return nil +} + +func internalCheck(ctx context.Context, internalPort nat.Port, target StrategyTarget) error { + command := buildInternalCheckCommand(internalPort.Int()) + for { + if ctx.Err() != nil { + return ctx.Err() + } + if err := checkTarget(ctx, target); err != nil { + return err + } + exitCode, _, err := target.Exec(ctx, []string{"/bin/sh", "-c", command}) + if err != nil { + return fmt.Errorf("%w, host port waiting failed", err) + } + + if exitCode == 0 { + break + } else if exitCode == 126 { + return errShellNotExecutable + } + } + return nil +} + +func buildInternalCheckCommand(internalPort int) string { + command := `( + cat /proc/net/tcp* | awk '{print $2}' | grep -i :%04x || + nc -vz -w 1 localhost %d || + /bin/sh 
-c ' 0 { + ws.TLSConfig = tlsconf[0] + } + return ws +} + +func (ws *HTTPStrategy) WithAllowInsecure(allowInsecure bool) *HTTPStrategy { + ws.AllowInsecure = allowInsecure + return ws +} + +func (ws *HTTPStrategy) WithMethod(method string) *HTTPStrategy { + ws.Method = method + return ws +} + +func (ws *HTTPStrategy) WithBody(reqdata io.Reader) *HTTPStrategy { + ws.Body = reqdata + return ws +} + +func (ws *HTTPStrategy) WithBasicAuth(username, password string) *HTTPStrategy { + ws.UserInfo = url.UserPassword(username, password) + return ws +} + +// WithPollInterval can be used to override the default polling interval of 100 milliseconds +func (ws *HTTPStrategy) WithPollInterval(pollInterval time.Duration) *HTTPStrategy { + ws.PollInterval = pollInterval + return ws +} + +// ForHTTP is a convenience method similar to Wait.java +// https://github.com/testcontainers/testcontainers-java/blob/1d85a3834bd937f80aad3a4cec249c027f31aeb4/core/src/main/java/org/testcontainers/containers/wait/strategy/Wait.java +func ForHTTP(path string) *HTTPStrategy { + return NewHTTPStrategy(path) +} + +func (ws *HTTPStrategy) Timeout() *time.Duration { + return ws.timeout +} + +// WaitUntilReady implements Strategy.WaitUntilReady +func (ws *HTTPStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget) (err error) { + timeout := defaultStartupTimeout() + if ws.timeout != nil { + timeout = *ws.timeout + } + + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + ipAddress, err := target.Host(ctx) + if err != nil { + return + } + + var mappedPort nat.Port + if ws.Port == "" { + ports, err := target.Ports(ctx) + for err != nil { + select { + case <-ctx.Done(): + return fmt.Errorf("%s:%w", ctx.Err(), err) + case <-time.After(ws.PollInterval): + if err := checkTarget(ctx, target); err != nil { + return err + } + + ports, err = target.Ports(ctx) + } + } + + for k, bindings := range ports { + if len(bindings) == 0 || k.Proto() != "tcp" { + continue + } + mappedPort, _ = nat.NewPort(k.Proto(), bindings[0].HostPort) + break + } + + if mappedPort == "" { + return errors.New("No exposed tcp ports or mapped ports - cannot wait for status") + } + } else { + mappedPort, err = target.MappedPort(ctx, ws.Port) + + for mappedPort == "" { + select { + case <-ctx.Done(): + return fmt.Errorf("%s:%w", ctx.Err(), err) + case <-time.After(ws.PollInterval): + if err := checkTarget(ctx, target); err != nil { + return err + } + + mappedPort, err = target.MappedPort(ctx, ws.Port) + } + } + + if mappedPort.Proto() != "tcp" { + return errors.New("Cannot use HTTP client on non-TCP ports") + } + } + + switch ws.Method { + case http.MethodGet, http.MethodHead, http.MethodPost, + http.MethodPut, http.MethodPatch, http.MethodDelete, + http.MethodConnect, http.MethodOptions, http.MethodTrace: + default: + if ws.Method != "" { + return fmt.Errorf("invalid http method %q", ws.Method) + } + ws.Method = http.MethodGet + } + + tripper := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + TLSClientConfig: ws.TLSConfig, + } + + var proto string + if ws.UseTLS { + proto = "https" + if ws.AllowInsecure { + if ws.TLSConfig == nil { + tripper.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} + } else { + ws.TLSConfig.InsecureSkipVerify = 
true + } + } + } else { + proto = "http" + } + + client := http.Client{Transport: tripper, Timeout: time.Second} + address := net.JoinHostPort(ipAddress, strconv.Itoa(mappedPort.Int())) + + endpoint := url.URL{ + Scheme: proto, + Host: address, + Path: ws.Path, + } + + if ws.UserInfo != nil { + endpoint.User = ws.UserInfo + } + + // cache the body into a byte-slice so that it can be iterated over multiple times + var body []byte + if ws.Body != nil { + body, err = io.ReadAll(ws.Body) + if err != nil { + return + } + } + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(ws.PollInterval): + if err := checkTarget(ctx, target); err != nil { + return err + } + req, err := http.NewRequestWithContext(ctx, ws.Method, endpoint.String(), bytes.NewReader(body)) + if err != nil { + return err + } + resp, err := client.Do(req) + if err != nil { + continue + } + if ws.StatusCodeMatcher != nil && !ws.StatusCodeMatcher(resp.StatusCode) { + _ = resp.Body.Close() + continue + } + if ws.ResponseMatcher != nil && !ws.ResponseMatcher(resp.Body) { + _ = resp.Body.Close() + continue + } + if err := resp.Body.Close(); err != nil { + continue + } + return nil + } + } +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/log.go b/vendor/github.com/testcontainers/testcontainers-go/wait/log.go new file mode 100644 index 000000000..4e14e9d31 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/wait/log.go @@ -0,0 +1,117 @@ +package wait + +import ( + "context" + "io" + "strings" + "time" +) + +// Implement interface +var _ Strategy = (*LogStrategy)(nil) +var _ StrategyTimeout = (*LogStrategy)(nil) + +// LogStrategy will wait until a given log entry shows up in the docker logs +type LogStrategy struct { + // all Strategies should have a startupTimeout to avoid waiting infinitely + timeout *time.Duration + + // additional properties + Log string + Occurrence int + PollInterval time.Duration +} + +// NewLogStrategy constructs with polling interval of 100 milliseconds and startup timeout of 60 seconds by default +func NewLogStrategy(log string) *LogStrategy { + return &LogStrategy{ + Log: log, + Occurrence: 1, + PollInterval: defaultPollInterval(), + } +} + +// fluent builders for each property +// since go has neither covariance nor generics, the return type must be the type of the concrete implementation +// this is true for all properties, even the "shared" ones like startupTimeout + +// WithStartupTimeout can be used to change the default startup timeout +func (ws *LogStrategy) WithStartupTimeout(timeout time.Duration) *LogStrategy { + ws.timeout = &timeout + return ws +} + +// WithPollInterval can be used to override the default polling interval of 100 milliseconds +func (ws *LogStrategy) WithPollInterval(pollInterval time.Duration) *LogStrategy { + ws.PollInterval = pollInterval + return ws +} + +func (ws *LogStrategy) WithOccurrence(o int) *LogStrategy { + // the number of occurrence needs to be positive + if o <= 0 { + o = 1 + } + ws.Occurrence = o + return ws +} + +// ForLog is the default construction for the fluid interface. +// +// For Example: +// +// wait. +// ForLog("some text"). 
+// WithPollInterval(1 * time.Second) +func ForLog(log string) *LogStrategy { + return NewLogStrategy(log) +} + +func (ws *LogStrategy) Timeout() *time.Duration { + return ws.timeout +} + +// WaitUntilReady implements Strategy.WaitUntilReady +func (ws *LogStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget) (err error) { + timeout := defaultStartupTimeout() + if ws.timeout != nil { + timeout = *ws.timeout + } + + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + +LOOP: + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + checkErr := checkTarget(ctx, target) + + reader, err := target.Logs(ctx) + if err != nil { + time.Sleep(ws.PollInterval) + continue + } + + b, err := io.ReadAll(reader) + if err != nil { + time.Sleep(ws.PollInterval) + continue + } + + logs := string(b) + if logs == "" && checkErr != nil { + return checkErr + } else if strings.Count(logs, ws.Log) >= ws.Occurrence { + break LOOP + } else { + time.Sleep(ws.PollInterval) + continue + } + } + } + + return nil +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/nop.go b/vendor/github.com/testcontainers/testcontainers-go/wait/nop.go new file mode 100644 index 000000000..a16feb878 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/wait/nop.go @@ -0,0 +1,69 @@ +package wait + +import ( + "context" + "io" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/go-connections/nat" + "github.com/testcontainers/testcontainers-go/exec" +) + +var _ Strategy = (*NopStrategy)(nil) +var _ StrategyTimeout = (*NopStrategy)(nil) + +type NopStrategy struct { + timeout *time.Duration + waitUntilReady func(context.Context, StrategyTarget) error +} + +func ForNop( + waitUntilReady func(context.Context, StrategyTarget) error, +) *NopStrategy { + return &NopStrategy{ + waitUntilReady: waitUntilReady, + } +} + +func (ws *NopStrategy) Timeout() *time.Duration { + return ws.timeout +} + +func (ws *NopStrategy) WithStartupTimeout(timeout time.Duration) *NopStrategy { + ws.timeout = &timeout + return ws +} + +func (ws *NopStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget) error { + return ws.waitUntilReady(ctx, target) +} + +type NopStrategyTarget struct { + ReaderCloser io.ReadCloser + ContainerState types.ContainerState +} + +func (st NopStrategyTarget) Host(_ context.Context) (string, error) { + return "", nil +} + +func (st NopStrategyTarget) Ports(_ context.Context) (nat.PortMap, error) { + return nil, nil +} + +func (st NopStrategyTarget) MappedPort(_ context.Context, n nat.Port) (nat.Port, error) { + return n, nil +} + +func (st NopStrategyTarget) Logs(_ context.Context) (io.ReadCloser, error) { + return st.ReaderCloser, nil +} + +func (st NopStrategyTarget) Exec(_ context.Context, _ []string, _ ...exec.ProcessOption) (int, io.Reader, error) { + return 0, nil, nil +} + +func (st NopStrategyTarget) State(_ context.Context) (*types.ContainerState, error) { + return &st.ContainerState, nil +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/sql.go b/vendor/github.com/testcontainers/testcontainers-go/wait/sql.go new file mode 100644 index 000000000..9e0c05607 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/wait/sql.go @@ -0,0 +1,116 @@ +package wait + +import ( + "context" + "database/sql" + "fmt" + "time" + + "github.com/docker/go-connections/nat" +) + +var _ Strategy = (*waitForSql)(nil) +var _ StrategyTimeout = (*waitForSql)(nil) + +const defaultForSqlQuery = "SELECT 1" + +// 
ForSQL constructs a new waitForSql strategy for the given driver +func ForSQL(port nat.Port, driver string, url func(host string, port nat.Port) string) *waitForSql { + return &waitForSql{ + Port: port, + URL: url, + Driver: driver, + startupTimeout: defaultStartupTimeout(), + PollInterval: defaultPollInterval(), + query: defaultForSqlQuery, + } +} + +type waitForSql struct { + timeout *time.Duration + + URL func(host string, port nat.Port) string + Driver string + Port nat.Port + startupTimeout time.Duration + PollInterval time.Duration + query string +} + +// WithStartupTimeout can be used to change the default startup timeout +func (w *waitForSql) WithStartupTimeout(timeout time.Duration) *waitForSql { + w.timeout = &timeout + return w +} + +// WithPollInterval can be used to override the default polling interval of 100 milliseconds +func (w *waitForSql) WithPollInterval(pollInterval time.Duration) *waitForSql { + w.PollInterval = pollInterval + return w +} + +// WithQuery can be used to override the default query used in the strategy. +func (w *waitForSql) WithQuery(query string) *waitForSql { + w.query = query + return w +} + +func (w *waitForSql) Timeout() *time.Duration { + return w.timeout +} + +// WaitUntilReady repeatedly tries to run "SELECT 1" or user defined query on the given port using sql and driver. +// +// If it doesn't succeed until the timeout value which defaults to 60 seconds, it will return an error. +func (w *waitForSql) WaitUntilReady(ctx context.Context, target StrategyTarget) (err error) { + timeout := defaultStartupTimeout() + if w.timeout != nil { + timeout = *w.timeout + } + + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + host, err := target.Host(ctx) + if err != nil { + return + } + + ticker := time.NewTicker(w.PollInterval) + defer ticker.Stop() + + var port nat.Port + port, err = target.MappedPort(ctx, w.Port) + + for port == "" { + select { + case <-ctx.Done(): + return fmt.Errorf("%s:%w", ctx.Err(), err) + case <-ticker.C: + if err := checkTarget(ctx, target); err != nil { + return err + } + port, err = target.MappedPort(ctx, w.Port) + } + } + + db, err := sql.Open(w.Driver, w.URL(host, port)) + if err != nil { + return fmt.Errorf("sql.Open: %v", err) + } + defer db.Close() + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + if err := checkTarget(ctx, target); err != nil { + return err + } + if _, err := db.ExecContext(ctx, w.query); err != nil { + continue + } + return nil + } + } +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/wait.go b/vendor/github.com/testcontainers/testcontainers-go/wait/wait.go new file mode 100644 index 000000000..559ce3979 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/wait/wait.go @@ -0,0 +1,62 @@ +package wait + +import ( + "context" + "errors" + "fmt" + "io" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/go-connections/nat" + "github.com/testcontainers/testcontainers-go/exec" +) + +// Strategy defines the basic interface for a Wait Strategy +type Strategy interface { + WaitUntilReady(context.Context, StrategyTarget) error +} + +// StrategyTimeout allows MultiStrategy to configure a Strategy's Timeout +type StrategyTimeout interface { + Timeout() *time.Duration +} + +type StrategyTarget interface { + Host(context.Context) (string, error) + Ports(ctx context.Context) (nat.PortMap, error) + MappedPort(context.Context, nat.Port) (nat.Port, error) + Logs(context.Context) (io.ReadCloser, error) + 
Exec(context.Context, []string, ...exec.ProcessOption) (int, io.Reader, error) + State(context.Context) (*types.ContainerState, error) +} + +func checkTarget(ctx context.Context, target StrategyTarget) error { + state, err := target.State(ctx) + if err != nil { + return err + } + + return checkState(state) +} + +func checkState(state *types.ContainerState) error { + switch { + case state.Running: + return nil + case state.OOMKilled: + return errors.New("container crashed with out-of-memory (OOMKilled)") + case state.Status == "exited": + return fmt.Errorf("container exited with code %d", state.ExitCode) + default: + return fmt.Errorf("unexpected container status %q", state.Status) + } +} + +func defaultStartupTimeout() time.Duration { + return 60 * time.Second +} + +func defaultPollInterval() time.Duration { + return 100 * time.Millisecond +} diff --git a/vendor/golang.org/x/xerrors/LICENSE b/vendor/golang.org/x/exp/LICENSE similarity index 96% rename from vendor/golang.org/x/xerrors/LICENSE rename to vendor/golang.org/x/exp/LICENSE index e4a47e17f..6a66aea5e 100644 --- a/vendor/golang.org/x/xerrors/LICENSE +++ b/vendor/golang.org/x/exp/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2019 The Go Authors. All rights reserved. +Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/golang.org/x/xerrors/PATENTS b/vendor/golang.org/x/exp/PATENTS similarity index 100% rename from vendor/golang.org/x/xerrors/PATENTS rename to vendor/golang.org/x/exp/PATENTS diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go new file mode 100644 index 000000000..2c033dff4 --- /dev/null +++ b/vendor/golang.org/x/exp/constraints/constraints.go @@ -0,0 +1,50 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package constraints defines a set of useful constraints to be used +// with type parameters. +package constraints + +// Signed is a constraint that permits any signed integer type. +// If future releases of Go add new predeclared signed integer types, +// this constraint will be modified to include them. +type Signed interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 +} + +// Unsigned is a constraint that permits any unsigned integer type. +// If future releases of Go add new predeclared unsigned integer types, +// this constraint will be modified to include them. +type Unsigned interface { + ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr +} + +// Integer is a constraint that permits any integer type. +// If future releases of Go add new predeclared integer types, +// this constraint will be modified to include them. +type Integer interface { + Signed | Unsigned +} + +// Float is a constraint that permits any floating-point type. +// If future releases of Go add new predeclared floating-point types, +// this constraint will be modified to include them. +type Float interface { + ~float32 | ~float64 +} + +// Complex is a constraint that permits any complex numeric type. +// If future releases of Go add new predeclared complex numeric types, +// this constraint will be modified to include them. +type Complex interface { + ~complex64 | ~complex128 +} + +// Ordered is a constraint that permits any ordered type: any type +// that supports the operators < <= >= >. 
+// If future releases of Go add new ordered types, +// this constraint will be modified to include them. +type Ordered interface { + Integer | Float | ~string +} diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go new file mode 100644 index 000000000..2540bd682 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -0,0 +1,258 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package slices defines various functions useful with slices of any type. +// Unless otherwise specified, these functions all apply to the elements +// of a slice at index 0 <= i < len(s). +// +// Note that the less function in IsSortedFunc, SortFunc, SortStableFunc requires a +// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings), +// or the sorting may fail to sort correctly. A common case is when sorting slices of +// floating-point numbers containing NaN values. +package slices + +import "golang.org/x/exp/constraints" + +// Equal reports whether two slices are equal: the same length and all +// elements equal. If the lengths are different, Equal returns false. +// Otherwise, the elements are compared in increasing index order, and the +// comparison stops at the first unequal pair. +// Floating point NaNs are not considered equal. +func Equal[E comparable](s1, s2 []E) bool { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + if s1[i] != s2[i] { + return false + } + } + return true +} + +// EqualFunc reports whether two slices are equal using a comparison +// function on each pair of elements. If the lengths are different, +// EqualFunc returns false. Otherwise, the elements are compared in +// increasing index order, and the comparison stops at the first index +// for which eq returns false. +func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool { + if len(s1) != len(s2) { + return false + } + for i, v1 := range s1 { + v2 := s2[i] + if !eq(v1, v2) { + return false + } + } + return true +} + +// Compare compares the elements of s1 and s2. +// The elements are compared sequentially, starting at index 0, +// until one element is not equal to the other. +// The result of comparing the first non-matching elements is returned. +// If both slices are equal until one of them ends, the shorter slice is +// considered less than the longer one. +// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. +// Comparisons involving floating point NaNs are ignored. +func Compare[E constraints.Ordered](s1, s2 []E) int { + s2len := len(s2) + for i, v1 := range s1 { + if i >= s2len { + return +1 + } + v2 := s2[i] + switch { + case v1 < v2: + return -1 + case v1 > v2: + return +1 + } + } + if len(s1) < s2len { + return -1 + } + return 0 +} + +// CompareFunc is like Compare but uses a comparison function +// on each pair of elements. The elements are compared in increasing +// index order, and the comparisons stop after the first time cmp +// returns non-zero. +// The result is the first non-zero result of cmp; if cmp always +// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), +// and +1 if len(s1) > len(s2). 
+func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { + s2len := len(s2) + for i, v1 := range s1 { + if i >= s2len { + return +1 + } + v2 := s2[i] + if c := cmp(v1, v2); c != 0 { + return c + } + } + if len(s1) < s2len { + return -1 + } + return 0 +} + +// Index returns the index of the first occurrence of v in s, +// or -1 if not present. +func Index[E comparable](s []E, v E) int { + for i := range s { + if v == s[i] { + return i + } + } + return -1 +} + +// IndexFunc returns the first index i satisfying f(s[i]), +// or -1 if none do. +func IndexFunc[E any](s []E, f func(E) bool) int { + for i := range s { + if f(s[i]) { + return i + } + } + return -1 +} + +// Contains reports whether v is present in s. +func Contains[E comparable](s []E, v E) bool { + return Index(s, v) >= 0 +} + +// ContainsFunc reports whether at least one +// element e of s satisfies f(e). +func ContainsFunc[E any](s []E, f func(E) bool) bool { + return IndexFunc(s, f) >= 0 +} + +// Insert inserts the values v... into s at index i, +// returning the modified slice. +// In the returned slice r, r[i] == v[0]. +// Insert panics if i is out of range. +// This function is O(len(s) + len(v)). +func Insert[S ~[]E, E any](s S, i int, v ...E) S { + tot := len(s) + len(v) + if tot <= cap(s) { + s2 := s[:tot] + copy(s2[i+len(v):], s[i:]) + copy(s2[i:], v) + return s2 + } + s2 := make(S, tot) + copy(s2, s[:i]) + copy(s2[i:], v) + copy(s2[i+len(v):], s[i:]) + return s2 +} + +// Delete removes the elements s[i:j] from s, returning the modified slice. +// Delete panics if s[i:j] is not a valid slice of s. +// Delete modifies the contents of the slice s; it does not create a new slice. +// Delete is O(len(s)-j), so if many items must be deleted, it is better to +// make a single call deleting them all together than to delete one at a time. +// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those +// elements contain pointers you might consider zeroing those elements so that +// objects they reference can be garbage collected. +func Delete[S ~[]E, E any](s S, i, j int) S { + _ = s[i:j] // bounds check + + return append(s[:i], s[j:]...) +} + +// Replace replaces the elements s[i:j] by the given v, and returns the +// modified slice. Replace panics if s[i:j] is not a valid slice of s. +func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { + _ = s[i:j] // verify that i:j is a valid subslice + tot := len(s[:i]) + len(v) + len(s[j:]) + if tot <= cap(s) { + s2 := s[:tot] + copy(s2[i+len(v):], s[j:]) + copy(s2[i:], v) + return s2 + } + s2 := make(S, tot) + copy(s2, s[:i]) + copy(s2[i:], v) + copy(s2[i+len(v):], s[j:]) + return s2 +} + +// Clone returns a copy of the slice. +// The elements are copied using assignment, so this is a shallow clone. +func Clone[S ~[]E, E any](s S) S { + // Preserve nil in case it matters. + if s == nil { + return nil + } + return append(S([]E{}), s...) +} + +// Compact replaces consecutive runs of equal elements with a single copy. +// This is like the uniq command found on Unix. +// Compact modifies the contents of the slice s; it does not create a new slice. +// When Compact discards m elements in total, it might not modify the elements +// s[len(s)-m:len(s)]. If those elements contain pointers you might consider +// zeroing those elements so that objects they reference can be garbage collected. 
+func Compact[S ~[]E, E comparable](s S) S { + if len(s) < 2 { + return s + } + i := 1 + for k := 1; k < len(s); k++ { + if s[k] != s[k-1] { + if i != k { + s[i] = s[k] + } + i++ + } + } + return s[:i] +} + +// CompactFunc is like Compact but uses a comparison function. +func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { + if len(s) < 2 { + return s + } + i := 1 + for k := 1; k < len(s); k++ { + if !eq(s[k], s[k-1]) { + if i != k { + s[i] = s[k] + } + i++ + } + } + return s[:i] +} + +// Grow increases the slice's capacity, if necessary, to guarantee space for +// another n elements. After Grow(n), at least n elements can be appended +// to the slice without another allocation. If n is negative or too large to +// allocate the memory, Grow panics. +func Grow[S ~[]E, E any](s S, n int) S { + if n < 0 { + panic("cannot be negative") + } + if n -= cap(s) - len(s); n > 0 { + // TODO(https://go.dev/issue/53888): Make using []E instead of S + // to workaround a compiler bug where the runtime.growslice optimization + // does not take effect. Revert when the compiler is fixed. + s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)] + } + return s +} + +// Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. +func Clip[S ~[]E, E any](s S) S { + return s[:len(s):len(s)] +} diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go new file mode 100644 index 000000000..231b6448a --- /dev/null +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -0,0 +1,128 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import ( + "math/bits" + + "golang.org/x/exp/constraints" +) + +// Sort sorts a slice of any ordered type in ascending order. +// Sort may fail to sort correctly when sorting slices of floating-point +// numbers containing Not-a-number (NaN) values. +// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))}) +// instead if the input may contain NaNs. +func Sort[E constraints.Ordered](x []E) { + n := len(x) + pdqsortOrdered(x, 0, n, bits.Len(uint(n))) +} + +// SortFunc sorts the slice x in ascending order as determined by the less function. +// This sort is not guaranteed to be stable. +// +// SortFunc requires that less is a strict weak ordering. +// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. +func SortFunc[E any](x []E, less func(a, b E) bool) { + n := len(x) + pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less) +} + +// SortStableFunc sorts the slice x while keeping the original order of equal +// elements, using less to compare elements. +func SortStableFunc[E any](x []E, less func(a, b E) bool) { + stableLessFunc(x, len(x), less) +} + +// IsSorted reports whether x is sorted in ascending order. +func IsSorted[E constraints.Ordered](x []E) bool { + for i := len(x) - 1; i > 0; i-- { + if x[i] < x[i-1] { + return false + } + } + return true +} + +// IsSortedFunc reports whether x is sorted in ascending order, with less as the +// comparison function. 
+func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool { + for i := len(x) - 1; i > 0; i-- { + if less(x[i], x[i-1]) { + return false + } + } + return true +} + +// BinarySearch searches for target in a sorted slice and returns the position +// where target is found, or the position where target would appear in the +// sort order; it also returns a bool saying whether the target is really found +// in the slice. The slice must be sorted in increasing order. +func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) { + // Inlining is faster than calling BinarySearchFunc with a lambda. + n := len(x) + // Define x[-1] < target and x[n] >= target. + // Invariant: x[i-1] < target, x[j] >= target. + i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if x[h] < target { + i = h + 1 // preserves x[i-1] < target + } else { + j = h // preserves x[j] >= target + } + } + // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i. + return i, i < n && x[i] == target +} + +// BinarySearchFunc works like BinarySearch, but uses a custom comparison +// function. The slice must be sorted in increasing order, where "increasing" +// is defined by cmp. cmp should return 0 if the slice element matches +// the target, a negative number if the slice element precedes the target, +// or a positive number if the slice element follows the target. +// cmp must implement the same ordering as the slice, such that if +// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice. +func BinarySearchFunc[E, T any](x []E, target T, cmp func(E, T) int) (int, bool) { + n := len(x) + // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 . + // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0. + i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if cmp(x[h], target) < 0 { + i = h + 1 // preserves cmp(x[i - 1], target) < 0 + } else { + j = h // preserves cmp(x[j], target) >= 0 + } + } + // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i. + return i, i < n && cmp(x[i], target) == 0 +} + +type sortedHint int // hint for pdqsort when choosing the pivot + +const ( + unknownHint sortedHint = iota + increasingHint + decreasingHint +) + +// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf +type xorshift uint64 + +func (r *xorshift) Next() uint64 { + *r ^= *r << 13 + *r ^= *r >> 17 + *r ^= *r << 5 + return uint64(*r) +} + +func nextPowerOfTwo(length int) uint { + return 1 << bits.Len(uint(length)) +} diff --git a/vendor/golang.org/x/exp/slices/zsortfunc.go b/vendor/golang.org/x/exp/slices/zsortfunc.go new file mode 100644 index 000000000..2a632476c --- /dev/null +++ b/vendor/golang.org/x/exp/slices/zsortfunc.go @@ -0,0 +1,479 @@ +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +// insertionSortLessFunc sorts data[a:b] using insertion sort. +func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + for i := a + 1; i < b; i++ { + for j := i; j > a && less(data[j], data[j-1]); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownLessFunc implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. 
+func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && less(data[first+child], data[first+child+1]) { + child++ + } + if !less(data[first+root], data[first+child]) { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} + +func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDownLessFunc(data, i, hi, first, less) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDownLessFunc(data, lo, i, first, less) + } +} + +// pdqsortLessFunc sorts data[a:b]. +// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. +// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf +// C++ implementation: https://github.com/orlp/pdqsort +// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ +// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. +func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { + const maxInsertion = 12 + + var ( + wasBalanced = true // whether the last partitioning was reasonably balanced + wasPartitioned = true // whether the slice was already partitioned + ) + + for { + length := b - a + + if length <= maxInsertion { + insertionSortLessFunc(data, a, b, less) + return + } + + // Fall back to heapsort if too many bad choices were made. + if limit == 0 { + heapSortLessFunc(data, a, b, less) + return + } + + // If the last partitioning was imbalanced, we need to breaking patterns. + if !wasBalanced { + breakPatternsLessFunc(data, a, b, less) + limit-- + } + + pivot, hint := choosePivotLessFunc(data, a, b, less) + if hint == decreasingHint { + reverseRangeLessFunc(data, a, b, less) + // The chosen pivot was pivot-a elements after the start of the array. + // After reversing it is pivot-a elements before the end of the array. + // The idea came from Rust's implementation. + pivot = (b - 1) - (pivot - a) + hint = increasingHint + } + + // The slice is likely already sorted. + if wasBalanced && wasPartitioned && hint == increasingHint { + if partialInsertionSortLessFunc(data, a, b, less) { + return + } + } + + // Probably the slice contains many duplicate elements, partition the slice into + // elements equal to and elements greater than the pivot. + if a > 0 && !less(data[a-1], data[pivot]) { + mid := partitionEqualLessFunc(data, a, b, pivot, less) + a = mid + continue + } + + mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less) + wasPartitioned = alreadyPartitioned + + leftLen, rightLen := mid-a, b-mid + balanceThreshold := length / 8 + if leftLen < rightLen { + wasBalanced = leftLen >= balanceThreshold + pdqsortLessFunc(data, a, mid, limit, less) + a = mid + 1 + } else { + wasBalanced = rightLen >= balanceThreshold + pdqsortLessFunc(data, mid+1, b, limit, less) + b = mid + } + } +} + +// partitionLessFunc does one quicksort partition. +// Let p = data[pivot] +// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>
=p for inewpivot. +// On return, data[newpivot] = p +func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for i <= j && less(data[i], data[a]) { + i++ + } + for i <= j && !less(data[j], data[a]) { + j-- + } + if i > j { + data[j], data[a] = data[a], data[j] + return j, true + } + data[i], data[j] = data[j], data[i] + i++ + j-- + + for { + for i <= j && less(data[i], data[a]) { + i++ + } + for i <= j && !less(data[j], data[a]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + data[j], data[a] = data[a], data[j] + return j, false +} + +// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. +// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. +func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for { + for i <= j && !less(data[a], data[i]) { + i++ + } + for i <= j && less(data[a], data[j]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + return i +} + +// partialInsertionSortLessFunc partially sorts a slice, returns true if the slice is sorted at the end. +func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool { + const ( + maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted + shortestShifting = 50 // don't shift any elements on short arrays + ) + i := a + 1 + for j := 0; j < maxSteps; j++ { + for i < b && !less(data[i], data[i-1]) { + i++ + } + + if i == b { + return true + } + + if b-a < shortestShifting { + return false + } + + data[i], data[i-1] = data[i-1], data[i] + + // Shift the smaller one to the left. + if i-a >= 2 { + for j := i - 1; j >= 1; j-- { + if !less(data[j], data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + // Shift the greater one to the right. + if b-i >= 2 { + for j := i + 1; j < b; j++ { + if !less(data[j], data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + } + return false +} + +// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns +// that might cause imbalanced partitions in quicksort. +func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + length := b - a + if length >= 8 { + random := xorshift(length) + modulus := nextPowerOfTwo(length) + + for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { + other := int(uint(random.Next()) & (modulus - 1)) + if other >= length { + other -= length + } + data[idx], data[a+other] = data[a+other], data[idx] + } + } +} + +// choosePivotLessFunc chooses a pivot in data[a:b]. +// +// [0,8): chooses a static pivot. +// [8,shortestNinther): uses the simple median-of-three method. +// [shortestNinther,∞): uses the Tukey ninther method. 
+func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) { + const ( + shortestNinther = 50 + maxSwaps = 4 * 3 + ) + + l := b - a + + var ( + swaps int + i = a + l/4*1 + j = a + l/4*2 + k = a + l/4*3 + ) + + if l >= 8 { + if l >= shortestNinther { + // Tukey ninther method, the idea came from Rust's implementation. + i = medianAdjacentLessFunc(data, i, &swaps, less) + j = medianAdjacentLessFunc(data, j, &swaps, less) + k = medianAdjacentLessFunc(data, k, &swaps, less) + } + // Find the median among i, j, k and stores it into j. + j = medianLessFunc(data, i, j, k, &swaps, less) + } + + switch swaps { + case 0: + return j, increasingHint + case maxSwaps: + return j, decreasingHint + default: + return j, unknownHint + } +} + +// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) { + if less(data[b], data[a]) { + *swaps++ + return b, a + } + return a, b +} + +// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int { + a, b = order2LessFunc(data, a, b, swaps, less) + b, c = order2LessFunc(data, b, c, swaps, less) + a, b = order2LessFunc(data, a, b, swaps, less) + return b +} + +// medianAdjacentLessFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. +func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int { + return medianLessFunc(data, a-1, a, a+1, swaps, less) +} + +func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + i := a + j := b - 1 + for i < j { + data[i], data[j] = data[j], data[i] + i++ + j-- + } +} + +func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) { + for i := 0; i < n; i++ { + data[a+i], data[b+i] = data[b+i], data[a+i] + } +} + +func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) { + blockSize := 20 // must be > 0 + a, b := 0, blockSize + for b <= n { + insertionSortLessFunc(data, a, b, less) + a = b + b += blockSize + } + insertionSortLessFunc(data, a, n, less) + + for blockSize < n { + a, b = 0, 2*blockSize + for b <= n { + symMergeLessFunc(data, a, a+blockSize, b, less) + a = b + b += 2 * blockSize + } + if m := a + blockSize; m < n { + symMergeLessFunc(data, a, m, n, less) + } + blockSize *= 2 + } +} + +// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using +// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum +// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz +// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in +// Computer Science, pages 714-723. Springer, 2004. +// +// Let M = m-a and N = b-n. Wolog M < N. +// The recursion depth is bound by ceil(log(N+M)). +// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. +// The algorithm needs O((M+N)*log(M)) calls to data.Swap. +// +// The paper gives O((M+N)*log(M)) as the number of assignments assuming a +// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation +// in the paper carries through for Swap operations, especially as the block +// swapping rotate uses only O(M+N) Swaps. +// +// symMerge assumes non-degenerate arguments: a < m && m < b. 
+// Having the caller check this condition eliminates many leaf recursion calls, +// which improves performance. +func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. + i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if less(data[h], data[a]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !less(data[m], data[h]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. + for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !less(data[p-c], data[c]) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateLessFunc(data, start, m, end, less) + } + if a < start && start < mid { + symMergeLessFunc(data, a, start, mid, less) + } + if mid < end && end < b { + symMergeLessFunc(data, mid, end, b, less) + } +} + +// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. +// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. +func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeLessFunc(data, m-i, m, j, less) + i -= j + } else { + swapRangeLessFunc(data, m-i, m+j-i, i, less) + j -= i + } + } + // i == j + swapRangeLessFunc(data, m-i, m, i, less) +} diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go new file mode 100644 index 000000000..efaa1c8b7 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/zsortordered.go @@ -0,0 +1,481 @@ +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import "golang.org/x/exp/constraints" + +// insertionSortOrdered sorts data[a:b] using insertion sort. +func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && (data[j] < data[j-1]); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownOrdered implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. 
+func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && (data[first+child] < data[first+child+1]) { + child++ + } + if !(data[first+root] < data[first+child]) { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} + +func heapSortOrdered[E constraints.Ordered](data []E, a, b int) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDownOrdered(data, i, hi, first) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDownOrdered(data, lo, i, first) + } +} + +// pdqsortOrdered sorts data[a:b]. +// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. +// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf +// C++ implementation: https://github.com/orlp/pdqsort +// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ +// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. +func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) { + const maxInsertion = 12 + + var ( + wasBalanced = true // whether the last partitioning was reasonably balanced + wasPartitioned = true // whether the slice was already partitioned + ) + + for { + length := b - a + + if length <= maxInsertion { + insertionSortOrdered(data, a, b) + return + } + + // Fall back to heapsort if too many bad choices were made. + if limit == 0 { + heapSortOrdered(data, a, b) + return + } + + // If the last partitioning was imbalanced, we need to breaking patterns. + if !wasBalanced { + breakPatternsOrdered(data, a, b) + limit-- + } + + pivot, hint := choosePivotOrdered(data, a, b) + if hint == decreasingHint { + reverseRangeOrdered(data, a, b) + // The chosen pivot was pivot-a elements after the start of the array. + // After reversing it is pivot-a elements before the end of the array. + // The idea came from Rust's implementation. + pivot = (b - 1) - (pivot - a) + hint = increasingHint + } + + // The slice is likely already sorted. + if wasBalanced && wasPartitioned && hint == increasingHint { + if partialInsertionSortOrdered(data, a, b) { + return + } + } + + // Probably the slice contains many duplicate elements, partition the slice into + // elements equal to and elements greater than the pivot. + if a > 0 && !(data[a-1] < data[pivot]) { + mid := partitionEqualOrdered(data, a, b, pivot) + a = mid + continue + } + + mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot) + wasPartitioned = alreadyPartitioned + + leftLen, rightLen := mid-a, b-mid + balanceThreshold := length / 8 + if leftLen < rightLen { + wasBalanced = leftLen >= balanceThreshold + pdqsortOrdered(data, a, mid, limit) + a = mid + 1 + } else { + wasBalanced = rightLen >= balanceThreshold + pdqsortOrdered(data, mid+1, b, limit) + b = mid + } + } +} + +// partitionOrdered does one quicksort partition. +// Let p = data[pivot] +// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>
=p for inewpivot. +// On return, data[newpivot] = p +func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for i <= j && (data[i] < data[a]) { + i++ + } + for i <= j && !(data[j] < data[a]) { + j-- + } + if i > j { + data[j], data[a] = data[a], data[j] + return j, true + } + data[i], data[j] = data[j], data[i] + i++ + j-- + + for { + for i <= j && (data[i] < data[a]) { + i++ + } + for i <= j && !(data[j] < data[a]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + data[j], data[a] = data[a], data[j] + return j, false +} + +// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. +// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. +func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for { + for i <= j && !(data[a] < data[i]) { + i++ + } + for i <= j && (data[a] < data[j]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + return i +} + +// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end. +func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool { + const ( + maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted + shortestShifting = 50 // don't shift any elements on short arrays + ) + i := a + 1 + for j := 0; j < maxSteps; j++ { + for i < b && !(data[i] < data[i-1]) { + i++ + } + + if i == b { + return true + } + + if b-a < shortestShifting { + return false + } + + data[i], data[i-1] = data[i-1], data[i] + + // Shift the smaller one to the left. + if i-a >= 2 { + for j := i - 1; j >= 1; j-- { + if !(data[j] < data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + // Shift the greater one to the right. + if b-i >= 2 { + for j := i + 1; j < b; j++ { + if !(data[j] < data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + } + return false +} + +// breakPatternsOrdered scatters some elements around in an attempt to break some patterns +// that might cause imbalanced partitions in quicksort. +func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) { + length := b - a + if length >= 8 { + random := xorshift(length) + modulus := nextPowerOfTwo(length) + + for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { + other := int(uint(random.Next()) & (modulus - 1)) + if other >= length { + other -= length + } + data[idx], data[a+other] = data[a+other], data[idx] + } + } +} + +// choosePivotOrdered chooses a pivot in data[a:b]. +// +// [0,8): chooses a static pivot. +// [8,shortestNinther): uses the simple median-of-three method. +// [shortestNinther,∞): uses the Tukey ninther method. +func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) { + const ( + shortestNinther = 50 + maxSwaps = 4 * 3 + ) + + l := b - a + + var ( + swaps int + i = a + l/4*1 + j = a + l/4*2 + k = a + l/4*3 + ) + + if l >= 8 { + if l >= shortestNinther { + // Tukey ninther method, the idea came from Rust's implementation. 
+ i = medianAdjacentOrdered(data, i, &swaps) + j = medianAdjacentOrdered(data, j, &swaps) + k = medianAdjacentOrdered(data, k, &swaps) + } + // Find the median among i, j, k and stores it into j. + j = medianOrdered(data, i, j, k, &swaps) + } + + switch swaps { + case 0: + return j, increasingHint + case maxSwaps: + return j, decreasingHint + default: + return j, unknownHint + } +} + +// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) { + if data[b] < data[a] { + *swaps++ + return b, a + } + return a, b +} + +// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int { + a, b = order2Ordered(data, a, b, swaps) + b, c = order2Ordered(data, b, c, swaps) + a, b = order2Ordered(data, a, b, swaps) + return b +} + +// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. +func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int { + return medianOrdered(data, a-1, a, a+1, swaps) +} + +func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) { + i := a + j := b - 1 + for i < j { + data[i], data[j] = data[j], data[i] + i++ + j-- + } +} + +func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) { + for i := 0; i < n; i++ { + data[a+i], data[b+i] = data[b+i], data[a+i] + } +} + +func stableOrdered[E constraints.Ordered](data []E, n int) { + blockSize := 20 // must be > 0 + a, b := 0, blockSize + for b <= n { + insertionSortOrdered(data, a, b) + a = b + b += blockSize + } + insertionSortOrdered(data, a, n) + + for blockSize < n { + a, b = 0, 2*blockSize + for b <= n { + symMergeOrdered(data, a, a+blockSize, b) + a = b + b += 2 * blockSize + } + if m := a + blockSize; m < n { + symMergeOrdered(data, a, m, n) + } + blockSize *= 2 + } +} + +// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using +// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum +// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz +// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in +// Computer Science, pages 714-723. Springer, 2004. +// +// Let M = m-a and N = b-n. Wolog M < N. +// The recursion depth is bound by ceil(log(N+M)). +// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. +// The algorithm needs O((M+N)*log(M)) calls to data.Swap. +// +// The paper gives O((M+N)*log(M)) as the number of assignments assuming a +// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation +// in the paper carries through for Swap operations, especially as the block +// swapping rotate uses only O(M+N) Swaps. +// +// symMerge assumes non-degenerate arguments: a < m && m < b. +// Having the caller check this condition eliminates many leaf recursion calls, +// which improves performance. +func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. 
+ i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if data[h] < data[a] { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !(data[m] < data[h]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. + for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !(data[p-c] < data[c]) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateOrdered(data, start, m, end) + } + if a < start && start < mid { + symMergeOrdered(data, a, start, mid) + } + if mid < end && end < b { + symMergeOrdered(data, mid, end, b) + } +} + +// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. +// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. +func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeOrdered(data, m-i, m, j) + i -= j + } else { + swapRangeOrdered(data, m-i, m+j-i, i) + j -= i + } + } + // i == j + swapRangeOrdered(data, m-i, m, i) +} diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/golang.org/x/net/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/golang.org/x/net/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/net/internal/socks/client.go b/vendor/golang.org/x/net/internal/socks/client.go new file mode 100644 index 000000000..3d6f516a5 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socks/client.go @@ -0,0 +1,168 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socks + +import ( + "context" + "errors" + "io" + "net" + "strconv" + "time" +) + +var ( + noDeadline = time.Time{} + aLongTimeAgo = time.Unix(1, 0) +) + +func (d *Dialer) connect(ctx context.Context, c net.Conn, address string) (_ net.Addr, ctxErr error) { + host, port, err := splitHostPort(address) + if err != nil { + return nil, err + } + if deadline, ok := ctx.Deadline(); ok && !deadline.IsZero() { + c.SetDeadline(deadline) + defer c.SetDeadline(noDeadline) + } + if ctx != context.Background() { + errCh := make(chan error, 1) + done := make(chan struct{}) + defer func() { + close(done) + if ctxErr == nil { + ctxErr = <-errCh + } + }() + go func() { + select { + case <-ctx.Done(): + c.SetDeadline(aLongTimeAgo) + errCh <- ctx.Err() + case <-done: + errCh <- nil + } + }() + } + + b := make([]byte, 0, 6+len(host)) // the size here is just an estimate + b = append(b, Version5) + if len(d.AuthMethods) == 0 || d.Authenticate == nil { + b = append(b, 1, byte(AuthMethodNotRequired)) + } else { + ams := d.AuthMethods + if len(ams) > 255 { + return nil, errors.New("too many authentication methods") + } + b = append(b, byte(len(ams))) + for _, am := range ams { + b = append(b, byte(am)) + } + } + if _, ctxErr = c.Write(b); ctxErr != nil { + return + } + + if _, ctxErr = io.ReadFull(c, b[:2]); ctxErr != nil { + return + } + if b[0] != Version5 { + return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0]))) + } + am := AuthMethod(b[1]) + if am == AuthMethodNoAcceptableMethods { + return nil, errors.New("no acceptable authentication methods") + } + if d.Authenticate != nil { + if ctxErr = d.Authenticate(ctx, c, am); ctxErr != nil { + return + } + } + + b = b[:0] + b = append(b, Version5, byte(d.cmd), 0) + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + b = append(b, AddrTypeIPv4) + b = append(b, ip4...) + } else if ip6 := ip.To16(); ip6 != nil { + b = append(b, AddrTypeIPv6) + b = append(b, ip6...) + } else { + return nil, errors.New("unknown address type") + } + } else { + if len(host) > 255 { + return nil, errors.New("FQDN too long") + } + b = append(b, AddrTypeFQDN) + b = append(b, byte(len(host))) + b = append(b, host...) 
+ } + b = append(b, byte(port>>8), byte(port)) + if _, ctxErr = c.Write(b); ctxErr != nil { + return + } + + if _, ctxErr = io.ReadFull(c, b[:4]); ctxErr != nil { + return + } + if b[0] != Version5 { + return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0]))) + } + if cmdErr := Reply(b[1]); cmdErr != StatusSucceeded { + return nil, errors.New("unknown error " + cmdErr.String()) + } + if b[2] != 0 { + return nil, errors.New("non-zero reserved field") + } + l := 2 + var a Addr + switch b[3] { + case AddrTypeIPv4: + l += net.IPv4len + a.IP = make(net.IP, net.IPv4len) + case AddrTypeIPv6: + l += net.IPv6len + a.IP = make(net.IP, net.IPv6len) + case AddrTypeFQDN: + if _, err := io.ReadFull(c, b[:1]); err != nil { + return nil, err + } + l += int(b[0]) + default: + return nil, errors.New("unknown address type " + strconv.Itoa(int(b[3]))) + } + if cap(b) < l { + b = make([]byte, l) + } else { + b = b[:l] + } + if _, ctxErr = io.ReadFull(c, b); ctxErr != nil { + return + } + if a.IP != nil { + copy(a.IP, b) + } else { + a.Name = string(b[:len(b)-2]) + } + a.Port = int(b[len(b)-2])<<8 | int(b[len(b)-1]) + return &a, nil +} + +func splitHostPort(address string) (string, int, error) { + host, port, err := net.SplitHostPort(address) + if err != nil { + return "", 0, err + } + portnum, err := strconv.Atoi(port) + if err != nil { + return "", 0, err + } + if 1 > portnum || portnum > 0xffff { + return "", 0, errors.New("port number out of range " + port) + } + return host, portnum, nil +} diff --git a/vendor/golang.org/x/net/internal/socks/socks.go b/vendor/golang.org/x/net/internal/socks/socks.go new file mode 100644 index 000000000..97db2340e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socks/socks.go @@ -0,0 +1,317 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package socks provides a SOCKS version 5 client implementation. +// +// SOCKS protocol version 5 is defined in RFC 1928. +// Username/Password authentication for SOCKS version 5 is defined in +// RFC 1929. +package socks + +import ( + "context" + "errors" + "io" + "net" + "strconv" +) + +// A Command represents a SOCKS command. +type Command int + +func (cmd Command) String() string { + switch cmd { + case CmdConnect: + return "socks connect" + case cmdBind: + return "socks bind" + default: + return "socks " + strconv.Itoa(int(cmd)) + } +} + +// An AuthMethod represents a SOCKS authentication method. +type AuthMethod int + +// A Reply represents a SOCKS command reply code. +type Reply int + +func (code Reply) String() string { + switch code { + case StatusSucceeded: + return "succeeded" + case 0x01: + return "general SOCKS server failure" + case 0x02: + return "connection not allowed by ruleset" + case 0x03: + return "network unreachable" + case 0x04: + return "host unreachable" + case 0x05: + return "connection refused" + case 0x06: + return "TTL expired" + case 0x07: + return "command not supported" + case 0x08: + return "address type not supported" + default: + return "unknown code: " + strconv.Itoa(int(code)) + } +} + +// Wire protocol constants. 
+const ( + Version5 = 0x05 + + AddrTypeIPv4 = 0x01 + AddrTypeFQDN = 0x03 + AddrTypeIPv6 = 0x04 + + CmdConnect Command = 0x01 // establishes an active-open forward proxy connection + cmdBind Command = 0x02 // establishes a passive-open forward proxy connection + + AuthMethodNotRequired AuthMethod = 0x00 // no authentication required + AuthMethodUsernamePassword AuthMethod = 0x02 // use username/password + AuthMethodNoAcceptableMethods AuthMethod = 0xff // no acceptable authentication methods + + StatusSucceeded Reply = 0x00 +) + +// An Addr represents a SOCKS-specific address. +// Either Name or IP is used exclusively. +type Addr struct { + Name string // fully-qualified domain name + IP net.IP + Port int +} + +func (a *Addr) Network() string { return "socks" } + +func (a *Addr) String() string { + if a == nil { + return "" + } + port := strconv.Itoa(a.Port) + if a.IP == nil { + return net.JoinHostPort(a.Name, port) + } + return net.JoinHostPort(a.IP.String(), port) +} + +// A Conn represents a forward proxy connection. +type Conn struct { + net.Conn + + boundAddr net.Addr +} + +// BoundAddr returns the address assigned by the proxy server for +// connecting to the command target address from the proxy server. +func (c *Conn) BoundAddr() net.Addr { + if c == nil { + return nil + } + return c.boundAddr +} + +// A Dialer holds SOCKS-specific options. +type Dialer struct { + cmd Command // either CmdConnect or cmdBind + proxyNetwork string // network between a proxy server and a client + proxyAddress string // proxy server address + + // ProxyDial specifies the optional dial function for + // establishing the transport connection. + ProxyDial func(context.Context, string, string) (net.Conn, error) + + // AuthMethods specifies the list of request authentication + // methods. + // If empty, SOCKS client requests only AuthMethodNotRequired. + AuthMethods []AuthMethod + + // Authenticate specifies the optional authentication + // function. It must be non-nil when AuthMethods is not empty. + // It must return an error when the authentication is failed. + Authenticate func(context.Context, io.ReadWriter, AuthMethod) error +} + +// DialContext connects to the provided address on the provided +// network. +// +// The returned error value may be a net.OpError. When the Op field of +// net.OpError contains "socks", the Source field contains a proxy +// server address and the Addr field contains a command target +// address. +// +// See func Dial of the net package of standard library for a +// description of the network and address parameters. 
+func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { + if err := d.validateTarget(network, address); err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + if ctx == nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")} + } + var err error + var c net.Conn + if d.ProxyDial != nil { + c, err = d.ProxyDial(ctx, d.proxyNetwork, d.proxyAddress) + } else { + var dd net.Dialer + c, err = dd.DialContext(ctx, d.proxyNetwork, d.proxyAddress) + } + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + a, err := d.connect(ctx, c, address) + if err != nil { + c.Close() + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + return &Conn{Conn: c, boundAddr: a}, nil +} + +// DialWithConn initiates a connection from SOCKS server to the target +// network and address using the connection c that is already +// connected to the SOCKS server. +// +// It returns the connection's local address assigned by the SOCKS +// server. +func (d *Dialer) DialWithConn(ctx context.Context, c net.Conn, network, address string) (net.Addr, error) { + if err := d.validateTarget(network, address); err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + if ctx == nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")} + } + a, err := d.connect(ctx, c, address) + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + return a, nil +} + +// Dial connects to the provided address on the provided network. +// +// Unlike DialContext, it returns a raw transport connection instead +// of a forward proxy connection. +// +// Deprecated: Use DialContext or DialWithConn instead. 
+func (d *Dialer) Dial(network, address string) (net.Conn, error) { + if err := d.validateTarget(network, address); err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + var err error + var c net.Conn + if d.ProxyDial != nil { + c, err = d.ProxyDial(context.Background(), d.proxyNetwork, d.proxyAddress) + } else { + c, err = net.Dial(d.proxyNetwork, d.proxyAddress) + } + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + if _, err := d.DialWithConn(context.Background(), c, network, address); err != nil { + c.Close() + return nil, err + } + return c, nil +} + +func (d *Dialer) validateTarget(network, address string) error { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return errors.New("network not implemented") + } + switch d.cmd { + case CmdConnect, cmdBind: + default: + return errors.New("command not implemented") + } + return nil +} + +func (d *Dialer) pathAddrs(address string) (proxy, dst net.Addr, err error) { + for i, s := range []string{d.proxyAddress, address} { + host, port, err := splitHostPort(s) + if err != nil { + return nil, nil, err + } + a := &Addr{Port: port} + a.IP = net.ParseIP(host) + if a.IP == nil { + a.Name = host + } + if i == 0 { + proxy = a + } else { + dst = a + } + } + return +} + +// NewDialer returns a new Dialer that dials through the provided +// proxy server's network and address. +func NewDialer(network, address string) *Dialer { + return &Dialer{proxyNetwork: network, proxyAddress: address, cmd: CmdConnect} +} + +const ( + authUsernamePasswordVersion = 0x01 + authStatusSucceeded = 0x00 +) + +// UsernamePassword are the credentials for the username/password +// authentication method. +type UsernamePassword struct { + Username string + Password string +} + +// Authenticate authenticates a pair of username and password with the +// proxy server. +func (up *UsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter, auth AuthMethod) error { + switch auth { + case AuthMethodNotRequired: + return nil + case AuthMethodUsernamePassword: + if len(up.Username) == 0 || len(up.Username) > 255 || len(up.Password) == 0 || len(up.Password) > 255 { + return errors.New("invalid username/password") + } + b := []byte{authUsernamePasswordVersion} + b = append(b, byte(len(up.Username))) + b = append(b, up.Username...) + b = append(b, byte(len(up.Password))) + b = append(b, up.Password...) + // TODO(mikio): handle IO deadlines and cancelation if + // necessary + if _, err := rw.Write(b); err != nil { + return err + } + if _, err := io.ReadFull(rw, b[:2]); err != nil { + return err + } + if b[0] != authUsernamePasswordVersion { + return errors.New("invalid username/password version") + } + if b[1] != authStatusSucceeded { + return errors.New("username/password authentication failed") + } + return nil + } + return errors.New("unsupported authentication method " + strconv.Itoa(int(auth))) +} diff --git a/vendor/golang.org/x/net/proxy/dial.go b/vendor/golang.org/x/net/proxy/dial.go new file mode 100644 index 000000000..811c2e4e9 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/dial.go @@ -0,0 +1,54 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package proxy + +import ( + "context" + "net" +) + +// A ContextDialer dials using a context. +type ContextDialer interface { + DialContext(ctx context.Context, network, address string) (net.Conn, error) +} + +// Dial works like DialContext on net.Dialer but using a dialer returned by FromEnvironment. +// +// The passed ctx is only used for returning the Conn, not the lifetime of the Conn. +// +// Custom dialers (registered via RegisterDialerType) that do not implement ContextDialer +// can leak a goroutine for as long as it takes the underlying Dialer implementation to timeout. +// +// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed. +func Dial(ctx context.Context, network, address string) (net.Conn, error) { + d := FromEnvironment() + if xd, ok := d.(ContextDialer); ok { + return xd.DialContext(ctx, network, address) + } + return dialContext(ctx, d, network, address) +} + +// WARNING: this can leak a goroutine for as long as the underlying Dialer implementation takes to timeout +// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed. +func dialContext(ctx context.Context, d Dialer, network, address string) (net.Conn, error) { + var ( + conn net.Conn + done = make(chan struct{}, 1) + err error + ) + go func() { + conn, err = d.Dial(network, address) + close(done) + if conn != nil && ctx.Err() != nil { + conn.Close() + } + }() + select { + case <-ctx.Done(): + err = ctx.Err() + case <-done: + } + return conn, err +} diff --git a/vendor/golang.org/x/net/proxy/direct.go b/vendor/golang.org/x/net/proxy/direct.go new file mode 100644 index 000000000..3d66bdef9 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/direct.go @@ -0,0 +1,31 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "context" + "net" +) + +type direct struct{} + +// Direct implements Dialer by making network connections directly using net.Dial or net.DialContext. +var Direct = direct{} + +var ( + _ Dialer = Direct + _ ContextDialer = Direct +) + +// Dial directly invokes net.Dial with the supplied parameters. +func (direct) Dial(network, addr string) (net.Conn, error) { + return net.Dial(network, addr) +} + +// DialContext instantiates a net.Dialer and invokes its DialContext receiver with the supplied parameters. +func (direct) DialContext(ctx context.Context, network, addr string) (net.Conn, error) { + var d net.Dialer + return d.DialContext(ctx, network, addr) +} diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go new file mode 100644 index 000000000..573fe79e8 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/per_host.go @@ -0,0 +1,155 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "context" + "net" + "strings" +) + +// A PerHost directs connections to a default Dialer unless the host name +// requested matches one of a number of exceptions. +type PerHost struct { + def, bypass Dialer + + bypassNetworks []*net.IPNet + bypassIPs []net.IP + bypassZones []string + bypassHosts []string +} + +// NewPerHost returns a PerHost Dialer that directs connections to either +// defaultDialer or bypass, depending on whether the connection matches one of +// the configured rules. 
+func NewPerHost(defaultDialer, bypass Dialer) *PerHost { + return &PerHost{ + def: defaultDialer, + bypass: bypass, + } +} + +// Dial connects to the address addr on the given network through either +// defaultDialer or bypass. +func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + return p.dialerForRequest(host).Dial(network, addr) +} + +// DialContext connects to the address addr on the given network through either +// defaultDialer or bypass. +func (p *PerHost) DialContext(ctx context.Context, network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + d := p.dialerForRequest(host) + if x, ok := d.(ContextDialer); ok { + return x.DialContext(ctx, network, addr) + } + return dialContext(ctx, d, network, addr) +} + +func (p *PerHost) dialerForRequest(host string) Dialer { + if ip := net.ParseIP(host); ip != nil { + for _, net := range p.bypassNetworks { + if net.Contains(ip) { + return p.bypass + } + } + for _, bypassIP := range p.bypassIPs { + if bypassIP.Equal(ip) { + return p.bypass + } + } + return p.def + } + + for _, zone := range p.bypassZones { + if strings.HasSuffix(host, zone) { + return p.bypass + } + if host == zone[1:] { + // For a zone ".example.com", we match "example.com" + // too. + return p.bypass + } + } + for _, bypassHost := range p.bypassHosts { + if bypassHost == host { + return p.bypass + } + } + return p.def +} + +// AddFromString parses a string that contains comma-separated values +// specifying hosts that should use the bypass proxy. Each value is either an +// IP address, a CIDR range, a zone (*.example.com) or a host name +// (localhost). A best effort is made to parse the string and errors are +// ignored. +func (p *PerHost) AddFromString(s string) { + hosts := strings.Split(s, ",") + for _, host := range hosts { + host = strings.TrimSpace(host) + if len(host) == 0 { + continue + } + if strings.Contains(host, "/") { + // We assume that it's a CIDR address like 127.0.0.0/8 + if _, net, err := net.ParseCIDR(host); err == nil { + p.AddNetwork(net) + } + continue + } + if ip := net.ParseIP(host); ip != nil { + p.AddIP(ip) + continue + } + if strings.HasPrefix(host, "*.") { + p.AddZone(host[1:]) + continue + } + p.AddHost(host) + } +} + +// AddIP specifies an IP address that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match an IP. +func (p *PerHost) AddIP(ip net.IP) { + p.bypassIPs = append(p.bypassIPs, ip) +} + +// AddNetwork specifies an IP range that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match. +func (p *PerHost) AddNetwork(net *net.IPNet) { + p.bypassNetworks = append(p.bypassNetworks, net) +} + +// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of +// "example.com" matches "example.com" and all of its subdomains. +func (p *PerHost) AddZone(zone string) { + if strings.HasSuffix(zone, ".") { + zone = zone[:len(zone)-1] + } + if !strings.HasPrefix(zone, ".") { + zone = "." + zone + } + p.bypassZones = append(p.bypassZones, zone) +} + +// AddHost specifies a host name that will use the bypass proxy. 
+func (p *PerHost) AddHost(host string) { + if strings.HasSuffix(host, ".") { + host = host[:len(host)-1] + } + p.bypassHosts = append(p.bypassHosts, host) +} diff --git a/vendor/golang.org/x/net/proxy/proxy.go b/vendor/golang.org/x/net/proxy/proxy.go new file mode 100644 index 000000000..9ff4b9a77 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/proxy.go @@ -0,0 +1,149 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package proxy provides support for a variety of protocols to proxy network +// data. +package proxy // import "golang.org/x/net/proxy" + +import ( + "errors" + "net" + "net/url" + "os" + "sync" +) + +// A Dialer is a means to establish a connection. +// Custom dialers should also implement ContextDialer. +type Dialer interface { + // Dial connects to the given address via the proxy. + Dial(network, addr string) (c net.Conn, err error) +} + +// Auth contains authentication parameters that specific Dialers may require. +type Auth struct { + User, Password string +} + +// FromEnvironment returns the dialer specified by the proxy-related +// variables in the environment and makes underlying connections +// directly. +func FromEnvironment() Dialer { + return FromEnvironmentUsing(Direct) +} + +// FromEnvironmentUsing returns the dialer specify by the proxy-related +// variables in the environment and makes underlying connections +// using the provided forwarding Dialer (for instance, a *net.Dialer +// with desired configuration). +func FromEnvironmentUsing(forward Dialer) Dialer { + allProxy := allProxyEnv.Get() + if len(allProxy) == 0 { + return forward + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return forward + } + proxy, err := FromURL(proxyURL, forward) + if err != nil { + return forward + } + + noProxy := noProxyEnv.Get() + if len(noProxy) == 0 { + return proxy + } + + perHost := NewPerHost(proxy, forward) + perHost.AddFromString(noProxy) + return perHost +} + +// proxySchemes is a map from URL schemes to a function that creates a Dialer +// from a URL with such a scheme. +var proxySchemes map[string]func(*url.URL, Dialer) (Dialer, error) + +// RegisterDialerType takes a URL scheme and a function to generate Dialers from +// a URL with that scheme and a forwarding Dialer. Registered schemes are used +// by FromURL. +func RegisterDialerType(scheme string, f func(*url.URL, Dialer) (Dialer, error)) { + if proxySchemes == nil { + proxySchemes = make(map[string]func(*url.URL, Dialer) (Dialer, error)) + } + proxySchemes[scheme] = f +} + +// FromURL returns a Dialer given a URL specification and an underlying +// Dialer for it to make network requests. +func FromURL(u *url.URL, forward Dialer) (Dialer, error) { + var auth *Auth + if u.User != nil { + auth = new(Auth) + auth.User = u.User.Username() + if p, ok := u.User.Password(); ok { + auth.Password = p + } + } + + switch u.Scheme { + case "socks5", "socks5h": + addr := u.Hostname() + port := u.Port() + if port == "" { + port = "1080" + } + return SOCKS5("tcp", net.JoinHostPort(addr, port), auth, forward) + } + + // If the scheme doesn't match any of the built-in schemes, see if it + // was registered by another package. 
+ if proxySchemes != nil { + if f, ok := proxySchemes[u.Scheme]; ok { + return f(u, forward) + } + } + + return nil, errors.New("proxy: unknown scheme: " + u.Scheme) +} + +var ( + allProxyEnv = &envOnce{ + names: []string{"ALL_PROXY", "all_proxy"}, + } + noProxyEnv = &envOnce{ + names: []string{"NO_PROXY", "no_proxy"}, + } +) + +// envOnce looks up an environment variable (optionally by multiple +// names) once. It mitigates expensive lookups on some platforms +// (e.g. Windows). +// (Borrowed from net/http/transport.go) +type envOnce struct { + names []string + once sync.Once + val string +} + +func (e *envOnce) Get() string { + e.once.Do(e.init) + return e.val +} + +func (e *envOnce) init() { + for _, n := range e.names { + e.val = os.Getenv(n) + if e.val != "" { + return + } + } +} + +// reset is used by tests +func (e *envOnce) reset() { + e.once = sync.Once{} + e.val = "" +} diff --git a/vendor/golang.org/x/net/proxy/socks5.go b/vendor/golang.org/x/net/proxy/socks5.go new file mode 100644 index 000000000..c91651f96 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/socks5.go @@ -0,0 +1,42 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "context" + "net" + + "golang.org/x/net/internal/socks" +) + +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given +// address with an optional username and password. +// See RFC 1928 and RFC 1929. +func SOCKS5(network, address string, auth *Auth, forward Dialer) (Dialer, error) { + d := socks.NewDialer(network, address) + if forward != nil { + if f, ok := forward.(ContextDialer); ok { + d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) { + return f.DialContext(ctx, network, address) + } + } else { + d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) { + return dialContext(ctx, forward, network, address) + } + } + } + if auth != nil { + up := socks.UsernamePassword{ + Username: auth.User, + Password: auth.Password, + } + d.AuthMethods = []socks.AuthMethod{ + socks.AuthMethodNotRequired, + socks.AuthMethodUsernamePassword, + } + d.Authenticate = up.Authenticate + } + return d, nil +} diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index 9857fe53d..cbee7a4e2 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -8,22 +8,35 @@ package errgroup import ( "context" + "fmt" "sync" ) +type token struct{} + // A Group is a collection of goroutines working on subtasks that are part of // the same overall task. // -// A zero Group is valid and does not cancel on error. +// A zero Group is valid, has no limit on the number of active goroutines, +// and does not cancel on error. type Group struct { cancel func() wg sync.WaitGroup + sem chan token + errOnce sync.Once err error } +func (g *Group) done() { + if g.sem != nil { + <-g.sem + } + g.wg.Done() +} + // WithContext returns a new Group and an associated Context derived from ctx. // // The derived Context is canceled the first time a function passed to Go @@ -45,14 +58,48 @@ func (g *Group) Wait() error { } // Go calls the given function in a new goroutine. +// It blocks until the new goroutine can be added without the number of +// active goroutines in the group exceeding the configured limit. 
// -// The first call to return a non-nil error cancels the group; its error will be -// returned by Wait. +// The first call to return a non-nil error cancels the group's context, if the +// group was created by calling WithContext. The error will be returned by Wait. func (g *Group) Go(f func() error) { + if g.sem != nil { + g.sem <- token{} + } + g.wg.Add(1) + go func() { + defer g.done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel() + } + }) + } + }() +} + +// TryGo calls the given function in a new goroutine only if the number of +// active goroutines in the group is currently below the configured limit. +// +// The return value reports whether the goroutine was started. +func (g *Group) TryGo(f func() error) bool { + if g.sem != nil { + select { + case g.sem <- token{}: + // Note: this allows barging iff channels in general allow barging. + default: + return false + } + } + g.wg.Add(1) go func() { - defer g.wg.Done() + defer g.done() if err := f(); err != nil { g.errOnce.Do(func() { @@ -63,4 +110,23 @@ func (g *Group) Go(f func() error) { }) } }() + return true +} + +// SetLimit limits the number of active goroutines in this group to at most n. +// A negative value indicates no limit. +// +// Any subsequent call to the Go method will block until it can add an active +// goroutine without exceeding the configured limit. +// +// The limit must not be modified while any goroutines in the group are active. +func (g *Group) SetLimit(n int) { + if n < 0 { + g.sem = nil + return + } + if len(g.sem) != 0 { + panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", len(g.sem))) + } + g.sem = make(chan token, n) } diff --git a/vendor/golang.org/x/sys/execabs/execabs.go b/vendor/golang.org/x/sys/execabs/execabs.go new file mode 100644 index 000000000..3bf40fdfe --- /dev/null +++ b/vendor/golang.org/x/sys/execabs/execabs.go @@ -0,0 +1,102 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package execabs is a drop-in replacement for os/exec +// that requires PATH lookups to find absolute paths. +// That is, execabs.Command("cmd") runs the same PATH lookup +// as exec.Command("cmd"), but if the result is a path +// which is relative, the Run and Start methods will report +// an error instead of running the executable. +// +// See https://blog.golang.org/path-security for more information +// about when it may be necessary or appropriate to use this package. +package execabs + +import ( + "context" + "fmt" + "os/exec" + "path/filepath" + "reflect" + "unsafe" +) + +// ErrNotFound is the error resulting if a path search failed to find an executable file. +// It is an alias for exec.ErrNotFound. +var ErrNotFound = exec.ErrNotFound + +// Cmd represents an external command being prepared or run. +// It is an alias for exec.Cmd. +type Cmd = exec.Cmd + +// Error is returned by LookPath when it fails to classify a file as an executable. +// It is an alias for exec.Error. +type Error = exec.Error + +// An ExitError reports an unsuccessful exit by a command. +// It is an alias for exec.ExitError. 
+type ExitError = exec.ExitError + +func relError(file, path string) error { + return fmt.Errorf("%s resolves to executable in current directory (.%c%s)", file, filepath.Separator, path) +} + +// LookPath searches for an executable named file in the directories +// named by the PATH environment variable. If file contains a slash, +// it is tried directly and the PATH is not consulted. The result will be +// an absolute path. +// +// LookPath differs from exec.LookPath in its handling of PATH lookups, +// which are used for file names without slashes. If exec.LookPath's +// PATH lookup would have returned an executable from the current directory, +// LookPath instead returns an error. +func LookPath(file string) (string, error) { + path, err := exec.LookPath(file) + if err != nil && !isGo119ErrDot(err) { + return "", err + } + if filepath.Base(file) == file && !filepath.IsAbs(path) { + return "", relError(file, path) + } + return path, nil +} + +func fixCmd(name string, cmd *exec.Cmd) { + if filepath.Base(name) == name && !filepath.IsAbs(cmd.Path) && !isGo119ErrFieldSet(cmd) { + // exec.Command was called with a bare binary name and + // exec.LookPath returned a path which is not absolute. + // Set cmd.lookPathErr and clear cmd.Path so that it + // cannot be run. + lookPathErr := (*error)(unsafe.Pointer(reflect.ValueOf(cmd).Elem().FieldByName("lookPathErr").Addr().Pointer())) + if *lookPathErr == nil { + *lookPathErr = relError(name, cmd.Path) + } + cmd.Path = "" + } +} + +// CommandContext is like Command but includes a context. +// +// The provided context is used to kill the process (by calling os.Process.Kill) +// if the context becomes done before the command completes on its own. +func CommandContext(ctx context.Context, name string, arg ...string) *exec.Cmd { + cmd := exec.CommandContext(ctx, name, arg...) + fixCmd(name, cmd) + return cmd + +} + +// Command returns the Cmd struct to execute the named program with the given arguments. +// See exec.Command for most details. +// +// Command differs from exec.Command in its handling of PATH lookups, +// which are used when the program name contains no slashes. +// If exec.Command would have returned an exec.Cmd configured to run an +// executable from the current directory, Command instead +// returns an exec.Cmd that will return an error from Start or Run. +func Command(name string, arg ...string) *exec.Cmd { + cmd := exec.Command(name, arg...) + fixCmd(name, cmd) + return cmd +} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go118.go b/vendor/golang.org/x/sys/execabs/execabs_go118.go new file mode 100644 index 000000000..2000064a8 --- /dev/null +++ b/vendor/golang.org/x/sys/execabs/execabs_go118.go @@ -0,0 +1,18 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.19 +// +build !go1.19 + +package execabs + +import "os/exec" + +func isGo119ErrDot(err error) bool { + return false +} + +func isGo119ErrFieldSet(cmd *exec.Cmd) bool { + return false +} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go119.go b/vendor/golang.org/x/sys/execabs/execabs_go119.go new file mode 100644 index 000000000..f364b3418 --- /dev/null +++ b/vendor/golang.org/x/sys/execabs/execabs_go119.go @@ -0,0 +1,21 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build go1.19 +// +build go1.19 + +package execabs + +import ( + "errors" + "os/exec" +) + +func isGo119ErrDot(err error) bool { + return errors.Is(err, exec.ErrDot) +} + +func isGo119ErrFieldSet(cmd *exec.Cmd) bool { + return cmd.Err != nil +} diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s b/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s new file mode 100644 index 000000000..e5b9a8489 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s @@ -0,0 +1,31 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (darwin || freebsd || netbsd || openbsd) && gc +// +build darwin freebsd netbsd openbsd +// +build gc + +#include "textflag.h" + +// +// System call support for ppc64, BSD +// + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-56 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + JMP syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-104 + JMP syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s b/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s new file mode 100644 index 000000000..d560019ea --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s @@ -0,0 +1,29 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (darwin || freebsd || netbsd || openbsd) && gc +// +build darwin freebsd netbsd openbsd +// +build gc + +#include "textflag.h" + +// System call support for RISCV64 BSD + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-56 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + JMP syscall·Syscall6(SB) + +TEXT ·Syscall9(SB),NOSPLIT,$0-104 + JMP syscall·Syscall9(SB) + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_linux_loong64.s b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s new file mode 100644 index 000000000..565357288 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s @@ -0,0 +1,54 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && loong64 && gc +// +build linux +// +build loong64 +// +build gc + +#include "textflag.h" + + +// Just jump to package syscall's implementation for all these functions. +// The runtime may know about them. + +TEXT ·Syscall(SB),NOSPLIT,$0-56 + JMP syscall·Syscall(SB) + +TEXT ·Syscall6(SB),NOSPLIT,$0-80 + JMP syscall·Syscall6(SB) + +TEXT ·SyscallNoError(SB),NOSPLIT,$0-48 + JAL runtime·entersyscall(SB) + MOVV a1+8(FP), R4 + MOVV a2+16(FP), R5 + MOVV a3+24(FP), R6 + MOVV R0, R7 + MOVV R0, R8 + MOVV R0, R9 + MOVV trap+0(FP), R11 // syscall entry + SYSCALL + MOVV R4, r1+32(FP) + MOVV R0, r2+40(FP) // r2 is not used. 
Always set to 0 + JAL runtime·exitsyscall(SB) + RET + +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 + JMP syscall·RawSyscall(SB) + +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 + JMP syscall·RawSyscall6(SB) + +TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48 + MOVV a1+8(FP), R4 + MOVV a2+16(FP), R5 + MOVV a3+24(FP), R6 + MOVV R0, R7 + MOVV R0, R8 + MOVV R0, R9 + MOVV trap+0(FP), R11 // syscall entry + SYSCALL + MOVV R4, r1+32(FP) + MOVV R0, r2+40(FP) // r2 is not used. Always set to 0 + RET diff --git a/vendor/golang.org/x/sys/unix/dirent.go b/vendor/golang.org/x/sys/unix/dirent.go index e74e5eaa3..2499f977b 100644 --- a/vendor/golang.org/x/sys/unix/dirent.go +++ b/vendor/golang.org/x/sys/unix/dirent.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/endian_little.go b/vendor/golang.org/x/sys/unix/endian_little.go index 4362f47e2..b0f2bc4ae 100644 --- a/vendor/golang.org/x/sys/unix/endian_little.go +++ b/vendor/golang.org/x/sys/unix/endian_little.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // -//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh -// +build 386 amd64 amd64p32 alpha arm arm64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh +//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh +// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh package unix diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_386.go b/vendor/golang.org/x/sys/unix/errors_freebsd_386.go deleted file mode 100644 index 761db66ef..000000000 --- a/vendor/golang.org/x/sys/unix/errors_freebsd_386.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep -// them here for backwards compatibility. 
- -package unix - -const ( - DLT_HHDLC = 0x79 - IFF_SMART = 0x20 - IFT_1822 = 0x2 - IFT_A12MPPSWITCH = 0x82 - IFT_AAL2 = 0xbb - IFT_AAL5 = 0x31 - IFT_ADSL = 0x5e - IFT_AFLANE8023 = 0x3b - IFT_AFLANE8025 = 0x3c - IFT_ARAP = 0x58 - IFT_ARCNET = 0x23 - IFT_ARCNETPLUS = 0x24 - IFT_ASYNC = 0x54 - IFT_ATM = 0x25 - IFT_ATMDXI = 0x69 - IFT_ATMFUNI = 0x6a - IFT_ATMIMA = 0x6b - IFT_ATMLOGICAL = 0x50 - IFT_ATMRADIO = 0xbd - IFT_ATMSUBINTERFACE = 0x86 - IFT_ATMVCIENDPT = 0xc2 - IFT_ATMVIRTUAL = 0x95 - IFT_BGPPOLICYACCOUNTING = 0xa2 - IFT_BSC = 0x53 - IFT_CCTEMUL = 0x3d - IFT_CEPT = 0x13 - IFT_CES = 0x85 - IFT_CHANNEL = 0x46 - IFT_CNR = 0x55 - IFT_COFFEE = 0x84 - IFT_COMPOSITELINK = 0x9b - IFT_DCN = 0x8d - IFT_DIGITALPOWERLINE = 0x8a - IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba - IFT_DLSW = 0x4a - IFT_DOCSCABLEDOWNSTREAM = 0x80 - IFT_DOCSCABLEMACLAYER = 0x7f - IFT_DOCSCABLEUPSTREAM = 0x81 - IFT_DS0 = 0x51 - IFT_DS0BUNDLE = 0x52 - IFT_DS1FDL = 0xaa - IFT_DS3 = 0x1e - IFT_DTM = 0x8c - IFT_DVBASILN = 0xac - IFT_DVBASIOUT = 0xad - IFT_DVBRCCDOWNSTREAM = 0x93 - IFT_DVBRCCMACLAYER = 0x92 - IFT_DVBRCCUPSTREAM = 0x94 - IFT_ENC = 0xf4 - IFT_EON = 0x19 - IFT_EPLRS = 0x57 - IFT_ESCON = 0x49 - IFT_ETHER = 0x6 - IFT_FAITH = 0xf2 - IFT_FAST = 0x7d - IFT_FASTETHER = 0x3e - IFT_FASTETHERFX = 0x45 - IFT_FDDI = 0xf - IFT_FIBRECHANNEL = 0x38 - IFT_FRAMERELAYINTERCONNECT = 0x3a - IFT_FRAMERELAYMPI = 0x5c - IFT_FRDLCIENDPT = 0xc1 - IFT_FRELAY = 0x20 - IFT_FRELAYDCE = 0x2c - IFT_FRF16MFRBUNDLE = 0xa3 - IFT_FRFORWARD = 0x9e - IFT_G703AT2MB = 0x43 - IFT_G703AT64K = 0x42 - IFT_GIF = 0xf0 - IFT_GIGABITETHERNET = 0x75 - IFT_GR303IDT = 0xb2 - IFT_GR303RDT = 0xb1 - IFT_H323GATEKEEPER = 0xa4 - IFT_H323PROXY = 0xa5 - IFT_HDH1822 = 0x3 - IFT_HDLC = 0x76 - IFT_HDSL2 = 0xa8 - IFT_HIPERLAN2 = 0xb7 - IFT_HIPPI = 0x2f - IFT_HIPPIINTERFACE = 0x39 - IFT_HOSTPAD = 0x5a - IFT_HSSI = 0x2e - IFT_HY = 0xe - IFT_IBM370PARCHAN = 0x48 - IFT_IDSL = 0x9a - IFT_IEEE80211 = 0x47 - IFT_IEEE80212 = 0x37 - IFT_IEEE8023ADLAG = 0xa1 - IFT_IFGSN = 0x91 - IFT_IMT = 0xbe - IFT_INTERLEAVE = 0x7c - IFT_IP = 0x7e - IFT_IPFORWARD = 0x8e - IFT_IPOVERATM = 0x72 - IFT_IPOVERCDLC = 0x6d - IFT_IPOVERCLAW = 0x6e - IFT_IPSWITCH = 0x4e - IFT_IPXIP = 0xf9 - IFT_ISDN = 0x3f - IFT_ISDNBASIC = 0x14 - IFT_ISDNPRIMARY = 0x15 - IFT_ISDNS = 0x4b - IFT_ISDNU = 0x4c - IFT_ISO88022LLC = 0x29 - IFT_ISO88023 = 0x7 - IFT_ISO88024 = 0x8 - IFT_ISO88025 = 0x9 - IFT_ISO88025CRFPINT = 0x62 - IFT_ISO88025DTR = 0x56 - IFT_ISO88025FIBER = 0x73 - IFT_ISO88026 = 0xa - IFT_ISUP = 0xb3 - IFT_L3IPXVLAN = 0x89 - IFT_LAPB = 0x10 - IFT_LAPD = 0x4d - IFT_LAPF = 0x77 - IFT_LOCALTALK = 0x2a - IFT_LOOP = 0x18 - IFT_MEDIAMAILOVERIP = 0x8b - IFT_MFSIGLINK = 0xa7 - IFT_MIOX25 = 0x26 - IFT_MODEM = 0x30 - IFT_MPC = 0x71 - IFT_MPLS = 0xa6 - IFT_MPLSTUNNEL = 0x96 - IFT_MSDSL = 0x8f - IFT_MVL = 0xbf - IFT_MYRINET = 0x63 - IFT_NFAS = 0xaf - IFT_NSIP = 0x1b - IFT_OPTICALCHANNEL = 0xc3 - IFT_OPTICALTRANSPORT = 0xc4 - IFT_OTHER = 0x1 - IFT_P10 = 0xc - IFT_P80 = 0xd - IFT_PARA = 0x22 - IFT_PFLOG = 0xf6 - IFT_PFSYNC = 0xf7 - IFT_PLC = 0xae - IFT_POS = 0xab - IFT_PPPMULTILINKBUNDLE = 0x6c - IFT_PROPBWAP2MP = 0xb8 - IFT_PROPCNLS = 0x59 - IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 - IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 - IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 - IFT_PROPMUX = 0x36 - IFT_PROPWIRELESSP2P = 0x9d - IFT_PTPSERIAL = 0x16 - IFT_PVC = 0xf1 - IFT_QLLC = 0x44 - IFT_RADIOMAC = 0xbc - IFT_RADSL = 0x5f - IFT_REACHDSL = 0xc0 - IFT_RFC1483 = 0x9f - IFT_RS232 = 0x21 - IFT_RSRB = 0x4f - IFT_SDLC = 0x11 - IFT_SDSL = 0x60 - IFT_SHDSL = 
0xa9 - IFT_SIP = 0x1f - IFT_SLIP = 0x1c - IFT_SMDSDXI = 0x2b - IFT_SMDSICIP = 0x34 - IFT_SONET = 0x27 - IFT_SONETOVERHEADCHANNEL = 0xb9 - IFT_SONETPATH = 0x32 - IFT_SONETVT = 0x33 - IFT_SRP = 0x97 - IFT_SS7SIGLINK = 0x9c - IFT_STACKTOSTACK = 0x6f - IFT_STARLAN = 0xb - IFT_STF = 0xd7 - IFT_T1 = 0x12 - IFT_TDLC = 0x74 - IFT_TERMPAD = 0x5b - IFT_TR008 = 0xb0 - IFT_TRANSPHDLC = 0x7b - IFT_TUNNEL = 0x83 - IFT_ULTRA = 0x1d - IFT_USB = 0xa0 - IFT_V11 = 0x40 - IFT_V35 = 0x2d - IFT_V36 = 0x41 - IFT_V37 = 0x78 - IFT_VDSL = 0x61 - IFT_VIRTUALIPADDRESS = 0x70 - IFT_VOICEEM = 0x64 - IFT_VOICEENCAP = 0x67 - IFT_VOICEFXO = 0x65 - IFT_VOICEFXS = 0x66 - IFT_VOICEOVERATM = 0x98 - IFT_VOICEOVERFRAMERELAY = 0x99 - IFT_VOICEOVERIP = 0x68 - IFT_X213 = 0x5d - IFT_X25 = 0x5 - IFT_X25DDN = 0x4 - IFT_X25HUNTGROUP = 0x7a - IFT_X25MLP = 0x79 - IFT_X25PLE = 0x28 - IFT_XETHER = 0x1a - IPPROTO_MAXID = 0x34 - IPV6_FAITH = 0x1d - IPV6_MIN_MEMBERSHIPS = 0x1f - IP_FAITH = 0x16 - IP_MAX_SOURCE_FILTER = 0x400 - IP_MIN_MEMBERSHIPS = 0x1f - MAP_NORESERVE = 0x40 - MAP_RENAME = 0x20 - NET_RT_MAXID = 0x6 - RTF_PRCLONING = 0x10000 - RTM_OLDADD = 0x9 - RTM_OLDDEL = 0xa - RT_CACHING_CONTEXT = 0x1 - RT_NORTREF = 0x2 - SIOCADDRT = 0x8030720a - SIOCALIFADDR = 0x8118691b - SIOCDELRT = 0x8030720b - SIOCDLIFADDR = 0x8118691d - SIOCGLIFADDR = 0xc118691c - SIOCGLIFPHYADDR = 0xc118694b - SIOCSLIFPHYADDR = 0x8118694a -) diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go deleted file mode 100644 index 070f44b65..000000000 --- a/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep -// them here for backwards compatibility. 
- -package unix - -const ( - DLT_HHDLC = 0x79 - IFF_SMART = 0x20 - IFT_1822 = 0x2 - IFT_A12MPPSWITCH = 0x82 - IFT_AAL2 = 0xbb - IFT_AAL5 = 0x31 - IFT_ADSL = 0x5e - IFT_AFLANE8023 = 0x3b - IFT_AFLANE8025 = 0x3c - IFT_ARAP = 0x58 - IFT_ARCNET = 0x23 - IFT_ARCNETPLUS = 0x24 - IFT_ASYNC = 0x54 - IFT_ATM = 0x25 - IFT_ATMDXI = 0x69 - IFT_ATMFUNI = 0x6a - IFT_ATMIMA = 0x6b - IFT_ATMLOGICAL = 0x50 - IFT_ATMRADIO = 0xbd - IFT_ATMSUBINTERFACE = 0x86 - IFT_ATMVCIENDPT = 0xc2 - IFT_ATMVIRTUAL = 0x95 - IFT_BGPPOLICYACCOUNTING = 0xa2 - IFT_BSC = 0x53 - IFT_CCTEMUL = 0x3d - IFT_CEPT = 0x13 - IFT_CES = 0x85 - IFT_CHANNEL = 0x46 - IFT_CNR = 0x55 - IFT_COFFEE = 0x84 - IFT_COMPOSITELINK = 0x9b - IFT_DCN = 0x8d - IFT_DIGITALPOWERLINE = 0x8a - IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba - IFT_DLSW = 0x4a - IFT_DOCSCABLEDOWNSTREAM = 0x80 - IFT_DOCSCABLEMACLAYER = 0x7f - IFT_DOCSCABLEUPSTREAM = 0x81 - IFT_DS0 = 0x51 - IFT_DS0BUNDLE = 0x52 - IFT_DS1FDL = 0xaa - IFT_DS3 = 0x1e - IFT_DTM = 0x8c - IFT_DVBASILN = 0xac - IFT_DVBASIOUT = 0xad - IFT_DVBRCCDOWNSTREAM = 0x93 - IFT_DVBRCCMACLAYER = 0x92 - IFT_DVBRCCUPSTREAM = 0x94 - IFT_ENC = 0xf4 - IFT_EON = 0x19 - IFT_EPLRS = 0x57 - IFT_ESCON = 0x49 - IFT_ETHER = 0x6 - IFT_FAITH = 0xf2 - IFT_FAST = 0x7d - IFT_FASTETHER = 0x3e - IFT_FASTETHERFX = 0x45 - IFT_FDDI = 0xf - IFT_FIBRECHANNEL = 0x38 - IFT_FRAMERELAYINTERCONNECT = 0x3a - IFT_FRAMERELAYMPI = 0x5c - IFT_FRDLCIENDPT = 0xc1 - IFT_FRELAY = 0x20 - IFT_FRELAYDCE = 0x2c - IFT_FRF16MFRBUNDLE = 0xa3 - IFT_FRFORWARD = 0x9e - IFT_G703AT2MB = 0x43 - IFT_G703AT64K = 0x42 - IFT_GIF = 0xf0 - IFT_GIGABITETHERNET = 0x75 - IFT_GR303IDT = 0xb2 - IFT_GR303RDT = 0xb1 - IFT_H323GATEKEEPER = 0xa4 - IFT_H323PROXY = 0xa5 - IFT_HDH1822 = 0x3 - IFT_HDLC = 0x76 - IFT_HDSL2 = 0xa8 - IFT_HIPERLAN2 = 0xb7 - IFT_HIPPI = 0x2f - IFT_HIPPIINTERFACE = 0x39 - IFT_HOSTPAD = 0x5a - IFT_HSSI = 0x2e - IFT_HY = 0xe - IFT_IBM370PARCHAN = 0x48 - IFT_IDSL = 0x9a - IFT_IEEE80211 = 0x47 - IFT_IEEE80212 = 0x37 - IFT_IEEE8023ADLAG = 0xa1 - IFT_IFGSN = 0x91 - IFT_IMT = 0xbe - IFT_INTERLEAVE = 0x7c - IFT_IP = 0x7e - IFT_IPFORWARD = 0x8e - IFT_IPOVERATM = 0x72 - IFT_IPOVERCDLC = 0x6d - IFT_IPOVERCLAW = 0x6e - IFT_IPSWITCH = 0x4e - IFT_IPXIP = 0xf9 - IFT_ISDN = 0x3f - IFT_ISDNBASIC = 0x14 - IFT_ISDNPRIMARY = 0x15 - IFT_ISDNS = 0x4b - IFT_ISDNU = 0x4c - IFT_ISO88022LLC = 0x29 - IFT_ISO88023 = 0x7 - IFT_ISO88024 = 0x8 - IFT_ISO88025 = 0x9 - IFT_ISO88025CRFPINT = 0x62 - IFT_ISO88025DTR = 0x56 - IFT_ISO88025FIBER = 0x73 - IFT_ISO88026 = 0xa - IFT_ISUP = 0xb3 - IFT_L3IPXVLAN = 0x89 - IFT_LAPB = 0x10 - IFT_LAPD = 0x4d - IFT_LAPF = 0x77 - IFT_LOCALTALK = 0x2a - IFT_LOOP = 0x18 - IFT_MEDIAMAILOVERIP = 0x8b - IFT_MFSIGLINK = 0xa7 - IFT_MIOX25 = 0x26 - IFT_MODEM = 0x30 - IFT_MPC = 0x71 - IFT_MPLS = 0xa6 - IFT_MPLSTUNNEL = 0x96 - IFT_MSDSL = 0x8f - IFT_MVL = 0xbf - IFT_MYRINET = 0x63 - IFT_NFAS = 0xaf - IFT_NSIP = 0x1b - IFT_OPTICALCHANNEL = 0xc3 - IFT_OPTICALTRANSPORT = 0xc4 - IFT_OTHER = 0x1 - IFT_P10 = 0xc - IFT_P80 = 0xd - IFT_PARA = 0x22 - IFT_PFLOG = 0xf6 - IFT_PFSYNC = 0xf7 - IFT_PLC = 0xae - IFT_POS = 0xab - IFT_PPPMULTILINKBUNDLE = 0x6c - IFT_PROPBWAP2MP = 0xb8 - IFT_PROPCNLS = 0x59 - IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 - IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 - IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 - IFT_PROPMUX = 0x36 - IFT_PROPWIRELESSP2P = 0x9d - IFT_PTPSERIAL = 0x16 - IFT_PVC = 0xf1 - IFT_QLLC = 0x44 - IFT_RADIOMAC = 0xbc - IFT_RADSL = 0x5f - IFT_REACHDSL = 0xc0 - IFT_RFC1483 = 0x9f - IFT_RS232 = 0x21 - IFT_RSRB = 0x4f - IFT_SDLC = 0x11 - IFT_SDSL = 0x60 - IFT_SHDSL = 
0xa9 - IFT_SIP = 0x1f - IFT_SLIP = 0x1c - IFT_SMDSDXI = 0x2b - IFT_SMDSICIP = 0x34 - IFT_SONET = 0x27 - IFT_SONETOVERHEADCHANNEL = 0xb9 - IFT_SONETPATH = 0x32 - IFT_SONETVT = 0x33 - IFT_SRP = 0x97 - IFT_SS7SIGLINK = 0x9c - IFT_STACKTOSTACK = 0x6f - IFT_STARLAN = 0xb - IFT_STF = 0xd7 - IFT_T1 = 0x12 - IFT_TDLC = 0x74 - IFT_TERMPAD = 0x5b - IFT_TR008 = 0xb0 - IFT_TRANSPHDLC = 0x7b - IFT_TUNNEL = 0x83 - IFT_ULTRA = 0x1d - IFT_USB = 0xa0 - IFT_V11 = 0x40 - IFT_V35 = 0x2d - IFT_V36 = 0x41 - IFT_V37 = 0x78 - IFT_VDSL = 0x61 - IFT_VIRTUALIPADDRESS = 0x70 - IFT_VOICEEM = 0x64 - IFT_VOICEENCAP = 0x67 - IFT_VOICEFXO = 0x65 - IFT_VOICEFXS = 0x66 - IFT_VOICEOVERATM = 0x98 - IFT_VOICEOVERFRAMERELAY = 0x99 - IFT_VOICEOVERIP = 0x68 - IFT_X213 = 0x5d - IFT_X25 = 0x5 - IFT_X25DDN = 0x4 - IFT_X25HUNTGROUP = 0x7a - IFT_X25MLP = 0x79 - IFT_X25PLE = 0x28 - IFT_XETHER = 0x1a - IPPROTO_MAXID = 0x34 - IPV6_FAITH = 0x1d - IPV6_MIN_MEMBERSHIPS = 0x1f - IP_FAITH = 0x16 - IP_MAX_SOURCE_FILTER = 0x400 - IP_MIN_MEMBERSHIPS = 0x1f - MAP_NORESERVE = 0x40 - MAP_RENAME = 0x20 - NET_RT_MAXID = 0x6 - RTF_PRCLONING = 0x10000 - RTM_OLDADD = 0x9 - RTM_OLDDEL = 0xa - RT_CACHING_CONTEXT = 0x1 - RT_NORTREF = 0x2 - SIOCADDRT = 0x8040720a - SIOCALIFADDR = 0x8118691b - SIOCDELRT = 0x8040720b - SIOCDLIFADDR = 0x8118691d - SIOCGLIFADDR = 0xc118691c - SIOCGLIFPHYADDR = 0xc118694b - SIOCSLIFPHYADDR = 0x8118694a -) diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go deleted file mode 100644 index 856dca325..000000000 --- a/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package unix - -const ( - IFT_1822 = 0x2 - IFT_A12MPPSWITCH = 0x82 - IFT_AAL2 = 0xbb - IFT_AAL5 = 0x31 - IFT_ADSL = 0x5e - IFT_AFLANE8023 = 0x3b - IFT_AFLANE8025 = 0x3c - IFT_ARAP = 0x58 - IFT_ARCNET = 0x23 - IFT_ARCNETPLUS = 0x24 - IFT_ASYNC = 0x54 - IFT_ATM = 0x25 - IFT_ATMDXI = 0x69 - IFT_ATMFUNI = 0x6a - IFT_ATMIMA = 0x6b - IFT_ATMLOGICAL = 0x50 - IFT_ATMRADIO = 0xbd - IFT_ATMSUBINTERFACE = 0x86 - IFT_ATMVCIENDPT = 0xc2 - IFT_ATMVIRTUAL = 0x95 - IFT_BGPPOLICYACCOUNTING = 0xa2 - IFT_BSC = 0x53 - IFT_CCTEMUL = 0x3d - IFT_CEPT = 0x13 - IFT_CES = 0x85 - IFT_CHANNEL = 0x46 - IFT_CNR = 0x55 - IFT_COFFEE = 0x84 - IFT_COMPOSITELINK = 0x9b - IFT_DCN = 0x8d - IFT_DIGITALPOWERLINE = 0x8a - IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba - IFT_DLSW = 0x4a - IFT_DOCSCABLEDOWNSTREAM = 0x80 - IFT_DOCSCABLEMACLAYER = 0x7f - IFT_DOCSCABLEUPSTREAM = 0x81 - IFT_DS0 = 0x51 - IFT_DS0BUNDLE = 0x52 - IFT_DS1FDL = 0xaa - IFT_DS3 = 0x1e - IFT_DTM = 0x8c - IFT_DVBASILN = 0xac - IFT_DVBASIOUT = 0xad - IFT_DVBRCCDOWNSTREAM = 0x93 - IFT_DVBRCCMACLAYER = 0x92 - IFT_DVBRCCUPSTREAM = 0x94 - IFT_ENC = 0xf4 - IFT_EON = 0x19 - IFT_EPLRS = 0x57 - IFT_ESCON = 0x49 - IFT_ETHER = 0x6 - IFT_FAST = 0x7d - IFT_FASTETHER = 0x3e - IFT_FASTETHERFX = 0x45 - IFT_FDDI = 0xf - IFT_FIBRECHANNEL = 0x38 - IFT_FRAMERELAYINTERCONNECT = 0x3a - IFT_FRAMERELAYMPI = 0x5c - IFT_FRDLCIENDPT = 0xc1 - IFT_FRELAY = 0x20 - IFT_FRELAYDCE = 0x2c - IFT_FRF16MFRBUNDLE = 0xa3 - IFT_FRFORWARD = 0x9e - IFT_G703AT2MB = 0x43 - IFT_G703AT64K = 0x42 - IFT_GIF = 0xf0 - IFT_GIGABITETHERNET = 0x75 - IFT_GR303IDT = 0xb2 - IFT_GR303RDT = 0xb1 - IFT_H323GATEKEEPER = 0xa4 - IFT_H323PROXY = 0xa5 - IFT_HDH1822 = 0x3 - IFT_HDLC = 0x76 - IFT_HDSL2 = 0xa8 - IFT_HIPERLAN2 = 0xb7 - IFT_HIPPI = 0x2f - IFT_HIPPIINTERFACE = 0x39 - IFT_HOSTPAD = 0x5a - IFT_HSSI = 0x2e - IFT_HY = 0xe - IFT_IBM370PARCHAN = 0x48 - IFT_IDSL = 0x9a - IFT_IEEE80211 = 0x47 - IFT_IEEE80212 = 0x37 - IFT_IEEE8023ADLAG = 0xa1 - IFT_IFGSN = 0x91 - IFT_IMT = 0xbe - IFT_INTERLEAVE = 0x7c - IFT_IP = 0x7e - IFT_IPFORWARD = 0x8e - IFT_IPOVERATM = 0x72 - IFT_IPOVERCDLC = 0x6d - IFT_IPOVERCLAW = 0x6e - IFT_IPSWITCH = 0x4e - IFT_ISDN = 0x3f - IFT_ISDNBASIC = 0x14 - IFT_ISDNPRIMARY = 0x15 - IFT_ISDNS = 0x4b - IFT_ISDNU = 0x4c - IFT_ISO88022LLC = 0x29 - IFT_ISO88023 = 0x7 - IFT_ISO88024 = 0x8 - IFT_ISO88025 = 0x9 - IFT_ISO88025CRFPINT = 0x62 - IFT_ISO88025DTR = 0x56 - IFT_ISO88025FIBER = 0x73 - IFT_ISO88026 = 0xa - IFT_ISUP = 0xb3 - IFT_L3IPXVLAN = 0x89 - IFT_LAPB = 0x10 - IFT_LAPD = 0x4d - IFT_LAPF = 0x77 - IFT_LOCALTALK = 0x2a - IFT_LOOP = 0x18 - IFT_MEDIAMAILOVERIP = 0x8b - IFT_MFSIGLINK = 0xa7 - IFT_MIOX25 = 0x26 - IFT_MODEM = 0x30 - IFT_MPC = 0x71 - IFT_MPLS = 0xa6 - IFT_MPLSTUNNEL = 0x96 - IFT_MSDSL = 0x8f - IFT_MVL = 0xbf - IFT_MYRINET = 0x63 - IFT_NFAS = 0xaf - IFT_NSIP = 0x1b - IFT_OPTICALCHANNEL = 0xc3 - IFT_OPTICALTRANSPORT = 0xc4 - IFT_OTHER = 0x1 - IFT_P10 = 0xc - IFT_P80 = 0xd - IFT_PARA = 0x22 - IFT_PFLOG = 0xf6 - IFT_PFSYNC = 0xf7 - IFT_PLC = 0xae - IFT_POS = 0xab - IFT_PPPMULTILINKBUNDLE = 0x6c - IFT_PROPBWAP2MP = 0xb8 - IFT_PROPCNLS = 0x59 - IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 - IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 - IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 - IFT_PROPMUX = 0x36 - IFT_PROPWIRELESSP2P = 0x9d - IFT_PTPSERIAL = 0x16 - IFT_PVC = 0xf1 - IFT_QLLC = 0x44 - IFT_RADIOMAC = 0xbc - IFT_RADSL = 0x5f - IFT_REACHDSL = 0xc0 - IFT_RFC1483 = 0x9f - IFT_RS232 = 0x21 - IFT_RSRB = 0x4f - IFT_SDLC = 0x11 - IFT_SDSL = 0x60 - IFT_SHDSL = 0xa9 - IFT_SIP = 0x1f - IFT_SLIP = 0x1c - IFT_SMDSDXI = 0x2b - IFT_SMDSICIP 
= 0x34 - IFT_SONET = 0x27 - IFT_SONETOVERHEADCHANNEL = 0xb9 - IFT_SONETPATH = 0x32 - IFT_SONETVT = 0x33 - IFT_SRP = 0x97 - IFT_SS7SIGLINK = 0x9c - IFT_STACKTOSTACK = 0x6f - IFT_STARLAN = 0xb - IFT_STF = 0xd7 - IFT_T1 = 0x12 - IFT_TDLC = 0x74 - IFT_TERMPAD = 0x5b - IFT_TR008 = 0xb0 - IFT_TRANSPHDLC = 0x7b - IFT_TUNNEL = 0x83 - IFT_ULTRA = 0x1d - IFT_USB = 0xa0 - IFT_V11 = 0x40 - IFT_V35 = 0x2d - IFT_V36 = 0x41 - IFT_V37 = 0x78 - IFT_VDSL = 0x61 - IFT_VIRTUALIPADDRESS = 0x70 - IFT_VOICEEM = 0x64 - IFT_VOICEENCAP = 0x67 - IFT_VOICEFXO = 0x65 - IFT_VOICEFXS = 0x66 - IFT_VOICEOVERATM = 0x98 - IFT_VOICEOVERFRAMERELAY = 0x99 - IFT_VOICEOVERIP = 0x68 - IFT_X213 = 0x5d - IFT_X25 = 0x5 - IFT_X25DDN = 0x4 - IFT_X25HUNTGROUP = 0x7a - IFT_X25MLP = 0x79 - IFT_X25PLE = 0x28 - IFT_XETHER = 0x1a - - // missing constants on FreeBSD-11.1-RELEASE, copied from old values in ztypes_freebsd_arm.go - IFF_SMART = 0x20 - IFT_FAITH = 0xf2 - IFT_IPXIP = 0xf9 - IPPROTO_MAXID = 0x34 - IPV6_FAITH = 0x1d - IP_FAITH = 0x16 - MAP_NORESERVE = 0x40 - MAP_RENAME = 0x20 - NET_RT_MAXID = 0x6 - RTF_PRCLONING = 0x10000 - RTM_OLDADD = 0x9 - RTM_OLDDEL = 0xa - SIOCADDRT = 0x8030720a - SIOCALIFADDR = 0x8118691b - SIOCDELRT = 0x8030720b - SIOCDLIFADDR = 0x8118691d - SIOCGLIFADDR = 0xc118691c - SIOCGLIFPHYADDR = 0xc118694b - SIOCSLIFPHYADDR = 0x8118694a -) diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go deleted file mode 100644 index 946dcf3fc..000000000 --- a/vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep -// them here for backwards compatibility. - -package unix - -const ( - DLT_HHDLC = 0x79 - IPV6_MIN_MEMBERSHIPS = 0x1f - IP_MAX_SOURCE_FILTER = 0x400 - IP_MIN_MEMBERSHIPS = 0x1f - RT_CACHING_CONTEXT = 0x1 - RT_NORTREF = 0x2 -) diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go index 0dee23222..b06f52d74 100644 --- a/vendor/golang.org/x/sys/unix/gccgo.go +++ b/vendor/golang.org/x/sys/unix/gccgo.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build gccgo && !aix -// +build gccgo,!aix +//go:build gccgo && !aix && !hurd +// +build gccgo,!aix,!hurd package unix diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c index 2cb1fefac..f98a1c542 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_c.c +++ b/vendor/golang.org/x/sys/unix/gccgo_c.c @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build gccgo -// +build !aix +//go:build gccgo && !aix && !hurd +// +build gccgo,!aix,!hurd #include #include diff --git a/vendor/golang.org/x/sys/unix/ifreq_linux.go b/vendor/golang.org/x/sys/unix/ifreq_linux.go index 934af313c..15721a510 100644 --- a/vendor/golang.org/x/sys/unix/ifreq_linux.go +++ b/vendor/golang.org/x/sys/unix/ifreq_linux.go @@ -8,7 +8,6 @@ package unix import ( - "bytes" "unsafe" ) @@ -45,13 +44,7 @@ func NewIfreq(name string) (*Ifreq, error) { // Name returns the interface name associated with the Ifreq. func (ifr *Ifreq) Name() string { - // BytePtrToString requires a NULL terminator or the program may crash. 
If - // one is not present, just return the empty string. - if !bytes.Contains(ifr.raw.Ifrn[:], []byte{0x00}) { - return "" - } - - return BytePtrToString(&ifr.raw.Ifrn[0]) + return ByteSliceToString(ifr.raw.Ifrn[:]) } // According to netdevice(7), only AF_INET addresses are returned for numerous diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go index 884430b81..0d12c0851 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_linux.go +++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go @@ -4,9 +4,7 @@ package unix -import ( - "unsafe" -) +import "unsafe" // IoctlRetInt performs an ioctl operation specified by req on a device // associated with opened file descriptor fd, and returns a non-negative @@ -217,3 +215,19 @@ func IoctlKCMAttach(fd int, info KCMAttach) error { func IoctlKCMUnattach(fd int, info KCMUnattach) error { return ioctlPtr(fd, SIOCKCMUNATTACH, unsafe.Pointer(&info)) } + +// IoctlLoopGetStatus64 gets the status of the loop device associated with the +// file descriptor fd using the LOOP_GET_STATUS64 operation. +func IoctlLoopGetStatus64(fd int) (*LoopInfo64, error) { + var value LoopInfo64 + if err := ioctlPtr(fd, LOOP_GET_STATUS64, unsafe.Pointer(&value)); err != nil { + return nil, err + } + return &value, nil +} + +// IoctlLoopSetStatus64 sets the status of the loop device associated with the +// file descriptor fd using the LOOP_SET_STATUS64 operation. +func IoctlLoopSetStatus64(fd int, value *LoopInfo64) error { + return ioctlPtr(fd, LOOP_SET_STATUS64, unsafe.Pointer(value)) +} diff --git a/vendor/golang.org/x/sys/unix/ioctl_signed.go b/vendor/golang.org/x/sys/unix/ioctl_signed.go new file mode 100644 index 000000000..7def9580e --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ioctl_signed.go @@ -0,0 +1,70 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || solaris +// +build aix solaris + +package unix + +import ( + "unsafe" +) + +// ioctl itself should not be exposed directly, but additional get/set +// functions for specific types are permissible. + +// IoctlSetInt performs an ioctl operation which sets an integer value +// on fd, using the specified request number. +func IoctlSetInt(fd int, req int, value int) error { + return ioctl(fd, req, uintptr(value)) +} + +// IoctlSetPointerInt performs an ioctl operation which sets an +// integer value on fd, using the specified request number. The ioctl +// argument is called with a pointer to the integer value, rather than +// passing the integer value directly. +func IoctlSetPointerInt(fd int, req int, value int) error { + v := int32(value) + return ioctlPtr(fd, req, unsafe.Pointer(&v)) +} + +// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. +// +// To change fd's window size, the req argument should be TIOCSWINSZ. +func IoctlSetWinsize(fd int, req int, value *Winsize) error { + // TODO: if we get the chance, remove the req parameter and + // hardcode TIOCSWINSZ. + return ioctlPtr(fd, req, unsafe.Pointer(value)) +} + +// IoctlSetTermios performs an ioctl on fd with a *Termios. +// +// The req value will usually be TCSETA or TIOCSETA. +func IoctlSetTermios(fd int, req int, value *Termios) error { + // TODO: if we get the chance, remove the req parameter. + return ioctlPtr(fd, req, unsafe.Pointer(value)) +} + +// IoctlGetInt performs an ioctl operation which gets an integer value +// from fd, using the specified request number. 
+// +// A few ioctl requests use the return value as an output parameter; +// for those, IoctlRetInt should be used instead of this function. +func IoctlGetInt(fd int, req int) (int, error) { + var value int + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) + return value, err +} + +func IoctlGetWinsize(fd int, req int) (*Winsize, error) { + var value Winsize + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) + return &value, err +} + +func IoctlGetTermios(fd int, req int) (*Termios, error) { + var value Termios + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) + return &value, err +} diff --git a/vendor/golang.org/x/sys/unix/ioctl.go b/vendor/golang.org/x/sys/unix/ioctl_unsigned.go similarity index 77% rename from vendor/golang.org/x/sys/unix/ioctl.go rename to vendor/golang.org/x/sys/unix/ioctl_unsigned.go index 6c7ad052e..649913d1e 100644 --- a/vendor/golang.org/x/sys/unix/ioctl.go +++ b/vendor/golang.org/x/sys/unix/ioctl_unsigned.go @@ -2,13 +2,12 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd +// +build darwin dragonfly freebsd hurd linux netbsd openbsd package unix import ( - "runtime" "unsafe" ) @@ -27,7 +26,7 @@ func IoctlSetInt(fd int, req uint, value int) error { // passing the integer value directly. func IoctlSetPointerInt(fd int, req uint, value int) error { v := int32(value) - return ioctl(fd, req, uintptr(unsafe.Pointer(&v))) + return ioctlPtr(fd, req, unsafe.Pointer(&v)) } // IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. @@ -36,9 +35,7 @@ func IoctlSetPointerInt(fd int, req uint, value int) error { func IoctlSetWinsize(fd int, req uint, value *Winsize) error { // TODO: if we get the chance, remove the req parameter and // hardcode TIOCSWINSZ. - err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err + return ioctlPtr(fd, req, unsafe.Pointer(value)) } // IoctlSetTermios performs an ioctl on fd with a *Termios. @@ -46,9 +43,7 @@ func IoctlSetWinsize(fd int, req uint, value *Winsize) error { // The req value will usually be TCSETA or TIOCSETA. func IoctlSetTermios(fd int, req uint, value *Termios) error { // TODO: if we get the chance, remove the req parameter. - err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err + return ioctlPtr(fd, req, unsafe.Pointer(value)) } // IoctlGetInt performs an ioctl operation which gets an integer value @@ -58,18 +53,18 @@ func IoctlSetTermios(fd int, req uint, value *Termios) error { // for those, IoctlRetInt should be used instead of this function. 
func IoctlGetInt(fd int, req uint) (int, error) { var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return value, err } func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return &value, err } func IoctlGetTermios(fd int, req uint) (*Termios, error) { var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return &value, err } diff --git a/vendor/golang.org/x/sys/unix/ioctl_zos.go b/vendor/golang.org/x/sys/unix/ioctl_zos.go index 5384e7d91..cdc21bf76 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_zos.go +++ b/vendor/golang.org/x/sys/unix/ioctl_zos.go @@ -17,25 +17,23 @@ import ( // IoctlSetInt performs an ioctl operation which sets an integer value // on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { +func IoctlSetInt(fd int, req int, value int) error { return ioctl(fd, req, uintptr(value)) } // IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. // // To change fd's window size, the req argument should be TIOCSWINSZ. -func IoctlSetWinsize(fd int, req uint, value *Winsize) error { +func IoctlSetWinsize(fd int, req int, value *Winsize) error { // TODO: if we get the chance, remove the req parameter and // hardcode TIOCSWINSZ. - err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err + return ioctlPtr(fd, req, unsafe.Pointer(value)) } // IoctlSetTermios performs an ioctl on fd with a *Termios. // // The req value is expected to be TCSETS, TCSETSW, or TCSETSF -func IoctlSetTermios(fd int, req uint, value *Termios) error { +func IoctlSetTermios(fd int, req int, value *Termios) error { if (req != TCSETS) && (req != TCSETSW) && (req != TCSETSF) { return ENOSYS } @@ -49,22 +47,22 @@ func IoctlSetTermios(fd int, req uint, value *Termios) error { // // A few ioctl requests use the return value as an output parameter; // for those, IoctlRetInt should be used instead of this function. -func IoctlGetInt(fd int, req uint) (int, error) { +func IoctlGetInt(fd int, req int) (int, error) { var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return value, err } -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { +func IoctlGetWinsize(fd int, req int) (*Winsize, error) { var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return &value, err } // IoctlGetTermios performs an ioctl on fd with a *Termios. 
// // The req value is expected to be TCGETS -func IoctlGetTermios(fd int, req uint) (*Termios, error) { +func IoctlGetTermios(fd int, req int) (*Termios, error) { var value Termios if req != TCGETS { return &value, ENOSYS diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index ee7362348..8e3947c36 100644 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -73,12 +73,12 @@ aix_ppc64) darwin_amd64) mkerrors="$mkerrors -m64" mktypes="GOARCH=$GOARCH go tool cgo -godefs" - mkasm="go run mkasm_darwin.go" + mkasm="go run mkasm.go" ;; darwin_arm64) mkerrors="$mkerrors -m64" mktypes="GOARCH=$GOARCH go tool cgo -godefs" - mkasm="go run mkasm_darwin.go" + mkasm="go run mkasm.go" ;; dragonfly_amd64) mkerrors="$mkerrors -m64" @@ -89,25 +89,30 @@ dragonfly_amd64) freebsd_386) mkerrors="$mkerrors -m32" mksyscall="go run mksyscall.go -l32" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; freebsd_amd64) mkerrors="$mkerrors -m64" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; freebsd_arm) mkerrors="$mkerrors" mksyscall="go run mksyscall.go -l32 -arm" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'" # Let the type of C char be signed for making the bare syscall # API consistent across platforms. 
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; freebsd_arm64) mkerrors="$mkerrors -m64" - mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" + mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'" + mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" + ;; +freebsd_riscv64) + mkerrors="$mkerrors -m64" + mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'" mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; netbsd_386) @@ -137,42 +142,60 @@ netbsd_arm64) mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; openbsd_386) + mkasm="go run mkasm.go" mkerrors="$mkerrors -m32" - mksyscall="go run mksyscall.go -l32 -openbsd" + mksyscall="go run mksyscall.go -l32 -openbsd -libc" mksysctl="go run mksysctl_openbsd.go" - mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; openbsd_amd64) + mkasm="go run mkasm.go" mkerrors="$mkerrors -m64" - mksyscall="go run mksyscall.go -openbsd" + mksyscall="go run mksyscall.go -openbsd -libc" mksysctl="go run mksysctl_openbsd.go" - mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; openbsd_arm) + mkasm="go run mkasm.go" mkerrors="$mkerrors" - mksyscall="go run mksyscall.go -l32 -openbsd -arm" + mksyscall="go run mksyscall.go -l32 -openbsd -arm -libc" mksysctl="go run mksysctl_openbsd.go" - mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" # Let the type of C char be signed for making the bare syscall # API consistent across platforms. mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; openbsd_arm64) + mkasm="go run mkasm.go" mkerrors="$mkerrors -m64" - mksyscall="go run mksyscall.go -openbsd" + mksyscall="go run mksyscall.go -openbsd -libc" mksysctl="go run mksysctl_openbsd.go" - mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" # Let the type of C char be signed for making the bare syscall # API consistent across platforms. mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; openbsd_mips64) + mkasm="go run mkasm.go" + mkerrors="$mkerrors -m64" + mksyscall="go run mksyscall.go -openbsd -libc" + mksysctl="go run mksysctl_openbsd.go" + # Let the type of C char be signed for making the bare syscall + # API consistent across platforms. + mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" + ;; +openbsd_ppc64) + mkasm="go run mkasm.go" + mkerrors="$mkerrors -m64" + mksyscall="go run mksyscall.go -openbsd -libc" + mksysctl="go run mksysctl_openbsd.go" + # Let the type of C char be signed for making the bare syscall + # API consistent across platforms. + mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" + ;; +openbsd_riscv64) + mkasm="go run mkasm.go" mkerrors="$mkerrors -m64" - mksyscall="go run mksyscall.go -openbsd" + mksyscall="go run mksyscall.go -openbsd -libc" mksysctl="go run mksysctl_openbsd.go" - mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" # Let the type of C char be signed for making the bare syscall # API consistent across platforms. 
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" @@ -209,11 +232,6 @@ esac if [ "$GOOSARCH" == "aix_ppc64" ]; then # aix/ppc64 script generates files instead of writing to stdin. echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in && gofmt -w zsyscall_$GOOSARCH.go && gofmt -w zsyscall_"$GOOSARCH"_gccgo.go && gofmt -w zsyscall_"$GOOSARCH"_gc.go " ; - elif [ "$GOOS" == "darwin" ]; then - # 1.12 and later, syscalls via libSystem - echo "$mksyscall -tags $GOOS,$GOARCH,go1.12 $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; - # 1.13 and later, syscalls via libSystem (including syscallPtr) - echo "$mksyscall -tags $GOOS,$GOARCH,go1.13 syscall_darwin.1_13.go |gofmt >zsyscall_$GOOSARCH.1_13.go"; elif [ "$GOOS" == "illumos" ]; then # illumos code generation requires a --illumos switch echo "$mksyscall -illumos -tags illumos,$GOARCH syscall_illumos.go |gofmt > zsyscall_illumos_$GOARCH.go"; @@ -227,5 +245,5 @@ esac if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi if [ -n "$mktypes" ]; then echo "$mktypes types_$GOOS.go | go run mkpost.go > ztypes_$GOOSARCH.go"; fi - if [ -n "$mkasm" ]; then echo "$mkasm $GOARCH"; fi + if [ -n "$mkasm" ]; then echo "$mkasm $GOOS $GOARCH"; fi ) | $run diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 72f65a9af..be0423e68 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -66,6 +66,7 @@ includes_Darwin=' #include #include #include +#include #include #include #include @@ -128,6 +129,7 @@ includes_FreeBSD=' #include #include #include +#include #include #include #include @@ -202,6 +204,8 @@ struct ltchars { #include #include #include +#include +#include #include #include #include @@ -215,6 +219,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -294,6 +299,10 @@ struct ltchars { #define SOL_NETLINK 270 #endif +#ifndef SOL_SMC +#define SOL_SMC 286 +#endif + #ifdef SOL_BLUETOOTH // SPARC includes this in /usr/include/sparc64-linux-gnu/bits/socket.h // but it is already in bluetooth_linux.go @@ -510,10 +519,11 @@ ccflags="$@" $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || $2 ~ /^LO_(KEY|NAME)_SIZE$/ || $2 ~ /^LOOP_(CLR|CTL|GET|SET)_/ || - $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT)_/ || + $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ || $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || $2 ~ /^NFC_.*_(MAX)?SIZE$/ || $2 ~ /^RAW_PAYLOAD_/ || + $2 ~ /^[US]F_/ || $2 ~ /^TP_STATUS_/ || $2 ~ /^FALLOC_/ || $2 ~ /^ICMPV?6?_(FILTER|SEC)/ || @@ -528,7 +538,7 @@ ccflags="$@" $2 ~ /^(MS|MNT|MOUNT|UMOUNT)_/ || $2 ~ /^NS_GET_/ || $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ || - $2 ~ /^(O|F|[ES]?FD|NAME|S|PTRACE|PT|TFD)_/ || + $2 ~ /^(O|F|[ES]?FD|NAME|S|PTRACE|PT|PIOD|TFD)_/ || $2 ~ /^KEXEC_/ || $2 ~ /^LINUX_REBOOT_CMD_/ || $2 ~ /^LINUX_REBOOT_MAGIC[12]$/ || @@ -552,6 +562,7 @@ ccflags="$@" $2 ~ /^CLONE_[A-Z_]+/ || $2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+)$/ && $2 ~ /^(BPF|DLT)_/ || + $2 ~ /^AUDIT_/ || $2 ~ /^(CLOCK|TIMER)_/ || $2 ~ /^CAN_/ || $2 ~ /^CAP_/ || @@ -574,7 +585,6 @@ ccflags="$@" $2 ~ /^SEEK_/ || $2 ~ /^SPLICE_/ || $2 ~ /^SYNC_FILE_RANGE_/ || - $2 !~ /^AUDIT_RECORD_MAGIC/ && $2 !~ /IOC_MAGIC/ && $2 ~ /^[A-Z][A-Z0-9_]+_MAGIC2?$/ || $2 ~ /^(VM|VMADDR)_/ || 
@@ -613,6 +623,7 @@ ccflags="$@" $2 ~ /^OTP/ || $2 ~ /^MEM/ || $2 ~ /^WG/ || + $2 ~ /^FIB_RULE_/ || $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)} $2 ~ /^__WCOREFLAG$/ {next} $2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)} @@ -634,7 +645,7 @@ errors=$( signals=$( echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' | - egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | + grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' | sort ) @@ -644,7 +655,7 @@ echo '#include ' | $CC -x c - -E -dM $ccflags | sort >_error.grep echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' | - egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | + grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' | sort >_signal.grep echo '// mkerrors.sh' "$@" diff --git a/vendor/golang.org/x/sys/unix/ptrace_darwin.go b/vendor/golang.org/x/sys/unix/ptrace_darwin.go index 463c3eff7..39dba6ca6 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_darwin.go +++ b/vendor/golang.org/x/sys/unix/ptrace_darwin.go @@ -7,6 +7,12 @@ package unix +import "unsafe" + func ptrace(request int, pid int, addr uintptr, data uintptr) error { return ptrace1(request, pid, addr, data) } + +func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) error { + return ptrace1Ptr(request, pid, addr, data) +} diff --git a/vendor/golang.org/x/sys/unix/ptrace_ios.go b/vendor/golang.org/x/sys/unix/ptrace_ios.go index ed0509a01..9ea66330a 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_ios.go +++ b/vendor/golang.org/x/sys/unix/ptrace_ios.go @@ -7,6 +7,12 @@ package unix +import "unsafe" + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { return ENOTSUP } + +func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) { + return ENOTSUP +} diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go index 453a942c5..3865943f6 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go @@ -52,6 +52,20 @@ func ParseSocketControlMessage(b []byte) ([]SocketControlMessage, error) { return msgs, nil } +// ParseOneSocketControlMessage parses a single socket control message from b, returning the message header, +// message data (a slice of b), and the remainder of b after that single message. +// When there are no remaining messages, len(remainder) == 0. 
+func ParseOneSocketControlMessage(b []byte) (hdr Cmsghdr, data []byte, remainder []byte, err error) { + h, dbuf, err := socketControlMessageHeaderAndData(b) + if err != nil { + return Cmsghdr{}, nil, nil, err + } + if i := cmsgAlignOf(int(h.Len)); i < len(b) { + remainder = b[i:] + } + return *h, dbuf, remainder, nil +} + func socketControlMessageHeaderAndData(b []byte) (*Cmsghdr, []byte, error) { h := (*Cmsghdr)(unsafe.Pointer(&b[0])) if h.Len < SizeofCmsghdr || uint64(h.Len) > uint64(len(b)) { diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go index 649fa8740..63e8c8383 100644 --- a/vendor/golang.org/x/sys/unix/syscall.go +++ b/vendor/golang.org/x/sys/unix/syscall.go @@ -29,8 +29,6 @@ import ( "bytes" "strings" "unsafe" - - "golang.org/x/sys/internal/unsafeheader" ) // ByteSliceFromString returns a NUL-terminated slice of bytes @@ -82,13 +80,7 @@ func BytePtrToString(p *byte) string { ptr = unsafe.Pointer(uintptr(ptr) + 1) } - var s []byte - h := (*unsafeheader.Slice)(unsafe.Pointer(&s)) - h.Data = unsafe.Pointer(p) - h.Len = n - h.Cap = n - - return string(s) + return string(unsafe.Slice(p, n)) } // Single-word zero for use when we need a valid pointer to 0 bytes. diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index f2a114fc2..c406ae00f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -37,6 +37,7 @@ func Creat(path string, mode uint32) (fd int, err error) { } //sys utimes(path string, times *[2]Timeval) (err error) + func Utimes(path string, tv []Timeval) error { if len(tv) != 2 { return EINVAL @@ -45,6 +46,7 @@ func Utimes(path string, tv []Timeval) error { } //sys utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error) + func UtimesNano(path string, ts []Timespec) error { if len(ts) != 2 { return EINVAL @@ -215,14 +217,63 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { return } -func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { - // Recvmsg not implemented on AIX - return -1, -1, -1, ENOSYS +func recvmsgRaw(fd int, iov []Iovec, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { + var msg Msghdr + msg.Name = (*byte)(unsafe.Pointer(rsa)) + msg.Namelen = uint32(SizeofSockaddrAny) + var dummy byte + if len(oob) > 0 { + // receive at least one normal byte + if emptyIovecs(iov) { + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] + } + msg.Control = (*byte)(unsafe.Pointer(&oob[0])) + msg.SetControllen(len(oob)) + } + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } + if n, err = recvmsg(fd, &msg, flags); n == -1 { + return + } + oobn = int(msg.Controllen) + recvflags = int(msg.Flags) + return } -func sendmsgN(fd int, p, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { - // SendmsgN not implemented on AIX - return -1, ENOSYS +func sendmsgN(fd int, iov []Iovec, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { + var msg Msghdr + msg.Name = (*byte)(unsafe.Pointer(ptr)) + msg.Namelen = uint32(salen) + var dummy byte + var empty bool + if len(oob) > 0 { + // send at least one normal byte + empty = emptyIovecs(iov) + if empty { + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] + } + msg.Control = (*byte)(unsafe.Pointer(&oob[0])) + msg.SetControllen(len(oob)) + } + if 
len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } + if n, err = sendmsg(fd, &msg, flags); err != nil { + return 0, err + } + if len(oob) > 0 && empty { + n = 0 + } + return n, nil } func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { @@ -241,9 +292,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { break } } - - bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] - sa.Name = string(bytes) + sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n)) return sa, nil case AF_INET: @@ -300,11 +349,13 @@ func direntNamlen(buf []byte) (uint64, bool) { } //sys getdirent(fd int, buf []byte) (n int, err error) + func Getdents(fd int, buf []byte) (n int, err error) { return getdirent(fd, buf) } //sys wait4(pid Pid_t, status *_C_int, options int, rusage *Rusage) (wpid Pid_t, err error) + func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { var status _C_int var r Pid_t @@ -357,7 +408,8 @@ func (w WaitStatus) CoreDump() bool { return w&0x80 == 0x80 } func (w WaitStatus) TrapCause() int { return -1 } -//sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctl(fd int, req int, arg uintptr) (err error) +//sys ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) = ioctl // fcntl must never be called with cmd=F_DUP2FD because it doesn't work on AIX // There is no way to create a custom fcntl and to keep //sys fcntl easily, @@ -372,6 +424,7 @@ func (w WaitStatus) TrapCause() int { return -1 } //sys fcntl(fd int, cmd int, arg int) (val int, err error) //sys fsyncRange(fd int, how int, start int64, length int64) (err error) = fsync_range + func Fsync(fd int) error { return fsyncRange(fd, O_SYNC, 0, 0) } @@ -536,6 +589,7 @@ func Poll(fds []PollFd, timeout int) (n int, err error) { //sys Getsystemcfg(label int) (n uint64) //sys umount(target string) (err error) + func Unmount(target string, flags int) (err error) { if flags != 0 { // AIX doesn't have any flags for umount. 
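For context on the ParseOneSocketControlMessage helper introduced in the sockcmsg_unix.go hunk above, here is a minimal sketch of walking an out-of-band buffer with it; everything except the x/sys/unix identifiers taken from the hunk (the package name, the helper, and how the buffer was obtained) is illustrative, not part of this patch.

package example

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

// walkControlMessages prints each control message found in oob, an
// out-of-band buffer such as the one filled by a prior unix.Recvmsg call.
// ParseOneSocketControlMessage returns the remainder of the buffer, so the
// loop ends when no messages are left (len(oob) == 0).
func walkControlMessages(oob []byte) {
	for len(oob) > 0 {
		hdr, data, remainder, err := unix.ParseOneSocketControlMessage(oob)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("level=%d type=%d datalen=%d\n", hdr.Level, hdr.Type, len(data))
		oob = remainder
	}
}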
diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go index e92a0be16..f2871fa95 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go @@ -8,7 +8,6 @@ package unix //sysnb Getrlimit(resource int, rlim *Rlimit) (err error) = getrlimit64 -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) = setrlimit64 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = lseek64 //sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go index 16eed1709..75718ec0f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go @@ -8,7 +8,6 @@ package unix //sysnb Getrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) //sys Seek(fd int, offset int64, whence int) (off int64, err error) = lseek //sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) = mmap64 diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 9c87c5f07..7705c3270 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -245,8 +245,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { break } } - bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] - sa.Name = string(bytes) + sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n)) return sa, nil case AF_INET: @@ -325,27 +324,26 @@ func GetsockoptString(fd, level, opt int) (string, error) { //sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) //sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) -func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { +func recvmsgRaw(fd int, iov []Iovec, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { var msg Msghdr msg.Name = (*byte)(unsafe.Pointer(rsa)) msg.Namelen = uint32(SizeofSockaddrAny) - var iov Iovec - if len(p) > 0 { - iov.Base = (*byte)(unsafe.Pointer(&p[0])) - iov.SetLen(len(p)) - } var dummy byte if len(oob) > 0 { // receive at least one normal byte - if len(p) == 0 { - iov.Base = &dummy - iov.SetLen(1) + if emptyIovecs(iov) { + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] } msg.Control = (*byte)(unsafe.Pointer(&oob[0])) msg.SetControllen(len(oob)) } - msg.Iov = &iov - msg.Iovlen = 1 + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } if n, err = recvmsg(fd, &msg, flags); err != nil { return } @@ -356,31 +354,32 @@ func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) -func sendmsgN(fd int, p, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { +func sendmsgN(fd int, iov []Iovec, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { var msg Msghdr msg.Name = (*byte)(unsafe.Pointer(ptr)) msg.Namelen = uint32(salen) - var iov Iovec - if len(p) > 0 { - iov.Base = (*byte)(unsafe.Pointer(&p[0])) - iov.SetLen(len(p)) - } var dummy byte + var empty bool if len(oob) > 0 { // send at least one normal byte - 
if len(p) == 0 { - iov.Base = &dummy - iov.SetLen(1) + empty = emptyIovecs(iov) + if empty { + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] } msg.Control = (*byte)(unsafe.Pointer(&oob[0])) msg.SetControllen(len(oob)) } - msg.Iov = &iov - msg.Iovlen = 1 + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } if n, err = sendmsg(fd, &msg, flags); err != nil { return 0, err } - if len(oob) > 0 && len(p) == 0 { + if len(oob) > 0 && empty { n = 0 } return n, nil diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go b/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go deleted file mode 100644 index b0098607c..000000000 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build darwin && go1.12 && !go1.13 -// +build darwin,go1.12,!go1.13 - -package unix - -import ( - "unsafe" -) - -const _SYS_GETDIRENTRIES64 = 344 - -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - // To implement this using libSystem we'd need syscall_syscallPtr for - // fdopendir. However, syscallPtr was only added in Go 1.13, so we fall - // back to raw syscalls for this func on Go 1.12. - var p unsafe.Pointer - if len(buf) > 0 { - p = unsafe.Pointer(&buf[0]) - } else { - p = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(_SYS_GETDIRENTRIES64, uintptr(fd), uintptr(p), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - return n, errnoErr(e1) - } - return n, nil -} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go b/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go deleted file mode 100644 index 1596426b1..000000000 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build darwin && go1.13 -// +build darwin,go1.13 - -package unix - -import ( - "unsafe" - - "golang.org/x/sys/internal/unsafeheader" -) - -//sys closedir(dir uintptr) (err error) -//sys readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) - -func fdopendir(fd int) (dir uintptr, err error) { - r0, _, e1 := syscall_syscallPtr(libc_fdopendir_trampoline_addr, uintptr(fd), 0, 0) - dir = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_fdopendir_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fdopendir fdopendir "/usr/lib/libSystem.B.dylib" - -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - // Simulate Getdirentries using fdopendir/readdir_r/closedir. - // We store the number of entries to skip in the seek - // offset of fd. See issue #31368. - // It's not the full required semantics, but should handle the case - // of calling Getdirentries or ReadDirent repeatedly. - // It won't handle assigning the results of lseek to *basep, or handle - // the directory being edited underfoot. - skip, err := Seek(fd, 0, 1 /* SEEK_CUR */) - if err != nil { - return 0, err - } - - // We need to duplicate the incoming file descriptor - // because the caller expects to retain control of it, but - // fdopendir expects to take control of its argument. - // Just Dup'ing the file descriptor is not enough, as the - // result shares underlying state. 
Use Openat to make a really - // new file descriptor referring to the same directory. - fd2, err := Openat(fd, ".", O_RDONLY, 0) - if err != nil { - return 0, err - } - d, err := fdopendir(fd2) - if err != nil { - Close(fd2) - return 0, err - } - defer closedir(d) - - var cnt int64 - for { - var entry Dirent - var entryp *Dirent - e := readdir_r(d, &entry, &entryp) - if e != 0 { - return n, errnoErr(e) - } - if entryp == nil { - break - } - if skip > 0 { - skip-- - cnt++ - continue - } - - reclen := int(entry.Reclen) - if reclen > len(buf) { - // Not enough room. Return for now. - // The counter will let us know where we should start up again. - // Note: this strategy for suspending in the middle and - // restarting is O(n^2) in the length of the directory. Oh well. - break - } - - // Copy entry into return buffer. - var s []byte - hdr := (*unsafeheader.Slice)(unsafe.Pointer(&s)) - hdr.Data = unsafe.Pointer(&entry) - hdr.Cap = reclen - hdr.Len = reclen - copy(buf, s) - - buf = buf[reclen:] - n += reclen - cnt++ - } - // Set the seek offset of the input fd to record - // how many files we've already returned. - _, err = Seek(fd, cnt, 0 /* SEEK_SET */) - if err != nil { - return n, err - } - - return n, nil -} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 09a25c653..206921504 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -14,11 +14,100 @@ package unix import ( "fmt" - "runtime" "syscall" "unsafe" ) +//sys closedir(dir uintptr) (err error) +//sys readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) + +func fdopendir(fd int) (dir uintptr, err error) { + r0, _, e1 := syscall_syscallPtr(libc_fdopendir_trampoline_addr, uintptr(fd), 0, 0) + dir = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fdopendir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fdopendir fdopendir "/usr/lib/libSystem.B.dylib" + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + // Simulate Getdirentries using fdopendir/readdir_r/closedir. + // We store the number of entries to skip in the seek + // offset of fd. See issue #31368. + // It's not the full required semantics, but should handle the case + // of calling Getdirentries or ReadDirent repeatedly. + // It won't handle assigning the results of lseek to *basep, or handle + // the directory being edited underfoot. + skip, err := Seek(fd, 0, 1 /* SEEK_CUR */) + if err != nil { + return 0, err + } + + // We need to duplicate the incoming file descriptor + // because the caller expects to retain control of it, but + // fdopendir expects to take control of its argument. + // Just Dup'ing the file descriptor is not enough, as the + // result shares underlying state. Use Openat to make a really + // new file descriptor referring to the same directory. + fd2, err := Openat(fd, ".", O_RDONLY, 0) + if err != nil { + return 0, err + } + d, err := fdopendir(fd2) + if err != nil { + Close(fd2) + return 0, err + } + defer closedir(d) + + var cnt int64 + for { + var entry Dirent + var entryp *Dirent + e := readdir_r(d, &entry, &entryp) + if e != 0 { + return n, errnoErr(e) + } + if entryp == nil { + break + } + if skip > 0 { + skip-- + cnt++ + continue + } + + reclen := int(entry.Reclen) + if reclen > len(buf) { + // Not enough room. Return for now. + // The counter will let us know where we should start up again. 
+ // Note: this strategy for suspending in the middle and + // restarting is O(n^2) in the length of the directory. Oh well. + break + } + + // Copy entry into return buffer. + s := unsafe.Slice((*byte)(unsafe.Pointer(&entry)), reclen) + copy(buf, s) + + buf = buf[reclen:] + n += reclen + cnt++ + } + // Set the seek offset of the input fd to record + // how many files we've already returned. + _, err = Seek(fd, cnt, 0 /* SEEK_SET */) + if err != nil { + return n, err + } + + return n, nil +} + // SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. type SockaddrDatalink struct { Len uint8 @@ -140,6 +229,7 @@ func direntNamlen(buf []byte) (uint64, bool) { func PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) } func PtraceDetach(pid int) (err error) { return ptrace(PT_DETACH, pid, 0, 0) } +func PtraceDenyAttach() (err error) { return ptrace(PT_DENY_ATTACH, 0, 0, 0) } //sysnb pipe(p *[2]int32) (err error) @@ -285,11 +375,10 @@ func Flistxattr(fd int, dest []byte) (sz int, err error) { func Kill(pid int, signum syscall.Signal) (err error) { return kill(pid, int(signum), 1) } //sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL func IoctlCtlInfo(fd int, ctlInfo *CtlInfo) error { - err := ioctl(fd, CTLIOCGINFO, uintptr(unsafe.Pointer(ctlInfo))) - runtime.KeepAlive(ctlInfo) - return err + return ioctlPtr(fd, CTLIOCGINFO, unsafe.Pointer(ctlInfo)) } // IfreqMTU is struct ifreq used to get or set a network device's MTU. @@ -303,16 +392,14 @@ type IfreqMTU struct { func IoctlGetIfreqMTU(fd int, ifname string) (*IfreqMTU, error) { var ifreq IfreqMTU copy(ifreq.Name[:], ifname) - err := ioctl(fd, SIOCGIFMTU, uintptr(unsafe.Pointer(&ifreq))) + err := ioctlPtr(fd, SIOCGIFMTU, unsafe.Pointer(&ifreq)) return &ifreq, err } // IoctlSetIfreqMTU performs the SIOCSIFMTU ioctl operation on fd to set the MTU // of the network device specified by ifreq.Name. func IoctlSetIfreqMTU(fd int, ifreq *IfreqMTU) error { - err := ioctl(fd, SIOCSIFMTU, uintptr(unsafe.Pointer(ifreq))) - runtime.KeepAlive(ifreq) - return err + return ioctlPtr(fd, SIOCSIFMTU, unsafe.Pointer(ifreq)) } //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS_SYSCTL @@ -393,6 +480,13 @@ func GetsockoptXucred(fd, level, opt int) (*Xucred, error) { return x, err } +func GetsockoptTCPConnectionInfo(fd, level, opt int) (*TCPConnectionInfo, error) { + var value TCPConnectionInfo + vallen := _Socklen(SizeofTCPConnectionInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + func SysctlKinfoProc(name string, args ...int) (*KinfoProc, error) { mib, err := sysctlmib(name, args...) 
if err != nil { @@ -504,6 +598,7 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { //sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mkfifo(path string, mode uint32) (err error) //sys Mknod(path string, mode uint32, dev int) (err error) +//sys Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) //sys Pathconf(path string, name int) (val int, err error) @@ -518,6 +613,7 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { //sys Rmdir(path string) (err error) //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) +//sys Setattrlist(path string, attrlist *Attrlist, attrBuf []byte, options int) (err error) //sys Setegid(egid int) (err error) //sysnb Seteuid(euid int) (err error) //sysnb Setgid(gid int) (err error) @@ -527,7 +623,6 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { //sys Setprivexec(flag int) (err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setreuid(ruid int, euid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) @@ -572,7 +667,6 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { // Nfssvc // Getfh // Quotactl -// Mount // Csops // Waitid // Add_profil @@ -582,7 +676,6 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { // Kqueue_from_portset_np // Kqueue_portset // Getattrlist -// Setattrlist // Getdirentriesattr // Searchfs // Delete diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index b37310ce9..9fa879806 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -47,5 +47,6 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT64 //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 //sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace +//sys ptrace1Ptr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) = SYS_ptrace //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 //sys Statfs(path string, stat *Statfs_t) (err error) = SYS_STATFS64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index d51ec9963..f17b8c526 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -47,5 +47,6 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT //sys Lstat(path string, stat *Stat_t) (err error) //sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace +//sys ptrace1Ptr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) = SYS_ptrace //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, stat *Statfs_t) (err error) diff --git 
a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index c61e27498..d4ce988e7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -125,11 +125,13 @@ func Pipe2(p []int, flags int) (err error) { } //sys extpread(fd int, p []byte, flags int, offset int64) (n int, err error) + func pread(fd int, p []byte, offset int64) (n int, err error) { return extpread(fd, p, 0, offset) } //sys extpwrite(fd int, p []byte, flags int, offset int64) (n int, err error) + func pwrite(fd int, p []byte, offset int64) (n int, err error) { return extpwrite(fd, p, 0, offset) } @@ -170,6 +172,7 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { } //sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL @@ -253,6 +256,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Chmod(path string, mode uint32) (err error) //sys Chown(path string, uid int, gid int) (err error) //sys Chroot(path string) (err error) +//sys ClockGettime(clockid int32, time *Timespec) (err error) //sys Close(fd int) (err error) //sys Dup(fd int) (nfd int, err error) //sys Dup2(from int, to int) (err error) @@ -322,7 +326,6 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sysnb Setreuid(ruid int, euid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 6f6c510f4..afb10106f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -17,25 +17,12 @@ import ( "unsafe" ) -const ( - SYS_FSTAT_FREEBSD12 = 551 // { int fstat(int fd, _Out_ struct stat *sb); } - SYS_FSTATAT_FREEBSD12 = 552 // { int fstatat(int fd, _In_z_ char *path, \ - SYS_GETDIRENTRIES_FREEBSD12 = 554 // { ssize_t getdirentries(int fd, \ - SYS_STATFS_FREEBSD12 = 555 // { int statfs(_In_z_ char *path, \ - SYS_FSTATFS_FREEBSD12 = 556 // { int fstatfs(int fd, \ - SYS_GETFSSTAT_FREEBSD12 = 557 // { int getfsstat( \ - SYS_MKNODAT_FREEBSD12 = 559 // { int mknodat(int fd, _In_z_ char *path, \ -) - // See https://www.freebsd.org/doc/en_US.ISO8859-1/books/porters-handbook/versions.html. 
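The syscall_dragonfly.go hunk above (and the FreeBSD hunk further below) adds a generated ClockGettime wrapper via a //sys line; the sketch below shows how that surfaces to callers, assuming a monotonic clock id chosen purely for illustration.

package example

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

// monotonicNow reads the monotonic clock through the ClockGettime wrapper
// generated from the //sys declaration added in this change.
func monotonicNow() unix.Timespec {
	var ts unix.Timespec
	if err := unix.ClockGettime(unix.CLOCK_MONOTONIC, &ts); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("sec=%d nsec=%d\n", ts.Sec, ts.Nsec)
	return ts
}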
var ( osreldateOnce sync.Once osreldate uint32 ) -// INO64_FIRST from /usr/src/lib/libc/sys/compat-ino64.h -const _ino64First = 1200031 - func supportsABI(ver uint32) bool { osreldateOnce.Do(func() { osreldate, _ = SysctlUint32("kern.osreldate") }) return osreldate >= ver @@ -159,42 +146,23 @@ func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) { func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { var ( - _p0 unsafe.Pointer - bufsize uintptr - oldBuf []statfs_freebsd11_t - needsConvert bool + _p0 unsafe.Pointer + bufsize uintptr ) - if len(buf) > 0 { - if supportsABI(_ino64First) { - _p0 = unsafe.Pointer(&buf[0]) - bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) - } else { - n := len(buf) - oldBuf = make([]statfs_freebsd11_t, n) - _p0 = unsafe.Pointer(&oldBuf[0]) - bufsize = unsafe.Sizeof(statfs_freebsd11_t{}) * uintptr(n) - needsConvert = true - } - } - var sysno uintptr = SYS_GETFSSTAT - if supportsABI(_ino64First) { - sysno = SYS_GETFSSTAT_FREEBSD12 + _p0 = unsafe.Pointer(&buf[0]) + bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) } - r0, _, e1 := Syscall(sysno, uintptr(_p0), bufsize, uintptr(flags)) + r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) n = int(r0) if e1 != 0 { err = e1 } - if e1 == 0 && needsConvert { - for i := range oldBuf { - buf[i].convertFrom(&oldBuf[i]) - } - } return } -//sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctl(fd int, req uint, arg uintptr) (err error) = SYS_IOCTL +//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL @@ -245,87 +213,11 @@ func Uname(uname *Utsname) error { } func Stat(path string, st *Stat_t) (err error) { - var oldStat stat_freebsd11_t - if supportsABI(_ino64First) { - return fstatat_freebsd12(AT_FDCWD, path, st, 0) - } - err = stat(path, &oldStat) - if err != nil { - return err - } - - st.convertFrom(&oldStat) - return nil + return Fstatat(AT_FDCWD, path, st, 0) } func Lstat(path string, st *Stat_t) (err error) { - var oldStat stat_freebsd11_t - if supportsABI(_ino64First) { - return fstatat_freebsd12(AT_FDCWD, path, st, AT_SYMLINK_NOFOLLOW) - } - err = lstat(path, &oldStat) - if err != nil { - return err - } - - st.convertFrom(&oldStat) - return nil -} - -func Fstat(fd int, st *Stat_t) (err error) { - var oldStat stat_freebsd11_t - if supportsABI(_ino64First) { - return fstat_freebsd12(fd, st) - } - err = fstat(fd, &oldStat) - if err != nil { - return err - } - - st.convertFrom(&oldStat) - return nil -} - -func Fstatat(fd int, path string, st *Stat_t, flags int) (err error) { - var oldStat stat_freebsd11_t - if supportsABI(_ino64First) { - return fstatat_freebsd12(fd, path, st, flags) - } - err = fstatat(fd, path, &oldStat, flags) - if err != nil { - return err - } - - st.convertFrom(&oldStat) - return nil -} - -func Statfs(path string, st *Statfs_t) (err error) { - var oldStatfs statfs_freebsd11_t - if supportsABI(_ino64First) { - return statfs_freebsd12(path, st) - } - err = statfs(path, &oldStatfs) - if err != nil { - return err - } - - st.convertFrom(&oldStatfs) - return nil -} - -func Fstatfs(fd int, st *Statfs_t) (err error) { - var oldStatfs statfs_freebsd11_t - if supportsABI(_ino64First) { - return fstatfs_freebsd12(fd, st) - } - err = fstatfs(fd, &oldStatfs) - if err != nil { - return err - } - - st.convertFrom(&oldStatfs) - return nil + return Fstatat(AT_FDCWD, path, st, AT_SYMLINK_NOFOLLOW) } func Getdents(fd int, 
buf []byte) (n int, err error) { @@ -333,162 +225,25 @@ func Getdents(fd int, buf []byte) (n int, err error) { } func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - if supportsABI(_ino64First) { - if basep == nil || unsafe.Sizeof(*basep) == 8 { - return getdirentries_freebsd12(fd, buf, (*uint64)(unsafe.Pointer(basep))) - } - // The freebsd12 syscall needs a 64-bit base. On 32-bit machines - // we can't just use the basep passed in. See #32498. - var base uint64 = uint64(*basep) - n, err = getdirentries_freebsd12(fd, buf, &base) - *basep = uintptr(base) - if base>>32 != 0 { - // We can't stuff the base back into a uintptr, so any - // future calls would be suspect. Generate an error. - // EIO is allowed by getdirentries. - err = EIO - } - return - } - - // The old syscall entries are smaller than the new. Use 1/4 of the original - // buffer size rounded up to DIRBLKSIZ (see /usr/src/lib/libc/sys/getdirentries.c). - oldBufLen := roundup(len(buf)/4, _dirblksiz) - oldBuf := make([]byte, oldBufLen) - n, err = getdirentries(fd, oldBuf, basep) - if err == nil && n > 0 { - n = convertFromDirents11(buf, oldBuf[:n]) + if basep == nil || unsafe.Sizeof(*basep) == 8 { + return getdirentries(fd, buf, (*uint64)(unsafe.Pointer(basep))) + } + // The syscall needs a 64-bit base. On 32-bit machines + // we can't just use the basep passed in. See #32498. + var base uint64 = uint64(*basep) + n, err = getdirentries(fd, buf, &base) + *basep = uintptr(base) + if base>>32 != 0 { + // We can't stuff the base back into a uintptr, so any + // future calls would be suspect. Generate an error. + // EIO is allowed by getdirentries. + err = EIO } return } func Mknod(path string, mode uint32, dev uint64) (err error) { - var oldDev int - if supportsABI(_ino64First) { - return mknodat_freebsd12(AT_FDCWD, path, mode, dev) - } - oldDev = int(dev) - return mknod(path, mode, oldDev) -} - -func Mknodat(fd int, path string, mode uint32, dev uint64) (err error) { - var oldDev int - if supportsABI(_ino64First) { - return mknodat_freebsd12(fd, path, mode, dev) - } - oldDev = int(dev) - return mknodat(fd, path, mode, oldDev) -} - -// round x to the nearest multiple of y, larger or equal to x. -// -// from /usr/include/sys/param.h Macros for counting and rounding. 
-// #define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) -func roundup(x, y int) int { - return ((x + y - 1) / y) * y -} - -func (s *Stat_t) convertFrom(old *stat_freebsd11_t) { - *s = Stat_t{ - Dev: uint64(old.Dev), - Ino: uint64(old.Ino), - Nlink: uint64(old.Nlink), - Mode: old.Mode, - Uid: old.Uid, - Gid: old.Gid, - Rdev: uint64(old.Rdev), - Atim: old.Atim, - Mtim: old.Mtim, - Ctim: old.Ctim, - Btim: old.Btim, - Size: old.Size, - Blocks: old.Blocks, - Blksize: old.Blksize, - Flags: old.Flags, - Gen: uint64(old.Gen), - } -} - -func (s *Statfs_t) convertFrom(old *statfs_freebsd11_t) { - *s = Statfs_t{ - Version: _statfsVersion, - Type: old.Type, - Flags: old.Flags, - Bsize: old.Bsize, - Iosize: old.Iosize, - Blocks: old.Blocks, - Bfree: old.Bfree, - Bavail: old.Bavail, - Files: old.Files, - Ffree: old.Ffree, - Syncwrites: old.Syncwrites, - Asyncwrites: old.Asyncwrites, - Syncreads: old.Syncreads, - Asyncreads: old.Asyncreads, - // Spare - Namemax: old.Namemax, - Owner: old.Owner, - Fsid: old.Fsid, - // Charspare - // Fstypename - // Mntfromname - // Mntonname - } - - sl := old.Fstypename[:] - n := clen(*(*[]byte)(unsafe.Pointer(&sl))) - copy(s.Fstypename[:], old.Fstypename[:n]) - - sl = old.Mntfromname[:] - n = clen(*(*[]byte)(unsafe.Pointer(&sl))) - copy(s.Mntfromname[:], old.Mntfromname[:n]) - - sl = old.Mntonname[:] - n = clen(*(*[]byte)(unsafe.Pointer(&sl))) - copy(s.Mntonname[:], old.Mntonname[:n]) -} - -func convertFromDirents11(buf []byte, old []byte) int { - const ( - fixedSize = int(unsafe.Offsetof(Dirent{}.Name)) - oldFixedSize = int(unsafe.Offsetof(dirent_freebsd11{}.Name)) - ) - - dstPos := 0 - srcPos := 0 - for dstPos+fixedSize < len(buf) && srcPos+oldFixedSize < len(old) { - var dstDirent Dirent - var srcDirent dirent_freebsd11 - - // If multiple direntries are written, sometimes when we reach the final one, - // we may have cap of old less than size of dirent_freebsd11. 
- copy((*[unsafe.Sizeof(srcDirent)]byte)(unsafe.Pointer(&srcDirent))[:], old[srcPos:]) - - reclen := roundup(fixedSize+int(srcDirent.Namlen)+1, 8) - if dstPos+reclen > len(buf) { - break - } - - dstDirent.Fileno = uint64(srcDirent.Fileno) - dstDirent.Off = 0 - dstDirent.Reclen = uint16(reclen) - dstDirent.Type = srcDirent.Type - dstDirent.Pad0 = 0 - dstDirent.Namlen = uint16(srcDirent.Namlen) - dstDirent.Pad1 = 0 - - copy(dstDirent.Name[:], srcDirent.Name[:srcDirent.Namlen]) - copy(buf[dstPos:], (*[unsafe.Sizeof(dstDirent)]byte)(unsafe.Pointer(&dstDirent))[:]) - padding := buf[dstPos+fixedSize+int(dstDirent.Namlen) : dstPos+reclen] - for i := range padding { - padding[i] = 0 - } - - dstPos += int(dstDirent.Reclen) - srcPos += int(srcDirent.Reclen) - } - - return dstPos + return Mknodat(AT_FDCWD, path, mode, dev) } func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { @@ -499,33 +254,51 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } //sys ptrace(request int, pid int, addr uintptr, data int) (err error) +//sys ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) = SYS_PTRACE func PtraceAttach(pid int) (err error) { - return ptrace(PTRACE_ATTACH, pid, 0, 0) + return ptrace(PT_ATTACH, pid, 0, 0) } func PtraceCont(pid int, signal int) (err error) { - return ptrace(PTRACE_CONT, pid, 1, signal) + return ptrace(PT_CONTINUE, pid, 1, signal) } func PtraceDetach(pid int) (err error) { - return ptrace(PTRACE_DETACH, pid, 1, 0) + return ptrace(PT_DETACH, pid, 1, 0) } func PtraceGetFpRegs(pid int, fpregsout *FpReg) (err error) { - return ptrace(PTRACE_GETFPREGS, pid, uintptr(unsafe.Pointer(fpregsout)), 0) + return ptracePtr(PT_GETFPREGS, pid, unsafe.Pointer(fpregsout), 0) } func PtraceGetRegs(pid int, regsout *Reg) (err error) { - return ptrace(PTRACE_GETREGS, pid, uintptr(unsafe.Pointer(regsout)), 0) + return ptracePtr(PT_GETREGS, pid, unsafe.Pointer(regsout), 0) +} + +func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { + ioDesc := PtraceIoDesc{ + Op: int32(req), + Offs: offs, + } + if countin > 0 { + _ = out[:countin] // check bounds + ioDesc.Addr = &out[0] + } else if out != nil { + ioDesc.Addr = (*byte)(unsafe.Pointer(&_zero)) + } + ioDesc.SetLen(countin) + + err = ptracePtr(PT_IO, pid, unsafe.Pointer(&ioDesc), 0) + return int(ioDesc.Len), err } func PtraceLwpEvents(pid int, enable int) (err error) { - return ptrace(PTRACE_LWPEVENTS, pid, 0, enable) + return ptrace(PT_LWP_EVENTS, pid, 0, enable) } -func PtraceLwpInfo(pid int, info uintptr) (err error) { - return ptrace(PTRACE_LWPINFO, pid, info, int(unsafe.Sizeof(PtraceLwpInfoStruct{}))) +func PtraceLwpInfo(pid int, info *PtraceLwpInfoStruct) (err error) { + return ptracePtr(PT_LWPINFO, pid, unsafe.Pointer(info), int(unsafe.Sizeof(*info))) } func PtracePeekData(pid int, addr uintptr, out []byte) (count int, err error) { @@ -545,11 +318,23 @@ func PtracePokeText(pid int, addr uintptr, data []byte) (count int, err error) { } func PtraceSetRegs(pid int, regs *Reg) (err error) { - return ptrace(PTRACE_SETREGS, pid, uintptr(unsafe.Pointer(regs)), 0) + return ptracePtr(PT_SETREGS, pid, unsafe.Pointer(regs), 0) } func PtraceSingleStep(pid int) (err error) { - return ptrace(PTRACE_SINGLESTEP, pid, 1, 0) + return ptrace(PT_STEP, pid, 1, 0) +} + +func Dup3(oldfd, newfd, flags int) error { + if oldfd == newfd || flags&^O_CLOEXEC != 0 { + return EINVAL + } + how := F_DUP2FD + if flags&O_CLOEXEC != 0 { + how = 
F_DUP2FD_CLOEXEC + } + _, err := fcntl(oldfd, how, newfd) + return err } /* @@ -565,6 +350,7 @@ func PtraceSingleStep(pid int) (err error) { //sys Chmod(path string, mode uint32) (err error) //sys Chown(path string, uid int, gid int) (err error) //sys Chroot(path string) (err error) +//sys ClockGettime(clockid int32, time *Timespec) (err error) //sys Close(fd int) (err error) //sys Dup(fd int) (nfd int, err error) //sys Dup2(from int, to int) (err error) @@ -591,16 +377,12 @@ func PtraceSingleStep(pid int) (err error) { //sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) //sys Flock(fd int, how int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) -//sys fstat(fd int, stat *stat_freebsd11_t) (err error) -//sys fstat_freebsd12(fd int, stat *Stat_t) (err error) -//sys fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) -//sys fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) -//sys fstatfs(fd int, stat *statfs_freebsd11_t) (err error) -//sys fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) +//sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) +//sys Fstatfs(fd int, stat *Statfs_t) (err error) //sys Fsync(fd int) (err error) //sys Ftruncate(fd int, length int64) (err error) -//sys getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) -//sys getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) +//sys getdirentries(fd int, buf []byte, basep *uint64) (n int, err error) //sys Getdtablesize() (size int) //sysnb Getegid() (egid int) //sysnb Geteuid() (uid int) @@ -622,13 +404,10 @@ func PtraceSingleStep(pid int) (err error) { //sys Link(path string, link string) (err error) //sys Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) //sys Listen(s int, backlog int) (err error) -//sys lstat(path string, stat *stat_freebsd11_t) (err error) //sys Mkdir(path string, mode uint32) (err error) //sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mkfifo(path string, mode uint32) (err error) -//sys mknod(path string, mode uint32, dev int) (err error) -//sys mknodat(fd int, path string, mode uint32, dev int) (err error) -//sys mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) +//sys Mknodat(fd int, path string, mode uint32, dev uint64) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Openat(fdat int, path string, mode int, perm uint32) (fd int, err error) @@ -654,13 +433,10 @@ func PtraceSingleStep(pid int) (err error) { //sysnb Setreuid(ruid int, euid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) -//sys stat(path string, stat *stat_freebsd11_t) (err error) -//sys statfs(path string, stat *statfs_freebsd11_t) (err error) -//sys statfs_freebsd12(path string, stat *Statfs_t) (err error) +//sys Statfs(path string, stat *Statfs_t) (err error) //sys Symlink(path string, link string) (err error) //sys Symlinkat(oldpath string, newdirfd int, newpath string) (err error) //sys Sync() (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go 
b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index 342fc32b1..b8da51004 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (d *PtraceIoDesc) SetLen(length int) { + d.Len = uint32(length) +} + func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var writtenOut uint64 = 0 _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr((*offset)>>32), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0) @@ -57,11 +61,5 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) func PtraceGetFsBase(pid int, fsbase *int64) (err error) { - return ptrace(PTRACE_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) -} - -func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint32(countin)} - err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) - return int(ioDesc.Len), err + return ptracePtr(PT_GETFSBASE, pid, unsafe.Pointer(fsbase), 0) } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index a32d5aa4a..47155c483 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (d *PtraceIoDesc) SetLen(length int) { + d.Len = uint64(length) +} + func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var writtenOut uint64 = 0 _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0) @@ -57,11 +61,5 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) func PtraceGetFsBase(pid int, fsbase *int64) (err error) { - return ptrace(PTRACE_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) -} - -func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint64(countin)} - err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) - return int(ioDesc.Len), err + return ptracePtr(PT_GETFSBASE, pid, unsafe.Pointer(fsbase), 0) } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index 1e36d39ab..08932093f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (d *PtraceIoDesc) SetLen(length int) { + d.Len = uint32(length) +} + func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var writtenOut uint64 = 0 _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr((*offset)>>32), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 
0) @@ -55,9 +59,3 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) - -func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint32(countin)} - err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) - return int(ioDesc.Len), err -} diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go index a09a1537b..d151a0d0e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go @@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (d *PtraceIoDesc) SetLen(length int) { + d.Len = uint64(length) +} + func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var writtenOut uint64 = 0 _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0) @@ -55,9 +59,3 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) - -func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint64(countin)} - err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) - return int(ioDesc.Len), err -} diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go new file mode 100644 index 000000000..d5cd64b37 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go @@ -0,0 +1,61 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+//go:build riscv64 && freebsd
+// +build riscv64,freebsd
+
+package unix
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+func setTimespec(sec, nsec int64) Timespec {
+	return Timespec{Sec: sec, Nsec: nsec}
+}
+
+func setTimeval(sec, usec int64) Timeval {
+	return Timeval{Sec: sec, Usec: usec}
+}
+
+func SetKevent(k *Kevent_t, fd, mode, flags int) {
+	k.Ident = uint64(fd)
+	k.Filter = int16(mode)
+	k.Flags = uint16(flags)
+}
+
+func (iov *Iovec) SetLen(length int) {
+	iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+	msghdr.Controllen = uint32(length)
+}
+
+func (msghdr *Msghdr) SetIovlen(length int) {
+	msghdr.Iovlen = int32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+	cmsg.Len = uint32(length)
+}
+
+func (d *PtraceIoDesc) SetLen(length int) {
+	d.Len = uint64(length)
+}
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+	var writtenOut uint64 = 0
+	_, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0)
+
+	written = int(writtenOut)
+
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
+func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go
new file mode 100644
index 000000000..381fd4673
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go
@@ -0,0 +1,30 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build hurd
+// +build hurd
+
+package unix
+
+/*
+#include <stdint.h>
+int ioctl(int, unsigned long int, uintptr_t);
+*/
+import "C"
+
+func ioctl(fd int, req uint, arg uintptr) (err error) {
+	r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(arg))
+	if r0 == -1 && er != nil {
+		err = er
+	}
+	return
+}
+
+func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
+	r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(uintptr(arg)))
+	if r0 == -1 && er != nil {
+		err = er
+	}
+	return
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd_386.go b/vendor/golang.org/x/sys/unix/syscall_hurd_386.go
new file mode 100644
index 000000000..7cf54a3e4
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_hurd_386.go
@@ -0,0 +1,29 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +//go:build 386 && hurd +// +build 386,hurd + +package unix + +const ( + TIOCGETA = 0x62251713 +) + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed int32 + Ospeed int32 +} diff --git a/vendor/golang.org/x/sys/unix/syscall_illumos.go b/vendor/golang.org/x/sys/unix/syscall_illumos.go index 8d5f294c4..87db5a6a8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_illumos.go +++ b/vendor/golang.org/x/sys/unix/syscall_illumos.go @@ -10,8 +10,6 @@ package unix import ( - "fmt" - "runtime" "unsafe" ) @@ -20,10 +18,9 @@ func bytes2iovec(bs [][]byte) []Iovec { for i, b := range bs { iovecs[i].SetLen(len(b)) if len(b) > 0 { - // somehow Iovec.Base on illumos is (*int8), not (*byte) - iovecs[i].Base = (*int8)(unsafe.Pointer(&b[0])) + iovecs[i].Base = &b[0] } else { - iovecs[i].Base = (*int8)(unsafe.Pointer(&_zero)) + iovecs[i].Base = (*byte)(unsafe.Pointer(&_zero)) } } return iovecs @@ -80,107 +77,3 @@ func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) { } return } - -//sys putmsg(fd int, clptr *strbuf, dataptr *strbuf, flags int) (err error) - -func Putmsg(fd int, cl []byte, data []byte, flags int) (err error) { - var clp, datap *strbuf - if len(cl) > 0 { - clp = &strbuf{ - Len: int32(len(cl)), - Buf: (*int8)(unsafe.Pointer(&cl[0])), - } - } - if len(data) > 0 { - datap = &strbuf{ - Len: int32(len(data)), - Buf: (*int8)(unsafe.Pointer(&data[0])), - } - } - return putmsg(fd, clp, datap, flags) -} - -//sys getmsg(fd int, clptr *strbuf, dataptr *strbuf, flags *int) (err error) - -func Getmsg(fd int, cl []byte, data []byte) (retCl []byte, retData []byte, flags int, err error) { - var clp, datap *strbuf - if len(cl) > 0 { - clp = &strbuf{ - Maxlen: int32(len(cl)), - Buf: (*int8)(unsafe.Pointer(&cl[0])), - } - } - if len(data) > 0 { - datap = &strbuf{ - Maxlen: int32(len(data)), - Buf: (*int8)(unsafe.Pointer(&data[0])), - } - } - - if err = getmsg(fd, clp, datap, &flags); err != nil { - return nil, nil, 0, err - } - - if len(cl) > 0 { - retCl = cl[:clp.Len] - } - if len(data) > 0 { - retData = data[:datap.Len] - } - return retCl, retData, flags, nil -} - -func IoctlSetIntRetInt(fd int, req uint, arg int) (int, error) { - return ioctlRet(fd, req, uintptr(arg)) -} - -func IoctlSetString(fd int, req uint, val string) error { - bs := make([]byte, len(val)+1) - copy(bs[:len(bs)-1], val) - err := ioctl(fd, req, uintptr(unsafe.Pointer(&bs[0]))) - runtime.KeepAlive(&bs[0]) - return err -} - -// Lifreq Helpers - -func (l *Lifreq) SetName(name string) error { - if len(name) >= len(l.Name) { - return fmt.Errorf("name cannot be more than %d characters", len(l.Name)-1) - } - for i := range name { - l.Name[i] = int8(name[i]) - } - return nil -} - -func (l *Lifreq) SetLifruInt(d int) { - *(*int)(unsafe.Pointer(&l.Lifru[0])) = d -} - -func (l *Lifreq) GetLifruInt() int { - return *(*int)(unsafe.Pointer(&l.Lifru[0])) -} - -func (l *Lifreq) SetLifruUint(d uint) { - *(*uint)(unsafe.Pointer(&l.Lifru[0])) = d -} - -func (l *Lifreq) GetLifruUint() uint { - return *(*uint)(unsafe.Pointer(&l.Lifru[0])) -} - -func IoctlLifreq(fd int, req uint, l *Lifreq) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(l))) -} - -// Strioctl Helpers - -func (s *Strioctl) SetInt(i int) { - s.Len = int32(unsafe.Sizeof(i)) - s.Dp = (*int8)(unsafe.Pointer(&i)) -} - -func IoctlSetStrioctlRetInt(fd int, req uint, s *Strioctl) (int, error) { - return ioctlRet(fd, req, 
uintptr(unsafe.Pointer(s))) -} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 298323eb1..fbaeb5fff 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -13,6 +13,7 @@ package unix import ( "encoding/binary" + "strconv" "syscall" "time" "unsafe" @@ -233,7 +234,7 @@ func Futimesat(dirfd int, path string, tv []Timeval) error { func Futimes(fd int, tv []Timeval) (err error) { // Believe it or not, this is the best we can do on Linux // (and is what glibc does). - return Utimes("/proc/self/fd/"+itoa(fd), tv) + return Utimes("/proc/self/fd/"+strconv.Itoa(fd), tv) } const ImplementsGetwd = true @@ -512,24 +513,24 @@ func (sa *SockaddrL2) sockaddr() (unsafe.Pointer, _Socklen, error) { // // Server example: // -// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM) -// _ = unix.Bind(fd, &unix.SockaddrRFCOMM{ -// Channel: 1, -// Addr: [6]uint8{0, 0, 0, 0, 0, 0}, // BDADDR_ANY or 00:00:00:00:00:00 -// }) -// _ = Listen(fd, 1) -// nfd, sa, _ := Accept(fd) -// fmt.Printf("conn addr=%v fd=%d", sa.(*unix.SockaddrRFCOMM).Addr, nfd) -// Read(nfd, buf) +// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM) +// _ = unix.Bind(fd, &unix.SockaddrRFCOMM{ +// Channel: 1, +// Addr: [6]uint8{0, 0, 0, 0, 0, 0}, // BDADDR_ANY or 00:00:00:00:00:00 +// }) +// _ = Listen(fd, 1) +// nfd, sa, _ := Accept(fd) +// fmt.Printf("conn addr=%v fd=%d", sa.(*unix.SockaddrRFCOMM).Addr, nfd) +// Read(nfd, buf) // // Client example: // -// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM) -// _ = Connect(fd, &SockaddrRFCOMM{ -// Channel: 1, -// Addr: [6]byte{0x11, 0x22, 0x33, 0xaa, 0xbb, 0xcc}, // CC:BB:AA:33:22:11 -// }) -// Write(fd, []byte(`hello`)) +// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM) +// _ = Connect(fd, &SockaddrRFCOMM{ +// Channel: 1, +// Addr: [6]byte{0x11, 0x22, 0x33, 0xaa, 0xbb, 0xcc}, // CC:BB:AA:33:22:11 +// }) +// Write(fd, []byte(`hello`)) type SockaddrRFCOMM struct { // Addr represents a bluetooth address, byte ordering is little-endian. Addr [6]uint8 @@ -556,12 +557,12 @@ func (sa *SockaddrRFCOMM) sockaddr() (unsafe.Pointer, _Socklen, error) { // The SockaddrCAN struct must be bound to the socket file descriptor // using Bind before the CAN socket can be used. // -// // Read one raw CAN frame -// fd, _ := Socket(AF_CAN, SOCK_RAW, CAN_RAW) -// addr := &SockaddrCAN{Ifindex: index} -// Bind(fd, addr) -// frame := make([]byte, 16) -// Read(fd, frame) +// // Read one raw CAN frame +// fd, _ := Socket(AF_CAN, SOCK_RAW, CAN_RAW) +// addr := &SockaddrCAN{Ifindex: index} +// Bind(fd, addr) +// frame := make([]byte, 16) +// Read(fd, frame) // // The full SocketCAN documentation can be found in the linux kernel // archives at: https://www.kernel.org/doc/Documentation/networking/can.txt @@ -632,13 +633,13 @@ func (sa *SockaddrCANJ1939) sockaddr() (unsafe.Pointer, _Socklen, error) { // Here is an example of using an AF_ALG socket with SHA1 hashing. // The initial socket setup process is as follows: // -// // Open a socket to perform SHA1 hashing. -// fd, _ := unix.Socket(unix.AF_ALG, unix.SOCK_SEQPACKET, 0) -// addr := &unix.SockaddrALG{Type: "hash", Name: "sha1"} -// unix.Bind(fd, addr) -// // Note: unix.Accept does not work at this time; must invoke accept() -// // manually using unix.Syscall. -// hashfd, _, _ := unix.Syscall(unix.SYS_ACCEPT, uintptr(fd), 0, 0) +// // Open a socket to perform SHA1 hashing. 
+// fd, _ := unix.Socket(unix.AF_ALG, unix.SOCK_SEQPACKET, 0) +// addr := &unix.SockaddrALG{Type: "hash", Name: "sha1"} +// unix.Bind(fd, addr) +// // Note: unix.Accept does not work at this time; must invoke accept() +// // manually using unix.Syscall. +// hashfd, _, _ := unix.Syscall(unix.SYS_ACCEPT, uintptr(fd), 0, 0) // // Once a file descriptor has been returned from Accept, it may be used to // perform SHA1 hashing. The descriptor is not safe for concurrent use, but @@ -647,39 +648,39 @@ func (sa *SockaddrCANJ1939) sockaddr() (unsafe.Pointer, _Socklen, error) { // When hashing a small byte slice or string, a single Write and Read may // be used: // -// // Assume hashfd is already configured using the setup process. -// hash := os.NewFile(hashfd, "sha1") -// // Hash an input string and read the results. Each Write discards -// // previous hash state. Read always reads the current state. -// b := make([]byte, 20) -// for i := 0; i < 2; i++ { -// io.WriteString(hash, "Hello, world.") -// hash.Read(b) -// fmt.Println(hex.EncodeToString(b)) -// } -// // Output: -// // 2ae01472317d1935a84797ec1983ae243fc6aa28 -// // 2ae01472317d1935a84797ec1983ae243fc6aa28 +// // Assume hashfd is already configured using the setup process. +// hash := os.NewFile(hashfd, "sha1") +// // Hash an input string and read the results. Each Write discards +// // previous hash state. Read always reads the current state. +// b := make([]byte, 20) +// for i := 0; i < 2; i++ { +// io.WriteString(hash, "Hello, world.") +// hash.Read(b) +// fmt.Println(hex.EncodeToString(b)) +// } +// // Output: +// // 2ae01472317d1935a84797ec1983ae243fc6aa28 +// // 2ae01472317d1935a84797ec1983ae243fc6aa28 // // For hashing larger byte slices, or byte streams such as those read from // a file or socket, use Sendto with MSG_MORE to instruct the kernel to update // the hash digest instead of creating a new one for a given chunk and finalizing it. // -// // Assume hashfd and addr are already configured using the setup process. -// hash := os.NewFile(hashfd, "sha1") -// // Hash the contents of a file. -// f, _ := os.Open("/tmp/linux-4.10-rc7.tar.xz") -// b := make([]byte, 4096) -// for { -// n, err := f.Read(b) -// if err == io.EOF { -// break -// } -// unix.Sendto(hashfd, b[:n], unix.MSG_MORE, addr) -// } -// hash.Read(b) -// fmt.Println(hex.EncodeToString(b)) -// // Output: 85cdcad0c06eef66f805ecce353bec9accbeecc5 +// // Assume hashfd and addr are already configured using the setup process. +// hash := os.NewFile(hashfd, "sha1") +// // Hash the contents of a file. +// f, _ := os.Open("/tmp/linux-4.10-rc7.tar.xz") +// b := make([]byte, 4096) +// for { +// n, err := f.Read(b) +// if err == io.EOF { +// break +// } +// unix.Sendto(hashfd, b[:n], unix.MSG_MORE, addr) +// } +// hash.Read(b) +// fmt.Println(hex.EncodeToString(b)) +// // Output: 85cdcad0c06eef66f805ecce353bec9accbeecc5 // // For more information, see: http://www.chronox.de/crypto-API/crypto/userspace-if.html. 
type SockaddrALG struct { @@ -1014,8 +1015,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { for n < len(pp.Path) && pp.Path[n] != 0 { n++ } - bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] - sa.Name = string(bytes) + sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n)) return sa, nil case AF_INET: @@ -1364,6 +1364,10 @@ func SetsockoptTCPRepairOpt(fd, level, opt int, o []TCPRepairOpt) (err error) { return setsockopt(fd, level, opt, unsafe.Pointer(&o[0]), uintptr(SizeofTCPRepairOpt*len(o))) } +func SetsockoptTCPMD5Sig(fd, level, opt int, s *TCPMD5Sig) error { + return setsockopt(fd, level, opt, unsafe.Pointer(s), unsafe.Sizeof(*s)) +} + // Keyctl Commands (http://man7.org/linux/man-pages/man2/keyctl.2.html) // KeyctlInt calls keyctl commands in which each argument is an int. @@ -1499,18 +1503,13 @@ func KeyctlRestrictKeyring(ringid int, keyType string, restriction string) error //sys keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) = SYS_KEYCTL //sys keyctlRestrictKeyring(cmd int, arg2 int) (err error) = SYS_KEYCTL -func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { +func recvmsgRaw(fd int, iov []Iovec, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { var msg Msghdr msg.Name = (*byte)(unsafe.Pointer(rsa)) msg.Namelen = uint32(SizeofSockaddrAny) - var iov Iovec - if len(p) > 0 { - iov.Base = &p[0] - iov.SetLen(len(p)) - } var dummy byte if len(oob) > 0 { - if len(p) == 0 { + if emptyIovecs(iov) { var sockType int sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE) if err != nil { @@ -1518,15 +1517,19 @@ func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn } // receive at least one normal byte if sockType != SOCK_DGRAM { - iov.Base = &dummy - iov.SetLen(1) + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] } } msg.Control = &oob[0] msg.SetControllen(len(oob)) } - msg.Iov = &iov - msg.Iovlen = 1 + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } if n, err = recvmsg(fd, &msg, flags); err != nil { return } @@ -1535,18 +1538,15 @@ func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn return } -func sendmsgN(fd int, p, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { +func sendmsgN(fd int, iov []Iovec, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { var msg Msghdr msg.Name = (*byte)(ptr) msg.Namelen = uint32(salen) - var iov Iovec - if len(p) > 0 { - iov.Base = &p[0] - iov.SetLen(len(p)) - } var dummy byte + var empty bool if len(oob) > 0 { - if len(p) == 0 { + empty = emptyIovecs(iov) + if empty { var sockType int sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE) if err != nil { @@ -1554,19 +1554,23 @@ func sendmsgN(fd int, p, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags i } // send at least one normal byte if sockType != SOCK_DGRAM { - iov.Base = &dummy - iov.SetLen(1) + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] } } msg.Control = &oob[0] msg.SetControllen(len(oob)) } - msg.Iov = &iov - msg.Iovlen = 1 + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } if n, err = sendmsg(fd, &msg, flags); err != nil { return 0, err } - if len(oob) > 0 && len(p) == 0 { + if len(oob) > 0 && empty { n = 0 } return n, nil @@ -1578,6 +1582,7 @@ func BindToDevice(fd int, device string) 
(err error) { } //sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) +//sys ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) = SYS_PTRACE func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err error) { // The peek requests are machine-size oriented, so we wrap it @@ -1595,7 +1600,7 @@ func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err erro // boundary. n := 0 if addr%SizeofPtr != 0 { - err = ptrace(req, pid, addr-addr%SizeofPtr, uintptr(unsafe.Pointer(&buf[0]))) + err = ptracePtr(req, pid, addr-addr%SizeofPtr, unsafe.Pointer(&buf[0])) if err != nil { return 0, err } @@ -1607,7 +1612,7 @@ func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err erro for len(out) > 0 { // We use an internal buffer to guarantee alignment. // It's not documented if this is necessary, but we're paranoid. - err = ptrace(req, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0]))) + err = ptracePtr(req, pid, addr+uintptr(n), unsafe.Pointer(&buf[0])) if err != nil { return n, err } @@ -1639,7 +1644,7 @@ func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (c n := 0 if addr%SizeofPtr != 0 { var buf [SizeofPtr]byte - err = ptrace(peekReq, pid, addr-addr%SizeofPtr, uintptr(unsafe.Pointer(&buf[0]))) + err = ptracePtr(peekReq, pid, addr-addr%SizeofPtr, unsafe.Pointer(&buf[0])) if err != nil { return 0, err } @@ -1666,7 +1671,7 @@ func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (c // Trailing edge. if len(data) > 0 { var buf [SizeofPtr]byte - err = ptrace(peekReq, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0]))) + err = ptracePtr(peekReq, pid, addr+uintptr(n), unsafe.Pointer(&buf[0])) if err != nil { return n, err } @@ -1695,11 +1700,11 @@ func PtracePokeUser(pid int, addr uintptr, data []byte) (count int, err error) { } func PtraceGetRegs(pid int, regsout *PtraceRegs) (err error) { - return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) + return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout)) } func PtraceSetRegs(pid int, regs *PtraceRegs) (err error) { - return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) + return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs)) } func PtraceSetOptions(pid int, options int) (err error) { @@ -1708,7 +1713,7 @@ func PtraceSetOptions(pid int, options int) (err error) { func PtraceGetEventMsg(pid int) (msg uint, err error) { var data _C_long - err = ptrace(PTRACE_GETEVENTMSG, pid, 0, uintptr(unsafe.Pointer(&data))) + err = ptracePtr(PTRACE_GETEVENTMSG, pid, 0, unsafe.Pointer(&data)) msg = uint(data) return } @@ -1799,6 +1804,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sysnb Capset(hdr *CapUserHeader, data *CapUserData) (err error) //sys Chdir(path string) (err error) //sys Chroot(path string) (err error) +//sys ClockAdjtime(clockid int32, buf *Timex) (state int, err error) //sys ClockGetres(clockid int32, res *Timespec) (err error) //sys ClockGettime(clockid int32, time *Timespec) (err error) //sys ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) @@ -1829,6 +1835,9 @@ func Dup2(oldfd, newfd int) error { //sys Fremovexattr(fd int, attr string) (err error) //sys Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) //sys Fsync(fd int) (err error) +//sys Fsmount(fd int, flags int, mountAttrs int) (fsfd int, err error) +//sys Fsopen(fsName string, flags int) (fd int, 
err error) +//sys Fspick(dirfd int, pathName string, flags int) (fd int, err error) //sys Getdents(fd int, buf []byte) (n int, err error) = SYS_GETDENTS64 //sysnb Getpgid(pid int) (pgid int, err error) @@ -1859,11 +1868,11 @@ func Getpgrp() (pid int) { //sys MemfdCreate(name string, flags int) (fd int, err error) //sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) +//sys MoveMount(fromDirfd int, fromPathName string, toDirfd int, toPathName string, flags int) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) //sys OpenTree(dfd int, fileName string, flags uint) (r int, err error) //sys PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) //sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT -//sysnb Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64 //sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) //sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) = SYS_PSELECT6 //sys read(fd int, p []byte) (n int, err error) @@ -1877,6 +1886,15 @@ func Getpgrp() (pid int) { //sysnb Settimeofday(tv *Timeval) (err error) //sys Setns(fd int, nstype int) (err error) +//go:linkname syscall_prlimit syscall.prlimit +func syscall_prlimit(pid, resource int, newlimit, old *syscall.Rlimit) error + +func Prlimit(pid, resource int, newlimit, old *Rlimit) error { + // Just call the syscall version, because as of Go 1.21 + // it will affect starting a new process. + return syscall_prlimit(pid, resource, (*syscall.Rlimit)(newlimit), (*syscall.Rlimit)(old)) +} + // PrctlRetInt performs a prctl operation specified by option and further // optional arguments arg2 through arg5 depending on option. It returns a // non-negative integer that is returned by the prctl syscall. @@ -1888,17 +1906,28 @@ func PrctlRetInt(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uint return int(ret), nil } -// issue 1435. -// On linux Setuid and Setgid only affects the current thread, not the process. -// This does not match what most callers expect so we must return an error -// here rather than letting the caller think that the call succeeded. - func Setuid(uid int) (err error) { - return EOPNOTSUPP + return syscall.Setuid(uid) +} + +func Setgid(gid int) (err error) { + return syscall.Setgid(gid) +} + +func Setreuid(ruid, euid int) (err error) { + return syscall.Setreuid(ruid, euid) } -func Setgid(uid int) (err error) { - return EOPNOTSUPP +func Setregid(rgid, egid int) (err error) { + return syscall.Setregid(rgid, egid) +} + +func Setresuid(ruid, euid, suid int) (err error) { + return syscall.Setresuid(ruid, euid, suid) +} + +func Setresgid(rgid, egid, sgid int) (err error) { + return syscall.Setresgid(rgid, egid, sgid) } // SetfsgidRetGid sets fsgid for current thread and returns previous fsgid set. 
@@ -1957,36 +1986,46 @@ func Signalfd(fd int, sigmask *Sigset_t, flags int) (newfd int, err error) { //sys preadv2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) = SYS_PREADV2 //sys pwritev2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) = SYS_PWRITEV2 -func bytes2iovec(bs [][]byte) []Iovec { - iovecs := make([]Iovec, len(bs)) - for i, b := range bs { - iovecs[i].SetLen(len(b)) +// minIovec is the size of the small initial allocation used by +// Readv, Writev, etc. +// +// This small allocation gets stack allocated, which lets the +// common use case of len(iovs) <= minIovs avoid more expensive +// heap allocations. +const minIovec = 8 + +// appendBytes converts bs to Iovecs and appends them to vecs. +func appendBytes(vecs []Iovec, bs [][]byte) []Iovec { + for _, b := range bs { + var v Iovec + v.SetLen(len(b)) if len(b) > 0 { - iovecs[i].Base = &b[0] + v.Base = &b[0] } else { - iovecs[i].Base = (*byte)(unsafe.Pointer(&_zero)) + v.Base = (*byte)(unsafe.Pointer(&_zero)) } + vecs = append(vecs, v) } - return iovecs + return vecs } -// offs2lohi splits offs into its lower and upper unsigned long. On 64-bit -// systems, hi will always be 0. On 32-bit systems, offs will be split in half. -// preadv/pwritev chose this calling convention so they don't need to add a -// padding-register for alignment on ARM. +// offs2lohi splits offs into its low and high order bits. func offs2lohi(offs int64) (lo, hi uintptr) { - return uintptr(offs), uintptr(uint64(offs) >> SizeofLong) + const longBits = SizeofLong * 8 + return uintptr(offs), uintptr(uint64(offs) >> (longBits - 1) >> 1) // two shifts to avoid false positive in vet } func Readv(fd int, iovs [][]byte) (n int, err error) { - iovecs := bytes2iovec(iovs) + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) n, err = readv(fd, iovecs) readvRacedetect(iovecs, n, err) return n, err } func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { - iovecs := bytes2iovec(iovs) + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) lo, hi := offs2lohi(offset) n, err = preadv(fd, iovecs, lo, hi) readvRacedetect(iovecs, n, err) @@ -1994,7 +2033,8 @@ func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { } func Preadv2(fd int, iovs [][]byte, offset int64, flags int) (n int, err error) { - iovecs := bytes2iovec(iovs) + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) lo, hi := offs2lohi(offset) n, err = preadv2(fd, iovecs, lo, hi, flags) readvRacedetect(iovecs, n, err) @@ -2021,7 +2061,8 @@ func readvRacedetect(iovecs []Iovec, n int, err error) { } func Writev(fd int, iovs [][]byte) (n int, err error) { - iovecs := bytes2iovec(iovs) + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) if raceenabled { raceReleaseMerge(unsafe.Pointer(&ioSync)) } @@ -2031,7 +2072,8 @@ func Writev(fd int, iovs [][]byte) (n int, err error) { } func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) { - iovecs := bytes2iovec(iovs) + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) if raceenabled { raceReleaseMerge(unsafe.Pointer(&ioSync)) } @@ -2042,7 +2084,8 @@ func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) { } func Pwritev2(fd int, iovs [][]byte, offset int64, flags int) (n int, err error) { - iovecs := bytes2iovec(iovs) + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) if raceenabled { 
raceReleaseMerge(unsafe.Pointer(&ioSync)) } @@ -2123,6 +2166,14 @@ func isGroupMember(gid int) bool { return false } +func isCapDacOverrideSet() bool { + hdr := CapUserHeader{Version: LINUX_CAPABILITY_VERSION_3} + data := [2]CapUserData{} + err := Capget(&hdr, &data[0]) + + return err == nil && data[0].Effective&(1<> 3) & 7 } else { fmode = st.Mode & 7 @@ -2237,7 +2294,7 @@ func (fh *FileHandle) Bytes() []byte { if n == 0 { return nil } - return (*[1 << 30]byte)(unsafe.Pointer(uintptr(unsafe.Pointer(&fh.fileHandle.Type)) + 4))[:n:n] + return unsafe.Slice((*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(&fh.fileHandle.Type))+4)), n) } // NameToHandleAt wraps the name_to_handle_at system call; it obtains @@ -2353,6 +2410,16 @@ func Setitimer(which ItimerWhich, it Itimerval) (Itimerval, error) { return prev, nil } +//sysnb rtSigprocmask(how int, set *Sigset_t, oldset *Sigset_t, sigsetsize uintptr) (err error) = SYS_RT_SIGPROCMASK + +func PthreadSigmask(how int, set, oldset *Sigset_t) error { + if oldset != nil { + // Explicitly clear in case Sigset_t is larger than _C__NSIG. + *oldset = Sigset_t{} + } + return rtSigprocmask(how, set, oldset, _C__NSIG/8) +} + /* * Unimplemented */ @@ -2411,7 +2478,6 @@ func Setitimer(which ItimerWhich, it Itimerval) (Itimerval, error) { // RestartSyscall // RtSigaction // RtSigpending -// RtSigprocmask // RtSigqueueinfo // RtSigreturn // RtSigsuspend diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index 518e476e6..c7d9945ea 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -41,10 +41,6 @@ func setTimeval(sec, usec int64) Timeval { //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64 //sys setfsgid(gid int) (prev int, err error) = SYS_SETFSGID32 //sys setfsuid(uid int) (prev int, err error) = SYS_SETFSUID32 -//sysnb Setregid(rgid int, egid int) (err error) = SYS_SETREGID32 -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) = SYS_SETRESGID32 -//sysnb Setresuid(ruid int, euid int, suid int) (err error) = SYS_SETRESUID32 -//sysnb Setreuid(ruid int, euid int) (err error) = SYS_SETREUID32 //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) @@ -101,33 +97,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = Prlimit(0, resource, rlim, nil) - if err != ENOSYS { - return err - } - - rl := rlimit32{} - if rlim.Cur == rlimInf64 { - rl.Cur = rlimInf32 - } else if rlim.Cur < uint64(rlimInf32) { - rl.Cur = uint32(rlim.Cur) - } else { - return EINVAL - } - if rlim.Max == rlimInf64 { - rl.Max = rlimInf32 - } else if rlim.Max < uint64(rlimInf32) { - rl.Max = uint32(rlim.Max) - } else { - return EINVAL - } - - return setrlimit(resource, &rl) -} - func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { newoffset, errno := seek(fd, offset, whence) if errno != 0 { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index f5e9d6bef..5b21fcfd7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ 
-46,11 +46,6 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index c1a7778f1..da2986415 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -62,10 +62,6 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT //sys setfsgid(gid int) (prev int, err error) = SYS_SETFSGID32 //sys setfsuid(uid int) (prev int, err error) = SYS_SETFSUID32 -//sysnb Setregid(rgid int, egid int) (err error) = SYS_SETREGID32 -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) = SYS_SETRESGID32 -//sysnb Setresuid(ruid int, euid int, suid int) (err error) = SYS_SETRESUID32 -//sysnb Setreuid(ruid int, euid int) (err error) = SYS_SETREUID32 //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 @@ -175,33 +171,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = Prlimit(0, resource, rlim, nil) - if err != ENOSYS { - return err - } - - rl := rlimit32{} - if rlim.Cur == rlimInf64 { - rl.Cur = rlimInf32 - } else if rlim.Cur < uint64(rlimInf32) { - rl.Cur = uint32(rlim.Cur) - } else { - return EINVAL - } - if rlim.Max == rlimInf64 { - rl.Max = rlimInf32 - } else if rlim.Max < uint64(rlimInf32) { - rl.Max = uint32(rlim.Max) - } else { - return EINVAL - } - - return setrlimit(resource, &rl) -} - func (r *PtraceRegs) PC() uint64 { return uint64(r.Uregs[15]) } func (r *PtraceRegs) SetPC(pc uint64) { r.Uregs[15] = uint32(pc) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index d83e2c657..a81f5742b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -39,11 +39,6 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb setrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) @@ 
-147,15 +142,6 @@ func Getrlimit(resource int, rlim *Rlimit) error { return getrlimit(resource, rlim) } -// Setrlimit prefers the prlimit64 system call. See issue 38604. -func Setrlimit(resource int, rlim *Rlimit) error { - err := Prlimit(0, resource, rlim, nil) - if err != ENOSYS { - return err - } - return setrlimit(resource, rlim) -} - func (r *PtraceRegs) PC() uint64 { return r.Pc } func (r *PtraceRegs) SetPC(pc uint64) { r.Pc = pc } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go new file mode 100644 index 000000000..69d2d7c3d --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -0,0 +1,217 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build loong64 && linux +// +build loong64,linux + +package unix + +import "unsafe" + +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT +//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 +//sys Fchown(fd int, uid int, gid int) (err error) +//sys Fstatfs(fd int, buf *Statfs_t) (err error) +//sys Ftruncate(fd int, length int64) (err error) +//sysnb Getegid() (egid int) +//sysnb Geteuid() (euid int) +//sysnb Getgid() (gid int) +//sysnb Getuid() (uid int) +//sys Listen(s int, n int) (err error) +//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 +//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 +//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + var ts *Timespec + if timeout != nil { + ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} + } + return Pselect(nfd, r, w, e, ts, nil) +} + +//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) +//sys setfsgid(gid int) (prev int, err error) +//sys setfsuid(uid int) (prev int, err error) +//sys Shutdown(fd int, how int) (err error) +//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) + +func timespecFromStatxTimestamp(x StatxTimestamp) Timespec { + return Timespec{ + Sec: x.Sec, + Nsec: int64(x.Nsec), + } +} + +func Fstatat(fd int, path string, stat *Stat_t, flags int) error { + var r Statx_t + // Do it the glibc way, add AT_NO_AUTOMOUNT. + if err := Statx(fd, path, AT_NO_AUTOMOUNT|flags, STATX_BASIC_STATS, &r); err != nil { + return err + } + + stat.Dev = Mkdev(r.Dev_major, r.Dev_minor) + stat.Ino = r.Ino + stat.Mode = uint32(r.Mode) + stat.Nlink = r.Nlink + stat.Uid = r.Uid + stat.Gid = r.Gid + stat.Rdev = Mkdev(r.Rdev_major, r.Rdev_minor) + // hope we don't get to process files so large to overflow these size + // fields... 
+ stat.Size = int64(r.Size) + stat.Blksize = int32(r.Blksize) + stat.Blocks = int64(r.Blocks) + stat.Atim = timespecFromStatxTimestamp(r.Atime) + stat.Mtim = timespecFromStatxTimestamp(r.Mtime) + stat.Ctim = timespecFromStatxTimestamp(r.Ctime) + + return nil +} + +func Fstat(fd int, stat *Stat_t) (err error) { + return Fstatat(fd, "", stat, AT_EMPTY_PATH) +} + +func Stat(path string, stat *Stat_t) (err error) { + return Fstatat(AT_FDCWD, path, stat, 0) +} + +func Lchown(path string, uid int, gid int) (err error) { + return Fchownat(AT_FDCWD, path, uid, gid, AT_SYMLINK_NOFOLLOW) +} + +func Lstat(path string, stat *Stat_t) (err error) { + return Fstatat(AT_FDCWD, path, stat, AT_SYMLINK_NOFOLLOW) +} + +//sys Statfs(path string, buf *Statfs_t) (err error) +//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) +//sys Truncate(path string, length int64) (err error) + +func Ustat(dev int, ubuf *Ustat_t) (err error) { + return ENOSYS +} + +//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) +//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) +//sysnb getgroups(n int, list *_Gid_t) (nn int, err error) +//sysnb setgroups(n int, list *_Gid_t) (err error) +//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) +//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) +//sysnb socket(domain int, typ int, proto int) (fd int, err error) +//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) +//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) +//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) +//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) +//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) +//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) + +//sysnb Gettimeofday(tv *Timeval) (err error) + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} +} + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + err = Prlimit(0, resource, nil, rlim) + return +} + +func futimesat(dirfd int, path string, tv *[2]Timeval) (err error) { + if tv == nil { + return utimensat(dirfd, path, nil, 0) + } + + ts := []Timespec{ + NsecToTimespec(TimevalToNsec(tv[0])), + NsecToTimespec(TimevalToNsec(tv[1])), + } + return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) +} + +func Time(t *Time_t) (Time_t, error) { + var tv Timeval + err := Gettimeofday(&tv) + if err != nil { + return 0, err + } + if t != nil { + *t = Time_t(tv.Sec) + } + return Time_t(tv.Sec), nil +} + +func Utime(path string, buf *Utimbuf) error { + tv := []Timeval{ + {Sec: buf.Actime}, + {Sec: buf.Modtime}, + } + return Utimes(path, tv) +} + +func utimes(path string, tv *[2]Timeval) (err error) { + if tv == nil { + return utimensat(AT_FDCWD, path, nil, 0) + } + + ts := []Timespec{ + NsecToTimespec(TimevalToNsec(tv[0])), + NsecToTimespec(TimevalToNsec(tv[1])), + } + return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) 
+} + +func (r *PtraceRegs) PC() uint64 { return r.Era } + +func (r *PtraceRegs) SetPC(era uint64) { r.Era = era } + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint64(length) +} + +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint64(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint64(length) +} + +func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) { + rsa.Service_name_len = uint64(length) +} + +func Pause() error { + _, err := ppoll(nil, 0, nil, nil) + return err +} + +func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + return Renameat2(olddirfd, oldpath, newdirfd, newpath, 0) +} + +//sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) + +func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error { + cmdlineLen := len(cmdline) + if cmdlineLen > 0 { + // Account for the additional NULL byte added by + // BytePtrFromString in kexecFileLoad. The kexec_file_load + // syscall expects a NULL-terminated string. + cmdlineLen++ + } + return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index 98a2660b9..76d564095 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -37,11 +37,6 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Statfs(path string, buf *Statfs_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index b8a18c0ad..aae7f0ffd 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -32,10 +32,6 @@ func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64 //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) @@ -155,33 +151,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = Prlimit(0, 
resource, rlim, nil) - if err != ENOSYS { - return err - } - - rl := rlimit32{} - if rlim.Cur == rlimInf64 { - rl.Cur = rlimInf32 - } else if rlim.Cur < uint64(rlimInf32) { - rl.Cur = uint32(rlim.Cur) - } else { - return EINVAL - } - if rlim.Max == rlimInf64 { - rl.Max = rlimInf32 - } else if rlim.Max < uint64(rlimInf32) { - rl.Max = uint32(rlim.Max) - } else { - return EINVAL - } - - return setrlimit(resource, &rl) -} - func (r *PtraceRegs) PC() uint64 { return r.Epc } func (r *PtraceRegs) SetPC(pc uint64) { r.Epc = pc } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go index 4ed9e67c6..66eff19a3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go @@ -34,10 +34,6 @@ import ( //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64 //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 @@ -163,33 +159,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { return } -//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - err = Prlimit(0, resource, rlim, nil) - if err != ENOSYS { - return err - } - - rl := rlimit32{} - if rlim.Cur == rlimInf64 { - rl.Cur = rlimInf32 - } else if rlim.Cur < uint64(rlimInf32) { - rl.Cur = uint32(rlim.Cur) - } else { - return EINVAL - } - if rlim.Max == rlimInf64 { - rl.Max = rlimInf32 - } else if rlim.Max < uint64(rlimInf32) { - rl.Max = uint32(rlim.Max) - } else { - return EINVAL - } - - return setrlimit(resource, &rl) -} - func (r *PtraceRegs) PC() uint32 { return r.Nip } func (r *PtraceRegs) SetPC(pc uint32) { r.Nip = pc } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index db63d384c..806aa2574 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -34,11 +34,6 @@ package unix //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Stat(path string, stat *Stat_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 8ff7adba0..35851ef70 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -22,6 +22,7 @@ import "unsafe" //sysnb Getrlimit(resource int, rlim *Rlimit) (err error) //sysnb Getuid() 
(uid int) //sys Listen(s int, n int) (err error) +//sys MemfdSecret(flags int) (fd int, err error) //sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 //sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK @@ -37,11 +38,6 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index 6fcf277b0..2f89e8f5d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -34,11 +34,6 @@ import ( //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, buf *Statfs_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index 02a45d9cc..7ca064ae7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -31,11 +31,6 @@ package unix //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys setfsgid(gid int) (prev int, err error) //sys setfsuid(uid int) (prev int, err error) -//sysnb Setregid(rgid int, egid int) (err error) -//sysnb Setresgid(rgid int, egid int, sgid int) (err error) -//sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) -//sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) //sys Stat(path string, stat *Stat_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 666f0a1b3..018d7d478 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -13,7 +13,6 @@ package unix import ( - "runtime" "syscall" "unsafe" ) @@ -110,6 +109,20 @@ func direntNamlen(buf []byte) (uint64, bool) { return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) } +func SysctlUvmexp(name string) (*Uvmexp, error) { + mib, err := sysctlmib(name) + if err != nil { + return nil, err + } 
+ + n := uintptr(SizeofUvmexp) + var u Uvmexp + if err := sysctl(mib, (*byte)(unsafe.Pointer(&u)), &n, nil, 0); err != nil { + return nil, err + } + return &u, nil +} + func Pipe(p []int) (err error) { return Pipe2(p, 0) } @@ -164,13 +177,13 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } //sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL func IoctlGetPtmget(fd int, req uint) (*Ptmget, error) { var value Ptmget - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - runtime.KeepAlive(value) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return &value, err } @@ -245,6 +258,7 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { //sys Chmod(path string, mode uint32) (err error) //sys Chown(path string, uid int, gid int) (err error) //sys Chroot(path string) (err error) +//sys ClockGettime(clockid int32, time *Timespec) (err error) //sys Close(fd int) (err error) //sys Dup(fd int) (nfd int, err error) //sys Dup2(from int, to int) (err error) @@ -326,7 +340,6 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { //sys Setpriority(which int, who int, prio int) (err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setreuid(ruid int, euid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) @@ -487,7 +500,6 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { // compat_43_osendmsg // compat_43_osethostid // compat_43_osethostname -// compat_43_osetrlimit // compat_43_osigblock // compat_43_osigsetmask // compat_43_osigstack diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 15d637d63..f9c7a9663 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -81,6 +81,7 @@ func Pipe(p []int) (err error) { } //sysnb pipe2(p *[2]_C_int, flags int) (err error) + func Pipe2(p []int, flags int) error { if len(p) != 2 { return EINVAL @@ -95,6 +96,7 @@ func Pipe2(p []int, flags int) error { } //sys Getdents(fd int, buf []byte) (n int, err error) + func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { n, err = Getdents(fd, buf) if err != nil || basep == nil { @@ -150,6 +152,7 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { } //sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL @@ -218,6 +221,7 @@ func Uname(uname *Utsname) error { //sys Chmod(path string, mode uint32) (err error) //sys Chown(path string, uid int, gid int) (err error) //sys Chroot(path string) (err error) +//sys ClockGettime(clockid int32, time *Timespec) (err error) //sys Close(fd int) (err error) //sys Dup(fd int) (nfd int, err error) //sys Dup2(from int, to int) (err error) @@ -290,7 +294,6 @@ func Uname(uname *Utsname) error { //sysnb Setreuid(ruid int, euid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setrtable(rtable int) (err error) //sysnb 
Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go new file mode 100644 index 000000000..04aa43f41 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go @@ -0,0 +1,27 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build openbsd +// +build openbsd + +package unix + +import _ "unsafe" + +// Implemented in the runtime package (runtime/sys_openbsd3.go) +func syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) +func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) +func syscall_syscall10(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2 uintptr, err Errno) +func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) +func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) + +//go:linkname syscall_syscall syscall.syscall +//go:linkname syscall_syscall6 syscall.syscall6 +//go:linkname syscall_syscall10 syscall.syscall10 +//go:linkname syscall_rawSyscall syscall.rawSyscall +//go:linkname syscall_rawSyscall6 syscall.rawSyscall6 + +func syscall_syscall9(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) { + return syscall_syscall10(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, 0) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go index 30f285343..1378489f8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_mips64.go @@ -26,6 +26,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go new file mode 100644 index 000000000..c2796139c --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go @@ -0,0 +1,42 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ppc64 && openbsd +// +build ppc64,openbsd + +package unix + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint64(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions +// of openbsd/ppc64 the syscall is called sysctl instead of __sysctl. 
+const SYS___SYSCTL = SYS_SYSCTL diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go new file mode 100644 index 000000000..23199a7ff --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go @@ -0,0 +1,42 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build riscv64 && openbsd +// +build riscv64,openbsd + +package unix + +func setTimespec(sec, nsec int64) Timespec { + return Timespec{Sec: sec, Nsec: nsec} +} + +func setTimeval(sec, usec int64) Timeval { + return Timeval{Sec: sec, Usec: usec} +} + +func SetKevent(k *Kevent_t, fd, mode, flags int) { + k.Ident = uint64(fd) + k.Filter = int16(mode) + k.Flags = uint16(flags) +} + +func (iov *Iovec) SetLen(length int) { + iov.Len = uint64(length) +} + +func (msghdr *Msghdr) SetControllen(length int) { + msghdr.Controllen = uint32(length) +} + +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + +func (cmsg *Cmsghdr) SetLen(length int) { + cmsg.Len = uint32(length) +} + +// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions +// of openbsd/riscv64 the syscall is called sysctl instead of __sysctl. +const SYS___SYSCTL = SYS_SYSCTL diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 5c2003cec..b600a289d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -408,8 +408,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { for n < len(pp.Path) && pp.Path[n] != 0 { n++ } - bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] - sa.Name = string(bytes) + sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n)) return sa, nil case AF_INET: @@ -451,26 +450,25 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { //sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) = libsocket.__xnet_recvmsg -func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { +func recvmsgRaw(fd int, iov []Iovec, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { var msg Msghdr msg.Name = (*byte)(unsafe.Pointer(rsa)) msg.Namelen = uint32(SizeofSockaddrAny) - var iov Iovec - if len(p) > 0 { - iov.Base = (*int8)(unsafe.Pointer(&p[0])) - iov.SetLen(len(p)) - } - var dummy int8 + var dummy byte if len(oob) > 0 { // receive at least one normal byte - if len(p) == 0 { - iov.Base = &dummy - iov.SetLen(1) + if emptyIovecs(iov) { + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] } msg.Accrightslen = int32(len(oob)) } - msg.Iov = &iov - msg.Iovlen = 1 + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } if n, err = recvmsg(fd, &msg, flags); n == -1 { return } @@ -480,30 +478,31 @@ func recvmsgRaw(fd int, p, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = libsocket.__xnet_sendmsg -func sendmsgN(fd int, p, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { +func sendmsgN(fd int, iov []Iovec, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { var msg Msghdr msg.Name = (*byte)(unsafe.Pointer(ptr)) msg.Namelen = uint32(salen) - var iov Iovec - if len(p) > 0 { - iov.Base = 
(*int8)(unsafe.Pointer(&p[0])) - iov.SetLen(len(p)) - } - var dummy int8 + var dummy byte + var empty bool if len(oob) > 0 { // send at least one normal byte - if len(p) == 0 { - iov.Base = &dummy - iov.SetLen(1) + empty = emptyIovecs(iov) + if empty { + var iova [1]Iovec + iova[0].Base = &dummy + iova[0].SetLen(1) + iov = iova[:] } msg.Accrightslen = int32(len(oob)) } - msg.Iov = &iov - msg.Iovlen = 1 + if len(iov) > 0 { + msg.Iov = &iov[0] + msg.SetIovlen(len(iov)) + } if n, err = sendmsg(fd, &msg, flags); err != nil { return 0, err } - if len(oob) > 0 && len(p) == 0 { + if len(oob) > 0 && empty { n = 0 } return n, nil @@ -546,22 +545,26 @@ func Minor(dev uint64) uint32 { * Expose the ioctl function */ -//sys ioctlRet(fd int, req uint, arg uintptr) (ret int, err error) = libc.ioctl +//sys ioctlRet(fd int, req int, arg uintptr) (ret int, err error) = libc.ioctl +//sys ioctlPtrRet(fd int, req int, arg unsafe.Pointer) (ret int, err error) = libc.ioctl -func ioctl(fd int, req uint, arg uintptr) (err error) { +func ioctl(fd int, req int, arg uintptr) (err error) { _, err = ioctlRet(fd, req, arg) return err } -func IoctlSetTermio(fd int, req uint, value *Termio) error { - err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) +func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { + _, err = ioctlPtrRet(fd, req, arg) return err } -func IoctlGetTermio(fd int, req uint) (*Termio, error) { +func IoctlSetTermio(fd int, req int, value *Termio) error { + return ioctlPtr(fd, req, unsafe.Pointer(value)) +} + +func IoctlGetTermio(fd int, req int) (*Termio, error) { var value Termio - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return &value, err } @@ -590,6 +593,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Chmod(path string, mode uint32) (err error) //sys Chown(path string, uid int, gid int) (err error) //sys Chroot(path string) (err error) +//sys ClockGettime(clockid int32, time *Timespec) (err error) //sys Close(fd int) (err error) //sys Creat(path string, mode uint32) (fd int, err error) //sys Dup(fd int) (nfd int, err error) @@ -618,6 +622,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Getpriority(which int, who int) (n int, err error) //sysnb Getrlimit(which int, lim *Rlimit) (err error) //sysnb Getrusage(who int, rusage *Rusage) (err error) +//sysnb Getsid(pid int) (sid int, err error) //sysnb Gettimeofday(tv *Timeval) (err error) //sysnb Getuid() (uid int) //sys Kill(pid int, signum syscall.Signal) (err error) @@ -660,7 +665,6 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Setpriority(which int, who int, prio int) (err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setreuid(ruid int, euid int) (err error) -//sysnb Setrlimit(which int, lim *Rlimit) (err error) //sysnb Setsid() (pid int, err error) //sysnb Setuid(uid int) (err error) //sys Shutdown(s int, how int) (err error) = libsocket.shutdown @@ -749,8 +753,8 @@ type EventPort struct { // we should handle things gracefully. 
To do so, we need to keep an extra // reference to the cookie around until the event is processed // thus the otherwise seemingly extraneous "cookies" map - // The key of this map is a pointer to the corresponding &fCookie.cookie - cookies map[*interface{}]*fileObjCookie + // The key of this map is a pointer to the corresponding fCookie + cookies map[*fileObjCookie]struct{} } // PortEvent is an abstraction of the port_event C struct. @@ -777,7 +781,7 @@ func NewEventPort() (*EventPort, error) { port: port, fds: make(map[uintptr]*fileObjCookie), paths: make(map[string]*fileObjCookie), - cookies: make(map[*interface{}]*fileObjCookie), + cookies: make(map[*fileObjCookie]struct{}), } return e, nil } @@ -798,6 +802,7 @@ func (e *EventPort) Close() error { } e.fds = nil e.paths = nil + e.cookies = nil return nil } @@ -825,17 +830,16 @@ func (e *EventPort) AssociatePath(path string, stat os.FileInfo, events int, coo if _, found := e.paths[path]; found { return fmt.Errorf("%v is already associated with this Event Port", path) } - fobj, err := createFileObj(path, stat) + fCookie, err := createFileObjCookie(path, stat, cookie) if err != nil { return err } - fCookie := &fileObjCookie{fobj, cookie} - _, err = port_associate(e.port, PORT_SOURCE_FILE, uintptr(unsafe.Pointer(fobj)), events, (*byte)(unsafe.Pointer(&fCookie.cookie))) + _, err = port_associate(e.port, PORT_SOURCE_FILE, uintptr(unsafe.Pointer(fCookie.fobj)), events, (*byte)(unsafe.Pointer(fCookie))) if err != nil { return err } e.paths[path] = fCookie - e.cookies[&fCookie.cookie] = fCookie + e.cookies[fCookie] = struct{}{} return nil } @@ -857,7 +861,7 @@ func (e *EventPort) DissociatePath(path string) error { if err == nil { // dissociate was successful, safe to delete the cookie fCookie := e.paths[path] - delete(e.cookies, &fCookie.cookie) + delete(e.cookies, fCookie) } delete(e.paths, path) return err @@ -870,13 +874,16 @@ func (e *EventPort) AssociateFd(fd uintptr, events int, cookie interface{}) erro if _, found := e.fds[fd]; found { return fmt.Errorf("%v is already associated with this Event Port", fd) } - fCookie := &fileObjCookie{nil, cookie} - _, err := port_associate(e.port, PORT_SOURCE_FD, fd, events, (*byte)(unsafe.Pointer(&fCookie.cookie))) + fCookie, err := createFileObjCookie("", nil, cookie) + if err != nil { + return err + } + _, err = port_associate(e.port, PORT_SOURCE_FD, fd, events, (*byte)(unsafe.Pointer(fCookie))) if err != nil { return err } e.fds[fd] = fCookie - e.cookies[&fCookie.cookie] = fCookie + e.cookies[fCookie] = struct{}{} return nil } @@ -895,27 +902,31 @@ func (e *EventPort) DissociateFd(fd uintptr) error { if err == nil { // dissociate was successful, safe to delete the cookie fCookie := e.fds[fd] - delete(e.cookies, &fCookie.cookie) + delete(e.cookies, fCookie) } delete(e.fds, fd) return err } -func createFileObj(name string, stat os.FileInfo) (*fileObj, error) { - fobj := new(fileObj) - bs, err := ByteSliceFromString(name) - if err != nil { - return nil, err - } - fobj.Name = (*int8)(unsafe.Pointer(&bs[0])) - s := stat.Sys().(*syscall.Stat_t) - fobj.Atim.Sec = s.Atim.Sec - fobj.Atim.Nsec = s.Atim.Nsec - fobj.Mtim.Sec = s.Mtim.Sec - fobj.Mtim.Nsec = s.Mtim.Nsec - fobj.Ctim.Sec = s.Ctim.Sec - fobj.Ctim.Nsec = s.Ctim.Nsec - return fobj, nil +func createFileObjCookie(name string, stat os.FileInfo, cookie interface{}) (*fileObjCookie, error) { + fCookie := new(fileObjCookie) + fCookie.cookie = cookie + if name != "" && stat != nil { + fCookie.fobj = new(fileObj) + bs, err := ByteSliceFromString(name) + if err 
!= nil { + return nil, err + } + fCookie.fobj.Name = (*int8)(unsafe.Pointer(&bs[0])) + s := stat.Sys().(*syscall.Stat_t) + fCookie.fobj.Atim.Sec = s.Atim.Sec + fCookie.fobj.Atim.Nsec = s.Atim.Nsec + fCookie.fobj.Mtim.Sec = s.Mtim.Sec + fCookie.fobj.Mtim.Nsec = s.Mtim.Nsec + fCookie.fobj.Ctim.Sec = s.Ctim.Sec + fCookie.fobj.Ctim.Nsec = s.Ctim.Nsec + } + return fCookie, nil } // GetOne wraps port_get(3c) and returns a single PortEvent. @@ -928,44 +939,50 @@ func (e *EventPort) GetOne(t *Timespec) (*PortEvent, error) { p := new(PortEvent) e.mu.Lock() defer e.mu.Unlock() - e.peIntToExt(pe, p) + err = e.peIntToExt(pe, p) + if err != nil { + return nil, err + } return p, nil } // peIntToExt converts a cgo portEvent struct into the friendlier PortEvent // NOTE: Always call this function while holding the e.mu mutex -func (e *EventPort) peIntToExt(peInt *portEvent, peExt *PortEvent) { +func (e *EventPort) peIntToExt(peInt *portEvent, peExt *PortEvent) error { + if e.cookies == nil { + return fmt.Errorf("this EventPort is already closed") + } peExt.Events = peInt.Events peExt.Source = peInt.Source - cookie := (*interface{})(unsafe.Pointer(peInt.User)) - peExt.Cookie = *cookie + fCookie := (*fileObjCookie)(unsafe.Pointer(peInt.User)) + _, found := e.cookies[fCookie] + + if !found { + panic("unexpected event port address; may be due to kernel bug; see https://go.dev/issue/54254") + } + peExt.Cookie = fCookie.cookie + delete(e.cookies, fCookie) + switch peInt.Source { case PORT_SOURCE_FD: - delete(e.cookies, cookie) peExt.Fd = uintptr(peInt.Object) // Only remove the fds entry if it exists and this cookie matches if fobj, ok := e.fds[peExt.Fd]; ok { - if &fobj.cookie == cookie { + if fobj == fCookie { delete(e.fds, peExt.Fd) } } case PORT_SOURCE_FILE: - if fCookie, ok := e.cookies[cookie]; ok && uintptr(unsafe.Pointer(fCookie.fobj)) == uintptr(peInt.Object) { - // Use our stashed reference rather than using unsafe on what we got back - // the unsafe version would be (*fileObj)(unsafe.Pointer(uintptr(peInt.Object))) - peExt.fobj = fCookie.fobj - } else { - panic("mismanaged memory") - } - delete(e.cookies, cookie) + peExt.fobj = fCookie.fobj peExt.Path = BytePtrToString((*byte)(unsafe.Pointer(peExt.fobj.Name))) // Only remove the paths entry if it exists and this cookie matches if fobj, ok := e.paths[peExt.Path]; ok { - if &fobj.cookie == cookie { + if fobj == fCookie { delete(e.paths, peExt.Path) } } } + return nil } // Pending wraps port_getn(3c) and returns how many events are pending. @@ -989,7 +1006,7 @@ func (e *EventPort) Get(s []PortEvent, min int, timeout *Timespec) (int, error) got := uint32(min) max := uint32(len(s)) var err error - ps := make([]portEvent, max, max) + ps := make([]portEvent, max) _, err = port_getn(e.port, &ps[0], max, &got, timeout) // got will be trustworthy with ETIME, but not any other error. if err != nil && err != ETIME { @@ -997,8 +1014,122 @@ func (e *EventPort) Get(s []PortEvent, min int, timeout *Timespec) (int, error) } e.mu.Lock() defer e.mu.Unlock() + valid := 0 for i := 0; i < int(got); i++ { - e.peIntToExt(&ps[i], &s[i]) + err2 := e.peIntToExt(&ps[i], &s[i]) + if err2 != nil { + if valid == 0 && err == nil { + // If err2 is the only error and there are no valid events + // to return, return it to the caller. 
+ err = err2 + } + break + } + valid = i + 1 + } + return valid, err +} + +//sys putmsg(fd int, clptr *strbuf, dataptr *strbuf, flags int) (err error) + +func Putmsg(fd int, cl []byte, data []byte, flags int) (err error) { + var clp, datap *strbuf + if len(cl) > 0 { + clp = &strbuf{ + Len: int32(len(cl)), + Buf: (*int8)(unsafe.Pointer(&cl[0])), + } } - return int(got), err + if len(data) > 0 { + datap = &strbuf{ + Len: int32(len(data)), + Buf: (*int8)(unsafe.Pointer(&data[0])), + } + } + return putmsg(fd, clp, datap, flags) +} + +//sys getmsg(fd int, clptr *strbuf, dataptr *strbuf, flags *int) (err error) + +func Getmsg(fd int, cl []byte, data []byte) (retCl []byte, retData []byte, flags int, err error) { + var clp, datap *strbuf + if len(cl) > 0 { + clp = &strbuf{ + Maxlen: int32(len(cl)), + Buf: (*int8)(unsafe.Pointer(&cl[0])), + } + } + if len(data) > 0 { + datap = &strbuf{ + Maxlen: int32(len(data)), + Buf: (*int8)(unsafe.Pointer(&data[0])), + } + } + + if err = getmsg(fd, clp, datap, &flags); err != nil { + return nil, nil, 0, err + } + + if len(cl) > 0 { + retCl = cl[:clp.Len] + } + if len(data) > 0 { + retData = data[:datap.Len] + } + return retCl, retData, flags, nil +} + +func IoctlSetIntRetInt(fd int, req int, arg int) (int, error) { + return ioctlRet(fd, req, uintptr(arg)) +} + +func IoctlSetString(fd int, req int, val string) error { + bs := make([]byte, len(val)+1) + copy(bs[:len(bs)-1], val) + err := ioctlPtr(fd, req, unsafe.Pointer(&bs[0])) + runtime.KeepAlive(&bs[0]) + return err +} + +// Lifreq Helpers + +func (l *Lifreq) SetName(name string) error { + if len(name) >= len(l.Name) { + return fmt.Errorf("name cannot be more than %d characters", len(l.Name)-1) + } + for i := range name { + l.Name[i] = int8(name[i]) + } + return nil +} + +func (l *Lifreq) SetLifruInt(d int) { + *(*int)(unsafe.Pointer(&l.Lifru[0])) = d +} + +func (l *Lifreq) GetLifruInt() int { + return *(*int)(unsafe.Pointer(&l.Lifru[0])) +} + +func (l *Lifreq) SetLifruUint(d uint) { + *(*uint)(unsafe.Pointer(&l.Lifru[0])) = d +} + +func (l *Lifreq) GetLifruUint() uint { + return *(*uint)(unsafe.Pointer(&l.Lifru[0])) +} + +func IoctlLifreq(fd int, req int, l *Lifreq) error { + return ioctlPtr(fd, req, unsafe.Pointer(l)) +} + +// Strioctl Helpers + +func (s *Strioctl) SetInt(i int) { + s.Len = int32(unsafe.Sizeof(i)) + s.Dp = (*int8)(unsafe.Pointer(&i)) +} + +func IoctlSetStrioctlRetInt(fd int, req int, s *Strioctl) (int, error) { + return ioctlPtrRet(fd, req, unsafe.Pointer(s)) } diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 70508afc1..8e48c29ec 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -13,8 +13,6 @@ import ( "sync" "syscall" "unsafe" - - "golang.org/x/sys/internal/unsafeheader" ) var ( @@ -117,11 +115,7 @@ func (m *mmapper) Mmap(fd int, offset int64, length int, prot int, flags int) (d } // Use unsafe to convert addr into a []byte. - var b []byte - hdr := (*unsafeheader.Slice)(unsafe.Pointer(&b)) - hdr.Data = unsafe.Pointer(addr) - hdr.Cap = length - hdr.Len = length + b := unsafe.Slice((*byte)(unsafe.Pointer(addr)), length) // Register mapping in m and return it. p := &b[cap(b)-1] @@ -337,9 +331,27 @@ func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, err error) { return } +// Recvmsg receives a message from a socket using the recvmsg system call. 
The +// received non-control data will be written to p, and any "out of band" +// control data will be written to oob. The flags are passed to recvmsg. +// +// The results are: +// - n is the number of non-control data bytes read into p +// - oobn is the number of control data bytes read into oob; this may be interpreted using [ParseSocketControlMessage] +// - recvflags is flags returned by recvmsg +// - from is the address of the sender +// +// If the underlying socket type is not SOCK_DGRAM, a received message +// containing oob data and a single '\0' of non-control data is treated as if +// the message contained only control data, i.e. n will be zero on return. func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { + var iov [1]Iovec + if len(p) > 0 { + iov[0].Base = &p[0] + iov[0].SetLen(len(p)) + } var rsa RawSockaddrAny - n, oobn, recvflags, err = recvmsgRaw(fd, p, oob, flags, &rsa) + n, oobn, recvflags, err = recvmsgRaw(fd, iov[:], oob, flags, &rsa) // source address is only specified if the socket is unconnected if rsa.Addr.Family != AF_UNSPEC { from, err = anyToSockaddr(fd, &rsa) @@ -347,12 +359,65 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from return } +// RecvmsgBuffers receives a message from a socket using the recvmsg system +// call. This function is equivalent to Recvmsg, but non-control data read is +// scattered into the buffers slices. +func RecvmsgBuffers(fd int, buffers [][]byte, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { + iov := make([]Iovec, len(buffers)) + for i := range buffers { + if len(buffers[i]) > 0 { + iov[i].Base = &buffers[i][0] + iov[i].SetLen(len(buffers[i])) + } else { + iov[i].Base = (*byte)(unsafe.Pointer(&_zero)) + } + } + var rsa RawSockaddrAny + n, oobn, recvflags, err = recvmsgRaw(fd, iov, oob, flags, &rsa) + if err == nil && rsa.Addr.Family != AF_UNSPEC { + from, err = anyToSockaddr(fd, &rsa) + } + return +} + +// Sendmsg sends a message on a socket to an address using the sendmsg system +// call. This function is equivalent to SendmsgN, but does not return the +// number of bytes actually sent. func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) { _, err = SendmsgN(fd, p, oob, to, flags) return } +// SendmsgN sends a message on a socket to an address using the sendmsg system +// call. p contains the non-control data to send, and oob contains the "out of +// band" control data. The flags are passed to sendmsg. The number of +// non-control bytes actually written to the socket is returned. +// +// Some socket types do not support sending control data without accompanying +// non-control data. If p is empty, and oob contains control data, and the +// underlying socket type is not SOCK_DGRAM, p will be treated as containing a +// single '\0' and the return value will indicate zero bytes sent. +// +// The Go function Recvmsg, if called with an empty p and a non-empty oob, +// will read and ignore this additional '\0'. If the message is received by +// code that does not use Recvmsg, or that does not use Go at all, that code +// will need to be written to expect and ignore the additional '\0'. 
+// +// If you need to send non-empty oob with p actually empty, and if the +// underlying socket type supports it, you can do so via a raw system call as +// follows: +// +// msg := &unix.Msghdr{ +// Control: &oob[0], +// } +// msg.SetControllen(len(oob)) +// n, _, errno := unix.Syscall(unix.SYS_SENDMSG, uintptr(fd), uintptr(unsafe.Pointer(msg)), flags) func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) { + var iov [1]Iovec + if len(p) > 0 { + iov[0].Base = &p[0] + iov[0].SetLen(len(p)) + } var ptr unsafe.Pointer var salen _Socklen if to != nil { @@ -361,7 +426,31 @@ func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) return 0, err } } - return sendmsgN(fd, p, oob, ptr, salen, flags) + return sendmsgN(fd, iov[:], oob, ptr, salen, flags) +} + +// SendmsgBuffers sends a message on a socket to an address using the sendmsg +// system call. This function is equivalent to SendmsgN, but the non-control +// data is gathered from buffers. +func SendmsgBuffers(fd int, buffers [][]byte, oob []byte, to Sockaddr, flags int) (n int, err error) { + iov := make([]Iovec, len(buffers)) + for i := range buffers { + if len(buffers[i]) > 0 { + iov[i].Base = &buffers[i][0] + iov[i].SetLen(len(buffers[i])) + } else { + iov[i].Base = (*byte)(unsafe.Pointer(&_zero)) + } + } + var ptr unsafe.Pointer + var salen _Socklen + if to != nil { + ptr, salen, err = to.sockaddr() + if err != nil { + return 0, err + } + } + return sendmsgN(fd, iov, oob, ptr, salen, flags) } func Send(s int, buf []byte, flags int) (err error) { @@ -369,11 +458,15 @@ func Send(s int, buf []byte, flags int) (err error) { } func Sendto(fd int, p []byte, flags int, to Sockaddr) (err error) { - ptr, n, err := to.sockaddr() - if err != nil { - return err + var ptr unsafe.Pointer + var salen _Socklen + if to != nil { + ptr, salen, err = to.sockaddr() + if err != nil { + return err + } } - return sendto(fd, p, flags, ptr, n) + return sendto(fd, p, flags, ptr, salen) } func SetsockoptByte(fd, level, opt int, value byte) (err error) { @@ -484,3 +577,20 @@ func Lutimes(path string, tv []Timeval) error { } return UtimesNanoAt(AT_FDCWD, path, ts, AT_SYMLINK_NOFOLLOW) } + +// emptyIovecs reports whether there are no bytes in the slice of Iovec. +func emptyIovecs(iov []Iovec) bool { + for i := range iov { + if iov[i].Len > 0 { + return false + } + } + return true +} + +// Setrlimit sets a resource limit. +func Setrlimit(resource int, rlim *Rlimit) error { + // Just call the syscall version, because as of Go 1.21 + // it will affect starting a new process. + return syscall.Setrlimit(resource, (*syscall.Rlimit)(rlim)) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go index 5898e9a52..b6919ca58 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go @@ -2,11 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build (darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) && gc && !ppc64le && !ppc64 -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build (darwin || dragonfly || freebsd || (linux && !ppc64 && !ppc64le) || netbsd || openbsd || solaris) && gc +// +build darwin dragonfly freebsd linux,!ppc64,!ppc64le netbsd openbsd solaris // +build gc -// +build !ppc64le -// +build !ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index f8616f454..d3d49ec3e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -9,8 +9,10 @@ package unix import ( "bytes" + "fmt" "runtime" "sort" + "strings" "sync" "syscall" "unsafe" @@ -55,7 +57,13 @@ func (d *Dirent) NameString() string { if d == nil { return "" } - return string(d.Name[:d.Namlen]) + s := string(d.Name[:]) + idx := strings.IndexByte(s, 0) + if idx == -1 { + return s + } else { + return s[:idx] + } } func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { @@ -131,8 +139,7 @@ func anyToSockaddr(_ int, rsa *RawSockaddrAny) (Sockaddr, error) { for n < int(pp.Len) && pp.Path[n] != 0 { n++ } - bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] - sa.Name = string(bytes) + sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n)) return sa, nil case AF_INET: @@ -205,7 +212,8 @@ func (cmsg *Cmsghdr) SetLen(length int) { //sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error) = SYS___SENDMSG_A //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) = SYS_MMAP //sys munmap(addr uintptr, length uintptr) (err error) = SYS_MUNMAP -//sys ioctl(fd int, req uint, arg uintptr) (err error) = SYS_IOCTL +//sys ioctl(fd int, req int, arg uintptr) (err error) = SYS_IOCTL +//sys ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) = SYS_IOCTL //sys Access(path string, mode uint32) (err error) = SYS___ACCESS_A //sys Chdir(path string) (err error) = SYS___CHDIR_A @@ -1230,6 +1238,14 @@ func Readdir(dir uintptr) (*Dirent, error) { return &ent, err } +func readdir_r(dirp uintptr, entry *direntLE, result **direntLE) (err error) { + r0, _, e1 := syscall_syscall(SYS___READDIR_R_A, dirp, uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + if int64(r0) == -1 { + err = errnoErr(Errno(e1)) + } + return +} + func Closedir(dir uintptr) error { _, _, e := syscall_syscall(SYS_CLOSEDIR, dir, 0, 0) if e != 0 { @@ -1821,3 +1837,158 @@ func Unmount(name string, mtm int) (err error) { } return err } + +func fdToPath(dirfd int) (path string, err error) { + var buffer [1024]byte + // w_ctrl() + ret := runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_W_IOCTL<<4, + []uintptr{uintptr(dirfd), 17, 1024, uintptr(unsafe.Pointer(&buffer[0]))}) + if ret == 0 { + zb := bytes.IndexByte(buffer[:], 0) + if zb == -1 { + zb = len(buffer) + } + // __e2a_l() + runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, + []uintptr{uintptr(unsafe.Pointer(&buffer[0])), uintptr(zb)}) + return string(buffer[:zb]), nil + } + // __errno() + errno := int(*(*int32)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, + []uintptr{})))) + // __errno2() + errno2 := int(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO2<<4, + []uintptr{})) + // strerror_r() + ret = runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_STRERROR_R<<4, + []uintptr{uintptr(errno), 
uintptr(unsafe.Pointer(&buffer[0])), 1024}) + if ret == 0 { + zb := bytes.IndexByte(buffer[:], 0) + if zb == -1 { + zb = len(buffer) + } + return "", fmt.Errorf("%s (errno2=0x%x)", buffer[:zb], errno2) + } else { + return "", fmt.Errorf("fdToPath errno %d (errno2=0x%x)", errno, errno2) + } +} + +func direntLeToDirentUnix(dirent *direntLE, dir uintptr, path string) (Dirent, error) { + var d Dirent + + d.Ino = uint64(dirent.Ino) + offset, err := Telldir(dir) + if err != nil { + return d, err + } + + d.Off = int64(offset) + s := string(bytes.Split(dirent.Name[:], []byte{0})[0]) + copy(d.Name[:], s) + + d.Reclen = uint16(24 + len(d.NameString())) + var st Stat_t + path = path + "/" + s + err = Lstat(path, &st) + if err != nil { + return d, err + } + + d.Type = uint8(st.Mode >> 24) + return d, err +} + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + // Simulation of Getdirentries port from the Darwin implementation. + // COMMENTS FROM DARWIN: + // It's not the full required semantics, but should handle the case + // of calling Getdirentries or ReadDirent repeatedly. + // It won't handle assigning the results of lseek to *basep, or handle + // the directory being edited underfoot. + + skip, err := Seek(fd, 0, 1 /* SEEK_CUR */) + if err != nil { + return 0, err + } + + // Get path from fd to avoid unavailable call (fdopendir) + path, err := fdToPath(fd) + if err != nil { + return 0, err + } + d, err := Opendir(path) + if err != nil { + return 0, err + } + defer Closedir(d) + + var cnt int64 + for { + var entryLE direntLE + var entrypLE *direntLE + e := readdir_r(d, &entryLE, &entrypLE) + if e != nil { + return n, e + } + if entrypLE == nil { + break + } + if skip > 0 { + skip-- + cnt++ + continue + } + + // Dirent on zos has a different structure + entry, e := direntLeToDirentUnix(&entryLE, d, path) + if e != nil { + return n, e + } + + reclen := int(entry.Reclen) + if reclen > len(buf) { + // Not enough room. Return for now. + // The counter will let us know where we should start up again. + // Note: this strategy for suspending in the middle and + // restarting is O(n^2) in the length of the directory. Oh well. + break + } + + // Copy entry into return buffer. + s := unsafe.Slice((*byte)(unsafe.Pointer(&entry)), reclen) + copy(buf, s) + + buf = buf[reclen:] + n += reclen + cnt++ + } + // Set the seek offset of the input fd to record + // how many files we've already returned. 
+ _, err = Seek(fd, cnt, 0 /* SEEK_SET */) + if err != nil { + return n, err + } + + return n, nil +} + +func ReadDirent(fd int, buf []byte) (n int, err error) { + var base = (*uintptr)(unsafe.Pointer(new(uint64))) + return Getdirentries(fd, buf, base) +} + +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino)) +} + +func direntReclen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) +} + +func direntNamlen(buf []byte) (uint64, bool) { + reclen, ok := direntReclen(buf) + if !ok { + return 0, false + } + return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true +} diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix.go b/vendor/golang.org/x/sys/unix/sysvshm_unix.go index 0bb4c8de5..5bb41d17b 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_unix.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix.go @@ -7,11 +7,7 @@ package unix -import ( - "unsafe" - - "golang.org/x/sys/internal/unsafeheader" -) +import "unsafe" // SysvShmAttach attaches the Sysv shared memory segment associated with the // shared memory identifier id. @@ -34,12 +30,7 @@ func SysvShmAttach(id int, addr uintptr, flag int) ([]byte, error) { } // Use unsafe to convert addr into a []byte. - // TODO: convert to unsafe.Slice once we can assume Go 1.17 - var b []byte - hdr := (*unsafeheader.Slice)(unsafe.Pointer(&b)) - hdr.Data = unsafe.Pointer(addr) - hdr.Cap = int(info.Segsz) - hdr.Len = int(info.Segsz) + b := unsafe.Slice((*byte)(unsafe.Pointer(addr)), int(info.Segsz)) return b, nil } diff --git a/vendor/golang.org/x/sys/unix/timestruct.go b/vendor/golang.org/x/sys/unix/timestruct.go index 3d8930405..616b1b284 100644 --- a/vendor/golang.org/x/sys/unix/timestruct.go +++ b/vendor/golang.org/x/sys/unix/timestruct.go @@ -9,7 +9,7 @@ package unix import "time" -// TimespecToNSec returns the time stored in ts as nanoseconds. +// TimespecToNsec returns the time stored in ts as nanoseconds. func TimespecToNsec(ts Timespec) int64 { return ts.Nano() } // NsecToTimespec converts a number of nanoseconds into a Timespec. diff --git a/vendor/golang.org/x/sys/unix/xattr_bsd.go b/vendor/golang.org/x/sys/unix/xattr_bsd.go index 25df1e378..f5f8e9f36 100644 --- a/vendor/golang.org/x/sys/unix/xattr_bsd.go +++ b/vendor/golang.org/x/sys/unix/xattr_bsd.go @@ -36,9 +36,14 @@ func xattrnamespace(fullattr string) (ns int, attr string, err error) { func initxattrdest(dest []byte, idx int) (d unsafe.Pointer) { if len(dest) > idx { return unsafe.Pointer(&dest[idx]) - } else { - return unsafe.Pointer(_zero) } + if dest != nil { + // extattr_get_file and extattr_list_file treat NULL differently from + // a non-NULL pointer of length zero. Preserve the property of nilness, + // even if we can't use dest directly. 
+ return unsafe.Pointer(&_zero) + } + return nil } // FreeBSD and NetBSD implement their own syscalls to handle extended attributes @@ -160,13 +165,12 @@ func Lremovexattr(link string, attr string) (err error) { } func Listxattr(file string, dest []byte) (sz int, err error) { - d := initxattrdest(dest, 0) destsiz := len(dest) // FreeBSD won't allow you to list xattrs from multiple namespaces - s := 0 + s, pos := 0, 0 for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} { - stmp, e := ExtattrListFile(file, nsid, uintptr(d), destsiz) + stmp, e := ListxattrNS(file, nsid, dest[pos:]) /* Errors accessing system attrs are ignored so that * we can implement the Linux-like behavior of omitting errors that @@ -175,66 +179,102 @@ func Listxattr(file string, dest []byte) (sz int, err error) { * Linux will still error if we ask for user attributes on a file that * we don't have read permissions on, so don't ignore those errors */ - if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER { - continue - } else if e != nil { + if e != nil { + if e == EPERM && nsid != EXTATTR_NAMESPACE_USER { + continue + } return s, e } s += stmp - destsiz -= s - if destsiz < 0 { - destsiz = 0 + pos = s + if pos > destsiz { + pos = destsiz } - d = initxattrdest(dest, s) } return s, nil } -func Flistxattr(fd int, dest []byte) (sz int, err error) { +func ListxattrNS(file string, nsid int, dest []byte) (sz int, err error) { d := initxattrdest(dest, 0) destsiz := len(dest) - s := 0 + s, e := ExtattrListFile(file, nsid, uintptr(d), destsiz) + if e != nil { + return 0, err + } + + return s, nil +} + +func Flistxattr(fd int, dest []byte) (sz int, err error) { + destsiz := len(dest) + + s, pos := 0, 0 for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} { - stmp, e := ExtattrListFd(fd, nsid, uintptr(d), destsiz) - if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER { - continue - } else if e != nil { + stmp, e := FlistxattrNS(fd, nsid, dest[pos:]) + + if e != nil { + if e == EPERM && nsid != EXTATTR_NAMESPACE_USER { + continue + } return s, e } s += stmp - destsiz -= s - if destsiz < 0 { - destsiz = 0 + pos = s + if pos > destsiz { + pos = destsiz } - d = initxattrdest(dest, s) } return s, nil } -func Llistxattr(link string, dest []byte) (sz int, err error) { +func FlistxattrNS(fd int, nsid int, dest []byte) (sz int, err error) { d := initxattrdest(dest, 0) destsiz := len(dest) - s := 0 + s, e := ExtattrListFd(fd, nsid, uintptr(d), destsiz) + if e != nil { + return 0, err + } + + return s, nil +} + +func Llistxattr(link string, dest []byte) (sz int, err error) { + destsiz := len(dest) + + s, pos := 0, 0 for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} { - stmp, e := ExtattrListLink(link, nsid, uintptr(d), destsiz) - if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER { - continue - } else if e != nil { + stmp, e := LlistxattrNS(link, nsid, dest[pos:]) + + if e != nil { + if e == EPERM && nsid != EXTATTR_NAMESPACE_USER { + continue + } return s, e } s += stmp - destsiz -= s - if destsiz < 0 { - destsiz = 0 + pos = s + if pos > destsiz { + pos = destsiz } - d = initxattrdest(dest, s) + } + + return s, nil +} + +func LlistxattrNS(link string, nsid int, dest []byte) (sz int, err error) { + d := initxattrdest(dest, 0) + destsiz := len(dest) + + s, e := ExtattrListLink(link, nsid, uintptr(d), destsiz) + if e != nil { + return 0, err } return s, nil diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go 
b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index 476a1c7e7..143007627 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -1270,6 +1270,16 @@ const ( SEEK_END = 0x2 SEEK_HOLE = 0x3 SEEK_SET = 0x0 + SF_APPEND = 0x40000 + SF_ARCHIVED = 0x10000 + SF_DATALESS = 0x40000000 + SF_FIRMLINK = 0x800000 + SF_IMMUTABLE = 0x20000 + SF_NOUNLINK = 0x100000 + SF_RESTRICTED = 0x80000 + SF_SETTABLE = 0x3fff0000 + SF_SUPPORTED = 0x9f0000 + SF_SYNTHETIC = 0xc0000000 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1543,6 +1553,15 @@ const ( TIOCTIMESTAMP = 0x40107459 TIOCUCNTL = 0x80047466 TOSTOP = 0x400000 + UF_APPEND = 0x4 + UF_COMPRESSED = 0x20 + UF_DATAVAULT = 0x80 + UF_HIDDEN = 0x8000 + UF_IMMUTABLE = 0x2 + UF_NODUMP = 0x1 + UF_OPAQUE = 0x8 + UF_SETTABLE = 0xffff + UF_TRACKED = 0x40 VDISCARD = 0xf VDSUSP = 0xb VEOF = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index e36f5178d..ab044a742 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -1270,6 +1270,16 @@ const ( SEEK_END = 0x2 SEEK_HOLE = 0x3 SEEK_SET = 0x0 + SF_APPEND = 0x40000 + SF_ARCHIVED = 0x10000 + SF_DATALESS = 0x40000000 + SF_FIRMLINK = 0x800000 + SF_IMMUTABLE = 0x20000 + SF_NOUNLINK = 0x100000 + SF_RESTRICTED = 0x80000 + SF_SETTABLE = 0x3fff0000 + SF_SUPPORTED = 0x9f0000 + SF_SYNTHETIC = 0xc0000000 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1543,6 +1553,15 @@ const ( TIOCTIMESTAMP = 0x40107459 TIOCUCNTL = 0x80047466 TOSTOP = 0x400000 + UF_APPEND = 0x4 + UF_COMPRESSED = 0x20 + UF_DATAVAULT = 0x80 + UF_HIDDEN = 0x8000 + UF_IMMUTABLE = 0x2 + UF_NODUMP = 0x1 + UF_OPAQUE = 0x8 + UF_SETTABLE = 0xffff + UF_TRACKED = 0x40 VDISCARD = 0xf VDSUSP = 0xb VEOF = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index 440900112..f8c2c5138 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -151,6 +151,7 @@ const ( BIOCSETF = 0x80084267 BIOCSETFNR = 0x80084282 BIOCSETIF = 0x8020426c + BIOCSETVLANPCP = 0x80044285 BIOCSETWF = 0x8008427b BIOCSETZBUF = 0x800c4281 BIOCSHDRCMPLT = 0x80044275 @@ -447,7 +448,7 @@ const ( DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 DLT_INFINIBAND = 0xf7 DLT_IPFILTER = 0x74 - DLT_IPMB = 0xc7 + DLT_IPMB_KONTRON = 0xc7 DLT_IPMB_LINUX = 0xd1 DLT_IPMI_HPM_2 = 0x104 DLT_IPNET = 0xe2 @@ -487,10 +488,11 @@ const ( DLT_LINUX_LAPD = 0xb1 DLT_LINUX_PPP_WITHDIRECTION = 0xa6 DLT_LINUX_SLL = 0x71 + DLT_LINUX_SLL2 = 0x114 DLT_LOOP = 0x6c DLT_LORATAP = 0x10e DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x113 + DLT_MATCHING_MAX = 0x114 DLT_MATCHING_MIN = 0x68 DLT_MFR = 0xb6 DLT_MOST = 0xd3 @@ -734,6 +736,7 @@ const ( IPPROTO_CMTP = 0x26 IPPROTO_CPHB = 0x49 IPPROTO_CPNX = 0x48 + IPPROTO_DCCP = 0x21 IPPROTO_DDP = 0x25 IPPROTO_DGP = 0x56 IPPROTO_DIVERT = 0x102 @@ -814,7 +817,6 @@ const ( IPPROTO_SCTP = 0x84 IPPROTO_SDRP = 0x2a IPPROTO_SEND = 0x103 - IPPROTO_SEP = 0x21 IPPROTO_SHIM6 = 0x8c IPPROTO_SKIP = 0x39 IPPROTO_SPACER = 0x7fff @@ -911,6 +913,7 @@ const ( IPV6_V6ONLY = 0x1b IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 + IPV6_VLAN_PCP = 0x4b IP_ADD_MEMBERSHIP = 0xc IP_ADD_SOURCE_MEMBERSHIP = 0x46 IP_BINDANY = 0x18 @@ -989,8 +992,12 @@ const ( IP_TOS = 0x3 IP_TTL = 0x4 IP_UNBLOCK_SOURCE = 0x49 + IP_VLAN_PCP = 0x4b ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 
0x0 + ITIMER_VIRTUAL = 0x1 IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 @@ -1000,7 +1007,6 @@ const ( KERN_VERSION = 0x4 LOCAL_CONNWAIT = 0x4 LOCAL_CREDS = 0x2 - LOCAL_CREDS_PERSISTENT = 0x3 LOCAL_PEERCRED = 0x1 LOCAL_VENDOR = 0x80000000 LOCK_EX = 0x2 @@ -1179,6 +1185,8 @@ const ( O_NONBLOCK = 0x4 O_RDONLY = 0x0 O_RDWR = 0x2 + O_RESOLVE_BENEATH = 0x800000 + O_SEARCH = 0x40000 O_SHLOCK = 0x10 O_SYNC = 0x80 O_TRUNC = 0x400 @@ -1189,6 +1197,10 @@ const ( PARMRK = 0x8 PARODD = 0x2000 PENDIN = 0x20000000 + PIOD_READ_D = 0x1 + PIOD_READ_I = 0x3 + PIOD_WRITE_D = 0x2 + PIOD_WRITE_I = 0x4 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1196,6 +1208,60 @@ const ( PROT_NONE = 0x0 PROT_READ = 0x1 PROT_WRITE = 0x2 + PTRACE_DEFAULT = 0x1 + PTRACE_EXEC = 0x1 + PTRACE_FORK = 0x8 + PTRACE_LWP = 0x10 + PTRACE_SCE = 0x2 + PTRACE_SCX = 0x4 + PTRACE_SYSCALL = 0x6 + PTRACE_VFORK = 0x20 + PT_ATTACH = 0xa + PT_CLEARSTEP = 0x10 + PT_CONTINUE = 0x7 + PT_DETACH = 0xb + PT_FIRSTMACH = 0x40 + PT_FOLLOW_FORK = 0x17 + PT_GETDBREGS = 0x25 + PT_GETFPREGS = 0x23 + PT_GETFSBASE = 0x47 + PT_GETGSBASE = 0x49 + PT_GETLWPLIST = 0xf + PT_GETNUMLWPS = 0xe + PT_GETREGS = 0x21 + PT_GETXMMREGS = 0x40 + PT_GETXSTATE = 0x45 + PT_GETXSTATE_INFO = 0x44 + PT_GET_EVENT_MASK = 0x19 + PT_GET_SC_ARGS = 0x1b + PT_GET_SC_RET = 0x1c + PT_IO = 0xc + PT_KILL = 0x8 + PT_LWPINFO = 0xd + PT_LWP_EVENTS = 0x18 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_RESUME = 0x13 + PT_SETDBREGS = 0x26 + PT_SETFPREGS = 0x24 + PT_SETFSBASE = 0x48 + PT_SETGSBASE = 0x4a + PT_SETREGS = 0x22 + PT_SETSTEP = 0x11 + PT_SETXMMREGS = 0x41 + PT_SETXSTATE = 0x46 + PT_SET_EVENT_MASK = 0x1a + PT_STEP = 0x9 + PT_SUSPEND = 0x12 + PT_SYSCALL = 0x16 + PT_TO_SCE = 0x14 + PT_TO_SCX = 0x15 + PT_TRACE_ME = 0x0 + PT_VM_ENTRY = 0x29 + PT_VM_TIMESTAMP = 0x28 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + P_ZONEID = 0xc RLIMIT_AS = 0xa RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1320,10 +1386,12 @@ const ( SIOCGHWADDR = 0xc020693e SIOCGI2C = 0xc020693d SIOCGIFADDR = 0xc0206921 + SIOCGIFALIAS = 0xc044692d SIOCGIFBRDADDR = 0xc0206923 SIOCGIFCAP = 0xc020691f SIOCGIFCONF = 0xc0086924 SIOCGIFDESCR = 0xc020692a + SIOCGIFDOWNREASON = 0xc058699a SIOCGIFDSTADDR = 0xc0206922 SIOCGIFFIB = 0xc020695c SIOCGIFFLAGS = 0xc0206911 @@ -1414,6 +1482,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 + SO_RERROR = 0x20000 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_REUSEPORT_LB = 0x10000 @@ -1472,22 +1541,40 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_FAST_OPEN = 0x22 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_PAD = 0x0 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_WINDOW = 0x3 TCP_BBR_ACK_COMP_ALG = 0x448 + TCP_BBR_ALGORITHM = 0x43b TCP_BBR_DRAIN_INC_EXTRA = 0x43c TCP_BBR_DRAIN_PG = 0x42e TCP_BBR_EXTRA_GAIN = 0x449 + TCP_BBR_EXTRA_STATE = 0x453 + TCP_BBR_FLOOR_MIN_TSO = 0x454 + TCP_BBR_HDWR_PACE = 0x451 + TCP_BBR_HOLD_TARGET = 0x436 TCP_BBR_IWINTSO = 0x42b TCP_BBR_LOWGAIN_FD = 0x436 TCP_BBR_LOWGAIN_HALF = 0x435 TCP_BBR_LOWGAIN_THRESH = 0x434 TCP_BBR_MAX_RTO = 0x439 TCP_BBR_MIN_RTO = 0x438 + TCP_BBR_MIN_TOPACEOUT = 0x455 TCP_BBR_ONE_RETRAN = 0x431 TCP_BBR_PACE_CROSS = 0x442 TCP_BBR_PACE_DEL_TAR = 0x43f + TCP_BBR_PACE_OH = 0x435 TCP_BBR_PACE_PER_SEC = 0x43e TCP_BBR_PACE_SEG_MAX = 0x440 TCP_BBR_PACE_SEG_MIN = 0x441 + TCP_BBR_POLICER_DETECT = 0x457 TCP_BBR_PROBE_RTT_GAIN = 0x44d TCP_BBR_PROBE_RTT_INT = 0x430 TCP_BBR_PROBE_RTT_LEN = 0x44e @@ -1496,12 +1583,18 @@ const ( TCP_BBR_REC_OVER_HPTS = 0x43a 
TCP_BBR_RETRAN_WTSO = 0x44b TCP_BBR_RWND_IS_APP = 0x42f + TCP_BBR_SEND_IWND_IN_TSO = 0x44f TCP_BBR_STARTUP_EXIT_EPOCH = 0x43d TCP_BBR_STARTUP_LOSS_EXIT = 0x432 TCP_BBR_STARTUP_PG = 0x42d + TCP_BBR_TMR_PACE_OH = 0x448 + TCP_BBR_TSLIMITS = 0x434 + TCP_BBR_TSTMP_RAISES = 0x456 TCP_BBR_UNLIMITED = 0x43b TCP_BBR_USEDEL_RATE = 0x437 TCP_BBR_USE_LOWGAIN = 0x433 + TCP_BBR_USE_RACK_CHEAT = 0x450 + TCP_BBR_UTTER_MAX_TSO = 0x452 TCP_CA_NAME_MAX = 0x10 TCP_CCALGOOPT = 0x41 TCP_CONGESTION = 0x40 @@ -1541,6 +1634,7 @@ const ( TCP_PCAP_OUT = 0x800 TCP_RACK_EARLY_RECOV = 0x423 TCP_RACK_EARLY_SEG = 0x424 + TCP_RACK_GP_INCREASE = 0x446 TCP_RACK_IDLE_REDUCE_HIGH = 0x444 TCP_RACK_MIN_PACE = 0x445 TCP_RACK_MIN_PACE_SEG = 0x446 @@ -1554,7 +1648,6 @@ const ( TCP_RACK_PRR_SENDALOT = 0x421 TCP_RACK_REORD_FADE = 0x426 TCP_RACK_REORD_THRESH = 0x425 - TCP_RACK_SESS_CWV = 0x42a TCP_RACK_TLP_INC_VAR = 0x429 TCP_RACK_TLP_REDUCE = 0x41c TCP_RACK_TLP_THRESH = 0x427 @@ -1694,12 +1787,13 @@ const ( EIDRM = syscall.Errno(0x52) EILSEQ = syscall.Errno(0x56) EINPROGRESS = syscall.Errno(0x24) + EINTEGRITY = syscall.Errno(0x61) EINTR = syscall.Errno(0x4) EINVAL = syscall.Errno(0x16) EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x38) EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x60) + ELAST = syscall.Errno(0x61) ELOOP = syscall.Errno(0x3e) EMFILE = syscall.Errno(0x18) EMLINK = syscall.Errno(0x1f) @@ -1842,7 +1936,7 @@ var errorList = [...]struct { {32, "EPIPE", "broken pipe"}, {33, "EDOM", "numerical argument out of domain"}, {34, "ERANGE", "result too large"}, - {35, "EAGAIN", "resource temporarily unavailable"}, + {35, "EWOULDBLOCK", "resource temporarily unavailable"}, {36, "EINPROGRESS", "operation now in progress"}, {37, "EALREADY", "operation already in progress"}, {38, "ENOTSOCK", "socket operation on non-socket"}, @@ -1904,6 +1998,7 @@ var errorList = [...]struct { {94, "ECAPMODE", "not permitted in capability mode"}, {95, "ENOTRECOVERABLE", "state not recoverable"}, {96, "EOWNERDEAD", "previous owner died"}, + {97, "EINTEGRITY", "integrity check failed"}, } // Signal table diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index 64520d312..96310c3be 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -151,6 +151,7 @@ const ( BIOCSETF = 0x80104267 BIOCSETFNR = 0x80104282 BIOCSETIF = 0x8020426c + BIOCSETVLANPCP = 0x80044285 BIOCSETWF = 0x8010427b BIOCSETZBUF = 0x80184281 BIOCSHDRCMPLT = 0x80044275 @@ -447,7 +448,7 @@ const ( DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 DLT_INFINIBAND = 0xf7 DLT_IPFILTER = 0x74 - DLT_IPMB = 0xc7 + DLT_IPMB_KONTRON = 0xc7 DLT_IPMB_LINUX = 0xd1 DLT_IPMI_HPM_2 = 0x104 DLT_IPNET = 0xe2 @@ -487,10 +488,11 @@ const ( DLT_LINUX_LAPD = 0xb1 DLT_LINUX_PPP_WITHDIRECTION = 0xa6 DLT_LINUX_SLL = 0x71 + DLT_LINUX_SLL2 = 0x114 DLT_LOOP = 0x6c DLT_LORATAP = 0x10e DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x113 + DLT_MATCHING_MAX = 0x114 DLT_MATCHING_MIN = 0x68 DLT_MFR = 0xb6 DLT_MOST = 0xd3 @@ -734,6 +736,7 @@ const ( IPPROTO_CMTP = 0x26 IPPROTO_CPHB = 0x49 IPPROTO_CPNX = 0x48 + IPPROTO_DCCP = 0x21 IPPROTO_DDP = 0x25 IPPROTO_DGP = 0x56 IPPROTO_DIVERT = 0x102 @@ -814,7 +817,6 @@ const ( IPPROTO_SCTP = 0x84 IPPROTO_SDRP = 0x2a IPPROTO_SEND = 0x103 - IPPROTO_SEP = 0x21 IPPROTO_SHIM6 = 0x8c IPPROTO_SKIP = 0x39 IPPROTO_SPACER = 0x7fff @@ -911,6 +913,7 @@ const ( IPV6_V6ONLY = 0x1b IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 + IPV6_VLAN_PCP = 0x4b IP_ADD_MEMBERSHIP 
= 0xc IP_ADD_SOURCE_MEMBERSHIP = 0x46 IP_BINDANY = 0x18 @@ -989,8 +992,12 @@ const ( IP_TOS = 0x3 IP_TTL = 0x4 IP_UNBLOCK_SOURCE = 0x49 + IP_VLAN_PCP = 0x4b ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 @@ -1000,7 +1007,6 @@ const ( KERN_VERSION = 0x4 LOCAL_CONNWAIT = 0x4 LOCAL_CREDS = 0x2 - LOCAL_CREDS_PERSISTENT = 0x3 LOCAL_PEERCRED = 0x1 LOCAL_VENDOR = 0x80000000 LOCK_EX = 0x2 @@ -1180,6 +1186,8 @@ const ( O_NONBLOCK = 0x4 O_RDONLY = 0x0 O_RDWR = 0x2 + O_RESOLVE_BENEATH = 0x800000 + O_SEARCH = 0x40000 O_SHLOCK = 0x10 O_SYNC = 0x80 O_TRUNC = 0x400 @@ -1190,6 +1198,10 @@ const ( PARMRK = 0x8 PARODD = 0x2000 PENDIN = 0x20000000 + PIOD_READ_D = 0x1 + PIOD_READ_I = 0x3 + PIOD_WRITE_D = 0x2 + PIOD_WRITE_I = 0x4 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1197,6 +1209,58 @@ const ( PROT_NONE = 0x0 PROT_READ = 0x1 PROT_WRITE = 0x2 + PTRACE_DEFAULT = 0x1 + PTRACE_EXEC = 0x1 + PTRACE_FORK = 0x8 + PTRACE_LWP = 0x10 + PTRACE_SCE = 0x2 + PTRACE_SCX = 0x4 + PTRACE_SYSCALL = 0x6 + PTRACE_VFORK = 0x20 + PT_ATTACH = 0xa + PT_CLEARSTEP = 0x10 + PT_CONTINUE = 0x7 + PT_DETACH = 0xb + PT_FIRSTMACH = 0x40 + PT_FOLLOW_FORK = 0x17 + PT_GETDBREGS = 0x25 + PT_GETFPREGS = 0x23 + PT_GETFSBASE = 0x47 + PT_GETGSBASE = 0x49 + PT_GETLWPLIST = 0xf + PT_GETNUMLWPS = 0xe + PT_GETREGS = 0x21 + PT_GETXSTATE = 0x45 + PT_GETXSTATE_INFO = 0x44 + PT_GET_EVENT_MASK = 0x19 + PT_GET_SC_ARGS = 0x1b + PT_GET_SC_RET = 0x1c + PT_IO = 0xc + PT_KILL = 0x8 + PT_LWPINFO = 0xd + PT_LWP_EVENTS = 0x18 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_RESUME = 0x13 + PT_SETDBREGS = 0x26 + PT_SETFPREGS = 0x24 + PT_SETFSBASE = 0x48 + PT_SETGSBASE = 0x4a + PT_SETREGS = 0x22 + PT_SETSTEP = 0x11 + PT_SETXSTATE = 0x46 + PT_SET_EVENT_MASK = 0x1a + PT_STEP = 0x9 + PT_SUSPEND = 0x12 + PT_SYSCALL = 0x16 + PT_TO_SCE = 0x14 + PT_TO_SCX = 0x15 + PT_TRACE_ME = 0x0 + PT_VM_ENTRY = 0x29 + PT_VM_TIMESTAMP = 0x28 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + P_ZONEID = 0xc RLIMIT_AS = 0xa RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1321,10 +1385,12 @@ const ( SIOCGHWADDR = 0xc020693e SIOCGI2C = 0xc020693d SIOCGIFADDR = 0xc0206921 + SIOCGIFALIAS = 0xc044692d SIOCGIFBRDADDR = 0xc0206923 SIOCGIFCAP = 0xc020691f SIOCGIFCONF = 0xc0106924 SIOCGIFDESCR = 0xc020692a + SIOCGIFDOWNREASON = 0xc058699a SIOCGIFDSTADDR = 0xc0206922 SIOCGIFFIB = 0xc020695c SIOCGIFFLAGS = 0xc0206911 @@ -1415,6 +1481,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 + SO_RERROR = 0x20000 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_REUSEPORT_LB = 0x10000 @@ -1473,22 +1540,40 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_FAST_OPEN = 0x22 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_PAD = 0x0 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_WINDOW = 0x3 TCP_BBR_ACK_COMP_ALG = 0x448 + TCP_BBR_ALGORITHM = 0x43b TCP_BBR_DRAIN_INC_EXTRA = 0x43c TCP_BBR_DRAIN_PG = 0x42e TCP_BBR_EXTRA_GAIN = 0x449 + TCP_BBR_EXTRA_STATE = 0x453 + TCP_BBR_FLOOR_MIN_TSO = 0x454 + TCP_BBR_HDWR_PACE = 0x451 + TCP_BBR_HOLD_TARGET = 0x436 TCP_BBR_IWINTSO = 0x42b TCP_BBR_LOWGAIN_FD = 0x436 TCP_BBR_LOWGAIN_HALF = 0x435 TCP_BBR_LOWGAIN_THRESH = 0x434 TCP_BBR_MAX_RTO = 0x439 TCP_BBR_MIN_RTO = 0x438 + TCP_BBR_MIN_TOPACEOUT = 0x455 TCP_BBR_ONE_RETRAN = 0x431 TCP_BBR_PACE_CROSS = 0x442 TCP_BBR_PACE_DEL_TAR = 0x43f + TCP_BBR_PACE_OH = 0x435 TCP_BBR_PACE_PER_SEC = 0x43e TCP_BBR_PACE_SEG_MAX = 0x440 TCP_BBR_PACE_SEG_MIN = 0x441 + 
TCP_BBR_POLICER_DETECT = 0x457 TCP_BBR_PROBE_RTT_GAIN = 0x44d TCP_BBR_PROBE_RTT_INT = 0x430 TCP_BBR_PROBE_RTT_LEN = 0x44e @@ -1497,12 +1582,18 @@ const ( TCP_BBR_REC_OVER_HPTS = 0x43a TCP_BBR_RETRAN_WTSO = 0x44b TCP_BBR_RWND_IS_APP = 0x42f + TCP_BBR_SEND_IWND_IN_TSO = 0x44f TCP_BBR_STARTUP_EXIT_EPOCH = 0x43d TCP_BBR_STARTUP_LOSS_EXIT = 0x432 TCP_BBR_STARTUP_PG = 0x42d + TCP_BBR_TMR_PACE_OH = 0x448 + TCP_BBR_TSLIMITS = 0x434 + TCP_BBR_TSTMP_RAISES = 0x456 TCP_BBR_UNLIMITED = 0x43b TCP_BBR_USEDEL_RATE = 0x437 TCP_BBR_USE_LOWGAIN = 0x433 + TCP_BBR_USE_RACK_CHEAT = 0x450 + TCP_BBR_UTTER_MAX_TSO = 0x452 TCP_CA_NAME_MAX = 0x10 TCP_CCALGOOPT = 0x41 TCP_CONGESTION = 0x40 @@ -1542,6 +1633,7 @@ const ( TCP_PCAP_OUT = 0x800 TCP_RACK_EARLY_RECOV = 0x423 TCP_RACK_EARLY_SEG = 0x424 + TCP_RACK_GP_INCREASE = 0x446 TCP_RACK_IDLE_REDUCE_HIGH = 0x444 TCP_RACK_MIN_PACE = 0x445 TCP_RACK_MIN_PACE_SEG = 0x446 @@ -1555,7 +1647,6 @@ const ( TCP_RACK_PRR_SENDALOT = 0x421 TCP_RACK_REORD_FADE = 0x426 TCP_RACK_REORD_THRESH = 0x425 - TCP_RACK_SESS_CWV = 0x42a TCP_RACK_TLP_INC_VAR = 0x429 TCP_RACK_TLP_REDUCE = 0x41c TCP_RACK_TLP_THRESH = 0x427 @@ -1693,12 +1784,13 @@ const ( EIDRM = syscall.Errno(0x52) EILSEQ = syscall.Errno(0x56) EINPROGRESS = syscall.Errno(0x24) + EINTEGRITY = syscall.Errno(0x61) EINTR = syscall.Errno(0x4) EINVAL = syscall.Errno(0x16) EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x38) EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x60) + ELAST = syscall.Errno(0x61) ELOOP = syscall.Errno(0x3e) EMFILE = syscall.Errno(0x18) EMLINK = syscall.Errno(0x1f) @@ -1841,7 +1933,7 @@ var errorList = [...]struct { {32, "EPIPE", "broken pipe"}, {33, "EDOM", "numerical argument out of domain"}, {34, "ERANGE", "result too large"}, - {35, "EAGAIN", "resource temporarily unavailable"}, + {35, "EWOULDBLOCK", "resource temporarily unavailable"}, {36, "EINPROGRESS", "operation now in progress"}, {37, "EALREADY", "operation already in progress"}, {38, "ENOTSOCK", "socket operation on non-socket"}, @@ -1903,6 +1995,7 @@ var errorList = [...]struct { {94, "ECAPMODE", "not permitted in capability mode"}, {95, "ENOTRECOVERABLE", "state not recoverable"}, {96, "EOWNERDEAD", "previous owner died"}, + {97, "EINTEGRITY", "integrity check failed"}, } // Signal table diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index 99e9a0e06..777b69def 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -151,6 +151,7 @@ const ( BIOCSETF = 0x80084267 BIOCSETFNR = 0x80084282 BIOCSETIF = 0x8020426c + BIOCSETVLANPCP = 0x80044285 BIOCSETWF = 0x8008427b BIOCSETZBUF = 0x800c4281 BIOCSHDRCMPLT = 0x80044275 @@ -362,7 +363,7 @@ const ( CTL_KERN = 0x1 CTL_MAXNAME = 0x18 CTL_NET = 0x4 - DIOCGATTR = 0xc144648e + DIOCGATTR = 0xc148648e DIOCGDELETE = 0x80106488 DIOCGFLUSH = 0x20006487 DIOCGFRONTSTUFF = 0x40086486 @@ -377,7 +378,7 @@ const ( DIOCGSTRIPESIZE = 0x4008648b DIOCSKERNELDUMP = 0x804c6490 DIOCSKERNELDUMP_FREEBSD11 = 0x80046485 - DIOCZONECMD = 0xc06c648f + DIOCZONECMD = 0xc078648f DLT_A429 = 0xb8 DLT_A653_ICM = 0xb9 DLT_AIRONET_HEADER = 0x78 @@ -407,7 +408,9 @@ const ( DLT_C_HDLC_WITH_DIR = 0xcd DLT_DBUS = 0xe7 DLT_DECT = 0xdd + DLT_DISPLAYPORT_AUX = 0x113 DLT_DOCSIS = 0x8f + DLT_DOCSIS31_XRA31 = 0x111 DLT_DVB_CI = 0xeb DLT_ECONET = 0x73 DLT_EN10MB = 0x1 @@ -417,6 +420,7 @@ const ( DLT_ERF = 0xc5 DLT_ERF_ETH = 0xaf DLT_ERF_POS = 0xb0 + DLT_ETHERNET_MPACKET = 0x112 DLT_FC_2 = 0xe0 
DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 DLT_FDDI = 0xa @@ -444,7 +448,7 @@ const ( DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 DLT_INFINIBAND = 0xf7 DLT_IPFILTER = 0x74 - DLT_IPMB = 0xc7 + DLT_IPMB_KONTRON = 0xc7 DLT_IPMB_LINUX = 0xd1 DLT_IPMI_HPM_2 = 0x104 DLT_IPNET = 0xe2 @@ -484,9 +488,11 @@ const ( DLT_LINUX_LAPD = 0xb1 DLT_LINUX_PPP_WITHDIRECTION = 0xa6 DLT_LINUX_SLL = 0x71 + DLT_LINUX_SLL2 = 0x114 DLT_LOOP = 0x6c + DLT_LORATAP = 0x10e DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x109 + DLT_MATCHING_MAX = 0x114 DLT_MATCHING_MIN = 0x68 DLT_MFR = 0xb6 DLT_MOST = 0xd3 @@ -502,7 +508,9 @@ const ( DLT_NFC_LLCP = 0xf5 DLT_NFLOG = 0xef DLT_NG40 = 0xf4 + DLT_NORDIC_BLE = 0x110 DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b DLT_PCI_EXP = 0x7d DLT_PFLOG = 0x75 DLT_PFSYNC = 0x79 @@ -526,15 +534,18 @@ const ( DLT_RTAC_SERIAL = 0xfa DLT_SCCP = 0x8e DLT_SCTP = 0xf8 + DLT_SDLC = 0x10c DLT_SITA = 0xc4 DLT_SLIP = 0x8 DLT_SLIP_BSDOS = 0xd DLT_STANAG_5066_D_PDU = 0xed DLT_SUNATM = 0x7b DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TI_LLN_SNIFFER = 0x10d DLT_TZSP = 0x80 DLT_USB = 0xba DLT_USBPCAP = 0xf9 + DLT_USB_DARWIN = 0x10a DLT_USB_FREEBSD = 0xba DLT_USB_LINUX = 0xbd DLT_USB_LINUX_MMAPPED = 0xdc @@ -554,6 +565,7 @@ const ( DLT_USER7 = 0x9a DLT_USER8 = 0x9b DLT_USER9 = 0x9c + DLT_VSOCK = 0x10f DLT_WATTSTOPPER_DLM = 0x107 DLT_WIHART = 0xdf DLT_WIRESHARK_UPPER_PDU = 0xfc @@ -578,6 +590,7 @@ const ( ECHONL = 0x10 ECHOPRT = 0x20 EVFILT_AIO = -0x3 + EVFILT_EMPTY = -0xd EVFILT_FS = -0x9 EVFILT_LIO = -0xa EVFILT_PROC = -0x5 @@ -585,11 +598,12 @@ const ( EVFILT_READ = -0x1 EVFILT_SENDFILE = -0xc EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0xc + EVFILT_SYSCOUNT = 0xd EVFILT_TIMER = -0x7 EVFILT_USER = -0xb EVFILT_VNODE = -0x4 EVFILT_WRITE = -0x2 + EVNAMEMAP_NAME_SIZE = 0x40 EV_ADD = 0x1 EV_CLEAR = 0x20 EV_DELETE = 0x2 @@ -606,6 +620,7 @@ const ( EV_RECEIPT = 0x40 EV_SYSFLAGS = 0xf000 EXTA = 0x4b00 + EXTATTR_MAXNAMELEN = 0xff EXTATTR_NAMESPACE_EMPTY = 0x0 EXTATTR_NAMESPACE_SYSTEM = 0x2 EXTATTR_NAMESPACE_USER = 0x1 @@ -647,6 +662,7 @@ const ( IEXTEN = 0x400 IFAN_ARRIVAL = 0x0 IFAN_DEPARTURE = 0x1 + IFCAP_WOL_MAGIC = 0x2000 IFF_ALLMULTI = 0x200 IFF_ALTPHYS = 0x4000 IFF_BROADCAST = 0x2 @@ -663,6 +679,7 @@ const ( IFF_MONITOR = 0x40000 IFF_MULTICAST = 0x8000 IFF_NOARP = 0x80 + IFF_NOGROUP = 0x800000 IFF_OACTIVE = 0x400 IFF_POINTOPOINT = 0x10 IFF_PPROMISC = 0x20000 @@ -719,6 +736,7 @@ const ( IPPROTO_CMTP = 0x26 IPPROTO_CPHB = 0x49 IPPROTO_CPNX = 0x48 + IPPROTO_DCCP = 0x21 IPPROTO_DDP = 0x25 IPPROTO_DGP = 0x56 IPPROTO_DIVERT = 0x102 @@ -799,7 +817,6 @@ const ( IPPROTO_SCTP = 0x84 IPPROTO_SDRP = 0x2a IPPROTO_SEND = 0x103 - IPPROTO_SEP = 0x21 IPPROTO_SHIM6 = 0x8c IPPROTO_SKIP = 0x39 IPPROTO_SPACER = 0x7fff @@ -837,6 +854,7 @@ const ( IPV6_DSTOPTS = 0x32 IPV6_FLOWID = 0x43 IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_LEN = 0x14 IPV6_FLOWLABEL_MASK = 0xffff0f00 IPV6_FLOWTYPE = 0x44 IPV6_FRAGTTL = 0x78 @@ -857,13 +875,13 @@ const ( IPV6_MAX_GROUP_SRC_FILTER = 0x200 IPV6_MAX_MEMBERSHIPS = 0xfff IPV6_MAX_SOCK_SRC_FILTER = 0x80 - IPV6_MIN_MEMBERSHIPS = 0x1f IPV6_MMTU = 0x500 IPV6_MSFILTER = 0x4a IPV6_MULTICAST_HOPS = 0xa IPV6_MULTICAST_IF = 0x9 IPV6_MULTICAST_LOOP = 0xb IPV6_NEXTHOP = 0x30 + IPV6_ORIGDSTADDR = 0x48 IPV6_PATHMTU = 0x2c IPV6_PKTINFO = 0x2e IPV6_PORTRANGE = 0xe @@ -875,6 +893,7 @@ const ( IPV6_RECVFLOWID = 0x46 IPV6_RECVHOPLIMIT = 0x25 IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVORIGDSTADDR = 0x48 IPV6_RECVPATHMTU = 0x2b IPV6_RECVPKTINFO = 0x24 IPV6_RECVRSSBUCKETID = 0x47 @@ -894,6 +913,7 @@ const ( IPV6_V6ONLY = 0x1b IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 
0xf0 + IPV6_VLAN_PCP = 0x4b IP_ADD_MEMBERSHIP = 0xc IP_ADD_SOURCE_MEMBERSHIP = 0x46 IP_BINDANY = 0x18 @@ -935,10 +955,8 @@ const ( IP_MAX_MEMBERSHIPS = 0xfff IP_MAX_SOCK_MUTE_FILTER = 0x80 IP_MAX_SOCK_SRC_FILTER = 0x80 - IP_MAX_SOURCE_FILTER = 0x400 IP_MF = 0x2000 IP_MINTTL = 0x42 - IP_MIN_MEMBERSHIPS = 0x1f IP_MSFILTER = 0x4a IP_MSS = 0x240 IP_MULTICAST_IF = 0x9 @@ -948,6 +966,7 @@ const ( IP_OFFMASK = 0x1fff IP_ONESBCAST = 0x17 IP_OPTIONS = 0x1 + IP_ORIGDSTADDR = 0x1b IP_PORTRANGE = 0x13 IP_PORTRANGE_DEFAULT = 0x0 IP_PORTRANGE_HIGH = 0x1 @@ -956,6 +975,7 @@ const ( IP_RECVFLOWID = 0x5d IP_RECVIF = 0x14 IP_RECVOPTS = 0x5 + IP_RECVORIGDSTADDR = 0x1b IP_RECVRETOPTS = 0x6 IP_RECVRSSBUCKETID = 0x5e IP_RECVTOS = 0x44 @@ -972,8 +992,12 @@ const ( IP_TOS = 0x3 IP_TTL = 0x4 IP_UNBLOCK_SOURCE = 0x49 + IP_VLAN_PCP = 0x4b ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 @@ -983,7 +1007,6 @@ const ( KERN_VERSION = 0x4 LOCAL_CONNWAIT = 0x4 LOCAL_CREDS = 0x2 - LOCAL_CREDS_PERSISTENT = 0x3 LOCAL_PEERCRED = 0x1 LOCAL_VENDOR = 0x80000000 LOCK_EX = 0x2 @@ -1071,10 +1094,12 @@ const ( MNT_SUSPEND = 0x4 MNT_SYNCHRONOUS = 0x2 MNT_UNION = 0x20 + MNT_UNTRUSTED = 0x800000000 MNT_UPDATE = 0x10000 - MNT_UPDATEMASK = 0x2d8d0807e + MNT_UPDATEMASK = 0xad8d0807e MNT_USER = 0x8000 - MNT_VISFLAGMASK = 0x3fef0ffff + MNT_VERIFIED = 0x400000000 + MNT_VISFLAGMASK = 0xffef0ffff MNT_WAIT = 0x1 MSG_CMSG_CLOEXEC = 0x40000 MSG_COMPAT = 0x8000 @@ -1103,6 +1128,7 @@ const ( NFDBITS = 0x20 NOFLSH = 0x80000000 NOKERNINFO = 0x2000000 + NOTE_ABSTIME = 0x10 NOTE_ATTRIB = 0x8 NOTE_CHILD = 0x4 NOTE_CLOSE = 0x100 @@ -1159,6 +1185,8 @@ const ( O_NONBLOCK = 0x4 O_RDONLY = 0x0 O_RDWR = 0x2 + O_RESOLVE_BENEATH = 0x800000 + O_SEARCH = 0x40000 O_SHLOCK = 0x10 O_SYNC = 0x80 O_TRUNC = 0x400 @@ -1169,6 +1197,10 @@ const ( PARMRK = 0x8 PARODD = 0x2000 PENDIN = 0x20000000 + PIOD_READ_D = 0x1 + PIOD_READ_I = 0x3 + PIOD_WRITE_D = 0x2 + PIOD_WRITE_I = 0x4 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1176,6 +1208,53 @@ const ( PROT_NONE = 0x0 PROT_READ = 0x1 PROT_WRITE = 0x2 + PTRACE_DEFAULT = 0x1 + PTRACE_EXEC = 0x1 + PTRACE_FORK = 0x8 + PTRACE_LWP = 0x10 + PTRACE_SCE = 0x2 + PTRACE_SCX = 0x4 + PTRACE_SYSCALL = 0x6 + PTRACE_VFORK = 0x20 + PT_ATTACH = 0xa + PT_CLEARSTEP = 0x10 + PT_CONTINUE = 0x7 + PT_DETACH = 0xb + PT_FIRSTMACH = 0x40 + PT_FOLLOW_FORK = 0x17 + PT_GETDBREGS = 0x25 + PT_GETFPREGS = 0x23 + PT_GETLWPLIST = 0xf + PT_GETNUMLWPS = 0xe + PT_GETREGS = 0x21 + PT_GETVFPREGS = 0x40 + PT_GET_EVENT_MASK = 0x19 + PT_GET_SC_ARGS = 0x1b + PT_GET_SC_RET = 0x1c + PT_IO = 0xc + PT_KILL = 0x8 + PT_LWPINFO = 0xd + PT_LWP_EVENTS = 0x18 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_RESUME = 0x13 + PT_SETDBREGS = 0x26 + PT_SETFPREGS = 0x24 + PT_SETREGS = 0x22 + PT_SETSTEP = 0x11 + PT_SETVFPREGS = 0x41 + PT_SET_EVENT_MASK = 0x1a + PT_STEP = 0x9 + PT_SUSPEND = 0x12 + PT_SYSCALL = 0x16 + PT_TO_SCE = 0x14 + PT_TO_SCX = 0x15 + PT_TRACE_ME = 0x0 + PT_VM_ENTRY = 0x29 + PT_VM_TIMESTAMP = 0x28 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + P_ZONEID = 0xc RLIMIT_AS = 0xa RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1257,7 +1336,6 @@ const ( RTV_WEIGHT = 0x100 RT_ALL_FIBS = -0x1 RT_BLACKHOLE = 0x40 - RT_CACHING_CONTEXT = 0x1 RT_DEFAULT_FIB = 0x0 RT_HAS_GW = 0x80 RT_HAS_HEADER = 0x10 @@ -1267,15 +1345,17 @@ const ( RT_LLE_CACHE = 0x100 RT_MAY_LOOP = 0x8 RT_MAY_LOOP_BIT = 0x3 - RT_NORTREF = 0x2 RT_REJECT = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 SCM_BINTIME = 0x4 SCM_CREDS = 
0x3 + SCM_MONOTONIC = 0x6 + SCM_REALTIME = 0x5 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 + SCM_TIME_INFO = 0x7 SEEK_CUR = 0x1 SEEK_DATA = 0x3 SEEK_END = 0x2 @@ -1299,10 +1379,12 @@ const ( SIOCGHWADDR = 0xc020693e SIOCGI2C = 0xc020693d SIOCGIFADDR = 0xc0206921 + SIOCGIFALIAS = 0xc044692d SIOCGIFBRDADDR = 0xc0206923 SIOCGIFCAP = 0xc020691f SIOCGIFCONF = 0xc0086924 SIOCGIFDESCR = 0xc020692a + SIOCGIFDOWNREASON = 0xc058699a SIOCGIFDSTADDR = 0xc0206922 SIOCGIFFIB = 0xc020695c SIOCGIFFLAGS = 0xc0206911 @@ -1318,8 +1400,11 @@ const ( SIOCGIFPDSTADDR = 0xc0206948 SIOCGIFPHYS = 0xc0206935 SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFRSSHASH = 0xc0186997 + SIOCGIFRSSKEY = 0xc0946996 SIOCGIFSTATUS = 0xc331693b SIOCGIFXMEDIA = 0xc028698b + SIOCGLANPCP = 0xc0206998 SIOCGLOWAT = 0x40047303 SIOCGPGRP = 0x40047309 SIOCGPRIVATE_0 = 0xc0206950 @@ -1350,6 +1435,7 @@ const ( SIOCSIFPHYS = 0x80206936 SIOCSIFRVNET = 0xc020695b SIOCSIFVNET = 0xc020695a + SIOCSLANPCP = 0x80206999 SIOCSLOWAT = 0x80047302 SIOCSPGRP = 0x80047308 SIOCSTUNFIB = 0x8020695f @@ -1369,6 +1455,7 @@ const ( SO_BINTIME = 0x2000 SO_BROADCAST = 0x20 SO_DEBUG = 0x1 + SO_DOMAIN = 0x1019 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 SO_KEEPALIVE = 0x8 @@ -1377,6 +1464,7 @@ const ( SO_LISTENINCQLEN = 0x1013 SO_LISTENQLEN = 0x1012 SO_LISTENQLIMIT = 0x1011 + SO_MAX_PACING_RATE = 0x1018 SO_NOSIGPIPE = 0x800 SO_NO_DDP = 0x8000 SO_NO_OFFLOAD = 0x4000 @@ -1387,13 +1475,22 @@ const ( SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 + SO_RERROR = 0x20000 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 + SO_REUSEPORT_LB = 0x10000 SO_SETFIB = 0x1014 SO_SNDBUF = 0x1001 SO_SNDLOWAT = 0x1003 SO_SNDTIMEO = 0x1005 SO_TIMESTAMP = 0x400 + SO_TS_BINTIME = 0x1 + SO_TS_CLOCK = 0x1017 + SO_TS_CLOCK_MAX = 0x3 + SO_TS_DEFAULT = 0x0 + SO_TS_MONOTONIC = 0x3 + SO_TS_REALTIME = 0x2 + SO_TS_REALTIME_MICRO = 0x0 SO_TYPE = 0x1008 SO_USELOOPBACK = 0x40 SO_USER_COOKIE = 0x1015 @@ -1437,10 +1534,69 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_FAST_OPEN = 0x22 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_PAD = 0x0 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_WINDOW = 0x3 + TCP_BBR_ACK_COMP_ALG = 0x448 + TCP_BBR_ALGORITHM = 0x43b + TCP_BBR_DRAIN_INC_EXTRA = 0x43c + TCP_BBR_DRAIN_PG = 0x42e + TCP_BBR_EXTRA_GAIN = 0x449 + TCP_BBR_EXTRA_STATE = 0x453 + TCP_BBR_FLOOR_MIN_TSO = 0x454 + TCP_BBR_HDWR_PACE = 0x451 + TCP_BBR_HOLD_TARGET = 0x436 + TCP_BBR_IWINTSO = 0x42b + TCP_BBR_LOWGAIN_FD = 0x436 + TCP_BBR_LOWGAIN_HALF = 0x435 + TCP_BBR_LOWGAIN_THRESH = 0x434 + TCP_BBR_MAX_RTO = 0x439 + TCP_BBR_MIN_RTO = 0x438 + TCP_BBR_MIN_TOPACEOUT = 0x455 + TCP_BBR_ONE_RETRAN = 0x431 + TCP_BBR_PACE_CROSS = 0x442 + TCP_BBR_PACE_DEL_TAR = 0x43f + TCP_BBR_PACE_OH = 0x435 + TCP_BBR_PACE_PER_SEC = 0x43e + TCP_BBR_PACE_SEG_MAX = 0x440 + TCP_BBR_PACE_SEG_MIN = 0x441 + TCP_BBR_POLICER_DETECT = 0x457 + TCP_BBR_PROBE_RTT_GAIN = 0x44d + TCP_BBR_PROBE_RTT_INT = 0x430 + TCP_BBR_PROBE_RTT_LEN = 0x44e + TCP_BBR_RACK_RTT_USE = 0x44a + TCP_BBR_RECFORCE = 0x42c + TCP_BBR_REC_OVER_HPTS = 0x43a + TCP_BBR_RETRAN_WTSO = 0x44b + TCP_BBR_RWND_IS_APP = 0x42f + TCP_BBR_SEND_IWND_IN_TSO = 0x44f + TCP_BBR_STARTUP_EXIT_EPOCH = 0x43d + TCP_BBR_STARTUP_LOSS_EXIT = 0x432 + TCP_BBR_STARTUP_PG = 0x42d + TCP_BBR_TMR_PACE_OH = 0x448 + TCP_BBR_TSLIMITS = 0x434 + TCP_BBR_TSTMP_RAISES = 0x456 + TCP_BBR_UNLIMITED = 0x43b + TCP_BBR_USEDEL_RATE = 0x437 + TCP_BBR_USE_LOWGAIN = 0x433 + TCP_BBR_USE_RACK_CHEAT = 0x450 + TCP_BBR_UTTER_MAX_TSO = 0x452 
TCP_CA_NAME_MAX = 0x10 TCP_CCALGOOPT = 0x41 TCP_CONGESTION = 0x40 + TCP_DATA_AFTER_CLOSE = 0x44c + TCP_DELACK = 0x48 TCP_FASTOPEN = 0x401 + TCP_FASTOPEN_MAX_COOKIE_LEN = 0x10 + TCP_FASTOPEN_MIN_COOKIE_LEN = 0x4 + TCP_FASTOPEN_PSK_LEN = 0x10 TCP_FUNCTION_BLK = 0x2000 TCP_FUNCTION_NAME_LEN_MAX = 0x20 TCP_INFO = 0x20 @@ -1448,6 +1604,12 @@ const ( TCP_KEEPIDLE = 0x100 TCP_KEEPINIT = 0x80 TCP_KEEPINTVL = 0x200 + TCP_LOG = 0x22 + TCP_LOGBUF = 0x23 + TCP_LOGDUMP = 0x25 + TCP_LOGDUMPID = 0x26 + TCP_LOGID = 0x24 + TCP_LOG_ID_LEN = 0x40 TCP_MAXBURST = 0x4 TCP_MAXHLEN = 0x3c TCP_MAXOLEN = 0x28 @@ -1463,8 +1625,30 @@ const ( TCP_NOPUSH = 0x4 TCP_PCAP_IN = 0x1000 TCP_PCAP_OUT = 0x800 + TCP_RACK_EARLY_RECOV = 0x423 + TCP_RACK_EARLY_SEG = 0x424 + TCP_RACK_GP_INCREASE = 0x446 + TCP_RACK_IDLE_REDUCE_HIGH = 0x444 + TCP_RACK_MIN_PACE = 0x445 + TCP_RACK_MIN_PACE_SEG = 0x446 + TCP_RACK_MIN_TO = 0x422 + TCP_RACK_PACE_ALWAYS = 0x41f + TCP_RACK_PACE_MAX_SEG = 0x41e + TCP_RACK_PACE_REDUCE = 0x41d + TCP_RACK_PKT_DELAY = 0x428 + TCP_RACK_PROP = 0x41b + TCP_RACK_PROP_RATE = 0x420 + TCP_RACK_PRR_SENDALOT = 0x421 + TCP_RACK_REORD_FADE = 0x426 + TCP_RACK_REORD_THRESH = 0x425 + TCP_RACK_TLP_INC_VAR = 0x429 + TCP_RACK_TLP_REDUCE = 0x41c + TCP_RACK_TLP_THRESH = 0x427 + TCP_RACK_TLP_USE = 0x447 TCP_VENDOR = 0x80000000 TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 TIOCCBRK = 0x2000747a TIOCCDTR = 0x20007478 TIOCCONS = 0x80047462 @@ -1528,6 +1712,8 @@ const ( TIOCTIMESTAMP = 0x40107459 TIOCUCNTL = 0x80047466 TOSTOP = 0x400000 + UTIME_NOW = -0x1 + UTIME_OMIT = -0x2 VDISCARD = 0xf VDSUSP = 0xb VEOF = 0x0 @@ -1592,12 +1778,13 @@ const ( EIDRM = syscall.Errno(0x52) EILSEQ = syscall.Errno(0x56) EINPROGRESS = syscall.Errno(0x24) + EINTEGRITY = syscall.Errno(0x61) EINTR = syscall.Errno(0x4) EINVAL = syscall.Errno(0x16) EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x38) EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x60) + ELAST = syscall.Errno(0x61) ELOOP = syscall.Errno(0x3e) EMFILE = syscall.Errno(0x18) EMLINK = syscall.Errno(0x1f) @@ -1740,7 +1927,7 @@ var errorList = [...]struct { {32, "EPIPE", "broken pipe"}, {33, "EDOM", "numerical argument out of domain"}, {34, "ERANGE", "result too large"}, - {35, "EAGAIN", "resource temporarily unavailable"}, + {35, "EWOULDBLOCK", "resource temporarily unavailable"}, {36, "EINPROGRESS", "operation now in progress"}, {37, "EALREADY", "operation already in progress"}, {38, "ENOTSOCK", "socket operation on non-socket"}, @@ -1802,6 +1989,7 @@ var errorList = [...]struct { {94, "ECAPMODE", "not permitted in capability mode"}, {95, "ENOTRECOVERABLE", "state not recoverable"}, {96, "EOWNERDEAD", "previous owner died"}, + {97, "EINTEGRITY", "integrity check failed"}, } // Signal table diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go index 4c8377114..c557ac2db 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go @@ -151,6 +151,7 @@ const ( BIOCSETF = 0x80104267 BIOCSETFNR = 0x80104282 BIOCSETIF = 0x8020426c + BIOCSETVLANPCP = 0x80044285 BIOCSETWF = 0x8010427b BIOCSETZBUF = 0x80184281 BIOCSHDRCMPLT = 0x80044275 @@ -447,7 +448,7 @@ const ( DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 DLT_INFINIBAND = 0xf7 DLT_IPFILTER = 0x74 - DLT_IPMB = 0xc7 + DLT_IPMB_KONTRON = 0xc7 DLT_IPMB_LINUX = 0xd1 DLT_IPMI_HPM_2 = 0x104 DLT_IPNET = 0xe2 @@ -487,10 +488,11 @@ const ( DLT_LINUX_LAPD = 0xb1 DLT_LINUX_PPP_WITHDIRECTION = 0xa6 DLT_LINUX_SLL = 0x71 + 
DLT_LINUX_SLL2 = 0x114 DLT_LOOP = 0x6c DLT_LORATAP = 0x10e DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x113 + DLT_MATCHING_MAX = 0x114 DLT_MATCHING_MIN = 0x68 DLT_MFR = 0xb6 DLT_MOST = 0xd3 @@ -734,6 +736,7 @@ const ( IPPROTO_CMTP = 0x26 IPPROTO_CPHB = 0x49 IPPROTO_CPNX = 0x48 + IPPROTO_DCCP = 0x21 IPPROTO_DDP = 0x25 IPPROTO_DGP = 0x56 IPPROTO_DIVERT = 0x102 @@ -814,7 +817,6 @@ const ( IPPROTO_SCTP = 0x84 IPPROTO_SDRP = 0x2a IPPROTO_SEND = 0x103 - IPPROTO_SEP = 0x21 IPPROTO_SHIM6 = 0x8c IPPROTO_SKIP = 0x39 IPPROTO_SPACER = 0x7fff @@ -911,6 +913,7 @@ const ( IPV6_V6ONLY = 0x1b IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 + IPV6_VLAN_PCP = 0x4b IP_ADD_MEMBERSHIP = 0xc IP_ADD_SOURCE_MEMBERSHIP = 0x46 IP_BINDANY = 0x18 @@ -989,8 +992,12 @@ const ( IP_TOS = 0x3 IP_TTL = 0x4 IP_UNBLOCK_SOURCE = 0x49 + IP_VLAN_PCP = 0x4b ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 @@ -1000,7 +1007,6 @@ const ( KERN_VERSION = 0x4 LOCAL_CONNWAIT = 0x4 LOCAL_CREDS = 0x2 - LOCAL_CREDS_PERSISTENT = 0x3 LOCAL_PEERCRED = 0x1 LOCAL_VENDOR = 0x80000000 LOCK_EX = 0x2 @@ -1180,6 +1186,8 @@ const ( O_NONBLOCK = 0x4 O_RDONLY = 0x0 O_RDWR = 0x2 + O_RESOLVE_BENEATH = 0x800000 + O_SEARCH = 0x40000 O_SHLOCK = 0x10 O_SYNC = 0x80 O_TRUNC = 0x400 @@ -1190,6 +1198,10 @@ const ( PARMRK = 0x8 PARODD = 0x2000 PENDIN = 0x20000000 + PIOD_READ_D = 0x1 + PIOD_READ_I = 0x3 + PIOD_WRITE_D = 0x2 + PIOD_WRITE_I = 0x4 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 @@ -1197,6 +1209,51 @@ const ( PROT_NONE = 0x0 PROT_READ = 0x1 PROT_WRITE = 0x2 + PTRACE_DEFAULT = 0x1 + PTRACE_EXEC = 0x1 + PTRACE_FORK = 0x8 + PTRACE_LWP = 0x10 + PTRACE_SCE = 0x2 + PTRACE_SCX = 0x4 + PTRACE_SYSCALL = 0x6 + PTRACE_VFORK = 0x20 + PT_ATTACH = 0xa + PT_CLEARSTEP = 0x10 + PT_CONTINUE = 0x7 + PT_DETACH = 0xb + PT_FIRSTMACH = 0x40 + PT_FOLLOW_FORK = 0x17 + PT_GETDBREGS = 0x25 + PT_GETFPREGS = 0x23 + PT_GETLWPLIST = 0xf + PT_GETNUMLWPS = 0xe + PT_GETREGS = 0x21 + PT_GET_EVENT_MASK = 0x19 + PT_GET_SC_ARGS = 0x1b + PT_GET_SC_RET = 0x1c + PT_IO = 0xc + PT_KILL = 0x8 + PT_LWPINFO = 0xd + PT_LWP_EVENTS = 0x18 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_RESUME = 0x13 + PT_SETDBREGS = 0x26 + PT_SETFPREGS = 0x24 + PT_SETREGS = 0x22 + PT_SETSTEP = 0x11 + PT_SET_EVENT_MASK = 0x1a + PT_STEP = 0x9 + PT_SUSPEND = 0x12 + PT_SYSCALL = 0x16 + PT_TO_SCE = 0x14 + PT_TO_SCX = 0x15 + PT_TRACE_ME = 0x0 + PT_VM_ENTRY = 0x29 + PT_VM_TIMESTAMP = 0x28 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + P_ZONEID = 0xc RLIMIT_AS = 0xa RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1321,10 +1378,12 @@ const ( SIOCGHWADDR = 0xc020693e SIOCGI2C = 0xc020693d SIOCGIFADDR = 0xc0206921 + SIOCGIFALIAS = 0xc044692d SIOCGIFBRDADDR = 0xc0206923 SIOCGIFCAP = 0xc020691f SIOCGIFCONF = 0xc0106924 SIOCGIFDESCR = 0xc020692a + SIOCGIFDOWNREASON = 0xc058699a SIOCGIFDSTADDR = 0xc0206922 SIOCGIFFIB = 0xc020695c SIOCGIFFLAGS = 0xc0206911 @@ -1415,6 +1474,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 + SO_RERROR = 0x20000 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 SO_REUSEPORT_LB = 0x10000 @@ -1473,22 +1533,40 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_FAST_OPEN = 0x22 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_PAD = 0x0 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_WINDOW = 0x3 TCP_BBR_ACK_COMP_ALG = 0x448 + TCP_BBR_ALGORITHM = 0x43b TCP_BBR_DRAIN_INC_EXTRA = 0x43c TCP_BBR_DRAIN_PG = 0x42e TCP_BBR_EXTRA_GAIN = 0x449 + 
TCP_BBR_EXTRA_STATE = 0x453 + TCP_BBR_FLOOR_MIN_TSO = 0x454 + TCP_BBR_HDWR_PACE = 0x451 + TCP_BBR_HOLD_TARGET = 0x436 TCP_BBR_IWINTSO = 0x42b TCP_BBR_LOWGAIN_FD = 0x436 TCP_BBR_LOWGAIN_HALF = 0x435 TCP_BBR_LOWGAIN_THRESH = 0x434 TCP_BBR_MAX_RTO = 0x439 TCP_BBR_MIN_RTO = 0x438 + TCP_BBR_MIN_TOPACEOUT = 0x455 TCP_BBR_ONE_RETRAN = 0x431 TCP_BBR_PACE_CROSS = 0x442 TCP_BBR_PACE_DEL_TAR = 0x43f + TCP_BBR_PACE_OH = 0x435 TCP_BBR_PACE_PER_SEC = 0x43e TCP_BBR_PACE_SEG_MAX = 0x440 TCP_BBR_PACE_SEG_MIN = 0x441 + TCP_BBR_POLICER_DETECT = 0x457 TCP_BBR_PROBE_RTT_GAIN = 0x44d TCP_BBR_PROBE_RTT_INT = 0x430 TCP_BBR_PROBE_RTT_LEN = 0x44e @@ -1497,12 +1575,18 @@ const ( TCP_BBR_REC_OVER_HPTS = 0x43a TCP_BBR_RETRAN_WTSO = 0x44b TCP_BBR_RWND_IS_APP = 0x42f + TCP_BBR_SEND_IWND_IN_TSO = 0x44f TCP_BBR_STARTUP_EXIT_EPOCH = 0x43d TCP_BBR_STARTUP_LOSS_EXIT = 0x432 TCP_BBR_STARTUP_PG = 0x42d + TCP_BBR_TMR_PACE_OH = 0x448 + TCP_BBR_TSLIMITS = 0x434 + TCP_BBR_TSTMP_RAISES = 0x456 TCP_BBR_UNLIMITED = 0x43b TCP_BBR_USEDEL_RATE = 0x437 TCP_BBR_USE_LOWGAIN = 0x433 + TCP_BBR_USE_RACK_CHEAT = 0x450 + TCP_BBR_UTTER_MAX_TSO = 0x452 TCP_CA_NAME_MAX = 0x10 TCP_CCALGOOPT = 0x41 TCP_CONGESTION = 0x40 @@ -1542,6 +1626,7 @@ const ( TCP_PCAP_OUT = 0x800 TCP_RACK_EARLY_RECOV = 0x423 TCP_RACK_EARLY_SEG = 0x424 + TCP_RACK_GP_INCREASE = 0x446 TCP_RACK_IDLE_REDUCE_HIGH = 0x444 TCP_RACK_MIN_PACE = 0x445 TCP_RACK_MIN_PACE_SEG = 0x446 @@ -1555,7 +1640,6 @@ const ( TCP_RACK_PRR_SENDALOT = 0x421 TCP_RACK_REORD_FADE = 0x426 TCP_RACK_REORD_THRESH = 0x425 - TCP_RACK_SESS_CWV = 0x42a TCP_RACK_TLP_INC_VAR = 0x429 TCP_RACK_TLP_REDUCE = 0x41c TCP_RACK_TLP_THRESH = 0x427 @@ -1694,12 +1778,13 @@ const ( EIDRM = syscall.Errno(0x52) EILSEQ = syscall.Errno(0x56) EINPROGRESS = syscall.Errno(0x24) + EINTEGRITY = syscall.Errno(0x61) EINTR = syscall.Errno(0x4) EINVAL = syscall.Errno(0x16) EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x38) EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x60) + ELAST = syscall.Errno(0x61) ELOOP = syscall.Errno(0x3e) EMFILE = syscall.Errno(0x18) EMLINK = syscall.Errno(0x1f) @@ -1842,7 +1927,7 @@ var errorList = [...]struct { {32, "EPIPE", "broken pipe"}, {33, "EDOM", "numerical argument out of domain"}, {34, "ERANGE", "result too large"}, - {35, "EAGAIN", "resource temporarily unavailable"}, + {35, "EWOULDBLOCK", "resource temporarily unavailable"}, {36, "EINPROGRESS", "operation now in progress"}, {37, "EALREADY", "operation already in progress"}, {38, "ENOTSOCK", "socket operation on non-socket"}, @@ -1904,6 +1989,7 @@ var errorList = [...]struct { {94, "ECAPMODE", "not permitted in capability mode"}, {95, "ENOTRECOVERABLE", "state not recoverable"}, {96, "EOWNERDEAD", "previous owner died"}, + {97, "EINTEGRITY", "integrity check failed"}, } // Signal table diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go new file mode 100644 index 000000000..341b4d962 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go @@ -0,0 +1,2148 @@ +// mkerrors.sh -m64 +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build riscv64 && freebsd +// +build riscv64,freebsd + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs -- -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_ARP = 0x23 + AF_ATM = 0x1e + AF_BLUETOOTH = 0x24 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_HYLINK = 0xf + AF_HYPERV = 0x2b + AF_IEEE80211 = 0x25 + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x1c + AF_INET6_SDP = 0x2a + AF_INET_SDP = 0x28 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x2b + AF_NATM = 0x1d + AF_NETBIOS = 0x6 + AF_NETGRAPH = 0x20 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x11 + AF_SCLUSTER = 0x22 + AF_SIP = 0x18 + AF_SLOW = 0x21 + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VENDOR00 = 0x27 + AF_VENDOR01 = 0x29 + AF_VENDOR03 = 0x2d + AF_VENDOR04 = 0x2f + AF_VENDOR05 = 0x31 + AF_VENDOR06 = 0x33 + AF_VENDOR07 = 0x35 + AF_VENDOR08 = 0x37 + AF_VENDOR09 = 0x39 + AF_VENDOR10 = 0x3b + AF_VENDOR11 = 0x3d + AF_VENDOR12 = 0x3f + AF_VENDOR13 = 0x41 + AF_VENDOR14 = 0x43 + AF_VENDOR15 = 0x45 + AF_VENDOR16 = 0x47 + AF_VENDOR17 = 0x49 + AF_VENDOR18 = 0x4b + AF_VENDOR19 = 0x4d + AF_VENDOR20 = 0x4f + AF_VENDOR21 = 0x51 + AF_VENDOR22 = 0x53 + AF_VENDOR23 = 0x55 + AF_VENDOR24 = 0x57 + AF_VENDOR25 = 0x59 + AF_VENDOR26 = 0x5b + AF_VENDOR27 = 0x5d + AF_VENDOR28 = 0x5f + AF_VENDOR29 = 0x61 + AF_VENDOR30 = 0x63 + AF_VENDOR31 = 0x65 + AF_VENDOR32 = 0x67 + AF_VENDOR33 = 0x69 + AF_VENDOR34 = 0x6b + AF_VENDOR35 = 0x6d + AF_VENDOR36 = 0x6f + AF_VENDOR37 = 0x71 + AF_VENDOR38 = 0x73 + AF_VENDOR39 = 0x75 + AF_VENDOR40 = 0x77 + AF_VENDOR41 = 0x79 + AF_VENDOR42 = 0x7b + AF_VENDOR43 = 0x7d + AF_VENDOR44 = 0x7f + AF_VENDOR45 = 0x81 + AF_VENDOR46 = 0x83 + AF_VENDOR47 = 0x85 + ALTWERASE = 0x200 + B0 = 0x0 + B1000000 = 0xf4240 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1500000 = 0x16e360 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B2000000 = 0x1e8480 + B230400 = 0x38400 + B2400 = 0x960 + B2500000 = 0x2625a0 + B28800 = 0x7080 + B300 = 0x12c + B3000000 = 0x2dc6c0 + B3500000 = 0x3567e0 + B38400 = 0x9600 + B4000000 = 0x3d0900 + B460800 = 0x70800 + B4800 = 0x12c0 + B50 = 0x32 + B500000 = 0x7a120 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B921600 = 0xe1000 + B9600 = 0x2580 + BIOCFEEDBACK = 0x8004427c + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDIRECTION = 0x40044276 + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc0104279 + BIOCGETBUFMODE = 0x4004427d + BIOCGETIF = 0x4020426b + BIOCGETZMAX = 0x4008427f + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044272 + BIOCGRTIMEOUT = 0x4010426e + BIOCGSEESENT = 0x40044276 + BIOCGSTATS = 0x4008426f + BIOCGTSTAMP = 0x40044283 + BIOCIMMEDIATE = 0x80044270 + BIOCLOCK = 0x2000427a + BIOCPROMISC = 0x20004269 + BIOCROTZBUF = 0x40184280 + BIOCSBLEN = 0xc0044266 + BIOCSDIRECTION = 0x80044277 + BIOCSDLT = 0x80044278 + BIOCSETBUFMODE = 0x8004427e + BIOCSETF = 0x80104267 + BIOCSETFNR = 0x80104282 + BIOCSETIF = 0x8020426c + BIOCSETVLANPCP = 0x80044285 + BIOCSETWF = 0x8010427b + BIOCSETZBUF = 0x80184281 + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044273 + BIOCSRTIMEOUT = 0x8010426d + BIOCSSEESENT = 0x80044277 + BIOCSTSTAMP = 0x80044284 + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x8 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_BUFMODE_BUFFER = 0x1 + BPF_BUFMODE_ZBUF = 0x2 + BPF_DIV = 0x30 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 
+ BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x80000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MOD = 0x90 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_T_BINTIME = 0x2 + BPF_T_BINTIME_FAST = 0x102 + BPF_T_BINTIME_MONOTONIC = 0x202 + BPF_T_BINTIME_MONOTONIC_FAST = 0x302 + BPF_T_FAST = 0x100 + BPF_T_FLAG_MASK = 0x300 + BPF_T_FORMAT_MASK = 0x3 + BPF_T_MICROTIME = 0x0 + BPF_T_MICROTIME_FAST = 0x100 + BPF_T_MICROTIME_MONOTONIC = 0x200 + BPF_T_MICROTIME_MONOTONIC_FAST = 0x300 + BPF_T_MONOTONIC = 0x200 + BPF_T_MONOTONIC_FAST = 0x300 + BPF_T_NANOTIME = 0x1 + BPF_T_NANOTIME_FAST = 0x101 + BPF_T_NANOTIME_MONOTONIC = 0x201 + BPF_T_NANOTIME_MONOTONIC_FAST = 0x301 + BPF_T_NONE = 0x3 + BPF_T_NORMAL = 0x0 + BPF_W = 0x0 + BPF_X = 0x8 + BPF_XOR = 0xa0 + BRKINT = 0x2 + CAP_ACCEPT = 0x200000020000000 + CAP_ACL_CHECK = 0x400000000010000 + CAP_ACL_DELETE = 0x400000000020000 + CAP_ACL_GET = 0x400000000040000 + CAP_ACL_SET = 0x400000000080000 + CAP_ALL0 = 0x20007ffffffffff + CAP_ALL1 = 0x4000000001fffff + CAP_BIND = 0x200000040000000 + CAP_BINDAT = 0x200008000000400 + CAP_CHFLAGSAT = 0x200000000001400 + CAP_CONNECT = 0x200000080000000 + CAP_CONNECTAT = 0x200010000000400 + CAP_CREATE = 0x200000000000040 + CAP_EVENT = 0x400000000000020 + CAP_EXTATTR_DELETE = 0x400000000001000 + CAP_EXTATTR_GET = 0x400000000002000 + CAP_EXTATTR_LIST = 0x400000000004000 + CAP_EXTATTR_SET = 0x400000000008000 + CAP_FCHDIR = 0x200000000000800 + CAP_FCHFLAGS = 0x200000000001000 + CAP_FCHMOD = 0x200000000002000 + CAP_FCHMODAT = 0x200000000002400 + CAP_FCHOWN = 0x200000000004000 + CAP_FCHOWNAT = 0x200000000004400 + CAP_FCNTL = 0x200000000008000 + CAP_FCNTL_ALL = 0x78 + CAP_FCNTL_GETFL = 0x8 + CAP_FCNTL_GETOWN = 0x20 + CAP_FCNTL_SETFL = 0x10 + CAP_FCNTL_SETOWN = 0x40 + CAP_FEXECVE = 0x200000000000080 + CAP_FLOCK = 0x200000000010000 + CAP_FPATHCONF = 0x200000000020000 + CAP_FSCK = 0x200000000040000 + CAP_FSTAT = 0x200000000080000 + CAP_FSTATAT = 0x200000000080400 + CAP_FSTATFS = 0x200000000100000 + CAP_FSYNC = 0x200000000000100 + CAP_FTRUNCATE = 0x200000000000200 + CAP_FUTIMES = 0x200000000200000 + CAP_FUTIMESAT = 0x200000000200400 + CAP_GETPEERNAME = 0x200000100000000 + CAP_GETSOCKNAME = 0x200000200000000 + CAP_GETSOCKOPT = 0x200000400000000 + CAP_IOCTL = 0x400000000000080 + CAP_IOCTLS_ALL = 0x7fffffffffffffff + CAP_KQUEUE = 0x400000000100040 + CAP_KQUEUE_CHANGE = 0x400000000100000 + CAP_KQUEUE_EVENT = 0x400000000000040 + CAP_LINKAT_SOURCE = 0x200020000000400 + CAP_LINKAT_TARGET = 0x200000000400400 + CAP_LISTEN = 0x200000800000000 + CAP_LOOKUP = 0x200000000000400 + CAP_MAC_GET = 0x400000000000001 + CAP_MAC_SET = 0x400000000000002 + CAP_MKDIRAT = 0x200000000800400 + CAP_MKFIFOAT = 0x200000001000400 + CAP_MKNODAT = 0x200000002000400 + CAP_MMAP = 0x200000000000010 + CAP_MMAP_R = 0x20000000000001d + CAP_MMAP_RW = 0x20000000000001f + CAP_MMAP_RWX = 0x20000000000003f + CAP_MMAP_RX = 0x20000000000003d + CAP_MMAP_W = 0x20000000000001e + CAP_MMAP_WX = 0x20000000000003e + CAP_MMAP_X = 0x20000000000003c + CAP_PDGETPID = 0x400000000000200 + CAP_PDKILL = 0x400000000000800 + CAP_PDWAIT = 0x400000000000400 + CAP_PEELOFF = 
0x200001000000000 + CAP_POLL_EVENT = 0x400000000000020 + CAP_PREAD = 0x20000000000000d + CAP_PWRITE = 0x20000000000000e + CAP_READ = 0x200000000000001 + CAP_RECV = 0x200000000000001 + CAP_RENAMEAT_SOURCE = 0x200000004000400 + CAP_RENAMEAT_TARGET = 0x200040000000400 + CAP_RIGHTS_VERSION = 0x0 + CAP_RIGHTS_VERSION_00 = 0x0 + CAP_SEEK = 0x20000000000000c + CAP_SEEK_TELL = 0x200000000000004 + CAP_SEM_GETVALUE = 0x400000000000004 + CAP_SEM_POST = 0x400000000000008 + CAP_SEM_WAIT = 0x400000000000010 + CAP_SEND = 0x200000000000002 + CAP_SETSOCKOPT = 0x200002000000000 + CAP_SHUTDOWN = 0x200004000000000 + CAP_SOCK_CLIENT = 0x200007780000003 + CAP_SOCK_SERVER = 0x200007f60000003 + CAP_SYMLINKAT = 0x200000008000400 + CAP_TTYHOOK = 0x400000000000100 + CAP_UNLINKAT = 0x200000010000400 + CAP_UNUSED0_44 = 0x200080000000000 + CAP_UNUSED0_57 = 0x300000000000000 + CAP_UNUSED1_22 = 0x400000000200000 + CAP_UNUSED1_57 = 0x500000000000000 + CAP_WRITE = 0x200000000000002 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLOCK_BOOTTIME = 0x5 + CLOCK_MONOTONIC = 0x4 + CLOCK_MONOTONIC_COARSE = 0xc + CLOCK_MONOTONIC_FAST = 0xc + CLOCK_MONOTONIC_PRECISE = 0xb + CLOCK_PROCESS_CPUTIME_ID = 0xf + CLOCK_PROF = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_COARSE = 0xa + CLOCK_REALTIME_FAST = 0xa + CLOCK_REALTIME_PRECISE = 0x9 + CLOCK_SECOND = 0xd + CLOCK_THREAD_CPUTIME_ID = 0xe + CLOCK_UPTIME = 0x5 + CLOCK_UPTIME_FAST = 0x8 + CLOCK_UPTIME_PRECISE = 0x7 + CLOCK_VIRTUAL = 0x1 + CPUSTATES = 0x5 + CP_IDLE = 0x4 + CP_INTR = 0x3 + CP_NICE = 0x1 + CP_SYS = 0x2 + CP_USER = 0x0 + CREAD = 0x800 + CRTSCTS = 0x30000 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0x14 + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0x18 + CTL_NET = 0x4 + DIOCGATTR = 0xc148648e + DIOCGDELETE = 0x80106488 + DIOCGFLUSH = 0x20006487 + DIOCGFWHEADS = 0x40046483 + DIOCGFWSECTORS = 0x40046482 + DIOCGIDENT = 0x41006489 + DIOCGKERNELDUMP = 0xc0986492 + DIOCGMEDIASIZE = 0x40086481 + DIOCGPHYSPATH = 0x4400648d + DIOCGPROVIDERNAME = 0x4400648a + DIOCGSECTORSIZE = 0x40046480 + DIOCGSTRIPEOFFSET = 0x4008648c + DIOCGSTRIPESIZE = 0x4008648b + DIOCSKERNELDUMP = 0x80986491 + DIOCSKERNELDUMP_FREEBSD11 = 0x80046485 + DIOCSKERNELDUMP_FREEBSD12 = 0x80506490 + DIOCZONECMD = 0xc080648f + DLT_A429 = 0xb8 + DLT_A653_ICM = 0xb9 + DLT_AIRONET_HEADER = 0x78 + DLT_AOS = 0xde + DLT_APPLE_IP_OVER_IEEE1394 = 0x8a + DLT_ARCNET = 0x7 + DLT_ARCNET_LINUX = 0x81 + DLT_ATM_CLIP = 0x13 + DLT_ATM_RFC1483 = 0xb + DLT_AURORA = 0x7e + DLT_AX25 = 0x3 + DLT_AX25_KISS = 0xca + DLT_BACNET_MS_TP = 0xa5 + DLT_BLUETOOTH_BREDR_BB = 0xff + DLT_BLUETOOTH_HCI_H4 = 0xbb + DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9 + DLT_BLUETOOTH_LE_LL = 0xfb + DLT_BLUETOOTH_LE_LL_WITH_PHDR = 0x100 + DLT_BLUETOOTH_LINUX_MONITOR = 0xfe + DLT_CAN20B = 0xbe + DLT_CAN_SOCKETCAN = 0xe3 + DLT_CHAOS = 0x5 + DLT_CHDLC = 0x68 + DLT_CISCO_IOS = 0x76 + DLT_CLASS_NETBSD_RAWAF = 0x2240000 + DLT_C_HDLC = 0x68 + DLT_C_HDLC_WITH_DIR = 0xcd + DLT_DBUS = 0xe7 + DLT_DECT = 0xdd + DLT_DISPLAYPORT_AUX = 0x113 + DLT_DOCSIS = 0x8f + DLT_DOCSIS31_XRA31 = 0x111 + DLT_DVB_CI = 0xeb + DLT_ECONET = 0x73 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0x6d + DLT_EPON = 0x103 + DLT_ERF = 0xc5 + DLT_ERF_ETH = 0xaf + DLT_ERF_POS = 0xb0 + DLT_ETHERNET_MPACKET = 0x112 + DLT_FC_2 = 0xe0 + DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 + DLT_FDDI = 0xa + DLT_FLEXRAY = 0xd2 + DLT_FRELAY = 0x6b + DLT_FRELAY_WITH_DIR = 0xce + DLT_GCOM_SERIAL = 0xad + DLT_GCOM_T1E1 = 0xac + DLT_GPF_F = 0xab + 
DLT_GPF_T = 0xaa + DLT_GPRS_LLC = 0xa9 + DLT_GSMTAP_ABIS = 0xda + DLT_GSMTAP_UM = 0xd9 + DLT_IBM_SN = 0x92 + DLT_IBM_SP = 0x91 + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_IEEE802_11_RADIO_AVS = 0xa3 + DLT_IEEE802_15_4 = 0xc3 + DLT_IEEE802_15_4_LINUX = 0xbf + DLT_IEEE802_15_4_NOFCS = 0xe6 + DLT_IEEE802_15_4_NONASK_PHY = 0xd7 + DLT_IEEE802_16_MAC_CPS = 0xbc + DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1 + DLT_INFINIBAND = 0xf7 + DLT_IPFILTER = 0x74 + DLT_IPMB_KONTRON = 0xc7 + DLT_IPMB_LINUX = 0xd1 + DLT_IPMI_HPM_2 = 0x104 + DLT_IPNET = 0xe2 + DLT_IPOIB = 0xf2 + DLT_IPV4 = 0xe4 + DLT_IPV6 = 0xe5 + DLT_IP_OVER_FC = 0x7a + DLT_ISO_14443 = 0x108 + DLT_JUNIPER_ATM1 = 0x89 + DLT_JUNIPER_ATM2 = 0x87 + DLT_JUNIPER_ATM_CEMIC = 0xee + DLT_JUNIPER_CHDLC = 0xb5 + DLT_JUNIPER_ES = 0x84 + DLT_JUNIPER_ETHER = 0xb2 + DLT_JUNIPER_FIBRECHANNEL = 0xea + DLT_JUNIPER_FRELAY = 0xb4 + DLT_JUNIPER_GGSN = 0x85 + DLT_JUNIPER_ISM = 0xc2 + DLT_JUNIPER_MFR = 0x86 + DLT_JUNIPER_MLFR = 0x83 + DLT_JUNIPER_MLPPP = 0x82 + DLT_JUNIPER_MONITOR = 0xa4 + DLT_JUNIPER_PIC_PEER = 0xae + DLT_JUNIPER_PPP = 0xb3 + DLT_JUNIPER_PPPOE = 0xa7 + DLT_JUNIPER_PPPOE_ATM = 0xa8 + DLT_JUNIPER_SERVICES = 0x88 + DLT_JUNIPER_SRX_E2E = 0xe9 + DLT_JUNIPER_ST = 0xc8 + DLT_JUNIPER_VP = 0xb7 + DLT_JUNIPER_VS = 0xe8 + DLT_LAPB_WITH_DIR = 0xcf + DLT_LAPD = 0xcb + DLT_LIN = 0xd4 + DLT_LINUX_EVDEV = 0xd8 + DLT_LINUX_IRDA = 0x90 + DLT_LINUX_LAPD = 0xb1 + DLT_LINUX_PPP_WITHDIRECTION = 0xa6 + DLT_LINUX_SLL = 0x71 + DLT_LINUX_SLL2 = 0x114 + DLT_LOOP = 0x6c + DLT_LORATAP = 0x10e + DLT_LTALK = 0x72 + DLT_MATCHING_MAX = 0x114 + DLT_MATCHING_MIN = 0x68 + DLT_MFR = 0xb6 + DLT_MOST = 0xd3 + DLT_MPEG_2_TS = 0xf3 + DLT_MPLS = 0xdb + DLT_MTP2 = 0x8c + DLT_MTP2_WITH_PHDR = 0x8b + DLT_MTP3 = 0x8d + DLT_MUX27010 = 0xec + DLT_NETANALYZER = 0xf0 + DLT_NETANALYZER_TRANSPARENT = 0xf1 + DLT_NETLINK = 0xfd + DLT_NFC_LLCP = 0xf5 + DLT_NFLOG = 0xef + DLT_NG40 = 0xf4 + DLT_NORDIC_BLE = 0x110 + DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b + DLT_PCI_EXP = 0x7d + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x79 + DLT_PKTAP = 0x102 + DLT_PPI = 0xc0 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0xe + DLT_PPP_ETHER = 0x33 + DLT_PPP_PPPD = 0xa6 + DLT_PPP_SERIAL = 0x32 + DLT_PPP_WITH_DIR = 0xcc + DLT_PPP_WITH_DIRECTION = 0xa6 + DLT_PRISM_HEADER = 0x77 + DLT_PROFIBUS_DL = 0x101 + DLT_PRONET = 0x4 + DLT_RAIF1 = 0xc6 + DLT_RAW = 0xc + DLT_RDS = 0x109 + DLT_REDBACK_SMARTEDGE = 0x20 + DLT_RIO = 0x7c + DLT_RTAC_SERIAL = 0xfa + DLT_SCCP = 0x8e + DLT_SCTP = 0xf8 + DLT_SDLC = 0x10c + DLT_SITA = 0xc4 + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xd + DLT_STANAG_5066_D_PDU = 0xed + DLT_SUNATM = 0x7b + DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TI_LLN_SNIFFER = 0x10d + DLT_TZSP = 0x80 + DLT_USB = 0xba + DLT_USBPCAP = 0xf9 + DLT_USB_DARWIN = 0x10a + DLT_USB_FREEBSD = 0xba + DLT_USB_LINUX = 0xbd + DLT_USB_LINUX_MMAPPED = 0xdc + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DLT_VSOCK = 0x10f + DLT_WATTSTOPPER_DLM = 0x107 + DLT_WIHART = 0xdf + DLT_WIRESHARK_UPPER_PDU = 0xfc + DLT_X2E_SERIAL = 0xd5 + DLT_X2E_XORAYA = 0xd6 + DLT_ZWAVE_R1_R2 = 0x105 + DLT_ZWAVE_R3 = 0x106 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + 
ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EHE_DEAD_PRIORITY = -0x1 + EVFILT_AIO = -0x3 + EVFILT_EMPTY = -0xd + EVFILT_FS = -0x9 + EVFILT_LIO = -0xa + EVFILT_PROC = -0x5 + EVFILT_PROCDESC = -0x8 + EVFILT_READ = -0x1 + EVFILT_SENDFILE = -0xc + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0xd + EVFILT_TIMER = -0x7 + EVFILT_USER = -0xb + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EVNAMEMAP_NAME_SIZE = 0x40 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_DROP = 0x1000 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_FLAG2 = 0x4000 + EV_FORCEONESHOT = 0x100 + EV_ONESHOT = 0x10 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf000 + EXTA = 0x4b00 + EXTATTR_MAXNAMELEN = 0xff + EXTATTR_NAMESPACE_EMPTY = 0x0 + EXTATTR_NAMESPACE_SYSTEM = 0x2 + EXTATTR_NAMESPACE_USER = 0x1 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_NONE = -0xc8 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_ADD_SEALS = 0x13 + F_CANCEL = 0x5 + F_DUP2FD = 0xa + F_DUP2FD_CLOEXEC = 0x12 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x11 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0xb + F_GETOWN = 0x5 + F_GET_SEALS = 0x14 + F_ISUNIONSTACK = 0x15 + F_KINFO = 0x16 + F_OGETLK = 0x7 + F_OK = 0x0 + F_OSETLK = 0x8 + F_OSETLKW = 0x9 + F_RDAHEAD = 0x10 + F_RDLCK = 0x1 + F_READAHEAD = 0xf + F_SEAL_GROW = 0x4 + F_SEAL_SEAL = 0x1 + F_SEAL_SHRINK = 0x2 + F_SEAL_WRITE = 0x8 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0xc + F_SETLKW = 0xd + F_SETLK_REMOTE = 0xe + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_UNLCKSYS = 0x4 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFCAP_WOL_MAGIC = 0x2000 + IFF_ALLMULTI = 0x200 + IFF_ALTPHYS = 0x4000 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x218f72 + IFF_CANTCONFIG = 0x10000 + IFF_DEBUG = 0x4 + IFF_DRV_OACTIVE = 0x400 + IFF_DRV_RUNNING = 0x40 + IFF_DYING = 0x200000 + IFF_KNOWSEPOCH = 0x20 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MONITOR = 0x40000 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_NOGROUP = 0x800000 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PPROMISC = 0x20000 + IFF_PROMISC = 0x100 + IFF_RENAMING = 0x400000 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_STATICARP = 0x80000 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_BRIDGE = 0xd1 + IFT_CARP = 0xf8 + IFT_IEEE1394 = 0x90 + IFT_INFINIBAND = 0xc7 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_PPP = 0x17 + IFT_PROPVIRTUAL = 0x35 + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IN_NETMASK_DEFAULT = 0xffffff00 + IN_RFC3021_MASK = 0xfffffffe + IPPROTO_3PC = 0x22 + IPPROTO_ADFS = 0x44 + IPPROTO_AH = 0x33 + IPPROTO_AHIP = 0x3d + IPPROTO_APES = 0x63 + IPPROTO_ARGUS = 0xd + IPPROTO_AX25 = 0x5d + IPPROTO_BHA = 0x31 + IPPROTO_BLT = 0x1e + IPPROTO_BRSATMON = 0x4c + IPPROTO_CARP = 0x70 + IPPROTO_CFTP = 0x3e + IPPROTO_CHAOS = 0x10 + IPPROTO_CMTP = 0x26 + IPPROTO_CPHB = 0x49 + IPPROTO_CPNX = 0x48 + IPPROTO_DCCP = 0x21 + IPPROTO_DDP = 0x25 + IPPROTO_DGP = 0x56 + IPPROTO_DIVERT = 0x102 + 
IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_EMCON = 0xe + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GMTP = 0x64 + IPPROTO_GRE = 0x2f + IPPROTO_HELLO = 0x3f + IPPROTO_HIP = 0x8b + IPPROTO_HMP = 0x14 + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IDPR = 0x23 + IPPROTO_IDRP = 0x2d + IPPROTO_IGMP = 0x2 + IPPROTO_IGP = 0x55 + IPPROTO_IGRP = 0x58 + IPPROTO_IL = 0x28 + IPPROTO_INLSP = 0x34 + IPPROTO_INP = 0x20 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPCV = 0x47 + IPPROTO_IPEIP = 0x5e + IPPROTO_IPIP = 0x4 + IPPROTO_IPPC = 0x43 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_IRTP = 0x1c + IPPROTO_KRYPTOLAN = 0x41 + IPPROTO_LARP = 0x5b + IPPROTO_LEAF1 = 0x19 + IPPROTO_LEAF2 = 0x1a + IPPROTO_MAX = 0x100 + IPPROTO_MEAS = 0x13 + IPPROTO_MH = 0x87 + IPPROTO_MHRP = 0x30 + IPPROTO_MICP = 0x5f + IPPROTO_MOBILE = 0x37 + IPPROTO_MPLS = 0x89 + IPPROTO_MTP = 0x5c + IPPROTO_MUX = 0x12 + IPPROTO_ND = 0x4d + IPPROTO_NHRP = 0x36 + IPPROTO_NONE = 0x3b + IPPROTO_NSP = 0x1f + IPPROTO_NVPII = 0xb + IPPROTO_OLD_DIVERT = 0xfe + IPPROTO_OSPFIGP = 0x59 + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PGM = 0x71 + IPPROTO_PIGP = 0x9 + IPPROTO_PIM = 0x67 + IPPROTO_PRM = 0x15 + IPPROTO_PUP = 0xc + IPPROTO_PVP = 0x4b + IPPROTO_RAW = 0xff + IPPROTO_RCCMON = 0xa + IPPROTO_RDP = 0x1b + IPPROTO_RESERVED_253 = 0xfd + IPPROTO_RESERVED_254 = 0xfe + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_RVD = 0x42 + IPPROTO_SATEXPAK = 0x40 + IPPROTO_SATMON = 0x45 + IPPROTO_SCCSP = 0x60 + IPPROTO_SCTP = 0x84 + IPPROTO_SDRP = 0x2a + IPPROTO_SEND = 0x103 + IPPROTO_SHIM6 = 0x8c + IPPROTO_SKIP = 0x39 + IPPROTO_SPACER = 0x7fff + IPPROTO_SRPC = 0x5a + IPPROTO_ST = 0x7 + IPPROTO_SVMTP = 0x52 + IPPROTO_SWIPE = 0x35 + IPPROTO_TCF = 0x57 + IPPROTO_TCP = 0x6 + IPPROTO_TLSP = 0x38 + IPPROTO_TP = 0x1d + IPPROTO_TPXX = 0x27 + IPPROTO_TRUNK1 = 0x17 + IPPROTO_TRUNK2 = 0x18 + IPPROTO_TTP = 0x54 + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPPROTO_VINES = 0x53 + IPPROTO_VISA = 0x46 + IPPROTO_VMTP = 0x51 + IPPROTO_WBEXPAK = 0x4f + IPPROTO_WBMON = 0x4e + IPPROTO_WSN = 0x4a + IPPROTO_XNET = 0xf + IPPROTO_XTP = 0x24 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_BINDANY = 0x40 + IPV6_BINDMULTI = 0x41 + IPV6_BINDV6ONLY = 0x1b + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_FLOWID = 0x43 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_LEN = 0x14 + IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FLOWTYPE = 0x44 + IPV6_FRAGTTL = 0x78 + IPV6_FW_ADD = 0x1e + IPV6_FW_DEL = 0x1f + IPV6_FW_FLUSH = 0x20 + IPV6_FW_GET = 0x22 + IPV6_FW_ZERO = 0x21 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPSEC_POLICY = 0x1c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXOPTHDR = 0x800 + IPV6_MAXPACKET = 0xffff + IPV6_MAX_GROUP_SRC_FILTER = 0x200 + IPV6_MAX_MEMBERSHIPS = 0xfff + IPV6_MAX_SOCK_SRC_FILTER = 0x80 + IPV6_MMTU = 0x500 + IPV6_MSFILTER = 0x4a + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_ORIGDSTADDR = 0x48 + IPV6_PATHMTU = 0x2c + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_PREFER_TEMPADDR = 0x3f + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVFLOWID = 0x46 + IPV6_RECVHOPLIMIT 
= 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVORIGDSTADDR = 0x48 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRSSBUCKETID = 0x47 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RSSBUCKETID = 0x45 + IPV6_RSS_LISTEN_BUCKET = 0x42 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IPV6_VLAN_PCP = 0x4b + IP_ADD_MEMBERSHIP = 0xc + IP_ADD_SOURCE_MEMBERSHIP = 0x46 + IP_BINDANY = 0x18 + IP_BINDMULTI = 0x19 + IP_BLOCK_SOURCE = 0x48 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DONTFRAG = 0x43 + IP_DROP_MEMBERSHIP = 0xd + IP_DROP_SOURCE_MEMBERSHIP = 0x47 + IP_DUMMYNET3 = 0x31 + IP_DUMMYNET_CONFIGURE = 0x3c + IP_DUMMYNET_DEL = 0x3d + IP_DUMMYNET_FLUSH = 0x3e + IP_DUMMYNET_GET = 0x40 + IP_FLOWID = 0x5a + IP_FLOWTYPE = 0x5b + IP_FW3 = 0x30 + IP_FW_ADD = 0x32 + IP_FW_DEL = 0x33 + IP_FW_FLUSH = 0x34 + IP_FW_GET = 0x36 + IP_FW_NAT_CFG = 0x38 + IP_FW_NAT_DEL = 0x39 + IP_FW_NAT_GET_CONFIG = 0x3a + IP_FW_NAT_GET_LOG = 0x3b + IP_FW_RESETLOG = 0x37 + IP_FW_TABLE_ADD = 0x28 + IP_FW_TABLE_DEL = 0x29 + IP_FW_TABLE_FLUSH = 0x2a + IP_FW_TABLE_GETSIZE = 0x2b + IP_FW_TABLE_LIST = 0x2c + IP_FW_ZERO = 0x35 + IP_HDRINCL = 0x2 + IP_IPSEC_POLICY = 0x15 + IP_MAXPACKET = 0xffff + IP_MAX_GROUP_SRC_FILTER = 0x200 + IP_MAX_MEMBERSHIPS = 0xfff + IP_MAX_SOCK_MUTE_FILTER = 0x80 + IP_MAX_SOCK_SRC_FILTER = 0x80 + IP_MF = 0x2000 + IP_MINTTL = 0x42 + IP_MSFILTER = 0x4a + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_VIF = 0xe + IP_OFFMASK = 0x1fff + IP_ONESBCAST = 0x17 + IP_OPTIONS = 0x1 + IP_ORIGDSTADDR = 0x1b + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVFLOWID = 0x5d + IP_RECVIF = 0x14 + IP_RECVOPTS = 0x5 + IP_RECVORIGDSTADDR = 0x1b + IP_RECVRETOPTS = 0x6 + IP_RECVRSSBUCKETID = 0x5e + IP_RECVTOS = 0x44 + IP_RECVTTL = 0x41 + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RSSBUCKETID = 0x5c + IP_RSS_LISTEN_BUCKET = 0x1a + IP_RSVP_OFF = 0x10 + IP_RSVP_ON = 0xf + IP_RSVP_VIF_OFF = 0x12 + IP_RSVP_VIF_ON = 0x11 + IP_SENDSRCADDR = 0x7 + IP_TOS = 0x3 + IP_TTL = 0x4 + IP_UNBLOCK_SOURCE = 0x49 + IP_VLAN_PCP = 0x4b + ISIG = 0x80 + ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + LOCAL_CONNWAIT = 0x4 + LOCAL_CREDS = 0x2 + LOCAL_CREDS_PERSISTENT = 0x3 + LOCAL_PEERCRED = 0x1 + LOCAL_VENDOR = 0x80000000 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_AUTOSYNC = 0x7 + MADV_CORE = 0x9 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x5 + MADV_NOCORE = 0x8 + MADV_NORMAL = 0x0 + MADV_NOSYNC = 0x6 + MADV_PROTECT = 0xa + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_WILLNEED = 0x3 + MAP_32BIT = 0x80000 + MAP_ALIGNED_SUPER = 0x1000000 + MAP_ALIGNMENT_MASK = -0x1000000 + MAP_ALIGNMENT_SHIFT = 0x18 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_COPY = 0x2 + MAP_EXCL = 0x4000 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_GUARD = 0x2000 + MAP_HASSEMAPHORE = 0x200 + MAP_NOCORE = 0x20000 + MAP_NOSYNC = 0x800 + MAP_PREFAULT_READ = 0x40000 + MAP_PRIVATE = 0x2 + MAP_RESERVED0020 = 0x20 + MAP_RESERVED0040 = 0x40 + MAP_RESERVED0080 = 0x80 + 
MAP_RESERVED0100 = 0x100 + MAP_SHARED = 0x1 + MAP_STACK = 0x400 + MCAST_BLOCK_SOURCE = 0x54 + MCAST_EXCLUDE = 0x2 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x50 + MCAST_JOIN_SOURCE_GROUP = 0x52 + MCAST_LEAVE_GROUP = 0x51 + MCAST_LEAVE_SOURCE_GROUP = 0x53 + MCAST_UNBLOCK_SOURCE = 0x55 + MCAST_UNDEFINED = 0x0 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MFD_ALLOW_SEALING = 0x2 + MFD_CLOEXEC = 0x1 + MFD_HUGETLB = 0x4 + MFD_HUGE_16GB = -0x78000000 + MFD_HUGE_16MB = 0x60000000 + MFD_HUGE_1GB = 0x78000000 + MFD_HUGE_1MB = 0x50000000 + MFD_HUGE_256MB = 0x70000000 + MFD_HUGE_2GB = 0x7c000000 + MFD_HUGE_2MB = 0x54000000 + MFD_HUGE_32MB = 0x64000000 + MFD_HUGE_512KB = 0x4c000000 + MFD_HUGE_512MB = 0x74000000 + MFD_HUGE_64KB = 0x40000000 + MFD_HUGE_8MB = 0x5c000000 + MFD_HUGE_MASK = 0xfc000000 + MFD_HUGE_SHIFT = 0x1a + MNT_ACLS = 0x8000000 + MNT_ASYNC = 0x40 + MNT_AUTOMOUNTED = 0x200000000 + MNT_BYFSID = 0x8000000 + MNT_CMDFLAGS = 0x300d0f0000 + MNT_DEFEXPORTED = 0x200 + MNT_DELEXPORT = 0x20000 + MNT_EMPTYDIR = 0x2000000000 + MNT_EXKERB = 0x800 + MNT_EXPORTANON = 0x400 + MNT_EXPORTED = 0x100 + MNT_EXPUBLIC = 0x20000000 + MNT_EXRDONLY = 0x80 + MNT_EXTLS = 0x4000000000 + MNT_EXTLSCERT = 0x8000000000 + MNT_EXTLSCERTUSER = 0x10000000000 + MNT_FORCE = 0x80000 + MNT_GJOURNAL = 0x2000000 + MNT_IGNORE = 0x800000 + MNT_LAZY = 0x3 + MNT_LOCAL = 0x1000 + MNT_MULTILABEL = 0x4000000 + MNT_NFS4ACLS = 0x10 + MNT_NOATIME = 0x10000000 + MNT_NOCLUSTERR = 0x40000000 + MNT_NOCLUSTERW = 0x80000000 + MNT_NOCOVER = 0x1000000000 + MNT_NOEXEC = 0x4 + MNT_NONBUSY = 0x4000000 + MNT_NOSUID = 0x8 + MNT_NOSYMFOLLOW = 0x400000 + MNT_NOWAIT = 0x2 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SNAPSHOT = 0x1000000 + MNT_SOFTDEP = 0x200000 + MNT_SUIDDIR = 0x100000 + MNT_SUJ = 0x100000000 + MNT_SUSPEND = 0x4 + MNT_SYNCHRONOUS = 0x2 + MNT_UNION = 0x20 + MNT_UNTRUSTED = 0x800000000 + MNT_UPDATE = 0x10000 + MNT_UPDATEMASK = 0xad8d0807e + MNT_USER = 0x8000 + MNT_VERIFIED = 0x400000000 + MNT_VISFLAGMASK = 0xffef0ffff + MNT_WAIT = 0x1 + MSG_CMSG_CLOEXEC = 0x40000 + MSG_COMPAT = 0x8000 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOF = 0x100 + MSG_EOR = 0x8 + MSG_NBIO = 0x4000 + MSG_NOSIGNAL = 0x20000 + MSG_NOTIFICATION = 0x2000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x80000 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x2 + MS_SYNC = 0x0 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_IFLISTL = 0x5 + NET_RT_IFMALIST = 0x4 + NET_RT_NHGRP = 0x7 + NET_RT_NHOP = 0x6 + NFDBITS = 0x40 + NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 + NOTE_ABSTIME = 0x10 + NOTE_ATTRIB = 0x8 + NOTE_CHILD = 0x4 + NOTE_CLOSE = 0x100 + NOTE_CLOSE_WRITE = 0x200 + NOTE_DELETE = 0x1 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FFAND = 0x40000000 + NOTE_FFCOPY = 0xc0000000 + NOTE_FFCTRLMASK = 0xc0000000 + NOTE_FFLAGSMASK = 0xffffff + NOTE_FFNOP = 0x0 + NOTE_FFOR = 0x80000000 + NOTE_FILE_POLL = 0x2 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_MSECONDS = 0x2 + NOTE_NSECONDS = 0x8 + NOTE_OPEN = 0x80 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_READ = 0x400 + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_SECONDS = 0x1 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRIGGER = 0x1000000 + NOTE_USECONDS = 0x4 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + ONLCR = 0x2 + ONLRET = 0x40 + ONOCR = 0x20 + ONOEOT = 0x8 + OPOST = 0x1 + OXTABS = 0x4 + O_ACCMODE = 0x3 + 
O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x100000 + O_CREAT = 0x200 + O_DIRECT = 0x10000 + O_DIRECTORY = 0x20000 + O_DSYNC = 0x1000000 + O_EMPTY_PATH = 0x2000000 + O_EXCL = 0x800 + O_EXEC = 0x40000 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_PATH = 0x400000 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RESOLVE_BENEATH = 0x800000 + O_SEARCH = 0x40000 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_TTY_INIT = 0x80000 + O_VERIFY = 0x200000 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PIOD_READ_D = 0x1 + PIOD_READ_I = 0x3 + PIOD_WRITE_D = 0x2 + PIOD_WRITE_I = 0x4 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PTRACE_DEFAULT = 0x1 + PTRACE_EXEC = 0x1 + PTRACE_FORK = 0x8 + PTRACE_LWP = 0x10 + PTRACE_SCE = 0x2 + PTRACE_SCX = 0x4 + PTRACE_SYSCALL = 0x6 + PTRACE_VFORK = 0x20 + PT_ATTACH = 0xa + PT_CLEARSTEP = 0x10 + PT_CONTINUE = 0x7 + PT_COREDUMP = 0x1d + PT_DETACH = 0xb + PT_FIRSTMACH = 0x40 + PT_FOLLOW_FORK = 0x17 + PT_GETDBREGS = 0x25 + PT_GETFPREGS = 0x23 + PT_GETLWPLIST = 0xf + PT_GETNUMLWPS = 0xe + PT_GETREGS = 0x21 + PT_GET_EVENT_MASK = 0x19 + PT_GET_SC_ARGS = 0x1b + PT_GET_SC_RET = 0x1c + PT_IO = 0xc + PT_KILL = 0x8 + PT_LWPINFO = 0xd + PT_LWP_EVENTS = 0x18 + PT_READ_D = 0x2 + PT_READ_I = 0x1 + PT_RESUME = 0x13 + PT_SETDBREGS = 0x26 + PT_SETFPREGS = 0x24 + PT_SETREGS = 0x22 + PT_SETSTEP = 0x11 + PT_SET_EVENT_MASK = 0x1a + PT_STEP = 0x9 + PT_SUSPEND = 0x12 + PT_SYSCALL = 0x16 + PT_TO_SCE = 0x14 + PT_TO_SCX = 0x15 + PT_TRACE_ME = 0x0 + PT_VM_ENTRY = 0x29 + PT_VM_TIMESTAMP = 0x28 + PT_WRITE_D = 0x5 + PT_WRITE_I = 0x4 + P_ZONEID = 0xc + RLIMIT_AS = 0xa + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 + RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BRD = 0x7 + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_MAX = 0x8 + RTAX_NETMASK = 0x2 + RTA_AUTHOR = 0x40 + RTA_BRD = 0x80 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_NETMASK = 0x4 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_FIXEDMTU = 0x80000 + RTF_FMASK = 0x1004d808 + RTF_GATEWAY = 0x2 + RTF_GWFLAG_COMPAT = 0x80000000 + RTF_HOST = 0x4 + RTF_LLDATA = 0x400 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MULTICAST = 0x800000 + RTF_PINNED = 0x100000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x40000 + RTF_REJECT = 0x8 + RTF_STATIC = 0x800 + RTF_STICKY = 0x10000000 + RTF_UP = 0x1 + RTF_XRESOLVE = 0x200 + RTM_ADD = 0x1 + RTM_CHANGE = 0x3 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DELMADDR = 0x10 + RTM_GET = 0x4 + RTM_IEEE80211 = 0x12 + RTM_IFANNOUNCE = 0x11 + RTM_IFINFO = 0xe + RTM_LOCK = 0x8 + RTM_LOSING = 0x5 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_NEWMADDR = 0xf + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_RTTUNIT = 0xf4240 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RTV_WEIGHT = 0x100 + RT_ALL_FIBS = -0x1 + RT_BLACKHOLE = 0x40 + RT_DEFAULT_FIB = 0x0 + RT_DEFAULT_WEIGHT = 0x1 + RT_HAS_GW = 0x80 + RT_HAS_HEADER = 0x10 + RT_HAS_HEADER_BIT = 0x4 + RT_L2_ME = 0x4 + 
RT_L2_ME_BIT = 0x2 + RT_LLE_CACHE = 0x100 + RT_MAX_WEIGHT = 0xffffff + RT_MAY_LOOP = 0x8 + RT_MAY_LOOP_BIT = 0x3 + RT_REJECT = 0x20 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_BINTIME = 0x4 + SCM_CREDS = 0x3 + SCM_CREDS2 = 0x8 + SCM_MONOTONIC = 0x6 + SCM_REALTIME = 0x5 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x2 + SCM_TIME_INFO = 0x7 + SEEK_CUR = 0x1 + SEEK_DATA = 0x3 + SEEK_END = 0x2 + SEEK_HOLE = 0x4 + SEEK_SET = 0x0 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCAIFGROUP = 0x80286987 + SIOCATMARK = 0x40047307 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFGROUP = 0x80286989 + SIOCDIFPHYADDR = 0x80206949 + SIOCGDRVSPEC = 0xc028697b + SIOCGETSGCNT = 0xc0207210 + SIOCGETVIFCNT = 0xc028720f + SIOCGHIWAT = 0x40047301 + SIOCGHWADDR = 0xc020693e + SIOCGI2C = 0xc020693d + SIOCGIFADDR = 0xc0206921 + SIOCGIFALIAS = 0xc044692d + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCAP = 0xc020691f + SIOCGIFCONF = 0xc0106924 + SIOCGIFDATA = 0x8020692c + SIOCGIFDESCR = 0xc020692a + SIOCGIFDOWNREASON = 0xc058699a + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFIB = 0xc020695c + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGMEMB = 0xc028698a + SIOCGIFGROUP = 0xc0286988 + SIOCGIFINDEX = 0xc0206920 + SIOCGIFMAC = 0xc0206926 + SIOCGIFMEDIA = 0xc0306938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc0206933 + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPDSTADDR = 0xc0206948 + SIOCGIFPHYS = 0xc0206935 + SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFRSSHASH = 0xc0186997 + SIOCGIFRSSKEY = 0xc0946996 + SIOCGIFSTATUS = 0xc331693b + SIOCGIFXMEDIA = 0xc030698b + SIOCGLANPCP = 0xc0206998 + SIOCGLOWAT = 0x40047303 + SIOCGPGRP = 0x40047309 + SIOCGPRIVATE_0 = 0xc0206950 + SIOCGPRIVATE_1 = 0xc0206951 + SIOCGTUNFIB = 0xc020695e + SIOCIFCREATE = 0xc020697a + SIOCIFCREATE2 = 0xc020697c + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc0106978 + SIOCSDRVSPEC = 0x8028697b + SIOCSHIWAT = 0x80047300 + SIOCSIFADDR = 0x8020690c + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFCAP = 0x8020691e + SIOCSIFDESCR = 0x80206929 + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFIB = 0x8020695d + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020693c + SIOCSIFMAC = 0x80206927 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x80206934 + SIOCSIFNAME = 0x80206928 + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPHYADDR = 0x80406946 + SIOCSIFPHYS = 0x80206936 + SIOCSIFRVNET = 0xc020695b + SIOCSIFVNET = 0xc020695a + SIOCSLANPCP = 0x80206999 + SIOCSLOWAT = 0x80047302 + SIOCSPGRP = 0x80047308 + SIOCSTUNFIB = 0x8020695f + SOCK_CLOEXEC = 0x10000000 + SOCK_DGRAM = 0x2 + SOCK_MAXADDRLEN = 0xff + SOCK_NONBLOCK = 0x20000000 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_LOCAL = 0x0 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_ACCEPTFILTER = 0x1000 + SO_BINTIME = 0x2000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DOMAIN = 0x1019 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LABEL = 0x1009 + SO_LINGER = 0x80 + SO_LISTENINCQLEN = 0x1013 + SO_LISTENQLEN = 0x1012 + SO_LISTENQLIMIT = 0x1011 + SO_MAX_PACING_RATE = 0x1018 + SO_NOSIGPIPE = 0x800 + SO_NO_DDP = 0x8000 + SO_NO_OFFLOAD = 0x4000 + SO_OOBINLINE = 0x100 + SO_PEERLABEL = 0x1010 + SO_PROTOCOL = 0x1016 + SO_PROTOTYPE = 0x1016 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_RERROR = 0x20000 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_REUSEPORT_LB = 0x10000 + SO_SETFIB = 0x1014 + 
SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_TIMESTAMP = 0x400 + SO_TS_BINTIME = 0x1 + SO_TS_CLOCK = 0x1017 + SO_TS_CLOCK_MAX = 0x3 + SO_TS_DEFAULT = 0x0 + SO_TS_MONOTONIC = 0x3 + SO_TS_REALTIME = 0x2 + SO_TS_REALTIME_MICRO = 0x0 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SO_USER_COOKIE = 0x1015 + SO_VENDOR = 0x80000000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IFWHT = 0xe000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TAB3 = 0x4 + TABDLY = 0x4 + TCIFLUSH = 0x1 + TCIOFF = 0x3 + TCIOFLUSH = 0x3 + TCION = 0x4 + TCOFLUSH = 0x2 + TCOOFF = 0x1 + TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_FAST_OPEN = 0x22 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_PAD = 0x0 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_WINDOW = 0x3 + TCP_BBR_ACK_COMP_ALG = 0x448 + TCP_BBR_ALGORITHM = 0x43b + TCP_BBR_DRAIN_INC_EXTRA = 0x43c + TCP_BBR_DRAIN_PG = 0x42e + TCP_BBR_EXTRA_GAIN = 0x449 + TCP_BBR_EXTRA_STATE = 0x453 + TCP_BBR_FLOOR_MIN_TSO = 0x454 + TCP_BBR_HDWR_PACE = 0x451 + TCP_BBR_HOLD_TARGET = 0x436 + TCP_BBR_IWINTSO = 0x42b + TCP_BBR_LOWGAIN_FD = 0x436 + TCP_BBR_LOWGAIN_HALF = 0x435 + TCP_BBR_LOWGAIN_THRESH = 0x434 + TCP_BBR_MAX_RTO = 0x439 + TCP_BBR_MIN_RTO = 0x438 + TCP_BBR_MIN_TOPACEOUT = 0x455 + TCP_BBR_ONE_RETRAN = 0x431 + TCP_BBR_PACE_CROSS = 0x442 + TCP_BBR_PACE_DEL_TAR = 0x43f + TCP_BBR_PACE_OH = 0x435 + TCP_BBR_PACE_PER_SEC = 0x43e + TCP_BBR_PACE_SEG_MAX = 0x440 + TCP_BBR_PACE_SEG_MIN = 0x441 + TCP_BBR_POLICER_DETECT = 0x457 + TCP_BBR_PROBE_RTT_GAIN = 0x44d + TCP_BBR_PROBE_RTT_INT = 0x430 + TCP_BBR_PROBE_RTT_LEN = 0x44e + TCP_BBR_RACK_INIT_RATE = 0x458 + TCP_BBR_RACK_RTT_USE = 0x44a + TCP_BBR_RECFORCE = 0x42c + TCP_BBR_REC_OVER_HPTS = 0x43a + TCP_BBR_RETRAN_WTSO = 0x44b + TCP_BBR_RWND_IS_APP = 0x42f + TCP_BBR_SEND_IWND_IN_TSO = 0x44f + TCP_BBR_STARTUP_EXIT_EPOCH = 0x43d + TCP_BBR_STARTUP_LOSS_EXIT = 0x432 + TCP_BBR_STARTUP_PG = 0x42d + TCP_BBR_TMR_PACE_OH = 0x448 + TCP_BBR_TSLIMITS = 0x434 + TCP_BBR_TSTMP_RAISES = 0x456 + TCP_BBR_UNLIMITED = 0x43b + TCP_BBR_USEDEL_RATE = 0x437 + TCP_BBR_USE_LOWGAIN = 0x433 + TCP_BBR_USE_RACK_CHEAT = 0x450 + TCP_BBR_USE_RACK_RR = 0x450 + TCP_BBR_UTTER_MAX_TSO = 0x452 + TCP_CA_NAME_MAX = 0x10 + TCP_CCALGOOPT = 0x41 + TCP_CONGESTION = 0x40 + TCP_DATA_AFTER_CLOSE = 0x44c + TCP_DEFER_OPTIONS = 0x470 + TCP_DELACK = 0x48 + TCP_FASTOPEN = 0x401 + TCP_FASTOPEN_MAX_COOKIE_LEN = 0x10 + TCP_FASTOPEN_MIN_COOKIE_LEN = 0x4 + TCP_FASTOPEN_PSK_LEN = 0x10 + TCP_FAST_RSM_HACK = 0x471 + TCP_FIN_IS_RST = 0x49 + TCP_FUNCTION_BLK = 0x2000 + TCP_FUNCTION_NAME_LEN_MAX = 0x20 + TCP_HDWR_RATE_CAP = 0x46a + TCP_HDWR_UP_ONLY = 0x46c + TCP_IDLE_REDUCE = 0x46 + TCP_INFO = 0x20 + TCP_IWND_NB = 0x2b + TCP_IWND_NSEG = 0x2c + TCP_KEEPCNT = 0x400 + TCP_KEEPIDLE = 0x100 + TCP_KEEPINIT = 0x80 + TCP_KEEPINTVL = 0x200 + TCP_LOG = 0x22 + TCP_LOGBUF = 0x23 + TCP_LOGDUMP = 0x25 + TCP_LOGDUMPID = 0x26 + TCP_LOGID = 0x24 + TCP_LOGID_CNT = 0x2e + TCP_LOG_ID_LEN = 0x40 + TCP_LOG_LIMIT = 0x4a + TCP_LOG_TAG = 0x2f + TCP_MAXBURST = 0x4 + TCP_MAXHLEN = 0x3c + TCP_MAXOLEN = 0x28 + TCP_MAXPEAKRATE = 0x45 + 
TCP_MAXSEG = 0x2 + TCP_MAXUNACKTIME = 0x44 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x4 + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x10 + TCP_MINMSS = 0xd8 + TCP_MSS = 0x218 + TCP_NODELAY = 0x1 + TCP_NOOPT = 0x8 + TCP_NOPUSH = 0x4 + TCP_NO_PRR = 0x462 + TCP_PACING_RATE_CAP = 0x46b + TCP_PCAP_IN = 0x1000 + TCP_PCAP_OUT = 0x800 + TCP_PERF_INFO = 0x4e + TCP_PROC_ACCOUNTING = 0x4c + TCP_RACK_ABC_VAL = 0x46d + TCP_RACK_CHEAT_NOT_CONF_RATE = 0x459 + TCP_RACK_DO_DETECTION = 0x449 + TCP_RACK_EARLY_RECOV = 0x423 + TCP_RACK_EARLY_SEG = 0x424 + TCP_RACK_FORCE_MSEG = 0x45d + TCP_RACK_GP_INCREASE = 0x446 + TCP_RACK_GP_INCREASE_CA = 0x45a + TCP_RACK_GP_INCREASE_REC = 0x45c + TCP_RACK_GP_INCREASE_SS = 0x45b + TCP_RACK_IDLE_REDUCE_HIGH = 0x444 + TCP_RACK_MBUF_QUEUE = 0x41a + TCP_RACK_MEASURE_CNT = 0x46f + TCP_RACK_MIN_PACE = 0x445 + TCP_RACK_MIN_PACE_SEG = 0x446 + TCP_RACK_MIN_TO = 0x422 + TCP_RACK_NONRXT_CFG_RATE = 0x463 + TCP_RACK_NO_PUSH_AT_MAX = 0x466 + TCP_RACK_PACE_ALWAYS = 0x41f + TCP_RACK_PACE_MAX_SEG = 0x41e + TCP_RACK_PACE_RATE_CA = 0x45e + TCP_RACK_PACE_RATE_REC = 0x460 + TCP_RACK_PACE_RATE_SS = 0x45f + TCP_RACK_PACE_REDUCE = 0x41d + TCP_RACK_PACE_TO_FILL = 0x467 + TCP_RACK_PACING_BETA = 0x472 + TCP_RACK_PACING_BETA_ECN = 0x473 + TCP_RACK_PKT_DELAY = 0x428 + TCP_RACK_PROFILE = 0x469 + TCP_RACK_PROP = 0x41b + TCP_RACK_PROP_RATE = 0x420 + TCP_RACK_PRR_SENDALOT = 0x421 + TCP_RACK_REORD_FADE = 0x426 + TCP_RACK_REORD_THRESH = 0x425 + TCP_RACK_RR_CONF = 0x459 + TCP_RACK_TIMER_SLOP = 0x474 + TCP_RACK_TLP_INC_VAR = 0x429 + TCP_RACK_TLP_REDUCE = 0x41c + TCP_RACK_TLP_THRESH = 0x427 + TCP_RACK_TLP_USE = 0x447 + TCP_REC_ABC_VAL = 0x46e + TCP_REMOTE_UDP_ENCAPS_PORT = 0x47 + TCP_REUSPORT_LB_NUMA = 0x402 + TCP_REUSPORT_LB_NUMA_CURDOM = -0x1 + TCP_REUSPORT_LB_NUMA_NODOM = -0x2 + TCP_RXTLS_ENABLE = 0x29 + TCP_RXTLS_MODE = 0x2a + TCP_SHARED_CWND_ALLOWED = 0x4b + TCP_SHARED_CWND_ENABLE = 0x464 + TCP_SHARED_CWND_TIME_LIMIT = 0x468 + TCP_STATS = 0x21 + TCP_TIMELY_DYN_ADJ = 0x465 + TCP_TLS_MODE_IFNET = 0x2 + TCP_TLS_MODE_NONE = 0x0 + TCP_TLS_MODE_SW = 0x1 + TCP_TLS_MODE_TOE = 0x3 + TCP_TXTLS_ENABLE = 0x27 + TCP_TXTLS_MODE = 0x28 + TCP_USER_LOG = 0x30 + TCP_USE_CMP_ACKS = 0x4d + TCP_VENDOR = 0x80000000 + TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCONS = 0x80047462 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLUSH = 0x80047410 + TIOCGDRAINWAIT = 0x40047456 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGPGRP = 0x40047477 + TIOCGPTN = 0x4004740f + TIOCGSID = 0x40047463 + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGDTRWAIT = 0x4004745a + TIOCMGET = 0x4004746a + TIOCMSDTRWAIT = 0x8004745b + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DCD = 0x40 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCPTMASTER = 0x2000741c + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDRAINWAIT = 0x80047457 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSIG = 0x2004745f + TIOCSPGRP = 0x80047476 + 
TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCTIMESTAMP = 0x40107459 + TIOCUCNTL = 0x80047466 + TOSTOP = 0x400000 + UTIME_NOW = -0x1 + UTIME_OMIT = -0x2 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VERASE2 = 0x7 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WCONTINUED = 0x4 + WCOREFLAG = 0x80 + WEXITED = 0x10 + WLINUXCLONE = 0x80000000 + WNOHANG = 0x1 + WNOWAIT = 0x8 + WSTOPPED = 0x2 + WTRAPPED = 0x20 + WUNTRACED = 0x2 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x59) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x55) + ECAPMODE = syscall.Errno(0x5e) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDOOFUS = syscall.Errno(0x58) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x52) + EILSEQ = syscall.Errno(0x56) + EINPROGRESS = syscall.Errno(0x24) + EINTEGRITY = syscall.Errno(0x61) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x61) + ELOOP = syscall.Errno(0x3e) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + EMULTIHOP = syscall.Errno(0x5a) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x57) + ENOBUFS = syscall.Errno(0x37) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOLINK = syscall.Errno(0x5b) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x53) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCAPABLE = syscall.Errno(0x5d) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x5f) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x2d) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x54) + EOWNERDEAD = syscall.Errno(0x60) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5c) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + 
EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGLIBRT = syscall.Signal(0x21) + SIGLWP = syscall.Signal(0x20) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTHR = syscall.Signal(0x20) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EWOULDBLOCK", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, 
"ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disc quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC prog. not avail"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIDRM", "identifier removed"}, + {83, "ENOMSG", "no message of desired type"}, + {84, "EOVERFLOW", "value too large to be stored in data type"}, + {85, "ECANCELED", "operation canceled"}, + {86, "EILSEQ", "illegal byte sequence"}, + {87, "ENOATTR", "attribute not found"}, + {88, "EDOOFUS", "programming error"}, + {89, "EBADMSG", "bad message"}, + {90, "EMULTIHOP", "multihop attempted"}, + {91, "ENOLINK", "link has been severed"}, + {92, "EPROTO", "protocol error"}, + {93, "ENOTCAPABLE", "capabilities insufficient"}, + {94, "ECAPMODE", "not permitted in capability mode"}, + {95, "ENOTRECOVERABLE", "state not recoverable"}, + {96, "EOWNERDEAD", "previous owner died"}, + {97, "EINTEGRITY", "integrity check failed"}, +} + +// Signal table +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGIOT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", 
"filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGTHR", "unknown signal"}, + {33, "SIGLIBRT", "unknown signal"}, +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 36a89c643..de936b677 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -70,6 +70,7 @@ const ( ALG_SET_DRBG_ENTROPY = 0x6 ALG_SET_IV = 0x2 ALG_SET_KEY = 0x1 + ALG_SET_KEY_BY_KEY_SERIAL = 0x7 ALG_SET_OP = 0x3 ANON_INODE_FS_MAGIC = 0x9041934 ARPHRD_6LOWPAN = 0x339 @@ -140,6 +141,306 @@ const ( ARPHRD_VOID = 0xffff ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f + AUDIT_ADD = 0x3eb + AUDIT_ADD_RULE = 0x3f3 + AUDIT_ALWAYS = 0x2 + AUDIT_ANOM_ABEND = 0x6a5 + AUDIT_ANOM_CREAT = 0x6a7 + AUDIT_ANOM_LINK = 0x6a6 + AUDIT_ANOM_PROMISCUOUS = 0x6a4 + AUDIT_ARCH = 0xb + AUDIT_ARCH_AARCH64 = 0xc00000b7 + AUDIT_ARCH_ALPHA = 0xc0009026 + AUDIT_ARCH_ARCOMPACT = 0x4000005d + AUDIT_ARCH_ARCOMPACTBE = 0x5d + AUDIT_ARCH_ARCV2 = 0x400000c3 + AUDIT_ARCH_ARCV2BE = 0xc3 + AUDIT_ARCH_ARM = 0x40000028 + AUDIT_ARCH_ARMEB = 0x28 + AUDIT_ARCH_C6X = 0x4000008c + AUDIT_ARCH_C6XBE = 0x8c + AUDIT_ARCH_CRIS = 0x4000004c + AUDIT_ARCH_CSKY = 0x400000fc + AUDIT_ARCH_FRV = 0x5441 + AUDIT_ARCH_H8300 = 0x2e + AUDIT_ARCH_HEXAGON = 0xa4 + AUDIT_ARCH_I386 = 0x40000003 + AUDIT_ARCH_IA64 = 0xc0000032 + AUDIT_ARCH_LOONGARCH32 = 0x40000102 + AUDIT_ARCH_LOONGARCH64 = 0xc0000102 + AUDIT_ARCH_M32R = 0x58 + AUDIT_ARCH_M68K = 0x4 + AUDIT_ARCH_MICROBLAZE = 0xbd + AUDIT_ARCH_MIPS = 0x8 + AUDIT_ARCH_MIPS64 = 0x80000008 + AUDIT_ARCH_MIPS64N32 = 0xa0000008 + AUDIT_ARCH_MIPSEL = 0x40000008 + AUDIT_ARCH_MIPSEL64 = 0xc0000008 + AUDIT_ARCH_MIPSEL64N32 = 0xe0000008 + AUDIT_ARCH_NDS32 = 0x400000a7 + AUDIT_ARCH_NDS32BE = 0xa7 + AUDIT_ARCH_NIOS2 = 0x40000071 + AUDIT_ARCH_OPENRISC = 0x5c + AUDIT_ARCH_PARISC = 0xf + AUDIT_ARCH_PARISC64 = 0x8000000f + AUDIT_ARCH_PPC = 0x14 + AUDIT_ARCH_PPC64 = 0x80000015 + AUDIT_ARCH_PPC64LE = 0xc0000015 + AUDIT_ARCH_RISCV32 = 0x400000f3 + AUDIT_ARCH_RISCV64 = 0xc00000f3 + AUDIT_ARCH_S390 = 0x16 + AUDIT_ARCH_S390X = 0x80000016 + AUDIT_ARCH_SH = 0x2a + AUDIT_ARCH_SH64 = 0x8000002a + AUDIT_ARCH_SHEL = 0x4000002a + AUDIT_ARCH_SHEL64 = 0xc000002a + AUDIT_ARCH_SPARC = 0x2 + AUDIT_ARCH_SPARC64 = 0x8000002b + AUDIT_ARCH_TILEGX = 0xc00000bf + AUDIT_ARCH_TILEGX32 = 0x400000bf + AUDIT_ARCH_TILEPRO = 0x400000bc + AUDIT_ARCH_UNICORE = 0x4000006e + AUDIT_ARCH_X86_64 = 0xc000003e + AUDIT_ARCH_XTENSA = 0x5e + AUDIT_ARG0 = 0xc8 + AUDIT_ARG1 = 0xc9 + AUDIT_ARG2 = 0xca + AUDIT_ARG3 = 0xcb + AUDIT_AVC = 0x578 + AUDIT_AVC_PATH = 0x57a + AUDIT_BITMASK_SIZE = 0x40 + AUDIT_BIT_MASK = 0x8000000 + AUDIT_BIT_TEST = 0x48000000 + AUDIT_BPF = 0x536 + AUDIT_BPRM_FCAPS = 0x529 + AUDIT_CAPSET = 0x52a + AUDIT_CLASS_CHATTR = 0x2 + AUDIT_CLASS_CHATTR_32 = 0x3 + AUDIT_CLASS_DIR_WRITE = 0x0 + AUDIT_CLASS_DIR_WRITE_32 = 0x1 + AUDIT_CLASS_READ = 0x4 + AUDIT_CLASS_READ_32 = 0x5 + AUDIT_CLASS_SIGNAL = 0x8 + AUDIT_CLASS_SIGNAL_32 = 0x9 + AUDIT_CLASS_WRITE = 0x6 + AUDIT_CLASS_WRITE_32 = 0x7 + AUDIT_COMPARE_AUID_TO_EUID = 0x10 + AUDIT_COMPARE_AUID_TO_FSUID = 0xe + AUDIT_COMPARE_AUID_TO_OBJ_UID = 0x5 + AUDIT_COMPARE_AUID_TO_SUID = 0xf + AUDIT_COMPARE_EGID_TO_FSGID = 0x17 + AUDIT_COMPARE_EGID_TO_OBJ_GID = 0x4 + AUDIT_COMPARE_EGID_TO_SGID = 
0x18 + AUDIT_COMPARE_EUID_TO_FSUID = 0x12 + AUDIT_COMPARE_EUID_TO_OBJ_UID = 0x3 + AUDIT_COMPARE_EUID_TO_SUID = 0x11 + AUDIT_COMPARE_FSGID_TO_OBJ_GID = 0x9 + AUDIT_COMPARE_FSUID_TO_OBJ_UID = 0x8 + AUDIT_COMPARE_GID_TO_EGID = 0x14 + AUDIT_COMPARE_GID_TO_FSGID = 0x15 + AUDIT_COMPARE_GID_TO_OBJ_GID = 0x2 + AUDIT_COMPARE_GID_TO_SGID = 0x16 + AUDIT_COMPARE_SGID_TO_FSGID = 0x19 + AUDIT_COMPARE_SGID_TO_OBJ_GID = 0x7 + AUDIT_COMPARE_SUID_TO_FSUID = 0x13 + AUDIT_COMPARE_SUID_TO_OBJ_UID = 0x6 + AUDIT_COMPARE_UID_TO_AUID = 0xa + AUDIT_COMPARE_UID_TO_EUID = 0xb + AUDIT_COMPARE_UID_TO_FSUID = 0xc + AUDIT_COMPARE_UID_TO_OBJ_UID = 0x1 + AUDIT_COMPARE_UID_TO_SUID = 0xd + AUDIT_CONFIG_CHANGE = 0x519 + AUDIT_CWD = 0x51b + AUDIT_DAEMON_ABORT = 0x4b2 + AUDIT_DAEMON_CONFIG = 0x4b3 + AUDIT_DAEMON_END = 0x4b1 + AUDIT_DAEMON_START = 0x4b0 + AUDIT_DEL = 0x3ec + AUDIT_DEL_RULE = 0x3f4 + AUDIT_DEVMAJOR = 0x64 + AUDIT_DEVMINOR = 0x65 + AUDIT_DIR = 0x6b + AUDIT_DM_CTRL = 0x53a + AUDIT_DM_EVENT = 0x53b + AUDIT_EGID = 0x6 + AUDIT_EOE = 0x528 + AUDIT_EQUAL = 0x40000000 + AUDIT_EUID = 0x2 + AUDIT_EVENT_LISTENER = 0x537 + AUDIT_EXE = 0x70 + AUDIT_EXECVE = 0x51d + AUDIT_EXIT = 0x67 + AUDIT_FAIL_PANIC = 0x2 + AUDIT_FAIL_PRINTK = 0x1 + AUDIT_FAIL_SILENT = 0x0 + AUDIT_FANOTIFY = 0x533 + AUDIT_FD_PAIR = 0x525 + AUDIT_FEATURE_BITMAP_ALL = 0x7f + AUDIT_FEATURE_BITMAP_BACKLOG_LIMIT = 0x1 + AUDIT_FEATURE_BITMAP_BACKLOG_WAIT_TIME = 0x2 + AUDIT_FEATURE_BITMAP_EXCLUDE_EXTEND = 0x8 + AUDIT_FEATURE_BITMAP_EXECUTABLE_PATH = 0x4 + AUDIT_FEATURE_BITMAP_FILTER_FS = 0x40 + AUDIT_FEATURE_BITMAP_LOST_RESET = 0x20 + AUDIT_FEATURE_BITMAP_SESSIONID_FILTER = 0x10 + AUDIT_FEATURE_CHANGE = 0x530 + AUDIT_FEATURE_LOGINUID_IMMUTABLE = 0x1 + AUDIT_FEATURE_ONLY_UNSET_LOGINUID = 0x0 + AUDIT_FEATURE_VERSION = 0x1 + AUDIT_FIELD_COMPARE = 0x6f + AUDIT_FILETYPE = 0x6c + AUDIT_FILTERKEY = 0xd2 + AUDIT_FILTER_ENTRY = 0x2 + AUDIT_FILTER_EXCLUDE = 0x5 + AUDIT_FILTER_EXIT = 0x4 + AUDIT_FILTER_FS = 0x6 + AUDIT_FILTER_PREPEND = 0x10 + AUDIT_FILTER_TASK = 0x1 + AUDIT_FILTER_TYPE = 0x5 + AUDIT_FILTER_URING_EXIT = 0x7 + AUDIT_FILTER_USER = 0x0 + AUDIT_FILTER_WATCH = 0x3 + AUDIT_FIRST_KERN_ANOM_MSG = 0x6a4 + AUDIT_FIRST_USER_MSG = 0x44c + AUDIT_FIRST_USER_MSG2 = 0x834 + AUDIT_FSGID = 0x8 + AUDIT_FSTYPE = 0x1a + AUDIT_FSUID = 0x4 + AUDIT_GET = 0x3e8 + AUDIT_GET_FEATURE = 0x3fb + AUDIT_GID = 0x5 + AUDIT_GREATER_THAN = 0x20000000 + AUDIT_GREATER_THAN_OR_EQUAL = 0x60000000 + AUDIT_INODE = 0x66 + AUDIT_INTEGRITY_DATA = 0x708 + AUDIT_INTEGRITY_EVM_XATTR = 0x70e + AUDIT_INTEGRITY_HASH = 0x70b + AUDIT_INTEGRITY_METADATA = 0x709 + AUDIT_INTEGRITY_PCR = 0x70c + AUDIT_INTEGRITY_POLICY_RULE = 0x70f + AUDIT_INTEGRITY_RULE = 0x70d + AUDIT_INTEGRITY_STATUS = 0x70a + AUDIT_IPC = 0x517 + AUDIT_IPC_SET_PERM = 0x51f + AUDIT_KERNEL = 0x7d0 + AUDIT_KERNEL_OTHER = 0x524 + AUDIT_KERN_MODULE = 0x532 + AUDIT_LAST_FEATURE = 0x1 + AUDIT_LAST_KERN_ANOM_MSG = 0x707 + AUDIT_LAST_USER_MSG = 0x4af + AUDIT_LAST_USER_MSG2 = 0xbb7 + AUDIT_LESS_THAN = 0x10000000 + AUDIT_LESS_THAN_OR_EQUAL = 0x50000000 + AUDIT_LIST = 0x3ea + AUDIT_LIST_RULES = 0x3f5 + AUDIT_LOGIN = 0x3ee + AUDIT_LOGINUID = 0x9 + AUDIT_LOGINUID_SET = 0x18 + AUDIT_MAC_CALIPSO_ADD = 0x58a + AUDIT_MAC_CALIPSO_DEL = 0x58b + AUDIT_MAC_CIPSOV4_ADD = 0x57f + AUDIT_MAC_CIPSOV4_DEL = 0x580 + AUDIT_MAC_CONFIG_CHANGE = 0x57d + AUDIT_MAC_IPSEC_ADDSA = 0x583 + AUDIT_MAC_IPSEC_ADDSPD = 0x585 + AUDIT_MAC_IPSEC_DELSA = 0x584 + AUDIT_MAC_IPSEC_DELSPD = 0x586 + AUDIT_MAC_IPSEC_EVENT = 0x587 + AUDIT_MAC_MAP_ADD = 0x581 + AUDIT_MAC_MAP_DEL = 0x582 + 
AUDIT_MAC_POLICY_LOAD = 0x57b + AUDIT_MAC_STATUS = 0x57c + AUDIT_MAC_UNLBL_ALLOW = 0x57e + AUDIT_MAC_UNLBL_STCADD = 0x588 + AUDIT_MAC_UNLBL_STCDEL = 0x589 + AUDIT_MAKE_EQUIV = 0x3f7 + AUDIT_MAX_FIELDS = 0x40 + AUDIT_MAX_FIELD_COMPARE = 0x19 + AUDIT_MAX_KEY_LEN = 0x100 + AUDIT_MESSAGE_TEXT_MAX = 0x2170 + AUDIT_MMAP = 0x52b + AUDIT_MQ_GETSETATTR = 0x523 + AUDIT_MQ_NOTIFY = 0x522 + AUDIT_MQ_OPEN = 0x520 + AUDIT_MQ_SENDRECV = 0x521 + AUDIT_MSGTYPE = 0xc + AUDIT_NEGATE = 0x80000000 + AUDIT_NETFILTER_CFG = 0x52d + AUDIT_NETFILTER_PKT = 0x52c + AUDIT_NEVER = 0x0 + AUDIT_NLGRP_MAX = 0x1 + AUDIT_NOT_EQUAL = 0x30000000 + AUDIT_NR_FILTERS = 0x8 + AUDIT_OBJ_GID = 0x6e + AUDIT_OBJ_LEV_HIGH = 0x17 + AUDIT_OBJ_LEV_LOW = 0x16 + AUDIT_OBJ_PID = 0x526 + AUDIT_OBJ_ROLE = 0x14 + AUDIT_OBJ_TYPE = 0x15 + AUDIT_OBJ_UID = 0x6d + AUDIT_OBJ_USER = 0x13 + AUDIT_OPENAT2 = 0x539 + AUDIT_OPERATORS = 0x78000000 + AUDIT_PATH = 0x516 + AUDIT_PERM = 0x6a + AUDIT_PERM_ATTR = 0x8 + AUDIT_PERM_EXEC = 0x1 + AUDIT_PERM_READ = 0x4 + AUDIT_PERM_WRITE = 0x2 + AUDIT_PERS = 0xa + AUDIT_PID = 0x0 + AUDIT_POSSIBLE = 0x1 + AUDIT_PPID = 0x12 + AUDIT_PROCTITLE = 0x52f + AUDIT_REPLACE = 0x531 + AUDIT_SADDR_FAM = 0x71 + AUDIT_SECCOMP = 0x52e + AUDIT_SELINUX_ERR = 0x579 + AUDIT_SESSIONID = 0x19 + AUDIT_SET = 0x3e9 + AUDIT_SET_FEATURE = 0x3fa + AUDIT_SGID = 0x7 + AUDIT_SID_UNSET = 0xffffffff + AUDIT_SIGNAL_INFO = 0x3f2 + AUDIT_SOCKADDR = 0x51a + AUDIT_SOCKETCALL = 0x518 + AUDIT_STATUS_BACKLOG_LIMIT = 0x10 + AUDIT_STATUS_BACKLOG_WAIT_TIME = 0x20 + AUDIT_STATUS_BACKLOG_WAIT_TIME_ACTUAL = 0x80 + AUDIT_STATUS_ENABLED = 0x1 + AUDIT_STATUS_FAILURE = 0x2 + AUDIT_STATUS_LOST = 0x40 + AUDIT_STATUS_PID = 0x4 + AUDIT_STATUS_RATE_LIMIT = 0x8 + AUDIT_SUBJ_CLR = 0x11 + AUDIT_SUBJ_ROLE = 0xe + AUDIT_SUBJ_SEN = 0x10 + AUDIT_SUBJ_TYPE = 0xf + AUDIT_SUBJ_USER = 0xd + AUDIT_SUCCESS = 0x68 + AUDIT_SUID = 0x3 + AUDIT_SYSCALL = 0x514 + AUDIT_SYSCALL_CLASSES = 0x10 + AUDIT_TIME_ADJNTPVAL = 0x535 + AUDIT_TIME_INJOFFSET = 0x534 + AUDIT_TRIM = 0x3f6 + AUDIT_TTY = 0x527 + AUDIT_TTY_GET = 0x3f8 + AUDIT_TTY_SET = 0x3f9 + AUDIT_UID = 0x1 + AUDIT_UID_UNSET = 0xffffffff + AUDIT_UNUSED_BITS = 0x7fffc00 + AUDIT_URINGOP = 0x538 + AUDIT_USER = 0x3ed + AUDIT_USER_AVC = 0x453 + AUDIT_USER_TTY = 0x464 + AUDIT_VERSION_BACKLOG_LIMIT = 0x1 + AUDIT_VERSION_BACKLOG_WAIT_TIME = 0x2 + AUDIT_VERSION_LATEST = 0x7f + AUDIT_WATCH = 0x69 + AUDIT_WATCH_INS = 0x3ef + AUDIT_WATCH_LIST = 0x3f1 + AUDIT_WATCH_REM = 0x3f0 AUTOFS_SUPER_MAGIC = 0x187 B0 = 0x0 B110 = 0x3 @@ -157,7 +458,6 @@ const ( B600 = 0x8 B75 = 0x2 B9600 = 0xd - BALLOON_KVM_MAGIC = 0x13661366 BDEVFS_MAGIC = 0x62646576 BINDERFS_SUPER_MAGIC = 0x6c6f6f70 BINFMTFS_MAGIC = 0x42494e4d @@ -184,6 +484,7 @@ const ( BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_KPROBE_MULTI_RETURN = 0x1 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_REPLACE = 0x4 BPF_F_SLEEPABLE = 0x10 @@ -191,6 +492,8 @@ const ( BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TEST_RUN_ON_CPU = 0x1 BPF_F_TEST_STATE_FREQ = 0x8 + BPF_F_TEST_XDP_LIVE_FRAMES = 0x2 + BPF_F_XDP_HAS_FRAGS = 0x20 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -260,6 +563,7 @@ const ( BUS_USB = 0x3 BUS_VIRTUAL = 0x6 CAN_BCM = 0x2 + CAN_BUS_OFF_THRESHOLD = 0x100 CAN_CTRLMODE_3_SAMPLES = 0x4 CAN_CTRLMODE_BERR_REPORTING = 0x10 CAN_CTRLMODE_CC_LEN8_DLC = 0x100 @@ -274,9 +578,12 @@ const ( CAN_EFF_FLAG = 0x80000000 CAN_EFF_ID_BITS = 0x1d CAN_EFF_MASK = 0x1fffffff + CAN_ERROR_PASSIVE_THRESHOLD = 0x80 + CAN_ERROR_WARNING_THRESHOLD = 0x60 CAN_ERR_ACK = 0x20 CAN_ERR_BUSERROR = 0x80 CAN_ERR_BUSOFF = 
0x40 + CAN_ERR_CNT = 0x200 CAN_ERR_CRTL = 0x4 CAN_ERR_CRTL_ACTIVE = 0x40 CAN_ERR_CRTL_RX_OVERFLOW = 0x1 @@ -393,9 +700,11 @@ const ( CAP_SYS_TIME = 0x19 CAP_SYS_TTY_CONFIG = 0x1a CAP_WAKE_ALARM = 0x23 + CEPH_SUPER_MAGIC = 0xc36400 CFLUSH = 0xf CGROUP2_SUPER_MAGIC = 0x63677270 CGROUP_SUPER_MAGIC = 0x27e0eb + CIFS_SUPER_MAGIC = 0xff534d42 CLOCK_BOOTTIME = 0x7 CLOCK_BOOTTIME_ALARM = 0x9 CLOCK_DEFAULT = 0x0 @@ -466,6 +775,8 @@ const ( DEVLINK_GENL_MCGRP_CONFIG_NAME = "config" DEVLINK_GENL_NAME = "devlink" DEVLINK_GENL_VERSION = 0x1 + DEVLINK_PORT_FN_CAP_MIGRATABLE = 0x2 + DEVLINK_PORT_FN_CAP_ROCE = 0x1 DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX = 0x14 DEVLINK_SUPPORTED_FLASH_OVERWRITE_SECTIONS = 0x3 DEVMEM_MAGIC = 0x454d444d @@ -515,9 +826,9 @@ const ( DM_UUID_FLAG = 0x4000 DM_UUID_LEN = 0x81 DM_VERSION = 0xc138fd00 - DM_VERSION_EXTRA = "-ioctl (2021-03-22)" + DM_VERSION_EXTRA = "-ioctl (2022-07-28)" DM_VERSION_MAJOR = 0x4 - DM_VERSION_MINOR = 0x2d + DM_VERSION_MINOR = 0x2f DM_VERSION_PATCHLEVEL = 0x0 DT_BLK = 0x6 DT_CHR = 0x2 @@ -533,6 +844,55 @@ const ( EFD_SEMAPHORE = 0x1 EFIVARFS_MAGIC = 0xde5e81e4 EFS_SUPER_MAGIC = 0x414a53 + EM_386 = 0x3 + EM_486 = 0x6 + EM_68K = 0x4 + EM_860 = 0x7 + EM_88K = 0x5 + EM_AARCH64 = 0xb7 + EM_ALPHA = 0x9026 + EM_ALTERA_NIOS2 = 0x71 + EM_ARCOMPACT = 0x5d + EM_ARCV2 = 0xc3 + EM_ARM = 0x28 + EM_BLACKFIN = 0x6a + EM_BPF = 0xf7 + EM_CRIS = 0x4c + EM_CSKY = 0xfc + EM_CYGNUS_M32R = 0x9041 + EM_CYGNUS_MN10300 = 0xbeef + EM_FRV = 0x5441 + EM_H8_300 = 0x2e + EM_HEXAGON = 0xa4 + EM_IA_64 = 0x32 + EM_LOONGARCH = 0x102 + EM_M32 = 0x1 + EM_M32R = 0x58 + EM_MICROBLAZE = 0xbd + EM_MIPS = 0x8 + EM_MIPS_RS3_LE = 0xa + EM_MIPS_RS4_BE = 0xa + EM_MN10300 = 0x59 + EM_NDS32 = 0xa7 + EM_NONE = 0x0 + EM_OPENRISC = 0x5c + EM_PARISC = 0xf + EM_PPC = 0x14 + EM_PPC64 = 0x15 + EM_RISCV = 0xf3 + EM_S390 = 0x16 + EM_S390_OLD = 0xa390 + EM_SH = 0x2a + EM_SPARC = 0x2 + EM_SPARC32PLUS = 0x12 + EM_SPARCV9 = 0x2b + EM_SPU = 0x17 + EM_TILEGX = 0xbf + EM_TILEPRO = 0xbc + EM_TI_C6000 = 0x8c + EM_UNICORE = 0x6e + EM_X86_64 = 0x3e + EM_XTENSA = 0x5e ENCODING_DEFAULT = 0x0 ENCODING_FM_MARK = 0x3 ENCODING_FM_SPACE = 0x4 @@ -695,6 +1055,7 @@ const ( ETH_P_CAIF = 0xf7 ETH_P_CAN = 0xc ETH_P_CANFD = 0xd + ETH_P_CANXL = 0xe ETH_P_CFM = 0x8902 ETH_P_CONTROL = 0x16 ETH_P_CUST = 0x6006 @@ -706,10 +1067,12 @@ const ( ETH_P_DNA_RT = 0x6003 ETH_P_DSA = 0x1b ETH_P_DSA_8021Q = 0xdadb + ETH_P_DSA_A5PSW = 0xe001 ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be ETH_P_ERSPAN2 = 0x22eb + ETH_P_ETHERCAT = 0x88a4 ETH_P_FCOE = 0x8906 ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 @@ -747,6 +1110,7 @@ const ( ETH_P_PPP_MP = 0x8 ETH_P_PPP_SES = 0x8864 ETH_P_PREAUTH = 0x88c7 + ETH_P_PROFINET = 0x8892 ETH_P_PRP = 0x88fb ETH_P_PUP = 0x200 ETH_P_PUPAT = 0x201 @@ -784,6 +1148,7 @@ const ( EV_SYN = 0x0 EV_VERSION = 0x10001 EXABYTE_ENABLE_NEST = 0xf0 + EXFAT_SUPER_MAGIC = 0x2011bab0 EXT2_SUPER_MAGIC = 0xef53 EXT3_SUPER_MAGIC = 0xef53 EXT4_SUPER_MAGIC = 0xef53 @@ -826,16 +1191,21 @@ const ( FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2 FAN_EVENT_INFO_TYPE_ERROR = 0x5 FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_INFO_TYPE_NEW_DFID_NAME = 0xc + FAN_EVENT_INFO_TYPE_OLD_DFID_NAME = 0xa FAN_EVENT_INFO_TYPE_PIDFD = 0x4 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_FS_ERROR = 0x8000 FAN_MARK_ADD = 0x1 FAN_MARK_DONT_FOLLOW = 0x4 + FAN_MARK_EVICTABLE = 0x200 FAN_MARK_FILESYSTEM = 0x100 FAN_MARK_FLUSH = 0x80 + FAN_MARK_IGNORE = 0x400 FAN_MARK_IGNORED_MASK = 0x20 FAN_MARK_IGNORED_SURV_MODIFY = 0x40 + FAN_MARK_IGNORE_SURV = 0x440 FAN_MARK_INODE 
= 0x0 FAN_MARK_MOUNT = 0x10 FAN_MARK_ONLYDIR = 0x8 @@ -854,17 +1224,27 @@ const ( FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 FAN_Q_OVERFLOW = 0x4000 + FAN_RENAME = 0x10000000 FAN_REPORT_DFID_NAME = 0xc00 + FAN_REPORT_DFID_NAME_TARGET = 0x1e00 FAN_REPORT_DIR_FID = 0x400 FAN_REPORT_FID = 0x200 FAN_REPORT_NAME = 0x800 FAN_REPORT_PIDFD = 0x80 + FAN_REPORT_TARGET_FID = 0x1000 FAN_REPORT_TID = 0x100 FAN_UNLIMITED_MARKS = 0x20 FAN_UNLIMITED_QUEUE = 0x10 FD_CLOEXEC = 0x1 FD_SETSIZE = 0x400 FF0 = 0x0 + FIB_RULE_DEV_DETACHED = 0x8 + FIB_RULE_FIND_SADDR = 0x10000 + FIB_RULE_IIF_DETACHED = 0x8 + FIB_RULE_INVERT = 0x2 + FIB_RULE_OIF_DETACHED = 0x10 + FIB_RULE_PERMANENT = 0x1 + FIB_RULE_UNRESOLVED = 0x4 FIDEDUPERANGE = 0xc0189436 FSCRYPT_KEY_DESCRIPTOR_SIZE = 0x8 FSCRYPT_KEY_DESC_PREFIX = "fscrypt:" @@ -883,7 +1263,10 @@ const ( FSCRYPT_MODE_AES_128_CBC = 0x5 FSCRYPT_MODE_AES_128_CTS = 0x6 FSCRYPT_MODE_AES_256_CTS = 0x4 + FSCRYPT_MODE_AES_256_HCTR2 = 0xa FSCRYPT_MODE_AES_256_XTS = 0x1 + FSCRYPT_MODE_SM4_CTS = 0x8 + FSCRYPT_MODE_SM4_XTS = 0x7 FSCRYPT_POLICY_FLAGS_PAD_16 = 0x2 FSCRYPT_POLICY_FLAGS_PAD_32 = 0x3 FSCRYPT_POLICY_FLAGS_PAD_4 = 0x0 @@ -902,8 +1285,6 @@ const ( FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 FS_ENCRYPTION_MODE_INVALID = 0x0 - FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 - FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 FS_IOC_ADD_ENCRYPTION_KEY = 0xc0506617 FS_IOC_GET_ENCRYPTION_KEY_STATUS = 0xc080661a FS_IOC_GET_ENCRYPTION_POLICY_EX = 0xc0096616 @@ -927,6 +1308,7 @@ const ( FS_VERITY_METADATA_TYPE_DESCRIPTOR = 0x2 FS_VERITY_METADATA_TYPE_MERKLE_TREE = 0x1 FS_VERITY_METADATA_TYPE_SIGNATURE = 0x3 + FUSE_SUPER_MAGIC = 0x65735546 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 @@ -1039,7 +1421,7 @@ const ( IFA_F_STABLE_PRIVACY = 0x800 IFA_F_TEMPORARY = 0x1 IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0xa + IFA_MAX = 0xb IFF_ALLMULTI = 0x200 IFF_ATTACH_QUEUE = 0x200 IFF_AUTOMEDIA = 0x4000 @@ -1059,6 +1441,7 @@ const ( IFF_NOARP = 0x80 IFF_NOFILTER = 0x1000 IFF_NOTRAILERS = 0x20 + IFF_NO_CARRIER = 0x40 IFF_NO_PI = 0x1000 IFF_ONE_QUEUE = 0x2000 IFF_PERSIST = 0x800 @@ -1294,6 +1677,7 @@ const ( KEXEC_ARCH_ARM = 0x280000 KEXEC_ARCH_DEFAULT = 0x0 KEXEC_ARCH_IA_64 = 0x320000 + KEXEC_ARCH_LOONGARCH = 0x1020000 KEXEC_ARCH_MASK = 0xffff0000 KEXEC_ARCH_MIPS = 0x80000 KEXEC_ARCH_MIPS_LE = 0xa0000 @@ -1386,8 +1770,10 @@ const ( LANDLOCK_ACCESS_FS_MAKE_SYM = 0x1000 LANDLOCK_ACCESS_FS_READ_DIR = 0x8 LANDLOCK_ACCESS_FS_READ_FILE = 0x4 + LANDLOCK_ACCESS_FS_REFER = 0x2000 LANDLOCK_ACCESS_FS_REMOVE_DIR = 0x10 LANDLOCK_ACCESS_FS_REMOVE_FILE = 0x20 + LANDLOCK_ACCESS_FS_TRUNCATE = 0x4000 LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 LINUX_REBOOT_CMD_CAD_OFF = 0x0 @@ -1427,11 +1813,13 @@ const ( LWTUNNEL_IP_OPT_GENEVE_MAX = 0x3 LWTUNNEL_IP_OPT_VXLAN_MAX = 0x1 MADV_COLD = 0x14 + MADV_COLLAPSE = 0x19 MADV_DODUMP = 0x11 MADV_DOFORK = 0xb MADV_DONTDUMP = 0x10 MADV_DONTFORK = 0xa MADV_DONTNEED = 0x4 + MADV_DONTNEED_LOCKED = 0x18 MADV_FREE = 0x8 MADV_HUGEPAGE = 0xe MADV_HWPOISON = 0x64 @@ -1473,7 +1861,7 @@ const ( MFD_ALLOW_SEALING = 0x2 MFD_CLOEXEC = 0x1 MFD_HUGETLB = 0x4 - MFD_HUGE_16GB = -0x78000000 + MFD_HUGE_16GB = 0x88000000 MFD_HUGE_16MB = 0x60000000 MFD_HUGE_1GB = 0x78000000 MFD_HUGE_1MB = 0x50000000 @@ -1495,6 +1883,7 @@ const ( MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 MOUNT_ATTR_IDMAP = 0x100000 @@ -1740,6 +2129,7 @@ const 
( NLM_F_ACK_TLVS = 0x200 NLM_F_APPEND = 0x800 NLM_F_ATOMIC = 0x400 + NLM_F_BULK = 0x200 NLM_F_CAPPED = 0x100 NLM_F_CREATE = 0x400 NLM_F_DUMP = 0x300 @@ -1778,6 +2168,7 @@ const ( PACKET_FANOUT_DATA = 0x16 PACKET_FANOUT_EBPF = 0x7 PACKET_FANOUT_FLAG_DEFRAG = 0x8000 + PACKET_FANOUT_FLAG_IGNORE_OUTGOING = 0x4000 PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 PACKET_FANOUT_HASH = 0x0 @@ -1837,6 +2228,11 @@ const ( PERF_AUX_FLAG_PARTIAL = 0x4 PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK = 0xff00 PERF_AUX_FLAG_TRUNCATED = 0x1 + PERF_BR_ARM64_DEBUG_DATA = 0x7 + PERF_BR_ARM64_DEBUG_EXIT = 0x5 + PERF_BR_ARM64_DEBUG_HALT = 0x4 + PERF_BR_ARM64_DEBUG_INST = 0x6 + PERF_BR_ARM64_FIQ = 0x3 PERF_FLAG_FD_CLOEXEC = 0x8 PERF_FLAG_FD_NO_GROUP = 0x1 PERF_FLAG_FD_OUTPUT = 0x2 @@ -1849,11 +2245,16 @@ const ( PERF_MEM_BLK_NA = 0x1 PERF_MEM_BLK_SHIFT = 0x28 PERF_MEM_HOPS_0 = 0x1 + PERF_MEM_HOPS_1 = 0x2 + PERF_MEM_HOPS_2 = 0x3 + PERF_MEM_HOPS_3 = 0x4 PERF_MEM_HOPS_SHIFT = 0x2b PERF_MEM_LOCK_LOCKED = 0x2 PERF_MEM_LOCK_NA = 0x1 PERF_MEM_LOCK_SHIFT = 0x18 PERF_MEM_LVLNUM_ANY_CACHE = 0xb + PERF_MEM_LVLNUM_CXL = 0x9 + PERF_MEM_LVLNUM_IO = 0xa PERF_MEM_LVLNUM_L1 = 0x1 PERF_MEM_LVLNUM_L2 = 0x2 PERF_MEM_LVLNUM_L3 = 0x3 @@ -1887,6 +2288,7 @@ const ( PERF_MEM_REMOTE_REMOTE = 0x1 PERF_MEM_REMOTE_SHIFT = 0x25 PERF_MEM_SNOOPX_FWD = 0x1 + PERF_MEM_SNOOPX_PEER = 0x2 PERF_MEM_SNOOPX_SHIFT = 0x26 PERF_MEM_SNOOP_HIT = 0x4 PERF_MEM_SNOOP_HITM = 0x10 @@ -1923,7 +2325,6 @@ const ( PERF_SAMPLE_BRANCH_PLM_ALL = 0x7 PERF_SAMPLE_WEIGHT_TYPE = 0x1004000 PIPEFS_MAGIC = 0x50495045 - PPC_CMM_MAGIC = 0xc7571590 PPPIOCGNPMODE = 0xc008744c PPPIOCNEWUNIT = 0xc004743e PRIO_PGRP = 0x1 @@ -2052,6 +2453,13 @@ const ( PR_SET_TIMING = 0xe PR_SET_TSC = 0x1a PR_SET_UNALIGN = 0x6 + PR_SET_VMA = 0x53564d41 + PR_SET_VMA_ANON_NAME = 0x0 + PR_SME_GET_VL = 0x40 + PR_SME_SET_VL = 0x3f + PR_SME_SET_VL_ONEXEC = 0x40000 + PR_SME_VL_INHERIT = 0x20000 + PR_SME_VL_LEN_MASK = 0xffff PR_SPEC_DISABLE = 0x4 PR_SPEC_DISABLE_NOEXEC = 0x10 PR_SPEC_ENABLE = 0x2 @@ -2204,8 +2612,9 @@ const ( RTC_FEATURE_ALARM = 0x0 RTC_FEATURE_ALARM_RES_2S = 0x3 RTC_FEATURE_ALARM_RES_MINUTE = 0x1 + RTC_FEATURE_ALARM_WAKEUP_ONLY = 0x7 RTC_FEATURE_BACKUP_SWITCH_MODE = 0x6 - RTC_FEATURE_CNT = 0x7 + RTC_FEATURE_CNT = 0x8 RTC_FEATURE_CORRECTION = 0x5 RTC_FEATURE_NEED_WEEK_DAY = 0x2 RTC_FEATURE_UPDATE_INTERRUPT = 0x4 @@ -2279,6 +2688,7 @@ const ( RTM_DELRULE = 0x21 RTM_DELTCLASS = 0x29 RTM_DELTFILTER = 0x2d + RTM_DELTUNNEL = 0x79 RTM_DELVLAN = 0x71 RTM_F_CLONED = 0x200 RTM_F_EQUALIZE = 0x400 @@ -2311,8 +2721,9 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e + RTM_GETTUNNEL = 0x7a RTM_GETVLAN = 0x72 - RTM_MAX = 0x77 + RTM_MAX = 0x7b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -2336,11 +2747,13 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x1a - RTM_NR_MSGTYPES = 0x68 + RTM_NEWTUNNEL = 0x78 + RTM_NR_FAMILIES = 0x1b + RTM_NR_MSGTYPES = 0x6c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 + RTM_SETSTATS = 0x5f RTNH_ALIGNTO = 0x4 RTNH_COMPARE_MASK = 0x59 RTNH_F_DEAD = 0x1 @@ -2509,6 +2922,7 @@ const ( SMART_STATUS = 0xda SMART_WRITE_LOG_SECTOR = 0xd6 SMART_WRITE_THRESHOLDS = 0xd7 + SMB2_SUPER_MAGIC = 0xfe534d42 SMB_SUPER_MAGIC = 0x517b SOCKFS_MAGIC = 0x534f434b SOCK_BUF_LOCK_MASK = 0x3 @@ -2520,6 +2934,9 @@ const ( SOCK_RDM = 0x4 SOCK_SEQPACKET = 0x5 SOCK_SNDBUF_LOCK = 0x1 + SOCK_TXREHASH_DEFAULT = 0xff + SOCK_TXREHASH_DISABLED = 0x0 + SOCK_TXREHASH_ENABLED = 0x1 SOL_AAL = 0x109 
SOL_ALG = 0x117 SOL_ATM = 0x108 @@ -2535,6 +2952,8 @@ const ( SOL_IUCV = 0x115 SOL_KCM = 0x119 SOL_LLC = 0x10c + SOL_MCTP = 0x11d + SOL_MPTCP = 0x11c SOL_NETBEUI = 0x10b SOL_NETLINK = 0x10e SOL_NFC = 0x118 @@ -2544,9 +2963,11 @@ const ( SOL_RAW = 0xff SOL_RDS = 0x114 SOL_RXRPC = 0x110 + SOL_SMC = 0x11e SOL_TCP = 0x6 SOL_TIPC = 0x10f SOL_TLS = 0x11a + SOL_UDP = 0x11 SOL_X25 = 0x106 SOL_XDP = 0x11b SOMAXCONN = 0x1000 @@ -2602,6 +3023,7 @@ const ( STATX_BLOCKS = 0x400 STATX_BTIME = 0x800 STATX_CTIME = 0x80 + STATX_DIOALIGN = 0x2000 STATX_GID = 0x10 STATX_INO = 0x100 STATX_MNT_ID = 0x1000 @@ -2650,7 +3072,7 @@ const ( TASKSTATS_GENL_NAME = "TASKSTATS" TASKSTATS_GENL_VERSION = 0x1 TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0xa + TASKSTATS_VERSION = 0xd TCIFLUSH = 0x0 TCIOFF = 0x2 TCIOFLUSH = 0x2 @@ -2830,6 +3252,19 @@ const ( TRACEFS_MAGIC = 0x74726163 TS_COMM_LEN = 0x20 UDF_SUPER_MAGIC = 0x15013346 + UDP_CORK = 0x1 + UDP_ENCAP = 0x64 + UDP_ENCAP_ESPINUDP = 0x2 + UDP_ENCAP_ESPINUDP_NON_IKE = 0x1 + UDP_ENCAP_GTP0 = 0x4 + UDP_ENCAP_GTP1U = 0x5 + UDP_ENCAP_L2TPINUDP = 0x3 + UDP_GRO = 0x68 + UDP_NO_CHECK6_RX = 0x66 + UDP_NO_CHECK6_TX = 0x65 + UDP_SEGMENT = 0x67 + UDP_V4_FLOW = 0x2 + UDP_V6_FLOW = 0x6 UMOUNT_NOFOLLOW = 0x8 USBDEVICE_SUPER_MAGIC = 0x9fa2 UTIME_NOW = 0x3fffffff @@ -2995,9 +3430,7 @@ const ( XDP_ZEROCOPY = 0x4 XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 - Z3FOLD_MAGIC = 0x33 ZONEFS_MAGIC = 0x5a4f4653 - ZSMALLOC_MAGIC = 0x58295829 _HIDIOCGRAWNAME_LEN = 0x80 _HIDIOCGRAWPHYS_LEN = 0x40 _HIDIOCGRAWUNIQ_LEN = 0x40 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 234fd4a5d..a46df0f1e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include -m32 +// mkerrors.sh -Wall -Werror -static -I/tmp/386/include -m32 // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux // +build 386,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/386/include -m32 _const.go package unix @@ -133,6 +133,7 @@ const ( MEMGETREGIONCOUNT = 0x80044d07 MEMISLOCKED = 0x80084d17 MEMLOCK = 0x40084d05 + MEMREAD = 0xc03c4d1a MEMREADOOB = 0xc00c4d04 MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 @@ -326,6 +327,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 @@ -350,6 +352,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 58619b758..6cd4a3ea9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include -m64 +// mkerrors.sh -Wall -Werror -static -I/tmp/amd64/include -m64 // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux // +build amd64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/amd64/include -m64 _const.go package unix @@ -133,6 +133,7 @@ const ( MEMGETREGIONCOUNT = 0x80044d07 MEMISLOCKED = 0x80084d17 MEMLOCK = 0x40084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 @@ -327,6 +328,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 @@ -351,6 +353,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 3a64ff59d..c7ebee24d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include +// mkerrors.sh -Wall -Werror -static -I/tmp/arm/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux // +build arm,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/arm/include _const.go package unix @@ -131,6 +131,7 @@ const ( MEMGETREGIONCOUNT = 0x80044d07 MEMISLOCKED = 0x80084d17 MEMLOCK = 0x40084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc00c4d04 MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 @@ -333,6 +334,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 @@ -357,6 +359,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index abe0b9257..9d5352c3e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include -fsigned-char +// mkerrors.sh -Wall -Werror -static -I/tmp/arm64/include -fsigned-char // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux // +build arm64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/arm64/include -fsigned-char _const.go package unix @@ -134,6 +134,7 @@ const ( MEMGETREGIONCOUNT = 0x80044d07 MEMISLOCKED = 0x80084d17 MEMLOCK = 0x40084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 @@ -323,6 +324,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 @@ -347,6 +349,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 @@ -511,6 +514,7 @@ const ( WORDSIZE = 0x40 XCASE = 0x4 XTABS = 0x1800 + ZA_MAGIC = 0x54366345 _HIDIOCGRAWNAME = 0x80804804 _HIDIOCGRAWPHYS = 0x80404805 _HIDIOCGRAWUNIQ = 0x80404808 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go new file mode 100644 index 000000000..f26a164f4 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -0,0 +1,819 @@ +// mkerrors.sh -Wall -Werror -static -I/tmp/loong64/include +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build loong64 && linux +// +build loong64,linux + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs -- -Wall -Werror -static -I/tmp/loong64/include _const.go + +package unix + +import "syscall" + +const ( + B1000000 = 0x1008 + B115200 = 0x1002 + B1152000 = 0x1009 + B1500000 = 0x100a + B2000000 = 0x100b + B230400 = 0x1003 + B2500000 = 0x100c + B3000000 = 0x100d + B3500000 = 0x100e + B4000000 = 0x100f + B460800 = 0x1004 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B921600 = 0x1007 + BLKBSZGET = 0x80081270 + BLKBSZSET = 0x40081271 + BLKFLSBUF = 0x1261 + BLKFRAGET = 0x1265 + BLKFRASET = 0x1264 + BLKGETSIZE = 0x1260 + BLKGETSIZE64 = 0x80081272 + BLKPBSZGET = 0x127b + BLKRAGET = 0x1263 + BLKRASET = 0x1262 + BLKROGET = 0x125e + BLKROSET = 0x125d + BLKRRPART = 0x125f + BLKSECTGET = 0x1267 + BLKSECTSET = 0x1266 + BLKSSZGET = 0x1268 + BOTHER = 0x1000 + BS1 = 0x2000 + BSDLY = 0x2000 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIZE = 0x30 + CSTOPB = 0x40 + ECCGETLAYOUT = 0x81484d11 + ECCGETSTATS = 0x80104d12 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x800 + EPOLL_CLOEXEC = 0x80000 + EXTPROC = 0x10000 + FF1 = 0x8000 + FFDLY = 0x8000 + FICLONE = 0x40049409 + FICLONERANGE = 0x4020940d + FLUSHO = 0x1000 + FPU_CTX_MAGIC = 0x46505501 + FS_IOC_ENABLE_VERITY = 0x40806685 + FS_IOC_GETFLAGS = 0x80086601 + FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b + FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SETFLAGS = 0x40086602 + FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 + F_GETLK = 0x5 + F_GETLK64 = 0x5 + F_GETOWN = 0x9 + F_RDLCK = 0x0 + F_SETLK = 0x6 + F_SETLK64 = 0x6 + F_SETLKW = 0x7 + F_SETLKW64 = 0x7 + F_SETOWN = 0x8 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + HIDIOCGRAWINFO = 0x80084803 + HIDIOCGRDESC = 0x90044802 + HIDIOCGRDESCSIZE = 0x80044801 + HUPCL = 0x400 + ICANON = 0x2 + IEXTEN = 0x8000 + IN_CLOEXEC = 0x80000 + IN_NONBLOCK = 0x800 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + ISIG = 0x1 + IUCLC = 0x200 + 
IXOFF = 0x1000 + IXON = 0x400 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_GROWSDOWN = 0x100 + MAP_HUGETLB = 0x40000 + MAP_LOCKED = 0x2000 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x4000 + MAP_POPULATE = 0x8000 + MAP_STACK = 0x20000 + MAP_SYNC = 0x80000 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MCL_ONFAULT = 0x4 + MEMERASE = 0x40084d02 + MEMERASE64 = 0x40104d14 + MEMGETBADBLOCK = 0x40084d0b + MEMGETINFO = 0x80204d01 + MEMGETOOBSEL = 0x80c84d0a + MEMGETREGIONCOUNT = 0x80044d07 + MEMISLOCKED = 0x80084d17 + MEMLOCK = 0x40084d05 + MEMREAD = 0xc0404d1a + MEMREADOOB = 0xc0104d04 + MEMSETBADBLOCK = 0x40084d0c + MEMUNLOCK = 0x40084d06 + MEMWRITEOOB = 0xc0104d03 + MTDFILEMODE = 0x4d13 + NFDBITS = 0x40 + NLDLY = 0x100 + NOFLSH = 0x80 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 + OLCUC = 0x2 + ONLCR = 0x4 + OTPERASE = 0x400c4d19 + OTPGETREGIONCOUNT = 0x40044d0e + OTPGETREGIONINFO = 0x400c4d0f + OTPLOCK = 0x800c4d10 + OTPSELECT = 0x80044d0d + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x4000 + O_DIRECTORY = 0x10000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x101000 + O_LARGEFILE = 0x0 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x20000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RSYNC = 0x101000 + O_SYNC = 0x101000 + O_TMPFILE = 0x410000 + O_TRUNC = 0x200 + PARENB = 0x100 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x2401 + PERF_EVENT_IOC_ENABLE = 0x2400 + PERF_EVENT_IOC_ID = 0x80082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4008240b + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 + PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc008240a + PERF_EVENT_IOC_REFRESH = 0x2402 + PERF_EVENT_IOC_RESET = 0x2403 + PERF_EVENT_IOC_SET_BPF = 0x40042408 + PERF_EVENT_IOC_SET_FILTER = 0x40082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + PPPIOCATTACH = 0x4004743d + PPPIOCATTCHAN = 0x40047438 + PPPIOCBRIDGECHAN = 0x40047435 + PPPIOCCONNECT = 0x4004743a + PPPIOCDETACH = 0x4004743c + PPPIOCDISCONN = 0x7439 + PPPIOCGASYNCMAP = 0x80047458 + PPPIOCGCHAN = 0x80047437 + PPPIOCGDEBUG = 0x80047441 + PPPIOCGFLAGS = 0x8004745a + PPPIOCGIDLE = 0x8010743f + PPPIOCGIDLE32 = 0x8008743f + PPPIOCGIDLE64 = 0x8010743f + PPPIOCGL2TPSTATS = 0x80487436 + PPPIOCGMRU = 0x80047453 + PPPIOCGRASYNCMAP = 0x80047455 + PPPIOCGUNIT = 0x80047456 + PPPIOCGXASYNCMAP = 0x80207450 + PPPIOCSACTIVE = 0x40107446 + PPPIOCSASYNCMAP = 0x40047457 + PPPIOCSCOMPRESS = 0x4010744d + PPPIOCSDEBUG = 0x40047440 + PPPIOCSFLAGS = 0x40047459 + PPPIOCSMAXCID = 0x40047451 + PPPIOCSMRRU = 0x4004743b + PPPIOCSMRU = 0x40047452 + PPPIOCSNPMODE = 0x4008744b + PPPIOCSPASS = 0x40107447 + PPPIOCSRASYNCMAP = 0x40047454 + PPPIOCSXASYNCMAP = 0x4020744f + PPPIOCUNBRIDGECHAN = 0x7434 + PPPIOCXFERUNIT = 0x744e + PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTRACE_SYSEMU = 0x1f + PTRACE_SYSEMU_SINGLESTEP = 0x20 + RLIMIT_AS = 0x9 + RLIMIT_MEMLOCK = 0x8 + RLIMIT_NOFILE = 0x7 + RLIMIT_NPROC = 0x6 + RLIMIT_RSS = 0x5 + RNDADDENTROPY = 0x40085203 + RNDADDTOENTCNT = 0x40045201 + RNDCLEARPOOL = 0x5206 + RNDGETENTCNT = 0x80045200 + RNDGETPOOL = 0x80085202 + RNDRESEEDCRNG = 0x5207 + RNDZAPENTCNT = 0x5204 + RTC_AIE_OFF = 0x7002 + RTC_AIE_ON = 0x7001 + RTC_ALM_READ = 0x80247008 + RTC_ALM_SET = 0x40247007 + RTC_EPOCH_READ = 0x8008700d + RTC_EPOCH_SET = 0x4008700e + RTC_IRQP_READ = 0x8008700b + RTC_IRQP_SET = 0x4008700c + RTC_PARAM_GET = 0x40187013 + RTC_PARAM_SET = 0x40187014 + 
RTC_PIE_OFF = 0x7006 + RTC_PIE_ON = 0x7005 + RTC_PLL_GET = 0x80207011 + RTC_PLL_SET = 0x40207012 + RTC_RD_TIME = 0x80247009 + RTC_SET_TIME = 0x4024700a + RTC_UIE_OFF = 0x7004 + RTC_UIE_ON = 0x7003 + RTC_VL_CLR = 0x7014 + RTC_VL_READ = 0x80047013 + RTC_WIE_OFF = 0x7010 + RTC_WIE_ON = 0x700f + RTC_WKALM_RD = 0x80287010 + RTC_WKALM_SET = 0x4028700f + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x800 + SIOCATMARK = 0x8905 + SIOCGPGRP = 0x8904 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCINQ = 0x541b + SIOCOUTQ = 0x5411 + SIOCSPGRP = 0x8902 + SOCK_CLOEXEC = 0x80000 + SOCK_DGRAM = 0x2 + SOCK_NONBLOCK = 0x800 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0x1 + SO_ACCEPTCONN = 0x1e + SO_ATTACH_BPF = 0x32 + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x6 + SO_BSDCOMPAT = 0xe + SO_BUF_LOCK = 0x48 + SO_BUSY_POLL = 0x2e + SO_BUSY_POLL_BUDGET = 0x46 + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DOMAIN = 0x27 + SO_DONTROUTE = 0x5 + SO_ERROR = 0x4 + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x9 + SO_LINGER = 0xd + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NETNS_COOKIE = 0x47 + SO_NOFCS = 0x2b + SO_OOBINLINE = 0xa + SO_PASSCRED = 0x10 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x11 + SO_PEERGROUPS = 0x3b + SO_PEERSEC = 0x1f + SO_PREFER_BUSY_POLL = 0x45 + SO_PROTOCOL = 0x26 + SO_RCVBUF = 0x8 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b + SO_RCVTIMEO = 0x14 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x14 + SO_RESERVE_MEM = 0x49 + SO_REUSEADDR = 0x2 + SO_REUSEPORT = 0xf + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x7 + SO_SNDBUFFORCE = 0x20 + SO_SNDLOWAT = 0x13 + SO_SNDTIMEO = 0x15 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x15 + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a + SO_TXTIME = 0x3d + SO_TYPE = 0x3 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TCFLSH = 0x540b + TCGETA = 0x5405 + TCGETS = 0x5401 + TCGETS2 = 0x802c542a + TCGETX = 0x5432 + TCSAFLUSH = 0x2 + TCSBRK = 0x5409 + TCSBRKP = 0x5425 + TCSETA = 0x5406 + TCSETAF = 0x5408 + TCSETAW = 0x5407 + TCSETS = 0x5402 + TCSETS2 = 0x402c542b + TCSETSF = 0x5404 + TCSETSF2 = 0x402c542d + TCSETSW = 0x5403 + TCSETSW2 = 0x402c542c + TCSETX = 0x5433 + TCSETXF = 0x5434 + TCSETXW = 0x5435 + TCXONC = 0x540a + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 + TIOCCBRK = 0x5428 + TIOCCONS = 0x541d + TIOCEXCL = 0x540c + TIOCGDEV = 0x80045432 + TIOCGETD = 0x5424 + TIOCGEXCL = 0x80045440 + TIOCGICOUNT = 0x545d + TIOCGISO7816 = 0x80285442 + TIOCGLCKTRMIOS = 0x5456 + TIOCGPGRP = 0x540f + TIOCGPKT = 0x80045438 + TIOCGPTLCK = 0x80045439 + TIOCGPTN = 0x80045430 + TIOCGPTPEER = 0x5441 + TIOCGRS485 = 0x542e + TIOCGSERIAL = 0x541e + TIOCGSID = 0x5429 + TIOCGSOFTCAR = 0x5419 + TIOCGWINSZ = 0x5413 + TIOCINQ = 0x541b + TIOCLINUX = 0x541c + TIOCMBIC = 0x5417 + TIOCMBIS = 0x5416 + TIOCMGET = 
0x5415 + TIOCMIWAIT = 0x545c + TIOCMSET = 0x5418 + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x5422 + TIOCNXCL = 0x540d + TIOCOUTQ = 0x5411 + TIOCPKT = 0x5420 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x540e + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x5423 + TIOCSIG = 0x40045436 + TIOCSISO7816 = 0xc0285443 + TIOCSLCKTRMIOS = 0x5457 + TIOCSPGRP = 0x5410 + TIOCSPTLCK = 0x40045431 + TIOCSRS485 = 0x542f + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x541a + TIOCSTI = 0x5412 + TIOCSWINSZ = 0x5414 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x100 + TUNATTACHFILTER = 0x401054d5 + TUNDETACHFILTER = 0x401054d6 + TUNGETDEVNETNS = 0x54e3 + TUNGETFEATURES = 0x800454cf + TUNGETFILTER = 0x801054db + TUNGETIFF = 0x800454d2 + TUNGETSNDBUF = 0x800454d3 + TUNGETVNETBE = 0x800454df + TUNGETVNETHDRSZ = 0x800454d7 + TUNGETVNETLE = 0x800454dd + TUNSETCARRIER = 0x400454e2 + TUNSETDEBUG = 0x400454c9 + TUNSETFILTEREBPF = 0x800454e1 + TUNSETGROUP = 0x400454ce + TUNSETIFF = 0x400454ca + TUNSETIFINDEX = 0x400454da + TUNSETLINK = 0x400454cd + TUNSETNOCSUM = 0x400454c8 + TUNSETOFFLOAD = 0x400454d0 + TUNSETOWNER = 0x400454cc + TUNSETPERSIST = 0x400454cb + TUNSETQUEUE = 0x400454d9 + TUNSETSNDBUF = 0x400454d4 + TUNSETSTEERINGEBPF = 0x800454e0 + TUNSETTXFILTER = 0x400454d1 + TUNSETVNETBE = 0x400454de + TUNSETVNETHDRSZ = 0x400454d8 + TUNSETVNETLE = 0x400454dc + UBI_IOCATT = 0x40186f40 + UBI_IOCDET = 0x40046f41 + UBI_IOCEBCH = 0x40044f02 + UBI_IOCEBER = 0x40044f01 + UBI_IOCEBISMAP = 0x80044f05 + UBI_IOCEBMAP = 0x40084f03 + UBI_IOCEBUNMAP = 0x40044f04 + UBI_IOCMKVOL = 0x40986f00 + UBI_IOCRMVOL = 0x40046f01 + UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRPEB = 0x40046f04 + UBI_IOCRSVOL = 0x400c6f02 + UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCSPEB = 0x40046f05 + UBI_IOCVOLCRBLK = 0x40804f07 + UBI_IOCVOLRMBLK = 0x4f08 + UBI_IOCVOLUP = 0x40084f00 + VDISCARD = 0xd + VEOF = 0x4 + VEOL = 0xb + VEOL2 = 0x10 + VMIN = 0x6 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WDIOC_GETBOOTSTATUS = 0x80045702 + WDIOC_GETPRETIMEOUT = 0x80045709 + WDIOC_GETSTATUS = 0x80045701 + WDIOC_GETSUPPORT = 0x80285700 + WDIOC_GETTEMP = 0x80045703 + WDIOC_GETTIMELEFT = 0x8004570a + WDIOC_GETTIMEOUT = 0x80045707 + WDIOC_KEEPALIVE = 0x80045705 + WDIOC_SETOPTIONS = 0x80045704 + WORDSIZE = 0x40 + XCASE = 0x4 + XTABS = 0x1800 + _HIDIOCGRAWNAME = 0x80804804 + _HIDIOCGRAWPHYS = 0x80404805 + _HIDIOCGRAWUNIQ = 0x80404808 +) + +// Errors +const ( + EADDRINUSE = syscall.Errno(0x62) + EADDRNOTAVAIL = syscall.Errno(0x63) + EADV = syscall.Errno(0x44) + EAFNOSUPPORT = syscall.Errno(0x61) + EALREADY = syscall.Errno(0x72) + EBADE = syscall.Errno(0x34) + EBADFD = syscall.Errno(0x4d) + EBADMSG = syscall.Errno(0x4a) + EBADR = syscall.Errno(0x35) + EBADRQC = syscall.Errno(0x38) + EBADSLT = syscall.Errno(0x39) + EBFONT = syscall.Errno(0x3b) + ECANCELED = syscall.Errno(0x7d) + ECHRNG = syscall.Errno(0x2c) + ECOMM = syscall.Errno(0x46) + ECONNABORTED = syscall.Errno(0x67) + ECONNREFUSED = syscall.Errno(0x6f) + ECONNRESET = syscall.Errno(0x68) + EDEADLK = syscall.Errno(0x23) + EDEADLOCK = syscall.Errno(0x23) + EDESTADDRREQ = syscall.Errno(0x59) + EDOTDOT = syscall.Errno(0x49) + EDQUOT = syscall.Errno(0x7a) + EHOSTDOWN = syscall.Errno(0x70) + 
EHOSTUNREACH = syscall.Errno(0x71) + EHWPOISON = syscall.Errno(0x85) + EIDRM = syscall.Errno(0x2b) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x73) + EISCONN = syscall.Errno(0x6a) + EISNAM = syscall.Errno(0x78) + EKEYEXPIRED = syscall.Errno(0x7f) + EKEYREJECTED = syscall.Errno(0x81) + EKEYREVOKED = syscall.Errno(0x80) + EL2HLT = syscall.Errno(0x33) + EL2NSYNC = syscall.Errno(0x2d) + EL3HLT = syscall.Errno(0x2e) + EL3RST = syscall.Errno(0x2f) + ELIBACC = syscall.Errno(0x4f) + ELIBBAD = syscall.Errno(0x50) + ELIBEXEC = syscall.Errno(0x53) + ELIBMAX = syscall.Errno(0x52) + ELIBSCN = syscall.Errno(0x51) + ELNRNG = syscall.Errno(0x30) + ELOOP = syscall.Errno(0x28) + EMEDIUMTYPE = syscall.Errno(0x7c) + EMSGSIZE = syscall.Errno(0x5a) + EMULTIHOP = syscall.Errno(0x48) + ENAMETOOLONG = syscall.Errno(0x24) + ENAVAIL = syscall.Errno(0x77) + ENETDOWN = syscall.Errno(0x64) + ENETRESET = syscall.Errno(0x66) + ENETUNREACH = syscall.Errno(0x65) + ENOANO = syscall.Errno(0x37) + ENOBUFS = syscall.Errno(0x69) + ENOCSI = syscall.Errno(0x32) + ENODATA = syscall.Errno(0x3d) + ENOKEY = syscall.Errno(0x7e) + ENOLCK = syscall.Errno(0x25) + ENOLINK = syscall.Errno(0x43) + ENOMEDIUM = syscall.Errno(0x7b) + ENOMSG = syscall.Errno(0x2a) + ENONET = syscall.Errno(0x40) + ENOPKG = syscall.Errno(0x41) + ENOPROTOOPT = syscall.Errno(0x5c) + ENOSR = syscall.Errno(0x3f) + ENOSTR = syscall.Errno(0x3c) + ENOSYS = syscall.Errno(0x26) + ENOTCONN = syscall.Errno(0x6b) + ENOTEMPTY = syscall.Errno(0x27) + ENOTNAM = syscall.Errno(0x76) + ENOTRECOVERABLE = syscall.Errno(0x83) + ENOTSOCK = syscall.Errno(0x58) + ENOTSUP = syscall.Errno(0x5f) + ENOTUNIQ = syscall.Errno(0x4c) + EOPNOTSUPP = syscall.Errno(0x5f) + EOVERFLOW = syscall.Errno(0x4b) + EOWNERDEAD = syscall.Errno(0x82) + EPFNOSUPPORT = syscall.Errno(0x60) + EPROTO = syscall.Errno(0x47) + EPROTONOSUPPORT = syscall.Errno(0x5d) + EPROTOTYPE = syscall.Errno(0x5b) + EREMCHG = syscall.Errno(0x4e) + EREMOTE = syscall.Errno(0x42) + EREMOTEIO = syscall.Errno(0x79) + ERESTART = syscall.Errno(0x55) + ERFKILL = syscall.Errno(0x84) + ESHUTDOWN = syscall.Errno(0x6c) + ESOCKTNOSUPPORT = syscall.Errno(0x5e) + ESRMNT = syscall.Errno(0x45) + ESTALE = syscall.Errno(0x74) + ESTRPIPE = syscall.Errno(0x56) + ETIME = syscall.Errno(0x3e) + ETIMEDOUT = syscall.Errno(0x6e) + ETOOMANYREFS = syscall.Errno(0x6d) + EUCLEAN = syscall.Errno(0x75) + EUNATCH = syscall.Errno(0x31) + EUSERS = syscall.Errno(0x57) + EXFULL = syscall.Errno(0x36) +) + +// Signals +const ( + SIGBUS = syscall.Signal(0x7) + SIGCHLD = syscall.Signal(0x11) + SIGCLD = syscall.Signal(0x11) + SIGCONT = syscall.Signal(0x12) + SIGIO = syscall.Signal(0x1d) + SIGPOLL = syscall.Signal(0x1d) + SIGPROF = syscall.Signal(0x1b) + SIGPWR = syscall.Signal(0x1e) + SIGSTKFLT = syscall.Signal(0x10) + SIGSTOP = syscall.Signal(0x13) + SIGSYS = syscall.Signal(0x1f) + SIGTSTP = syscall.Signal(0x14) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x17) + SIGUSR1 = syscall.Signal(0xa) + SIGUSR2 = syscall.Signal(0xc) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, 
"E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical result out of range"}, + {35, "EDEADLK", "resource deadlock avoided"}, + {36, "ENAMETOOLONG", "file name too long"}, + {37, "ENOLCK", "no locks available"}, + {38, "ENOSYS", "function not implemented"}, + {39, "ENOTEMPTY", "directory not empty"}, + {40, "ELOOP", "too many levels of symbolic links"}, + {42, "ENOMSG", "no message of desired type"}, + {43, "EIDRM", "identifier removed"}, + {44, "ECHRNG", "channel number out of range"}, + {45, "EL2NSYNC", "level 2 not synchronized"}, + {46, "EL3HLT", "level 3 halted"}, + {47, "EL3RST", "level 3 reset"}, + {48, "ELNRNG", "link number out of range"}, + {49, "EUNATCH", "protocol driver not attached"}, + {50, "ENOCSI", "no CSI structure available"}, + {51, "EL2HLT", "level 2 halted"}, + {52, "EBADE", "invalid exchange"}, + {53, "EBADR", "invalid request descriptor"}, + {54, "EXFULL", "exchange full"}, + {55, "ENOANO", "no anode"}, + {56, "EBADRQC", "invalid request code"}, + {57, "EBADSLT", "invalid slot"}, + {59, "EBFONT", "bad font file format"}, + {60, "ENOSTR", "device not a stream"}, + {61, "ENODATA", "no data available"}, + {62, "ETIME", "timer expired"}, + {63, "ENOSR", "out of streams resources"}, + {64, "ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + {68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", "communication error on send"}, + {71, "EPROTO", "protocol error"}, + {72, "EMULTIHOP", "multihop attempted"}, + {73, "EDOTDOT", "RFS specific error"}, + {74, "EBADMSG", "bad message"}, + {75, "EOVERFLOW", "value too large for defined data type"}, + {76, "ENOTUNIQ", "name not unique on network"}, + {77, "EBADFD", "file descriptor in bad state"}, + {78, "EREMCHG", "remote address changed"}, + {79, "ELIBACC", "can not access a needed shared library"}, + {80, "ELIBBAD", "accessing a corrupted shared library"}, + {81, "ELIBSCN", ".lib section in a.out corrupted"}, + {82, "ELIBMAX", "attempting to link in too many shared libraries"}, + {83, "ELIBEXEC", "cannot exec a shared library directly"}, + {84, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {85, "ERESTART", "interrupted system call should be restarted"}, + {86, "ESTRPIPE", "streams pipe error"}, + {87, "EUSERS", "too many users"}, + {88, "ENOTSOCK", "socket operation on non-socket"}, + {89, "EDESTADDRREQ", 
"destination address required"}, + {90, "EMSGSIZE", "message too long"}, + {91, "EPROTOTYPE", "protocol wrong type for socket"}, + {92, "ENOPROTOOPT", "protocol not available"}, + {93, "EPROTONOSUPPORT", "protocol not supported"}, + {94, "ESOCKTNOSUPPORT", "socket type not supported"}, + {95, "ENOTSUP", "operation not supported"}, + {96, "EPFNOSUPPORT", "protocol family not supported"}, + {97, "EAFNOSUPPORT", "address family not supported by protocol"}, + {98, "EADDRINUSE", "address already in use"}, + {99, "EADDRNOTAVAIL", "cannot assign requested address"}, + {100, "ENETDOWN", "network is down"}, + {101, "ENETUNREACH", "network is unreachable"}, + {102, "ENETRESET", "network dropped connection on reset"}, + {103, "ECONNABORTED", "software caused connection abort"}, + {104, "ECONNRESET", "connection reset by peer"}, + {105, "ENOBUFS", "no buffer space available"}, + {106, "EISCONN", "transport endpoint is already connected"}, + {107, "ENOTCONN", "transport endpoint is not connected"}, + {108, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {109, "ETOOMANYREFS", "too many references: cannot splice"}, + {110, "ETIMEDOUT", "connection timed out"}, + {111, "ECONNREFUSED", "connection refused"}, + {112, "EHOSTDOWN", "host is down"}, + {113, "EHOSTUNREACH", "no route to host"}, + {114, "EALREADY", "operation already in progress"}, + {115, "EINPROGRESS", "operation now in progress"}, + {116, "ESTALE", "stale file handle"}, + {117, "EUCLEAN", "structure needs cleaning"}, + {118, "ENOTNAM", "not a XENIX named type file"}, + {119, "ENAVAIL", "no XENIX semaphores available"}, + {120, "EISNAM", "is a named type file"}, + {121, "EREMOTEIO", "remote I/O error"}, + {122, "EDQUOT", "disk quota exceeded"}, + {123, "ENOMEDIUM", "no medium found"}, + {124, "EMEDIUMTYPE", "wrong medium type"}, + {125, "ECANCELED", "operation canceled"}, + {126, "ENOKEY", "required key not available"}, + {127, "EKEYEXPIRED", "key has expired"}, + {128, "EKEYREVOKED", "key has been revoked"}, + {129, "EKEYREJECTED", "key was rejected by service"}, + {130, "EOWNERDEAD", "owner died"}, + {131, "ENOTRECOVERABLE", "state not recoverable"}, + {132, "ERFKILL", "operation not possible due to RF-kill"}, + {133, "EHWPOISON", "memory page has hardware error"}, +} + +// Signal table +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, "SIGABRT", "aborted"}, + {7, "SIGBUS", "bus error"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGUSR1", "user defined signal 1"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGUSR2", "user defined signal 2"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGSTKFLT", "stack fault"}, + {17, "SIGCHLD", "child exited"}, + {18, "SIGCONT", "continued"}, + {19, "SIGSTOP", "stopped (signal)"}, + {20, "SIGTSTP", "stopped"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGURG", "urgent I/O condition"}, + {24, "SIGXCPU", "CPU time limit exceeded"}, + {25, "SIGXFSZ", "file size limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window changed"}, + {29, "SIGIO", "I/O possible"}, + {30, "SIGPWR", "power failure"}, + {31, "SIGSYS", "bad system call"}, +} diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 14d7a8439..890bc3c9b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include +// mkerrors.sh -Wall -Werror -static -I/tmp/mips/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux // +build mips,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/mips/include _const.go package unix @@ -131,6 +131,7 @@ const ( MEMGETREGIONCOUNT = 0x40044d07 MEMISLOCKED = 0x40084d17 MEMLOCK = 0x80084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc00c4d04 MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 @@ -326,6 +327,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 @@ -351,6 +353,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 99e7c4ac0..549f26ac6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include +// mkerrors.sh -Wall -Werror -static -I/tmp/mips64/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux // +build mips64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/mips64/include _const.go package unix @@ -131,6 +131,7 @@ const ( MEMGETREGIONCOUNT = 0x40044d07 MEMISLOCKED = 0x40084d17 MEMLOCK = 0x80084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 @@ -326,6 +327,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 @@ -351,6 +353,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 496364c33..e0365e32c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include +// mkerrors.sh -Wall -Werror -static -I/tmp/mips64le/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux // +build mips64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/mips64le/include _const.go package unix @@ -131,6 +131,7 @@ const ( MEMGETREGIONCOUNT = 0x40044d07 MEMISLOCKED = 0x40084d17 MEMLOCK = 0x80084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 @@ -326,6 +327,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 @@ -351,6 +353,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 3e4083085..fdccce15c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include +// mkerrors.sh -Wall -Werror -static -I/tmp/mipsle/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux // +build mipsle,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/mipsle/include _const.go package unix @@ -131,6 +131,7 @@ const ( MEMGETREGIONCOUNT = 0x40044d07 MEMISLOCKED = 0x40084d17 MEMLOCK = 0x80084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc00c4d04 MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 @@ -326,6 +327,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 @@ -351,6 +353,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x1008 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 1151a7dfa..b2205c83f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include +// mkerrors.sh -Wall -Werror -static -I/tmp/ppc/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux // +build ppc,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/ppc/include _const.go package unix @@ -131,6 +131,7 @@ const ( MEMGETREGIONCOUNT = 0x40044d07 MEMISLOCKED = 0x40084d17 MEMLOCK = 0x80084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc00c4d04 MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 @@ -381,6 +382,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 @@ -405,6 +407,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index ed17f249e..81aa5ad0f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include +// mkerrors.sh -Wall -Werror -static -I/tmp/ppc64/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux // +build ppc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/ppc64/include _const.go package unix @@ -131,6 +131,7 @@ const ( MEMGETREGIONCOUNT = 0x40044d07 MEMISLOCKED = 0x40084d17 MEMLOCK = 0x80084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 @@ -385,6 +386,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 @@ -409,6 +411,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index d84a37c1a..76807a1fd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include +// mkerrors.sh -Wall -Werror -static -I/tmp/ppc64le/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux // +build ppc64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/ppc64le/include _const.go package unix @@ -131,6 +131,7 @@ const ( MEMGETREGIONCOUNT = 0x40044d07 MEMISLOCKED = 0x40084d17 MEMLOCK = 0x80084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 @@ -385,6 +386,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 @@ -409,6 +411,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 5cafba83f..d4a5ab9e4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include +// mkerrors.sh -Wall -Werror -static -I/tmp/riscv64/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && linux // +build riscv64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/riscv64/include _const.go package unix @@ -131,6 +131,7 @@ const ( MEMGETREGIONCOUNT = 0x80044d07 MEMISLOCKED = 0x80084d17 MEMLOCK = 0x40084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 @@ -314,6 +315,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 @@ -338,6 +340,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 6d122da41..66e65db95 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include -fsigned-char +// mkerrors.sh -Wall -Werror -static -I/tmp/s390x/include -fsigned-char // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux // +build s390x,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/s390x/include -fsigned-char _const.go package unix @@ -131,6 +131,7 @@ const ( MEMGETREGIONCOUNT = 0x80044d07 MEMISLOCKED = 0x80084d17 MEMLOCK = 0x40084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x40084d0c MEMUNLOCK = 0x40084d06 @@ -389,6 +390,7 @@ const ( SO_RCVBUF = 0x8 SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 + SO_RCVMARK = 0x4b SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 @@ -413,6 +415,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x40 SO_TIMESTAMPNS_OLD = 0x23 SO_TIMESTAMP_NEW = 0x3f + SO_TXREHASH = 0x4a SO_TXTIME = 0x3d SO_TYPE = 0x3 SO_WIFI_STATUS = 0x29 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 6bd19e51d..f61925269 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/include +// mkerrors.sh -Wall -Werror -static -I/tmp/sparc64/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux // +build sparc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/sparc64/include _const.go package unix @@ -136,6 +136,7 @@ const ( MEMGETREGIONCOUNT = 0x40044d07 MEMISLOCKED = 0x40084d17 MEMLOCK = 0x80084d05 + MEMREAD = 0xc0404d1a MEMREADOOB = 0xc0104d04 MEMSETBADBLOCK = 0x80084d0c MEMUNLOCK = 0x80084d06 @@ -380,6 +381,7 @@ const ( SO_RCVBUF = 0x1002 SO_RCVBUFFORCE = 0x100b SO_RCVLOWAT = 0x800 + SO_RCVMARK = 0x54 SO_RCVTIMEO = 0x2000 SO_RCVTIMEO_NEW = 0x44 SO_RCVTIMEO_OLD = 0x2000 @@ -404,6 +406,7 @@ const ( SO_TIMESTAMPNS_NEW = 0x42 SO_TIMESTAMPNS_OLD = 0x21 SO_TIMESTAMP_NEW = 0x46 + SO_TXREHASH = 0x53 SO_TXTIME = 0x3f SO_TYPE = 0x1008 SO_WIFI_STATUS = 0x25 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go index 6d56edc05..af20e474b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -46,6 +46,7 @@ const ( AF_SNA = 0xb AF_UNIX = 0x1 AF_UNSPEC = 0x0 + ALTWERASE = 0x200 ARPHRD_ETHER = 0x1 ARPHRD_FRELAY = 0xf ARPHRD_IEEE1394 = 0x18 @@ -108,6 +109,15 @@ const ( BPF_DIRECTION_IN = 0x1 BPF_DIRECTION_OUT = 0x2 BPF_DIV = 0x30 + BPF_FILDROP_CAPTURE = 0x1 + BPF_FILDROP_DROP = 0x2 + BPF_FILDROP_PASS = 0x0 + BPF_F_DIR_IN = 0x10 + BPF_F_DIR_MASK = 0x30 + BPF_F_DIR_OUT = 0x20 + BPF_F_DIR_SHIFT = 0x4 + BPF_F_FLOWID = 0x8 + BPF_F_PRI_MASK = 0x7 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -136,6 +146,7 @@ const ( BPF_OR = 0x40 BPF_RELEASE = 0x30bb6 BPF_RET = 0x6 + BPF_RND = 0xc0 BPF_RSH = 0x70 BPF_ST = 0x2 BPF_STX = 0x3 @@ -147,6 +158,12 @@ const ( BRKINT = 0x2 CFLUSH = 0xf CLOCAL = 0x8000 + CLOCK_BOOTTIME = 0x6 + CLOCK_MONOTONIC = 0x3 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x4 + CLOCK_UPTIME = 0x5 CPUSTATES = 0x6 CP_IDLE = 0x5 CP_INTR = 0x4 @@ -170,7 +187,65 @@ const ( CTL_KERN = 0x1 CTL_MAXNAME = 0xc CTL_NET = 0x4 + DIOCADDQUEUE = 0xc100445d + DIOCADDRULE = 0xccc84404 + DIOCADDSTATE = 0xc1084425 + DIOCCHANGERULE = 0xccc8441a + DIOCCLRIFFLAG = 0xc024445a + DIOCCLRSRCNODES = 0x20004455 + DIOCCLRSTATES = 0xc0d04412 + DIOCCLRSTATUS = 0xc0244416 + DIOCGETLIMIT = 
0xc0084427 + DIOCGETQSTATS = 0xc1084460 + DIOCGETQUEUE = 0xc100445f + DIOCGETQUEUES = 0xc100445e + DIOCGETRULE = 0xccc84407 + DIOCGETRULES = 0xccc84406 + DIOCGETRULESET = 0xc444443b + DIOCGETRULESETS = 0xc444443a + DIOCGETSRCNODES = 0xc0084454 + DIOCGETSTATE = 0xc1084413 + DIOCGETSTATES = 0xc0084419 + DIOCGETSTATUS = 0xc1e84415 + DIOCGETSYNFLWATS = 0xc0084463 + DIOCGETTIMEOUT = 0xc008441e + DIOCIGETIFACES = 0xc0244457 + DIOCKILLSRCNODES = 0xc068445b + DIOCKILLSTATES = 0xc0d04429 + DIOCNATLOOK = 0xc0504417 + DIOCOSFPADD = 0xc084444f DIOCOSFPFLUSH = 0x2000444e + DIOCOSFPGET = 0xc0844450 + DIOCRADDADDRS = 0xc44c4443 + DIOCRADDTABLES = 0xc44c443d + DIOCRCLRADDRS = 0xc44c4442 + DIOCRCLRASTATS = 0xc44c4448 + DIOCRCLRTABLES = 0xc44c443c + DIOCRCLRTSTATS = 0xc44c4441 + DIOCRDELADDRS = 0xc44c4444 + DIOCRDELTABLES = 0xc44c443e + DIOCRGETADDRS = 0xc44c4446 + DIOCRGETASTATS = 0xc44c4447 + DIOCRGETTABLES = 0xc44c443f + DIOCRGETTSTATS = 0xc44c4440 + DIOCRINADEFINE = 0xc44c444d + DIOCRSETADDRS = 0xc44c4445 + DIOCRSETTFLAGS = 0xc44c444a + DIOCRTSTADDRS = 0xc44c4449 + DIOCSETDEBUG = 0xc0044418 + DIOCSETHOSTID = 0xc0044456 + DIOCSETIFFLAG = 0xc0244459 + DIOCSETLIMIT = 0xc0084428 + DIOCSETREASS = 0xc004445c + DIOCSETSTATUSIF = 0xc0244414 + DIOCSETSYNCOOKIES = 0xc0014462 + DIOCSETSYNFLWATS = 0xc0084461 + DIOCSETTIMEOUT = 0xc008441d + DIOCSTART = 0x20004401 + DIOCSTOP = 0x20004402 + DIOCXBEGIN = 0xc00c4451 + DIOCXCOMMIT = 0xc00c4452 + DIOCXROLLBACK = 0xc00c4453 DLT_ARCNET = 0x7 DLT_ATM_RFC1483 = 0xb DLT_AX25 = 0x3 @@ -186,6 +261,7 @@ const ( DLT_LOOP = 0xc DLT_MPLS = 0xdb DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b DLT_PFLOG = 0x75 DLT_PFSYNC = 0x12 DLT_PPP = 0x9 @@ -196,6 +272,23 @@ const ( DLT_RAW = 0xe DLT_SLIP = 0x8 DLT_SLIP_BSDOS = 0xf + DLT_USBPCAP = 0xf9 + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -215,6 +308,8 @@ const ( EMUL_ENABLED = 0x1 EMUL_NATIVE = 0x2 ENDRUNDISC = 0x9 + ETH64_8021_RSVD_MASK = 0xfffffffffff0 + ETH64_8021_RSVD_PREFIX = 0x180c2000000 ETHERMIN = 0x2e ETHERMTU = 0x5dc ETHERTYPE_8023 = 0x4 @@ -267,6 +362,7 @@ const ( ETHERTYPE_DN = 0x6003 ETHERTYPE_DOGFIGHT = 0x1989 ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_EAPOL = 0x888e ETHERTYPE_ECMA = 0x803 ETHERTYPE_ENCRYPT = 0x803d ETHERTYPE_ES = 0x805d @@ -298,6 +394,7 @@ const ( ETHERTYPE_LLDP = 0x88cc ETHERTYPE_LOGICRAFT = 0x8148 ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MACSEC = 0x88e5 ETHERTYPE_MATRA = 0x807a ETHERTYPE_MAX = 0xffff ETHERTYPE_MERIT = 0x807c @@ -326,15 +423,17 @@ const ( ETHERTYPE_NCD = 0x8149 ETHERTYPE_NESTAR = 0x8006 ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NHRP = 0x2001 ETHERTYPE_NOVELL = 0x8138 ETHERTYPE_NS = 0x600 ETHERTYPE_NSAT = 0x601 ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NSH = 0x984f ETHERTYPE_NTRAILER = 0x10 ETHERTYPE_OS9 = 0x7007 ETHERTYPE_OS9NET = 0x7009 ETHERTYPE_PACER = 0x80c6 - ETHERTYPE_PAE = 0x888e + ETHERTYPE_PBB = 0x88e7 ETHERTYPE_PCS = 0x4242 ETHERTYPE_PLANNING = 0x8044 ETHERTYPE_PPP = 0x880b @@ -409,28 +508,40 @@ const ( ETHER_CRC_POLY_LE = 0xedb88320 ETHER_HDR_LEN = 0xe ETHER_MAX_DIX_LEN = 0x600 + ETHER_MAX_HARDMTU_LEN = 0xff9b ETHER_MAX_LEN = 0x5ee ETHER_MIN_LEN = 0x40 ETHER_TYPE_LEN = 0x2 ETHER_VLAN_ENCAP_LEN = 0x4 EVFILT_AIO = -0x3 + EVFILT_DEVICE = -0x8 + EVFILT_EXCEPT = -0x9 EVFILT_PROC = -0x5 EVFILT_READ = -0x1 
EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0x7 + EVFILT_SYSCOUNT = 0x9 EVFILT_TIMER = -0x7 EVFILT_VNODE = -0x4 EVFILT_WRITE = -0x2 + EVL_ENCAPLEN = 0x4 + EVL_PRIO_BITS = 0xd + EVL_PRIO_MAX = 0x7 + EVL_VLID_MASK = 0xfff + EVL_VLID_MAX = 0xffe + EVL_VLID_MIN = 0x1 + EVL_VLID_NULL = 0x0 EV_ADD = 0x1 EV_CLEAR = 0x20 EV_DELETE = 0x2 EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 EV_ENABLE = 0x4 EV_EOF = 0x8000 EV_ERROR = 0x4000 EV_FLAG1 = 0x2000 EV_ONESHOT = 0x10 - EV_SYSFLAGS = 0xf000 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf800 EXTA = 0x4b00 EXTB = 0x9600 EXTPROC = 0x800 @@ -443,6 +554,7 @@ const ( F_GETFL = 0x3 F_GETLK = 0x7 F_GETOWN = 0x5 + F_ISATTY = 0xb F_OK = 0x0 F_RDLCK = 0x1 F_SETFD = 0x2 @@ -460,7 +572,6 @@ const ( IEXTEN = 0x400 IFAN_ARRIVAL = 0x0 IFAN_DEPARTURE = 0x1 - IFA_ROUTE = 0x1 IFF_ALLMULTI = 0x200 IFF_BROADCAST = 0x2 IFF_CANTCHANGE = 0x8e52 @@ -471,12 +582,12 @@ const ( IFF_LOOPBACK = 0x8 IFF_MULTICAST = 0x8000 IFF_NOARP = 0x80 - IFF_NOTRAILERS = 0x20 IFF_OACTIVE = 0x400 IFF_POINTOPOINT = 0x10 IFF_PROMISC = 0x100 IFF_RUNNING = 0x40 IFF_SIMPLEX = 0x800 + IFF_STATICARP = 0x20 IFF_UP = 0x1 IFNAMSIZ = 0x10 IFT_1822 = 0x2 @@ -605,6 +716,7 @@ const ( IFT_LINEGROUP = 0xd2 IFT_LOCALTALK = 0x2a IFT_LOOP = 0x18 + IFT_MBIM = 0xfa IFT_MEDIAMAILOVERIP = 0x8b IFT_MFSIGLINK = 0xa7 IFT_MIOX25 = 0x26 @@ -695,6 +807,7 @@ const ( IFT_VOICEOVERCABLE = 0xc6 IFT_VOICEOVERFRAMERELAY = 0x99 IFT_VOICEOVERIP = 0x68 + IFT_WIREGUARD = 0xfb IFT_X213 = 0x5d IFT_X25 = 0x5 IFT_X25DDN = 0x4 @@ -729,8 +842,6 @@ const ( IPPROTO_AH = 0x33 IPPROTO_CARP = 0x70 IPPROTO_DIVERT = 0x102 - IPPROTO_DIVERT_INIT = 0x2 - IPPROTO_DIVERT_RESP = 0x1 IPPROTO_DONE = 0x101 IPPROTO_DSTOPTS = 0x3c IPPROTO_EGP = 0x8 @@ -762,9 +873,11 @@ const ( IPPROTO_RAW = 0xff IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 IPV6_AUTH_LEVEL = 0x35 IPV6_AUTOFLOWLABEL = 0x3b IPV6_CHECKSUM = 0x1a @@ -787,6 +900,7 @@ const ( IPV6_LEAVE_GROUP = 0xd IPV6_MAXHLIM = 0xff IPV6_MAXPACKET = 0xffff + IPV6_MINHOPCOUNT = 0x41 IPV6_MMTU = 0x500 IPV6_MULTICAST_HOPS = 0xa IPV6_MULTICAST_IF = 0x9 @@ -826,12 +940,12 @@ const ( IP_DEFAULT_MULTICAST_LOOP = 0x1 IP_DEFAULT_MULTICAST_TTL = 0x1 IP_DF = 0x4000 - IP_DIVERTFL = 0x1022 IP_DROP_MEMBERSHIP = 0xd IP_ESP_NETWORK_LEVEL = 0x16 IP_ESP_TRANS_LEVEL = 0x15 IP_HDRINCL = 0x2 IP_IPCOMP_LEVEL = 0x1d + IP_IPDEFTTL = 0x25 IP_IPSECFLOWINFO = 0x24 IP_IPSEC_LOCAL_AUTH = 0x1b IP_IPSEC_LOCAL_CRED = 0x19 @@ -865,10 +979,15 @@ const ( IP_RETOPTS = 0x8 IP_RF = 0x8000 IP_RTABLE = 0x1021 + IP_SENDSRCADDR = 0x7 IP_TOS = 0x3 IP_TTL = 0x4 ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 + IUCLC = 0x1000 IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 @@ -900,10 +1019,11 @@ const ( MAP_INHERIT_COPY = 0x1 MAP_INHERIT_NONE = 0x2 MAP_INHERIT_SHARE = 0x0 - MAP_NOEXTEND = 0x100 - MAP_NORESERVE = 0x40 + MAP_INHERIT_ZERO = 0x3 + MAP_NOEXTEND = 0x0 + MAP_NORESERVE = 0x0 MAP_PRIVATE = 0x2 - MAP_RENAME = 0x20 + MAP_RENAME = 0x0 MAP_SHARED = 0x1 MAP_STACK = 0x4000 MAP_TRYFIXED = 0x0 @@ -922,6 +1042,7 @@ const ( MNT_NOATIME = 0x8000 MNT_NODEV = 0x10 MNT_NOEXEC = 0x4 + MNT_NOPERM = 0x20 MNT_NOSUID = 0x8 MNT_NOWAIT = 0x2 MNT_QUOTA = 0x2000 @@ -929,13 +1050,29 @@ const ( MNT_RELOAD = 0x40000 MNT_ROOTFS = 0x4000 MNT_SOFTDEP = 0x4000000 + MNT_STALLED = 0x100000 + MNT_SWAPPABLE = 0x200000 MNT_SYNCHRONOUS = 0x2 MNT_UPDATE = 0x10000 MNT_VISFLAGMASK = 0x400ffff MNT_WAIT = 0x1 MNT_WANTRDWR = 0x2000000 MNT_WXALLOWED = 0x800 + MOUNT_AFS = "afs" 
+ MOUNT_CD9660 = "cd9660" + MOUNT_EXT2FS = "ext2fs" + MOUNT_FFS = "ffs" + MOUNT_FUSEFS = "fuse" + MOUNT_MFS = "mfs" + MOUNT_MSDOS = "msdos" + MOUNT_NCPFS = "ncpfs" + MOUNT_NFS = "nfs" + MOUNT_NTFS = "ntfs" + MOUNT_TMPFS = "tmpfs" + MOUNT_UDF = "udf" + MOUNT_UFS = "ffs" MSG_BCAST = 0x100 + MSG_CMSG_CLOEXEC = 0x800 MSG_CTRUNC = 0x20 MSG_DONTROUTE = 0x4 MSG_DONTWAIT = 0x80 @@ -946,6 +1083,7 @@ const ( MSG_PEEK = 0x2 MSG_TRUNC = 0x10 MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x1000 MS_ASYNC = 0x1 MS_INVALIDATE = 0x4 MS_SYNC = 0x2 @@ -953,12 +1091,16 @@ const ( NET_RT_DUMP = 0x1 NET_RT_FLAGS = 0x2 NET_RT_IFLIST = 0x3 - NET_RT_MAXID = 0x6 + NET_RT_IFNAMES = 0x6 + NET_RT_MAXID = 0x8 + NET_RT_SOURCE = 0x7 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 NFDBITS = 0x20 NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 NOTE_ATTRIB = 0x8 + NOTE_CHANGE = 0x1 NOTE_CHILD = 0x4 NOTE_DELETE = 0x1 NOTE_EOF = 0x2 @@ -968,6 +1110,7 @@ const ( NOTE_FORK = 0x40000000 NOTE_LINK = 0x10 NOTE_LOWAT = 0x1 + NOTE_OOB = 0x4 NOTE_PCTRLMASK = 0xf0000000 NOTE_PDATAMASK = 0xfffff NOTE_RENAME = 0x20 @@ -977,11 +1120,13 @@ const ( NOTE_TRUNCATE = 0x80 NOTE_WRITE = 0x2 OCRNL = 0x10 + OLCUC = 0x20 ONLCR = 0x2 ONLRET = 0x80 ONOCR = 0x40 ONOEOT = 0x8 OPOST = 0x1 + OXTABS = 0x4 O_ACCMODE = 0x3 O_APPEND = 0x8 O_ASYNC = 0x40 @@ -1015,7 +1160,6 @@ const ( PROT_NONE = 0x0 PROT_READ = 0x1 PROT_WRITE = 0x2 - PT_MASK = 0x3ff000 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 RLIMIT_DATA = 0x2 @@ -1027,19 +1171,25 @@ const ( RLIMIT_STACK = 0x3 RLIM_INFINITY = 0x7fffffffffffffff RTAX_AUTHOR = 0x6 + RTAX_BFD = 0xb RTAX_BRD = 0x7 + RTAX_DNS = 0xc RTAX_DST = 0x0 RTAX_GATEWAY = 0x1 RTAX_GENMASK = 0x3 RTAX_IFA = 0x5 RTAX_IFP = 0x4 RTAX_LABEL = 0xa - RTAX_MAX = 0xb + RTAX_MAX = 0xf RTAX_NETMASK = 0x2 + RTAX_SEARCH = 0xe RTAX_SRC = 0x8 RTAX_SRCMASK = 0x9 + RTAX_STATIC = 0xd RTA_AUTHOR = 0x40 + RTA_BFD = 0x800 RTA_BRD = 0x80 + RTA_DNS = 0x1000 RTA_DST = 0x1 RTA_GATEWAY = 0x2 RTA_GENMASK = 0x8 @@ -1047,49 +1197,57 @@ const ( RTA_IFP = 0x10 RTA_LABEL = 0x400 RTA_NETMASK = 0x4 + RTA_SEARCH = 0x4000 RTA_SRC = 0x100 RTA_SRCMASK = 0x200 + RTA_STATIC = 0x2000 RTF_ANNOUNCE = 0x4000 + RTF_BFD = 0x1000000 RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_CACHED = 0x20000 RTF_CLONED = 0x10000 RTF_CLONING = 0x100 + RTF_CONNECTED = 0x800000 RTF_DONE = 0x40 RTF_DYNAMIC = 0x10 - RTF_FMASK = 0x10f808 + RTF_FMASK = 0x110fc08 RTF_GATEWAY = 0x2 RTF_HOST = 0x4 RTF_LLINFO = 0x400 - RTF_MASK = 0x80 + RTF_LOCAL = 0x200000 RTF_MODIFIED = 0x20 RTF_MPATH = 0x40000 RTF_MPLS = 0x100000 + RTF_MULTICAST = 0x200 RTF_PERMANENT_ARP = 0x2000 RTF_PROTO1 = 0x8000 RTF_PROTO2 = 0x4000 RTF_PROTO3 = 0x2000 RTF_REJECT = 0x8 - RTF_SOURCE = 0x20000 RTF_STATIC = 0x800 - RTF_TUNNEL = 0x100000 RTF_UP = 0x1 RTF_USETRAILERS = 0x8000 - RTF_XRESOLVE = 0x200 + RTM_80211INFO = 0x15 RTM_ADD = 0x1 + RTM_BFD = 0x12 RTM_CHANGE = 0x3 + RTM_CHGADDRATTR = 0x14 RTM_DELADDR = 0xd RTM_DELETE = 0x2 RTM_DESYNC = 0x10 RTM_GET = 0x4 RTM_IFANNOUNCE = 0xf RTM_IFINFO = 0xe - RTM_LOCK = 0x8 + RTM_INVALIDATE = 0x11 RTM_LOSING = 0x5 RTM_MAXSIZE = 0x800 RTM_MISS = 0x7 RTM_NEWADDR = 0xc + RTM_PROPOSAL = 0x13 RTM_REDIRECT = 0x6 RTM_RESOLVE = 0xb - RTM_RTTUNIT = 0xf4240 + RTM_SOURCE = 0x16 RTM_VERSION = 0x5 RTV_EXPIRE = 0x4 RTV_HOPCOUNT = 0x2 @@ -1099,67 +1257,74 @@ const ( RTV_RTTVAR = 0x80 RTV_SPIPE = 0x10 RTV_SSTHRESH = 0x20 + RT_TABLEID_BITS = 0x8 + RT_TABLEID_MASK = 0xff RT_TABLEID_MAX = 0xff RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x4 + SEEK_CUR = 0x1 + SEEK_END = 0x2 + SEEK_SET = 0x0 
SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 SIOCADDMULTI = 0x80206931 SIOCAIFADDR = 0x8040691a SIOCAIFGROUP = 0x80246987 - SIOCALIFADDR = 0x8218691c SIOCATMARK = 0x40047307 - SIOCBRDGADD = 0x8054693c - SIOCBRDGADDS = 0x80546941 - SIOCBRDGARL = 0x806e694d + SIOCBRDGADD = 0x805c693c + SIOCBRDGADDL = 0x805c6949 + SIOCBRDGADDS = 0x805c6941 + SIOCBRDGARL = 0x808c694d SIOCBRDGDADDR = 0x81286947 - SIOCBRDGDEL = 0x8054693d - SIOCBRDGDELS = 0x80546942 - SIOCBRDGFLUSH = 0x80546948 - SIOCBRDGFRL = 0x806e694e + SIOCBRDGDEL = 0x805c693d + SIOCBRDGDELS = 0x805c6942 + SIOCBRDGFLUSH = 0x805c6948 + SIOCBRDGFRL = 0x808c694e SIOCBRDGGCACHE = 0xc0146941 SIOCBRDGGFD = 0xc0146952 SIOCBRDGGHT = 0xc0146951 - SIOCBRDGGIFFLGS = 0xc054693e + SIOCBRDGGIFFLGS = 0xc05c693e SIOCBRDGGMA = 0xc0146953 SIOCBRDGGPARAM = 0xc03c6958 SIOCBRDGGPRI = 0xc0146950 SIOCBRDGGRL = 0xc028694f - SIOCBRDGGSIFS = 0xc054693c SIOCBRDGGTO = 0xc0146946 - SIOCBRDGIFS = 0xc0546942 + SIOCBRDGIFS = 0xc05c6942 SIOCBRDGRTS = 0xc0186943 SIOCBRDGSADDR = 0xc1286944 SIOCBRDGSCACHE = 0x80146940 SIOCBRDGSFD = 0x80146952 SIOCBRDGSHT = 0x80146951 - SIOCBRDGSIFCOST = 0x80546955 - SIOCBRDGSIFFLGS = 0x8054693f - SIOCBRDGSIFPRIO = 0x80546954 + SIOCBRDGSIFCOST = 0x805c6955 + SIOCBRDGSIFFLGS = 0x805c693f + SIOCBRDGSIFPRIO = 0x805c6954 + SIOCBRDGSIFPROT = 0x805c694a SIOCBRDGSMA = 0x80146953 SIOCBRDGSPRI = 0x80146950 SIOCBRDGSPROTO = 0x8014695a SIOCBRDGSTO = 0x80146945 SIOCBRDGSTXHC = 0x80146959 + SIOCDELLABEL = 0x80206997 SIOCDELMULTI = 0x80206932 SIOCDIFADDR = 0x80206919 SIOCDIFGROUP = 0x80246989 + SIOCDIFPARENT = 0x802069b4 SIOCDIFPHYADDR = 0x80206949 - SIOCDLIFADDR = 0x8218691e + SIOCDPWE3NEIGHBOR = 0x802069de + SIOCDVNETID = 0x802069af SIOCGETKALIVE = 0xc01869a4 SIOCGETLABEL = 0x8020699a + SIOCGETMPWCFG = 0xc02069ae SIOCGETPFLOW = 0xc02069fe SIOCGETPFSYNC = 0xc02069f8 SIOCGETSGCNT = 0xc0147534 SIOCGETVIFCNT = 0xc0147533 SIOCGETVLAN = 0xc0206990 - SIOCGHIWAT = 0x40047301 SIOCGIFADDR = 0xc0206921 - SIOCGIFASYNCMAP = 0xc020697c SIOCGIFBRDADDR = 0xc0206923 SIOCGIFCONF = 0xc0086924 SIOCGIFDATA = 0xc020691b @@ -1168,40 +1333,53 @@ const ( SIOCGIFFLAGS = 0xc0206911 SIOCGIFGATTR = 0xc024698b SIOCGIFGENERIC = 0xc020693a + SIOCGIFGLIST = 0xc024698d SIOCGIFGMEMB = 0xc024698a SIOCGIFGROUP = 0xc0246988 SIOCGIFHARDMTU = 0xc02069a5 - SIOCGIFMEDIA = 0xc0286936 + SIOCGIFLLPRIO = 0xc02069b6 + SIOCGIFMEDIA = 0xc0386938 SIOCGIFMETRIC = 0xc0206917 SIOCGIFMTU = 0xc020697e SIOCGIFNETMASK = 0xc0206925 - SIOCGIFPDSTADDR = 0xc0206948 + SIOCGIFPAIR = 0xc02069b1 + SIOCGIFPARENT = 0xc02069b3 SIOCGIFPRIORITY = 0xc020699c - SIOCGIFPSRCADDR = 0xc0206947 SIOCGIFRDOMAIN = 0xc02069a0 SIOCGIFRTLABEL = 0xc0206983 - SIOCGIFTIMESLOT = 0xc0206986 + SIOCGIFRXR = 0x802069aa + SIOCGIFSFFPAGE = 0xc1126939 SIOCGIFXFLAGS = 0xc020699e - SIOCGLIFADDR = 0xc218691d SIOCGLIFPHYADDR = 0xc218694b + SIOCGLIFPHYDF = 0xc02069c2 + SIOCGLIFPHYECN = 0xc02069c8 SIOCGLIFPHYRTABLE = 0xc02069a2 SIOCGLIFPHYTTL = 0xc02069a9 - SIOCGLOWAT = 0x40047303 SIOCGPGRP = 0x40047309 + SIOCGPWE3 = 0xc0206998 + SIOCGPWE3CTRLWORD = 0xc02069dc + SIOCGPWE3FAT = 0xc02069dd + SIOCGPWE3NEIGHBOR = 0xc21869de + SIOCGRXHPRIO = 0xc02069db SIOCGSPPPPARAMS = 0xc0206994 + SIOCGTXHPRIO = 0xc02069c6 + SIOCGUMBINFO = 0xc02069be + SIOCGUMBPARAM = 0xc02069c0 SIOCGVH = 0xc02069f6 + SIOCGVNETFLOWID = 0xc02069c4 SIOCGVNETID = 0xc02069a7 + SIOCIFAFATTACH = 0x801169ab + SIOCIFAFDETACH = 0x801169ac SIOCIFCREATE = 0x8020697a SIOCIFDESTROY = 0x80206979 SIOCIFGCLONERS = 0xc00c6978 SIOCSETKALIVE = 0x801869a3 SIOCSETLABEL = 0x80206999 + SIOCSETMPWCFG = 0x802069ad 
SIOCSETPFLOW = 0x802069fd SIOCSETPFSYNC = 0x802069f7 SIOCSETVLAN = 0x8020698f - SIOCSHIWAT = 0x80047300 SIOCSIFADDR = 0x8020690c - SIOCSIFASYNCMAP = 0x8020697d SIOCSIFBRDADDR = 0x80206913 SIOCSIFDESCR = 0x80206980 SIOCSIFDSTADDR = 0x8020690e @@ -1209,25 +1387,37 @@ const ( SIOCSIFGATTR = 0x8024698c SIOCSIFGENERIC = 0x80206939 SIOCSIFLLADDR = 0x8020691f - SIOCSIFMEDIA = 0xc0206935 + SIOCSIFLLPRIO = 0x802069b5 + SIOCSIFMEDIA = 0xc0206937 SIOCSIFMETRIC = 0x80206918 SIOCSIFMTU = 0x8020697f SIOCSIFNETMASK = 0x80206916 - SIOCSIFPHYADDR = 0x80406946 + SIOCSIFPAIR = 0x802069b0 + SIOCSIFPARENT = 0x802069b2 SIOCSIFPRIORITY = 0x8020699b SIOCSIFRDOMAIN = 0x8020699f SIOCSIFRTLABEL = 0x80206982 - SIOCSIFTIMESLOT = 0x80206985 SIOCSIFXFLAGS = 0x8020699d SIOCSLIFPHYADDR = 0x8218694a + SIOCSLIFPHYDF = 0x802069c1 + SIOCSLIFPHYECN = 0x802069c7 SIOCSLIFPHYRTABLE = 0x802069a1 SIOCSLIFPHYTTL = 0x802069a8 - SIOCSLOWAT = 0x80047302 SIOCSPGRP = 0x80047308 + SIOCSPWE3CTRLWORD = 0x802069dc + SIOCSPWE3FAT = 0x802069dd + SIOCSPWE3NEIGHBOR = 0x821869de + SIOCSRXHPRIO = 0x802069db SIOCSSPPPPARAMS = 0x80206993 + SIOCSTXHPRIO = 0x802069c5 + SIOCSUMBPARAM = 0x802069bf SIOCSVH = 0xc02069f5 + SIOCSVNETFLOWID = 0x802069c3 SIOCSVNETID = 0x802069a6 + SOCK_CLOEXEC = 0x8000 SOCK_DGRAM = 0x2 + SOCK_DNS = 0x1000 + SOCK_NONBLOCK = 0x4000 SOCK_RAW = 0x3 SOCK_RDM = 0x4 SOCK_SEQPACKET = 0x5 @@ -1238,6 +1428,7 @@ const ( SO_BINDANY = 0x1000 SO_BROADCAST = 0x20 SO_DEBUG = 0x1 + SO_DOMAIN = 0x1024 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 SO_KEEPALIVE = 0x8 @@ -1245,6 +1436,7 @@ const ( SO_NETPROC = 0x1020 SO_OOBINLINE = 0x100 SO_PEERCRED = 0x1022 + SO_PROTOCOL = 0x1025 SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 @@ -1258,6 +1450,7 @@ const ( SO_TIMESTAMP = 0x800 SO_TYPE = 0x1008 SO_USELOOPBACK = 0x40 + SO_ZEROIZE = 0x2000 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 @@ -1287,9 +1480,24 @@ const ( S_IXOTH = 0x1 S_IXUSR = 0x40 TCIFLUSH = 0x1 + TCIOFF = 0x3 TCIOFLUSH = 0x3 + TCION = 0x4 TCOFLUSH = 0x2 - TCP_MAXBURST = 0x4 + TCOOFF = 0x1 + TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_HDR = 0x1010500 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SACK_PERMIT_HDR = 0x1010402 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_TSTAMP_HDR = 0x101080a + TCPOPT_WINDOW = 0x3 + TCP_INFO = 0x9 TCP_MAXSEG = 0x2 TCP_MAXWIN = 0xffff TCP_MAX_SACK = 0x3 @@ -1298,11 +1506,15 @@ const ( TCP_MSS = 0x200 TCP_NODELAY = 0x1 TCP_NOPUSH = 0x10 - TCP_NSTATES = 0xb + TCP_SACKHOLE_LIMIT = 0x80 TCP_SACK_ENABLE = 0x8 TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 TIOCCBRK = 0x2000747a TIOCCDTR = 0x20007478 + TIOCCHKVERAUTH = 0x2000741e + TIOCCLRVERAUTH = 0x2000741d TIOCCONS = 0x80047462 TIOCDRAIN = 0x2000745e TIOCEXCL = 0x2000740d @@ -1357,17 +1569,21 @@ const ( TIOCSETAF = 0x802c7416 TIOCSETAW = 0x802c7415 TIOCSETD = 0x8004741b + TIOCSETVERAUTH = 0x8004741c TIOCSFLAGS = 0x8004745c TIOCSIG = 0x8004745f TIOCSPGRP = 0x80047476 TIOCSTART = 0x2000746e - TIOCSTAT = 0x80047465 - TIOCSTI = 0x80017472 + TIOCSTAT = 0x20007465 TIOCSTOP = 0x2000746f TIOCSTSTAMP = 0x8008745a TIOCSWINSZ = 0x80087467 TIOCUCNTL = 0x80047466 + TIOCUCNTL_CBRK = 0x7a + TIOCUCNTL_SBRK = 0x7b TOSTOP = 0x400000 + UTIME_NOW = -0x2 + UTIME_OMIT = -0x1 VDISCARD = 0xf VDSUSP = 0xb VEOF = 0x0 @@ -1378,6 +1594,19 @@ const ( VKILL = 0x5 VLNEXT = 0xe VMIN = 0x10 + VM_ANONMIN = 0x7 + VM_LOADAVG = 0x2 + VM_MALLOC_CONF = 0xc + VM_MAXID = 0xd + VM_MAXSLP = 0xa + VM_METER = 0x1 + VM_NKMEMPAGES = 0x6 + VM_PSSTRINGS = 0x3 + 
VM_SWAPENCRYPT = 0x5 + VM_USPACE = 0xb + VM_UVMEXP = 0x4 + VM_VNODEMIN = 0x9 + VM_VTEXTMIN = 0x8 VQUIT = 0x9 VREPRINT = 0x6 VSTART = 0xc @@ -1390,8 +1619,8 @@ const ( WCONTINUED = 0x8 WCOREFLAG = 0x80 WNOHANG = 0x1 - WSTOPPED = 0x7f WUNTRACED = 0x2 + XCASE = 0x1000000 ) // Errors @@ -1405,6 +1634,7 @@ const ( EALREADY = syscall.Errno(0x25) EAUTH = syscall.Errno(0x50) EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x5c) EBADRPC = syscall.Errno(0x48) EBUSY = syscall.Errno(0x10) ECANCELED = syscall.Errno(0x58) @@ -1431,7 +1661,7 @@ const ( EIPSEC = syscall.Errno(0x52) EISCONN = syscall.Errno(0x38) EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x5b) + ELAST = syscall.Errno(0x5f) ELOOP = syscall.Errno(0x3e) EMEDIUMTYPE = syscall.Errno(0x56) EMFILE = syscall.Errno(0x18) @@ -1459,12 +1689,14 @@ const ( ENOTCONN = syscall.Errno(0x39) ENOTDIR = syscall.Errno(0x14) ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x5d) ENOTSOCK = syscall.Errno(0x26) ENOTSUP = syscall.Errno(0x5b) ENOTTY = syscall.Errno(0x19) ENXIO = syscall.Errno(0x6) EOPNOTSUPP = syscall.Errno(0x2d) EOVERFLOW = syscall.Errno(0x57) + EOWNERDEAD = syscall.Errno(0x5e) EPERM = syscall.Errno(0x1) EPFNOSUPPORT = syscall.Errno(0x2e) EPIPE = syscall.Errno(0x20) @@ -1472,6 +1704,7 @@ const ( EPROCUNAVAIL = syscall.Errno(0x4c) EPROGMISMATCH = syscall.Errno(0x4b) EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5f) EPROTONOSUPPORT = syscall.Errno(0x2b) EPROTOTYPE = syscall.Errno(0x29) ERANGE = syscall.Errno(0x22) @@ -1568,7 +1801,7 @@ var errorList = [...]struct { {32, "EPIPE", "broken pipe"}, {33, "EDOM", "numerical argument out of domain"}, {34, "ERANGE", "result too large"}, - {35, "EWOULDBLOCK", "resource temporarily unavailable"}, + {35, "EAGAIN", "resource temporarily unavailable"}, {36, "EINPROGRESS", "operation now in progress"}, {37, "EALREADY", "operation already in progress"}, {38, "ENOTSOCK", "socket operation on non-socket"}, @@ -1624,7 +1857,11 @@ var errorList = [...]struct { {88, "ECANCELED", "operation canceled"}, {89, "EIDRM", "identifier removed"}, {90, "ENOMSG", "no message of desired type"}, - {91, "ELAST", "not supported"}, + {91, "ENOTSUP", "not supported"}, + {92, "EBADMSG", "bad message"}, + {93, "ENOTRECOVERABLE", "state not recoverable"}, + {94, "EOWNERDEAD", "previous owner died"}, + {95, "ELAST", "protocol error"}, } // Signal table @@ -1638,7 +1875,7 @@ var signalList = [...]struct { {3, "SIGQUIT", "quit"}, {4, "SIGILL", "illegal instruction"}, {5, "SIGTRAP", "trace/BPT trap"}, - {6, "SIGABRT", "abort trap"}, + {6, "SIGIOT", "abort trap"}, {7, "SIGEMT", "EMT trap"}, {8, "SIGFPE", "floating point exception"}, {9, "SIGKILL", "killed"}, @@ -1665,4 +1902,5 @@ var signalList = [...]struct { {30, "SIGUSR1", "user defined signal 1"}, {31, "SIGUSR2", "user defined signal 2"}, {32, "SIGTHR", "thread AST"}, + {28672, "SIGSTKSZ", "unknown signal"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go index 25cb60948..6015fcb2b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go @@ -109,6 +109,15 @@ const ( BPF_DIRECTION_IN = 0x1 BPF_DIRECTION_OUT = 0x2 BPF_DIV = 0x30 + BPF_FILDROP_CAPTURE = 0x1 + BPF_FILDROP_DROP = 0x2 + BPF_FILDROP_PASS = 0x0 + BPF_F_DIR_IN = 0x10 + BPF_F_DIR_MASK = 0x30 + BPF_F_DIR_OUT = 0x20 + BPF_F_DIR_SHIFT = 0x4 + BPF_F_FLOWID = 0x8 + BPF_F_PRI_MASK = 0x7 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -137,6 +146,7 @@ 
const ( BPF_OR = 0x40 BPF_RELEASE = 0x30bb6 BPF_RET = 0x6 + BPF_RND = 0xc0 BPF_RSH = 0x70 BPF_ST = 0x2 BPF_STX = 0x3 @@ -177,7 +187,65 @@ const ( CTL_KERN = 0x1 CTL_MAXNAME = 0xc CTL_NET = 0x4 + DIOCADDQUEUE = 0xc110445d + DIOCADDRULE = 0xcd604404 + DIOCADDSTATE = 0xc1084425 + DIOCCHANGERULE = 0xcd60441a + DIOCCLRIFFLAG = 0xc028445a + DIOCCLRSRCNODES = 0x20004455 + DIOCCLRSTATES = 0xc0e04412 + DIOCCLRSTATUS = 0xc0284416 + DIOCGETLIMIT = 0xc0084427 + DIOCGETQSTATS = 0xc1204460 + DIOCGETQUEUE = 0xc110445f + DIOCGETQUEUES = 0xc110445e + DIOCGETRULE = 0xcd604407 + DIOCGETRULES = 0xcd604406 + DIOCGETRULESET = 0xc444443b + DIOCGETRULESETS = 0xc444443a + DIOCGETSRCNODES = 0xc0104454 + DIOCGETSTATE = 0xc1084413 + DIOCGETSTATES = 0xc0104419 + DIOCGETSTATUS = 0xc1e84415 + DIOCGETSYNFLWATS = 0xc0084463 + DIOCGETTIMEOUT = 0xc008441e + DIOCIGETIFACES = 0xc0284457 + DIOCKILLSRCNODES = 0xc080445b + DIOCKILLSTATES = 0xc0e04429 + DIOCNATLOOK = 0xc0504417 + DIOCOSFPADD = 0xc088444f DIOCOSFPFLUSH = 0x2000444e + DIOCOSFPGET = 0xc0884450 + DIOCRADDADDRS = 0xc4504443 + DIOCRADDTABLES = 0xc450443d + DIOCRCLRADDRS = 0xc4504442 + DIOCRCLRASTATS = 0xc4504448 + DIOCRCLRTABLES = 0xc450443c + DIOCRCLRTSTATS = 0xc4504441 + DIOCRDELADDRS = 0xc4504444 + DIOCRDELTABLES = 0xc450443e + DIOCRGETADDRS = 0xc4504446 + DIOCRGETASTATS = 0xc4504447 + DIOCRGETTABLES = 0xc450443f + DIOCRGETTSTATS = 0xc4504440 + DIOCRINADEFINE = 0xc450444d + DIOCRSETADDRS = 0xc4504445 + DIOCRSETTFLAGS = 0xc450444a + DIOCRTSTADDRS = 0xc4504449 + DIOCSETDEBUG = 0xc0044418 + DIOCSETHOSTID = 0xc0044456 + DIOCSETIFFLAG = 0xc0284459 + DIOCSETLIMIT = 0xc0084428 + DIOCSETREASS = 0xc004445c + DIOCSETSTATUSIF = 0xc0284414 + DIOCSETSYNCOOKIES = 0xc0014462 + DIOCSETSYNFLWATS = 0xc0084461 + DIOCSETTIMEOUT = 0xc008441d + DIOCSTART = 0x20004401 + DIOCSTOP = 0x20004402 + DIOCXBEGIN = 0xc0104451 + DIOCXCOMMIT = 0xc0104452 + DIOCXROLLBACK = 0xc0104453 DLT_ARCNET = 0x7 DLT_ATM_RFC1483 = 0xb DLT_AX25 = 0x3 @@ -240,6 +308,8 @@ const ( EMUL_ENABLED = 0x1 EMUL_NATIVE = 0x2 ENDRUNDISC = 0x9 + ETH64_8021_RSVD_MASK = 0xfffffffffff0 + ETH64_8021_RSVD_PREFIX = 0x180c2000000 ETHERMIN = 0x2e ETHERMTU = 0x5dc ETHERTYPE_8023 = 0x4 @@ -292,6 +362,7 @@ const ( ETHERTYPE_DN = 0x6003 ETHERTYPE_DOGFIGHT = 0x1989 ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_EAPOL = 0x888e ETHERTYPE_ECMA = 0x803 ETHERTYPE_ENCRYPT = 0x803d ETHERTYPE_ES = 0x805d @@ -323,6 +394,7 @@ const ( ETHERTYPE_LLDP = 0x88cc ETHERTYPE_LOGICRAFT = 0x8148 ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MACSEC = 0x88e5 ETHERTYPE_MATRA = 0x807a ETHERTYPE_MAX = 0xffff ETHERTYPE_MERIT = 0x807c @@ -351,15 +423,17 @@ const ( ETHERTYPE_NCD = 0x8149 ETHERTYPE_NESTAR = 0x8006 ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NHRP = 0x2001 ETHERTYPE_NOVELL = 0x8138 ETHERTYPE_NS = 0x600 ETHERTYPE_NSAT = 0x601 ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NSH = 0x984f ETHERTYPE_NTRAILER = 0x10 ETHERTYPE_OS9 = 0x7007 ETHERTYPE_OS9NET = 0x7009 ETHERTYPE_PACER = 0x80c6 - ETHERTYPE_PAE = 0x888e + ETHERTYPE_PBB = 0x88e7 ETHERTYPE_PCS = 0x4242 ETHERTYPE_PLANNING = 0x8044 ETHERTYPE_PPP = 0x880b @@ -441,10 +515,11 @@ const ( ETHER_VLAN_ENCAP_LEN = 0x4 EVFILT_AIO = -0x3 EVFILT_DEVICE = -0x8 + EVFILT_EXCEPT = -0x9 EVFILT_PROC = -0x5 EVFILT_READ = -0x1 EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0x8 + EVFILT_SYSCOUNT = 0x9 EVFILT_TIMER = -0x7 EVFILT_VNODE = -0x4 EVFILT_WRITE = -0x2 @@ -466,7 +541,7 @@ const ( EV_FLAG1 = 0x2000 EV_ONESHOT = 0x10 EV_RECEIPT = 0x40 - EV_SYSFLAGS = 0xf000 + EV_SYSFLAGS = 0xf800 EXTA = 0x4b00 EXTB = 0x9600 EXTPROC = 0x800 @@ -732,6 +807,7 @@ const ( 
IFT_VOICEOVERCABLE = 0xc6 IFT_VOICEOVERFRAMERELAY = 0x99 IFT_VOICEOVERIP = 0x68 + IFT_WIREGUARD = 0xfb IFT_X213 = 0x5d IFT_X25 = 0x5 IFT_X25DDN = 0x4 @@ -797,9 +873,11 @@ const ( IPPROTO_RAW = 0xff IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 IPV6_AUTH_LEVEL = 0x35 IPV6_AUTOFLOWLABEL = 0x3b IPV6_CHECKSUM = 0x1a @@ -906,6 +984,9 @@ const ( IP_TTL = 0x4 ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 IUCLC = 0x1000 IXANY = 0x800 IXOFF = 0x400 @@ -970,12 +1051,26 @@ const ( MNT_ROOTFS = 0x4000 MNT_SOFTDEP = 0x4000000 MNT_STALLED = 0x100000 + MNT_SWAPPABLE = 0x200000 MNT_SYNCHRONOUS = 0x2 MNT_UPDATE = 0x10000 MNT_VISFLAGMASK = 0x400ffff MNT_WAIT = 0x1 MNT_WANTRDWR = 0x2000000 MNT_WXALLOWED = 0x800 + MOUNT_AFS = "afs" + MOUNT_CD9660 = "cd9660" + MOUNT_EXT2FS = "ext2fs" + MOUNT_FFS = "ffs" + MOUNT_FUSEFS = "fuse" + MOUNT_MFS = "mfs" + MOUNT_MSDOS = "msdos" + MOUNT_NCPFS = "ncpfs" + MOUNT_NFS = "nfs" + MOUNT_NTFS = "ntfs" + MOUNT_TMPFS = "tmpfs" + MOUNT_UDF = "udf" + MOUNT_UFS = "ffs" MSG_BCAST = 0x100 MSG_CMSG_CLOEXEC = 0x800 MSG_CTRUNC = 0x20 @@ -988,6 +1083,7 @@ const ( MSG_PEEK = 0x2 MSG_TRUNC = 0x10 MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x1000 MS_ASYNC = 0x1 MS_INVALIDATE = 0x4 MS_SYNC = 0x2 @@ -996,7 +1092,8 @@ const ( NET_RT_FLAGS = 0x2 NET_RT_IFLIST = 0x3 NET_RT_IFNAMES = 0x6 - NET_RT_MAXID = 0x7 + NET_RT_MAXID = 0x8 + NET_RT_SOURCE = 0x7 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 NFDBITS = 0x20 @@ -1013,6 +1110,7 @@ const ( NOTE_FORK = 0x40000000 NOTE_LINK = 0x10 NOTE_LOWAT = 0x1 + NOTE_OOB = 0x4 NOTE_PCTRLMASK = 0xf0000000 NOTE_PDATAMASK = 0xfffff NOTE_RENAME = 0x20 @@ -1130,9 +1228,11 @@ const ( RTF_STATIC = 0x800 RTF_UP = 0x1 RTF_USETRAILERS = 0x8000 + RTM_80211INFO = 0x15 RTM_ADD = 0x1 RTM_BFD = 0x12 RTM_CHANGE = 0x3 + RTM_CHGADDRATTR = 0x14 RTM_DELADDR = 0xd RTM_DELETE = 0x2 RTM_DESYNC = 0x10 @@ -1140,7 +1240,6 @@ const ( RTM_IFANNOUNCE = 0xf RTM_IFINFO = 0xe RTM_INVALIDATE = 0x11 - RTM_LOCK = 0x8 RTM_LOSING = 0x5 RTM_MAXSIZE = 0x800 RTM_MISS = 0x7 @@ -1148,7 +1247,7 @@ const ( RTM_PROPOSAL = 0x13 RTM_REDIRECT = 0x6 RTM_RESOLVE = 0xb - RTM_RTTUNIT = 0xf4240 + RTM_SOURCE = 0x16 RTM_VERSION = 0x5 RTV_EXPIRE = 0x4 RTV_HOPCOUNT = 0x2 @@ -1166,6 +1265,9 @@ const ( RUSAGE_THREAD = 0x1 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x4 + SEEK_CUR = 0x1 + SEEK_END = 0x2 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1182,35 +1284,37 @@ const ( SIOCBRDGDELS = 0x80606942 SIOCBRDGFLUSH = 0x80606948 SIOCBRDGFRL = 0x808c694e - SIOCBRDGGCACHE = 0xc0186941 - SIOCBRDGGFD = 0xc0186952 - SIOCBRDGGHT = 0xc0186951 + SIOCBRDGGCACHE = 0xc0146941 + SIOCBRDGGFD = 0xc0146952 + SIOCBRDGGHT = 0xc0146951 SIOCBRDGGIFFLGS = 0xc060693e - SIOCBRDGGMA = 0xc0186953 + SIOCBRDGGMA = 0xc0146953 SIOCBRDGGPARAM = 0xc0406958 - SIOCBRDGGPRI = 0xc0186950 + SIOCBRDGGPRI = 0xc0146950 SIOCBRDGGRL = 0xc030694f - SIOCBRDGGTO = 0xc0186946 + SIOCBRDGGTO = 0xc0146946 SIOCBRDGIFS = 0xc0606942 SIOCBRDGRTS = 0xc0206943 SIOCBRDGSADDR = 0xc1286944 - SIOCBRDGSCACHE = 0x80186940 - SIOCBRDGSFD = 0x80186952 - SIOCBRDGSHT = 0x80186951 + SIOCBRDGSCACHE = 0x80146940 + SIOCBRDGSFD = 0x80146952 + SIOCBRDGSHT = 0x80146951 SIOCBRDGSIFCOST = 0x80606955 SIOCBRDGSIFFLGS = 0x8060693f SIOCBRDGSIFPRIO = 0x80606954 SIOCBRDGSIFPROT = 0x8060694a - SIOCBRDGSMA = 0x80186953 - SIOCBRDGSPRI = 0x80186950 - SIOCBRDGSPROTO = 0x8018695a - SIOCBRDGSTO = 0x80186945 - SIOCBRDGSTXHC = 0x80186959 + SIOCBRDGSMA = 0x80146953 + 
SIOCBRDGSPRI = 0x80146950 + SIOCBRDGSPROTO = 0x8014695a + SIOCBRDGSTO = 0x80146945 + SIOCBRDGSTXHC = 0x80146959 + SIOCDELLABEL = 0x80206997 SIOCDELMULTI = 0x80206932 SIOCDIFADDR = 0x80206919 SIOCDIFGROUP = 0x80286989 SIOCDIFPARENT = 0x802069b4 SIOCDIFPHYADDR = 0x80206949 + SIOCDPWE3NEIGHBOR = 0x802069de SIOCDVNETID = 0x802069af SIOCGETKALIVE = 0xc01869a4 SIOCGETLABEL = 0x8020699a @@ -1229,6 +1333,7 @@ const ( SIOCGIFFLAGS = 0xc0206911 SIOCGIFGATTR = 0xc028698b SIOCGIFGENERIC = 0xc020693a + SIOCGIFGLIST = 0xc028698d SIOCGIFGMEMB = 0xc028698a SIOCGIFGROUP = 0xc0286988 SIOCGIFHARDMTU = 0xc02069a5 @@ -1243,13 +1348,21 @@ const ( SIOCGIFRDOMAIN = 0xc02069a0 SIOCGIFRTLABEL = 0xc0206983 SIOCGIFRXR = 0x802069aa + SIOCGIFSFFPAGE = 0xc1126939 SIOCGIFXFLAGS = 0xc020699e SIOCGLIFPHYADDR = 0xc218694b SIOCGLIFPHYDF = 0xc02069c2 + SIOCGLIFPHYECN = 0xc02069c8 SIOCGLIFPHYRTABLE = 0xc02069a2 SIOCGLIFPHYTTL = 0xc02069a9 SIOCGPGRP = 0x40047309 + SIOCGPWE3 = 0xc0206998 + SIOCGPWE3CTRLWORD = 0xc02069dc + SIOCGPWE3FAT = 0xc02069dd + SIOCGPWE3NEIGHBOR = 0xc21869de + SIOCGRXHPRIO = 0xc02069db SIOCGSPPPPARAMS = 0xc0206994 + SIOCGTXHPRIO = 0xc02069c6 SIOCGUMBINFO = 0xc02069be SIOCGUMBPARAM = 0xc02069c0 SIOCGVH = 0xc02069f6 @@ -1287,19 +1400,20 @@ const ( SIOCSIFXFLAGS = 0x8020699d SIOCSLIFPHYADDR = 0x8218694a SIOCSLIFPHYDF = 0x802069c1 + SIOCSLIFPHYECN = 0x802069c7 SIOCSLIFPHYRTABLE = 0x802069a1 SIOCSLIFPHYTTL = 0x802069a8 SIOCSPGRP = 0x80047308 + SIOCSPWE3CTRLWORD = 0x802069dc + SIOCSPWE3FAT = 0x802069dd + SIOCSPWE3NEIGHBOR = 0x821869de + SIOCSRXHPRIO = 0x802069db SIOCSSPPPPARAMS = 0x80206993 + SIOCSTXHPRIO = 0x802069c5 SIOCSUMBPARAM = 0x802069bf SIOCSVH = 0xc02069f5 SIOCSVNETFLOWID = 0x802069c3 SIOCSVNETID = 0x802069a6 - SIOCSWGDPID = 0xc018695b - SIOCSWGMAXFLOW = 0xc0186960 - SIOCSWGMAXGROUP = 0xc018695d - SIOCSWSDPID = 0x8018695c - SIOCSWSPORTNO = 0xc060695f SOCK_CLOEXEC = 0x8000 SOCK_DGRAM = 0x2 SOCK_DNS = 0x1000 @@ -1314,6 +1428,7 @@ const ( SO_BINDANY = 0x1000 SO_BROADCAST = 0x20 SO_DEBUG = 0x1 + SO_DOMAIN = 0x1024 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 SO_KEEPALIVE = 0x8 @@ -1321,6 +1436,7 @@ const ( SO_NETPROC = 0x1020 SO_OOBINLINE = 0x100 SO_PEERCRED = 0x1022 + SO_PROTOCOL = 0x1025 SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 @@ -1370,7 +1486,18 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 - TCP_MAXBURST = 0x4 + TCPOPT_EOL = 0x0 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_HDR = 0x1010500 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SACK_PERMIT_HDR = 0x1010402 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_TSTAMP_HDR = 0x101080a + TCPOPT_WINDOW = 0x3 + TCP_INFO = 0x9 TCP_MAXSEG = 0x2 TCP_MAXWIN = 0xffff TCP_MAX_SACK = 0x3 @@ -1379,8 +1506,11 @@ const ( TCP_MSS = 0x200 TCP_NODELAY = 0x1 TCP_NOPUSH = 0x10 + TCP_SACKHOLE_LIMIT = 0x80 TCP_SACK_ENABLE = 0x8 TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 TIOCCBRK = 0x2000747a TIOCCDTR = 0x20007478 TIOCCHKVERAUTH = 0x2000741e @@ -1445,7 +1575,6 @@ const ( TIOCSPGRP = 0x80047476 TIOCSTART = 0x2000746e TIOCSTAT = 0x20007465 - TIOCSTI = 0x80017472 TIOCSTOP = 0x2000746f TIOCSTSTAMP = 0x8008745a TIOCSWINSZ = 0x80087467 @@ -1467,7 +1596,8 @@ const ( VMIN = 0x10 VM_ANONMIN = 0x7 VM_LOADAVG = 0x2 - VM_MAXID = 0xc + VM_MALLOC_CONF = 0xc + VM_MAXID = 0xd VM_MAXSLP = 0xa VM_METER = 0x1 VM_NKMEMPAGES = 0x6 @@ -1745,7 +1875,7 @@ var signalList = [...]struct { {3, "SIGQUIT", "quit"}, {4, "SIGILL", "illegal instruction"}, {5, "SIGTRAP", "trace/BPT trap"}, - {6, "SIGABRT", "abort trap"}, + {6, "SIGIOT", 
"abort trap"}, {7, "SIGEMT", "EMT trap"}, {8, "SIGFPE", "floating point exception"}, {9, "SIGKILL", "killed"}, @@ -1772,4 +1902,5 @@ var signalList = [...]struct { {30, "SIGUSR1", "user defined signal 1"}, {31, "SIGUSR2", "user defined signal 2"}, {32, "SIGTHR", "thread AST"}, + {28672, "SIGSTKSZ", "unknown signal"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go index aef6c0856..8d44955e4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go @@ -46,6 +46,7 @@ const ( AF_SNA = 0xb AF_UNIX = 0x1 AF_UNSPEC = 0x0 + ALTWERASE = 0x200 ARPHRD_ETHER = 0x1 ARPHRD_FRELAY = 0xf ARPHRD_IEEE1394 = 0x18 @@ -82,7 +83,7 @@ const ( BIOCGFILDROP = 0x40044278 BIOCGHDRCMPLT = 0x40044274 BIOCGRSIG = 0x40044273 - BIOCGRTIMEOUT = 0x400c426e + BIOCGRTIMEOUT = 0x4010426e BIOCGSTATS = 0x4008426f BIOCIMMEDIATE = 0x80044270 BIOCLOCK = 0x20004276 @@ -96,7 +97,7 @@ const ( BIOCSFILDROP = 0x80044279 BIOCSHDRCMPLT = 0x80044275 BIOCSRSIG = 0x80044272 - BIOCSRTIMEOUT = 0x800c426d + BIOCSRTIMEOUT = 0x8010426d BIOCVERSION = 0x40044271 BPF_A = 0x10 BPF_ABS = 0x20 @@ -108,6 +109,15 @@ const ( BPF_DIRECTION_IN = 0x1 BPF_DIRECTION_OUT = 0x2 BPF_DIV = 0x30 + BPF_FILDROP_CAPTURE = 0x1 + BPF_FILDROP_DROP = 0x2 + BPF_FILDROP_PASS = 0x0 + BPF_F_DIR_IN = 0x10 + BPF_F_DIR_MASK = 0x30 + BPF_F_DIR_OUT = 0x20 + BPF_F_DIR_SHIFT = 0x4 + BPF_F_FLOWID = 0x8 + BPF_F_PRI_MASK = 0x7 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -136,6 +146,7 @@ const ( BPF_OR = 0x40 BPF_RELEASE = 0x30bb6 BPF_RET = 0x6 + BPF_RND = 0xc0 BPF_RSH = 0x70 BPF_ST = 0x2 BPF_STX = 0x3 @@ -147,6 +158,12 @@ const ( BRKINT = 0x2 CFLUSH = 0xf CLOCAL = 0x8000 + CLOCK_BOOTTIME = 0x6 + CLOCK_MONOTONIC = 0x3 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x4 + CLOCK_UPTIME = 0x5 CPUSTATES = 0x6 CP_IDLE = 0x5 CP_INTR = 0x4 @@ -170,7 +187,65 @@ const ( CTL_KERN = 0x1 CTL_MAXNAME = 0xc CTL_NET = 0x4 + DIOCADDQUEUE = 0xc100445d + DIOCADDRULE = 0xcce04404 + DIOCADDSTATE = 0xc1084425 + DIOCCHANGERULE = 0xcce0441a + DIOCCLRIFFLAG = 0xc024445a + DIOCCLRSRCNODES = 0x20004455 + DIOCCLRSTATES = 0xc0d04412 + DIOCCLRSTATUS = 0xc0244416 + DIOCGETLIMIT = 0xc0084427 + DIOCGETQSTATS = 0xc1084460 + DIOCGETQUEUE = 0xc100445f + DIOCGETQUEUES = 0xc100445e + DIOCGETRULE = 0xcce04407 + DIOCGETRULES = 0xcce04406 + DIOCGETRULESET = 0xc444443b + DIOCGETRULESETS = 0xc444443a + DIOCGETSRCNODES = 0xc0084454 + DIOCGETSTATE = 0xc1084413 + DIOCGETSTATES = 0xc0084419 + DIOCGETSTATUS = 0xc1e84415 + DIOCGETSYNFLWATS = 0xc0084463 + DIOCGETTIMEOUT = 0xc008441e + DIOCIGETIFACES = 0xc0244457 + DIOCKILLSRCNODES = 0xc068445b + DIOCKILLSTATES = 0xc0d04429 + DIOCNATLOOK = 0xc0504417 + DIOCOSFPADD = 0xc088444f DIOCOSFPFLUSH = 0x2000444e + DIOCOSFPGET = 0xc0884450 + DIOCRADDADDRS = 0xc44c4443 + DIOCRADDTABLES = 0xc44c443d + DIOCRCLRADDRS = 0xc44c4442 + DIOCRCLRASTATS = 0xc44c4448 + DIOCRCLRTABLES = 0xc44c443c + DIOCRCLRTSTATS = 0xc44c4441 + DIOCRDELADDRS = 0xc44c4444 + DIOCRDELTABLES = 0xc44c443e + DIOCRGETADDRS = 0xc44c4446 + DIOCRGETASTATS = 0xc44c4447 + DIOCRGETTABLES = 0xc44c443f + DIOCRGETTSTATS = 0xc44c4440 + DIOCRINADEFINE = 0xc44c444d + DIOCRSETADDRS = 0xc44c4445 + DIOCRSETTFLAGS = 0xc44c444a + DIOCRTSTADDRS = 0xc44c4449 + DIOCSETDEBUG = 0xc0044418 + DIOCSETHOSTID = 0xc0044456 + DIOCSETIFFLAG = 0xc0244459 + DIOCSETLIMIT = 0xc0084428 + DIOCSETREASS = 0xc004445c + DIOCSETSTATUSIF = 0xc0244414 + DIOCSETSYNCOOKIES = 0xc0014462 + 
DIOCSETSYNFLWATS = 0xc0084461 + DIOCSETTIMEOUT = 0xc008441d + DIOCSTART = 0x20004401 + DIOCSTOP = 0x20004402 + DIOCXBEGIN = 0xc00c4451 + DIOCXCOMMIT = 0xc00c4452 + DIOCXROLLBACK = 0xc00c4453 DLT_ARCNET = 0x7 DLT_ATM_RFC1483 = 0xb DLT_AX25 = 0x3 @@ -186,6 +261,7 @@ const ( DLT_LOOP = 0xc DLT_MPLS = 0xdb DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b DLT_PFLOG = 0x75 DLT_PFSYNC = 0x12 DLT_PPP = 0x9 @@ -196,6 +272,23 @@ const ( DLT_RAW = 0xe DLT_SLIP = 0x8 DLT_SLIP_BSDOS = 0xf + DLT_USBPCAP = 0xf9 + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -215,6 +308,8 @@ const ( EMUL_ENABLED = 0x1 EMUL_NATIVE = 0x2 ENDRUNDISC = 0x9 + ETH64_8021_RSVD_MASK = 0xfffffffffff0 + ETH64_8021_RSVD_PREFIX = 0x180c2000000 ETHERMIN = 0x2e ETHERMTU = 0x5dc ETHERTYPE_8023 = 0x4 @@ -267,6 +362,7 @@ const ( ETHERTYPE_DN = 0x6003 ETHERTYPE_DOGFIGHT = 0x1989 ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_EAPOL = 0x888e ETHERTYPE_ECMA = 0x803 ETHERTYPE_ENCRYPT = 0x803d ETHERTYPE_ES = 0x805d @@ -298,6 +394,7 @@ const ( ETHERTYPE_LLDP = 0x88cc ETHERTYPE_LOGICRAFT = 0x8148 ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MACSEC = 0x88e5 ETHERTYPE_MATRA = 0x807a ETHERTYPE_MAX = 0xffff ETHERTYPE_MERIT = 0x807c @@ -326,15 +423,17 @@ const ( ETHERTYPE_NCD = 0x8149 ETHERTYPE_NESTAR = 0x8006 ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NHRP = 0x2001 ETHERTYPE_NOVELL = 0x8138 ETHERTYPE_NS = 0x600 ETHERTYPE_NSAT = 0x601 ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NSH = 0x984f ETHERTYPE_NTRAILER = 0x10 ETHERTYPE_OS9 = 0x7007 ETHERTYPE_OS9NET = 0x7009 ETHERTYPE_PACER = 0x80c6 - ETHERTYPE_PAE = 0x888e + ETHERTYPE_PBB = 0x88e7 ETHERTYPE_PCS = 0x4242 ETHERTYPE_PLANNING = 0x8044 ETHERTYPE_PPP = 0x880b @@ -409,28 +508,40 @@ const ( ETHER_CRC_POLY_LE = 0xedb88320 ETHER_HDR_LEN = 0xe ETHER_MAX_DIX_LEN = 0x600 + ETHER_MAX_HARDMTU_LEN = 0xff9b ETHER_MAX_LEN = 0x5ee ETHER_MIN_LEN = 0x40 ETHER_TYPE_LEN = 0x2 ETHER_VLAN_ENCAP_LEN = 0x4 EVFILT_AIO = -0x3 + EVFILT_DEVICE = -0x8 + EVFILT_EXCEPT = -0x9 EVFILT_PROC = -0x5 EVFILT_READ = -0x1 EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0x7 + EVFILT_SYSCOUNT = 0x9 EVFILT_TIMER = -0x7 EVFILT_VNODE = -0x4 EVFILT_WRITE = -0x2 + EVL_ENCAPLEN = 0x4 + EVL_PRIO_BITS = 0xd + EVL_PRIO_MAX = 0x7 + EVL_VLID_MASK = 0xfff + EVL_VLID_MAX = 0xffe + EVL_VLID_MIN = 0x1 + EVL_VLID_NULL = 0x0 EV_ADD = 0x1 EV_CLEAR = 0x20 EV_DELETE = 0x2 EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 EV_ENABLE = 0x4 EV_EOF = 0x8000 EV_ERROR = 0x4000 EV_FLAG1 = 0x2000 EV_ONESHOT = 0x10 - EV_SYSFLAGS = 0xf000 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf800 EXTA = 0x4b00 EXTB = 0x9600 EXTPROC = 0x800 @@ -443,6 +554,8 @@ const ( F_GETFL = 0x3 F_GETLK = 0x7 F_GETOWN = 0x5 + F_ISATTY = 0xb + F_OK = 0x0 F_RDLCK = 0x1 F_SETFD = 0x2 F_SETFL = 0x4 @@ -459,7 +572,6 @@ const ( IEXTEN = 0x400 IFAN_ARRIVAL = 0x0 IFAN_DEPARTURE = 0x1 - IFA_ROUTE = 0x1 IFF_ALLMULTI = 0x200 IFF_BROADCAST = 0x2 IFF_CANTCHANGE = 0x8e52 @@ -470,12 +582,12 @@ const ( IFF_LOOPBACK = 0x8 IFF_MULTICAST = 0x8000 IFF_NOARP = 0x80 - IFF_NOTRAILERS = 0x20 IFF_OACTIVE = 0x400 IFF_POINTOPOINT = 0x10 IFF_PROMISC = 0x100 IFF_RUNNING = 0x40 IFF_SIMPLEX = 0x800 + IFF_STATICARP = 0x20 IFF_UP = 0x1 IFNAMSIZ = 0x10 IFT_1822 = 0x2 @@ -604,6 +716,7 @@ const ( IFT_LINEGROUP = 0xd2 IFT_LOCALTALK = 0x2a IFT_LOOP = 0x18 + IFT_MBIM = 0xfa 
IFT_MEDIAMAILOVERIP = 0x8b IFT_MFSIGLINK = 0xa7 IFT_MIOX25 = 0x26 @@ -694,6 +807,7 @@ const ( IFT_VOICEOVERCABLE = 0xc6 IFT_VOICEOVERFRAMERELAY = 0x99 IFT_VOICEOVERIP = 0x68 + IFT_WIREGUARD = 0xfb IFT_X213 = 0x5d IFT_X25 = 0x5 IFT_X25DDN = 0x4 @@ -728,8 +842,6 @@ const ( IPPROTO_AH = 0x33 IPPROTO_CARP = 0x70 IPPROTO_DIVERT = 0x102 - IPPROTO_DIVERT_INIT = 0x2 - IPPROTO_DIVERT_RESP = 0x1 IPPROTO_DONE = 0x101 IPPROTO_DSTOPTS = 0x3c IPPROTO_EGP = 0x8 @@ -761,9 +873,11 @@ const ( IPPROTO_RAW = 0xff IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 IPV6_AUTH_LEVEL = 0x35 IPV6_AUTOFLOWLABEL = 0x3b IPV6_CHECKSUM = 0x1a @@ -786,6 +900,7 @@ const ( IPV6_LEAVE_GROUP = 0xd IPV6_MAXHLIM = 0xff IPV6_MAXPACKET = 0xffff + IPV6_MINHOPCOUNT = 0x41 IPV6_MMTU = 0x500 IPV6_MULTICAST_HOPS = 0xa IPV6_MULTICAST_IF = 0x9 @@ -825,12 +940,12 @@ const ( IP_DEFAULT_MULTICAST_LOOP = 0x1 IP_DEFAULT_MULTICAST_TTL = 0x1 IP_DF = 0x4000 - IP_DIVERTFL = 0x1022 IP_DROP_MEMBERSHIP = 0xd IP_ESP_NETWORK_LEVEL = 0x16 IP_ESP_TRANS_LEVEL = 0x15 IP_HDRINCL = 0x2 IP_IPCOMP_LEVEL = 0x1d + IP_IPDEFTTL = 0x25 IP_IPSECFLOWINFO = 0x24 IP_IPSEC_LOCAL_AUTH = 0x1b IP_IPSEC_LOCAL_CRED = 0x19 @@ -864,10 +979,15 @@ const ( IP_RETOPTS = 0x8 IP_RF = 0x8000 IP_RTABLE = 0x1021 + IP_SENDSRCADDR = 0x7 IP_TOS = 0x3 IP_TTL = 0x4 ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 + IUCLC = 0x1000 IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 @@ -922,6 +1042,7 @@ const ( MNT_NOATIME = 0x8000 MNT_NODEV = 0x10 MNT_NOEXEC = 0x4 + MNT_NOPERM = 0x20 MNT_NOSUID = 0x8 MNT_NOWAIT = 0x2 MNT_QUOTA = 0x2000 @@ -929,12 +1050,27 @@ const ( MNT_RELOAD = 0x40000 MNT_ROOTFS = 0x4000 MNT_SOFTDEP = 0x4000000 + MNT_STALLED = 0x100000 + MNT_SWAPPABLE = 0x200000 MNT_SYNCHRONOUS = 0x2 MNT_UPDATE = 0x10000 MNT_VISFLAGMASK = 0x400ffff MNT_WAIT = 0x1 MNT_WANTRDWR = 0x2000000 MNT_WXALLOWED = 0x800 + MOUNT_AFS = "afs" + MOUNT_CD9660 = "cd9660" + MOUNT_EXT2FS = "ext2fs" + MOUNT_FFS = "ffs" + MOUNT_FUSEFS = "fuse" + MOUNT_MFS = "mfs" + MOUNT_MSDOS = "msdos" + MOUNT_NCPFS = "ncpfs" + MOUNT_NFS = "nfs" + MOUNT_NTFS = "ntfs" + MOUNT_TMPFS = "tmpfs" + MOUNT_UDF = "udf" + MOUNT_UFS = "ffs" MSG_BCAST = 0x100 MSG_CMSG_CLOEXEC = 0x800 MSG_CTRUNC = 0x20 @@ -947,6 +1083,7 @@ const ( MSG_PEEK = 0x2 MSG_TRUNC = 0x10 MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x1000 MS_ASYNC = 0x1 MS_INVALIDATE = 0x4 MS_SYNC = 0x2 @@ -954,12 +1091,16 @@ const ( NET_RT_DUMP = 0x1 NET_RT_FLAGS = 0x2 NET_RT_IFLIST = 0x3 - NET_RT_MAXID = 0x6 + NET_RT_IFNAMES = 0x6 + NET_RT_MAXID = 0x8 + NET_RT_SOURCE = 0x7 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 NFDBITS = 0x20 NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 NOTE_ATTRIB = 0x8 + NOTE_CHANGE = 0x1 NOTE_CHILD = 0x4 NOTE_DELETE = 0x1 NOTE_EOF = 0x2 @@ -969,6 +1110,7 @@ const ( NOTE_FORK = 0x40000000 NOTE_LINK = 0x10 NOTE_LOWAT = 0x1 + NOTE_OOB = 0x4 NOTE_PCTRLMASK = 0xf0000000 NOTE_PDATAMASK = 0xfffff NOTE_RENAME = 0x20 @@ -978,11 +1120,13 @@ const ( NOTE_TRUNCATE = 0x80 NOTE_WRITE = 0x2 OCRNL = 0x10 + OLCUC = 0x20 ONLCR = 0x2 ONLRET = 0x80 ONOCR = 0x40 ONOEOT = 0x8 OPOST = 0x1 + OXTABS = 0x4 O_ACCMODE = 0x3 O_APPEND = 0x8 O_ASYNC = 0x40 @@ -1027,19 +1171,25 @@ const ( RLIMIT_STACK = 0x3 RLIM_INFINITY = 0x7fffffffffffffff RTAX_AUTHOR = 0x6 + RTAX_BFD = 0xb RTAX_BRD = 0x7 + RTAX_DNS = 0xc RTAX_DST = 0x0 RTAX_GATEWAY = 0x1 RTAX_GENMASK = 0x3 RTAX_IFA = 0x5 RTAX_IFP = 0x4 RTAX_LABEL = 0xa - RTAX_MAX = 0xb + RTAX_MAX = 0xf RTAX_NETMASK = 0x2 + RTAX_SEARCH = 
0xe RTAX_SRC = 0x8 RTAX_SRCMASK = 0x9 + RTAX_STATIC = 0xd RTA_AUTHOR = 0x40 + RTA_BFD = 0x800 RTA_BRD = 0x80 + RTA_DNS = 0x1000 RTA_DST = 0x1 RTA_GATEWAY = 0x2 RTA_GENMASK = 0x8 @@ -1047,24 +1197,29 @@ const ( RTA_IFP = 0x10 RTA_LABEL = 0x400 RTA_NETMASK = 0x4 + RTA_SEARCH = 0x4000 RTA_SRC = 0x100 RTA_SRCMASK = 0x200 + RTA_STATIC = 0x2000 RTF_ANNOUNCE = 0x4000 + RTF_BFD = 0x1000000 RTF_BLACKHOLE = 0x1000 RTF_BROADCAST = 0x400000 + RTF_CACHED = 0x20000 RTF_CLONED = 0x10000 RTF_CLONING = 0x100 + RTF_CONNECTED = 0x800000 RTF_DONE = 0x40 RTF_DYNAMIC = 0x10 - RTF_FMASK = 0x70f808 + RTF_FMASK = 0x110fc08 RTF_GATEWAY = 0x2 RTF_HOST = 0x4 RTF_LLINFO = 0x400 RTF_LOCAL = 0x200000 - RTF_MASK = 0x80 RTF_MODIFIED = 0x20 RTF_MPATH = 0x40000 RTF_MPLS = 0x100000 + RTF_MULTICAST = 0x200 RTF_PERMANENT_ARP = 0x2000 RTF_PROTO1 = 0x8000 RTF_PROTO2 = 0x4000 @@ -1073,23 +1228,26 @@ const ( RTF_STATIC = 0x800 RTF_UP = 0x1 RTF_USETRAILERS = 0x8000 - RTF_XRESOLVE = 0x200 + RTM_80211INFO = 0x15 RTM_ADD = 0x1 + RTM_BFD = 0x12 RTM_CHANGE = 0x3 + RTM_CHGADDRATTR = 0x14 RTM_DELADDR = 0xd RTM_DELETE = 0x2 RTM_DESYNC = 0x10 RTM_GET = 0x4 RTM_IFANNOUNCE = 0xf RTM_IFINFO = 0xe - RTM_LOCK = 0x8 + RTM_INVALIDATE = 0x11 RTM_LOSING = 0x5 RTM_MAXSIZE = 0x800 RTM_MISS = 0x7 RTM_NEWADDR = 0xc + RTM_PROPOSAL = 0x13 RTM_REDIRECT = 0x6 RTM_RESOLVE = 0xb - RTM_RTTUNIT = 0xf4240 + RTM_SOURCE = 0x16 RTM_VERSION = 0x5 RTV_EXPIRE = 0x4 RTV_HOPCOUNT = 0x2 @@ -1099,67 +1257,74 @@ const ( RTV_RTTVAR = 0x80 RTV_SPIPE = 0x10 RTV_SSTHRESH = 0x20 + RT_TABLEID_BITS = 0x8 + RT_TABLEID_MASK = 0xff RT_TABLEID_MAX = 0xff RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x4 + SEEK_CUR = 0x1 + SEEK_END = 0x2 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 SIOCADDMULTI = 0x80206931 SIOCAIFADDR = 0x8040691a SIOCAIFGROUP = 0x80246987 - SIOCALIFADDR = 0x8218691c SIOCATMARK = 0x40047307 - SIOCBRDGADD = 0x8054693c - SIOCBRDGADDS = 0x80546941 - SIOCBRDGARL = 0x806e694d + SIOCBRDGADD = 0x8060693c + SIOCBRDGADDL = 0x80606949 + SIOCBRDGADDS = 0x80606941 + SIOCBRDGARL = 0x808c694d SIOCBRDGDADDR = 0x81286947 - SIOCBRDGDEL = 0x8054693d - SIOCBRDGDELS = 0x80546942 - SIOCBRDGFLUSH = 0x80546948 - SIOCBRDGFRL = 0x806e694e + SIOCBRDGDEL = 0x8060693d + SIOCBRDGDELS = 0x80606942 + SIOCBRDGFLUSH = 0x80606948 + SIOCBRDGFRL = 0x808c694e SIOCBRDGGCACHE = 0xc0146941 SIOCBRDGGFD = 0xc0146952 SIOCBRDGGHT = 0xc0146951 - SIOCBRDGGIFFLGS = 0xc054693e + SIOCBRDGGIFFLGS = 0xc060693e SIOCBRDGGMA = 0xc0146953 - SIOCBRDGGPARAM = 0xc03c6958 + SIOCBRDGGPARAM = 0xc0406958 SIOCBRDGGPRI = 0xc0146950 SIOCBRDGGRL = 0xc028694f - SIOCBRDGGSIFS = 0xc054693c SIOCBRDGGTO = 0xc0146946 - SIOCBRDGIFS = 0xc0546942 + SIOCBRDGIFS = 0xc0606942 SIOCBRDGRTS = 0xc0186943 SIOCBRDGSADDR = 0xc1286944 SIOCBRDGSCACHE = 0x80146940 SIOCBRDGSFD = 0x80146952 SIOCBRDGSHT = 0x80146951 - SIOCBRDGSIFCOST = 0x80546955 - SIOCBRDGSIFFLGS = 0x8054693f - SIOCBRDGSIFPRIO = 0x80546954 + SIOCBRDGSIFCOST = 0x80606955 + SIOCBRDGSIFFLGS = 0x8060693f + SIOCBRDGSIFPRIO = 0x80606954 + SIOCBRDGSIFPROT = 0x8060694a SIOCBRDGSMA = 0x80146953 SIOCBRDGSPRI = 0x80146950 SIOCBRDGSPROTO = 0x8014695a SIOCBRDGSTO = 0x80146945 SIOCBRDGSTXHC = 0x80146959 + SIOCDELLABEL = 0x80206997 SIOCDELMULTI = 0x80206932 SIOCDIFADDR = 0x80206919 SIOCDIFGROUP = 0x80246989 + SIOCDIFPARENT = 0x802069b4 SIOCDIFPHYADDR = 0x80206949 - SIOCDLIFADDR = 0x8218691e + SIOCDPWE3NEIGHBOR = 0x802069de + SIOCDVNETID = 0x802069af SIOCGETKALIVE = 0xc01869a4 SIOCGETLABEL = 0x8020699a + SIOCGETMPWCFG = 0xc02069ae 
SIOCGETPFLOW = 0xc02069fe SIOCGETPFSYNC = 0xc02069f8 SIOCGETSGCNT = 0xc0147534 SIOCGETVIFCNT = 0xc0147533 SIOCGETVLAN = 0xc0206990 - SIOCGHIWAT = 0x40047301 SIOCGIFADDR = 0xc0206921 - SIOCGIFASYNCMAP = 0xc020697c SIOCGIFBRDADDR = 0xc0206923 SIOCGIFCONF = 0xc0086924 SIOCGIFDATA = 0xc020691b @@ -1168,41 +1333,53 @@ const ( SIOCGIFFLAGS = 0xc0206911 SIOCGIFGATTR = 0xc024698b SIOCGIFGENERIC = 0xc020693a + SIOCGIFGLIST = 0xc024698d SIOCGIFGMEMB = 0xc024698a SIOCGIFGROUP = 0xc0246988 SIOCGIFHARDMTU = 0xc02069a5 - SIOCGIFMEDIA = 0xc0286936 + SIOCGIFLLPRIO = 0xc02069b6 + SIOCGIFMEDIA = 0xc0386938 SIOCGIFMETRIC = 0xc0206917 SIOCGIFMTU = 0xc020697e SIOCGIFNETMASK = 0xc0206925 - SIOCGIFPDSTADDR = 0xc0206948 + SIOCGIFPAIR = 0xc02069b1 + SIOCGIFPARENT = 0xc02069b3 SIOCGIFPRIORITY = 0xc020699c - SIOCGIFPSRCADDR = 0xc0206947 SIOCGIFRDOMAIN = 0xc02069a0 SIOCGIFRTLABEL = 0xc0206983 SIOCGIFRXR = 0x802069aa - SIOCGIFTIMESLOT = 0xc0206986 + SIOCGIFSFFPAGE = 0xc1126939 SIOCGIFXFLAGS = 0xc020699e - SIOCGLIFADDR = 0xc218691d SIOCGLIFPHYADDR = 0xc218694b + SIOCGLIFPHYDF = 0xc02069c2 + SIOCGLIFPHYECN = 0xc02069c8 SIOCGLIFPHYRTABLE = 0xc02069a2 SIOCGLIFPHYTTL = 0xc02069a9 - SIOCGLOWAT = 0x40047303 SIOCGPGRP = 0x40047309 + SIOCGPWE3 = 0xc0206998 + SIOCGPWE3CTRLWORD = 0xc02069dc + SIOCGPWE3FAT = 0xc02069dd + SIOCGPWE3NEIGHBOR = 0xc21869de + SIOCGRXHPRIO = 0xc02069db SIOCGSPPPPARAMS = 0xc0206994 + SIOCGTXHPRIO = 0xc02069c6 + SIOCGUMBINFO = 0xc02069be + SIOCGUMBPARAM = 0xc02069c0 SIOCGVH = 0xc02069f6 + SIOCGVNETFLOWID = 0xc02069c4 SIOCGVNETID = 0xc02069a7 + SIOCIFAFATTACH = 0x801169ab + SIOCIFAFDETACH = 0x801169ac SIOCIFCREATE = 0x8020697a SIOCIFDESTROY = 0x80206979 SIOCIFGCLONERS = 0xc00c6978 SIOCSETKALIVE = 0x801869a3 SIOCSETLABEL = 0x80206999 + SIOCSETMPWCFG = 0x802069ad SIOCSETPFLOW = 0x802069fd SIOCSETPFSYNC = 0x802069f7 SIOCSETVLAN = 0x8020698f - SIOCSHIWAT = 0x80047300 SIOCSIFADDR = 0x8020690c - SIOCSIFASYNCMAP = 0x8020697d SIOCSIFBRDADDR = 0x80206913 SIOCSIFDESCR = 0x80206980 SIOCSIFDSTADDR = 0x8020690e @@ -1210,26 +1387,36 @@ const ( SIOCSIFGATTR = 0x8024698c SIOCSIFGENERIC = 0x80206939 SIOCSIFLLADDR = 0x8020691f - SIOCSIFMEDIA = 0xc0206935 + SIOCSIFLLPRIO = 0x802069b5 + SIOCSIFMEDIA = 0xc0206937 SIOCSIFMETRIC = 0x80206918 SIOCSIFMTU = 0x8020697f SIOCSIFNETMASK = 0x80206916 - SIOCSIFPHYADDR = 0x80406946 + SIOCSIFPAIR = 0x802069b0 + SIOCSIFPARENT = 0x802069b2 SIOCSIFPRIORITY = 0x8020699b SIOCSIFRDOMAIN = 0x8020699f SIOCSIFRTLABEL = 0x80206982 - SIOCSIFTIMESLOT = 0x80206985 SIOCSIFXFLAGS = 0x8020699d SIOCSLIFPHYADDR = 0x8218694a + SIOCSLIFPHYDF = 0x802069c1 + SIOCSLIFPHYECN = 0x802069c7 SIOCSLIFPHYRTABLE = 0x802069a1 SIOCSLIFPHYTTL = 0x802069a8 - SIOCSLOWAT = 0x80047302 SIOCSPGRP = 0x80047308 + SIOCSPWE3CTRLWORD = 0x802069dc + SIOCSPWE3FAT = 0x802069dd + SIOCSPWE3NEIGHBOR = 0x821869de + SIOCSRXHPRIO = 0x802069db SIOCSSPPPPARAMS = 0x80206993 + SIOCSTXHPRIO = 0x802069c5 + SIOCSUMBPARAM = 0x802069bf SIOCSVH = 0xc02069f5 + SIOCSVNETFLOWID = 0x802069c3 SIOCSVNETID = 0x802069a6 SOCK_CLOEXEC = 0x8000 SOCK_DGRAM = 0x2 + SOCK_DNS = 0x1000 SOCK_NONBLOCK = 0x4000 SOCK_RAW = 0x3 SOCK_RDM = 0x4 @@ -1241,6 +1428,7 @@ const ( SO_BINDANY = 0x1000 SO_BROADCAST = 0x20 SO_DEBUG = 0x1 + SO_DOMAIN = 0x1024 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 SO_KEEPALIVE = 0x8 @@ -1248,6 +1436,7 @@ const ( SO_NETPROC = 0x1020 SO_OOBINLINE = 0x100 SO_PEERCRED = 0x1022 + SO_PROTOCOL = 0x1025 SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 @@ -1261,6 +1450,7 @@ const ( SO_TIMESTAMP = 0x800 SO_TYPE = 0x1008 SO_USELOOPBACK = 0x40 + 
SO_ZEROIZE = 0x2000 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 @@ -1290,9 +1480,24 @@ const ( S_IXOTH = 0x1 S_IXUSR = 0x40 TCIFLUSH = 0x1 + TCIOFF = 0x3 TCIOFLUSH = 0x3 + TCION = 0x4 TCOFLUSH = 0x2 - TCP_MAXBURST = 0x4 + TCOOFF = 0x1 + TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_HDR = 0x1010500 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SACK_PERMIT_HDR = 0x1010402 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_TSTAMP_HDR = 0x101080a + TCPOPT_WINDOW = 0x3 + TCP_INFO = 0x9 TCP_MAXSEG = 0x2 TCP_MAXWIN = 0xffff TCP_MAX_SACK = 0x3 @@ -1301,11 +1506,15 @@ const ( TCP_MSS = 0x200 TCP_NODELAY = 0x1 TCP_NOPUSH = 0x10 - TCP_NSTATES = 0xb + TCP_SACKHOLE_LIMIT = 0x80 TCP_SACK_ENABLE = 0x8 TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 TIOCCBRK = 0x2000747a TIOCCDTR = 0x20007478 + TIOCCHKVERAUTH = 0x2000741e + TIOCCLRVERAUTH = 0x2000741d TIOCCONS = 0x80047462 TIOCDRAIN = 0x2000745e TIOCEXCL = 0x2000740d @@ -1321,7 +1530,7 @@ const ( TIOCGFLAGS = 0x4004745d TIOCGPGRP = 0x40047477 TIOCGSID = 0x40047463 - TIOCGTSTAMP = 0x400c745b + TIOCGTSTAMP = 0x4010745b TIOCGWINSZ = 0x40087468 TIOCMBIC = 0x8004746b TIOCMBIS = 0x8004746c @@ -1360,17 +1569,21 @@ const ( TIOCSETAF = 0x802c7416 TIOCSETAW = 0x802c7415 TIOCSETD = 0x8004741b + TIOCSETVERAUTH = 0x8004741c TIOCSFLAGS = 0x8004745c TIOCSIG = 0x8004745f TIOCSPGRP = 0x80047476 TIOCSTART = 0x2000746e - TIOCSTAT = 0x80047465 - TIOCSTI = 0x80017472 + TIOCSTAT = 0x20007465 TIOCSTOP = 0x2000746f TIOCSTSTAMP = 0x8008745a TIOCSWINSZ = 0x80087467 TIOCUCNTL = 0x80047466 + TIOCUCNTL_CBRK = 0x7a + TIOCUCNTL_SBRK = 0x7b TOSTOP = 0x400000 + UTIME_NOW = -0x2 + UTIME_OMIT = -0x1 VDISCARD = 0xf VDSUSP = 0xb VEOF = 0x0 @@ -1381,6 +1594,19 @@ const ( VKILL = 0x5 VLNEXT = 0xe VMIN = 0x10 + VM_ANONMIN = 0x7 + VM_LOADAVG = 0x2 + VM_MALLOC_CONF = 0xc + VM_MAXID = 0xd + VM_MAXSLP = 0xa + VM_METER = 0x1 + VM_NKMEMPAGES = 0x6 + VM_PSSTRINGS = 0x3 + VM_SWAPENCRYPT = 0x5 + VM_USPACE = 0xb + VM_UVMEXP = 0x4 + VM_VNODEMIN = 0x9 + VM_VTEXTMIN = 0x8 VQUIT = 0x9 VREPRINT = 0x6 VSTART = 0xc @@ -1394,6 +1620,7 @@ const ( WCOREFLAG = 0x80 WNOHANG = 0x1 WUNTRACED = 0x2 + XCASE = 0x1000000 ) // Errors @@ -1407,6 +1634,7 @@ const ( EALREADY = syscall.Errno(0x25) EAUTH = syscall.Errno(0x50) EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x5c) EBADRPC = syscall.Errno(0x48) EBUSY = syscall.Errno(0x10) ECANCELED = syscall.Errno(0x58) @@ -1433,7 +1661,7 @@ const ( EIPSEC = syscall.Errno(0x52) EISCONN = syscall.Errno(0x38) EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x5b) + ELAST = syscall.Errno(0x5f) ELOOP = syscall.Errno(0x3e) EMEDIUMTYPE = syscall.Errno(0x56) EMFILE = syscall.Errno(0x18) @@ -1461,12 +1689,14 @@ const ( ENOTCONN = syscall.Errno(0x39) ENOTDIR = syscall.Errno(0x14) ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x5d) ENOTSOCK = syscall.Errno(0x26) ENOTSUP = syscall.Errno(0x5b) ENOTTY = syscall.Errno(0x19) ENXIO = syscall.Errno(0x6) EOPNOTSUPP = syscall.Errno(0x2d) EOVERFLOW = syscall.Errno(0x57) + EOWNERDEAD = syscall.Errno(0x5e) EPERM = syscall.Errno(0x1) EPFNOSUPPORT = syscall.Errno(0x2e) EPIPE = syscall.Errno(0x20) @@ -1474,6 +1704,7 @@ const ( EPROCUNAVAIL = syscall.Errno(0x4c) EPROGMISMATCH = syscall.Errno(0x4b) EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5f) EPROTONOSUPPORT = syscall.Errno(0x2b) EPROTOTYPE = syscall.Errno(0x29) ERANGE = syscall.Errno(0x22) @@ -1570,7 +1801,7 @@ var errorList = [...]struct { {32, "EPIPE", "broken pipe"}, {33, 
"EDOM", "numerical argument out of domain"}, {34, "ERANGE", "result too large"}, - {35, "EWOULDBLOCK", "resource temporarily unavailable"}, + {35, "EAGAIN", "resource temporarily unavailable"}, {36, "EINPROGRESS", "operation now in progress"}, {37, "EALREADY", "operation already in progress"}, {38, "ENOTSOCK", "socket operation on non-socket"}, @@ -1626,7 +1857,11 @@ var errorList = [...]struct { {88, "ECANCELED", "operation canceled"}, {89, "EIDRM", "identifier removed"}, {90, "ENOMSG", "no message of desired type"}, - {91, "ELAST", "not supported"}, + {91, "ENOTSUP", "not supported"}, + {92, "EBADMSG", "bad message"}, + {93, "ENOTRECOVERABLE", "state not recoverable"}, + {94, "EOWNERDEAD", "previous owner died"}, + {95, "ELAST", "protocol error"}, } // Signal table @@ -1640,7 +1875,7 @@ var signalList = [...]struct { {3, "SIGQUIT", "quit"}, {4, "SIGILL", "illegal instruction"}, {5, "SIGTRAP", "trace/BPT trap"}, - {6, "SIGABRT", "abort trap"}, + {6, "SIGIOT", "abort trap"}, {7, "SIGEMT", "EMT trap"}, {8, "SIGFPE", "floating point exception"}, {9, "SIGKILL", "killed"}, @@ -1667,4 +1902,5 @@ var signalList = [...]struct { {30, "SIGUSR1", "user defined signal 1"}, {31, "SIGUSR2", "user defined signal 2"}, {32, "SIGTHR", "thread AST"}, + {28672, "SIGSTKSZ", "unknown signal"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go index 90de7dfc3..ae16fe754 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go @@ -112,6 +112,12 @@ const ( BPF_FILDROP_CAPTURE = 0x1 BPF_FILDROP_DROP = 0x2 BPF_FILDROP_PASS = 0x0 + BPF_F_DIR_IN = 0x10 + BPF_F_DIR_MASK = 0x30 + BPF_F_DIR_OUT = 0x20 + BPF_F_DIR_SHIFT = 0x4 + BPF_F_FLOWID = 0x8 + BPF_F_PRI_MASK = 0x7 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -140,6 +146,7 @@ const ( BPF_OR = 0x40 BPF_RELEASE = 0x30bb6 BPF_RET = 0x6 + BPF_RND = 0xc0 BPF_RSH = 0x70 BPF_ST = 0x2 BPF_STX = 0x3 @@ -180,7 +187,65 @@ const ( CTL_KERN = 0x1 CTL_MAXNAME = 0xc CTL_NET = 0x4 + DIOCADDQUEUE = 0xc110445d + DIOCADDRULE = 0xcd604404 + DIOCADDSTATE = 0xc1084425 + DIOCCHANGERULE = 0xcd60441a + DIOCCLRIFFLAG = 0xc028445a + DIOCCLRSRCNODES = 0x20004455 + DIOCCLRSTATES = 0xc0e04412 + DIOCCLRSTATUS = 0xc0284416 + DIOCGETLIMIT = 0xc0084427 + DIOCGETQSTATS = 0xc1204460 + DIOCGETQUEUE = 0xc110445f + DIOCGETQUEUES = 0xc110445e + DIOCGETRULE = 0xcd604407 + DIOCGETRULES = 0xcd604406 + DIOCGETRULESET = 0xc444443b + DIOCGETRULESETS = 0xc444443a + DIOCGETSRCNODES = 0xc0104454 + DIOCGETSTATE = 0xc1084413 + DIOCGETSTATES = 0xc0104419 + DIOCGETSTATUS = 0xc1e84415 + DIOCGETSYNFLWATS = 0xc0084463 + DIOCGETTIMEOUT = 0xc008441e + DIOCIGETIFACES = 0xc0284457 + DIOCKILLSRCNODES = 0xc080445b + DIOCKILLSTATES = 0xc0e04429 + DIOCNATLOOK = 0xc0504417 + DIOCOSFPADD = 0xc088444f DIOCOSFPFLUSH = 0x2000444e + DIOCOSFPGET = 0xc0884450 + DIOCRADDADDRS = 0xc4504443 + DIOCRADDTABLES = 0xc450443d + DIOCRCLRADDRS = 0xc4504442 + DIOCRCLRASTATS = 0xc4504448 + DIOCRCLRTABLES = 0xc450443c + DIOCRCLRTSTATS = 0xc4504441 + DIOCRDELADDRS = 0xc4504444 + DIOCRDELTABLES = 0xc450443e + DIOCRGETADDRS = 0xc4504446 + DIOCRGETASTATS = 0xc4504447 + DIOCRGETTABLES = 0xc450443f + DIOCRGETTSTATS = 0xc4504440 + DIOCRINADEFINE = 0xc450444d + DIOCRSETADDRS = 0xc4504445 + DIOCRSETTFLAGS = 0xc450444a + DIOCRTSTADDRS = 0xc4504449 + DIOCSETDEBUG = 0xc0044418 + DIOCSETHOSTID = 0xc0044456 + DIOCSETIFFLAG = 0xc0284459 + DIOCSETLIMIT = 0xc0084428 + DIOCSETREASS = 0xc004445c + DIOCSETSTATUSIF = 0xc0284414 + 
DIOCSETSYNCOOKIES = 0xc0014462 + DIOCSETSYNFLWATS = 0xc0084461 + DIOCSETTIMEOUT = 0xc008441d + DIOCSTART = 0x20004401 + DIOCSTOP = 0x20004402 + DIOCXBEGIN = 0xc0104451 + DIOCXCOMMIT = 0xc0104452 + DIOCXROLLBACK = 0xc0104453 DLT_ARCNET = 0x7 DLT_ATM_RFC1483 = 0xb DLT_AX25 = 0x3 @@ -243,6 +308,8 @@ const ( EMUL_ENABLED = 0x1 EMUL_NATIVE = 0x2 ENDRUNDISC = 0x9 + ETH64_8021_RSVD_MASK = 0xfffffffffff0 + ETH64_8021_RSVD_PREFIX = 0x180c2000000 ETHERMIN = 0x2e ETHERMTU = 0x5dc ETHERTYPE_8023 = 0x4 @@ -295,6 +362,7 @@ const ( ETHERTYPE_DN = 0x6003 ETHERTYPE_DOGFIGHT = 0x1989 ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_EAPOL = 0x888e ETHERTYPE_ECMA = 0x803 ETHERTYPE_ENCRYPT = 0x803d ETHERTYPE_ES = 0x805d @@ -326,6 +394,7 @@ const ( ETHERTYPE_LLDP = 0x88cc ETHERTYPE_LOGICRAFT = 0x8148 ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MACSEC = 0x88e5 ETHERTYPE_MATRA = 0x807a ETHERTYPE_MAX = 0xffff ETHERTYPE_MERIT = 0x807c @@ -354,15 +423,16 @@ const ( ETHERTYPE_NCD = 0x8149 ETHERTYPE_NESTAR = 0x8006 ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NHRP = 0x2001 ETHERTYPE_NOVELL = 0x8138 ETHERTYPE_NS = 0x600 ETHERTYPE_NSAT = 0x601 ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NSH = 0x984f ETHERTYPE_NTRAILER = 0x10 ETHERTYPE_OS9 = 0x7007 ETHERTYPE_OS9NET = 0x7009 ETHERTYPE_PACER = 0x80c6 - ETHERTYPE_PAE = 0x888e ETHERTYPE_PBB = 0x88e7 ETHERTYPE_PCS = 0x4242 ETHERTYPE_PLANNING = 0x8044 @@ -445,10 +515,11 @@ const ( ETHER_VLAN_ENCAP_LEN = 0x4 EVFILT_AIO = -0x3 EVFILT_DEVICE = -0x8 + EVFILT_EXCEPT = -0x9 EVFILT_PROC = -0x5 EVFILT_READ = -0x1 EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0x8 + EVFILT_SYSCOUNT = 0x9 EVFILT_TIMER = -0x7 EVFILT_VNODE = -0x4 EVFILT_WRITE = -0x2 @@ -470,7 +541,7 @@ const ( EV_FLAG1 = 0x2000 EV_ONESHOT = 0x10 EV_RECEIPT = 0x40 - EV_SYSFLAGS = 0xf000 + EV_SYSFLAGS = 0xf800 EXTA = 0x4b00 EXTB = 0x9600 EXTPROC = 0x800 @@ -736,6 +807,7 @@ const ( IFT_VOICEOVERCABLE = 0xc6 IFT_VOICEOVERFRAMERELAY = 0x99 IFT_VOICEOVERIP = 0x68 + IFT_WIREGUARD = 0xfb IFT_X213 = 0x5d IFT_X25 = 0x5 IFT_X25DDN = 0x4 @@ -801,9 +873,11 @@ const ( IPPROTO_RAW = 0xff IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 IPV6_AUTH_LEVEL = 0x35 IPV6_AUTOFLOWLABEL = 0x3b IPV6_CHECKSUM = 0x1a @@ -910,6 +984,9 @@ const ( IP_TTL = 0x4 ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 IUCLC = 0x1000 IXANY = 0x800 IXOFF = 0x400 @@ -981,6 +1058,19 @@ const ( MNT_WAIT = 0x1 MNT_WANTRDWR = 0x2000000 MNT_WXALLOWED = 0x800 + MOUNT_AFS = "afs" + MOUNT_CD9660 = "cd9660" + MOUNT_EXT2FS = "ext2fs" + MOUNT_FFS = "ffs" + MOUNT_FUSEFS = "fuse" + MOUNT_MFS = "mfs" + MOUNT_MSDOS = "msdos" + MOUNT_NCPFS = "ncpfs" + MOUNT_NFS = "nfs" + MOUNT_NTFS = "ntfs" + MOUNT_TMPFS = "tmpfs" + MOUNT_UDF = "udf" + MOUNT_UFS = "ffs" MSG_BCAST = 0x100 MSG_CMSG_CLOEXEC = 0x800 MSG_CTRUNC = 0x20 @@ -993,6 +1083,7 @@ const ( MSG_PEEK = 0x2 MSG_TRUNC = 0x10 MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x1000 MS_ASYNC = 0x1 MS_INVALIDATE = 0x4 MS_SYNC = 0x2 @@ -1001,7 +1092,8 @@ const ( NET_RT_FLAGS = 0x2 NET_RT_IFLIST = 0x3 NET_RT_IFNAMES = 0x6 - NET_RT_MAXID = 0x7 + NET_RT_MAXID = 0x8 + NET_RT_SOURCE = 0x7 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 NFDBITS = 0x20 @@ -1018,6 +1110,7 @@ const ( NOTE_FORK = 0x40000000 NOTE_LINK = 0x10 NOTE_LOWAT = 0x1 + NOTE_OOB = 0x4 NOTE_PCTRLMASK = 0xf0000000 NOTE_PDATAMASK = 0xfffff NOTE_RENAME = 0x20 @@ -1154,7 +1247,7 @@ const ( RTM_PROPOSAL = 0x13 RTM_REDIRECT = 0x6 RTM_RESOLVE = 0xb - RTM_RTTUNIT = 0xf4240 + RTM_SOURCE = 0x16 RTM_VERSION 
= 0x5 RTV_EXPIRE = 0x4 RTV_HOPCOUNT = 0x2 @@ -1172,6 +1265,9 @@ const ( RUSAGE_THREAD = 0x1 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x4 + SEEK_CUR = 0x1 + SEEK_END = 0x2 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1188,30 +1284,30 @@ const ( SIOCBRDGDELS = 0x80606942 SIOCBRDGFLUSH = 0x80606948 SIOCBRDGFRL = 0x808c694e - SIOCBRDGGCACHE = 0xc0186941 - SIOCBRDGGFD = 0xc0186952 - SIOCBRDGGHT = 0xc0186951 + SIOCBRDGGCACHE = 0xc0146941 + SIOCBRDGGFD = 0xc0146952 + SIOCBRDGGHT = 0xc0146951 SIOCBRDGGIFFLGS = 0xc060693e - SIOCBRDGGMA = 0xc0186953 + SIOCBRDGGMA = 0xc0146953 SIOCBRDGGPARAM = 0xc0406958 - SIOCBRDGGPRI = 0xc0186950 + SIOCBRDGGPRI = 0xc0146950 SIOCBRDGGRL = 0xc030694f - SIOCBRDGGTO = 0xc0186946 + SIOCBRDGGTO = 0xc0146946 SIOCBRDGIFS = 0xc0606942 SIOCBRDGRTS = 0xc0206943 SIOCBRDGSADDR = 0xc1286944 - SIOCBRDGSCACHE = 0x80186940 - SIOCBRDGSFD = 0x80186952 - SIOCBRDGSHT = 0x80186951 + SIOCBRDGSCACHE = 0x80146940 + SIOCBRDGSFD = 0x80146952 + SIOCBRDGSHT = 0x80146951 SIOCBRDGSIFCOST = 0x80606955 SIOCBRDGSIFFLGS = 0x8060693f SIOCBRDGSIFPRIO = 0x80606954 SIOCBRDGSIFPROT = 0x8060694a - SIOCBRDGSMA = 0x80186953 - SIOCBRDGSPRI = 0x80186950 - SIOCBRDGSPROTO = 0x8018695a - SIOCBRDGSTO = 0x80186945 - SIOCBRDGSTXHC = 0x80186959 + SIOCBRDGSMA = 0x80146953 + SIOCBRDGSPRI = 0x80146950 + SIOCBRDGSPROTO = 0x8014695a + SIOCBRDGSTO = 0x80146945 + SIOCBRDGSTXHC = 0x80146959 SIOCDELLABEL = 0x80206997 SIOCDELMULTI = 0x80206932 SIOCDIFADDR = 0x80206919 @@ -1264,6 +1360,7 @@ const ( SIOCGPWE3CTRLWORD = 0xc02069dc SIOCGPWE3FAT = 0xc02069dd SIOCGPWE3NEIGHBOR = 0xc21869de + SIOCGRXHPRIO = 0xc02069db SIOCGSPPPPARAMS = 0xc0206994 SIOCGTXHPRIO = 0xc02069c6 SIOCGUMBINFO = 0xc02069be @@ -1310,17 +1407,13 @@ const ( SIOCSPWE3CTRLWORD = 0x802069dc SIOCSPWE3FAT = 0x802069dd SIOCSPWE3NEIGHBOR = 0x821869de + SIOCSRXHPRIO = 0x802069db SIOCSSPPPPARAMS = 0x80206993 SIOCSTXHPRIO = 0x802069c5 SIOCSUMBPARAM = 0x802069bf SIOCSVH = 0xc02069f5 SIOCSVNETFLOWID = 0x802069c3 SIOCSVNETID = 0x802069a6 - SIOCSWGDPID = 0xc018695b - SIOCSWGMAXFLOW = 0xc0186960 - SIOCSWGMAXGROUP = 0xc018695d - SIOCSWSDPID = 0x8018695c - SIOCSWSPORTNO = 0xc060695f SOCK_CLOEXEC = 0x8000 SOCK_DGRAM = 0x2 SOCK_DNS = 0x1000 @@ -1335,6 +1428,7 @@ const ( SO_BINDANY = 0x1000 SO_BROADCAST = 0x20 SO_DEBUG = 0x1 + SO_DOMAIN = 0x1024 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 SO_KEEPALIVE = 0x8 @@ -1342,6 +1436,7 @@ const ( SO_NETPROC = 0x1020 SO_OOBINLINE = 0x100 SO_PEERCRED = 0x1022 + SO_PROTOCOL = 0x1025 SO_RCVBUF = 0x1002 SO_RCVLOWAT = 0x1004 SO_RCVTIMEO = 0x1006 @@ -1391,7 +1486,18 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 - TCP_MAXBURST = 0x4 + TCPOPT_EOL = 0x0 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_HDR = 0x1010500 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SACK_PERMIT_HDR = 0x1010402 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_TSTAMP_HDR = 0x101080a + TCPOPT_WINDOW = 0x3 + TCP_INFO = 0x9 TCP_MAXSEG = 0x2 TCP_MAXWIN = 0xffff TCP_MAX_SACK = 0x3 @@ -1400,6 +1506,7 @@ const ( TCP_MSS = 0x200 TCP_NODELAY = 0x1 TCP_NOPUSH = 0x10 + TCP_SACKHOLE_LIMIT = 0x80 TCP_SACK_ENABLE = 0x8 TCSAFLUSH = 0x2 TIMER_ABSTIME = 0x1 @@ -1768,7 +1875,7 @@ var signalList = [...]struct { {3, "SIGQUIT", "quit"}, {4, "SIGILL", "illegal instruction"}, {5, "SIGTRAP", "trace/BPT trap"}, - {6, "SIGABRT", "abort trap"}, + {6, "SIGIOT", "abort trap"}, {7, "SIGEMT", "EMT trap"}, {8, "SIGFPE", "floating point exception"}, {9, "SIGKILL", "killed"}, @@ -1795,4 +1902,5 @@ var signalList = [...]struct { {30, "SIGUSR1", "user defined signal 
1"}, {31, "SIGUSR2", "user defined signal 2"}, {32, "SIGTHR", "thread AST"}, + {28672, "SIGSTKSZ", "unknown signal"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go index f1154ff56..03d90fe35 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go @@ -112,6 +112,12 @@ const ( BPF_FILDROP_CAPTURE = 0x1 BPF_FILDROP_DROP = 0x2 BPF_FILDROP_PASS = 0x0 + BPF_F_DIR_IN = 0x10 + BPF_F_DIR_MASK = 0x30 + BPF_F_DIR_OUT = 0x20 + BPF_F_DIR_SHIFT = 0x4 + BPF_F_FLOWID = 0x8 + BPF_F_PRI_MASK = 0x7 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -140,6 +146,7 @@ const ( BPF_OR = 0x40 BPF_RELEASE = 0x30bb6 BPF_RET = 0x6 + BPF_RND = 0xc0 BPF_RSH = 0x70 BPF_ST = 0x2 BPF_STX = 0x3 @@ -301,6 +308,8 @@ const ( EMUL_ENABLED = 0x1 EMUL_NATIVE = 0x2 ENDRUNDISC = 0x9 + ETH64_8021_RSVD_MASK = 0xfffffffffff0 + ETH64_8021_RSVD_PREFIX = 0x180c2000000 ETHERMIN = 0x2e ETHERMTU = 0x5dc ETHERTYPE_8023 = 0x4 @@ -353,6 +362,7 @@ const ( ETHERTYPE_DN = 0x6003 ETHERTYPE_DOGFIGHT = 0x1989 ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_EAPOL = 0x888e ETHERTYPE_ECMA = 0x803 ETHERTYPE_ENCRYPT = 0x803d ETHERTYPE_ES = 0x805d @@ -413,15 +423,16 @@ const ( ETHERTYPE_NCD = 0x8149 ETHERTYPE_NESTAR = 0x8006 ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NHRP = 0x2001 ETHERTYPE_NOVELL = 0x8138 ETHERTYPE_NS = 0x600 ETHERTYPE_NSAT = 0x601 ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NSH = 0x984f ETHERTYPE_NTRAILER = 0x10 ETHERTYPE_OS9 = 0x7007 ETHERTYPE_OS9NET = 0x7009 ETHERTYPE_PACER = 0x80c6 - ETHERTYPE_PAE = 0x888e ETHERTYPE_PBB = 0x88e7 ETHERTYPE_PCS = 0x4242 ETHERTYPE_PLANNING = 0x8044 @@ -504,10 +515,11 @@ const ( ETHER_VLAN_ENCAP_LEN = 0x4 EVFILT_AIO = -0x3 EVFILT_DEVICE = -0x8 + EVFILT_EXCEPT = -0x9 EVFILT_PROC = -0x5 EVFILT_READ = -0x1 EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0x8 + EVFILT_SYSCOUNT = 0x9 EVFILT_TIMER = -0x7 EVFILT_VNODE = -0x4 EVFILT_WRITE = -0x2 @@ -529,7 +541,7 @@ const ( EV_FLAG1 = 0x2000 EV_ONESHOT = 0x10 EV_RECEIPT = 0x40 - EV_SYSFLAGS = 0xf000 + EV_SYSFLAGS = 0xf800 EXTA = 0x4b00 EXTB = 0x9600 EXTPROC = 0x800 @@ -795,6 +807,7 @@ const ( IFT_VOICEOVERCABLE = 0xc6 IFT_VOICEOVERFRAMERELAY = 0x99 IFT_VOICEOVERIP = 0x68 + IFT_WIREGUARD = 0xfb IFT_X213 = 0x5d IFT_X25 = 0x5 IFT_X25DDN = 0x4 @@ -860,6 +873,7 @@ const ( IPPROTO_RAW = 0xff IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 @@ -970,6 +984,9 @@ const ( IP_TTL = 0x4 ISIG = 0x80 ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 IUCLC = 0x1000 IXANY = 0x800 IXOFF = 0x400 @@ -1041,6 +1058,19 @@ const ( MNT_WAIT = 0x1 MNT_WANTRDWR = 0x2000000 MNT_WXALLOWED = 0x800 + MOUNT_AFS = "afs" + MOUNT_CD9660 = "cd9660" + MOUNT_EXT2FS = "ext2fs" + MOUNT_FFS = "ffs" + MOUNT_FUSEFS = "fuse" + MOUNT_MFS = "mfs" + MOUNT_MSDOS = "msdos" + MOUNT_NCPFS = "ncpfs" + MOUNT_NFS = "nfs" + MOUNT_NTFS = "ntfs" + MOUNT_TMPFS = "tmpfs" + MOUNT_UDF = "udf" + MOUNT_UFS = "ffs" MSG_BCAST = 0x100 MSG_CMSG_CLOEXEC = 0x800 MSG_CTRUNC = 0x20 @@ -1053,6 +1083,7 @@ const ( MSG_PEEK = 0x2 MSG_TRUNC = 0x10 MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x1000 MS_ASYNC = 0x1 MS_INVALIDATE = 0x4 MS_SYNC = 0x2 @@ -1061,7 +1092,8 @@ const ( NET_RT_FLAGS = 0x2 NET_RT_IFLIST = 0x3 NET_RT_IFNAMES = 0x6 - NET_RT_MAXID = 0x7 + NET_RT_MAXID = 0x8 + NET_RT_SOURCE = 0x7 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 NFDBITS = 0x20 @@ -1078,6 +1110,7 @@ const ( NOTE_FORK = 0x40000000 NOTE_LINK = 0x10 NOTE_LOWAT = 
0x1 + NOTE_OOB = 0x4 NOTE_PCTRLMASK = 0xf0000000 NOTE_PDATAMASK = 0xfffff NOTE_RENAME = 0x20 @@ -1214,7 +1247,7 @@ const ( RTM_PROPOSAL = 0x13 RTM_REDIRECT = 0x6 RTM_RESOLVE = 0xb - RTM_RTTUNIT = 0xf4240 + RTM_SOURCE = 0x16 RTM_VERSION = 0x5 RTV_EXPIRE = 0x4 RTV_HOPCOUNT = 0x2 @@ -1232,6 +1265,9 @@ const ( RUSAGE_THREAD = 0x1 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x4 + SEEK_CUR = 0x1 + SEEK_END = 0x2 + SEEK_SET = 0x0 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1248,30 +1284,30 @@ const ( SIOCBRDGDELS = 0x80606942 SIOCBRDGFLUSH = 0x80606948 SIOCBRDGFRL = 0x808c694e - SIOCBRDGGCACHE = 0xc0186941 - SIOCBRDGGFD = 0xc0186952 - SIOCBRDGGHT = 0xc0186951 + SIOCBRDGGCACHE = 0xc0146941 + SIOCBRDGGFD = 0xc0146952 + SIOCBRDGGHT = 0xc0146951 SIOCBRDGGIFFLGS = 0xc060693e - SIOCBRDGGMA = 0xc0186953 + SIOCBRDGGMA = 0xc0146953 SIOCBRDGGPARAM = 0xc0406958 - SIOCBRDGGPRI = 0xc0186950 + SIOCBRDGGPRI = 0xc0146950 SIOCBRDGGRL = 0xc030694f - SIOCBRDGGTO = 0xc0186946 + SIOCBRDGGTO = 0xc0146946 SIOCBRDGIFS = 0xc0606942 SIOCBRDGRTS = 0xc0206943 SIOCBRDGSADDR = 0xc1286944 - SIOCBRDGSCACHE = 0x80186940 - SIOCBRDGSFD = 0x80186952 - SIOCBRDGSHT = 0x80186951 + SIOCBRDGSCACHE = 0x80146940 + SIOCBRDGSFD = 0x80146952 + SIOCBRDGSHT = 0x80146951 SIOCBRDGSIFCOST = 0x80606955 SIOCBRDGSIFFLGS = 0x8060693f SIOCBRDGSIFPRIO = 0x80606954 SIOCBRDGSIFPROT = 0x8060694a - SIOCBRDGSMA = 0x80186953 - SIOCBRDGSPRI = 0x80186950 - SIOCBRDGSPROTO = 0x8018695a - SIOCBRDGSTO = 0x80186945 - SIOCBRDGSTXHC = 0x80186959 + SIOCBRDGSMA = 0x80146953 + SIOCBRDGSPRI = 0x80146950 + SIOCBRDGSPROTO = 0x8014695a + SIOCBRDGSTO = 0x80146945 + SIOCBRDGSTXHC = 0x80146959 SIOCDELLABEL = 0x80206997 SIOCDELMULTI = 0x80206932 SIOCDIFADDR = 0x80206919 @@ -1378,11 +1414,6 @@ const ( SIOCSVH = 0xc02069f5 SIOCSVNETFLOWID = 0x802069c3 SIOCSVNETID = 0x802069a6 - SIOCSWGDPID = 0xc018695b - SIOCSWGMAXFLOW = 0xc0186960 - SIOCSWGMAXGROUP = 0xc018695d - SIOCSWSDPID = 0x8018695c - SIOCSWSPORTNO = 0xc060695f SOCK_CLOEXEC = 0x8000 SOCK_DGRAM = 0x2 SOCK_DNS = 0x1000 @@ -1455,7 +1486,18 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 - TCP_MAXBURST = 0x4 + TCPOPT_EOL = 0x0 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_HDR = 0x1010500 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SACK_PERMIT_HDR = 0x1010402 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_TSTAMP_HDR = 0x101080a + TCPOPT_WINDOW = 0x3 + TCP_INFO = 0x9 TCP_MAXSEG = 0x2 TCP_MAXWIN = 0xffff TCP_MAX_SACK = 0x3 @@ -1833,7 +1875,7 @@ var signalList = [...]struct { {3, "SIGQUIT", "quit"}, {4, "SIGILL", "illegal instruction"}, {5, "SIGTRAP", "trace/BPT trap"}, - {6, "SIGABRT", "abort trap"}, + {6, "SIGIOT", "abort trap"}, {7, "SIGEMT", "EMT trap"}, {8, "SIGFPE", "floating point exception"}, {9, "SIGKILL", "killed"}, @@ -1860,4 +1902,5 @@ var signalList = [...]struct { {30, "SIGUSR1", "user defined signal 1"}, {31, "SIGUSR2", "user defined signal 2"}, {32, "SIGTHR", "thread AST"}, + {81920, "SIGSTKSZ", "unknown signal"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go new file mode 100644 index 000000000..8e2c51b1e --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go @@ -0,0 +1,1905 @@ +// mkerrors.sh -m64 +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build ppc64 && openbsd +// +build ppc64,openbsd + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs -- -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_BLUETOOTH = 0x20 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_ENCAP = 0x1c + AF_HYLINK = 0xf + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x18 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_KEY = 0x1e + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x24 + AF_MPLS = 0x21 + AF_NATM = 0x1b + AF_NS = 0x6 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x11 + AF_SIP = 0x1d + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + ALTWERASE = 0x200 + ARPHRD_ETHER = 0x1 + ARPHRD_FRELAY = 0xf + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B9600 = 0x2580 + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDIRFILT = 0x4004427c + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc010427b + BIOCGETIF = 0x4020426b + BIOCGFILDROP = 0x40044278 + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044273 + BIOCGRTIMEOUT = 0x4010426e + BIOCGSTATS = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCLOCK = 0x20004276 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDIRFILT = 0x8004427d + BIOCSDLT = 0x8004427a + BIOCSETF = 0x80104267 + BIOCSETIF = 0x8020426c + BIOCSETWF = 0x80104277 + BIOCSFILDROP = 0x80044279 + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044272 + BIOCSRTIMEOUT = 0x8010426d + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIRECTION_IN = 0x1 + BPF_DIRECTION_OUT = 0x2 + BPF_DIV = 0x30 + BPF_FILDROP_CAPTURE = 0x1 + BPF_FILDROP_DROP = 0x2 + BPF_FILDROP_PASS = 0x0 + BPF_F_DIR_IN = 0x10 + BPF_F_DIR_MASK = 0x30 + BPF_F_DIR_OUT = 0x20 + BPF_F_DIR_SHIFT = 0x4 + BPF_F_FLOWID = 0x8 + BPF_F_PRI_MASK = 0x7 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXBUFSIZE = 0x200000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RND = 0xc0 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLOCK_BOOTTIME = 0x6 + CLOCK_MONOTONIC = 0x3 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x4 + CLOCK_UPTIME = 0x5 + CPUSTATES = 0x6 + CP_IDLE = 0x5 + CP_INTR = 0x4 + CP_NICE = 0x1 + CP_SPIN = 0x3 + CP_SYS = 0x2 + CP_USER = 0x0 + CREAD = 0x800 + CRTSCTS = 0x10000 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0xff + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + DIOCADDQUEUE = 0xc110445d + DIOCADDRULE = 0xcd604404 + DIOCADDSTATE = 0xc1084425 + DIOCCHANGERULE = 0xcd60441a + 
DIOCCLRIFFLAG = 0xc028445a + DIOCCLRSRCNODES = 0x20004455 + DIOCCLRSTATES = 0xc0e04412 + DIOCCLRSTATUS = 0xc0284416 + DIOCGETLIMIT = 0xc0084427 + DIOCGETQSTATS = 0xc1204460 + DIOCGETQUEUE = 0xc110445f + DIOCGETQUEUES = 0xc110445e + DIOCGETRULE = 0xcd604407 + DIOCGETRULES = 0xcd604406 + DIOCGETRULESET = 0xc444443b + DIOCGETRULESETS = 0xc444443a + DIOCGETSRCNODES = 0xc0104454 + DIOCGETSTATE = 0xc1084413 + DIOCGETSTATES = 0xc0104419 + DIOCGETSTATUS = 0xc1e84415 + DIOCGETSYNFLWATS = 0xc0084463 + DIOCGETTIMEOUT = 0xc008441e + DIOCIGETIFACES = 0xc0284457 + DIOCKILLSRCNODES = 0xc080445b + DIOCKILLSTATES = 0xc0e04429 + DIOCNATLOOK = 0xc0504417 + DIOCOSFPADD = 0xc088444f + DIOCOSFPFLUSH = 0x2000444e + DIOCOSFPGET = 0xc0884450 + DIOCRADDADDRS = 0xc4504443 + DIOCRADDTABLES = 0xc450443d + DIOCRCLRADDRS = 0xc4504442 + DIOCRCLRASTATS = 0xc4504448 + DIOCRCLRTABLES = 0xc450443c + DIOCRCLRTSTATS = 0xc4504441 + DIOCRDELADDRS = 0xc4504444 + DIOCRDELTABLES = 0xc450443e + DIOCRGETADDRS = 0xc4504446 + DIOCRGETASTATS = 0xc4504447 + DIOCRGETTABLES = 0xc450443f + DIOCRGETTSTATS = 0xc4504440 + DIOCRINADEFINE = 0xc450444d + DIOCRSETADDRS = 0xc4504445 + DIOCRSETTFLAGS = 0xc450444a + DIOCRTSTADDRS = 0xc4504449 + DIOCSETDEBUG = 0xc0044418 + DIOCSETHOSTID = 0xc0044456 + DIOCSETIFFLAG = 0xc0284459 + DIOCSETLIMIT = 0xc0084428 + DIOCSETREASS = 0xc004445c + DIOCSETSTATUSIF = 0xc0284414 + DIOCSETSYNCOOKIES = 0xc0014462 + DIOCSETSYNFLWATS = 0xc0084461 + DIOCSETTIMEOUT = 0xc008441d + DIOCSTART = 0x20004401 + DIOCSTOP = 0x20004402 + DIOCXBEGIN = 0xc0104451 + DIOCXCOMMIT = 0xc0104452 + DIOCXROLLBACK = 0xc0104453 + DLT_ARCNET = 0x7 + DLT_ATM_RFC1483 = 0xb + DLT_AX25 = 0x3 + DLT_CHAOS = 0x5 + DLT_C_HDLC = 0x68 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0xd + DLT_FDDI = 0xa + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_LOOP = 0xc + DLT_MPLS = 0xdb + DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_SERIAL = 0x32 + DLT_PRONET = 0x4 + DLT_RAW = 0xe + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DLT_USBPCAP = 0xf9 + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EMT_TAGOVF = 0x1 + EMUL_ENABLED = 0x1 + EMUL_NATIVE = 0x2 + ENDRUNDISC = 0x9 + ETH64_8021_RSVD_MASK = 0xfffffffffff0 + ETH64_8021_RSVD_PREFIX = 0x180c2000000 + ETHERMIN = 0x2e + ETHERMTU = 0x5dc + ETHERTYPE_8023 = 0x4 + ETHERTYPE_AARP = 0x80f3 + ETHERTYPE_ACCTON = 0x8390 + ETHERTYPE_AEONIC = 0x8036 + ETHERTYPE_ALPHA = 0x814a + ETHERTYPE_AMBER = 0x6008 + ETHERTYPE_AMOEBA = 0x8145 + ETHERTYPE_AOE = 0x88a2 + ETHERTYPE_APOLLO = 0x80f7 + ETHERTYPE_APOLLODOMAIN = 0x8019 + ETHERTYPE_APPLETALK = 0x809b + ETHERTYPE_APPLITEK = 0x80c7 + ETHERTYPE_ARGONAUT = 0x803a + ETHERTYPE_ARP = 0x806 + ETHERTYPE_AT = 0x809b + ETHERTYPE_ATALK = 0x809b + ETHERTYPE_ATOMIC = 0x86df + ETHERTYPE_ATT = 0x8069 + ETHERTYPE_ATTSTANFORD = 0x8008 + ETHERTYPE_AUTOPHON = 0x806a + ETHERTYPE_AXIS = 0x8856 + ETHERTYPE_BCLOOP = 0x9003 + ETHERTYPE_BOFL = 0x8102 + ETHERTYPE_CABLETRON = 0x7034 + 
ETHERTYPE_CHAOS = 0x804 + ETHERTYPE_COMDESIGN = 0x806c + ETHERTYPE_COMPUGRAPHIC = 0x806d + ETHERTYPE_COUNTERPOINT = 0x8062 + ETHERTYPE_CRONUS = 0x8004 + ETHERTYPE_CRONUSVLN = 0x8003 + ETHERTYPE_DCA = 0x1234 + ETHERTYPE_DDE = 0x807b + ETHERTYPE_DEBNI = 0xaaaa + ETHERTYPE_DECAM = 0x8048 + ETHERTYPE_DECCUST = 0x6006 + ETHERTYPE_DECDIAG = 0x6005 + ETHERTYPE_DECDNS = 0x803c + ETHERTYPE_DECDTS = 0x803e + ETHERTYPE_DECEXPER = 0x6000 + ETHERTYPE_DECLAST = 0x8041 + ETHERTYPE_DECLTM = 0x803f + ETHERTYPE_DECMUMPS = 0x6009 + ETHERTYPE_DECNETBIOS = 0x8040 + ETHERTYPE_DELTACON = 0x86de + ETHERTYPE_DIDDLE = 0x4321 + ETHERTYPE_DLOG1 = 0x660 + ETHERTYPE_DLOG2 = 0x661 + ETHERTYPE_DN = 0x6003 + ETHERTYPE_DOGFIGHT = 0x1989 + ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_EAPOL = 0x888e + ETHERTYPE_ECMA = 0x803 + ETHERTYPE_ENCRYPT = 0x803d + ETHERTYPE_ES = 0x805d + ETHERTYPE_EXCELAN = 0x8010 + ETHERTYPE_EXPERDATA = 0x8049 + ETHERTYPE_FLIP = 0x8146 + ETHERTYPE_FLOWCONTROL = 0x8808 + ETHERTYPE_FRARP = 0x808 + ETHERTYPE_GENDYN = 0x8068 + ETHERTYPE_HAYES = 0x8130 + ETHERTYPE_HIPPI_FP = 0x8180 + ETHERTYPE_HITACHI = 0x8820 + ETHERTYPE_HP = 0x8005 + ETHERTYPE_IEEEPUP = 0xa00 + ETHERTYPE_IEEEPUPAT = 0xa01 + ETHERTYPE_IMLBL = 0x4c42 + ETHERTYPE_IMLBLDIAG = 0x424c + ETHERTYPE_IP = 0x800 + ETHERTYPE_IPAS = 0x876c + ETHERTYPE_IPV6 = 0x86dd + ETHERTYPE_IPX = 0x8137 + ETHERTYPE_IPXNEW = 0x8037 + ETHERTYPE_KALPANA = 0x8582 + ETHERTYPE_LANBRIDGE = 0x8038 + ETHERTYPE_LANPROBE = 0x8888 + ETHERTYPE_LAT = 0x6004 + ETHERTYPE_LBACK = 0x9000 + ETHERTYPE_LITTLE = 0x8060 + ETHERTYPE_LLDP = 0x88cc + ETHERTYPE_LOGICRAFT = 0x8148 + ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MACSEC = 0x88e5 + ETHERTYPE_MATRA = 0x807a + ETHERTYPE_MAX = 0xffff + ETHERTYPE_MERIT = 0x807c + ETHERTYPE_MICP = 0x873a + ETHERTYPE_MOPDL = 0x6001 + ETHERTYPE_MOPRC = 0x6002 + ETHERTYPE_MOTOROLA = 0x818d + ETHERTYPE_MPLS = 0x8847 + ETHERTYPE_MPLS_MCAST = 0x8848 + ETHERTYPE_MUMPS = 0x813f + ETHERTYPE_NBPCC = 0x3c04 + ETHERTYPE_NBPCLAIM = 0x3c09 + ETHERTYPE_NBPCLREQ = 0x3c05 + ETHERTYPE_NBPCLRSP = 0x3c06 + ETHERTYPE_NBPCREQ = 0x3c02 + ETHERTYPE_NBPCRSP = 0x3c03 + ETHERTYPE_NBPDG = 0x3c07 + ETHERTYPE_NBPDGB = 0x3c08 + ETHERTYPE_NBPDLTE = 0x3c0a + ETHERTYPE_NBPRAR = 0x3c0c + ETHERTYPE_NBPRAS = 0x3c0b + ETHERTYPE_NBPRST = 0x3c0d + ETHERTYPE_NBPSCD = 0x3c01 + ETHERTYPE_NBPVCD = 0x3c00 + ETHERTYPE_NBS = 0x802 + ETHERTYPE_NCD = 0x8149 + ETHERTYPE_NESTAR = 0x8006 + ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NHRP = 0x2001 + ETHERTYPE_NOVELL = 0x8138 + ETHERTYPE_NS = 0x600 + ETHERTYPE_NSAT = 0x601 + ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NSH = 0x984f + ETHERTYPE_NTRAILER = 0x10 + ETHERTYPE_OS9 = 0x7007 + ETHERTYPE_OS9NET = 0x7009 + ETHERTYPE_PACER = 0x80c6 + ETHERTYPE_PBB = 0x88e7 + ETHERTYPE_PCS = 0x4242 + ETHERTYPE_PLANNING = 0x8044 + ETHERTYPE_PPP = 0x880b + ETHERTYPE_PPPOE = 0x8864 + ETHERTYPE_PPPOEDISC = 0x8863 + ETHERTYPE_PRIMENTS = 0x7031 + ETHERTYPE_PUP = 0x200 + ETHERTYPE_PUPAT = 0x200 + ETHERTYPE_QINQ = 0x88a8 + ETHERTYPE_RACAL = 0x7030 + ETHERTYPE_RATIONAL = 0x8150 + ETHERTYPE_RAWFR = 0x6559 + ETHERTYPE_RCL = 0x1995 + ETHERTYPE_RDP = 0x8739 + ETHERTYPE_RETIX = 0x80f2 + ETHERTYPE_REVARP = 0x8035 + ETHERTYPE_SCA = 0x6007 + ETHERTYPE_SECTRA = 0x86db + ETHERTYPE_SECUREDATA = 0x876d + ETHERTYPE_SGITW = 0x817e + ETHERTYPE_SG_BOUNCE = 0x8016 + ETHERTYPE_SG_DIAG = 0x8013 + ETHERTYPE_SG_NETGAMES = 0x8014 + ETHERTYPE_SG_RESV = 0x8015 + ETHERTYPE_SIMNET = 0x5208 + ETHERTYPE_SLOW = 0x8809 + ETHERTYPE_SNA = 0x80d5 + ETHERTYPE_SNMP = 0x814c + ETHERTYPE_SONIX = 0xfaf5 + ETHERTYPE_SPIDER = 0x809f + 
ETHERTYPE_SPRITE = 0x500 + ETHERTYPE_STP = 0x8181 + ETHERTYPE_TALARIS = 0x812b + ETHERTYPE_TALARISMC = 0x852b + ETHERTYPE_TCPCOMP = 0x876b + ETHERTYPE_TCPSM = 0x9002 + ETHERTYPE_TEC = 0x814f + ETHERTYPE_TIGAN = 0x802f + ETHERTYPE_TRAIL = 0x1000 + ETHERTYPE_TRANSETHER = 0x6558 + ETHERTYPE_TYMSHARE = 0x802e + ETHERTYPE_UBBST = 0x7005 + ETHERTYPE_UBDEBUG = 0x900 + ETHERTYPE_UBDIAGLOOP = 0x7002 + ETHERTYPE_UBDL = 0x7000 + ETHERTYPE_UBNIU = 0x7001 + ETHERTYPE_UBNMC = 0x7003 + ETHERTYPE_VALID = 0x1600 + ETHERTYPE_VARIAN = 0x80dd + ETHERTYPE_VAXELN = 0x803b + ETHERTYPE_VEECO = 0x8067 + ETHERTYPE_VEXP = 0x805b + ETHERTYPE_VGLAB = 0x8131 + ETHERTYPE_VINES = 0xbad + ETHERTYPE_VINESECHO = 0xbaf + ETHERTYPE_VINESLOOP = 0xbae + ETHERTYPE_VITAL = 0xff00 + ETHERTYPE_VLAN = 0x8100 + ETHERTYPE_VLTLMAN = 0x8080 + ETHERTYPE_VPROD = 0x805c + ETHERTYPE_VURESERVED = 0x8147 + ETHERTYPE_WATERLOO = 0x8130 + ETHERTYPE_WELLFLEET = 0x8103 + ETHERTYPE_X25 = 0x805 + ETHERTYPE_X75 = 0x801 + ETHERTYPE_XNSSM = 0x9001 + ETHERTYPE_XTP = 0x817d + ETHER_ADDR_LEN = 0x6 + ETHER_ALIGN = 0x2 + ETHER_CRC_LEN = 0x4 + ETHER_CRC_POLY_BE = 0x4c11db6 + ETHER_CRC_POLY_LE = 0xedb88320 + ETHER_HDR_LEN = 0xe + ETHER_MAX_DIX_LEN = 0x600 + ETHER_MAX_HARDMTU_LEN = 0xff9b + ETHER_MAX_LEN = 0x5ee + ETHER_MIN_LEN = 0x40 + ETHER_TYPE_LEN = 0x2 + ETHER_VLAN_ENCAP_LEN = 0x4 + EVFILT_AIO = -0x3 + EVFILT_DEVICE = -0x8 + EVFILT_EXCEPT = -0x9 + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0x9 + EVFILT_TIMER = -0x7 + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EVL_ENCAPLEN = 0x4 + EVL_PRIO_BITS = 0xd + EVL_PRIO_MAX = 0x7 + EVL_VLID_MASK = 0xfff + EVL_VLID_MAX = 0xffe + EVL_VLID_MIN = 0x1 + EVL_VLID_NULL = 0x0 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf800 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0xa + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETOWN = 0x5 + F_ISATTY = 0xb + F_OK = 0x0 + F_RDLCK = 0x1 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 0x1 + IFF_ALLMULTI = 0x200 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x8e52 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_STATICARP = 0x20 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BLUETOOTH = 0xf8 + IFT_BRIDGE = 0xd1 + IFT_BSC = 0x53 + IFT_CARP = 0xf7 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + 
IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + IFT_DOCSCABLEUPSTREAMCHANNEL = 0xcd + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 0x8c + IFT_DUMMY = 0xf1 + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ECONET = 0xce + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_EPLRS = 0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAITH = 0xf3 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + IFT_HIPPIINTERFACE = 0x39 + IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE1394 = 0x90 + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INFINIBAND = 0xc7 + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LINEGROUP = 0xd2 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MBIM = 0xfa + IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf5 + IFT_PFLOW = 0xf9 + IFT_PFSYNC = 0xf6 + IFT_PLC = 0xae + IFT_PON155 = 0xcf + IFT_PON622 = 0xd0 + IFT_POS = 0xab + IFT_PPP = 0x17 + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPATM = 0xc5 + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf2 + IFT_Q2931 = 0xc9 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 0x1f + IFT_SIPSIG = 0xcc + IFT_SIPTG = 0xcb + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TELINK = 0xc8 + IFT_TERMPAD 
= 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VIRTUALTG = 0xca + IFT_VOICEDID = 0xd5 + IFT_VOICEEM = 0x64 + IFT_VOICEEMFGD = 0xd3 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFGDEANA = 0xd4 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERCABLE = 0xc6 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_WIREGUARD = 0xfb + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IN_RFC3021_HOST = 0x1 + IN_RFC3021_NET = 0xfffffffe + IN_RFC3021_NSHIFT = 0x1f + IPPROTO_AH = 0x33 + IPPROTO_CARP = 0x70 + IPPROTO_DIVERT = 0x102 + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPIP = 0x4 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x103 + IPPROTO_MOBILE = 0x37 + IPPROTO_MPLS = 0x89 + IPPROTO_NONE = 0x3b + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPV6_AUTH_LEVEL = 0x35 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_ESP_NETWORK_LEVEL = 0x37 + IPV6_ESP_TRANS_LEVEL = 0x36 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff + IPV6_FRAGTTL = 0x78 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPCOMP_LEVEL = 0x3c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXPACKET = 0xffff + IPV6_MINHOPCOUNT = 0x41 + IPV6_MMTU = 0x500 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_OPTIONS = 0x1 + IPV6_PATHMTU = 0x2c + IPV6_PIPEX = 0x3f + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVDSTPORT = 0x40 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RTABLE = 0x1021 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + 
IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_AUTH_LEVEL = 0x14 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0xd + IP_ESP_NETWORK_LEVEL = 0x16 + IP_ESP_TRANS_LEVEL = 0x15 + IP_HDRINCL = 0x2 + IP_IPCOMP_LEVEL = 0x1d + IP_IPDEFTTL = 0x25 + IP_IPSECFLOWINFO = 0x24 + IP_IPSEC_LOCAL_AUTH = 0x1b + IP_IPSEC_LOCAL_CRED = 0x19 + IP_IPSEC_LOCAL_ID = 0x17 + IP_IPSEC_REMOTE_AUTH = 0x1c + IP_IPSEC_REMOTE_CRED = 0x1a + IP_IPSEC_REMOTE_ID = 0x18 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0xfff + IP_MF = 0x2000 + IP_MINTTL = 0x20 + IP_MIN_MEMBERSHIPS = 0xf + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x1 + IP_PIPEX = 0x22 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVDSTPORT = 0x21 + IP_RECVIF = 0x1e + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVRTABLE = 0x23 + IP_RECVTTL = 0x1f + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RTABLE = 0x1021 + IP_SENDSRCADDR = 0x7 + IP_TOS = 0x3 + IP_TTL = 0x4 + ISIG = 0x80 + ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 + IUCLC = 0x1000 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + LCNT_OVERLOAD_FLUSH = 0x6 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x6 + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_SPACEAVAIL = 0x5 + MADV_WILLNEED = 0x3 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_CONCEAL = 0x8000 + MAP_COPY = 0x2 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_FLAGMASK = 0xfff7 + MAP_HASSEMAPHORE = 0x0 + MAP_INHERIT = 0x0 + MAP_INHERIT_COPY = 0x1 + MAP_INHERIT_NONE = 0x2 + MAP_INHERIT_SHARE = 0x0 + MAP_INHERIT_ZERO = 0x3 + MAP_NOEXTEND = 0x0 + MAP_NORESERVE = 0x0 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x0 + MAP_SHARED = 0x1 + MAP_STACK = 0x4000 + MAP_TRYFIXED = 0x0 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MNT_ASYNC = 0x40 + MNT_DEFEXPORTED = 0x200 + MNT_DELEXPORT = 0x20000 + MNT_DOOMED = 0x8000000 + MNT_EXPORTANON = 0x400 + MNT_EXPORTED = 0x100 + MNT_EXRDONLY = 0x80 + MNT_FORCE = 0x80000 + MNT_LAZY = 0x3 + MNT_LOCAL = 0x1000 + MNT_NOATIME = 0x8000 + MNT_NODEV = 0x10 + MNT_NOEXEC = 0x4 + MNT_NOPERM = 0x20 + MNT_NOSUID = 0x8 + MNT_NOWAIT = 0x2 + MNT_QUOTA = 0x2000 + MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SOFTDEP = 0x4000000 + MNT_STALLED = 0x100000 + MNT_SWAPPABLE = 0x200000 + MNT_SYNCHRONOUS = 0x2 + MNT_UPDATE = 0x10000 + MNT_VISFLAGMASK = 0x400ffff + MNT_WAIT = 0x1 + MNT_WANTRDWR = 0x2000000 + MNT_WXALLOWED = 0x800 + MOUNT_AFS = "afs" + MOUNT_CD9660 = "cd9660" + MOUNT_EXT2FS = "ext2fs" + MOUNT_FFS = "ffs" + MOUNT_FUSEFS = "fuse" + MOUNT_MFS = "mfs" + MOUNT_MSDOS = "msdos" + MOUNT_NCPFS = "ncpfs" + MOUNT_NFS = "nfs" + MOUNT_NTFS = "ntfs" + MOUNT_TMPFS = "tmpfs" + MOUNT_UDF = "udf" + MOUNT_UFS = "ffs" + MSG_BCAST = 0x100 + MSG_CMSG_CLOEXEC = 0x800 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOR = 0x8 + MSG_MCAST = 0x200 + MSG_NOSIGNAL = 0x400 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MSG_WAITFORONE = 0x1000 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x4 + MS_SYNC = 0x2 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_IFNAMES = 0x6 + NET_RT_MAXID = 0x8 + NET_RT_SOURCE = 0x7 + NET_RT_STATS = 0x4 + NET_RT_TABLE = 0x5 + 
NFDBITS = 0x20 + NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 + NOTE_ATTRIB = 0x8 + NOTE_CHANGE = 0x1 + NOTE_CHILD = 0x4 + NOTE_DELETE = 0x1 + NOTE_EOF = 0x2 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_OOB = 0x4 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRUNCATE = 0x80 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + OLCUC = 0x20 + ONLCR = 0x2 + ONLRET = 0x80 + ONOCR = 0x40 + ONOEOT = 0x8 + OPOST = 0x1 + OXTABS = 0x4 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x10000 + O_CREAT = 0x200 + O_DIRECTORY = 0x20000 + O_DSYNC = 0x80 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x80 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PF_FLUSH = 0x1 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 + RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BFD = 0xb + RTAX_BRD = 0x7 + RTAX_DNS = 0xc + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_LABEL = 0xa + RTAX_MAX = 0xf + RTAX_NETMASK = 0x2 + RTAX_SEARCH = 0xe + RTAX_SRC = 0x8 + RTAX_SRCMASK = 0x9 + RTAX_STATIC = 0xd + RTA_AUTHOR = 0x40 + RTA_BFD = 0x800 + RTA_BRD = 0x80 + RTA_DNS = 0x1000 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_LABEL = 0x400 + RTA_NETMASK = 0x4 + RTA_SEARCH = 0x4000 + RTA_SRC = 0x100 + RTA_SRCMASK = 0x200 + RTA_STATIC = 0x2000 + RTF_ANNOUNCE = 0x4000 + RTF_BFD = 0x1000000 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_CACHED = 0x20000 + RTF_CLONED = 0x10000 + RTF_CLONING = 0x100 + RTF_CONNECTED = 0x800000 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_FMASK = 0x110fc08 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MPATH = 0x40000 + RTF_MPLS = 0x100000 + RTF_MULTICAST = 0x200 + RTF_PERMANENT_ARP = 0x2000 + RTF_PROTO1 = 0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x2000 + RTF_REJECT = 0x8 + RTF_STATIC = 0x800 + RTF_UP = 0x1 + RTF_USETRAILERS = 0x8000 + RTM_80211INFO = 0x15 + RTM_ADD = 0x1 + RTM_BFD = 0x12 + RTM_CHANGE = 0x3 + RTM_CHGADDRATTR = 0x14 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DESYNC = 0x10 + RTM_GET = 0x4 + RTM_IFANNOUNCE = 0xf + RTM_IFINFO = 0xe + RTM_INVALIDATE = 0x11 + RTM_LOSING = 0x5 + RTM_MAXSIZE = 0x800 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_PROPOSAL = 0x13 + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_SOURCE = 0x16 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RT_TABLEID_BITS = 0x8 + RT_TABLEID_MASK = 0xff + RT_TABLEID_MAX = 0xff + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x4 + SEEK_CUR = 0x1 + SEEK_END = 0x2 + SEEK_SET = 0x0 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCAIFGROUP = 
0x80286987 + SIOCATMARK = 0x40047307 + SIOCBRDGADD = 0x8060693c + SIOCBRDGADDL = 0x80606949 + SIOCBRDGADDS = 0x80606941 + SIOCBRDGARL = 0x808c694d + SIOCBRDGDADDR = 0x81286947 + SIOCBRDGDEL = 0x8060693d + SIOCBRDGDELS = 0x80606942 + SIOCBRDGFLUSH = 0x80606948 + SIOCBRDGFRL = 0x808c694e + SIOCBRDGGCACHE = 0xc0146941 + SIOCBRDGGFD = 0xc0146952 + SIOCBRDGGHT = 0xc0146951 + SIOCBRDGGIFFLGS = 0xc060693e + SIOCBRDGGMA = 0xc0146953 + SIOCBRDGGPARAM = 0xc0406958 + SIOCBRDGGPRI = 0xc0146950 + SIOCBRDGGRL = 0xc030694f + SIOCBRDGGTO = 0xc0146946 + SIOCBRDGIFS = 0xc0606942 + SIOCBRDGRTS = 0xc0206943 + SIOCBRDGSADDR = 0xc1286944 + SIOCBRDGSCACHE = 0x80146940 + SIOCBRDGSFD = 0x80146952 + SIOCBRDGSHT = 0x80146951 + SIOCBRDGSIFCOST = 0x80606955 + SIOCBRDGSIFFLGS = 0x8060693f + SIOCBRDGSIFPRIO = 0x80606954 + SIOCBRDGSIFPROT = 0x8060694a + SIOCBRDGSMA = 0x80146953 + SIOCBRDGSPRI = 0x80146950 + SIOCBRDGSPROTO = 0x8014695a + SIOCBRDGSTO = 0x80146945 + SIOCBRDGSTXHC = 0x80146959 + SIOCDELLABEL = 0x80206997 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFGROUP = 0x80286989 + SIOCDIFPARENT = 0x802069b4 + SIOCDIFPHYADDR = 0x80206949 + SIOCDPWE3NEIGHBOR = 0x802069de + SIOCDVNETID = 0x802069af + SIOCGETKALIVE = 0xc01869a4 + SIOCGETLABEL = 0x8020699a + SIOCGETMPWCFG = 0xc02069ae + SIOCGETPFLOW = 0xc02069fe + SIOCGETPFSYNC = 0xc02069f8 + SIOCGETSGCNT = 0xc0207534 + SIOCGETVIFCNT = 0xc0287533 + SIOCGETVLAN = 0xc0206990 + SIOCGIFADDR = 0xc0206921 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCONF = 0xc0106924 + SIOCGIFDATA = 0xc020691b + SIOCGIFDESCR = 0xc0206981 + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGATTR = 0xc028698b + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGLIST = 0xc028698d + SIOCGIFGMEMB = 0xc028698a + SIOCGIFGROUP = 0xc0286988 + SIOCGIFHARDMTU = 0xc02069a5 + SIOCGIFLLPRIO = 0xc02069b6 + SIOCGIFMEDIA = 0xc0406938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc020697e + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPAIR = 0xc02069b1 + SIOCGIFPARENT = 0xc02069b3 + SIOCGIFPRIORITY = 0xc020699c + SIOCGIFRDOMAIN = 0xc02069a0 + SIOCGIFRTLABEL = 0xc0206983 + SIOCGIFRXR = 0x802069aa + SIOCGIFSFFPAGE = 0xc1126939 + SIOCGIFXFLAGS = 0xc020699e + SIOCGLIFPHYADDR = 0xc218694b + SIOCGLIFPHYDF = 0xc02069c2 + SIOCGLIFPHYECN = 0xc02069c8 + SIOCGLIFPHYRTABLE = 0xc02069a2 + SIOCGLIFPHYTTL = 0xc02069a9 + SIOCGPGRP = 0x40047309 + SIOCGPWE3 = 0xc0206998 + SIOCGPWE3CTRLWORD = 0xc02069dc + SIOCGPWE3FAT = 0xc02069dd + SIOCGPWE3NEIGHBOR = 0xc21869de + SIOCGRXHPRIO = 0xc02069db + SIOCGSPPPPARAMS = 0xc0206994 + SIOCGTXHPRIO = 0xc02069c6 + SIOCGUMBINFO = 0xc02069be + SIOCGUMBPARAM = 0xc02069c0 + SIOCGVH = 0xc02069f6 + SIOCGVNETFLOWID = 0xc02069c4 + SIOCGVNETID = 0xc02069a7 + SIOCIFAFATTACH = 0x801169ab + SIOCIFAFDETACH = 0x801169ac + SIOCIFCREATE = 0x8020697a + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc0106978 + SIOCSETKALIVE = 0x801869a3 + SIOCSETLABEL = 0x80206999 + SIOCSETMPWCFG = 0x802069ad + SIOCSETPFLOW = 0x802069fd + SIOCSETPFSYNC = 0x802069f7 + SIOCSETVLAN = 0x8020698f + SIOCSIFADDR = 0x8020690c + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFDESCR = 0x80206980 + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGATTR = 0x8028698c + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020691f + SIOCSIFLLPRIO = 0x802069b5 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x8020697f + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPAIR = 0x802069b0 + SIOCSIFPARENT = 0x802069b2 + SIOCSIFPRIORITY = 0x8020699b + SIOCSIFRDOMAIN = 0x8020699f + SIOCSIFRTLABEL = 0x80206982 + SIOCSIFXFLAGS = 
0x8020699d + SIOCSLIFPHYADDR = 0x8218694a + SIOCSLIFPHYDF = 0x802069c1 + SIOCSLIFPHYECN = 0x802069c7 + SIOCSLIFPHYRTABLE = 0x802069a1 + SIOCSLIFPHYTTL = 0x802069a8 + SIOCSPGRP = 0x80047308 + SIOCSPWE3CTRLWORD = 0x802069dc + SIOCSPWE3FAT = 0x802069dd + SIOCSPWE3NEIGHBOR = 0x821869de + SIOCSRXHPRIO = 0x802069db + SIOCSSPPPPARAMS = 0x80206993 + SIOCSTXHPRIO = 0x802069c5 + SIOCSUMBPARAM = 0x802069bf + SIOCSVH = 0xc02069f5 + SIOCSVNETFLOWID = 0x802069c3 + SIOCSVNETID = 0x802069a6 + SOCK_CLOEXEC = 0x8000 + SOCK_DGRAM = 0x2 + SOCK_DNS = 0x1000 + SOCK_NONBLOCK = 0x4000 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_BINDANY = 0x1000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DOMAIN = 0x1024 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_NETPROC = 0x1020 + SO_OOBINLINE = 0x100 + SO_PEERCRED = 0x1022 + SO_PROTOCOL = 0x1025 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_RTABLE = 0x1021 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_SPLICE = 0x1023 + SO_TIMESTAMP = 0x800 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SO_ZEROIZE = 0x2000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TCIFLUSH = 0x1 + TCIOFF = 0x3 + TCIOFLUSH = 0x3 + TCION = 0x4 + TCOFLUSH = 0x2 + TCOOFF = 0x1 + TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_HDR = 0x1010500 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SACK_PERMIT_HDR = 0x1010402 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_TSTAMP_HDR = 0x101080a + TCPOPT_WINDOW = 0x3 + TCP_INFO = 0x9 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x3 + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x4 + TCP_MSS = 0x200 + TCP_NODELAY = 0x1 + TCP_NOPUSH = 0x10 + TCP_SACKHOLE_LIMIT = 0x80 + TCP_SACK_ENABLE = 0x8 + TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 + TIOCCBRK = 0x2000747a + TIOCCDTR = 0x20007478 + TIOCCHKVERAUTH = 0x2000741e + TIOCCLRVERAUTH = 0x2000741d + TIOCCONS = 0x80047462 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLAG_CLOCAL = 0x2 + TIOCFLAG_CRTSCTS = 0x4 + TIOCFLAG_MDMBUF = 0x8 + TIOCFLAG_PPS = 0x10 + TIOCFLAG_SOFTCAR = 0x1 + TIOCFLUSH = 0x80047410 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGFLAGS = 0x4004745d + TIOCGPGRP = 0x40047477 + TIOCGSID = 0x40047463 + TIOCGTSTAMP = 0x4010745b + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGET = 0x4004746a + TIOCMODG = 0x4004746a + TIOCMODS = 0x8004746d + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + 
TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSETVERAUTH = 0x8004741c + TIOCSFLAGS = 0x8004745c + TIOCSIG = 0x8004745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTOP = 0x2000746f + TIOCSTSTAMP = 0x8008745a + TIOCSWINSZ = 0x80087467 + TIOCUCNTL = 0x80047466 + TIOCUCNTL_CBRK = 0x7a + TIOCUCNTL_SBRK = 0x7b + TOSTOP = 0x400000 + UTIME_NOW = -0x2 + UTIME_OMIT = -0x1 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VM_ANONMIN = 0x7 + VM_LOADAVG = 0x2 + VM_MALLOC_CONF = 0xc + VM_MAXID = 0xd + VM_MAXSLP = 0xa + VM_METER = 0x1 + VM_NKMEMPAGES = 0x6 + VM_PSSTRINGS = 0x3 + VM_SWAPENCRYPT = 0x5 + VM_USPACE = 0xb + VM_UVMEXP = 0x4 + VM_VNODEMIN = 0x9 + VM_VTEXTMIN = 0x8 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WALTSIG = 0x4 + WCONTINUED = 0x8 + WCOREFLAG = 0x80 + WNOHANG = 0x1 + WUNTRACED = 0x2 + XCASE = 0x1000000 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x5c) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x58) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x59) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EIPSEC = syscall.Errno(0x52) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x5f) + ELOOP = syscall.Errno(0x3e) + EMEDIUMTYPE = syscall.Errno(0x56) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x53) + ENOBUFS = syscall.Errno(0x37) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOMEDIUM = syscall.Errno(0x55) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x5a) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x5d) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x5b) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW 
= syscall.Errno(0x57) + EOWNERDEAD = syscall.Errno(0x5e) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5f) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTHR = syscall.Signal(0x20) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EAGAIN", "resource temporarily unavailable"}, + {36, "EINPROGRESS", 
"operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disk quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC program not available"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIPSEC", "IPsec processing failure"}, + {83, "ENOATTR", "attribute not found"}, + {84, "EILSEQ", "illegal byte sequence"}, + {85, "ENOMEDIUM", "no medium found"}, + {86, "EMEDIUMTYPE", "wrong medium type"}, + {87, "EOVERFLOW", "value too large to be stored in data type"}, + {88, "ECANCELED", "operation canceled"}, + {89, "EIDRM", "identifier removed"}, + {90, "ENOMSG", "no message of desired type"}, + {91, "ENOTSUP", "not supported"}, + {92, "EBADMSG", "bad message"}, + {93, "ENOTRECOVERABLE", "state not recoverable"}, + {94, "EOWNERDEAD", "previous owner died"}, + {95, "ELAST", "protocol error"}, +} + +// Signal table +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGABRT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended 
(signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGTHR", "thread AST"}, +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go new file mode 100644 index 000000000..13d403031 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go @@ -0,0 +1,1904 @@ +// mkerrors.sh -m64 +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build riscv64 && openbsd +// +build riscv64,openbsd + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs -- -m64 _const.go + +package unix + +import "syscall" + +const ( + AF_APPLETALK = 0x10 + AF_BLUETOOTH = 0x20 + AF_CCITT = 0xa + AF_CHAOS = 0x5 + AF_CNT = 0x15 + AF_COIP = 0x14 + AF_DATAKIT = 0x9 + AF_DECnet = 0xc + AF_DLI = 0xd + AF_E164 = 0x1a + AF_ECMA = 0x8 + AF_ENCAP = 0x1c + AF_HYLINK = 0xf + AF_IMPLINK = 0x3 + AF_INET = 0x2 + AF_INET6 = 0x18 + AF_IPX = 0x17 + AF_ISDN = 0x1a + AF_ISO = 0x7 + AF_KEY = 0x1e + AF_LAT = 0xe + AF_LINK = 0x12 + AF_LOCAL = 0x1 + AF_MAX = 0x24 + AF_MPLS = 0x21 + AF_NATM = 0x1b + AF_NS = 0x6 + AF_OSI = 0x7 + AF_PUP = 0x4 + AF_ROUTE = 0x11 + AF_SIP = 0x1d + AF_SNA = 0xb + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + ALTWERASE = 0x200 + ARPHRD_ETHER = 0x1 + ARPHRD_FRELAY = 0xf + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + B0 = 0x0 + B110 = 0x6e + B115200 = 0x1c200 + B1200 = 0x4b0 + B134 = 0x86 + B14400 = 0x3840 + B150 = 0x96 + B1800 = 0x708 + B19200 = 0x4b00 + B200 = 0xc8 + B230400 = 0x38400 + B2400 = 0x960 + B28800 = 0x7080 + B300 = 0x12c + B38400 = 0x9600 + B4800 = 0x12c0 + B50 = 0x32 + B57600 = 0xe100 + B600 = 0x258 + B7200 = 0x1c20 + B75 = 0x4b + B76800 = 0x12c00 + B9600 = 0x2580 + BIOCFLUSH = 0x20004268 + BIOCGBLEN = 0x40044266 + BIOCGDIRFILT = 0x4004427c + BIOCGDLT = 0x4004426a + BIOCGDLTLIST = 0xc010427b + BIOCGETIF = 0x4020426b + BIOCGFILDROP = 0x40044278 + BIOCGHDRCMPLT = 0x40044274 + BIOCGRSIG = 0x40044273 + BIOCGRTIMEOUT = 0x4010426e + BIOCGSTATS = 0x4008426f + BIOCIMMEDIATE = 0x80044270 + BIOCLOCK = 0x20004276 + BIOCPROMISC = 0x20004269 + BIOCSBLEN = 0xc0044266 + BIOCSDIRFILT = 0x8004427d + BIOCSDLT = 0x8004427a + BIOCSETF = 0x80104267 + BIOCSETIF = 0x8020426c + BIOCSETWF = 0x80104277 + BIOCSFILDROP = 0x80044279 + BIOCSHDRCMPLT = 0x80044275 + BIOCSRSIG = 0x80044272 + BIOCSRTIMEOUT = 0x8010426d + BIOCVERSION = 0x40044271 + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ALIGNMENT = 0x4 + BPF_ALU = 0x4 + BPF_AND = 0x50 + BPF_B = 0x10 + BPF_DIRECTION_IN = 0x1 + BPF_DIRECTION_OUT = 0x2 + BPF_DIV = 0x30 + BPF_FILDROP_CAPTURE = 0x1 + BPF_FILDROP_DROP = 0x2 + BPF_FILDROP_PASS = 0x0 + BPF_F_DIR_IN = 0x10 + BPF_F_DIR_MASK = 0x30 + BPF_F_DIR_OUT = 0x20 + BPF_F_DIR_SHIFT = 0x4 + BPF_F_FLOWID = 0x8 + BPF_F_PRI_MASK = 0x7 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JMP = 0x5 + BPF_JSET = 0x40 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + 
BPF_MAXBUFSIZE = 0x200000 + BPF_MAXINSNS = 0x200 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINBUFSIZE = 0x20 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_OR = 0x40 + BPF_RELEASE = 0x30bb6 + BPF_RET = 0x6 + BPF_RND = 0xc0 + BPF_RSH = 0x70 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAX = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BRKINT = 0x2 + CFLUSH = 0xf + CLOCAL = 0x8000 + CLOCK_BOOTTIME = 0x6 + CLOCK_MONOTONIC = 0x3 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x4 + CLOCK_UPTIME = 0x5 + CPUSTATES = 0x6 + CP_IDLE = 0x5 + CP_INTR = 0x4 + CP_NICE = 0x1 + CP_SPIN = 0x3 + CP_SYS = 0x2 + CP_USER = 0x0 + CREAD = 0x800 + CRTSCTS = 0x10000 + CS5 = 0x0 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTART = 0x11 + CSTATUS = 0xff + CSTOP = 0x13 + CSTOPB = 0x400 + CSUSP = 0x1a + CTL_HW = 0x6 + CTL_KERN = 0x1 + CTL_MAXNAME = 0xc + CTL_NET = 0x4 + DIOCADDQUEUE = 0xc110445d + DIOCADDRULE = 0xcd604404 + DIOCADDSTATE = 0xc1084425 + DIOCCHANGERULE = 0xcd60441a + DIOCCLRIFFLAG = 0xc028445a + DIOCCLRSRCNODES = 0x20004455 + DIOCCLRSTATES = 0xc0e04412 + DIOCCLRSTATUS = 0xc0284416 + DIOCGETLIMIT = 0xc0084427 + DIOCGETQSTATS = 0xc1204460 + DIOCGETQUEUE = 0xc110445f + DIOCGETQUEUES = 0xc110445e + DIOCGETRULE = 0xcd604407 + DIOCGETRULES = 0xcd604406 + DIOCGETRULESET = 0xc444443b + DIOCGETRULESETS = 0xc444443a + DIOCGETSRCNODES = 0xc0104454 + DIOCGETSTATE = 0xc1084413 + DIOCGETSTATES = 0xc0104419 + DIOCGETSTATUS = 0xc1e84415 + DIOCGETSYNFLWATS = 0xc0084463 + DIOCGETTIMEOUT = 0xc008441e + DIOCIGETIFACES = 0xc0284457 + DIOCKILLSRCNODES = 0xc080445b + DIOCKILLSTATES = 0xc0e04429 + DIOCNATLOOK = 0xc0504417 + DIOCOSFPADD = 0xc088444f + DIOCOSFPFLUSH = 0x2000444e + DIOCOSFPGET = 0xc0884450 + DIOCRADDADDRS = 0xc4504443 + DIOCRADDTABLES = 0xc450443d + DIOCRCLRADDRS = 0xc4504442 + DIOCRCLRASTATS = 0xc4504448 + DIOCRCLRTABLES = 0xc450443c + DIOCRCLRTSTATS = 0xc4504441 + DIOCRDELADDRS = 0xc4504444 + DIOCRDELTABLES = 0xc450443e + DIOCRGETADDRS = 0xc4504446 + DIOCRGETASTATS = 0xc4504447 + DIOCRGETTABLES = 0xc450443f + DIOCRGETTSTATS = 0xc4504440 + DIOCRINADEFINE = 0xc450444d + DIOCRSETADDRS = 0xc4504445 + DIOCRSETTFLAGS = 0xc450444a + DIOCRTSTADDRS = 0xc4504449 + DIOCSETDEBUG = 0xc0044418 + DIOCSETHOSTID = 0xc0044456 + DIOCSETIFFLAG = 0xc0284459 + DIOCSETLIMIT = 0xc0084428 + DIOCSETREASS = 0xc004445c + DIOCSETSTATUSIF = 0xc0284414 + DIOCSETSYNCOOKIES = 0xc0014462 + DIOCSETSYNFLWATS = 0xc0084461 + DIOCSETTIMEOUT = 0xc008441d + DIOCSTART = 0x20004401 + DIOCSTOP = 0x20004402 + DIOCXBEGIN = 0xc0104451 + DIOCXCOMMIT = 0xc0104452 + DIOCXROLLBACK = 0xc0104453 + DLT_ARCNET = 0x7 + DLT_ATM_RFC1483 = 0xb + DLT_AX25 = 0x3 + DLT_CHAOS = 0x5 + DLT_C_HDLC = 0x68 + DLT_EN10MB = 0x1 + DLT_EN3MB = 0x2 + DLT_ENC = 0xd + DLT_FDDI = 0xa + DLT_IEEE802 = 0x6 + DLT_IEEE802_11 = 0x69 + DLT_IEEE802_11_RADIO = 0x7f + DLT_LOOP = 0xc + DLT_MPLS = 0xdb + DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b + DLT_PFLOG = 0x75 + DLT_PFSYNC = 0x12 + DLT_PPP = 0x9 + DLT_PPP_BSDOS = 0x10 + DLT_PPP_ETHER = 0x33 + DLT_PPP_SERIAL = 0x32 + DLT_PRONET = 0x4 + DLT_RAW = 0xe + DLT_SLIP = 0x8 + DLT_SLIP_BSDOS = 0xf + DLT_USBPCAP = 0xf9 + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c + DT_BLK = 0x6 
+ DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + ECHO = 0x8 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EMT_TAGOVF = 0x1 + EMUL_ENABLED = 0x1 + EMUL_NATIVE = 0x2 + ENDRUNDISC = 0x9 + ETH64_8021_RSVD_MASK = 0xfffffffffff0 + ETH64_8021_RSVD_PREFIX = 0x180c2000000 + ETHERMIN = 0x2e + ETHERMTU = 0x5dc + ETHERTYPE_8023 = 0x4 + ETHERTYPE_AARP = 0x80f3 + ETHERTYPE_ACCTON = 0x8390 + ETHERTYPE_AEONIC = 0x8036 + ETHERTYPE_ALPHA = 0x814a + ETHERTYPE_AMBER = 0x6008 + ETHERTYPE_AMOEBA = 0x8145 + ETHERTYPE_AOE = 0x88a2 + ETHERTYPE_APOLLO = 0x80f7 + ETHERTYPE_APOLLODOMAIN = 0x8019 + ETHERTYPE_APPLETALK = 0x809b + ETHERTYPE_APPLITEK = 0x80c7 + ETHERTYPE_ARGONAUT = 0x803a + ETHERTYPE_ARP = 0x806 + ETHERTYPE_AT = 0x809b + ETHERTYPE_ATALK = 0x809b + ETHERTYPE_ATOMIC = 0x86df + ETHERTYPE_ATT = 0x8069 + ETHERTYPE_ATTSTANFORD = 0x8008 + ETHERTYPE_AUTOPHON = 0x806a + ETHERTYPE_AXIS = 0x8856 + ETHERTYPE_BCLOOP = 0x9003 + ETHERTYPE_BOFL = 0x8102 + ETHERTYPE_CABLETRON = 0x7034 + ETHERTYPE_CHAOS = 0x804 + ETHERTYPE_COMDESIGN = 0x806c + ETHERTYPE_COMPUGRAPHIC = 0x806d + ETHERTYPE_COUNTERPOINT = 0x8062 + ETHERTYPE_CRONUS = 0x8004 + ETHERTYPE_CRONUSVLN = 0x8003 + ETHERTYPE_DCA = 0x1234 + ETHERTYPE_DDE = 0x807b + ETHERTYPE_DEBNI = 0xaaaa + ETHERTYPE_DECAM = 0x8048 + ETHERTYPE_DECCUST = 0x6006 + ETHERTYPE_DECDIAG = 0x6005 + ETHERTYPE_DECDNS = 0x803c + ETHERTYPE_DECDTS = 0x803e + ETHERTYPE_DECEXPER = 0x6000 + ETHERTYPE_DECLAST = 0x8041 + ETHERTYPE_DECLTM = 0x803f + ETHERTYPE_DECMUMPS = 0x6009 + ETHERTYPE_DECNETBIOS = 0x8040 + ETHERTYPE_DELTACON = 0x86de + ETHERTYPE_DIDDLE = 0x4321 + ETHERTYPE_DLOG1 = 0x660 + ETHERTYPE_DLOG2 = 0x661 + ETHERTYPE_DN = 0x6003 + ETHERTYPE_DOGFIGHT = 0x1989 + ETHERTYPE_DSMD = 0x8039 + ETHERTYPE_EAPOL = 0x888e + ETHERTYPE_ECMA = 0x803 + ETHERTYPE_ENCRYPT = 0x803d + ETHERTYPE_ES = 0x805d + ETHERTYPE_EXCELAN = 0x8010 + ETHERTYPE_EXPERDATA = 0x8049 + ETHERTYPE_FLIP = 0x8146 + ETHERTYPE_FLOWCONTROL = 0x8808 + ETHERTYPE_FRARP = 0x808 + ETHERTYPE_GENDYN = 0x8068 + ETHERTYPE_HAYES = 0x8130 + ETHERTYPE_HIPPI_FP = 0x8180 + ETHERTYPE_HITACHI = 0x8820 + ETHERTYPE_HP = 0x8005 + ETHERTYPE_IEEEPUP = 0xa00 + ETHERTYPE_IEEEPUPAT = 0xa01 + ETHERTYPE_IMLBL = 0x4c42 + ETHERTYPE_IMLBLDIAG = 0x424c + ETHERTYPE_IP = 0x800 + ETHERTYPE_IPAS = 0x876c + ETHERTYPE_IPV6 = 0x86dd + ETHERTYPE_IPX = 0x8137 + ETHERTYPE_IPXNEW = 0x8037 + ETHERTYPE_KALPANA = 0x8582 + ETHERTYPE_LANBRIDGE = 0x8038 + ETHERTYPE_LANPROBE = 0x8888 + ETHERTYPE_LAT = 0x6004 + ETHERTYPE_LBACK = 0x9000 + ETHERTYPE_LITTLE = 0x8060 + ETHERTYPE_LLDP = 0x88cc + ETHERTYPE_LOGICRAFT = 0x8148 + ETHERTYPE_LOOPBACK = 0x9000 + ETHERTYPE_MACSEC = 0x88e5 + ETHERTYPE_MATRA = 0x807a + ETHERTYPE_MAX = 0xffff + ETHERTYPE_MERIT = 0x807c + ETHERTYPE_MICP = 0x873a + ETHERTYPE_MOPDL = 0x6001 + ETHERTYPE_MOPRC = 0x6002 + ETHERTYPE_MOTOROLA = 0x818d + ETHERTYPE_MPLS = 0x8847 + ETHERTYPE_MPLS_MCAST = 0x8848 + ETHERTYPE_MUMPS = 0x813f + ETHERTYPE_NBPCC = 0x3c04 + ETHERTYPE_NBPCLAIM = 0x3c09 + ETHERTYPE_NBPCLREQ = 0x3c05 + ETHERTYPE_NBPCLRSP = 0x3c06 + ETHERTYPE_NBPCREQ = 0x3c02 + ETHERTYPE_NBPCRSP = 0x3c03 + ETHERTYPE_NBPDG = 0x3c07 + ETHERTYPE_NBPDGB = 0x3c08 + ETHERTYPE_NBPDLTE = 0x3c0a + ETHERTYPE_NBPRAR = 0x3c0c + ETHERTYPE_NBPRAS = 0x3c0b + ETHERTYPE_NBPRST = 0x3c0d + ETHERTYPE_NBPSCD = 0x3c01 + ETHERTYPE_NBPVCD = 0x3c00 + ETHERTYPE_NBS = 0x802 + ETHERTYPE_NCD = 0x8149 + ETHERTYPE_NESTAR = 0x8006 + ETHERTYPE_NETBEUI = 0x8191 + ETHERTYPE_NHRP = 0x2001 + 
ETHERTYPE_NOVELL = 0x8138 + ETHERTYPE_NS = 0x600 + ETHERTYPE_NSAT = 0x601 + ETHERTYPE_NSCOMPAT = 0x807 + ETHERTYPE_NSH = 0x984f + ETHERTYPE_NTRAILER = 0x10 + ETHERTYPE_OS9 = 0x7007 + ETHERTYPE_OS9NET = 0x7009 + ETHERTYPE_PACER = 0x80c6 + ETHERTYPE_PBB = 0x88e7 + ETHERTYPE_PCS = 0x4242 + ETHERTYPE_PLANNING = 0x8044 + ETHERTYPE_PPP = 0x880b + ETHERTYPE_PPPOE = 0x8864 + ETHERTYPE_PPPOEDISC = 0x8863 + ETHERTYPE_PRIMENTS = 0x7031 + ETHERTYPE_PUP = 0x200 + ETHERTYPE_PUPAT = 0x200 + ETHERTYPE_QINQ = 0x88a8 + ETHERTYPE_RACAL = 0x7030 + ETHERTYPE_RATIONAL = 0x8150 + ETHERTYPE_RAWFR = 0x6559 + ETHERTYPE_RCL = 0x1995 + ETHERTYPE_RDP = 0x8739 + ETHERTYPE_RETIX = 0x80f2 + ETHERTYPE_REVARP = 0x8035 + ETHERTYPE_SCA = 0x6007 + ETHERTYPE_SECTRA = 0x86db + ETHERTYPE_SECUREDATA = 0x876d + ETHERTYPE_SGITW = 0x817e + ETHERTYPE_SG_BOUNCE = 0x8016 + ETHERTYPE_SG_DIAG = 0x8013 + ETHERTYPE_SG_NETGAMES = 0x8014 + ETHERTYPE_SG_RESV = 0x8015 + ETHERTYPE_SIMNET = 0x5208 + ETHERTYPE_SLOW = 0x8809 + ETHERTYPE_SNA = 0x80d5 + ETHERTYPE_SNMP = 0x814c + ETHERTYPE_SONIX = 0xfaf5 + ETHERTYPE_SPIDER = 0x809f + ETHERTYPE_SPRITE = 0x500 + ETHERTYPE_STP = 0x8181 + ETHERTYPE_TALARIS = 0x812b + ETHERTYPE_TALARISMC = 0x852b + ETHERTYPE_TCPCOMP = 0x876b + ETHERTYPE_TCPSM = 0x9002 + ETHERTYPE_TEC = 0x814f + ETHERTYPE_TIGAN = 0x802f + ETHERTYPE_TRAIL = 0x1000 + ETHERTYPE_TRANSETHER = 0x6558 + ETHERTYPE_TYMSHARE = 0x802e + ETHERTYPE_UBBST = 0x7005 + ETHERTYPE_UBDEBUG = 0x900 + ETHERTYPE_UBDIAGLOOP = 0x7002 + ETHERTYPE_UBDL = 0x7000 + ETHERTYPE_UBNIU = 0x7001 + ETHERTYPE_UBNMC = 0x7003 + ETHERTYPE_VALID = 0x1600 + ETHERTYPE_VARIAN = 0x80dd + ETHERTYPE_VAXELN = 0x803b + ETHERTYPE_VEECO = 0x8067 + ETHERTYPE_VEXP = 0x805b + ETHERTYPE_VGLAB = 0x8131 + ETHERTYPE_VINES = 0xbad + ETHERTYPE_VINESECHO = 0xbaf + ETHERTYPE_VINESLOOP = 0xbae + ETHERTYPE_VITAL = 0xff00 + ETHERTYPE_VLAN = 0x8100 + ETHERTYPE_VLTLMAN = 0x8080 + ETHERTYPE_VPROD = 0x805c + ETHERTYPE_VURESERVED = 0x8147 + ETHERTYPE_WATERLOO = 0x8130 + ETHERTYPE_WELLFLEET = 0x8103 + ETHERTYPE_X25 = 0x805 + ETHERTYPE_X75 = 0x801 + ETHERTYPE_XNSSM = 0x9001 + ETHERTYPE_XTP = 0x817d + ETHER_ADDR_LEN = 0x6 + ETHER_ALIGN = 0x2 + ETHER_CRC_LEN = 0x4 + ETHER_CRC_POLY_BE = 0x4c11db6 + ETHER_CRC_POLY_LE = 0xedb88320 + ETHER_HDR_LEN = 0xe + ETHER_MAX_DIX_LEN = 0x600 + ETHER_MAX_HARDMTU_LEN = 0xff9b + ETHER_MAX_LEN = 0x5ee + ETHER_MIN_LEN = 0x40 + ETHER_TYPE_LEN = 0x2 + ETHER_VLAN_ENCAP_LEN = 0x4 + EVFILT_AIO = -0x3 + EVFILT_DEVICE = -0x8 + EVFILT_EXCEPT = -0x9 + EVFILT_PROC = -0x5 + EVFILT_READ = -0x1 + EVFILT_SIGNAL = -0x6 + EVFILT_SYSCOUNT = 0x9 + EVFILT_TIMER = -0x7 + EVFILT_VNODE = -0x4 + EVFILT_WRITE = -0x2 + EVL_ENCAPLEN = 0x4 + EVL_PRIO_BITS = 0xd + EVL_PRIO_MAX = 0x7 + EVL_VLID_MASK = 0xfff + EVL_VLID_MAX = 0xffe + EVL_VLID_MIN = 0x1 + EVL_VLID_NULL = 0x0 + EV_ADD = 0x1 + EV_CLEAR = 0x20 + EV_DELETE = 0x2 + EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 + EV_ENABLE = 0x4 + EV_EOF = 0x8000 + EV_ERROR = 0x4000 + EV_FLAG1 = 0x2000 + EV_ONESHOT = 0x10 + EV_RECEIPT = 0x40 + EV_SYSFLAGS = 0xf800 + EXTA = 0x4b00 + EXTB = 0x9600 + EXTPROC = 0x800 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FLUSHO = 0x800000 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0xa + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLK = 0x7 + F_GETOWN = 0x5 + F_ISATTY = 0xb + F_OK = 0x0 + F_RDLCK = 0x1 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLK = 0x8 + F_SETLKW = 0x9 + F_SETOWN = 0x6 + F_UNLCK = 0x2 + F_WRLCK = 0x3 + HUPCL = 0x4000 + HW_MACHINE = 0x1 + ICANON = 0x100 + ICMP6_FILTER = 0x12 + ICRNL = 0x100 + IEXTEN = 0x400 + IFAN_ARRIVAL = 0x0 + IFAN_DEPARTURE = 
0x1 + IFF_ALLMULTI = 0x200 + IFF_BROADCAST = 0x2 + IFF_CANTCHANGE = 0x8e52 + IFF_DEBUG = 0x4 + IFF_LINK0 = 0x1000 + IFF_LINK1 = 0x2000 + IFF_LINK2 = 0x4000 + IFF_LOOPBACK = 0x8 + IFF_MULTICAST = 0x8000 + IFF_NOARP = 0x80 + IFF_OACTIVE = 0x400 + IFF_POINTOPOINT = 0x10 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SIMPLEX = 0x800 + IFF_STATICARP = 0x20 + IFF_UP = 0x1 + IFNAMSIZ = 0x10 + IFT_1822 = 0x2 + IFT_A12MPPSWITCH = 0x82 + IFT_AAL2 = 0xbb + IFT_AAL5 = 0x31 + IFT_ADSL = 0x5e + IFT_AFLANE8023 = 0x3b + IFT_AFLANE8025 = 0x3c + IFT_ARAP = 0x58 + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ASYNC = 0x54 + IFT_ATM = 0x25 + IFT_ATMDXI = 0x69 + IFT_ATMFUNI = 0x6a + IFT_ATMIMA = 0x6b + IFT_ATMLOGICAL = 0x50 + IFT_ATMRADIO = 0xbd + IFT_ATMSUBINTERFACE = 0x86 + IFT_ATMVCIENDPT = 0xc2 + IFT_ATMVIRTUAL = 0x95 + IFT_BGPPOLICYACCOUNTING = 0xa2 + IFT_BLUETOOTH = 0xf8 + IFT_BRIDGE = 0xd1 + IFT_BSC = 0x53 + IFT_CARP = 0xf7 + IFT_CCTEMUL = 0x3d + IFT_CEPT = 0x13 + IFT_CES = 0x85 + IFT_CHANNEL = 0x46 + IFT_CNR = 0x55 + IFT_COFFEE = 0x84 + IFT_COMPOSITELINK = 0x9b + IFT_DCN = 0x8d + IFT_DIGITALPOWERLINE = 0x8a + IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba + IFT_DLSW = 0x4a + IFT_DOCSCABLEDOWNSTREAM = 0x80 + IFT_DOCSCABLEMACLAYER = 0x7f + IFT_DOCSCABLEUPSTREAM = 0x81 + IFT_DOCSCABLEUPSTREAMCHANNEL = 0xcd + IFT_DS0 = 0x51 + IFT_DS0BUNDLE = 0x52 + IFT_DS1FDL = 0xaa + IFT_DS3 = 0x1e + IFT_DTM = 0x8c + IFT_DUMMY = 0xf1 + IFT_DVBASILN = 0xac + IFT_DVBASIOUT = 0xad + IFT_DVBRCCDOWNSTREAM = 0x93 + IFT_DVBRCCMACLAYER = 0x92 + IFT_DVBRCCUPSTREAM = 0x94 + IFT_ECONET = 0xce + IFT_ENC = 0xf4 + IFT_EON = 0x19 + IFT_EPLRS = 0x57 + IFT_ESCON = 0x49 + IFT_ETHER = 0x6 + IFT_FAITH = 0xf3 + IFT_FAST = 0x7d + IFT_FASTETHER = 0x3e + IFT_FASTETHERFX = 0x45 + IFT_FDDI = 0xf + IFT_FIBRECHANNEL = 0x38 + IFT_FRAMERELAYINTERCONNECT = 0x3a + IFT_FRAMERELAYMPI = 0x5c + IFT_FRDLCIENDPT = 0xc1 + IFT_FRELAY = 0x20 + IFT_FRELAYDCE = 0x2c + IFT_FRF16MFRBUNDLE = 0xa3 + IFT_FRFORWARD = 0x9e + IFT_G703AT2MB = 0x43 + IFT_G703AT64K = 0x42 + IFT_GIF = 0xf0 + IFT_GIGABITETHERNET = 0x75 + IFT_GR303IDT = 0xb2 + IFT_GR303RDT = 0xb1 + IFT_H323GATEKEEPER = 0xa4 + IFT_H323PROXY = 0xa5 + IFT_HDH1822 = 0x3 + IFT_HDLC = 0x76 + IFT_HDSL2 = 0xa8 + IFT_HIPERLAN2 = 0xb7 + IFT_HIPPI = 0x2f + IFT_HIPPIINTERFACE = 0x39 + IFT_HOSTPAD = 0x5a + IFT_HSSI = 0x2e + IFT_HY = 0xe + IFT_IBM370PARCHAN = 0x48 + IFT_IDSL = 0x9a + IFT_IEEE1394 = 0x90 + IFT_IEEE80211 = 0x47 + IFT_IEEE80212 = 0x37 + IFT_IEEE8023ADLAG = 0xa1 + IFT_IFGSN = 0x91 + IFT_IMT = 0xbe + IFT_INFINIBAND = 0xc7 + IFT_INTERLEAVE = 0x7c + IFT_IP = 0x7e + IFT_IPFORWARD = 0x8e + IFT_IPOVERATM = 0x72 + IFT_IPOVERCDLC = 0x6d + IFT_IPOVERCLAW = 0x6e + IFT_IPSWITCH = 0x4e + IFT_ISDN = 0x3f + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_ISDNS = 0x4b + IFT_ISDNU = 0x4c + IFT_ISO88022LLC = 0x29 + IFT_ISO88023 = 0x7 + IFT_ISO88024 = 0x8 + IFT_ISO88025 = 0x9 + IFT_ISO88025CRFPINT = 0x62 + IFT_ISO88025DTR = 0x56 + IFT_ISO88025FIBER = 0x73 + IFT_ISO88026 = 0xa + IFT_ISUP = 0xb3 + IFT_L2VLAN = 0x87 + IFT_L3IPVLAN = 0x88 + IFT_L3IPXVLAN = 0x89 + IFT_LAPB = 0x10 + IFT_LAPD = 0x4d + IFT_LAPF = 0x77 + IFT_LINEGROUP = 0xd2 + IFT_LOCALTALK = 0x2a + IFT_LOOP = 0x18 + IFT_MBIM = 0xfa + IFT_MEDIAMAILOVERIP = 0x8b + IFT_MFSIGLINK = 0xa7 + IFT_MIOX25 = 0x26 + IFT_MODEM = 0x30 + IFT_MPC = 0x71 + IFT_MPLS = 0xa6 + IFT_MPLSTUNNEL = 0x96 + IFT_MSDSL = 0x8f + IFT_MVL = 0xbf + IFT_MYRINET = 0x63 + IFT_NFAS = 0xaf + IFT_NSIP = 0x1b + IFT_OPTICALCHANNEL = 0xc3 + IFT_OPTICALTRANSPORT = 0xc4 + IFT_OTHER = 0x1 + IFT_P10 = 0xc + 
IFT_P80 = 0xd + IFT_PARA = 0x22 + IFT_PFLOG = 0xf5 + IFT_PFLOW = 0xf9 + IFT_PFSYNC = 0xf6 + IFT_PLC = 0xae + IFT_PON155 = 0xcf + IFT_PON622 = 0xd0 + IFT_POS = 0xab + IFT_PPP = 0x17 + IFT_PPPMULTILINKBUNDLE = 0x6c + IFT_PROPATM = 0xc5 + IFT_PROPBWAP2MP = 0xb8 + IFT_PROPCNLS = 0x59 + IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5 + IFT_PROPDOCSWIRELESSMACLAYER = 0xb4 + IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6 + IFT_PROPMUX = 0x36 + IFT_PROPVIRTUAL = 0x35 + IFT_PROPWIRELESSP2P = 0x9d + IFT_PTPSERIAL = 0x16 + IFT_PVC = 0xf2 + IFT_Q2931 = 0xc9 + IFT_QLLC = 0x44 + IFT_RADIOMAC = 0xbc + IFT_RADSL = 0x5f + IFT_REACHDSL = 0xc0 + IFT_RFC1483 = 0x9f + IFT_RS232 = 0x21 + IFT_RSRB = 0x4f + IFT_SDLC = 0x11 + IFT_SDSL = 0x60 + IFT_SHDSL = 0xa9 + IFT_SIP = 0x1f + IFT_SIPSIG = 0xcc + IFT_SIPTG = 0xcb + IFT_SLIP = 0x1c + IFT_SMDSDXI = 0x2b + IFT_SMDSICIP = 0x34 + IFT_SONET = 0x27 + IFT_SONETOVERHEADCHANNEL = 0xb9 + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SRP = 0x97 + IFT_SS7SIGLINK = 0x9c + IFT_STACKTOSTACK = 0x6f + IFT_STARLAN = 0xb + IFT_T1 = 0x12 + IFT_TDLC = 0x74 + IFT_TELINK = 0xc8 + IFT_TERMPAD = 0x5b + IFT_TR008 = 0xb0 + IFT_TRANSPHDLC = 0x7b + IFT_TUNNEL = 0x83 + IFT_ULTRA = 0x1d + IFT_USB = 0xa0 + IFT_V11 = 0x40 + IFT_V35 = 0x2d + IFT_V36 = 0x41 + IFT_V37 = 0x78 + IFT_VDSL = 0x61 + IFT_VIRTUALIPADDRESS = 0x70 + IFT_VIRTUALTG = 0xca + IFT_VOICEDID = 0xd5 + IFT_VOICEEM = 0x64 + IFT_VOICEEMFGD = 0xd3 + IFT_VOICEENCAP = 0x67 + IFT_VOICEFGDEANA = 0xd4 + IFT_VOICEFXO = 0x65 + IFT_VOICEFXS = 0x66 + IFT_VOICEOVERATM = 0x98 + IFT_VOICEOVERCABLE = 0xc6 + IFT_VOICEOVERFRAMERELAY = 0x99 + IFT_VOICEOVERIP = 0x68 + IFT_WIREGUARD = 0xfb + IFT_X213 = 0x5d + IFT_X25 = 0x5 + IFT_X25DDN = 0x4 + IFT_X25HUNTGROUP = 0x7a + IFT_X25MLP = 0x79 + IFT_X25PLE = 0x28 + IFT_XETHER = 0x1a + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLASSD_HOST = 0xfffffff + IN_CLASSD_NET = 0xf0000000 + IN_CLASSD_NSHIFT = 0x1c + IN_LOOPBACKNET = 0x7f + IN_RFC3021_HOST = 0x1 + IN_RFC3021_NET = 0xfffffffe + IN_RFC3021_NSHIFT = 0x1f + IPPROTO_AH = 0x33 + IPPROTO_CARP = 0x70 + IPPROTO_DIVERT = 0x102 + IPPROTO_DONE = 0x101 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_EON = 0x50 + IPPROTO_ESP = 0x32 + IPPROTO_ETHERIP = 0x61 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GGP = 0x3 + IPPROTO_GRE = 0x2f + IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPCOMP = 0x6c + IPPROTO_IPIP = 0x4 + IPPROTO_IPV4 = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_MAX = 0x100 + IPPROTO_MAXID = 0x103 + IPPROTO_MOBILE = 0x37 + IPPROTO_MPLS = 0x89 + IPPROTO_NONE = 0x3b + IPPROTO_PFSYNC = 0xf0 + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPV6_AUTH_LEVEL = 0x35 + IPV6_AUTOFLOWLABEL = 0x3b + IPV6_CHECKSUM = 0x1a + IPV6_DEFAULT_MULTICAST_HOPS = 0x1 + IPV6_DEFAULT_MULTICAST_LOOP = 0x1 + IPV6_DEFHLIM = 0x40 + IPV6_DONTFRAG = 0x3e + IPV6_DSTOPTS = 0x32 + IPV6_ESP_NETWORK_LEVEL = 0x37 + IPV6_ESP_TRANS_LEVEL = 0x36 + IPV6_FAITH = 0x1d + IPV6_FLOWINFO_MASK = 0xffffff0f + 
IPV6_FLOWLABEL_MASK = 0xffff0f00 + IPV6_FRAGTTL = 0x78 + IPV6_HLIMDEC = 0x1 + IPV6_HOPLIMIT = 0x2f + IPV6_HOPOPTS = 0x31 + IPV6_IPCOMP_LEVEL = 0x3c + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_MAXHLIM = 0xff + IPV6_MAXPACKET = 0xffff + IPV6_MINHOPCOUNT = 0x41 + IPV6_MMTU = 0x500 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_LOOP = 0xb + IPV6_NEXTHOP = 0x30 + IPV6_OPTIONS = 0x1 + IPV6_PATHMTU = 0x2c + IPV6_PIPEX = 0x3f + IPV6_PKTINFO = 0x2e + IPV6_PORTRANGE = 0xe + IPV6_PORTRANGE_DEFAULT = 0x0 + IPV6_PORTRANGE_HIGH = 0x1 + IPV6_PORTRANGE_LOW = 0x2 + IPV6_RECVDSTOPTS = 0x28 + IPV6_RECVDSTPORT = 0x40 + IPV6_RECVHOPLIMIT = 0x25 + IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVPATHMTU = 0x2b + IPV6_RECVPKTINFO = 0x24 + IPV6_RECVRTHDR = 0x26 + IPV6_RECVTCLASS = 0x39 + IPV6_RTABLE = 0x1021 + IPV6_RTHDR = 0x33 + IPV6_RTHDRDSTOPTS = 0x23 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_SOCKOPT_RESERVED1 = 0x3 + IPV6_TCLASS = 0x3d + IPV6_UNICAST_HOPS = 0x4 + IPV6_USE_MIN_MTU = 0x2a + IPV6_V6ONLY = 0x1b + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 + IP_ADD_MEMBERSHIP = 0xc + IP_AUTH_LEVEL = 0x14 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0xd + IP_ESP_NETWORK_LEVEL = 0x16 + IP_ESP_TRANS_LEVEL = 0x15 + IP_HDRINCL = 0x2 + IP_IPCOMP_LEVEL = 0x1d + IP_IPDEFTTL = 0x25 + IP_IPSECFLOWINFO = 0x24 + IP_IPSEC_LOCAL_AUTH = 0x1b + IP_IPSEC_LOCAL_CRED = 0x19 + IP_IPSEC_LOCAL_ID = 0x17 + IP_IPSEC_REMOTE_AUTH = 0x1c + IP_IPSEC_REMOTE_CRED = 0x1a + IP_IPSEC_REMOTE_ID = 0x18 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0xfff + IP_MF = 0x2000 + IP_MINTTL = 0x20 + IP_MIN_MEMBERSHIPS = 0xf + IP_MSS = 0x240 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_LOOP = 0xb + IP_MULTICAST_TTL = 0xa + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x1 + IP_PIPEX = 0x22 + IP_PORTRANGE = 0x13 + IP_PORTRANGE_DEFAULT = 0x0 + IP_PORTRANGE_HIGH = 0x1 + IP_PORTRANGE_LOW = 0x2 + IP_RECVDSTADDR = 0x7 + IP_RECVDSTPORT = 0x21 + IP_RECVIF = 0x1e + IP_RECVOPTS = 0x5 + IP_RECVRETOPTS = 0x6 + IP_RECVRTABLE = 0x23 + IP_RECVTTL = 0x1f + IP_RETOPTS = 0x8 + IP_RF = 0x8000 + IP_RTABLE = 0x1021 + IP_SENDSRCADDR = 0x7 + IP_TOS = 0x3 + IP_TTL = 0x4 + ISIG = 0x80 + ISTRIP = 0x20 + ITIMER_PROF = 0x2 + ITIMER_REAL = 0x0 + ITIMER_VIRTUAL = 0x1 + IUCLC = 0x1000 + IXANY = 0x800 + IXOFF = 0x400 + IXON = 0x200 + KERN_HOSTNAME = 0xa + KERN_OSRELEASE = 0x2 + KERN_OSTYPE = 0x1 + KERN_VERSION = 0x4 + LCNT_OVERLOAD_FLUSH = 0x6 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + MADV_DONTNEED = 0x4 + MADV_FREE = 0x6 + MADV_NORMAL = 0x0 + MADV_RANDOM = 0x1 + MADV_SEQUENTIAL = 0x2 + MADV_SPACEAVAIL = 0x5 + MADV_WILLNEED = 0x3 + MAP_ANON = 0x1000 + MAP_ANONYMOUS = 0x1000 + MAP_CONCEAL = 0x8000 + MAP_COPY = 0x2 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_FLAGMASK = 0xfff7 + MAP_HASSEMAPHORE = 0x0 + MAP_INHERIT = 0x0 + MAP_INHERIT_COPY = 0x1 + MAP_INHERIT_NONE = 0x2 + MAP_INHERIT_SHARE = 0x0 + MAP_INHERIT_ZERO = 0x3 + MAP_NOEXTEND = 0x0 + MAP_NORESERVE = 0x0 + MAP_PRIVATE = 0x2 + MAP_RENAME = 0x0 + MAP_SHARED = 0x1 + MAP_STACK = 0x4000 + MAP_TRYFIXED = 0x0 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MNT_ASYNC = 0x40 + MNT_DEFEXPORTED = 0x200 + MNT_DELEXPORT = 0x20000 + MNT_DOOMED = 0x8000000 + MNT_EXPORTANON = 0x400 + MNT_EXPORTED = 0x100 + MNT_EXRDONLY = 0x80 + MNT_FORCE = 0x80000 + MNT_LAZY = 0x3 + MNT_LOCAL = 0x1000 + MNT_NOATIME = 0x8000 + MNT_NODEV = 0x10 + MNT_NOEXEC = 0x4 + MNT_NOPERM = 0x20 + MNT_NOSUID = 0x8 + MNT_NOWAIT = 0x2 + MNT_QUOTA = 0x2000 + 
MNT_RDONLY = 0x1 + MNT_RELOAD = 0x40000 + MNT_ROOTFS = 0x4000 + MNT_SOFTDEP = 0x4000000 + MNT_STALLED = 0x100000 + MNT_SWAPPABLE = 0x200000 + MNT_SYNCHRONOUS = 0x2 + MNT_UPDATE = 0x10000 + MNT_VISFLAGMASK = 0x400ffff + MNT_WAIT = 0x1 + MNT_WANTRDWR = 0x2000000 + MNT_WXALLOWED = 0x800 + MOUNT_AFS = "afs" + MOUNT_CD9660 = "cd9660" + MOUNT_EXT2FS = "ext2fs" + MOUNT_FFS = "ffs" + MOUNT_FUSEFS = "fuse" + MOUNT_MFS = "mfs" + MOUNT_MSDOS = "msdos" + MOUNT_NCPFS = "ncpfs" + MOUNT_NFS = "nfs" + MOUNT_NTFS = "ntfs" + MOUNT_TMPFS = "tmpfs" + MOUNT_UDF = "udf" + MOUNT_UFS = "ffs" + MSG_BCAST = 0x100 + MSG_CMSG_CLOEXEC = 0x800 + MSG_CTRUNC = 0x20 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x80 + MSG_EOR = 0x8 + MSG_MCAST = 0x200 + MSG_NOSIGNAL = 0x400 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_TRUNC = 0x10 + MSG_WAITALL = 0x40 + MS_ASYNC = 0x1 + MS_INVALIDATE = 0x4 + MS_SYNC = 0x2 + NAME_MAX = 0xff + NET_RT_DUMP = 0x1 + NET_RT_FLAGS = 0x2 + NET_RT_IFLIST = 0x3 + NET_RT_IFNAMES = 0x6 + NET_RT_MAXID = 0x8 + NET_RT_SOURCE = 0x7 + NET_RT_STATS = 0x4 + NET_RT_TABLE = 0x5 + NFDBITS = 0x20 + NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 + NOTE_ATTRIB = 0x8 + NOTE_CHANGE = 0x1 + NOTE_CHILD = 0x4 + NOTE_DELETE = 0x1 + NOTE_EOF = 0x2 + NOTE_EXEC = 0x20000000 + NOTE_EXIT = 0x80000000 + NOTE_EXTEND = 0x4 + NOTE_FORK = 0x40000000 + NOTE_LINK = 0x10 + NOTE_LOWAT = 0x1 + NOTE_OOB = 0x4 + NOTE_PCTRLMASK = 0xf0000000 + NOTE_PDATAMASK = 0xfffff + NOTE_RENAME = 0x20 + NOTE_REVOKE = 0x40 + NOTE_TRACK = 0x1 + NOTE_TRACKERR = 0x2 + NOTE_TRUNCATE = 0x80 + NOTE_WRITE = 0x2 + OCRNL = 0x10 + OLCUC = 0x20 + ONLCR = 0x2 + ONLRET = 0x80 + ONOCR = 0x40 + ONOEOT = 0x8 + OPOST = 0x1 + OXTABS = 0x4 + O_ACCMODE = 0x3 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x10000 + O_CREAT = 0x200 + O_DIRECTORY = 0x20000 + O_DSYNC = 0x80 + O_EXCL = 0x800 + O_EXLOCK = 0x20 + O_FSYNC = 0x80 + O_NDELAY = 0x4 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x100 + O_NONBLOCK = 0x4 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_RSYNC = 0x80 + O_SHLOCK = 0x10 + O_SYNC = 0x80 + O_TRUNC = 0x400 + O_WRONLY = 0x1 + PARENB = 0x1000 + PARMRK = 0x8 + PARODD = 0x2000 + PENDIN = 0x20000000 + PF_FLUSH = 0x1 + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROT_EXEC = 0x4 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 + RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0x7fffffffffffffff + RTAX_AUTHOR = 0x6 + RTAX_BFD = 0xb + RTAX_BRD = 0x7 + RTAX_DNS = 0xc + RTAX_DST = 0x0 + RTAX_GATEWAY = 0x1 + RTAX_GENMASK = 0x3 + RTAX_IFA = 0x5 + RTAX_IFP = 0x4 + RTAX_LABEL = 0xa + RTAX_MAX = 0xf + RTAX_NETMASK = 0x2 + RTAX_SEARCH = 0xe + RTAX_SRC = 0x8 + RTAX_SRCMASK = 0x9 + RTAX_STATIC = 0xd + RTA_AUTHOR = 0x40 + RTA_BFD = 0x800 + RTA_BRD = 0x80 + RTA_DNS = 0x1000 + RTA_DST = 0x1 + RTA_GATEWAY = 0x2 + RTA_GENMASK = 0x8 + RTA_IFA = 0x20 + RTA_IFP = 0x10 + RTA_LABEL = 0x400 + RTA_NETMASK = 0x4 + RTA_SEARCH = 0x4000 + RTA_SRC = 0x100 + RTA_SRCMASK = 0x200 + RTA_STATIC = 0x2000 + RTF_ANNOUNCE = 0x4000 + RTF_BFD = 0x1000000 + RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_CACHED = 0x20000 + RTF_CLONED = 0x10000 + RTF_CLONING = 0x100 + RTF_CONNECTED = 0x800000 + RTF_DONE = 0x40 + RTF_DYNAMIC = 0x10 + RTF_FMASK = 0x110fc08 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_LLINFO = 0x400 + RTF_LOCAL = 0x200000 + RTF_MODIFIED = 0x20 + RTF_MPATH = 0x40000 + RTF_MPLS = 0x100000 + RTF_MULTICAST = 0x200 + RTF_PERMANENT_ARP = 0x2000 + RTF_PROTO1 = 
0x8000 + RTF_PROTO2 = 0x4000 + RTF_PROTO3 = 0x2000 + RTF_REJECT = 0x8 + RTF_STATIC = 0x800 + RTF_UP = 0x1 + RTF_USETRAILERS = 0x8000 + RTM_80211INFO = 0x15 + RTM_ADD = 0x1 + RTM_BFD = 0x12 + RTM_CHANGE = 0x3 + RTM_CHGADDRATTR = 0x14 + RTM_DELADDR = 0xd + RTM_DELETE = 0x2 + RTM_DESYNC = 0x10 + RTM_GET = 0x4 + RTM_IFANNOUNCE = 0xf + RTM_IFINFO = 0xe + RTM_INVALIDATE = 0x11 + RTM_LOSING = 0x5 + RTM_MAXSIZE = 0x800 + RTM_MISS = 0x7 + RTM_NEWADDR = 0xc + RTM_PROPOSAL = 0x13 + RTM_REDIRECT = 0x6 + RTM_RESOLVE = 0xb + RTM_SOURCE = 0x16 + RTM_VERSION = 0x5 + RTV_EXPIRE = 0x4 + RTV_HOPCOUNT = 0x2 + RTV_MTU = 0x1 + RTV_RPIPE = 0x8 + RTV_RTT = 0x40 + RTV_RTTVAR = 0x80 + RTV_SPIPE = 0x10 + RTV_SSTHRESH = 0x20 + RT_TABLEID_BITS = 0x8 + RT_TABLEID_MASK = 0xff + RT_TABLEID_MAX = 0xff + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x4 + SEEK_CUR = 0x1 + SEEK_END = 0x2 + SEEK_SET = 0x0 + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDMULTI = 0x80206931 + SIOCAIFADDR = 0x8040691a + SIOCAIFGROUP = 0x80286987 + SIOCATMARK = 0x40047307 + SIOCBRDGADD = 0x8060693c + SIOCBRDGADDL = 0x80606949 + SIOCBRDGADDS = 0x80606941 + SIOCBRDGARL = 0x808c694d + SIOCBRDGDADDR = 0x81286947 + SIOCBRDGDEL = 0x8060693d + SIOCBRDGDELS = 0x80606942 + SIOCBRDGFLUSH = 0x80606948 + SIOCBRDGFRL = 0x808c694e + SIOCBRDGGCACHE = 0xc0146941 + SIOCBRDGGFD = 0xc0146952 + SIOCBRDGGHT = 0xc0146951 + SIOCBRDGGIFFLGS = 0xc060693e + SIOCBRDGGMA = 0xc0146953 + SIOCBRDGGPARAM = 0xc0406958 + SIOCBRDGGPRI = 0xc0146950 + SIOCBRDGGRL = 0xc030694f + SIOCBRDGGTO = 0xc0146946 + SIOCBRDGIFS = 0xc0606942 + SIOCBRDGRTS = 0xc0206943 + SIOCBRDGSADDR = 0xc1286944 + SIOCBRDGSCACHE = 0x80146940 + SIOCBRDGSFD = 0x80146952 + SIOCBRDGSHT = 0x80146951 + SIOCBRDGSIFCOST = 0x80606955 + SIOCBRDGSIFFLGS = 0x8060693f + SIOCBRDGSIFPRIO = 0x80606954 + SIOCBRDGSIFPROT = 0x8060694a + SIOCBRDGSMA = 0x80146953 + SIOCBRDGSPRI = 0x80146950 + SIOCBRDGSPROTO = 0x8014695a + SIOCBRDGSTO = 0x80146945 + SIOCBRDGSTXHC = 0x80146959 + SIOCDELLABEL = 0x80206997 + SIOCDELMULTI = 0x80206932 + SIOCDIFADDR = 0x80206919 + SIOCDIFGROUP = 0x80286989 + SIOCDIFPARENT = 0x802069b4 + SIOCDIFPHYADDR = 0x80206949 + SIOCDPWE3NEIGHBOR = 0x802069de + SIOCDVNETID = 0x802069af + SIOCGETKALIVE = 0xc01869a4 + SIOCGETLABEL = 0x8020699a + SIOCGETMPWCFG = 0xc02069ae + SIOCGETPFLOW = 0xc02069fe + SIOCGETPFSYNC = 0xc02069f8 + SIOCGETSGCNT = 0xc0207534 + SIOCGETVIFCNT = 0xc0287533 + SIOCGETVLAN = 0xc0206990 + SIOCGIFADDR = 0xc0206921 + SIOCGIFBRDADDR = 0xc0206923 + SIOCGIFCONF = 0xc0106924 + SIOCGIFDATA = 0xc020691b + SIOCGIFDESCR = 0xc0206981 + SIOCGIFDSTADDR = 0xc0206922 + SIOCGIFFLAGS = 0xc0206911 + SIOCGIFGATTR = 0xc028698b + SIOCGIFGENERIC = 0xc020693a + SIOCGIFGLIST = 0xc028698d + SIOCGIFGMEMB = 0xc028698a + SIOCGIFGROUP = 0xc0286988 + SIOCGIFHARDMTU = 0xc02069a5 + SIOCGIFLLPRIO = 0xc02069b6 + SIOCGIFMEDIA = 0xc0406938 + SIOCGIFMETRIC = 0xc0206917 + SIOCGIFMTU = 0xc020697e + SIOCGIFNETMASK = 0xc0206925 + SIOCGIFPAIR = 0xc02069b1 + SIOCGIFPARENT = 0xc02069b3 + SIOCGIFPRIORITY = 0xc020699c + SIOCGIFRDOMAIN = 0xc02069a0 + SIOCGIFRTLABEL = 0xc0206983 + SIOCGIFRXR = 0x802069aa + SIOCGIFSFFPAGE = 0xc1126939 + SIOCGIFXFLAGS = 0xc020699e + SIOCGLIFPHYADDR = 0xc218694b + SIOCGLIFPHYDF = 0xc02069c2 + SIOCGLIFPHYECN = 0xc02069c8 + SIOCGLIFPHYRTABLE = 0xc02069a2 + SIOCGLIFPHYTTL = 0xc02069a9 + SIOCGPGRP = 0x40047309 + SIOCGPWE3 = 0xc0206998 + SIOCGPWE3CTRLWORD = 0xc02069dc + SIOCGPWE3FAT = 0xc02069dd + SIOCGPWE3NEIGHBOR = 0xc21869de + SIOCGRXHPRIO = 
0xc02069db + SIOCGSPPPPARAMS = 0xc0206994 + SIOCGTXHPRIO = 0xc02069c6 + SIOCGUMBINFO = 0xc02069be + SIOCGUMBPARAM = 0xc02069c0 + SIOCGVH = 0xc02069f6 + SIOCGVNETFLOWID = 0xc02069c4 + SIOCGVNETID = 0xc02069a7 + SIOCIFAFATTACH = 0x801169ab + SIOCIFAFDETACH = 0x801169ac + SIOCIFCREATE = 0x8020697a + SIOCIFDESTROY = 0x80206979 + SIOCIFGCLONERS = 0xc0106978 + SIOCSETKALIVE = 0x801869a3 + SIOCSETLABEL = 0x80206999 + SIOCSETMPWCFG = 0x802069ad + SIOCSETPFLOW = 0x802069fd + SIOCSETPFSYNC = 0x802069f7 + SIOCSETVLAN = 0x8020698f + SIOCSIFADDR = 0x8020690c + SIOCSIFBRDADDR = 0x80206913 + SIOCSIFDESCR = 0x80206980 + SIOCSIFDSTADDR = 0x8020690e + SIOCSIFFLAGS = 0x80206910 + SIOCSIFGATTR = 0x8028698c + SIOCSIFGENERIC = 0x80206939 + SIOCSIFLLADDR = 0x8020691f + SIOCSIFLLPRIO = 0x802069b5 + SIOCSIFMEDIA = 0xc0206937 + SIOCSIFMETRIC = 0x80206918 + SIOCSIFMTU = 0x8020697f + SIOCSIFNETMASK = 0x80206916 + SIOCSIFPAIR = 0x802069b0 + SIOCSIFPARENT = 0x802069b2 + SIOCSIFPRIORITY = 0x8020699b + SIOCSIFRDOMAIN = 0x8020699f + SIOCSIFRTLABEL = 0x80206982 + SIOCSIFXFLAGS = 0x8020699d + SIOCSLIFPHYADDR = 0x8218694a + SIOCSLIFPHYDF = 0x802069c1 + SIOCSLIFPHYECN = 0x802069c7 + SIOCSLIFPHYRTABLE = 0x802069a1 + SIOCSLIFPHYTTL = 0x802069a8 + SIOCSPGRP = 0x80047308 + SIOCSPWE3CTRLWORD = 0x802069dc + SIOCSPWE3FAT = 0x802069dd + SIOCSPWE3NEIGHBOR = 0x821869de + SIOCSRXHPRIO = 0x802069db + SIOCSSPPPPARAMS = 0x80206993 + SIOCSTXHPRIO = 0x802069c5 + SIOCSUMBPARAM = 0x802069bf + SIOCSVH = 0xc02069f5 + SIOCSVNETFLOWID = 0x802069c3 + SIOCSVNETID = 0x802069a6 + SOCK_CLOEXEC = 0x8000 + SOCK_DGRAM = 0x2 + SOCK_DNS = 0x1000 + SOCK_NONBLOCK = 0x4000 + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SOMAXCONN = 0x80 + SO_ACCEPTCONN = 0x2 + SO_BINDANY = 0x1000 + SO_BROADCAST = 0x20 + SO_DEBUG = 0x1 + SO_DOMAIN = 0x1024 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_NETPROC = 0x1020 + SO_OOBINLINE = 0x100 + SO_PEERCRED = 0x1022 + SO_PROTOCOL = 0x1025 + SO_RCVBUF = 0x1002 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_RTABLE = 0x1021 + SO_SNDBUF = 0x1001 + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_SPLICE = 0x1023 + SO_TIMESTAMP = 0x800 + SO_TYPE = 0x1008 + SO_USELOOPBACK = 0x40 + SO_ZEROIZE = 0x2000 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISTXT = 0x200 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TCIFLUSH = 0x1 + TCIOFF = 0x3 + TCIOFLUSH = 0x3 + TCION = 0x4 + TCOFLUSH = 0x2 + TCOOFF = 0x1 + TCOON = 0x2 + TCPOPT_EOL = 0x0 + TCPOPT_MAXSEG = 0x2 + TCPOPT_NOP = 0x1 + TCPOPT_SACK = 0x5 + TCPOPT_SACK_HDR = 0x1010500 + TCPOPT_SACK_PERMITTED = 0x4 + TCPOPT_SACK_PERMIT_HDR = 0x1010402 + TCPOPT_SIGNATURE = 0x13 + TCPOPT_TIMESTAMP = 0x8 + TCPOPT_TSTAMP_HDR = 0x101080a + TCPOPT_WINDOW = 0x3 + TCP_INFO = 0x9 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_SACK = 0x3 + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0x4 + TCP_MSS = 0x200 + TCP_NODELAY = 0x1 + TCP_NOPUSH = 0x10 + TCP_SACKHOLE_LIMIT = 0x80 + TCP_SACK_ENABLE = 0x8 + TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 + TIOCCBRK = 0x2000747a + TIOCCDTR = 
0x20007478 + TIOCCHKVERAUTH = 0x2000741e + TIOCCLRVERAUTH = 0x2000741d + TIOCCONS = 0x80047462 + TIOCDRAIN = 0x2000745e + TIOCEXCL = 0x2000740d + TIOCEXT = 0x80047460 + TIOCFLAG_CLOCAL = 0x2 + TIOCFLAG_CRTSCTS = 0x4 + TIOCFLAG_MDMBUF = 0x8 + TIOCFLAG_PPS = 0x10 + TIOCFLAG_SOFTCAR = 0x1 + TIOCFLUSH = 0x80047410 + TIOCGETA = 0x402c7413 + TIOCGETD = 0x4004741a + TIOCGFLAGS = 0x4004745d + TIOCGPGRP = 0x40047477 + TIOCGSID = 0x40047463 + TIOCGTSTAMP = 0x4010745b + TIOCGWINSZ = 0x40087468 + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGET = 0x4004746a + TIOCMODG = 0x4004746a + TIOCMODS = 0x8004746d + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_RTS = 0x4 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + TIOCPKT_STOP = 0x4 + TIOCREMOTE = 0x80047469 + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007461 + TIOCSDTR = 0x20007479 + TIOCSETA = 0x802c7414 + TIOCSETAF = 0x802c7416 + TIOCSETAW = 0x802c7415 + TIOCSETD = 0x8004741b + TIOCSETVERAUTH = 0x8004741c + TIOCSFLAGS = 0x8004745c + TIOCSIG = 0x8004745f + TIOCSPGRP = 0x80047476 + TIOCSTART = 0x2000746e + TIOCSTAT = 0x20007465 + TIOCSTOP = 0x2000746f + TIOCSTSTAMP = 0x8008745a + TIOCSWINSZ = 0x80087467 + TIOCUCNTL = 0x80047466 + TIOCUCNTL_CBRK = 0x7a + TIOCUCNTL_SBRK = 0x7b + TOSTOP = 0x400000 + UTIME_NOW = -0x2 + UTIME_OMIT = -0x1 + VDISCARD = 0xf + VDSUSP = 0xb + VEOF = 0x0 + VEOL = 0x1 + VEOL2 = 0x2 + VERASE = 0x3 + VINTR = 0x8 + VKILL = 0x5 + VLNEXT = 0xe + VMIN = 0x10 + VM_ANONMIN = 0x7 + VM_LOADAVG = 0x2 + VM_MALLOC_CONF = 0xc + VM_MAXID = 0xd + VM_MAXSLP = 0xa + VM_METER = 0x1 + VM_NKMEMPAGES = 0x6 + VM_PSSTRINGS = 0x3 + VM_SWAPENCRYPT = 0x5 + VM_USPACE = 0xb + VM_UVMEXP = 0x4 + VM_VNODEMIN = 0x9 + VM_VTEXTMIN = 0x8 + VQUIT = 0x9 + VREPRINT = 0x6 + VSTART = 0xc + VSTATUS = 0x12 + VSTOP = 0xd + VSUSP = 0xa + VTIME = 0x11 + VWERASE = 0x4 + WALTSIG = 0x4 + WCONTINUED = 0x8 + WCOREFLAG = 0x80 + WNOHANG = 0x1 + WUNTRACED = 0x2 + XCASE = 0x1000000 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EADDRINUSE = syscall.Errno(0x30) + EADDRNOTAVAIL = syscall.Errno(0x31) + EAFNOSUPPORT = syscall.Errno(0x2f) + EAGAIN = syscall.Errno(0x23) + EALREADY = syscall.Errno(0x25) + EAUTH = syscall.Errno(0x50) + EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x5c) + EBADRPC = syscall.Errno(0x48) + EBUSY = syscall.Errno(0x10) + ECANCELED = syscall.Errno(0x58) + ECHILD = syscall.Errno(0xa) + ECONNABORTED = syscall.Errno(0x35) + ECONNREFUSED = syscall.Errno(0x3d) + ECONNRESET = syscall.Errno(0x36) + EDEADLK = syscall.Errno(0xb) + EDESTADDRREQ = syscall.Errno(0x27) + EDOM = syscall.Errno(0x21) + EDQUOT = syscall.Errno(0x45) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EFTYPE = syscall.Errno(0x4f) + EHOSTDOWN = syscall.Errno(0x40) + EHOSTUNREACH = syscall.Errno(0x41) + EIDRM = syscall.Errno(0x59) + EILSEQ = syscall.Errno(0x54) + EINPROGRESS = syscall.Errno(0x24) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EIPSEC = syscall.Errno(0x52) + EISCONN = syscall.Errno(0x38) + EISDIR = syscall.Errno(0x15) + ELAST = syscall.Errno(0x5f) + ELOOP = syscall.Errno(0x3e) + 
EMEDIUMTYPE = syscall.Errno(0x56) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + EMSGSIZE = syscall.Errno(0x28) + ENAMETOOLONG = syscall.Errno(0x3f) + ENEEDAUTH = syscall.Errno(0x51) + ENETDOWN = syscall.Errno(0x32) + ENETRESET = syscall.Errno(0x34) + ENETUNREACH = syscall.Errno(0x33) + ENFILE = syscall.Errno(0x17) + ENOATTR = syscall.Errno(0x53) + ENOBUFS = syscall.Errno(0x37) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOLCK = syscall.Errno(0x4d) + ENOMEDIUM = syscall.Errno(0x55) + ENOMEM = syscall.Errno(0xc) + ENOMSG = syscall.Errno(0x5a) + ENOPROTOOPT = syscall.Errno(0x2a) + ENOSPC = syscall.Errno(0x1c) + ENOSYS = syscall.Errno(0x4e) + ENOTBLK = syscall.Errno(0xf) + ENOTCONN = syscall.Errno(0x39) + ENOTDIR = syscall.Errno(0x14) + ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x5d) + ENOTSOCK = syscall.Errno(0x26) + ENOTSUP = syscall.Errno(0x5b) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EOPNOTSUPP = syscall.Errno(0x2d) + EOVERFLOW = syscall.Errno(0x57) + EOWNERDEAD = syscall.Errno(0x5e) + EPERM = syscall.Errno(0x1) + EPFNOSUPPORT = syscall.Errno(0x2e) + EPIPE = syscall.Errno(0x20) + EPROCLIM = syscall.Errno(0x43) + EPROCUNAVAIL = syscall.Errno(0x4c) + EPROGMISMATCH = syscall.Errno(0x4b) + EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5f) + EPROTONOSUPPORT = syscall.Errno(0x2b) + EPROTOTYPE = syscall.Errno(0x29) + ERANGE = syscall.Errno(0x22) + EREMOTE = syscall.Errno(0x47) + EROFS = syscall.Errno(0x1e) + ERPCMISMATCH = syscall.Errno(0x49) + ESHUTDOWN = syscall.Errno(0x3a) + ESOCKTNOSUPPORT = syscall.Errno(0x2c) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ESTALE = syscall.Errno(0x46) + ETIMEDOUT = syscall.Errno(0x3c) + ETOOMANYREFS = syscall.Errno(0x3b) + ETXTBSY = syscall.Errno(0x1a) + EUSERS = syscall.Errno(0x44) + EWOULDBLOCK = syscall.Errno(0x23) + EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGBUS = syscall.Signal(0xa) + SIGCHLD = syscall.Signal(0x14) + SIGCONT = syscall.Signal(0x13) + SIGEMT = syscall.Signal(0x7) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINFO = syscall.Signal(0x1d) + SIGINT = syscall.Signal(0x2) + SIGIO = syscall.Signal(0x17) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGPROF = syscall.Signal(0x1b) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGSTOP = syscall.Signal(0x11) + SIGSYS = syscall.Signal(0xc) + SIGTERM = syscall.Signal(0xf) + SIGTHR = syscall.Signal(0x20) + SIGTRAP = syscall.Signal(0x5) + SIGTSTP = syscall.Signal(0x12) + SIGTTIN = syscall.Signal(0x15) + SIGTTOU = syscall.Signal(0x16) + SIGURG = syscall.Signal(0x10) + SIGUSR1 = syscall.Signal(0x1e) + SIGUSR2 = syscall.Signal(0x1f) + SIGVTALRM = syscall.Signal(0x1a) + SIGWINCH = syscall.Signal(0x1c) + SIGXCPU = syscall.Signal(0x18) + SIGXFSZ = syscall.Signal(0x19) +) + +// Error table +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + 
{11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EAGAIN", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disk quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC program not available"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIPSEC", "IPsec processing failure"}, + {83, "ENOATTR", "attribute not found"}, + {84, "EILSEQ", "illegal byte sequence"}, + {85, "ENOMEDIUM", "no medium found"}, + {86, "EMEDIUMTYPE", "wrong medium type"}, + {87, "EOVERFLOW", "value too large 
to be stored in data type"}, + {88, "ECANCELED", "operation canceled"}, + {89, "EIDRM", "identifier removed"}, + {90, "ENOMSG", "no message of desired type"}, + {91, "ENOTSUP", "not supported"}, + {92, "EBADMSG", "bad message"}, + {93, "ENOTRECOVERABLE", "state not recoverable"}, + {94, "EOWNERDEAD", "previous owner died"}, + {95, "ELAST", "protocol error"}, +} + +// Signal table +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGABRT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGTHR", "thread AST"}, +} diff --git a/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go index bd001a6e1..97f20ca28 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go @@ -15,12 +15,12 @@ type PtraceRegsArm struct { // PtraceGetRegsArm fetches the registers used by arm binaries. func PtraceGetRegsArm(pid int, regsout *PtraceRegsArm) error { - return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) + return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout)) } // PtraceSetRegsArm sets the registers used by arm binaries. func PtraceSetRegsArm(pid int, regs *PtraceRegsArm) error { - return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) + return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs)) } // PtraceRegsArm64 is the registers used by arm64 binaries. @@ -33,10 +33,10 @@ type PtraceRegsArm64 struct { // PtraceGetRegsArm64 fetches the registers used by arm64 binaries. func PtraceGetRegsArm64(pid int, regsout *PtraceRegsArm64) error { - return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) + return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout)) } // PtraceSetRegsArm64 sets the registers used by arm64 binaries. func PtraceSetRegsArm64(pid int, regs *PtraceRegsArm64) error { - return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) + return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs)) } diff --git a/vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go b/vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go index 6cb6d688a..834d2856d 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go @@ -7,11 +7,11 @@ import "unsafe" // PtraceGetRegSetArm64 fetches the registers used by arm64 binaries. 
func PtraceGetRegSetArm64(pid, addr int, regsout *PtraceRegsArm64) error { iovec := Iovec{(*byte)(unsafe.Pointer(regsout)), uint64(unsafe.Sizeof(*regsout))} - return ptrace(PTRACE_GETREGSET, pid, uintptr(addr), uintptr(unsafe.Pointer(&iovec))) + return ptracePtr(PTRACE_GETREGSET, pid, uintptr(addr), unsafe.Pointer(&iovec)) } // PtraceSetRegSetArm64 sets the registers used by arm64 binaries. func PtraceSetRegSetArm64(pid, addr int, regs *PtraceRegsArm64) error { iovec := Iovec{(*byte)(unsafe.Pointer(regs)), uint64(unsafe.Sizeof(*regs))} - return ptrace(PTRACE_SETREGSET, pid, uintptr(addr), uintptr(unsafe.Pointer(&iovec))) + return ptracePtr(PTRACE_SETREGSET, pid, uintptr(addr), unsafe.Pointer(&iovec)) } diff --git a/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go index c34d0639b..0b5f79430 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go @@ -21,12 +21,12 @@ type PtraceRegsMips struct { // PtraceGetRegsMips fetches the registers used by mips binaries. func PtraceGetRegsMips(pid int, regsout *PtraceRegsMips) error { - return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) + return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout)) } // PtraceSetRegsMips sets the registers used by mips binaries. func PtraceSetRegsMips(pid int, regs *PtraceRegsMips) error { - return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) + return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs)) } // PtraceRegsMips64 is the registers used by mips64 binaries. @@ -42,10 +42,10 @@ type PtraceRegsMips64 struct { // PtraceGetRegsMips64 fetches the registers used by mips64 binaries. func PtraceGetRegsMips64(pid int, regsout *PtraceRegsMips64) error { - return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) + return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout)) } // PtraceSetRegsMips64 sets the registers used by mips64 binaries. func PtraceSetRegsMips64(pid int, regs *PtraceRegsMips64) error { - return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) + return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs)) } diff --git a/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go index 3ccf0c0c4..2807f7e64 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go @@ -21,12 +21,12 @@ type PtraceRegsMipsle struct { // PtraceGetRegsMipsle fetches the registers used by mipsle binaries. func PtraceGetRegsMipsle(pid int, regsout *PtraceRegsMipsle) error { - return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) + return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout)) } // PtraceSetRegsMipsle sets the registers used by mipsle binaries. func PtraceSetRegsMipsle(pid int, regs *PtraceRegsMipsle) error { - return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) + return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs)) } // PtraceRegsMips64le is the registers used by mips64le binaries. @@ -42,10 +42,10 @@ type PtraceRegsMips64le struct { // PtraceGetRegsMips64le fetches the registers used by mips64le binaries. 
func PtraceGetRegsMips64le(pid int, regsout *PtraceRegsMips64le) error { - return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) + return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout)) } // PtraceSetRegsMips64le sets the registers used by mips64le binaries. func PtraceSetRegsMips64le(pid int, regs *PtraceRegsMips64le) error { - return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) + return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs)) } diff --git a/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go index 7d6585700..281ea64e3 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go @@ -31,12 +31,12 @@ type PtraceRegs386 struct { // PtraceGetRegs386 fetches the registers used by 386 binaries. func PtraceGetRegs386(pid int, regsout *PtraceRegs386) error { - return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) + return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout)) } // PtraceSetRegs386 sets the registers used by 386 binaries. func PtraceSetRegs386(pid int, regs *PtraceRegs386) error { - return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) + return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs)) } // PtraceRegsAmd64 is the registers used by amd64 binaries. @@ -72,10 +72,10 @@ type PtraceRegsAmd64 struct { // PtraceGetRegsAmd64 fetches the registers used by amd64 binaries. func PtraceGetRegsAmd64(pid int, regsout *PtraceRegsAmd64) error { - return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) + return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout)) } // PtraceSetRegsAmd64 sets the registers used by amd64 binaries. 
func PtraceSetRegsAmd64(pid int, regs *PtraceRegsAmd64) error { - return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) + return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs)) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go index 870215d2c..9a257219d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go @@ -124,7 +124,6 @@ int utime(uintptr_t, uintptr_t); unsigned long long getsystemcfg(int); int umount(uintptr_t); int getrlimit64(int, uintptr_t); -int setrlimit64(int, uintptr_t); long long lseek64(int, long long, int); uintptr_t mmap(uintptr_t, uintptr_t, int, int, int, long long); @@ -213,7 +212,7 @@ func wait4(pid Pid_t, status *_C_int, options int, rusage *Rusage) (wpid Pid_t, // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { +func ioctl(fd int, req int, arg uintptr) (err error) { r0, er := C.ioctl(C.int(fd), C.int(req), C.uintptr_t(arg)) if r0 == -1 && er != nil { err = er @@ -223,6 +222,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { + r0, er := C.ioctl(C.int(fd), C.int(req), C.uintptr_t(uintptr(arg))) + if r0 == -1 && er != nil { + err = er + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func FcntlInt(fd uintptr, cmd int, arg int) (r int, err error) { r0, er := C.fcntl(C.uintptr_t(fd), C.int(cmd), C.uintptr_t(arg)) r = int(r0) @@ -1454,16 +1463,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - r0, er := C.setrlimit64(C.int(resource), C.uintptr_t(uintptr(unsafe.Pointer(rlim)))) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Seek(fd int, offset int64, whence int) (off int64, err error) { r0, er := C.lseek64(C.int(fd), C.longlong(offset), C.int(whence)) off = int64(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go index a89b0bfa5..6de80c20c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go @@ -93,8 +93,18 @@ func wait4(pid Pid_t, status *_C_int, options int, rusage *Rusage) (wpid Pid_t, // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, e1 := callioctl(fd, int(req), arg) +func ioctl(fd int, req int, arg uintptr) (err error) { + _, e1 := callioctl(fd, req, arg) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { + _, e1 := callioctl_ptr(fd, req, arg) if e1 != 0 { err = errnoErr(e1) } @@ -1412,16 +1422,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, e1 := callsetrlimit(resource, uintptr(unsafe.Pointer(rlim))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Seek(fd int, offset int64, whence int) (off int64, 
err error) { r0, e1 := calllseek(fd, offset, whence) off = int64(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go index 2caa5adf9..c4d50ae50 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go @@ -124,7 +124,6 @@ import ( //go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o" //go:cgo_import_dynamic libc_umount umount "libc.a/shr_64.o" //go:cgo_import_dynamic libc_getrlimit getrlimit "libc.a/shr_64.o" -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.a/shr_64.o" //go:cgo_import_dynamic libc_lseek lseek "libc.a/shr_64.o" //go:cgo_import_dynamic libc_mmap64 mmap64 "libc.a/shr_64.o" @@ -242,7 +241,6 @@ import ( //go:linkname libc_getsystemcfg libc_getsystemcfg //go:linkname libc_umount libc_umount //go:linkname libc_getrlimit libc_getrlimit -//go:linkname libc_setrlimit libc_setrlimit //go:linkname libc_lseek libc_lseek //go:linkname libc_mmap64 libc_mmap64 @@ -363,7 +361,6 @@ var ( libc_getsystemcfg, libc_umount, libc_getrlimit, - libc_setrlimit, libc_lseek, libc_mmap64 syscallFunc ) @@ -423,6 +420,13 @@ func callioctl(fd int, req int, arg uintptr) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func callioctl_ptr(fd int, req int, arg unsafe.Pointer) (r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_ioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func callfcntl(fd uintptr, cmd int, arg uintptr) (r1 uintptr, e1 Errno) { r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_fcntl)), 3, fd, uintptr(cmd), arg, 0, 0, 0) return @@ -1172,13 +1176,6 @@ func callgetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callsetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) { - r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_setrlimit)), 2, uintptr(resource), rlim, 0, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func calllseek(fd int, offset int64, whence int) (r1 uintptr, e1 Errno) { r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_lseek)), 3, uintptr(fd), uintptr(offset), uintptr(whence), 0, 0, 0) return diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go index 944a714b1..6903d3b09 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go @@ -123,7 +123,6 @@ int utime(uintptr_t, uintptr_t); unsigned long long getsystemcfg(int); int umount(uintptr_t); int getrlimit(int, uintptr_t); -int setrlimit(int, uintptr_t); long long lseek(int, long long, int); uintptr_t mmap64(uintptr_t, uintptr_t, int, int, int, long long); @@ -131,6 +130,7 @@ uintptr_t mmap64(uintptr_t, uintptr_t, int, int, int, long long); import "C" import ( "syscall" + "unsafe" ) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -191,6 +191,14 @@ func callioctl(fd int, req int, arg uintptr) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func callioctl_ptr(fd int, req int, arg unsafe.Pointer) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.ioctl(C.int(fd), C.int(req), C.uintptr_t(uintptr(arg)))) + e1 = syscall.GetErrno() + return +} + +// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT + func callfcntl(fd uintptr, cmd int, arg uintptr) (r1 uintptr, e1 Errno) { r1 = uintptr(C.fcntl(C.uintptr_t(fd), C.int(cmd), C.uintptr_t(arg))) e1 = syscall.GetErrno() @@ -1047,14 +1055,6 @@ func callgetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func callsetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) { - r1 = uintptr(C.setrlimit(C.int(resource), C.uintptr_t(rlim))) - e1 = syscall.GetErrno() - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func calllseek(fd int, offset int64, whence int) (r1 uintptr, e1 Errno) { r1 = uintptr(C.lseek(C.int(fd), C.longlong(offset), C.int(whence))) e1 = syscall.GetErrno() diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go deleted file mode 100644 index a06eb0932..000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go +++ /dev/null @@ -1,40 +0,0 @@ -// go run mksyscall.go -tags darwin,amd64,go1.13 syscall_darwin.1_13.go -// Code generated by the command above; see README.md. DO NOT EDIT. - -//go:build darwin && amd64 && go1.13 -// +build darwin,amd64,go1.13 - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func closedir(dir uintptr) (err error) { - _, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_closedir_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { - r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) - res = Errno(r0) - return -} - -var libc_readdir_r_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s deleted file mode 100644 index d6c3e25c0..000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s +++ /dev/null @@ -1,25 +0,0 @@ -// go run mkasm_darwin.go amd64 -// Code generated by the command above; DO NOT EDIT. 
- -//go:build go1.13 -// +build go1.13 - -#include "textflag.h" - -TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fdopendir(SB) - -GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB) - -TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_closedir(SB) - -GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8 -DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB) - -TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_readdir_r(SB) - -GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8 -DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 879376589..4037ccf7a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -1,8 +1,8 @@ -// go run mksyscall.go -tags darwin,amd64,go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.go +// go run mksyscall.go -tags darwin,amd64 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. -//go:build darwin && amd64 && go1.12 -// +build darwin,amd64,go1.12 +//go:build darwin && amd64 +// +build darwin,amd64 package unix @@ -463,6 +463,32 @@ var libc_munlockall_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func closedir(dir uintptr) (err error) { + _, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_closedir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { + r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + res = Errno(r0) + return +} + +var libc_readdir_r_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe(p *[2]int32) (err error) { _, _, e1 := syscall_rawSyscall(libc_pipe_trampoline_addr, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { @@ -699,6 +725,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" @@ -1643,6 +1677,30 @@ var libc_mknod_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + 
+//go:cgo_import_dynamic libc_mount mount "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Open(path string, mode int, perm uint32) (fd int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1934,6 +1992,31 @@ var libc_select_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Setattrlist(path string, attrlist *Attrlist, attrBuf []byte, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(attrBuf) > 0 { + _p1 = unsafe.Pointer(&attrBuf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(libc_setattrlist_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(attrlist)), uintptr(_p1), uintptr(len(attrBuf)), uintptr(options), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setattrlist_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setegid(egid int) (err error) { _, _, e1 := syscall_syscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { @@ -2065,20 +2148,6 @@ var libc_setreuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) @@ -2452,6 +2521,14 @@ func ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) { return } +func ptrace1Ptr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall6(libc_ptrace_trampoline_addr, uintptr(request), uintptr(pid), addr, uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ptrace_trampoline_addr uintptr //go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 8da90cf0e..4baaed0bc 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -1,11 +1,14 @@ -// go run mkasm_darwin.go amd64 +// go run mkasm.go darwin amd64 // Code generated by the command above; DO NOT EDIT. 
-//go:build go1.12 -// +build go1.12 - #include "textflag.h" +TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fdopendir(SB) + +GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB) + TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) @@ -174,6 +177,18 @@ TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) +TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_closedir(SB) + +GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB) + +TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readdir_r(SB) + +GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB) + TEXT libc_pipe_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pipe(SB) @@ -600,6 +615,12 @@ TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) + +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) @@ -684,6 +705,11 @@ TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) +TEXT libc_setattrlist_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setattrlist(SB) +GLOBL ·libc_setattrlist_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setattrlist_trampoline_addr(SB)/8, $libc_setattrlist_trampoline<>(SB) + TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) @@ -738,12 +764,6 @@ TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) - -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go deleted file mode 100644 index cec595d55..000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go +++ /dev/null @@ -1,40 +0,0 @@ -// go run mksyscall.go -tags darwin,arm64,go1.13 syscall_darwin.1_13.go -// Code generated by the command above; see README.md. DO NOT EDIT. 
- -//go:build darwin && arm64 && go1.13 -// +build darwin,arm64,go1.13 - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func closedir(dir uintptr) (err error) { - _, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_closedir_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { - r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) - res = Errno(r0) - return -} - -var libc_readdir_r_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s deleted file mode 100644 index 357989722..000000000 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s +++ /dev/null @@ -1,25 +0,0 @@ -// go run mkasm_darwin.go arm64 -// Code generated by the command above; DO NOT EDIT. - -//go:build go1.13 -// +build go1.13 - -#include "textflag.h" - -TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fdopendir(SB) - -GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB) - -TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_closedir(SB) - -GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8 -DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB) - -TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_readdir_r(SB) - -GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8 -DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index f47eedd5a..51d6f3fb2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -1,8 +1,8 @@ -// go run mksyscall.go -tags darwin,arm64,go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.go +// go run mksyscall.go -tags darwin,arm64 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
-//go:build darwin && arm64 && go1.12 -// +build darwin,arm64,go1.12 +//go:build darwin && arm64 +// +build darwin,arm64 package unix @@ -463,6 +463,32 @@ var libc_munlockall_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func closedir(dir uintptr) (err error) { + _, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_closedir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { + r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + res = Errno(r0) + return +} + +var libc_readdir_r_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe(p *[2]int32) (err error) { _, _, e1 := syscall_rawSyscall(libc_pipe_trampoline_addr, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { @@ -699,6 +725,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" @@ -1643,6 +1677,30 @@ var libc_mknod_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Open(path string, mode int, perm uint32) (fd int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1934,6 +1992,31 @@ var libc_select_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Setattrlist(path string, attrlist *Attrlist, attrBuf []byte, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(attrBuf) > 0 { + _p1 = unsafe.Pointer(&attrBuf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(libc_setattrlist_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(attrlist)), uintptr(_p1), uintptr(len(attrBuf)), uintptr(options), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setattrlist_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setegid(egid int) (err error) { _, _, e1 := syscall_syscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { @@ -2065,20 +2148,6 
@@ var libc_setreuid_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) @@ -2452,6 +2521,14 @@ func ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) { return } +func ptrace1Ptr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall6(libc_ptrace_trampoline_addr, uintptr(request), uintptr(pid), addr, uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ptrace_trampoline_addr uintptr //go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 4d26f7d01..c3b82c037 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -1,11 +1,14 @@ -// go run mkasm_darwin.go arm64 +// go run mkasm.go darwin arm64 // Code generated by the command above; DO NOT EDIT. -//go:build go1.12 -// +build go1.12 - #include "textflag.h" +TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fdopendir(SB) + +GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB) + TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) @@ -174,6 +177,18 @@ TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) +TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_closedir(SB) + +GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB) + +TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readdir_r(SB) + +GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB) + TEXT libc_pipe_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pipe(SB) @@ -600,6 +615,12 @@ TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) + +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) @@ -684,6 +705,11 @@ TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) +TEXT libc_setattrlist_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setattrlist(SB) +GLOBL ·libc_setattrlist_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setattrlist_trampoline_addr(SB)/8, $libc_setattrlist_trampoline<>(SB) + TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) @@ -738,12 +764,6 @@ TEXT 
libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) - -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index 1b6eedfa6..0eabac7ad 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -436,6 +436,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -552,6 +562,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { @@ -1390,16 +1410,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index e9d9997ee..ee313eb00 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -388,6 +388,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -414,6 +424,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -544,6 +564,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { @@ -912,7 +942,7 @@ func Fpathconf(fd int, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat(fd int, stat *stat_freebsd11_t) (err error) { +func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -922,17 +952,7 @@ func fstat(fd int, stat *stat_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat_freebsd12(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) { +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -947,22 +967,7 @@ func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FSTATAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { +func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -972,16 +977,6 @@ func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1002,7 +997,7 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries(fd int, buf []byte, basep *uint64) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) @@ -1019,23 +1014,6 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, 
err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES_FREEBSD12, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getdtablesize() (size int) { r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) size = int(r0) @@ -1257,21 +1235,6 @@ func Listen(s int, backlog int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func lstat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Mkdir(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1317,43 +1280,13 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mknodat(fd int, path string, mode uint32, dev int) (err error) { +func Mknodat(fd int, path string, mode uint32, dev uint64) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), uintptr(dev>>32), 0) + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), uintptr(dev>>32), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1712,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) @@ -1753,22 +1676,7 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func stat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func statfs(path string, 
stat *statfs_freebsd11_t) (err error) { +func Statfs(path string, stat *Statfs_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1783,21 +1691,6 @@ func statfs(path string, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func statfs_freebsd12(path string, stat *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STATFS_FREEBSD12, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index edd373b1a..4c986e448 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -388,6 +388,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -414,6 +424,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -544,6 +564,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { @@ -912,7 +942,7 @@ func Fpathconf(fd int, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat(fd int, stat *stat_freebsd11_t) (err error) { +func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -922,17 +952,7 @@ func fstat(fd int, stat *stat_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat_freebsd12(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) { +func 
Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -947,22 +967,7 @@ func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FSTATAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { +func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -972,16 +977,6 @@ func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1002,7 +997,7 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries(fd int, buf []byte, basep *uint64) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) @@ -1019,23 +1014,6 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES_FREEBSD12, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getdtablesize() (size int) { r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) size = int(r0) @@ -1257,21 +1235,6 @@ func Listen(s int, backlog int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func lstat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Mkdir(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1317,22 +1280,7 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 
uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mknodat(fd int, path string, mode uint32, dev int) (err error) { +func Mknodat(fd int, path string, mode uint32, dev uint64) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1347,21 +1295,6 @@ func mknodat(fd int, path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1712,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) @@ -1753,22 +1676,7 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func stat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func statfs(path string, stat *statfs_freebsd11_t) (err error) { +func Statfs(path string, stat *Statfs_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1783,21 +1691,6 @@ func statfs(path string, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func statfs_freebsd12(path string, stat *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STATFS_FREEBSD12, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index 82e9764b2..555216944 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -351,14 +351,25 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { 
+ err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -367,8 +378,8 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) } @@ -377,15 +388,24 @@ func pipe2(p *[2]_C_int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getcwd(buf []byte) (n int, err error) { +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } @@ -394,8 +414,8 @@ func Getcwd(buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) +func ptrace(request int, pid int, addr uintptr, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -404,7 +424,7 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data int) (err error) { +func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { err = errnoErr(e1) @@ -544,6 +564,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { @@ -912,7 +942,7 @@ func Fpathconf(fd int, name int) (val 
int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat(fd int, stat *stat_freebsd11_t) (err error) { +func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -922,17 +952,7 @@ func fstat(fd int, stat *stat_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat_freebsd12(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) { +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -947,22 +967,7 @@ func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FSTATAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { +func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -972,16 +977,6 @@ func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1002,7 +997,7 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries(fd int, buf []byte, basep *uint64) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) @@ -1019,23 +1014,6 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES_FREEBSD12, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getdtablesize() (size int) { r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) size = int(r0) @@ -1257,21 +1235,6 @@ func Listen(s int, backlog int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func lstat(path string, stat 
*stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Mkdir(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1317,43 +1280,13 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mknod(path string, mode uint32, dev int) (err error) { +func Mknodat(fd int, path string, mode uint32, dev uint64) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mknodat(fd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, uintptr(dev), uintptr(dev>>32)) if e1 != 0 { err = errnoErr(e1) } @@ -1712,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) @@ -1753,22 +1676,7 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func stat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func statfs(path string, stat *statfs_freebsd11_t) (err error) { +func Statfs(path string, stat *Statfs_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1783,21 +1691,6 @@ func statfs(path string, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func statfs_freebsd12(path string, stat *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STATFS_FREEBSD12, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT - func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index a6479acd1..67a226fbf 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -388,6 +388,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -414,6 +424,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -544,6 +564,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { @@ -912,7 +942,7 @@ func Fpathconf(fd int, name int) (val int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat(fd int, stat *stat_freebsd11_t) (err error) { +func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -922,17 +952,7 @@ func fstat(fd int, stat *stat_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat_freebsd12(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) { +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -947,22 +967,7 @@ func fstatat(fd int, path string, stat *stat_freebsd11_t, flags int) (err error) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatat_freebsd12(fd int, path string, stat *Stat_t, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FSTATAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY 
THE COMMAND AT THE TOP; DO NOT EDIT - -func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { +func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) @@ -972,16 +977,6 @@ func fstatfs(fd int, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstatfs_freebsd12(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1002,7 +997,7 @@ func Ftruncate(fd int, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { +func getdirentries(fd int, buf []byte, basep *uint64) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) @@ -1019,23 +1014,6 @@ func getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func getdirentries_freebsd12(fd int, buf []byte, basep *uint64) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES_FREEBSD12, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getdtablesize() (size int) { r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) size = int(r0) @@ -1257,21 +1235,6 @@ func Listen(s int, backlog int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func lstat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Mkdir(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1317,22 +1280,7 @@ func Mkfifo(path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mknodat(fd int, path string, mode uint32, dev int) (err error) { +func Mknodat(fd int, path string, mode uint32, dev uint64) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1347,21 +1295,6 @@ func mknodat(fd int, path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := 
Syscall6(SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1712,16 +1645,6 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) @@ -1753,22 +1676,7 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func stat(path string, stat *stat_freebsd11_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func statfs(path string, stat *statfs_freebsd11_t) (err error) { +func Statfs(path string, stat *Statfs_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { @@ -1783,21 +1691,6 @@ func statfs(path string, stat *statfs_freebsd11_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func statfs_freebsd12(path string, stat *Statfs_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STATFS_FREEBSD12, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go new file mode 100644 index 000000000..f0b9ddaaa --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go @@ -0,0 +1,1909 @@ +// go run mksyscall.go -tags freebsd,riscv64 syscall_bsd.go syscall_freebsd.go syscall_freebsd_riscv64.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +//go:build freebsd && riscv64 +// +build freebsd,riscv64 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + 
+func Madvise(b []byte, behav int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } 
else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func CapEnter() (err error) { + _, _, e1 := Syscall(SYS_CAP_ENTER, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func capRightsGet(version int, fd int, rightsp *CapRights) (err error) { + _, _, e1 := Syscall(SYS___CAP_RIGHTS_GET, uintptr(version), uintptr(fd), uintptr(unsafe.Pointer(rightsp))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func capRightsLimit(fd int, rightsp *CapRights) (err error) { + _, _, e1 := Syscall(SYS_CAP_RIGHTS_LIMIT, uintptr(fd), uintptr(unsafe.Pointer(rightsp)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHOWN, 
uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + Syscall(SYS_EXIT, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteFd(fd int, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_FD, uintptr(fd), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p0))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListFd(fd int, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FD, uintptr(fd), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = 
BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteFile(file string, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListFile(file string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(file) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_FILE, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrGetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_GET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrSetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_SET_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1)), uintptr(data), uintptr(nbytes), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrDeleteLink(link string, attrnamespace int, attrname string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attrname) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_EXTATTR_DELETE_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + 
return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ExtattrListLink(link string, attrnamespace int, data uintptr, nbytes int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_EXTATTR_LIST_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(attrnamespace), uintptr(data), uintptr(nbytes), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_POSIX_FADVISE, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + 
_, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getdirentries(fd int, buf []byte, basep *uint64) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdtablesize() (size int) { + r0, _, _ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) + size = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + pgrp = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, 
lim *Rlimit) (err error) { + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIR, 
uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(fd int, path string, mode uint32, dev uint64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Openat(fdat int, path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(fdat), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(fromfd int, from string, tofd int, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + newoffset = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), 
uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, 
uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Undelete(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go index af5cb064e..b57c7050d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go @@ -15,25 +15,19 @@ import ( //go:cgo_import_dynamic libc_writev writev "libc.so" //go:cgo_import_dynamic libc_pwritev pwritev "libc.so" //go:cgo_import_dynamic libc_accept4 accept4 "libsocket.so" -//go:cgo_import_dynamic libc_putmsg putmsg "libc.so" -//go:cgo_import_dynamic libc_getmsg getmsg "libc.so" //go:linkname procreadv libc_readv //go:linkname procpreadv libc_preadv //go:linkname procwritev libc_writev //go:linkname procpwritev libc_pwritev //go:linkname procaccept4 libc_accept4 -//go:linkname procputmsg libc_putmsg -//go:linkname procgetmsg libc_getmsg var ( procreadv, procpreadv, procwritev, procpwritev, - procaccept4, - procputmsg, - procgetmsg syscallFunc + procaccept4 syscallFunc ) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -106,23 +100,3 @@ func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, } return } - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func putmsg(fd int, clptr *strbuf, dataptr *strbuf, flags int) (err error) { - _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procputmsg)), 4, uintptr(fd), uintptr(unsafe.Pointer(clptr)), uintptr(unsafe.Pointer(dataptr)), uintptr(flags), 0, 0) - if e1 != 0 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT 
- -func getmsg(fd int, clptr *strbuf, dataptr *strbuf, flags *int) (err error) { - _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetmsg)), 4, uintptr(fd), uintptr(unsafe.Pointer(clptr)), uintptr(unsafe.Pointer(dataptr)), uintptr(unsafe.Pointer(flags)), 0, 0) - if e1 != 0 { - err = e1 - } - return -} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index ca65ac82b..da63d9d78 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -379,6 +379,16 @@ func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(arg) @@ -537,6 +547,17 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockAdjtime(clockid int32, buf *Timex) (state int, err error) { + r0, _, e1 := Syscall(SYS_CLOCK_ADJTIME, uintptr(clockid), uintptr(unsafe.Pointer(buf)), 0) + state = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ClockGetres(clockid int32, res *Timespec) (err error) { _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) if e1 != 0 { @@ -828,6 +849,49 @@ func Fsync(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fsmount(fd int, flags int, mountAttrs int) (fsfd int, err error) { + r0, _, e1 := Syscall(SYS_FSMOUNT, uintptr(fd), uintptr(flags), uintptr(mountAttrs)) + fsfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsopen(fsName string, flags int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsName) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_FSOPEN, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fspick(dirfd int, pathName string, flags int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathName) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_FSPICK, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getdents(fd int, buf []byte) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { @@ -1205,6 +1269,26 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func MoveMount(fromDirfd int, fromPathName string, toDirfd int, toPathName string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fromPathName) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(toPathName) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MOVE_MOUNT, uintptr(fromDirfd), 
uintptr(unsafe.Pointer(_p0)), uintptr(toDirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { @@ -1262,16 +1346,6 @@ func PivotRoot(newroot string, putold string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) if e1 != 0 { @@ -2088,3 +2162,13 @@ func setitimer(which int, newValue *Itimerval, oldValue *Itimerval) (err error) } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func rtSigprocmask(how int, set *Sigset_t, oldset *Sigset_t, sigsetsize uintptr) (err error) { + _, _, e1 := RawSyscall6(SYS_RT_SIGPROCMASK, uintptr(how), uintptr(unsafe.Pointer(set)), uintptr(unsafe.Pointer(oldset)), uintptr(sigsetsize), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index 88af526b7..07b549cc2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -287,46 +287,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID32, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID32, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID32, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID32, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) n = int(r0) @@ -451,16 +411,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), 
uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func futimesat(dirfd int, path string, times *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 2a0c4aa6a..5f481bf83 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -334,56 +334,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 4882bde3a..824cd52c7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -412,46 +412,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID32, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID32, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID32, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID32, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) 
(err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -618,16 +578,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func armSyncFileRange(fd int, flags int, off int64, n int64) (err error) { _, _, e1 := Syscall6(SYS_ARM_SYNC_FILE_RANGE, uintptr(fd), uintptr(flags), uintptr(off), uintptr(off>>32), uintptr(n), uintptr(n>>32)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 9f8c24e43..e77aecfe9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -289,56 +289,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go new file mode 100644 index 000000000..806ffd1e1 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go @@ -0,0 +1,487 @@ +// go run mksyscall.go -tags linux,loong64 syscall_linux.go syscall_linux_loong64.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +//go:build linux && loong64 +// +build linux,loong64 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_PWAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, buf *Statfs_t) (err error) { + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0) + egid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0) + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (off int64, err error) { + r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) + off = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) + written = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setfsgid(gid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + prev = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setfsuid(uid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + prev = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { + r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, buf *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func SyncFileRange(fd int, off int64, n int64, flags int) (err error) { + _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 
!= 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(n int, list *_Gid_t) (nn int, err error) { + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + nn = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(n int, list *_Gid_t) (err error) { + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from 
*RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { + r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)) + xaddr = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(cmdline) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index d7d6f4244..961a3afb7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -248,46 +248,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := 
RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -684,16 +644,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Alarm(seconds uint) (remaining uint, err error) { r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) remaining = uint(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index 7f1f8e653..ed05005e9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -278,56 +278,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index f933d0f51..d365b718f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -278,56 +278,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED 
BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 297d0a998..c3f1b8bbd 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -248,46 +248,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -684,16 +644,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Alarm(seconds uint) (remaining uint, err error) { r0, _, e1 := Syscall(SYS_ALARM, uintptr(seconds), 0, 0) remaining = uint(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go index 2e32e7a44..a6574cf98 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go +++ 
b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go @@ -308,46 +308,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { @@ -664,16 +624,6 @@ func getrlimit(resource int, rlim *rlimit32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func setrlimit(resource int, rlim *rlimit32) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func syncFileRange2(fd int, flags int, off int64, n int64) (err error) { _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE2, uintptr(fd), uintptr(flags), uintptr(off>>32), uintptr(off), uintptr(n>>32), uintptr(n)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 3c5317046..f40990264 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -349,56 +349,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 
0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index a00c6744e..9dfcc2997 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -349,56 +349,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index a1a9bcbbd..0b2923958 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -180,6 +180,17 @@ func Listen(s int, n int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func MemfdSecret(flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_MEMFD_SECRET, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { @@ -258,56 +269,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := 
RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index e0dabc602..6cde32237 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -319,56 +319,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) n = int64(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index 368623c0f..5253d65bf 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -329,56 +329,6 @@ func setfsuid(uid int) (prev int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, 
uintptr(rgid), uintptr(egid), uintptr(sgid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Shutdown(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 4af561a48..cdb2af5ae 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -405,6 +405,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -521,6 +531,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { @@ -1587,16 +1607,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index 3b90e9448..9d25f76b0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -405,6 +405,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -521,6 +531,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { @@ -1587,16 +1607,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 890f4ccd1..d3f803516 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -405,6 +405,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -521,6 +531,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { @@ -1587,16 +1607,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index c79f071fc..887188a52 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -405,6 +405,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + 
return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -521,6 +531,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { @@ -1587,16 +1607,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index a057fc5d3..6699a783e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -l32 -openbsd -tags openbsd,386 syscall_bsd.go syscall_openbsd.go syscall_openbsd_386.go +// go run mksyscall.go -l32 -openbsd -libc -tags openbsd,386 syscall_bsd.go syscall_openbsd.go syscall_openbsd_386.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && 386 @@ -16,7 +16,7 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -24,20 +24,28 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } +var libc_getgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgroups getgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgroups setgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -45,10 +53,14 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err return } +var libc_wait4_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_wait4 wait4 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -56,30 +68,42 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } +var libc_accept_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_accept accept "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_bind_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_bind bind "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_connect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connect connect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_SOCKET, 
uintptr(domain), uintptr(typ), uintptr(proto)) + r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -87,66 +111,94 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } +var libc_socket_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socket socket "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockopt getsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getpeername_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpeername getpeername "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockname_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockname getsockname "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(s int, how int) (err error) { - _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_shutdown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shutdown shutdown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = 
errnoErr(e1) } return } +var libc_socketpair_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socketpair socketpair "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { @@ -156,7 +208,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -164,6 +216,10 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl return } +var libc_recvfrom_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { @@ -173,17 +229,21 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sendto_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendto sendto "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -191,10 +251,14 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_recvmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -202,10 +266,14 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_sendmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -213,6 +281,10 @@ func kevent(kq 
int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne return } +var libc_kevent_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kevent kevent "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func utimes(path string, timeval *[2]Timeval) (err error) { @@ -221,27 +293,35 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_utimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimes utimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_futimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_futimes futimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -249,6 +329,10 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { return } +var libc_poll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_poll poll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Madvise(b []byte, behav int) (err error) { @@ -258,13 +342,17 @@ func Madvise(b []byte, behav int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_madvise_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_madvise madvise "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlock(b []byte) (err error) { @@ -274,23 +362,31 @@ func Mlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlock mlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlockall mlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mprotect(b []byte, prot int) (err error) { @@ -300,13 +396,17 @@ func Mprotect(b []byte, prot int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), 
uintptr(prot)) + _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mprotect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mprotect mprotect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Msync(b []byte, flags int) (err error) { @@ -316,13 +416,17 @@ func Msync(b []byte, flags int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_msync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_msync msync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlock(b []byte) (err error) { @@ -332,33 +436,45 @@ func Munlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlock munlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlockall munlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + _, _, e1 := syscall_rawSyscall(libc_pipe2_trampoline_addr, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_pipe2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getdents(fd int, buf []byte) (n int, err error) { @@ -368,7 +484,7 @@ func Getdents(fd int, buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_getdents_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -376,6 +492,10 @@ func Getdents(fd int, buf []byte) (n int, err error) { return } +var libc_getdents_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getdents getdents "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getcwd(buf []byte) (n int, err error) { @@ -385,7 +505,7 @@ func Getcwd(buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -393,16 +513,32 @@ func Getcwd(buf []byte) (n int, err error) { return } +var libc_getcwd_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getcwd getcwd "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, 
uintptr(fd), uintptr(req), uintptr(arg)) + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) } return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -412,17 +548,21 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -430,6 +570,10 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, return } +var libc_ppoll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ppoll ppoll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Access(path string, mode uint32) (err error) { @@ -438,23 +582,31 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_access_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_access access "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + _, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_adjtime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_adjtime adjtime "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chdir(path string) (err error) { @@ -463,13 +615,17 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chdir_trampoline_addr 
uintptr + +//go:cgo_import_dynamic libc_chdir chdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chflags(path string, flags int) (err error) { @@ -478,13 +634,17 @@ func Chflags(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chflags chflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chmod(path string, mode uint32) (err error) { @@ -493,13 +653,17 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chmod chmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chown(path string, uid int, gid int) (err error) { @@ -508,13 +672,17 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chown chown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chroot(path string) (err error) { @@ -523,27 +691,49 @@ func Chroot(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chroot_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chroot chroot "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_clock_gettime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_close_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_close close "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(fd int) (nfd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) nfd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -551,33 +741,49 @@ func Dup(fd int) (nfd int, err error) { return } +var libc_dup_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup dup "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(from int, to int) (err 
error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_dup2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup2 dup2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup3(from int, to int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_dup3_trampoline_addr, uintptr(from), uintptr(to), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_dup3_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup3 dup3 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT, uintptr(code), 0, 0) + syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) return } +var libc_exit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_exit exit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -586,43 +792,59 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_faccessat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_faccessat faccessat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchdir fchdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchflags(fd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchflags fchflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmod fchmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -631,23 +853,31 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmodat_trampoline_addr uintptr + +//go:cgo_import_dynamic 
libc_fchmodat fchmodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchown fchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { @@ -656,27 +886,35 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchownat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchownat fchownat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_flock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_flock flock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -684,16 +922,24 @@ func Fpathconf(fd int, name int) (val int, err error) { return } +var libc_fpathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstat fstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { @@ -702,71 +948,99 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = 
errnoErr(e1) } return } +var libc_fstatfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fsync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fsync fsync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall6(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0) + _, _, e1 := syscall_syscall(libc_ftruncate_trampoline_addr, uintptr(fd), uintptr(length), uintptr(length>>32)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ftruncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) egid = int(r0) return } +var libc_getegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getegid getegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_geteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_geteuid geteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) gid = int(r0) return } +var libc_getgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgid getgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) pgid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -774,34 +1048,50 @@ func Getpgid(pid int) (pgid int, err error) { return } +var libc_getpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgid getpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pgrp int) { - r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) pgrp = int(r0) return } +var libc_getpgrp_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) pid = int(r0) return } +var libc_getpid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpid getpid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) ppid = int(r0) return } +var libc_getppid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getppid getppid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, 
who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0) prio = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -809,20 +1099,28 @@ func Getpriority(which int, who int) (prio int, err error) { return } +var libc_getpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpriority getpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrtable() (rtable int, err error) { - r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getrtable_trampoline_addr, 0, 0, 0) rtable = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -830,20 +1128,28 @@ func Getrtable() (rtable int, err error) { return } +var libc_getrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrtable getrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrusage_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrusage getrusage "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) sid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -851,46 +1157,66 @@ func Getsid(pid int) (sid int, err error) { return } +var libc_getsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsid getsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_gettimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_getuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getuid getuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { - r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + r0, _, _ := syscall_syscall(libc_issetugid_trampoline_addr, 0, 0, 0) tainted = bool(r0 != 0) return } +var libc_issetugid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_issetugid issetugid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func 
Kill(pid int, signum syscall.Signal) (err error) { - _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_kill_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kill kill "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kqueue() (fd int, err error) { - r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -898,6 +1224,10 @@ func Kqueue() (fd int, err error) { return } +var libc_kqueue_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kqueue kqueue "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lchown(path string, uid int, gid int) (err error) { @@ -906,13 +1236,17 @@ func Lchown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lchown lchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Link(path string, link string) (err error) { @@ -926,13 +1260,17 @@ func Link(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_link_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_link link "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { @@ -946,23 +1284,31 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err er if err != nil { return } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_linkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_linkat linkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_listen_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_listen listen "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lstat(path string, stat *Stat_t) (err error) { @@ -971,13 +1317,17 @@ func Lstat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var 
libc_lstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lstat lstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdir(path string, mode uint32) (err error) { @@ -986,13 +1336,17 @@ func Mkdir(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdir mkdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdirat(dirfd int, path string, mode uint32) (err error) { @@ -1001,13 +1355,17 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdirat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifo(path string, mode uint32) (err error) { @@ -1016,13 +1374,17 @@ func Mkfifo(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifo_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifoat(dirfd int, path string, mode uint32) (err error) { @@ -1031,13 +1393,17 @@ func Mkfifoat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkfifoat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifoat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknod(path string, mode uint32, dev int) (err error) { @@ -1046,13 +1412,17 @@ func Mknod(path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknod mknod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { @@ -1061,23 +1431,31 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + _, _, e1 := syscall_syscall6(libc_mknodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var 
libc_mknodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknodat mknodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_nanosleep_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Open(path string, mode int, perm uint32) (fd int, err error) { @@ -1086,7 +1464,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1094,6 +1472,10 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } +var libc_open_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_open open "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { @@ -1102,7 +1484,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1110,6 +1492,10 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { return } +var libc_openat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_openat openat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pathconf(path string, name int) (val int, err error) { @@ -1118,7 +1504,7 @@ func Pathconf(path string, name int) (val int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1126,6 +1512,10 @@ func Pathconf(path string, name int) (val int, err error) { return } +var libc_pathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pathconf pathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pread(fd int, p []byte, offset int64) (n int, err error) { @@ -1135,7 +1525,7 @@ func pread(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1143,6 +1533,10 @@ func pread(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pread_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pread pread "libc.so" + // THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pwrite(fd int, p []byte, offset int64) (n int, err error) { @@ -1152,7 +1546,7 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1160,6 +1554,10 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pwrite_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwrite pwrite "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func read(fd int, p []byte) (n int, err error) { @@ -1169,7 +1567,7 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1177,6 +1575,10 @@ func read(fd int, p []byte) (n int, err error) { return } +var libc_read_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_read read "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlink(path string, buf []byte) (n int, err error) { @@ -1191,7 +1593,7 @@ func Readlink(path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1199,6 +1601,10 @@ func Readlink(path string, buf []byte) (n int, err error) { return } +var libc_readlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlink readlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { @@ -1213,7 +1619,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1221,6 +1627,10 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { return } +var libc_readlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlinkat readlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rename(from string, to string) (err error) { @@ -1234,13 +1644,17 @@ func Rename(from string, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rename_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rename rename "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Renameat(fromfd 
int, from string, tofd int, to string) (err error) { @@ -1254,13 +1668,17 @@ func Renameat(fromfd int, from string, tofd int, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_renameat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameat renameat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Revoke(path string) (err error) { @@ -1269,13 +1687,17 @@ func Revoke(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_revoke_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_revoke revoke "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rmdir(path string) (err error) { @@ -1284,17 +1706,21 @@ func Rmdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rmdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rmdir rmdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(whence), 0) + r0, r1, e1 := syscall_syscall6(libc_lseek_trampoline_addr, uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(whence), 0, 0) newoffset = int64(int64(r1)<<32 | int64(r0)) if e1 != 0 { err = errnoErr(e1) @@ -1302,10 +1728,14 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } +var libc_lseek_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lseek lseek "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1313,36 +1743,52 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err return } +var libc_select_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_select select "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setegid setegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seteuid(euid int) (err error) { - _, _, 
e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_seteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_seteuid seteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(gid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgid setgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setlogin(name string) (err error) { @@ -1351,97 +1797,119 @@ func Setlogin(name string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setlogin_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setlogin setlogin "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpgid setpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpriority setpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setregid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setregid setregid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setreuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setreuid setreuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + _, _, e1 := syscall_rawSyscall(libc_setresgid_trampoline_addr, uintptr(rgid), uintptr(egid), uintptr(sgid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresgid setresgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := 
RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + _, _, e1 := syscall_rawSyscall(libc_setresuid_trampoline_addr, uintptr(ruid), uintptr(euid), uintptr(suid)) if e1 != 0 { err = errnoErr(e1) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +var libc_setresuid_trampoline_addr uintptr -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} +//go:cgo_import_dynamic libc_setresuid setresuid "libc.so" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrtable(rtable int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrtable setrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1449,26 +1917,38 @@ func Setsid() (pid int, err error) { return } +var libc_setsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsid setsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_settimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setuid setuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Stat(path string, stat *Stat_t) (err error) { @@ -1477,13 +1957,17 @@ func Stat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_stat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_stat stat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Statfs(path string, stat *Statfs_t) (err error) { @@ -1492,13 +1976,17 @@ func Statfs(path string, stat *Statfs_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_statfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_statfs statfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlink(path string, link string) (err error) { @@ -1512,13 
+2000,17 @@ func Symlink(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlink symlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { @@ -1532,23 +2024,31 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlinkat symlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() (err error) { - _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sync sync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Truncate(path string, length int64) (err error) { @@ -1557,21 +2057,29 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0) + _, _, e1 := syscall_syscall(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(length), uintptr(length>>32)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_truncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_truncate truncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(newmask int) (oldmask int) { - r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) oldmask = int(r0) return } +var libc_umask_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_umask umask "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlink(path string) (err error) { @@ -1580,13 +2088,17 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlink unlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlinkat(dirfd int, path string, flags int) (err error) { @@ -1595,13 +2107,17 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so" + // 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unmount(path string, flags int) (err error) { @@ -1610,13 +2126,17 @@ func Unmount(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unmount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unmount unmount "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func write(fd int, p []byte) (n int, err error) { @@ -1626,7 +2146,7 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1634,10 +2154,14 @@ func write(fd int, p []byte) (n int, err error) { return } +var libc_write_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_write write "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), uintptr(pos>>32), 0) + r0, _, e1 := syscall_syscall9(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos), uintptr(pos>>32), 0, 0) ret = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -1645,20 +2169,28 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( return } +var libc_mmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mmap mmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munmap munmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1669,7 +2201,7 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1685,9 +2217,13 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error if err != nil { return } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), 
uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } + +var libc_utimensat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s new file mode 100644 index 000000000..04f0de34b --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -0,0 +1,664 @@ +// go run mkasm.go openbsd 386 +// Code generated by the command above; DO NOT EDIT. + +#include "textflag.h" + +TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgroups(SB) +GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getgroups_trampoline_addr(SB)/4, $libc_getgroups_trampoline<>(SB) + +TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgroups(SB) +GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setgroups_trampoline_addr(SB)/4, $libc_setgroups_trampoline<>(SB) + +TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_wait4(SB) +GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $4 +DATA ·libc_wait4_trampoline_addr(SB)/4, $libc_wait4_trampoline<>(SB) + +TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_accept(SB) +GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $4 +DATA ·libc_accept_trampoline_addr(SB)/4, $libc_accept_trampoline<>(SB) + +TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_bind(SB) +GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $4 +DATA ·libc_bind_trampoline_addr(SB)/4, $libc_bind_trampoline<>(SB) + +TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connect(SB) +GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $4 +DATA ·libc_connect_trampoline_addr(SB)/4, $libc_connect_trampoline<>(SB) + +TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socket(SB) +GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $4 +DATA ·libc_socket_trampoline_addr(SB)/4, $libc_socket_trampoline<>(SB) + +TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockopt(SB) +GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getsockopt_trampoline_addr(SB)/4, $libc_getsockopt_trampoline<>(SB) + +TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsockopt(SB) +GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setsockopt_trampoline_addr(SB)/4, $libc_setsockopt_trampoline<>(SB) + +TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpeername(SB) +GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpeername_trampoline_addr(SB)/4, $libc_getpeername_trampoline<>(SB) + +TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockname(SB) +GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getsockname_trampoline_addr(SB)/4, $libc_getsockname_trampoline<>(SB) + +TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shutdown(SB) +GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $4 +DATA ·libc_shutdown_trampoline_addr(SB)/4, $libc_shutdown_trampoline<>(SB) + +TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socketpair(SB) +GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $4 +DATA ·libc_socketpair_trampoline_addr(SB)/4, $libc_socketpair_trampoline<>(SB) + +TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvfrom(SB) +GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $4 +DATA ·libc_recvfrom_trampoline_addr(SB)/4, $libc_recvfrom_trampoline<>(SB) + +TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 + JMP 
libc_sendto(SB) +GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $4 +DATA ·libc_sendto_trampoline_addr(SB)/4, $libc_sendto_trampoline<>(SB) + +TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvmsg(SB) +GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $4 +DATA ·libc_recvmsg_trampoline_addr(SB)/4, $libc_recvmsg_trampoline<>(SB) + +TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendmsg(SB) +GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $4 +DATA ·libc_sendmsg_trampoline_addr(SB)/4, $libc_sendmsg_trampoline<>(SB) + +TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kevent(SB) +GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $4 +DATA ·libc_kevent_trampoline_addr(SB)/4, $libc_kevent_trampoline<>(SB) + +TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimes(SB) +GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $4 +DATA ·libc_utimes_trampoline_addr(SB)/4, $libc_utimes_trampoline<>(SB) + +TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_futimes(SB) +GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $4 +DATA ·libc_futimes_trampoline_addr(SB)/4, $libc_futimes_trampoline<>(SB) + +TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_poll(SB) +GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $4 +DATA ·libc_poll_trampoline_addr(SB)/4, $libc_poll_trampoline<>(SB) + +TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_madvise(SB) +GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $4 +DATA ·libc_madvise_trampoline_addr(SB)/4, $libc_madvise_trampoline<>(SB) + +TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlock(SB) +GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mlock_trampoline_addr(SB)/4, $libc_mlock_trampoline<>(SB) + +TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlockall(SB) +GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mlockall_trampoline_addr(SB)/4, $libc_mlockall_trampoline<>(SB) + +TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mprotect(SB) +GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mprotect_trampoline_addr(SB)/4, $libc_mprotect_trampoline<>(SB) + +TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_msync(SB) +GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $4 +DATA ·libc_msync_trampoline_addr(SB)/4, $libc_msync_trampoline<>(SB) + +TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlock(SB) +GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $4 +DATA ·libc_munlock_trampoline_addr(SB)/4, $libc_munlock_trampoline<>(SB) + +TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlockall(SB) +GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $4 +DATA ·libc_munlockall_trampoline_addr(SB)/4, $libc_munlockall_trampoline<>(SB) + +TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pipe2(SB) +GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pipe2_trampoline_addr(SB)/4, $libc_pipe2_trampoline<>(SB) + +TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getdents(SB) +GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getdents_trampoline_addr(SB)/4, $libc_getdents_trampoline<>(SB) + +TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) +GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getcwd_trampoline_addr(SB)/4, $libc_getcwd_trampoline<>(SB) + +TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ioctl(SB) +GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $4 +DATA ·libc_ioctl_trampoline_addr(SB)/4, $libc_ioctl_trampoline<>(SB) + +TEXT 
libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4 +DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB) + +TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ppoll(SB) +GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4 +DATA ·libc_ppoll_trampoline_addr(SB)/4, $libc_ppoll_trampoline<>(SB) + +TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_access(SB) +GLOBL ·libc_access_trampoline_addr(SB), RODATA, $4 +DATA ·libc_access_trampoline_addr(SB)/4, $libc_access_trampoline<>(SB) + +TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_adjtime(SB) +GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $4 +DATA ·libc_adjtime_trampoline_addr(SB)/4, $libc_adjtime_trampoline<>(SB) + +TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chdir(SB) +GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chdir_trampoline_addr(SB)/4, $libc_chdir_trampoline<>(SB) + +TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chflags(SB) +GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chflags_trampoline_addr(SB)/4, $libc_chflags_trampoline<>(SB) + +TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chmod(SB) +GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chmod_trampoline_addr(SB)/4, $libc_chmod_trampoline<>(SB) + +TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chown(SB) +GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chown_trampoline_addr(SB)/4, $libc_chown_trampoline<>(SB) + +TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chroot(SB) +GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chroot_trampoline_addr(SB)/4, $libc_chroot_trampoline<>(SB) + +TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) +GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $4 +DATA ·libc_clock_gettime_trampoline_addr(SB)/4, $libc_clock_gettime_trampoline<>(SB) + +TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_close(SB) +GLOBL ·libc_close_trampoline_addr(SB), RODATA, $4 +DATA ·libc_close_trampoline_addr(SB)/4, $libc_close_trampoline<>(SB) + +TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup(SB) +GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $4 +DATA ·libc_dup_trampoline_addr(SB)/4, $libc_dup_trampoline<>(SB) + +TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup2(SB) +GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $4 +DATA ·libc_dup2_trampoline_addr(SB)/4, $libc_dup2_trampoline<>(SB) + +TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup3(SB) +GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $4 +DATA ·libc_dup3_trampoline_addr(SB)/4, $libc_dup3_trampoline<>(SB) + +TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_exit(SB) +GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $4 +DATA ·libc_exit_trampoline_addr(SB)/4, $libc_exit_trampoline<>(SB) + +TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_faccessat(SB) +GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_faccessat_trampoline_addr(SB)/4, $libc_faccessat_trampoline<>(SB) + +TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchdir(SB) +GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchdir_trampoline_addr(SB)/4, $libc_fchdir_trampoline<>(SB) + +TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchflags(SB) +GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchflags_trampoline_addr(SB)/4, $libc_fchflags_trampoline<>(SB) + +TEXT 
libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmod(SB) +GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchmod_trampoline_addr(SB)/4, $libc_fchmod_trampoline<>(SB) + +TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmodat(SB) +GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchmodat_trampoline_addr(SB)/4, $libc_fchmodat_trampoline<>(SB) + +TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchown(SB) +GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchown_trampoline_addr(SB)/4, $libc_fchown_trampoline<>(SB) + +TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchownat(SB) +GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchownat_trampoline_addr(SB)/4, $libc_fchownat_trampoline<>(SB) + +TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_flock(SB) +GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $4 +DATA ·libc_flock_trampoline_addr(SB)/4, $libc_flock_trampoline<>(SB) + +TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fpathconf(SB) +GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fpathconf_trampoline_addr(SB)/4, $libc_fpathconf_trampoline<>(SB) + +TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstat(SB) +GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fstat_trampoline_addr(SB)/4, $libc_fstat_trampoline<>(SB) + +TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatat(SB) +GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fstatat_trampoline_addr(SB)/4, $libc_fstatat_trampoline<>(SB) + +TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatfs(SB) +GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fstatfs_trampoline_addr(SB)/4, $libc_fstatfs_trampoline<>(SB) + +TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fsync(SB) +GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fsync_trampoline_addr(SB)/4, $libc_fsync_trampoline<>(SB) + +TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ftruncate(SB) +GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $4 +DATA ·libc_ftruncate_trampoline_addr(SB)/4, $libc_ftruncate_trampoline<>(SB) + +TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getegid(SB) +GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getegid_trampoline_addr(SB)/4, $libc_getegid_trampoline<>(SB) + +TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_geteuid(SB) +GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_geteuid_trampoline_addr(SB)/4, $libc_geteuid_trampoline<>(SB) + +TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgid(SB) +GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getgid_trampoline_addr(SB)/4, $libc_getgid_trampoline<>(SB) + +TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgid(SB) +GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpgid_trampoline_addr(SB)/4, $libc_getpgid_trampoline<>(SB) + +TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgrp(SB) +GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpgrp_trampoline_addr(SB)/4, $libc_getpgrp_trampoline<>(SB) + +TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpid(SB) +GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpid_trampoline_addr(SB)/4, $libc_getpid_trampoline<>(SB) + +TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getppid(SB) +GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $4 +DATA 
·libc_getppid_trampoline_addr(SB)/4, $libc_getppid_trampoline<>(SB) + +TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpriority(SB) +GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpriority_trampoline_addr(SB)/4, $libc_getpriority_trampoline<>(SB) + +TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrlimit(SB) +GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getrlimit_trampoline_addr(SB)/4, $libc_getrlimit_trampoline<>(SB) + +TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrtable(SB) +GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getrtable_trampoline_addr(SB)/4, $libc_getrtable_trampoline<>(SB) + +TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrusage(SB) +GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getrusage_trampoline_addr(SB)/4, $libc_getrusage_trampoline<>(SB) + +TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsid(SB) +GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getsid_trampoline_addr(SB)/4, $libc_getsid_trampoline<>(SB) + +TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) +GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $4 +DATA ·libc_gettimeofday_trampoline_addr(SB)/4, $libc_gettimeofday_trampoline<>(SB) + +TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getuid(SB) +GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getuid_trampoline_addr(SB)/4, $libc_getuid_trampoline<>(SB) + +TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_issetugid(SB) +GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_issetugid_trampoline_addr(SB)/4, $libc_issetugid_trampoline<>(SB) + +TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kill(SB) +GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $4 +DATA ·libc_kill_trampoline_addr(SB)/4, $libc_kill_trampoline<>(SB) + +TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kqueue(SB) +GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $4 +DATA ·libc_kqueue_trampoline_addr(SB)/4, $libc_kqueue_trampoline<>(SB) + +TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lchown(SB) +GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $4 +DATA ·libc_lchown_trampoline_addr(SB)/4, $libc_lchown_trampoline<>(SB) + +TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_link(SB) +GLOBL ·libc_link_trampoline_addr(SB), RODATA, $4 +DATA ·libc_link_trampoline_addr(SB)/4, $libc_link_trampoline<>(SB) + +TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_linkat(SB) +GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_linkat_trampoline_addr(SB)/4, $libc_linkat_trampoline<>(SB) + +TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_listen(SB) +GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $4 +DATA ·libc_listen_trampoline_addr(SB)/4, $libc_listen_trampoline<>(SB) + +TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lstat(SB) +GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_lstat_trampoline_addr(SB)/4, $libc_lstat_trampoline<>(SB) + +TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdir(SB) +GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mkdir_trampoline_addr(SB)/4, $libc_mkdir_trampoline<>(SB) + +TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdirat(SB) +GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mkdirat_trampoline_addr(SB)/4, $libc_mkdirat_trampoline<>(SB) + +TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 + JMP 
libc_mkfifo(SB) +GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mkfifo_trampoline_addr(SB)/4, $libc_mkfifo_trampoline<>(SB) + +TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifoat(SB) +GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mkfifoat_trampoline_addr(SB)/4, $libc_mkfifoat_trampoline<>(SB) + +TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknod(SB) +GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mknod_trampoline_addr(SB)/4, $libc_mknod_trampoline<>(SB) + +TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknodat(SB) +GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB) + +TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_nanosleep(SB) +GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4 +DATA ·libc_nanosleep_trampoline_addr(SB)/4, $libc_nanosleep_trampoline<>(SB) + +TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_open(SB) +GLOBL ·libc_open_trampoline_addr(SB), RODATA, $4 +DATA ·libc_open_trampoline_addr(SB)/4, $libc_open_trampoline<>(SB) + +TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_openat(SB) +GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_openat_trampoline_addr(SB)/4, $libc_openat_trampoline<>(SB) + +TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pathconf(SB) +GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pathconf_trampoline_addr(SB)/4, $libc_pathconf_trampoline<>(SB) + +TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pread(SB) +GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pread_trampoline_addr(SB)/4, $libc_pread_trampoline<>(SB) + +TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwrite(SB) +GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pwrite_trampoline_addr(SB)/4, $libc_pwrite_trampoline<>(SB) + +TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_read(SB) +GLOBL ·libc_read_trampoline_addr(SB), RODATA, $4 +DATA ·libc_read_trampoline_addr(SB)/4, $libc_read_trampoline<>(SB) + +TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlink(SB) +GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $4 +DATA ·libc_readlink_trampoline_addr(SB)/4, $libc_readlink_trampoline<>(SB) + +TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlinkat(SB) +GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_readlinkat_trampoline_addr(SB)/4, $libc_readlinkat_trampoline<>(SB) + +TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rename(SB) +GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $4 +DATA ·libc_rename_trampoline_addr(SB)/4, $libc_rename_trampoline<>(SB) + +TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameat(SB) +GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_renameat_trampoline_addr(SB)/4, $libc_renameat_trampoline<>(SB) + +TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_revoke(SB) +GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $4 +DATA ·libc_revoke_trampoline_addr(SB)/4, $libc_revoke_trampoline<>(SB) + +TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rmdir(SB) +GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $4 +DATA ·libc_rmdir_trampoline_addr(SB)/4, $libc_rmdir_trampoline<>(SB) + +TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lseek(SB) +GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $4 +DATA ·libc_lseek_trampoline_addr(SB)/4, $libc_lseek_trampoline<>(SB) + +TEXT 
libc_select_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_select(SB) +GLOBL ·libc_select_trampoline_addr(SB), RODATA, $4 +DATA ·libc_select_trampoline_addr(SB)/4, $libc_select_trampoline<>(SB) + +TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setegid(SB) +GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setegid_trampoline_addr(SB)/4, $libc_setegid_trampoline<>(SB) + +TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_seteuid(SB) +GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_seteuid_trampoline_addr(SB)/4, $libc_seteuid_trampoline<>(SB) + +TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgid(SB) +GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setgid_trampoline_addr(SB)/4, $libc_setgid_trampoline<>(SB) + +TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setlogin(SB) +GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setlogin_trampoline_addr(SB)/4, $libc_setlogin_trampoline<>(SB) + +TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpgid(SB) +GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setpgid_trampoline_addr(SB)/4, $libc_setpgid_trampoline<>(SB) + +TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpriority(SB) +GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setpriority_trampoline_addr(SB)/4, $libc_setpriority_trampoline<>(SB) + +TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setregid(SB) +GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setregid_trampoline_addr(SB)/4, $libc_setregid_trampoline<>(SB) + +TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setreuid(SB) +GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setreuid_trampoline_addr(SB)/4, $libc_setreuid_trampoline<>(SB) + +TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresgid(SB) +GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setresgid_trampoline_addr(SB)/4, $libc_setresgid_trampoline<>(SB) + +TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresuid(SB) +GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setresuid_trampoline_addr(SB)/4, $libc_setresuid_trampoline<>(SB) + +TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrtable(SB) +GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setrtable_trampoline_addr(SB)/4, $libc_setrtable_trampoline<>(SB) + +TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsid(SB) +GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setsid_trampoline_addr(SB)/4, $libc_setsid_trampoline<>(SB) + +TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_settimeofday(SB) +GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $4 +DATA ·libc_settimeofday_trampoline_addr(SB)/4, $libc_settimeofday_trampoline<>(SB) + +TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setuid(SB) +GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setuid_trampoline_addr(SB)/4, $libc_setuid_trampoline<>(SB) + +TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_stat(SB) +GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_stat_trampoline_addr(SB)/4, $libc_stat_trampoline<>(SB) + +TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_statfs(SB) +GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $4 +DATA ·libc_statfs_trampoline_addr(SB)/4, $libc_statfs_trampoline<>(SB) + +TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlink(SB) +GLOBL 
·libc_symlink_trampoline_addr(SB), RODATA, $4 +DATA ·libc_symlink_trampoline_addr(SB)/4, $libc_symlink_trampoline<>(SB) + +TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlinkat(SB) +GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_symlinkat_trampoline_addr(SB)/4, $libc_symlinkat_trampoline<>(SB) + +TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sync(SB) +GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $4 +DATA ·libc_sync_trampoline_addr(SB)/4, $libc_sync_trampoline<>(SB) + +TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_truncate(SB) +GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $4 +DATA ·libc_truncate_trampoline_addr(SB)/4, $libc_truncate_trampoline<>(SB) + +TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_umask(SB) +GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $4 +DATA ·libc_umask_trampoline_addr(SB)/4, $libc_umask_trampoline<>(SB) + +TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlink(SB) +GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unlink_trampoline_addr(SB)/4, $libc_unlink_trampoline<>(SB) + +TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlinkat(SB) +GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unlinkat_trampoline_addr(SB)/4, $libc_unlinkat_trampoline<>(SB) + +TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unmount(SB) +GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unmount_trampoline_addr(SB)/4, $libc_unmount_trampoline<>(SB) + +TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_write(SB) +GLOBL ·libc_write_trampoline_addr(SB), RODATA, $4 +DATA ·libc_write_trampoline_addr(SB)/4, $libc_write_trampoline<>(SB) + +TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mmap(SB) +GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mmap_trampoline_addr(SB)/4, $libc_mmap_trampoline<>(SB) + +TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munmap(SB) +GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4 +DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB) + +TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) +GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 04db8fa2f..1e775fe05 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -openbsd -tags openbsd,amd64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_amd64.go +// go run mksyscall.go -openbsd -libc -tags openbsd,amd64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && amd64 @@ -16,7 +16,7 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -24,20 +24,28 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } +var libc_getgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgroups getgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgroups setgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -45,10 +53,14 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err return } +var libc_wait4_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_wait4 wait4 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -56,30 +68,42 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } +var libc_accept_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_accept accept "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_bind_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_bind bind "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_connect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connect connect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_SOCKET, 
uintptr(domain), uintptr(typ), uintptr(proto)) + r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -87,66 +111,94 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } +var libc_socket_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socket socket "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockopt getsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getpeername_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpeername getpeername "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockname_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockname getsockname "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(s int, how int) (err error) { - _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_shutdown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shutdown shutdown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = 
errnoErr(e1) } return } +var libc_socketpair_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socketpair socketpair "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { @@ -156,7 +208,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -164,6 +216,10 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl return } +var libc_recvfrom_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { @@ -173,17 +229,21 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sendto_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendto sendto "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -191,10 +251,14 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_recvmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -202,10 +266,14 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_sendmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -213,6 +281,10 @@ func kevent(kq 
int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne return } +var libc_kevent_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kevent kevent "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func utimes(path string, timeval *[2]Timeval) (err error) { @@ -221,27 +293,35 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_utimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimes utimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_futimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_futimes futimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -249,6 +329,10 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { return } +var libc_poll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_poll poll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Madvise(b []byte, behav int) (err error) { @@ -258,13 +342,17 @@ func Madvise(b []byte, behav int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_madvise_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_madvise madvise "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlock(b []byte) (err error) { @@ -274,23 +362,31 @@ func Mlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlock mlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlockall mlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mprotect(b []byte, prot int) (err error) { @@ -300,13 +396,17 @@ func Mprotect(b []byte, prot int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), 
uintptr(prot)) + _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mprotect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mprotect mprotect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Msync(b []byte, flags int) (err error) { @@ -316,13 +416,17 @@ func Msync(b []byte, flags int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_msync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_msync msync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlock(b []byte) (err error) { @@ -332,33 +436,45 @@ func Munlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlock munlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlockall munlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + _, _, e1 := syscall_rawSyscall(libc_pipe2_trampoline_addr, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_pipe2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getdents(fd int, buf []byte) (n int, err error) { @@ -368,7 +484,7 @@ func Getdents(fd int, buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_getdents_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -376,6 +492,10 @@ func Getdents(fd int, buf []byte) (n int, err error) { return } +var libc_getdents_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getdents getdents "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getcwd(buf []byte) (n int, err error) { @@ -385,7 +505,7 @@ func Getcwd(buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -393,16 +513,32 @@ func Getcwd(buf []byte) (n int, err error) { return } +var libc_getcwd_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getcwd getcwd "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, 
uintptr(fd), uintptr(req), uintptr(arg)) + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) } return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -412,17 +548,21 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -430,6 +570,10 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, return } +var libc_ppoll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ppoll ppoll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Access(path string, mode uint32) (err error) { @@ -438,23 +582,31 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_access_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_access access "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + _, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_adjtime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_adjtime adjtime "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chdir(path string) (err error) { @@ -463,13 +615,17 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chdir_trampoline_addr 
uintptr + +//go:cgo_import_dynamic libc_chdir chdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chflags(path string, flags int) (err error) { @@ -478,13 +634,17 @@ func Chflags(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chflags chflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chmod(path string, mode uint32) (err error) { @@ -493,13 +653,17 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chmod chmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chown(path string, uid int, gid int) (err error) { @@ -508,13 +672,17 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chown chown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chroot(path string) (err error) { @@ -523,27 +691,49 @@ func Chroot(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chroot_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chroot chroot "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_clock_gettime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_close_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_close close "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(fd int) (nfd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) nfd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -551,33 +741,49 @@ func Dup(fd int) (nfd int, err error) { return } +var libc_dup_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup dup "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(from int, to int) (err 
error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_dup2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup2 dup2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup3(from int, to int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_dup3_trampoline_addr, uintptr(from), uintptr(to), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_dup3_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup3 dup3 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT, uintptr(code), 0, 0) + syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) return } +var libc_exit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_exit exit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -586,43 +792,59 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_faccessat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_faccessat faccessat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchdir fchdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchflags(fd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchflags fchflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmod fchmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -631,23 +853,31 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmodat_trampoline_addr uintptr + +//go:cgo_import_dynamic 
libc_fchmodat fchmodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchown fchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { @@ -656,27 +886,35 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchownat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchownat fchownat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_flock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_flock flock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -684,16 +922,24 @@ func Fpathconf(fd int, name int) (val int, err error) { return } +var libc_fpathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstat fstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { @@ -702,71 +948,99 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = 
errnoErr(e1) } return } +var libc_fstatfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fsync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fsync fsync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length)) + _, _, e1 := syscall_syscall(libc_ftruncate_trampoline_addr, uintptr(fd), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ftruncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) egid = int(r0) return } +var libc_getegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getegid getegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_geteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_geteuid geteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) gid = int(r0) return } +var libc_getgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgid getgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) pgid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -774,34 +1048,50 @@ func Getpgid(pid int) (pgid int, err error) { return } +var libc_getpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgid getpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pgrp int) { - r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) pgrp = int(r0) return } +var libc_getpgrp_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) pid = int(r0) return } +var libc_getpid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpid getpid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) ppid = int(r0) return } +var libc_getppid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getppid getppid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 
:= Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0) prio = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -809,20 +1099,28 @@ func Getpriority(which int, who int) (prio int, err error) { return } +var libc_getpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpriority getpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrtable() (rtable int, err error) { - r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getrtable_trampoline_addr, 0, 0, 0) rtable = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -830,20 +1128,28 @@ func Getrtable() (rtable int, err error) { return } +var libc_getrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrtable getrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrusage_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrusage getrusage "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) sid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -851,46 +1157,66 @@ func Getsid(pid int) (sid int, err error) { return } +var libc_getsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsid getsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_gettimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_getuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getuid getuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { - r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + r0, _, _ := syscall_syscall(libc_issetugid_trampoline_addr, 0, 0, 0) tainted = bool(r0 != 0) return } +var libc_issetugid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_issetugid issetugid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kill(pid int, signum syscall.Signal) (err error) { 
- _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_kill_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kill kill "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kqueue() (fd int, err error) { - r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -898,6 +1224,10 @@ func Kqueue() (fd int, err error) { return } +var libc_kqueue_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kqueue kqueue "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lchown(path string, uid int, gid int) (err error) { @@ -906,13 +1236,17 @@ func Lchown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lchown lchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Link(path string, link string) (err error) { @@ -926,13 +1260,17 @@ func Link(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_link_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_link link "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { @@ -946,23 +1284,31 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err er if err != nil { return } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_linkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_linkat linkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_listen_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_listen listen "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lstat(path string, stat *Stat_t) (err error) { @@ -971,13 +1317,17 @@ func Lstat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lstat_trampoline_addr uintptr + 
+//go:cgo_import_dynamic libc_lstat lstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdir(path string, mode uint32) (err error) { @@ -986,13 +1336,17 @@ func Mkdir(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdir mkdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdirat(dirfd int, path string, mode uint32) (err error) { @@ -1001,13 +1355,17 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdirat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifo(path string, mode uint32) (err error) { @@ -1016,13 +1374,17 @@ func Mkfifo(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifo_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifoat(dirfd int, path string, mode uint32) (err error) { @@ -1031,13 +1393,17 @@ func Mkfifoat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkfifoat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifoat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknod(path string, mode uint32, dev int) (err error) { @@ -1046,13 +1412,17 @@ func Mknod(path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknod mknod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { @@ -1061,23 +1431,31 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + _, _, e1 := syscall_syscall6(libc_mknodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknodat_trampoline_addr uintptr + 
+//go:cgo_import_dynamic libc_mknodat mknodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_nanosleep_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Open(path string, mode int, perm uint32) (fd int, err error) { @@ -1086,7 +1464,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1094,6 +1472,10 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } +var libc_open_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_open open "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { @@ -1102,7 +1484,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1110,6 +1492,10 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { return } +var libc_openat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_openat openat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pathconf(path string, name int) (val int, err error) { @@ -1118,7 +1504,7 @@ func Pathconf(path string, name int) (val int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1126,6 +1512,10 @@ func Pathconf(path string, name int) (val int, err error) { return } +var libc_pathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pathconf pathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pread(fd int, p []byte, offset int64) (n int, err error) { @@ -1135,7 +1525,7 @@ func pread(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1143,6 +1533,10 @@ func pread(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pread_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pread pread "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pwrite(fd int, p []byte, 
offset int64) (n int, err error) { @@ -1152,7 +1546,7 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1160,6 +1554,10 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pwrite_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwrite pwrite "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func read(fd int, p []byte) (n int, err error) { @@ -1169,7 +1567,7 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1177,6 +1575,10 @@ func read(fd int, p []byte) (n int, err error) { return } +var libc_read_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_read read "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlink(path string, buf []byte) (n int, err error) { @@ -1191,7 +1593,7 @@ func Readlink(path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1199,6 +1601,10 @@ func Readlink(path string, buf []byte) (n int, err error) { return } +var libc_readlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlink readlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { @@ -1213,7 +1619,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1221,6 +1627,10 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { return } +var libc_readlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlinkat readlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rename(from string, to string) (err error) { @@ -1234,13 +1644,17 @@ func Rename(from string, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rename_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rename rename "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Renameat(fromfd int, from string, tofd int, to string) (err error) { @@ -1254,13 +1668,17 @@ func Renameat(fromfd int, from string, 
tofd int, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_renameat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameat renameat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Revoke(path string) (err error) { @@ -1269,13 +1687,17 @@ func Revoke(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_revoke_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_revoke revoke "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rmdir(path string) (err error) { @@ -1284,17 +1706,21 @@ func Rmdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rmdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rmdir rmdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, _, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(whence), 0, 0) + r0, _, e1 := syscall_syscall(libc_lseek_trampoline_addr, uintptr(fd), uintptr(offset), uintptr(whence)) newoffset = int64(r0) if e1 != 0 { err = errnoErr(e1) @@ -1302,10 +1728,14 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } +var libc_lseek_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lseek lseek "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1313,36 +1743,52 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err return } +var libc_select_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_select select "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setegid setegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seteuid(euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var 
libc_seteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_seteuid seteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(gid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgid setgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setlogin(name string) (err error) { @@ -1351,97 +1797,119 @@ func Setlogin(name string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setlogin_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setlogin setlogin "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpgid setpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpriority setpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setregid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setregid setregid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setreuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setreuid setreuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + _, _, e1 := syscall_rawSyscall(libc_setresgid_trampoline_addr, uintptr(rgid), uintptr(egid), uintptr(sgid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresgid setresgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + _, _, e1 := syscall_rawSyscall(libc_setresuid_trampoline_addr, uintptr(ruid), uintptr(euid), uintptr(suid)) if e1 != 0 { err 
= errnoErr(e1) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +var libc_setresuid_trampoline_addr uintptr -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} +//go:cgo_import_dynamic libc_setresuid setresuid "libc.so" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrtable(rtable int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrtable setrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1449,26 +1917,38 @@ func Setsid() (pid int, err error) { return } +var libc_setsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsid setsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_settimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setuid setuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Stat(path string, stat *Stat_t) (err error) { @@ -1477,13 +1957,17 @@ func Stat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_stat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_stat stat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Statfs(path string, stat *Statfs_t) (err error) { @@ -1492,13 +1976,17 @@ func Statfs(path string, stat *Statfs_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_statfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_statfs statfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlink(path string, link string) (err error) { @@ -1512,13 +2000,17 @@ func Symlink(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := 
syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlink symlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { @@ -1532,23 +2024,31 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlinkat symlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() (err error) { - _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sync sync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Truncate(path string, length int64) (err error) { @@ -1557,21 +2057,29 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length)) + _, _, e1 := syscall_syscall(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_truncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_truncate truncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(newmask int) (oldmask int) { - r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) oldmask = int(r0) return } +var libc_umask_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_umask umask "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlink(path string) (err error) { @@ -1580,13 +2088,17 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlink unlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlinkat(dirfd int, path string, flags int) (err error) { @@ -1595,13 +2107,17 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unmount(path string, flags int) (err error) { @@ -1610,13 +2126,17 @@ func Unmount(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNMOUNT, 
uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unmount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unmount unmount "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func write(fd int, p []byte) (n int, err error) { @@ -1626,7 +2146,7 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1634,10 +2154,14 @@ func write(fd int, p []byte) (n int, err error) { return } +var libc_write_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_write write "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), 0, 0) + r0, _, e1 := syscall_syscall6(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) ret = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -1645,20 +2169,28 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( return } +var libc_mmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mmap mmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munmap munmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1669,7 +2201,7 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1685,9 +2217,13 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error if err != nil { return } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } + +var libc_utimensat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" diff --git 
a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s new file mode 100644 index 000000000..27b6f4df7 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -0,0 +1,664 @@ +// go run mkasm.go openbsd amd64 +// Code generated by the command above; DO NOT EDIT. + +#include "textflag.h" + +TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgroups(SB) +GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) + +TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgroups(SB) +GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) + +TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_wait4(SB) +GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 +DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) + +TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_accept(SB) +GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 +DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) + +TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_bind(SB) +GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 +DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) + +TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connect(SB) +GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) + +TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socket(SB) +GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) + +TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockopt(SB) +GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) + +TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsockopt(SB) +GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) + +TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpeername(SB) +GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) + +TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockname(SB) +GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) + +TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shutdown(SB) +GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) + +TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socketpair(SB) +GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) + +TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvfrom(SB) +GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) + +TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendto(SB) +GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) + +TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvmsg(SB) +GLOBL 
·libc_recvmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) + +TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendmsg(SB) +GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) + +TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kevent(SB) +GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) + +TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimes(SB) +GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) + +TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_futimes(SB) +GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) + +TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_poll(SB) +GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) + +TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_madvise(SB) +GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 +DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) + +TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlock(SB) +GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) + +TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlockall(SB) +GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) + +TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mprotect(SB) +GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) + +TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_msync(SB) +GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) + +TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlock(SB) +GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) + +TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlockall(SB) +GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) + +TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pipe2(SB) +GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pipe2_trampoline_addr(SB)/8, $libc_pipe2_trampoline<>(SB) + +TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getdents(SB) +GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getdents_trampoline_addr(SB)/8, $libc_getdents_trampoline<>(SB) + +TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) +GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) + +TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ioctl(SB) +GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + +TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + JMP 
libc_ppoll(SB) +GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ppoll_trampoline_addr(SB)/8, $libc_ppoll_trampoline<>(SB) + +TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_access(SB) +GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 +DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) + +TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_adjtime(SB) +GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) + +TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chdir(SB) +GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) + +TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chflags(SB) +GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) + +TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chmod(SB) +GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) + +TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chown(SB) +GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) + +TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chroot(SB) +GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) + +TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) +GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) + +TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_close(SB) +GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 +DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) + +TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup(SB) +GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) + +TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup2(SB) +GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) + +TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup3(SB) +GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup3_trampoline_addr(SB)/8, $libc_dup3_trampoline<>(SB) + +TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_exit(SB) +GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) + +TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_faccessat(SB) +GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) + +TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchdir(SB) +GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) + +TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchflags(SB) +GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) + +TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmod(SB) +GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) + +TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmodat(SB) 
+GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) + +TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchown(SB) +GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) + +TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchownat(SB) +GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) + +TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_flock(SB) +GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) + +TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fpathconf(SB) +GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) + +TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstat(SB) +GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) + +TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatat(SB) +GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) + +TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatfs(SB) +GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) + +TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fsync(SB) +GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) + +TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ftruncate(SB) +GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) + +TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getegid(SB) +GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) + +TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_geteuid(SB) +GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) + +TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgid(SB) +GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) + +TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgid(SB) +GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) + +TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgrp(SB) +GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) + +TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpid(SB) +GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) + +TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getppid(SB) +GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) + +TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpriority(SB) +GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) + +TEXT 
libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrlimit(SB) +GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) + +TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrtable(SB) +GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrtable_trampoline_addr(SB)/8, $libc_getrtable_trampoline<>(SB) + +TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrusage(SB) +GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) + +TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsid(SB) +GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) + +TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) +GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) + +TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getuid(SB) +GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) + +TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_issetugid(SB) +GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) + +TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kill(SB) +GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) + +TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kqueue(SB) +GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) + +TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lchown(SB) +GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) + +TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_link(SB) +GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 +DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) + +TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_linkat(SB) +GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) + +TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_listen(SB) +GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 +DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) + +TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lstat(SB) +GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) + +TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdir(SB) +GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) + +TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdirat(SB) +GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) + +TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifo(SB) +GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) + +TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifoat(SB) +GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_mkfifoat_trampoline_addr(SB)/8, $libc_mkfifoat_trampoline<>(SB) + +TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknod(SB) +GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) + +TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknodat(SB) +GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) + +TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_nanosleep(SB) +GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 +DATA ·libc_nanosleep_trampoline_addr(SB)/8, $libc_nanosleep_trampoline<>(SB) + +TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_open(SB) +GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 +DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) + +TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_openat(SB) +GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) + +TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pathconf(SB) +GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) + +TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pread(SB) +GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) + +TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwrite(SB) +GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) + +TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_read(SB) +GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 +DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) + +TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlink(SB) +GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) + +TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlinkat(SB) +GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) + +TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rename(SB) +GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) + +TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameat(SB) +GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) + +TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_revoke(SB) +GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 +DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) + +TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rmdir(SB) +GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) + +TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lseek(SB) +GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) + +TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_select(SB) +GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 +DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) + +TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setegid(SB) +GLOBL ·libc_setegid_trampoline_addr(SB), 
RODATA, $8 +DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) + +TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_seteuid(SB) +GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) + +TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgid(SB) +GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) + +TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setlogin(SB) +GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) + +TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpgid(SB) +GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) + +TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpriority(SB) +GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) + +TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setregid(SB) +GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) + +TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setreuid(SB) +GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) + +TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresgid(SB) +GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresgid_trampoline_addr(SB)/8, $libc_setresgid_trampoline<>(SB) + +TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresuid(SB) +GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) + +TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrtable(SB) +GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrtable_trampoline_addr(SB)/8, $libc_setrtable_trampoline<>(SB) + +TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsid(SB) +GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) + +TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_settimeofday(SB) +GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) + +TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setuid(SB) +GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) + +TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_stat(SB) +GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB) + +TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_statfs(SB) +GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB) + +TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlink(SB) +GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) + +TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlinkat(SB) +GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlinkat_trampoline_addr(SB)/8, 
$libc_symlinkat_trampoline<>(SB) + +TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sync(SB) +GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) + +TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_truncate(SB) +GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) + +TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_umask(SB) +GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 +DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) + +TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlink(SB) +GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) + +TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlinkat(SB) +GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) + +TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unmount(SB) +GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) + +TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_write(SB) +GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 +DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) + +TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mmap(SB) +GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) + +TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munmap(SB) +GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) + +TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) +GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 69f803006..7f6427899 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -l32 -openbsd -arm -tags openbsd,arm syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm.go +// go run mksyscall.go -l32 -openbsd -arm -libc -tags openbsd,arm syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && arm @@ -16,7 +16,7 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -24,20 +24,28 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } +var libc_getgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgroups getgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgroups setgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -45,10 +53,14 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err return } +var libc_wait4_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_wait4 wait4 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -56,30 +68,42 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } +var libc_accept_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_accept accept "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_bind_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_bind bind "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_connect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connect connect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_SOCKET, 
uintptr(domain), uintptr(typ), uintptr(proto)) + r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -87,66 +111,94 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } +var libc_socket_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socket socket "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockopt getsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getpeername_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpeername getpeername "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockname_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockname getsockname "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(s int, how int) (err error) { - _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_shutdown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shutdown shutdown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = 
errnoErr(e1) } return } +var libc_socketpair_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socketpair socketpair "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { @@ -156,7 +208,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -164,6 +216,10 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl return } +var libc_recvfrom_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { @@ -173,17 +229,21 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sendto_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendto sendto "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -191,10 +251,14 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_recvmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -202,10 +266,14 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_sendmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -213,6 +281,10 @@ func kevent(kq 
int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne return } +var libc_kevent_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kevent kevent "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func utimes(path string, timeval *[2]Timeval) (err error) { @@ -221,27 +293,35 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_utimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimes utimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_futimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_futimes futimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -249,6 +329,10 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { return } +var libc_poll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_poll poll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Madvise(b []byte, behav int) (err error) { @@ -258,13 +342,17 @@ func Madvise(b []byte, behav int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_madvise_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_madvise madvise "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlock(b []byte) (err error) { @@ -274,23 +362,31 @@ func Mlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlock mlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlockall mlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mprotect(b []byte, prot int) (err error) { @@ -300,13 +396,17 @@ func Mprotect(b []byte, prot int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), 
uintptr(prot)) + _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mprotect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mprotect mprotect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Msync(b []byte, flags int) (err error) { @@ -316,13 +416,17 @@ func Msync(b []byte, flags int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_msync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_msync msync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlock(b []byte) (err error) { @@ -332,33 +436,45 @@ func Munlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlock munlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlockall munlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + _, _, e1 := syscall_rawSyscall(libc_pipe2_trampoline_addr, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_pipe2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getdents(fd int, buf []byte) (n int, err error) { @@ -368,7 +484,7 @@ func Getdents(fd int, buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_getdents_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -376,6 +492,10 @@ func Getdents(fd int, buf []byte) (n int, err error) { return } +var libc_getdents_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getdents getdents "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getcwd(buf []byte) (n int, err error) { @@ -385,7 +505,7 @@ func Getcwd(buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -393,16 +513,32 @@ func Getcwd(buf []byte) (n int, err error) { return } +var libc_getcwd_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getcwd getcwd "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, 
uintptr(fd), uintptr(req), uintptr(arg)) + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) } return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -412,17 +548,21 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -430,6 +570,10 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, return } +var libc_ppoll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ppoll ppoll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Access(path string, mode uint32) (err error) { @@ -438,23 +582,31 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_access_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_access access "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + _, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_adjtime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_adjtime adjtime "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chdir(path string) (err error) { @@ -463,13 +615,17 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chdir_trampoline_addr 
uintptr + +//go:cgo_import_dynamic libc_chdir chdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chflags(path string, flags int) (err error) { @@ -478,13 +634,17 @@ func Chflags(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chflags chflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chmod(path string, mode uint32) (err error) { @@ -493,13 +653,17 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chmod chmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chown(path string, uid int, gid int) (err error) { @@ -508,13 +672,17 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chown chown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chroot(path string) (err error) { @@ -523,27 +691,49 @@ func Chroot(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chroot_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chroot chroot "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_clock_gettime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_close_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_close close "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(fd int) (nfd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) nfd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -551,33 +741,49 @@ func Dup(fd int) (nfd int, err error) { return } +var libc_dup_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup dup "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(from int, to int) (err 
error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_dup2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup2 dup2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup3(from int, to int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_dup3_trampoline_addr, uintptr(from), uintptr(to), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_dup3_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup3 dup3 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT, uintptr(code), 0, 0) + syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) return } +var libc_exit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_exit exit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -586,43 +792,59 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_faccessat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_faccessat faccessat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchdir fchdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchflags(fd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchflags fchflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmod fchmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -631,23 +853,31 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmodat_trampoline_addr uintptr + +//go:cgo_import_dynamic 
libc_fchmodat fchmodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchown fchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { @@ -656,27 +886,35 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchownat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchownat fchownat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_flock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_flock flock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -684,16 +922,24 @@ func Fpathconf(fd int, name int) (val int, err error) { return } +var libc_fpathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstat fstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { @@ -702,71 +948,99 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = 
errnoErr(e1) } return } +var libc_fstatfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fsync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fsync fsync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall6(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0) + _, _, e1 := syscall_syscall6(libc_ftruncate_trampoline_addr, uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ftruncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) egid = int(r0) return } +var libc_getegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getegid getegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_geteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_geteuid geteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) gid = int(r0) return } +var libc_getgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgid getgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) pgid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -774,34 +1048,50 @@ func Getpgid(pid int) (pgid int, err error) { return } +var libc_getpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgid getpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pgrp int) { - r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) pgrp = int(r0) return } +var libc_getpgrp_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) pid = int(r0) return } +var libc_getpid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpid getpid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) ppid = int(r0) return } +var libc_getppid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getppid getppid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func 
Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0) prio = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -809,20 +1099,28 @@ func Getpriority(which int, who int) (prio int, err error) { return } +var libc_getpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpriority getpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrtable() (rtable int, err error) { - r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getrtable_trampoline_addr, 0, 0, 0) rtable = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -830,20 +1128,28 @@ func Getrtable() (rtable int, err error) { return } +var libc_getrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrtable getrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrusage_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrusage getrusage "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) sid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -851,46 +1157,66 @@ func Getsid(pid int) (sid int, err error) { return } +var libc_getsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsid getsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_gettimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_getuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getuid getuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { - r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + r0, _, _ := syscall_syscall(libc_issetugid_trampoline_addr, 0, 0, 0) tainted = bool(r0 != 0) return } +var libc_issetugid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_issetugid issetugid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; 
DO NOT EDIT func Kill(pid int, signum syscall.Signal) (err error) { - _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_kill_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kill kill "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kqueue() (fd int, err error) { - r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -898,6 +1224,10 @@ func Kqueue() (fd int, err error) { return } +var libc_kqueue_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kqueue kqueue "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lchown(path string, uid int, gid int) (err error) { @@ -906,13 +1236,17 @@ func Lchown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lchown lchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Link(path string, link string) (err error) { @@ -926,13 +1260,17 @@ func Link(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_link_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_link link "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { @@ -946,23 +1284,31 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err er if err != nil { return } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_linkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_linkat linkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_listen_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_listen listen "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lstat(path string, stat *Stat_t) (err error) { @@ -971,13 +1317,17 @@ func Lstat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } 
return } +var libc_lstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lstat lstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdir(path string, mode uint32) (err error) { @@ -986,13 +1336,17 @@ func Mkdir(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdir mkdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdirat(dirfd int, path string, mode uint32) (err error) { @@ -1001,13 +1355,17 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdirat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifo(path string, mode uint32) (err error) { @@ -1016,13 +1374,17 @@ func Mkfifo(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifo_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifoat(dirfd int, path string, mode uint32) (err error) { @@ -1031,13 +1393,17 @@ func Mkfifoat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkfifoat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifoat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknod(path string, mode uint32, dev int) (err error) { @@ -1046,13 +1412,17 @@ func Mknod(path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknod mknod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { @@ -1061,23 +1431,31 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + _, _, e1 := syscall_syscall6(libc_mknodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } 
+var libc_mknodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknodat mknodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_nanosleep_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Open(path string, mode int, perm uint32) (fd int, err error) { @@ -1086,7 +1464,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1094,6 +1472,10 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } +var libc_open_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_open open "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { @@ -1102,7 +1484,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1110,6 +1492,10 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { return } +var libc_openat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_openat openat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pathconf(path string, name int) (val int, err error) { @@ -1118,7 +1504,7 @@ func Pathconf(path string, name int) (val int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1126,6 +1512,10 @@ func Pathconf(path string, name int) (val int, err error) { return } +var libc_pathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pathconf pathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pread(fd int, p []byte, offset int64) (n int, err error) { @@ -1135,7 +1525,7 @@ func pread(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1143,6 +1533,10 @@ func pread(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pread_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pread pread "libc.so" + // THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pwrite(fd int, p []byte, offset int64) (n int, err error) { @@ -1152,7 +1546,7 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) + r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1160,6 +1554,10 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pwrite_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwrite pwrite "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func read(fd int, p []byte) (n int, err error) { @@ -1169,7 +1567,7 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1177,6 +1575,10 @@ func read(fd int, p []byte) (n int, err error) { return } +var libc_read_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_read read "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlink(path string, buf []byte) (n int, err error) { @@ -1191,7 +1593,7 @@ func Readlink(path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1199,6 +1601,10 @@ func Readlink(path string, buf []byte) (n int, err error) { return } +var libc_readlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlink readlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { @@ -1213,7 +1619,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1221,6 +1627,10 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { return } +var libc_readlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlinkat readlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rename(from string, to string) (err error) { @@ -1234,13 +1644,17 @@ func Rename(from string, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rename_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rename rename "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Renameat(fromfd 
int, from string, tofd int, to string) (err error) { @@ -1254,13 +1668,17 @@ func Renameat(fromfd int, from string, tofd int, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_renameat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameat renameat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Revoke(path string) (err error) { @@ -1269,13 +1687,17 @@ func Revoke(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_revoke_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_revoke revoke "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rmdir(path string) (err error) { @@ -1284,17 +1706,21 @@ func Rmdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rmdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rmdir rmdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(whence), 0) + r0, r1, e1 := syscall_syscall6(libc_lseek_trampoline_addr, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(whence), 0) newoffset = int64(int64(r1)<<32 | int64(r0)) if e1 != 0 { err = errnoErr(e1) @@ -1302,10 +1728,14 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } +var libc_lseek_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lseek lseek "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1313,36 +1743,52 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err return } +var libc_select_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_select select "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setegid setegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seteuid(euid int) (err error) { - _, _, 
e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_seteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_seteuid seteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(gid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgid setgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setlogin(name string) (err error) { @@ -1351,97 +1797,119 @@ func Setlogin(name string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setlogin_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setlogin setlogin "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpgid setpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpriority setpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setregid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setregid setregid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setreuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setreuid setreuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + _, _, e1 := syscall_rawSyscall(libc_setresgid_trampoline_addr, uintptr(rgid), uintptr(egid), uintptr(sgid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresgid setresgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := 
RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + _, _, e1 := syscall_rawSyscall(libc_setresuid_trampoline_addr, uintptr(ruid), uintptr(euid), uintptr(suid)) if e1 != 0 { err = errnoErr(e1) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +var libc_setresuid_trampoline_addr uintptr -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} +//go:cgo_import_dynamic libc_setresuid setresuid "libc.so" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrtable(rtable int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrtable setrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1449,26 +1917,38 @@ func Setsid() (pid int, err error) { return } +var libc_setsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsid setsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_settimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setuid setuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Stat(path string, stat *Stat_t) (err error) { @@ -1477,13 +1957,17 @@ func Stat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_stat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_stat stat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Statfs(path string, stat *Statfs_t) (err error) { @@ -1492,13 +1976,17 @@ func Statfs(path string, stat *Statfs_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_statfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_statfs statfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlink(path string, link string) (err error) { @@ -1512,13 
+2000,17 @@ func Symlink(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlink symlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { @@ -1532,23 +2024,31 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlinkat symlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() (err error) { - _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sync sync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Truncate(path string, length int64) (err error) { @@ -1557,21 +2057,29 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0) + _, _, e1 := syscall_syscall6(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_truncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_truncate truncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(newmask int) (oldmask int) { - r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) oldmask = int(r0) return } +var libc_umask_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_umask umask "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlink(path string) (err error) { @@ -1580,13 +2088,17 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlink unlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlinkat(dirfd int, path string, flags int) (err error) { @@ -1595,13 +2107,17 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlinkat unlinkat 
"libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unmount(path string, flags int) (err error) { @@ -1610,13 +2126,17 @@ func Unmount(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unmount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unmount unmount "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func write(fd int, p []byte) (n int, err error) { @@ -1626,7 +2146,7 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1634,10 +2154,14 @@ func write(fd int, p []byte) (n int, err error) { return } +var libc_write_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_write write "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), uintptr(pos>>32), 0) + r0, _, e1 := syscall_syscall9(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), uintptr(pos>>32), 0) ret = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -1645,20 +2169,28 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( return } +var libc_mmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mmap mmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munmap munmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1669,7 +2201,7 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1685,9 +2217,13 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error if err != nil { return } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, 
uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } + +var libc_utimensat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s new file mode 100644 index 000000000..b797045fd --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -0,0 +1,664 @@ +// go run mkasm.go openbsd arm +// Code generated by the command above; DO NOT EDIT. + +#include "textflag.h" + +TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgroups(SB) +GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getgroups_trampoline_addr(SB)/4, $libc_getgroups_trampoline<>(SB) + +TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgroups(SB) +GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setgroups_trampoline_addr(SB)/4, $libc_setgroups_trampoline<>(SB) + +TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_wait4(SB) +GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $4 +DATA ·libc_wait4_trampoline_addr(SB)/4, $libc_wait4_trampoline<>(SB) + +TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_accept(SB) +GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $4 +DATA ·libc_accept_trampoline_addr(SB)/4, $libc_accept_trampoline<>(SB) + +TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_bind(SB) +GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $4 +DATA ·libc_bind_trampoline_addr(SB)/4, $libc_bind_trampoline<>(SB) + +TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connect(SB) +GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $4 +DATA ·libc_connect_trampoline_addr(SB)/4, $libc_connect_trampoline<>(SB) + +TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socket(SB) +GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $4 +DATA ·libc_socket_trampoline_addr(SB)/4, $libc_socket_trampoline<>(SB) + +TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockopt(SB) +GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getsockopt_trampoline_addr(SB)/4, $libc_getsockopt_trampoline<>(SB) + +TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsockopt(SB) +GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setsockopt_trampoline_addr(SB)/4, $libc_setsockopt_trampoline<>(SB) + +TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpeername(SB) +GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpeername_trampoline_addr(SB)/4, $libc_getpeername_trampoline<>(SB) + +TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockname(SB) +GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getsockname_trampoline_addr(SB)/4, $libc_getsockname_trampoline<>(SB) + +TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shutdown(SB) +GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $4 +DATA ·libc_shutdown_trampoline_addr(SB)/4, $libc_shutdown_trampoline<>(SB) + +TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socketpair(SB) +GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $4 +DATA ·libc_socketpair_trampoline_addr(SB)/4, $libc_socketpair_trampoline<>(SB) + +TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvfrom(SB) +GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $4 +DATA ·libc_recvfrom_trampoline_addr(SB)/4, $libc_recvfrom_trampoline<>(SB) + +TEXT 
libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendto(SB) +GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $4 +DATA ·libc_sendto_trampoline_addr(SB)/4, $libc_sendto_trampoline<>(SB) + +TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvmsg(SB) +GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $4 +DATA ·libc_recvmsg_trampoline_addr(SB)/4, $libc_recvmsg_trampoline<>(SB) + +TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendmsg(SB) +GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $4 +DATA ·libc_sendmsg_trampoline_addr(SB)/4, $libc_sendmsg_trampoline<>(SB) + +TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kevent(SB) +GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $4 +DATA ·libc_kevent_trampoline_addr(SB)/4, $libc_kevent_trampoline<>(SB) + +TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimes(SB) +GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $4 +DATA ·libc_utimes_trampoline_addr(SB)/4, $libc_utimes_trampoline<>(SB) + +TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_futimes(SB) +GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $4 +DATA ·libc_futimes_trampoline_addr(SB)/4, $libc_futimes_trampoline<>(SB) + +TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_poll(SB) +GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $4 +DATA ·libc_poll_trampoline_addr(SB)/4, $libc_poll_trampoline<>(SB) + +TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_madvise(SB) +GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $4 +DATA ·libc_madvise_trampoline_addr(SB)/4, $libc_madvise_trampoline<>(SB) + +TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlock(SB) +GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mlock_trampoline_addr(SB)/4, $libc_mlock_trampoline<>(SB) + +TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlockall(SB) +GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mlockall_trampoline_addr(SB)/4, $libc_mlockall_trampoline<>(SB) + +TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mprotect(SB) +GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mprotect_trampoline_addr(SB)/4, $libc_mprotect_trampoline<>(SB) + +TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_msync(SB) +GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $4 +DATA ·libc_msync_trampoline_addr(SB)/4, $libc_msync_trampoline<>(SB) + +TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlock(SB) +GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $4 +DATA ·libc_munlock_trampoline_addr(SB)/4, $libc_munlock_trampoline<>(SB) + +TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlockall(SB) +GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $4 +DATA ·libc_munlockall_trampoline_addr(SB)/4, $libc_munlockall_trampoline<>(SB) + +TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pipe2(SB) +GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pipe2_trampoline_addr(SB)/4, $libc_pipe2_trampoline<>(SB) + +TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getdents(SB) +GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getdents_trampoline_addr(SB)/4, $libc_getdents_trampoline<>(SB) + +TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) +GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getcwd_trampoline_addr(SB)/4, $libc_getcwd_trampoline<>(SB) + +TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ioctl(SB) +GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $4 +DATA ·libc_ioctl_trampoline_addr(SB)/4, 
$libc_ioctl_trampoline<>(SB) + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4 +DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB) + +TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ppoll(SB) +GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4 +DATA ·libc_ppoll_trampoline_addr(SB)/4, $libc_ppoll_trampoline<>(SB) + +TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_access(SB) +GLOBL ·libc_access_trampoline_addr(SB), RODATA, $4 +DATA ·libc_access_trampoline_addr(SB)/4, $libc_access_trampoline<>(SB) + +TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_adjtime(SB) +GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $4 +DATA ·libc_adjtime_trampoline_addr(SB)/4, $libc_adjtime_trampoline<>(SB) + +TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chdir(SB) +GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chdir_trampoline_addr(SB)/4, $libc_chdir_trampoline<>(SB) + +TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chflags(SB) +GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chflags_trampoline_addr(SB)/4, $libc_chflags_trampoline<>(SB) + +TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chmod(SB) +GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chmod_trampoline_addr(SB)/4, $libc_chmod_trampoline<>(SB) + +TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chown(SB) +GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chown_trampoline_addr(SB)/4, $libc_chown_trampoline<>(SB) + +TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chroot(SB) +GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $4 +DATA ·libc_chroot_trampoline_addr(SB)/4, $libc_chroot_trampoline<>(SB) + +TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) +GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $4 +DATA ·libc_clock_gettime_trampoline_addr(SB)/4, $libc_clock_gettime_trampoline<>(SB) + +TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_close(SB) +GLOBL ·libc_close_trampoline_addr(SB), RODATA, $4 +DATA ·libc_close_trampoline_addr(SB)/4, $libc_close_trampoline<>(SB) + +TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup(SB) +GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $4 +DATA ·libc_dup_trampoline_addr(SB)/4, $libc_dup_trampoline<>(SB) + +TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup2(SB) +GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $4 +DATA ·libc_dup2_trampoline_addr(SB)/4, $libc_dup2_trampoline<>(SB) + +TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup3(SB) +GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $4 +DATA ·libc_dup3_trampoline_addr(SB)/4, $libc_dup3_trampoline<>(SB) + +TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_exit(SB) +GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $4 +DATA ·libc_exit_trampoline_addr(SB)/4, $libc_exit_trampoline<>(SB) + +TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_faccessat(SB) +GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_faccessat_trampoline_addr(SB)/4, $libc_faccessat_trampoline<>(SB) + +TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchdir(SB) +GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchdir_trampoline_addr(SB)/4, $libc_fchdir_trampoline<>(SB) + +TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchflags(SB) +GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchflags_trampoline_addr(SB)/4, 
$libc_fchflags_trampoline<>(SB) + +TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmod(SB) +GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchmod_trampoline_addr(SB)/4, $libc_fchmod_trampoline<>(SB) + +TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmodat(SB) +GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchmodat_trampoline_addr(SB)/4, $libc_fchmodat_trampoline<>(SB) + +TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchown(SB) +GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchown_trampoline_addr(SB)/4, $libc_fchown_trampoline<>(SB) + +TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchownat(SB) +GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fchownat_trampoline_addr(SB)/4, $libc_fchownat_trampoline<>(SB) + +TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_flock(SB) +GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $4 +DATA ·libc_flock_trampoline_addr(SB)/4, $libc_flock_trampoline<>(SB) + +TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fpathconf(SB) +GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fpathconf_trampoline_addr(SB)/4, $libc_fpathconf_trampoline<>(SB) + +TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstat(SB) +GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fstat_trampoline_addr(SB)/4, $libc_fstat_trampoline<>(SB) + +TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatat(SB) +GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fstatat_trampoline_addr(SB)/4, $libc_fstatat_trampoline<>(SB) + +TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatfs(SB) +GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fstatfs_trampoline_addr(SB)/4, $libc_fstatfs_trampoline<>(SB) + +TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fsync(SB) +GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fsync_trampoline_addr(SB)/4, $libc_fsync_trampoline<>(SB) + +TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ftruncate(SB) +GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $4 +DATA ·libc_ftruncate_trampoline_addr(SB)/4, $libc_ftruncate_trampoline<>(SB) + +TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getegid(SB) +GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getegid_trampoline_addr(SB)/4, $libc_getegid_trampoline<>(SB) + +TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_geteuid(SB) +GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_geteuid_trampoline_addr(SB)/4, $libc_geteuid_trampoline<>(SB) + +TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgid(SB) +GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getgid_trampoline_addr(SB)/4, $libc_getgid_trampoline<>(SB) + +TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgid(SB) +GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpgid_trampoline_addr(SB)/4, $libc_getpgid_trampoline<>(SB) + +TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgrp(SB) +GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpgrp_trampoline_addr(SB)/4, $libc_getpgrp_trampoline<>(SB) + +TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpid(SB) +GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpid_trampoline_addr(SB)/4, $libc_getpid_trampoline<>(SB) + +TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getppid(SB) +GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, 
$4 +DATA ·libc_getppid_trampoline_addr(SB)/4, $libc_getppid_trampoline<>(SB) + +TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpriority(SB) +GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getpriority_trampoline_addr(SB)/4, $libc_getpriority_trampoline<>(SB) + +TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrlimit(SB) +GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getrlimit_trampoline_addr(SB)/4, $libc_getrlimit_trampoline<>(SB) + +TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrtable(SB) +GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getrtable_trampoline_addr(SB)/4, $libc_getrtable_trampoline<>(SB) + +TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrusage(SB) +GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getrusage_trampoline_addr(SB)/4, $libc_getrusage_trampoline<>(SB) + +TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsid(SB) +GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getsid_trampoline_addr(SB)/4, $libc_getsid_trampoline<>(SB) + +TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) +GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $4 +DATA ·libc_gettimeofday_trampoline_addr(SB)/4, $libc_gettimeofday_trampoline<>(SB) + +TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getuid(SB) +GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getuid_trampoline_addr(SB)/4, $libc_getuid_trampoline<>(SB) + +TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_issetugid(SB) +GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_issetugid_trampoline_addr(SB)/4, $libc_issetugid_trampoline<>(SB) + +TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kill(SB) +GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $4 +DATA ·libc_kill_trampoline_addr(SB)/4, $libc_kill_trampoline<>(SB) + +TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kqueue(SB) +GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $4 +DATA ·libc_kqueue_trampoline_addr(SB)/4, $libc_kqueue_trampoline<>(SB) + +TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lchown(SB) +GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $4 +DATA ·libc_lchown_trampoline_addr(SB)/4, $libc_lchown_trampoline<>(SB) + +TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_link(SB) +GLOBL ·libc_link_trampoline_addr(SB), RODATA, $4 +DATA ·libc_link_trampoline_addr(SB)/4, $libc_link_trampoline<>(SB) + +TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_linkat(SB) +GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_linkat_trampoline_addr(SB)/4, $libc_linkat_trampoline<>(SB) + +TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_listen(SB) +GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $4 +DATA ·libc_listen_trampoline_addr(SB)/4, $libc_listen_trampoline<>(SB) + +TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lstat(SB) +GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_lstat_trampoline_addr(SB)/4, $libc_lstat_trampoline<>(SB) + +TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdir(SB) +GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mkdir_trampoline_addr(SB)/4, $libc_mkdir_trampoline<>(SB) + +TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdirat(SB) +GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mkdirat_trampoline_addr(SB)/4, $libc_mkdirat_trampoline<>(SB) + +TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 + JMP 
libc_mkfifo(SB) +GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mkfifo_trampoline_addr(SB)/4, $libc_mkfifo_trampoline<>(SB) + +TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifoat(SB) +GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mkfifoat_trampoline_addr(SB)/4, $libc_mkfifoat_trampoline<>(SB) + +TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknod(SB) +GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mknod_trampoline_addr(SB)/4, $libc_mknod_trampoline<>(SB) + +TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknodat(SB) +GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB) + +TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_nanosleep(SB) +GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4 +DATA ·libc_nanosleep_trampoline_addr(SB)/4, $libc_nanosleep_trampoline<>(SB) + +TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_open(SB) +GLOBL ·libc_open_trampoline_addr(SB), RODATA, $4 +DATA ·libc_open_trampoline_addr(SB)/4, $libc_open_trampoline<>(SB) + +TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_openat(SB) +GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_openat_trampoline_addr(SB)/4, $libc_openat_trampoline<>(SB) + +TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pathconf(SB) +GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pathconf_trampoline_addr(SB)/4, $libc_pathconf_trampoline<>(SB) + +TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pread(SB) +GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pread_trampoline_addr(SB)/4, $libc_pread_trampoline<>(SB) + +TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwrite(SB) +GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pwrite_trampoline_addr(SB)/4, $libc_pwrite_trampoline<>(SB) + +TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_read(SB) +GLOBL ·libc_read_trampoline_addr(SB), RODATA, $4 +DATA ·libc_read_trampoline_addr(SB)/4, $libc_read_trampoline<>(SB) + +TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlink(SB) +GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $4 +DATA ·libc_readlink_trampoline_addr(SB)/4, $libc_readlink_trampoline<>(SB) + +TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlinkat(SB) +GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_readlinkat_trampoline_addr(SB)/4, $libc_readlinkat_trampoline<>(SB) + +TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rename(SB) +GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $4 +DATA ·libc_rename_trampoline_addr(SB)/4, $libc_rename_trampoline<>(SB) + +TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameat(SB) +GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_renameat_trampoline_addr(SB)/4, $libc_renameat_trampoline<>(SB) + +TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_revoke(SB) +GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $4 +DATA ·libc_revoke_trampoline_addr(SB)/4, $libc_revoke_trampoline<>(SB) + +TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rmdir(SB) +GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $4 +DATA ·libc_rmdir_trampoline_addr(SB)/4, $libc_rmdir_trampoline<>(SB) + +TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lseek(SB) +GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $4 +DATA ·libc_lseek_trampoline_addr(SB)/4, $libc_lseek_trampoline<>(SB) + +TEXT 
libc_select_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_select(SB) +GLOBL ·libc_select_trampoline_addr(SB), RODATA, $4 +DATA ·libc_select_trampoline_addr(SB)/4, $libc_select_trampoline<>(SB) + +TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setegid(SB) +GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setegid_trampoline_addr(SB)/4, $libc_setegid_trampoline<>(SB) + +TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_seteuid(SB) +GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_seteuid_trampoline_addr(SB)/4, $libc_seteuid_trampoline<>(SB) + +TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgid(SB) +GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setgid_trampoline_addr(SB)/4, $libc_setgid_trampoline<>(SB) + +TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setlogin(SB) +GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setlogin_trampoline_addr(SB)/4, $libc_setlogin_trampoline<>(SB) + +TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpgid(SB) +GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setpgid_trampoline_addr(SB)/4, $libc_setpgid_trampoline<>(SB) + +TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpriority(SB) +GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setpriority_trampoline_addr(SB)/4, $libc_setpriority_trampoline<>(SB) + +TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setregid(SB) +GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setregid_trampoline_addr(SB)/4, $libc_setregid_trampoline<>(SB) + +TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setreuid(SB) +GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setreuid_trampoline_addr(SB)/4, $libc_setreuid_trampoline<>(SB) + +TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresgid(SB) +GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setresgid_trampoline_addr(SB)/4, $libc_setresgid_trampoline<>(SB) + +TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresuid(SB) +GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setresuid_trampoline_addr(SB)/4, $libc_setresuid_trampoline<>(SB) + +TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrtable(SB) +GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setrtable_trampoline_addr(SB)/4, $libc_setrtable_trampoline<>(SB) + +TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsid(SB) +GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setsid_trampoline_addr(SB)/4, $libc_setsid_trampoline<>(SB) + +TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_settimeofday(SB) +GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $4 +DATA ·libc_settimeofday_trampoline_addr(SB)/4, $libc_settimeofday_trampoline<>(SB) + +TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setuid(SB) +GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $4 +DATA ·libc_setuid_trampoline_addr(SB)/4, $libc_setuid_trampoline<>(SB) + +TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_stat(SB) +GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_stat_trampoline_addr(SB)/4, $libc_stat_trampoline<>(SB) + +TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_statfs(SB) +GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $4 +DATA ·libc_statfs_trampoline_addr(SB)/4, $libc_statfs_trampoline<>(SB) + +TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlink(SB) +GLOBL 
·libc_symlink_trampoline_addr(SB), RODATA, $4 +DATA ·libc_symlink_trampoline_addr(SB)/4, $libc_symlink_trampoline<>(SB) + +TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlinkat(SB) +GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_symlinkat_trampoline_addr(SB)/4, $libc_symlinkat_trampoline<>(SB) + +TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sync(SB) +GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $4 +DATA ·libc_sync_trampoline_addr(SB)/4, $libc_sync_trampoline<>(SB) + +TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_truncate(SB) +GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $4 +DATA ·libc_truncate_trampoline_addr(SB)/4, $libc_truncate_trampoline<>(SB) + +TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_umask(SB) +GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $4 +DATA ·libc_umask_trampoline_addr(SB)/4, $libc_umask_trampoline<>(SB) + +TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlink(SB) +GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unlink_trampoline_addr(SB)/4, $libc_unlink_trampoline<>(SB) + +TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlinkat(SB) +GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unlinkat_trampoline_addr(SB)/4, $libc_unlinkat_trampoline<>(SB) + +TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unmount(SB) +GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unmount_trampoline_addr(SB)/4, $libc_unmount_trampoline<>(SB) + +TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_write(SB) +GLOBL ·libc_write_trampoline_addr(SB), RODATA, $4 +DATA ·libc_write_trampoline_addr(SB)/4, $libc_write_trampoline<>(SB) + +TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mmap(SB) +GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mmap_trampoline_addr(SB)/4, $libc_mmap_trampoline<>(SB) + +TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munmap(SB) +GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4 +DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB) + +TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) +GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index c96a50517..756ef7b17 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -openbsd -tags openbsd,arm64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm64.go +// go run mksyscall.go -openbsd -libc -tags openbsd,arm64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && arm64 @@ -16,7 +16,7 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -24,20 +24,28 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } +var libc_getgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgroups getgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgroups setgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -45,10 +53,14 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err return } +var libc_wait4_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_wait4 wait4 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -56,30 +68,42 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } +var libc_accept_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_accept accept "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_bind_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_bind bind "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_connect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connect connect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_SOCKET, 
uintptr(domain), uintptr(typ), uintptr(proto)) + r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -87,66 +111,94 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } +var libc_socket_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socket socket "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockopt getsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getpeername_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpeername getpeername "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockname_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockname getsockname "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(s int, how int) (err error) { - _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_shutdown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shutdown shutdown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = 
errnoErr(e1) } return } +var libc_socketpair_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socketpair socketpair "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { @@ -156,7 +208,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -164,6 +216,10 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl return } +var libc_recvfrom_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { @@ -173,17 +229,21 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sendto_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendto sendto "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -191,10 +251,14 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_recvmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -202,10 +266,14 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_sendmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -213,6 +281,10 @@ func kevent(kq 
int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne return } +var libc_kevent_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kevent kevent "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func utimes(path string, timeval *[2]Timeval) (err error) { @@ -221,27 +293,35 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_utimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimes utimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_futimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_futimes futimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -249,6 +329,10 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { return } +var libc_poll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_poll poll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Madvise(b []byte, behav int) (err error) { @@ -258,13 +342,17 @@ func Madvise(b []byte, behav int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_madvise_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_madvise madvise "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlock(b []byte) (err error) { @@ -274,23 +362,31 @@ func Mlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlock mlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlockall mlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mprotect(b []byte, prot int) (err error) { @@ -300,13 +396,17 @@ func Mprotect(b []byte, prot int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), 
uintptr(prot)) + _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mprotect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mprotect mprotect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Msync(b []byte, flags int) (err error) { @@ -316,13 +416,17 @@ func Msync(b []byte, flags int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_msync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_msync msync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlock(b []byte) (err error) { @@ -332,33 +436,45 @@ func Munlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlock munlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlockall munlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + _, _, e1 := syscall_rawSyscall(libc_pipe2_trampoline_addr, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_pipe2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getdents(fd int, buf []byte) (n int, err error) { @@ -368,7 +484,7 @@ func Getdents(fd int, buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_getdents_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -376,6 +492,10 @@ func Getdents(fd int, buf []byte) (n int, err error) { return } +var libc_getdents_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getdents getdents "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getcwd(buf []byte) (n int, err error) { @@ -385,7 +505,7 @@ func Getcwd(buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -393,16 +513,32 @@ func Getcwd(buf []byte) (n int, err error) { return } +var libc_getcwd_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getcwd getcwd "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, 
uintptr(fd), uintptr(req), uintptr(arg)) + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) } return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -412,17 +548,21 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -430,6 +570,10 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, return } +var libc_ppoll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ppoll ppoll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Access(path string, mode uint32) (err error) { @@ -438,23 +582,31 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_access_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_access access "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + _, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_adjtime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_adjtime adjtime "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chdir(path string) (err error) { @@ -463,13 +615,17 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chdir_trampoline_addr 
uintptr + +//go:cgo_import_dynamic libc_chdir chdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chflags(path string, flags int) (err error) { @@ -478,13 +634,17 @@ func Chflags(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chflags chflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chmod(path string, mode uint32) (err error) { @@ -493,13 +653,17 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chmod chmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chown(path string, uid int, gid int) (err error) { @@ -508,13 +672,17 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chown chown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chroot(path string) (err error) { @@ -523,27 +691,49 @@ func Chroot(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chroot_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chroot chroot "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_clock_gettime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_close_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_close close "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(fd int) (nfd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) nfd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -551,33 +741,49 @@ func Dup(fd int) (nfd int, err error) { return } +var libc_dup_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup dup "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(from int, to int) (err 
error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_dup2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup2 dup2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup3(from int, to int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_dup3_trampoline_addr, uintptr(from), uintptr(to), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_dup3_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup3 dup3 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT, uintptr(code), 0, 0) + syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) return } +var libc_exit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_exit exit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -586,43 +792,59 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_faccessat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_faccessat faccessat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchdir fchdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchflags(fd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchflags fchflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmod fchmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -631,23 +853,31 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmodat_trampoline_addr uintptr + +//go:cgo_import_dynamic 
libc_fchmodat fchmodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchown fchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { @@ -656,27 +886,35 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchownat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchownat fchownat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_flock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_flock flock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -684,16 +922,24 @@ func Fpathconf(fd int, name int) (val int, err error) { return } +var libc_fpathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstat fstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { @@ -702,71 +948,99 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = 
errnoErr(e1) } return } +var libc_fstatfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fsync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fsync fsync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length)) + _, _, e1 := syscall_syscall(libc_ftruncate_trampoline_addr, uintptr(fd), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ftruncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) egid = int(r0) return } +var libc_getegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getegid getegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_geteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_geteuid geteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) gid = int(r0) return } +var libc_getgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgid getgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) pgid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -774,34 +1048,50 @@ func Getpgid(pid int) (pgid int, err error) { return } +var libc_getpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgid getpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pgrp int) { - r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) pgrp = int(r0) return } +var libc_getpgrp_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) pid = int(r0) return } +var libc_getpid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpid getpid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) ppid = int(r0) return } +var libc_getppid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getppid getppid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 
:= Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0) prio = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -809,20 +1099,28 @@ func Getpriority(which int, who int) (prio int, err error) { return } +var libc_getpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpriority getpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrtable() (rtable int, err error) { - r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getrtable_trampoline_addr, 0, 0, 0) rtable = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -830,20 +1128,28 @@ func Getrtable() (rtable int, err error) { return } +var libc_getrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrtable getrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrusage_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrusage getrusage "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) sid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -851,46 +1157,66 @@ func Getsid(pid int) (sid int, err error) { return } +var libc_getsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsid getsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_gettimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_getuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getuid getuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { - r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + r0, _, _ := syscall_syscall(libc_issetugid_trampoline_addr, 0, 0, 0) tainted = bool(r0 != 0) return } +var libc_issetugid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_issetugid issetugid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kill(pid int, signum syscall.Signal) (err error) { 
- _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_kill_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kill kill "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kqueue() (fd int, err error) { - r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -898,6 +1224,10 @@ func Kqueue() (fd int, err error) { return } +var libc_kqueue_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kqueue kqueue "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lchown(path string, uid int, gid int) (err error) { @@ -906,13 +1236,17 @@ func Lchown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lchown lchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Link(path string, link string) (err error) { @@ -926,13 +1260,17 @@ func Link(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_link_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_link link "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { @@ -946,23 +1284,31 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err er if err != nil { return } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_linkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_linkat linkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_listen_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_listen listen "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lstat(path string, stat *Stat_t) (err error) { @@ -971,13 +1317,17 @@ func Lstat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lstat_trampoline_addr uintptr + 
+//go:cgo_import_dynamic libc_lstat lstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdir(path string, mode uint32) (err error) { @@ -986,13 +1336,17 @@ func Mkdir(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdir mkdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdirat(dirfd int, path string, mode uint32) (err error) { @@ -1001,13 +1355,17 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdirat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifo(path string, mode uint32) (err error) { @@ -1016,13 +1374,17 @@ func Mkfifo(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifo_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifoat(dirfd int, path string, mode uint32) (err error) { @@ -1031,13 +1393,17 @@ func Mkfifoat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkfifoat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifoat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknod(path string, mode uint32, dev int) (err error) { @@ -1046,13 +1412,17 @@ func Mknod(path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknod mknod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { @@ -1061,23 +1431,31 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + _, _, e1 := syscall_syscall6(libc_mknodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknodat_trampoline_addr uintptr + 
+//go:cgo_import_dynamic libc_mknodat mknodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_nanosleep_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Open(path string, mode int, perm uint32) (fd int, err error) { @@ -1086,7 +1464,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1094,6 +1472,10 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } +var libc_open_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_open open "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { @@ -1102,7 +1484,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1110,6 +1492,10 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { return } +var libc_openat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_openat openat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pathconf(path string, name int) (val int, err error) { @@ -1118,7 +1504,7 @@ func Pathconf(path string, name int) (val int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1126,6 +1512,10 @@ func Pathconf(path string, name int) (val int, err error) { return } +var libc_pathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pathconf pathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pread(fd int, p []byte, offset int64) (n int, err error) { @@ -1135,7 +1525,7 @@ func pread(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1143,6 +1533,10 @@ func pread(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pread_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pread pread "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pwrite(fd int, p []byte, 
offset int64) (n int, err error) { @@ -1152,7 +1546,7 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1160,6 +1554,10 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pwrite_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwrite pwrite "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func read(fd int, p []byte) (n int, err error) { @@ -1169,7 +1567,7 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1177,6 +1575,10 @@ func read(fd int, p []byte) (n int, err error) { return } +var libc_read_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_read read "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlink(path string, buf []byte) (n int, err error) { @@ -1191,7 +1593,7 @@ func Readlink(path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1199,6 +1601,10 @@ func Readlink(path string, buf []byte) (n int, err error) { return } +var libc_readlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlink readlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { @@ -1213,7 +1619,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1221,6 +1627,10 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { return } +var libc_readlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlinkat readlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rename(from string, to string) (err error) { @@ -1234,13 +1644,17 @@ func Rename(from string, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rename_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rename rename "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Renameat(fromfd int, from string, tofd int, to string) (err error) { @@ -1254,13 +1668,17 @@ func Renameat(fromfd int, from string, 
tofd int, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_renameat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameat renameat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Revoke(path string) (err error) { @@ -1269,13 +1687,17 @@ func Revoke(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_revoke_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_revoke revoke "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rmdir(path string) (err error) { @@ -1284,17 +1706,21 @@ func Rmdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rmdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rmdir rmdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, _, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(whence), 0, 0) + r0, _, e1 := syscall_syscall(libc_lseek_trampoline_addr, uintptr(fd), uintptr(offset), uintptr(whence)) newoffset = int64(r0) if e1 != 0 { err = errnoErr(e1) @@ -1302,10 +1728,14 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } +var libc_lseek_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lseek lseek "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1313,36 +1743,52 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err return } +var libc_select_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_select select "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setegid setegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seteuid(euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var 
libc_seteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_seteuid seteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(gid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgid setgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setlogin(name string) (err error) { @@ -1351,97 +1797,119 @@ func Setlogin(name string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setlogin_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setlogin setlogin "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpgid setpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpriority setpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setregid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setregid setregid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setreuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setreuid setreuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + _, _, e1 := syscall_rawSyscall(libc_setresgid_trampoline_addr, uintptr(rgid), uintptr(egid), uintptr(sgid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresgid setresgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + _, _, e1 := syscall_rawSyscall(libc_setresuid_trampoline_addr, uintptr(ruid), uintptr(euid), uintptr(suid)) if e1 != 0 { err 
= errnoErr(e1) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +var libc_setresuid_trampoline_addr uintptr -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} +//go:cgo_import_dynamic libc_setresuid setresuid "libc.so" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrtable(rtable int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrtable setrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1449,26 +1917,38 @@ func Setsid() (pid int, err error) { return } +var libc_setsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsid setsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_settimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setuid setuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Stat(path string, stat *Stat_t) (err error) { @@ -1477,13 +1957,17 @@ func Stat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_stat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_stat stat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Statfs(path string, stat *Statfs_t) (err error) { @@ -1492,13 +1976,17 @@ func Statfs(path string, stat *Statfs_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_statfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_statfs statfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlink(path string, link string) (err error) { @@ -1512,13 +2000,17 @@ func Symlink(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := 
syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlink symlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { @@ -1532,23 +2024,31 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlinkat symlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() (err error) { - _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sync sync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Truncate(path string, length int64) (err error) { @@ -1557,21 +2057,29 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length)) + _, _, e1 := syscall_syscall(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_truncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_truncate truncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(newmask int) (oldmask int) { - r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) oldmask = int(r0) return } +var libc_umask_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_umask umask "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlink(path string) (err error) { @@ -1580,13 +2088,17 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlink unlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlinkat(dirfd int, path string, flags int) (err error) { @@ -1595,13 +2107,17 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unmount(path string, flags int) (err error) { @@ -1610,13 +2126,17 @@ func Unmount(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNMOUNT, 
uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unmount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unmount unmount "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func write(fd int, p []byte) (n int, err error) { @@ -1626,7 +2146,7 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1634,10 +2154,14 @@ func write(fd int, p []byte) (n int, err error) { return } +var libc_write_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_write write "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), 0, 0) + r0, _, e1 := syscall_syscall6(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) ret = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -1645,20 +2169,28 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( return } +var libc_mmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mmap mmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munmap munmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1669,7 +2201,7 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1685,9 +2217,13 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error if err != nil { return } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } + +var libc_utimensat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" diff --git 
a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s new file mode 100644 index 000000000..a87126622 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s @@ -0,0 +1,664 @@ +// go run mkasm.go openbsd arm64 +// Code generated by the command above; DO NOT EDIT. + +#include "textflag.h" + +TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgroups(SB) +GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) + +TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgroups(SB) +GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) + +TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_wait4(SB) +GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 +DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) + +TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_accept(SB) +GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 +DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) + +TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_bind(SB) +GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 +DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) + +TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connect(SB) +GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) + +TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socket(SB) +GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) + +TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockopt(SB) +GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) + +TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsockopt(SB) +GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) + +TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpeername(SB) +GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) + +TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockname(SB) +GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) + +TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shutdown(SB) +GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) + +TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socketpair(SB) +GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) + +TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvfrom(SB) +GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) + +TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendto(SB) +GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) + +TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvmsg(SB) +GLOBL 
·libc_recvmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) + +TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendmsg(SB) +GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) + +TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kevent(SB) +GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) + +TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimes(SB) +GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) + +TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_futimes(SB) +GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) + +TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_poll(SB) +GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) + +TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_madvise(SB) +GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 +DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) + +TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlock(SB) +GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) + +TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlockall(SB) +GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) + +TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mprotect(SB) +GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) + +TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_msync(SB) +GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) + +TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlock(SB) +GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) + +TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlockall(SB) +GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) + +TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pipe2(SB) +GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pipe2_trampoline_addr(SB)/8, $libc_pipe2_trampoline<>(SB) + +TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getdents(SB) +GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getdents_trampoline_addr(SB)/8, $libc_getdents_trampoline<>(SB) + +TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) +GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) + +TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ioctl(SB) +GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + +TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + JMP 
libc_ppoll(SB) +GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ppoll_trampoline_addr(SB)/8, $libc_ppoll_trampoline<>(SB) + +TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_access(SB) +GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 +DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) + +TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_adjtime(SB) +GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) + +TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chdir(SB) +GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) + +TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chflags(SB) +GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) + +TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chmod(SB) +GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) + +TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chown(SB) +GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) + +TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chroot(SB) +GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) + +TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) +GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) + +TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_close(SB) +GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 +DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) + +TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup(SB) +GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) + +TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup2(SB) +GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) + +TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup3(SB) +GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup3_trampoline_addr(SB)/8, $libc_dup3_trampoline<>(SB) + +TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_exit(SB) +GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) + +TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_faccessat(SB) +GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) + +TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchdir(SB) +GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) + +TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchflags(SB) +GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) + +TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmod(SB) +GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) + +TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmodat(SB) 
+GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) + +TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchown(SB) +GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) + +TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchownat(SB) +GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) + +TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_flock(SB) +GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) + +TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fpathconf(SB) +GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) + +TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstat(SB) +GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) + +TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatat(SB) +GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) + +TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatfs(SB) +GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) + +TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fsync(SB) +GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) + +TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ftruncate(SB) +GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) + +TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getegid(SB) +GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) + +TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_geteuid(SB) +GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) + +TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgid(SB) +GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) + +TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgid(SB) +GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) + +TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgrp(SB) +GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) + +TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpid(SB) +GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) + +TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getppid(SB) +GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) + +TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpriority(SB) +GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) + +TEXT 
libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrlimit(SB) +GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) + +TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrtable(SB) +GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrtable_trampoline_addr(SB)/8, $libc_getrtable_trampoline<>(SB) + +TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrusage(SB) +GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) + +TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsid(SB) +GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) + +TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) +GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) + +TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getuid(SB) +GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) + +TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_issetugid(SB) +GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) + +TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kill(SB) +GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) + +TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kqueue(SB) +GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) + +TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lchown(SB) +GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) + +TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_link(SB) +GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 +DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) + +TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_linkat(SB) +GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) + +TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_listen(SB) +GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 +DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) + +TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lstat(SB) +GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) + +TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdir(SB) +GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) + +TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdirat(SB) +GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) + +TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifo(SB) +GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) + +TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifoat(SB) +GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_mkfifoat_trampoline_addr(SB)/8, $libc_mkfifoat_trampoline<>(SB) + +TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknod(SB) +GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) + +TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknodat(SB) +GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) + +TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_nanosleep(SB) +GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 +DATA ·libc_nanosleep_trampoline_addr(SB)/8, $libc_nanosleep_trampoline<>(SB) + +TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_open(SB) +GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 +DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) + +TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_openat(SB) +GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) + +TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pathconf(SB) +GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) + +TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pread(SB) +GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) + +TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwrite(SB) +GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) + +TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_read(SB) +GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 +DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) + +TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlink(SB) +GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) + +TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlinkat(SB) +GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) + +TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rename(SB) +GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) + +TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameat(SB) +GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) + +TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_revoke(SB) +GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 +DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) + +TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rmdir(SB) +GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) + +TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lseek(SB) +GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) + +TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_select(SB) +GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 +DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) + +TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setegid(SB) +GLOBL ·libc_setegid_trampoline_addr(SB), 
RODATA, $8 +DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) + +TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_seteuid(SB) +GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) + +TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgid(SB) +GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) + +TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setlogin(SB) +GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) + +TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpgid(SB) +GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) + +TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpriority(SB) +GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) + +TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setregid(SB) +GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) + +TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setreuid(SB) +GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) + +TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresgid(SB) +GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresgid_trampoline_addr(SB)/8, $libc_setresgid_trampoline<>(SB) + +TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresuid(SB) +GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) + +TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrtable(SB) +GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrtable_trampoline_addr(SB)/8, $libc_setrtable_trampoline<>(SB) + +TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsid(SB) +GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) + +TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_settimeofday(SB) +GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) + +TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setuid(SB) +GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) + +TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_stat(SB) +GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB) + +TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_statfs(SB) +GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB) + +TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlink(SB) +GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) + +TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlinkat(SB) +GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlinkat_trampoline_addr(SB)/8, 
$libc_symlinkat_trampoline<>(SB) + +TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sync(SB) +GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) + +TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_truncate(SB) +GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) + +TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_umask(SB) +GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 +DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) + +TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlink(SB) +GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) + +TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlinkat(SB) +GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) + +TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unmount(SB) +GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) + +TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_write(SB) +GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 +DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) + +TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mmap(SB) +GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) + +TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munmap(SB) +GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) + +TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) +GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index 016d959bc..7bc2e24eb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -openbsd -tags openbsd,mips64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_mips64.go +// go run mksyscall.go -openbsd -libc -tags openbsd,mips64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_mips64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && mips64 @@ -16,7 +16,7 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -24,20 +24,28 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } +var libc_getgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgroups getgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgroups setgroups "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -45,10 +53,14 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err return } +var libc_wait4_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_wait4 wait4 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -56,30 +68,42 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } +var libc_accept_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_accept accept "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_bind_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_bind bind "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_connect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connect connect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_SOCKET, 
uintptr(domain), uintptr(typ), uintptr(proto)) + r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -87,66 +111,94 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } +var libc_socket_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socket socket "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockopt getsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getpeername_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpeername getpeername "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getsockname_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockname getsockname "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(s int, how int) (err error) { - _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_shutdown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shutdown shutdown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = 
errnoErr(e1) } return } +var libc_socketpair_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socketpair socketpair "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { @@ -156,7 +208,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -164,6 +216,10 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl return } +var libc_recvfrom_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { @@ -173,17 +229,21 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sendto_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendto sendto "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -191,10 +251,14 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_recvmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -202,10 +266,14 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } +var libc_sendmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -213,6 +281,10 @@ func kevent(kq 
int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne return } +var libc_kevent_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kevent kevent "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func utimes(path string, timeval *[2]Timeval) (err error) { @@ -221,27 +293,35 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_utimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimes utimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_futimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_futimes futimes "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -249,6 +329,10 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { return } +var libc_poll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_poll poll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Madvise(b []byte, behav int) (err error) { @@ -258,13 +342,17 @@ func Madvise(b []byte, behav int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_madvise_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_madvise madvise "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlock(b []byte) (err error) { @@ -274,23 +362,31 @@ func Mlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlock mlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlockall mlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mprotect(b []byte, prot int) (err error) { @@ -300,13 +396,17 @@ func Mprotect(b []byte, prot int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), 
uintptr(prot)) + _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mprotect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mprotect mprotect "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Msync(b []byte, flags int) (err error) { @@ -316,13 +416,17 @@ func Msync(b []byte, flags int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_msync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_msync msync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlock(b []byte) (err error) { @@ -332,33 +436,45 @@ func Munlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlock munlock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlockall munlockall "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + _, _, e1 := syscall_rawSyscall(libc_pipe2_trampoline_addr, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_pipe2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getdents(fd int, buf []byte) (n int, err error) { @@ -368,7 +484,7 @@ func Getdents(fd int, buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_getdents_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -376,6 +492,10 @@ func Getdents(fd int, buf []byte) (n int, err error) { return } +var libc_getdents_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getdents getdents "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getcwd(buf []byte) (n int, err error) { @@ -385,7 +505,7 @@ func Getcwd(buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -393,16 +513,32 @@ func Getcwd(buf []byte) (n int, err error) { return } +var libc_getcwd_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getcwd getcwd "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, 
uintptr(fd), uintptr(req), uintptr(arg)) + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) } return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -412,17 +548,21 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -430,6 +570,10 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, return } +var libc_ppoll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ppoll ppoll "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Access(path string, mode uint32) (err error) { @@ -438,23 +582,31 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_access_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_access access "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + _, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_adjtime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_adjtime adjtime "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chdir(path string) (err error) { @@ -463,13 +615,17 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chdir_trampoline_addr 
uintptr + +//go:cgo_import_dynamic libc_chdir chdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chflags(path string, flags int) (err error) { @@ -478,13 +634,17 @@ func Chflags(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chflags chflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chmod(path string, mode uint32) (err error) { @@ -493,13 +653,17 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chmod chmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chown(path string, uid int, gid int) (err error) { @@ -508,13 +672,17 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chown chown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chroot(path string) (err error) { @@ -523,27 +691,49 @@ func Chroot(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_chroot_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chroot chroot "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_clock_gettime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_close_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_close close "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(fd int) (nfd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) + r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) nfd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -551,33 +741,49 @@ func Dup(fd int) (nfd int, err error) { return } +var libc_dup_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup dup "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(from int, to int) (err 
error) { - _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) + _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_dup2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup2 dup2 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup3(from int, to int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_dup3_trampoline_addr, uintptr(from), uintptr(to), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_dup3_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup3 dup3 "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - Syscall(SYS_EXIT, uintptr(code), 0, 0) + syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) return } +var libc_exit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_exit exit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -586,43 +792,59 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_faccessat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_faccessat faccessat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchdir fchdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchflags(fd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchflags fchflags "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmod fchmod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -631,23 +853,31 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchmodat_trampoline_addr uintptr + +//go:cgo_import_dynamic 
libc_fchmodat fchmodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchown fchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { @@ -656,27 +886,35 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fchownat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchownat fchownat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_flock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_flock flock "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -684,16 +922,24 @@ func Fpathconf(fd int, name int) (val int, err error) { return } +var libc_fpathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstat fstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { @@ -702,71 +948,99 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fstatat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = 
errnoErr(e1) } return } +var libc_fstatfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_fsync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fsync fsync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length)) + _, _, e1 := syscall_syscall(libc_ftruncate_trampoline_addr, uintptr(fd), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_ftruncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) egid = int(r0) return } +var libc_getegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getegid getegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_geteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_geteuid geteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) gid = int(r0) return } +var libc_getgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgid getgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) pgid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -774,34 +1048,50 @@ func Getpgid(pid int) (pgid int, err error) { return } +var libc_getpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgid getpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pgrp int) { - r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) pgrp = int(r0) return } +var libc_getpgrp_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) pid = int(r0) return } +var libc_getpid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpid getpid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) ppid = int(r0) return } +var libc_getppid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getppid getppid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 
:= Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0) prio = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -809,20 +1099,28 @@ func Getpriority(which int, who int) (prio int, err error) { return } +var libc_getpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpriority getpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrtable() (rtable int, err error) { - r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getrtable_trampoline_addr, 0, 0, 0) rtable = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -830,20 +1128,28 @@ func Getrtable() (rtable int, err error) { return } +var libc_getrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrtable getrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_getrusage_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrusage getrusage "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) sid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -851,46 +1157,66 @@ func Getsid(pid int) (sid int, err error) { return } +var libc_getsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsid getsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_gettimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) + r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) uid = int(r0) return } +var libc_getuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getuid getuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { - r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) + r0, _, _ := syscall_syscall(libc_issetugid_trampoline_addr, 0, 0, 0) tainted = bool(r0 != 0) return } +var libc_issetugid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_issetugid issetugid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kill(pid int, signum syscall.Signal) (err error) { 
- _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) + _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_kill_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kill kill "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kqueue() (fd int, err error) { - r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) + r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -898,6 +1224,10 @@ func Kqueue() (fd int, err error) { return } +var libc_kqueue_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kqueue kqueue "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lchown(path string, uid int, gid int) (err error) { @@ -906,13 +1236,17 @@ func Lchown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lchown lchown "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Link(path string, link string) (err error) { @@ -926,13 +1260,17 @@ func Link(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_link_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_link link "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { @@ -946,23 +1284,31 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err er if err != nil { return } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_linkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_linkat linkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) + _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_listen_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_listen listen "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lstat(path string, stat *Stat_t) (err error) { @@ -971,13 +1317,17 @@ func Lstat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_lstat_trampoline_addr uintptr + 
+//go:cgo_import_dynamic libc_lstat lstat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdir(path string, mode uint32) (err error) { @@ -986,13 +1336,17 @@ func Mkdir(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdir mkdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdirat(dirfd int, path string, mode uint32) (err error) { @@ -1001,13 +1355,17 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkdirat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifo(path string, mode uint32) (err error) { @@ -1016,13 +1374,17 @@ func Mkfifo(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifo_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifoat(dirfd int, path string, mode uint32) (err error) { @@ -1031,13 +1393,17 @@ func Mkfifoat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := syscall_syscall(libc_mkfifoat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mkfifoat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknod(path string, mode uint32, dev int) (err error) { @@ -1046,13 +1412,17 @@ func Mknod(path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknod mknod "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { @@ -1061,23 +1431,31 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + _, _, e1 := syscall_syscall6(libc_mknodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_mknodat_trampoline_addr uintptr + 
+//go:cgo_import_dynamic libc_mknodat mknodat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_nanosleep_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Open(path string, mode int, perm uint32) (fd int, err error) { @@ -1086,7 +1464,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1094,6 +1472,10 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } +var libc_open_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_open open "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { @@ -1102,7 +1484,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1110,6 +1492,10 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { return } +var libc_openat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_openat openat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pathconf(path string, name int) (val int, err error) { @@ -1118,7 +1504,7 @@ func Pathconf(path string, name int) (val int, err error) { if err != nil { return } - r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1126,6 +1512,10 @@ func Pathconf(path string, name int) (val int, err error) { return } +var libc_pathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pathconf pathconf "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pread(fd int, p []byte, offset int64) (n int, err error) { @@ -1135,7 +1525,7 @@ func pread(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1143,6 +1533,10 @@ func pread(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pread_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pread pread "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pwrite(fd int, p []byte, 
offset int64) (n int, err error) { @@ -1152,7 +1546,7 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) + r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1160,6 +1554,10 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) { return } +var libc_pwrite_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwrite pwrite "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func read(fd int, p []byte) (n int, err error) { @@ -1169,7 +1567,7 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1177,6 +1575,10 @@ func read(fd int, p []byte) (n int, err error) { return } +var libc_read_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_read read "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlink(path string, buf []byte) (n int, err error) { @@ -1191,7 +1593,7 @@ func Readlink(path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1199,6 +1601,10 @@ func Readlink(path string, buf []byte) (n int, err error) { return } +var libc_readlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlink readlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { @@ -1213,7 +1619,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1221,6 +1627,10 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { return } +var libc_readlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlinkat readlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rename(from string, to string) (err error) { @@ -1234,13 +1644,17 @@ func Rename(from string, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rename_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rename rename "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Renameat(fromfd int, from string, tofd int, to string) (err error) { @@ -1254,13 +1668,17 @@ func Renameat(fromfd int, from string, 
tofd int, to string) (err error) { if err != nil { return } - _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_renameat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameat renameat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Revoke(path string) (err error) { @@ -1269,13 +1687,17 @@ func Revoke(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_revoke_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_revoke revoke "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rmdir(path string) (err error) { @@ -1284,17 +1706,21 @@ func Rmdir(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_rmdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rmdir rmdir "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, _, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(whence), 0, 0) + r0, _, e1 := syscall_syscall(libc_lseek_trampoline_addr, uintptr(fd), uintptr(offset), uintptr(whence)) newoffset = int64(r0) if e1 != 0 { err = errnoErr(e1) @@ -1302,10 +1728,14 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } +var libc_lseek_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lseek lseek "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1313,36 +1743,52 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err return } +var libc_select_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_select select "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setegid setegid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seteuid(euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var 
libc_seteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_seteuid seteuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(gid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgid setgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setlogin(name string) (err error) { @@ -1351,97 +1797,119 @@ func Setlogin(name string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setlogin_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setlogin setlogin "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpgid setpgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpriority setpriority "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) + _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setregid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setregid setregid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) + _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setreuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setreuid setreuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) + _, _, e1 := syscall_rawSyscall(libc_setresgid_trampoline_addr, uintptr(rgid), uintptr(egid), uintptr(sgid)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresgid setresgid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) + _, _, e1 := syscall_rawSyscall(libc_setresuid_trampoline_addr, uintptr(ruid), uintptr(euid), uintptr(suid)) if e1 != 0 { err 
= errnoErr(e1) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +var libc_setresuid_trampoline_addr uintptr -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} +//go:cgo_import_dynamic libc_setresuid setresuid "libc.so" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrtable(rtable int) (err error) { - _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrtable setrtable "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) pid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1449,26 +1917,38 @@ func Setsid() (pid int, err error) { return } +var libc_setsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsid setsid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_settimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) + _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_setuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setuid setuid "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Stat(path string, stat *Stat_t) (err error) { @@ -1477,13 +1957,17 @@ func Stat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_stat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_stat stat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Statfs(path string, stat *Statfs_t) (err error) { @@ -1492,13 +1976,17 @@ func Statfs(path string, stat *Statfs_t) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_statfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_statfs statfs "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlink(path string, link string) (err error) { @@ -1512,13 +2000,17 @@ func Symlink(path string, link string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := 
syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlink symlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { @@ -1532,23 +2024,31 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) if e1 != 0 { err = errnoErr(e1) } return } +var libc_symlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlinkat symlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() (err error) { - _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) + _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_sync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sync sync "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Truncate(path string, length int64) (err error) { @@ -1557,21 +2057,29 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length)) + _, _, e1 := syscall_syscall(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_truncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_truncate truncate "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(newmask int) (oldmask int) { - r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) + r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) oldmask = int(r0) return } +var libc_umask_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_umask umask "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlink(path string) (err error) { @@ -1580,13 +2088,17 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlink unlink "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlinkat(dirfd int, path string, flags int) (err error) { @@ -1595,13 +2107,17 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unmount(path string, flags int) (err error) { @@ -1610,13 +2126,17 @@ func Unmount(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := Syscall(SYS_UNMOUNT, 
uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_unmount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unmount unmount "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func write(fd int, p []byte) (n int, err error) { @@ -1626,7 +2146,7 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1634,10 +2154,14 @@ func write(fd int, p []byte) (n int, err error) { return } +var libc_write_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_write write "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), 0, 0) + r0, _, e1 := syscall_syscall6(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) ret = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -1645,20 +2169,28 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( return } +var libc_mmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mmap mmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } +var libc_munmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munmap munmap "libc.so" + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1669,7 +2201,7 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1685,9 +2217,13 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error if err != nil { return } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } + +var libc_utimensat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" diff --git 
a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s new file mode 100644 index 000000000..05d4bffd7 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s @@ -0,0 +1,664 @@ +// go run mkasm.go openbsd mips64 +// Code generated by the command above; DO NOT EDIT. + +#include "textflag.h" + +TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgroups(SB) +GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) + +TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgroups(SB) +GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) + +TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_wait4(SB) +GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 +DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) + +TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_accept(SB) +GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 +DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) + +TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_bind(SB) +GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 +DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) + +TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connect(SB) +GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) + +TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socket(SB) +GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) + +TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockopt(SB) +GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) + +TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsockopt(SB) +GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) + +TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpeername(SB) +GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) + +TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockname(SB) +GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) + +TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shutdown(SB) +GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) + +TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socketpair(SB) +GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) + +TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvfrom(SB) +GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) + +TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendto(SB) +GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) + +TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvmsg(SB) +GLOBL 
·libc_recvmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) + +TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendmsg(SB) +GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) + +TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kevent(SB) +GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) + +TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimes(SB) +GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) + +TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_futimes(SB) +GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) + +TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_poll(SB) +GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) + +TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_madvise(SB) +GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 +DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) + +TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlock(SB) +GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) + +TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlockall(SB) +GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) + +TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mprotect(SB) +GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) + +TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_msync(SB) +GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) + +TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlock(SB) +GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) + +TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlockall(SB) +GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) + +TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pipe2(SB) +GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pipe2_trampoline_addr(SB)/8, $libc_pipe2_trampoline<>(SB) + +TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getdents(SB) +GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getdents_trampoline_addr(SB)/8, $libc_getdents_trampoline<>(SB) + +TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) +GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) + +TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ioctl(SB) +GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + +TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + JMP 
libc_ppoll(SB) +GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ppoll_trampoline_addr(SB)/8, $libc_ppoll_trampoline<>(SB) + +TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_access(SB) +GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 +DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) + +TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_adjtime(SB) +GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) + +TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chdir(SB) +GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) + +TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chflags(SB) +GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) + +TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chmod(SB) +GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) + +TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chown(SB) +GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) + +TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chroot(SB) +GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) + +TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) +GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) + +TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_close(SB) +GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 +DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) + +TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup(SB) +GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) + +TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup2(SB) +GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) + +TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup3(SB) +GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup3_trampoline_addr(SB)/8, $libc_dup3_trampoline<>(SB) + +TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_exit(SB) +GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) + +TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_faccessat(SB) +GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) + +TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchdir(SB) +GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) + +TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchflags(SB) +GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) + +TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmod(SB) +GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) + +TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmodat(SB) 
+GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) + +TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchown(SB) +GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) + +TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchownat(SB) +GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) + +TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_flock(SB) +GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) + +TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fpathconf(SB) +GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) + +TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstat(SB) +GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) + +TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatat(SB) +GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) + +TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatfs(SB) +GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) + +TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fsync(SB) +GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) + +TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ftruncate(SB) +GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) + +TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getegid(SB) +GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) + +TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_geteuid(SB) +GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) + +TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgid(SB) +GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) + +TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgid(SB) +GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) + +TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgrp(SB) +GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) + +TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpid(SB) +GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) + +TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getppid(SB) +GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) + +TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpriority(SB) +GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) + +TEXT 
libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrlimit(SB) +GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) + +TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrtable(SB) +GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrtable_trampoline_addr(SB)/8, $libc_getrtable_trampoline<>(SB) + +TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrusage(SB) +GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) + +TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsid(SB) +GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) + +TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) +GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) + +TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getuid(SB) +GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) + +TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_issetugid(SB) +GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) + +TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kill(SB) +GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) + +TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kqueue(SB) +GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) + +TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lchown(SB) +GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) + +TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_link(SB) +GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 +DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) + +TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_linkat(SB) +GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) + +TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_listen(SB) +GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 +DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) + +TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lstat(SB) +GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) + +TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdir(SB) +GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) + +TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdirat(SB) +GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) + +TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifo(SB) +GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) + +TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifoat(SB) +GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_mkfifoat_trampoline_addr(SB)/8, $libc_mkfifoat_trampoline<>(SB) + +TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknod(SB) +GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) + +TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknodat(SB) +GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) + +TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_nanosleep(SB) +GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 +DATA ·libc_nanosleep_trampoline_addr(SB)/8, $libc_nanosleep_trampoline<>(SB) + +TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_open(SB) +GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 +DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) + +TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_openat(SB) +GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) + +TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pathconf(SB) +GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) + +TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pread(SB) +GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) + +TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwrite(SB) +GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) + +TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_read(SB) +GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 +DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) + +TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlink(SB) +GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) + +TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlinkat(SB) +GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) + +TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rename(SB) +GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) + +TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameat(SB) +GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) + +TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_revoke(SB) +GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 +DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) + +TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rmdir(SB) +GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) + +TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lseek(SB) +GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) + +TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_select(SB) +GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 +DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) + +TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setegid(SB) +GLOBL ·libc_setegid_trampoline_addr(SB), 
RODATA, $8 +DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) + +TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_seteuid(SB) +GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) + +TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgid(SB) +GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) + +TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setlogin(SB) +GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) + +TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpgid(SB) +GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) + +TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpriority(SB) +GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) + +TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setregid(SB) +GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) + +TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setreuid(SB) +GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) + +TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresgid(SB) +GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresgid_trampoline_addr(SB)/8, $libc_setresgid_trampoline<>(SB) + +TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresuid(SB) +GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) + +TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrtable(SB) +GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrtable_trampoline_addr(SB)/8, $libc_setrtable_trampoline<>(SB) + +TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsid(SB) +GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) + +TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_settimeofday(SB) +GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) + +TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setuid(SB) +GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) + +TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_stat(SB) +GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB) + +TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_statfs(SB) +GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB) + +TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlink(SB) +GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) + +TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlinkat(SB) +GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlinkat_trampoline_addr(SB)/8, 
$libc_symlinkat_trampoline<>(SB) + +TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sync(SB) +GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) + +TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_truncate(SB) +GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) + +TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_umask(SB) +GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 +DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) + +TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlink(SB) +GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) + +TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlinkat(SB) +GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) + +TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unmount(SB) +GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) + +TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_write(SB) +GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 +DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) + +TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mmap(SB) +GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) + +TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munmap(SB) +GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) + +TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) +GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go new file mode 100644 index 000000000..739be6217 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -0,0 +1,2229 @@ +// go run mksyscall.go -openbsd -libc -tags openbsd,ppc64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_ppc64.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +//go:build openbsd && ppc64 +// +build openbsd,ppc64 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgroups getgroups "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgroups setgroups "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_wait4_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_wait4 wait4 "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_accept_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_accept accept "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_bind_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_bind bind "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connect connect "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_socket_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socket socket "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic 
libc_getsockopt getsockopt "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getpeername_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpeername getpeername "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getsockname_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockname getsockname "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_shutdown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shutdown shutdown "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_socketpair_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socketpair socketpair "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_recvfrom_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_sendto_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendto sendto "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_recvmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_sendmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_kevent_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kevent kevent "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_utimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimes utimes "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_futimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_futimes futimes "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_poll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_poll poll "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, behav int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_madvise_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_madvise madvise "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mlock_trampoline_addr uintptr + 
+//go:cgo_import_dynamic libc_mlock mlock "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlockall mlockall "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mprotect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mprotect mprotect "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_msync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_msync msync "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_munlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlock munlock "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_munlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlockall munlockall "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_pipe2_trampoline_addr, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pipe2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_getdents_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getdents_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getdents getdents "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getcwd_trampoline_addr uintptr 
+ +//go:cgo_import_dynamic libc_getcwd getcwd "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_ppoll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ppoll ppoll "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_access_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_access access "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_adjtime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_adjtime adjtime "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chdir chdir "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chflags_trampoline_addr uintptr + +//go:cgo_import_dynamic 
libc_chflags chflags "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chmod chmod "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chown chown "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chroot_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chroot chroot "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_clock_gettime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_close_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_close close "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_dup_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup dup "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_dup2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup2 dup2 "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup3(from int, to int, flags int) (err error) { + _, _, e1 := syscall_syscall(libc_dup3_trampoline_addr, uintptr(from), uintptr(to), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_dup3_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup3 dup3 "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) + return +} + +var libc_exit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_exit exit "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + 
+func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_faccessat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_faccessat faccessat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchdir fchdir "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchflags fchflags "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmod fchmod "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchmodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmodat fchmodat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchown fchown "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchownat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchownat fchownat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_flock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_flock flock "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) 
+ val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fpathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstat fstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fstatat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fstatfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fsync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fsync fsync "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := syscall_syscall(libc_ftruncate_trampoline_addr, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_ftruncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) + egid = int(r0) + return +} + +var libc_getegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getegid getegid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) + uid = int(r0) + return +} + +var libc_geteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_geteuid geteuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) + gid = int(r0) + return +} + +var libc_getgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgid getgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgid getpgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ 
:= syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) + pgrp = int(r0) + return +} + +var libc_getpgrp_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) + pid = int(r0) + return +} + +var libc_getpid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpid getpid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) + ppid = int(r0) + return +} + +var libc_getppid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getppid getppid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpriority getpriority "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrtable() (rtable int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_getrtable_trampoline_addr, 0, 0, 0) + rtable = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrtable getrtable "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getrusage_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrusage getrusage "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsid getsid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_gettimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) + uid = int(r0) + return +} + +var libc_getuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getuid getuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := 
syscall_syscall(libc_issetugid_trampoline_addr, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +var libc_issetugid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_issetugid issetugid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_kill_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kill kill "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_kqueue_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kqueue kqueue "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_lchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lchown lchown "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_link_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_link link "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_linkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_linkat linkat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_listen_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_listen listen "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_lstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lstat lstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + 
var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mkdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdir mkdir "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mkdirat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mkfifo_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifoat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_mkfifoat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mkfifoat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mknod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknod mknod "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mknodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mknodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknodat mknodat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_nanosleep_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 
uintptr(perm)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_open_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_open open "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_openat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_openat openat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pathconf pathconf "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pread_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pread pread "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pwrite_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwrite pwrite "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_read_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_read read "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_readlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlink readlink "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, 
buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_readlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlinkat readlinkat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_rename_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rename rename "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(fromfd int, from string, tofd int, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renameat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameat renameat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_revoke_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_revoke revoke "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_rmdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rmdir rmdir "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, _, e1 := syscall_syscall(libc_lseek_trampoline_addr, uintptr(fd), uintptr(offset), uintptr(whence)) + newoffset = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_lseek_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lseek lseek "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_select_trampoline_addr uintptr + +//go:cgo_import_dynamic 
libc_select select "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setegid setegid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_seteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_seteuid seteuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgid setgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setlogin_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setlogin setlogin "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpgid setpgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpriority setpriority "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setregid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setregid setregid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setreuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setreuid setreuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setresgid_trampoline_addr, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresgid setresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err 
error) { + _, _, e1 := syscall_rawSyscall(libc_setresuid_trampoline_addr, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresuid setresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrtable(rtable int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrtable setrtable "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsid setsid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_settimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setuid setuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_stat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_stat stat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_statfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_statfs statfs "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_symlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlink symlink "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return 
+ } + _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_symlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlinkat symlinkat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_sync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sync sync "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_truncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_truncate truncate "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +var libc_umask_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_umask umask "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlink unlink "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unmount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unmount unmount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_write_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_write write "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := 
syscall_syscall6(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mmap mmap "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_munmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munmap munmap "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_utimensat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s new file mode 100644 index 000000000..74a25f8d6 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s @@ -0,0 +1,796 @@ +// go run mkasm.go openbsd ppc64 +// Code generated by the command above; DO NOT EDIT. 
+ +#include "textflag.h" + +TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getgroups(SB) + RET +GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) + +TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setgroups(SB) + RET +GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) + +TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_wait4(SB) + RET +GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 +DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) + +TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_accept(SB) + RET +GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 +DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) + +TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_bind(SB) + RET +GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 +DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) + +TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_connect(SB) + RET +GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) + +TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_socket(SB) + RET +GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) + +TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getsockopt(SB) + RET +GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) + +TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setsockopt(SB) + RET +GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) + +TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getpeername(SB) + RET +GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) + +TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getsockname(SB) + RET +GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) + +TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_shutdown(SB) + RET +GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) + +TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_socketpair(SB) + RET +GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) + +TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_recvfrom(SB) + RET +GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) + +TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_sendto(SB) + RET +GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) + +TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_recvmsg(SB) + RET +GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) + +TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_sendmsg(SB) + RET +GLOBL ·libc_sendmsg_trampoline_addr(SB), 
RODATA, $8 +DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) + +TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_kevent(SB) + RET +GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) + +TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_utimes(SB) + RET +GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) + +TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_futimes(SB) + RET +GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) + +TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_poll(SB) + RET +GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) + +TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_madvise(SB) + RET +GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 +DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) + +TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mlock(SB) + RET +GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) + +TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mlockall(SB) + RET +GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) + +TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mprotect(SB) + RET +GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) + +TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_msync(SB) + RET +GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) + +TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_munlock(SB) + RET +GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) + +TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_munlockall(SB) + RET +GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) + +TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_pipe2(SB) + RET +GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pipe2_trampoline_addr(SB)/8, $libc_pipe2_trampoline<>(SB) + +TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getdents(SB) + RET +GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getdents_trampoline_addr(SB)/8, $libc_getdents_trampoline<>(SB) + +TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getcwd(SB) + RET +GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) + +TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_ioctl(SB) + RET +GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_sysctl(SB) + RET +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + +TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_ppoll(SB) + RET +GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ppoll_trampoline_addr(SB)/8, 
$libc_ppoll_trampoline<>(SB) + +TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_access(SB) + RET +GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 +DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) + +TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_adjtime(SB) + RET +GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) + +TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_chdir(SB) + RET +GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) + +TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_chflags(SB) + RET +GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) + +TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_chmod(SB) + RET +GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) + +TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_chown(SB) + RET +GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) + +TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_chroot(SB) + RET +GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) + +TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_clock_gettime(SB) + RET +GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) + +TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_close(SB) + RET +GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 +DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) + +TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_dup(SB) + RET +GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) + +TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_dup2(SB) + RET +GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) + +TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_dup3(SB) + RET +GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup3_trampoline_addr(SB)/8, $libc_dup3_trampoline<>(SB) + +TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_exit(SB) + RET +GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) + +TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_faccessat(SB) + RET +GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) + +TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fchdir(SB) + RET +GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) + +TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fchflags(SB) + RET +GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) + +TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fchmod(SB) + RET +GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) + +TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 + CALL 
libc_fchmodat(SB) + RET +GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) + +TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fchown(SB) + RET +GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) + +TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fchownat(SB) + RET +GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) + +TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_flock(SB) + RET +GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) + +TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fpathconf(SB) + RET +GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) + +TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fstat(SB) + RET +GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) + +TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fstatat(SB) + RET +GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) + +TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fstatfs(SB) + RET +GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) + +TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fsync(SB) + RET +GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) + +TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_ftruncate(SB) + RET +GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) + +TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getegid(SB) + RET +GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) + +TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_geteuid(SB) + RET +GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) + +TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getgid(SB) + RET +GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) + +TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getpgid(SB) + RET +GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) + +TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getpgrp(SB) + RET +GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) + +TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getpid(SB) + RET +GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) + +TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getppid(SB) + RET +GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) + +TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getpriority(SB) + RET 
+GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) + +TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getrlimit(SB) + RET +GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) + +TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getrtable(SB) + RET +GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrtable_trampoline_addr(SB)/8, $libc_getrtable_trampoline<>(SB) + +TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getrusage(SB) + RET +GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) + +TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getsid(SB) + RET +GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) + +TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_gettimeofday(SB) + RET +GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) + +TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getuid(SB) + RET +GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) + +TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_issetugid(SB) + RET +GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) + +TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_kill(SB) + RET +GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) + +TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_kqueue(SB) + RET +GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) + +TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_lchown(SB) + RET +GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) + +TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_link(SB) + RET +GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 +DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) + +TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_linkat(SB) + RET +GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) + +TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_listen(SB) + RET +GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 +DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) + +TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_lstat(SB) + RET +GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) + +TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mkdir(SB) + RET +GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) + +TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mkdirat(SB) + RET +GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) + +TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mkfifo(SB) + RET +GLOBL 
·libc_mkfifo_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) + +TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mkfifoat(SB) + RET +GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifoat_trampoline_addr(SB)/8, $libc_mkfifoat_trampoline<>(SB) + +TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mknod(SB) + RET +GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) + +TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mknodat(SB) + RET +GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) + +TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_nanosleep(SB) + RET +GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 +DATA ·libc_nanosleep_trampoline_addr(SB)/8, $libc_nanosleep_trampoline<>(SB) + +TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_open(SB) + RET +GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 +DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) + +TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_openat(SB) + RET +GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) + +TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_pathconf(SB) + RET +GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) + +TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_pread(SB) + RET +GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) + +TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_pwrite(SB) + RET +GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) + +TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_read(SB) + RET +GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 +DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) + +TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_readlink(SB) + RET +GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) + +TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_readlinkat(SB) + RET +GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) + +TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_rename(SB) + RET +GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) + +TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_renameat(SB) + RET +GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) + +TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_revoke(SB) + RET +GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 +DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) + +TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_rmdir(SB) + RET +GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) + +TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_lseek(SB) + RET +GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) + +TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_select(SB) + RET +GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 +DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) + +TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setegid(SB) + RET +GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) + +TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_seteuid(SB) + RET +GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) + +TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setgid(SB) + RET +GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) + +TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setlogin(SB) + RET +GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) + +TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setpgid(SB) + RET +GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) + +TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setpriority(SB) + RET +GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) + +TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setregid(SB) + RET +GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) + +TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setreuid(SB) + RET +GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) + +TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setresgid(SB) + RET +GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresgid_trampoline_addr(SB)/8, $libc_setresgid_trampoline<>(SB) + +TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setresuid(SB) + RET +GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) + +TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setrtable(SB) + RET +GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrtable_trampoline_addr(SB)/8, $libc_setrtable_trampoline<>(SB) + +TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setsid(SB) + RET +GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) + +TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_settimeofday(SB) + RET +GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) + +TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_setuid(SB) + RET +GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) + +TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_stat(SB) + RET +GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB) + +TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_statfs(SB) + RET +GLOBL 
·libc_statfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB) + +TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_symlink(SB) + RET +GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) + +TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_symlinkat(SB) + RET +GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) + +TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_sync(SB) + RET +GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) + +TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_truncate(SB) + RET +GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) + +TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_umask(SB) + RET +GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 +DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) + +TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_unlink(SB) + RET +GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) + +TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_unlinkat(SB) + RET +GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) + +TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_unmount(SB) + RET +GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) + +TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_write(SB) + RET +GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 +DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) + +TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mmap(SB) + RET +GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) + +TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_munmap(SB) + RET +GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) + +TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_utimensat(SB) + RET +GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go new file mode 100644 index 000000000..7d95a1978 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -0,0 +1,2229 @@ +// go run mksyscall.go -openbsd -libc -tags openbsd,riscv64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_riscv64.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +//go:build openbsd && riscv64 +// +build openbsd,riscv64 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getgroups(ngid int, gid *_Gid_t) (n int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgroups getgroups "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setgroups(ngid int, gid *_Gid_t) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setgroups_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgroups setgroups "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_wait4_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_wait4 wait4 "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { + r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_accept_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_accept accept "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_bind_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_bind bind "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { + _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connect connect "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socket(domain int, typ int, proto int) (fd int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_socket_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socket socket "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { + _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic 
libc_getsockopt getsockopt "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { + _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setsockopt_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getpeername_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpeername getpeername "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { + _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getsockname_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsockname getsockname "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Shutdown(s int, how int) (err error) { + _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_shutdown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_shutdown shutdown "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { + _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_socketpair_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_socketpair socketpair "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_recvfrom_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_sendto_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendto sendto "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT + +func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_recvmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_sendmsg_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { + r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_kevent_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kevent kevent "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimes(path string, timeval *[2]Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_utimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimes utimes "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func futimes(fd int, timeval *[2]Timeval) (err error) { + _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_futimes_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_futimes futimes "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_poll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_poll poll "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, behav int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_madvise_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_madvise madvise "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mlock_trampoline_addr uintptr + 
+//go:cgo_import_dynamic libc_mlock mlock "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mlockall mlockall "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mprotect_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mprotect mprotect "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_msync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_msync msync "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_munlock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlock munlock "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_munlockall_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munlockall munlockall "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_pipe2_trampoline_addr, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pipe2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_getdents_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getdents_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getdents getdents "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getcwd_trampoline_addr uintptr 
+ +//go:cgo_import_dynamic libc_getcwd getcwd "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_sysctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_ppoll_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ppoll ppoll "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Access(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_access_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_access access "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { + _, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_adjtime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_adjtime adjtime "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chdir chdir "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chflags(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chflags_trampoline_addr uintptr + +//go:cgo_import_dynamic 
libc_chflags chflags "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chmod(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chmod chmod "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chown chown "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_chroot_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_chroot chroot "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_clock_gettime_trampoline_addr, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_clock_gettime_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_close_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_close close "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(fd int) (nfd int, err error) { + r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) + nfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_dup_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup dup "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup2(from int, to int) (err error) { + _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_dup2_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup2 dup2 "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup3(from int, to int, flags int) (err error) { + _, _, e1 := syscall_syscall(libc_dup3_trampoline_addr, uintptr(from), uintptr(to), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_dup3_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_dup3 dup3 "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) + return +} + +var libc_exit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_exit exit "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + 
+func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_faccessat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_faccessat faccessat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchdir fchdir "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchflags(fd int, flags int) (err error) { + _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchflags_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchflags fchflags "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchmod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmod fchmod "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchmodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchmodat fchmodat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + _, _, e1 := syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchown fchown "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fchownat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fchownat fchownat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_flock_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_flock flock "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fpathconf(fd int, name int) (val int, err error) { + r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) 
+ val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fpathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstat(fd int, stat *Stat_t) (err error) { + _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstat fstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fstatat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatfs(fd int, stat *Statfs_t) (err error) { + _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fstatfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fsync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fsync fsync "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Ftruncate(fd int, length int64) (err error) { + _, _, e1 := syscall_syscall(libc_ftruncate_trampoline_addr, uintptr(fd), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_ftruncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) + egid = int(r0) + return +} + +var libc_getegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getegid getegid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (uid int) { + r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) + uid = int(r0) + return +} + +var libc_geteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_geteuid geteuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) + gid = int(r0) + return +} + +var libc_getgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getgid getgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgid getpgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgrp() (pgrp int) { + r0, _, _ 
:= syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) + pgrp = int(r0) + return +} + +var libc_getpgrp_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) + pid = int(r0) + return +} + +var libc_getpid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpid getpid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) + ppid = int(r0) + return +} + +var libc_getppid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getppid getppid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getpriority getpriority "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(which int, lim *Rlimit) (err error) { + _, _, e1 := syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getrlimit_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrtable() (rtable int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_getrtable_trampoline_addr, 0, 0, 0) + rtable = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrtable getrtable "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getrusage_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getrusage getrusage "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getsid getsid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettimeofday(tv *Timeval) (err error) { + _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_gettimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) + uid = int(r0) + return +} + +var libc_getuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getuid getuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Issetugid() (tainted bool) { + r0, _, _ := 
syscall_syscall(libc_issetugid_trampoline_addr, 0, 0, 0) + tainted = bool(r0 != 0) + return +} + +var libc_issetugid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_issetugid issetugid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, signum syscall.Signal) (err error) { + _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_kill_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kill kill "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kqueue() (fd int, err error) { + r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_kqueue_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_kqueue kqueue "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_lchown_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lchown lchown "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_link_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_link link "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_linkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_linkat linkat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, backlog int) (err error) { + _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_listen_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_listen listen "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lstat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_lstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lstat lstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + 
var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mkdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdir mkdir "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mkdirat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mkfifo_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifoat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_mkfifoat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mkfifoat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mknod_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknod mknod "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mknodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mknodat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mknodat mknodat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_nanosleep_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Open(path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 
uintptr(perm)) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_open_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_open open "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_openat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_openat openat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pathconf(path string, name int) (val int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pathconf_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pathconf pathconf "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pread_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pread pread "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pwrite_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwrite pwrite "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_read_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_read read "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_readlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlink readlink "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, 
buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_readlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readlinkat readlinkat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rename(from string, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_rename_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rename rename "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat(fromfd int, from string, tofd int, to string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renameat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameat renameat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Revoke(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_revoke_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_revoke revoke "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Rmdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_rmdir_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_rmdir rmdir "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { + r0, _, e1 := syscall_syscall(libc_lseek_trampoline_addr, uintptr(fd), uintptr(offset), uintptr(whence)) + newoffset = int64(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_lseek_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_lseek lseek "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_select_trampoline_addr uintptr + +//go:cgo_import_dynamic 
libc_select select "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setegid(egid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setegid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setegid setegid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_seteuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_seteuid seteuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setgid(gid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setgid setgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setlogin(name string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setlogin_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setlogin setlogin "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setpgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpgid setpgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setpriority_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setpriority setpriority "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setregid(rgid int, egid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setregid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setregid setregid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setreuid(ruid int, euid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setreuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setreuid setreuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresgid(rgid int, egid int, sgid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setresgid_trampoline_addr, uintptr(rgid), uintptr(egid), uintptr(sgid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setresgid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresgid setresgid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setresuid(ruid int, euid int, suid int) (err 
error) { + _, _, e1 := syscall_rawSyscall(libc_setresuid_trampoline_addr, uintptr(ruid), uintptr(euid), uintptr(suid)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setresuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setresuid setresuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setrtable(rtable int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setrtable_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setrtable setrtable "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setsid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setsid setsid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tp *Timeval) (err error) { + _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_settimeofday_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setuid(uid int) (err error) { + _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_setuid_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_setuid setuid "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Stat(path string, stat *Stat_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_stat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_stat stat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statfs(path string, stat *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_statfs_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_statfs statfs "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlink(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_symlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlink symlink "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return 
+ } + _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_symlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_symlinkat symlinkat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() (err error) { + _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_sync_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_sync sync "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Truncate(path string, length int64) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_truncate_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_truncate truncate "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(newmask int) (oldmask int) { + r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) + oldmask = int(r0) + return +} + +var libc_umask_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_umask umask "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlink(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unlink_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlink unlink "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unlinkat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unmount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unmount unmount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_write_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_write write "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { + r0, _, e1 := 
syscall_syscall6(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + ret = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mmap mmap "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_munmap_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_munmap munmap "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, buf *byte, nbuf int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_utimensat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s new file mode 100644 index 000000000..990be2457 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s @@ -0,0 +1,664 @@ +// go run mkasm.go openbsd riscv64 +// Code generated by the command above; DO NOT EDIT. 
+ +#include "textflag.h" + +TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgroups(SB) +GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) + +TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setgroups(SB) +GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) + +TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_wait4(SB) +GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 +DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) + +TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_accept(SB) +GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 +DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) + +TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_bind(SB) +GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 +DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) + +TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connect(SB) +GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) + +TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socket(SB) +GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) + +TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockopt(SB) +GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) + +TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsockopt(SB) +GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) + +TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpeername(SB) +GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) + +TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsockname(SB) +GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) + +TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_shutdown(SB) +GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) + +TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_socketpair(SB) +GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 +DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) + +TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvfrom(SB) +GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) + +TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendto(SB) +GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) + +TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_recvmsg(SB) +GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) + +TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sendmsg(SB) +GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) + +TEXT 
libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kevent(SB) +GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) + +TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimes(SB) +GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) + +TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_futimes(SB) +GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 +DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) + +TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_poll(SB) +GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) + +TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_madvise(SB) +GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 +DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) + +TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlock(SB) +GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) + +TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mlockall(SB) +GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) + +TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mprotect(SB) +GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) + +TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_msync(SB) +GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) + +TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlock(SB) +GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) + +TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munlockall(SB) +GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) + +TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pipe2(SB) +GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pipe2_trampoline_addr(SB)/8, $libc_pipe2_trampoline<>(SB) + +TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getdents(SB) +GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getdents_trampoline_addr(SB)/8, $libc_getdents_trampoline<>(SB) + +TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getcwd(SB) +GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) + +TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ioctl(SB) +GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + +TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ppoll(SB) +GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ppoll_trampoline_addr(SB)/8, $libc_ppoll_trampoline<>(SB) + +TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_access(SB) +GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 +DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) + 
+TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_adjtime(SB) +GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) + +TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chdir(SB) +GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) + +TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chflags(SB) +GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) + +TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chmod(SB) +GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) + +TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chown(SB) +GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) + +TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_chroot(SB) +GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 +DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) + +TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) +GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 +DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) + +TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_close(SB) +GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 +DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) + +TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup(SB) +GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) + +TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup2(SB) +GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) + +TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_dup3(SB) +GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $8 +DATA ·libc_dup3_trampoline_addr(SB)/8, $libc_dup3_trampoline<>(SB) + +TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_exit(SB) +GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) + +TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_faccessat(SB) +GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) + +TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchdir(SB) +GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) + +TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchflags(SB) +GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) + +TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmod(SB) +GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) + +TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchmodat(SB) +GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) + +TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchown(SB) +GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) + +TEXT 
libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fchownat(SB) +GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) + +TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_flock(SB) +GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 +DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) + +TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fpathconf(SB) +GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) + +TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstat(SB) +GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) + +TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatat(SB) +GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) + +TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fstatfs(SB) +GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) + +TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fsync(SB) +GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) + +TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_ftruncate(SB) +GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 +DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) + +TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getegid(SB) +GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) + +TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_geteuid(SB) +GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) + +TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getgid(SB) +GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) + +TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgid(SB) +GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) + +TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpgrp(SB) +GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) + +TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpid(SB) +GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) + +TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getppid(SB) +GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) + +TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getpriority(SB) +GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) + +TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrlimit(SB) +GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) + +TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrtable(SB) +GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, 
$8 +DATA ·libc_getrtable_trampoline_addr(SB)/8, $libc_getrtable_trampoline<>(SB) + +TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getrusage(SB) +GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) + +TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getsid(SB) +GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) + +TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_gettimeofday(SB) +GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) + +TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getuid(SB) +GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) + +TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_issetugid(SB) +GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) + +TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kill(SB) +GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) + +TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_kqueue(SB) +GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 +DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) + +TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lchown(SB) +GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) + +TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_link(SB) +GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 +DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) + +TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_linkat(SB) +GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) + +TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_listen(SB) +GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 +DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) + +TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lstat(SB) +GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) + +TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdir(SB) +GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) + +TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkdirat(SB) +GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) + +TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifo(SB) +GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) + +TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mkfifoat(SB) +GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mkfifoat_trampoline_addr(SB)/8, $libc_mkfifoat_trampoline<>(SB) + +TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknod(SB) +GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) + +TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mknodat(SB) +GLOBL 
·libc_mknodat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) + +TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_nanosleep(SB) +GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 +DATA ·libc_nanosleep_trampoline_addr(SB)/8, $libc_nanosleep_trampoline<>(SB) + +TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_open(SB) +GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 +DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) + +TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_openat(SB) +GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) + +TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pathconf(SB) +GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) + +TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pread(SB) +GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) + +TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwrite(SB) +GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) + +TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_read(SB) +GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 +DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) + +TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlink(SB) +GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) + +TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readlinkat(SB) +GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) + +TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rename(SB) +GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) + +TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameat(SB) +GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) + +TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_revoke(SB) +GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 +DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) + +TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_rmdir(SB) +GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 +DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) + +TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_lseek(SB) +GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 +DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) + +TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_select(SB) +GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 +DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) + +TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setegid(SB) +GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) + +TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_seteuid(SB) +GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) + +TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP 
libc_setgid(SB) +GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) + +TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setlogin(SB) +GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) + +TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpgid(SB) +GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) + +TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setpriority(SB) +GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) + +TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setregid(SB) +GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) + +TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setreuid(SB) +GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) + +TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresgid(SB) +GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresgid_trampoline_addr(SB)/8, $libc_setresgid_trampoline<>(SB) + +TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setresuid(SB) +GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) + +TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setrtable(SB) +GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setrtable_trampoline_addr(SB)/8, $libc_setrtable_trampoline<>(SB) + +TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setsid(SB) +GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) + +TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_settimeofday(SB) +GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 +DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) + +TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_setuid(SB) +GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 +DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) + +TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_stat(SB) +GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB) + +TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_statfs(SB) +GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8 +DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB) + +TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlink(SB) +GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) + +TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_symlinkat(SB) +GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) + +TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sync(SB) +GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) + +TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_truncate(SB) +GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 +DATA 
·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) + +TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_umask(SB) +GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 +DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) + +TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlink(SB) +GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) + +TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unlinkat(SB) +GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) + +TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unmount(SB) +GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) + +TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_write(SB) +GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 +DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) + +TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mmap(SB) +GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) + +TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_munmap(SB) +GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 +DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) + +TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_utimensat(SB) +GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index d12f4fbfe..609d1c598 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -38,6 +38,7 @@ import ( //go:cgo_import_dynamic libc_chmod chmod "libc.so" //go:cgo_import_dynamic libc_chown chown "libc.so" //go:cgo_import_dynamic libc_chroot chroot "libc.so" +//go:cgo_import_dynamic libc_clockgettime clockgettime "libc.so" //go:cgo_import_dynamic libc_close close "libc.so" //go:cgo_import_dynamic libc_creat creat "libc.so" //go:cgo_import_dynamic libc_dup dup "libc.so" @@ -66,6 +67,7 @@ import ( //go:cgo_import_dynamic libc_getpriority getpriority "libc.so" //go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" //go:cgo_import_dynamic libc_getrusage getrusage "libc.so" +//go:cgo_import_dynamic libc_getsid getsid "libc.so" //go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" //go:cgo_import_dynamic libc_getuid getuid "libc.so" //go:cgo_import_dynamic libc_kill kill "libc.so" @@ -108,7 +110,6 @@ import ( //go:cgo_import_dynamic libc_setpriority setpriority "libc.so" //go:cgo_import_dynamic libc_setregid setregid "libc.so" //go:cgo_import_dynamic libc_setreuid setreuid "libc.so" -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" //go:cgo_import_dynamic libc_setsid setsid "libc.so" //go:cgo_import_dynamic libc_setuid setuid "libc.so" //go:cgo_import_dynamic libc_shutdown shutdown "libsocket.so" @@ -146,6 +147,8 @@ import ( //go:cgo_import_dynamic libc_port_dissociate port_dissociate "libc.so" //go:cgo_import_dynamic libc_port_get port_get "libc.so" //go:cgo_import_dynamic libc_port_getn port_getn "libc.so" +//go:cgo_import_dynamic libc_putmsg putmsg "libc.so" +//go:cgo_import_dynamic libc_getmsg getmsg "libc.so" //go:linkname procpipe libc_pipe 
//go:linkname procpipe2 libc_pipe2 @@ -174,6 +177,7 @@ import ( //go:linkname procChmod libc_chmod //go:linkname procChown libc_chown //go:linkname procChroot libc_chroot +//go:linkname procClockGettime libc_clockgettime //go:linkname procClose libc_close //go:linkname procCreat libc_creat //go:linkname procDup libc_dup @@ -202,6 +206,7 @@ import ( //go:linkname procGetpriority libc_getpriority //go:linkname procGetrlimit libc_getrlimit //go:linkname procGetrusage libc_getrusage +//go:linkname procGetsid libc_getsid //go:linkname procGettimeofday libc_gettimeofday //go:linkname procGetuid libc_getuid //go:linkname procKill libc_kill @@ -244,7 +249,6 @@ import ( //go:linkname procSetpriority libc_setpriority //go:linkname procSetregid libc_setregid //go:linkname procSetreuid libc_setreuid -//go:linkname procSetrlimit libc_setrlimit //go:linkname procSetsid libc_setsid //go:linkname procSetuid libc_setuid //go:linkname procshutdown libc_shutdown @@ -282,6 +286,8 @@ import ( //go:linkname procport_dissociate libc_port_dissociate //go:linkname procport_get libc_port_get //go:linkname procport_getn libc_port_getn +//go:linkname procputmsg libc_putmsg +//go:linkname procgetmsg libc_getmsg var ( procpipe, @@ -311,6 +317,7 @@ var ( procChmod, procChown, procChroot, + procClockGettime, procClose, procCreat, procDup, @@ -339,6 +346,7 @@ var ( procGetpriority, procGetrlimit, procGetrusage, + procGetsid, procGettimeofday, procGetuid, procKill, @@ -381,7 +389,6 @@ var ( procSetpriority, procSetregid, procSetreuid, - procSetrlimit, procSetsid, procSetuid, procshutdown, @@ -418,7 +425,9 @@ var ( procport_associate, procport_dissociate, procport_get, - procport_getn syscallFunc + procport_getn, + procputmsg, + procgetmsg syscallFunc ) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -634,7 +643,18 @@ func __minor(version int, dev uint64) (val uint) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctlRet(fd int, req uint, arg uintptr) (ret int, err error) { +func ioctlRet(fd int, req int, arg uintptr) (ret int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) + ret = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctlPtrRet(fd int, req int, arg unsafe.Pointer) (ret int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) ret = int(r0) if e1 != 0 { @@ -741,6 +761,16 @@ func Chroot(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procClockGettime)), 2, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0, 0, 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procClose)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { @@ -1044,6 +1074,17 @@ func Getrusage(who int, rusage *Rusage) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetsid)), 1, uintptr(pid), 0, 0, 0, 0, 0) + sid = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func 
Gettimeofday(tv *Timeval) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGettimeofday)), 1, uintptr(unsafe.Pointer(tv)), 0, 0, 0, 0, 0) if e1 != 0 { @@ -1606,16 +1647,6 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetrlimit)), 2, uintptr(which), uintptr(unsafe.Pointer(lim)), 0, 0, 0, 0) - if e1 != 0 { - err = e1 - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Setsid() (pid int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetsid)), 0, 0, 0, 0, 0, 0, 0) pid = int(r0) @@ -2051,3 +2082,23 @@ func port_getn(port int, pe *portEvent, max uint32, nget *uint32, timeout *Times } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func putmsg(fd int, clptr *strbuf, dataptr *strbuf, flags int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procputmsg)), 4, uintptr(fd), uintptr(unsafe.Pointer(clptr)), uintptr(unsafe.Pointer(dataptr)), uintptr(flags), 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getmsg(fd int, clptr *strbuf, dataptr *strbuf, flags *int) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetmsg)), 4, uintptr(fd), uintptr(unsafe.Pointer(clptr)), uintptr(unsafe.Pointer(dataptr)), uintptr(unsafe.Pointer(flags)), 0, 0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go index f2079457c..c31681743 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go @@ -257,7 +257,17 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { +func ioctl(fd int, req int, arg uintptr) (err error) { + _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go index 9e9d0b2a9..55e048471 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go @@ -17,6 +17,7 @@ var sysctlMib = []mibentry{ {"ddb.max_line", []_C_int{9, 3}}, {"ddb.max_width", []_C_int{9, 2}}, {"ddb.panic", []_C_int{9, 5}}, + {"ddb.profile", []_C_int{9, 9}}, {"ddb.radix", []_C_int{9, 1}}, {"ddb.tab_stop_width", []_C_int{9, 4}}, {"ddb.trigger", []_C_int{9, 8}}, @@ -33,29 +34,37 @@ var sysctlMib = []mibentry{ {"hw.ncpufound", []_C_int{6, 21}}, {"hw.ncpuonline", []_C_int{6, 25}}, {"hw.pagesize", []_C_int{6, 7}}, + {"hw.perfpolicy", []_C_int{6, 23}}, {"hw.physmem", []_C_int{6, 19}}, + {"hw.power", []_C_int{6, 26}}, {"hw.product", []_C_int{6, 15}}, {"hw.serialno", []_C_int{6, 17}}, {"hw.setperf", []_C_int{6, 13}}, + {"hw.smt", []_C_int{6, 24}}, {"hw.usermem", []_C_int{6, 20}}, {"hw.uuid", []_C_int{6, 18}}, {"hw.vendor", []_C_int{6, 14}}, {"hw.version", []_C_int{6, 
16}}, - {"kern.arandom", []_C_int{1, 37}}, + {"kern.allowdt", []_C_int{1, 65}}, + {"kern.allowkmem", []_C_int{1, 52}}, {"kern.argmax", []_C_int{1, 8}}, + {"kern.audio", []_C_int{1, 84}}, {"kern.boottime", []_C_int{1, 21}}, {"kern.bufcachepercent", []_C_int{1, 72}}, {"kern.ccpu", []_C_int{1, 45}}, {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consbuf", []_C_int{1, 83}}, + {"kern.consbufsize", []_C_int{1, 82}}, {"kern.consdev", []_C_int{1, 75}}, {"kern.cp_time", []_C_int{1, 40}}, {"kern.cp_time2", []_C_int{1, 71}}, - {"kern.cryptodevallowsoft", []_C_int{1, 53}}, + {"kern.cpustats", []_C_int{1, 85}}, {"kern.domainname", []_C_int{1, 22}}, {"kern.file", []_C_int{1, 73}}, {"kern.forkstat", []_C_int{1, 42}}, {"kern.fscale", []_C_int{1, 46}}, {"kern.fsync", []_C_int{1, 33}}, + {"kern.global_ptrace", []_C_int{1, 81}}, {"kern.hostid", []_C_int{1, 11}}, {"kern.hostname", []_C_int{1, 10}}, {"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}}, @@ -78,17 +87,16 @@ var sysctlMib = []mibentry{ {"kern.ngroups", []_C_int{1, 18}}, {"kern.nosuidcoredump", []_C_int{1, 32}}, {"kern.nprocs", []_C_int{1, 47}}, - {"kern.nselcoll", []_C_int{1, 43}}, {"kern.nthreads", []_C_int{1, 26}}, {"kern.numvnodes", []_C_int{1, 58}}, {"kern.osrelease", []_C_int{1, 2}}, {"kern.osrevision", []_C_int{1, 3}}, {"kern.ostype", []_C_int{1, 1}}, {"kern.osversion", []_C_int{1, 27}}, + {"kern.pfstatus", []_C_int{1, 86}}, {"kern.pool_debug", []_C_int{1, 77}}, {"kern.posix1version", []_C_int{1, 17}}, {"kern.proc", []_C_int{1, 66}}, - {"kern.random", []_C_int{1, 31}}, {"kern.rawpartition", []_C_int{1, 24}}, {"kern.saved_ids", []_C_int{1, 20}}, {"kern.securelevel", []_C_int{1, 9}}, @@ -106,21 +114,20 @@ var sysctlMib = []mibentry{ {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, {"kern.timecounter.tick", []_C_int{1, 69, 1}}, {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, - {"kern.tty.maxptys", []_C_int{1, 44, 6}}, - {"kern.tty.nptys", []_C_int{1, 44, 7}}, + {"kern.timeout_stats", []_C_int{1, 87}}, {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, {"kern.ttycount", []_C_int{1, 57}}, - {"kern.userasymcrypto", []_C_int{1, 60}}, - {"kern.usercrypto", []_C_int{1, 52}}, - {"kern.usermount", []_C_int{1, 30}}, + {"kern.utc_offset", []_C_int{1, 88}}, {"kern.version", []_C_int{1, 4}}, - {"kern.vnode", []_C_int{1, 13}}, + {"kern.video", []_C_int{1, 89}}, {"kern.watchdog.auto", []_C_int{1, 64, 2}}, {"kern.watchdog.period", []_C_int{1, 64, 1}}, + {"kern.witnesswatch", []_C_int{1, 53}}, + {"kern.wxabort", []_C_int{1, 74}}, {"net.bpf.bufsize", []_C_int{4, 31, 1}}, {"net.bpf.maxbufsize", []_C_int{4, 31, 2}}, {"net.inet.ah.enable", []_C_int{4, 2, 51, 1}}, @@ -148,7 +155,9 @@ var sysctlMib = []mibentry{ {"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}}, {"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}}, {"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}}, + {"net.inet.ip.arpdown", []_C_int{4, 2, 0, 40}}, {"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}}, + {"net.inet.ip.arptimeout", []_C_int{4, 2, 0, 39}}, {"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}}, {"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}}, {"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}}, @@ -157,8 +166,10 @@ var sysctlMib = []mibentry{ {"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}}, {"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}}, {"net.inet.ip.mforwarding", []_C_int{4, 2, 0, 31}}, + {"net.inet.ip.mrtmfc", []_C_int{4, 2, 0, 
37}}, {"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}}, {"net.inet.ip.mrtstats", []_C_int{4, 2, 0, 35}}, + {"net.inet.ip.mrtvif", []_C_int{4, 2, 0, 38}}, {"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}}, {"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}}, {"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}}, @@ -175,9 +186,7 @@ var sysctlMib = []mibentry{ {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, - {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}}, {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, - {"net.inet.pim.stats", []_C_int{4, 2, 103, 1}}, {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, {"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}}, @@ -191,6 +200,7 @@ var sysctlMib = []mibentry{ {"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}}, {"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}}, {"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}}, + {"net.inet.tcp.rootonly", []_C_int{4, 2, 6, 24}}, {"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}}, {"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}}, {"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}}, @@ -198,9 +208,12 @@ var sysctlMib = []mibentry{ {"net.inet.tcp.stats", []_C_int{4, 2, 6, 21}}, {"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}}, {"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}}, + {"net.inet.tcp.synhashsize", []_C_int{4, 2, 6, 25}}, + {"net.inet.tcp.synuselimit", []_C_int{4, 2, 6, 23}}, {"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}}, {"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}}, {"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}}, + {"net.inet.udp.rootonly", []_C_int{4, 2, 17, 6}}, {"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}}, {"net.inet.udp.stats", []_C_int{4, 2, 17, 5}}, {"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}}, @@ -213,13 +226,8 @@ var sysctlMib = []mibentry{ {"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}}, {"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}}, {"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}}, - {"net.inet6.icmp6.nd6_prune", []_C_int{4, 24, 30, 6}}, {"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}}, - {"net.inet6.icmp6.nd6_useloopback", []_C_int{4, 24, 30, 11}}, - {"net.inet6.icmp6.nodeinfo", []_C_int{4, 24, 30, 13}}, - {"net.inet6.icmp6.rediraccept", []_C_int{4, 24, 30, 2}}, {"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}}, - {"net.inet6.ip6.accept_rtadv", []_C_int{4, 24, 17, 12}}, {"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}}, {"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}}, {"net.inet6.ip6.dad_pending", []_C_int{4, 24, 17, 49}}, @@ -232,20 +240,19 @@ var sysctlMib = []mibentry{ {"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}}, {"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}}, {"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}}, - {"net.inet6.ip6.maxifdefrouters", []_C_int{4, 24, 17, 47}}, - {"net.inet6.ip6.maxifprefixes", []_C_int{4, 24, 17, 46}}, {"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}}, + {"net.inet6.ip6.mrtmfc", []_C_int{4, 24, 17, 53}}, + {"net.inet6.ip6.mrtmif", []_C_int{4, 24, 17, 52}}, {"net.inet6.ip6.mrtproto", []_C_int{4, 24, 17, 8}}, {"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}}, {"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}}, {"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}}, {"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}}, {"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}}, - 
{"net.inet6.ip6.rr_prune", []_C_int{4, 24, 17, 22}}, + {"net.inet6.ip6.soiikey", []_C_int{4, 24, 17, 54}}, {"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}}, {"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}}, {"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}}, - {"net.inet6.ip6.v6only", []_C_int{4, 24, 17, 24}}, {"net.key.sadb_dump", []_C_int{4, 30, 1}}, {"net.key.spd_dump", []_C_int{4, 30, 2}}, {"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}}, @@ -254,12 +261,12 @@ var sysctlMib = []mibentry{ {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, - {"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}}, {"net.mpls.ttl", []_C_int{4, 33, 2}}, {"net.pflow.stats", []_C_int{4, 34, 1}}, {"net.pipex.enable", []_C_int{4, 35, 1}}, {"vm.anonmin", []_C_int{2, 7}}, {"vm.loadavg", []_C_int{2, 2}}, + {"vm.malloc_conf", []_C_int{2, 12}}, {"vm.maxslp", []_C_int{2, 10}}, {"vm.nkmempages", []_C_int{2, 6}}, {"vm.psstrings", []_C_int{2, 3}}, diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go index adecd0966..d2243cf83 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go @@ -36,23 +36,29 @@ var sysctlMib = []mibentry{ {"hw.pagesize", []_C_int{6, 7}}, {"hw.perfpolicy", []_C_int{6, 23}}, {"hw.physmem", []_C_int{6, 19}}, + {"hw.power", []_C_int{6, 26}}, {"hw.product", []_C_int{6, 15}}, {"hw.serialno", []_C_int{6, 17}}, {"hw.setperf", []_C_int{6, 13}}, + {"hw.smt", []_C_int{6, 24}}, {"hw.usermem", []_C_int{6, 20}}, {"hw.uuid", []_C_int{6, 18}}, {"hw.vendor", []_C_int{6, 14}}, {"hw.version", []_C_int{6, 16}}, + {"kern.allowdt", []_C_int{1, 65}}, {"kern.allowkmem", []_C_int{1, 52}}, {"kern.argmax", []_C_int{1, 8}}, + {"kern.audio", []_C_int{1, 84}}, {"kern.boottime", []_C_int{1, 21}}, {"kern.bufcachepercent", []_C_int{1, 72}}, {"kern.ccpu", []_C_int{1, 45}}, {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consbuf", []_C_int{1, 83}}, + {"kern.consbufsize", []_C_int{1, 82}}, {"kern.consdev", []_C_int{1, 75}}, {"kern.cp_time", []_C_int{1, 40}}, {"kern.cp_time2", []_C_int{1, 71}}, - {"kern.dnsjackport", []_C_int{1, 13}}, + {"kern.cpustats", []_C_int{1, 85}}, {"kern.domainname", []_C_int{1, 22}}, {"kern.file", []_C_int{1, 73}}, {"kern.forkstat", []_C_int{1, 42}}, @@ -81,13 +87,13 @@ var sysctlMib = []mibentry{ {"kern.ngroups", []_C_int{1, 18}}, {"kern.nosuidcoredump", []_C_int{1, 32}}, {"kern.nprocs", []_C_int{1, 47}}, - {"kern.nselcoll", []_C_int{1, 43}}, {"kern.nthreads", []_C_int{1, 26}}, {"kern.numvnodes", []_C_int{1, 58}}, {"kern.osrelease", []_C_int{1, 2}}, {"kern.osrevision", []_C_int{1, 3}}, {"kern.ostype", []_C_int{1, 1}}, {"kern.osversion", []_C_int{1, 27}}, + {"kern.pfstatus", []_C_int{1, 86}}, {"kern.pool_debug", []_C_int{1, 77}}, {"kern.posix1version", []_C_int{1, 17}}, {"kern.proc", []_C_int{1, 66}}, @@ -108,15 +114,19 @@ var sysctlMib = []mibentry{ {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, {"kern.timecounter.tick", []_C_int{1, 69, 1}}, {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, + {"kern.timeout_stats", []_C_int{1, 87}}, {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, {"kern.ttycount", []_C_int{1, 57}}, + {"kern.utc_offset", []_C_int{1, 88}}, {"kern.version", []_C_int{1, 4}}, + 
{"kern.video", []_C_int{1, 89}}, {"kern.watchdog.auto", []_C_int{1, 64, 2}}, {"kern.watchdog.period", []_C_int{1, 64, 1}}, + {"kern.witnesswatch", []_C_int{1, 53}}, {"kern.wxabort", []_C_int{1, 74}}, {"net.bpf.bufsize", []_C_int{4, 31, 1}}, {"net.bpf.maxbufsize", []_C_int{4, 31, 2}}, @@ -176,7 +186,6 @@ var sysctlMib = []mibentry{ {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, - {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}}, {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, @@ -252,12 +261,12 @@ var sysctlMib = []mibentry{ {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, - {"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}}, {"net.mpls.ttl", []_C_int{4, 33, 2}}, {"net.pflow.stats", []_C_int{4, 34, 1}}, {"net.pipex.enable", []_C_int{4, 35, 1}}, {"vm.anonmin", []_C_int{2, 7}}, {"vm.loadavg", []_C_int{2, 2}}, + {"vm.malloc_conf", []_C_int{2, 12}}, {"vm.maxslp", []_C_int{2, 10}}, {"vm.nkmempages", []_C_int{2, 6}}, {"vm.psstrings", []_C_int{2, 3}}, diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go index 8ea52a4a1..82dc51bd8 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go @@ -17,6 +17,7 @@ var sysctlMib = []mibentry{ {"ddb.max_line", []_C_int{9, 3}}, {"ddb.max_width", []_C_int{9, 2}}, {"ddb.panic", []_C_int{9, 5}}, + {"ddb.profile", []_C_int{9, 9}}, {"ddb.radix", []_C_int{9, 1}}, {"ddb.tab_stop_width", []_C_int{9, 4}}, {"ddb.trigger", []_C_int{9, 8}}, @@ -33,29 +34,37 @@ var sysctlMib = []mibentry{ {"hw.ncpufound", []_C_int{6, 21}}, {"hw.ncpuonline", []_C_int{6, 25}}, {"hw.pagesize", []_C_int{6, 7}}, + {"hw.perfpolicy", []_C_int{6, 23}}, {"hw.physmem", []_C_int{6, 19}}, + {"hw.power", []_C_int{6, 26}}, {"hw.product", []_C_int{6, 15}}, {"hw.serialno", []_C_int{6, 17}}, {"hw.setperf", []_C_int{6, 13}}, + {"hw.smt", []_C_int{6, 24}}, {"hw.usermem", []_C_int{6, 20}}, {"hw.uuid", []_C_int{6, 18}}, {"hw.vendor", []_C_int{6, 14}}, {"hw.version", []_C_int{6, 16}}, - {"kern.arandom", []_C_int{1, 37}}, + {"kern.allowdt", []_C_int{1, 65}}, + {"kern.allowkmem", []_C_int{1, 52}}, {"kern.argmax", []_C_int{1, 8}}, + {"kern.audio", []_C_int{1, 84}}, {"kern.boottime", []_C_int{1, 21}}, {"kern.bufcachepercent", []_C_int{1, 72}}, {"kern.ccpu", []_C_int{1, 45}}, {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consbuf", []_C_int{1, 83}}, + {"kern.consbufsize", []_C_int{1, 82}}, {"kern.consdev", []_C_int{1, 75}}, {"kern.cp_time", []_C_int{1, 40}}, {"kern.cp_time2", []_C_int{1, 71}}, - {"kern.cryptodevallowsoft", []_C_int{1, 53}}, + {"kern.cpustats", []_C_int{1, 85}}, {"kern.domainname", []_C_int{1, 22}}, {"kern.file", []_C_int{1, 73}}, {"kern.forkstat", []_C_int{1, 42}}, {"kern.fscale", []_C_int{1, 46}}, {"kern.fsync", []_C_int{1, 33}}, + {"kern.global_ptrace", []_C_int{1, 81}}, {"kern.hostid", []_C_int{1, 11}}, {"kern.hostname", []_C_int{1, 10}}, {"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}}, @@ -78,17 +87,16 @@ var sysctlMib = []mibentry{ {"kern.ngroups", []_C_int{1, 18}}, {"kern.nosuidcoredump", []_C_int{1, 32}}, {"kern.nprocs", []_C_int{1, 47}}, - {"kern.nselcoll", []_C_int{1, 43}}, {"kern.nthreads", []_C_int{1, 26}}, {"kern.numvnodes", []_C_int{1, 58}}, {"kern.osrelease", 
[]_C_int{1, 2}}, {"kern.osrevision", []_C_int{1, 3}}, {"kern.ostype", []_C_int{1, 1}}, {"kern.osversion", []_C_int{1, 27}}, + {"kern.pfstatus", []_C_int{1, 86}}, {"kern.pool_debug", []_C_int{1, 77}}, {"kern.posix1version", []_C_int{1, 17}}, {"kern.proc", []_C_int{1, 66}}, - {"kern.random", []_C_int{1, 31}}, {"kern.rawpartition", []_C_int{1, 24}}, {"kern.saved_ids", []_C_int{1, 20}}, {"kern.securelevel", []_C_int{1, 9}}, @@ -106,21 +114,20 @@ var sysctlMib = []mibentry{ {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, {"kern.timecounter.tick", []_C_int{1, 69, 1}}, {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, - {"kern.tty.maxptys", []_C_int{1, 44, 6}}, - {"kern.tty.nptys", []_C_int{1, 44, 7}}, + {"kern.timeout_stats", []_C_int{1, 87}}, {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, {"kern.ttycount", []_C_int{1, 57}}, - {"kern.userasymcrypto", []_C_int{1, 60}}, - {"kern.usercrypto", []_C_int{1, 52}}, - {"kern.usermount", []_C_int{1, 30}}, + {"kern.utc_offset", []_C_int{1, 88}}, {"kern.version", []_C_int{1, 4}}, - {"kern.vnode", []_C_int{1, 13}}, + {"kern.video", []_C_int{1, 89}}, {"kern.watchdog.auto", []_C_int{1, 64, 2}}, {"kern.watchdog.period", []_C_int{1, 64, 1}}, + {"kern.witnesswatch", []_C_int{1, 53}}, + {"kern.wxabort", []_C_int{1, 74}}, {"net.bpf.bufsize", []_C_int{4, 31, 1}}, {"net.bpf.maxbufsize", []_C_int{4, 31, 2}}, {"net.inet.ah.enable", []_C_int{4, 2, 51, 1}}, @@ -148,7 +155,9 @@ var sysctlMib = []mibentry{ {"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}}, {"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}}, {"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}}, + {"net.inet.ip.arpdown", []_C_int{4, 2, 0, 40}}, {"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}}, + {"net.inet.ip.arptimeout", []_C_int{4, 2, 0, 39}}, {"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}}, {"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}}, {"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}}, @@ -157,8 +166,10 @@ var sysctlMib = []mibentry{ {"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}}, {"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}}, {"net.inet.ip.mforwarding", []_C_int{4, 2, 0, 31}}, + {"net.inet.ip.mrtmfc", []_C_int{4, 2, 0, 37}}, {"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}}, {"net.inet.ip.mrtstats", []_C_int{4, 2, 0, 35}}, + {"net.inet.ip.mrtvif", []_C_int{4, 2, 0, 38}}, {"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}}, {"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}}, {"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}}, @@ -175,9 +186,7 @@ var sysctlMib = []mibentry{ {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, - {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}}, {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, - {"net.inet.pim.stats", []_C_int{4, 2, 103, 1}}, {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, {"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}}, @@ -191,6 +200,7 @@ var sysctlMib = []mibentry{ {"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}}, {"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}}, {"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}}, + {"net.inet.tcp.rootonly", []_C_int{4, 2, 6, 24}}, {"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}}, {"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}}, {"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}}, @@ -198,9 +208,12 
@@ var sysctlMib = []mibentry{ {"net.inet.tcp.stats", []_C_int{4, 2, 6, 21}}, {"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}}, {"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}}, + {"net.inet.tcp.synhashsize", []_C_int{4, 2, 6, 25}}, + {"net.inet.tcp.synuselimit", []_C_int{4, 2, 6, 23}}, {"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}}, {"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}}, {"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}}, + {"net.inet.udp.rootonly", []_C_int{4, 2, 17, 6}}, {"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}}, {"net.inet.udp.stats", []_C_int{4, 2, 17, 5}}, {"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}}, @@ -213,13 +226,8 @@ var sysctlMib = []mibentry{ {"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}}, {"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}}, {"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}}, - {"net.inet6.icmp6.nd6_prune", []_C_int{4, 24, 30, 6}}, {"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}}, - {"net.inet6.icmp6.nd6_useloopback", []_C_int{4, 24, 30, 11}}, - {"net.inet6.icmp6.nodeinfo", []_C_int{4, 24, 30, 13}}, - {"net.inet6.icmp6.rediraccept", []_C_int{4, 24, 30, 2}}, {"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}}, - {"net.inet6.ip6.accept_rtadv", []_C_int{4, 24, 17, 12}}, {"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}}, {"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}}, {"net.inet6.ip6.dad_pending", []_C_int{4, 24, 17, 49}}, @@ -232,20 +240,19 @@ var sysctlMib = []mibentry{ {"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}}, {"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}}, {"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}}, - {"net.inet6.ip6.maxifdefrouters", []_C_int{4, 24, 17, 47}}, - {"net.inet6.ip6.maxifprefixes", []_C_int{4, 24, 17, 46}}, {"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}}, + {"net.inet6.ip6.mrtmfc", []_C_int{4, 24, 17, 53}}, + {"net.inet6.ip6.mrtmif", []_C_int{4, 24, 17, 52}}, {"net.inet6.ip6.mrtproto", []_C_int{4, 24, 17, 8}}, {"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}}, {"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}}, {"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}}, {"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}}, {"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}}, - {"net.inet6.ip6.rr_prune", []_C_int{4, 24, 17, 22}}, + {"net.inet6.ip6.soiikey", []_C_int{4, 24, 17, 54}}, {"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}}, {"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}}, {"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}}, - {"net.inet6.ip6.v6only", []_C_int{4, 24, 17, 24}}, {"net.key.sadb_dump", []_C_int{4, 30, 1}}, {"net.key.spd_dump", []_C_int{4, 30, 2}}, {"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}}, @@ -254,12 +261,12 @@ var sysctlMib = []mibentry{ {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, - {"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}}, {"net.mpls.ttl", []_C_int{4, 33, 2}}, {"net.pflow.stats", []_C_int{4, 34, 1}}, {"net.pipex.enable", []_C_int{4, 35, 1}}, {"vm.anonmin", []_C_int{2, 7}}, {"vm.loadavg", []_C_int{2, 2}}, + {"vm.malloc_conf", []_C_int{2, 12}}, {"vm.maxslp", []_C_int{2, 10}}, {"vm.nkmempages", []_C_int{2, 6}}, {"vm.psstrings", []_C_int{2, 3}}, diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go index 154b57ae3..cbdda1a4a 100644 --- 
a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go @@ -36,6 +36,7 @@ var sysctlMib = []mibentry{ {"hw.pagesize", []_C_int{6, 7}}, {"hw.perfpolicy", []_C_int{6, 23}}, {"hw.physmem", []_C_int{6, 19}}, + {"hw.power", []_C_int{6, 26}}, {"hw.product", []_C_int{6, 15}}, {"hw.serialno", []_C_int{6, 17}}, {"hw.setperf", []_C_int{6, 13}}, @@ -44,6 +45,7 @@ var sysctlMib = []mibentry{ {"hw.uuid", []_C_int{6, 18}}, {"hw.vendor", []_C_int{6, 14}}, {"hw.version", []_C_int{6, 16}}, + {"kern.allowdt", []_C_int{1, 65}}, {"kern.allowkmem", []_C_int{1, 52}}, {"kern.argmax", []_C_int{1, 8}}, {"kern.audio", []_C_int{1, 84}}, @@ -51,6 +53,8 @@ var sysctlMib = []mibentry{ {"kern.bufcachepercent", []_C_int{1, 72}}, {"kern.ccpu", []_C_int{1, 45}}, {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consbuf", []_C_int{1, 83}}, + {"kern.consbufsize", []_C_int{1, 82}}, {"kern.consdev", []_C_int{1, 75}}, {"kern.cp_time", []_C_int{1, 40}}, {"kern.cp_time2", []_C_int{1, 71}}, @@ -83,13 +87,13 @@ var sysctlMib = []mibentry{ {"kern.ngroups", []_C_int{1, 18}}, {"kern.nosuidcoredump", []_C_int{1, 32}}, {"kern.nprocs", []_C_int{1, 47}}, - {"kern.nselcoll", []_C_int{1, 43}}, {"kern.nthreads", []_C_int{1, 26}}, {"kern.numvnodes", []_C_int{1, 58}}, {"kern.osrelease", []_C_int{1, 2}}, {"kern.osrevision", []_C_int{1, 3}}, {"kern.ostype", []_C_int{1, 1}}, {"kern.osversion", []_C_int{1, 27}}, + {"kern.pfstatus", []_C_int{1, 86}}, {"kern.pool_debug", []_C_int{1, 77}}, {"kern.posix1version", []_C_int{1, 17}}, {"kern.proc", []_C_int{1, 66}}, @@ -110,13 +114,16 @@ var sysctlMib = []mibentry{ {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, {"kern.timecounter.tick", []_C_int{1, 69, 1}}, {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, + {"kern.timeout_stats", []_C_int{1, 87}}, {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, {"kern.ttycount", []_C_int{1, 57}}, + {"kern.utc_offset", []_C_int{1, 88}}, {"kern.version", []_C_int{1, 4}}, + {"kern.video", []_C_int{1, 89}}, {"kern.watchdog.auto", []_C_int{1, 64, 2}}, {"kern.watchdog.period", []_C_int{1, 64, 1}}, {"kern.witnesswatch", []_C_int{1, 53}}, @@ -179,7 +186,6 @@ var sysctlMib = []mibentry{ {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, - {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}}, {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, @@ -255,7 +261,6 @@ var sysctlMib = []mibentry{ {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, - {"net.mpls.maxloop_inkernel", []_C_int{4, 33, 4}}, {"net.mpls.ttl", []_C_int{4, 33, 2}}, {"net.pflow.stats", []_C_int{4, 34, 1}}, {"net.pipex.enable", []_C_int{4, 35, 1}}, diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go index d96bb2ba4..f55eae1a8 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go @@ -36,6 +36,7 @@ var sysctlMib = []mibentry{ {"hw.pagesize", []_C_int{6, 7}}, {"hw.perfpolicy", []_C_int{6, 23}}, {"hw.physmem", []_C_int{6, 19}}, + {"hw.power", []_C_int{6, 26}}, 
{"hw.product", []_C_int{6, 15}}, {"hw.serialno", []_C_int{6, 17}}, {"hw.setperf", []_C_int{6, 13}}, @@ -86,7 +87,6 @@ var sysctlMib = []mibentry{ {"kern.ngroups", []_C_int{1, 18}}, {"kern.nosuidcoredump", []_C_int{1, 32}}, {"kern.nprocs", []_C_int{1, 47}}, - {"kern.nselcoll", []_C_int{1, 43}}, {"kern.nthreads", []_C_int{1, 26}}, {"kern.numvnodes", []_C_int{1, 58}}, {"kern.osrelease", []_C_int{1, 2}}, @@ -123,6 +123,7 @@ var sysctlMib = []mibentry{ {"kern.ttycount", []_C_int{1, 57}}, {"kern.utc_offset", []_C_int{1, 88}}, {"kern.version", []_C_int{1, 4}}, + {"kern.video", []_C_int{1, 89}}, {"kern.watchdog.auto", []_C_int{1, 64, 2}}, {"kern.watchdog.period", []_C_int{1, 64, 1}}, {"kern.witnesswatch", []_C_int{1, 53}}, diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go new file mode 100644 index 000000000..e44054470 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go @@ -0,0 +1,281 @@ +// go run mksysctl_openbsd.go +// Code generated by the command above; DO NOT EDIT. + +//go:build ppc64 && openbsd +// +build ppc64,openbsd + +package unix + +type mibentry struct { + ctlname string + ctloid []_C_int +} + +var sysctlMib = []mibentry{ + {"ddb.console", []_C_int{9, 6}}, + {"ddb.log", []_C_int{9, 7}}, + {"ddb.max_line", []_C_int{9, 3}}, + {"ddb.max_width", []_C_int{9, 2}}, + {"ddb.panic", []_C_int{9, 5}}, + {"ddb.profile", []_C_int{9, 9}}, + {"ddb.radix", []_C_int{9, 1}}, + {"ddb.tab_stop_width", []_C_int{9, 4}}, + {"ddb.trigger", []_C_int{9, 8}}, + {"fs.posix.setuid", []_C_int{3, 1, 1}}, + {"hw.allowpowerdown", []_C_int{6, 22}}, + {"hw.byteorder", []_C_int{6, 4}}, + {"hw.cpuspeed", []_C_int{6, 12}}, + {"hw.diskcount", []_C_int{6, 10}}, + {"hw.disknames", []_C_int{6, 8}}, + {"hw.diskstats", []_C_int{6, 9}}, + {"hw.machine", []_C_int{6, 1}}, + {"hw.model", []_C_int{6, 2}}, + {"hw.ncpu", []_C_int{6, 3}}, + {"hw.ncpufound", []_C_int{6, 21}}, + {"hw.ncpuonline", []_C_int{6, 25}}, + {"hw.pagesize", []_C_int{6, 7}}, + {"hw.perfpolicy", []_C_int{6, 23}}, + {"hw.physmem", []_C_int{6, 19}}, + {"hw.power", []_C_int{6, 26}}, + {"hw.product", []_C_int{6, 15}}, + {"hw.serialno", []_C_int{6, 17}}, + {"hw.setperf", []_C_int{6, 13}}, + {"hw.smt", []_C_int{6, 24}}, + {"hw.usermem", []_C_int{6, 20}}, + {"hw.uuid", []_C_int{6, 18}}, + {"hw.vendor", []_C_int{6, 14}}, + {"hw.version", []_C_int{6, 16}}, + {"kern.allowdt", []_C_int{1, 65}}, + {"kern.allowkmem", []_C_int{1, 52}}, + {"kern.argmax", []_C_int{1, 8}}, + {"kern.audio", []_C_int{1, 84}}, + {"kern.boottime", []_C_int{1, 21}}, + {"kern.bufcachepercent", []_C_int{1, 72}}, + {"kern.ccpu", []_C_int{1, 45}}, + {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consbuf", []_C_int{1, 83}}, + {"kern.consbufsize", []_C_int{1, 82}}, + {"kern.consdev", []_C_int{1, 75}}, + {"kern.cp_time", []_C_int{1, 40}}, + {"kern.cp_time2", []_C_int{1, 71}}, + {"kern.cpustats", []_C_int{1, 85}}, + {"kern.domainname", []_C_int{1, 22}}, + {"kern.file", []_C_int{1, 73}}, + {"kern.forkstat", []_C_int{1, 42}}, + {"kern.fscale", []_C_int{1, 46}}, + {"kern.fsync", []_C_int{1, 33}}, + {"kern.global_ptrace", []_C_int{1, 81}}, + {"kern.hostid", []_C_int{1, 11}}, + {"kern.hostname", []_C_int{1, 10}}, + {"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}}, + {"kern.job_control", []_C_int{1, 19}}, + {"kern.malloc.buckets", []_C_int{1, 39, 1}}, + {"kern.malloc.kmemnames", []_C_int{1, 39, 3}}, + {"kern.maxclusters", []_C_int{1, 67}}, + {"kern.maxfiles", []_C_int{1, 7}}, + {"kern.maxlocksperuid", []_C_int{1, 70}}, + 
{"kern.maxpartitions", []_C_int{1, 23}}, + {"kern.maxproc", []_C_int{1, 6}}, + {"kern.maxthread", []_C_int{1, 25}}, + {"kern.maxvnodes", []_C_int{1, 5}}, + {"kern.mbstat", []_C_int{1, 59}}, + {"kern.msgbuf", []_C_int{1, 48}}, + {"kern.msgbufsize", []_C_int{1, 38}}, + {"kern.nchstats", []_C_int{1, 41}}, + {"kern.netlivelocks", []_C_int{1, 76}}, + {"kern.nfiles", []_C_int{1, 56}}, + {"kern.ngroups", []_C_int{1, 18}}, + {"kern.nosuidcoredump", []_C_int{1, 32}}, + {"kern.nprocs", []_C_int{1, 47}}, + {"kern.nthreads", []_C_int{1, 26}}, + {"kern.numvnodes", []_C_int{1, 58}}, + {"kern.osrelease", []_C_int{1, 2}}, + {"kern.osrevision", []_C_int{1, 3}}, + {"kern.ostype", []_C_int{1, 1}}, + {"kern.osversion", []_C_int{1, 27}}, + {"kern.pfstatus", []_C_int{1, 86}}, + {"kern.pool_debug", []_C_int{1, 77}}, + {"kern.posix1version", []_C_int{1, 17}}, + {"kern.proc", []_C_int{1, 66}}, + {"kern.rawpartition", []_C_int{1, 24}}, + {"kern.saved_ids", []_C_int{1, 20}}, + {"kern.securelevel", []_C_int{1, 9}}, + {"kern.seminfo", []_C_int{1, 61}}, + {"kern.shminfo", []_C_int{1, 62}}, + {"kern.somaxconn", []_C_int{1, 28}}, + {"kern.sominconn", []_C_int{1, 29}}, + {"kern.splassert", []_C_int{1, 54}}, + {"kern.stackgap_random", []_C_int{1, 50}}, + {"kern.sysvipc_info", []_C_int{1, 51}}, + {"kern.sysvmsg", []_C_int{1, 34}}, + {"kern.sysvsem", []_C_int{1, 35}}, + {"kern.sysvshm", []_C_int{1, 36}}, + {"kern.timecounter.choice", []_C_int{1, 69, 4}}, + {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, + {"kern.timecounter.tick", []_C_int{1, 69, 1}}, + {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, + {"kern.timeout_stats", []_C_int{1, 87}}, + {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, + {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, + {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, + {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, + {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, + {"kern.ttycount", []_C_int{1, 57}}, + {"kern.utc_offset", []_C_int{1, 88}}, + {"kern.version", []_C_int{1, 4}}, + {"kern.video", []_C_int{1, 89}}, + {"kern.watchdog.auto", []_C_int{1, 64, 2}}, + {"kern.watchdog.period", []_C_int{1, 64, 1}}, + {"kern.witnesswatch", []_C_int{1, 53}}, + {"kern.wxabort", []_C_int{1, 74}}, + {"net.bpf.bufsize", []_C_int{4, 31, 1}}, + {"net.bpf.maxbufsize", []_C_int{4, 31, 2}}, + {"net.inet.ah.enable", []_C_int{4, 2, 51, 1}}, + {"net.inet.ah.stats", []_C_int{4, 2, 51, 2}}, + {"net.inet.carp.allow", []_C_int{4, 2, 112, 1}}, + {"net.inet.carp.log", []_C_int{4, 2, 112, 3}}, + {"net.inet.carp.preempt", []_C_int{4, 2, 112, 2}}, + {"net.inet.carp.stats", []_C_int{4, 2, 112, 4}}, + {"net.inet.divert.recvspace", []_C_int{4, 2, 258, 1}}, + {"net.inet.divert.sendspace", []_C_int{4, 2, 258, 2}}, + {"net.inet.divert.stats", []_C_int{4, 2, 258, 3}}, + {"net.inet.esp.enable", []_C_int{4, 2, 50, 1}}, + {"net.inet.esp.stats", []_C_int{4, 2, 50, 4}}, + {"net.inet.esp.udpencap", []_C_int{4, 2, 50, 2}}, + {"net.inet.esp.udpencap_port", []_C_int{4, 2, 50, 3}}, + {"net.inet.etherip.allow", []_C_int{4, 2, 97, 1}}, + {"net.inet.etherip.stats", []_C_int{4, 2, 97, 2}}, + {"net.inet.gre.allow", []_C_int{4, 2, 47, 1}}, + {"net.inet.gre.wccp", []_C_int{4, 2, 47, 2}}, + {"net.inet.icmp.bmcastecho", []_C_int{4, 2, 1, 2}}, + {"net.inet.icmp.errppslimit", []_C_int{4, 2, 1, 3}}, + {"net.inet.icmp.maskrepl", []_C_int{4, 2, 1, 1}}, + {"net.inet.icmp.rediraccept", []_C_int{4, 2, 1, 4}}, + {"net.inet.icmp.redirtimeout", []_C_int{4, 2, 1, 5}}, + {"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}}, + {"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}}, + 
{"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}}, + {"net.inet.ip.arpdown", []_C_int{4, 2, 0, 40}}, + {"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}}, + {"net.inet.ip.arptimeout", []_C_int{4, 2, 0, 39}}, + {"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}}, + {"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}}, + {"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}}, + {"net.inet.ip.ifq.drops", []_C_int{4, 2, 0, 30, 3}}, + {"net.inet.ip.ifq.len", []_C_int{4, 2, 0, 30, 1}}, + {"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}}, + {"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}}, + {"net.inet.ip.mforwarding", []_C_int{4, 2, 0, 31}}, + {"net.inet.ip.mrtmfc", []_C_int{4, 2, 0, 37}}, + {"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}}, + {"net.inet.ip.mrtstats", []_C_int{4, 2, 0, 35}}, + {"net.inet.ip.mrtvif", []_C_int{4, 2, 0, 38}}, + {"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}}, + {"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}}, + {"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}}, + {"net.inet.ip.multipath", []_C_int{4, 2, 0, 32}}, + {"net.inet.ip.portfirst", []_C_int{4, 2, 0, 7}}, + {"net.inet.ip.porthifirst", []_C_int{4, 2, 0, 9}}, + {"net.inet.ip.porthilast", []_C_int{4, 2, 0, 10}}, + {"net.inet.ip.portlast", []_C_int{4, 2, 0, 8}}, + {"net.inet.ip.redirect", []_C_int{4, 2, 0, 2}}, + {"net.inet.ip.sourceroute", []_C_int{4, 2, 0, 5}}, + {"net.inet.ip.stats", []_C_int{4, 2, 0, 33}}, + {"net.inet.ip.ttl", []_C_int{4, 2, 0, 3}}, + {"net.inet.ipcomp.enable", []_C_int{4, 2, 108, 1}}, + {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, + {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, + {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, + {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, + {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, + {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, + {"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}}, + {"net.inet.tcp.drop", []_C_int{4, 2, 6, 19}}, + {"net.inet.tcp.ecn", []_C_int{4, 2, 6, 14}}, + {"net.inet.tcp.ident", []_C_int{4, 2, 6, 9}}, + {"net.inet.tcp.keepidle", []_C_int{4, 2, 6, 3}}, + {"net.inet.tcp.keepinittime", []_C_int{4, 2, 6, 2}}, + {"net.inet.tcp.keepintvl", []_C_int{4, 2, 6, 4}}, + {"net.inet.tcp.mssdflt", []_C_int{4, 2, 6, 11}}, + {"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}}, + {"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}}, + {"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}}, + {"net.inet.tcp.rootonly", []_C_int{4, 2, 6, 24}}, + {"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}}, + {"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}}, + {"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}}, + {"net.inet.tcp.slowhz", []_C_int{4, 2, 6, 5}}, + {"net.inet.tcp.stats", []_C_int{4, 2, 6, 21}}, + {"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}}, + {"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}}, + {"net.inet.tcp.synhashsize", []_C_int{4, 2, 6, 25}}, + {"net.inet.tcp.synuselimit", []_C_int{4, 2, 6, 23}}, + {"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}}, + {"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}}, + {"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}}, + {"net.inet.udp.rootonly", []_C_int{4, 2, 17, 6}}, + {"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}}, + {"net.inet.udp.stats", []_C_int{4, 2, 17, 5}}, + {"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}}, + {"net.inet6.divert.sendspace", []_C_int{4, 24, 86, 2}}, + {"net.inet6.divert.stats", []_C_int{4, 24, 86, 3}}, + {"net.inet6.icmp6.errppslimit", []_C_int{4, 24, 30, 14}}, + {"net.inet6.icmp6.mtudisc_hiwat", []_C_int{4, 24, 30, 16}}, + {"net.inet6.icmp6.mtudisc_lowat", 
[]_C_int{4, 24, 30, 17}}, + {"net.inet6.icmp6.nd6_debug", []_C_int{4, 24, 30, 18}}, + {"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}}, + {"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}}, + {"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}}, + {"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}}, + {"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}}, + {"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}}, + {"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}}, + {"net.inet6.ip6.dad_pending", []_C_int{4, 24, 17, 49}}, + {"net.inet6.ip6.defmcasthlim", []_C_int{4, 24, 17, 18}}, + {"net.inet6.ip6.forwarding", []_C_int{4, 24, 17, 1}}, + {"net.inet6.ip6.forwsrcrt", []_C_int{4, 24, 17, 5}}, + {"net.inet6.ip6.hdrnestlimit", []_C_int{4, 24, 17, 15}}, + {"net.inet6.ip6.hlim", []_C_int{4, 24, 17, 3}}, + {"net.inet6.ip6.log_interval", []_C_int{4, 24, 17, 14}}, + {"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}}, + {"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}}, + {"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}}, + {"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}}, + {"net.inet6.ip6.mrtmfc", []_C_int{4, 24, 17, 53}}, + {"net.inet6.ip6.mrtmif", []_C_int{4, 24, 17, 52}}, + {"net.inet6.ip6.mrtproto", []_C_int{4, 24, 17, 8}}, + {"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}}, + {"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}}, + {"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}}, + {"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}}, + {"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}}, + {"net.inet6.ip6.soiikey", []_C_int{4, 24, 17, 54}}, + {"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}}, + {"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}}, + {"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}}, + {"net.key.sadb_dump", []_C_int{4, 30, 1}}, + {"net.key.spd_dump", []_C_int{4, 30, 2}}, + {"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}}, + {"net.mpls.ifq.drops", []_C_int{4, 33, 3, 3}}, + {"net.mpls.ifq.len", []_C_int{4, 33, 3, 1}}, + {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, + {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, + {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, + {"net.mpls.ttl", []_C_int{4, 33, 2}}, + {"net.pflow.stats", []_C_int{4, 34, 1}}, + {"net.pipex.enable", []_C_int{4, 35, 1}}, + {"vm.anonmin", []_C_int{2, 7}}, + {"vm.loadavg", []_C_int{2, 2}}, + {"vm.malloc_conf", []_C_int{2, 12}}, + {"vm.maxslp", []_C_int{2, 10}}, + {"vm.nkmempages", []_C_int{2, 6}}, + {"vm.psstrings", []_C_int{2, 3}}, + {"vm.swapencrypt.enable", []_C_int{2, 5, 0}}, + {"vm.swapencrypt.keyscreated", []_C_int{2, 5, 1}}, + {"vm.swapencrypt.keysdeleted", []_C_int{2, 5, 2}}, + {"vm.uspace", []_C_int{2, 11}}, + {"vm.uvmexp", []_C_int{2, 4}}, + {"vm.vmmeter", []_C_int{2, 1}}, + {"vm.vnodemin", []_C_int{2, 9}}, + {"vm.vtextmin", []_C_int{2, 8}}, +} diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go new file mode 100644 index 000000000..a0db82fce --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go @@ -0,0 +1,282 @@ +// go run mksysctl_openbsd.go +// Code generated by the command above; DO NOT EDIT. 
+ +//go:build riscv64 && openbsd +// +build riscv64,openbsd + +package unix + +type mibentry struct { + ctlname string + ctloid []_C_int +} + +var sysctlMib = []mibentry{ + {"ddb.console", []_C_int{9, 6}}, + {"ddb.log", []_C_int{9, 7}}, + {"ddb.max_line", []_C_int{9, 3}}, + {"ddb.max_width", []_C_int{9, 2}}, + {"ddb.panic", []_C_int{9, 5}}, + {"ddb.profile", []_C_int{9, 9}}, + {"ddb.radix", []_C_int{9, 1}}, + {"ddb.tab_stop_width", []_C_int{9, 4}}, + {"ddb.trigger", []_C_int{9, 8}}, + {"fs.posix.setuid", []_C_int{3, 1, 1}}, + {"hw.allowpowerdown", []_C_int{6, 22}}, + {"hw.byteorder", []_C_int{6, 4}}, + {"hw.cpuspeed", []_C_int{6, 12}}, + {"hw.diskcount", []_C_int{6, 10}}, + {"hw.disknames", []_C_int{6, 8}}, + {"hw.diskstats", []_C_int{6, 9}}, + {"hw.machine", []_C_int{6, 1}}, + {"hw.model", []_C_int{6, 2}}, + {"hw.ncpu", []_C_int{6, 3}}, + {"hw.ncpufound", []_C_int{6, 21}}, + {"hw.ncpuonline", []_C_int{6, 25}}, + {"hw.pagesize", []_C_int{6, 7}}, + {"hw.perfpolicy", []_C_int{6, 23}}, + {"hw.physmem", []_C_int{6, 19}}, + {"hw.power", []_C_int{6, 26}}, + {"hw.product", []_C_int{6, 15}}, + {"hw.serialno", []_C_int{6, 17}}, + {"hw.setperf", []_C_int{6, 13}}, + {"hw.smt", []_C_int{6, 24}}, + {"hw.usermem", []_C_int{6, 20}}, + {"hw.uuid", []_C_int{6, 18}}, + {"hw.vendor", []_C_int{6, 14}}, + {"hw.version", []_C_int{6, 16}}, + {"kern.allowdt", []_C_int{1, 65}}, + {"kern.allowkmem", []_C_int{1, 52}}, + {"kern.argmax", []_C_int{1, 8}}, + {"kern.audio", []_C_int{1, 84}}, + {"kern.boottime", []_C_int{1, 21}}, + {"kern.bufcachepercent", []_C_int{1, 72}}, + {"kern.ccpu", []_C_int{1, 45}}, + {"kern.clockrate", []_C_int{1, 12}}, + {"kern.consbuf", []_C_int{1, 83}}, + {"kern.consbufsize", []_C_int{1, 82}}, + {"kern.consdev", []_C_int{1, 75}}, + {"kern.cp_time", []_C_int{1, 40}}, + {"kern.cp_time2", []_C_int{1, 71}}, + {"kern.cpustats", []_C_int{1, 85}}, + {"kern.domainname", []_C_int{1, 22}}, + {"kern.file", []_C_int{1, 73}}, + {"kern.forkstat", []_C_int{1, 42}}, + {"kern.fscale", []_C_int{1, 46}}, + {"kern.fsync", []_C_int{1, 33}}, + {"kern.global_ptrace", []_C_int{1, 81}}, + {"kern.hostid", []_C_int{1, 11}}, + {"kern.hostname", []_C_int{1, 10}}, + {"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}}, + {"kern.job_control", []_C_int{1, 19}}, + {"kern.malloc.buckets", []_C_int{1, 39, 1}}, + {"kern.malloc.kmemnames", []_C_int{1, 39, 3}}, + {"kern.maxclusters", []_C_int{1, 67}}, + {"kern.maxfiles", []_C_int{1, 7}}, + {"kern.maxlocksperuid", []_C_int{1, 70}}, + {"kern.maxpartitions", []_C_int{1, 23}}, + {"kern.maxproc", []_C_int{1, 6}}, + {"kern.maxthread", []_C_int{1, 25}}, + {"kern.maxvnodes", []_C_int{1, 5}}, + {"kern.mbstat", []_C_int{1, 59}}, + {"kern.msgbuf", []_C_int{1, 48}}, + {"kern.msgbufsize", []_C_int{1, 38}}, + {"kern.nchstats", []_C_int{1, 41}}, + {"kern.netlivelocks", []_C_int{1, 76}}, + {"kern.nfiles", []_C_int{1, 56}}, + {"kern.ngroups", []_C_int{1, 18}}, + {"kern.nosuidcoredump", []_C_int{1, 32}}, + {"kern.nprocs", []_C_int{1, 47}}, + {"kern.nselcoll", []_C_int{1, 43}}, + {"kern.nthreads", []_C_int{1, 26}}, + {"kern.numvnodes", []_C_int{1, 58}}, + {"kern.osrelease", []_C_int{1, 2}}, + {"kern.osrevision", []_C_int{1, 3}}, + {"kern.ostype", []_C_int{1, 1}}, + {"kern.osversion", []_C_int{1, 27}}, + {"kern.pfstatus", []_C_int{1, 86}}, + {"kern.pool_debug", []_C_int{1, 77}}, + {"kern.posix1version", []_C_int{1, 17}}, + {"kern.proc", []_C_int{1, 66}}, + {"kern.rawpartition", []_C_int{1, 24}}, + {"kern.saved_ids", []_C_int{1, 20}}, + {"kern.securelevel", []_C_int{1, 9}}, + {"kern.seminfo", []_C_int{1, 
61}}, + {"kern.shminfo", []_C_int{1, 62}}, + {"kern.somaxconn", []_C_int{1, 28}}, + {"kern.sominconn", []_C_int{1, 29}}, + {"kern.splassert", []_C_int{1, 54}}, + {"kern.stackgap_random", []_C_int{1, 50}}, + {"kern.sysvipc_info", []_C_int{1, 51}}, + {"kern.sysvmsg", []_C_int{1, 34}}, + {"kern.sysvsem", []_C_int{1, 35}}, + {"kern.sysvshm", []_C_int{1, 36}}, + {"kern.timecounter.choice", []_C_int{1, 69, 4}}, + {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, + {"kern.timecounter.tick", []_C_int{1, 69, 1}}, + {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, + {"kern.timeout_stats", []_C_int{1, 87}}, + {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, + {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, + {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, + {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, + {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, + {"kern.ttycount", []_C_int{1, 57}}, + {"kern.utc_offset", []_C_int{1, 88}}, + {"kern.version", []_C_int{1, 4}}, + {"kern.video", []_C_int{1, 89}}, + {"kern.watchdog.auto", []_C_int{1, 64, 2}}, + {"kern.watchdog.period", []_C_int{1, 64, 1}}, + {"kern.witnesswatch", []_C_int{1, 53}}, + {"kern.wxabort", []_C_int{1, 74}}, + {"net.bpf.bufsize", []_C_int{4, 31, 1}}, + {"net.bpf.maxbufsize", []_C_int{4, 31, 2}}, + {"net.inet.ah.enable", []_C_int{4, 2, 51, 1}}, + {"net.inet.ah.stats", []_C_int{4, 2, 51, 2}}, + {"net.inet.carp.allow", []_C_int{4, 2, 112, 1}}, + {"net.inet.carp.log", []_C_int{4, 2, 112, 3}}, + {"net.inet.carp.preempt", []_C_int{4, 2, 112, 2}}, + {"net.inet.carp.stats", []_C_int{4, 2, 112, 4}}, + {"net.inet.divert.recvspace", []_C_int{4, 2, 258, 1}}, + {"net.inet.divert.sendspace", []_C_int{4, 2, 258, 2}}, + {"net.inet.divert.stats", []_C_int{4, 2, 258, 3}}, + {"net.inet.esp.enable", []_C_int{4, 2, 50, 1}}, + {"net.inet.esp.stats", []_C_int{4, 2, 50, 4}}, + {"net.inet.esp.udpencap", []_C_int{4, 2, 50, 2}}, + {"net.inet.esp.udpencap_port", []_C_int{4, 2, 50, 3}}, + {"net.inet.etherip.allow", []_C_int{4, 2, 97, 1}}, + {"net.inet.etherip.stats", []_C_int{4, 2, 97, 2}}, + {"net.inet.gre.allow", []_C_int{4, 2, 47, 1}}, + {"net.inet.gre.wccp", []_C_int{4, 2, 47, 2}}, + {"net.inet.icmp.bmcastecho", []_C_int{4, 2, 1, 2}}, + {"net.inet.icmp.errppslimit", []_C_int{4, 2, 1, 3}}, + {"net.inet.icmp.maskrepl", []_C_int{4, 2, 1, 1}}, + {"net.inet.icmp.rediraccept", []_C_int{4, 2, 1, 4}}, + {"net.inet.icmp.redirtimeout", []_C_int{4, 2, 1, 5}}, + {"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}}, + {"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}}, + {"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}}, + {"net.inet.ip.arpdown", []_C_int{4, 2, 0, 40}}, + {"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}}, + {"net.inet.ip.arptimeout", []_C_int{4, 2, 0, 39}}, + {"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}}, + {"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}}, + {"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}}, + {"net.inet.ip.ifq.drops", []_C_int{4, 2, 0, 30, 3}}, + {"net.inet.ip.ifq.len", []_C_int{4, 2, 0, 30, 1}}, + {"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}}, + {"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}}, + {"net.inet.ip.mforwarding", []_C_int{4, 2, 0, 31}}, + {"net.inet.ip.mrtmfc", []_C_int{4, 2, 0, 37}}, + {"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}}, + {"net.inet.ip.mrtstats", []_C_int{4, 2, 0, 35}}, + {"net.inet.ip.mrtvif", []_C_int{4, 2, 0, 38}}, + {"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}}, + {"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}}, + {"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}}, + {"net.inet.ip.multipath", []_C_int{4, 2, 0, 32}}, + 
{"net.inet.ip.portfirst", []_C_int{4, 2, 0, 7}}, + {"net.inet.ip.porthifirst", []_C_int{4, 2, 0, 9}}, + {"net.inet.ip.porthilast", []_C_int{4, 2, 0, 10}}, + {"net.inet.ip.portlast", []_C_int{4, 2, 0, 8}}, + {"net.inet.ip.redirect", []_C_int{4, 2, 0, 2}}, + {"net.inet.ip.sourceroute", []_C_int{4, 2, 0, 5}}, + {"net.inet.ip.stats", []_C_int{4, 2, 0, 33}}, + {"net.inet.ip.ttl", []_C_int{4, 2, 0, 3}}, + {"net.inet.ipcomp.enable", []_C_int{4, 2, 108, 1}}, + {"net.inet.ipcomp.stats", []_C_int{4, 2, 108, 2}}, + {"net.inet.ipip.allow", []_C_int{4, 2, 4, 1}}, + {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, + {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, + {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, + {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, + {"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}}, + {"net.inet.tcp.drop", []_C_int{4, 2, 6, 19}}, + {"net.inet.tcp.ecn", []_C_int{4, 2, 6, 14}}, + {"net.inet.tcp.ident", []_C_int{4, 2, 6, 9}}, + {"net.inet.tcp.keepidle", []_C_int{4, 2, 6, 3}}, + {"net.inet.tcp.keepinittime", []_C_int{4, 2, 6, 2}}, + {"net.inet.tcp.keepintvl", []_C_int{4, 2, 6, 4}}, + {"net.inet.tcp.mssdflt", []_C_int{4, 2, 6, 11}}, + {"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}}, + {"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}}, + {"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}}, + {"net.inet.tcp.rootonly", []_C_int{4, 2, 6, 24}}, + {"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}}, + {"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}}, + {"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}}, + {"net.inet.tcp.slowhz", []_C_int{4, 2, 6, 5}}, + {"net.inet.tcp.stats", []_C_int{4, 2, 6, 21}}, + {"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}}, + {"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}}, + {"net.inet.tcp.synhashsize", []_C_int{4, 2, 6, 25}}, + {"net.inet.tcp.synuselimit", []_C_int{4, 2, 6, 23}}, + {"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}}, + {"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}}, + {"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}}, + {"net.inet.udp.rootonly", []_C_int{4, 2, 17, 6}}, + {"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}}, + {"net.inet.udp.stats", []_C_int{4, 2, 17, 5}}, + {"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}}, + {"net.inet6.divert.sendspace", []_C_int{4, 24, 86, 2}}, + {"net.inet6.divert.stats", []_C_int{4, 24, 86, 3}}, + {"net.inet6.icmp6.errppslimit", []_C_int{4, 24, 30, 14}}, + {"net.inet6.icmp6.mtudisc_hiwat", []_C_int{4, 24, 30, 16}}, + {"net.inet6.icmp6.mtudisc_lowat", []_C_int{4, 24, 30, 17}}, + {"net.inet6.icmp6.nd6_debug", []_C_int{4, 24, 30, 18}}, + {"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}}, + {"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}}, + {"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}}, + {"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}}, + {"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}}, + {"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}}, + {"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}}, + {"net.inet6.ip6.dad_pending", []_C_int{4, 24, 17, 49}}, + {"net.inet6.ip6.defmcasthlim", []_C_int{4, 24, 17, 18}}, + {"net.inet6.ip6.forwarding", []_C_int{4, 24, 17, 1}}, + {"net.inet6.ip6.forwsrcrt", []_C_int{4, 24, 17, 5}}, + {"net.inet6.ip6.hdrnestlimit", []_C_int{4, 24, 17, 15}}, + {"net.inet6.ip6.hlim", []_C_int{4, 24, 17, 3}}, + {"net.inet6.ip6.log_interval", []_C_int{4, 24, 17, 14}}, + {"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}}, + {"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}}, + 
{"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}}, + {"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}}, + {"net.inet6.ip6.mrtmfc", []_C_int{4, 24, 17, 53}}, + {"net.inet6.ip6.mrtmif", []_C_int{4, 24, 17, 52}}, + {"net.inet6.ip6.mrtproto", []_C_int{4, 24, 17, 8}}, + {"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}}, + {"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}}, + {"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}}, + {"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}}, + {"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}}, + {"net.inet6.ip6.soiikey", []_C_int{4, 24, 17, 54}}, + {"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}}, + {"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}}, + {"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}}, + {"net.key.sadb_dump", []_C_int{4, 30, 1}}, + {"net.key.spd_dump", []_C_int{4, 30, 2}}, + {"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}}, + {"net.mpls.ifq.drops", []_C_int{4, 33, 3, 3}}, + {"net.mpls.ifq.len", []_C_int{4, 33, 3, 1}}, + {"net.mpls.ifq.maxlen", []_C_int{4, 33, 3, 2}}, + {"net.mpls.mapttl_ip", []_C_int{4, 33, 5}}, + {"net.mpls.mapttl_ip6", []_C_int{4, 33, 6}}, + {"net.mpls.ttl", []_C_int{4, 33, 2}}, + {"net.pflow.stats", []_C_int{4, 34, 1}}, + {"net.pipex.enable", []_C_int{4, 35, 1}}, + {"vm.anonmin", []_C_int{2, 7}}, + {"vm.loadavg", []_C_int{2, 2}}, + {"vm.malloc_conf", []_C_int{2, 12}}, + {"vm.maxslp", []_C_int{2, 10}}, + {"vm.nkmempages", []_C_int{2, 6}}, + {"vm.psstrings", []_C_int{2, 3}}, + {"vm.swapencrypt.enable", []_C_int{2, 5, 0}}, + {"vm.swapencrypt.keyscreated", []_C_int{2, 5, 1}}, + {"vm.swapencrypt.keysdeleted", []_C_int{2, 5, 2}}, + {"vm.uspace", []_C_int{2, 11}}, + {"vm.uvmexp", []_C_int{2, 4}}, + {"vm.vmmeter", []_C_int{2, 1}}, + {"vm.vnodemin", []_C_int{2, 9}}, + {"vm.vtextmin", []_C_int{2, 8}}, +} diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go index 59d5dfc20..4e0d96107 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master +// go run mksysnum.go https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12 // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build 386 && freebsd @@ -19,10 +19,9 @@ const ( SYS_UNLINK = 10 // { int unlink(char *path); } SYS_CHDIR = 12 // { int chdir(char *path); } SYS_FCHDIR = 13 // { int fchdir(int fd); } - SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } SYS_CHMOD = 15 // { int chmod(char *path, int mode); } SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } - SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int + SYS_BREAK = 17 // { caddr_t break(char *nsize); } SYS_GETPID = 20 // { pid_t getpid(void); } SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); } SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } @@ -43,7 +42,6 @@ const ( SYS_KILL = 37 // { int kill(int pid, int signum); } SYS_GETPPID = 39 // { pid_t getppid(void); } SYS_DUP = 41 // { int dup(u_int fd); } - SYS_PIPE = 42 // { int pipe(void); } SYS_GETEGID = 43 // { gid_t getegid(void); } SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); } SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); } @@ -58,15 +56,14 @@ const ( SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, size_t count); } SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); } - SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int + SYS_UMASK = 60 // { int umask(int newmask); } SYS_CHROOT = 61 // { int chroot(char *path); } SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); } SYS_VFORK = 66 // { int vfork(void); } SYS_SBRK = 69 // { int sbrk(int incr); } SYS_SSTK = 70 // { int sstk(int incr); } - SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise ovadvise_args int SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } - SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, int prot); } + SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); } SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); } SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, char *vec); } SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); } @@ -124,14 +121,10 @@ const ( SYS_SETGID = 181 // { int setgid(gid_t gid); } SYS_SETEGID = 182 // { int setegid(gid_t egid); } SYS_SETEUID = 183 // { int seteuid(uid_t euid); } - SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } - SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } - SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int - SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } @@ -143,12 +136,12 @@ const ( SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, size_t nsops); } SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); } SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } - 
SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_MSGRCV = 227 // { ssize_t msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, int shmflg); } SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); } SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); } SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, struct timespec *tp); } - SYS_CLOCK_SETTIME = 233 // { int clock_settime( clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_SETTIME = 233 // { int clock_settime(clockid_t clock_id, const struct timespec *tp); } SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, struct timespec *tp); } SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, struct sigevent *evp, int *timerid); } SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } @@ -157,50 +150,44 @@ const ( SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } - SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } - SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate(struct ffclock_estimate *cest); } + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate(struct ffclock_estimate *cest); } SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } - SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id, int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); } SYS_RFORK = 251 // { int rfork(int flags); } - SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, u_int nfds, int timeout); } SYS_ISSETUGID = 253 // { int issetugid(void); } SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); } SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); } - SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb * const *acb_list, int nent, struct sigevent *sig); } - SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, size_t count); } + SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb* const *acb_list, int nent, struct sigevent *sig); } SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); } - SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } - SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } - SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); } - SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } SYS_MODNEXT = 300 // { int modnext(int modid); } - SYS_MODSTAT = 
301 // { int modstat(int modid, struct module_stat *stat); } + SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat* stat); } SYS_MODFNEXT = 302 // { int modfnext(int modid); } SYS_MODFIND = 303 // { int modfind(const char *name); } SYS_KLDLOAD = 304 // { int kldload(const char *file); } SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } SYS_KLDFIND = 306 // { int kldfind(const char *file); } SYS_KLDNEXT = 307 // { int kldnext(int fileid); } - SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat* stat); } + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat *stat); } SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } SYS_GETSID = 310 // { int getsid(pid_t pid); } SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } - SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } + SYS_AIO_SUSPEND = 315 // { int aio_suspend(struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } SYS_YIELD = 321 // { int yield(void); } SYS_MLOCKALL = 324 // { int mlockall(int how); } SYS_MUNLOCKALL = 325 // { int munlockall(void); } - SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } + SYS___GETCWD = 326 // { int __getcwd(char *buf, size_t buflen); } SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); } SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); } SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); } @@ -226,14 +213,13 @@ const ( SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, acl_type_t type, struct acl *aclp); } SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, acl_type_t type, struct acl *aclp); } SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); } - SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } - SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete(struct aiocb **aiocbp, struct timespec *timeout); } SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } SYS_KQUEUE = 362 // { int kqueue(void); } - SYS_KEVENT = 363 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int 
nevents, const struct timespec *timeout); } SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, int attrnamespace, const char *attrname); } @@ -251,10 +237,6 @@ const ( SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } - SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); } - SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } - SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); } SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); } SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); } @@ -267,14 +249,14 @@ const ( SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, struct mac *mac_p); } SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, struct mac *mac_p); } SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, struct mac *mac_p); } - SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( const char *path, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link(const char *path, int attrnamespace, const char *attrname); } SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, char **envv, struct mac *mac_p); } SYS_SIGACTION = 416 // { int sigaction(int sig, const struct sigaction *act, struct sigaction *oact); } - SYS_SIGRETURN = 417 // { int sigreturn( const struct __ucontext *sigcntxp); } + SYS_SIGRETURN = 417 // { int sigreturn(const struct __ucontext *sigcntxp); } SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } - SYS_SETCONTEXT = 422 // { int setcontext( const struct __ucontext *ucp); } + SYS_SETCONTEXT = 422 // { int setcontext(const struct __ucontext *ucp); } SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp); } SYS_SWAPOFF = 424 // { int swapoff(const char *name); } SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, acl_type_t type, struct acl *aclp); } @@ -288,10 +270,10 @@ const ( SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } - SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, 
void *data, size_t nbytes); } - SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); } SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, const struct timespec *abstime); } - SYS_THR_SUSPEND = 442 // { int thr_suspend( const struct timespec *timeout); } + SYS_THR_SUSPEND = 442 // { int thr_suspend(const struct timespec *timeout); } SYS_THR_WAKE = 443 // { int thr_wake(long id); } SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } SYS_AUDIT = 445 // { int audit(const void *record, u_int length); } @@ -300,17 +282,17 @@ const ( SYS_SETAUID = 448 // { int setauid(uid_t *auid); } SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } - SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } - SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } SYS_AUDITCTL = 453 // { int auditctl(char *path); } SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, u_long val, void *uaddr1, void *uaddr2); } SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, int param_size); } SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, mode_t mode, const struct mq_attr *attr); } - SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } - SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } - SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len,unsigned msg_prio, const struct timespec *abs_timeout);} - SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } + SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } + SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len, unsigned msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); } SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } @@ -319,7 +301,7 @@ const ( SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); } SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo 
*sinfo, int flags); } - SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr * from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } + SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr *from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, size_t nbyte, off_t offset); } SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, size_t nbyte, off_t offset); } SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos); } @@ -338,14 +320,12 @@ const ( SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, int flag); } SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag); } SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, char **envv); } - SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, struct timeval *times); } SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, char *path2, int flag); } SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } - SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, mode_t mode); } - SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, size_t bufsize); } + SYS_READLINKAT = 500 // { ssize_t readlinkat(int fd, char *path, char *buf, size_t bufsize); } SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, char *new); } SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, char *path2); } SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } @@ -391,7 +371,24 @@ const ( SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); } SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } - SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); } - SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); } SYS_FDATASYNC = 550 // { int fdatasync(int fd); } + SYS_FSTAT = 551 // { int fstat(int fd, struct stat *sb); } + SYS_FSTATAT = 552 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } + SYS_FHSTAT = 553 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } + SYS_GETDIRENTRIES = 554 // { ssize_t getdirentries(int fd, char *buf, size_t count, off_t *basep); } + SYS_STATFS = 555 // { int statfs(char *path, struct statfs *buf); } + SYS_FSTATFS = 556 // { int fstatfs(int fd, struct statfs *buf); } + SYS_GETFSSTAT = 557 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } + SYS_FHSTATFS = 558 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } + SYS_MKNODAT = 559 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } + SYS_KEVENT = 560 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_CPUSET_GETDOMAIN = 561 // { int cpuset_getdomain(cpulevel_t level, 
cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int *policy); } + SYS_CPUSET_SETDOMAIN = 562 // { int cpuset_setdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int policy); } + SYS_GETRANDOM = 563 // { int getrandom(void *buf, size_t buflen, unsigned int flags); } + SYS_GETFHAT = 564 // { int getfhat(int fd, char *path, struct fhandle *fhp, int flags); } + SYS_FHLINK = 565 // { int fhlink(struct fhandle *fhp, const char *to); } + SYS_FHLINKAT = 566 // { int fhlinkat(struct fhandle *fhp, int tofd, const char *to,); } + SYS_FHREADLINK = 567 // { int fhreadlink(struct fhandle *fhp, char *buf, size_t bufsize); } + SYS___SYSCTLBYNAME = 570 // { int __sysctlbyname(const char *name, size_t namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } + SYS_CLOSE_RANGE = 575 // { int close_range(u_int lowfd, u_int highfd, int flags); } ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go index 342d471d2..01636b838 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master +// go run mksysnum.go https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12 // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && freebsd @@ -19,10 +19,9 @@ const ( SYS_UNLINK = 10 // { int unlink(char *path); } SYS_CHDIR = 12 // { int chdir(char *path); } SYS_FCHDIR = 13 // { int fchdir(int fd); } - SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } SYS_CHMOD = 15 // { int chmod(char *path, int mode); } SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } - SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int + SYS_BREAK = 17 // { caddr_t break(char *nsize); } SYS_GETPID = 20 // { pid_t getpid(void); } SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); } SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } @@ -43,7 +42,6 @@ const ( SYS_KILL = 37 // { int kill(int pid, int signum); } SYS_GETPPID = 39 // { pid_t getppid(void); } SYS_DUP = 41 // { int dup(u_int fd); } - SYS_PIPE = 42 // { int pipe(void); } SYS_GETEGID = 43 // { gid_t getegid(void); } SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); } SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); } @@ -58,15 +56,14 @@ const ( SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, size_t count); } SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); } - SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int + SYS_UMASK = 60 // { int umask(int newmask); } SYS_CHROOT = 61 // { int chroot(char *path); } SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); } SYS_VFORK = 66 // { int vfork(void); } SYS_SBRK = 69 // { int sbrk(int incr); } SYS_SSTK = 70 // { int sstk(int incr); } - SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise ovadvise_args int SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } - SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, int prot); } + SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); } SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); } SYS_MINCORE = 78 // { int 
mincore(const void *addr, size_t len, char *vec); } SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); } @@ -124,14 +121,10 @@ const ( SYS_SETGID = 181 // { int setgid(gid_t gid); } SYS_SETEGID = 182 // { int setegid(gid_t egid); } SYS_SETEUID = 183 // { int seteuid(uid_t euid); } - SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } - SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } - SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int - SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } @@ -143,12 +136,12 @@ const ( SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, size_t nsops); } SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); } SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } - SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_MSGRCV = 227 // { ssize_t msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, int shmflg); } SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); } SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); } SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, struct timespec *tp); } - SYS_CLOCK_SETTIME = 233 // { int clock_settime( clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_SETTIME = 233 // { int clock_settime(clockid_t clock_id, const struct timespec *tp); } SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, struct timespec *tp); } SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, struct sigevent *evp, int *timerid); } SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } @@ -157,50 +150,44 @@ const ( SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } - SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } - SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate(struct ffclock_estimate *cest); } + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate(struct ffclock_estimate *cest); } SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } - SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id, int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } SYS_MINHERIT = 250 // { int minherit(void *addr, size_t 
len, int inherit); } SYS_RFORK = 251 // { int rfork(int flags); } - SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, u_int nfds, int timeout); } SYS_ISSETUGID = 253 // { int issetugid(void); } SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); } SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); } - SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb * const *acb_list, int nent, struct sigevent *sig); } - SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, size_t count); } + SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb* const *acb_list, int nent, struct sigevent *sig); } SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); } - SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } - SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } - SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); } - SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } SYS_MODNEXT = 300 // { int modnext(int modid); } - SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat *stat); } + SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat* stat); } SYS_MODFNEXT = 302 // { int modfnext(int modid); } SYS_MODFIND = 303 // { int modfind(const char *name); } SYS_KLDLOAD = 304 // { int kldload(const char *file); } SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } SYS_KLDFIND = 306 // { int kldfind(const char *file); } SYS_KLDNEXT = 307 // { int kldnext(int fileid); } - SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat* stat); } + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat *stat); } SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } SYS_GETSID = 310 // { int getsid(pid_t pid); } SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } - SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } + SYS_AIO_SUSPEND = 315 // { int aio_suspend(struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } SYS_YIELD = 321 // { int yield(void); } SYS_MLOCKALL = 324 // { int mlockall(int how); } SYS_MUNLOCKALL = 325 // { int munlockall(void); } - SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } + SYS___GETCWD = 326 // { int __getcwd(char *buf, size_t buflen); } SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); } SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); } SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); } @@ -226,14 +213,13 @@ const ( SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, acl_type_t type, struct acl *aclp); } SYS___ACL_ACLCHECK_FD = 354 // { 
int __acl_aclcheck_fd(int filedes, acl_type_t type, struct acl *aclp); } SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); } - SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } - SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete(struct aiocb **aiocbp, struct timespec *timeout); } SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } SYS_KQUEUE = 362 // { int kqueue(void); } - SYS_KEVENT = 363 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, int attrnamespace, const char *attrname); } @@ -251,10 +237,6 @@ const ( SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } - SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); } - SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } - SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); } SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); } SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); } @@ -267,14 +249,14 @@ const ( SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, struct mac *mac_p); } SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, struct mac *mac_p); } SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, struct mac *mac_p); } - SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( const char *path, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + 
SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link(const char *path, int attrnamespace, const char *attrname); } SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, char **envv, struct mac *mac_p); } SYS_SIGACTION = 416 // { int sigaction(int sig, const struct sigaction *act, struct sigaction *oact); } - SYS_SIGRETURN = 417 // { int sigreturn( const struct __ucontext *sigcntxp); } + SYS_SIGRETURN = 417 // { int sigreturn(const struct __ucontext *sigcntxp); } SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } - SYS_SETCONTEXT = 422 // { int setcontext( const struct __ucontext *ucp); } + SYS_SETCONTEXT = 422 // { int setcontext(const struct __ucontext *ucp); } SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp); } SYS_SWAPOFF = 424 // { int swapoff(const char *name); } SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, acl_type_t type, struct acl *aclp); } @@ -288,10 +270,10 @@ const ( SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } - SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, void *data, size_t nbytes); } - SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); } SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, const struct timespec *abstime); } - SYS_THR_SUSPEND = 442 // { int thr_suspend( const struct timespec *timeout); } + SYS_THR_SUSPEND = 442 // { int thr_suspend(const struct timespec *timeout); } SYS_THR_WAKE = 443 // { int thr_wake(long id); } SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } SYS_AUDIT = 445 // { int audit(const void *record, u_int length); } @@ -300,17 +282,17 @@ const ( SYS_SETAUID = 448 // { int setauid(uid_t *auid); } SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } - SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } - SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } SYS_AUDITCTL = 453 // { int auditctl(char *path); } SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, u_long val, void *uaddr1, void *uaddr2); } SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, int param_size); } SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, mode_t mode, const struct mq_attr *attr); } - SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } - SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int 
mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } - SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len,unsigned msg_prio, const struct timespec *abs_timeout);} - SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } + SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } + SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len, unsigned msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); } SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } @@ -319,7 +301,7 @@ const ( SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); } SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } - SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr * from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } + SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr *from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, size_t nbyte, off_t offset); } SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, size_t nbyte, off_t offset); } SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos); } @@ -338,14 +320,12 @@ const ( SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, int flag); } SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag); } SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, char **envv); } - SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, struct timeval *times); } SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, char *path2, int flag); } SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } - SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, mode_t mode); } - SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, size_t bufsize); } + SYS_READLINKAT = 500 // { ssize_t readlinkat(int fd, char *path, char *buf, size_t bufsize); } SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, char *new); } SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, char *path2); } SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } @@ -391,7 +371,24 @@ const ( SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct 
timespec *ts, const sigset_t *set); } SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } - SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); } - SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); } SYS_FDATASYNC = 550 // { int fdatasync(int fd); } + SYS_FSTAT = 551 // { int fstat(int fd, struct stat *sb); } + SYS_FSTATAT = 552 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } + SYS_FHSTAT = 553 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } + SYS_GETDIRENTRIES = 554 // { ssize_t getdirentries(int fd, char *buf, size_t count, off_t *basep); } + SYS_STATFS = 555 // { int statfs(char *path, struct statfs *buf); } + SYS_FSTATFS = 556 // { int fstatfs(int fd, struct statfs *buf); } + SYS_GETFSSTAT = 557 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } + SYS_FHSTATFS = 558 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } + SYS_MKNODAT = 559 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } + SYS_KEVENT = 560 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_CPUSET_GETDOMAIN = 561 // { int cpuset_getdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int *policy); } + SYS_CPUSET_SETDOMAIN = 562 // { int cpuset_setdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int policy); } + SYS_GETRANDOM = 563 // { int getrandom(void *buf, size_t buflen, unsigned int flags); } + SYS_GETFHAT = 564 // { int getfhat(int fd, char *path, struct fhandle *fhp, int flags); } + SYS_FHLINK = 565 // { int fhlink(struct fhandle *fhp, const char *to); } + SYS_FHLINKAT = 566 // { int fhlinkat(struct fhandle *fhp, int tofd, const char *to,); } + SYS_FHREADLINK = 567 // { int fhreadlink(struct fhandle *fhp, char *buf, size_t bufsize); } + SYS___SYSCTLBYNAME = 570 // { int __sysctlbyname(const char *name, size_t namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } + SYS_CLOSE_RANGE = 575 // { int close_range(u_int lowfd, u_int highfd, int flags); } ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go index e2e3d72c5..ad99bc106 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master +// go run mksysnum.go https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12 // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build arm && freebsd @@ -19,10 +19,9 @@ const ( SYS_UNLINK = 10 // { int unlink(char *path); } SYS_CHDIR = 12 // { int chdir(char *path); } SYS_FCHDIR = 13 // { int fchdir(int fd); } - SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } SYS_CHMOD = 15 // { int chmod(char *path, int mode); } SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } - SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int + SYS_BREAK = 17 // { caddr_t break(char *nsize); } SYS_GETPID = 20 // { pid_t getpid(void); } SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); } SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } @@ -43,7 +42,6 @@ const ( SYS_KILL = 37 // { int kill(int pid, int signum); } SYS_GETPPID = 39 // { pid_t getppid(void); } SYS_DUP = 41 // { int dup(u_int fd); } - SYS_PIPE = 42 // { int pipe(void); } SYS_GETEGID = 43 // { gid_t getegid(void); } SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); } SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); } @@ -58,15 +56,14 @@ const ( SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, size_t count); } SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); } - SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int + SYS_UMASK = 60 // { int umask(int newmask); } SYS_CHROOT = 61 // { int chroot(char *path); } SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); } SYS_VFORK = 66 // { int vfork(void); } SYS_SBRK = 69 // { int sbrk(int incr); } SYS_SSTK = 70 // { int sstk(int incr); } - SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise ovadvise_args int SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } - SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, int prot); } + SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); } SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); } SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, char *vec); } SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); } @@ -124,14 +121,10 @@ const ( SYS_SETGID = 181 // { int setgid(gid_t gid); } SYS_SETEGID = 182 // { int setegid(gid_t egid); } SYS_SETEUID = 183 // { int seteuid(uid_t euid); } - SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } - SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } - SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int - SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } @@ -143,12 +136,12 @@ const ( SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, size_t nsops); } SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); } SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } - 
SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_MSGRCV = 227 // { ssize_t msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, int shmflg); } SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); } SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); } SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, struct timespec *tp); } - SYS_CLOCK_SETTIME = 233 // { int clock_settime( clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_SETTIME = 233 // { int clock_settime(clockid_t clock_id, const struct timespec *tp); } SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, struct timespec *tp); } SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, struct sigevent *evp, int *timerid); } SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } @@ -157,50 +150,44 @@ const ( SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } - SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } - SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate(struct ffclock_estimate *cest); } + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate(struct ffclock_estimate *cest); } SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } - SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id, int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); } SYS_RFORK = 251 // { int rfork(int flags); } - SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, u_int nfds, int timeout); } SYS_ISSETUGID = 253 // { int issetugid(void); } SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); } SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); } - SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb * const *acb_list, int nent, struct sigevent *sig); } - SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, size_t count); } + SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb* const *acb_list, int nent, struct sigevent *sig); } SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); } - SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } - SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } - SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); } - SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } SYS_MODNEXT = 300 // { int modnext(int modid); } - SYS_MODSTAT = 
301 // { int modstat(int modid, struct module_stat *stat); } + SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat* stat); } SYS_MODFNEXT = 302 // { int modfnext(int modid); } SYS_MODFIND = 303 // { int modfind(const char *name); } SYS_KLDLOAD = 304 // { int kldload(const char *file); } SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } SYS_KLDFIND = 306 // { int kldfind(const char *file); } SYS_KLDNEXT = 307 // { int kldnext(int fileid); } - SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat* stat); } + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat *stat); } SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } SYS_GETSID = 310 // { int getsid(pid_t pid); } SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } - SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } + SYS_AIO_SUSPEND = 315 // { int aio_suspend(struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } SYS_YIELD = 321 // { int yield(void); } SYS_MLOCKALL = 324 // { int mlockall(int how); } SYS_MUNLOCKALL = 325 // { int munlockall(void); } - SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } + SYS___GETCWD = 326 // { int __getcwd(char *buf, size_t buflen); } SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); } SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); } SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); } @@ -226,14 +213,13 @@ const ( SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, acl_type_t type, struct acl *aclp); } SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, acl_type_t type, struct acl *aclp); } SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); } - SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } - SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete(struct aiocb **aiocbp, struct timespec *timeout); } SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } SYS_KQUEUE = 362 // { int kqueue(void); } - SYS_KEVENT = 363 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int 
nevents, const struct timespec *timeout); } SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, int attrnamespace, const char *attrname); } @@ -251,10 +237,6 @@ const ( SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } - SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); } - SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } - SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); } SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); } SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); } @@ -267,14 +249,14 @@ const ( SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, struct mac *mac_p); } SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, struct mac *mac_p); } SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, struct mac *mac_p); } - SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( const char *path, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link(const char *path, int attrnamespace, const char *attrname); } SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, char **envv, struct mac *mac_p); } SYS_SIGACTION = 416 // { int sigaction(int sig, const struct sigaction *act, struct sigaction *oact); } - SYS_SIGRETURN = 417 // { int sigreturn( const struct __ucontext *sigcntxp); } + SYS_SIGRETURN = 417 // { int sigreturn(const struct __ucontext *sigcntxp); } SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } - SYS_SETCONTEXT = 422 // { int setcontext( const struct __ucontext *ucp); } + SYS_SETCONTEXT = 422 // { int setcontext(const struct __ucontext *ucp); } SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp); } SYS_SWAPOFF = 424 // { int swapoff(const char *name); } SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, acl_type_t type, struct acl *aclp); } @@ -288,10 +270,10 @@ const ( SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } - SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, 
void *data, size_t nbytes); } - SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); } SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, const struct timespec *abstime); } - SYS_THR_SUSPEND = 442 // { int thr_suspend( const struct timespec *timeout); } + SYS_THR_SUSPEND = 442 // { int thr_suspend(const struct timespec *timeout); } SYS_THR_WAKE = 443 // { int thr_wake(long id); } SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } SYS_AUDIT = 445 // { int audit(const void *record, u_int length); } @@ -300,17 +282,17 @@ const ( SYS_SETAUID = 448 // { int setauid(uid_t *auid); } SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } - SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } - SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } SYS_AUDITCTL = 453 // { int auditctl(char *path); } SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, u_long val, void *uaddr1, void *uaddr2); } SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, int param_size); } SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, mode_t mode, const struct mq_attr *attr); } - SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } - SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } - SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len,unsigned msg_prio, const struct timespec *abs_timeout);} - SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } + SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } + SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len, unsigned msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); } SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } @@ -319,7 +301,7 @@ const ( SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); } SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo 
*sinfo, int flags); } - SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr * from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } + SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr *from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, size_t nbyte, off_t offset); } SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, size_t nbyte, off_t offset); } SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos); } @@ -338,14 +320,12 @@ const ( SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, int flag); } SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag); } SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, char **envv); } - SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, struct timeval *times); } SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, char *path2, int flag); } SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } - SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, mode_t mode); } - SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, size_t bufsize); } + SYS_READLINKAT = 500 // { ssize_t readlinkat(int fd, char *path, char *buf, size_t bufsize); } SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, char *new); } SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, char *path2); } SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } @@ -391,7 +371,24 @@ const ( SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); } SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } - SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); } - SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); } SYS_FDATASYNC = 550 // { int fdatasync(int fd); } + SYS_FSTAT = 551 // { int fstat(int fd, struct stat *sb); } + SYS_FSTATAT = 552 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } + SYS_FHSTAT = 553 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } + SYS_GETDIRENTRIES = 554 // { ssize_t getdirentries(int fd, char *buf, size_t count, off_t *basep); } + SYS_STATFS = 555 // { int statfs(char *path, struct statfs *buf); } + SYS_FSTATFS = 556 // { int fstatfs(int fd, struct statfs *buf); } + SYS_GETFSSTAT = 557 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } + SYS_FHSTATFS = 558 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } + SYS_MKNODAT = 559 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } + SYS_KEVENT = 560 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_CPUSET_GETDOMAIN = 561 // { int cpuset_getdomain(cpulevel_t level, 
cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int *policy); } + SYS_CPUSET_SETDOMAIN = 562 // { int cpuset_setdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int policy); } + SYS_GETRANDOM = 563 // { int getrandom(void *buf, size_t buflen, unsigned int flags); } + SYS_GETFHAT = 564 // { int getfhat(int fd, char *path, struct fhandle *fhp, int flags); } + SYS_FHLINK = 565 // { int fhlink(struct fhandle *fhp, const char *to); } + SYS_FHLINKAT = 566 // { int fhlinkat(struct fhandle *fhp, int tofd, const char *to,); } + SYS_FHREADLINK = 567 // { int fhreadlink(struct fhandle *fhp, char *buf, size_t bufsize); } + SYS___SYSCTLBYNAME = 570 // { int __sysctlbyname(const char *name, size_t namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } + SYS_CLOSE_RANGE = 575 // { int close_range(u_int lowfd, u_int highfd, int flags); } ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go index 61ad5ca3c..89dcc4274 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go @@ -1,4 +1,4 @@ -// go run mksysnum.go https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master +// go run mksysnum.go https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12 // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && freebsd @@ -19,10 +19,9 @@ const ( SYS_UNLINK = 10 // { int unlink(char *path); } SYS_CHDIR = 12 // { int chdir(char *path); } SYS_FCHDIR = 13 // { int fchdir(int fd); } - SYS_MKNOD = 14 // { int mknod(char *path, int mode, int dev); } SYS_CHMOD = 15 // { int chmod(char *path, int mode); } SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } - SYS_OBREAK = 17 // { int obreak(char *nsize); } break obreak_args int + SYS_BREAK = 17 // { caddr_t break(char *nsize); } SYS_GETPID = 20 // { pid_t getpid(void); } SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); } SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } @@ -43,7 +42,6 @@ const ( SYS_KILL = 37 // { int kill(int pid, int signum); } SYS_GETPPID = 39 // { pid_t getppid(void); } SYS_DUP = 41 // { int dup(u_int fd); } - SYS_PIPE = 42 // { int pipe(void); } SYS_GETEGID = 43 // { gid_t getegid(void); } SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); } SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); } @@ -58,15 +56,14 @@ const ( SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, size_t count); } SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); } - SYS_UMASK = 60 // { int umask(int newmask); } umask umask_args int + SYS_UMASK = 60 // { int umask(int newmask); } SYS_CHROOT = 61 // { int chroot(char *path); } SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); } SYS_VFORK = 66 // { int vfork(void); } SYS_SBRK = 69 // { int sbrk(int incr); } SYS_SSTK = 70 // { int sstk(int incr); } - SYS_OVADVISE = 72 // { int ovadvise(int anom); } vadvise ovadvise_args int SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } - SYS_MPROTECT = 74 // { int mprotect(const void *addr, size_t len, int prot); } + SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); } SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); } SYS_MINCORE = 78 // { int 
mincore(const void *addr, size_t len, char *vec); } SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); } @@ -124,14 +121,10 @@ const ( SYS_SETGID = 181 // { int setgid(gid_t gid); } SYS_SETEGID = 182 // { int setegid(gid_t egid); } SYS_SETEUID = 183 // { int seteuid(uid_t euid); } - SYS_STAT = 188 // { int stat(char *path, struct stat *ub); } - SYS_FSTAT = 189 // { int fstat(int fd, struct stat *sb); } - SYS_LSTAT = 190 // { int lstat(char *path, struct stat *ub); } SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int - SYS_GETDIRENTRIES = 196 // { int getdirentries(int fd, char *buf, u_int count, long *basep); } SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } @@ -143,12 +136,12 @@ const ( SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, size_t nsops); } SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); } SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } - SYS_MSGRCV = 227 // { int msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_MSGRCV = 227 // { ssize_t msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, int shmflg); } SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); } SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); } SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, struct timespec *tp); } - SYS_CLOCK_SETTIME = 233 // { int clock_settime( clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_SETTIME = 233 // { int clock_settime(clockid_t clock_id, const struct timespec *tp); } SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, struct timespec *tp); } SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, struct sigevent *evp, int *timerid); } SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } @@ -157,50 +150,44 @@ const ( SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } - SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate( struct ffclock_estimate *cest); } - SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate( struct ffclock_estimate *cest); } + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate(struct ffclock_estimate *cest); } + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate(struct ffclock_estimate *cest); } SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } - SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id,int which, clockid_t *clock_id); } + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id, int which, clockid_t *clock_id); } SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } SYS_MINHERIT = 250 // { int minherit(void *addr, size_t 
len, int inherit); } SYS_RFORK = 251 // { int rfork(int flags); } - SYS_OPENBSD_POLL = 252 // { int openbsd_poll(struct pollfd *fds, u_int nfds, int timeout); } SYS_ISSETUGID = 253 // { int issetugid(void); } SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); } SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); } - SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb * const *acb_list, int nent, struct sigevent *sig); } - SYS_GETDENTS = 272 // { int getdents(int fd, char *buf, size_t count); } + SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb* const *acb_list, int nent, struct sigevent *sig); } SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); } - SYS_NSTAT = 278 // { int nstat(char *path, struct nstat *ub); } - SYS_NFSTAT = 279 // { int nfstat(int fd, struct nstat *sb); } - SYS_NLSTAT = 280 // { int nlstat(char *path, struct nstat *ub); } SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); } - SYS_FHSTAT = 299 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } SYS_MODNEXT = 300 // { int modnext(int modid); } - SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat *stat); } + SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat* stat); } SYS_MODFNEXT = 302 // { int modfnext(int modid); } SYS_MODFIND = 303 // { int modfind(const char *name); } SYS_KLDLOAD = 304 // { int kldload(const char *file); } SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } SYS_KLDFIND = 306 // { int kldfind(const char *file); } SYS_KLDNEXT = 307 // { int kldnext(int fileid); } - SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat* stat); } + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat *stat); } SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } SYS_GETSID = 310 // { int getsid(pid_t pid); } SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } - SYS_AIO_SUSPEND = 315 // { int aio_suspend( struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } + SYS_AIO_SUSPEND = 315 // { int aio_suspend(struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } SYS_YIELD = 321 // { int yield(void); } SYS_MLOCKALL = 324 // { int mlockall(int how); } SYS_MUNLOCKALL = 325 // { int munlockall(void); } - SYS___GETCWD = 326 // { int __getcwd(char *buf, u_int buflen); } + SYS___GETCWD = 326 // { int __getcwd(char *buf, size_t buflen); } SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); } SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); } SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); } @@ -226,14 +213,13 @@ const ( SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, acl_type_t type, struct acl *aclp); } SYS___ACL_ACLCHECK_FD = 354 // { 
int __acl_aclcheck_fd(int filedes, acl_type_t type, struct acl *aclp); } SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); } - SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } - SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete( struct aiocb **aiocbp, struct timespec *timeout); } + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete(struct aiocb **aiocbp, struct timespec *timeout); } SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } SYS_KQUEUE = 362 // { int kqueue(void); } - SYS_KEVENT = 363 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, int attrnamespace, const char *attrname); } @@ -251,10 +237,6 @@ const ( SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } - SYS_GETFSSTAT = 395 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } - SYS_STATFS = 396 // { int statfs(char *path, struct statfs *buf); } - SYS_FSTATFS = 397 // { int fstatfs(int fd, struct statfs *buf); } - SYS_FHSTATFS = 398 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); } SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); } SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); } @@ -267,14 +249,14 @@ const ( SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, struct mac *mac_p); } SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, struct mac *mac_p); } SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, struct mac *mac_p); } - SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link( const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } - SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link( const char *path, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + 
SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link(const char *path, int attrnamespace, const char *attrname); } SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, char **envv, struct mac *mac_p); } SYS_SIGACTION = 416 // { int sigaction(int sig, const struct sigaction *act, struct sigaction *oact); } - SYS_SIGRETURN = 417 // { int sigreturn( const struct __ucontext *sigcntxp); } + SYS_SIGRETURN = 417 // { int sigreturn(const struct __ucontext *sigcntxp); } SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } - SYS_SETCONTEXT = 422 // { int setcontext( const struct __ucontext *ucp); } + SYS_SETCONTEXT = 422 // { int setcontext(const struct __ucontext *ucp); } SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp); } SYS_SWAPOFF = 424 // { int swapoff(const char *name); } SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, acl_type_t type, struct acl *aclp); } @@ -288,10 +270,10 @@ const ( SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } - SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file( const char *path, int attrnamespace, void *data, size_t nbytes); } - SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link( const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); } SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, const struct timespec *abstime); } - SYS_THR_SUSPEND = 442 // { int thr_suspend( const struct timespec *timeout); } + SYS_THR_SUSPEND = 442 // { int thr_suspend(const struct timespec *timeout); } SYS_THR_WAKE = 443 // { int thr_wake(long id); } SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } SYS_AUDIT = 445 // { int audit(const void *record, u_int length); } @@ -300,17 +282,17 @@ const ( SYS_SETAUID = 448 // { int setauid(uid_t *auid); } SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } - SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } - SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr( struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } SYS_AUDITCTL = 453 // { int auditctl(char *path); } SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, u_long val, void *uaddr1, void *uaddr2); } SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, int param_size); } SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, mode_t mode, const struct mq_attr *attr); } - SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } - SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int 
mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } - SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len,unsigned msg_prio, const struct timespec *abs_timeout);} - SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } + SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } + SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len, unsigned msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); } SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } @@ -319,7 +301,7 @@ const ( SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); } SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } - SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr * from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } + SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr *from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, size_t nbyte, off_t offset); } SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, size_t nbyte, off_t offset); } SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos); } @@ -338,14 +320,12 @@ const ( SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, int flag); } SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag); } SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, char **envv); } - SYS_FSTATAT = 493 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, struct timeval *times); } SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, char *path2, int flag); } SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } - SYS_MKNODAT = 498 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, mode_t mode); } - SYS_READLINKAT = 500 // { int readlinkat(int fd, char *path, char *buf, size_t bufsize); } + SYS_READLINKAT = 500 // { ssize_t readlinkat(int fd, char *path, char *buf, size_t bufsize); } SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, char *new); } SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, char *path2); } SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } @@ -391,7 +371,24 @@ const ( SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct 
timespec *ts, const sigset_t *set); } SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } - SYS_NUMA_GETAFFINITY = 548 // { int numa_getaffinity(cpuwhich_t which, id_t id, struct vm_domain_policy_entry *policy); } - SYS_NUMA_SETAFFINITY = 549 // { int numa_setaffinity(cpuwhich_t which, id_t id, const struct vm_domain_policy_entry *policy); } SYS_FDATASYNC = 550 // { int fdatasync(int fd); } + SYS_FSTAT = 551 // { int fstat(int fd, struct stat *sb); } + SYS_FSTATAT = 552 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } + SYS_FHSTAT = 553 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } + SYS_GETDIRENTRIES = 554 // { ssize_t getdirentries(int fd, char *buf, size_t count, off_t *basep); } + SYS_STATFS = 555 // { int statfs(char *path, struct statfs *buf); } + SYS_FSTATFS = 556 // { int fstatfs(int fd, struct statfs *buf); } + SYS_GETFSSTAT = 557 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } + SYS_FHSTATFS = 558 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } + SYS_MKNODAT = 559 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } + SYS_KEVENT = 560 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_CPUSET_GETDOMAIN = 561 // { int cpuset_getdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int *policy); } + SYS_CPUSET_SETDOMAIN = 562 // { int cpuset_setdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int policy); } + SYS_GETRANDOM = 563 // { int getrandom(void *buf, size_t buflen, unsigned int flags); } + SYS_GETFHAT = 564 // { int getfhat(int fd, char *path, struct fhandle *fhp, int flags); } + SYS_FHLINK = 565 // { int fhlink(struct fhandle *fhp, const char *to); } + SYS_FHLINKAT = 566 // { int fhlinkat(struct fhandle *fhp, int tofd, const char *to,); } + SYS_FHREADLINK = 567 // { int fhreadlink(struct fhandle *fhp, char *buf, size_t bufsize); } + SYS___SYSCTLBYNAME = 570 // { int __sysctlbyname(const char *name, size_t namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } + SYS_CLOSE_RANGE = 575 // { int close_range(u_int lowfd, u_int highfd, int flags); } ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go new file mode 100644 index 000000000..ee37aaa0c --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go @@ -0,0 +1,394 @@ +// go run mksysnum.go https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12 +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +//go:build riscv64 && freebsd +// +build riscv64,freebsd + +package unix + +const ( + // SYS_NOSYS = 0; // { int nosys(void); } syscall nosys_args int + SYS_EXIT = 1 // { void sys_exit(int rval); } exit sys_exit_args void + SYS_FORK = 2 // { int fork(void); } + SYS_READ = 3 // { ssize_t read(int fd, void *buf, size_t nbyte); } + SYS_WRITE = 4 // { ssize_t write(int fd, const void *buf, size_t nbyte); } + SYS_OPEN = 5 // { int open(char *path, int flags, int mode); } + SYS_CLOSE = 6 // { int close(int fd); } + SYS_WAIT4 = 7 // { int wait4(int pid, int *status, int options, struct rusage *rusage); } + SYS_LINK = 9 // { int link(char *path, char *link); } + SYS_UNLINK = 10 // { int unlink(char *path); } + SYS_CHDIR = 12 // { int chdir(char *path); } + SYS_FCHDIR = 13 // { int fchdir(int fd); } + SYS_CHMOD = 15 // { int chmod(char *path, int mode); } + SYS_CHOWN = 16 // { int chown(char *path, int uid, int gid); } + SYS_BREAK = 17 // { caddr_t break(char *nsize); } + SYS_GETPID = 20 // { pid_t getpid(void); } + SYS_MOUNT = 21 // { int mount(char *type, char *path, int flags, caddr_t data); } + SYS_UNMOUNT = 22 // { int unmount(char *path, int flags); } + SYS_SETUID = 23 // { int setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t getuid(void); } + SYS_GETEUID = 25 // { uid_t geteuid(void); } + SYS_PTRACE = 26 // { int ptrace(int req, pid_t pid, caddr_t addr, int data); } + SYS_RECVMSG = 27 // { int recvmsg(int s, struct msghdr *msg, int flags); } + SYS_SENDMSG = 28 // { int sendmsg(int s, struct msghdr *msg, int flags); } + SYS_RECVFROM = 29 // { int recvfrom(int s, caddr_t buf, size_t len, int flags, struct sockaddr * __restrict from, __socklen_t * __restrict fromlenaddr); } + SYS_ACCEPT = 30 // { int accept(int s, struct sockaddr * __restrict name, __socklen_t * __restrict anamelen); } + SYS_GETPEERNAME = 31 // { int getpeername(int fdes, struct sockaddr * __restrict asa, __socklen_t * __restrict alen); } + SYS_GETSOCKNAME = 32 // { int getsockname(int fdes, struct sockaddr * __restrict asa, __socklen_t * __restrict alen); } + SYS_ACCESS = 33 // { int access(char *path, int amode); } + SYS_CHFLAGS = 34 // { int chflags(const char *path, u_long flags); } + SYS_FCHFLAGS = 35 // { int fchflags(int fd, u_long flags); } + SYS_SYNC = 36 // { int sync(void); } + SYS_KILL = 37 // { int kill(int pid, int signum); } + SYS_GETPPID = 39 // { pid_t getppid(void); } + SYS_DUP = 41 // { int dup(u_int fd); } + SYS_GETEGID = 43 // { gid_t getegid(void); } + SYS_PROFIL = 44 // { int profil(caddr_t samples, size_t size, size_t offset, u_int scale); } + SYS_KTRACE = 45 // { int ktrace(const char *fname, int ops, int facs, int pid); } + SYS_GETGID = 47 // { gid_t getgid(void); } + SYS_GETLOGIN = 49 // { int getlogin(char *namebuf, u_int namelen); } + SYS_SETLOGIN = 50 // { int setlogin(char *namebuf); } + SYS_ACCT = 51 // { int acct(char *path); } + SYS_SIGALTSTACK = 53 // { int sigaltstack(stack_t *ss, stack_t *oss); } + SYS_IOCTL = 54 // { int ioctl(int fd, u_long com, caddr_t data); } + SYS_REBOOT = 55 // { int reboot(int opt); } + SYS_REVOKE = 56 // { int revoke(char *path); } + SYS_SYMLINK = 57 // { int symlink(char *path, char *link); } + SYS_READLINK = 58 // { ssize_t readlink(char *path, char *buf, size_t count); } + SYS_EXECVE = 59 // { int execve(char *fname, char **argv, char **envv); } + SYS_UMASK = 60 // { int umask(int newmask); } + SYS_CHROOT = 61 // { int chroot(char *path); } + SYS_MSYNC = 65 // { int msync(void *addr, size_t len, int flags); } + SYS_VFORK = 66 // { int vfork(void); } + 
SYS_SBRK = 69 // { int sbrk(int incr); } + SYS_SSTK = 70 // { int sstk(int incr); } + SYS_MUNMAP = 73 // { int munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int mprotect(void *addr, size_t len, int prot); } + SYS_MADVISE = 75 // { int madvise(void *addr, size_t len, int behav); } + SYS_MINCORE = 78 // { int mincore(const void *addr, size_t len, char *vec); } + SYS_GETGROUPS = 79 // { int getgroups(u_int gidsetsize, gid_t *gidset); } + SYS_SETGROUPS = 80 // { int setgroups(u_int gidsetsize, gid_t *gidset); } + SYS_GETPGRP = 81 // { int getpgrp(void); } + SYS_SETPGID = 82 // { int setpgid(int pid, int pgid); } + SYS_SETITIMER = 83 // { int setitimer(u_int which, struct itimerval *itv, struct itimerval *oitv); } + SYS_SWAPON = 85 // { int swapon(char *name); } + SYS_GETITIMER = 86 // { int getitimer(u_int which, struct itimerval *itv); } + SYS_GETDTABLESIZE = 89 // { int getdtablesize(void); } + SYS_DUP2 = 90 // { int dup2(u_int from, u_int to); } + SYS_FCNTL = 92 // { int fcntl(int fd, int cmd, long arg); } + SYS_SELECT = 93 // { int select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); } + SYS_FSYNC = 95 // { int fsync(int fd); } + SYS_SETPRIORITY = 96 // { int setpriority(int which, int who, int prio); } + SYS_SOCKET = 97 // { int socket(int domain, int type, int protocol); } + SYS_CONNECT = 98 // { int connect(int s, caddr_t name, int namelen); } + SYS_GETPRIORITY = 100 // { int getpriority(int which, int who); } + SYS_BIND = 104 // { int bind(int s, caddr_t name, int namelen); } + SYS_SETSOCKOPT = 105 // { int setsockopt(int s, int level, int name, caddr_t val, int valsize); } + SYS_LISTEN = 106 // { int listen(int s, int backlog); } + SYS_GETTIMEOFDAY = 116 // { int gettimeofday(struct timeval *tp, struct timezone *tzp); } + SYS_GETRUSAGE = 117 // { int getrusage(int who, struct rusage *rusage); } + SYS_GETSOCKOPT = 118 // { int getsockopt(int s, int level, int name, caddr_t val, int *avalsize); } + SYS_READV = 120 // { int readv(int fd, struct iovec *iovp, u_int iovcnt); } + SYS_WRITEV = 121 // { int writev(int fd, struct iovec *iovp, u_int iovcnt); } + SYS_SETTIMEOFDAY = 122 // { int settimeofday(struct timeval *tv, struct timezone *tzp); } + SYS_FCHOWN = 123 // { int fchown(int fd, int uid, int gid); } + SYS_FCHMOD = 124 // { int fchmod(int fd, int mode); } + SYS_SETREUID = 126 // { int setreuid(int ruid, int euid); } + SYS_SETREGID = 127 // { int setregid(int rgid, int egid); } + SYS_RENAME = 128 // { int rename(char *from, char *to); } + SYS_FLOCK = 131 // { int flock(int fd, int how); } + SYS_MKFIFO = 132 // { int mkfifo(char *path, int mode); } + SYS_SENDTO = 133 // { int sendto(int s, caddr_t buf, size_t len, int flags, caddr_t to, int tolen); } + SYS_SHUTDOWN = 134 // { int shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int socketpair(int domain, int type, int protocol, int *rsv); } + SYS_MKDIR = 136 // { int mkdir(char *path, int mode); } + SYS_RMDIR = 137 // { int rmdir(char *path); } + SYS_UTIMES = 138 // { int utimes(char *path, struct timeval *tptr); } + SYS_ADJTIME = 140 // { int adjtime(struct timeval *delta, struct timeval *olddelta); } + SYS_SETSID = 147 // { int setsid(void); } + SYS_QUOTACTL = 148 // { int quotactl(char *path, int cmd, int uid, caddr_t arg); } + SYS_NLM_SYSCALL = 154 // { int nlm_syscall(int debug_level, int grace_period, int addr_count, char **addrs); } + SYS_NFSSVC = 155 // { int nfssvc(int flag, caddr_t argp); } + SYS_LGETFH = 160 // { int lgetfh(char *fname, struct fhandle *fhp); } + SYS_GETFH = 161 // { 
int getfh(char *fname, struct fhandle *fhp); } + SYS_SYSARCH = 165 // { int sysarch(int op, char *parms); } + SYS_RTPRIO = 166 // { int rtprio(int function, pid_t pid, struct rtprio *rtp); } + SYS_SEMSYS = 169 // { int semsys(int which, int a2, int a3, int a4, int a5); } + SYS_MSGSYS = 170 // { int msgsys(int which, int a2, int a3, int a4, int a5, int a6); } + SYS_SHMSYS = 171 // { int shmsys(int which, int a2, int a3, int a4); } + SYS_SETFIB = 175 // { int setfib(int fibnum); } + SYS_NTP_ADJTIME = 176 // { int ntp_adjtime(struct timex *tp); } + SYS_SETGID = 181 // { int setgid(gid_t gid); } + SYS_SETEGID = 182 // { int setegid(gid_t egid); } + SYS_SETEUID = 183 // { int seteuid(uid_t euid); } + SYS_PATHCONF = 191 // { int pathconf(char *path, int name); } + SYS_FPATHCONF = 192 // { int fpathconf(int fd, int name); } + SYS_GETRLIMIT = 194 // { int getrlimit(u_int which, struct rlimit *rlp); } getrlimit __getrlimit_args int + SYS_SETRLIMIT = 195 // { int setrlimit(u_int which, struct rlimit *rlp); } setrlimit __setrlimit_args int + SYS___SYSCTL = 202 // { int __sysctl(int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } __sysctl sysctl_args int + SYS_MLOCK = 203 // { int mlock(const void *addr, size_t len); } + SYS_MUNLOCK = 204 // { int munlock(const void *addr, size_t len); } + SYS_UNDELETE = 205 // { int undelete(char *path); } + SYS_FUTIMES = 206 // { int futimes(int fd, struct timeval *tptr); } + SYS_GETPGID = 207 // { int getpgid(pid_t pid); } + SYS_POLL = 209 // { int poll(struct pollfd *fds, u_int nfds, int timeout); } + SYS_SEMGET = 221 // { int semget(key_t key, int nsems, int semflg); } + SYS_SEMOP = 222 // { int semop(int semid, struct sembuf *sops, size_t nsops); } + SYS_MSGGET = 225 // { int msgget(key_t key, int msgflg); } + SYS_MSGSND = 226 // { int msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } + SYS_MSGRCV = 227 // { ssize_t msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_SHMAT = 228 // { int shmat(int shmid, const void *shmaddr, int shmflg); } + SYS_SHMDT = 230 // { int shmdt(const void *shmaddr); } + SYS_SHMGET = 231 // { int shmget(key_t key, size_t size, int shmflg); } + SYS_CLOCK_GETTIME = 232 // { int clock_gettime(clockid_t clock_id, struct timespec *tp); } + SYS_CLOCK_SETTIME = 233 // { int clock_settime(clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_GETRES = 234 // { int clock_getres(clockid_t clock_id, struct timespec *tp); } + SYS_KTIMER_CREATE = 235 // { int ktimer_create(clockid_t clock_id, struct sigevent *evp, int *timerid); } + SYS_KTIMER_DELETE = 236 // { int ktimer_delete(int timerid); } + SYS_KTIMER_SETTIME = 237 // { int ktimer_settime(int timerid, int flags, const struct itimerspec *value, struct itimerspec *ovalue); } + SYS_KTIMER_GETTIME = 238 // { int ktimer_gettime(int timerid, struct itimerspec *value); } + SYS_KTIMER_GETOVERRUN = 239 // { int ktimer_getoverrun(int timerid); } + SYS_NANOSLEEP = 240 // { int nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } + SYS_FFCLOCK_GETCOUNTER = 241 // { int ffclock_getcounter(ffcounter *ffcount); } + SYS_FFCLOCK_SETESTIMATE = 242 // { int ffclock_setestimate(struct ffclock_estimate *cest); } + SYS_FFCLOCK_GETESTIMATE = 243 // { int ffclock_getestimate(struct ffclock_estimate *cest); } + SYS_CLOCK_NANOSLEEP = 244 // { int clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp, struct timespec *rmtp); } + SYS_CLOCK_GETCPUCLOCKID2 = 247 // { int clock_getcpuclockid2(id_t id, int which, 
clockid_t *clock_id); } + SYS_NTP_GETTIME = 248 // { int ntp_gettime(struct ntptimeval *ntvp); } + SYS_MINHERIT = 250 // { int minherit(void *addr, size_t len, int inherit); } + SYS_RFORK = 251 // { int rfork(int flags); } + SYS_ISSETUGID = 253 // { int issetugid(void); } + SYS_LCHOWN = 254 // { int lchown(char *path, int uid, int gid); } + SYS_AIO_READ = 255 // { int aio_read(struct aiocb *aiocbp); } + SYS_AIO_WRITE = 256 // { int aio_write(struct aiocb *aiocbp); } + SYS_LIO_LISTIO = 257 // { int lio_listio(int mode, struct aiocb* const *acb_list, int nent, struct sigevent *sig); } + SYS_LCHMOD = 274 // { int lchmod(char *path, mode_t mode); } + SYS_LUTIMES = 276 // { int lutimes(char *path, struct timeval *tptr); } + SYS_PREADV = 289 // { ssize_t preadv(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } + SYS_PWRITEV = 290 // { ssize_t pwritev(int fd, struct iovec *iovp, u_int iovcnt, off_t offset); } + SYS_FHOPEN = 298 // { int fhopen(const struct fhandle *u_fhp, int flags); } + SYS_MODNEXT = 300 // { int modnext(int modid); } + SYS_MODSTAT = 301 // { int modstat(int modid, struct module_stat* stat); } + SYS_MODFNEXT = 302 // { int modfnext(int modid); } + SYS_MODFIND = 303 // { int modfind(const char *name); } + SYS_KLDLOAD = 304 // { int kldload(const char *file); } + SYS_KLDUNLOAD = 305 // { int kldunload(int fileid); } + SYS_KLDFIND = 306 // { int kldfind(const char *file); } + SYS_KLDNEXT = 307 // { int kldnext(int fileid); } + SYS_KLDSTAT = 308 // { int kldstat(int fileid, struct kld_file_stat *stat); } + SYS_KLDFIRSTMOD = 309 // { int kldfirstmod(int fileid); } + SYS_GETSID = 310 // { int getsid(pid_t pid); } + SYS_SETRESUID = 311 // { int setresuid(uid_t ruid, uid_t euid, uid_t suid); } + SYS_SETRESGID = 312 // { int setresgid(gid_t rgid, gid_t egid, gid_t sgid); } + SYS_AIO_RETURN = 314 // { ssize_t aio_return(struct aiocb *aiocbp); } + SYS_AIO_SUSPEND = 315 // { int aio_suspend(struct aiocb * const * aiocbp, int nent, const struct timespec *timeout); } + SYS_AIO_CANCEL = 316 // { int aio_cancel(int fd, struct aiocb *aiocbp); } + SYS_AIO_ERROR = 317 // { int aio_error(struct aiocb *aiocbp); } + SYS_YIELD = 321 // { int yield(void); } + SYS_MLOCKALL = 324 // { int mlockall(int how); } + SYS_MUNLOCKALL = 325 // { int munlockall(void); } + SYS___GETCWD = 326 // { int __getcwd(char *buf, size_t buflen); } + SYS_SCHED_SETPARAM = 327 // { int sched_setparam (pid_t pid, const struct sched_param *param); } + SYS_SCHED_GETPARAM = 328 // { int sched_getparam (pid_t pid, struct sched_param *param); } + SYS_SCHED_SETSCHEDULER = 329 // { int sched_setscheduler (pid_t pid, int policy, const struct sched_param *param); } + SYS_SCHED_GETSCHEDULER = 330 // { int sched_getscheduler (pid_t pid); } + SYS_SCHED_YIELD = 331 // { int sched_yield (void); } + SYS_SCHED_GET_PRIORITY_MAX = 332 // { int sched_get_priority_max (int policy); } + SYS_SCHED_GET_PRIORITY_MIN = 333 // { int sched_get_priority_min (int policy); } + SYS_SCHED_RR_GET_INTERVAL = 334 // { int sched_rr_get_interval (pid_t pid, struct timespec *interval); } + SYS_UTRACE = 335 // { int utrace(const void *addr, size_t len); } + SYS_KLDSYM = 337 // { int kldsym(int fileid, int cmd, void *data); } + SYS_JAIL = 338 // { int jail(struct jail *jail); } + SYS_SIGPROCMASK = 340 // { int sigprocmask(int how, const sigset_t *set, sigset_t *oset); } + SYS_SIGSUSPEND = 341 // { int sigsuspend(const sigset_t *sigmask); } + SYS_SIGPENDING = 343 // { int sigpending(sigset_t *set); } + SYS_SIGTIMEDWAIT = 345 // { int sigtimedwait(const 
sigset_t *set, siginfo_t *info, const struct timespec *timeout); } + SYS_SIGWAITINFO = 346 // { int sigwaitinfo(const sigset_t *set, siginfo_t *info); } + SYS___ACL_GET_FILE = 347 // { int __acl_get_file(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_SET_FILE = 348 // { int __acl_set_file(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_GET_FD = 349 // { int __acl_get_fd(int filedes, acl_type_t type, struct acl *aclp); } + SYS___ACL_SET_FD = 350 // { int __acl_set_fd(int filedes, acl_type_t type, struct acl *aclp); } + SYS___ACL_DELETE_FILE = 351 // { int __acl_delete_file(const char *path, acl_type_t type); } + SYS___ACL_DELETE_FD = 352 // { int __acl_delete_fd(int filedes, acl_type_t type); } + SYS___ACL_ACLCHECK_FILE = 353 // { int __acl_aclcheck_file(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_ACLCHECK_FD = 354 // { int __acl_aclcheck_fd(int filedes, acl_type_t type, struct acl *aclp); } + SYS_EXTATTRCTL = 355 // { int extattrctl(const char *path, int cmd, const char *filename, int attrnamespace, const char *attrname); } + SYS_EXTATTR_SET_FILE = 356 // { ssize_t extattr_set_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_FILE = 357 // { ssize_t extattr_get_file(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_FILE = 358 // { int extattr_delete_file(const char *path, int attrnamespace, const char *attrname); } + SYS_AIO_WAITCOMPLETE = 359 // { ssize_t aio_waitcomplete(struct aiocb **aiocbp, struct timespec *timeout); } + SYS_GETRESUID = 360 // { int getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } + SYS_GETRESGID = 361 // { int getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } + SYS_KQUEUE = 362 // { int kqueue(void); } + SYS_EXTATTR_SET_FD = 371 // { ssize_t extattr_set_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_FD = 372 // { ssize_t extattr_get_fd(int fd, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_FD = 373 // { int extattr_delete_fd(int fd, int attrnamespace, const char *attrname); } + SYS___SETUGID = 374 // { int __setugid(int flag); } + SYS_EACCESS = 376 // { int eaccess(char *path, int amode); } + SYS_NMOUNT = 378 // { int nmount(struct iovec *iovp, unsigned int iovcnt, int flags); } + SYS___MAC_GET_PROC = 384 // { int __mac_get_proc(struct mac *mac_p); } + SYS___MAC_SET_PROC = 385 // { int __mac_set_proc(struct mac *mac_p); } + SYS___MAC_GET_FD = 386 // { int __mac_get_fd(int fd, struct mac *mac_p); } + SYS___MAC_GET_FILE = 387 // { int __mac_get_file(const char *path_p, struct mac *mac_p); } + SYS___MAC_SET_FD = 388 // { int __mac_set_fd(int fd, struct mac *mac_p); } + SYS___MAC_SET_FILE = 389 // { int __mac_set_file(const char *path_p, struct mac *mac_p); } + SYS_KENV = 390 // { int kenv(int what, const char *name, char *value, int len); } + SYS_LCHFLAGS = 391 // { int lchflags(const char *path, u_long flags); } + SYS_UUIDGEN = 392 // { int uuidgen(struct uuid *store, int count); } + SYS_SENDFILE = 393 // { int sendfile(int fd, int s, off_t offset, size_t nbytes, struct sf_hdtr *hdtr, off_t *sbytes, int flags); } + SYS_MAC_SYSCALL = 394 // { int mac_syscall(const char *policy, int call, void *arg); } + SYS_KSEM_CLOSE = 400 // { int ksem_close(semid_t id); } + SYS_KSEM_POST = 401 // { int ksem_post(semid_t id); } + SYS_KSEM_WAIT = 402 // { int ksem_wait(semid_t id); } + 
SYS_KSEM_TRYWAIT = 403 // { int ksem_trywait(semid_t id); } + SYS_KSEM_INIT = 404 // { int ksem_init(semid_t *idp, unsigned int value); } + SYS_KSEM_OPEN = 405 // { int ksem_open(semid_t *idp, const char *name, int oflag, mode_t mode, unsigned int value); } + SYS_KSEM_UNLINK = 406 // { int ksem_unlink(const char *name); } + SYS_KSEM_GETVALUE = 407 // { int ksem_getvalue(semid_t id, int *val); } + SYS_KSEM_DESTROY = 408 // { int ksem_destroy(semid_t id); } + SYS___MAC_GET_PID = 409 // { int __mac_get_pid(pid_t pid, struct mac *mac_p); } + SYS___MAC_GET_LINK = 410 // { int __mac_get_link(const char *path_p, struct mac *mac_p); } + SYS___MAC_SET_LINK = 411 // { int __mac_set_link(const char *path_p, struct mac *mac_p); } + SYS_EXTATTR_SET_LINK = 412 // { ssize_t extattr_set_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_GET_LINK = 413 // { ssize_t extattr_get_link(const char *path, int attrnamespace, const char *attrname, void *data, size_t nbytes); } + SYS_EXTATTR_DELETE_LINK = 414 // { int extattr_delete_link(const char *path, int attrnamespace, const char *attrname); } + SYS___MAC_EXECVE = 415 // { int __mac_execve(char *fname, char **argv, char **envv, struct mac *mac_p); } + SYS_SIGACTION = 416 // { int sigaction(int sig, const struct sigaction *act, struct sigaction *oact); } + SYS_SIGRETURN = 417 // { int sigreturn(const struct __ucontext *sigcntxp); } + SYS_GETCONTEXT = 421 // { int getcontext(struct __ucontext *ucp); } + SYS_SETCONTEXT = 422 // { int setcontext(const struct __ucontext *ucp); } + SYS_SWAPCONTEXT = 423 // { int swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp); } + SYS_SWAPOFF = 424 // { int swapoff(const char *name); } + SYS___ACL_GET_LINK = 425 // { int __acl_get_link(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_SET_LINK = 426 // { int __acl_set_link(const char *path, acl_type_t type, struct acl *aclp); } + SYS___ACL_DELETE_LINK = 427 // { int __acl_delete_link(const char *path, acl_type_t type); } + SYS___ACL_ACLCHECK_LINK = 428 // { int __acl_aclcheck_link(const char *path, acl_type_t type, struct acl *aclp); } + SYS_SIGWAIT = 429 // { int sigwait(const sigset_t *set, int *sig); } + SYS_THR_CREATE = 430 // { int thr_create(ucontext_t *ctx, long *id, int flags); } + SYS_THR_EXIT = 431 // { void thr_exit(long *state); } + SYS_THR_SELF = 432 // { int thr_self(long *id); } + SYS_THR_KILL = 433 // { int thr_kill(long id, int sig); } + SYS_JAIL_ATTACH = 436 // { int jail_attach(int jid); } + SYS_EXTATTR_LIST_FD = 437 // { ssize_t extattr_list_fd(int fd, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_FILE = 438 // { ssize_t extattr_list_file(const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_EXTATTR_LIST_LINK = 439 // { ssize_t extattr_list_link(const char *path, int attrnamespace, void *data, size_t nbytes); } + SYS_KSEM_TIMEDWAIT = 441 // { int ksem_timedwait(semid_t id, const struct timespec *abstime); } + SYS_THR_SUSPEND = 442 // { int thr_suspend(const struct timespec *timeout); } + SYS_THR_WAKE = 443 // { int thr_wake(long id); } + SYS_KLDUNLOADF = 444 // { int kldunloadf(int fileid, int flags); } + SYS_AUDIT = 445 // { int audit(const void *record, u_int length); } + SYS_AUDITON = 446 // { int auditon(int cmd, void *data, u_int length); } + SYS_GETAUID = 447 // { int getauid(uid_t *auid); } + SYS_SETAUID = 448 // { int setauid(uid_t *auid); } + SYS_GETAUDIT = 449 // { int getaudit(struct auditinfo *auditinfo); } + 
SYS_SETAUDIT = 450 // { int setaudit(struct auditinfo *auditinfo); } + SYS_GETAUDIT_ADDR = 451 // { int getaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_SETAUDIT_ADDR = 452 // { int setaudit_addr(struct auditinfo_addr *auditinfo_addr, u_int length); } + SYS_AUDITCTL = 453 // { int auditctl(char *path); } + SYS__UMTX_OP = 454 // { int _umtx_op(void *obj, int op, u_long val, void *uaddr1, void *uaddr2); } + SYS_THR_NEW = 455 // { int thr_new(struct thr_param *param, int param_size); } + SYS_SIGQUEUE = 456 // { int sigqueue(pid_t pid, int signum, void *value); } + SYS_KMQ_OPEN = 457 // { int kmq_open(const char *path, int flags, mode_t mode, const struct mq_attr *attr); } + SYS_KMQ_SETATTR = 458 // { int kmq_setattr(int mqd, const struct mq_attr *attr, struct mq_attr *oattr); } + SYS_KMQ_TIMEDRECEIVE = 459 // { int kmq_timedreceive(int mqd, char *msg_ptr, size_t msg_len, unsigned *msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_TIMEDSEND = 460 // { int kmq_timedsend(int mqd, const char *msg_ptr, size_t msg_len, unsigned msg_prio, const struct timespec *abs_timeout); } + SYS_KMQ_NOTIFY = 461 // { int kmq_notify(int mqd, const struct sigevent *sigev); } + SYS_KMQ_UNLINK = 462 // { int kmq_unlink(const char *path); } + SYS_ABORT2 = 463 // { int abort2(const char *why, int nargs, void **args); } + SYS_THR_SET_NAME = 464 // { int thr_set_name(long id, const char *name); } + SYS_AIO_FSYNC = 465 // { int aio_fsync(int op, struct aiocb *aiocbp); } + SYS_RTPRIO_THREAD = 466 // { int rtprio_thread(int function, lwpid_t lwpid, struct rtprio *rtp); } + SYS_SCTP_PEELOFF = 471 // { int sctp_peeloff(int sd, uint32_t name); } + SYS_SCTP_GENERIC_SENDMSG = 472 // { int sctp_generic_sendmsg(int sd, caddr_t msg, int mlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } + SYS_SCTP_GENERIC_SENDMSG_IOV = 473 // { int sctp_generic_sendmsg_iov(int sd, struct iovec *iov, int iovlen, caddr_t to, __socklen_t tolen, struct sctp_sndrcvinfo *sinfo, int flags); } + SYS_SCTP_GENERIC_RECVMSG = 474 // { int sctp_generic_recvmsg(int sd, struct iovec *iov, int iovlen, struct sockaddr *from, __socklen_t *fromlenaddr, struct sctp_sndrcvinfo *sinfo, int *msg_flags); } + SYS_PREAD = 475 // { ssize_t pread(int fd, void *buf, size_t nbyte, off_t offset); } + SYS_PWRITE = 476 // { ssize_t pwrite(int fd, const void *buf, size_t nbyte, off_t offset); } + SYS_MMAP = 477 // { caddr_t mmap(caddr_t addr, size_t len, int prot, int flags, int fd, off_t pos); } + SYS_LSEEK = 478 // { off_t lseek(int fd, off_t offset, int whence); } + SYS_TRUNCATE = 479 // { int truncate(char *path, off_t length); } + SYS_FTRUNCATE = 480 // { int ftruncate(int fd, off_t length); } + SYS_THR_KILL2 = 481 // { int thr_kill2(pid_t pid, long id, int sig); } + SYS_SHM_OPEN = 482 // { int shm_open(const char *path, int flags, mode_t mode); } + SYS_SHM_UNLINK = 483 // { int shm_unlink(const char *path); } + SYS_CPUSET = 484 // { int cpuset(cpusetid_t *setid); } + SYS_CPUSET_SETID = 485 // { int cpuset_setid(cpuwhich_t which, id_t id, cpusetid_t setid); } + SYS_CPUSET_GETID = 486 // { int cpuset_getid(cpulevel_t level, cpuwhich_t which, id_t id, cpusetid_t *setid); } + SYS_CPUSET_GETAFFINITY = 487 // { int cpuset_getaffinity(cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, cpuset_t *mask); } + SYS_CPUSET_SETAFFINITY = 488 // { int cpuset_setaffinity(cpulevel_t level, cpuwhich_t which, id_t id, size_t cpusetsize, const cpuset_t *mask); } + SYS_FACCESSAT = 489 // { int faccessat(int fd, char 
*path, int amode, int flag); } + SYS_FCHMODAT = 490 // { int fchmodat(int fd, char *path, mode_t mode, int flag); } + SYS_FCHOWNAT = 491 // { int fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag); } + SYS_FEXECVE = 492 // { int fexecve(int fd, char **argv, char **envv); } + SYS_FUTIMESAT = 494 // { int futimesat(int fd, char *path, struct timeval *times); } + SYS_LINKAT = 495 // { int linkat(int fd1, char *path1, int fd2, char *path2, int flag); } + SYS_MKDIRAT = 496 // { int mkdirat(int fd, char *path, mode_t mode); } + SYS_MKFIFOAT = 497 // { int mkfifoat(int fd, char *path, mode_t mode); } + SYS_OPENAT = 499 // { int openat(int fd, char *path, int flag, mode_t mode); } + SYS_READLINKAT = 500 // { ssize_t readlinkat(int fd, char *path, char *buf, size_t bufsize); } + SYS_RENAMEAT = 501 // { int renameat(int oldfd, char *old, int newfd, char *new); } + SYS_SYMLINKAT = 502 // { int symlinkat(char *path1, int fd, char *path2); } + SYS_UNLINKAT = 503 // { int unlinkat(int fd, char *path, int flag); } + SYS_POSIX_OPENPT = 504 // { int posix_openpt(int flags); } + SYS_GSSD_SYSCALL = 505 // { int gssd_syscall(char *path); } + SYS_JAIL_GET = 506 // { int jail_get(struct iovec *iovp, unsigned int iovcnt, int flags); } + SYS_JAIL_SET = 507 // { int jail_set(struct iovec *iovp, unsigned int iovcnt, int flags); } + SYS_JAIL_REMOVE = 508 // { int jail_remove(int jid); } + SYS_CLOSEFROM = 509 // { int closefrom(int lowfd); } + SYS___SEMCTL = 510 // { int __semctl(int semid, int semnum, int cmd, union semun *arg); } + SYS_MSGCTL = 511 // { int msgctl(int msqid, int cmd, struct msqid_ds *buf); } + SYS_SHMCTL = 512 // { int shmctl(int shmid, int cmd, struct shmid_ds *buf); } + SYS_LPATHCONF = 513 // { int lpathconf(char *path, int name); } + SYS___CAP_RIGHTS_GET = 515 // { int __cap_rights_get(int version, int fd, cap_rights_t *rightsp); } + SYS_CAP_ENTER = 516 // { int cap_enter(void); } + SYS_CAP_GETMODE = 517 // { int cap_getmode(u_int *modep); } + SYS_PDFORK = 518 // { int pdfork(int *fdp, int flags); } + SYS_PDKILL = 519 // { int pdkill(int fd, int signum); } + SYS_PDGETPID = 520 // { int pdgetpid(int fd, pid_t *pidp); } + SYS_PSELECT = 522 // { int pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *sm); } + SYS_GETLOGINCLASS = 523 // { int getloginclass(char *namebuf, size_t namelen); } + SYS_SETLOGINCLASS = 524 // { int setloginclass(const char *namebuf); } + SYS_RCTL_GET_RACCT = 525 // { int rctl_get_racct(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_RCTL_GET_RULES = 526 // { int rctl_get_rules(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_RCTL_GET_LIMITS = 527 // { int rctl_get_limits(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_RCTL_ADD_RULE = 528 // { int rctl_add_rule(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_RCTL_REMOVE_RULE = 529 // { int rctl_remove_rule(const void *inbufp, size_t inbuflen, void *outbufp, size_t outbuflen); } + SYS_POSIX_FALLOCATE = 530 // { int posix_fallocate(int fd, off_t offset, off_t len); } + SYS_POSIX_FADVISE = 531 // { int posix_fadvise(int fd, off_t offset, off_t len, int advice); } + SYS_WAIT6 = 532 // { int wait6(idtype_t idtype, id_t id, int *status, int options, struct __wrusage *wrusage, siginfo_t *info); } + SYS_CAP_RIGHTS_LIMIT = 533 // { int cap_rights_limit(int fd, cap_rights_t *rightsp); } + SYS_CAP_IOCTLS_LIMIT = 534 // { int cap_ioctls_limit(int fd, const u_long 
*cmds, size_t ncmds); } + SYS_CAP_IOCTLS_GET = 535 // { ssize_t cap_ioctls_get(int fd, u_long *cmds, size_t maxcmds); } + SYS_CAP_FCNTLS_LIMIT = 536 // { int cap_fcntls_limit(int fd, uint32_t fcntlrights); } + SYS_CAP_FCNTLS_GET = 537 // { int cap_fcntls_get(int fd, uint32_t *fcntlrightsp); } + SYS_BINDAT = 538 // { int bindat(int fd, int s, caddr_t name, int namelen); } + SYS_CONNECTAT = 539 // { int connectat(int fd, int s, caddr_t name, int namelen); } + SYS_CHFLAGSAT = 540 // { int chflagsat(int fd, const char *path, u_long flags, int atflag); } + SYS_ACCEPT4 = 541 // { int accept4(int s, struct sockaddr * __restrict name, __socklen_t * __restrict anamelen, int flags); } + SYS_PIPE2 = 542 // { int pipe2(int *fildes, int flags); } + SYS_AIO_MLOCK = 543 // { int aio_mlock(struct aiocb *aiocbp); } + SYS_PROCCTL = 544 // { int procctl(idtype_t idtype, id_t id, int com, void *data); } + SYS_PPOLL = 545 // { int ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *set); } + SYS_FUTIMENS = 546 // { int futimens(int fd, struct timespec *times); } + SYS_UTIMENSAT = 547 // { int utimensat(int fd, char *path, struct timespec *times, int flag); } + SYS_FDATASYNC = 550 // { int fdatasync(int fd); } + SYS_FSTAT = 551 // { int fstat(int fd, struct stat *sb); } + SYS_FSTATAT = 552 // { int fstatat(int fd, char *path, struct stat *buf, int flag); } + SYS_FHSTAT = 553 // { int fhstat(const struct fhandle *u_fhp, struct stat *sb); } + SYS_GETDIRENTRIES = 554 // { ssize_t getdirentries(int fd, char *buf, size_t count, off_t *basep); } + SYS_STATFS = 555 // { int statfs(char *path, struct statfs *buf); } + SYS_FSTATFS = 556 // { int fstatfs(int fd, struct statfs *buf); } + SYS_GETFSSTAT = 557 // { int getfsstat(struct statfs *buf, long bufsize, int mode); } + SYS_FHSTATFS = 558 // { int fhstatfs(const struct fhandle *u_fhp, struct statfs *buf); } + SYS_MKNODAT = 559 // { int mknodat(int fd, char *path, mode_t mode, dev_t dev); } + SYS_KEVENT = 560 // { int kevent(int fd, struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_CPUSET_GETDOMAIN = 561 // { int cpuset_getdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int *policy); } + SYS_CPUSET_SETDOMAIN = 562 // { int cpuset_setdomain(cpulevel_t level, cpuwhich_t which, id_t id, size_t domainsetsize, domainset_t *mask, int policy); } + SYS_GETRANDOM = 563 // { int getrandom(void *buf, size_t buflen, unsigned int flags); } + SYS_GETFHAT = 564 // { int getfhat(int fd, char *path, struct fhandle *fhp, int flags); } + SYS_FHLINK = 565 // { int fhlink(struct fhandle *fhp, const char *to); } + SYS_FHLINKAT = 566 // { int fhlinkat(struct fhandle *fhp, int tofd, const char *to,); } + SYS_FHREADLINK = 567 // { int fhreadlink(struct fhandle *fhp, char *buf, size_t bufsize); } + SYS___SYSCTLBYNAME = 570 // { int __sysctlbyname(const char *name, size_t namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } + SYS_CLOSE_RANGE = 575 // { int close_range(u_int lowfd, u_int highfd, int flags); } +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index cac1f758b..c9c4ad031 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -m32 /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static 
-I/tmp/386/include -m32 /tmp/386/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux @@ -446,4 +446,5 @@ const ( SYS_MEMFD_SECRET = 447 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index f327e4a0b..12ff3417c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -m64 /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/amd64/include -m64 /tmp/amd64/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux @@ -368,4 +368,5 @@ const ( SYS_MEMFD_SECRET = 447 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index fb06a08d4..c3fb5e77a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/arm/include /tmp/arm/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux @@ -410,4 +410,5 @@ const ( SYS_LANDLOCK_RESTRICT_SELF = 446 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 58285646e..358c847a4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -fsigned-char /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/arm64/include -fsigned-char /tmp/arm64/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux @@ -313,4 +313,5 @@ const ( SYS_MEMFD_SECRET = 447 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go new file mode 100644 index 000000000..81c4849b1 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -0,0 +1,311 @@ +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/loong64/include /tmp/loong64/include/asm/unistd.h +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +//go:build loong64 && linux +// +build loong64,linux + +package unix + +const ( + SYS_IO_SETUP = 0 + SYS_IO_DESTROY = 1 + SYS_IO_SUBMIT = 2 + SYS_IO_CANCEL = 3 + SYS_IO_GETEVENTS = 4 + SYS_SETXATTR = 5 + SYS_LSETXATTR = 6 + SYS_FSETXATTR = 7 + SYS_GETXATTR = 8 + SYS_LGETXATTR = 9 + SYS_FGETXATTR = 10 + SYS_LISTXATTR = 11 + SYS_LLISTXATTR = 12 + SYS_FLISTXATTR = 13 + SYS_REMOVEXATTR = 14 + SYS_LREMOVEXATTR = 15 + SYS_FREMOVEXATTR = 16 + SYS_GETCWD = 17 + SYS_LOOKUP_DCOOKIE = 18 + SYS_EVENTFD2 = 19 + SYS_EPOLL_CREATE1 = 20 + SYS_EPOLL_CTL = 21 + SYS_EPOLL_PWAIT = 22 + SYS_DUP = 23 + SYS_DUP3 = 24 + SYS_FCNTL = 25 + SYS_INOTIFY_INIT1 = 26 + SYS_INOTIFY_ADD_WATCH = 27 + SYS_INOTIFY_RM_WATCH = 28 + SYS_IOCTL = 29 + SYS_IOPRIO_SET = 30 + SYS_IOPRIO_GET = 31 + SYS_FLOCK = 32 + SYS_MKNODAT = 33 + SYS_MKDIRAT = 34 + SYS_UNLINKAT = 35 + SYS_SYMLINKAT = 36 + SYS_LINKAT = 37 + SYS_UMOUNT2 = 39 + SYS_MOUNT = 40 + SYS_PIVOT_ROOT = 41 + SYS_NFSSERVCTL = 42 + SYS_STATFS = 43 + SYS_FSTATFS = 44 + SYS_TRUNCATE = 45 + SYS_FTRUNCATE = 46 + SYS_FALLOCATE = 47 + SYS_FACCESSAT = 48 + SYS_CHDIR = 49 + SYS_FCHDIR = 50 + SYS_CHROOT = 51 + SYS_FCHMOD = 52 + SYS_FCHMODAT = 53 + SYS_FCHOWNAT = 54 + SYS_FCHOWN = 55 + SYS_OPENAT = 56 + SYS_CLOSE = 57 + SYS_VHANGUP = 58 + SYS_PIPE2 = 59 + SYS_QUOTACTL = 60 + SYS_GETDENTS64 = 61 + SYS_LSEEK = 62 + SYS_READ = 63 + SYS_WRITE = 64 + SYS_READV = 65 + SYS_WRITEV = 66 + SYS_PREAD64 = 67 + SYS_PWRITE64 = 68 + SYS_PREADV = 69 + SYS_PWRITEV = 70 + SYS_SENDFILE = 71 + SYS_PSELECT6 = 72 + SYS_PPOLL = 73 + SYS_SIGNALFD4 = 74 + SYS_VMSPLICE = 75 + SYS_SPLICE = 76 + SYS_TEE = 77 + SYS_READLINKAT = 78 + SYS_SYNC = 81 + SYS_FSYNC = 82 + SYS_FDATASYNC = 83 + SYS_SYNC_FILE_RANGE = 84 + SYS_TIMERFD_CREATE = 85 + SYS_TIMERFD_SETTIME = 86 + SYS_TIMERFD_GETTIME = 87 + SYS_UTIMENSAT = 88 + SYS_ACCT = 89 + SYS_CAPGET = 90 + SYS_CAPSET = 91 + SYS_PERSONALITY = 92 + SYS_EXIT = 93 + SYS_EXIT_GROUP = 94 + SYS_WAITID = 95 + SYS_SET_TID_ADDRESS = 96 + SYS_UNSHARE = 97 + SYS_FUTEX = 98 + SYS_SET_ROBUST_LIST = 99 + SYS_GET_ROBUST_LIST = 100 + SYS_NANOSLEEP = 101 + SYS_GETITIMER = 102 + SYS_SETITIMER = 103 + SYS_KEXEC_LOAD = 104 + SYS_INIT_MODULE = 105 + SYS_DELETE_MODULE = 106 + SYS_TIMER_CREATE = 107 + SYS_TIMER_GETTIME = 108 + SYS_TIMER_GETOVERRUN = 109 + SYS_TIMER_SETTIME = 110 + SYS_TIMER_DELETE = 111 + SYS_CLOCK_SETTIME = 112 + SYS_CLOCK_GETTIME = 113 + SYS_CLOCK_GETRES = 114 + SYS_CLOCK_NANOSLEEP = 115 + SYS_SYSLOG = 116 + SYS_PTRACE = 117 + SYS_SCHED_SETPARAM = 118 + SYS_SCHED_SETSCHEDULER = 119 + SYS_SCHED_GETSCHEDULER = 120 + SYS_SCHED_GETPARAM = 121 + SYS_SCHED_SETAFFINITY = 122 + SYS_SCHED_GETAFFINITY = 123 + SYS_SCHED_YIELD = 124 + SYS_SCHED_GET_PRIORITY_MAX = 125 + SYS_SCHED_GET_PRIORITY_MIN = 126 + SYS_SCHED_RR_GET_INTERVAL = 127 + SYS_RESTART_SYSCALL = 128 + SYS_KILL = 129 + SYS_TKILL = 130 + SYS_TGKILL = 131 + SYS_SIGALTSTACK = 132 + SYS_RT_SIGSUSPEND = 133 + SYS_RT_SIGACTION = 134 + SYS_RT_SIGPROCMASK = 135 + SYS_RT_SIGPENDING = 136 + SYS_RT_SIGTIMEDWAIT = 137 + SYS_RT_SIGQUEUEINFO = 138 + SYS_RT_SIGRETURN = 139 + SYS_SETPRIORITY = 140 + SYS_GETPRIORITY = 141 + SYS_REBOOT = 142 + SYS_SETREGID = 143 + SYS_SETGID = 144 + SYS_SETREUID = 145 + SYS_SETUID = 146 + SYS_SETRESUID = 147 + SYS_GETRESUID = 148 + SYS_SETRESGID = 149 + SYS_GETRESGID = 150 + SYS_SETFSUID = 151 + SYS_SETFSGID = 152 + SYS_TIMES = 153 + SYS_SETPGID = 154 + SYS_GETPGID = 155 + SYS_GETSID = 156 + SYS_SETSID = 157 + SYS_GETGROUPS = 158 + SYS_SETGROUPS = 159 + SYS_UNAME = 160 + SYS_SETHOSTNAME = 161 + SYS_SETDOMAINNAME = 
162 + SYS_GETRUSAGE = 165 + SYS_UMASK = 166 + SYS_PRCTL = 167 + SYS_GETCPU = 168 + SYS_GETTIMEOFDAY = 169 + SYS_SETTIMEOFDAY = 170 + SYS_ADJTIMEX = 171 + SYS_GETPID = 172 + SYS_GETPPID = 173 + SYS_GETUID = 174 + SYS_GETEUID = 175 + SYS_GETGID = 176 + SYS_GETEGID = 177 + SYS_GETTID = 178 + SYS_SYSINFO = 179 + SYS_MQ_OPEN = 180 + SYS_MQ_UNLINK = 181 + SYS_MQ_TIMEDSEND = 182 + SYS_MQ_TIMEDRECEIVE = 183 + SYS_MQ_NOTIFY = 184 + SYS_MQ_GETSETATTR = 185 + SYS_MSGGET = 186 + SYS_MSGCTL = 187 + SYS_MSGRCV = 188 + SYS_MSGSND = 189 + SYS_SEMGET = 190 + SYS_SEMCTL = 191 + SYS_SEMTIMEDOP = 192 + SYS_SEMOP = 193 + SYS_SHMGET = 194 + SYS_SHMCTL = 195 + SYS_SHMAT = 196 + SYS_SHMDT = 197 + SYS_SOCKET = 198 + SYS_SOCKETPAIR = 199 + SYS_BIND = 200 + SYS_LISTEN = 201 + SYS_ACCEPT = 202 + SYS_CONNECT = 203 + SYS_GETSOCKNAME = 204 + SYS_GETPEERNAME = 205 + SYS_SENDTO = 206 + SYS_RECVFROM = 207 + SYS_SETSOCKOPT = 208 + SYS_GETSOCKOPT = 209 + SYS_SHUTDOWN = 210 + SYS_SENDMSG = 211 + SYS_RECVMSG = 212 + SYS_READAHEAD = 213 + SYS_BRK = 214 + SYS_MUNMAP = 215 + SYS_MREMAP = 216 + SYS_ADD_KEY = 217 + SYS_REQUEST_KEY = 218 + SYS_KEYCTL = 219 + SYS_CLONE = 220 + SYS_EXECVE = 221 + SYS_MMAP = 222 + SYS_FADVISE64 = 223 + SYS_SWAPON = 224 + SYS_SWAPOFF = 225 + SYS_MPROTECT = 226 + SYS_MSYNC = 227 + SYS_MLOCK = 228 + SYS_MUNLOCK = 229 + SYS_MLOCKALL = 230 + SYS_MUNLOCKALL = 231 + SYS_MINCORE = 232 + SYS_MADVISE = 233 + SYS_REMAP_FILE_PAGES = 234 + SYS_MBIND = 235 + SYS_GET_MEMPOLICY = 236 + SYS_SET_MEMPOLICY = 237 + SYS_MIGRATE_PAGES = 238 + SYS_MOVE_PAGES = 239 + SYS_RT_TGSIGQUEUEINFO = 240 + SYS_PERF_EVENT_OPEN = 241 + SYS_ACCEPT4 = 242 + SYS_RECVMMSG = 243 + SYS_ARCH_SPECIFIC_SYSCALL = 244 + SYS_WAIT4 = 260 + SYS_PRLIMIT64 = 261 + SYS_FANOTIFY_INIT = 262 + SYS_FANOTIFY_MARK = 263 + SYS_NAME_TO_HANDLE_AT = 264 + SYS_OPEN_BY_HANDLE_AT = 265 + SYS_CLOCK_ADJTIME = 266 + SYS_SYNCFS = 267 + SYS_SETNS = 268 + SYS_SENDMMSG = 269 + SYS_PROCESS_VM_READV = 270 + SYS_PROCESS_VM_WRITEV = 271 + SYS_KCMP = 272 + SYS_FINIT_MODULE = 273 + SYS_SCHED_SETATTR = 274 + SYS_SCHED_GETATTR = 275 + SYS_RENAMEAT2 = 276 + SYS_SECCOMP = 277 + SYS_GETRANDOM = 278 + SYS_MEMFD_CREATE = 279 + SYS_BPF = 280 + SYS_EXECVEAT = 281 + SYS_USERFAULTFD = 282 + SYS_MEMBARRIER = 283 + SYS_MLOCK2 = 284 + SYS_COPY_FILE_RANGE = 285 + SYS_PREADV2 = 286 + SYS_PWRITEV2 = 287 + SYS_PKEY_MPROTECT = 288 + SYS_PKEY_ALLOC = 289 + SYS_PKEY_FREE = 290 + SYS_STATX = 291 + SYS_IO_PGETEVENTS = 292 + SYS_RSEQ = 293 + SYS_KEXEC_FILE_LOAD = 294 + SYS_PIDFD_SEND_SIGNAL = 424 + SYS_IO_URING_SETUP = 425 + SYS_IO_URING_ENTER = 426 + SYS_IO_URING_REGISTER = 427 + SYS_OPEN_TREE = 428 + SYS_MOVE_MOUNT = 429 + SYS_FSOPEN = 430 + SYS_FSCONFIG = 431 + SYS_FSMOUNT = 432 + SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 + SYS_CLOSE_RANGE = 436 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 + SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 + SYS_EPOLL_PWAIT2 = 441 + SYS_MOUNT_SETATTR = 442 + SYS_QUOTACTL_FD = 443 + SYS_LANDLOCK_CREATE_RULESET = 444 + SYS_LANDLOCK_ADD_RULE = 445 + SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_PROCESS_MRELEASE = 448 + SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 3b0418e68..202a57e90 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// go run 
linux/mksysnum.go -Wall -Werror -static -I/tmp/mips/include /tmp/mips/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux @@ -430,4 +430,5 @@ const ( SYS_LANDLOCK_RESTRICT_SELF = 4446 SYS_PROCESS_MRELEASE = 4448 SYS_FUTEX_WAITV = 4449 + SYS_SET_MEMPOLICY_HOME_NODE = 4450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 314ebf166..1fbceb52d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/mips64/include /tmp/mips64/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux @@ -360,4 +360,5 @@ const ( SYS_LANDLOCK_RESTRICT_SELF = 5446 SYS_PROCESS_MRELEASE = 5448 SYS_FUTEX_WAITV = 5449 + SYS_SET_MEMPOLICY_HOME_NODE = 5450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index b8fbb937a..b4ffb7a20 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/mips64le/include /tmp/mips64le/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux @@ -360,4 +360,5 @@ const ( SYS_LANDLOCK_RESTRICT_SELF = 5446 SYS_PROCESS_MRELEASE = 5448 SYS_FUTEX_WAITV = 5449 + SYS_SET_MEMPOLICY_HOME_NODE = 5450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index ee309b2ba..867985f9b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/mipsle/include /tmp/mipsle/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux @@ -430,4 +430,5 @@ const ( SYS_LANDLOCK_RESTRICT_SELF = 4446 SYS_PROCESS_MRELEASE = 4448 SYS_FUTEX_WAITV = 4449 + SYS_SET_MEMPOLICY_HOME_NODE = 4450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index ac3748104..a8cce69ed 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/ppc/include /tmp/ppc/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build ppc && linux @@ -437,4 +437,5 @@ const ( SYS_LANDLOCK_RESTRICT_SELF = 446 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 5aa472111..d44c5b39d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/ppc64/include /tmp/ppc64/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux @@ -409,4 +409,5 @@ const ( SYS_LANDLOCK_RESTRICT_SELF = 446 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 0793ac1a6..4214dd9c0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/ppc64le/include /tmp/ppc64le/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux @@ -409,4 +409,5 @@ const ( SYS_LANDLOCK_RESTRICT_SELF = 446 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index a520962e3..3e594a8c0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/riscv64/include /tmp/riscv64/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && linux @@ -309,6 +309,8 @@ const ( SYS_LANDLOCK_CREATE_RULESET = 444 SYS_LANDLOCK_ADD_RULE = 445 SYS_LANDLOCK_RESTRICT_SELF = 446 + SYS_MEMFD_SECRET = 447 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index d1738586b..7ea465204 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -fsigned-char /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/s390x/include -fsigned-char /tmp/s390x/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build s390x && linux @@ -374,4 +374,5 @@ const ( SYS_LANDLOCK_RESTRICT_SELF = 446 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index dfd5660f9..92f628ef4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/sparc64/include /tmp/sparc64/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux @@ -388,4 +388,5 @@ const ( SYS_LANDLOCK_RESTRICT_SELF = 446 SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 + SYS_SET_MEMPOLICY_HOME_NODE = 450 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go index 817edbf95..597733813 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go @@ -6,6 +6,7 @@ package unix +// Deprecated: Use libc wrappers instead of direct syscalls. const ( SYS_EXIT = 1 // { void sys_exit(int rval); } SYS_FORK = 2 // { int sys_fork(void); } diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go index ea453614e..16af29189 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go @@ -6,6 +6,7 @@ package unix +// Deprecated: Use libc wrappers instead of direct syscalls. const ( SYS_EXIT = 1 // { void sys_exit(int rval); } SYS_FORK = 2 // { int sys_fork(void); } diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go index 467971eed..f59b18a97 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go @@ -6,6 +6,7 @@ package unix +// Deprecated: Use libc wrappers instead of direct syscalls. const ( SYS_EXIT = 1 // { void sys_exit(int rval); } SYS_FORK = 2 // { int sys_fork(void); } diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go index 32eec5ed5..721ef5910 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go @@ -6,6 +6,7 @@ package unix +// Deprecated: Use libc wrappers instead of direct syscalls. const ( SYS_EXIT = 1 // { void sys_exit(int rval); } SYS_FORK = 2 // { int sys_fork(void); } diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go index a37f77375..01c43a01f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go @@ -6,6 +6,7 @@ package unix +// Deprecated: Use libc wrappers instead of direct syscalls. 
const ( SYS_EXIT = 1 // { void sys_exit(int rval); } SYS_FORK = 2 // { int sys_fork(void); } diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go new file mode 100644 index 000000000..f258cfa24 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go @@ -0,0 +1,218 @@ +// go run mksysnum.go https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build ppc64 && openbsd +// +build ppc64,openbsd + +package unix + +const ( + SYS_EXIT = 1 // { void sys_exit(int rval); } + SYS_FORK = 2 // { int sys_fork(void); } + SYS_READ = 3 // { ssize_t sys_read(int fd, void *buf, size_t nbyte); } + SYS_WRITE = 4 // { ssize_t sys_write(int fd, const void *buf, size_t nbyte); } + SYS_OPEN = 5 // { int sys_open(const char *path, int flags, ... mode_t mode); } + SYS_CLOSE = 6 // { int sys_close(int fd); } + SYS_GETENTROPY = 7 // { int sys_getentropy(void *buf, size_t nbyte); } + SYS___TFORK = 8 // { int sys___tfork(const struct __tfork *param, size_t psize); } + SYS_LINK = 9 // { int sys_link(const char *path, const char *link); } + SYS_UNLINK = 10 // { int sys_unlink(const char *path); } + SYS_WAIT4 = 11 // { pid_t sys_wait4(pid_t pid, int *status, int options, struct rusage *rusage); } + SYS_CHDIR = 12 // { int sys_chdir(const char *path); } + SYS_FCHDIR = 13 // { int sys_fchdir(int fd); } + SYS_MKNOD = 14 // { int sys_mknod(const char *path, mode_t mode, dev_t dev); } + SYS_CHMOD = 15 // { int sys_chmod(const char *path, mode_t mode); } + SYS_CHOWN = 16 // { int sys_chown(const char *path, uid_t uid, gid_t gid); } + SYS_OBREAK = 17 // { int sys_obreak(char *nsize); } break + SYS_GETDTABLECOUNT = 18 // { int sys_getdtablecount(void); } + SYS_GETRUSAGE = 19 // { int sys_getrusage(int who, struct rusage *rusage); } + SYS_GETPID = 20 // { pid_t sys_getpid(void); } + SYS_MOUNT = 21 // { int sys_mount(const char *type, const char *path, int flags, void *data); } + SYS_UNMOUNT = 22 // { int sys_unmount(const char *path, int flags); } + SYS_SETUID = 23 // { int sys_setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t sys_getuid(void); } + SYS_GETEUID = 25 // { uid_t sys_geteuid(void); } + SYS_PTRACE = 26 // { int sys_ptrace(int req, pid_t pid, caddr_t addr, int data); } + SYS_RECVMSG = 27 // { ssize_t sys_recvmsg(int s, struct msghdr *msg, int flags); } + SYS_SENDMSG = 28 // { ssize_t sys_sendmsg(int s, const struct msghdr *msg, int flags); } + SYS_RECVFROM = 29 // { ssize_t sys_recvfrom(int s, void *buf, size_t len, int flags, struct sockaddr *from, socklen_t *fromlenaddr); } + SYS_ACCEPT = 30 // { int sys_accept(int s, struct sockaddr *name, socklen_t *anamelen); } + SYS_GETPEERNAME = 31 // { int sys_getpeername(int fdes, struct sockaddr *asa, socklen_t *alen); } + SYS_GETSOCKNAME = 32 // { int sys_getsockname(int fdes, struct sockaddr *asa, socklen_t *alen); } + SYS_ACCESS = 33 // { int sys_access(const char *path, int amode); } + SYS_CHFLAGS = 34 // { int sys_chflags(const char *path, u_int flags); } + SYS_FCHFLAGS = 35 // { int sys_fchflags(int fd, u_int flags); } + SYS_SYNC = 36 // { void sys_sync(void); } + SYS_STAT = 38 // { int sys_stat(const char *path, struct stat *ub); } + SYS_GETPPID = 39 // { pid_t sys_getppid(void); } + SYS_LSTAT = 40 // { int sys_lstat(const char *path, struct stat *ub); } + SYS_DUP = 41 // { int sys_dup(int fd); } + SYS_FSTATAT = 42 // { int sys_fstatat(int fd, const char *path, struct stat *buf, int 
flag); } + SYS_GETEGID = 43 // { gid_t sys_getegid(void); } + SYS_PROFIL = 44 // { int sys_profil(caddr_t samples, size_t size, u_long offset, u_int scale); } + SYS_KTRACE = 45 // { int sys_ktrace(const char *fname, int ops, int facs, pid_t pid); } + SYS_SIGACTION = 46 // { int sys_sigaction(int signum, const struct sigaction *nsa, struct sigaction *osa); } + SYS_GETGID = 47 // { gid_t sys_getgid(void); } + SYS_SIGPROCMASK = 48 // { int sys_sigprocmask(int how, sigset_t mask); } + SYS_SETLOGIN = 50 // { int sys_setlogin(const char *namebuf); } + SYS_ACCT = 51 // { int sys_acct(const char *path); } + SYS_SIGPENDING = 52 // { int sys_sigpending(void); } + SYS_FSTAT = 53 // { int sys_fstat(int fd, struct stat *sb); } + SYS_IOCTL = 54 // { int sys_ioctl(int fd, u_long com, ... void *data); } + SYS_REBOOT = 55 // { int sys_reboot(int opt); } + SYS_REVOKE = 56 // { int sys_revoke(const char *path); } + SYS_SYMLINK = 57 // { int sys_symlink(const char *path, const char *link); } + SYS_READLINK = 58 // { ssize_t sys_readlink(const char *path, char *buf, size_t count); } + SYS_EXECVE = 59 // { int sys_execve(const char *path, char * const *argp, char * const *envp); } + SYS_UMASK = 60 // { mode_t sys_umask(mode_t newmask); } + SYS_CHROOT = 61 // { int sys_chroot(const char *path); } + SYS_GETFSSTAT = 62 // { int sys_getfsstat(struct statfs *buf, size_t bufsize, int flags); } + SYS_STATFS = 63 // { int sys_statfs(const char *path, struct statfs *buf); } + SYS_FSTATFS = 64 // { int sys_fstatfs(int fd, struct statfs *buf); } + SYS_FHSTATFS = 65 // { int sys_fhstatfs(const fhandle_t *fhp, struct statfs *buf); } + SYS_VFORK = 66 // { int sys_vfork(void); } + SYS_GETTIMEOFDAY = 67 // { int sys_gettimeofday(struct timeval *tp, struct timezone *tzp); } + SYS_SETTIMEOFDAY = 68 // { int sys_settimeofday(const struct timeval *tv, const struct timezone *tzp); } + SYS_SETITIMER = 69 // { int sys_setitimer(int which, const struct itimerval *itv, struct itimerval *oitv); } + SYS_GETITIMER = 70 // { int sys_getitimer(int which, struct itimerval *itv); } + SYS_SELECT = 71 // { int sys_select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); } + SYS_KEVENT = 72 // { int sys_kevent(int fd, const struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_MUNMAP = 73 // { int sys_munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int sys_mprotect(void *addr, size_t len, int prot); } + SYS_MADVISE = 75 // { int sys_madvise(void *addr, size_t len, int behav); } + SYS_UTIMES = 76 // { int sys_utimes(const char *path, const struct timeval *tptr); } + SYS_FUTIMES = 77 // { int sys_futimes(int fd, const struct timeval *tptr); } + SYS_GETGROUPS = 79 // { int sys_getgroups(int gidsetsize, gid_t *gidset); } + SYS_SETGROUPS = 80 // { int sys_setgroups(int gidsetsize, const gid_t *gidset); } + SYS_GETPGRP = 81 // { int sys_getpgrp(void); } + SYS_SETPGID = 82 // { int sys_setpgid(pid_t pid, pid_t pgid); } + SYS_FUTEX = 83 // { int sys_futex(uint32_t *f, int op, int val, const struct timespec *timeout, uint32_t *g); } + SYS_UTIMENSAT = 84 // { int sys_utimensat(int fd, const char *path, const struct timespec *times, int flag); } + SYS_FUTIMENS = 85 // { int sys_futimens(int fd, const struct timespec *times); } + SYS_KBIND = 86 // { int sys_kbind(const struct __kbind *param, size_t psize, int64_t proc_cookie); } + SYS_CLOCK_GETTIME = 87 // { int sys_clock_gettime(clockid_t clock_id, struct timespec *tp); } + SYS_CLOCK_SETTIME = 88 // { int 
sys_clock_settime(clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_GETRES = 89 // { int sys_clock_getres(clockid_t clock_id, struct timespec *tp); } + SYS_DUP2 = 90 // { int sys_dup2(int from, int to); } + SYS_NANOSLEEP = 91 // { int sys_nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } + SYS_FCNTL = 92 // { int sys_fcntl(int fd, int cmd, ... void *arg); } + SYS_ACCEPT4 = 93 // { int sys_accept4(int s, struct sockaddr *name, socklen_t *anamelen, int flags); } + SYS___THRSLEEP = 94 // { int sys___thrsleep(const volatile void *ident, clockid_t clock_id, const struct timespec *tp, void *lock, const int *abort); } + SYS_FSYNC = 95 // { int sys_fsync(int fd); } + SYS_SETPRIORITY = 96 // { int sys_setpriority(int which, id_t who, int prio); } + SYS_SOCKET = 97 // { int sys_socket(int domain, int type, int protocol); } + SYS_CONNECT = 98 // { int sys_connect(int s, const struct sockaddr *name, socklen_t namelen); } + SYS_GETDENTS = 99 // { int sys_getdents(int fd, void *buf, size_t buflen); } + SYS_GETPRIORITY = 100 // { int sys_getpriority(int which, id_t who); } + SYS_PIPE2 = 101 // { int sys_pipe2(int *fdp, int flags); } + SYS_DUP3 = 102 // { int sys_dup3(int from, int to, int flags); } + SYS_SIGRETURN = 103 // { int sys_sigreturn(struct sigcontext *sigcntxp); } + SYS_BIND = 104 // { int sys_bind(int s, const struct sockaddr *name, socklen_t namelen); } + SYS_SETSOCKOPT = 105 // { int sys_setsockopt(int s, int level, int name, const void *val, socklen_t valsize); } + SYS_LISTEN = 106 // { int sys_listen(int s, int backlog); } + SYS_CHFLAGSAT = 107 // { int sys_chflagsat(int fd, const char *path, u_int flags, int atflags); } + SYS_PLEDGE = 108 // { int sys_pledge(const char *promises, const char *execpromises); } + SYS_PPOLL = 109 // { int sys_ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *mask); } + SYS_PSELECT = 110 // { int sys_pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *mask); } + SYS_SIGSUSPEND = 111 // { int sys_sigsuspend(int mask); } + SYS_SENDSYSLOG = 112 // { int sys_sendsyslog(const char *buf, size_t nbyte, int flags); } + SYS_UNVEIL = 114 // { int sys_unveil(const char *path, const char *permissions); } + SYS_GETSOCKOPT = 118 // { int sys_getsockopt(int s, int level, int name, void *val, socklen_t *avalsize); } + SYS_THRKILL = 119 // { int sys_thrkill(pid_t tid, int signum, void *tcb); } + SYS_READV = 120 // { ssize_t sys_readv(int fd, const struct iovec *iovp, int iovcnt); } + SYS_WRITEV = 121 // { ssize_t sys_writev(int fd, const struct iovec *iovp, int iovcnt); } + SYS_KILL = 122 // { int sys_kill(int pid, int signum); } + SYS_FCHOWN = 123 // { int sys_fchown(int fd, uid_t uid, gid_t gid); } + SYS_FCHMOD = 124 // { int sys_fchmod(int fd, mode_t mode); } + SYS_SETREUID = 126 // { int sys_setreuid(uid_t ruid, uid_t euid); } + SYS_SETREGID = 127 // { int sys_setregid(gid_t rgid, gid_t egid); } + SYS_RENAME = 128 // { int sys_rename(const char *from, const char *to); } + SYS_FLOCK = 131 // { int sys_flock(int fd, int how); } + SYS_MKFIFO = 132 // { int sys_mkfifo(const char *path, mode_t mode); } + SYS_SENDTO = 133 // { ssize_t sys_sendto(int s, const void *buf, size_t len, int flags, const struct sockaddr *to, socklen_t tolen); } + SYS_SHUTDOWN = 134 // { int sys_shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int sys_socketpair(int domain, int type, int protocol, int *rsv); } + SYS_MKDIR = 136 // { int sys_mkdir(const char *path, mode_t mode); } + SYS_RMDIR = 137 
// { int sys_rmdir(const char *path); } + SYS_ADJTIME = 140 // { int sys_adjtime(const struct timeval *delta, struct timeval *olddelta); } + SYS_GETLOGIN_R = 141 // { int sys_getlogin_r(char *namebuf, u_int namelen); } + SYS_SETSID = 147 // { int sys_setsid(void); } + SYS_QUOTACTL = 148 // { int sys_quotactl(const char *path, int cmd, int uid, char *arg); } + SYS_NFSSVC = 155 // { int sys_nfssvc(int flag, void *argp); } + SYS_GETFH = 161 // { int sys_getfh(const char *fname, fhandle_t *fhp); } + SYS_SYSARCH = 165 // { int sys_sysarch(int op, void *parms); } + SYS_PREAD = 173 // { ssize_t sys_pread(int fd, void *buf, size_t nbyte, int pad, off_t offset); } + SYS_PWRITE = 174 // { ssize_t sys_pwrite(int fd, const void *buf, size_t nbyte, int pad, off_t offset); } + SYS_SETGID = 181 // { int sys_setgid(gid_t gid); } + SYS_SETEGID = 182 // { int sys_setegid(gid_t egid); } + SYS_SETEUID = 183 // { int sys_seteuid(uid_t euid); } + SYS_PATHCONF = 191 // { long sys_pathconf(const char *path, int name); } + SYS_FPATHCONF = 192 // { long sys_fpathconf(int fd, int name); } + SYS_SWAPCTL = 193 // { int sys_swapctl(int cmd, const void *arg, int misc); } + SYS_GETRLIMIT = 194 // { int sys_getrlimit(int which, struct rlimit *rlp); } + SYS_SETRLIMIT = 195 // { int sys_setrlimit(int which, const struct rlimit *rlp); } + SYS_MMAP = 197 // { void *sys_mmap(void *addr, size_t len, int prot, int flags, int fd, long pad, off_t pos); } + SYS_LSEEK = 199 // { off_t sys_lseek(int fd, int pad, off_t offset, int whence); } + SYS_TRUNCATE = 200 // { int sys_truncate(const char *path, int pad, off_t length); } + SYS_FTRUNCATE = 201 // { int sys_ftruncate(int fd, int pad, off_t length); } + SYS_SYSCTL = 202 // { int sys_sysctl(const int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } + SYS_MLOCK = 203 // { int sys_mlock(const void *addr, size_t len); } + SYS_MUNLOCK = 204 // { int sys_munlock(const void *addr, size_t len); } + SYS_GETPGID = 207 // { pid_t sys_getpgid(pid_t pid); } + SYS_UTRACE = 209 // { int sys_utrace(const char *label, const void *addr, size_t len); } + SYS_SEMGET = 221 // { int sys_semget(key_t key, int nsems, int semflg); } + SYS_MSGGET = 225 // { int sys_msgget(key_t key, int msgflg); } + SYS_MSGSND = 226 // { int sys_msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } + SYS_MSGRCV = 227 // { int sys_msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_SHMAT = 228 // { void *sys_shmat(int shmid, const void *shmaddr, int shmflg); } + SYS_SHMDT = 230 // { int sys_shmdt(const void *shmaddr); } + SYS_MINHERIT = 250 // { int sys_minherit(void *addr, size_t len, int inherit); } + SYS_POLL = 252 // { int sys_poll(struct pollfd *fds, u_int nfds, int timeout); } + SYS_ISSETUGID = 253 // { int sys_issetugid(void); } + SYS_LCHOWN = 254 // { int sys_lchown(const char *path, uid_t uid, gid_t gid); } + SYS_GETSID = 255 // { pid_t sys_getsid(pid_t pid); } + SYS_MSYNC = 256 // { int sys_msync(void *addr, size_t len, int flags); } + SYS_PIPE = 263 // { int sys_pipe(int *fdp); } + SYS_FHOPEN = 264 // { int sys_fhopen(const fhandle_t *fhp, int flags); } + SYS_PREADV = 267 // { ssize_t sys_preadv(int fd, const struct iovec *iovp, int iovcnt, int pad, off_t offset); } + SYS_PWRITEV = 268 // { ssize_t sys_pwritev(int fd, const struct iovec *iovp, int iovcnt, int pad, off_t offset); } + SYS_KQUEUE = 269 // { int sys_kqueue(void); } + SYS_MLOCKALL = 271 // { int sys_mlockall(int flags); } + SYS_MUNLOCKALL = 272 // { int sys_munlockall(void); } + 
SYS_GETRESUID = 281 // { int sys_getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } + SYS_SETRESUID = 282 // { int sys_setresuid(uid_t ruid, uid_t euid, uid_t suid); } + SYS_GETRESGID = 283 // { int sys_getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } + SYS_SETRESGID = 284 // { int sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid); } + SYS_MQUERY = 286 // { void *sys_mquery(void *addr, size_t len, int prot, int flags, int fd, long pad, off_t pos); } + SYS_CLOSEFROM = 287 // { int sys_closefrom(int fd); } + SYS_SIGALTSTACK = 288 // { int sys_sigaltstack(const struct sigaltstack *nss, struct sigaltstack *oss); } + SYS_SHMGET = 289 // { int sys_shmget(key_t key, size_t size, int shmflg); } + SYS_SEMOP = 290 // { int sys_semop(int semid, struct sembuf *sops, size_t nsops); } + SYS_FHSTAT = 294 // { int sys_fhstat(const fhandle_t *fhp, struct stat *sb); } + SYS___SEMCTL = 295 // { int sys___semctl(int semid, int semnum, int cmd, union semun *arg); } + SYS_SHMCTL = 296 // { int sys_shmctl(int shmid, int cmd, struct shmid_ds *buf); } + SYS_MSGCTL = 297 // { int sys_msgctl(int msqid, int cmd, struct msqid_ds *buf); } + SYS_SCHED_YIELD = 298 // { int sys_sched_yield(void); } + SYS_GETTHRID = 299 // { pid_t sys_getthrid(void); } + SYS___THRWAKEUP = 301 // { int sys___thrwakeup(const volatile void *ident, int n); } + SYS___THREXIT = 302 // { void sys___threxit(pid_t *notdead); } + SYS___THRSIGDIVERT = 303 // { int sys___thrsigdivert(sigset_t sigmask, siginfo_t *info, const struct timespec *timeout); } + SYS___GETCWD = 304 // { int sys___getcwd(char *buf, size_t len); } + SYS_ADJFREQ = 305 // { int sys_adjfreq(const int64_t *freq, int64_t *oldfreq); } + SYS_SETRTABLE = 310 // { int sys_setrtable(int rtableid); } + SYS_GETRTABLE = 311 // { int sys_getrtable(void); } + SYS_FACCESSAT = 313 // { int sys_faccessat(int fd, const char *path, int amode, int flag); } + SYS_FCHMODAT = 314 // { int sys_fchmodat(int fd, const char *path, mode_t mode, int flag); } + SYS_FCHOWNAT = 315 // { int sys_fchownat(int fd, const char *path, uid_t uid, gid_t gid, int flag); } + SYS_LINKAT = 317 // { int sys_linkat(int fd1, const char *path1, int fd2, const char *path2, int flag); } + SYS_MKDIRAT = 318 // { int sys_mkdirat(int fd, const char *path, mode_t mode); } + SYS_MKFIFOAT = 319 // { int sys_mkfifoat(int fd, const char *path, mode_t mode); } + SYS_MKNODAT = 320 // { int sys_mknodat(int fd, const char *path, mode_t mode, dev_t dev); } + SYS_OPENAT = 321 // { int sys_openat(int fd, const char *path, int flags, ... mode_t mode); } + SYS_READLINKAT = 322 // { ssize_t sys_readlinkat(int fd, const char *path, char *buf, size_t count); } + SYS_RENAMEAT = 323 // { int sys_renameat(int fromfd, const char *from, int tofd, const char *to); } + SYS_SYMLINKAT = 324 // { int sys_symlinkat(const char *path, int fd, const char *link); } + SYS_UNLINKAT = 325 // { int sys_unlinkat(int fd, const char *path, int flag); } + SYS___SET_TCB = 329 // { void sys___set_tcb(void *tcb); } + SYS___GET_TCB = 330 // { void *sys___get_tcb(void); } +) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go new file mode 100644 index 000000000..07919e0ec --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go @@ -0,0 +1,219 @@ +// go run mksysnum.go https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +//go:build riscv64 && openbsd +// +build riscv64,openbsd + +package unix + +// Deprecated: Use libc wrappers instead of direct syscalls. +const ( + SYS_EXIT = 1 // { void sys_exit(int rval); } + SYS_FORK = 2 // { int sys_fork(void); } + SYS_READ = 3 // { ssize_t sys_read(int fd, void *buf, size_t nbyte); } + SYS_WRITE = 4 // { ssize_t sys_write(int fd, const void *buf, size_t nbyte); } + SYS_OPEN = 5 // { int sys_open(const char *path, int flags, ... mode_t mode); } + SYS_CLOSE = 6 // { int sys_close(int fd); } + SYS_GETENTROPY = 7 // { int sys_getentropy(void *buf, size_t nbyte); } + SYS___TFORK = 8 // { int sys___tfork(const struct __tfork *param, size_t psize); } + SYS_LINK = 9 // { int sys_link(const char *path, const char *link); } + SYS_UNLINK = 10 // { int sys_unlink(const char *path); } + SYS_WAIT4 = 11 // { pid_t sys_wait4(pid_t pid, int *status, int options, struct rusage *rusage); } + SYS_CHDIR = 12 // { int sys_chdir(const char *path); } + SYS_FCHDIR = 13 // { int sys_fchdir(int fd); } + SYS_MKNOD = 14 // { int sys_mknod(const char *path, mode_t mode, dev_t dev); } + SYS_CHMOD = 15 // { int sys_chmod(const char *path, mode_t mode); } + SYS_CHOWN = 16 // { int sys_chown(const char *path, uid_t uid, gid_t gid); } + SYS_OBREAK = 17 // { int sys_obreak(char *nsize); } break + SYS_GETDTABLECOUNT = 18 // { int sys_getdtablecount(void); } + SYS_GETRUSAGE = 19 // { int sys_getrusage(int who, struct rusage *rusage); } + SYS_GETPID = 20 // { pid_t sys_getpid(void); } + SYS_MOUNT = 21 // { int sys_mount(const char *type, const char *path, int flags, void *data); } + SYS_UNMOUNT = 22 // { int sys_unmount(const char *path, int flags); } + SYS_SETUID = 23 // { int sys_setuid(uid_t uid); } + SYS_GETUID = 24 // { uid_t sys_getuid(void); } + SYS_GETEUID = 25 // { uid_t sys_geteuid(void); } + SYS_PTRACE = 26 // { int sys_ptrace(int req, pid_t pid, caddr_t addr, int data); } + SYS_RECVMSG = 27 // { ssize_t sys_recvmsg(int s, struct msghdr *msg, int flags); } + SYS_SENDMSG = 28 // { ssize_t sys_sendmsg(int s, const struct msghdr *msg, int flags); } + SYS_RECVFROM = 29 // { ssize_t sys_recvfrom(int s, void *buf, size_t len, int flags, struct sockaddr *from, socklen_t *fromlenaddr); } + SYS_ACCEPT = 30 // { int sys_accept(int s, struct sockaddr *name, socklen_t *anamelen); } + SYS_GETPEERNAME = 31 // { int sys_getpeername(int fdes, struct sockaddr *asa, socklen_t *alen); } + SYS_GETSOCKNAME = 32 // { int sys_getsockname(int fdes, struct sockaddr *asa, socklen_t *alen); } + SYS_ACCESS = 33 // { int sys_access(const char *path, int amode); } + SYS_CHFLAGS = 34 // { int sys_chflags(const char *path, u_int flags); } + SYS_FCHFLAGS = 35 // { int sys_fchflags(int fd, u_int flags); } + SYS_SYNC = 36 // { void sys_sync(void); } + SYS_STAT = 38 // { int sys_stat(const char *path, struct stat *ub); } + SYS_GETPPID = 39 // { pid_t sys_getppid(void); } + SYS_LSTAT = 40 // { int sys_lstat(const char *path, struct stat *ub); } + SYS_DUP = 41 // { int sys_dup(int fd); } + SYS_FSTATAT = 42 // { int sys_fstatat(int fd, const char *path, struct stat *buf, int flag); } + SYS_GETEGID = 43 // { gid_t sys_getegid(void); } + SYS_PROFIL = 44 // { int sys_profil(caddr_t samples, size_t size, u_long offset, u_int scale); } + SYS_KTRACE = 45 // { int sys_ktrace(const char *fname, int ops, int facs, pid_t pid); } + SYS_SIGACTION = 46 // { int sys_sigaction(int signum, const struct sigaction *nsa, struct sigaction *osa); } + SYS_GETGID = 47 // { gid_t sys_getgid(void); } + SYS_SIGPROCMASK = 48 // { int sys_sigprocmask(int 
how, sigset_t mask); } + SYS_SETLOGIN = 50 // { int sys_setlogin(const char *namebuf); } + SYS_ACCT = 51 // { int sys_acct(const char *path); } + SYS_SIGPENDING = 52 // { int sys_sigpending(void); } + SYS_FSTAT = 53 // { int sys_fstat(int fd, struct stat *sb); } + SYS_IOCTL = 54 // { int sys_ioctl(int fd, u_long com, ... void *data); } + SYS_REBOOT = 55 // { int sys_reboot(int opt); } + SYS_REVOKE = 56 // { int sys_revoke(const char *path); } + SYS_SYMLINK = 57 // { int sys_symlink(const char *path, const char *link); } + SYS_READLINK = 58 // { ssize_t sys_readlink(const char *path, char *buf, size_t count); } + SYS_EXECVE = 59 // { int sys_execve(const char *path, char * const *argp, char * const *envp); } + SYS_UMASK = 60 // { mode_t sys_umask(mode_t newmask); } + SYS_CHROOT = 61 // { int sys_chroot(const char *path); } + SYS_GETFSSTAT = 62 // { int sys_getfsstat(struct statfs *buf, size_t bufsize, int flags); } + SYS_STATFS = 63 // { int sys_statfs(const char *path, struct statfs *buf); } + SYS_FSTATFS = 64 // { int sys_fstatfs(int fd, struct statfs *buf); } + SYS_FHSTATFS = 65 // { int sys_fhstatfs(const fhandle_t *fhp, struct statfs *buf); } + SYS_VFORK = 66 // { int sys_vfork(void); } + SYS_GETTIMEOFDAY = 67 // { int sys_gettimeofday(struct timeval *tp, struct timezone *tzp); } + SYS_SETTIMEOFDAY = 68 // { int sys_settimeofday(const struct timeval *tv, const struct timezone *tzp); } + SYS_SETITIMER = 69 // { int sys_setitimer(int which, const struct itimerval *itv, struct itimerval *oitv); } + SYS_GETITIMER = 70 // { int sys_getitimer(int which, struct itimerval *itv); } + SYS_SELECT = 71 // { int sys_select(int nd, fd_set *in, fd_set *ou, fd_set *ex, struct timeval *tv); } + SYS_KEVENT = 72 // { int sys_kevent(int fd, const struct kevent *changelist, int nchanges, struct kevent *eventlist, int nevents, const struct timespec *timeout); } + SYS_MUNMAP = 73 // { int sys_munmap(void *addr, size_t len); } + SYS_MPROTECT = 74 // { int sys_mprotect(void *addr, size_t len, int prot); } + SYS_MADVISE = 75 // { int sys_madvise(void *addr, size_t len, int behav); } + SYS_UTIMES = 76 // { int sys_utimes(const char *path, const struct timeval *tptr); } + SYS_FUTIMES = 77 // { int sys_futimes(int fd, const struct timeval *tptr); } + SYS_GETGROUPS = 79 // { int sys_getgroups(int gidsetsize, gid_t *gidset); } + SYS_SETGROUPS = 80 // { int sys_setgroups(int gidsetsize, const gid_t *gidset); } + SYS_GETPGRP = 81 // { int sys_getpgrp(void); } + SYS_SETPGID = 82 // { int sys_setpgid(pid_t pid, pid_t pgid); } + SYS_FUTEX = 83 // { int sys_futex(uint32_t *f, int op, int val, const struct timespec *timeout, uint32_t *g); } + SYS_UTIMENSAT = 84 // { int sys_utimensat(int fd, const char *path, const struct timespec *times, int flag); } + SYS_FUTIMENS = 85 // { int sys_futimens(int fd, const struct timespec *times); } + SYS_KBIND = 86 // { int sys_kbind(const struct __kbind *param, size_t psize, int64_t proc_cookie); } + SYS_CLOCK_GETTIME = 87 // { int sys_clock_gettime(clockid_t clock_id, struct timespec *tp); } + SYS_CLOCK_SETTIME = 88 // { int sys_clock_settime(clockid_t clock_id, const struct timespec *tp); } + SYS_CLOCK_GETRES = 89 // { int sys_clock_getres(clockid_t clock_id, struct timespec *tp); } + SYS_DUP2 = 90 // { int sys_dup2(int from, int to); } + SYS_NANOSLEEP = 91 // { int sys_nanosleep(const struct timespec *rqtp, struct timespec *rmtp); } + SYS_FCNTL = 92 // { int sys_fcntl(int fd, int cmd, ... 
void *arg); } + SYS_ACCEPT4 = 93 // { int sys_accept4(int s, struct sockaddr *name, socklen_t *anamelen, int flags); } + SYS___THRSLEEP = 94 // { int sys___thrsleep(const volatile void *ident, clockid_t clock_id, const struct timespec *tp, void *lock, const int *abort); } + SYS_FSYNC = 95 // { int sys_fsync(int fd); } + SYS_SETPRIORITY = 96 // { int sys_setpriority(int which, id_t who, int prio); } + SYS_SOCKET = 97 // { int sys_socket(int domain, int type, int protocol); } + SYS_CONNECT = 98 // { int sys_connect(int s, const struct sockaddr *name, socklen_t namelen); } + SYS_GETDENTS = 99 // { int sys_getdents(int fd, void *buf, size_t buflen); } + SYS_GETPRIORITY = 100 // { int sys_getpriority(int which, id_t who); } + SYS_PIPE2 = 101 // { int sys_pipe2(int *fdp, int flags); } + SYS_DUP3 = 102 // { int sys_dup3(int from, int to, int flags); } + SYS_SIGRETURN = 103 // { int sys_sigreturn(struct sigcontext *sigcntxp); } + SYS_BIND = 104 // { int sys_bind(int s, const struct sockaddr *name, socklen_t namelen); } + SYS_SETSOCKOPT = 105 // { int sys_setsockopt(int s, int level, int name, const void *val, socklen_t valsize); } + SYS_LISTEN = 106 // { int sys_listen(int s, int backlog); } + SYS_CHFLAGSAT = 107 // { int sys_chflagsat(int fd, const char *path, u_int flags, int atflags); } + SYS_PLEDGE = 108 // { int sys_pledge(const char *promises, const char *execpromises); } + SYS_PPOLL = 109 // { int sys_ppoll(struct pollfd *fds, u_int nfds, const struct timespec *ts, const sigset_t *mask); } + SYS_PSELECT = 110 // { int sys_pselect(int nd, fd_set *in, fd_set *ou, fd_set *ex, const struct timespec *ts, const sigset_t *mask); } + SYS_SIGSUSPEND = 111 // { int sys_sigsuspend(int mask); } + SYS_SENDSYSLOG = 112 // { int sys_sendsyslog(const char *buf, size_t nbyte, int flags); } + SYS_UNVEIL = 114 // { int sys_unveil(const char *path, const char *permissions); } + SYS_GETSOCKOPT = 118 // { int sys_getsockopt(int s, int level, int name, void *val, socklen_t *avalsize); } + SYS_THRKILL = 119 // { int sys_thrkill(pid_t tid, int signum, void *tcb); } + SYS_READV = 120 // { ssize_t sys_readv(int fd, const struct iovec *iovp, int iovcnt); } + SYS_WRITEV = 121 // { ssize_t sys_writev(int fd, const struct iovec *iovp, int iovcnt); } + SYS_KILL = 122 // { int sys_kill(int pid, int signum); } + SYS_FCHOWN = 123 // { int sys_fchown(int fd, uid_t uid, gid_t gid); } + SYS_FCHMOD = 124 // { int sys_fchmod(int fd, mode_t mode); } + SYS_SETREUID = 126 // { int sys_setreuid(uid_t ruid, uid_t euid); } + SYS_SETREGID = 127 // { int sys_setregid(gid_t rgid, gid_t egid); } + SYS_RENAME = 128 // { int sys_rename(const char *from, const char *to); } + SYS_FLOCK = 131 // { int sys_flock(int fd, int how); } + SYS_MKFIFO = 132 // { int sys_mkfifo(const char *path, mode_t mode); } + SYS_SENDTO = 133 // { ssize_t sys_sendto(int s, const void *buf, size_t len, int flags, const struct sockaddr *to, socklen_t tolen); } + SYS_SHUTDOWN = 134 // { int sys_shutdown(int s, int how); } + SYS_SOCKETPAIR = 135 // { int sys_socketpair(int domain, int type, int protocol, int *rsv); } + SYS_MKDIR = 136 // { int sys_mkdir(const char *path, mode_t mode); } + SYS_RMDIR = 137 // { int sys_rmdir(const char *path); } + SYS_ADJTIME = 140 // { int sys_adjtime(const struct timeval *delta, struct timeval *olddelta); } + SYS_GETLOGIN_R = 141 // { int sys_getlogin_r(char *namebuf, u_int namelen); } + SYS_SETSID = 147 // { int sys_setsid(void); } + SYS_QUOTACTL = 148 // { int sys_quotactl(const char *path, int cmd, int uid, char *arg); } + SYS_NFSSVC 
= 155 // { int sys_nfssvc(int flag, void *argp); } + SYS_GETFH = 161 // { int sys_getfh(const char *fname, fhandle_t *fhp); } + SYS_SYSARCH = 165 // { int sys_sysarch(int op, void *parms); } + SYS_PREAD = 173 // { ssize_t sys_pread(int fd, void *buf, size_t nbyte, int pad, off_t offset); } + SYS_PWRITE = 174 // { ssize_t sys_pwrite(int fd, const void *buf, size_t nbyte, int pad, off_t offset); } + SYS_SETGID = 181 // { int sys_setgid(gid_t gid); } + SYS_SETEGID = 182 // { int sys_setegid(gid_t egid); } + SYS_SETEUID = 183 // { int sys_seteuid(uid_t euid); } + SYS_PATHCONF = 191 // { long sys_pathconf(const char *path, int name); } + SYS_FPATHCONF = 192 // { long sys_fpathconf(int fd, int name); } + SYS_SWAPCTL = 193 // { int sys_swapctl(int cmd, const void *arg, int misc); } + SYS_GETRLIMIT = 194 // { int sys_getrlimit(int which, struct rlimit *rlp); } + SYS_SETRLIMIT = 195 // { int sys_setrlimit(int which, const struct rlimit *rlp); } + SYS_MMAP = 197 // { void *sys_mmap(void *addr, size_t len, int prot, int flags, int fd, long pad, off_t pos); } + SYS_LSEEK = 199 // { off_t sys_lseek(int fd, int pad, off_t offset, int whence); } + SYS_TRUNCATE = 200 // { int sys_truncate(const char *path, int pad, off_t length); } + SYS_FTRUNCATE = 201 // { int sys_ftruncate(int fd, int pad, off_t length); } + SYS_SYSCTL = 202 // { int sys_sysctl(const int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); } + SYS_MLOCK = 203 // { int sys_mlock(const void *addr, size_t len); } + SYS_MUNLOCK = 204 // { int sys_munlock(const void *addr, size_t len); } + SYS_GETPGID = 207 // { pid_t sys_getpgid(pid_t pid); } + SYS_UTRACE = 209 // { int sys_utrace(const char *label, const void *addr, size_t len); } + SYS_SEMGET = 221 // { int sys_semget(key_t key, int nsems, int semflg); } + SYS_MSGGET = 225 // { int sys_msgget(key_t key, int msgflg); } + SYS_MSGSND = 226 // { int sys_msgsnd(int msqid, const void *msgp, size_t msgsz, int msgflg); } + SYS_MSGRCV = 227 // { int sys_msgrcv(int msqid, void *msgp, size_t msgsz, long msgtyp, int msgflg); } + SYS_SHMAT = 228 // { void *sys_shmat(int shmid, const void *shmaddr, int shmflg); } + SYS_SHMDT = 230 // { int sys_shmdt(const void *shmaddr); } + SYS_MINHERIT = 250 // { int sys_minherit(void *addr, size_t len, int inherit); } + SYS_POLL = 252 // { int sys_poll(struct pollfd *fds, u_int nfds, int timeout); } + SYS_ISSETUGID = 253 // { int sys_issetugid(void); } + SYS_LCHOWN = 254 // { int sys_lchown(const char *path, uid_t uid, gid_t gid); } + SYS_GETSID = 255 // { pid_t sys_getsid(pid_t pid); } + SYS_MSYNC = 256 // { int sys_msync(void *addr, size_t len, int flags); } + SYS_PIPE = 263 // { int sys_pipe(int *fdp); } + SYS_FHOPEN = 264 // { int sys_fhopen(const fhandle_t *fhp, int flags); } + SYS_PREADV = 267 // { ssize_t sys_preadv(int fd, const struct iovec *iovp, int iovcnt, int pad, off_t offset); } + SYS_PWRITEV = 268 // { ssize_t sys_pwritev(int fd, const struct iovec *iovp, int iovcnt, int pad, off_t offset); } + SYS_KQUEUE = 269 // { int sys_kqueue(void); } + SYS_MLOCKALL = 271 // { int sys_mlockall(int flags); } + SYS_MUNLOCKALL = 272 // { int sys_munlockall(void); } + SYS_GETRESUID = 281 // { int sys_getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); } + SYS_SETRESUID = 282 // { int sys_setresuid(uid_t ruid, uid_t euid, uid_t suid); } + SYS_GETRESGID = 283 // { int sys_getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); } + SYS_SETRESGID = 284 // { int sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid); } + SYS_MQUERY = 286 // { void 
*sys_mquery(void *addr, size_t len, int prot, int flags, int fd, long pad, off_t pos); } + SYS_CLOSEFROM = 287 // { int sys_closefrom(int fd); } + SYS_SIGALTSTACK = 288 // { int sys_sigaltstack(const struct sigaltstack *nss, struct sigaltstack *oss); } + SYS_SHMGET = 289 // { int sys_shmget(key_t key, size_t size, int shmflg); } + SYS_SEMOP = 290 // { int sys_semop(int semid, struct sembuf *sops, size_t nsops); } + SYS_FHSTAT = 294 // { int sys_fhstat(const fhandle_t *fhp, struct stat *sb); } + SYS___SEMCTL = 295 // { int sys___semctl(int semid, int semnum, int cmd, union semun *arg); } + SYS_SHMCTL = 296 // { int sys_shmctl(int shmid, int cmd, struct shmid_ds *buf); } + SYS_MSGCTL = 297 // { int sys_msgctl(int msqid, int cmd, struct msqid_ds *buf); } + SYS_SCHED_YIELD = 298 // { int sys_sched_yield(void); } + SYS_GETTHRID = 299 // { pid_t sys_getthrid(void); } + SYS___THRWAKEUP = 301 // { int sys___thrwakeup(const volatile void *ident, int n); } + SYS___THREXIT = 302 // { void sys___threxit(pid_t *notdead); } + SYS___THRSIGDIVERT = 303 // { int sys___thrsigdivert(sigset_t sigmask, siginfo_t *info, const struct timespec *timeout); } + SYS___GETCWD = 304 // { int sys___getcwd(char *buf, size_t len); } + SYS_ADJFREQ = 305 // { int sys_adjfreq(const int64_t *freq, int64_t *oldfreq); } + SYS_SETRTABLE = 310 // { int sys_setrtable(int rtableid); } + SYS_GETRTABLE = 311 // { int sys_getrtable(void); } + SYS_FACCESSAT = 313 // { int sys_faccessat(int fd, const char *path, int amode, int flag); } + SYS_FCHMODAT = 314 // { int sys_fchmodat(int fd, const char *path, mode_t mode, int flag); } + SYS_FCHOWNAT = 315 // { int sys_fchownat(int fd, const char *path, uid_t uid, gid_t gid, int flag); } + SYS_LINKAT = 317 // { int sys_linkat(int fd1, const char *path1, int fd2, const char *path2, int flag); } + SYS_MKDIRAT = 318 // { int sys_mkdirat(int fd, const char *path, mode_t mode); } + SYS_MKFIFOAT = 319 // { int sys_mkfifoat(int fd, const char *path, mode_t mode); } + SYS_MKNODAT = 320 // { int sys_mknodat(int fd, const char *path, mode_t mode, dev_t dev); } + SYS_OPENAT = 321 // { int sys_openat(int fd, const char *path, int flags, ... 
mode_t mode); } + SYS_READLINKAT = 322 // { ssize_t sys_readlinkat(int fd, const char *path, char *buf, size_t count); } + SYS_RENAMEAT = 323 // { int sys_renameat(int fromfd, const char *from, int tofd, const char *to); } + SYS_SYMLINKAT = 324 // { int sys_symlinkat(const char *path, int fd, const char *link); } + SYS_UNLINKAT = 325 // { int sys_unlinkat(int fd, const char *path, int flag); } + SYS___SET_TCB = 329 // { void sys___set_tcb(void *tcb); } + SYS___GET_TCB = 330 // { void *sys___get_tcb(void); } +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 885842c0e..690cefc3d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -151,6 +151,16 @@ type Dirent struct { _ [3]byte } +type Attrlist struct { + Bitmapcount uint16 + Reserved uint16 + Commonattr uint32 + Volattr uint32 + Dirattr uint32 + Fileattr uint32 + Forkattr uint32 +} + const ( PathMax = 0x400 ) @@ -366,30 +376,57 @@ type ICMPv6Filter struct { Filt [8]uint32 } +type TCPConnectionInfo struct { + State uint8 + Snd_wscale uint8 + Rcv_wscale uint8 + _ uint8 + Options uint32 + Flags uint32 + Rto uint32 + Maxseg uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Snd_wnd uint32 + Snd_sbbytes uint32 + Rcv_wnd uint32 + Rttcur uint32 + Srtt uint32 + Rttvar uint32 + Txpackets uint64 + Txbytes uint64 + Txretransmitbytes uint64 + Rxpackets uint64 + Rxbytes uint64 + Rxoutoforderbytes uint64 + Txretransmitpackets uint64 +} + const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x6c - SizeofSockaddrUnix = 0x6a - SizeofSockaddrDatalink = 0x14 - SizeofSockaddrCtl = 0x20 - SizeofSockaddrVM = 0xc - SizeofXvsockpcb = 0xa8 - SizeofXSocket = 0x64 - SizeofXSockbuf = 0x18 - SizeofXVSockPgen = 0x20 - SizeofXucred = 0x4c - SizeofLinger = 0x8 - SizeofIovec = 0x10 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofMsghdr = 0x30 - SizeofCmsghdr = 0xc - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x14 + SizeofSockaddrCtl = 0x20 + SizeofSockaddrVM = 0xc + SizeofXvsockpcb = 0xa8 + SizeofXSocket = 0x64 + SizeofXSockbuf = 0x18 + SizeofXVSockPgen = 0x20 + SizeofXucred = 0x4c + SizeofLinger = 0x8 + SizeofIovec = 0x10 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofTCPConnectionInfo = 0x70 ) const ( @@ -583,6 +620,7 @@ const ( AT_REMOVEDIR = 0x80 AT_SYMLINK_FOLLOW = 0x40 AT_SYMLINK_NOFOLLOW = 0x20 + AT_EACCESS = 0x10 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index b23c02337..5bffc10ea 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -151,6 +151,16 @@ type Dirent struct { _ [3]byte } +type Attrlist struct { + Bitmapcount uint16 + Reserved uint16 + Commonattr uint32 + Volattr uint32 + Dirattr uint32 + Fileattr uint32 + Forkattr uint32 +} + const ( PathMax = 0x400 ) @@ -366,30 +376,57 @@ type ICMPv6Filter struct { Filt [8]uint32 } +type TCPConnectionInfo struct { + State uint8 + Snd_wscale uint8 + Rcv_wscale 
uint8 + _ uint8 + Options uint32 + Flags uint32 + Rto uint32 + Maxseg uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Snd_wnd uint32 + Snd_sbbytes uint32 + Rcv_wnd uint32 + Rttcur uint32 + Srtt uint32 + Rttvar uint32 + Txpackets uint64 + Txbytes uint64 + Txretransmitbytes uint64 + Rxpackets uint64 + Rxbytes uint64 + Rxoutoforderbytes uint64 + Txretransmitpackets uint64 +} + const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x6c - SizeofSockaddrUnix = 0x6a - SizeofSockaddrDatalink = 0x14 - SizeofSockaddrCtl = 0x20 - SizeofSockaddrVM = 0xc - SizeofXvsockpcb = 0xa8 - SizeofXSocket = 0x64 - SizeofXSockbuf = 0x18 - SizeofXVSockPgen = 0x20 - SizeofXucred = 0x4c - SizeofLinger = 0x8 - SizeofIovec = 0x10 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofMsghdr = 0x30 - SizeofCmsghdr = 0xc - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x14 + SizeofSockaddrCtl = 0x20 + SizeofSockaddrVM = 0xc + SizeofXvsockpcb = 0xa8 + SizeofXSocket = 0x64 + SizeofXSockbuf = 0x18 + SizeofXVSockPgen = 0x20 + SizeofXucred = 0x4c + SizeofLinger = 0x8 + SizeofIovec = 0x10 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofTCPConnectionInfo = 0x70 ) const ( @@ -583,6 +620,7 @@ const ( AT_REMOVEDIR = 0x80 AT_SYMLINK_FOLLOW = 0x40 AT_SYMLINK_NOFOLLOW = 0x20 + AT_EACCESS = 0x10 ) type PollFd struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 4eec078e5..29dc48337 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -90,27 +90,6 @@ type Stat_t struct { Spare [10]uint64 } -type stat_freebsd11_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Btim Timespec - _ [8]byte -} - type Statfs_t struct { Version uint32 Type uint32 @@ -136,31 +115,6 @@ type Statfs_t struct { Mntonname [1024]byte } -type statfs_freebsd11_t struct { - Version uint32 - Type uint32 - Flags uint64 - Bsize uint64 - Iosize uint64 - Blocks uint64 - Bfree uint64 - Bavail int64 - Files uint64 - Ffree int64 - Syncwrites uint64 - Asyncwrites uint64 - Syncreads uint64 - Asyncreads uint64 - Spare [10]uint64 - Namemax uint32 - Owner uint32 - Fsid Fsid - Charspare [80]int8 - Fstypename [16]byte - Mntfromname [88]byte - Mntonname [88]byte -} - type Flock_t struct { Start int64 Len int64 @@ -181,14 +135,6 @@ type Dirent struct { Name [256]int8 } -type dirent_freebsd11 struct { - Fileno uint32 - Reclen uint16 - Type uint8 - Namlen uint8 - Name [256]int8 -} - type Fsid struct { Val [2]int32 } @@ -337,41 +283,9 @@ const ( ) const ( - PTRACE_ATTACH = 0xa - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0xb - PTRACE_GETFPREGS = 0x23 - PTRACE_GETFSBASE = 0x47 - PTRACE_GETLWPLIST = 0xf - PTRACE_GETNUMLWPS = 0xe - PTRACE_GETREGS = 0x21 - PTRACE_GETXSTATE = 0x45 - PTRACE_IO = 0xc - PTRACE_KILL = 0x8 - PTRACE_LWPEVENTS = 0x18 - PTRACE_LWPINFO = 0xd - PTRACE_SETFPREGS = 0x24 - PTRACE_SETREGS = 0x22 - PTRACE_SINGLESTEP 
= 0x9 - PTRACE_TRACEME = 0x0 -) - -const ( - PIOD_READ_D = 0x1 - PIOD_WRITE_D = 0x2 - PIOD_READ_I = 0x3 - PIOD_WRITE_I = 0x4 -) - -const ( - PL_FLAG_BORN = 0x100 - PL_FLAG_EXITED = 0x200 - PL_FLAG_SI = 0x20 -) - -const ( - TRAP_BRKPT = 0x1 - TRAP_TRACE = 0x2 + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 ) type PtraceLwpInfoStruct struct { @@ -380,7 +294,7 @@ type PtraceLwpInfoStruct struct { Flags int32 Sigmask Sigset_t Siglist Sigset_t - Siginfo __Siginfo + Siginfo __PtraceSiginfo Tdname [20]int8 Child_pid int32 Syscall_code uint32 @@ -398,6 +312,17 @@ type __Siginfo struct { Value [4]byte _ [32]byte } +type __PtraceSiginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr uintptr + Value [4]byte + _ [32]byte +} type Sigset_t struct { Val [4]uint32 @@ -432,9 +357,11 @@ type FpReg struct { Pad [64]uint8 } +type FpExtendedPrecision struct{} + type PtraceIoDesc struct { Op int32 - Offs *byte + Offs uintptr Addr *byte Len uint32 } @@ -444,8 +371,9 @@ type Kevent_t struct { Filter int16 Flags uint16 Fflags uint32 - Data int32 + Data int64 Udata *byte + Ext [4]uint64 } type FdSet struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 7622904a5..0a89b2890 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -86,26 +86,6 @@ type Stat_t struct { Spare [10]uint64 } -type stat_freebsd11_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Btim Timespec -} - type Statfs_t struct { Version uint32 Type uint32 @@ -131,31 +111,6 @@ type Statfs_t struct { Mntonname [1024]byte } -type statfs_freebsd11_t struct { - Version uint32 - Type uint32 - Flags uint64 - Bsize uint64 - Iosize uint64 - Blocks uint64 - Bfree uint64 - Bavail int64 - Files uint64 - Ffree int64 - Syncwrites uint64 - Asyncwrites uint64 - Syncreads uint64 - Asyncreads uint64 - Spare [10]uint64 - Namemax uint32 - Owner uint32 - Fsid Fsid - Charspare [80]int8 - Fstypename [16]byte - Mntfromname [88]byte - Mntonname [88]byte -} - type Flock_t struct { Start int64 Len int64 @@ -177,14 +132,6 @@ type Dirent struct { Name [256]int8 } -type dirent_freebsd11 struct { - Fileno uint32 - Reclen uint16 - Type uint8 - Namlen uint8 - Name [256]int8 -} - type Fsid struct { Val [2]int32 } @@ -333,41 +280,9 @@ const ( ) const ( - PTRACE_ATTACH = 0xa - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0xb - PTRACE_GETFPREGS = 0x23 - PTRACE_GETFSBASE = 0x47 - PTRACE_GETLWPLIST = 0xf - PTRACE_GETNUMLWPS = 0xe - PTRACE_GETREGS = 0x21 - PTRACE_GETXSTATE = 0x45 - PTRACE_IO = 0xc - PTRACE_KILL = 0x8 - PTRACE_LWPEVENTS = 0x18 - PTRACE_LWPINFO = 0xd - PTRACE_SETFPREGS = 0x24 - PTRACE_SETREGS = 0x22 - PTRACE_SINGLESTEP = 0x9 - PTRACE_TRACEME = 0x0 -) - -const ( - PIOD_READ_D = 0x1 - PIOD_WRITE_D = 0x2 - PIOD_READ_I = 0x3 - PIOD_WRITE_I = 0x4 -) - -const ( - PL_FLAG_BORN = 0x100 - PL_FLAG_EXITED = 0x200 - PL_FLAG_SI = 0x20 -) - -const ( - TRAP_BRKPT = 0x1 - TRAP_TRACE = 0x2 + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 ) type PtraceLwpInfoStruct struct { @@ -376,7 +291,7 @@ type PtraceLwpInfoStruct struct { Flags int32 Sigmask Sigset_t Siglist Sigset_t - Siginfo __Siginfo + Siginfo __PtraceSiginfo Tdname [20]int8 Child_pid int32 Syscall_code uint32 @@ -395,6 
+310,18 @@ type __Siginfo struct { _ [40]byte } +type __PtraceSiginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr uintptr + Value [8]byte + _ [40]byte +} + type Sigset_t struct { Val [4]uint32 } @@ -435,9 +362,11 @@ type FpReg struct { Spare [12]uint64 } +type FpExtendedPrecision struct{} + type PtraceIoDesc struct { Op int32 - Offs *byte + Offs uintptr Addr *byte Len uint64 } @@ -449,6 +378,7 @@ type Kevent_t struct { Fflags uint32 Data int64 Udata *byte + Ext [4]uint64 } type FdSet struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 19223ce8e..c8666bb15 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -33,7 +33,7 @@ type Timeval struct { _ [4]byte } -type Time_t int32 +type Time_t int64 type Rusage struct { Utime Timeval @@ -88,26 +88,6 @@ type Stat_t struct { Spare [10]uint64 } -type stat_freebsd11_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Btim Timespec -} - type Statfs_t struct { Version uint32 Type uint32 @@ -133,31 +113,6 @@ type Statfs_t struct { Mntonname [1024]byte } -type statfs_freebsd11_t struct { - Version uint32 - Type uint32 - Flags uint64 - Bsize uint64 - Iosize uint64 - Blocks uint64 - Bfree uint64 - Bavail int64 - Files uint64 - Ffree int64 - Syncwrites uint64 - Asyncwrites uint64 - Syncreads uint64 - Asyncreads uint64 - Spare [10]uint64 - Namemax uint32 - Owner uint32 - Fsid Fsid - Charspare [80]int8 - Fstypename [16]byte - Mntfromname [88]byte - Mntonname [88]byte -} - type Flock_t struct { Start int64 Len int64 @@ -179,14 +134,6 @@ type Dirent struct { Name [256]int8 } -type dirent_freebsd11 struct { - Fileno uint32 - Reclen uint16 - Type uint8 - Namlen uint8 - Name [256]int8 -} - type Fsid struct { Val [2]int32 } @@ -335,41 +282,9 @@ const ( ) const ( - PTRACE_ATTACH = 0xa - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0xb - PTRACE_GETFPREGS = 0x23 - PTRACE_GETFSBASE = 0x47 - PTRACE_GETLWPLIST = 0xf - PTRACE_GETNUMLWPS = 0xe - PTRACE_GETREGS = 0x21 - PTRACE_GETXSTATE = 0x45 - PTRACE_IO = 0xc - PTRACE_KILL = 0x8 - PTRACE_LWPEVENTS = 0x18 - PTRACE_LWPINFO = 0xd - PTRACE_SETFPREGS = 0x24 - PTRACE_SETREGS = 0x22 - PTRACE_SINGLESTEP = 0x9 - PTRACE_TRACEME = 0x0 -) - -const ( - PIOD_READ_D = 0x1 - PIOD_WRITE_D = 0x2 - PIOD_READ_I = 0x3 - PIOD_WRITE_I = 0x4 -) - -const ( - PL_FLAG_BORN = 0x100 - PL_FLAG_EXITED = 0x200 - PL_FLAG_SI = 0x20 -) - -const ( - TRAP_BRKPT = 0x1 - TRAP_TRACE = 0x2 + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 ) type PtraceLwpInfoStruct struct { @@ -378,7 +293,7 @@ type PtraceLwpInfoStruct struct { Flags int32 Sigmask Sigset_t Siglist Sigset_t - Siginfo __Siginfo + Siginfo __PtraceSiginfo Tdname [20]int8 Child_pid int32 Syscall_code uint32 @@ -386,15 +301,27 @@ type PtraceLwpInfoStruct struct { } type __Siginfo struct { - Signo int32 - Errno int32 - Code int32 - Pid int32 - Uid uint32 - Status int32 - Addr *byte - Value [4]byte - X_reason [32]byte + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr *byte + Value [4]byte + _ [32]byte +} + +type __PtraceSiginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr uintptr + Value [4]byte + _ [32]byte } type Sigset_t 
struct { @@ -402,21 +329,27 @@ type Sigset_t struct { } type Reg struct { - R [13]uint32 - R_sp uint32 - R_lr uint32 - R_pc uint32 - R_cpsr uint32 + R [13]uint32 + Sp uint32 + Lr uint32 + Pc uint32 + Cpsr uint32 } type FpReg struct { - Fpr_fpsr uint32 - Fpr [8][3]uint32 + Fpsr uint32 + Fpr [8]FpExtendedPrecision +} + +type FpExtendedPrecision struct { + Exponent uint32 + Mantissa_hi uint32 + Mantissa_lo uint32 } type PtraceIoDesc struct { Op int32 - Offs *byte + Offs uintptr Addr *byte Len uint32 } @@ -426,8 +359,11 @@ type Kevent_t struct { Filter int16 Flags uint16 Fflags uint32 - Data int32 + _ [4]byte + Data int64 Udata *byte + _ [4]byte + Ext [4]uint64 } type FdSet struct { @@ -453,7 +389,7 @@ type ifMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 Data ifData } @@ -464,7 +400,6 @@ type IfMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Data IfData } @@ -532,7 +467,7 @@ type IfaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 Metric int32 } @@ -543,7 +478,7 @@ type IfmaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 } type IfAnnounceMsghdr struct { @@ -560,7 +495,7 @@ type RtMsghdr struct { Version uint8 Type uint8 Index uint16 - _ [2]byte + _ uint16 Flags int32 Addrs int32 Pid int32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 8e3e33f67..88fb48a88 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -86,26 +86,6 @@ type Stat_t struct { Spare [10]uint64 } -type stat_freebsd11_t struct { - Dev uint32 - Ino uint32 - Mode uint16 - Nlink uint16 - Uid uint32 - Gid uint32 - Rdev uint32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize int32 - Flags uint32 - Gen uint32 - Lspare int32 - Btim Timespec -} - type Statfs_t struct { Version uint32 Type uint32 @@ -131,31 +111,6 @@ type Statfs_t struct { Mntonname [1024]byte } -type statfs_freebsd11_t struct { - Version uint32 - Type uint32 - Flags uint64 - Bsize uint64 - Iosize uint64 - Blocks uint64 - Bfree uint64 - Bavail int64 - Files uint64 - Ffree int64 - Syncwrites uint64 - Asyncwrites uint64 - Syncreads uint64 - Asyncreads uint64 - Spare [10]uint64 - Namemax uint32 - Owner uint32 - Fsid Fsid - Charspare [80]int8 - Fstypename [16]byte - Mntfromname [88]byte - Mntonname [88]byte -} - type Flock_t struct { Start int64 Len int64 @@ -177,14 +132,6 @@ type Dirent struct { Name [256]int8 } -type dirent_freebsd11 struct { - Fileno uint32 - Reclen uint16 - Type uint8 - Namlen uint8 - Name [256]int8 -} - type Fsid struct { Val [2]int32 } @@ -333,39 +280,9 @@ const ( ) const ( - PTRACE_ATTACH = 0xa - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0xb - PTRACE_GETFPREGS = 0x23 - PTRACE_GETLWPLIST = 0xf - PTRACE_GETNUMLWPS = 0xe - PTRACE_GETREGS = 0x21 - PTRACE_IO = 0xc - PTRACE_KILL = 0x8 - PTRACE_LWPEVENTS = 0x18 - PTRACE_LWPINFO = 0xd - PTRACE_SETFPREGS = 0x24 - PTRACE_SETREGS = 0x22 - PTRACE_SINGLESTEP = 0x9 - PTRACE_TRACEME = 0x0 -) - -const ( - PIOD_READ_D = 0x1 - PIOD_WRITE_D = 0x2 - PIOD_READ_I = 0x3 - PIOD_WRITE_I = 0x4 -) - -const ( - PL_FLAG_BORN = 0x100 - PL_FLAG_EXITED = 0x200 - PL_FLAG_SI = 0x20 -) - -const ( - TRAP_BRKPT = 0x1 - TRAP_TRACE = 0x2 + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 ) type PtraceLwpInfoStruct struct { @@ -374,7 +291,7 @@ type PtraceLwpInfoStruct struct { Flags int32 Sigmask Sigset_t Siglist Sigset_t - Siginfo __Siginfo + 
Siginfo __PtraceSiginfo Tdname [20]int8 Child_pid int32 Syscall_code uint32 @@ -393,6 +310,18 @@ type __Siginfo struct { _ [40]byte } +type __PtraceSiginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr uintptr + Value [8]byte + _ [40]byte +} + type Sigset_t struct { Val [4]uint32 } @@ -413,9 +342,11 @@ type FpReg struct { _ [8]byte } +type FpExtendedPrecision struct{} + type PtraceIoDesc struct { Op int32 - Offs *byte + Offs uintptr Addr *byte Len uint64 } @@ -427,6 +358,7 @@ type Kevent_t struct { Fflags uint32 Data int64 Udata *byte + Ext [4]uint64 } type FdSet struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go new file mode 100644 index 000000000..698dc975e --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -0,0 +1,638 @@ +// cgo -godefs -- -fsigned-char types_freebsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build riscv64 && freebsd +// +build riscv64,freebsd + +package unix + +const ( + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Time_t int64 + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur int64 + Max int64 +} + +type _Gid_t uint32 + +const ( + _statfsVersion = 0x20140518 + _dirblksiz = 0x400 +) + +type Stat_t struct { + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint16 + _0 int16 + Uid uint32 + Gid uint32 + _1 int32 + Rdev uint64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Btim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint64 + Spare [10]uint64 +} + +type Statfs_t struct { + Version uint32 + Type uint32 + Flags uint64 + Bsize uint64 + Iosize uint64 + Blocks uint64 + Bfree uint64 + Bavail int64 + Files uint64 + Ffree int64 + Syncwrites uint64 + Asyncwrites uint64 + Syncreads uint64 + Asyncreads uint64 + Spare [10]uint64 + Namemax uint32 + Owner uint32 + Fsid Fsid + Charspare [80]int8 + Fstypename [16]byte + Mntfromname [1024]byte + Mntonname [1024]byte +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 + Sysid int32 + _ [4]byte +} + +type Dirent struct { + Fileno uint64 + Off int64 + Reclen uint16 + Type uint8 + Pad0 uint8 + Namlen uint16 + Pad1 uint16 + Name [256]int8 +} + +type Fsid struct { + Val [2]int32 +} + +const ( + PathMax = 0x400 +) + +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [46]int8 +} + +type RawSockaddr struct 
{ + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Xucred struct { + Version uint32 + Uid uint32 + Ngroups int16 + Groups [16]uint32 + _ *byte +} + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x36 + SizeofXucred = 0x58 + SizeofLinger = 0x8 + SizeofIovec = 0x10 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type PtraceLwpInfoStruct struct { + Lwpid int32 + Event int32 + Flags int32 + Sigmask Sigset_t + Siglist Sigset_t + Siginfo __PtraceSiginfo + Tdname [20]int8 + Child_pid int32 + Syscall_code uint32 + Syscall_narg uint32 +} + +type __Siginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr *byte + Value [8]byte + _ [40]byte +} + +type __PtraceSiginfo struct { + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr uintptr + Value [8]byte + _ [40]byte +} + +type Sigset_t struct { + Val [4]uint32 +} + +type Reg struct { + Ra uint64 + Sp uint64 + Gp uint64 + Tp uint64 + T [7]uint64 + S [12]uint64 + A [8]uint64 + Sepc uint64 + Sstatus uint64 +} + +type FpReg struct { + X [32][2]uint64 + Fcsr uint64 +} + +type FpExtendedPrecision struct{} + +type PtraceIoDesc struct { + Op int32 + Offs uintptr + Addr *byte + Len uint64 +} + +type Kevent_t struct { + Ident uint64 + Filter int16 + Flags uint16 + Fflags uint32 + Data int64 + Udata *byte + Ext [4]uint64 +} + +type FdSet struct { + Bits [16]uint64 +} + +const ( + sizeofIfMsghdr = 0xa8 + SizeofIfMsghdr = 0xa8 + sizeofIfData = 0x98 + SizeofIfData = 0x98 + SizeofIfaMsghdr = 0x14 + SizeofIfmaMsghdr = 0x10 + SizeofIfAnnounceMsghdr = 0x18 + SizeofRtMsghdr = 0x98 + SizeofRtMetrics = 0x70 +) + +type ifMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ uint16 + Data ifData +} + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Data IfData +} + +type ifData struct { + Type uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Vhid uint8 + Datalen uint16 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Oqdrops uint64 + Noproto uint64 + Hwassist uint64 + _ [8]byte 
+ _ [16]byte +} + +type IfData struct { + Type uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Spare_char1 uint8 + Spare_char2 uint8 + Datalen uint8 + Mtu uint64 + Metric uint64 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Hwassist uint64 + Epoch int64 + Lastchange Timeval +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ uint16 + Metric int32 +} + +type IfmaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ uint16 +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Name [16]int8 + What uint16 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + _ uint16 + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Fmask int32 + Inits uint64 + Rmx RtMetrics +} + +type RtMetrics struct { + Locks uint64 + Mtu uint64 + Hopcount uint64 + Expire uint64 + Recvpipe uint64 + Sendpipe uint64 + Ssthresh uint64 + Rtt uint64 + Rttvar uint64 + Pksent uint64 + Weight uint64 + Nhidx uint64 + Filler [2]uint64 +} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfZbuf = 0x18 + SizeofBpfProgram = 0x10 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x20 + SizeofBpfZbufHeader = 0x20 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfZbuf struct { + Bufa *byte + Bufb *byte + Buflen uint64 +} + +type BpfProgram struct { + Len uint32 + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp Timeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + _ [6]byte +} + +type BpfZbufHeader struct { + Kernel_gen uint32 + Kernel_len uint32 + User_gen uint32 + _ [5]uint32 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed uint32 + Ospeed uint32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +const ( + AT_FDCWD = -0x64 + AT_EACCESS = 0x100 + AT_SYMLINK_NOFOLLOW = 0x200 + AT_SYMLINK_FOLLOW = 0x400 + AT_REMOVEDIR = 0x800 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLINIGNEOF = 0x2000 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + +type CapRights struct { + Rights [2]uint64 +} + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} + +const SizeofClockinfo = 0x14 + +type Clockinfo struct { + Hz int32 + Tick int32 + Spare int32 + Stathz int32 + Profhz int32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go deleted file mode 100644 index 4c485261d..000000000 --- a/vendor/golang.org/x/sys/unix/ztypes_illumos_amd64.go +++ /dev/null @@ -1,42 +0,0 @@ -// cgo -godefs types_illumos.go | go run mkpost.go -// Code generated by the command above; see README.md. DO NOT EDIT. 
- -//go:build amd64 && illumos -// +build amd64,illumos - -package unix - -const ( - TUNNEWPPA = 0x540001 - TUNSETPPA = 0x540002 - - I_STR = 0x5308 - I_POP = 0x5303 - I_PUSH = 0x5302 - I_LINK = 0x530c - I_UNLINK = 0x530d - I_PLINK = 0x5316 - I_PUNLINK = 0x5317 - - IF_UNITSEL = -0x7ffb8cca -) - -type strbuf struct { - Maxlen int32 - Len int32 - Buf *int8 -} - -type Strioctl struct { - Cmd int32 - Timout int32 - Len int32 - Dp *int8 -} - -type Lifreq struct { - Name [32]int8 - Lifru1 [4]byte - Type uint32 - Lifru [336]byte -} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index e61891dff..ca84727cf 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -29,6 +29,41 @@ type Itimerval struct { Value Timeval } +const ( + ADJ_OFFSET = 0x1 + ADJ_FREQUENCY = 0x2 + ADJ_MAXERROR = 0x4 + ADJ_ESTERROR = 0x8 + ADJ_STATUS = 0x10 + ADJ_TIMECONST = 0x20 + ADJ_TAI = 0x80 + ADJ_SETOFFSET = 0x100 + ADJ_MICRO = 0x1000 + ADJ_NANO = 0x2000 + ADJ_TICK = 0x4000 + ADJ_OFFSET_SINGLESHOT = 0x8001 + ADJ_OFFSET_SS_READ = 0xa001 +) + +const ( + STA_PLL = 0x1 + STA_PPSFREQ = 0x2 + STA_PPSTIME = 0x4 + STA_FLL = 0x8 + STA_INS = 0x10 + STA_DEL = 0x20 + STA_UNSYNC = 0x40 + STA_FREQHOLD = 0x80 + STA_PPSSIGNAL = 0x100 + STA_PPSJITTER = 0x200 + STA_PPSWANDER = 0x400 + STA_PPSERROR = 0x800 + STA_CLOCKERR = 0x1000 + STA_NANO = 0x2000 + STA_MODE = 0x4000 + STA_CLK = 0x8000 +) + const ( TIME_OK = 0x0 TIME_INS = 0x1 @@ -53,29 +88,30 @@ type StatxTimestamp struct { } type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - Mnt_id uint64 - _ uint64 - _ [12]uint64 + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + Mnt_id uint64 + Dio_mem_align uint32 + Dio_offset_align uint32 + _ [12]uint64 } type Fsid struct { @@ -420,36 +456,60 @@ type Ucred struct { } type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + 
Total_retrans uint32 + Pacing_rate uint64 + Max_pacing_rate uint64 + Bytes_acked uint64 + Bytes_received uint64 + Segs_out uint32 + Segs_in uint32 + Notsent_bytes uint32 + Min_rtt uint32 + Data_segs_in uint32 + Data_segs_out uint32 + Delivery_rate uint64 + Busy_time uint64 + Rwnd_limited uint64 + Sndbuf_limited uint64 + Delivered uint32 + Delivered_ce uint32 + Bytes_sent uint64 + Bytes_retrans uint64 + Dsack_dups uint32 + Reord_seen uint32 + Rcv_ooopack uint32 + Snd_wnd uint32 + Rcv_wnd uint32 + Rehash uint32 } type CanFilter struct { @@ -492,7 +552,7 @@ const ( SizeofIPv6MTUInfo = 0x20 SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc - SizeofTCPInfo = 0x68 + SizeofTCPInfo = 0xf0 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -756,6 +816,23 @@ const ( AT_EACCESS = 0x200 OPEN_TREE_CLONE = 0x1 + + MOVE_MOUNT_F_SYMLINKS = 0x1 + MOVE_MOUNT_F_AUTOMOUNTS = 0x2 + MOVE_MOUNT_F_EMPTY_PATH = 0x4 + MOVE_MOUNT_T_SYMLINKS = 0x10 + MOVE_MOUNT_T_AUTOMOUNTS = 0x20 + MOVE_MOUNT_T_EMPTY_PATH = 0x40 + MOVE_MOUNT_SET_GROUP = 0x100 + + FSOPEN_CLOEXEC = 0x1 + + FSPICK_CLOEXEC = 0x1 + FSPICK_SYMLINK_NOFOLLOW = 0x2 + FSPICK_NO_AUTOMOUNT = 0x4 + FSPICK_EMPTY_PATH = 0x8 + + FSMOUNT_CLOEXEC = 0x1 ) type OpenHow struct { @@ -928,6 +1005,9 @@ type PerfEventAttr struct { Aux_watermark uint32 Sample_max_stack uint16 _ uint16 + Aux_sample_size uint32 + _ uint32 + Sig_data uint64 } type PerfEventMmapPage struct { @@ -987,6 +1067,7 @@ const ( PerfBitCommExec = CBitFieldMaskBit24 PerfBitUseClockID = CBitFieldMaskBit25 PerfBitContextSwitch = CBitFieldMaskBit26 + PerfBitWriteBackward = CBitFieldMaskBit27 ) const ( @@ -1079,7 +1160,8 @@ const ( PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 0xf PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 0x10 PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 0x11 - PERF_SAMPLE_BRANCH_MAX_SHIFT = 0x12 + PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 0x12 + PERF_SAMPLE_BRANCH_MAX_SHIFT = 0x13 PERF_SAMPLE_BRANCH_USER = 0x1 PERF_SAMPLE_BRANCH_KERNEL = 0x2 PERF_SAMPLE_BRANCH_HV = 0x4 @@ -1098,7 +1180,8 @@ const ( PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 PERF_SAMPLE_BRANCH_HW_INDEX = 0x20000 - PERF_SAMPLE_BRANCH_MAX = 0x40000 + PERF_SAMPLE_BRANCH_PRIV_SAVE = 0x40000 + PERF_SAMPLE_BRANCH_MAX = 0x80000 PERF_BR_UNKNOWN = 0x0 PERF_BR_COND = 0x1 PERF_BR_UNCOND = 0x2 @@ -1110,7 +1193,12 @@ const ( PERF_BR_SYSRET = 0x8 PERF_BR_COND_CALL = 0x9 PERF_BR_COND_RET = 0xa - PERF_BR_MAX = 0xb + PERF_BR_ERET = 0xb + PERF_BR_IRQ = 0xc + PERF_BR_SERROR = 0xd + PERF_BR_NO_TX = 0xe + PERF_BR_EXTEND_ABI = 0xf + PERF_BR_MAX = 0x10 PERF_SAMPLE_REGS_ABI_NONE = 0x0 PERF_SAMPLE_REGS_ABI_32 = 0x1 PERF_SAMPLE_REGS_ABI_64 = 0x2 @@ -1129,7 +1217,8 @@ const ( PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 PERF_FORMAT_ID = 0x4 PERF_FORMAT_GROUP = 0x8 - PERF_FORMAT_MAX = 0x10 + PERF_FORMAT_LOST = 0x10 + PERF_FORMAT_MAX = 0x20 PERF_IOC_FLAG_GROUP = 0x1 PERF_RECORD_MMAP = 0x1 PERF_RECORD_LOST = 0x2 @@ -1175,7 +1264,7 @@ type TCPMD5Sig struct { Flags uint8 Prefixlen uint8 Keylen uint16 - _ uint32 + Ifindex int32 Key [80]uint8 } @@ -1444,6 +1533,11 @@ const ( IFLA_ALT_IFNAME = 0x35 IFLA_PERM_ADDRESS = 0x36 IFLA_PROTO_DOWN_REASON = 0x37 + IFLA_PARENT_DEV_NAME = 0x38 + IFLA_PARENT_DEV_BUS_NAME = 0x39 + IFLA_GRO_MAX_SIZE = 0x3a + IFLA_TSO_MAX_SIZE = 0x3b + IFLA_TSO_MAX_SEGS = 0x3c IFLA_PROTO_DOWN_REASON_UNSPEC = 0x0 IFLA_PROTO_DOWN_REASON_MASK = 0x1 IFLA_PROTO_DOWN_REASON_VALUE = 0x2 @@ -1870,7 +1964,11 @@ const ( NFT_MSG_GETOBJ = 0x13 NFT_MSG_DELOBJ = 0x14 NFT_MSG_GETOBJ_RESET = 0x15 - NFT_MSG_MAX = 0x19 + NFT_MSG_NEWFLOWTABLE = 0x16 + 
NFT_MSG_GETFLOWTABLE = 0x17 + NFT_MSG_DELFLOWTABLE = 0x18 + NFT_MSG_GETRULE_RESET = 0x19 + NFT_MSG_MAX = 0x1a NFTA_LIST_UNSPEC = 0x0 NFTA_LIST_ELEM = 0x1 NFTA_HOOK_UNSPEC = 0x0 @@ -2374,9 +2472,11 @@ const ( SOF_TIMESTAMPING_OPT_STATS = 0x1000 SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 + SOF_TIMESTAMPING_BIND_PHC = 0x8000 + SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000 - SOF_TIMESTAMPING_LAST = 0x8000 - SOF_TIMESTAMPING_MASK = 0xffff + SOF_TIMESTAMPING_LAST = 0x10000 + SOF_TIMESTAMPING_MASK = 0x1ffff SCM_TSTAMP_SND = 0x0 SCM_TSTAMP_SCHED = 0x1 @@ -2952,7 +3052,16 @@ const ( DEVLINK_CMD_TRAP_POLICER_NEW = 0x47 DEVLINK_CMD_TRAP_POLICER_DEL = 0x48 DEVLINK_CMD_HEALTH_REPORTER_TEST = 0x49 - DEVLINK_CMD_MAX = 0x4d + DEVLINK_CMD_RATE_GET = 0x4a + DEVLINK_CMD_RATE_SET = 0x4b + DEVLINK_CMD_RATE_NEW = 0x4c + DEVLINK_CMD_RATE_DEL = 0x4d + DEVLINK_CMD_LINECARD_GET = 0x4e + DEVLINK_CMD_LINECARD_SET = 0x4f + DEVLINK_CMD_LINECARD_NEW = 0x50 + DEVLINK_CMD_LINECARD_DEL = 0x51 + DEVLINK_CMD_SELFTESTS_GET = 0x52 + DEVLINK_CMD_MAX = 0x53 DEVLINK_PORT_TYPE_NOTSET = 0x0 DEVLINK_PORT_TYPE_AUTO = 0x1 DEVLINK_PORT_TYPE_ETH = 0x2 @@ -3181,7 +3290,13 @@ const ( DEVLINK_ATTR_RATE_NODE_NAME = 0xa8 DEVLINK_ATTR_RATE_PARENT_NODE_NAME = 0xa9 DEVLINK_ATTR_REGION_MAX_SNAPSHOTS = 0xaa - DEVLINK_ATTR_MAX = 0xaa + DEVLINK_ATTR_LINECARD_INDEX = 0xab + DEVLINK_ATTR_LINECARD_STATE = 0xac + DEVLINK_ATTR_LINECARD_TYPE = 0xad + DEVLINK_ATTR_LINECARD_SUPPORTED_TYPES = 0xae + DEVLINK_ATTR_NESTED_DEVLINK = 0xaf + DEVLINK_ATTR_SELFTESTS = 0xb0 + DEVLINK_ATTR_MAX = 0xb3 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0x0 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 0x1 DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0x0 @@ -3197,7 +3312,8 @@ const ( DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR = 0x1 DEVLINK_PORT_FN_ATTR_STATE = 0x2 DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 - DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x3 + DEVLINK_PORT_FN_ATTR_CAPS = 0x4 + DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x4 ) type FsverityDigest struct { @@ -3290,7 +3406,8 @@ const ( LWTUNNEL_ENCAP_SEG6_LOCAL = 0x7 LWTUNNEL_ENCAP_RPL = 0x8 LWTUNNEL_ENCAP_IOAM6 = 0x9 - LWTUNNEL_ENCAP_MAX = 0x9 + LWTUNNEL_ENCAP_XFRM = 0xa + LWTUNNEL_ENCAP_MAX = 0xa MPLS_IPTUNNEL_UNSPEC = 0x0 MPLS_IPTUNNEL_DST = 0x1 @@ -3485,7 +3602,10 @@ const ( ETHTOOL_MSG_PHC_VCLOCKS_GET = 0x21 ETHTOOL_MSG_MODULE_GET = 0x22 ETHTOOL_MSG_MODULE_SET = 0x23 - ETHTOOL_MSG_USER_MAX = 0x23 + ETHTOOL_MSG_PSE_GET = 0x24 + ETHTOOL_MSG_PSE_SET = 0x25 + ETHTOOL_MSG_RSS_GET = 0x26 + ETHTOOL_MSG_USER_MAX = 0x26 ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3523,7 +3643,9 @@ const ( ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY = 0x22 ETHTOOL_MSG_MODULE_GET_REPLY = 0x23 ETHTOOL_MSG_MODULE_NTF = 0x24 - ETHTOOL_MSG_KERNEL_MAX = 0x24 + ETHTOOL_MSG_PSE_GET_REPLY = 0x25 + ETHTOOL_MSG_RSS_GET_REPLY = 0x26 + ETHTOOL_MSG_KERNEL_MAX = 0x26 ETHTOOL_A_HEADER_UNSPEC = 0x0 ETHTOOL_A_HEADER_DEV_INDEX = 0x1 ETHTOOL_A_HEADER_DEV_NAME = 0x2 @@ -3582,7 +3704,8 @@ const ( ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG = 0x7 ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE = 0x8 ETHTOOL_A_LINKMODES_LANES = 0x9 - ETHTOOL_A_LINKMODES_MAX = 0x9 + ETHTOOL_A_LINKMODES_RATE_MATCHING = 0xa + ETHTOOL_A_LINKMODES_MAX = 0xa ETHTOOL_A_LINKSTATE_UNSPEC = 0x0 ETHTOOL_A_LINKSTATE_HEADER = 0x1 ETHTOOL_A_LINKSTATE_LINK = 0x2 @@ -3590,7 +3713,8 @@ const ( ETHTOOL_A_LINKSTATE_SQI_MAX = 0x4 ETHTOOL_A_LINKSTATE_EXT_STATE = 0x5 ETHTOOL_A_LINKSTATE_EXT_SUBSTATE = 0x6 - ETHTOOL_A_LINKSTATE_MAX = 0x6 + ETHTOOL_A_LINKSTATE_EXT_DOWN_CNT = 0x7 + 
ETHTOOL_A_LINKSTATE_MAX = 0x7 ETHTOOL_A_DEBUG_UNSPEC = 0x0 ETHTOOL_A_DEBUG_HEADER = 0x1 ETHTOOL_A_DEBUG_MSGMASK = 0x2 @@ -3621,7 +3745,11 @@ const ( ETHTOOL_A_RINGS_RX_MINI = 0x7 ETHTOOL_A_RINGS_RX_JUMBO = 0x8 ETHTOOL_A_RINGS_TX = 0x9 - ETHTOOL_A_RINGS_MAX = 0x9 + ETHTOOL_A_RINGS_RX_BUF_LEN = 0xa + ETHTOOL_A_RINGS_TCP_DATA_SPLIT = 0xb + ETHTOOL_A_RINGS_CQE_SIZE = 0xc + ETHTOOL_A_RINGS_TX_PUSH = 0xd + ETHTOOL_A_RINGS_MAX = 0xd ETHTOOL_A_CHANNELS_UNSPEC = 0x0 ETHTOOL_A_CHANNELS_HEADER = 0x1 ETHTOOL_A_CHANNELS_RX_MAX = 0x2 @@ -4170,6 +4298,9 @@ const ( NL80211_ACL_POLICY_DENY_UNLESS_LISTED = 0x1 NL80211_AC_VI = 0x1 NL80211_AC_VO = 0x0 + NL80211_AP_SETTINGS_EXTERNAL_AUTH_SUPPORT = 0x1 + NL80211_AP_SETTINGS_SA_QUERY_OFFLOAD_SUPPORT = 0x2 + NL80211_AP_SME_SA_QUERY_OFFLOAD = 0x1 NL80211_ATTR_4ADDR = 0x53 NL80211_ATTR_ACK = 0x5c NL80211_ATTR_ACK_SIGNAL = 0x107 @@ -4178,6 +4309,7 @@ const ( NL80211_ATTR_AIRTIME_WEIGHT = 0x112 NL80211_ATTR_AKM_SUITES = 0x4c NL80211_ATTR_AP_ISOLATE = 0x60 + NL80211_ATTR_AP_SETTINGS_FLAGS = 0x135 NL80211_ATTR_AUTH_DATA = 0x9c NL80211_ATTR_AUTH_TYPE = 0x35 NL80211_ATTR_BANDS = 0xef @@ -4209,6 +4341,9 @@ const ( NL80211_ATTR_COALESCE_RULE_DELAY = 0x1 NL80211_ATTR_COALESCE_RULE_MAX = 0x3 NL80211_ATTR_COALESCE_RULE_PKT_PATTERN = 0x3 + NL80211_ATTR_COLOR_CHANGE_COLOR = 0x130 + NL80211_ATTR_COLOR_CHANGE_COUNT = 0x12f + NL80211_ATTR_COLOR_CHANGE_ELEMS = 0x131 NL80211_ATTR_CONN_FAILED_REASON = 0x9b NL80211_ATTR_CONTROL_PORT = 0x44 NL80211_ATTR_CONTROL_PORT_ETHERTYPE = 0x66 @@ -4235,6 +4370,7 @@ const ( NL80211_ATTR_DEVICE_AP_SME = 0x8d NL80211_ATTR_DFS_CAC_TIME = 0x7 NL80211_ATTR_DFS_REGION = 0x92 + NL80211_ATTR_DISABLE_EHT = 0x137 NL80211_ATTR_DISABLE_HE = 0x12d NL80211_ATTR_DISABLE_HT = 0x93 NL80211_ATTR_DISABLE_VHT = 0xaf @@ -4242,6 +4378,8 @@ const ( NL80211_ATTR_DONT_WAIT_FOR_ACK = 0x8e NL80211_ATTR_DTIM_PERIOD = 0xd NL80211_ATTR_DURATION = 0x57 + NL80211_ATTR_EHT_CAPABILITY = 0x136 + NL80211_ATTR_EML_CAPABILITY = 0x13d NL80211_ATTR_EXT_CAPA = 0xa9 NL80211_ATTR_EXT_CAPA_MASK = 0xaa NL80211_ATTR_EXTERNAL_AUTH_ACTION = 0x104 @@ -4306,10 +4444,11 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x133 + NL80211_ATTR_MAX = 0x141 NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 + NL80211_ATTR_MAX_NUM_AKM_SUITES = 0x13c NL80211_ATTR_MAX_NUM_PMKIDS = 0x56 NL80211_ATTR_MAX_NUM_SCAN_SSIDS = 0x2b NL80211_ATTR_MAX_NUM_SCHED_SCAN_PLANS = 0xde @@ -4319,6 +4458,8 @@ const ( NL80211_ATTR_MAX_SCAN_PLAN_INTERVAL = 0xdf NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS = 0xe0 NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN = 0x7c + NL80211_ATTR_MBSSID_CONFIG = 0x132 + NL80211_ATTR_MBSSID_ELEMS = 0x133 NL80211_ATTR_MCAST_RATE = 0x6b NL80211_ATTR_MDID = 0xb1 NL80211_ATTR_MEASUREMENT_DURATION = 0xeb @@ -4328,6 +4469,11 @@ const ( NL80211_ATTR_MESH_PEER_AID = 0xed NL80211_ATTR_MESH_SETUP = 0x70 NL80211_ATTR_MGMT_SUBTYPE = 0x29 + NL80211_ATTR_MLD_ADDR = 0x13a + NL80211_ATTR_MLD_CAPA_AND_OPS = 0x13e + NL80211_ATTR_MLO_LINK_ID = 0x139 + NL80211_ATTR_MLO_LINKS = 0x138 + NL80211_ATTR_MLO_SUPPORT = 0x13b NL80211_ATTR_MNTR_FLAGS = 0x17 NL80211_ATTR_MPATH_INFO = 0x1b NL80211_ATTR_MPATH_NEXT_HOP = 0x1a @@ -4340,6 +4486,7 @@ const ( NL80211_ATTR_NETNS_FD = 0xdb NL80211_ATTR_NOACK_MAP = 0x95 NL80211_ATTR_NSS = 0x106 + NL80211_ATTR_OBSS_COLOR_BITMAP = 0x12e NL80211_ATTR_OFFCHANNEL_TX_OK = 0x6c NL80211_ATTR_OPER_CLASS = 0xd6 NL80211_ATTR_OPMODE_NOTIF = 0xc2 @@ -4366,6 +4513,7 @@ const ( 
NL80211_ATTR_PROTOCOL_FEATURES = 0xad NL80211_ATTR_PS_STATE = 0x5d NL80211_ATTR_QOS_MAP = 0xc7 + NL80211_ATTR_RADAR_BACKGROUND = 0x134 NL80211_ATTR_RADAR_EVENT = 0xa8 NL80211_ATTR_REASON_CODE = 0x36 NL80211_ATTR_RECEIVE_MULTICAST = 0x121 @@ -4381,6 +4529,7 @@ const ( NL80211_ATTR_RESP_IE = 0x4e NL80211_ATTR_ROAM_SUPPORT = 0x83 NL80211_ATTR_RX_FRAME_TYPES = 0x64 + NL80211_ATTR_RX_HW_TIMESTAMP = 0x140 NL80211_ATTR_RXMGMT_FLAGS = 0xbc NL80211_ATTR_RX_SIGNAL_DBM = 0x97 NL80211_ATTR_S1G_CAPABILITY = 0x128 @@ -4438,6 +4587,7 @@ const ( NL80211_ATTR_SUPPORT_MESH_AUTH = 0x73 NL80211_ATTR_SURVEY_INFO = 0x54 NL80211_ATTR_SURVEY_RADIO_STATS = 0xda + NL80211_ATTR_TD_BITMAP = 0x141 NL80211_ATTR_TDLS_ACTION = 0x88 NL80211_ATTR_TDLS_DIALOG_TOKEN = 0x89 NL80211_ATTR_TDLS_EXTERNAL_SETUP = 0x8c @@ -4453,6 +4603,7 @@ const ( NL80211_ATTR_TSID = 0xd2 NL80211_ATTR_TWT_RESPONDER = 0x116 NL80211_ATTR_TX_FRAME_TYPES = 0x63 + NL80211_ATTR_TX_HW_TIMESTAMP = 0x13f NL80211_ATTR_TX_NO_CCK_RATE = 0x87 NL80211_ATTR_TXQ_LIMIT = 0x10a NL80211_ATTR_TXQ_MEMORY_LIMIT = 0x10b @@ -4526,13 +4677,19 @@ const ( NL80211_BAND_ATTR_RATES = 0x2 NL80211_BAND_ATTR_VHT_CAPA = 0x8 NL80211_BAND_ATTR_VHT_MCS_SET = 0x7 + NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MAC = 0x8 + NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MCS_SET = 0xa + NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PHY = 0x9 + NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PPE = 0xb NL80211_BAND_IFTYPE_ATTR_HE_6GHZ_CAPA = 0x6 NL80211_BAND_IFTYPE_ATTR_HE_CAP_MAC = 0x2 NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET = 0x4 NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY = 0x3 NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE = 0x5 NL80211_BAND_IFTYPE_ATTR_IFTYPES = 0x1 - NL80211_BAND_IFTYPE_ATTR_MAX = 0x7 + NL80211_BAND_IFTYPE_ATTR_MAX = 0xb + NL80211_BAND_IFTYPE_ATTR_VENDOR_ELEMS = 0x7 + NL80211_BAND_LC = 0x5 NL80211_BAND_S1GHZ = 0x4 NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE = 0x2 NL80211_BITRATE_ATTR_MAX = 0x2 @@ -4553,7 +4710,9 @@ const ( NL80211_BSS_FREQUENCY_OFFSET = 0x14 NL80211_BSS_INFORMATION_ELEMENTS = 0x6 NL80211_BSS_LAST_SEEN_BOOTTIME = 0xf - NL80211_BSS_MAX = 0x14 + NL80211_BSS_MAX = 0x16 + NL80211_BSS_MLD_ADDR = 0x16 + NL80211_BSS_MLO_LINK_ID = 0x15 NL80211_BSS_PAD = 0x10 NL80211_BSS_PARENT_BSSID = 0x12 NL80211_BSS_PARENT_TSF = 0x11 @@ -4581,6 +4740,7 @@ const ( NL80211_CHAN_WIDTH_20 = 0x1 NL80211_CHAN_WIDTH_20_NOHT = 0x0 NL80211_CHAN_WIDTH_2 = 0x9 + NL80211_CHAN_WIDTH_320 = 0xd NL80211_CHAN_WIDTH_40 = 0x2 NL80211_CHAN_WIDTH_4 = 0xa NL80211_CHAN_WIDTH_5 = 0x6 @@ -4590,8 +4750,11 @@ const ( NL80211_CMD_ABORT_SCAN = 0x72 NL80211_CMD_ACTION = 0x3b NL80211_CMD_ACTION_TX_STATUS = 0x3c + NL80211_CMD_ADD_LINK = 0x94 + NL80211_CMD_ADD_LINK_STA = 0x96 NL80211_CMD_ADD_NAN_FUNCTION = 0x75 NL80211_CMD_ADD_TX_TS = 0x69 + NL80211_CMD_ASSOC_COMEBACK = 0x93 NL80211_CMD_ASSOCIATE = 0x26 NL80211_CMD_AUTHENTICATE = 0x25 NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL = 0x38 @@ -4599,6 +4762,10 @@ const ( NL80211_CMD_CHANNEL_SWITCH = 0x66 NL80211_CMD_CH_SWITCH_NOTIFY = 0x58 NL80211_CMD_CH_SWITCH_STARTED_NOTIFY = 0x6e + NL80211_CMD_COLOR_CHANGE_ABORTED = 0x90 + NL80211_CMD_COLOR_CHANGE_COMPLETED = 0x91 + NL80211_CMD_COLOR_CHANGE_REQUEST = 0x8e + NL80211_CMD_COLOR_CHANGE_STARTED = 0x8f NL80211_CMD_CONNECT = 0x2e NL80211_CMD_CONN_FAILED = 0x5b NL80211_CMD_CONTROL_PORT_FRAME = 0x81 @@ -4647,8 +4814,9 @@ const ( NL80211_CMD_LEAVE_IBSS = 0x2c NL80211_CMD_LEAVE_MESH = 0x45 NL80211_CMD_LEAVE_OCB = 0x6d - NL80211_CMD_MAX = 0x92 + NL80211_CMD_MAX = 0x98 NL80211_CMD_MICHAEL_MIC_FAILURE = 0x29 + NL80211_CMD_MODIFY_LINK_STA = 0x97 NL80211_CMD_NAN_MATCH = 0x78 NL80211_CMD_NEW_BEACON = 0xf 
NL80211_CMD_NEW_INTERFACE = 0x7 @@ -4661,6 +4829,7 @@ const ( NL80211_CMD_NEW_WIPHY = 0x3 NL80211_CMD_NOTIFY_CQM = 0x40 NL80211_CMD_NOTIFY_RADAR = 0x86 + NL80211_CMD_OBSS_COLOR_COLLISION = 0x8d NL80211_CMD_PEER_MEASUREMENT_COMPLETE = 0x85 NL80211_CMD_PEER_MEASUREMENT_RESULT = 0x84 NL80211_CMD_PEER_MEASUREMENT_START = 0x83 @@ -4676,6 +4845,8 @@ const ( NL80211_CMD_REGISTER_FRAME = 0x3a NL80211_CMD_RELOAD_REGDB = 0x7e NL80211_CMD_REMAIN_ON_CHANNEL = 0x37 + NL80211_CMD_REMOVE_LINK = 0x95 + NL80211_CMD_REMOVE_LINK_STA = 0x98 NL80211_CMD_REQ_SET_REG = 0x1b NL80211_CMD_ROAM = 0x2f NL80211_CMD_SCAN_ABORTED = 0x23 @@ -4686,6 +4857,7 @@ const ( NL80211_CMD_SET_CHANNEL = 0x41 NL80211_CMD_SET_COALESCE = 0x65 NL80211_CMD_SET_CQM = 0x3f + NL80211_CMD_SET_FILS_AAD = 0x92 NL80211_CMD_SET_INTERFACE = 0x6 NL80211_CMD_SET_KEY = 0xa NL80211_CMD_SET_MAC_ACL = 0x5d @@ -4760,6 +4932,8 @@ const ( NL80211_EDMG_BW_CONFIG_MIN = 0x4 NL80211_EDMG_CHANNELS_MAX = 0x3c NL80211_EDMG_CHANNELS_MIN = 0x1 + NL80211_EHT_MAX_CAPABILITY_LEN = 0x33 + NL80211_EHT_MIN_CAPABILITY_LEN = 0xd NL80211_EXTERNAL_AUTH_ABORT = 0x1 NL80211_EXTERNAL_AUTH_START = 0x0 NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK = 0x32 @@ -4776,6 +4950,7 @@ const ( NL80211_EXT_FEATURE_BEACON_RATE_HT = 0x7 NL80211_EXT_FEATURE_BEACON_RATE_LEGACY = 0x6 NL80211_EXT_FEATURE_BEACON_RATE_VHT = 0x8 + NL80211_EXT_FEATURE_BSS_COLOR = 0x3a NL80211_EXT_FEATURE_BSS_PARENT_TSF = 0x4 NL80211_EXT_FEATURE_CAN_REPLACE_PTK0 = 0x1f NL80211_EXT_FEATURE_CONTROL_PORT_NO_PREAUTH = 0x2a @@ -4787,6 +4962,7 @@ const ( NL80211_EXT_FEATURE_DFS_OFFLOAD = 0x19 NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER = 0x20 NL80211_EXT_FEATURE_EXT_KEY_ID = 0x24 + NL80211_EXT_FEATURE_FILS_CRYPTO_OFFLOAD = 0x3b NL80211_EXT_FEATURE_FILS_DISCOVERY = 0x34 NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME = 0x11 NL80211_EXT_FEATURE_FILS_SK_OFFLOAD = 0xe @@ -4802,8 +4978,10 @@ const ( NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION = 0x14 NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE = 0x13 NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION = 0x31 + NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE = 0x3d NL80211_EXT_FEATURE_PROTECTED_TWT = 0x2b NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE = 0x39 + NL80211_EXT_FEATURE_RADAR_BACKGROUND = 0x3c NL80211_EXT_FEATURE_RRM = 0x1 NL80211_EXT_FEATURE_SAE_OFFLOAD_AP = 0x33 NL80211_EXT_FEATURE_SAE_OFFLOAD = 0x26 @@ -4870,12 +5048,14 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x19 + NL80211_FREQUENCY_ATTR_MAX = 0x1b NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc NL80211_FREQUENCY_ATTR_NO_20MHZ = 0x10 + NL80211_FREQUENCY_ATTR_NO_320MHZ = 0x1a NL80211_FREQUENCY_ATTR_NO_80MHZ = 0xb + NL80211_FREQUENCY_ATTR_NO_EHT = 0x1b NL80211_FREQUENCY_ATTR_NO_HE = 0x13 NL80211_FREQUENCY_ATTR_NO_HT40_MINUS = 0x9 NL80211_FREQUENCY_ATTR_NO_HT40_PLUS = 0xa @@ -4975,6 +5155,12 @@ const ( NL80211_MAX_SUPP_HT_RATES = 0x4d NL80211_MAX_SUPP_RATES = 0x20 NL80211_MAX_SUPP_REG_RULES = 0x80 + NL80211_MBSSID_CONFIG_ATTR_EMA = 0x5 + NL80211_MBSSID_CONFIG_ATTR_INDEX = 0x3 + NL80211_MBSSID_CONFIG_ATTR_MAX = 0x5 + NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY = 0x2 + NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES = 0x1 + NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX = 0x4 NL80211_MESHCONF_ATTR_MAX = 0x1f NL80211_MESHCONF_AUTO_OPEN_PLINKS = 0x7 NL80211_MESHCONF_AWAKE_WINDOW = 0x1b @@ -5137,6 +5323,7 @@ const ( 
NL80211_PMSR_FTM_FAILURE_UNSPECIFIED = 0x0 NL80211_PMSR_FTM_FAILURE_WRONG_CHANNEL = 0x3 NL80211_PMSR_FTM_REQ_ATTR_ASAP = 0x1 + NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR = 0xd NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION = 0x5 NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD = 0x4 NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST = 0x6 @@ -5213,12 +5400,36 @@ const ( NL80211_RADAR_PRE_CAC_EXPIRED = 0x4 NL80211_RATE_INFO_10_MHZ_WIDTH = 0xb NL80211_RATE_INFO_160_MHZ_WIDTH = 0xa + NL80211_RATE_INFO_320_MHZ_WIDTH = 0x12 NL80211_RATE_INFO_40_MHZ_WIDTH = 0x3 NL80211_RATE_INFO_5_MHZ_WIDTH = 0xc NL80211_RATE_INFO_80_MHZ_WIDTH = 0x8 NL80211_RATE_INFO_80P80_MHZ_WIDTH = 0x9 NL80211_RATE_INFO_BITRATE32 = 0x5 NL80211_RATE_INFO_BITRATE = 0x1 + NL80211_RATE_INFO_EHT_GI_0_8 = 0x0 + NL80211_RATE_INFO_EHT_GI_1_6 = 0x1 + NL80211_RATE_INFO_EHT_GI_3_2 = 0x2 + NL80211_RATE_INFO_EHT_GI = 0x15 + NL80211_RATE_INFO_EHT_MCS = 0x13 + NL80211_RATE_INFO_EHT_NSS = 0x14 + NL80211_RATE_INFO_EHT_RU_ALLOC_106 = 0x3 + NL80211_RATE_INFO_EHT_RU_ALLOC_106P26 = 0x4 + NL80211_RATE_INFO_EHT_RU_ALLOC_242 = 0x5 + NL80211_RATE_INFO_EHT_RU_ALLOC_26 = 0x0 + NL80211_RATE_INFO_EHT_RU_ALLOC_2x996 = 0xb + NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484 = 0xc + NL80211_RATE_INFO_EHT_RU_ALLOC_3x996 = 0xd + NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484 = 0xe + NL80211_RATE_INFO_EHT_RU_ALLOC_484 = 0x6 + NL80211_RATE_INFO_EHT_RU_ALLOC_484P242 = 0x7 + NL80211_RATE_INFO_EHT_RU_ALLOC_4x996 = 0xf + NL80211_RATE_INFO_EHT_RU_ALLOC_52 = 0x1 + NL80211_RATE_INFO_EHT_RU_ALLOC_52P26 = 0x2 + NL80211_RATE_INFO_EHT_RU_ALLOC_996 = 0x8 + NL80211_RATE_INFO_EHT_RU_ALLOC_996P484 = 0x9 + NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242 = 0xa + NL80211_RATE_INFO_EHT_RU_ALLOC = 0x16 NL80211_RATE_INFO_HE_1XLTF = 0x0 NL80211_RATE_INFO_HE_2XLTF = 0x1 NL80211_RATE_INFO_HE_4XLTF = 0x2 @@ -5237,7 +5448,7 @@ const ( NL80211_RATE_INFO_HE_RU_ALLOC_52 = 0x1 NL80211_RATE_INFO_HE_RU_ALLOC_996 = 0x5 NL80211_RATE_INFO_HE_RU_ALLOC = 0x11 - NL80211_RATE_INFO_MAX = 0x11 + NL80211_RATE_INFO_MAX = 0x16 NL80211_RATE_INFO_MCS = 0x2 NL80211_RATE_INFO_SHORT_GI = 0x4 NL80211_RATE_INFO_VHT_MCS = 0x6 @@ -5261,6 +5472,7 @@ const ( NL80211_RRF_GO_CONCURRENT = 0x1000 NL80211_RRF_IR_CONCURRENT = 0x1000 NL80211_RRF_NO_160MHZ = 0x10000 + NL80211_RRF_NO_320MHZ = 0x40000 NL80211_RRF_NO_80MHZ = 0x8000 NL80211_RRF_NO_CCK = 0x2 NL80211_RRF_NO_HE = 0x20000 @@ -5534,3 +5746,67 @@ const ( NL80211_WPA_VERSION_2 = 0x2 NL80211_WPA_VERSION_3 = 0x4 ) + +const ( + FRA_UNSPEC = 0x0 + FRA_DST = 0x1 + FRA_SRC = 0x2 + FRA_IIFNAME = 0x3 + FRA_GOTO = 0x4 + FRA_UNUSED2 = 0x5 + FRA_PRIORITY = 0x6 + FRA_UNUSED3 = 0x7 + FRA_UNUSED4 = 0x8 + FRA_UNUSED5 = 0x9 + FRA_FWMARK = 0xa + FRA_FLOW = 0xb + FRA_TUN_ID = 0xc + FRA_SUPPRESS_IFGROUP = 0xd + FRA_SUPPRESS_PREFIXLEN = 0xe + FRA_TABLE = 0xf + FRA_FWMASK = 0x10 + FRA_OIFNAME = 0x11 + FRA_PAD = 0x12 + FRA_L3MDEV = 0x13 + FRA_UID_RANGE = 0x14 + FRA_PROTOCOL = 0x15 + FRA_IP_PROTO = 0x16 + FRA_SPORT_RANGE = 0x17 + FRA_DPORT_RANGE = 0x18 + FR_ACT_UNSPEC = 0x0 + FR_ACT_TO_TBL = 0x1 + FR_ACT_GOTO = 0x2 + FR_ACT_NOP = 0x3 + FR_ACT_RES3 = 0x4 + FR_ACT_RES4 = 0x5 + FR_ACT_BLACKHOLE = 0x6 + FR_ACT_UNREACHABLE = 0x7 + FR_ACT_PROHIBIT = 0x8 +) + +const ( + AUDIT_NLGRP_NONE = 0x0 + AUDIT_NLGRP_READLOG = 0x1 +) + +const ( + TUN_F_CSUM = 0x1 + TUN_F_TSO4 = 0x2 + TUN_F_TSO6 = 0x4 + TUN_F_TSO_ECN = 0x8 + TUN_F_UFO = 0x10 +) + +const ( + VIRTIO_NET_HDR_F_NEEDS_CSUM = 0x1 + VIRTIO_NET_HDR_F_DATA_VALID = 0x2 + VIRTIO_NET_HDR_F_RSC_INFO = 0x4 +) + +const ( + VIRTIO_NET_HDR_GSO_NONE = 0x0 + VIRTIO_NET_HDR_GSO_TCPV4 = 0x1 + 
VIRTIO_NET_HDR_GSO_UDP = 0x3 + VIRTIO_NET_HDR_GSO_TCPV6 = 0x4 + VIRTIO_NET_HDR_GSO_ECN = 0x80 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 0d8ba0a95..4ecc1495c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/386/cgo -- -Wall -Werror -static -I/tmp/386/include -m32 linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux @@ -254,6 +254,12 @@ type Sigset_t struct { const _C__NSIG = 0x41 +const ( + SIG_BLOCK = 0x0 + SIG_UNBLOCK = 0x1 + SIG_SETMASK = 0x2 +) + type Siginfo struct { Signo int32 Errno int32 @@ -322,6 +328,15 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + _ [4]byte + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint32 @@ -399,7 +414,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [122]int8 + Data [122]byte _ uint32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 0243228f6..34fddff96 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/amd64/cgo -- -Wall -Werror -static -I/tmp/amd64/include -m64 linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux @@ -269,6 +269,12 @@ type Sigset_t struct { const _C__NSIG = 0x41 +const ( + SIG_BLOCK = 0x0 + SIG_UNBLOCK = 0x1 + SIG_SETMASK = 0x2 +) + type Siginfo struct { Signo int32 Errno int32 @@ -336,6 +342,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 @@ -413,7 +427,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 1b00b6c0d..3b14a6031 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/arm/cgo -- -Wall -Werror -static -I/tmp/arm/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build arm && linux @@ -245,6 +245,12 @@ type Sigset_t struct { const _C__NSIG = 0x41 +const ( + SIG_BLOCK = 0x0 + SIG_UNBLOCK = 0x1 + SIG_SETMASK = 0x2 +) + type Siginfo struct { Signo int32 Errno int32 @@ -313,6 +319,15 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + _ [4]byte + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint32 @@ -390,7 +405,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [122]uint8 + Data [122]byte _ uint32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 2fa62e10f..0517651ab 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/arm64/cgo -- -Wall -Werror -static -I/tmp/arm64/include -fsigned-char linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux @@ -248,6 +248,12 @@ type Sigset_t struct { const _C__NSIG = 0x41 +const ( + SIG_BLOCK = 0x0 + SIG_UNBLOCK = 0x1 + SIG_SETMASK = 0x2 +) + type Siginfo struct { Signo int32 Errno int32 @@ -315,6 +321,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 @@ -392,7 +406,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go new file mode 100644 index 000000000..3b0c51813 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -0,0 +1,691 @@ +// cgo -godefs -objdir=/tmp/loong64/cgo -- -Wall -Werror -static -I/tmp/loong64/include linux/types.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +//go:build loong64 && linux +// +build loong64,linux + +package unix + +const ( + SizeofPtr = 0x8 + SizeofLong = 0x8 +) + +type ( + _C_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Timex struct { + Modes uint32 + Offset int64 + Freq int64 + Maxerror int64 + Esterror int64 + Status int32 + Constant int64 + Precision int64 + Tolerance int64 + Time Timeval + Tick int64 + Ppsfreq int64 + Jitter int64 + Shift int32 + Stabil int64 + Jitcnt int64 + Calcnt int64 + Errcnt int64 + Stbcnt int64 + Tai int32 + _ [44]byte +} + +type Time_t int64 + +type Tms struct { + Utime int64 + Stime int64 + Cutime int64 + Cstime int64 +} + +type Utimbuf struct { + Actime int64 + Modtime int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Stat_t struct { + Dev uint64 + Ino uint64 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint64 + _ uint64 + Size int64 + Blksize int32 + _ int32 + Blocks int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + _ [2]int32 +} + +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]int8 + _ [5]byte +} + +type Flock_t struct { + Type int16 + Whence int16 + Start int64 + Len int64 + Pid int32 + _ [4]byte +} + +type DmNameList struct { + Dev uint64 + Next uint32 + Name [0]byte + _ [4]byte +} + +const ( + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + +type RawSockaddrNFCLLCP struct { + Sa_family uint16 + Dev_idx uint32 + Target_idx uint32 + Nfc_protocol uint32 + Dsap uint8 + Ssap uint8 + Service_name [63]uint8 + Service_name_len uint64 +} + +type RawSockaddr struct { + Family uint16 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [96]int8 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + _ [4]byte +} + +type Cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type ifreq struct { + Ifrn [16]byte + Ifru [24]byte +} + +const ( + SizeofSockaddrNFCLLCP = 0x60 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 +) + +const ( + SizeofSockFprog = 0x10 +) + +type PtraceRegs struct { + Regs [32]uint64 + Orig_a0 uint64 + Era uint64 + Badv uint64 + Reserved [10]uint64 +} + +type FdSet struct { + Bits [16]int64 +} + +type Sysinfo_t struct { + Uptime int64 + Loads [3]uint64 + Totalram uint64 + Freeram uint64 + Sharedram uint64 + Bufferram uint64 + Totalswap uint64 + Freeswap uint64 + Procs uint16 + Pad uint16 + Totalhigh uint64 + Freehigh uint64 + Unit uint32 + _ [0]int8 + _ [4]byte +} + +type Ustat_t struct { + Tfree int32 + Tinode uint64 + Fname [6]int8 + Fpack [6]int8 + _ [4]byte +} + +type EpollEvent struct { + Events uint32 + _ int32 + Fd int32 + Pad int32 +} + +const ( + OPEN_TREE_CLOEXEC = 0x80000 +) + +const ( + POLLRDHUP = 0x2000 +) + +type Sigset_t struct { + Val [16]uint64 +} + +const _C__NSIG = 0x41 + +const ( + SIG_BLOCK = 0x0 + SIG_UNBLOCK = 0x1 + SIG_SETMASK = 0x2 +) + +type Siginfo struct { + Signo int32 + Errno int32 + Code int32 + _ int32 + _ [112]byte +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Line uint8 + Cc [19]uint8 + Ispeed uint32 + Ospeed uint32 +} + +type Taskstats struct { + 
Version uint16 + Ac_exitcode uint32 + Ac_flag uint8 + Ac_nice uint8 + Cpu_count uint64 + Cpu_delay_total uint64 + Blkio_count uint64 + Blkio_delay_total uint64 + Swapin_count uint64 + Swapin_delay_total uint64 + Cpu_run_real_total uint64 + Cpu_run_virtual_total uint64 + Ac_comm [32]int8 + Ac_sched uint8 + Ac_pad [3]uint8 + _ [4]byte + Ac_uid uint32 + Ac_gid uint32 + Ac_pid uint32 + Ac_ppid uint32 + Ac_btime uint32 + Ac_etime uint64 + Ac_utime uint64 + Ac_stime uint64 + Ac_minflt uint64 + Ac_majflt uint64 + Coremem uint64 + Virtmem uint64 + Hiwater_rss uint64 + Hiwater_vm uint64 + Read_char uint64 + Write_char uint64 + Read_syscalls uint64 + Write_syscalls uint64 + Read_bytes uint64 + Write_bytes uint64 + Cancelled_write_bytes uint64 + Nvcsw uint64 + Nivcsw uint64 + Ac_utimescaled uint64 + Ac_stimescaled uint64 + Cpu_scaled_run_real_total uint64 + Freepages_count uint64 + Freepages_delay_total uint64 + Thrashing_count uint64 + Thrashing_delay_total uint64 + Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 +} + +type cpuMask uint64 + +const ( + _NCPUBITS = 0x40 +) + +const ( + CBitFieldMaskBit0 = 0x1 + CBitFieldMaskBit1 = 0x2 + CBitFieldMaskBit2 = 0x4 + CBitFieldMaskBit3 = 0x8 + CBitFieldMaskBit4 = 0x10 + CBitFieldMaskBit5 = 0x20 + CBitFieldMaskBit6 = 0x40 + CBitFieldMaskBit7 = 0x80 + CBitFieldMaskBit8 = 0x100 + CBitFieldMaskBit9 = 0x200 + CBitFieldMaskBit10 = 0x400 + CBitFieldMaskBit11 = 0x800 + CBitFieldMaskBit12 = 0x1000 + CBitFieldMaskBit13 = 0x2000 + CBitFieldMaskBit14 = 0x4000 + CBitFieldMaskBit15 = 0x8000 + CBitFieldMaskBit16 = 0x10000 + CBitFieldMaskBit17 = 0x20000 + CBitFieldMaskBit18 = 0x40000 + CBitFieldMaskBit19 = 0x80000 + CBitFieldMaskBit20 = 0x100000 + CBitFieldMaskBit21 = 0x200000 + CBitFieldMaskBit22 = 0x400000 + CBitFieldMaskBit23 = 0x800000 + CBitFieldMaskBit24 = 0x1000000 + CBitFieldMaskBit25 = 0x2000000 + CBitFieldMaskBit26 = 0x4000000 + CBitFieldMaskBit27 = 0x8000000 + CBitFieldMaskBit28 = 0x10000000 + CBitFieldMaskBit29 = 0x20000000 + CBitFieldMaskBit30 = 0x40000000 + CBitFieldMaskBit31 = 0x80000000 + CBitFieldMaskBit32 = 0x100000000 + CBitFieldMaskBit33 = 0x200000000 + CBitFieldMaskBit34 = 0x400000000 + CBitFieldMaskBit35 = 0x800000000 + CBitFieldMaskBit36 = 0x1000000000 + CBitFieldMaskBit37 = 0x2000000000 + CBitFieldMaskBit38 = 0x4000000000 + CBitFieldMaskBit39 = 0x8000000000 + CBitFieldMaskBit40 = 0x10000000000 + CBitFieldMaskBit41 = 0x20000000000 + CBitFieldMaskBit42 = 0x40000000000 + CBitFieldMaskBit43 = 0x80000000000 + CBitFieldMaskBit44 = 0x100000000000 + CBitFieldMaskBit45 = 0x200000000000 + CBitFieldMaskBit46 = 0x400000000000 + CBitFieldMaskBit47 = 0x800000000000 + CBitFieldMaskBit48 = 0x1000000000000 + CBitFieldMaskBit49 = 0x2000000000000 + CBitFieldMaskBit50 = 0x4000000000000 + CBitFieldMaskBit51 = 0x8000000000000 + CBitFieldMaskBit52 = 0x10000000000000 + CBitFieldMaskBit53 = 0x20000000000000 + CBitFieldMaskBit54 = 0x40000000000000 + CBitFieldMaskBit55 = 0x80000000000000 + CBitFieldMaskBit56 = 0x100000000000000 + CBitFieldMaskBit57 = 0x200000000000000 + CBitFieldMaskBit58 = 0x400000000000000 + CBitFieldMaskBit59 = 0x800000000000000 + CBitFieldMaskBit60 = 0x1000000000000000 + CBitFieldMaskBit61 = 0x2000000000000000 + CBitFieldMaskBit62 = 0x4000000000000000 + CBitFieldMaskBit63 = 0x8000000000000000 +) + +type SockaddrStorage struct { + Family uint16 + Data [118]byte + _ uint64 +} + +type HDGeometry struct { + 
Heads uint8 + Sectors uint8 + Cylinders uint16 + Start uint64 +} + +type Statfs_t struct { + Type int64 + Bsize int64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Namelen int64 + Frsize int64 + Flags int64 + Spare [4]int64 +} + +type TpacketHdr struct { + Status uint64 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Usec uint32 + _ [4]byte +} + +const ( + SizeofTpacketHdr = 0x20 +) + +type RTCPLLInfo struct { + Ctrl int32 + Value int32 + Max int32 + Min int32 + Posmult int32 + Negmult int32 + Clock int64 +} + +type BlkpgPartition struct { + Start int64 + Length int64 + Pno int32 + Devname [64]uint8 + Volname [64]uint8 + _ [4]byte +} + +const ( + BLKPG = 0x1269 +) + +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Size uint32 + Headroom uint32 + Flags uint32 + _ [4]byte +} + +type CryptoUserAlg struct { + Name [64]int8 + Driver_name [64]int8 + Module_name [64]int8 + Type uint32 + Mask uint32 + Refcnt uint32 + Flags uint32 +} + +type CryptoStatAEAD struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatAKCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Verify_cnt uint64 + Sign_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatCipher struct { + Type [64]int8 + Encrypt_cnt uint64 + Encrypt_tlen uint64 + Decrypt_cnt uint64 + Decrypt_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatCompress struct { + Type [64]int8 + Compress_cnt uint64 + Compress_tlen uint64 + Decompress_cnt uint64 + Decompress_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatHash struct { + Type [64]int8 + Hash_cnt uint64 + Hash_tlen uint64 + Err_cnt uint64 +} + +type CryptoStatKPP struct { + Type [64]int8 + Setsecret_cnt uint64 + Generate_public_key_cnt uint64 + Compute_shared_secret_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatRNG struct { + Type [64]int8 + Generate_cnt uint64 + Generate_tlen uint64 + Seed_cnt uint64 + Err_cnt uint64 +} + +type CryptoStatLarval struct { + Type [64]int8 +} + +type CryptoReportLarval struct { + Type [64]int8 +} + +type CryptoReportHash struct { + Type [64]int8 + Blocksize uint32 + Digestsize uint32 +} + +type CryptoReportCipher struct { + Type [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 +} + +type CryptoReportBlkCipher struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Min_keysize uint32 + Max_keysize uint32 + Ivsize uint32 +} + +type CryptoReportAEAD struct { + Type [64]int8 + Geniv [64]int8 + Blocksize uint32 + Maxauthsize uint32 + Ivsize uint32 +} + +type CryptoReportComp struct { + Type [64]int8 +} + +type CryptoReportRNG struct { + Type [64]int8 + Seedsize uint32 +} + +type CryptoReportAKCipher struct { + Type [64]int8 +} + +type CryptoReportKPP struct { + Type [64]int8 +} + +type CryptoReportAcomp struct { + Type [64]int8 +} + +type LoopInfo struct { + Number int32 + Device uint32 + Inode uint64 + Rdevice uint32 + Offset int32 + Encrypt_type int32 + Encrypt_key_size int32 + Flags int32 + Name [64]int8 + Encrypt_key [32]uint8 + Init [2]uint64 + Reserved [4]int8 + _ [4]byte +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +type PPSKInfo struct { + Assert_sequence uint32 + Clear_sequence uint32 + Assert_tu 
PPSKTime + Clear_tu PPSKTime + Current_mode int32 + _ [4]byte +} + +const ( + PPS_GETPARAMS = 0x800870a1 + PPS_SETPARAMS = 0x400870a2 + PPS_GETCAP = 0x800870a3 + PPS_FETCH = 0xc00870a4 +) + +const ( + PIDFD_NONBLOCK = 0x800 +) + +type SysvIpcPerm struct { + Key int32 + Uid uint32 + Gid uint32 + Cuid uint32 + Cgid uint32 + Mode uint32 + _ [0]uint8 + Seq uint16 + _ uint16 + _ uint64 + _ uint64 +} +type SysvShmDesc struct { + Perm SysvIpcPerm + Segsz uint64 + Atime int64 + Dtime int64 + Ctime int64 + Cpid int32 + Lpid int32 + Nattch uint64 + _ uint64 + _ uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 48da0b402..fccdf4dd0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/mips/cgo -- -Wall -Werror -static -I/tmp/mips/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux @@ -250,6 +250,12 @@ type Sigset_t struct { const _C__NSIG = 0x80 +const ( + SIG_BLOCK = 0x1 + SIG_UNBLOCK = 0x2 + SIG_SETMASK = 0x3 +) + type Siginfo struct { Signo int32 Code int32 @@ -318,6 +324,15 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + _ [4]byte + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint32 @@ -395,7 +410,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [122]int8 + Data [122]byte _ uint32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 4fadda366..500de8fc0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/mips64/cgo -- -Wall -Werror -static -I/tmp/mips64/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mips64 && linux @@ -251,6 +251,12 @@ type Sigset_t struct { const _C__NSIG = 0x80 +const ( + SIG_BLOCK = 0x1 + SIG_UNBLOCK = 0x2 + SIG_SETMASK = 0x3 +) + type Siginfo struct { Signo int32 Code int32 @@ -318,6 +324,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 @@ -395,7 +409,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 05f957512..d0434cd2c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/mips64le/cgo -- -Wall -Werror -static -I/tmp/mips64le/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux @@ -251,6 +251,12 @@ type Sigset_t struct { const _C__NSIG = 0x80 +const ( + SIG_BLOCK = 0x1 + SIG_UNBLOCK = 0x2 + SIG_SETMASK = 0x3 +) + type Siginfo struct { Signo int32 Code int32 @@ -318,6 +324,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 @@ -395,7 +409,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 559805177..84206ba53 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/mipsle/cgo -- -Wall -Werror -static -I/tmp/mipsle/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mipsle && linux @@ -250,6 +250,12 @@ type Sigset_t struct { const _C__NSIG = 0x80 +const ( + SIG_BLOCK = 0x1 + SIG_UNBLOCK = 0x2 + SIG_SETMASK = 0x3 +) + type Siginfo struct { Signo int32 Code int32 @@ -318,6 +324,15 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + _ [4]byte + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint32 @@ -395,7 +410,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [122]int8 + Data [122]byte _ uint32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index 94d11fadd..ab078cf1f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/ppc/cgo -- -Wall -Werror -static -I/tmp/ppc/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux @@ -257,6 +257,12 @@ type Sigset_t struct { const _C__NSIG = 0x41 +const ( + SIG_BLOCK = 0x0 + SIG_UNBLOCK = 0x1 + SIG_SETMASK = 0x2 +) + type Siginfo struct { Signo int32 Errno int32 @@ -325,6 +331,15 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + _ [4]byte + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint32 @@ -402,7 +417,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [122]uint8 + Data [122]byte _ uint32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index c7cd77617..42eb2c4ce 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/ppc64/cgo -- -Wall -Werror -static -I/tmp/ppc64/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build ppc64 && linux @@ -258,6 +258,12 @@ type Sigset_t struct { const _C__NSIG = 0x41 +const ( + SIG_BLOCK = 0x0 + SIG_UNBLOCK = 0x1 + SIG_SETMASK = 0x2 +) + type Siginfo struct { Signo int32 Errno int32 @@ -325,6 +331,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 @@ -402,7 +416,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]uint8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 5d8b4be3b..31304a4e8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/ppc64le/cgo -- -Wall -Werror -static -I/tmp/ppc64le/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux @@ -258,6 +258,12 @@ type Sigset_t struct { const _C__NSIG = 0x41 +const ( + SIG_BLOCK = 0x0 + SIG_UNBLOCK = 0x1 + SIG_SETMASK = 0x2 +) + type Siginfo struct { Signo int32 Errno int32 @@ -325,6 +331,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 @@ -402,7 +416,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]uint8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index c38d8962b..c311f9612 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/riscv64/cgo -- -Wall -Werror -static -I/tmp/riscv64/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build riscv64 && linux @@ -276,6 +276,12 @@ type Sigset_t struct { const _C__NSIG = 0x41 +const ( + SIG_BLOCK = 0x0 + SIG_UNBLOCK = 0x1 + SIG_SETMASK = 0x2 +) + type Siginfo struct { Signo int32 Errno int32 @@ -343,6 +349,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 @@ -420,7 +434,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]uint8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 513cb0c71..bba3cefac 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/s390x/cgo -- -Wall -Werror -static -I/tmp/s390x/include -fsigned-char linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux @@ -271,6 +271,12 @@ type Sigset_t struct { const _C__NSIG = 0x41 +const ( + SIG_BLOCK = 0x0 + SIG_UNBLOCK = 0x1 + SIG_SETMASK = 0x2 +) + type Siginfo struct { Signo int32 Errno int32 @@ -338,6 +344,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 @@ -415,7 +429,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 0c3e0c482..ad8a01380 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go +// cgo -godefs -objdir=/tmp/sparc64/cgo -- -Wall -Werror -static -I/tmp/sparc64/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build sparc64 && linux @@ -253,6 +253,12 @@ type Sigset_t struct { const _C__NSIG = 0x41 +const ( + SIG_BLOCK = 0x1 + SIG_UNBLOCK = 0x2 + SIG_SETMASK = 0x4 +) + type Siginfo struct { Signo int32 Errno int32 @@ -320,6 +326,14 @@ type Taskstats struct { Thrashing_count uint64 Thrashing_delay_total uint64 Ac_btime64 uint64 + Compact_count uint64 + Compact_delay_total uint64 + Ac_tgid uint32 + Ac_tgetime uint64 + Ac_exe_dev uint64 + Ac_exe_inode uint64 + Wpcopy_count uint64 + Wpcopy_delay_total uint64 } type cpuMask uint64 @@ -397,7 +411,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index 2fd2060e6..9bc4c8f9d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -491,6 +491,90 @@ type Utsname struct { Machine [256]byte } +const SizeofUvmexp = 0x278 + +type Uvmexp struct { + Pagesize int64 + Pagemask int64 + Pageshift int64 + Npages int64 + Free int64 + Active int64 + Inactive int64 + Paging int64 + Wired int64 + Zeropages int64 + Reserve_pagedaemon int64 + Reserve_kernel int64 + Freemin int64 + Freetarg int64 + Inactarg int64 + Wiredmax int64 + Nswapdev int64 + Swpages int64 + Swpginuse int64 + Swpgonly int64 + Nswget int64 + Unused1 int64 + Cpuhit int64 + Cpumiss int64 + Faults int64 + Traps int64 + Intrs int64 + Swtch int64 + Softs int64 + Syscalls int64 + Pageins int64 + Swapins int64 + Swapouts int64 + Pgswapin int64 + Pgswapout int64 + Forks int64 + Forks_ppwait int64 + Forks_sharevm int64 + Pga_zerohit int64 + Pga_zeromiss int64 + Zeroaborts int64 + Fltnoram int64 + Fltnoanon int64 + Fltpgwait int64 + Fltpgrele int64 + Fltrelck int64 + Fltrelckok int64 + Fltanget int64 + Fltanretry int64 + Fltamcopy int64 + Fltnamap int64 + Fltnomap int64 + Fltlget int64 + Fltget int64 + Flt_anon int64 + Flt_acow int64 + Flt_obj int64 + Flt_prcopy int64 + Flt_przero int64 + Pdwoke int64 + Pdrevs int64 + Unused4 int64 + Pdfreed int64 + Pdscans int64 + Pdanscan int64 + Pdobscan int64 + Pdreact int64 + Pdbusy int64 + Pdpageouts int64 + Pdpending int64 + Pddeact int64 + Anonpages int64 + Filepages int64 + Execpages int64 + Colorhit int64 + Colormiss int64 + Ncolors int64 + Bootpages int64 + Poolpages int64 +} + const SizeofClockinfo = 0x14 type Clockinfo struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index 6a5a1a8ae..bb05f655d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -499,6 +499,90 @@ type Utsname struct { Machine [256]byte } +const SizeofUvmexp = 0x278 + +type Uvmexp struct { + Pagesize int64 + Pagemask int64 + Pageshift int64 + Npages int64 + Free int64 + Active int64 + Inactive int64 + Paging int64 + Wired int64 + Zeropages int64 + Reserve_pagedaemon int64 + Reserve_kernel int64 + Freemin int64 + Freetarg int64 + Inactarg int64 + Wiredmax int64 + Nswapdev int64 + Swpages int64 + Swpginuse int64 + Swpgonly int64 + Nswget int64 + Unused1 int64 + Cpuhit int64 + Cpumiss int64 + Faults int64 + Traps int64 + Intrs int64 + Swtch int64 + Softs int64 + Syscalls int64 + Pageins int64 + Swapins int64 + Swapouts int64 + Pgswapin int64 + Pgswapout int64 + Forks int64 + Forks_ppwait int64 + Forks_sharevm int64 + Pga_zerohit int64 + Pga_zeromiss int64 + Zeroaborts int64 + Fltnoram int64 + Fltnoanon int64 + Fltpgwait 
int64 + Fltpgrele int64 + Fltrelck int64 + Fltrelckok int64 + Fltanget int64 + Fltanretry int64 + Fltamcopy int64 + Fltnamap int64 + Fltnomap int64 + Fltlget int64 + Fltget int64 + Flt_anon int64 + Flt_acow int64 + Flt_obj int64 + Flt_prcopy int64 + Flt_przero int64 + Pdwoke int64 + Pdrevs int64 + Unused4 int64 + Pdfreed int64 + Pdscans int64 + Pdanscan int64 + Pdobscan int64 + Pdreact int64 + Pdbusy int64 + Pdpageouts int64 + Pdpending int64 + Pddeact int64 + Anonpages int64 + Filepages int64 + Execpages int64 + Colorhit int64 + Colormiss int64 + Ncolors int64 + Bootpages int64 + Poolpages int64 +} + const SizeofClockinfo = 0x14 type Clockinfo struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index 84cc8d01e..db40e3a19 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -496,6 +496,90 @@ type Utsname struct { Machine [256]byte } +const SizeofUvmexp = 0x278 + +type Uvmexp struct { + Pagesize int64 + Pagemask int64 + Pageshift int64 + Npages int64 + Free int64 + Active int64 + Inactive int64 + Paging int64 + Wired int64 + Zeropages int64 + Reserve_pagedaemon int64 + Reserve_kernel int64 + Freemin int64 + Freetarg int64 + Inactarg int64 + Wiredmax int64 + Nswapdev int64 + Swpages int64 + Swpginuse int64 + Swpgonly int64 + Nswget int64 + Unused1 int64 + Cpuhit int64 + Cpumiss int64 + Faults int64 + Traps int64 + Intrs int64 + Swtch int64 + Softs int64 + Syscalls int64 + Pageins int64 + Swapins int64 + Swapouts int64 + Pgswapin int64 + Pgswapout int64 + Forks int64 + Forks_ppwait int64 + Forks_sharevm int64 + Pga_zerohit int64 + Pga_zeromiss int64 + Zeroaborts int64 + Fltnoram int64 + Fltnoanon int64 + Fltpgwait int64 + Fltpgrele int64 + Fltrelck int64 + Fltrelckok int64 + Fltanget int64 + Fltanretry int64 + Fltamcopy int64 + Fltnamap int64 + Fltnomap int64 + Fltlget int64 + Fltget int64 + Flt_anon int64 + Flt_acow int64 + Flt_obj int64 + Flt_prcopy int64 + Flt_przero int64 + Pdwoke int64 + Pdrevs int64 + Unused4 int64 + Pdfreed int64 + Pdscans int64 + Pdanscan int64 + Pdobscan int64 + Pdreact int64 + Pdbusy int64 + Pdpageouts int64 + Pdpending int64 + Pddeact int64 + Anonpages int64 + Filepages int64 + Execpages int64 + Colorhit int64 + Colormiss int64 + Ncolors int64 + Bootpages int64 + Poolpages int64 +} + const SizeofClockinfo = 0x14 type Clockinfo struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go index c844e7096..11121151c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go @@ -499,6 +499,90 @@ type Utsname struct { Machine [256]byte } +const SizeofUvmexp = 0x278 + +type Uvmexp struct { + Pagesize int64 + Pagemask int64 + Pageshift int64 + Npages int64 + Free int64 + Active int64 + Inactive int64 + Paging int64 + Wired int64 + Zeropages int64 + Reserve_pagedaemon int64 + Reserve_kernel int64 + Freemin int64 + Freetarg int64 + Inactarg int64 + Wiredmax int64 + Nswapdev int64 + Swpages int64 + Swpginuse int64 + Swpgonly int64 + Nswget int64 + Unused1 int64 + Cpuhit int64 + Cpumiss int64 + Faults int64 + Traps int64 + Intrs int64 + Swtch int64 + Softs int64 + Syscalls int64 + Pageins int64 + Swapins int64 + Swapouts int64 + Pgswapin int64 + Pgswapout int64 + Forks int64 + Forks_ppwait int64 + Forks_sharevm int64 + Pga_zerohit int64 + Pga_zeromiss int64 + Zeroaborts int64 + Fltnoram int64 + Fltnoanon 
int64 + Fltpgwait int64 + Fltpgrele int64 + Fltrelck int64 + Fltrelckok int64 + Fltanget int64 + Fltanretry int64 + Fltamcopy int64 + Fltnamap int64 + Fltnomap int64 + Fltlget int64 + Fltget int64 + Flt_anon int64 + Flt_acow int64 + Flt_obj int64 + Flt_prcopy int64 + Flt_przero int64 + Pdwoke int64 + Pdrevs int64 + Unused4 int64 + Pdfreed int64 + Pdscans int64 + Pdanscan int64 + Pdobscan int64 + Pdreact int64 + Pdbusy int64 + Pdpageouts int64 + Pdpending int64 + Pddeact int64 + Anonpages int64 + Filepages int64 + Execpages int64 + Colorhit int64 + Colormiss int64 + Ncolors int64 + Bootpages int64 + Poolpages int64 +} + const SizeofClockinfo = 0x14 type Clockinfo struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index baf5fe650..26eba23b7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -58,22 +58,22 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Mode uint32 - Dev int32 - Ino uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Rdev int32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize uint32 - Flags uint32 - Gen uint32 - X__st_birthtim Timespec + Mode uint32 + Dev int32 + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev int32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + _ Timespec } type Statfs_t struct { @@ -94,11 +94,11 @@ type Statfs_t struct { F_namemax uint32 F_owner uint32 F_ctime uint64 - F_fstypename [16]int8 - F_mntonname [90]int8 - F_mntfromname [90]int8 - F_mntfromspec [90]int8 - Pad_cgo_0 [2]byte + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte + _ [2]byte Mount_info [160]byte } @@ -111,13 +111,13 @@ type Flock_t struct { } type Dirent struct { - Fileno uint64 - Off int64 - Reclen uint16 - Type uint8 - Namlen uint8 - X__d_padding [4]uint8 - Name [256]int8 + Fileno uint64 + Off int64 + Reclen uint16 + Type uint8 + Namlen uint8 + _ [4]uint8 + Name [256]int8 } type Fsid struct { @@ -262,8 +262,8 @@ type FdSet struct { } const ( - SizeofIfMsghdr = 0xec - SizeofIfData = 0xd4 + SizeofIfMsghdr = 0xa0 + SizeofIfData = 0x88 SizeofIfaMsghdr = 0x18 SizeofIfAnnounceMsghdr = 0x1a SizeofRtMsghdr = 0x60 @@ -292,7 +292,7 @@ type IfData struct { Link_state uint8 Mtu uint32 Metric uint32 - Pad uint32 + Rdomain uint32 Baudrate uint64 Ipackets uint64 Ierrors uint64 @@ -304,10 +304,10 @@ type IfData struct { Imcasts uint64 Omcasts uint64 Iqdrops uint64 + Oqdrops uint64 Noproto uint64 Capabilities uint32 Lastchange Timeval - Mclpool [7]Mclpool } type IfaMsghdr struct { @@ -368,20 +368,12 @@ type RtMetrics struct { Pad uint32 } -type Mclpool struct { - Grown int32 - Alive uint16 - Hwm uint16 - Cwm uint16 - Lwm uint16 -} - const ( SizeofBpfVersion = 0x4 SizeofBpfStat = 0x8 SizeofBpfProgram = 0x8 SizeofBpfInsn = 0x8 - SizeofBpfHdr = 0x14 + SizeofBpfHdr = 0x18 ) type BpfVersion struct { @@ -407,11 +399,14 @@ type BpfInsn struct { } type BpfHdr struct { - Tstamp BpfTimeval - Caplen uint32 - Datalen uint32 - Hdrlen uint16 - Pad_cgo_0 [2]byte + Tstamp BpfTimeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Ifidx uint16 + Flowid uint16 + Flags uint8 + Drops uint8 } type BpfTimeval struct { @@ -488,7 +483,7 @@ type Uvmexp struct { Zeropages int32 Reserve_pagedaemon int32 Reserve_kernel int32 - Anonpages int32 + Unused01 int32 Vnodepages int32 Vtextpages int32 Freemin 
int32 @@ -507,8 +502,8 @@ type Uvmexp struct { Swpgonly int32 Nswget int32 Nanon int32 - Nanonneeded int32 - Nfreeanon int32 + Unused05 int32 + Unused06 int32 Faults int32 Traps int32 Intrs int32 @@ -516,8 +511,8 @@ type Uvmexp struct { Softs int32 Syscalls int32 Pageins int32 - Obsolete_swapins int32 - Obsolete_swapouts int32 + Unused07 int32 + Unused08 int32 Pgswapin int32 Pgswapout int32 Forks int32 @@ -525,7 +520,7 @@ type Uvmexp struct { Forks_sharevm int32 Pga_zerohit int32 Pga_zeromiss int32 - Zeroaborts int32 + Unused09 int32 Fltnoram int32 Fltnoanon int32 Fltnoamap int32 @@ -557,9 +552,9 @@ type Uvmexp struct { Pdpageouts int32 Pdpending int32 Pddeact int32 - Pdreanon int32 - Pdrevnode int32 - Pdrevtext int32 + Unused11 int32 + Unused12 int32 + Unused13 int32 Fpswtch int32 Kmapent int32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index e21ae8ecf..5a5479886 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -73,7 +73,6 @@ type Stat_t struct { Blksize int32 Flags uint32 Gen uint32 - _ [4]byte _ Timespec } @@ -81,7 +80,6 @@ type Statfs_t struct { F_flags uint32 F_bsize uint32 F_iosize uint32 - _ [4]byte F_blocks uint64 F_bfree uint64 F_bavail int64 @@ -96,10 +94,10 @@ type Statfs_t struct { F_namemax uint32 F_owner uint32 F_ctime uint64 - F_fstypename [16]int8 - F_mntonname [90]int8 - F_mntfromname [90]int8 - F_mntfromspec [90]int8 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte _ [2]byte Mount_info [160]byte } @@ -200,10 +198,8 @@ type IPv6Mreq struct { type Msghdr struct { Name *byte Namelen uint32 - _ [4]byte Iov *Iovec Iovlen uint32 - _ [4]byte Control *byte Controllen uint32 Flags int32 @@ -311,7 +307,6 @@ type IfData struct { Oqdrops uint64 Noproto uint64 Capabilities uint32 - _ [4]byte Lastchange Timeval } @@ -373,14 +368,12 @@ type RtMetrics struct { Pad uint32 } -type Mclpool struct{} - const ( SizeofBpfVersion = 0x4 SizeofBpfStat = 0x8 SizeofBpfProgram = 0x10 SizeofBpfInsn = 0x8 - SizeofBpfHdr = 0x14 + SizeofBpfHdr = 0x18 ) type BpfVersion struct { @@ -395,7 +388,6 @@ type BpfStat struct { type BpfProgram struct { Len uint32 - _ [4]byte Insns *BpfInsn } @@ -411,7 +403,10 @@ type BpfHdr struct { Caplen uint32 Datalen uint32 Hdrlen uint16 - _ [2]byte + Ifidx uint16 + Flowid uint16 + Flags uint8 + Drops uint8 } type BpfTimeval struct { @@ -488,7 +483,7 @@ type Uvmexp struct { Zeropages int32 Reserve_pagedaemon int32 Reserve_kernel int32 - Anonpages int32 + Unused01 int32 Vnodepages int32 Vtextpages int32 Freemin int32 @@ -507,8 +502,8 @@ type Uvmexp struct { Swpgonly int32 Nswget int32 Nanon int32 - Nanonneeded int32 - Nfreeanon int32 + Unused05 int32 + Unused06 int32 Faults int32 Traps int32 Intrs int32 @@ -516,8 +511,8 @@ type Uvmexp struct { Softs int32 Syscalls int32 Pageins int32 - Obsolete_swapins int32 - Obsolete_swapouts int32 + Unused07 int32 + Unused08 int32 Pgswapin int32 Pgswapout int32 Forks int32 @@ -525,7 +520,7 @@ type Uvmexp struct { Forks_sharevm int32 Pga_zerohit int32 Pga_zeromiss int32 - Zeroaborts int32 + Unused09 int32 Fltnoram int32 Fltnoanon int32 Fltnoamap int32 @@ -557,9 +552,9 @@ type Uvmexp struct { Pdpageouts int32 Pdpending int32 Pddeact int32 - Pdreanon int32 - Pdrevnode int32 - Pdrevtext int32 + Unused11 int32 + Unused12 int32 + Unused13 int32 Fpswtch int32 Kmapent int32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go 
b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index f190651cd..be58c4e1f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -98,10 +98,10 @@ type Statfs_t struct { F_namemax uint32 F_owner uint32 F_ctime uint64 - F_fstypename [16]int8 - F_mntonname [90]int8 - F_mntfromname [90]int8 - F_mntfromspec [90]int8 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte _ [2]byte Mount_info [160]byte } @@ -375,14 +375,12 @@ type RtMetrics struct { Pad uint32 } -type Mclpool struct{} - const ( SizeofBpfVersion = 0x4 SizeofBpfStat = 0x8 SizeofBpfProgram = 0x8 SizeofBpfInsn = 0x8 - SizeofBpfHdr = 0x14 + SizeofBpfHdr = 0x18 ) type BpfVersion struct { @@ -412,7 +410,10 @@ type BpfHdr struct { Caplen uint32 Datalen uint32 Hdrlen uint16 - _ [2]byte + Ifidx uint16 + Flowid uint16 + Flags uint8 + Drops uint8 } type BpfTimeval struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go index 84747c582..52338266c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go @@ -94,10 +94,10 @@ type Statfs_t struct { F_namemax uint32 F_owner uint32 F_ctime uint64 - F_fstypename [16]int8 - F_mntonname [90]int8 - F_mntfromname [90]int8 - F_mntfromspec [90]int8 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte _ [2]byte Mount_info [160]byte } @@ -368,14 +368,12 @@ type RtMetrics struct { Pad uint32 } -type Mclpool struct{} - const ( SizeofBpfVersion = 0x4 SizeofBpfStat = 0x8 SizeofBpfProgram = 0x10 SizeofBpfInsn = 0x8 - SizeofBpfHdr = 0x14 + SizeofBpfHdr = 0x18 ) type BpfVersion struct { @@ -405,7 +403,10 @@ type BpfHdr struct { Caplen uint32 Datalen uint32 Hdrlen uint16 - _ [2]byte + Ifidx uint16 + Flowid uint16 + Flags uint8 + Drops uint8 } type BpfTimeval struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go index ac5c8b637..605cfdb12 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go @@ -94,10 +94,10 @@ type Statfs_t struct { F_namemax uint32 F_owner uint32 F_ctime uint64 - F_fstypename [16]int8 - F_mntonname [90]int8 - F_mntfromname [90]int8 - F_mntfromspec [90]int8 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte _ [2]byte Mount_info [160]byte } @@ -368,14 +368,12 @@ type RtMetrics struct { Pad uint32 } -type Mclpool struct{} - const ( SizeofBpfVersion = 0x4 SizeofBpfStat = 0x8 SizeofBpfProgram = 0x10 SizeofBpfInsn = 0x8 - SizeofBpfHdr = 0x14 + SizeofBpfHdr = 0x18 ) type BpfVersion struct { @@ -405,7 +403,10 @@ type BpfHdr struct { Caplen uint32 Datalen uint32 Hdrlen uint16 - _ [2]byte + Ifidx uint16 + Flowid uint16 + Flags uint8 + Drops uint8 } type BpfTimeval struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go new file mode 100644 index 000000000..d6724c010 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go @@ -0,0 +1,571 @@ +// cgo -godefs -- -fsigned-char types_openbsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +//go:build ppc64 && openbsd +// +build ppc64,openbsd + +package unix + +const ( + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Mode uint32 + Dev int32 + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev int32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + _ Timespec +} + +type Statfs_t struct { + F_flags uint32 + F_bsize uint32 + F_iosize uint32 + F_blocks uint64 + F_bfree uint64 + F_bavail int64 + F_files uint64 + F_ffree uint64 + F_favail int64 + F_syncwrites uint64 + F_syncreads uint64 + F_asyncwrites uint64 + F_asyncreads uint64 + F_fsid Fsid + F_namemax uint32 + F_owner uint32 + F_ctime uint64 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte + _ [2]byte + Mount_info [160]byte +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 +} + +type Dirent struct { + Fileno uint64 + Off int64 + Reclen uint16 + Type uint8 + Namlen uint8 + _ [4]uint8 + Name [256]int8 +} + +type Fsid struct { + Val [2]int32 +} + +const ( + PathMax = 0x400 +) + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [24]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x20 + SizeofLinger = 0x8 + SizeofIovec = 0x10 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type 
Kevent_t struct { + Ident uint64 + Filter int16 + Flags uint16 + Fflags uint32 + Data int64 + Udata *byte +} + +type FdSet struct { + Bits [32]uint32 +} + +const ( + SizeofIfMsghdr = 0xa8 + SizeofIfData = 0x90 + SizeofIfaMsghdr = 0x18 + SizeofIfAnnounceMsghdr = 0x1a + SizeofRtMsghdr = 0x60 + SizeofRtMetrics = 0x38 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Pad1 uint8 + Pad2 uint8 + Addrs int32 + Flags int32 + Xflags int32 + Data IfData +} + +type IfData struct { + Type uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Mtu uint32 + Metric uint32 + Rdomain uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Oqdrops uint64 + Noproto uint64 + Capabilities uint32 + Lastchange Timeval +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Pad1 uint8 + Pad2 uint8 + Addrs int32 + Flags int32 + Metric int32 +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + What uint16 + Name [16]int8 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Priority uint8 + Mpls uint8 + Addrs int32 + Flags int32 + Fmask int32 + Pid int32 + Seq int32 + Errno int32 + Inits uint32 + Rmx RtMetrics +} + +type RtMetrics struct { + Pksent uint64 + Expire int64 + Locks uint32 + Mtu uint32 + Refcnt uint32 + Hopcount uint32 + Recvpipe uint32 + Sendpipe uint32 + Ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Pad uint32 +} + +type Mclpool struct{} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfProgram = 0x10 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x18 +) + +type BpfVersion struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfProgram struct { + Len uint32 + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp BpfTimeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Ifidx uint16 + Flowid uint16 + Flags uint8 + Drops uint8 +} + +type BpfTimeval struct { + Sec uint32 + Usec uint32 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed int32 + Ospeed int32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +const ( + AT_FDCWD = -0x64 + AT_EACCESS = 0x1 + AT_SYMLINK_NOFOLLOW = 0x2 + AT_SYMLINK_FOLLOW = 0x4 + AT_REMOVEDIR = 0x8 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + +type Sigset_t uint32 + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} + +const SizeofUvmexp = 0x158 + +type Uvmexp struct { + Pagesize int32 + Pagemask int32 + Pageshift int32 + Npages int32 + Free int32 + Active int32 + Inactive int32 + Paging int32 + Wired int32 + Zeropages int32 + Reserve_pagedaemon int32 + Reserve_kernel int32 + Unused01 int32 + Vnodepages int32 + Vtextpages int32 + Freemin int32 + Freetarg int32 + Inactarg int32 + Wiredmax int32 + Anonmin int32 + Vtextmin int32 + 
Vnodemin int32 + Anonminpct int32 + Vtextminpct int32 + Vnodeminpct int32 + Nswapdev int32 + Swpages int32 + Swpginuse int32 + Swpgonly int32 + Nswget int32 + Nanon int32 + Unused05 int32 + Unused06 int32 + Faults int32 + Traps int32 + Intrs int32 + Swtch int32 + Softs int32 + Syscalls int32 + Pageins int32 + Unused07 int32 + Unused08 int32 + Pgswapin int32 + Pgswapout int32 + Forks int32 + Forks_ppwait int32 + Forks_sharevm int32 + Pga_zerohit int32 + Pga_zeromiss int32 + Unused09 int32 + Fltnoram int32 + Fltnoanon int32 + Fltnoamap int32 + Fltpgwait int32 + Fltpgrele int32 + Fltrelck int32 + Fltrelckok int32 + Fltanget int32 + Fltanretry int32 + Fltamcopy int32 + Fltnamap int32 + Fltnomap int32 + Fltlget int32 + Fltget int32 + Flt_anon int32 + Flt_acow int32 + Flt_obj int32 + Flt_prcopy int32 + Flt_przero int32 + Pdwoke int32 + Pdrevs int32 + Pdswout int32 + Pdfreed int32 + Pdscans int32 + Pdanscan int32 + Pdobscan int32 + Pdreact int32 + Pdbusy int32 + Pdpageouts int32 + Pdpending int32 + Pddeact int32 + Unused11 int32 + Unused12 int32 + Unused13 int32 + Fpswtch int32 + Kmapent int32 +} + +const SizeofClockinfo = 0x10 + +type Clockinfo struct { + Hz int32 + Tick int32 + Stathz int32 + Profhz int32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go new file mode 100644 index 000000000..ddfd27a43 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go @@ -0,0 +1,571 @@ +// cgo -godefs -- -fsigned-char types_openbsd.go | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build riscv64 && openbsd +// +build riscv64,openbsd + +package unix + +const ( + SizeofPtr = 0x8 + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLong = 0x8 + SizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type Stat_t struct { + Mode uint32 + Dev int32 + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev int32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + _ Timespec +} + +type Statfs_t struct { + F_flags uint32 + F_bsize uint32 + F_iosize uint32 + F_blocks uint64 + F_bfree uint64 + F_bavail int64 + F_files uint64 + F_ffree uint64 + F_favail int64 + F_syncwrites uint64 + F_syncreads uint64 + F_asyncwrites uint64 + F_asyncreads uint64 + F_fsid Fsid + F_namemax uint32 + F_owner uint32 + F_ctime uint64 + F_fstypename [16]byte + F_mntonname [90]byte + F_mntfromname [90]byte + F_mntfromspec [90]byte + _ [2]byte + Mount_info [160]byte +} + +type Flock_t struct { + Start int64 + Len int64 + Pid int32 + Type int16 + Whence int16 +} + +type Dirent struct { + Fileno uint64 + Off int64 + Reclen uint16 + Type uint8 + Namlen uint8 + _ [4]uint8 + Name [256]int8 +} + +type Fsid struct { + Val [2]int32 +} + +const ( + PathMax = 0x400 +) + +type RawSockaddrInet4 struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type RawSockaddrInet6 struct { + Len uint8 + Family uint8 + 
Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Len uint8 + Family uint8 + Path [104]int8 +} + +type RawSockaddrDatalink struct { + Len uint8 + Family uint8 + Index uint16 + Type uint8 + Nlen uint8 + Alen uint8 + Slen uint8 + Data [24]int8 +} + +type RawSockaddr struct { + Len uint8 + Family uint8 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [92]int8 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type Iovec struct { + Base *byte + Len uint64 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type Msghdr struct { + Name *byte + Namelen uint32 + Iov *Iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type Cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Filt [8]uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x6c + SizeofSockaddrUnix = 0x6a + SizeofSockaddrDatalink = 0x20 + SizeofLinger = 0x8 + SizeofIovec = 0x10 + SizeofIPMreq = 0x8 + SizeofIPv6Mreq = 0x14 + SizeofMsghdr = 0x30 + SizeofCmsghdr = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 +) + +const ( + PTRACE_TRACEME = 0x0 + PTRACE_CONT = 0x7 + PTRACE_KILL = 0x8 +) + +type Kevent_t struct { + Ident uint64 + Filter int16 + Flags uint16 + Fflags uint32 + Data int64 + Udata *byte +} + +type FdSet struct { + Bits [32]uint32 +} + +const ( + SizeofIfMsghdr = 0xa8 + SizeofIfData = 0x90 + SizeofIfaMsghdr = 0x18 + SizeofIfAnnounceMsghdr = 0x1a + SizeofRtMsghdr = 0x60 + SizeofRtMetrics = 0x38 +) + +type IfMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Pad1 uint8 + Pad2 uint8 + Addrs int32 + Flags int32 + Xflags int32 + Data IfData +} + +type IfData struct { + Type uint8 + Addrlen uint8 + Hdrlen uint8 + Link_state uint8 + Mtu uint32 + Metric uint32 + Rdomain uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Oqdrops uint64 + Noproto uint64 + Capabilities uint32 + Lastchange Timeval +} + +type IfaMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Pad1 uint8 + Pad2 uint8 + Addrs int32 + Flags int32 + Metric int32 +} + +type IfAnnounceMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + What uint16 + Name [16]int8 +} + +type RtMsghdr struct { + Msglen uint16 + Version uint8 + Type uint8 + Hdrlen uint16 + Index uint16 + Tableid uint16 + Priority uint8 + Mpls uint8 + Addrs int32 + Flags int32 + Fmask int32 + Pid int32 + Seq int32 + Errno int32 + Inits uint32 + Rmx RtMetrics +} + +type RtMetrics struct { + Pksent uint64 + Expire int64 + Locks uint32 + Mtu uint32 + Refcnt uint32 + Hopcount uint32 + Recvpipe uint32 + Sendpipe uint32 + Ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Pad uint32 +} + +type Mclpool struct{} + +const ( + SizeofBpfVersion = 0x4 + SizeofBpfStat = 0x8 + SizeofBpfProgram = 0x10 + SizeofBpfInsn = 0x8 + SizeofBpfHdr = 0x18 +) + +type BpfVersion 
struct { + Major uint16 + Minor uint16 +} + +type BpfStat struct { + Recv uint32 + Drop uint32 +} + +type BpfProgram struct { + Len uint32 + Insns *BpfInsn +} + +type BpfInsn struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type BpfHdr struct { + Tstamp BpfTimeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + Ifidx uint16 + Flowid uint16 + Flags uint8 + Drops uint8 +} + +type BpfTimeval struct { + Sec uint32 + Usec uint32 +} + +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed int32 + Ospeed int32 +} + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +const ( + AT_FDCWD = -0x64 + AT_EACCESS = 0x1 + AT_SYMLINK_NOFOLLOW = 0x2 + AT_SYMLINK_FOLLOW = 0x4 + AT_REMOVEDIR = 0x8 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLERR = 0x8 + POLLHUP = 0x10 + POLLIN = 0x1 + POLLNVAL = 0x20 + POLLOUT = 0x4 + POLLPRI = 0x2 + POLLRDBAND = 0x80 + POLLRDNORM = 0x40 + POLLWRBAND = 0x100 + POLLWRNORM = 0x4 +) + +type Sigset_t uint32 + +type Utsname struct { + Sysname [256]byte + Nodename [256]byte + Release [256]byte + Version [256]byte + Machine [256]byte +} + +const SizeofUvmexp = 0x158 + +type Uvmexp struct { + Pagesize int32 + Pagemask int32 + Pageshift int32 + Npages int32 + Free int32 + Active int32 + Inactive int32 + Paging int32 + Wired int32 + Zeropages int32 + Reserve_pagedaemon int32 + Reserve_kernel int32 + Unused01 int32 + Vnodepages int32 + Vtextpages int32 + Freemin int32 + Freetarg int32 + Inactarg int32 + Wiredmax int32 + Anonmin int32 + Vtextmin int32 + Vnodemin int32 + Anonminpct int32 + Vtextminpct int32 + Vnodeminpct int32 + Nswapdev int32 + Swpages int32 + Swpginuse int32 + Swpgonly int32 + Nswget int32 + Nanon int32 + Unused05 int32 + Unused06 int32 + Faults int32 + Traps int32 + Intrs int32 + Swtch int32 + Softs int32 + Syscalls int32 + Pageins int32 + Unused07 int32 + Unused08 int32 + Pgswapin int32 + Pgswapout int32 + Forks int32 + Forks_ppwait int32 + Forks_sharevm int32 + Pga_zerohit int32 + Pga_zeromiss int32 + Unused09 int32 + Fltnoram int32 + Fltnoanon int32 + Fltnoamap int32 + Fltpgwait int32 + Fltpgrele int32 + Fltrelck int32 + Fltrelckok int32 + Fltanget int32 + Fltanretry int32 + Fltamcopy int32 + Fltnamap int32 + Fltnomap int32 + Fltlget int32 + Fltget int32 + Flt_anon int32 + Flt_acow int32 + Flt_obj int32 + Flt_prcopy int32 + Flt_przero int32 + Pdwoke int32 + Pdrevs int32 + Pdswout int32 + Pdfreed int32 + Pdscans int32 + Pdanscan int32 + Pdobscan int32 + Pdreact int32 + Pdbusy int32 + Pdpageouts int32 + Pdpending int32 + Pddeact int32 + Unused11 int32 + Unused12 int32 + Unused13 int32 + Fpswtch int32 + Kmapent int32 +} + +const SizeofClockinfo = 0x10 + +type Clockinfo struct { + Hz int32 + Tick int32 + Stathz int32 + Profhz int32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index ad4aad279..0400747c6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -178,7 +178,7 @@ type Linger struct { } type Iovec struct { - Base *int8 + Base *byte Len uint64 } @@ -480,3 +480,38 @@ const ( MOUNTEDOVER = 0x40000000 FILE_EXCEPTION = 0x60000070 ) + +const ( + TUNNEWPPA = 0x540001 + TUNSETPPA = 0x540002 + + I_STR = 0x5308 + I_POP = 0x5303 + I_PUSH = 0x5302 + I_LINK = 0x530c + I_UNLINK = 0x530d + I_PLINK = 0x5316 + I_PUNLINK = 0x5317 + + IF_UNITSEL = -0x7ffb8cca +) + +type strbuf struct { 
+ Maxlen int32 + Len int32 + Buf *int8 +} + +type Strioctl struct { + Cmd int32 + Timout int32 + Len int32 + Dp *int8 +} + +type Lifreq struct { + Name [32]int8 + Lifru1 [4]byte + Type uint32 + Lifru [336]byte +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go index 4ab638cb9..aec1efcb3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -339,7 +339,7 @@ type Statfs_t struct { Flags uint64 } -type Dirent struct { +type direntLE struct { Reclen uint16 Namlen uint16 Ino uint32 @@ -347,6 +347,15 @@ type Dirent struct { Name [256]byte } +type Dirent struct { + Ino uint64 + Off int64 + Reclen uint16 + Type uint8 + Name [256]uint8 + _ [5]byte +} + type FdSet struct { Bits [64]int32 } diff --git a/vendor/golang.org/x/sys/windows/aliases.go b/vendor/golang.org/x/sys/windows/aliases.go new file mode 100644 index 000000000..a20ebea63 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/aliases.go @@ -0,0 +1,13 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows && go1.9 +// +build windows,go1.9 + +package windows + +import "syscall" + +type Errno = syscall.Errno +type SysProcAttr = syscall.SysProcAttr diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go new file mode 100644 index 000000000..115341fba --- /dev/null +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -0,0 +1,416 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +import ( + "sync" + "sync/atomic" + "syscall" + "unsafe" +) + +// We need to use LoadLibrary and GetProcAddress from the Go runtime, because +// the these symbols are loaded by the system linker and are required to +// dynamically load additional symbols. Note that in the Go runtime, these +// return syscall.Handle and syscall.Errno, but these are the same, in fact, +// as windows.Handle and windows.Errno, and we intend to keep these the same. + +//go:linkname syscall_loadlibrary syscall.loadlibrary +func syscall_loadlibrary(filename *uint16) (handle Handle, err Errno) + +//go:linkname syscall_getprocaddress syscall.getprocaddress +func syscall_getprocaddress(handle Handle, procname *uint8) (proc uintptr, err Errno) + +// DLLError describes reasons for DLL load failures. +type DLLError struct { + Err error + ObjName string + Msg string +} + +func (e *DLLError) Error() string { return e.Msg } + +func (e *DLLError) Unwrap() error { return e.Err } + +// A DLL implements access to a single DLL. +type DLL struct { + Name string + Handle Handle +} + +// LoadDLL loads DLL file into memory. +// +// Warning: using LoadDLL without an absolute path name is subject to +// DLL preloading attacks. To safely load a system DLL, use LazyDLL +// with System set to true, or use LoadLibraryEx directly. +func LoadDLL(name string) (dll *DLL, err error) { + namep, err := UTF16PtrFromString(name) + if err != nil { + return nil, err + } + h, e := syscall_loadlibrary(namep) + if e != 0 { + return nil, &DLLError{ + Err: e, + ObjName: name, + Msg: "Failed to load " + name + ": " + e.Error(), + } + } + d := &DLL{ + Name: name, + Handle: h, + } + return d, nil +} + +// MustLoadDLL is like LoadDLL but panics if load operation failes. 
+func MustLoadDLL(name string) *DLL { + d, e := LoadDLL(name) + if e != nil { + panic(e) + } + return d +} + +// FindProc searches DLL d for procedure named name and returns *Proc +// if found. It returns an error if search fails. +func (d *DLL) FindProc(name string) (proc *Proc, err error) { + namep, err := BytePtrFromString(name) + if err != nil { + return nil, err + } + a, e := syscall_getprocaddress(d.Handle, namep) + if e != 0 { + return nil, &DLLError{ + Err: e, + ObjName: name, + Msg: "Failed to find " + name + " procedure in " + d.Name + ": " + e.Error(), + } + } + p := &Proc{ + Dll: d, + Name: name, + addr: a, + } + return p, nil +} + +// MustFindProc is like FindProc but panics if search fails. +func (d *DLL) MustFindProc(name string) *Proc { + p, e := d.FindProc(name) + if e != nil { + panic(e) + } + return p +} + +// FindProcByOrdinal searches DLL d for procedure by ordinal and returns *Proc +// if found. It returns an error if search fails. +func (d *DLL) FindProcByOrdinal(ordinal uintptr) (proc *Proc, err error) { + a, e := GetProcAddressByOrdinal(d.Handle, ordinal) + name := "#" + itoa(int(ordinal)) + if e != nil { + return nil, &DLLError{ + Err: e, + ObjName: name, + Msg: "Failed to find " + name + " procedure in " + d.Name + ": " + e.Error(), + } + } + p := &Proc{ + Dll: d, + Name: name, + addr: a, + } + return p, nil +} + +// MustFindProcByOrdinal is like FindProcByOrdinal but panics if search fails. +func (d *DLL) MustFindProcByOrdinal(ordinal uintptr) *Proc { + p, e := d.FindProcByOrdinal(ordinal) + if e != nil { + panic(e) + } + return p +} + +// Release unloads DLL d from memory. +func (d *DLL) Release() (err error) { + return FreeLibrary(d.Handle) +} + +// A Proc implements access to a procedure inside a DLL. +type Proc struct { + Dll *DLL + Name string + addr uintptr +} + +// Addr returns the address of the procedure represented by p. +// The return value can be passed to Syscall to run the procedure. +func (p *Proc) Addr() uintptr { + return p.addr +} + +//go:uintptrescapes + +// Call executes procedure p with arguments a. It will panic, if more than 15 arguments +// are supplied. +// +// The returned error is always non-nil, constructed from the result of GetLastError. +// Callers must inspect the primary return value to decide whether an error occurred +// (according to the semantics of the specific function being called) before consulting +// the error. The error will be guaranteed to contain windows.Errno. 
+func (p *Proc) Call(a ...uintptr) (r1, r2 uintptr, lastErr error) { + switch len(a) { + case 0: + return syscall.Syscall(p.Addr(), uintptr(len(a)), 0, 0, 0) + case 1: + return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], 0, 0) + case 2: + return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], a[1], 0) + case 3: + return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], a[1], a[2]) + case 4: + return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], 0, 0) + case 5: + return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], 0) + case 6: + return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5]) + case 7: + return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], 0, 0) + case 8: + return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], 0) + case 9: + return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8]) + case 10: + return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], 0, 0) + case 11: + return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], 0) + case 12: + return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11]) + case 13: + return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], 0, 0) + case 14: + return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], 0) + case 15: + return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14]) + default: + panic("Call " + p.Name + " with too many arguments " + itoa(len(a)) + ".") + } +} + +// A LazyDLL implements access to a single DLL. +// It will delay the load of the DLL until the first +// call to its Handle method or to one of its +// LazyProc's Addr method. +type LazyDLL struct { + Name string + + // System determines whether the DLL must be loaded from the + // Windows System directory, bypassing the normal DLL search + // path. + System bool + + mu sync.Mutex + dll *DLL // non nil once DLL is loaded +} + +// Load loads DLL file d.Name into memory. It returns an error if fails. +// Load will not try to load DLL, if it is already loaded into memory. +func (d *LazyDLL) Load() error { + // Non-racy version of: + // if d.dll != nil { + if atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&d.dll))) != nil { + return nil + } + d.mu.Lock() + defer d.mu.Unlock() + if d.dll != nil { + return nil + } + + // kernel32.dll is special, since it's where LoadLibraryEx comes from. + // The kernel already special-cases its name, so it's always + // loaded from system32. + var dll *DLL + var err error + if d.Name == "kernel32.dll" { + dll, err = LoadDLL(d.Name) + } else { + dll, err = loadLibraryEx(d.Name, d.System) + } + if err != nil { + return err + } + + // Non-racy version of: + // d.dll = dll + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&d.dll)), unsafe.Pointer(dll)) + return nil +} + +// mustLoad is like Load but panics if search fails. +func (d *LazyDLL) mustLoad() { + e := d.Load() + if e != nil { + panic(e) + } +} + +// Handle returns d's module handle. 
+func (d *LazyDLL) Handle() uintptr { + d.mustLoad() + return uintptr(d.dll.Handle) +} + +// NewProc returns a LazyProc for accessing the named procedure in the DLL d. +func (d *LazyDLL) NewProc(name string) *LazyProc { + return &LazyProc{l: d, Name: name} +} + +// NewLazyDLL creates new LazyDLL associated with DLL file. +func NewLazyDLL(name string) *LazyDLL { + return &LazyDLL{Name: name} +} + +// NewLazySystemDLL is like NewLazyDLL, but will only +// search Windows System directory for the DLL if name is +// a base name (like "advapi32.dll"). +func NewLazySystemDLL(name string) *LazyDLL { + return &LazyDLL{Name: name, System: true} +} + +// A LazyProc implements access to a procedure inside a LazyDLL. +// It delays the lookup until the Addr method is called. +type LazyProc struct { + Name string + + mu sync.Mutex + l *LazyDLL + proc *Proc +} + +// Find searches DLL for procedure named p.Name. It returns +// an error if search fails. Find will not search procedure, +// if it is already found and loaded into memory. +func (p *LazyProc) Find() error { + // Non-racy version of: + // if p.proc == nil { + if atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&p.proc))) == nil { + p.mu.Lock() + defer p.mu.Unlock() + if p.proc == nil { + e := p.l.Load() + if e != nil { + return e + } + proc, e := p.l.dll.FindProc(p.Name) + if e != nil { + return e + } + // Non-racy version of: + // p.proc = proc + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&p.proc)), unsafe.Pointer(proc)) + } + } + return nil +} + +// mustFind is like Find but panics if search fails. +func (p *LazyProc) mustFind() { + e := p.Find() + if e != nil { + panic(e) + } +} + +// Addr returns the address of the procedure represented by p. +// The return value can be passed to Syscall to run the procedure. +// It will panic if the procedure cannot be found. +func (p *LazyProc) Addr() uintptr { + p.mustFind() + return p.proc.Addr() +} + +//go:uintptrescapes + +// Call executes procedure p with arguments a. It will panic, if more than 15 arguments +// are supplied. It will also panic if the procedure cannot be found. +// +// The returned error is always non-nil, constructed from the result of GetLastError. +// Callers must inspect the primary return value to decide whether an error occurred +// (according to the semantics of the specific function being called) before consulting +// the error. The error will be guaranteed to contain windows.Errno. +func (p *LazyProc) Call(a ...uintptr) (r1, r2 uintptr, lastErr error) { + p.mustFind() + return p.proc.Call(a...) +} + +var canDoSearchSystem32Once struct { + sync.Once + v bool +} + +func initCanDoSearchSystem32() { + // https://msdn.microsoft.com/en-us/library/ms684179(v=vs.85).aspx says: + // "Windows 7, Windows Server 2008 R2, Windows Vista, and Windows + // Server 2008: The LOAD_LIBRARY_SEARCH_* flags are available on + // systems that have KB2533623 installed. To determine whether the + // flags are available, use GetProcAddress to get the address of the + // AddDllDirectory, RemoveDllDirectory, or SetDefaultDllDirectories + // function. If GetProcAddress succeeds, the LOAD_LIBRARY_SEARCH_* + // flags can be used with LoadLibraryEx." 
+ canDoSearchSystem32Once.v = (modkernel32.NewProc("AddDllDirectory").Find() == nil) +} + +func canDoSearchSystem32() bool { + canDoSearchSystem32Once.Do(initCanDoSearchSystem32) + return canDoSearchSystem32Once.v +} + +func isBaseName(name string) bool { + for _, c := range name { + if c == ':' || c == '/' || c == '\\' { + return false + } + } + return true +} + +// loadLibraryEx wraps the Windows LoadLibraryEx function. +// +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684179(v=vs.85).aspx +// +// If name is not an absolute path, LoadLibraryEx searches for the DLL +// in a variety of automatic locations unless constrained by flags. +// See: https://msdn.microsoft.com/en-us/library/ff919712%28VS.85%29.aspx +func loadLibraryEx(name string, system bool) (*DLL, error) { + loadDLL := name + var flags uintptr + if system { + if canDoSearchSystem32() { + flags = LOAD_LIBRARY_SEARCH_SYSTEM32 + } else if isBaseName(name) { + // WindowsXP or unpatched Windows machine + // trying to load "foo.dll" out of the system + // folder, but LoadLibraryEx doesn't support + // that yet on their system, so emulate it. + systemdir, err := GetSystemDirectory() + if err != nil { + return nil, err + } + loadDLL = systemdir + "\\" + name + } + } + h, err := LoadLibraryEx(loadDLL, 0, flags) + if err != nil { + return nil, err + } + return &DLL{Name: name, Handle: h}, nil +} + +type errString string + +func (s errString) Error() string { return string(s) } diff --git a/vendor/golang.org/x/sys/windows/empty.s b/vendor/golang.org/x/sys/windows/empty.s new file mode 100644 index 000000000..fdbbbcd31 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/empty.s @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.12 +// +build !go1.12 + +// This file is here to allow bodyless functions with go:linkname for Go 1.11 +// and earlier (see https://golang.org/issue/23311). diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go new file mode 100644 index 000000000..b8ad19250 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/env_windows.go @@ -0,0 +1,54 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Windows environment variables. + +package windows + +import ( + "syscall" + "unsafe" +) + +func Getenv(key string) (value string, found bool) { + return syscall.Getenv(key) +} + +func Setenv(key, value string) error { + return syscall.Setenv(key, value) +} + +func Clearenv() { + syscall.Clearenv() +} + +func Environ() []string { + return syscall.Environ() +} + +// Returns a default environment associated with the token, rather than the current +// process. If inheritExisting is true, then this environment also inherits the +// environment of the current process. 
+func (token Token) Environ(inheritExisting bool) (env []string, err error) { + var block *uint16 + err = CreateEnvironmentBlock(&block, token, inheritExisting) + if err != nil { + return nil, err + } + defer DestroyEnvironmentBlock(block) + blockp := unsafe.Pointer(block) + for { + entry := UTF16PtrToString((*uint16)(blockp)) + if len(entry) == 0 { + break + } + env = append(env, entry) + blockp = unsafe.Add(blockp, 2*(len(entry)+1)) + } + return env, nil +} + +func Unsetenv(key string) error { + return syscall.Unsetenv(key) +} diff --git a/vendor/golang.org/x/sys/windows/eventlog.go b/vendor/golang.org/x/sys/windows/eventlog.go new file mode 100644 index 000000000..2cd60645e --- /dev/null +++ b/vendor/golang.org/x/sys/windows/eventlog.go @@ -0,0 +1,21 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows +// +build windows + +package windows + +const ( + EVENTLOG_SUCCESS = 0 + EVENTLOG_ERROR_TYPE = 1 + EVENTLOG_WARNING_TYPE = 2 + EVENTLOG_INFORMATION_TYPE = 4 + EVENTLOG_AUDIT_SUCCESS = 8 + EVENTLOG_AUDIT_FAILURE = 16 +) + +//sys RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) [failretval==0] = advapi32.RegisterEventSourceW +//sys DeregisterEventSource(handle Handle) (err error) = advapi32.DeregisterEventSource +//sys ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) = advapi32.ReportEventW diff --git a/vendor/golang.org/x/sys/windows/exec_windows.go b/vendor/golang.org/x/sys/windows/exec_windows.go new file mode 100644 index 000000000..a52e0331d --- /dev/null +++ b/vendor/golang.org/x/sys/windows/exec_windows.go @@ -0,0 +1,183 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Fork, exec, wait, etc. + +package windows + +import ( + errorspkg "errors" + "unsafe" +) + +// EscapeArg rewrites command line argument s as prescribed +// in http://msdn.microsoft.com/en-us/library/ms880421. +// This function returns "" (2 double quotes) if s is empty. +// Alternatively, these transformations are done: +// - every back slash (\) is doubled, but only if immediately +// followed by double quote ("); +// - every double quote (") is escaped by back slash (\); +// - finally, s is wrapped with double quotes (arg -> "arg"), +// but only if there is space or tab inside s. 
+func EscapeArg(s string) string { + if len(s) == 0 { + return "\"\"" + } + n := len(s) + hasSpace := false + for i := 0; i < len(s); i++ { + switch s[i] { + case '"', '\\': + n++ + case ' ', '\t': + hasSpace = true + } + } + if hasSpace { + n += 2 + } + if n == len(s) { + return s + } + + qs := make([]byte, n) + j := 0 + if hasSpace { + qs[j] = '"' + j++ + } + slashes := 0 + for i := 0; i < len(s); i++ { + switch s[i] { + default: + slashes = 0 + qs[j] = s[i] + case '\\': + slashes++ + qs[j] = s[i] + case '"': + for ; slashes > 0; slashes-- { + qs[j] = '\\' + j++ + } + qs[j] = '\\' + j++ + qs[j] = s[i] + } + j++ + } + if hasSpace { + for ; slashes > 0; slashes-- { + qs[j] = '\\' + j++ + } + qs[j] = '"' + j++ + } + return string(qs[:j]) +} + +// ComposeCommandLine escapes and joins the given arguments suitable for use as a Windows command line, +// in CreateProcess's CommandLine argument, CreateService/ChangeServiceConfig's BinaryPathName argument, +// or any program that uses CommandLineToArgv. +func ComposeCommandLine(args []string) string { + var commandLine string + for i := range args { + if i > 0 { + commandLine += " " + } + commandLine += EscapeArg(args[i]) + } + return commandLine +} + +// DecomposeCommandLine breaks apart its argument command line into unescaped parts using CommandLineToArgv, +// as gathered from GetCommandLine, QUERY_SERVICE_CONFIG's BinaryPathName argument, or elsewhere that +// command lines are passed around. +// DecomposeCommandLine returns error if commandLine contains NUL. +func DecomposeCommandLine(commandLine string) ([]string, error) { + if len(commandLine) == 0 { + return []string{}, nil + } + utf16CommandLine, err := UTF16FromString(commandLine) + if err != nil { + return nil, errorspkg.New("string with NUL passed to DecomposeCommandLine") + } + var argc int32 + argv, err := CommandLineToArgv(&utf16CommandLine[0], &argc) + if err != nil { + return nil, err + } + defer LocalFree(Handle(unsafe.Pointer(argv))) + var args []string + for _, v := range (*argv)[:argc] { + args = append(args, UTF16ToString((*v)[:])) + } + return args, nil +} + +func CloseOnExec(fd Handle) { + SetHandleInformation(Handle(fd), HANDLE_FLAG_INHERIT, 0) +} + +// FullPath retrieves the full path of the specified file. +func FullPath(name string) (path string, err error) { + p, err := UTF16PtrFromString(name) + if err != nil { + return "", err + } + n := uint32(100) + for { + buf := make([]uint16, n) + n, err = GetFullPathName(p, uint32(len(buf)), &buf[0], nil) + if err != nil { + return "", err + } + if n <= uint32(len(buf)) { + return UTF16ToString(buf[:n]), nil + } + } +} + +// NewProcThreadAttributeList allocates a new ProcThreadAttributeListContainer, with the requested maximum number of attributes. +func NewProcThreadAttributeList(maxAttrCount uint32) (*ProcThreadAttributeListContainer, error) { + var size uintptr + err := initializeProcThreadAttributeList(nil, maxAttrCount, 0, &size) + if err != ERROR_INSUFFICIENT_BUFFER { + if err == nil { + return nil, errorspkg.New("unable to query buffer size from InitializeProcThreadAttributeList") + } + return nil, err + } + alloc, err := LocalAlloc(LMEM_FIXED, uint32(size)) + if err != nil { + return nil, err + } + // size is guaranteed to be ≥1 by InitializeProcThreadAttributeList. 
+ al := &ProcThreadAttributeListContainer{data: (*ProcThreadAttributeList)(unsafe.Pointer(alloc))} + err = initializeProcThreadAttributeList(al.data, maxAttrCount, 0, &size) + if err != nil { + return nil, err + } + return al, err +} + +// Update modifies the ProcThreadAttributeList using UpdateProcThreadAttribute. +func (al *ProcThreadAttributeListContainer) Update(attribute uintptr, value unsafe.Pointer, size uintptr) error { + al.pointers = append(al.pointers, value) + return updateProcThreadAttribute(al.data, 0, attribute, value, size, nil, nil) +} + +// Delete frees ProcThreadAttributeList's resources. +func (al *ProcThreadAttributeListContainer) Delete() { + deleteProcThreadAttributeList(al.data) + LocalFree(Handle(unsafe.Pointer(al.data))) + al.data = nil + al.pointers = nil +} + +// List returns the actual ProcThreadAttributeList to be passed to StartupInfoEx. +func (al *ProcThreadAttributeListContainer) List() *ProcThreadAttributeList { + return al.data +} diff --git a/vendor/golang.org/x/sys/windows/memory_windows.go b/vendor/golang.org/x/sys/windows/memory_windows.go new file mode 100644 index 000000000..6dc0920a8 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/memory_windows.go @@ -0,0 +1,48 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +const ( + MEM_COMMIT = 0x00001000 + MEM_RESERVE = 0x00002000 + MEM_DECOMMIT = 0x00004000 + MEM_RELEASE = 0x00008000 + MEM_RESET = 0x00080000 + MEM_TOP_DOWN = 0x00100000 + MEM_WRITE_WATCH = 0x00200000 + MEM_PHYSICAL = 0x00400000 + MEM_RESET_UNDO = 0x01000000 + MEM_LARGE_PAGES = 0x20000000 + + PAGE_NOACCESS = 0x00000001 + PAGE_READONLY = 0x00000002 + PAGE_READWRITE = 0x00000004 + PAGE_WRITECOPY = 0x00000008 + PAGE_EXECUTE = 0x00000010 + PAGE_EXECUTE_READ = 0x00000020 + PAGE_EXECUTE_READWRITE = 0x00000040 + PAGE_EXECUTE_WRITECOPY = 0x00000080 + PAGE_GUARD = 0x00000100 + PAGE_NOCACHE = 0x00000200 + PAGE_WRITECOMBINE = 0x00000400 + PAGE_TARGETS_INVALID = 0x40000000 + PAGE_TARGETS_NO_UPDATE = 0x40000000 + + QUOTA_LIMITS_HARDWS_MIN_DISABLE = 0x00000002 + QUOTA_LIMITS_HARDWS_MIN_ENABLE = 0x00000001 + QUOTA_LIMITS_HARDWS_MAX_DISABLE = 0x00000008 + QUOTA_LIMITS_HARDWS_MAX_ENABLE = 0x00000004 +) + +type MemoryBasicInformation struct { + BaseAddress uintptr + AllocationBase uintptr + AllocationProtect uint32 + PartitionId uint16 + RegionSize uintptr + State uint32 + Protect uint32 + Type uint32 +} diff --git a/vendor/golang.org/x/sys/windows/mkerrors.bash b/vendor/golang.org/x/sys/windows/mkerrors.bash new file mode 100644 index 000000000..58e0188fb --- /dev/null +++ b/vendor/golang.org/x/sys/windows/mkerrors.bash @@ -0,0 +1,70 @@ +#!/bin/bash + +# Copyright 2019 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +set -e +shopt -s nullglob + +winerror="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/shared/winerror.h | sort -Vr | head -n 1)" +[[ -n $winerror ]] || { echo "Unable to find winerror.h" >&2; exit 1; } +ntstatus="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/shared/ntstatus.h | sort -Vr | head -n 1)" +[[ -n $ntstatus ]] || { echo "Unable to find ntstatus.h" >&2; exit 1; } + +declare -A errors + +{ + echo "// Code generated by 'mkerrors.bash'; DO NOT EDIT." 
+ echo + echo "package windows" + echo "import \"syscall\"" + echo "const (" + + while read -r line; do + unset vtype + if [[ $line =~ ^#define\ +([A-Z0-9_]+k?)\ +([A-Z0-9_]+\()?([A-Z][A-Z0-9_]+k?)\)? ]]; then + key="${BASH_REMATCH[1]}" + value="${BASH_REMATCH[3]}" + elif [[ $line =~ ^#define\ +([A-Z0-9_]+k?)\ +([A-Z0-9_]+\()?((0x)?[0-9A-Fa-f]+)L?\)? ]]; then + key="${BASH_REMATCH[1]}" + value="${BASH_REMATCH[3]}" + vtype="${BASH_REMATCH[2]}" + elif [[ $line =~ ^#define\ +([A-Z0-9_]+k?)\ +\(\(([A-Z]+)\)((0x)?[0-9A-Fa-f]+)L?\) ]]; then + key="${BASH_REMATCH[1]}" + value="${BASH_REMATCH[3]}" + vtype="${BASH_REMATCH[2]}" + else + continue + fi + [[ -n $key && -n $value ]] || continue + [[ -z ${errors["$key"]} ]] || continue + errors["$key"]="$value" + if [[ -v vtype ]]; then + if [[ $key == FACILITY_* || $key == NO_ERROR ]]; then + vtype="" + elif [[ $vtype == *HANDLE* || $vtype == *HRESULT* ]]; then + vtype="Handle" + else + vtype="syscall.Errno" + fi + last_vtype="$vtype" + else + vtype="" + if [[ $last_vtype == Handle && $value == NO_ERROR ]]; then + value="S_OK" + elif [[ $last_vtype == syscall.Errno && $value == NO_ERROR ]]; then + value="ERROR_SUCCESS" + fi + fi + + echo "$key $vtype = $value" + done < "$winerror" + + while read -r line; do + [[ $line =~ ^#define\ (STATUS_[^\s]+)\ +\(\(NTSTATUS\)((0x)?[0-9a-fA-F]+)L?\) ]] || continue + echo "${BASH_REMATCH[1]} NTStatus = ${BASH_REMATCH[2]}" + done < "$ntstatus" + + echo ")" +} | gofmt > "zerrors_windows.go" diff --git a/vendor/golang.org/x/sys/windows/mkknownfolderids.bash b/vendor/golang.org/x/sys/windows/mkknownfolderids.bash new file mode 100644 index 000000000..ab8924e93 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/mkknownfolderids.bash @@ -0,0 +1,27 @@ +#!/bin/bash + +# Copyright 2019 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +set -e +shopt -s nullglob + +knownfolders="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/um/KnownFolders.h | sort -Vr | head -n 1)" +[[ -n $knownfolders ]] || { echo "Unable to find KnownFolders.h" >&2; exit 1; } + +{ + echo "// Code generated by 'mkknownfolderids.bash'; DO NOT EDIT." + echo + echo "package windows" + echo "type KNOWNFOLDERID GUID" + echo "var (" + while read -r line; do + [[ $line =~ DEFINE_KNOWN_FOLDER\((FOLDERID_[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+)\) ]] || continue + printf "%s = &KNOWNFOLDERID{0x%08x, 0x%04x, 0x%04x, [8]byte{0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x}}\n" \ + "${BASH_REMATCH[1]}" $(( "${BASH_REMATCH[2]}" )) $(( "${BASH_REMATCH[3]}" )) $(( "${BASH_REMATCH[4]}" )) \ + $(( "${BASH_REMATCH[5]}" )) $(( "${BASH_REMATCH[6]}" )) $(( "${BASH_REMATCH[7]}" )) $(( "${BASH_REMATCH[8]}" )) \ + $(( "${BASH_REMATCH[9]}" )) $(( "${BASH_REMATCH[10]}" )) $(( "${BASH_REMATCH[11]}" )) $(( "${BASH_REMATCH[12]}" )) + done < "$knownfolders" + echo ")" +} | gofmt > "zknownfolderids_windows.go" diff --git a/vendor/golang.org/x/sys/windows/mksyscall.go b/vendor/golang.org/x/sys/windows/mksyscall.go new file mode 100644 index 000000000..8563f79c5 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/mksyscall.go @@ -0,0 +1,10 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build generate +// +build generate + +package windows + +//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go eventlog.go service.go syscall_windows.go security_windows.go setupapi_windows.go diff --git a/vendor/golang.org/x/sys/windows/race.go b/vendor/golang.org/x/sys/windows/race.go new file mode 100644 index 000000000..9196b089c --- /dev/null +++ b/vendor/golang.org/x/sys/windows/race.go @@ -0,0 +1,31 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows && race +// +build windows,race + +package windows + +import ( + "runtime" + "unsafe" +) + +const raceenabled = true + +func raceAcquire(addr unsafe.Pointer) { + runtime.RaceAcquire(addr) +} + +func raceReleaseMerge(addr unsafe.Pointer) { + runtime.RaceReleaseMerge(addr) +} + +func raceReadRange(addr unsafe.Pointer, len int) { + runtime.RaceReadRange(addr, len) +} + +func raceWriteRange(addr unsafe.Pointer, len int) { + runtime.RaceWriteRange(addr, len) +} diff --git a/vendor/golang.org/x/sys/windows/race0.go b/vendor/golang.org/x/sys/windows/race0.go new file mode 100644 index 000000000..7bae4817a --- /dev/null +++ b/vendor/golang.org/x/sys/windows/race0.go @@ -0,0 +1,26 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows && !race +// +build windows,!race + +package windows + +import ( + "unsafe" +) + +const raceenabled = false + +func raceAcquire(addr unsafe.Pointer) { +} + +func raceReleaseMerge(addr unsafe.Pointer) { +} + +func raceReadRange(addr unsafe.Pointer, len int) { +} + +func raceWriteRange(addr unsafe.Pointer, len int) { +} diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go new file mode 100644 index 000000000..d414ef13b --- /dev/null +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -0,0 +1,1444 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/internal/unsafeheader" +) + +const ( + NameUnknown = 0 + NameFullyQualifiedDN = 1 + NameSamCompatible = 2 + NameDisplay = 3 + NameUniqueId = 6 + NameCanonical = 7 + NameUserPrincipal = 8 + NameCanonicalEx = 9 + NameServicePrincipal = 10 + NameDnsDomain = 12 +) + +// This function returns 1 byte BOOLEAN rather than the 4 byte BOOL. +// http://blogs.msdn.com/b/drnick/archive/2007/12/19/windows-and-upn-format-credentials.aspx +//sys TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) [failretval&0xff==0] = secur32.TranslateNameW +//sys GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) [failretval&0xff==0] = secur32.GetUserNameExW + +// TranslateAccountName converts a directory service +// object name from one format to another. 
+func TranslateAccountName(username string, from, to uint32, initSize int) (string, error) { + u, e := UTF16PtrFromString(username) + if e != nil { + return "", e + } + n := uint32(50) + for { + b := make([]uint16, n) + e = TranslateName(u, from, to, &b[0], &n) + if e == nil { + return UTF16ToString(b[:n]), nil + } + if e != ERROR_INSUFFICIENT_BUFFER { + return "", e + } + if n <= uint32(len(b)) { + return "", e + } + } +} + +const ( + // do not reorder + NetSetupUnknownStatus = iota + NetSetupUnjoined + NetSetupWorkgroupName + NetSetupDomainName +) + +type UserInfo10 struct { + Name *uint16 + Comment *uint16 + UsrComment *uint16 + FullName *uint16 +} + +//sys NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) = netapi32.NetUserGetInfo +//sys NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) = netapi32.NetGetJoinInformation +//sys NetApiBufferFree(buf *byte) (neterr error) = netapi32.NetApiBufferFree + +const ( + // do not reorder + SidTypeUser = 1 + iota + SidTypeGroup + SidTypeDomain + SidTypeAlias + SidTypeWellKnownGroup + SidTypeDeletedAccount + SidTypeInvalid + SidTypeUnknown + SidTypeComputer + SidTypeLabel +) + +type SidIdentifierAuthority struct { + Value [6]byte +} + +var ( + SECURITY_NULL_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 0}} + SECURITY_WORLD_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 1}} + SECURITY_LOCAL_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 2}} + SECURITY_CREATOR_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 3}} + SECURITY_NON_UNIQUE_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 4}} + SECURITY_NT_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 5}} + SECURITY_MANDATORY_LABEL_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 16}} +) + +const ( + SECURITY_NULL_RID = 0 + SECURITY_WORLD_RID = 0 + SECURITY_LOCAL_RID = 0 + SECURITY_CREATOR_OWNER_RID = 0 + SECURITY_CREATOR_GROUP_RID = 1 + SECURITY_DIALUP_RID = 1 + SECURITY_NETWORK_RID = 2 + SECURITY_BATCH_RID = 3 + SECURITY_INTERACTIVE_RID = 4 + SECURITY_LOGON_IDS_RID = 5 + SECURITY_SERVICE_RID = 6 + SECURITY_LOCAL_SYSTEM_RID = 18 + SECURITY_BUILTIN_DOMAIN_RID = 32 + SECURITY_PRINCIPAL_SELF_RID = 10 + SECURITY_CREATOR_OWNER_SERVER_RID = 0x2 + SECURITY_CREATOR_GROUP_SERVER_RID = 0x3 + SECURITY_LOGON_IDS_RID_COUNT = 0x3 + SECURITY_ANONYMOUS_LOGON_RID = 0x7 + SECURITY_PROXY_RID = 0x8 + SECURITY_ENTERPRISE_CONTROLLERS_RID = 0x9 + SECURITY_SERVER_LOGON_RID = SECURITY_ENTERPRISE_CONTROLLERS_RID + SECURITY_AUTHENTICATED_USER_RID = 0xb + SECURITY_RESTRICTED_CODE_RID = 0xc + SECURITY_NT_NON_UNIQUE_RID = 0x15 +) + +// Predefined domain-relative RIDs for local groups. 
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa379649(v=vs.85).aspx +const ( + DOMAIN_ALIAS_RID_ADMINS = 0x220 + DOMAIN_ALIAS_RID_USERS = 0x221 + DOMAIN_ALIAS_RID_GUESTS = 0x222 + DOMAIN_ALIAS_RID_POWER_USERS = 0x223 + DOMAIN_ALIAS_RID_ACCOUNT_OPS = 0x224 + DOMAIN_ALIAS_RID_SYSTEM_OPS = 0x225 + DOMAIN_ALIAS_RID_PRINT_OPS = 0x226 + DOMAIN_ALIAS_RID_BACKUP_OPS = 0x227 + DOMAIN_ALIAS_RID_REPLICATOR = 0x228 + DOMAIN_ALIAS_RID_RAS_SERVERS = 0x229 + DOMAIN_ALIAS_RID_PREW2KCOMPACCESS = 0x22a + DOMAIN_ALIAS_RID_REMOTE_DESKTOP_USERS = 0x22b + DOMAIN_ALIAS_RID_NETWORK_CONFIGURATION_OPS = 0x22c + DOMAIN_ALIAS_RID_INCOMING_FOREST_TRUST_BUILDERS = 0x22d + DOMAIN_ALIAS_RID_MONITORING_USERS = 0x22e + DOMAIN_ALIAS_RID_LOGGING_USERS = 0x22f + DOMAIN_ALIAS_RID_AUTHORIZATIONACCESS = 0x230 + DOMAIN_ALIAS_RID_TS_LICENSE_SERVERS = 0x231 + DOMAIN_ALIAS_RID_DCOM_USERS = 0x232 + DOMAIN_ALIAS_RID_IUSERS = 0x238 + DOMAIN_ALIAS_RID_CRYPTO_OPERATORS = 0x239 + DOMAIN_ALIAS_RID_CACHEABLE_PRINCIPALS_GROUP = 0x23b + DOMAIN_ALIAS_RID_NON_CACHEABLE_PRINCIPALS_GROUP = 0x23c + DOMAIN_ALIAS_RID_EVENT_LOG_READERS_GROUP = 0x23d + DOMAIN_ALIAS_RID_CERTSVC_DCOM_ACCESS_GROUP = 0x23e +) + +//sys LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) = advapi32.LookupAccountSidW +//sys LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) = advapi32.LookupAccountNameW +//sys ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) = advapi32.ConvertSidToStringSidW +//sys ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) = advapi32.ConvertStringSidToSidW +//sys GetLengthSid(sid *SID) (len uint32) = advapi32.GetLengthSid +//sys CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) = advapi32.CopySid +//sys AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) = advapi32.AllocateAndInitializeSid +//sys createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) = advapi32.CreateWellKnownSid +//sys isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) = advapi32.IsWellKnownSid +//sys FreeSid(sid *SID) (err error) [failretval!=0] = advapi32.FreeSid +//sys EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) = advapi32.EqualSid +//sys getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) = advapi32.GetSidIdentifierAuthority +//sys getSidSubAuthorityCount(sid *SID) (count *uint8) = advapi32.GetSidSubAuthorityCount +//sys getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) = advapi32.GetSidSubAuthority +//sys isValidSid(sid *SID) (isValid bool) = advapi32.IsValidSid + +// The security identifier (SID) structure is a variable-length +// structure used to uniquely identify users or groups. +type SID struct{} + +// StringToSid converts a string-format security identifier +// SID into a valid, functional SID. 
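+//
+// Minimal usage sketch (S-1-5-32-544 is the well-known BUILTIN\Administrators SID):
+//
+//	sid, err := StringToSid("S-1-5-32-544")
+//	if err == nil {
+//		_ = sid.String()
+//	}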
+func StringToSid(s string) (*SID, error) { + var sid *SID + p, e := UTF16PtrFromString(s) + if e != nil { + return nil, e + } + e = ConvertStringSidToSid(p, &sid) + if e != nil { + return nil, e + } + defer LocalFree((Handle)(unsafe.Pointer(sid))) + return sid.Copy() +} + +// LookupSID retrieves a security identifier SID for the account +// and the name of the domain on which the account was found. +// System specify target computer to search. +func LookupSID(system, account string) (sid *SID, domain string, accType uint32, err error) { + if len(account) == 0 { + return nil, "", 0, syscall.EINVAL + } + acc, e := UTF16PtrFromString(account) + if e != nil { + return nil, "", 0, e + } + var sys *uint16 + if len(system) > 0 { + sys, e = UTF16PtrFromString(system) + if e != nil { + return nil, "", 0, e + } + } + n := uint32(50) + dn := uint32(50) + for { + b := make([]byte, n) + db := make([]uint16, dn) + sid = (*SID)(unsafe.Pointer(&b[0])) + e = LookupAccountName(sys, acc, sid, &n, &db[0], &dn, &accType) + if e == nil { + return sid, UTF16ToString(db), accType, nil + } + if e != ERROR_INSUFFICIENT_BUFFER { + return nil, "", 0, e + } + if n <= uint32(len(b)) { + return nil, "", 0, e + } + } +} + +// String converts SID to a string format suitable for display, storage, or transmission. +func (sid *SID) String() string { + var s *uint16 + e := ConvertSidToStringSid(sid, &s) + if e != nil { + return "" + } + defer LocalFree((Handle)(unsafe.Pointer(s))) + return UTF16ToString((*[256]uint16)(unsafe.Pointer(s))[:]) +} + +// Len returns the length, in bytes, of a valid security identifier SID. +func (sid *SID) Len() int { + return int(GetLengthSid(sid)) +} + +// Copy creates a duplicate of security identifier SID. +func (sid *SID) Copy() (*SID, error) { + b := make([]byte, sid.Len()) + sid2 := (*SID)(unsafe.Pointer(&b[0])) + e := CopySid(uint32(len(b)), sid2, sid) + if e != nil { + return nil, e + } + return sid2, nil +} + +// IdentifierAuthority returns the identifier authority of the SID. +func (sid *SID) IdentifierAuthority() SidIdentifierAuthority { + return *getSidIdentifierAuthority(sid) +} + +// SubAuthorityCount returns the number of sub-authorities in the SID. +func (sid *SID) SubAuthorityCount() uint8 { + return *getSidSubAuthorityCount(sid) +} + +// SubAuthority returns the sub-authority of the SID as specified by +// the index, which must be less than sid.SubAuthorityCount(). +func (sid *SID) SubAuthority(idx uint32) uint32 { + if idx >= uint32(sid.SubAuthorityCount()) { + panic("sub-authority index out of range") + } + return *getSidSubAuthority(sid, idx) +} + +// IsValid returns whether the SID has a valid revision and length. +func (sid *SID) IsValid() bool { + return isValidSid(sid) +} + +// Equals compares two SIDs for equality. +func (sid *SID) Equals(sid2 *SID) bool { + return EqualSid(sid, sid2) +} + +// IsWellKnown determines whether the SID matches the well-known sidType. +func (sid *SID) IsWellKnown(sidType WELL_KNOWN_SID_TYPE) bool { + return isWellKnownSid(sid, sidType) +} + +// LookupAccount retrieves the name of the account for this SID +// and the name of the first domain on which this SID is found. +// System specify target computer to search for. 
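+//
+// Illustrative sketch (sid is assumed to be a valid *SID; an empty system
+// string searches the local computer):
+//
+//	account, domain, accType, err := sid.LookupAccount("")
+//	_ = accType // one of the SidType* constants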
+func (sid *SID) LookupAccount(system string) (account, domain string, accType uint32, err error) { + var sys *uint16 + if len(system) > 0 { + sys, err = UTF16PtrFromString(system) + if err != nil { + return "", "", 0, err + } + } + n := uint32(50) + dn := uint32(50) + for { + b := make([]uint16, n) + db := make([]uint16, dn) + e := LookupAccountSid(sys, sid, &b[0], &n, &db[0], &dn, &accType) + if e == nil { + return UTF16ToString(b), UTF16ToString(db), accType, nil + } + if e != ERROR_INSUFFICIENT_BUFFER { + return "", "", 0, e + } + if n <= uint32(len(b)) { + return "", "", 0, e + } + } +} + +// Various types of pre-specified SIDs that can be synthesized and compared at runtime. +type WELL_KNOWN_SID_TYPE uint32 + +const ( + WinNullSid = 0 + WinWorldSid = 1 + WinLocalSid = 2 + WinCreatorOwnerSid = 3 + WinCreatorGroupSid = 4 + WinCreatorOwnerServerSid = 5 + WinCreatorGroupServerSid = 6 + WinNtAuthoritySid = 7 + WinDialupSid = 8 + WinNetworkSid = 9 + WinBatchSid = 10 + WinInteractiveSid = 11 + WinServiceSid = 12 + WinAnonymousSid = 13 + WinProxySid = 14 + WinEnterpriseControllersSid = 15 + WinSelfSid = 16 + WinAuthenticatedUserSid = 17 + WinRestrictedCodeSid = 18 + WinTerminalServerSid = 19 + WinRemoteLogonIdSid = 20 + WinLogonIdsSid = 21 + WinLocalSystemSid = 22 + WinLocalServiceSid = 23 + WinNetworkServiceSid = 24 + WinBuiltinDomainSid = 25 + WinBuiltinAdministratorsSid = 26 + WinBuiltinUsersSid = 27 + WinBuiltinGuestsSid = 28 + WinBuiltinPowerUsersSid = 29 + WinBuiltinAccountOperatorsSid = 30 + WinBuiltinSystemOperatorsSid = 31 + WinBuiltinPrintOperatorsSid = 32 + WinBuiltinBackupOperatorsSid = 33 + WinBuiltinReplicatorSid = 34 + WinBuiltinPreWindows2000CompatibleAccessSid = 35 + WinBuiltinRemoteDesktopUsersSid = 36 + WinBuiltinNetworkConfigurationOperatorsSid = 37 + WinAccountAdministratorSid = 38 + WinAccountGuestSid = 39 + WinAccountKrbtgtSid = 40 + WinAccountDomainAdminsSid = 41 + WinAccountDomainUsersSid = 42 + WinAccountDomainGuestsSid = 43 + WinAccountComputersSid = 44 + WinAccountControllersSid = 45 + WinAccountCertAdminsSid = 46 + WinAccountSchemaAdminsSid = 47 + WinAccountEnterpriseAdminsSid = 48 + WinAccountPolicyAdminsSid = 49 + WinAccountRasAndIasServersSid = 50 + WinNTLMAuthenticationSid = 51 + WinDigestAuthenticationSid = 52 + WinSChannelAuthenticationSid = 53 + WinThisOrganizationSid = 54 + WinOtherOrganizationSid = 55 + WinBuiltinIncomingForestTrustBuildersSid = 56 + WinBuiltinPerfMonitoringUsersSid = 57 + WinBuiltinPerfLoggingUsersSid = 58 + WinBuiltinAuthorizationAccessSid = 59 + WinBuiltinTerminalServerLicenseServersSid = 60 + WinBuiltinDCOMUsersSid = 61 + WinBuiltinIUsersSid = 62 + WinIUserSid = 63 + WinBuiltinCryptoOperatorsSid = 64 + WinUntrustedLabelSid = 65 + WinLowLabelSid = 66 + WinMediumLabelSid = 67 + WinHighLabelSid = 68 + WinSystemLabelSid = 69 + WinWriteRestrictedCodeSid = 70 + WinCreatorOwnerRightsSid = 71 + WinCacheablePrincipalsGroupSid = 72 + WinNonCacheablePrincipalsGroupSid = 73 + WinEnterpriseReadonlyControllersSid = 74 + WinAccountReadonlyControllersSid = 75 + WinBuiltinEventLogReadersGroup = 76 + WinNewEnterpriseReadonlyControllersSid = 77 + WinBuiltinCertSvcDComAccessGroup = 78 + WinMediumPlusLabelSid = 79 + WinLocalLogonSid = 80 + WinConsoleLogonSid = 81 + WinThisOrganizationCertificateSid = 82 + WinApplicationPackageAuthoritySid = 83 + WinBuiltinAnyPackageSid = 84 + WinCapabilityInternetClientSid = 85 + WinCapabilityInternetClientServerSid = 86 + WinCapabilityPrivateNetworkClientServerSid = 87 + WinCapabilityPicturesLibrarySid = 88 + 
WinCapabilityVideosLibrarySid = 89 + WinCapabilityMusicLibrarySid = 90 + WinCapabilityDocumentsLibrarySid = 91 + WinCapabilitySharedUserCertificatesSid = 92 + WinCapabilityEnterpriseAuthenticationSid = 93 + WinCapabilityRemovableStorageSid = 94 + WinBuiltinRDSRemoteAccessServersSid = 95 + WinBuiltinRDSEndpointServersSid = 96 + WinBuiltinRDSManagementServersSid = 97 + WinUserModeDriversSid = 98 + WinBuiltinHyperVAdminsSid = 99 + WinAccountCloneableControllersSid = 100 + WinBuiltinAccessControlAssistanceOperatorsSid = 101 + WinBuiltinRemoteManagementUsersSid = 102 + WinAuthenticationAuthorityAssertedSid = 103 + WinAuthenticationServiceAssertedSid = 104 + WinLocalAccountSid = 105 + WinLocalAccountAndAdministratorSid = 106 + WinAccountProtectedUsersSid = 107 + WinCapabilityAppointmentsSid = 108 + WinCapabilityContactsSid = 109 + WinAccountDefaultSystemManagedSid = 110 + WinBuiltinDefaultSystemManagedGroupSid = 111 + WinBuiltinStorageReplicaAdminsSid = 112 + WinAccountKeyAdminsSid = 113 + WinAccountEnterpriseKeyAdminsSid = 114 + WinAuthenticationKeyTrustSid = 115 + WinAuthenticationKeyPropertyMFASid = 116 + WinAuthenticationKeyPropertyAttestationSid = 117 + WinAuthenticationFreshKeyAuthSid = 118 + WinBuiltinDeviceOwnersSid = 119 +) + +// Creates a SID for a well-known predefined alias, generally using the constants of the form +// Win*Sid, for the local machine. +func CreateWellKnownSid(sidType WELL_KNOWN_SID_TYPE) (*SID, error) { + return CreateWellKnownDomainSid(sidType, nil) +} + +// Creates a SID for a well-known predefined alias, generally using the constants of the form +// Win*Sid, for the domain specified by the domainSid parameter. +func CreateWellKnownDomainSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID) (*SID, error) { + n := uint32(50) + for { + b := make([]byte, n) + sid := (*SID)(unsafe.Pointer(&b[0])) + err := createWellKnownSid(sidType, domainSid, sid, &n) + if err == nil { + return sid, nil + } + if err != ERROR_INSUFFICIENT_BUFFER { + return nil, err + } + if n <= uint32(len(b)) { + return nil, err + } + } +} + +const ( + // do not reorder + TOKEN_ASSIGN_PRIMARY = 1 << iota + TOKEN_DUPLICATE + TOKEN_IMPERSONATE + TOKEN_QUERY + TOKEN_QUERY_SOURCE + TOKEN_ADJUST_PRIVILEGES + TOKEN_ADJUST_GROUPS + TOKEN_ADJUST_DEFAULT + TOKEN_ADJUST_SESSIONID + + TOKEN_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | + TOKEN_ASSIGN_PRIMARY | + TOKEN_DUPLICATE | + TOKEN_IMPERSONATE | + TOKEN_QUERY | + TOKEN_QUERY_SOURCE | + TOKEN_ADJUST_PRIVILEGES | + TOKEN_ADJUST_GROUPS | + TOKEN_ADJUST_DEFAULT | + TOKEN_ADJUST_SESSIONID + TOKEN_READ = STANDARD_RIGHTS_READ | TOKEN_QUERY + TOKEN_WRITE = STANDARD_RIGHTS_WRITE | + TOKEN_ADJUST_PRIVILEGES | + TOKEN_ADJUST_GROUPS | + TOKEN_ADJUST_DEFAULT + TOKEN_EXECUTE = STANDARD_RIGHTS_EXECUTE +) + +const ( + // do not reorder + TokenUser = 1 + iota + TokenGroups + TokenPrivileges + TokenOwner + TokenPrimaryGroup + TokenDefaultDacl + TokenSource + TokenType + TokenImpersonationLevel + TokenStatistics + TokenRestrictedSids + TokenSessionId + TokenGroupsAndPrivileges + TokenSessionReference + TokenSandBoxInert + TokenAuditPolicy + TokenOrigin + TokenElevationType + TokenLinkedToken + TokenElevation + TokenHasRestrictions + TokenAccessInformation + TokenVirtualizationAllowed + TokenVirtualizationEnabled + TokenIntegrityLevel + TokenUIAccess + TokenMandatoryPolicy + TokenLogonSid + MaxTokenInfoClass +) + +// Group attributes inside of Tokengroups.Groups[i].Attributes +const ( + SE_GROUP_MANDATORY = 0x00000001 + SE_GROUP_ENABLED_BY_DEFAULT = 0x00000002 + SE_GROUP_ENABLED = 
0x00000004 + SE_GROUP_OWNER = 0x00000008 + SE_GROUP_USE_FOR_DENY_ONLY = 0x00000010 + SE_GROUP_INTEGRITY = 0x00000020 + SE_GROUP_INTEGRITY_ENABLED = 0x00000040 + SE_GROUP_LOGON_ID = 0xC0000000 + SE_GROUP_RESOURCE = 0x20000000 + SE_GROUP_VALID_ATTRIBUTES = SE_GROUP_MANDATORY | SE_GROUP_ENABLED_BY_DEFAULT | SE_GROUP_ENABLED | SE_GROUP_OWNER | SE_GROUP_USE_FOR_DENY_ONLY | SE_GROUP_LOGON_ID | SE_GROUP_RESOURCE | SE_GROUP_INTEGRITY | SE_GROUP_INTEGRITY_ENABLED +) + +// Privilege attributes +const ( + SE_PRIVILEGE_ENABLED_BY_DEFAULT = 0x00000001 + SE_PRIVILEGE_ENABLED = 0x00000002 + SE_PRIVILEGE_REMOVED = 0x00000004 + SE_PRIVILEGE_USED_FOR_ACCESS = 0x80000000 + SE_PRIVILEGE_VALID_ATTRIBUTES = SE_PRIVILEGE_ENABLED_BY_DEFAULT | SE_PRIVILEGE_ENABLED | SE_PRIVILEGE_REMOVED | SE_PRIVILEGE_USED_FOR_ACCESS +) + +// Token types +const ( + TokenPrimary = 1 + TokenImpersonation = 2 +) + +// Impersonation levels +const ( + SecurityAnonymous = 0 + SecurityIdentification = 1 + SecurityImpersonation = 2 + SecurityDelegation = 3 +) + +type LUID struct { + LowPart uint32 + HighPart int32 +} + +type LUIDAndAttributes struct { + Luid LUID + Attributes uint32 +} + +type SIDAndAttributes struct { + Sid *SID + Attributes uint32 +} + +type Tokenuser struct { + User SIDAndAttributes +} + +type Tokenprimarygroup struct { + PrimaryGroup *SID +} + +type Tokengroups struct { + GroupCount uint32 + Groups [1]SIDAndAttributes // Use AllGroups() for iterating. +} + +// AllGroups returns a slice that can be used to iterate over the groups in g. +func (g *Tokengroups) AllGroups() []SIDAndAttributes { + return (*[(1 << 28) - 1]SIDAndAttributes)(unsafe.Pointer(&g.Groups[0]))[:g.GroupCount:g.GroupCount] +} + +type Tokenprivileges struct { + PrivilegeCount uint32 + Privileges [1]LUIDAndAttributes // Use AllPrivileges() for iterating. +} + +// AllPrivileges returns a slice that can be used to iterate over the privileges in p. 
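+//
+// For example (sketch; p is assumed to be a *Tokenprivileges obtained via
+// GetTokenInformation with the TokenPrivileges class):
+//
+//	for _, priv := range p.AllPrivileges() {
+//		_ = priv.Luid
+//		_ = priv.Attributes & SE_PRIVILEGE_ENABLED
+//	}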
+func (p *Tokenprivileges) AllPrivileges() []LUIDAndAttributes { + return (*[(1 << 27) - 1]LUIDAndAttributes)(unsafe.Pointer(&p.Privileges[0]))[:p.PrivilegeCount:p.PrivilegeCount] +} + +type Tokenmandatorylabel struct { + Label SIDAndAttributes +} + +func (tml *Tokenmandatorylabel) Size() uint32 { + return uint32(unsafe.Sizeof(Tokenmandatorylabel{})) + GetLengthSid(tml.Label.Sid) +} + +// Authorization Functions +//sys checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) = advapi32.CheckTokenMembership +//sys isTokenRestricted(tokenHandle Token) (ret bool, err error) [!failretval] = advapi32.IsTokenRestricted +//sys OpenProcessToken(process Handle, access uint32, token *Token) (err error) = advapi32.OpenProcessToken +//sys OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token) (err error) = advapi32.OpenThreadToken +//sys ImpersonateSelf(impersonationlevel uint32) (err error) = advapi32.ImpersonateSelf +//sys RevertToSelf() (err error) = advapi32.RevertToSelf +//sys SetThreadToken(thread *Handle, token Token) (err error) = advapi32.SetThreadToken +//sys LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) = advapi32.LookupPrivilegeValueW +//sys AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tokenprivileges, buflen uint32, prevstate *Tokenprivileges, returnlen *uint32) (err error) = advapi32.AdjustTokenPrivileges +//sys AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, buflen uint32, prevstate *Tokengroups, returnlen *uint32) (err error) = advapi32.AdjustTokenGroups +//sys GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) = advapi32.GetTokenInformation +//sys SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) = advapi32.SetTokenInformation +//sys DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) = advapi32.DuplicateTokenEx +//sys GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) = userenv.GetUserProfileDirectoryW +//sys getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetSystemDirectoryW +//sys getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetWindowsDirectoryW +//sys getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetSystemWindowsDirectoryW + +// An access token contains the security information for a logon session. +// The system creates an access token when a user logs on, and every +// process executed on behalf of the user has a copy of the token. +// The token identifies the user, the user's groups, and the user's +// privileges. The system uses the token to control access to securable +// objects and to control the ability of the user to perform various +// system-related operations on the local computer. +type Token Handle + +// OpenCurrentProcessToken opens an access token associated with current +// process with TOKEN_QUERY access. It is a real token that needs to be closed. +// +// Deprecated: Explicitly call OpenProcessToken(CurrentProcess(), ...) +// with the desired access instead, or use GetCurrentProcessToken for a +// TOKEN_QUERY token. 
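+//
+// The preferred replacement looks roughly like this (sketch):
+//
+//	var token Token
+//	if err := OpenProcessToken(CurrentProcess(), TOKEN_QUERY, &token); err == nil {
+//		defer token.Close()
+//	}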
+func OpenCurrentProcessToken() (Token, error) { + var token Token + err := OpenProcessToken(CurrentProcess(), TOKEN_QUERY, &token) + return token, err +} + +// GetCurrentProcessToken returns the access token associated with +// the current process. It is a pseudo token that does not need +// to be closed. +func GetCurrentProcessToken() Token { + return Token(^uintptr(4 - 1)) +} + +// GetCurrentThreadToken return the access token associated with +// the current thread. It is a pseudo token that does not need +// to be closed. +func GetCurrentThreadToken() Token { + return Token(^uintptr(5 - 1)) +} + +// GetCurrentThreadEffectiveToken returns the effective access token +// associated with the current thread. It is a pseudo token that does +// not need to be closed. +func GetCurrentThreadEffectiveToken() Token { + return Token(^uintptr(6 - 1)) +} + +// Close releases access to access token. +func (t Token) Close() error { + return CloseHandle(Handle(t)) +} + +// getInfo retrieves a specified type of information about an access token. +func (t Token) getInfo(class uint32, initSize int) (unsafe.Pointer, error) { + n := uint32(initSize) + for { + b := make([]byte, n) + e := GetTokenInformation(t, class, &b[0], uint32(len(b)), &n) + if e == nil { + return unsafe.Pointer(&b[0]), nil + } + if e != ERROR_INSUFFICIENT_BUFFER { + return nil, e + } + if n <= uint32(len(b)) { + return nil, e + } + } +} + +// GetTokenUser retrieves access token t user account information. +func (t Token) GetTokenUser() (*Tokenuser, error) { + i, e := t.getInfo(TokenUser, 50) + if e != nil { + return nil, e + } + return (*Tokenuser)(i), nil +} + +// GetTokenGroups retrieves group accounts associated with access token t. +func (t Token) GetTokenGroups() (*Tokengroups, error) { + i, e := t.getInfo(TokenGroups, 50) + if e != nil { + return nil, e + } + return (*Tokengroups)(i), nil +} + +// GetTokenPrimaryGroup retrieves access token t primary group information. +// A pointer to a SID structure representing a group that will become +// the primary group of any objects created by a process using this access token. +func (t Token) GetTokenPrimaryGroup() (*Tokenprimarygroup, error) { + i, e := t.getInfo(TokenPrimaryGroup, 50) + if e != nil { + return nil, e + } + return (*Tokenprimarygroup)(i), nil +} + +// GetUserProfileDirectory retrieves path to the +// root directory of the access token t user's profile. +func (t Token) GetUserProfileDirectory() (string, error) { + n := uint32(100) + for { + b := make([]uint16, n) + e := GetUserProfileDirectory(t, &b[0], &n) + if e == nil { + return UTF16ToString(b), nil + } + if e != ERROR_INSUFFICIENT_BUFFER { + return "", e + } + if n <= uint32(len(b)) { + return "", e + } + } +} + +// IsElevated returns whether the current token is elevated from a UAC perspective. +func (token Token) IsElevated() bool { + var isElevated uint32 + var outLen uint32 + err := GetTokenInformation(token, TokenElevation, (*byte)(unsafe.Pointer(&isElevated)), uint32(unsafe.Sizeof(isElevated)), &outLen) + if err != nil { + return false + } + return outLen == uint32(unsafe.Sizeof(isElevated)) && isElevated != 0 +} + +// GetLinkedToken returns the linked token, which may be an elevated UAC token. 
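+//
+// Sketch (token is an assumed Token opened with TOKEN_QUERY access):
+//
+//	if linked, err := token.GetLinkedToken(); err == nil {
+//		defer linked.Close()
+//		_ = linked.IsElevated()
+//	}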
+func (token Token) GetLinkedToken() (Token, error) { + var linkedToken Token + var outLen uint32 + err := GetTokenInformation(token, TokenLinkedToken, (*byte)(unsafe.Pointer(&linkedToken)), uint32(unsafe.Sizeof(linkedToken)), &outLen) + if err != nil { + return Token(0), err + } + return linkedToken, nil +} + +// GetSystemDirectory retrieves the path to current location of the system +// directory, which is typically, though not always, `C:\Windows\System32`. +func GetSystemDirectory() (string, error) { + n := uint32(MAX_PATH) + for { + b := make([]uint16, n) + l, e := getSystemDirectory(&b[0], n) + if e != nil { + return "", e + } + if l <= n { + return UTF16ToString(b[:l]), nil + } + n = l + } +} + +// GetWindowsDirectory retrieves the path to current location of the Windows +// directory, which is typically, though not always, `C:\Windows`. This may +// be a private user directory in the case that the application is running +// under a terminal server. +func GetWindowsDirectory() (string, error) { + n := uint32(MAX_PATH) + for { + b := make([]uint16, n) + l, e := getWindowsDirectory(&b[0], n) + if e != nil { + return "", e + } + if l <= n { + return UTF16ToString(b[:l]), nil + } + n = l + } +} + +// GetSystemWindowsDirectory retrieves the path to current location of the +// Windows directory, which is typically, though not always, `C:\Windows`. +func GetSystemWindowsDirectory() (string, error) { + n := uint32(MAX_PATH) + for { + b := make([]uint16, n) + l, e := getSystemWindowsDirectory(&b[0], n) + if e != nil { + return "", e + } + if l <= n { + return UTF16ToString(b[:l]), nil + } + n = l + } +} + +// IsMember reports whether the access token t is a member of the provided SID. +func (t Token) IsMember(sid *SID) (bool, error) { + var b int32 + if e := checkTokenMembership(t, sid, &b); e != nil { + return false, e + } + return b != 0, nil +} + +// IsRestricted reports whether the access token t is a restricted token. +func (t Token) IsRestricted() (isRestricted bool, err error) { + isRestricted, err = isTokenRestricted(t) + if !isRestricted && err == syscall.EINVAL { + // If err is EINVAL, this returned ERROR_SUCCESS indicating a non-restricted token. 
+ err = nil + } + return +} + +const ( + WTS_CONSOLE_CONNECT = 0x1 + WTS_CONSOLE_DISCONNECT = 0x2 + WTS_REMOTE_CONNECT = 0x3 + WTS_REMOTE_DISCONNECT = 0x4 + WTS_SESSION_LOGON = 0x5 + WTS_SESSION_LOGOFF = 0x6 + WTS_SESSION_LOCK = 0x7 + WTS_SESSION_UNLOCK = 0x8 + WTS_SESSION_REMOTE_CONTROL = 0x9 + WTS_SESSION_CREATE = 0xa + WTS_SESSION_TERMINATE = 0xb +) + +const ( + WTSActive = 0 + WTSConnected = 1 + WTSConnectQuery = 2 + WTSShadow = 3 + WTSDisconnected = 4 + WTSIdle = 5 + WTSListen = 6 + WTSReset = 7 + WTSDown = 8 + WTSInit = 9 +) + +type WTSSESSION_NOTIFICATION struct { + Size uint32 + SessionID uint32 +} + +type WTS_SESSION_INFO struct { + SessionID uint32 + WindowStationName *uint16 + State uint32 +} + +//sys WTSQueryUserToken(session uint32, token *Token) (err error) = wtsapi32.WTSQueryUserToken +//sys WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) = wtsapi32.WTSEnumerateSessionsW +//sys WTSFreeMemory(ptr uintptr) = wtsapi32.WTSFreeMemory +//sys WTSGetActiveConsoleSessionId() (sessionID uint32) + +type ACL struct { + aclRevision byte + sbz1 byte + aclSize uint16 + aceCount uint16 + sbz2 uint16 +} + +type SECURITY_DESCRIPTOR struct { + revision byte + sbz1 byte + control SECURITY_DESCRIPTOR_CONTROL + owner *SID + group *SID + sacl *ACL + dacl *ACL +} + +type SECURITY_QUALITY_OF_SERVICE struct { + Length uint32 + ImpersonationLevel uint32 + ContextTrackingMode byte + EffectiveOnly byte +} + +// Constants for the ContextTrackingMode field of SECURITY_QUALITY_OF_SERVICE. +const ( + SECURITY_STATIC_TRACKING = 0 + SECURITY_DYNAMIC_TRACKING = 1 +) + +type SecurityAttributes struct { + Length uint32 + SecurityDescriptor *SECURITY_DESCRIPTOR + InheritHandle uint32 +} + +type SE_OBJECT_TYPE uint32 + +// Constants for type SE_OBJECT_TYPE +const ( + SE_UNKNOWN_OBJECT_TYPE = 0 + SE_FILE_OBJECT = 1 + SE_SERVICE = 2 + SE_PRINTER = 3 + SE_REGISTRY_KEY = 4 + SE_LMSHARE = 5 + SE_KERNEL_OBJECT = 6 + SE_WINDOW_OBJECT = 7 + SE_DS_OBJECT = 8 + SE_DS_OBJECT_ALL = 9 + SE_PROVIDER_DEFINED_OBJECT = 10 + SE_WMIGUID_OBJECT = 11 + SE_REGISTRY_WOW64_32KEY = 12 + SE_REGISTRY_WOW64_64KEY = 13 +) + +type SECURITY_INFORMATION uint32 + +// Constants for type SECURITY_INFORMATION +const ( + OWNER_SECURITY_INFORMATION = 0x00000001 + GROUP_SECURITY_INFORMATION = 0x00000002 + DACL_SECURITY_INFORMATION = 0x00000004 + SACL_SECURITY_INFORMATION = 0x00000008 + LABEL_SECURITY_INFORMATION = 0x00000010 + ATTRIBUTE_SECURITY_INFORMATION = 0x00000020 + SCOPE_SECURITY_INFORMATION = 0x00000040 + BACKUP_SECURITY_INFORMATION = 0x00010000 + PROTECTED_DACL_SECURITY_INFORMATION = 0x80000000 + PROTECTED_SACL_SECURITY_INFORMATION = 0x40000000 + UNPROTECTED_DACL_SECURITY_INFORMATION = 0x20000000 + UNPROTECTED_SACL_SECURITY_INFORMATION = 0x10000000 +) + +type SECURITY_DESCRIPTOR_CONTROL uint16 + +// Constants for type SECURITY_DESCRIPTOR_CONTROL +const ( + SE_OWNER_DEFAULTED = 0x0001 + SE_GROUP_DEFAULTED = 0x0002 + SE_DACL_PRESENT = 0x0004 + SE_DACL_DEFAULTED = 0x0008 + SE_SACL_PRESENT = 0x0010 + SE_SACL_DEFAULTED = 0x0020 + SE_DACL_AUTO_INHERIT_REQ = 0x0100 + SE_SACL_AUTO_INHERIT_REQ = 0x0200 + SE_DACL_AUTO_INHERITED = 0x0400 + SE_SACL_AUTO_INHERITED = 0x0800 + SE_DACL_PROTECTED = 0x1000 + SE_SACL_PROTECTED = 0x2000 + SE_RM_CONTROL_VALID = 0x4000 + SE_SELF_RELATIVE = 0x8000 +) + +type ACCESS_MASK uint32 + +// Constants for type ACCESS_MASK +const ( + DELETE = 0x00010000 + READ_CONTROL = 0x00020000 + WRITE_DAC = 0x00040000 + WRITE_OWNER = 0x00080000 + SYNCHRONIZE = 
0x00100000 + STANDARD_RIGHTS_REQUIRED = 0x000F0000 + STANDARD_RIGHTS_READ = READ_CONTROL + STANDARD_RIGHTS_WRITE = READ_CONTROL + STANDARD_RIGHTS_EXECUTE = READ_CONTROL + STANDARD_RIGHTS_ALL = 0x001F0000 + SPECIFIC_RIGHTS_ALL = 0x0000FFFF + ACCESS_SYSTEM_SECURITY = 0x01000000 + MAXIMUM_ALLOWED = 0x02000000 + GENERIC_READ = 0x80000000 + GENERIC_WRITE = 0x40000000 + GENERIC_EXECUTE = 0x20000000 + GENERIC_ALL = 0x10000000 +) + +type ACCESS_MODE uint32 + +// Constants for type ACCESS_MODE +const ( + NOT_USED_ACCESS = 0 + GRANT_ACCESS = 1 + SET_ACCESS = 2 + DENY_ACCESS = 3 + REVOKE_ACCESS = 4 + SET_AUDIT_SUCCESS = 5 + SET_AUDIT_FAILURE = 6 +) + +// Constants for AceFlags and Inheritance fields +const ( + NO_INHERITANCE = 0x0 + SUB_OBJECTS_ONLY_INHERIT = 0x1 + SUB_CONTAINERS_ONLY_INHERIT = 0x2 + SUB_CONTAINERS_AND_OBJECTS_INHERIT = 0x3 + INHERIT_NO_PROPAGATE = 0x4 + INHERIT_ONLY = 0x8 + INHERITED_ACCESS_ENTRY = 0x10 + INHERITED_PARENT = 0x10000000 + INHERITED_GRANDPARENT = 0x20000000 + OBJECT_INHERIT_ACE = 0x1 + CONTAINER_INHERIT_ACE = 0x2 + NO_PROPAGATE_INHERIT_ACE = 0x4 + INHERIT_ONLY_ACE = 0x8 + INHERITED_ACE = 0x10 + VALID_INHERIT_FLAGS = 0x1F +) + +type MULTIPLE_TRUSTEE_OPERATION uint32 + +// Constants for MULTIPLE_TRUSTEE_OPERATION +const ( + NO_MULTIPLE_TRUSTEE = 0 + TRUSTEE_IS_IMPERSONATE = 1 +) + +type TRUSTEE_FORM uint32 + +// Constants for TRUSTEE_FORM +const ( + TRUSTEE_IS_SID = 0 + TRUSTEE_IS_NAME = 1 + TRUSTEE_BAD_FORM = 2 + TRUSTEE_IS_OBJECTS_AND_SID = 3 + TRUSTEE_IS_OBJECTS_AND_NAME = 4 +) + +type TRUSTEE_TYPE uint32 + +// Constants for TRUSTEE_TYPE +const ( + TRUSTEE_IS_UNKNOWN = 0 + TRUSTEE_IS_USER = 1 + TRUSTEE_IS_GROUP = 2 + TRUSTEE_IS_DOMAIN = 3 + TRUSTEE_IS_ALIAS = 4 + TRUSTEE_IS_WELL_KNOWN_GROUP = 5 + TRUSTEE_IS_DELETED = 6 + TRUSTEE_IS_INVALID = 7 + TRUSTEE_IS_COMPUTER = 8 +) + +// Constants for ObjectsPresent field +const ( + ACE_OBJECT_TYPE_PRESENT = 0x1 + ACE_INHERITED_OBJECT_TYPE_PRESENT = 0x2 +) + +type EXPLICIT_ACCESS struct { + AccessPermissions ACCESS_MASK + AccessMode ACCESS_MODE + Inheritance uint32 + Trustee TRUSTEE +} + +// This type is the union inside of TRUSTEE and must be created using one of the TrusteeValueFrom* functions. 
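+//
+// For example (sketch; sid is an assumed *SID):
+//
+//	trustee := TRUSTEE{
+//		TrusteeForm:  TRUSTEE_IS_SID,
+//		TrusteeType:  TRUSTEE_IS_USER,
+//		TrusteeValue: TrusteeValueFromSID(sid),
+//	}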
+type TrusteeValue uintptr + +func TrusteeValueFromString(str string) TrusteeValue { + return TrusteeValue(unsafe.Pointer(StringToUTF16Ptr(str))) +} +func TrusteeValueFromSID(sid *SID) TrusteeValue { + return TrusteeValue(unsafe.Pointer(sid)) +} +func TrusteeValueFromObjectsAndSid(objectsAndSid *OBJECTS_AND_SID) TrusteeValue { + return TrusteeValue(unsafe.Pointer(objectsAndSid)) +} +func TrusteeValueFromObjectsAndName(objectsAndName *OBJECTS_AND_NAME) TrusteeValue { + return TrusteeValue(unsafe.Pointer(objectsAndName)) +} + +type TRUSTEE struct { + MultipleTrustee *TRUSTEE + MultipleTrusteeOperation MULTIPLE_TRUSTEE_OPERATION + TrusteeForm TRUSTEE_FORM + TrusteeType TRUSTEE_TYPE + TrusteeValue TrusteeValue +} + +type OBJECTS_AND_SID struct { + ObjectsPresent uint32 + ObjectTypeGuid GUID + InheritedObjectTypeGuid GUID + Sid *SID +} + +type OBJECTS_AND_NAME struct { + ObjectsPresent uint32 + ObjectType SE_OBJECT_TYPE + ObjectTypeName *uint16 + InheritedObjectTypeName *uint16 + Name *uint16 +} + +//sys getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) = advapi32.GetSecurityInfo +//sys SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) = advapi32.SetSecurityInfo +//sys getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) = advapi32.GetNamedSecurityInfoW +//sys SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) = advapi32.SetNamedSecurityInfoW +//sys SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) = advapi32.SetKernelObjectSecurity + +//sys buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) = advapi32.BuildSecurityDescriptorW +//sys initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) = advapi32.InitializeSecurityDescriptor + +//sys getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) = advapi32.GetSecurityDescriptorControl +//sys getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl **ACL, daclDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorDacl +//sys getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl **ACL, saclDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorSacl +//sys getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorOwner +//sys getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorGroup +//sys getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) = advapi32.GetSecurityDescriptorLength +//sys getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) [failretval!=0] = 
advapi32.GetSecurityDescriptorRMControl +//sys isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) = advapi32.IsValidSecurityDescriptor + +//sys setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) = advapi32.SetSecurityDescriptorControl +//sys setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl *ACL, daclDefaulted bool) (err error) = advapi32.SetSecurityDescriptorDacl +//sys setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl *ACL, saclDefaulted bool) (err error) = advapi32.SetSecurityDescriptorSacl +//sys setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaulted bool) (err error) = advapi32.SetSecurityDescriptorOwner +//sys setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaulted bool) (err error) = advapi32.SetSecurityDescriptorGroup +//sys setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) = advapi32.SetSecurityDescriptorRMControl + +//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW +//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW + +//sys makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) = advapi32.MakeAbsoluteSD +//sys makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) = advapi32.MakeSelfRelativeSD + +//sys setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) = advapi32.SetEntriesInAclW + +// Control returns the security descriptor control bits. +func (sd *SECURITY_DESCRIPTOR) Control() (control SECURITY_DESCRIPTOR_CONTROL, revision uint32, err error) { + err = getSecurityDescriptorControl(sd, &control, &revision) + return +} + +// SetControl sets the security descriptor control bits. +func (sd *SECURITY_DESCRIPTOR) SetControl(controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) error { + return setSecurityDescriptorControl(sd, controlBitsOfInterest, controlBitsToSet) +} + +// RMControl returns the security descriptor resource manager control bits. +func (sd *SECURITY_DESCRIPTOR) RMControl() (control uint8, err error) { + err = getSecurityDescriptorRMControl(sd, &control) + return +} + +// SetRMControl sets the security descriptor resource manager control bits. +func (sd *SECURITY_DESCRIPTOR) SetRMControl(rmControl uint8) { + setSecurityDescriptorRMControl(sd, &rmControl) +} + +// DACL returns the security descriptor DACL and whether it was defaulted. The dacl return value may be nil +// if a DACL exists but is an "empty DACL", meaning fully permissive. If the DACL does not exist, err returns +// ERROR_OBJECT_NOT_FOUND. 
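+//
+// Usage sketch (sd is an assumed *SECURITY_DESCRIPTOR):
+//
+//	dacl, defaulted, err := sd.DACL()
+//	switch {
+//	case err == ERROR_OBJECT_NOT_FOUND:
+//		// no DACL at all
+//	case err == nil && dacl == nil:
+//		// empty DACL: fully permissive
+//	}
+//	_ = defaulted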
+func (sd *SECURITY_DESCRIPTOR) DACL() (dacl *ACL, defaulted bool, err error) { + var present bool + err = getSecurityDescriptorDacl(sd, &present, &dacl, &defaulted) + if !present { + err = ERROR_OBJECT_NOT_FOUND + } + return +} + +// SetDACL sets the absolute security descriptor DACL. +func (absoluteSD *SECURITY_DESCRIPTOR) SetDACL(dacl *ACL, present, defaulted bool) error { + return setSecurityDescriptorDacl(absoluteSD, present, dacl, defaulted) +} + +// SACL returns the security descriptor SACL and whether it was defaulted. The sacl return value may be nil +// if a SACL exists but is an "empty SACL", meaning fully permissive. If the SACL does not exist, err returns +// ERROR_OBJECT_NOT_FOUND. +func (sd *SECURITY_DESCRIPTOR) SACL() (sacl *ACL, defaulted bool, err error) { + var present bool + err = getSecurityDescriptorSacl(sd, &present, &sacl, &defaulted) + if !present { + err = ERROR_OBJECT_NOT_FOUND + } + return +} + +// SetSACL sets the absolute security descriptor SACL. +func (absoluteSD *SECURITY_DESCRIPTOR) SetSACL(sacl *ACL, present, defaulted bool) error { + return setSecurityDescriptorSacl(absoluteSD, present, sacl, defaulted) +} + +// Owner returns the security descriptor owner and whether it was defaulted. +func (sd *SECURITY_DESCRIPTOR) Owner() (owner *SID, defaulted bool, err error) { + err = getSecurityDescriptorOwner(sd, &owner, &defaulted) + return +} + +// SetOwner sets the absolute security descriptor owner. +func (absoluteSD *SECURITY_DESCRIPTOR) SetOwner(owner *SID, defaulted bool) error { + return setSecurityDescriptorOwner(absoluteSD, owner, defaulted) +} + +// Group returns the security descriptor group and whether it was defaulted. +func (sd *SECURITY_DESCRIPTOR) Group() (group *SID, defaulted bool, err error) { + err = getSecurityDescriptorGroup(sd, &group, &defaulted) + return +} + +// SetGroup sets the absolute security descriptor owner. +func (absoluteSD *SECURITY_DESCRIPTOR) SetGroup(group *SID, defaulted bool) error { + return setSecurityDescriptorGroup(absoluteSD, group, defaulted) +} + +// Length returns the length of the security descriptor. +func (sd *SECURITY_DESCRIPTOR) Length() uint32 { + return getSecurityDescriptorLength(sd) +} + +// IsValid returns whether the security descriptor is valid. +func (sd *SECURITY_DESCRIPTOR) IsValid() bool { + return isValidSecurityDescriptor(sd) +} + +// String returns the SDDL form of the security descriptor, with a function signature that can be +// used with %v formatting directives. +func (sd *SECURITY_DESCRIPTOR) String() string { + var sddl *uint16 + err := convertSecurityDescriptorToStringSecurityDescriptor(sd, 1, 0xff, &sddl, nil) + if err != nil { + return "" + } + defer LocalFree(Handle(unsafe.Pointer(sddl))) + return UTF16PtrToString(sddl) +} + +// ToAbsolute converts a self-relative security descriptor into an absolute one. +func (selfRelativeSD *SECURITY_DESCRIPTOR) ToAbsolute() (absoluteSD *SECURITY_DESCRIPTOR, err error) { + control, _, err := selfRelativeSD.Control() + if err != nil { + return + } + if control&SE_SELF_RELATIVE == 0 { + err = ERROR_INVALID_PARAMETER + return + } + var absoluteSDSize, daclSize, saclSize, ownerSize, groupSize uint32 + err = makeAbsoluteSD(selfRelativeSD, nil, &absoluteSDSize, + nil, &daclSize, nil, &saclSize, nil, &ownerSize, nil, &groupSize) + switch err { + case ERROR_INSUFFICIENT_BUFFER: + case nil: + // makeAbsoluteSD is expected to fail, but it succeeds. 
+ return nil, ERROR_INTERNAL_ERROR + default: + return nil, err + } + if absoluteSDSize > 0 { + absoluteSD = (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&make([]byte, absoluteSDSize)[0])) + } + var ( + dacl *ACL + sacl *ACL + owner *SID + group *SID + ) + if daclSize > 0 { + dacl = (*ACL)(unsafe.Pointer(&make([]byte, daclSize)[0])) + } + if saclSize > 0 { + sacl = (*ACL)(unsafe.Pointer(&make([]byte, saclSize)[0])) + } + if ownerSize > 0 { + owner = (*SID)(unsafe.Pointer(&make([]byte, ownerSize)[0])) + } + if groupSize > 0 { + group = (*SID)(unsafe.Pointer(&make([]byte, groupSize)[0])) + } + err = makeAbsoluteSD(selfRelativeSD, absoluteSD, &absoluteSDSize, + dacl, &daclSize, sacl, &saclSize, owner, &ownerSize, group, &groupSize) + return +} + +// ToSelfRelative converts an absolute security descriptor into a self-relative one. +func (absoluteSD *SECURITY_DESCRIPTOR) ToSelfRelative() (selfRelativeSD *SECURITY_DESCRIPTOR, err error) { + control, _, err := absoluteSD.Control() + if err != nil { + return + } + if control&SE_SELF_RELATIVE != 0 { + err = ERROR_INVALID_PARAMETER + return + } + var selfRelativeSDSize uint32 + err = makeSelfRelativeSD(absoluteSD, nil, &selfRelativeSDSize) + switch err { + case ERROR_INSUFFICIENT_BUFFER: + case nil: + // makeSelfRelativeSD is expected to fail, but it succeeds. + return nil, ERROR_INTERNAL_ERROR + default: + return nil, err + } + if selfRelativeSDSize > 0 { + selfRelativeSD = (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&make([]byte, selfRelativeSDSize)[0])) + } + err = makeSelfRelativeSD(absoluteSD, selfRelativeSD, &selfRelativeSDSize) + return +} + +func (selfRelativeSD *SECURITY_DESCRIPTOR) copySelfRelativeSecurityDescriptor() *SECURITY_DESCRIPTOR { + sdLen := int(selfRelativeSD.Length()) + const min = int(unsafe.Sizeof(SECURITY_DESCRIPTOR{})) + if sdLen < min { + sdLen = min + } + + var src []byte + h := (*unsafeheader.Slice)(unsafe.Pointer(&src)) + h.Data = unsafe.Pointer(selfRelativeSD) + h.Len = sdLen + h.Cap = sdLen + + const psize = int(unsafe.Sizeof(uintptr(0))) + + var dst []byte + h = (*unsafeheader.Slice)(unsafe.Pointer(&dst)) + alloc := make([]uintptr, (sdLen+psize-1)/psize) + h.Data = (*unsafeheader.Slice)(unsafe.Pointer(&alloc)).Data + h.Len = sdLen + h.Cap = sdLen + + copy(dst, src) + return (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&dst[0])) +} + +// SecurityDescriptorFromString converts an SDDL string describing a security descriptor into a +// self-relative security descriptor object allocated on the Go heap. +func SecurityDescriptorFromString(sddl string) (sd *SECURITY_DESCRIPTOR, err error) { + var winHeapSD *SECURITY_DESCRIPTOR + err = convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &winHeapSD, nil) + if err != nil { + return + } + defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) + return winHeapSD.copySelfRelativeSecurityDescriptor(), nil +} + +// GetSecurityInfo queries the security information for a given handle and returns the self-relative security +// descriptor result on the Go heap. 
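+//
+// Illustrative sketch (handle is assumed to be an open file handle):
+//
+//	sd, err := GetSecurityInfo(handle, SE_FILE_OBJECT, OWNER_SECURITY_INFORMATION|DACL_SECURITY_INFORMATION)
+//	if err == nil {
+//		_ = sd.String() // SDDL form
+//	}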
+func GetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION) (sd *SECURITY_DESCRIPTOR, err error) { + var winHeapSD *SECURITY_DESCRIPTOR + err = getSecurityInfo(handle, objectType, securityInformation, nil, nil, nil, nil, &winHeapSD) + if err != nil { + return + } + defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) + return winHeapSD.copySelfRelativeSecurityDescriptor(), nil +} + +// GetNamedSecurityInfo queries the security information for a given named object and returns the self-relative security +// descriptor result on the Go heap. +func GetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION) (sd *SECURITY_DESCRIPTOR, err error) { + var winHeapSD *SECURITY_DESCRIPTOR + err = getNamedSecurityInfo(objectName, objectType, securityInformation, nil, nil, nil, nil, &winHeapSD) + if err != nil { + return + } + defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) + return winHeapSD.copySelfRelativeSecurityDescriptor(), nil +} + +// BuildSecurityDescriptor makes a new security descriptor using the input trustees, explicit access lists, and +// prior security descriptor to be merged, any of which can be nil, returning the self-relative security descriptor +// result on the Go heap. +func BuildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, accessEntries []EXPLICIT_ACCESS, auditEntries []EXPLICIT_ACCESS, mergedSecurityDescriptor *SECURITY_DESCRIPTOR) (sd *SECURITY_DESCRIPTOR, err error) { + var winHeapSD *SECURITY_DESCRIPTOR + var winHeapSDSize uint32 + var firstAccessEntry *EXPLICIT_ACCESS + if len(accessEntries) > 0 { + firstAccessEntry = &accessEntries[0] + } + var firstAuditEntry *EXPLICIT_ACCESS + if len(auditEntries) > 0 { + firstAuditEntry = &auditEntries[0] + } + err = buildSecurityDescriptor(owner, group, uint32(len(accessEntries)), firstAccessEntry, uint32(len(auditEntries)), firstAuditEntry, mergedSecurityDescriptor, &winHeapSDSize, &winHeapSD) + if err != nil { + return + } + defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) + return winHeapSD.copySelfRelativeSecurityDescriptor(), nil +} + +// NewSecurityDescriptor creates and initializes a new absolute security descriptor. +func NewSecurityDescriptor() (absoluteSD *SECURITY_DESCRIPTOR, err error) { + absoluteSD = &SECURITY_DESCRIPTOR{} + err = initializeSecurityDescriptor(absoluteSD, 1) + return +} + +// ACLFromEntries returns a new ACL on the Go heap containing a list of explicit entries as well as those of another ACL. +// Both explicitEntries and mergedACL are optional and can be nil. +func ACLFromEntries(explicitEntries []EXPLICIT_ACCESS, mergedACL *ACL) (acl *ACL, err error) { + var firstExplicitEntry *EXPLICIT_ACCESS + if len(explicitEntries) > 0 { + firstExplicitEntry = &explicitEntries[0] + } + var winHeapACL *ACL + err = setEntriesInAcl(uint32(len(explicitEntries)), firstExplicitEntry, mergedACL, &winHeapACL) + if err != nil { + return + } + defer LocalFree(Handle(unsafe.Pointer(winHeapACL))) + aclBytes := make([]byte, winHeapACL.aclSize) + copy(aclBytes, (*[(1 << 31) - 1]byte)(unsafe.Pointer(winHeapACL))[:len(aclBytes):len(aclBytes)]) + return (*ACL)(unsafe.Pointer(&aclBytes[0])), nil +} diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go new file mode 100644 index 000000000..c964b6848 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/service.go @@ -0,0 +1,254 @@ +// Copyright 2012 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows +// +build windows + +package windows + +const ( + SC_MANAGER_CONNECT = 1 + SC_MANAGER_CREATE_SERVICE = 2 + SC_MANAGER_ENUMERATE_SERVICE = 4 + SC_MANAGER_LOCK = 8 + SC_MANAGER_QUERY_LOCK_STATUS = 16 + SC_MANAGER_MODIFY_BOOT_CONFIG = 32 + SC_MANAGER_ALL_ACCESS = 0xf003f +) + +const ( + SERVICE_KERNEL_DRIVER = 1 + SERVICE_FILE_SYSTEM_DRIVER = 2 + SERVICE_ADAPTER = 4 + SERVICE_RECOGNIZER_DRIVER = 8 + SERVICE_WIN32_OWN_PROCESS = 16 + SERVICE_WIN32_SHARE_PROCESS = 32 + SERVICE_WIN32 = SERVICE_WIN32_OWN_PROCESS | SERVICE_WIN32_SHARE_PROCESS + SERVICE_INTERACTIVE_PROCESS = 256 + SERVICE_DRIVER = SERVICE_KERNEL_DRIVER | SERVICE_FILE_SYSTEM_DRIVER | SERVICE_RECOGNIZER_DRIVER + SERVICE_TYPE_ALL = SERVICE_WIN32 | SERVICE_ADAPTER | SERVICE_DRIVER | SERVICE_INTERACTIVE_PROCESS + + SERVICE_BOOT_START = 0 + SERVICE_SYSTEM_START = 1 + SERVICE_AUTO_START = 2 + SERVICE_DEMAND_START = 3 + SERVICE_DISABLED = 4 + + SERVICE_ERROR_IGNORE = 0 + SERVICE_ERROR_NORMAL = 1 + SERVICE_ERROR_SEVERE = 2 + SERVICE_ERROR_CRITICAL = 3 + + SC_STATUS_PROCESS_INFO = 0 + + SC_ACTION_NONE = 0 + SC_ACTION_RESTART = 1 + SC_ACTION_REBOOT = 2 + SC_ACTION_RUN_COMMAND = 3 + + SERVICE_STOPPED = 1 + SERVICE_START_PENDING = 2 + SERVICE_STOP_PENDING = 3 + SERVICE_RUNNING = 4 + SERVICE_CONTINUE_PENDING = 5 + SERVICE_PAUSE_PENDING = 6 + SERVICE_PAUSED = 7 + SERVICE_NO_CHANGE = 0xffffffff + + SERVICE_ACCEPT_STOP = 1 + SERVICE_ACCEPT_PAUSE_CONTINUE = 2 + SERVICE_ACCEPT_SHUTDOWN = 4 + SERVICE_ACCEPT_PARAMCHANGE = 8 + SERVICE_ACCEPT_NETBINDCHANGE = 16 + SERVICE_ACCEPT_HARDWAREPROFILECHANGE = 32 + SERVICE_ACCEPT_POWEREVENT = 64 + SERVICE_ACCEPT_SESSIONCHANGE = 128 + SERVICE_ACCEPT_PRESHUTDOWN = 256 + + SERVICE_CONTROL_STOP = 1 + SERVICE_CONTROL_PAUSE = 2 + SERVICE_CONTROL_CONTINUE = 3 + SERVICE_CONTROL_INTERROGATE = 4 + SERVICE_CONTROL_SHUTDOWN = 5 + SERVICE_CONTROL_PARAMCHANGE = 6 + SERVICE_CONTROL_NETBINDADD = 7 + SERVICE_CONTROL_NETBINDREMOVE = 8 + SERVICE_CONTROL_NETBINDENABLE = 9 + SERVICE_CONTROL_NETBINDDISABLE = 10 + SERVICE_CONTROL_DEVICEEVENT = 11 + SERVICE_CONTROL_HARDWAREPROFILECHANGE = 12 + SERVICE_CONTROL_POWEREVENT = 13 + SERVICE_CONTROL_SESSIONCHANGE = 14 + SERVICE_CONTROL_PRESHUTDOWN = 15 + + SERVICE_ACTIVE = 1 + SERVICE_INACTIVE = 2 + SERVICE_STATE_ALL = 3 + + SERVICE_QUERY_CONFIG = 1 + SERVICE_CHANGE_CONFIG = 2 + SERVICE_QUERY_STATUS = 4 + SERVICE_ENUMERATE_DEPENDENTS = 8 + SERVICE_START = 16 + SERVICE_STOP = 32 + SERVICE_PAUSE_CONTINUE = 64 + SERVICE_INTERROGATE = 128 + SERVICE_USER_DEFINED_CONTROL = 256 + SERVICE_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SERVICE_QUERY_CONFIG | SERVICE_CHANGE_CONFIG | SERVICE_QUERY_STATUS | SERVICE_ENUMERATE_DEPENDENTS | SERVICE_START | SERVICE_STOP | SERVICE_PAUSE_CONTINUE | SERVICE_INTERROGATE | SERVICE_USER_DEFINED_CONTROL + + SERVICE_RUNS_IN_SYSTEM_PROCESS = 1 + + SERVICE_CONFIG_DESCRIPTION = 1 + SERVICE_CONFIG_FAILURE_ACTIONS = 2 + SERVICE_CONFIG_DELAYED_AUTO_START_INFO = 3 + SERVICE_CONFIG_FAILURE_ACTIONS_FLAG = 4 + SERVICE_CONFIG_SERVICE_SID_INFO = 5 + SERVICE_CONFIG_REQUIRED_PRIVILEGES_INFO = 6 + SERVICE_CONFIG_PRESHUTDOWN_INFO = 7 + SERVICE_CONFIG_TRIGGER_INFO = 8 + SERVICE_CONFIG_PREFERRED_NODE = 9 + SERVICE_CONFIG_LAUNCH_PROTECTED = 12 + + SERVICE_SID_TYPE_NONE = 0 + SERVICE_SID_TYPE_UNRESTRICTED = 1 + SERVICE_SID_TYPE_RESTRICTED = 2 | SERVICE_SID_TYPE_UNRESTRICTED + + SC_ENUM_PROCESS_INFO = 0 + + SERVICE_NOTIFY_STATUS_CHANGE = 2 + SERVICE_NOTIFY_STOPPED = 
0x00000001 + SERVICE_NOTIFY_START_PENDING = 0x00000002 + SERVICE_NOTIFY_STOP_PENDING = 0x00000004 + SERVICE_NOTIFY_RUNNING = 0x00000008 + SERVICE_NOTIFY_CONTINUE_PENDING = 0x00000010 + SERVICE_NOTIFY_PAUSE_PENDING = 0x00000020 + SERVICE_NOTIFY_PAUSED = 0x00000040 + SERVICE_NOTIFY_CREATED = 0x00000080 + SERVICE_NOTIFY_DELETED = 0x00000100 + SERVICE_NOTIFY_DELETE_PENDING = 0x00000200 + + SC_EVENT_DATABASE_CHANGE = 0 + SC_EVENT_PROPERTY_CHANGE = 1 + SC_EVENT_STATUS_CHANGE = 2 + + SERVICE_START_REASON_DEMAND = 0x00000001 + SERVICE_START_REASON_AUTO = 0x00000002 + SERVICE_START_REASON_TRIGGER = 0x00000004 + SERVICE_START_REASON_RESTART_ON_FAILURE = 0x00000008 + SERVICE_START_REASON_DELAYEDAUTO = 0x00000010 + + SERVICE_DYNAMIC_INFORMATION_LEVEL_START_REASON = 1 +) + +type ENUM_SERVICE_STATUS struct { + ServiceName *uint16 + DisplayName *uint16 + ServiceStatus SERVICE_STATUS +} + +type SERVICE_STATUS struct { + ServiceType uint32 + CurrentState uint32 + ControlsAccepted uint32 + Win32ExitCode uint32 + ServiceSpecificExitCode uint32 + CheckPoint uint32 + WaitHint uint32 +} + +type SERVICE_TABLE_ENTRY struct { + ServiceName *uint16 + ServiceProc uintptr +} + +type QUERY_SERVICE_CONFIG struct { + ServiceType uint32 + StartType uint32 + ErrorControl uint32 + BinaryPathName *uint16 + LoadOrderGroup *uint16 + TagId uint32 + Dependencies *uint16 + ServiceStartName *uint16 + DisplayName *uint16 +} + +type SERVICE_DESCRIPTION struct { + Description *uint16 +} + +type SERVICE_DELAYED_AUTO_START_INFO struct { + IsDelayedAutoStartUp uint32 +} + +type SERVICE_STATUS_PROCESS struct { + ServiceType uint32 + CurrentState uint32 + ControlsAccepted uint32 + Win32ExitCode uint32 + ServiceSpecificExitCode uint32 + CheckPoint uint32 + WaitHint uint32 + ProcessId uint32 + ServiceFlags uint32 +} + +type ENUM_SERVICE_STATUS_PROCESS struct { + ServiceName *uint16 + DisplayName *uint16 + ServiceStatusProcess SERVICE_STATUS_PROCESS +} + +type SERVICE_NOTIFY struct { + Version uint32 + NotifyCallback uintptr + Context uintptr + NotificationStatus uint32 + ServiceStatus SERVICE_STATUS_PROCESS + NotificationTriggered uint32 + ServiceNames *uint16 +} + +type SERVICE_FAILURE_ACTIONS struct { + ResetPeriod uint32 + RebootMsg *uint16 + Command *uint16 + ActionsCount uint32 + Actions *SC_ACTION +} + +type SC_ACTION struct { + Type uint32 + Delay uint32 +} + +type QUERY_SERVICE_LOCK_STATUS struct { + IsLocked uint32 + LockOwner *uint16 + LockDuration uint32 +} + +//sys OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenSCManagerW +//sys CloseServiceHandle(handle Handle) (err error) = advapi32.CloseServiceHandle +//sys CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) [failretval==0] = advapi32.CreateServiceW +//sys OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenServiceW +//sys DeleteService(service Handle) (err error) = advapi32.DeleteService +//sys StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) = advapi32.StartServiceW +//sys QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) = advapi32.QueryServiceStatus +//sys QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, 
bytesNeeded *uint32) (err error) = advapi32.QueryServiceLockStatusW +//sys ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) = advapi32.ControlService +//sys StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) = advapi32.StartServiceCtrlDispatcherW +//sys SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) = advapi32.SetServiceStatus +//sys ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) = advapi32.ChangeServiceConfigW +//sys QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceConfigW +//sys ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) = advapi32.ChangeServiceConfig2W +//sys QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceConfig2W +//sys EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) = advapi32.EnumServicesStatusExW +//sys QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceStatusEx +//sys NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) = advapi32.NotifyServiceStatusChangeW +//sys SubscribeServiceChangeNotifications(service Handle, eventType uint32, callback uintptr, callbackCtx uintptr, subscription *uintptr) (ret error) = sechost.SubscribeServiceChangeNotifications? +//sys UnsubscribeServiceChangeNotifications(subscription uintptr) = sechost.UnsubscribeServiceChangeNotifications? +//sys RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, context uintptr) (handle Handle, err error) = advapi32.RegisterServiceCtrlHandlerExW +//sys QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInfo unsafe.Pointer) (err error) = advapi32.QueryServiceDynamicInformation? +//sys EnumDependentServices(service Handle, activityState uint32, services *ENUM_SERVICE_STATUS, buffSize uint32, bytesNeeded *uint32, servicesReturned *uint32) (err error) = advapi32.EnumDependentServicesW diff --git a/vendor/golang.org/x/sys/windows/setupapi_windows.go b/vendor/golang.org/x/sys/windows/setupapi_windows.go new file mode 100644 index 000000000..f8126482f --- /dev/null +++ b/vendor/golang.org/x/sys/windows/setupapi_windows.go @@ -0,0 +1,1425 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +import ( + "encoding/binary" + "errors" + "fmt" + "runtime" + "strings" + "syscall" + "unsafe" +) + +// This file contains functions that wrap SetupAPI.dll and CfgMgr32.dll, +// core system functions for managing hardware devices, drivers, and the PnP tree. 
+// Information about these APIs can be found at: +// https://docs.microsoft.com/en-us/windows-hardware/drivers/install/setupapi +// https://docs.microsoft.com/en-us/windows/win32/devinst/cfgmgr32- + +const ( + ERROR_EXPECTED_SECTION_NAME Errno = 0x20000000 | 0xC0000000 | 0 + ERROR_BAD_SECTION_NAME_LINE Errno = 0x20000000 | 0xC0000000 | 1 + ERROR_SECTION_NAME_TOO_LONG Errno = 0x20000000 | 0xC0000000 | 2 + ERROR_GENERAL_SYNTAX Errno = 0x20000000 | 0xC0000000 | 3 + ERROR_WRONG_INF_STYLE Errno = 0x20000000 | 0xC0000000 | 0x100 + ERROR_SECTION_NOT_FOUND Errno = 0x20000000 | 0xC0000000 | 0x101 + ERROR_LINE_NOT_FOUND Errno = 0x20000000 | 0xC0000000 | 0x102 + ERROR_NO_BACKUP Errno = 0x20000000 | 0xC0000000 | 0x103 + ERROR_NO_ASSOCIATED_CLASS Errno = 0x20000000 | 0xC0000000 | 0x200 + ERROR_CLASS_MISMATCH Errno = 0x20000000 | 0xC0000000 | 0x201 + ERROR_DUPLICATE_FOUND Errno = 0x20000000 | 0xC0000000 | 0x202 + ERROR_NO_DRIVER_SELECTED Errno = 0x20000000 | 0xC0000000 | 0x203 + ERROR_KEY_DOES_NOT_EXIST Errno = 0x20000000 | 0xC0000000 | 0x204 + ERROR_INVALID_DEVINST_NAME Errno = 0x20000000 | 0xC0000000 | 0x205 + ERROR_INVALID_CLASS Errno = 0x20000000 | 0xC0000000 | 0x206 + ERROR_DEVINST_ALREADY_EXISTS Errno = 0x20000000 | 0xC0000000 | 0x207 + ERROR_DEVINFO_NOT_REGISTERED Errno = 0x20000000 | 0xC0000000 | 0x208 + ERROR_INVALID_REG_PROPERTY Errno = 0x20000000 | 0xC0000000 | 0x209 + ERROR_NO_INF Errno = 0x20000000 | 0xC0000000 | 0x20A + ERROR_NO_SUCH_DEVINST Errno = 0x20000000 | 0xC0000000 | 0x20B + ERROR_CANT_LOAD_CLASS_ICON Errno = 0x20000000 | 0xC0000000 | 0x20C + ERROR_INVALID_CLASS_INSTALLER Errno = 0x20000000 | 0xC0000000 | 0x20D + ERROR_DI_DO_DEFAULT Errno = 0x20000000 | 0xC0000000 | 0x20E + ERROR_DI_NOFILECOPY Errno = 0x20000000 | 0xC0000000 | 0x20F + ERROR_INVALID_HWPROFILE Errno = 0x20000000 | 0xC0000000 | 0x210 + ERROR_NO_DEVICE_SELECTED Errno = 0x20000000 | 0xC0000000 | 0x211 + ERROR_DEVINFO_LIST_LOCKED Errno = 0x20000000 | 0xC0000000 | 0x212 + ERROR_DEVINFO_DATA_LOCKED Errno = 0x20000000 | 0xC0000000 | 0x213 + ERROR_DI_BAD_PATH Errno = 0x20000000 | 0xC0000000 | 0x214 + ERROR_NO_CLASSINSTALL_PARAMS Errno = 0x20000000 | 0xC0000000 | 0x215 + ERROR_FILEQUEUE_LOCKED Errno = 0x20000000 | 0xC0000000 | 0x216 + ERROR_BAD_SERVICE_INSTALLSECT Errno = 0x20000000 | 0xC0000000 | 0x217 + ERROR_NO_CLASS_DRIVER_LIST Errno = 0x20000000 | 0xC0000000 | 0x218 + ERROR_NO_ASSOCIATED_SERVICE Errno = 0x20000000 | 0xC0000000 | 0x219 + ERROR_NO_DEFAULT_DEVICE_INTERFACE Errno = 0x20000000 | 0xC0000000 | 0x21A + ERROR_DEVICE_INTERFACE_ACTIVE Errno = 0x20000000 | 0xC0000000 | 0x21B + ERROR_DEVICE_INTERFACE_REMOVED Errno = 0x20000000 | 0xC0000000 | 0x21C + ERROR_BAD_INTERFACE_INSTALLSECT Errno = 0x20000000 | 0xC0000000 | 0x21D + ERROR_NO_SUCH_INTERFACE_CLASS Errno = 0x20000000 | 0xC0000000 | 0x21E + ERROR_INVALID_REFERENCE_STRING Errno = 0x20000000 | 0xC0000000 | 0x21F + ERROR_INVALID_MACHINENAME Errno = 0x20000000 | 0xC0000000 | 0x220 + ERROR_REMOTE_COMM_FAILURE Errno = 0x20000000 | 0xC0000000 | 0x221 + ERROR_MACHINE_UNAVAILABLE Errno = 0x20000000 | 0xC0000000 | 0x222 + ERROR_NO_CONFIGMGR_SERVICES Errno = 0x20000000 | 0xC0000000 | 0x223 + ERROR_INVALID_PROPPAGE_PROVIDER Errno = 0x20000000 | 0xC0000000 | 0x224 + ERROR_NO_SUCH_DEVICE_INTERFACE Errno = 0x20000000 | 0xC0000000 | 0x225 + ERROR_DI_POSTPROCESSING_REQUIRED Errno = 0x20000000 | 0xC0000000 | 0x226 + ERROR_INVALID_COINSTALLER Errno = 0x20000000 | 0xC0000000 | 0x227 + ERROR_NO_COMPAT_DRIVERS Errno = 0x20000000 | 0xC0000000 | 0x228 + ERROR_NO_DEVICE_ICON Errno = 
0x20000000 | 0xC0000000 | 0x229 + ERROR_INVALID_INF_LOGCONFIG Errno = 0x20000000 | 0xC0000000 | 0x22A + ERROR_DI_DONT_INSTALL Errno = 0x20000000 | 0xC0000000 | 0x22B + ERROR_INVALID_FILTER_DRIVER Errno = 0x20000000 | 0xC0000000 | 0x22C + ERROR_NON_WINDOWS_NT_DRIVER Errno = 0x20000000 | 0xC0000000 | 0x22D + ERROR_NON_WINDOWS_DRIVER Errno = 0x20000000 | 0xC0000000 | 0x22E + ERROR_NO_CATALOG_FOR_OEM_INF Errno = 0x20000000 | 0xC0000000 | 0x22F + ERROR_DEVINSTALL_QUEUE_NONNATIVE Errno = 0x20000000 | 0xC0000000 | 0x230 + ERROR_NOT_DISABLEABLE Errno = 0x20000000 | 0xC0000000 | 0x231 + ERROR_CANT_REMOVE_DEVINST Errno = 0x20000000 | 0xC0000000 | 0x232 + ERROR_INVALID_TARGET Errno = 0x20000000 | 0xC0000000 | 0x233 + ERROR_DRIVER_NONNATIVE Errno = 0x20000000 | 0xC0000000 | 0x234 + ERROR_IN_WOW64 Errno = 0x20000000 | 0xC0000000 | 0x235 + ERROR_SET_SYSTEM_RESTORE_POINT Errno = 0x20000000 | 0xC0000000 | 0x236 + ERROR_SCE_DISABLED Errno = 0x20000000 | 0xC0000000 | 0x238 + ERROR_UNKNOWN_EXCEPTION Errno = 0x20000000 | 0xC0000000 | 0x239 + ERROR_PNP_REGISTRY_ERROR Errno = 0x20000000 | 0xC0000000 | 0x23A + ERROR_REMOTE_REQUEST_UNSUPPORTED Errno = 0x20000000 | 0xC0000000 | 0x23B + ERROR_NOT_AN_INSTALLED_OEM_INF Errno = 0x20000000 | 0xC0000000 | 0x23C + ERROR_INF_IN_USE_BY_DEVICES Errno = 0x20000000 | 0xC0000000 | 0x23D + ERROR_DI_FUNCTION_OBSOLETE Errno = 0x20000000 | 0xC0000000 | 0x23E + ERROR_NO_AUTHENTICODE_CATALOG Errno = 0x20000000 | 0xC0000000 | 0x23F + ERROR_AUTHENTICODE_DISALLOWED Errno = 0x20000000 | 0xC0000000 | 0x240 + ERROR_AUTHENTICODE_TRUSTED_PUBLISHER Errno = 0x20000000 | 0xC0000000 | 0x241 + ERROR_AUTHENTICODE_TRUST_NOT_ESTABLISHED Errno = 0x20000000 | 0xC0000000 | 0x242 + ERROR_AUTHENTICODE_PUBLISHER_NOT_TRUSTED Errno = 0x20000000 | 0xC0000000 | 0x243 + ERROR_SIGNATURE_OSATTRIBUTE_MISMATCH Errno = 0x20000000 | 0xC0000000 | 0x244 + ERROR_ONLY_VALIDATE_VIA_AUTHENTICODE Errno = 0x20000000 | 0xC0000000 | 0x245 + ERROR_DEVICE_INSTALLER_NOT_READY Errno = 0x20000000 | 0xC0000000 | 0x246 + ERROR_DRIVER_STORE_ADD_FAILED Errno = 0x20000000 | 0xC0000000 | 0x247 + ERROR_DEVICE_INSTALL_BLOCKED Errno = 0x20000000 | 0xC0000000 | 0x248 + ERROR_DRIVER_INSTALL_BLOCKED Errno = 0x20000000 | 0xC0000000 | 0x249 + ERROR_WRONG_INF_TYPE Errno = 0x20000000 | 0xC0000000 | 0x24A + ERROR_FILE_HASH_NOT_IN_CATALOG Errno = 0x20000000 | 0xC0000000 | 0x24B + ERROR_DRIVER_STORE_DELETE_FAILED Errno = 0x20000000 | 0xC0000000 | 0x24C + ERROR_UNRECOVERABLE_STACK_OVERFLOW Errno = 0x20000000 | 0xC0000000 | 0x300 + EXCEPTION_SPAPI_UNRECOVERABLE_STACK_OVERFLOW Errno = ERROR_UNRECOVERABLE_STACK_OVERFLOW + ERROR_NO_DEFAULT_INTERFACE_DEVICE Errno = ERROR_NO_DEFAULT_DEVICE_INTERFACE + ERROR_INTERFACE_DEVICE_ACTIVE Errno = ERROR_DEVICE_INTERFACE_ACTIVE + ERROR_INTERFACE_DEVICE_REMOVED Errno = ERROR_DEVICE_INTERFACE_REMOVED + ERROR_NO_SUCH_INTERFACE_DEVICE Errno = ERROR_NO_SUCH_DEVICE_INTERFACE +) + +const ( + MAX_DEVICE_ID_LEN = 200 + MAX_DEVNODE_ID_LEN = MAX_DEVICE_ID_LEN + MAX_GUID_STRING_LEN = 39 // 38 chars + terminator null + MAX_CLASS_NAME_LEN = 32 + MAX_PROFILE_LEN = 80 + MAX_CONFIG_VALUE = 9999 + MAX_INSTANCE_VALUE = 9999 + CONFIGMG_VERSION = 0x0400 +) + +// Maximum string length constants +const ( + LINE_LEN = 256 // Windows 9x-compatible maximum for displayable strings coming from a device INF. + MAX_INF_STRING_LENGTH = 4096 // Actual maximum size of an INF string (including string substitutions). + MAX_INF_SECTION_NAME_LENGTH = 255 // For Windows 9x compatibility, INF section names should be constrained to 32 characters. 
+ MAX_TITLE_LEN = 60 + MAX_INSTRUCTION_LEN = 256 + MAX_LABEL_LEN = 30 + MAX_SERVICE_NAME_LEN = 256 + MAX_SUBTITLE_LEN = 256 +) + +const ( + // SP_MAX_MACHINENAME_LENGTH defines maximum length of a machine name in the format expected by ConfigMgr32 CM_Connect_Machine (i.e., "\\\\MachineName\0"). + SP_MAX_MACHINENAME_LENGTH = MAX_PATH + 3 +) + +// HSPFILEQ is type for setup file queue +type HSPFILEQ uintptr + +// DevInfo holds reference to device information set +type DevInfo Handle + +// DEVINST is a handle usually recognized by cfgmgr32 APIs +type DEVINST uint32 + +// DevInfoData is a device information structure (references a device instance that is a member of a device information set) +type DevInfoData struct { + size uint32 + ClassGUID GUID + DevInst DEVINST + _ uintptr +} + +// DevInfoListDetailData is a structure for detailed information on a device information set (used for SetupDiGetDeviceInfoListDetail which supersedes the functionality of SetupDiGetDeviceInfoListClass). +type DevInfoListDetailData struct { + size uint32 // Use unsafeSizeOf method + ClassGUID GUID + RemoteMachineHandle Handle + remoteMachineName [SP_MAX_MACHINENAME_LENGTH]uint16 +} + +func (*DevInfoListDetailData) unsafeSizeOf() uint32 { + if unsafe.Sizeof(uintptr(0)) == 4 { + // Windows declares this with pshpack1.h + return uint32(unsafe.Offsetof(DevInfoListDetailData{}.remoteMachineName) + unsafe.Sizeof(DevInfoListDetailData{}.remoteMachineName)) + } + return uint32(unsafe.Sizeof(DevInfoListDetailData{})) +} + +func (data *DevInfoListDetailData) RemoteMachineName() string { + return UTF16ToString(data.remoteMachineName[:]) +} + +func (data *DevInfoListDetailData) SetRemoteMachineName(remoteMachineName string) error { + str, err := UTF16FromString(remoteMachineName) + if err != nil { + return err + } + copy(data.remoteMachineName[:], str) + return nil +} + +// DI_FUNCTION is function type for device installer +type DI_FUNCTION uint32 + +const ( + DIF_SELECTDEVICE DI_FUNCTION = 0x00000001 + DIF_INSTALLDEVICE DI_FUNCTION = 0x00000002 + DIF_ASSIGNRESOURCES DI_FUNCTION = 0x00000003 + DIF_PROPERTIES DI_FUNCTION = 0x00000004 + DIF_REMOVE DI_FUNCTION = 0x00000005 + DIF_FIRSTTIMESETUP DI_FUNCTION = 0x00000006 + DIF_FOUNDDEVICE DI_FUNCTION = 0x00000007 + DIF_SELECTCLASSDRIVERS DI_FUNCTION = 0x00000008 + DIF_VALIDATECLASSDRIVERS DI_FUNCTION = 0x00000009 + DIF_INSTALLCLASSDRIVERS DI_FUNCTION = 0x0000000A + DIF_CALCDISKSPACE DI_FUNCTION = 0x0000000B + DIF_DESTROYPRIVATEDATA DI_FUNCTION = 0x0000000C + DIF_VALIDATEDRIVER DI_FUNCTION = 0x0000000D + DIF_DETECT DI_FUNCTION = 0x0000000F + DIF_INSTALLWIZARD DI_FUNCTION = 0x00000010 + DIF_DESTROYWIZARDDATA DI_FUNCTION = 0x00000011 + DIF_PROPERTYCHANGE DI_FUNCTION = 0x00000012 + DIF_ENABLECLASS DI_FUNCTION = 0x00000013 + DIF_DETECTVERIFY DI_FUNCTION = 0x00000014 + DIF_INSTALLDEVICEFILES DI_FUNCTION = 0x00000015 + DIF_UNREMOVE DI_FUNCTION = 0x00000016 + DIF_SELECTBESTCOMPATDRV DI_FUNCTION = 0x00000017 + DIF_ALLOW_INSTALL DI_FUNCTION = 0x00000018 + DIF_REGISTERDEVICE DI_FUNCTION = 0x00000019 + DIF_NEWDEVICEWIZARD_PRESELECT DI_FUNCTION = 0x0000001A + DIF_NEWDEVICEWIZARD_SELECT DI_FUNCTION = 0x0000001B + DIF_NEWDEVICEWIZARD_PREANALYZE DI_FUNCTION = 0x0000001C + DIF_NEWDEVICEWIZARD_POSTANALYZE DI_FUNCTION = 0x0000001D + DIF_NEWDEVICEWIZARD_FINISHINSTALL DI_FUNCTION = 0x0000001E + DIF_INSTALLINTERFACES DI_FUNCTION = 0x00000020 + DIF_DETECTCANCEL DI_FUNCTION = 0x00000021 + DIF_REGISTER_COINSTALLERS DI_FUNCTION = 0x00000022 + DIF_ADDPROPERTYPAGE_ADVANCED DI_FUNCTION = 0x00000023 + 
DIF_ADDPROPERTYPAGE_BASIC DI_FUNCTION = 0x00000024 + DIF_TROUBLESHOOTER DI_FUNCTION = 0x00000026 + DIF_POWERMESSAGEWAKE DI_FUNCTION = 0x00000027 + DIF_ADDREMOTEPROPERTYPAGE_ADVANCED DI_FUNCTION = 0x00000028 + DIF_UPDATEDRIVER_UI DI_FUNCTION = 0x00000029 + DIF_FINISHINSTALL_ACTION DI_FUNCTION = 0x0000002A +) + +// DevInstallParams is device installation parameters structure (associated with a particular device information element, or globally with a device information set) +type DevInstallParams struct { + size uint32 + Flags DI_FLAGS + FlagsEx DI_FLAGSEX + hwndParent uintptr + InstallMsgHandler uintptr + InstallMsgHandlerContext uintptr + FileQueue HSPFILEQ + _ uintptr + _ uint32 + driverPath [MAX_PATH]uint16 +} + +func (params *DevInstallParams) DriverPath() string { + return UTF16ToString(params.driverPath[:]) +} + +func (params *DevInstallParams) SetDriverPath(driverPath string) error { + str, err := UTF16FromString(driverPath) + if err != nil { + return err + } + copy(params.driverPath[:], str) + return nil +} + +// DI_FLAGS is SP_DEVINSTALL_PARAMS.Flags values +type DI_FLAGS uint32 + +const ( + // Flags for choosing a device + DI_SHOWOEM DI_FLAGS = 0x00000001 // support Other... button + DI_SHOWCOMPAT DI_FLAGS = 0x00000002 // show compatibility list + DI_SHOWCLASS DI_FLAGS = 0x00000004 // show class list + DI_SHOWALL DI_FLAGS = 0x00000007 // both class & compat list shown + DI_NOVCP DI_FLAGS = 0x00000008 // don't create a new copy queue--use caller-supplied FileQueue + DI_DIDCOMPAT DI_FLAGS = 0x00000010 // Searched for compatible devices + DI_DIDCLASS DI_FLAGS = 0x00000020 // Searched for class devices + DI_AUTOASSIGNRES DI_FLAGS = 0x00000040 // No UI for resources if possible + + // Flags returned by DiInstallDevice to indicate need to reboot/restart + DI_NEEDRESTART DI_FLAGS = 0x00000080 // Reboot required to take effect + DI_NEEDREBOOT DI_FLAGS = 0x00000100 // "" + + // Flags for device installation + DI_NOBROWSE DI_FLAGS = 0x00000200 // no Browse... in InsertDisk + + // Flags set by DiBuildDriverInfoList + DI_MULTMFGS DI_FLAGS = 0x00000400 // Set if multiple manufacturers in class driver list + + // Flag indicates that device is disabled + DI_DISABLED DI_FLAGS = 0x00000800 // Set if device disabled + + // Flags for Device/Class Properties + DI_GENERALPAGE_ADDED DI_FLAGS = 0x00001000 + DI_RESOURCEPAGE_ADDED DI_FLAGS = 0x00002000 + + // Flag to indicate the setting properties for this Device (or class) caused a change so the Dev Mgr UI probably needs to be updated. + DI_PROPERTIES_CHANGE DI_FLAGS = 0x00004000 + + // Flag to indicate that the sorting from the INF file should be used. + DI_INF_IS_SORTED DI_FLAGS = 0x00008000 + + // Flag to indicate that only the INF specified by SP_DEVINSTALL_PARAMS.DriverPath should be searched. + DI_ENUMSINGLEINF DI_FLAGS = 0x00010000 + + // Flag that prevents ConfigMgr from removing/re-enumerating devices during device + // registration, installation, and deletion. + DI_DONOTCALLCONFIGMG DI_FLAGS = 0x00020000 + + // The following flag can be used to install a device disabled + DI_INSTALLDISABLED DI_FLAGS = 0x00040000 + + // Flag that causes SetupDiBuildDriverInfoList to build a device's compatible driver + // list from its existing class driver list, instead of the normal INF search. + DI_COMPAT_FROM_CLASS DI_FLAGS = 0x00080000 + + // This flag is set if the Class Install params should be used. 
+ DI_CLASSINSTALLPARAMS DI_FLAGS = 0x00100000 + + // This flag is set if the caller of DiCallClassInstaller does NOT want the internal default action performed if the Class installer returns ERROR_DI_DO_DEFAULT. + DI_NODI_DEFAULTACTION DI_FLAGS = 0x00200000 + + // Flags for device installation + DI_QUIETINSTALL DI_FLAGS = 0x00800000 // don't confuse the user with questions or excess info + DI_NOFILECOPY DI_FLAGS = 0x01000000 // No file Copy necessary + DI_FORCECOPY DI_FLAGS = 0x02000000 // Force files to be copied from install path + DI_DRIVERPAGE_ADDED DI_FLAGS = 0x04000000 // Prop provider added Driver page. + DI_USECI_SELECTSTRINGS DI_FLAGS = 0x08000000 // Use Class Installer Provided strings in the Select Device Dlg + DI_OVERRIDE_INFFLAGS DI_FLAGS = 0x10000000 // Override INF flags + DI_PROPS_NOCHANGEUSAGE DI_FLAGS = 0x20000000 // No Enable/Disable in General Props + + DI_NOSELECTICONS DI_FLAGS = 0x40000000 // No small icons in select device dialogs + + DI_NOWRITE_IDS DI_FLAGS = 0x80000000 // Don't write HW & Compat IDs on install +) + +// DI_FLAGSEX is SP_DEVINSTALL_PARAMS.FlagsEx values +type DI_FLAGSEX uint32 + +const ( + DI_FLAGSEX_CI_FAILED DI_FLAGSEX = 0x00000004 // Failed to Load/Call class installer + DI_FLAGSEX_FINISHINSTALL_ACTION DI_FLAGSEX = 0x00000008 // Class/co-installer wants to get a DIF_FINISH_INSTALL action in client context. + DI_FLAGSEX_DIDINFOLIST DI_FLAGSEX = 0x00000010 // Did the Class Info List + DI_FLAGSEX_DIDCOMPATINFO DI_FLAGSEX = 0x00000020 // Did the Compat Info List + DI_FLAGSEX_FILTERCLASSES DI_FLAGSEX = 0x00000040 + DI_FLAGSEX_SETFAILEDINSTALL DI_FLAGSEX = 0x00000080 + DI_FLAGSEX_DEVICECHANGE DI_FLAGSEX = 0x00000100 + DI_FLAGSEX_ALWAYSWRITEIDS DI_FLAGSEX = 0x00000200 + DI_FLAGSEX_PROPCHANGE_PENDING DI_FLAGSEX = 0x00000400 // One or more device property sheets have had changes made to them, and need to have a DIF_PROPERTYCHANGE occur. + DI_FLAGSEX_ALLOWEXCLUDEDDRVS DI_FLAGSEX = 0x00000800 + DI_FLAGSEX_NOUIONQUERYREMOVE DI_FLAGSEX = 0x00001000 + DI_FLAGSEX_USECLASSFORCOMPAT DI_FLAGSEX = 0x00002000 // Use the device's class when building compat drv list. (Ignored if DI_COMPAT_FROM_CLASS flag is specified.) + DI_FLAGSEX_NO_DRVREG_MODIFY DI_FLAGSEX = 0x00008000 // Don't run AddReg and DelReg for device's software (driver) key. + DI_FLAGSEX_IN_SYSTEM_SETUP DI_FLAGSEX = 0x00010000 // Installation is occurring during initial system setup. + DI_FLAGSEX_INET_DRIVER DI_FLAGSEX = 0x00020000 // Driver came from Windows Update + DI_FLAGSEX_APPENDDRIVERLIST DI_FLAGSEX = 0x00040000 // Cause SetupDiBuildDriverInfoList to append a new driver list to an existing list. + DI_FLAGSEX_PREINSTALLBACKUP DI_FLAGSEX = 0x00080000 // not used + DI_FLAGSEX_BACKUPONREPLACE DI_FLAGSEX = 0x00100000 // not used + DI_FLAGSEX_DRIVERLIST_FROM_URL DI_FLAGSEX = 0x00200000 // build driver list from INF(s) retrieved from URL specified in SP_DEVINSTALL_PARAMS.DriverPath (empty string means Windows Update website) + DI_FLAGSEX_EXCLUDE_OLD_INET_DRIVERS DI_FLAGSEX = 0x00800000 // Don't include old Internet drivers when building a driver list. Ignored on Windows Vista and later. + DI_FLAGSEX_POWERPAGE_ADDED DI_FLAGSEX = 0x01000000 // class installer added their own power page + DI_FLAGSEX_FILTERSIMILARDRIVERS DI_FLAGSEX = 0x02000000 // only include similar drivers in class list + DI_FLAGSEX_INSTALLEDDRIVER DI_FLAGSEX = 0x04000000 // only add the installed driver to the class or compat driver list. 
Used in calls to SetupDiBuildDriverInfoList + DI_FLAGSEX_NO_CLASSLIST_NODE_MERGE DI_FLAGSEX = 0x08000000 // Don't remove identical driver nodes from the class list + DI_FLAGSEX_ALTPLATFORM_DRVSEARCH DI_FLAGSEX = 0x10000000 // Build driver list based on alternate platform information specified in associated file queue + DI_FLAGSEX_RESTART_DEVICE_ONLY DI_FLAGSEX = 0x20000000 // only restart the device drivers are being installed on as opposed to restarting all devices using those drivers. + DI_FLAGSEX_RECURSIVESEARCH DI_FLAGSEX = 0x40000000 // Tell SetupDiBuildDriverInfoList to do a recursive search + DI_FLAGSEX_SEARCH_PUBLISHED_INFS DI_FLAGSEX = 0x80000000 // Tell SetupDiBuildDriverInfoList to do a "published INF" search +) + +// ClassInstallHeader is the first member of any class install parameters structure. It contains the device installation request code that defines the format of the rest of the install parameters structure. +type ClassInstallHeader struct { + size uint32 + InstallFunction DI_FUNCTION +} + +func MakeClassInstallHeader(installFunction DI_FUNCTION) *ClassInstallHeader { + hdr := &ClassInstallHeader{InstallFunction: installFunction} + hdr.size = uint32(unsafe.Sizeof(*hdr)) + return hdr +} + +// DICS_STATE specifies values indicating a change in a device's state +type DICS_STATE uint32 + +const ( + DICS_ENABLE DICS_STATE = 0x00000001 // The device is being enabled. + DICS_DISABLE DICS_STATE = 0x00000002 // The device is being disabled. + DICS_PROPCHANGE DICS_STATE = 0x00000003 // The properties of the device have changed. + DICS_START DICS_STATE = 0x00000004 // The device is being started (if the request is for the currently active hardware profile). + DICS_STOP DICS_STATE = 0x00000005 // The device is being stopped. The driver stack will be unloaded and the CSCONFIGFLAG_DO_NOT_START flag will be set for the device. +) + +// DICS_FLAG specifies the scope of a device property change +type DICS_FLAG uint32 + +const ( + DICS_FLAG_GLOBAL DICS_FLAG = 0x00000001 // make change in all hardware profiles + DICS_FLAG_CONFIGSPECIFIC DICS_FLAG = 0x00000002 // make change in specified profile only + DICS_FLAG_CONFIGGENERAL DICS_FLAG = 0x00000004 // 1 or more hardware profile-specific changes to follow (obsolete) +) + +// PropChangeParams is a structure corresponding to a DIF_PROPERTYCHANGE install function. +type PropChangeParams struct { + ClassInstallHeader ClassInstallHeader + StateChange DICS_STATE + Scope DICS_FLAG + HwProfile uint32 +} + +// DI_REMOVEDEVICE specifies the scope of the device removal +type DI_REMOVEDEVICE uint32 + +const ( + DI_REMOVEDEVICE_GLOBAL DI_REMOVEDEVICE = 0x00000001 // Make this change in all hardware profiles. Remove information about the device from the registry. + DI_REMOVEDEVICE_CONFIGSPECIFIC DI_REMOVEDEVICE = 0x00000002 // Make this change to only the hardware profile specified by HwProfile. this flag only applies to root-enumerated devices. When Windows removes the device from the last hardware profile in which it was configured, Windows performs a global removal. +) + +// RemoveDeviceParams is a structure corresponding to a DIF_REMOVE install function. 
+type RemoveDeviceParams struct { + ClassInstallHeader ClassInstallHeader + Scope DI_REMOVEDEVICE + HwProfile uint32 +} + +// DrvInfoData is driver information structure (member of a driver info list that may be associated with a particular device instance, or (globally) with a device information set) +type DrvInfoData struct { + size uint32 + DriverType uint32 + _ uintptr + description [LINE_LEN]uint16 + mfgName [LINE_LEN]uint16 + providerName [LINE_LEN]uint16 + DriverDate Filetime + DriverVersion uint64 +} + +func (data *DrvInfoData) Description() string { + return UTF16ToString(data.description[:]) +} + +func (data *DrvInfoData) SetDescription(description string) error { + str, err := UTF16FromString(description) + if err != nil { + return err + } + copy(data.description[:], str) + return nil +} + +func (data *DrvInfoData) MfgName() string { + return UTF16ToString(data.mfgName[:]) +} + +func (data *DrvInfoData) SetMfgName(mfgName string) error { + str, err := UTF16FromString(mfgName) + if err != nil { + return err + } + copy(data.mfgName[:], str) + return nil +} + +func (data *DrvInfoData) ProviderName() string { + return UTF16ToString(data.providerName[:]) +} + +func (data *DrvInfoData) SetProviderName(providerName string) error { + str, err := UTF16FromString(providerName) + if err != nil { + return err + } + copy(data.providerName[:], str) + return nil +} + +// IsNewer method returns true if DrvInfoData date and version is newer than supplied parameters. +func (data *DrvInfoData) IsNewer(driverDate Filetime, driverVersion uint64) bool { + if data.DriverDate.HighDateTime > driverDate.HighDateTime { + return true + } + if data.DriverDate.HighDateTime < driverDate.HighDateTime { + return false + } + + if data.DriverDate.LowDateTime > driverDate.LowDateTime { + return true + } + if data.DriverDate.LowDateTime < driverDate.LowDateTime { + return false + } + + if data.DriverVersion > driverVersion { + return true + } + if data.DriverVersion < driverVersion { + return false + } + + return false +} + +// DrvInfoDetailData is driver information details structure (provides detailed information about a particular driver information structure) +type DrvInfoDetailData struct { + size uint32 // Use unsafeSizeOf method + InfDate Filetime + compatIDsOffset uint32 + compatIDsLength uint32 + _ uintptr + sectionName [LINE_LEN]uint16 + infFileName [MAX_PATH]uint16 + drvDescription [LINE_LEN]uint16 + hardwareID [1]uint16 +} + +func (*DrvInfoDetailData) unsafeSizeOf() uint32 { + if unsafe.Sizeof(uintptr(0)) == 4 { + // Windows declares this with pshpack1.h + return uint32(unsafe.Offsetof(DrvInfoDetailData{}.hardwareID) + unsafe.Sizeof(DrvInfoDetailData{}.hardwareID)) + } + return uint32(unsafe.Sizeof(DrvInfoDetailData{})) +} + +func (data *DrvInfoDetailData) SectionName() string { + return UTF16ToString(data.sectionName[:]) +} + +func (data *DrvInfoDetailData) InfFileName() string { + return UTF16ToString(data.infFileName[:]) +} + +func (data *DrvInfoDetailData) DrvDescription() string { + return UTF16ToString(data.drvDescription[:]) +} + +func (data *DrvInfoDetailData) HardwareID() string { + if data.compatIDsOffset > 1 { + bufW := data.getBuf() + return UTF16ToString(bufW[:wcslen(bufW)]) + } + + return "" +} + +func (data *DrvInfoDetailData) CompatIDs() []string { + a := make([]string, 0) + + if data.compatIDsLength > 0 { + bufW := data.getBuf() + bufW = bufW[data.compatIDsOffset : data.compatIDsOffset+data.compatIDsLength] + for i := 0; i < len(bufW); { + j := i + wcslen(bufW[i:]) + if i < j { + a = 
append(a, UTF16ToString(bufW[i:j])) + } + i = j + 1 + } + } + + return a +} + +func (data *DrvInfoDetailData) getBuf() []uint16 { + len := (data.size - uint32(unsafe.Offsetof(data.hardwareID))) / 2 + sl := struct { + addr *uint16 + len int + cap int + }{&data.hardwareID[0], int(len), int(len)} + return *(*[]uint16)(unsafe.Pointer(&sl)) +} + +// IsCompatible method tests if given hardware ID matches the driver or is listed on the compatible ID list. +func (data *DrvInfoDetailData) IsCompatible(hwid string) bool { + hwidLC := strings.ToLower(hwid) + if strings.ToLower(data.HardwareID()) == hwidLC { + return true + } + a := data.CompatIDs() + for i := range a { + if strings.ToLower(a[i]) == hwidLC { + return true + } + } + + return false +} + +// DICD flags control SetupDiCreateDeviceInfo +type DICD uint32 + +const ( + DICD_GENERATE_ID DICD = 0x00000001 + DICD_INHERIT_CLASSDRVS DICD = 0x00000002 +) + +// SUOI flags control SetupUninstallOEMInf +type SUOI uint32 + +const ( + SUOI_FORCEDELETE SUOI = 0x0001 +) + +// SPDIT flags to distinguish between class drivers and +// device drivers. (Passed in 'DriverType' parameter of +// driver information list APIs) +type SPDIT uint32 + +const ( + SPDIT_NODRIVER SPDIT = 0x00000000 + SPDIT_CLASSDRIVER SPDIT = 0x00000001 + SPDIT_COMPATDRIVER SPDIT = 0x00000002 +) + +// DIGCF flags control what is included in the device information set built by SetupDiGetClassDevs +type DIGCF uint32 + +const ( + DIGCF_DEFAULT DIGCF = 0x00000001 // only valid with DIGCF_DEVICEINTERFACE + DIGCF_PRESENT DIGCF = 0x00000002 + DIGCF_ALLCLASSES DIGCF = 0x00000004 + DIGCF_PROFILE DIGCF = 0x00000008 + DIGCF_DEVICEINTERFACE DIGCF = 0x00000010 +) + +// DIREG specifies values for SetupDiCreateDevRegKey, SetupDiOpenDevRegKey, and SetupDiDeleteDevRegKey. +type DIREG uint32 + +const ( + DIREG_DEV DIREG = 0x00000001 // Open/Create/Delete device key + DIREG_DRV DIREG = 0x00000002 // Open/Create/Delete driver key + DIREG_BOTH DIREG = 0x00000004 // Delete both driver and Device key +) + +// SPDRP specifies device registry property codes +// (Codes marked as read-only (R) may only be used for +// SetupDiGetDeviceRegistryProperty) +// +// These values should cover the same set of registry properties +// as defined by the CM_DRP codes in cfgmgr32.h. +// +// Note that SPDRP codes are zero based while CM_DRP codes are one based! 
+type SPDRP uint32 + +const ( + SPDRP_DEVICEDESC SPDRP = 0x00000000 // DeviceDesc (R/W) + SPDRP_HARDWAREID SPDRP = 0x00000001 // HardwareID (R/W) + SPDRP_COMPATIBLEIDS SPDRP = 0x00000002 // CompatibleIDs (R/W) + SPDRP_SERVICE SPDRP = 0x00000004 // Service (R/W) + SPDRP_CLASS SPDRP = 0x00000007 // Class (R--tied to ClassGUID) + SPDRP_CLASSGUID SPDRP = 0x00000008 // ClassGUID (R/W) + SPDRP_DRIVER SPDRP = 0x00000009 // Driver (R/W) + SPDRP_CONFIGFLAGS SPDRP = 0x0000000A // ConfigFlags (R/W) + SPDRP_MFG SPDRP = 0x0000000B // Mfg (R/W) + SPDRP_FRIENDLYNAME SPDRP = 0x0000000C // FriendlyName (R/W) + SPDRP_LOCATION_INFORMATION SPDRP = 0x0000000D // LocationInformation (R/W) + SPDRP_PHYSICAL_DEVICE_OBJECT_NAME SPDRP = 0x0000000E // PhysicalDeviceObjectName (R) + SPDRP_CAPABILITIES SPDRP = 0x0000000F // Capabilities (R) + SPDRP_UI_NUMBER SPDRP = 0x00000010 // UiNumber (R) + SPDRP_UPPERFILTERS SPDRP = 0x00000011 // UpperFilters (R/W) + SPDRP_LOWERFILTERS SPDRP = 0x00000012 // LowerFilters (R/W) + SPDRP_BUSTYPEGUID SPDRP = 0x00000013 // BusTypeGUID (R) + SPDRP_LEGACYBUSTYPE SPDRP = 0x00000014 // LegacyBusType (R) + SPDRP_BUSNUMBER SPDRP = 0x00000015 // BusNumber (R) + SPDRP_ENUMERATOR_NAME SPDRP = 0x00000016 // Enumerator Name (R) + SPDRP_SECURITY SPDRP = 0x00000017 // Security (R/W, binary form) + SPDRP_SECURITY_SDS SPDRP = 0x00000018 // Security (W, SDS form) + SPDRP_DEVTYPE SPDRP = 0x00000019 // Device Type (R/W) + SPDRP_EXCLUSIVE SPDRP = 0x0000001A // Device is exclusive-access (R/W) + SPDRP_CHARACTERISTICS SPDRP = 0x0000001B // Device Characteristics (R/W) + SPDRP_ADDRESS SPDRP = 0x0000001C // Device Address (R) + SPDRP_UI_NUMBER_DESC_FORMAT SPDRP = 0x0000001D // UiNumberDescFormat (R/W) + SPDRP_DEVICE_POWER_DATA SPDRP = 0x0000001E // Device Power Data (R) + SPDRP_REMOVAL_POLICY SPDRP = 0x0000001F // Removal Policy (R) + SPDRP_REMOVAL_POLICY_HW_DEFAULT SPDRP = 0x00000020 // Hardware Removal Policy (R) + SPDRP_REMOVAL_POLICY_OVERRIDE SPDRP = 0x00000021 // Removal Policy Override (RW) + SPDRP_INSTALL_STATE SPDRP = 0x00000022 // Device Install State (R) + SPDRP_LOCATION_PATHS SPDRP = 0x00000023 // Device Location Paths (R) + SPDRP_BASE_CONTAINERID SPDRP = 0x00000024 // Base ContainerID (R) + + SPDRP_MAXIMUM_PROPERTY SPDRP = 0x00000025 // Upper bound on ordinals +) + +// DEVPROPTYPE represents the property-data-type identifier that specifies the +// data type of a device property value in the unified device property model. 
+type DEVPROPTYPE uint32 + +const ( + DEVPROP_TYPEMOD_ARRAY DEVPROPTYPE = 0x00001000 + DEVPROP_TYPEMOD_LIST DEVPROPTYPE = 0x00002000 + + DEVPROP_TYPE_EMPTY DEVPROPTYPE = 0x00000000 + DEVPROP_TYPE_NULL DEVPROPTYPE = 0x00000001 + DEVPROP_TYPE_SBYTE DEVPROPTYPE = 0x00000002 + DEVPROP_TYPE_BYTE DEVPROPTYPE = 0x00000003 + DEVPROP_TYPE_INT16 DEVPROPTYPE = 0x00000004 + DEVPROP_TYPE_UINT16 DEVPROPTYPE = 0x00000005 + DEVPROP_TYPE_INT32 DEVPROPTYPE = 0x00000006 + DEVPROP_TYPE_UINT32 DEVPROPTYPE = 0x00000007 + DEVPROP_TYPE_INT64 DEVPROPTYPE = 0x00000008 + DEVPROP_TYPE_UINT64 DEVPROPTYPE = 0x00000009 + DEVPROP_TYPE_FLOAT DEVPROPTYPE = 0x0000000A + DEVPROP_TYPE_DOUBLE DEVPROPTYPE = 0x0000000B + DEVPROP_TYPE_DECIMAL DEVPROPTYPE = 0x0000000C + DEVPROP_TYPE_GUID DEVPROPTYPE = 0x0000000D + DEVPROP_TYPE_CURRENCY DEVPROPTYPE = 0x0000000E + DEVPROP_TYPE_DATE DEVPROPTYPE = 0x0000000F + DEVPROP_TYPE_FILETIME DEVPROPTYPE = 0x00000010 + DEVPROP_TYPE_BOOLEAN DEVPROPTYPE = 0x00000011 + DEVPROP_TYPE_STRING DEVPROPTYPE = 0x00000012 + DEVPROP_TYPE_STRING_LIST DEVPROPTYPE = DEVPROP_TYPE_STRING | DEVPROP_TYPEMOD_LIST + DEVPROP_TYPE_SECURITY_DESCRIPTOR DEVPROPTYPE = 0x00000013 + DEVPROP_TYPE_SECURITY_DESCRIPTOR_STRING DEVPROPTYPE = 0x00000014 + DEVPROP_TYPE_DEVPROPKEY DEVPROPTYPE = 0x00000015 + DEVPROP_TYPE_DEVPROPTYPE DEVPROPTYPE = 0x00000016 + DEVPROP_TYPE_BINARY DEVPROPTYPE = DEVPROP_TYPE_BYTE | DEVPROP_TYPEMOD_ARRAY + DEVPROP_TYPE_ERROR DEVPROPTYPE = 0x00000017 + DEVPROP_TYPE_NTSTATUS DEVPROPTYPE = 0x00000018 + DEVPROP_TYPE_STRING_INDIRECT DEVPROPTYPE = 0x00000019 + + MAX_DEVPROP_TYPE DEVPROPTYPE = 0x00000019 + MAX_DEVPROP_TYPEMOD DEVPROPTYPE = 0x00002000 + + DEVPROP_MASK_TYPE DEVPROPTYPE = 0x00000FFF + DEVPROP_MASK_TYPEMOD DEVPROPTYPE = 0x0000F000 +) + +// DEVPROPGUID specifies a property category. +type DEVPROPGUID GUID + +// DEVPROPID uniquely identifies the property within the property category. +type DEVPROPID uint32 + +const DEVPROPID_FIRST_USABLE DEVPROPID = 2 + +// DEVPROPKEY represents a device property key for a device property in the +// unified device property model. 
+type DEVPROPKEY struct { + FmtID DEVPROPGUID + PID DEVPROPID +} + +// CONFIGRET is a return value or error code from cfgmgr32 APIs +type CONFIGRET uint32 + +func (ret CONFIGRET) Error() string { + if win32Error, ok := ret.Unwrap().(Errno); ok { + return fmt.Sprintf("%s (CfgMgr error: 0x%08x)", win32Error.Error(), uint32(ret)) + } + return fmt.Sprintf("CfgMgr error: 0x%08x", uint32(ret)) +} + +func (ret CONFIGRET) Win32Error(defaultError Errno) Errno { + return cm_MapCrToWin32Err(ret, defaultError) +} + +func (ret CONFIGRET) Unwrap() error { + const noMatch = Errno(^uintptr(0)) + win32Error := ret.Win32Error(noMatch) + if win32Error == noMatch { + return nil + } + return win32Error +} + +const ( + CR_SUCCESS CONFIGRET = 0x00000000 + CR_DEFAULT CONFIGRET = 0x00000001 + CR_OUT_OF_MEMORY CONFIGRET = 0x00000002 + CR_INVALID_POINTER CONFIGRET = 0x00000003 + CR_INVALID_FLAG CONFIGRET = 0x00000004 + CR_INVALID_DEVNODE CONFIGRET = 0x00000005 + CR_INVALID_DEVINST = CR_INVALID_DEVNODE + CR_INVALID_RES_DES CONFIGRET = 0x00000006 + CR_INVALID_LOG_CONF CONFIGRET = 0x00000007 + CR_INVALID_ARBITRATOR CONFIGRET = 0x00000008 + CR_INVALID_NODELIST CONFIGRET = 0x00000009 + CR_DEVNODE_HAS_REQS CONFIGRET = 0x0000000A + CR_DEVINST_HAS_REQS = CR_DEVNODE_HAS_REQS + CR_INVALID_RESOURCEID CONFIGRET = 0x0000000B + CR_DLVXD_NOT_FOUND CONFIGRET = 0x0000000C + CR_NO_SUCH_DEVNODE CONFIGRET = 0x0000000D + CR_NO_SUCH_DEVINST = CR_NO_SUCH_DEVNODE + CR_NO_MORE_LOG_CONF CONFIGRET = 0x0000000E + CR_NO_MORE_RES_DES CONFIGRET = 0x0000000F + CR_ALREADY_SUCH_DEVNODE CONFIGRET = 0x00000010 + CR_ALREADY_SUCH_DEVINST = CR_ALREADY_SUCH_DEVNODE + CR_INVALID_RANGE_LIST CONFIGRET = 0x00000011 + CR_INVALID_RANGE CONFIGRET = 0x00000012 + CR_FAILURE CONFIGRET = 0x00000013 + CR_NO_SUCH_LOGICAL_DEV CONFIGRET = 0x00000014 + CR_CREATE_BLOCKED CONFIGRET = 0x00000015 + CR_NOT_SYSTEM_VM CONFIGRET = 0x00000016 + CR_REMOVE_VETOED CONFIGRET = 0x00000017 + CR_APM_VETOED CONFIGRET = 0x00000018 + CR_INVALID_LOAD_TYPE CONFIGRET = 0x00000019 + CR_BUFFER_SMALL CONFIGRET = 0x0000001A + CR_NO_ARBITRATOR CONFIGRET = 0x0000001B + CR_NO_REGISTRY_HANDLE CONFIGRET = 0x0000001C + CR_REGISTRY_ERROR CONFIGRET = 0x0000001D + CR_INVALID_DEVICE_ID CONFIGRET = 0x0000001E + CR_INVALID_DATA CONFIGRET = 0x0000001F + CR_INVALID_API CONFIGRET = 0x00000020 + CR_DEVLOADER_NOT_READY CONFIGRET = 0x00000021 + CR_NEED_RESTART CONFIGRET = 0x00000022 + CR_NO_MORE_HW_PROFILES CONFIGRET = 0x00000023 + CR_DEVICE_NOT_THERE CONFIGRET = 0x00000024 + CR_NO_SUCH_VALUE CONFIGRET = 0x00000025 + CR_WRONG_TYPE CONFIGRET = 0x00000026 + CR_INVALID_PRIORITY CONFIGRET = 0x00000027 + CR_NOT_DISABLEABLE CONFIGRET = 0x00000028 + CR_FREE_RESOURCES CONFIGRET = 0x00000029 + CR_QUERY_VETOED CONFIGRET = 0x0000002A + CR_CANT_SHARE_IRQ CONFIGRET = 0x0000002B + CR_NO_DEPENDENT CONFIGRET = 0x0000002C + CR_SAME_RESOURCES CONFIGRET = 0x0000002D + CR_NO_SUCH_REGISTRY_KEY CONFIGRET = 0x0000002E + CR_INVALID_MACHINENAME CONFIGRET = 0x0000002F + CR_REMOTE_COMM_FAILURE CONFIGRET = 0x00000030 + CR_MACHINE_UNAVAILABLE CONFIGRET = 0x00000031 + CR_NO_CM_SERVICES CONFIGRET = 0x00000032 + CR_ACCESS_DENIED CONFIGRET = 0x00000033 + CR_CALL_NOT_IMPLEMENTED CONFIGRET = 0x00000034 + CR_INVALID_PROPERTY CONFIGRET = 0x00000035 + CR_DEVICE_INTERFACE_ACTIVE CONFIGRET = 0x00000036 + CR_NO_SUCH_DEVICE_INTERFACE CONFIGRET = 0x00000037 + CR_INVALID_REFERENCE_STRING CONFIGRET = 0x00000038 + CR_INVALID_CONFLICT_LIST CONFIGRET = 0x00000039 + CR_INVALID_INDEX CONFIGRET = 0x0000003A + CR_INVALID_STRUCTURE_SIZE CONFIGRET = 0x0000003B + 
NUM_CR_RESULTS CONFIGRET = 0x0000003C +) + +const ( + CM_GET_DEVICE_INTERFACE_LIST_PRESENT = 0 // only currently 'live' device interfaces + CM_GET_DEVICE_INTERFACE_LIST_ALL_DEVICES = 1 // all registered device interfaces, live or not +) + +const ( + DN_ROOT_ENUMERATED = 0x00000001 // Was enumerated by ROOT + DN_DRIVER_LOADED = 0x00000002 // Has Register_Device_Driver + DN_ENUM_LOADED = 0x00000004 // Has Register_Enumerator + DN_STARTED = 0x00000008 // Is currently configured + DN_MANUAL = 0x00000010 // Manually installed + DN_NEED_TO_ENUM = 0x00000020 // May need reenumeration + DN_NOT_FIRST_TIME = 0x00000040 // Has received a config + DN_HARDWARE_ENUM = 0x00000080 // Enum generates hardware ID + DN_LIAR = 0x00000100 // Lied about can reconfig once + DN_HAS_MARK = 0x00000200 // Not CM_Create_DevInst lately + DN_HAS_PROBLEM = 0x00000400 // Need device installer + DN_FILTERED = 0x00000800 // Is filtered + DN_MOVED = 0x00001000 // Has been moved + DN_DISABLEABLE = 0x00002000 // Can be disabled + DN_REMOVABLE = 0x00004000 // Can be removed + DN_PRIVATE_PROBLEM = 0x00008000 // Has a private problem + DN_MF_PARENT = 0x00010000 // Multi function parent + DN_MF_CHILD = 0x00020000 // Multi function child + DN_WILL_BE_REMOVED = 0x00040000 // DevInst is being removed + DN_NOT_FIRST_TIMEE = 0x00080000 // Has received a config enumerate + DN_STOP_FREE_RES = 0x00100000 // When child is stopped, free resources + DN_REBAL_CANDIDATE = 0x00200000 // Don't skip during rebalance + DN_BAD_PARTIAL = 0x00400000 // This devnode's log_confs do not have same resources + DN_NT_ENUMERATOR = 0x00800000 // This devnode's is an NT enumerator + DN_NT_DRIVER = 0x01000000 // This devnode's is an NT driver + DN_NEEDS_LOCKING = 0x02000000 // Devnode need lock resume processing + DN_ARM_WAKEUP = 0x04000000 // Devnode can be the wakeup device + DN_APM_ENUMERATOR = 0x08000000 // APM aware enumerator + DN_APM_DRIVER = 0x10000000 // APM aware driver + DN_SILENT_INSTALL = 0x20000000 // Silent install + DN_NO_SHOW_IN_DM = 0x40000000 // No show in device manager + DN_BOOT_LOG_PROB = 0x80000000 // Had a problem during preassignment of boot log conf + DN_NEED_RESTART = DN_LIAR // System needs to be restarted for this Devnode to work properly + DN_DRIVER_BLOCKED = DN_NOT_FIRST_TIME // One or more drivers are blocked from loading for this Devnode + DN_LEGACY_DRIVER = DN_MOVED // This device is using a legacy driver + DN_CHILD_WITH_INVALID_ID = DN_HAS_MARK // One or more children have invalid IDs + DN_DEVICE_DISCONNECTED = DN_NEEDS_LOCKING // The function driver for a device reported that the device is not connected. Typically this means a wireless device is out of range. 
+ DN_QUERY_REMOVE_PENDING = DN_MF_PARENT // Device is part of a set of related devices collectively pending query-removal + DN_QUERY_REMOVE_ACTIVE = DN_MF_CHILD // Device is actively engaged in a query-remove IRP + DN_CHANGEABLE_FLAGS = DN_NOT_FIRST_TIME | DN_HARDWARE_ENUM | DN_HAS_MARK | DN_DISABLEABLE | DN_REMOVABLE | DN_MF_CHILD | DN_MF_PARENT | DN_NOT_FIRST_TIMEE | DN_STOP_FREE_RES | DN_REBAL_CANDIDATE | DN_NT_ENUMERATOR | DN_NT_DRIVER | DN_SILENT_INSTALL | DN_NO_SHOW_IN_DM +) + +//sys setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineName *uint16, reserved uintptr) (handle DevInfo, err error) [failretval==DevInfo(InvalidHandle)] = setupapi.SetupDiCreateDeviceInfoListExW + +// SetupDiCreateDeviceInfoListEx function creates an empty device information set on a remote or a local computer and optionally associates the set with a device setup class. +func SetupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineName string) (deviceInfoSet DevInfo, err error) { + var machineNameUTF16 *uint16 + if machineName != "" { + machineNameUTF16, err = UTF16PtrFromString(machineName) + if err != nil { + return + } + } + return setupDiCreateDeviceInfoListEx(classGUID, hwndParent, machineNameUTF16, 0) +} + +//sys setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailData *DevInfoListDetailData) (err error) = setupapi.SetupDiGetDeviceInfoListDetailW + +// SetupDiGetDeviceInfoListDetail function retrieves information associated with a device information set including the class GUID, remote computer handle, and remote computer name. +func SetupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo) (deviceInfoSetDetailData *DevInfoListDetailData, err error) { + data := &DevInfoListDetailData{} + data.size = data.unsafeSizeOf() + + return data, setupDiGetDeviceInfoListDetail(deviceInfoSet, data) +} + +// DeviceInfoListDetail method retrieves information associated with a device information set including the class GUID, remote computer handle, and remote computer name. +func (deviceInfoSet DevInfo) DeviceInfoListDetail() (*DevInfoListDetailData, error) { + return SetupDiGetDeviceInfoListDetail(deviceInfoSet) +} + +//sys setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUID *GUID, DeviceDescription *uint16, hwndParent uintptr, CreationFlags DICD, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiCreateDeviceInfoW + +// SetupDiCreateDeviceInfo function creates a new device information element and adds it as a new member to the specified device information set. +func SetupDiCreateDeviceInfo(deviceInfoSet DevInfo, deviceName string, classGUID *GUID, deviceDescription string, hwndParent uintptr, creationFlags DICD) (deviceInfoData *DevInfoData, err error) { + deviceNameUTF16, err := UTF16PtrFromString(deviceName) + if err != nil { + return + } + + var deviceDescriptionUTF16 *uint16 + if deviceDescription != "" { + deviceDescriptionUTF16, err = UTF16PtrFromString(deviceDescription) + if err != nil { + return + } + } + + data := &DevInfoData{} + data.size = uint32(unsafe.Sizeof(*data)) + + return data, setupDiCreateDeviceInfo(deviceInfoSet, deviceNameUTF16, classGUID, deviceDescriptionUTF16, hwndParent, creationFlags, data) +} + +// CreateDeviceInfo method creates a new device information element and adds it as a new member to the specified device information set. 
+func (deviceInfoSet DevInfo) CreateDeviceInfo(deviceName string, classGUID *GUID, deviceDescription string, hwndParent uintptr, creationFlags DICD) (*DevInfoData, error) { + return SetupDiCreateDeviceInfo(deviceInfoSet, deviceName, classGUID, deviceDescription, hwndParent, creationFlags) +} + +//sys setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiEnumDeviceInfo + +// SetupDiEnumDeviceInfo function returns a DevInfoData structure that specifies a device information element in a device information set. +func SetupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex int) (*DevInfoData, error) { + data := &DevInfoData{} + data.size = uint32(unsafe.Sizeof(*data)) + + return data, setupDiEnumDeviceInfo(deviceInfoSet, uint32(memberIndex), data) +} + +// EnumDeviceInfo method returns a DevInfoData structure that specifies a device information element in a device information set. +func (deviceInfoSet DevInfo) EnumDeviceInfo(memberIndex int) (*DevInfoData, error) { + return SetupDiEnumDeviceInfo(deviceInfoSet, memberIndex) +} + +// SetupDiDestroyDeviceInfoList function deletes a device information set and frees all associated memory. +//sys SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) = setupapi.SetupDiDestroyDeviceInfoList + +// Close method deletes a device information set and frees all associated memory. +func (deviceInfoSet DevInfo) Close() error { + return SetupDiDestroyDeviceInfoList(deviceInfoSet) +} + +//sys SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) = setupapi.SetupDiBuildDriverInfoList + +// BuildDriverInfoList method builds a list of drivers that is associated with a specific device or with the global class driver list for a device information set. +func (deviceInfoSet DevInfo) BuildDriverInfoList(deviceInfoData *DevInfoData, driverType SPDIT) error { + return SetupDiBuildDriverInfoList(deviceInfoSet, deviceInfoData, driverType) +} + +//sys SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) = setupapi.SetupDiCancelDriverInfoSearch + +// CancelDriverInfoSearch method cancels a driver list search that is currently in progress in a different thread. +func (deviceInfoSet DevInfo) CancelDriverInfoSearch() error { + return SetupDiCancelDriverInfoSearch(deviceInfoSet) +} + +//sys setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex uint32, driverInfoData *DrvInfoData) (err error) = setupapi.SetupDiEnumDriverInfoW + +// SetupDiEnumDriverInfo function enumerates the members of a driver list. +func SetupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex int) (*DrvInfoData, error) { + data := &DrvInfoData{} + data.size = uint32(unsafe.Sizeof(*data)) + + return data, setupDiEnumDriverInfo(deviceInfoSet, deviceInfoData, driverType, uint32(memberIndex), data) +} + +// EnumDriverInfo method enumerates the members of a driver list. 
+func (deviceInfoSet DevInfo) EnumDriverInfo(deviceInfoData *DevInfoData, driverType SPDIT, memberIndex int) (*DrvInfoData, error) { + return SetupDiEnumDriverInfo(deviceInfoSet, deviceInfoData, driverType, memberIndex) +} + +//sys setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) = setupapi.SetupDiGetSelectedDriverW + +// SetupDiGetSelectedDriver function retrieves the selected driver for a device information set or a particular device information element. +func SetupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (*DrvInfoData, error) { + data := &DrvInfoData{} + data.size = uint32(unsafe.Sizeof(*data)) + + return data, setupDiGetSelectedDriver(deviceInfoSet, deviceInfoData, data) +} + +// SelectedDriver method retrieves the selected driver for a device information set or a particular device information element. +func (deviceInfoSet DevInfo) SelectedDriver(deviceInfoData *DevInfoData) (*DrvInfoData, error) { + return SetupDiGetSelectedDriver(deviceInfoSet, deviceInfoData) +} + +//sys SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) = setupapi.SetupDiSetSelectedDriverW + +// SetSelectedDriver method sets, or resets, the selected driver for a device information element or the selected class driver for a device information set. +func (deviceInfoSet DevInfo) SetSelectedDriver(deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) error { + return SetupDiSetSelectedDriver(deviceInfoSet, deviceInfoData, driverInfoData) +} + +//sys setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData, driverInfoDetailData *DrvInfoDetailData, driverInfoDetailDataSize uint32, requiredSize *uint32) (err error) = setupapi.SetupDiGetDriverInfoDetailW + +// SetupDiGetDriverInfoDetail function retrieves driver information detail for a device information set or a particular device information element in the device information set. +func SetupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (*DrvInfoDetailData, error) { + reqSize := uint32(2048) + for { + buf := make([]byte, reqSize) + data := (*DrvInfoDetailData)(unsafe.Pointer(&buf[0])) + data.size = data.unsafeSizeOf() + err := setupDiGetDriverInfoDetail(deviceInfoSet, deviceInfoData, driverInfoData, data, uint32(len(buf)), &reqSize) + if err == ERROR_INSUFFICIENT_BUFFER { + continue + } + if err != nil { + return nil, err + } + data.size = reqSize + return data, nil + } +} + +// DriverInfoDetail method retrieves driver information detail for a device information set or a particular device information element in the device information set. +func (deviceInfoSet DevInfo) DriverInfoDetail(deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (*DrvInfoDetailData, error) { + return SetupDiGetDriverInfoDetail(deviceInfoSet, deviceInfoData, driverInfoData) +} + +//sys SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) = setupapi.SetupDiDestroyDriverInfoList + +// DestroyDriverInfoList method deletes a driver list. 
+func (deviceInfoSet DevInfo) DestroyDriverInfoList(deviceInfoData *DevInfoData, driverType SPDIT) error { + return SetupDiDestroyDriverInfoList(deviceInfoSet, deviceInfoData, driverType) +} + +//sys setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintptr, Flags DIGCF, deviceInfoSet DevInfo, machineName *uint16, reserved uintptr) (handle DevInfo, err error) [failretval==DevInfo(InvalidHandle)] = setupapi.SetupDiGetClassDevsExW + +// SetupDiGetClassDevsEx function returns a handle to a device information set that contains requested device information elements for a local or a remote computer. +func SetupDiGetClassDevsEx(classGUID *GUID, enumerator string, hwndParent uintptr, flags DIGCF, deviceInfoSet DevInfo, machineName string) (handle DevInfo, err error) { + var enumeratorUTF16 *uint16 + if enumerator != "" { + enumeratorUTF16, err = UTF16PtrFromString(enumerator) + if err != nil { + return + } + } + var machineNameUTF16 *uint16 + if machineName != "" { + machineNameUTF16, err = UTF16PtrFromString(machineName) + if err != nil { + return + } + } + return setupDiGetClassDevsEx(classGUID, enumeratorUTF16, hwndParent, flags, deviceInfoSet, machineNameUTF16, 0) +} + +// SetupDiCallClassInstaller function calls the appropriate class installer, and any registered co-installers, with the specified installation request (DIF code). +//sys SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiCallClassInstaller + +// CallClassInstaller member calls the appropriate class installer, and any registered co-installers, with the specified installation request (DIF code). +func (deviceInfoSet DevInfo) CallClassInstaller(installFunction DI_FUNCTION, deviceInfoData *DevInfoData) error { + return SetupDiCallClassInstaller(installFunction, deviceInfoSet, deviceInfoData) +} + +// SetupDiOpenDevRegKey function opens a registry key for device-specific configuration information. +//sys SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (key Handle, err error) [failretval==InvalidHandle] = setupapi.SetupDiOpenDevRegKey + +// OpenDevRegKey method opens a registry key for device-specific configuration information. +func (deviceInfoSet DevInfo) OpenDevRegKey(DeviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (Handle, error) { + return SetupDiOpenDevRegKey(deviceInfoSet, DeviceInfoData, Scope, HwProfile, KeyType, samDesired) +} + +//sys setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, propertyKey *DEVPROPKEY, propertyType *DEVPROPTYPE, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32, flags uint32) (err error) = setupapi.SetupDiGetDevicePropertyW + +// SetupDiGetDeviceProperty function retrieves a specified device instance property. 
+func SetupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, propertyKey *DEVPROPKEY) (value interface{}, err error) { + reqSize := uint32(256) + for { + var dataType DEVPROPTYPE + buf := make([]byte, reqSize) + err = setupDiGetDeviceProperty(deviceInfoSet, deviceInfoData, propertyKey, &dataType, &buf[0], uint32(len(buf)), &reqSize, 0) + if err == ERROR_INSUFFICIENT_BUFFER { + continue + } + if err != nil { + return + } + switch dataType { + case DEVPROP_TYPE_STRING: + ret := UTF16ToString(bufToUTF16(buf)) + runtime.KeepAlive(buf) + return ret, nil + } + return nil, errors.New("unimplemented property type") + } +} + +//sys setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyRegDataType *uint32, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32) (err error) = setupapi.SetupDiGetDeviceRegistryPropertyW + +// SetupDiGetDeviceRegistryProperty function retrieves a specified Plug and Play device property. +func SetupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP) (value interface{}, err error) { + reqSize := uint32(256) + for { + var dataType uint32 + buf := make([]byte, reqSize) + err = setupDiGetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property, &dataType, &buf[0], uint32(len(buf)), &reqSize) + if err == ERROR_INSUFFICIENT_BUFFER { + continue + } + if err != nil { + return + } + return getRegistryValue(buf[:reqSize], dataType) + } +} + +func getRegistryValue(buf []byte, dataType uint32) (interface{}, error) { + switch dataType { + case REG_SZ: + ret := UTF16ToString(bufToUTF16(buf)) + runtime.KeepAlive(buf) + return ret, nil + case REG_EXPAND_SZ: + value := UTF16ToString(bufToUTF16(buf)) + if value == "" { + return "", nil + } + p, err := syscall.UTF16PtrFromString(value) + if err != nil { + return "", err + } + ret := make([]uint16, 100) + for { + n, err := ExpandEnvironmentStrings(p, &ret[0], uint32(len(ret))) + if err != nil { + return "", err + } + if n <= uint32(len(ret)) { + return UTF16ToString(ret[:n]), nil + } + ret = make([]uint16, n) + } + case REG_BINARY: + return buf, nil + case REG_DWORD_LITTLE_ENDIAN: + return binary.LittleEndian.Uint32(buf), nil + case REG_DWORD_BIG_ENDIAN: + return binary.BigEndian.Uint32(buf), nil + case REG_MULTI_SZ: + bufW := bufToUTF16(buf) + a := []string{} + for i := 0; i < len(bufW); { + j := i + wcslen(bufW[i:]) + if i < j { + a = append(a, UTF16ToString(bufW[i:j])) + } + i = j + 1 + } + runtime.KeepAlive(buf) + return a, nil + case REG_QWORD_LITTLE_ENDIAN: + return binary.LittleEndian.Uint64(buf), nil + default: + return nil, fmt.Errorf("Unsupported registry value type: %v", dataType) + } +} + +// bufToUTF16 function reinterprets []byte buffer as []uint16 +func bufToUTF16(buf []byte) []uint16 { + sl := struct { + addr *uint16 + len int + cap int + }{(*uint16)(unsafe.Pointer(&buf[0])), len(buf) / 2, cap(buf) / 2} + return *(*[]uint16)(unsafe.Pointer(&sl)) +} + +// utf16ToBuf function reinterprets []uint16 as []byte +func utf16ToBuf(buf []uint16) []byte { + sl := struct { + addr *byte + len int + cap int + }{(*byte)(unsafe.Pointer(&buf[0])), len(buf) * 2, cap(buf) * 2} + return *(*[]byte)(unsafe.Pointer(&sl)) +} + +func wcslen(str []uint16) int { + for i := 0; i < len(str); i++ { + if str[i] == 0 { + return i + } + } + return len(str) +} + +// DeviceRegistryProperty method retrieves a specified Plug and Play device property. 
+func (deviceInfoSet DevInfo) DeviceRegistryProperty(deviceInfoData *DevInfoData, property SPDRP) (interface{}, error) { + return SetupDiGetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property) +} + +//sys setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffer *byte, propertyBufferSize uint32) (err error) = setupapi.SetupDiSetDeviceRegistryPropertyW + +// SetupDiSetDeviceRegistryProperty function sets a Plug and Play device property for a device. +func SetupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffers []byte) error { + return setupDiSetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property, &propertyBuffers[0], uint32(len(propertyBuffers))) +} + +// SetDeviceRegistryProperty function sets a Plug and Play device property for a device. +func (deviceInfoSet DevInfo) SetDeviceRegistryProperty(deviceInfoData *DevInfoData, property SPDRP, propertyBuffers []byte) error { + return SetupDiSetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property, propertyBuffers) +} + +// SetDeviceRegistryPropertyString method sets a Plug and Play device property string for a device. +func (deviceInfoSet DevInfo) SetDeviceRegistryPropertyString(deviceInfoData *DevInfoData, property SPDRP, str string) error { + str16, err := UTF16FromString(str) + if err != nil { + return err + } + err = SetupDiSetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property, utf16ToBuf(append(str16, 0))) + runtime.KeepAlive(str16) + return err +} + +//sys setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) = setupapi.SetupDiGetDeviceInstallParamsW + +// SetupDiGetDeviceInstallParams function retrieves device installation parameters for a device information set or a particular device information element. +func SetupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (*DevInstallParams, error) { + params := &DevInstallParams{} + params.size = uint32(unsafe.Sizeof(*params)) + + return params, setupDiGetDeviceInstallParams(deviceInfoSet, deviceInfoData, params) +} + +// DeviceInstallParams method retrieves device installation parameters for a device information set or a particular device information element. +func (deviceInfoSet DevInfo) DeviceInstallParams(deviceInfoData *DevInfoData) (*DevInstallParams, error) { + return SetupDiGetDeviceInstallParams(deviceInfoSet, deviceInfoData) +} + +//sys setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, instanceId *uint16, instanceIdSize uint32, instanceIdRequiredSize *uint32) (err error) = setupapi.SetupDiGetDeviceInstanceIdW + +// SetupDiGetDeviceInstanceId function retrieves the instance ID of the device. +func SetupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (string, error) { + reqSize := uint32(1024) + for { + buf := make([]uint16, reqSize) + err := setupDiGetDeviceInstanceId(deviceInfoSet, deviceInfoData, &buf[0], uint32(len(buf)), &reqSize) + if err == ERROR_INSUFFICIENT_BUFFER { + continue + } + if err != nil { + return "", err + } + return UTF16ToString(buf), nil + } +} + +// DeviceInstanceID method retrieves the instance ID of the device. 
+func (deviceInfoSet DevInfo) DeviceInstanceID(deviceInfoData *DevInfoData) (string, error) { + return SetupDiGetDeviceInstanceId(deviceInfoSet, deviceInfoData) +} + +// SetupDiGetClassInstallParams function retrieves class installation parameters for a device information set or a particular device information element. +//sys SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) (err error) = setupapi.SetupDiGetClassInstallParamsW + +// ClassInstallParams method retrieves class installation parameters for a device information set or a particular device information element. +func (deviceInfoSet DevInfo) ClassInstallParams(deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) error { + return SetupDiGetClassInstallParams(deviceInfoSet, deviceInfoData, classInstallParams, classInstallParamsSize, requiredSize) +} + +//sys SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) = setupapi.SetupDiSetDeviceInstallParamsW + +// SetDeviceInstallParams member sets device installation parameters for a device information set or a particular device information element. +func (deviceInfoSet DevInfo) SetDeviceInstallParams(deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) error { + return SetupDiSetDeviceInstallParams(deviceInfoSet, deviceInfoData, deviceInstallParams) +} + +// SetupDiSetClassInstallParams function sets or clears class install parameters for a device information set or a particular device information element. +//sys SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) (err error) = setupapi.SetupDiSetClassInstallParamsW + +// SetClassInstallParams method sets or clears class install parameters for a device information set or a particular device information element. +func (deviceInfoSet DevInfo) SetClassInstallParams(deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) error { + return SetupDiSetClassInstallParams(deviceInfoSet, deviceInfoData, classInstallParams, classInstallParamsSize) +} + +//sys setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) = setupapi.SetupDiClassNameFromGuidExW + +// SetupDiClassNameFromGuidEx function retrieves the class name associated with a class GUID. The class can be installed on a local or remote computer. +func SetupDiClassNameFromGuidEx(classGUID *GUID, machineName string) (className string, err error) { + var classNameUTF16 [MAX_CLASS_NAME_LEN]uint16 + + var machineNameUTF16 *uint16 + if machineName != "" { + machineNameUTF16, err = UTF16PtrFromString(machineName) + if err != nil { + return + } + } + + err = setupDiClassNameFromGuidEx(classGUID, &classNameUTF16[0], MAX_CLASS_NAME_LEN, nil, machineNameUTF16, 0) + if err != nil { + return + } + + className = UTF16ToString(classNameUTF16[:]) + return +} + +//sys setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGuidListSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) = setupapi.SetupDiClassGuidsFromNameExW + +// SetupDiClassGuidsFromNameEx function retrieves the GUIDs associated with the specified class name. 
This resulting list contains the classes currently installed on a local or remote computer. +func SetupDiClassGuidsFromNameEx(className string, machineName string) ([]GUID, error) { + classNameUTF16, err := UTF16PtrFromString(className) + if err != nil { + return nil, err + } + + var machineNameUTF16 *uint16 + if machineName != "" { + machineNameUTF16, err = UTF16PtrFromString(machineName) + if err != nil { + return nil, err + } + } + + reqSize := uint32(4) + for { + buf := make([]GUID, reqSize) + err = setupDiClassGuidsFromNameEx(classNameUTF16, &buf[0], uint32(len(buf)), &reqSize, machineNameUTF16, 0) + if err == ERROR_INSUFFICIENT_BUFFER { + continue + } + if err != nil { + return nil, err + } + return buf[:reqSize], nil + } +} + +//sys setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiGetSelectedDevice + +// SetupDiGetSelectedDevice function retrieves the selected device information element in a device information set. +func SetupDiGetSelectedDevice(deviceInfoSet DevInfo) (*DevInfoData, error) { + data := &DevInfoData{} + data.size = uint32(unsafe.Sizeof(*data)) + + return data, setupDiGetSelectedDevice(deviceInfoSet, data) +} + +// SelectedDevice method retrieves the selected device information element in a device information set. +func (deviceInfoSet DevInfo) SelectedDevice() (*DevInfoData, error) { + return SetupDiGetSelectedDevice(deviceInfoSet) +} + +// SetupDiSetSelectedDevice function sets a device information element as the selected member of a device information set. This function is typically used by an installation wizard. +//sys SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiSetSelectedDevice + +// SetSelectedDevice method sets a device information element as the selected member of a device information set. This function is typically used by an installation wizard. +func (deviceInfoSet DevInfo) SetSelectedDevice(deviceInfoData *DevInfoData) error { + return SetupDiSetSelectedDevice(deviceInfoSet, deviceInfoData) +} + +//sys setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (err error) = setupapi.SetupUninstallOEMInfW + +// SetupUninstallOEMInf uninstalls the specified driver. 
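+// A minimal usage sketch; the published INF name below is illustrative, and
+// SUOI_FORCEDELETE is assumed to be the SUOI flag defined elsewhere in this package:
+//
+//	err := SetupUninstallOEMInf("oem42.inf", SUOI_FORCEDELETE)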
+func SetupUninstallOEMInf(infFileName string, flags SUOI) error { + infFileName16, err := UTF16PtrFromString(infFileName) + if err != nil { + return err + } + return setupUninstallOEMInf(infFileName16, flags, 0) +} + +//sys cm_MapCrToWin32Err(configRet CONFIGRET, defaultWin32Error Errno) (ret Errno) = CfgMgr32.CM_MapCrToWin32Err + +//sys cm_Get_Device_Interface_List_Size(len *uint32, interfaceClass *GUID, deviceID *uint16, flags uint32) (ret CONFIGRET) = CfgMgr32.CM_Get_Device_Interface_List_SizeW +//sys cm_Get_Device_Interface_List(interfaceClass *GUID, deviceID *uint16, buffer *uint16, bufferLen uint32, flags uint32) (ret CONFIGRET) = CfgMgr32.CM_Get_Device_Interface_ListW + +func CM_Get_Device_Interface_List(deviceID string, interfaceClass *GUID, flags uint32) ([]string, error) { + deviceID16, err := UTF16PtrFromString(deviceID) + if err != nil { + return nil, err + } + var buf []uint16 + var buflen uint32 + for { + if ret := cm_Get_Device_Interface_List_Size(&buflen, interfaceClass, deviceID16, flags); ret != CR_SUCCESS { + return nil, ret + } + buf = make([]uint16, buflen) + if ret := cm_Get_Device_Interface_List(interfaceClass, deviceID16, &buf[0], buflen, flags); ret == CR_SUCCESS { + break + } else if ret != CR_BUFFER_SMALL { + return nil, ret + } + } + var interfaces []string + for i := 0; i < len(buf); { + j := i + wcslen(buf[i:]) + if i < j { + interfaces = append(interfaces, UTF16ToString(buf[i:j])) + } + i = j + 1 + } + if interfaces == nil { + return nil, ERROR_NO_SUCH_DEVICE_INTERFACE + } + return interfaces, nil +} + +//sys cm_Get_DevNode_Status(status *uint32, problemNumber *uint32, devInst DEVINST, flags uint32) (ret CONFIGRET) = CfgMgr32.CM_Get_DevNode_Status + +func CM_Get_DevNode_Status(status *uint32, problemNumber *uint32, devInst DEVINST, flags uint32) error { + ret := cm_Get_DevNode_Status(status, problemNumber, devInst, flags) + if ret == CR_SUCCESS { + return nil + } + return ret +} diff --git a/vendor/golang.org/x/sys/unix/str.go b/vendor/golang.org/x/sys/windows/str.go similarity index 62% rename from vendor/golang.org/x/sys/unix/str.go rename to vendor/golang.org/x/sys/windows/str.go index 8ba89ed86..4fc01434e 100644 --- a/vendor/golang.org/x/sys/unix/str.go +++ b/vendor/golang.org/x/sys/windows/str.go @@ -2,19 +2,15 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build windows +// +build windows -package unix +package windows func itoa(val int) string { // do it here rather than with fmt to avoid dependency if val < 0 { - return "-" + uitoa(uint(-val)) + return "-" + itoa(-val) } - return uitoa(uint(val)) -} - -func uitoa(val uint) string { var buf [32]byte // big enough for int64 i := len(buf) - 1 for val >= 10 { diff --git a/vendor/golang.org/x/sys/windows/syscall.go b/vendor/golang.org/x/sys/windows/syscall.go new file mode 100644 index 000000000..8732cdb95 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/syscall.go @@ -0,0 +1,105 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows +// +build windows + +// Package windows contains an interface to the low-level operating system +// primitives. 
OS details vary depending on the underlying system, and +// by default, godoc will display the OS-specific documentation for the current +// system. If you want godoc to display syscall documentation for another +// system, set $GOOS and $GOARCH to the desired system. For example, if +// you want to view documentation for freebsd/arm on linux/amd64, set $GOOS +// to freebsd and $GOARCH to arm. +// +// The primary use of this package is inside other packages that provide a more +// portable interface to the system, such as "os", "time" and "net". Use +// those packages rather than this one if you can. +// +// For details of the functions and data types in this package consult +// the manuals for the appropriate operating system. +// +// These calls return err == nil to indicate success; otherwise +// err represents an operating system error describing the failure and +// holds a value of type syscall.Errno. +package windows // import "golang.org/x/sys/windows" + +import ( + "bytes" + "strings" + "syscall" + "unsafe" +) + +// ByteSliceFromString returns a NUL-terminated slice of bytes +// containing the text of s. If s contains a NUL byte at any +// location, it returns (nil, syscall.EINVAL). +func ByteSliceFromString(s string) ([]byte, error) { + if strings.IndexByte(s, 0) != -1 { + return nil, syscall.EINVAL + } + a := make([]byte, len(s)+1) + copy(a, s) + return a, nil +} + +// BytePtrFromString returns a pointer to a NUL-terminated array of +// bytes containing the text of s. If s contains a NUL byte at any +// location, it returns (nil, syscall.EINVAL). +func BytePtrFromString(s string) (*byte, error) { + a, err := ByteSliceFromString(s) + if err != nil { + return nil, err + } + return &a[0], nil +} + +// ByteSliceToString returns a string form of the text represented by the slice s, with a terminating NUL and any +// bytes after the NUL removed. +func ByteSliceToString(s []byte) string { + if i := bytes.IndexByte(s, 0); i != -1 { + s = s[:i] + } + return string(s) +} + +// BytePtrToString takes a pointer to a sequence of text and returns the corresponding string. +// If the pointer is nil, it returns the empty string. It assumes that the text sequence is terminated +// at a zero byte; if the zero byte is not present, the program may crash. +func BytePtrToString(p *byte) string { + if p == nil { + return "" + } + if *p == 0 { + return "" + } + + // Find NUL terminator. + n := 0 + for ptr := unsafe.Pointer(p); *(*byte)(ptr) != 0; n++ { + ptr = unsafe.Pointer(uintptr(ptr) + 1) + } + + return string(unsafe.Slice(p, n)) +} + +// Single-word zero for use when we need a valid pointer to 0 bytes. +// See mksyscall.pl. +var _zero uintptr + +func (ts *Timespec) Unix() (sec int64, nsec int64) { + return int64(ts.Sec), int64(ts.Nsec) +} + +func (tv *Timeval) Unix() (sec int64, nsec int64) { + return int64(tv.Sec), int64(tv.Usec) * 1000 +} + +func (ts *Timespec) Nano() int64 { + return int64(ts.Sec)*1e9 + int64(ts.Nsec) +} + +func (tv *Timeval) Nano() int64 { + return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000 +} diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go new file mode 100644 index 000000000..3723b2c22 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -0,0 +1,1808 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Windows system calls. 
+ +package windows + +import ( + errorspkg "errors" + "fmt" + "runtime" + "sync" + "syscall" + "time" + "unicode/utf16" + "unsafe" + + "golang.org/x/sys/internal/unsafeheader" +) + +type Handle uintptr +type HWND uintptr + +const ( + InvalidHandle = ^Handle(0) + InvalidHWND = ^HWND(0) + + // Flags for DefineDosDevice. + DDD_EXACT_MATCH_ON_REMOVE = 0x00000004 + DDD_NO_BROADCAST_SYSTEM = 0x00000008 + DDD_RAW_TARGET_PATH = 0x00000001 + DDD_REMOVE_DEFINITION = 0x00000002 + + // Return values for GetDriveType. + DRIVE_UNKNOWN = 0 + DRIVE_NO_ROOT_DIR = 1 + DRIVE_REMOVABLE = 2 + DRIVE_FIXED = 3 + DRIVE_REMOTE = 4 + DRIVE_CDROM = 5 + DRIVE_RAMDISK = 6 + + // File system flags from GetVolumeInformation and GetVolumeInformationByHandle. + FILE_CASE_SENSITIVE_SEARCH = 0x00000001 + FILE_CASE_PRESERVED_NAMES = 0x00000002 + FILE_FILE_COMPRESSION = 0x00000010 + FILE_DAX_VOLUME = 0x20000000 + FILE_NAMED_STREAMS = 0x00040000 + FILE_PERSISTENT_ACLS = 0x00000008 + FILE_READ_ONLY_VOLUME = 0x00080000 + FILE_SEQUENTIAL_WRITE_ONCE = 0x00100000 + FILE_SUPPORTS_ENCRYPTION = 0x00020000 + FILE_SUPPORTS_EXTENDED_ATTRIBUTES = 0x00800000 + FILE_SUPPORTS_HARD_LINKS = 0x00400000 + FILE_SUPPORTS_OBJECT_IDS = 0x00010000 + FILE_SUPPORTS_OPEN_BY_FILE_ID = 0x01000000 + FILE_SUPPORTS_REPARSE_POINTS = 0x00000080 + FILE_SUPPORTS_SPARSE_FILES = 0x00000040 + FILE_SUPPORTS_TRANSACTIONS = 0x00200000 + FILE_SUPPORTS_USN_JOURNAL = 0x02000000 + FILE_UNICODE_ON_DISK = 0x00000004 + FILE_VOLUME_IS_COMPRESSED = 0x00008000 + FILE_VOLUME_QUOTAS = 0x00000020 + + // Flags for LockFileEx. + LOCKFILE_FAIL_IMMEDIATELY = 0x00000001 + LOCKFILE_EXCLUSIVE_LOCK = 0x00000002 + + // Return value of SleepEx and other APC functions + WAIT_IO_COMPLETION = 0x000000C0 +) + +// StringToUTF16 is deprecated. Use UTF16FromString instead. +// If s contains a NUL byte this function panics instead of +// returning an error. +func StringToUTF16(s string) []uint16 { + a, err := UTF16FromString(s) + if err != nil { + panic("windows: string with NUL passed to StringToUTF16") + } + return a +} + +// UTF16FromString returns the UTF-16 encoding of the UTF-8 string +// s, with a terminating NUL added. If s contains a NUL byte at any +// location, it returns (nil, syscall.EINVAL). +func UTF16FromString(s string) ([]uint16, error) { + return syscall.UTF16FromString(s) +} + +// UTF16ToString returns the UTF-8 encoding of the UTF-16 sequence s, +// with a terminating NUL and any bytes after the NUL removed. +func UTF16ToString(s []uint16) string { + return syscall.UTF16ToString(s) +} + +// StringToUTF16Ptr is deprecated. Use UTF16PtrFromString instead. +// If s contains a NUL byte this function panics instead of +// returning an error. +func StringToUTF16Ptr(s string) *uint16 { return &StringToUTF16(s)[0] } + +// UTF16PtrFromString returns pointer to the UTF-16 encoding of +// the UTF-8 string s, with a terminating NUL added. If s +// contains a NUL byte at any location, it returns (nil, syscall.EINVAL). +func UTF16PtrFromString(s string) (*uint16, error) { + a, err := UTF16FromString(s) + if err != nil { + return nil, err + } + return &a[0], nil +} + +// UTF16PtrToString takes a pointer to a UTF-16 sequence and returns the corresponding UTF-8 encoded string. +// If the pointer is nil, it returns the empty string. It assumes that the UTF-16 sequence is terminated +// at a zero word; if the zero word is not present, the program may crash. +func UTF16PtrToString(p *uint16) string { + if p == nil { + return "" + } + if *p == 0 { + return "" + } + + // Find NUL terminator. 
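+	// Walk the buffer one uint16 at a time until the zero word is reached;
+	// n then counts the UTF-16 code units before the terminator, and
+	// unsafe.Slice(p, n) views exactly that memory for utf16.Decode below.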
+ n := 0 + for ptr := unsafe.Pointer(p); *(*uint16)(ptr) != 0; n++ { + ptr = unsafe.Pointer(uintptr(ptr) + unsafe.Sizeof(*p)) + } + + return string(utf16.Decode(unsafe.Slice(p, n))) +} + +func Getpagesize() int { return 4096 } + +// NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention. +// This is useful when interoperating with Windows code requiring callbacks. +// The argument is expected to be a function with with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. +func NewCallback(fn interface{}) uintptr { + return syscall.NewCallback(fn) +} + +// NewCallbackCDecl converts a Go function to a function pointer conforming to the cdecl calling convention. +// This is useful when interoperating with Windows code requiring callbacks. +// The argument is expected to be a function with with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. +func NewCallbackCDecl(fn interface{}) uintptr { + return syscall.NewCallbackCDecl(fn) +} + +// windows api calls + +//sys GetLastError() (lasterr error) +//sys LoadLibrary(libname string) (handle Handle, err error) = LoadLibraryW +//sys LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) = LoadLibraryExW +//sys FreeLibrary(handle Handle) (err error) +//sys GetProcAddress(module Handle, procname string) (proc uintptr, err error) +//sys GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) = kernel32.GetModuleFileNameW +//sys GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) = kernel32.GetModuleHandleExW +//sys SetDefaultDllDirectories(directoryFlags uint32) (err error) +//sys SetDllDirectory(path string) (err error) = kernel32.SetDllDirectoryW +//sys GetVersion() (ver uint32, err error) +//sys FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) = FormatMessageW +//sys ExitProcess(exitcode uint32) +//sys IsWow64Process(handle Handle, isWow64 *bool) (err error) = IsWow64Process +//sys IsWow64Process2(handle Handle, processMachine *uint16, nativeMachine *uint16) (err error) = IsWow64Process2? 
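+// NewCallback usage, a minimal sketch: the callback must return a single
+// uintptr-sized value, as with the EnumWindows enumeration declared below
+// (the callback body here is illustrative only):
+//
+//	cb := NewCallback(func(hwnd HWND, lparam unsafe.Pointer) uintptr {
+//		return 1 // non-zero continues the enumeration
+//	})
+//	_ = EnumWindows(cb, nil)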
+//sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW +//sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW +//sys ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) +//sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) +//sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW +//sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState +//sys readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) = ReadFile +//sys writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) = WriteFile +//sys GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) +//sys SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) [failretval==0xffffffff] +//sys CloseHandle(handle Handle) (err error) +//sys GetStdHandle(stdhandle uint32) (handle Handle, err error) [failretval==InvalidHandle] +//sys SetStdHandle(stdhandle uint32, handle Handle) (err error) +//sys findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) [failretval==InvalidHandle] = FindFirstFileW +//sys findNextFile1(handle Handle, data *win32finddata1) (err error) = FindNextFileW +//sys FindClose(handle Handle) (err error) +//sys GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) +//sys GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) +//sys SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inBufferLen uint32) (err error) +//sys GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) = GetCurrentDirectoryW +//sys SetCurrentDirectory(path *uint16) (err error) = SetCurrentDirectoryW +//sys CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) = CreateDirectoryW +//sys RemoveDirectory(path *uint16) (err error) = RemoveDirectoryW +//sys DeleteFile(path *uint16) (err error) = DeleteFileW +//sys MoveFile(from *uint16, to *uint16) (err error) = MoveFileW +//sys MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) = MoveFileExW +//sys LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) +//sys UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) +//sys GetComputerName(buf *uint16, n *uint32) (err error) = GetComputerNameW +//sys GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW +//sys SetEndOfFile(handle Handle) (err error) +//sys GetSystemTimeAsFileTime(time *Filetime) +//sys GetSystemTimePreciseAsFileTime(time *Filetime) +//sys GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) [failretval==0xffffffff] +//sys CreateIoCompletionPort(filehandle Handle, cphandle Handle, key 
uintptr, threadcnt uint32) (handle Handle, err error) +//sys GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overlapped **Overlapped, timeout uint32) (err error) +//sys PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overlapped *Overlapped) (err error) +//sys CancelIo(s Handle) (err error) +//sys CancelIoEx(s Handle, o *Overlapped) (err error) +//sys CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) = CreateProcessW +//sys CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) = advapi32.CreateProcessAsUserW +//sys initializeProcThreadAttributeList(attrlist *ProcThreadAttributeList, attrcount uint32, flags uint32, size *uintptr) (err error) = InitializeProcThreadAttributeList +//sys deleteProcThreadAttributeList(attrlist *ProcThreadAttributeList) = DeleteProcThreadAttributeList +//sys updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, attr uintptr, value unsafe.Pointer, size uintptr, prevvalue unsafe.Pointer, returnedsize *uintptr) (err error) = UpdateProcThreadAttribute +//sys OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) +//sys ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) [failretval<=32] = shell32.ShellExecuteW +//sys GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) = user32.GetWindowThreadProcessId +//sys GetShellWindow() (shellWindow HWND) = user32.GetShellWindow +//sys MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) [failretval==0] = user32.MessageBoxW +//sys ExitWindowsEx(flags uint32, reason uint32) (err error) = user32.ExitWindowsEx +//sys shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) = shell32.SHGetKnownFolderPath +//sys TerminateProcess(handle Handle, exitcode uint32) (err error) +//sys GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) +//sys GetStartupInfo(startupInfo *StartupInfo) (err error) = GetStartupInfoW +//sys GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) +//sys DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) +//sys WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff] +//sys waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff] = WaitForMultipleObjects +//sys GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) = GetTempPathW +//sys CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) +//sys GetFileType(filehandle Handle) (n uint32, err error) +//sys CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) = 
advapi32.CryptAcquireContextW +//sys CryptReleaseContext(provhandle Handle, flags uint32) (err error) = advapi32.CryptReleaseContext +//sys CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) = advapi32.CryptGenRandom +//sys GetEnvironmentStrings() (envs *uint16, err error) [failretval==nil] = kernel32.GetEnvironmentStringsW +//sys FreeEnvironmentStrings(envs *uint16) (err error) = kernel32.FreeEnvironmentStringsW +//sys GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) = kernel32.GetEnvironmentVariableW +//sys SetEnvironmentVariable(name *uint16, value *uint16) (err error) = kernel32.SetEnvironmentVariableW +//sys ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW +//sys CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) = userenv.CreateEnvironmentBlock +//sys DestroyEnvironmentBlock(block *uint16) (err error) = userenv.DestroyEnvironmentBlock +//sys getTickCount64() (ms uint64) = kernel32.GetTickCount64 +//sys SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) +//sys GetFileAttributes(name *uint16) (attrs uint32, err error) [failretval==INVALID_FILE_ATTRIBUTES] = kernel32.GetFileAttributesW +//sys SetFileAttributes(name *uint16, attrs uint32) (err error) = kernel32.SetFileAttributesW +//sys GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) = kernel32.GetFileAttributesExW +//sys GetCommandLine() (cmd *uint16) = kernel32.GetCommandLineW +//sys CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) [failretval==nil] = shell32.CommandLineToArgvW +//sys LocalFree(hmem Handle) (handle Handle, err error) [failretval!=0] +//sys LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) +//sys SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) +//sys FlushFileBuffers(handle Handle) (err error) +//sys GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) = kernel32.GetFullPathNameW +//sys GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) = kernel32.GetLongPathNameW +//sys GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) = kernel32.GetShortPathNameW +//sys GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) = kernel32.GetFinalPathNameByHandleW +//sys CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateFileMappingW +//sys MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) +//sys UnmapViewOfFile(addr uintptr) (err error) +//sys FlushViewOfFile(addr uintptr, length uintptr) (err error) +//sys VirtualLock(addr uintptr, length uintptr) (err error) +//sys VirtualUnlock(addr uintptr, length uintptr) (err error) +//sys VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) = kernel32.VirtualAlloc +//sys VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) = kernel32.VirtualFree +//sys VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) = kernel32.VirtualProtect +//sys VirtualProtectEx(process Handle, address 
uintptr, size uintptr, newProtect uint32, oldProtect *uint32) (err error) = kernel32.VirtualProtectEx +//sys VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) = kernel32.VirtualQuery +//sys VirtualQueryEx(process Handle, address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) = kernel32.VirtualQueryEx +//sys ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesRead *uintptr) (err error) = kernel32.ReadProcessMemory +//sys WriteProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesWritten *uintptr) (err error) = kernel32.WriteProcessMemory +//sys TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) = mswsock.TransmitFile +//sys ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) = kernel32.ReadDirectoryChangesW +//sys FindFirstChangeNotification(path string, watchSubtree bool, notifyFilter uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.FindFirstChangeNotificationW +//sys FindNextChangeNotification(handle Handle) (err error) +//sys FindCloseChangeNotification(handle Handle) (err error) +//sys CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) = crypt32.CertOpenSystemStoreW +//sys CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) = crypt32.CertOpenStore +//sys CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) [failretval==nil] = crypt32.CertEnumCertificatesInStore +//sys CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) = crypt32.CertAddCertificateContextToStore +//sys CertCloseStore(store Handle, flags uint32) (err error) = crypt32.CertCloseStore +//sys CertDeleteCertificateFromStore(certContext *CertContext) (err error) = crypt32.CertDeleteCertificateFromStore +//sys CertDuplicateCertificateContext(certContext *CertContext) (dupContext *CertContext) = crypt32.CertDuplicateCertificateContext +//sys PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (store Handle, err error) = crypt32.PFXImportCertStore +//sys CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) = crypt32.CertGetCertificateChain +//sys CertFreeCertificateChain(ctx *CertChainContext) = crypt32.CertFreeCertificateChain +//sys CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) [failretval==nil] = crypt32.CertCreateCertificateContext +//sys CertFreeCertificateContext(ctx *CertContext) (err error) = crypt32.CertFreeCertificateContext +//sys CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) = crypt32.CertVerifyCertificateChainPolicy +//sys CertGetNameString(certContext *CertContext, nameType uint32, flags uint32, typePara unsafe.Pointer, name *uint16, size uint32) (chars uint32) = crypt32.CertGetNameStringW +//sys CertFindExtension(objId *byte, countExtensions 
uint32, extensions *CertExtension) (ret *CertExtension) = crypt32.CertFindExtension +//sys CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevCertContext *CertContext) (cert *CertContext, err error) [failretval==nil] = crypt32.CertFindCertificateInStore +//sys CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevChainContext *CertChainContext) (certchain *CertChainContext, err error) [failretval==nil] = crypt32.CertFindChainInStore +//sys CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, parameters unsafe.Pointer, cryptProvOrNCryptKey *Handle, keySpec *uint32, callerFreeProvOrNCryptKey *bool) (err error) = crypt32.CryptAcquireCertificatePrivateKey +//sys CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentTypeFlags uint32, expectedFormatTypeFlags uint32, flags uint32, msgAndCertEncodingType *uint32, contentType *uint32, formatType *uint32, certStore *Handle, msg *Handle, context *unsafe.Pointer) (err error) = crypt32.CryptQueryObject +//sys CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte, lenEncodedBytes uint32, flags uint32, decoded unsafe.Pointer, decodedLen *uint32) (err error) = crypt32.CryptDecodeObject +//sys CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) = crypt32.CryptProtectData +//sys CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) = crypt32.CryptUnprotectData +//sys WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) = wintrust.WinVerifyTrustEx +//sys RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) = advapi32.RegOpenKeyExW +//sys RegCloseKey(key Handle) (regerrno error) = advapi32.RegCloseKey +//sys RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegQueryInfoKeyW +//sys RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegEnumKeyExW +//sys RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegQueryValueExW +//sys RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, event Handle, asynchronous bool) (regerrno error) = advapi32.RegNotifyChangeKeyValue +//sys GetCurrentProcessId() (pid uint32) = kernel32.GetCurrentProcessId +//sys ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) = kernel32.ProcessIdToSessionId +//sys GetConsoleMode(console Handle, mode *uint32) (err error) = kernel32.GetConsoleMode +//sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode +//sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo +//sys setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition +//sys WriteConsole(console Handle, 
buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW +//sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW +//sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot +//sys Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32FirstW +//sys Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32NextW +//sys Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32FirstW +//sys Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32NextW +//sys Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) +//sys Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) +//sys DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) +// This function returns 1 byte BOOLEAN rather than the 4 byte BOOL. +//sys CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) [failretval&0xff==0] = CreateSymbolicLinkW +//sys CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) [failretval&0xff==0] = CreateHardLinkW +//sys GetCurrentThreadId() (id uint32) +//sys CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateEventW +//sys CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateEventExW +//sys OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) = kernel32.OpenEventW +//sys SetEvent(event Handle) (err error) = kernel32.SetEvent +//sys ResetEvent(event Handle) (err error) = kernel32.ResetEvent +//sys PulseEvent(event Handle) (err error) = kernel32.PulseEvent +//sys CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateMutexW +//sys CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateMutexExW +//sys OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) = kernel32.OpenMutexW +//sys ReleaseMutex(mutex Handle) (err error) = kernel32.ReleaseMutex +//sys SleepEx(milliseconds uint32, alertable bool) (ret uint32) = kernel32.SleepEx +//sys CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) = kernel32.CreateJobObjectW +//sys AssignProcessToJobObject(job Handle, process Handle) (err error) = kernel32.AssignProcessToJobObject +//sys TerminateJobObject(job Handle, exitCode uint32) (err error) = kernel32.TerminateJobObject +//sys SetErrorMode(mode uint32) (ret uint32) = kernel32.SetErrorMode +//sys ResumeThread(thread Handle) (ret uint32, err error) [failretval==0xffffffff] = kernel32.ResumeThread +//sys SetPriorityClass(process Handle, priorityClass uint32) (err error) = kernel32.SetPriorityClass +//sys 
GetPriorityClass(process Handle) (ret uint32, err error) = kernel32.GetPriorityClass +//sys QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobObjectInformation uintptr, JobObjectInformationLength uint32, retlen *uint32) (err error) = kernel32.QueryInformationJobObject +//sys SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) +//sys GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) +//sys GetProcessId(process Handle) (id uint32, err error) +//sys QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size *uint32) (err error) = kernel32.QueryFullProcessImageNameW +//sys OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) +//sys SetProcessPriorityBoost(process Handle, disable bool) (err error) = kernel32.SetProcessPriorityBoost +//sys GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) +//sys SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) +//sys GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) +//sys SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) +//sys GetActiveProcessorCount(groupNumber uint16) (ret uint32) +//sys GetMaximumProcessorCount(groupNumber uint16) (ret uint32) +//sys EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) = user32.EnumWindows +//sys EnumChildWindows(hwnd HWND, enumFunc uintptr, param unsafe.Pointer) = user32.EnumChildWindows +//sys GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, err error) = user32.GetClassNameW +//sys GetDesktopWindow() (hwnd HWND) = user32.GetDesktopWindow +//sys GetForegroundWindow() (hwnd HWND) = user32.GetForegroundWindow +//sys IsWindow(hwnd HWND) (isWindow bool) = user32.IsWindow +//sys IsWindowUnicode(hwnd HWND) (isUnicode bool) = user32.IsWindowUnicode +//sys IsWindowVisible(hwnd HWND) (isVisible bool) = user32.IsWindowVisible +//sys GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) = user32.GetGUIThreadInfo +//sys GetLargePageMinimum() (size uintptr) + +// Volume Management Functions +//sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW +//sys DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) = DeleteVolumeMountPointW +//sys FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) [failretval==InvalidHandle] = FindFirstVolumeW +//sys FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) [failretval==InvalidHandle] = FindFirstVolumeMountPointW +//sys FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) = FindNextVolumeW +//sys FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) = FindNextVolumeMountPointW +//sys FindVolumeClose(findVolume Handle) (err error) +//sys FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) +//sys GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) = GetDiskFreeSpaceExW +//sys GetDriveType(rootPathName *uint16) (driveType uint32) = GetDriveTypeW +//sys GetLogicalDrives() (drivesBitMask 
uint32, err error) [failretval==0] +//sys GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) [failretval==0] = GetLogicalDriveStringsW +//sys GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) = GetVolumeInformationW +//sys GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) = GetVolumeInformationByHandleW +//sys GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) = GetVolumeNameForVolumeMountPointW +//sys GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) = GetVolumePathNameW +//sys GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) = GetVolumePathNamesForVolumeNameW +//sys QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) [failretval==0] = QueryDosDeviceW +//sys SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) = SetVolumeLabelW +//sys SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) = SetVolumeMountPointW +//sys InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint32, forceAppsClosed bool, rebootAfterShutdown bool, reason uint32) (err error) = advapi32.InitiateSystemShutdownExW +//sys SetProcessShutdownParameters(level uint32, flags uint32) (err error) = kernel32.SetProcessShutdownParameters +//sys GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) = kernel32.GetProcessShutdownParameters +//sys clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) = ole32.CLSIDFromString +//sys stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) = ole32.StringFromGUID2 +//sys coCreateGuid(pguid *GUID) (ret error) = ole32.CoCreateGuid +//sys CoTaskMemFree(address unsafe.Pointer) = ole32.CoTaskMemFree +//sys CoInitializeEx(reserved uintptr, coInit uint32) (ret error) = ole32.CoInitializeEx +//sys CoUninitialize() = ole32.CoUninitialize +//sys CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable **uintptr) (ret error) = ole32.CoGetObject +//sys getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetProcessPreferredUILanguages +//sys getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetThreadPreferredUILanguages +//sys getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetUserPreferredUILanguages +//sys getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetSystemPreferredUILanguages +//sys findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, err error) = kernel32.FindResourceW +//sys SizeofResource(module Handle, resInfo Handle) (size uint32, err error) = kernel32.SizeofResource +//sys LoadResource(module Handle, resInfo Handle) (resData Handle, err error) = kernel32.LoadResource +//sys LockResource(resData Handle) (addr uintptr, err error) = 
kernel32.LockResource + +// Version APIs +//sys GetFileVersionInfoSize(filename string, zeroHandle *Handle) (bufSize uint32, err error) = version.GetFileVersionInfoSizeW +//sys GetFileVersionInfo(filename string, handle uint32, bufSize uint32, buffer unsafe.Pointer) (err error) = version.GetFileVersionInfoW +//sys VerQueryValue(block unsafe.Pointer, subBlock string, pointerToBufferPointer unsafe.Pointer, bufSize *uint32) (err error) = version.VerQueryValueW + +// Process Status API (PSAPI) +//sys EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) = psapi.EnumProcesses +//sys EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uint32) (err error) = psapi.EnumProcessModules +//sys EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *uint32, filterFlag uint32) (err error) = psapi.EnumProcessModulesEx +//sys GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb uint32) (err error) = psapi.GetModuleInformation +//sys GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size uint32) (err error) = psapi.GetModuleFileNameExW +//sys GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uint32) (err error) = psapi.GetModuleBaseNameW +//sys QueryWorkingSetEx(process Handle, pv uintptr, cb uint32) (err error) = psapi.QueryWorkingSetEx + +// NT Native APIs +//sys rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) = ntdll.RtlNtStatusToDosErrorNoTeb +//sys rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) = ntdll.RtlGetVersion +//sys rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) = ntdll.RtlGetNtVersionNumbers +//sys RtlGetCurrentPeb() (peb *PEB) = ntdll.RtlGetCurrentPeb +//sys RtlInitUnicodeString(destinationString *NTUnicodeString, sourceString *uint16) = ntdll.RtlInitUnicodeString +//sys RtlInitString(destinationString *NTString, sourceString *byte) = ntdll.RtlInitString +//sys NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, allocationSize *int64, attributes uint32, share uint32, disposition uint32, options uint32, eabuffer uintptr, ealength uint32) (ntstatus error) = ntdll.NtCreateFile +//sys NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (ntstatus error) = ntdll.NtCreateNamedPipeFile +//sys NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte, inBufferLen uint32, class uint32) (ntstatus error) = ntdll.NtSetInformationFile +//sys RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) = ntdll.RtlDosPathNameToNtPathName_U_WithStatus +//sys RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) = ntdll.RtlDosPathNameToRelativeNtPathName_U_WithStatus +//sys RtlDefaultNpAcl(acl **ACL) (ntstatus error) = ntdll.RtlDefaultNpAcl +//sys NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32, retLen *uint32) (ntstatus error) = ntdll.NtQueryInformationProcess +//sys NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32) (ntstatus error) = ntdll.NtSetInformationProcess +//sys 
NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32, retLen *uint32) (ntstatus error) = ntdll.NtQuerySystemInformation +//sys NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32) (ntstatus error) = ntdll.NtSetSystemInformation +//sys RtlAddFunctionTable(functionTable *RUNTIME_FUNCTION, entryCount uint32, baseAddress uintptr) (ret bool) = ntdll.RtlAddFunctionTable +//sys RtlDeleteFunctionTable(functionTable *RUNTIME_FUNCTION) (ret bool) = ntdll.RtlDeleteFunctionTable + +// Desktop Window Manager API (Dwmapi) +//sys DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmGetWindowAttribute +//sys DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmSetWindowAttribute + +// syscall interface implementation for other packages + +// GetCurrentProcess returns the handle for the current process. +// It is a pseudo handle that does not need to be closed. +// The returned error is always nil. +// +// Deprecated: use CurrentProcess for the same Handle without the nil +// error. +func GetCurrentProcess() (Handle, error) { + return CurrentProcess(), nil +} + +// CurrentProcess returns the handle for the current process. +// It is a pseudo handle that does not need to be closed. +func CurrentProcess() Handle { return Handle(^uintptr(1 - 1)) } + +// GetCurrentThread returns the handle for the current thread. +// It is a pseudo handle that does not need to be closed. +// The returned error is always nil. +// +// Deprecated: use CurrentThread for the same Handle without the nil +// error. +func GetCurrentThread() (Handle, error) { + return CurrentThread(), nil +} + +// CurrentThread returns the handle for the current thread. +// It is a pseudo handle that does not need to be closed. +func CurrentThread() Handle { return Handle(^uintptr(2 - 1)) } + +// GetProcAddressByOrdinal retrieves the address of the exported +// function from module by ordinal. 
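+// A minimal usage sketch; the module handle and ordinal value are illustrative,
+// since ordinals are specific to the DLL that exports the function:
+//
+//	addr, err := GetProcAddressByOrdinal(module, 12)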
+func GetProcAddressByOrdinal(module Handle, ordinal uintptr) (proc uintptr, err error) { + r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), ordinal, 0) + proc = uintptr(r0) + if proc == 0 { + err = errnoErr(e1) + } + return +} + +func Exit(code int) { ExitProcess(uint32(code)) } + +func makeInheritSa() *SecurityAttributes { + var sa SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + return &sa +} + +func Open(path string, mode int, perm uint32) (fd Handle, err error) { + if len(path) == 0 { + return InvalidHandle, ERROR_FILE_NOT_FOUND + } + pathp, err := UTF16PtrFromString(path) + if err != nil { + return InvalidHandle, err + } + var access uint32 + switch mode & (O_RDONLY | O_WRONLY | O_RDWR) { + case O_RDONLY: + access = GENERIC_READ + case O_WRONLY: + access = GENERIC_WRITE + case O_RDWR: + access = GENERIC_READ | GENERIC_WRITE + } + if mode&O_CREAT != 0 { + access |= GENERIC_WRITE + } + if mode&O_APPEND != 0 { + access &^= GENERIC_WRITE + access |= FILE_APPEND_DATA + } + sharemode := uint32(FILE_SHARE_READ | FILE_SHARE_WRITE) + var sa *SecurityAttributes + if mode&O_CLOEXEC == 0 { + sa = makeInheritSa() + } + var createmode uint32 + switch { + case mode&(O_CREAT|O_EXCL) == (O_CREAT | O_EXCL): + createmode = CREATE_NEW + case mode&(O_CREAT|O_TRUNC) == (O_CREAT | O_TRUNC): + createmode = CREATE_ALWAYS + case mode&O_CREAT == O_CREAT: + createmode = OPEN_ALWAYS + case mode&O_TRUNC == O_TRUNC: + createmode = TRUNCATE_EXISTING + default: + createmode = OPEN_EXISTING + } + var attrs uint32 = FILE_ATTRIBUTE_NORMAL + if perm&S_IWRITE == 0 { + attrs = FILE_ATTRIBUTE_READONLY + } + h, e := CreateFile(pathp, access, sharemode, sa, createmode, attrs, 0) + return h, e +} + +func Read(fd Handle, p []byte) (n int, err error) { + var done uint32 + e := ReadFile(fd, p, &done, nil) + if e != nil { + if e == ERROR_BROKEN_PIPE { + // NOTE(brainman): work around ERROR_BROKEN_PIPE is returned on reading EOF from stdin + return 0, nil + } + return 0, e + } + return int(done), nil +} + +func Write(fd Handle, p []byte) (n int, err error) { + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + var done uint32 + e := WriteFile(fd, p, &done, nil) + if e != nil { + return 0, e + } + return int(done), nil +} + +func ReadFile(fd Handle, p []byte, done *uint32, overlapped *Overlapped) error { + err := readFile(fd, p, done, overlapped) + if raceenabled { + if *done > 0 { + raceWriteRange(unsafe.Pointer(&p[0]), int(*done)) + } + raceAcquire(unsafe.Pointer(&ioSync)) + } + return err +} + +func WriteFile(fd Handle, p []byte, done *uint32, overlapped *Overlapped) error { + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + err := writeFile(fd, p, done, overlapped) + if raceenabled && *done > 0 { + raceReadRange(unsafe.Pointer(&p[0]), int(*done)) + } + return err +} + +var ioSync int64 + +func Seek(fd Handle, offset int64, whence int) (newoffset int64, err error) { + var w uint32 + switch whence { + case 0: + w = FILE_BEGIN + case 1: + w = FILE_CURRENT + case 2: + w = FILE_END + } + hi := int32(offset >> 32) + lo := int32(offset) + // use GetFileType to check pipe, pipe can't do seek + ft, _ := GetFileType(fd) + if ft == FILE_TYPE_PIPE { + return 0, syscall.EPIPE + } + rlo, e := SetFilePointer(fd, lo, &hi, w) + if e != nil { + return 0, e + } + return int64(hi)<<32 + int64(rlo), nil +} + +func Close(fd Handle) (err error) { + return CloseHandle(fd) +} + +var ( + Stdin = getStdHandle(STD_INPUT_HANDLE) + Stdout = 
getStdHandle(STD_OUTPUT_HANDLE) + Stderr = getStdHandle(STD_ERROR_HANDLE) +) + +func getStdHandle(stdhandle uint32) (fd Handle) { + r, _ := GetStdHandle(stdhandle) + return r +} + +const ImplementsGetwd = true + +func Getwd() (wd string, err error) { + b := make([]uint16, 300) + n, e := GetCurrentDirectory(uint32(len(b)), &b[0]) + if e != nil { + return "", e + } + return string(utf16.Decode(b[0:n])), nil +} + +func Chdir(path string) (err error) { + pathp, err := UTF16PtrFromString(path) + if err != nil { + return err + } + return SetCurrentDirectory(pathp) +} + +func Mkdir(path string, mode uint32) (err error) { + pathp, err := UTF16PtrFromString(path) + if err != nil { + return err + } + return CreateDirectory(pathp, nil) +} + +func Rmdir(path string) (err error) { + pathp, err := UTF16PtrFromString(path) + if err != nil { + return err + } + return RemoveDirectory(pathp) +} + +func Unlink(path string) (err error) { + pathp, err := UTF16PtrFromString(path) + if err != nil { + return err + } + return DeleteFile(pathp) +} + +func Rename(oldpath, newpath string) (err error) { + from, err := UTF16PtrFromString(oldpath) + if err != nil { + return err + } + to, err := UTF16PtrFromString(newpath) + if err != nil { + return err + } + return MoveFileEx(from, to, MOVEFILE_REPLACE_EXISTING) +} + +func ComputerName() (name string, err error) { + var n uint32 = MAX_COMPUTERNAME_LENGTH + 1 + b := make([]uint16, n) + e := GetComputerName(&b[0], &n) + if e != nil { + return "", e + } + return string(utf16.Decode(b[0:n])), nil +} + +func DurationSinceBoot() time.Duration { + return time.Duration(getTickCount64()) * time.Millisecond +} + +func Ftruncate(fd Handle, length int64) (err error) { + curoffset, e := Seek(fd, 0, 1) + if e != nil { + return e + } + defer Seek(fd, curoffset, 0) + _, e = Seek(fd, length, 0) + if e != nil { + return e + } + e = SetEndOfFile(fd) + if e != nil { + return e + } + return nil +} + +func Gettimeofday(tv *Timeval) (err error) { + var ft Filetime + GetSystemTimeAsFileTime(&ft) + *tv = NsecToTimeval(ft.Nanoseconds()) + return nil +} + +func Pipe(p []Handle) (err error) { + if len(p) != 2 { + return syscall.EINVAL + } + var r, w Handle + e := CreatePipe(&r, &w, makeInheritSa(), 0) + if e != nil { + return e + } + p[0] = r + p[1] = w + return nil +} + +func Utimes(path string, tv []Timeval) (err error) { + if len(tv) != 2 { + return syscall.EINVAL + } + pathp, e := UTF16PtrFromString(path) + if e != nil { + return e + } + h, e := CreateFile(pathp, + FILE_WRITE_ATTRIBUTES, FILE_SHARE_WRITE, nil, + OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, 0) + if e != nil { + return e + } + defer CloseHandle(h) + a := NsecToFiletime(tv[0].Nanoseconds()) + w := NsecToFiletime(tv[1].Nanoseconds()) + return SetFileTime(h, nil, &a, &w) +} + +func UtimesNano(path string, ts []Timespec) (err error) { + if len(ts) != 2 { + return syscall.EINVAL + } + pathp, e := UTF16PtrFromString(path) + if e != nil { + return e + } + h, e := CreateFile(pathp, + FILE_WRITE_ATTRIBUTES, FILE_SHARE_WRITE, nil, + OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, 0) + if e != nil { + return e + } + defer CloseHandle(h) + a := NsecToFiletime(TimespecToNsec(ts[0])) + w := NsecToFiletime(TimespecToNsec(ts[1])) + return SetFileTime(h, nil, &a, &w) +} + +func Fsync(fd Handle) (err error) { + return FlushFileBuffers(fd) +} + +func Chmod(path string, mode uint32) (err error) { + p, e := UTF16PtrFromString(path) + if e != nil { + return e + } + attrs, e := GetFileAttributes(p) + if e != nil { + return e + } + if mode&S_IWRITE != 0 { + 
attrs &^= FILE_ATTRIBUTE_READONLY + } else { + attrs |= FILE_ATTRIBUTE_READONLY + } + return SetFileAttributes(p, attrs) +} + +func LoadGetSystemTimePreciseAsFileTime() error { + return procGetSystemTimePreciseAsFileTime.Find() +} + +func LoadCancelIoEx() error { + return procCancelIoEx.Find() +} + +func LoadSetFileCompletionNotificationModes() error { + return procSetFileCompletionNotificationModes.Find() +} + +func WaitForMultipleObjects(handles []Handle, waitAll bool, waitMilliseconds uint32) (event uint32, err error) { + // Every other win32 array API takes arguments as "pointer, count", except for this function. So we + // can't declare it as a usual [] type, because mksyscall will use the opposite order. We therefore + // trivially stub this ourselves. + + var handlePtr *Handle + if len(handles) > 0 { + handlePtr = &handles[0] + } + return waitForMultipleObjects(uint32(len(handles)), uintptr(unsafe.Pointer(handlePtr)), waitAll, waitMilliseconds) +} + +// net api calls + +const socket_error = uintptr(^uint32(0)) + +//sys WSAStartup(verreq uint32, data *WSAData) (sockerr error) = ws2_32.WSAStartup +//sys WSACleanup() (err error) [failretval==socket_error] = ws2_32.WSACleanup +//sys WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) [failretval==socket_error] = ws2_32.WSAIoctl +//sys WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) (err error) [failretval==socket_error] = ws2_32.WSALookupServiceBeginW +//sys WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WSAQUERYSET) (err error) [failretval==socket_error] = ws2_32.WSALookupServiceNextW +//sys WSALookupServiceEnd(handle Handle) (err error) [failretval==socket_error] = ws2_32.WSALookupServiceEnd +//sys socket(af int32, typ int32, protocol int32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.socket +//sys sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) (err error) [failretval==socket_error] = ws2_32.sendto +//sys recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen *int32) (n int32, err error) [failretval==-1] = ws2_32.recvfrom +//sys Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) [failretval==socket_error] = ws2_32.setsockopt +//sys Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) [failretval==socket_error] = ws2_32.getsockopt +//sys bind(s Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socket_error] = ws2_32.bind +//sys connect(s Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socket_error] = ws2_32.connect +//sys getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) [failretval==socket_error] = ws2_32.getsockname +//sys getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) [failretval==socket_error] = ws2_32.getpeername +//sys listen(s Handle, backlog int32) (err error) [failretval==socket_error] = ws2_32.listen +//sys shutdown(s Handle, how int32) (err error) [failretval==socket_error] = ws2_32.shutdown +//sys Closesocket(s Handle) (err error) [failretval==socket_error] = ws2_32.closesocket +//sys AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) = mswsock.AcceptEx +//sys GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa 
**RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) = mswsock.GetAcceptExSockaddrs +//sys WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecv +//sys WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASend +//sys WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecvFrom +//sys WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASendTo +//sys WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.WSASocketW +//sys GetHostByName(name string) (h *Hostent, err error) [failretval==nil] = ws2_32.gethostbyname +//sys GetServByName(name string, proto string) (s *Servent, err error) [failretval==nil] = ws2_32.getservbyname +//sys Ntohs(netshort uint16) (u uint16) = ws2_32.ntohs +//sys GetProtoByName(name string) (p *Protoent, err error) [failretval==nil] = ws2_32.getprotobyname +//sys DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) = dnsapi.DnsQuery_W +//sys DnsRecordListFree(rl *DNSRecord, freetype uint32) = dnsapi.DnsRecordListFree +//sys DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) = dnsapi.DnsNameCompare_W +//sys GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) = ws2_32.GetAddrInfoW +//sys FreeAddrInfoW(addrinfo *AddrinfoW) = ws2_32.FreeAddrInfoW +//sys GetIfEntry(pIfRow *MibIfRow) (errcode error) = iphlpapi.GetIfEntry +//sys GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) = iphlpapi.GetAdaptersInfo +//sys SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) = kernel32.SetFileCompletionNotificationModes +//sys WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) [failretval==-1] = ws2_32.WSAEnumProtocolsW +//sys WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult +//sys GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) = iphlpapi.GetAdaptersAddresses +//sys GetACP() (acp uint32) = kernel32.GetACP +//sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar +//sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx + +// For testing: clients can set this flag to force +// creation of IPv6 sockets to return EAFNOSUPPORT. 
+var SocketDisableIPv6 bool + +type RawSockaddrInet4 struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddr struct { + Family uint16 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [100]int8 +} + +type Sockaddr interface { + sockaddr() (ptr unsafe.Pointer, len int32, err error) // lowercase; only we can define Sockaddrs +} + +type SockaddrInet4 struct { + Port int + Addr [4]byte + raw RawSockaddrInet4 +} + +func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, int32, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, syscall.EINVAL + } + sa.raw.Family = AF_INET + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + sa.raw.Addr = sa.Addr + return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil +} + +type SockaddrInet6 struct { + Port int + ZoneId uint32 + Addr [16]byte + raw RawSockaddrInet6 +} + +func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, int32, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, syscall.EINVAL + } + sa.raw.Family = AF_INET6 + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + sa.raw.Scope_id = sa.ZoneId + sa.raw.Addr = sa.Addr + return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil +} + +type RawSockaddrUnix struct { + Family uint16 + Path [UNIX_PATH_MAX]int8 +} + +type SockaddrUnix struct { + Name string + raw RawSockaddrUnix +} + +func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, int32, error) { + name := sa.Name + n := len(name) + if n > len(sa.raw.Path) { + return nil, 0, syscall.EINVAL + } + if n == len(sa.raw.Path) && name[0] != '@' { + return nil, 0, syscall.EINVAL + } + sa.raw.Family = AF_UNIX + for i := 0; i < n; i++ { + sa.raw.Path[i] = int8(name[i]) + } + // length is family (uint16), name, NUL. + sl := int32(2) + if n > 0 { + sl += int32(n) + 1 + } + if sa.raw.Path[0] == '@' { + sa.raw.Path[0] = 0 + // Don't count trailing NUL for abstract address. + sl-- + } + + return unsafe.Pointer(&sa.raw), sl, nil +} + +type RawSockaddrBth struct { + AddressFamily [2]byte + BtAddr [8]byte + ServiceClassId [16]byte + Port [4]byte +} + +type SockaddrBth struct { + BtAddr uint64 + ServiceClassId GUID + Port uint32 + + raw RawSockaddrBth +} + +func (sa *SockaddrBth) sockaddr() (unsafe.Pointer, int32, error) { + family := AF_BTH + sa.raw = RawSockaddrBth{ + AddressFamily: *(*[2]byte)(unsafe.Pointer(&family)), + BtAddr: *(*[8]byte)(unsafe.Pointer(&sa.BtAddr)), + Port: *(*[4]byte)(unsafe.Pointer(&sa.Port)), + ServiceClassId: *(*[16]byte)(unsafe.Pointer(&sa.ServiceClassId)), + } + return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil +} + +func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) { + switch rsa.Addr.Family { + case AF_UNIX: + pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa)) + sa := new(SockaddrUnix) + if pp.Path[0] == 0 { + // "Abstract" Unix domain socket. + // Rewrite leading NUL as @ for textual display. + // (This is the standard convention.) + // Not friendly to overwrite in place, + // but the callers below don't care. + pp.Path[0] = '@' + } + + // Assume path ends at NUL. + // This is not technically the Linux semantics for + // abstract Unix domain sockets--they are supposed + // to be uninterpreted fixed-size binary blobs--but + // everyone uses this convention. 
+ n := 0 + for n < len(pp.Path) && pp.Path[n] != 0 { + n++ + } + sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n)) + return sa, nil + + case AF_INET: + pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet4) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + sa.Addr = pp.Addr + return sa, nil + + case AF_INET6: + pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet6) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + sa.ZoneId = pp.Scope_id + sa.Addr = pp.Addr + return sa, nil + } + return nil, syscall.EAFNOSUPPORT +} + +func Socket(domain, typ, proto int) (fd Handle, err error) { + if domain == AF_INET6 && SocketDisableIPv6 { + return InvalidHandle, syscall.EAFNOSUPPORT + } + return socket(int32(domain), int32(typ), int32(proto)) +} + +func SetsockoptInt(fd Handle, level, opt int, value int) (err error) { + v := int32(value) + return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&v)), int32(unsafe.Sizeof(v))) +} + +func Bind(fd Handle, sa Sockaddr) (err error) { + ptr, n, err := sa.sockaddr() + if err != nil { + return err + } + return bind(fd, ptr, n) +} + +func Connect(fd Handle, sa Sockaddr) (err error) { + ptr, n, err := sa.sockaddr() + if err != nil { + return err + } + return connect(fd, ptr, n) +} + +func GetBestInterfaceEx(sa Sockaddr, pdwBestIfIndex *uint32) (err error) { + ptr, _, err := sa.sockaddr() + if err != nil { + return err + } + return getBestInterfaceEx(ptr, pdwBestIfIndex) +} + +func Getsockname(fd Handle) (sa Sockaddr, err error) { + var rsa RawSockaddrAny + l := int32(unsafe.Sizeof(rsa)) + if err = getsockname(fd, &rsa, &l); err != nil { + return + } + return rsa.Sockaddr() +} + +func Getpeername(fd Handle) (sa Sockaddr, err error) { + var rsa RawSockaddrAny + l := int32(unsafe.Sizeof(rsa)) + if err = getpeername(fd, &rsa, &l); err != nil { + return + } + return rsa.Sockaddr() +} + +func Listen(s Handle, n int) (err error) { + return listen(s, int32(n)) +} + +func Shutdown(fd Handle, how int) (err error) { + return shutdown(fd, int32(how)) +} + +func WSASendto(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to Sockaddr, overlapped *Overlapped, croutine *byte) (err error) { + var rsa unsafe.Pointer + var l int32 + if to != nil { + rsa, l, err = to.sockaddr() + if err != nil { + return err + } + } + return WSASendTo(s, bufs, bufcnt, sent, flags, (*RawSockaddrAny)(unsafe.Pointer(rsa)), l, overlapped, croutine) +} + +func LoadGetAddrInfo() error { + return procGetAddrInfoW.Find() +} + +var connectExFunc struct { + once sync.Once + addr uintptr + err error +} + +func LoadConnectEx() error { + connectExFunc.once.Do(func() { + var s Handle + s, connectExFunc.err = Socket(AF_INET, SOCK_STREAM, IPPROTO_TCP) + if connectExFunc.err != nil { + return + } + defer CloseHandle(s) + var n uint32 + connectExFunc.err = WSAIoctl(s, + SIO_GET_EXTENSION_FUNCTION_POINTER, + (*byte)(unsafe.Pointer(&WSAID_CONNECTEX)), + uint32(unsafe.Sizeof(WSAID_CONNECTEX)), + (*byte)(unsafe.Pointer(&connectExFunc.addr)), + uint32(unsafe.Sizeof(connectExFunc.addr)), + &n, nil, 0) + }) + return connectExFunc.err +} + +func connectEx(s Handle, name unsafe.Pointer, namelen int32, sendBuf *byte, sendDataLen uint32, bytesSent *uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall9(connectExFunc.addr, 7, uintptr(s), uintptr(name), uintptr(namelen), uintptr(unsafe.Pointer(sendBuf)), uintptr(sendDataLen), 
uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ConnectEx(fd Handle, sa Sockaddr, sendBuf *byte, sendDataLen uint32, bytesSent *uint32, overlapped *Overlapped) error { + err := LoadConnectEx() + if err != nil { + return errorspkg.New("failed to find ConnectEx: " + err.Error()) + } + ptr, n, err := sa.sockaddr() + if err != nil { + return err + } + return connectEx(fd, ptr, n, sendBuf, sendDataLen, bytesSent, overlapped) +} + +var sendRecvMsgFunc struct { + once sync.Once + sendAddr uintptr + recvAddr uintptr + err error +} + +func loadWSASendRecvMsg() error { + sendRecvMsgFunc.once.Do(func() { + var s Handle + s, sendRecvMsgFunc.err = Socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP) + if sendRecvMsgFunc.err != nil { + return + } + defer CloseHandle(s) + var n uint32 + sendRecvMsgFunc.err = WSAIoctl(s, + SIO_GET_EXTENSION_FUNCTION_POINTER, + (*byte)(unsafe.Pointer(&WSAID_WSARECVMSG)), + uint32(unsafe.Sizeof(WSAID_WSARECVMSG)), + (*byte)(unsafe.Pointer(&sendRecvMsgFunc.recvAddr)), + uint32(unsafe.Sizeof(sendRecvMsgFunc.recvAddr)), + &n, nil, 0) + if sendRecvMsgFunc.err != nil { + return + } + sendRecvMsgFunc.err = WSAIoctl(s, + SIO_GET_EXTENSION_FUNCTION_POINTER, + (*byte)(unsafe.Pointer(&WSAID_WSASENDMSG)), + uint32(unsafe.Sizeof(WSAID_WSASENDMSG)), + (*byte)(unsafe.Pointer(&sendRecvMsgFunc.sendAddr)), + uint32(unsafe.Sizeof(sendRecvMsgFunc.sendAddr)), + &n, nil, 0) + }) + return sendRecvMsgFunc.err +} + +func WSASendMsg(fd Handle, msg *WSAMsg, flags uint32, bytesSent *uint32, overlapped *Overlapped, croutine *byte) error { + err := loadWSASendRecvMsg() + if err != nil { + return err + } + r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.sendAddr, 6, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(flags), uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + if r1 == socket_error { + err = errnoErr(e1) + } + return err +} + +func WSARecvMsg(fd Handle, msg *WSAMsg, bytesReceived *uint32, overlapped *Overlapped, croutine *byte) error { + err := loadWSASendRecvMsg() + if err != nil { + return err + } + r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.recvAddr, 5, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(bytesReceived)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0) + if r1 == socket_error { + err = errnoErr(e1) + } + return err +} + +// Invented structures to support what package os expects. +type Rusage struct { + CreationTime Filetime + ExitTime Filetime + KernelTime Filetime + UserTime Filetime +} + +type WaitStatus struct { + ExitCode uint32 +} + +func (w WaitStatus) Exited() bool { return true } + +func (w WaitStatus) ExitStatus() int { return int(w.ExitCode) } + +func (w WaitStatus) Signal() Signal { return -1 } + +func (w WaitStatus) CoreDump() bool { return false } + +func (w WaitStatus) Stopped() bool { return false } + +func (w WaitStatus) Continued() bool { return false } + +func (w WaitStatus) StopSignal() Signal { return -1 } + +func (w WaitStatus) Signaled() bool { return false } + +func (w WaitStatus) TrapCause() int { return -1 } + +// Timespec is an invented structure on Windows, but here for +// consistency with the corresponding package for other operating systems. 
+type Timespec struct { + Sec int64 + Nsec int64 +} + +func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } + +func NsecToTimespec(nsec int64) (ts Timespec) { + ts.Sec = nsec / 1e9 + ts.Nsec = nsec % 1e9 + return +} + +// TODO(brainman): fix all needed for net + +func Accept(fd Handle) (nfd Handle, sa Sockaddr, err error) { return 0, nil, syscall.EWINDOWS } + +func Recvfrom(fd Handle, p []byte, flags int) (n int, from Sockaddr, err error) { + var rsa RawSockaddrAny + l := int32(unsafe.Sizeof(rsa)) + n32, err := recvfrom(fd, p, int32(flags), &rsa, &l) + n = int(n32) + if err != nil { + return + } + from, err = rsa.Sockaddr() + return +} + +func Sendto(fd Handle, p []byte, flags int, to Sockaddr) (err error) { + ptr, l, err := to.sockaddr() + if err != nil { + return err + } + return sendto(fd, p, int32(flags), ptr, l) +} + +func SetsockoptTimeval(fd Handle, level, opt int, tv *Timeval) (err error) { return syscall.EWINDOWS } + +// The Linger struct is wrong but we only noticed after Go 1. +// sysLinger is the real system call structure. + +// BUG(brainman): The definition of Linger is not appropriate for direct use +// with Setsockopt and Getsockopt. +// Use SetsockoptLinger instead. + +type Linger struct { + Onoff int32 + Linger int32 +} + +type sysLinger struct { + Onoff uint16 + Linger uint16 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +func GetsockoptInt(fd Handle, level, opt int) (int, error) { + v := int32(0) + l := int32(unsafe.Sizeof(v)) + err := Getsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&v)), &l) + return int(v), err +} + +func SetsockoptLinger(fd Handle, level, opt int, l *Linger) (err error) { + sys := sysLinger{Onoff: uint16(l.Onoff), Linger: uint16(l.Linger)} + return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&sys)), int32(unsafe.Sizeof(sys))) +} + +func SetsockoptInet4Addr(fd Handle, level, opt int, value [4]byte) (err error) { + return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&value[0])), 4) +} +func SetsockoptIPMreq(fd Handle, level, opt int, mreq *IPMreq) (err error) { + return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(mreq)), int32(unsafe.Sizeof(*mreq))) +} +func SetsockoptIPv6Mreq(fd Handle, level, opt int, mreq *IPv6Mreq) (err error) { + return syscall.EWINDOWS +} + +func Getpid() (pid int) { return int(GetCurrentProcessId()) } + +func FindFirstFile(name *uint16, data *Win32finddata) (handle Handle, err error) { + // NOTE(rsc): The Win32finddata struct is wrong for the system call: + // the two paths are each one uint16 short. Use the correct struct, + // a win32finddata1, and then copy the results out. + // There is no loss of expressivity here, because the final + // uint16, if it is used, is supposed to be a NUL, and Go doesn't need that. + // For Go 1.1, we might avoid the allocation of win32finddata1 here + // by adding a final Bug [2]uint16 field to the struct and then + // adjusting the fields in the result directly. 
+ var data1 win32finddata1 + handle, err = findFirstFile1(name, &data1) + if err == nil { + copyFindData(data, &data1) + } + return +} + +func FindNextFile(handle Handle, data *Win32finddata) (err error) { + var data1 win32finddata1 + err = findNextFile1(handle, &data1) + if err == nil { + copyFindData(data, &data1) + } + return +} + +func getProcessEntry(pid int) (*ProcessEntry32, error) { + snapshot, err := CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0) + if err != nil { + return nil, err + } + defer CloseHandle(snapshot) + var procEntry ProcessEntry32 + procEntry.Size = uint32(unsafe.Sizeof(procEntry)) + if err = Process32First(snapshot, &procEntry); err != nil { + return nil, err + } + for { + if procEntry.ProcessID == uint32(pid) { + return &procEntry, nil + } + err = Process32Next(snapshot, &procEntry) + if err != nil { + return nil, err + } + } +} + +func Getppid() (ppid int) { + pe, err := getProcessEntry(Getpid()) + if err != nil { + return -1 + } + return int(pe.ParentProcessID) +} + +// TODO(brainman): fix all needed for os +func Fchdir(fd Handle) (err error) { return syscall.EWINDOWS } +func Link(oldpath, newpath string) (err error) { return syscall.EWINDOWS } +func Symlink(path, link string) (err error) { return syscall.EWINDOWS } + +func Fchmod(fd Handle, mode uint32) (err error) { return syscall.EWINDOWS } +func Chown(path string, uid int, gid int) (err error) { return syscall.EWINDOWS } +func Lchown(path string, uid int, gid int) (err error) { return syscall.EWINDOWS } +func Fchown(fd Handle, uid int, gid int) (err error) { return syscall.EWINDOWS } + +func Getuid() (uid int) { return -1 } +func Geteuid() (euid int) { return -1 } +func Getgid() (gid int) { return -1 } +func Getegid() (egid int) { return -1 } +func Getgroups() (gids []int, err error) { return nil, syscall.EWINDOWS } + +type Signal int + +func (s Signal) Signal() {} + +func (s Signal) String() string { + if 0 <= s && int(s) < len(signals) { + str := signals[s] + if str != "" { + return str + } + } + return "signal " + itoa(int(s)) +} + +func LoadCreateSymbolicLink() error { + return procCreateSymbolicLinkW.Find() +} + +// Readlink returns the destination of the named symbolic link. 
+func Readlink(path string, buf []byte) (n int, err error) { + fd, err := CreateFile(StringToUTF16Ptr(path), GENERIC_READ, 0, nil, OPEN_EXISTING, + FILE_FLAG_OPEN_REPARSE_POINT|FILE_FLAG_BACKUP_SEMANTICS, 0) + if err != nil { + return -1, err + } + defer CloseHandle(fd) + + rdbbuf := make([]byte, MAXIMUM_REPARSE_DATA_BUFFER_SIZE) + var bytesReturned uint32 + err = DeviceIoControl(fd, FSCTL_GET_REPARSE_POINT, nil, 0, &rdbbuf[0], uint32(len(rdbbuf)), &bytesReturned, nil) + if err != nil { + return -1, err + } + + rdb := (*reparseDataBuffer)(unsafe.Pointer(&rdbbuf[0])) + var s string + switch rdb.ReparseTag { + case IO_REPARSE_TAG_SYMLINK: + data := (*symbolicLinkReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer)) + p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0])) + s = UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameLength-data.PrintNameOffset)/2]) + case IO_REPARSE_TAG_MOUNT_POINT: + data := (*mountPointReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer)) + p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0])) + s = UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameLength-data.PrintNameOffset)/2]) + default: + // the path is not a symlink or junction but another type of reparse + // point + return -1, syscall.ENOENT + } + n = copy(buf, []byte(s)) + + return n, nil +} + +// GUIDFromString parses a string in the form of +// "{XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}" into a GUID. +func GUIDFromString(str string) (GUID, error) { + guid := GUID{} + str16, err := syscall.UTF16PtrFromString(str) + if err != nil { + return guid, err + } + err = clsidFromString(str16, &guid) + if err != nil { + return guid, err + } + return guid, nil +} + +// GenerateGUID creates a new random GUID. +func GenerateGUID() (GUID, error) { + guid := GUID{} + err := coCreateGuid(&guid) + if err != nil { + return guid, err + } + return guid, nil +} + +// String returns the canonical string form of the GUID, +// in the form of "{XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}". +func (guid GUID) String() string { + var str [100]uint16 + chars := stringFromGUID2(&guid, &str[0], int32(len(str))) + if chars <= 1 { + return "" + } + return string(utf16.Decode(str[:chars-1])) +} + +// KnownFolderPath returns a well-known folder path for the current user, specified by one of +// the FOLDERID_ constants, and chosen and optionally created based on a KF_ flag. +func KnownFolderPath(folderID *KNOWNFOLDERID, flags uint32) (string, error) { + return Token(0).KnownFolderPath(folderID, flags) +} + +// KnownFolderPath returns a well-known folder path for the user token, specified by one of +// the FOLDERID_ constants, and chosen and optionally created based on a KF_ flag. +func (t Token) KnownFolderPath(folderID *KNOWNFOLDERID, flags uint32) (string, error) { + var p *uint16 + err := shGetKnownFolderPath(folderID, flags, t, &p) + if err != nil { + return "", err + } + defer CoTaskMemFree(unsafe.Pointer(p)) + return UTF16PtrToString(p), nil +} + +// RtlGetVersion returns the version of the underlying operating system, ignoring +// manifest semantics but is affected by the application compatibility layer. +func RtlGetVersion() *OsVersionInfoEx { + info := &OsVersionInfoEx{} + info.osVersionInfoSize = uint32(unsafe.Sizeof(*info)) + // According to documentation, this function always succeeds. + // The function doesn't even check the validity of the + // osVersionInfoSize member. Disassembling ntdll.dll indicates + // that the documentation is indeed correct about that. 
+ _ = rtlGetVersion(info) + return info +} + +// RtlGetNtVersionNumbers returns the version of the underlying operating system, +// ignoring manifest semantics and the application compatibility layer. +func RtlGetNtVersionNumbers() (majorVersion, minorVersion, buildNumber uint32) { + rtlGetNtVersionNumbers(&majorVersion, &minorVersion, &buildNumber) + buildNumber &= 0xffff + return +} + +// GetProcessPreferredUILanguages retrieves the process preferred UI languages. +func GetProcessPreferredUILanguages(flags uint32) ([]string, error) { + return getUILanguages(flags, getProcessPreferredUILanguages) +} + +// GetThreadPreferredUILanguages retrieves the thread preferred UI languages for the current thread. +func GetThreadPreferredUILanguages(flags uint32) ([]string, error) { + return getUILanguages(flags, getThreadPreferredUILanguages) +} + +// GetUserPreferredUILanguages retrieves information about the user preferred UI languages. +func GetUserPreferredUILanguages(flags uint32) ([]string, error) { + return getUILanguages(flags, getUserPreferredUILanguages) +} + +// GetSystemPreferredUILanguages retrieves the system preferred UI languages. +func GetSystemPreferredUILanguages(flags uint32) ([]string, error) { + return getUILanguages(flags, getSystemPreferredUILanguages) +} + +func getUILanguages(flags uint32, f func(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) error) ([]string, error) { + size := uint32(128) + for { + var numLanguages uint32 + buf := make([]uint16, size) + err := f(flags, &numLanguages, &buf[0], &size) + if err == ERROR_INSUFFICIENT_BUFFER { + continue + } + if err != nil { + return nil, err + } + buf = buf[:size] + if numLanguages == 0 || len(buf) == 0 { // GetProcessPreferredUILanguages may return numLanguages==0 with "\0\0" + return []string{}, nil + } + if buf[len(buf)-1] == 0 { + buf = buf[:len(buf)-1] // remove terminating null + } + languages := make([]string, 0, numLanguages) + from := 0 + for i, c := range buf { + if c == 0 { + languages = append(languages, string(utf16.Decode(buf[from:i]))) + from = i + 1 + } + } + return languages, nil + } +} + +func SetConsoleCursorPosition(console Handle, position Coord) error { + return setConsoleCursorPosition(console, *((*uint32)(unsafe.Pointer(&position)))) +} + +func (s NTStatus) Errno() syscall.Errno { + return rtlNtStatusToDosErrorNoTeb(s) +} + +func langID(pri, sub uint16) uint32 { return uint32(sub)<<10 | uint32(pri) } + +func (s NTStatus) Error() string { + b := make([]uint16, 300) + n, err := FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_FROM_HMODULE|FORMAT_MESSAGE_ARGUMENT_ARRAY, modntdll.Handle(), uint32(s), langID(LANG_ENGLISH, SUBLANG_ENGLISH_US), b, nil) + if err != nil { + return fmt.Sprintf("NTSTATUS 0x%08x", uint32(s)) + } + // trim terminating \r and \n + for ; n > 0 && (b[n-1] == '\n' || b[n-1] == '\r'); n-- { + } + return string(utf16.Decode(b[:n])) +} + +// NewNTUnicodeString returns a new NTUnicodeString structure for use with native +// NT APIs that work over the NTUnicodeString type. Note that most Windows APIs +// do not use NTUnicodeString, and instead UTF16PtrFromString should be used for +// the more common *uint16 string type. +func NewNTUnicodeString(s string) (*NTUnicodeString, error) { + var u NTUnicodeString + s16, err := UTF16PtrFromString(s) + if err != nil { + return nil, err + } + RtlInitUnicodeString(&u, s16) + return &u, nil +} + +// Slice returns a uint16 slice that aliases the data in the NTUnicodeString. 
+func (s *NTUnicodeString) Slice() []uint16 { + var slice []uint16 + hdr := (*unsafeheader.Slice)(unsafe.Pointer(&slice)) + hdr.Data = unsafe.Pointer(s.Buffer) + hdr.Len = int(s.Length) + hdr.Cap = int(s.MaximumLength) + return slice +} + +func (s *NTUnicodeString) String() string { + return UTF16ToString(s.Slice()) +} + +// NewNTString returns a new NTString structure for use with native +// NT APIs that work over the NTString type. Note that most Windows APIs +// do not use NTString, and instead UTF16PtrFromString should be used for +// the more common *uint16 string type. +func NewNTString(s string) (*NTString, error) { + var nts NTString + s8, err := BytePtrFromString(s) + if err != nil { + return nil, err + } + RtlInitString(&nts, s8) + return &nts, nil +} + +// Slice returns a byte slice that aliases the data in the NTString. +func (s *NTString) Slice() []byte { + var slice []byte + hdr := (*unsafeheader.Slice)(unsafe.Pointer(&slice)) + hdr.Data = unsafe.Pointer(s.Buffer) + hdr.Len = int(s.Length) + hdr.Cap = int(s.MaximumLength) + return slice +} + +func (s *NTString) String() string { + return ByteSliceToString(s.Slice()) +} + +// FindResource resolves a resource of the given name and resource type. +func FindResource(module Handle, name, resType ResourceIDOrString) (Handle, error) { + var namePtr, resTypePtr uintptr + var name16, resType16 *uint16 + var err error + resolvePtr := func(i interface{}, keep **uint16) (uintptr, error) { + switch v := i.(type) { + case string: + *keep, err = UTF16PtrFromString(v) + if err != nil { + return 0, err + } + return uintptr(unsafe.Pointer(*keep)), nil + case ResourceID: + return uintptr(v), nil + } + return 0, errorspkg.New("parameter must be a ResourceID or a string") + } + namePtr, err = resolvePtr(name, &name16) + if err != nil { + return 0, err + } + resTypePtr, err = resolvePtr(resType, &resType16) + if err != nil { + return 0, err + } + resInfo, err := findResource(module, namePtr, resTypePtr) + runtime.KeepAlive(name16) + runtime.KeepAlive(resType16) + return resInfo, err +} + +func LoadResourceData(module, resInfo Handle) (data []byte, err error) { + size, err := SizeofResource(module, resInfo) + if err != nil { + return + } + resData, err := LoadResource(module, resInfo) + if err != nil { + return + } + ptr, err := LockResource(resData) + if err != nil { + return + } + h := (*unsafeheader.Slice)(unsafe.Pointer(&data)) + h.Data = unsafe.Pointer(ptr) + h.Len = int(size) + h.Cap = int(size) + return +} + +// PSAPI_WORKING_SET_EX_BLOCK contains extended working set information for a page. +type PSAPI_WORKING_SET_EX_BLOCK uint64 + +// Valid returns the validity of this page. +// If this bit is 1, the subsequent members are valid; otherwise they should be ignored. +func (b PSAPI_WORKING_SET_EX_BLOCK) Valid() bool { + return (b & 1) == 1 +} + +// ShareCount is the number of processes that share this page. The maximum value of this member is 7. +func (b PSAPI_WORKING_SET_EX_BLOCK) ShareCount() uint64 { + return b.intField(1, 3) +} + +// Win32Protection is the memory protection attributes of the page. For a list of values, see +// https://docs.microsoft.com/en-us/windows/win32/memory/memory-protection-constants +func (b PSAPI_WORKING_SET_EX_BLOCK) Win32Protection() uint64 { + return b.intField(4, 11) +} + +// Shared returns the shared status of this page. +// If this bit is 1, the page can be shared. +func (b PSAPI_WORKING_SET_EX_BLOCK) Shared() bool { + return (b & (1 << 15)) == 1 +} + +// Node is the NUMA node. 
The maximum value of this member is 63. +func (b PSAPI_WORKING_SET_EX_BLOCK) Node() uint64 { + return b.intField(16, 6) +} + +// Locked returns the locked status of this page. +// If this bit is 1, the virtual page is locked in physical memory. +func (b PSAPI_WORKING_SET_EX_BLOCK) Locked() bool { + return (b & (1 << 22)) == 1 +} + +// LargePage returns the large page status of this page. +// If this bit is 1, the page is a large page. +func (b PSAPI_WORKING_SET_EX_BLOCK) LargePage() bool { + return (b & (1 << 23)) == 1 +} + +// Bad returns the bad status of this page. +// If this bit is 1, the page is has been reported as bad. +func (b PSAPI_WORKING_SET_EX_BLOCK) Bad() bool { + return (b & (1 << 31)) == 1 +} + +// intField extracts an integer field in the PSAPI_WORKING_SET_EX_BLOCK union. +func (b PSAPI_WORKING_SET_EX_BLOCK) intField(start, length int) uint64 { + var mask PSAPI_WORKING_SET_EX_BLOCK + for pos := start; pos < start+length; pos++ { + mask |= (1 << pos) + } + + masked := b & mask + return uint64(masked >> start) +} + +// PSAPI_WORKING_SET_EX_INFORMATION contains extended working set information for a process. +type PSAPI_WORKING_SET_EX_INFORMATION struct { + // The virtual address. + VirtualAddress Pointer + // A PSAPI_WORKING_SET_EX_BLOCK union that indicates the attributes of the page at VirtualAddress. + VirtualAttributes PSAPI_WORKING_SET_EX_BLOCK +} diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go new file mode 100644 index 000000000..88e62a638 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -0,0 +1,3349 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +import ( + "net" + "syscall" + "unsafe" +) + +// NTStatus corresponds with NTSTATUS, error values returned by ntdll.dll and +// other native functions. +type NTStatus uint32 + +const ( + // Invented values to support what package os expects. 
+ O_RDONLY = 0x00000 + O_WRONLY = 0x00001 + O_RDWR = 0x00002 + O_CREAT = 0x00040 + O_EXCL = 0x00080 + O_NOCTTY = 0x00100 + O_TRUNC = 0x00200 + O_NONBLOCK = 0x00800 + O_APPEND = 0x00400 + O_SYNC = 0x01000 + O_ASYNC = 0x02000 + O_CLOEXEC = 0x80000 +) + +const ( + // More invented values for signals + SIGHUP = Signal(0x1) + SIGINT = Signal(0x2) + SIGQUIT = Signal(0x3) + SIGILL = Signal(0x4) + SIGTRAP = Signal(0x5) + SIGABRT = Signal(0x6) + SIGBUS = Signal(0x7) + SIGFPE = Signal(0x8) + SIGKILL = Signal(0x9) + SIGSEGV = Signal(0xb) + SIGPIPE = Signal(0xd) + SIGALRM = Signal(0xe) + SIGTERM = Signal(0xf) +) + +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/breakpoint trap", + 6: "aborted", + 7: "bus error", + 8: "floating point exception", + 9: "killed", + 10: "user defined signal 1", + 11: "segmentation fault", + 12: "user defined signal 2", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", +} + +const ( + FILE_READ_DATA = 0x00000001 + FILE_READ_ATTRIBUTES = 0x00000080 + FILE_READ_EA = 0x00000008 + FILE_WRITE_DATA = 0x00000002 + FILE_WRITE_ATTRIBUTES = 0x00000100 + FILE_WRITE_EA = 0x00000010 + FILE_APPEND_DATA = 0x00000004 + FILE_EXECUTE = 0x00000020 + + FILE_GENERIC_READ = STANDARD_RIGHTS_READ | FILE_READ_DATA | FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE + FILE_GENERIC_WRITE = STANDARD_RIGHTS_WRITE | FILE_WRITE_DATA | FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA | SYNCHRONIZE + FILE_GENERIC_EXECUTE = STANDARD_RIGHTS_EXECUTE | FILE_READ_ATTRIBUTES | FILE_EXECUTE | SYNCHRONIZE + + FILE_LIST_DIRECTORY = 0x00000001 + FILE_TRAVERSE = 0x00000020 + + FILE_SHARE_READ = 0x00000001 + FILE_SHARE_WRITE = 0x00000002 + FILE_SHARE_DELETE = 0x00000004 + + FILE_ATTRIBUTE_READONLY = 0x00000001 + FILE_ATTRIBUTE_HIDDEN = 0x00000002 + FILE_ATTRIBUTE_SYSTEM = 0x00000004 + FILE_ATTRIBUTE_DIRECTORY = 0x00000010 + FILE_ATTRIBUTE_ARCHIVE = 0x00000020 + FILE_ATTRIBUTE_DEVICE = 0x00000040 + FILE_ATTRIBUTE_NORMAL = 0x00000080 + FILE_ATTRIBUTE_TEMPORARY = 0x00000100 + FILE_ATTRIBUTE_SPARSE_FILE = 0x00000200 + FILE_ATTRIBUTE_REPARSE_POINT = 0x00000400 + FILE_ATTRIBUTE_COMPRESSED = 0x00000800 + FILE_ATTRIBUTE_OFFLINE = 0x00001000 + FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 0x00002000 + FILE_ATTRIBUTE_ENCRYPTED = 0x00004000 + FILE_ATTRIBUTE_INTEGRITY_STREAM = 0x00008000 + FILE_ATTRIBUTE_VIRTUAL = 0x00010000 + FILE_ATTRIBUTE_NO_SCRUB_DATA = 0x00020000 + FILE_ATTRIBUTE_RECALL_ON_OPEN = 0x00040000 + FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS = 0x00400000 + + INVALID_FILE_ATTRIBUTES = 0xffffffff + + CREATE_NEW = 1 + CREATE_ALWAYS = 2 + OPEN_EXISTING = 3 + OPEN_ALWAYS = 4 + TRUNCATE_EXISTING = 5 + + FILE_FLAG_OPEN_REQUIRING_OPLOCK = 0x00040000 + FILE_FLAG_FIRST_PIPE_INSTANCE = 0x00080000 + FILE_FLAG_OPEN_NO_RECALL = 0x00100000 + FILE_FLAG_OPEN_REPARSE_POINT = 0x00200000 + FILE_FLAG_SESSION_AWARE = 0x00800000 + FILE_FLAG_POSIX_SEMANTICS = 0x01000000 + FILE_FLAG_BACKUP_SEMANTICS = 0x02000000 + FILE_FLAG_DELETE_ON_CLOSE = 0x04000000 + FILE_FLAG_SEQUENTIAL_SCAN = 0x08000000 + FILE_FLAG_RANDOM_ACCESS = 0x10000000 + FILE_FLAG_NO_BUFFERING = 0x20000000 + FILE_FLAG_OVERLAPPED = 0x40000000 + FILE_FLAG_WRITE_THROUGH = 0x80000000 + + HANDLE_FLAG_INHERIT = 0x00000001 + STARTF_USESTDHANDLES = 0x00000100 + STARTF_USESHOWWINDOW = 0x00000001 + DUPLICATE_CLOSE_SOURCE = 0x00000001 + DUPLICATE_SAME_ACCESS = 0x00000002 + + STD_INPUT_HANDLE = -10 & (1<<32 - 1) + STD_OUTPUT_HANDLE = -11 & (1<<32 - 1) + STD_ERROR_HANDLE = -12 & (1<<32 - 1) + + FILE_BEGIN = 0 + 
FILE_CURRENT = 1 + FILE_END = 2 + + LANG_ENGLISH = 0x09 + SUBLANG_ENGLISH_US = 0x01 + + FORMAT_MESSAGE_ALLOCATE_BUFFER = 256 + FORMAT_MESSAGE_IGNORE_INSERTS = 512 + FORMAT_MESSAGE_FROM_STRING = 1024 + FORMAT_MESSAGE_FROM_HMODULE = 2048 + FORMAT_MESSAGE_FROM_SYSTEM = 4096 + FORMAT_MESSAGE_ARGUMENT_ARRAY = 8192 + FORMAT_MESSAGE_MAX_WIDTH_MASK = 255 + + MAX_PATH = 260 + MAX_LONG_PATH = 32768 + + MAX_MODULE_NAME32 = 255 + + MAX_COMPUTERNAME_LENGTH = 15 + + MAX_DHCPV6_DUID_LENGTH = 130 + + MAX_DNS_SUFFIX_STRING_LENGTH = 256 + + TIME_ZONE_ID_UNKNOWN = 0 + TIME_ZONE_ID_STANDARD = 1 + + TIME_ZONE_ID_DAYLIGHT = 2 + IGNORE = 0 + INFINITE = 0xffffffff + + WAIT_ABANDONED = 0x00000080 + WAIT_OBJECT_0 = 0x00000000 + WAIT_FAILED = 0xFFFFFFFF + + // Access rights for process. + PROCESS_CREATE_PROCESS = 0x0080 + PROCESS_CREATE_THREAD = 0x0002 + PROCESS_DUP_HANDLE = 0x0040 + PROCESS_QUERY_INFORMATION = 0x0400 + PROCESS_QUERY_LIMITED_INFORMATION = 0x1000 + PROCESS_SET_INFORMATION = 0x0200 + PROCESS_SET_QUOTA = 0x0100 + PROCESS_SUSPEND_RESUME = 0x0800 + PROCESS_TERMINATE = 0x0001 + PROCESS_VM_OPERATION = 0x0008 + PROCESS_VM_READ = 0x0010 + PROCESS_VM_WRITE = 0x0020 + + // Access rights for thread. + THREAD_DIRECT_IMPERSONATION = 0x0200 + THREAD_GET_CONTEXT = 0x0008 + THREAD_IMPERSONATE = 0x0100 + THREAD_QUERY_INFORMATION = 0x0040 + THREAD_QUERY_LIMITED_INFORMATION = 0x0800 + THREAD_SET_CONTEXT = 0x0010 + THREAD_SET_INFORMATION = 0x0020 + THREAD_SET_LIMITED_INFORMATION = 0x0400 + THREAD_SET_THREAD_TOKEN = 0x0080 + THREAD_SUSPEND_RESUME = 0x0002 + THREAD_TERMINATE = 0x0001 + + FILE_MAP_COPY = 0x01 + FILE_MAP_WRITE = 0x02 + FILE_MAP_READ = 0x04 + FILE_MAP_EXECUTE = 0x20 + + CTRL_C_EVENT = 0 + CTRL_BREAK_EVENT = 1 + CTRL_CLOSE_EVENT = 2 + CTRL_LOGOFF_EVENT = 5 + CTRL_SHUTDOWN_EVENT = 6 + + // Windows reserves errors >= 1<<29 for application use. + APPLICATION_ERROR = 1 << 29 +) + +const ( + // Process creation flags. 
+ CREATE_BREAKAWAY_FROM_JOB = 0x01000000 + CREATE_DEFAULT_ERROR_MODE = 0x04000000 + CREATE_NEW_CONSOLE = 0x00000010 + CREATE_NEW_PROCESS_GROUP = 0x00000200 + CREATE_NO_WINDOW = 0x08000000 + CREATE_PROTECTED_PROCESS = 0x00040000 + CREATE_PRESERVE_CODE_AUTHZ_LEVEL = 0x02000000 + CREATE_SEPARATE_WOW_VDM = 0x00000800 + CREATE_SHARED_WOW_VDM = 0x00001000 + CREATE_SUSPENDED = 0x00000004 + CREATE_UNICODE_ENVIRONMENT = 0x00000400 + DEBUG_ONLY_THIS_PROCESS = 0x00000002 + DEBUG_PROCESS = 0x00000001 + DETACHED_PROCESS = 0x00000008 + EXTENDED_STARTUPINFO_PRESENT = 0x00080000 + INHERIT_PARENT_AFFINITY = 0x00010000 +) + +const ( + // attributes for ProcThreadAttributeList + PROC_THREAD_ATTRIBUTE_PARENT_PROCESS = 0x00020000 + PROC_THREAD_ATTRIBUTE_HANDLE_LIST = 0x00020002 + PROC_THREAD_ATTRIBUTE_GROUP_AFFINITY = 0x00030003 + PROC_THREAD_ATTRIBUTE_PREFERRED_NODE = 0x00020004 + PROC_THREAD_ATTRIBUTE_IDEAL_PROCESSOR = 0x00030005 + PROC_THREAD_ATTRIBUTE_MITIGATION_POLICY = 0x00020007 + PROC_THREAD_ATTRIBUTE_UMS_THREAD = 0x00030006 + PROC_THREAD_ATTRIBUTE_PROTECTION_LEVEL = 0x0002000b +) + +const ( + // flags for CreateToolhelp32Snapshot + TH32CS_SNAPHEAPLIST = 0x01 + TH32CS_SNAPPROCESS = 0x02 + TH32CS_SNAPTHREAD = 0x04 + TH32CS_SNAPMODULE = 0x08 + TH32CS_SNAPMODULE32 = 0x10 + TH32CS_SNAPALL = TH32CS_SNAPHEAPLIST | TH32CS_SNAPMODULE | TH32CS_SNAPPROCESS | TH32CS_SNAPTHREAD + TH32CS_INHERIT = 0x80000000 +) + +const ( + // flags for EnumProcessModulesEx + LIST_MODULES_32BIT = 0x01 + LIST_MODULES_64BIT = 0x02 + LIST_MODULES_ALL = 0x03 + LIST_MODULES_DEFAULT = 0x00 +) + +const ( + // filters for ReadDirectoryChangesW and FindFirstChangeNotificationW + FILE_NOTIFY_CHANGE_FILE_NAME = 0x001 + FILE_NOTIFY_CHANGE_DIR_NAME = 0x002 + FILE_NOTIFY_CHANGE_ATTRIBUTES = 0x004 + FILE_NOTIFY_CHANGE_SIZE = 0x008 + FILE_NOTIFY_CHANGE_LAST_WRITE = 0x010 + FILE_NOTIFY_CHANGE_LAST_ACCESS = 0x020 + FILE_NOTIFY_CHANGE_CREATION = 0x040 + FILE_NOTIFY_CHANGE_SECURITY = 0x100 +) + +const ( + // do not reorder + FILE_ACTION_ADDED = iota + 1 + FILE_ACTION_REMOVED + FILE_ACTION_MODIFIED + FILE_ACTION_RENAMED_OLD_NAME + FILE_ACTION_RENAMED_NEW_NAME +) + +const ( + // wincrypt.h + /* certenrolld_begin -- PROV_RSA_*/ + PROV_RSA_FULL = 1 + PROV_RSA_SIG = 2 + PROV_DSS = 3 + PROV_FORTEZZA = 4 + PROV_MS_EXCHANGE = 5 + PROV_SSL = 6 + PROV_RSA_SCHANNEL = 12 + PROV_DSS_DH = 13 + PROV_EC_ECDSA_SIG = 14 + PROV_EC_ECNRA_SIG = 15 + PROV_EC_ECDSA_FULL = 16 + PROV_EC_ECNRA_FULL = 17 + PROV_DH_SCHANNEL = 18 + PROV_SPYRUS_LYNKS = 20 + PROV_RNG = 21 + PROV_INTEL_SEC = 22 + PROV_REPLACE_OWF = 23 + PROV_RSA_AES = 24 + + /* dwFlags definitions for CryptAcquireContext */ + CRYPT_VERIFYCONTEXT = 0xF0000000 + CRYPT_NEWKEYSET = 0x00000008 + CRYPT_DELETEKEYSET = 0x00000010 + CRYPT_MACHINE_KEYSET = 0x00000020 + CRYPT_SILENT = 0x00000040 + CRYPT_DEFAULT_CONTAINER_OPTIONAL = 0x00000080 + + /* Flags for PFXImportCertStore */ + CRYPT_EXPORTABLE = 0x00000001 + CRYPT_USER_PROTECTED = 0x00000002 + CRYPT_USER_KEYSET = 0x00001000 + PKCS12_PREFER_CNG_KSP = 0x00000100 + PKCS12_ALWAYS_CNG_KSP = 0x00000200 + PKCS12_ALLOW_OVERWRITE_KEY = 0x00004000 + PKCS12_NO_PERSIST_KEY = 0x00008000 + PKCS12_INCLUDE_EXTENDED_PROPERTIES = 0x00000010 + + /* Flags for CryptAcquireCertificatePrivateKey */ + CRYPT_ACQUIRE_CACHE_FLAG = 0x00000001 + CRYPT_ACQUIRE_USE_PROV_INFO_FLAG = 0x00000002 + CRYPT_ACQUIRE_COMPARE_KEY_FLAG = 0x00000004 + CRYPT_ACQUIRE_NO_HEALING = 0x00000008 + CRYPT_ACQUIRE_SILENT_FLAG = 0x00000040 + CRYPT_ACQUIRE_WINDOW_HANDLE_FLAG = 0x00000080 + CRYPT_ACQUIRE_NCRYPT_KEY_FLAGS_MASK 
= 0x00070000 + CRYPT_ACQUIRE_ALLOW_NCRYPT_KEY_FLAG = 0x00010000 + CRYPT_ACQUIRE_PREFER_NCRYPT_KEY_FLAG = 0x00020000 + CRYPT_ACQUIRE_ONLY_NCRYPT_KEY_FLAG = 0x00040000 + + /* pdwKeySpec for CryptAcquireCertificatePrivateKey */ + AT_KEYEXCHANGE = 1 + AT_SIGNATURE = 2 + CERT_NCRYPT_KEY_SPEC = 0xFFFFFFFF + + /* Default usage match type is AND with value zero */ + USAGE_MATCH_TYPE_AND = 0 + USAGE_MATCH_TYPE_OR = 1 + + /* msgAndCertEncodingType values for CertOpenStore function */ + X509_ASN_ENCODING = 0x00000001 + PKCS_7_ASN_ENCODING = 0x00010000 + + /* storeProvider values for CertOpenStore function */ + CERT_STORE_PROV_MSG = 1 + CERT_STORE_PROV_MEMORY = 2 + CERT_STORE_PROV_FILE = 3 + CERT_STORE_PROV_REG = 4 + CERT_STORE_PROV_PKCS7 = 5 + CERT_STORE_PROV_SERIALIZED = 6 + CERT_STORE_PROV_FILENAME_A = 7 + CERT_STORE_PROV_FILENAME_W = 8 + CERT_STORE_PROV_FILENAME = CERT_STORE_PROV_FILENAME_W + CERT_STORE_PROV_SYSTEM_A = 9 + CERT_STORE_PROV_SYSTEM_W = 10 + CERT_STORE_PROV_SYSTEM = CERT_STORE_PROV_SYSTEM_W + CERT_STORE_PROV_COLLECTION = 11 + CERT_STORE_PROV_SYSTEM_REGISTRY_A = 12 + CERT_STORE_PROV_SYSTEM_REGISTRY_W = 13 + CERT_STORE_PROV_SYSTEM_REGISTRY = CERT_STORE_PROV_SYSTEM_REGISTRY_W + CERT_STORE_PROV_PHYSICAL_W = 14 + CERT_STORE_PROV_PHYSICAL = CERT_STORE_PROV_PHYSICAL_W + CERT_STORE_PROV_SMART_CARD_W = 15 + CERT_STORE_PROV_SMART_CARD = CERT_STORE_PROV_SMART_CARD_W + CERT_STORE_PROV_LDAP_W = 16 + CERT_STORE_PROV_LDAP = CERT_STORE_PROV_LDAP_W + CERT_STORE_PROV_PKCS12 = 17 + + /* store characteristics (low WORD of flag) for CertOpenStore function */ + CERT_STORE_NO_CRYPT_RELEASE_FLAG = 0x00000001 + CERT_STORE_SET_LOCALIZED_NAME_FLAG = 0x00000002 + CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG = 0x00000004 + CERT_STORE_DELETE_FLAG = 0x00000010 + CERT_STORE_UNSAFE_PHYSICAL_FLAG = 0x00000020 + CERT_STORE_SHARE_STORE_FLAG = 0x00000040 + CERT_STORE_SHARE_CONTEXT_FLAG = 0x00000080 + CERT_STORE_MANIFOLD_FLAG = 0x00000100 + CERT_STORE_ENUM_ARCHIVED_FLAG = 0x00000200 + CERT_STORE_UPDATE_KEYID_FLAG = 0x00000400 + CERT_STORE_BACKUP_RESTORE_FLAG = 0x00000800 + CERT_STORE_MAXIMUM_ALLOWED_FLAG = 0x00001000 + CERT_STORE_CREATE_NEW_FLAG = 0x00002000 + CERT_STORE_OPEN_EXISTING_FLAG = 0x00004000 + CERT_STORE_READONLY_FLAG = 0x00008000 + + /* store locations (high WORD of flag) for CertOpenStore function */ + CERT_SYSTEM_STORE_CURRENT_USER = 0x00010000 + CERT_SYSTEM_STORE_LOCAL_MACHINE = 0x00020000 + CERT_SYSTEM_STORE_CURRENT_SERVICE = 0x00040000 + CERT_SYSTEM_STORE_SERVICES = 0x00050000 + CERT_SYSTEM_STORE_USERS = 0x00060000 + CERT_SYSTEM_STORE_CURRENT_USER_GROUP_POLICY = 0x00070000 + CERT_SYSTEM_STORE_LOCAL_MACHINE_GROUP_POLICY = 0x00080000 + CERT_SYSTEM_STORE_LOCAL_MACHINE_ENTERPRISE = 0x00090000 + CERT_SYSTEM_STORE_UNPROTECTED_FLAG = 0x40000000 + CERT_SYSTEM_STORE_RELOCATE_FLAG = 0x80000000 + + /* Miscellaneous high-WORD flags for CertOpenStore function */ + CERT_REGISTRY_STORE_REMOTE_FLAG = 0x00010000 + CERT_REGISTRY_STORE_SERIALIZED_FLAG = 0x00020000 + CERT_REGISTRY_STORE_ROAMING_FLAG = 0x00040000 + CERT_REGISTRY_STORE_MY_IE_DIRTY_FLAG = 0x00080000 + CERT_REGISTRY_STORE_LM_GPT_FLAG = 0x01000000 + CERT_REGISTRY_STORE_CLIENT_GPT_FLAG = 0x80000000 + CERT_FILE_STORE_COMMIT_ENABLE_FLAG = 0x00010000 + CERT_LDAP_STORE_SIGN_FLAG = 0x00010000 + CERT_LDAP_STORE_AREC_EXCLUSIVE_FLAG = 0x00020000 + CERT_LDAP_STORE_OPENED_FLAG = 0x00040000 + CERT_LDAP_STORE_UNBIND_FLAG = 0x00080000 + + /* addDisposition values for CertAddCertificateContextToStore function */ + CERT_STORE_ADD_NEW = 1 + CERT_STORE_ADD_USE_EXISTING = 2 + 
CERT_STORE_ADD_REPLACE_EXISTING = 3 + CERT_STORE_ADD_ALWAYS = 4 + CERT_STORE_ADD_REPLACE_EXISTING_INHERIT_PROPERTIES = 5 + CERT_STORE_ADD_NEWER = 6 + CERT_STORE_ADD_NEWER_INHERIT_PROPERTIES = 7 + + /* ErrorStatus values for CertTrustStatus struct */ + CERT_TRUST_NO_ERROR = 0x00000000 + CERT_TRUST_IS_NOT_TIME_VALID = 0x00000001 + CERT_TRUST_IS_REVOKED = 0x00000004 + CERT_TRUST_IS_NOT_SIGNATURE_VALID = 0x00000008 + CERT_TRUST_IS_NOT_VALID_FOR_USAGE = 0x00000010 + CERT_TRUST_IS_UNTRUSTED_ROOT = 0x00000020 + CERT_TRUST_REVOCATION_STATUS_UNKNOWN = 0x00000040 + CERT_TRUST_IS_CYCLIC = 0x00000080 + CERT_TRUST_INVALID_EXTENSION = 0x00000100 + CERT_TRUST_INVALID_POLICY_CONSTRAINTS = 0x00000200 + CERT_TRUST_INVALID_BASIC_CONSTRAINTS = 0x00000400 + CERT_TRUST_INVALID_NAME_CONSTRAINTS = 0x00000800 + CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT = 0x00001000 + CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT = 0x00002000 + CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT = 0x00004000 + CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT = 0x00008000 + CERT_TRUST_IS_PARTIAL_CHAIN = 0x00010000 + CERT_TRUST_CTL_IS_NOT_TIME_VALID = 0x00020000 + CERT_TRUST_CTL_IS_NOT_SIGNATURE_VALID = 0x00040000 + CERT_TRUST_CTL_IS_NOT_VALID_FOR_USAGE = 0x00080000 + CERT_TRUST_HAS_WEAK_SIGNATURE = 0x00100000 + CERT_TRUST_IS_OFFLINE_REVOCATION = 0x01000000 + CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY = 0x02000000 + CERT_TRUST_IS_EXPLICIT_DISTRUST = 0x04000000 + CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT = 0x08000000 + + /* InfoStatus values for CertTrustStatus struct */ + CERT_TRUST_HAS_EXACT_MATCH_ISSUER = 0x00000001 + CERT_TRUST_HAS_KEY_MATCH_ISSUER = 0x00000002 + CERT_TRUST_HAS_NAME_MATCH_ISSUER = 0x00000004 + CERT_TRUST_IS_SELF_SIGNED = 0x00000008 + CERT_TRUST_HAS_PREFERRED_ISSUER = 0x00000100 + CERT_TRUST_HAS_ISSUANCE_CHAIN_POLICY = 0x00000400 + CERT_TRUST_HAS_VALID_NAME_CONSTRAINTS = 0x00000400 + CERT_TRUST_IS_PEER_TRUSTED = 0x00000800 + CERT_TRUST_HAS_CRL_VALIDITY_EXTENDED = 0x00001000 + CERT_TRUST_IS_FROM_EXCLUSIVE_TRUST_STORE = 0x00002000 + CERT_TRUST_IS_CA_TRUSTED = 0x00004000 + CERT_TRUST_IS_COMPLEX_CHAIN = 0x00010000 + + /* Certificate Information Flags */ + CERT_INFO_VERSION_FLAG = 1 + CERT_INFO_SERIAL_NUMBER_FLAG = 2 + CERT_INFO_SIGNATURE_ALGORITHM_FLAG = 3 + CERT_INFO_ISSUER_FLAG = 4 + CERT_INFO_NOT_BEFORE_FLAG = 5 + CERT_INFO_NOT_AFTER_FLAG = 6 + CERT_INFO_SUBJECT_FLAG = 7 + CERT_INFO_SUBJECT_PUBLIC_KEY_INFO_FLAG = 8 + CERT_INFO_ISSUER_UNIQUE_ID_FLAG = 9 + CERT_INFO_SUBJECT_UNIQUE_ID_FLAG = 10 + CERT_INFO_EXTENSION_FLAG = 11 + + /* dwFindType for CertFindCertificateInStore */ + CERT_COMPARE_MASK = 0xFFFF + CERT_COMPARE_SHIFT = 16 + CERT_COMPARE_ANY = 0 + CERT_COMPARE_SHA1_HASH = 1 + CERT_COMPARE_NAME = 2 + CERT_COMPARE_ATTR = 3 + CERT_COMPARE_MD5_HASH = 4 + CERT_COMPARE_PROPERTY = 5 + CERT_COMPARE_PUBLIC_KEY = 6 + CERT_COMPARE_HASH = CERT_COMPARE_SHA1_HASH + CERT_COMPARE_NAME_STR_A = 7 + CERT_COMPARE_NAME_STR_W = 8 + CERT_COMPARE_KEY_SPEC = 9 + CERT_COMPARE_ENHKEY_USAGE = 10 + CERT_COMPARE_CTL_USAGE = CERT_COMPARE_ENHKEY_USAGE + CERT_COMPARE_SUBJECT_CERT = 11 + CERT_COMPARE_ISSUER_OF = 12 + CERT_COMPARE_EXISTING = 13 + CERT_COMPARE_SIGNATURE_HASH = 14 + CERT_COMPARE_KEY_IDENTIFIER = 15 + CERT_COMPARE_CERT_ID = 16 + CERT_COMPARE_CROSS_CERT_DIST_POINTS = 17 + CERT_COMPARE_PUBKEY_MD5_HASH = 18 + CERT_COMPARE_SUBJECT_INFO_ACCESS = 19 + CERT_COMPARE_HASH_STR = 20 + CERT_COMPARE_HAS_PRIVATE_KEY = 21 + CERT_FIND_ANY = (CERT_COMPARE_ANY << CERT_COMPARE_SHIFT) + CERT_FIND_SHA1_HASH = (CERT_COMPARE_SHA1_HASH << CERT_COMPARE_SHIFT) + 
CERT_FIND_MD5_HASH = (CERT_COMPARE_MD5_HASH << CERT_COMPARE_SHIFT) + CERT_FIND_SIGNATURE_HASH = (CERT_COMPARE_SIGNATURE_HASH << CERT_COMPARE_SHIFT) + CERT_FIND_KEY_IDENTIFIER = (CERT_COMPARE_KEY_IDENTIFIER << CERT_COMPARE_SHIFT) + CERT_FIND_HASH = CERT_FIND_SHA1_HASH + CERT_FIND_PROPERTY = (CERT_COMPARE_PROPERTY << CERT_COMPARE_SHIFT) + CERT_FIND_PUBLIC_KEY = (CERT_COMPARE_PUBLIC_KEY << CERT_COMPARE_SHIFT) + CERT_FIND_SUBJECT_NAME = (CERT_COMPARE_NAME<> 32 & 0xffffffff) + return ft +} + +type Win32finddata struct { + FileAttributes uint32 + CreationTime Filetime + LastAccessTime Filetime + LastWriteTime Filetime + FileSizeHigh uint32 + FileSizeLow uint32 + Reserved0 uint32 + Reserved1 uint32 + FileName [MAX_PATH - 1]uint16 + AlternateFileName [13]uint16 +} + +// This is the actual system call structure. +// Win32finddata is what we committed to in Go 1. +type win32finddata1 struct { + FileAttributes uint32 + CreationTime Filetime + LastAccessTime Filetime + LastWriteTime Filetime + FileSizeHigh uint32 + FileSizeLow uint32 + Reserved0 uint32 + Reserved1 uint32 + FileName [MAX_PATH]uint16 + AlternateFileName [14]uint16 + + // The Microsoft documentation for this struct¹ describes three additional + // fields: dwFileType, dwCreatorType, and wFinderFlags. However, those fields + // are empirically only present in the macOS port of the Win32 API,² and thus + // not needed for binaries built for Windows. + // + // ¹ https://docs.microsoft.com/en-us/windows/win32/api/minwinbase/ns-minwinbase-win32_find_dataw describe + // ² https://golang.org/issue/42637#issuecomment-760715755. +} + +func copyFindData(dst *Win32finddata, src *win32finddata1) { + dst.FileAttributes = src.FileAttributes + dst.CreationTime = src.CreationTime + dst.LastAccessTime = src.LastAccessTime + dst.LastWriteTime = src.LastWriteTime + dst.FileSizeHigh = src.FileSizeHigh + dst.FileSizeLow = src.FileSizeLow + dst.Reserved0 = src.Reserved0 + dst.Reserved1 = src.Reserved1 + + // The src is 1 element bigger than dst, but it must be NUL. + copy(dst.FileName[:], src.FileName[:]) + copy(dst.AlternateFileName[:], src.AlternateFileName[:]) +} + +type ByHandleFileInformation struct { + FileAttributes uint32 + CreationTime Filetime + LastAccessTime Filetime + LastWriteTime Filetime + VolumeSerialNumber uint32 + FileSizeHigh uint32 + FileSizeLow uint32 + NumberOfLinks uint32 + FileIndexHigh uint32 + FileIndexLow uint32 +} + +const ( + GetFileExInfoStandard = 0 + GetFileExMaxInfoLevel = 1 +) + +type Win32FileAttributeData struct { + FileAttributes uint32 + CreationTime Filetime + LastAccessTime Filetime + LastWriteTime Filetime + FileSizeHigh uint32 + FileSizeLow uint32 +} + +// ShowWindow constants +const ( + // winuser.h + SW_HIDE = 0 + SW_NORMAL = 1 + SW_SHOWNORMAL = 1 + SW_SHOWMINIMIZED = 2 + SW_SHOWMAXIMIZED = 3 + SW_MAXIMIZE = 3 + SW_SHOWNOACTIVATE = 4 + SW_SHOW = 5 + SW_MINIMIZE = 6 + SW_SHOWMINNOACTIVE = 7 + SW_SHOWNA = 8 + SW_RESTORE = 9 + SW_SHOWDEFAULT = 10 + SW_FORCEMINIMIZE = 11 +) + +type StartupInfo struct { + Cb uint32 + _ *uint16 + Desktop *uint16 + Title *uint16 + X uint32 + Y uint32 + XSize uint32 + YSize uint32 + XCountChars uint32 + YCountChars uint32 + FillAttribute uint32 + Flags uint32 + ShowWindow uint16 + _ uint16 + _ *byte + StdInput Handle + StdOutput Handle + StdErr Handle +} + +type StartupInfoEx struct { + StartupInfo + ProcThreadAttributeList *ProcThreadAttributeList +} + +// ProcThreadAttributeList is a placeholder type to represent a PROC_THREAD_ATTRIBUTE_LIST. 
+// +// To create a *ProcThreadAttributeList, use NewProcThreadAttributeList, update +// it with ProcThreadAttributeListContainer.Update, free its memory using +// ProcThreadAttributeListContainer.Delete, and access the list itself using +// ProcThreadAttributeListContainer.List. +type ProcThreadAttributeList struct{} + +type ProcThreadAttributeListContainer struct { + data *ProcThreadAttributeList + pointers []unsafe.Pointer +} + +type ProcessInformation struct { + Process Handle + Thread Handle + ProcessId uint32 + ThreadId uint32 +} + +type ProcessEntry32 struct { + Size uint32 + Usage uint32 + ProcessID uint32 + DefaultHeapID uintptr + ModuleID uint32 + Threads uint32 + ParentProcessID uint32 + PriClassBase int32 + Flags uint32 + ExeFile [MAX_PATH]uint16 +} + +type ThreadEntry32 struct { + Size uint32 + Usage uint32 + ThreadID uint32 + OwnerProcessID uint32 + BasePri int32 + DeltaPri int32 + Flags uint32 +} + +type ModuleEntry32 struct { + Size uint32 + ModuleID uint32 + ProcessID uint32 + GlblcntUsage uint32 + ProccntUsage uint32 + ModBaseAddr uintptr + ModBaseSize uint32 + ModuleHandle Handle + Module [MAX_MODULE_NAME32 + 1]uint16 + ExePath [MAX_PATH]uint16 +} + +const SizeofModuleEntry32 = unsafe.Sizeof(ModuleEntry32{}) + +type Systemtime struct { + Year uint16 + Month uint16 + DayOfWeek uint16 + Day uint16 + Hour uint16 + Minute uint16 + Second uint16 + Milliseconds uint16 +} + +type Timezoneinformation struct { + Bias int32 + StandardName [32]uint16 + StandardDate Systemtime + StandardBias int32 + DaylightName [32]uint16 + DaylightDate Systemtime + DaylightBias int32 +} + +// Socket related. + +const ( + AF_UNSPEC = 0 + AF_UNIX = 1 + AF_INET = 2 + AF_NETBIOS = 17 + AF_INET6 = 23 + AF_IRDA = 26 + AF_BTH = 32 + + SOCK_STREAM = 1 + SOCK_DGRAM = 2 + SOCK_RAW = 3 + SOCK_RDM = 4 + SOCK_SEQPACKET = 5 + + IPPROTO_IP = 0 + IPPROTO_ICMP = 1 + IPPROTO_IGMP = 2 + BTHPROTO_RFCOMM = 3 + IPPROTO_TCP = 6 + IPPROTO_UDP = 17 + IPPROTO_IPV6 = 41 + IPPROTO_ICMPV6 = 58 + IPPROTO_RM = 113 + + SOL_SOCKET = 0xffff + SO_REUSEADDR = 4 + SO_KEEPALIVE = 8 + SO_DONTROUTE = 16 + SO_BROADCAST = 32 + SO_LINGER = 128 + SO_RCVBUF = 0x1002 + SO_RCVTIMEO = 0x1006 + SO_SNDBUF = 0x1001 + SO_UPDATE_ACCEPT_CONTEXT = 0x700b + SO_UPDATE_CONNECT_CONTEXT = 0x7010 + + IOC_OUT = 0x40000000 + IOC_IN = 0x80000000 + IOC_VENDOR = 0x18000000 + IOC_INOUT = IOC_IN | IOC_OUT + IOC_WS2 = 0x08000000 + SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_INOUT | IOC_WS2 | 6 + SIO_KEEPALIVE_VALS = IOC_IN | IOC_VENDOR | 4 + SIO_UDP_CONNRESET = IOC_IN | IOC_VENDOR | 12 + + // cf. 
http://support.microsoft.com/default.aspx?scid=kb;en-us;257460 + + IP_HDRINCL = 0x2 + IP_TOS = 0x3 + IP_TTL = 0x4 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_LOOP = 0xb + IP_ADD_MEMBERSHIP = 0xc + IP_DROP_MEMBERSHIP = 0xd + IP_PKTINFO = 0x13 + + IPV6_V6ONLY = 0x1b + IPV6_UNICAST_HOPS = 0x4 + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_LOOP = 0xb + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + IPV6_PKTINFO = 0x13 + + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_DONTROUTE = 0x4 + MSG_WAITALL = 0x8 + + MSG_TRUNC = 0x0100 + MSG_CTRUNC = 0x0200 + MSG_BCAST = 0x0400 + MSG_MCAST = 0x0800 + + SOMAXCONN = 0x7fffffff + + TCP_NODELAY = 1 + + SHUT_RD = 0 + SHUT_WR = 1 + SHUT_RDWR = 2 + + WSADESCRIPTION_LEN = 256 + WSASYS_STATUS_LEN = 128 +) + +type WSABuf struct { + Len uint32 + Buf *byte +} + +type WSAMsg struct { + Name *syscall.RawSockaddrAny + Namelen int32 + Buffers *WSABuf + BufferCount uint32 + Control WSABuf + Flags uint32 +} + +// Flags for WSASocket +const ( + WSA_FLAG_OVERLAPPED = 0x01 + WSA_FLAG_MULTIPOINT_C_ROOT = 0x02 + WSA_FLAG_MULTIPOINT_C_LEAF = 0x04 + WSA_FLAG_MULTIPOINT_D_ROOT = 0x08 + WSA_FLAG_MULTIPOINT_D_LEAF = 0x10 + WSA_FLAG_ACCESS_SYSTEM_SECURITY = 0x40 + WSA_FLAG_NO_HANDLE_INHERIT = 0x80 + WSA_FLAG_REGISTERED_IO = 0x100 +) + +// Invented values to support what package os expects. +const ( + S_IFMT = 0x1f000 + S_IFIFO = 0x1000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFBLK = 0x6000 + S_IFREG = 0x8000 + S_IFLNK = 0xa000 + S_IFSOCK = 0xc000 + S_ISUID = 0x800 + S_ISGID = 0x400 + S_ISVTX = 0x200 + S_IRUSR = 0x100 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXUSR = 0x40 +) + +const ( + FILE_TYPE_CHAR = 0x0002 + FILE_TYPE_DISK = 0x0001 + FILE_TYPE_PIPE = 0x0003 + FILE_TYPE_REMOTE = 0x8000 + FILE_TYPE_UNKNOWN = 0x0000 +) + +type Hostent struct { + Name *byte + Aliases **byte + AddrType uint16 + Length uint16 + AddrList **byte +} + +type Protoent struct { + Name *byte + Aliases **byte + Proto uint16 +} + +const ( + DNS_TYPE_A = 0x0001 + DNS_TYPE_NS = 0x0002 + DNS_TYPE_MD = 0x0003 + DNS_TYPE_MF = 0x0004 + DNS_TYPE_CNAME = 0x0005 + DNS_TYPE_SOA = 0x0006 + DNS_TYPE_MB = 0x0007 + DNS_TYPE_MG = 0x0008 + DNS_TYPE_MR = 0x0009 + DNS_TYPE_NULL = 0x000a + DNS_TYPE_WKS = 0x000b + DNS_TYPE_PTR = 0x000c + DNS_TYPE_HINFO = 0x000d + DNS_TYPE_MINFO = 0x000e + DNS_TYPE_MX = 0x000f + DNS_TYPE_TEXT = 0x0010 + DNS_TYPE_RP = 0x0011 + DNS_TYPE_AFSDB = 0x0012 + DNS_TYPE_X25 = 0x0013 + DNS_TYPE_ISDN = 0x0014 + DNS_TYPE_RT = 0x0015 + DNS_TYPE_NSAP = 0x0016 + DNS_TYPE_NSAPPTR = 0x0017 + DNS_TYPE_SIG = 0x0018 + DNS_TYPE_KEY = 0x0019 + DNS_TYPE_PX = 0x001a + DNS_TYPE_GPOS = 0x001b + DNS_TYPE_AAAA = 0x001c + DNS_TYPE_LOC = 0x001d + DNS_TYPE_NXT = 0x001e + DNS_TYPE_EID = 0x001f + DNS_TYPE_NIMLOC = 0x0020 + DNS_TYPE_SRV = 0x0021 + DNS_TYPE_ATMA = 0x0022 + DNS_TYPE_NAPTR = 0x0023 + DNS_TYPE_KX = 0x0024 + DNS_TYPE_CERT = 0x0025 + DNS_TYPE_A6 = 0x0026 + DNS_TYPE_DNAME = 0x0027 + DNS_TYPE_SINK = 0x0028 + DNS_TYPE_OPT = 0x0029 + DNS_TYPE_DS = 0x002B + DNS_TYPE_RRSIG = 0x002E + DNS_TYPE_NSEC = 0x002F + DNS_TYPE_DNSKEY = 0x0030 + DNS_TYPE_DHCID = 0x0031 + DNS_TYPE_UINFO = 0x0064 + DNS_TYPE_UID = 0x0065 + DNS_TYPE_GID = 0x0066 + DNS_TYPE_UNSPEC = 0x0067 + DNS_TYPE_ADDRS = 0x00f8 + DNS_TYPE_TKEY = 0x00f9 + DNS_TYPE_TSIG = 0x00fa + DNS_TYPE_IXFR = 0x00fb + DNS_TYPE_AXFR = 0x00fc + DNS_TYPE_MAILB = 0x00fd + DNS_TYPE_MAILA = 0x00fe + DNS_TYPE_ALL = 0x00ff + DNS_TYPE_ANY = 0x00ff + DNS_TYPE_WINS = 0xff01 + DNS_TYPE_WINSR = 0xff02 + DNS_TYPE_NBSTAT = 0xff01 +) + +const ( + // flags inside 
DNSRecord.Dw + DnsSectionQuestion = 0x0000 + DnsSectionAnswer = 0x0001 + DnsSectionAuthority = 0x0002 + DnsSectionAdditional = 0x0003 +) + +const ( + // flags of WSALookupService + LUP_DEEP = 0x0001 + LUP_CONTAINERS = 0x0002 + LUP_NOCONTAINERS = 0x0004 + LUP_NEAREST = 0x0008 + LUP_RETURN_NAME = 0x0010 + LUP_RETURN_TYPE = 0x0020 + LUP_RETURN_VERSION = 0x0040 + LUP_RETURN_COMMENT = 0x0080 + LUP_RETURN_ADDR = 0x0100 + LUP_RETURN_BLOB = 0x0200 + LUP_RETURN_ALIASES = 0x0400 + LUP_RETURN_QUERY_STRING = 0x0800 + LUP_RETURN_ALL = 0x0FF0 + LUP_RES_SERVICE = 0x8000 + + LUP_FLUSHCACHE = 0x1000 + LUP_FLUSHPREVIOUS = 0x2000 + + LUP_NON_AUTHORITATIVE = 0x4000 + LUP_SECURE = 0x8000 + LUP_RETURN_PREFERRED_NAMES = 0x10000 + LUP_DNS_ONLY = 0x20000 + + LUP_ADDRCONFIG = 0x100000 + LUP_DUAL_ADDR = 0x200000 + LUP_FILESERVER = 0x400000 + LUP_DISABLE_IDN_ENCODING = 0x00800000 + LUP_API_ANSI = 0x01000000 + + LUP_RESOLUTION_HANDLE = 0x80000000 +) + +const ( + // values of WSAQUERYSET's namespace + NS_ALL = 0 + NS_DNS = 12 + NS_NLA = 15 + NS_BTH = 16 + NS_EMAIL = 37 + NS_PNRPNAME = 38 + NS_PNRPCLOUD = 39 +) + +type DNSSRVData struct { + Target *uint16 + Priority uint16 + Weight uint16 + Port uint16 + Pad uint16 +} + +type DNSPTRData struct { + Host *uint16 +} + +type DNSMXData struct { + NameExchange *uint16 + Preference uint16 + Pad uint16 +} + +type DNSTXTData struct { + StringCount uint16 + StringArray [1]*uint16 +} + +type DNSRecord struct { + Next *DNSRecord + Name *uint16 + Type uint16 + Length uint16 + Dw uint32 + Ttl uint32 + Reserved uint32 + Data [40]byte +} + +const ( + TF_DISCONNECT = 1 + TF_REUSE_SOCKET = 2 + TF_WRITE_BEHIND = 4 + TF_USE_DEFAULT_WORKER = 0 + TF_USE_SYSTEM_THREAD = 16 + TF_USE_KERNEL_APC = 32 +) + +type TransmitFileBuffers struct { + Head uintptr + HeadLength uint32 + Tail uintptr + TailLength uint32 +} + +const ( + IFF_UP = 1 + IFF_BROADCAST = 2 + IFF_LOOPBACK = 4 + IFF_POINTTOPOINT = 8 + IFF_MULTICAST = 16 +) + +const SIO_GET_INTERFACE_LIST = 0x4004747F + +// TODO(mattn): SockaddrGen is union of sockaddr/sockaddr_in/sockaddr_in6_old. +// will be fixed to change variable type as suitable. 
+ +type SockaddrGen [24]byte + +type InterfaceInfo struct { + Flags uint32 + Address SockaddrGen + BroadcastAddress SockaddrGen + Netmask SockaddrGen +} + +type IpAddressString struct { + String [16]byte +} + +type IpMaskString IpAddressString + +type IpAddrString struct { + Next *IpAddrString + IpAddress IpAddressString + IpMask IpMaskString + Context uint32 +} + +const MAX_ADAPTER_NAME_LENGTH = 256 +const MAX_ADAPTER_DESCRIPTION_LENGTH = 128 +const MAX_ADAPTER_ADDRESS_LENGTH = 8 + +type IpAdapterInfo struct { + Next *IpAdapterInfo + ComboIndex uint32 + AdapterName [MAX_ADAPTER_NAME_LENGTH + 4]byte + Description [MAX_ADAPTER_DESCRIPTION_LENGTH + 4]byte + AddressLength uint32 + Address [MAX_ADAPTER_ADDRESS_LENGTH]byte + Index uint32 + Type uint32 + DhcpEnabled uint32 + CurrentIpAddress *IpAddrString + IpAddressList IpAddrString + GatewayList IpAddrString + DhcpServer IpAddrString + HaveWins bool + PrimaryWinsServer IpAddrString + SecondaryWinsServer IpAddrString + LeaseObtained int64 + LeaseExpires int64 +} + +const MAXLEN_PHYSADDR = 8 +const MAX_INTERFACE_NAME_LEN = 256 +const MAXLEN_IFDESCR = 256 + +type MibIfRow struct { + Name [MAX_INTERFACE_NAME_LEN]uint16 + Index uint32 + Type uint32 + Mtu uint32 + Speed uint32 + PhysAddrLen uint32 + PhysAddr [MAXLEN_PHYSADDR]byte + AdminStatus uint32 + OperStatus uint32 + LastChange uint32 + InOctets uint32 + InUcastPkts uint32 + InNUcastPkts uint32 + InDiscards uint32 + InErrors uint32 + InUnknownProtos uint32 + OutOctets uint32 + OutUcastPkts uint32 + OutNUcastPkts uint32 + OutDiscards uint32 + OutErrors uint32 + OutQLen uint32 + DescrLen uint32 + Descr [MAXLEN_IFDESCR]byte +} + +type CertInfo struct { + Version uint32 + SerialNumber CryptIntegerBlob + SignatureAlgorithm CryptAlgorithmIdentifier + Issuer CertNameBlob + NotBefore Filetime + NotAfter Filetime + Subject CertNameBlob + SubjectPublicKeyInfo CertPublicKeyInfo + IssuerUniqueId CryptBitBlob + SubjectUniqueId CryptBitBlob + CountExtensions uint32 + Extensions *CertExtension +} + +type CertExtension struct { + ObjId *byte + Critical int32 + Value CryptObjidBlob +} + +type CryptAlgorithmIdentifier struct { + ObjId *byte + Parameters CryptObjidBlob +} + +type CertPublicKeyInfo struct { + Algorithm CryptAlgorithmIdentifier + PublicKey CryptBitBlob +} + +type DataBlob struct { + Size uint32 + Data *byte +} +type CryptIntegerBlob DataBlob +type CryptUintBlob DataBlob +type CryptObjidBlob DataBlob +type CertNameBlob DataBlob +type CertRdnValueBlob DataBlob +type CertBlob DataBlob +type CrlBlob DataBlob +type CryptDataBlob DataBlob +type CryptHashBlob DataBlob +type CryptDigestBlob DataBlob +type CryptDerBlob DataBlob +type CryptAttrBlob DataBlob + +type CryptBitBlob struct { + Size uint32 + Data *byte + UnusedBits uint32 +} + +type CertContext struct { + EncodingType uint32 + EncodedCert *byte + Length uint32 + CertInfo *CertInfo + Store Handle +} + +type CertChainContext struct { + Size uint32 + TrustStatus CertTrustStatus + ChainCount uint32 + Chains **CertSimpleChain + LowerQualityChainCount uint32 + LowerQualityChains **CertChainContext + HasRevocationFreshnessTime uint32 + RevocationFreshnessTime uint32 +} + +type CertTrustListInfo struct { + // Not implemented +} + +type CertSimpleChain struct { + Size uint32 + TrustStatus CertTrustStatus + NumElements uint32 + Elements **CertChainElement + TrustListInfo *CertTrustListInfo + HasRevocationFreshnessTime uint32 + RevocationFreshnessTime uint32 +} + +type CertChainElement struct { + Size uint32 + CertContext *CertContext + TrustStatus 
CertTrustStatus + RevocationInfo *CertRevocationInfo + IssuanceUsage *CertEnhKeyUsage + ApplicationUsage *CertEnhKeyUsage + ExtendedErrorInfo *uint16 +} + +type CertRevocationCrlInfo struct { + // Not implemented +} + +type CertRevocationInfo struct { + Size uint32 + RevocationResult uint32 + RevocationOid *byte + OidSpecificInfo Pointer + HasFreshnessTime uint32 + FreshnessTime uint32 + CrlInfo *CertRevocationCrlInfo +} + +type CertTrustStatus struct { + ErrorStatus uint32 + InfoStatus uint32 +} + +type CertUsageMatch struct { + Type uint32 + Usage CertEnhKeyUsage +} + +type CertEnhKeyUsage struct { + Length uint32 + UsageIdentifiers **byte +} + +type CertChainPara struct { + Size uint32 + RequestedUsage CertUsageMatch + RequstedIssuancePolicy CertUsageMatch + URLRetrievalTimeout uint32 + CheckRevocationFreshnessTime uint32 + RevocationFreshnessTime uint32 + CacheResync *Filetime +} + +type CertChainPolicyPara struct { + Size uint32 + Flags uint32 + ExtraPolicyPara Pointer +} + +type SSLExtraCertChainPolicyPara struct { + Size uint32 + AuthType uint32 + Checks uint32 + ServerName *uint16 +} + +type CertChainPolicyStatus struct { + Size uint32 + Error uint32 + ChainIndex uint32 + ElementIndex uint32 + ExtraPolicyStatus Pointer +} + +type CertPolicyInfo struct { + Identifier *byte + CountQualifiers uint32 + Qualifiers *CertPolicyQualifierInfo +} + +type CertPoliciesInfo struct { + Count uint32 + PolicyInfos *CertPolicyInfo +} + +type CertPolicyQualifierInfo struct { + // Not implemented +} + +type CertStrongSignPara struct { + Size uint32 + InfoChoice uint32 + InfoOrSerializedInfoOrOID unsafe.Pointer +} + +type CryptProtectPromptStruct struct { + Size uint32 + PromptFlags uint32 + App HWND + Prompt *uint16 +} + +type CertChainFindByIssuerPara struct { + Size uint32 + UsageIdentifier *byte + KeySpec uint32 + AcquirePrivateKeyFlags uint32 + IssuerCount uint32 + Issuer Pointer + FindCallback Pointer + FindArg Pointer + IssuerChainIndex *uint32 + IssuerElementIndex *uint32 +} + +type WinTrustData struct { + Size uint32 + PolicyCallbackData uintptr + SIPClientData uintptr + UIChoice uint32 + RevocationChecks uint32 + UnionChoice uint32 + FileOrCatalogOrBlobOrSgnrOrCert unsafe.Pointer + StateAction uint32 + StateData Handle + URLReference *uint16 + ProvFlags uint32 + UIContext uint32 + SignatureSettings *WinTrustSignatureSettings +} + +type WinTrustFileInfo struct { + Size uint32 + FilePath *uint16 + File Handle + KnownSubject *GUID +} + +type WinTrustSignatureSettings struct { + Size uint32 + Index uint32 + Flags uint32 + SecondarySigs uint32 + VerifiedSigIndex uint32 + CryptoPolicy *CertStrongSignPara +} + +const ( + // do not reorder + HKEY_CLASSES_ROOT = 0x80000000 + iota + HKEY_CURRENT_USER + HKEY_LOCAL_MACHINE + HKEY_USERS + HKEY_PERFORMANCE_DATA + HKEY_CURRENT_CONFIG + HKEY_DYN_DATA + + KEY_QUERY_VALUE = 1 + KEY_SET_VALUE = 2 + KEY_CREATE_SUB_KEY = 4 + KEY_ENUMERATE_SUB_KEYS = 8 + KEY_NOTIFY = 16 + KEY_CREATE_LINK = 32 + KEY_WRITE = 0x20006 + KEY_EXECUTE = 0x20019 + KEY_READ = 0x20019 + KEY_WOW64_64KEY = 0x0100 + KEY_WOW64_32KEY = 0x0200 + KEY_ALL_ACCESS = 0xf003f +) + +const ( + // do not reorder + REG_NONE = iota + REG_SZ + REG_EXPAND_SZ + REG_BINARY + REG_DWORD_LITTLE_ENDIAN + REG_DWORD_BIG_ENDIAN + REG_LINK + REG_MULTI_SZ + REG_RESOURCE_LIST + REG_FULL_RESOURCE_DESCRIPTOR + REG_RESOURCE_REQUIREMENTS_LIST + REG_QWORD_LITTLE_ENDIAN + REG_DWORD = REG_DWORD_LITTLE_ENDIAN + REG_QWORD = REG_QWORD_LITTLE_ENDIAN +) + +const ( + EVENT_MODIFY_STATE = 0x0002 + EVENT_ALL_ACCESS = 
STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x3 + + MUTANT_QUERY_STATE = 0x0001 + MUTANT_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | MUTANT_QUERY_STATE + + SEMAPHORE_MODIFY_STATE = 0x0002 + SEMAPHORE_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x3 + + TIMER_QUERY_STATE = 0x0001 + TIMER_MODIFY_STATE = 0x0002 + TIMER_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | TIMER_QUERY_STATE | TIMER_MODIFY_STATE + + MUTEX_MODIFY_STATE = MUTANT_QUERY_STATE + MUTEX_ALL_ACCESS = MUTANT_ALL_ACCESS + + CREATE_EVENT_MANUAL_RESET = 0x1 + CREATE_EVENT_INITIAL_SET = 0x2 + CREATE_MUTEX_INITIAL_OWNER = 0x1 +) + +type AddrinfoW struct { + Flags int32 + Family int32 + Socktype int32 + Protocol int32 + Addrlen uintptr + Canonname *uint16 + Addr uintptr + Next *AddrinfoW +} + +const ( + AI_PASSIVE = 1 + AI_CANONNAME = 2 + AI_NUMERICHOST = 4 +) + +type GUID struct { + Data1 uint32 + Data2 uint16 + Data3 uint16 + Data4 [8]byte +} + +var WSAID_CONNECTEX = GUID{ + 0x25a207b9, + 0xddf3, + 0x4660, + [8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e}, +} + +var WSAID_WSASENDMSG = GUID{ + 0xa441e712, + 0x754f, + 0x43ca, + [8]byte{0x84, 0xa7, 0x0d, 0xee, 0x44, 0xcf, 0x60, 0x6d}, +} + +var WSAID_WSARECVMSG = GUID{ + 0xf689d7c8, + 0x6f1f, + 0x436b, + [8]byte{0x8a, 0x53, 0xe5, 0x4f, 0xe3, 0x51, 0xc3, 0x22}, +} + +const ( + FILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1 + FILE_SKIP_SET_EVENT_ON_HANDLE = 2 +) + +const ( + WSAPROTOCOL_LEN = 255 + MAX_PROTOCOL_CHAIN = 7 + BASE_PROTOCOL = 1 + LAYERED_PROTOCOL = 0 + + XP1_CONNECTIONLESS = 0x00000001 + XP1_GUARANTEED_DELIVERY = 0x00000002 + XP1_GUARANTEED_ORDER = 0x00000004 + XP1_MESSAGE_ORIENTED = 0x00000008 + XP1_PSEUDO_STREAM = 0x00000010 + XP1_GRACEFUL_CLOSE = 0x00000020 + XP1_EXPEDITED_DATA = 0x00000040 + XP1_CONNECT_DATA = 0x00000080 + XP1_DISCONNECT_DATA = 0x00000100 + XP1_SUPPORT_BROADCAST = 0x00000200 + XP1_SUPPORT_MULTIPOINT = 0x00000400 + XP1_MULTIPOINT_CONTROL_PLANE = 0x00000800 + XP1_MULTIPOINT_DATA_PLANE = 0x00001000 + XP1_QOS_SUPPORTED = 0x00002000 + XP1_UNI_SEND = 0x00008000 + XP1_UNI_RECV = 0x00010000 + XP1_IFS_HANDLES = 0x00020000 + XP1_PARTIAL_MESSAGE = 0x00040000 + XP1_SAN_SUPPORT_SDP = 0x00080000 + + PFL_MULTIPLE_PROTO_ENTRIES = 0x00000001 + PFL_RECOMMENDED_PROTO_ENTRY = 0x00000002 + PFL_HIDDEN = 0x00000004 + PFL_MATCHES_PROTOCOL_ZERO = 0x00000008 + PFL_NETWORKDIRECT_PROVIDER = 0x00000010 +) + +type WSAProtocolInfo struct { + ServiceFlags1 uint32 + ServiceFlags2 uint32 + ServiceFlags3 uint32 + ServiceFlags4 uint32 + ProviderFlags uint32 + ProviderId GUID + CatalogEntryId uint32 + ProtocolChain WSAProtocolChain + Version int32 + AddressFamily int32 + MaxSockAddr int32 + MinSockAddr int32 + SocketType int32 + Protocol int32 + ProtocolMaxOffset int32 + NetworkByteOrder int32 + SecurityScheme int32 + MessageSize uint32 + ProviderReserved uint32 + ProtocolName [WSAPROTOCOL_LEN + 1]uint16 +} + +type WSAProtocolChain struct { + ChainLen int32 + ChainEntries [MAX_PROTOCOL_CHAIN]uint32 +} + +type TCPKeepalive struct { + OnOff uint32 + Time uint32 + Interval uint32 +} + +type symbolicLinkReparseBuffer struct { + SubstituteNameOffset uint16 + SubstituteNameLength uint16 + PrintNameOffset uint16 + PrintNameLength uint16 + Flags uint32 + PathBuffer [1]uint16 +} + +type mountPointReparseBuffer struct { + SubstituteNameOffset uint16 + SubstituteNameLength uint16 + PrintNameOffset uint16 + PrintNameLength uint16 + PathBuffer [1]uint16 +} + +type reparseDataBuffer struct { + ReparseTag uint32 + ReparseDataLength uint16 + Reserved uint16 + + // 
GenericReparseBuffer + reparseBuffer byte +} + +const ( + FSCTL_CREATE_OR_GET_OBJECT_ID = 0x0900C0 + FSCTL_DELETE_OBJECT_ID = 0x0900A0 + FSCTL_DELETE_REPARSE_POINT = 0x0900AC + FSCTL_DUPLICATE_EXTENTS_TO_FILE = 0x098344 + FSCTL_DUPLICATE_EXTENTS_TO_FILE_EX = 0x0983E8 + FSCTL_FILESYSTEM_GET_STATISTICS = 0x090060 + FSCTL_FILE_LEVEL_TRIM = 0x098208 + FSCTL_FIND_FILES_BY_SID = 0x09008F + FSCTL_GET_COMPRESSION = 0x09003C + FSCTL_GET_INTEGRITY_INFORMATION = 0x09027C + FSCTL_GET_NTFS_VOLUME_DATA = 0x090064 + FSCTL_GET_REFS_VOLUME_DATA = 0x0902D8 + FSCTL_GET_OBJECT_ID = 0x09009C + FSCTL_GET_REPARSE_POINT = 0x0900A8 + FSCTL_GET_RETRIEVAL_POINTER_COUNT = 0x09042B + FSCTL_GET_RETRIEVAL_POINTERS = 0x090073 + FSCTL_GET_RETRIEVAL_POINTERS_AND_REFCOUNT = 0x0903D3 + FSCTL_IS_PATHNAME_VALID = 0x09002C + FSCTL_LMR_SET_LINK_TRACKING_INFORMATION = 0x1400EC + FSCTL_MARK_HANDLE = 0x0900FC + FSCTL_OFFLOAD_READ = 0x094264 + FSCTL_OFFLOAD_WRITE = 0x098268 + FSCTL_PIPE_PEEK = 0x11400C + FSCTL_PIPE_TRANSCEIVE = 0x11C017 + FSCTL_PIPE_WAIT = 0x110018 + FSCTL_QUERY_ALLOCATED_RANGES = 0x0940CF + FSCTL_QUERY_FAT_BPB = 0x090058 + FSCTL_QUERY_FILE_REGIONS = 0x090284 + FSCTL_QUERY_ON_DISK_VOLUME_INFO = 0x09013C + FSCTL_QUERY_SPARING_INFO = 0x090138 + FSCTL_READ_FILE_USN_DATA = 0x0900EB + FSCTL_RECALL_FILE = 0x090117 + FSCTL_REFS_STREAM_SNAPSHOT_MANAGEMENT = 0x090440 + FSCTL_SET_COMPRESSION = 0x09C040 + FSCTL_SET_DEFECT_MANAGEMENT = 0x098134 + FSCTL_SET_ENCRYPTION = 0x0900D7 + FSCTL_SET_INTEGRITY_INFORMATION = 0x09C280 + FSCTL_SET_INTEGRITY_INFORMATION_EX = 0x090380 + FSCTL_SET_OBJECT_ID = 0x090098 + FSCTL_SET_OBJECT_ID_EXTENDED = 0x0900BC + FSCTL_SET_REPARSE_POINT = 0x0900A4 + FSCTL_SET_SPARSE = 0x0900C4 + FSCTL_SET_ZERO_DATA = 0x0980C8 + FSCTL_SET_ZERO_ON_DEALLOCATION = 0x090194 + FSCTL_SIS_COPYFILE = 0x090100 + FSCTL_WRITE_USN_CLOSE_RECORD = 0x0900EF + + MAXIMUM_REPARSE_DATA_BUFFER_SIZE = 16 * 1024 + IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003 + IO_REPARSE_TAG_SYMLINK = 0xA000000C + SYMBOLIC_LINK_FLAG_DIRECTORY = 0x1 +) + +const ( + ComputerNameNetBIOS = 0 + ComputerNameDnsHostname = 1 + ComputerNameDnsDomain = 2 + ComputerNameDnsFullyQualified = 3 + ComputerNamePhysicalNetBIOS = 4 + ComputerNamePhysicalDnsHostname = 5 + ComputerNamePhysicalDnsDomain = 6 + ComputerNamePhysicalDnsFullyQualified = 7 + ComputerNameMax = 8 +) + +// For MessageBox() +const ( + MB_OK = 0x00000000 + MB_OKCANCEL = 0x00000001 + MB_ABORTRETRYIGNORE = 0x00000002 + MB_YESNOCANCEL = 0x00000003 + MB_YESNO = 0x00000004 + MB_RETRYCANCEL = 0x00000005 + MB_CANCELTRYCONTINUE = 0x00000006 + MB_ICONHAND = 0x00000010 + MB_ICONQUESTION = 0x00000020 + MB_ICONEXCLAMATION = 0x00000030 + MB_ICONASTERISK = 0x00000040 + MB_USERICON = 0x00000080 + MB_ICONWARNING = MB_ICONEXCLAMATION + MB_ICONERROR = MB_ICONHAND + MB_ICONINFORMATION = MB_ICONASTERISK + MB_ICONSTOP = MB_ICONHAND + MB_DEFBUTTON1 = 0x00000000 + MB_DEFBUTTON2 = 0x00000100 + MB_DEFBUTTON3 = 0x00000200 + MB_DEFBUTTON4 = 0x00000300 + MB_APPLMODAL = 0x00000000 + MB_SYSTEMMODAL = 0x00001000 + MB_TASKMODAL = 0x00002000 + MB_HELP = 0x00004000 + MB_NOFOCUS = 0x00008000 + MB_SETFOREGROUND = 0x00010000 + MB_DEFAULT_DESKTOP_ONLY = 0x00020000 + MB_TOPMOST = 0x00040000 + MB_RIGHT = 0x00080000 + MB_RTLREADING = 0x00100000 + MB_SERVICE_NOTIFICATION = 0x00200000 +) + +const ( + MOVEFILE_REPLACE_EXISTING = 0x1 + MOVEFILE_COPY_ALLOWED = 0x2 + MOVEFILE_DELAY_UNTIL_REBOOT = 0x4 + MOVEFILE_WRITE_THROUGH = 0x8 + MOVEFILE_CREATE_HARDLINK = 0x10 + MOVEFILE_FAIL_IF_NOT_TRACKABLE = 0x20 +) + +const GAA_FLAG_INCLUDE_PREFIX = 0x00000010 
+ +const ( + IF_TYPE_OTHER = 1 + IF_TYPE_ETHERNET_CSMACD = 6 + IF_TYPE_ISO88025_TOKENRING = 9 + IF_TYPE_PPP = 23 + IF_TYPE_SOFTWARE_LOOPBACK = 24 + IF_TYPE_ATM = 37 + IF_TYPE_IEEE80211 = 71 + IF_TYPE_TUNNEL = 131 + IF_TYPE_IEEE1394 = 144 +) + +type SocketAddress struct { + Sockaddr *syscall.RawSockaddrAny + SockaddrLength int32 +} + +// IP returns an IPv4 or IPv6 address, or nil if the underlying SocketAddress is neither. +func (addr *SocketAddress) IP() net.IP { + if uintptr(addr.SockaddrLength) >= unsafe.Sizeof(RawSockaddrInet4{}) && addr.Sockaddr.Addr.Family == AF_INET { + return (*RawSockaddrInet4)(unsafe.Pointer(addr.Sockaddr)).Addr[:] + } else if uintptr(addr.SockaddrLength) >= unsafe.Sizeof(RawSockaddrInet6{}) && addr.Sockaddr.Addr.Family == AF_INET6 { + return (*RawSockaddrInet6)(unsafe.Pointer(addr.Sockaddr)).Addr[:] + } + return nil +} + +type IpAdapterUnicastAddress struct { + Length uint32 + Flags uint32 + Next *IpAdapterUnicastAddress + Address SocketAddress + PrefixOrigin int32 + SuffixOrigin int32 + DadState int32 + ValidLifetime uint32 + PreferredLifetime uint32 + LeaseLifetime uint32 + OnLinkPrefixLength uint8 +} + +type IpAdapterAnycastAddress struct { + Length uint32 + Flags uint32 + Next *IpAdapterAnycastAddress + Address SocketAddress +} + +type IpAdapterMulticastAddress struct { + Length uint32 + Flags uint32 + Next *IpAdapterMulticastAddress + Address SocketAddress +} + +type IpAdapterDnsServerAdapter struct { + Length uint32 + Reserved uint32 + Next *IpAdapterDnsServerAdapter + Address SocketAddress +} + +type IpAdapterPrefix struct { + Length uint32 + Flags uint32 + Next *IpAdapterPrefix + Address SocketAddress + PrefixLength uint32 +} + +type IpAdapterAddresses struct { + Length uint32 + IfIndex uint32 + Next *IpAdapterAddresses + AdapterName *byte + FirstUnicastAddress *IpAdapterUnicastAddress + FirstAnycastAddress *IpAdapterAnycastAddress + FirstMulticastAddress *IpAdapterMulticastAddress + FirstDnsServerAddress *IpAdapterDnsServerAdapter + DnsSuffix *uint16 + Description *uint16 + FriendlyName *uint16 + PhysicalAddress [syscall.MAX_ADAPTER_ADDRESS_LENGTH]byte + PhysicalAddressLength uint32 + Flags uint32 + Mtu uint32 + IfType uint32 + OperStatus uint32 + Ipv6IfIndex uint32 + ZoneIndices [16]uint32 + FirstPrefix *IpAdapterPrefix + TransmitLinkSpeed uint64 + ReceiveLinkSpeed uint64 + FirstWinsServerAddress *IpAdapterWinsServerAddress + FirstGatewayAddress *IpAdapterGatewayAddress + Ipv4Metric uint32 + Ipv6Metric uint32 + Luid uint64 + Dhcpv4Server SocketAddress + CompartmentId uint32 + NetworkGuid GUID + ConnectionType uint32 + TunnelType uint32 + Dhcpv6Server SocketAddress + Dhcpv6ClientDuid [MAX_DHCPV6_DUID_LENGTH]byte + Dhcpv6ClientDuidLength uint32 + Dhcpv6Iaid uint32 + FirstDnsSuffix *IpAdapterDNSSuffix +} + +type IpAdapterWinsServerAddress struct { + Length uint32 + Reserved uint32 + Next *IpAdapterWinsServerAddress + Address SocketAddress +} + +type IpAdapterGatewayAddress struct { + Length uint32 + Reserved uint32 + Next *IpAdapterGatewayAddress + Address SocketAddress +} + +type IpAdapterDNSSuffix struct { + Next *IpAdapterDNSSuffix + String [MAX_DNS_SUFFIX_STRING_LENGTH]uint16 +} + +const ( + IfOperStatusUp = 1 + IfOperStatusDown = 2 + IfOperStatusTesting = 3 + IfOperStatusUnknown = 4 + IfOperStatusDormant = 5 + IfOperStatusNotPresent = 6 + IfOperStatusLowerLayerDown = 7 +) + +// Console related constants used for the mode parameter to SetConsoleMode. See +// https://docs.microsoft.com/en-us/windows/console/setconsolemode for details. 
+ +const ( + ENABLE_PROCESSED_INPUT = 0x1 + ENABLE_LINE_INPUT = 0x2 + ENABLE_ECHO_INPUT = 0x4 + ENABLE_WINDOW_INPUT = 0x8 + ENABLE_MOUSE_INPUT = 0x10 + ENABLE_INSERT_MODE = 0x20 + ENABLE_QUICK_EDIT_MODE = 0x40 + ENABLE_EXTENDED_FLAGS = 0x80 + ENABLE_AUTO_POSITION = 0x100 + ENABLE_VIRTUAL_TERMINAL_INPUT = 0x200 + + ENABLE_PROCESSED_OUTPUT = 0x1 + ENABLE_WRAP_AT_EOL_OUTPUT = 0x2 + ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4 + DISABLE_NEWLINE_AUTO_RETURN = 0x8 + ENABLE_LVB_GRID_WORLDWIDE = 0x10 +) + +type Coord struct { + X int16 + Y int16 +} + +type SmallRect struct { + Left int16 + Top int16 + Right int16 + Bottom int16 +} + +// Used with GetConsoleScreenBuffer to retrieve information about a console +// screen buffer. See +// https://docs.microsoft.com/en-us/windows/console/console-screen-buffer-info-str +// for details. + +type ConsoleScreenBufferInfo struct { + Size Coord + CursorPosition Coord + Attributes uint16 + Window SmallRect + MaximumWindowSize Coord +} + +const UNIX_PATH_MAX = 108 // defined in afunix.h + +const ( + // flags for JOBOBJECT_BASIC_LIMIT_INFORMATION.LimitFlags + JOB_OBJECT_LIMIT_ACTIVE_PROCESS = 0x00000008 + JOB_OBJECT_LIMIT_AFFINITY = 0x00000010 + JOB_OBJECT_LIMIT_BREAKAWAY_OK = 0x00000800 + JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION = 0x00000400 + JOB_OBJECT_LIMIT_JOB_MEMORY = 0x00000200 + JOB_OBJECT_LIMIT_JOB_TIME = 0x00000004 + JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE = 0x00002000 + JOB_OBJECT_LIMIT_PRESERVE_JOB_TIME = 0x00000040 + JOB_OBJECT_LIMIT_PRIORITY_CLASS = 0x00000020 + JOB_OBJECT_LIMIT_PROCESS_MEMORY = 0x00000100 + JOB_OBJECT_LIMIT_PROCESS_TIME = 0x00000002 + JOB_OBJECT_LIMIT_SCHEDULING_CLASS = 0x00000080 + JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK = 0x00001000 + JOB_OBJECT_LIMIT_SUBSET_AFFINITY = 0x00004000 + JOB_OBJECT_LIMIT_WORKINGSET = 0x00000001 +) + +type IO_COUNTERS struct { + ReadOperationCount uint64 + WriteOperationCount uint64 + OtherOperationCount uint64 + ReadTransferCount uint64 + WriteTransferCount uint64 + OtherTransferCount uint64 +} + +type JOBOBJECT_EXTENDED_LIMIT_INFORMATION struct { + BasicLimitInformation JOBOBJECT_BASIC_LIMIT_INFORMATION + IoInfo IO_COUNTERS + ProcessMemoryLimit uintptr + JobMemoryLimit uintptr + PeakProcessMemoryUsed uintptr + PeakJobMemoryUsed uintptr +} + +const ( + // UIRestrictionsClass + JOB_OBJECT_UILIMIT_DESKTOP = 0x00000040 + JOB_OBJECT_UILIMIT_DISPLAYSETTINGS = 0x00000010 + JOB_OBJECT_UILIMIT_EXITWINDOWS = 0x00000080 + JOB_OBJECT_UILIMIT_GLOBALATOMS = 0x00000020 + JOB_OBJECT_UILIMIT_HANDLES = 0x00000001 + JOB_OBJECT_UILIMIT_READCLIPBOARD = 0x00000002 + JOB_OBJECT_UILIMIT_SYSTEMPARAMETERS = 0x00000008 + JOB_OBJECT_UILIMIT_WRITECLIPBOARD = 0x00000004 +) + +type JOBOBJECT_BASIC_UI_RESTRICTIONS struct { + UIRestrictionsClass uint32 +} + +const ( + // JobObjectInformationClass for QueryInformationJobObject and SetInformationJobObject + JobObjectAssociateCompletionPortInformation = 7 + JobObjectBasicAccountingInformation = 1 + JobObjectBasicAndIoAccountingInformation = 8 + JobObjectBasicLimitInformation = 2 + JobObjectBasicProcessIdList = 3 + JobObjectBasicUIRestrictions = 4 + JobObjectCpuRateControlInformation = 15 + JobObjectEndOfJobTimeInformation = 6 + JobObjectExtendedLimitInformation = 9 + JobObjectGroupInformation = 11 + JobObjectGroupInformationEx = 14 + JobObjectLimitViolationInformation = 13 + JobObjectLimitViolationInformation2 = 34 + JobObjectNetRateControlInformation = 32 + JobObjectNotificationLimitInformation = 12 + JobObjectNotificationLimitInformation2 = 33 + JobObjectSecurityLimitInformation = 5 +) + 
+const ( + KF_FLAG_DEFAULT = 0x00000000 + KF_FLAG_FORCE_APP_DATA_REDIRECTION = 0x00080000 + KF_FLAG_RETURN_FILTER_REDIRECTION_TARGET = 0x00040000 + KF_FLAG_FORCE_PACKAGE_REDIRECTION = 0x00020000 + KF_FLAG_NO_PACKAGE_REDIRECTION = 0x00010000 + KF_FLAG_FORCE_APPCONTAINER_REDIRECTION = 0x00020000 + KF_FLAG_NO_APPCONTAINER_REDIRECTION = 0x00010000 + KF_FLAG_CREATE = 0x00008000 + KF_FLAG_DONT_VERIFY = 0x00004000 + KF_FLAG_DONT_UNEXPAND = 0x00002000 + KF_FLAG_NO_ALIAS = 0x00001000 + KF_FLAG_INIT = 0x00000800 + KF_FLAG_DEFAULT_PATH = 0x00000400 + KF_FLAG_NOT_PARENT_RELATIVE = 0x00000200 + KF_FLAG_SIMPLE_IDLIST = 0x00000100 + KF_FLAG_ALIAS_ONLY = 0x80000000 +) + +type OsVersionInfoEx struct { + osVersionInfoSize uint32 + MajorVersion uint32 + MinorVersion uint32 + BuildNumber uint32 + PlatformId uint32 + CsdVersion [128]uint16 + ServicePackMajor uint16 + ServicePackMinor uint16 + SuiteMask uint16 + ProductType byte + _ byte +} + +const ( + EWX_LOGOFF = 0x00000000 + EWX_SHUTDOWN = 0x00000001 + EWX_REBOOT = 0x00000002 + EWX_FORCE = 0x00000004 + EWX_POWEROFF = 0x00000008 + EWX_FORCEIFHUNG = 0x00000010 + EWX_QUICKRESOLVE = 0x00000020 + EWX_RESTARTAPPS = 0x00000040 + EWX_HYBRID_SHUTDOWN = 0x00400000 + EWX_BOOTOPTIONS = 0x01000000 + + SHTDN_REASON_FLAG_COMMENT_REQUIRED = 0x01000000 + SHTDN_REASON_FLAG_DIRTY_PROBLEM_ID_REQUIRED = 0x02000000 + SHTDN_REASON_FLAG_CLEAN_UI = 0x04000000 + SHTDN_REASON_FLAG_DIRTY_UI = 0x08000000 + SHTDN_REASON_FLAG_USER_DEFINED = 0x40000000 + SHTDN_REASON_FLAG_PLANNED = 0x80000000 + SHTDN_REASON_MAJOR_OTHER = 0x00000000 + SHTDN_REASON_MAJOR_NONE = 0x00000000 + SHTDN_REASON_MAJOR_HARDWARE = 0x00010000 + SHTDN_REASON_MAJOR_OPERATINGSYSTEM = 0x00020000 + SHTDN_REASON_MAJOR_SOFTWARE = 0x00030000 + SHTDN_REASON_MAJOR_APPLICATION = 0x00040000 + SHTDN_REASON_MAJOR_SYSTEM = 0x00050000 + SHTDN_REASON_MAJOR_POWER = 0x00060000 + SHTDN_REASON_MAJOR_LEGACY_API = 0x00070000 + SHTDN_REASON_MINOR_OTHER = 0x00000000 + SHTDN_REASON_MINOR_NONE = 0x000000ff + SHTDN_REASON_MINOR_MAINTENANCE = 0x00000001 + SHTDN_REASON_MINOR_INSTALLATION = 0x00000002 + SHTDN_REASON_MINOR_UPGRADE = 0x00000003 + SHTDN_REASON_MINOR_RECONFIG = 0x00000004 + SHTDN_REASON_MINOR_HUNG = 0x00000005 + SHTDN_REASON_MINOR_UNSTABLE = 0x00000006 + SHTDN_REASON_MINOR_DISK = 0x00000007 + SHTDN_REASON_MINOR_PROCESSOR = 0x00000008 + SHTDN_REASON_MINOR_NETWORKCARD = 0x00000009 + SHTDN_REASON_MINOR_POWER_SUPPLY = 0x0000000a + SHTDN_REASON_MINOR_CORDUNPLUGGED = 0x0000000b + SHTDN_REASON_MINOR_ENVIRONMENT = 0x0000000c + SHTDN_REASON_MINOR_HARDWARE_DRIVER = 0x0000000d + SHTDN_REASON_MINOR_OTHERDRIVER = 0x0000000e + SHTDN_REASON_MINOR_BLUESCREEN = 0x0000000F + SHTDN_REASON_MINOR_SERVICEPACK = 0x00000010 + SHTDN_REASON_MINOR_HOTFIX = 0x00000011 + SHTDN_REASON_MINOR_SECURITYFIX = 0x00000012 + SHTDN_REASON_MINOR_SECURITY = 0x00000013 + SHTDN_REASON_MINOR_NETWORK_CONNECTIVITY = 0x00000014 + SHTDN_REASON_MINOR_WMI = 0x00000015 + SHTDN_REASON_MINOR_SERVICEPACK_UNINSTALL = 0x00000016 + SHTDN_REASON_MINOR_HOTFIX_UNINSTALL = 0x00000017 + SHTDN_REASON_MINOR_SECURITYFIX_UNINSTALL = 0x00000018 + SHTDN_REASON_MINOR_MMC = 0x00000019 + SHTDN_REASON_MINOR_SYSTEMRESTORE = 0x0000001a + SHTDN_REASON_MINOR_TERMSRV = 0x00000020 + SHTDN_REASON_MINOR_DC_PROMOTION = 0x00000021 + SHTDN_REASON_MINOR_DC_DEMOTION = 0x00000022 + SHTDN_REASON_UNKNOWN = SHTDN_REASON_MINOR_NONE + SHTDN_REASON_LEGACY_API = SHTDN_REASON_MAJOR_LEGACY_API | SHTDN_REASON_FLAG_PLANNED + SHTDN_REASON_VALID_BIT_MASK = 0xc0ffffff + + SHUTDOWN_NORETRY = 0x1 +) + +// Flags used for 
GetModuleHandleEx +const ( + GET_MODULE_HANDLE_EX_FLAG_PIN = 1 + GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT = 2 + GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS = 4 +) + +// MUI function flag values +const ( + MUI_LANGUAGE_ID = 0x4 + MUI_LANGUAGE_NAME = 0x8 + MUI_MERGE_SYSTEM_FALLBACK = 0x10 + MUI_MERGE_USER_FALLBACK = 0x20 + MUI_UI_FALLBACK = MUI_MERGE_SYSTEM_FALLBACK | MUI_MERGE_USER_FALLBACK + MUI_THREAD_LANGUAGES = 0x40 + MUI_CONSOLE_FILTER = 0x100 + MUI_COMPLEX_SCRIPT_FILTER = 0x200 + MUI_RESET_FILTERS = 0x001 + MUI_USER_PREFERRED_UI_LANGUAGES = 0x10 + MUI_USE_INSTALLED_LANGUAGES = 0x20 + MUI_USE_SEARCH_ALL_LANGUAGES = 0x40 + MUI_LANG_NEUTRAL_PE_FILE = 0x100 + MUI_NON_LANG_NEUTRAL_FILE = 0x200 + MUI_MACHINE_LANGUAGE_SETTINGS = 0x400 + MUI_FILETYPE_NOT_LANGUAGE_NEUTRAL = 0x001 + MUI_FILETYPE_LANGUAGE_NEUTRAL_MAIN = 0x002 + MUI_FILETYPE_LANGUAGE_NEUTRAL_MUI = 0x004 + MUI_QUERY_TYPE = 0x001 + MUI_QUERY_CHECKSUM = 0x002 + MUI_QUERY_LANGUAGE_NAME = 0x004 + MUI_QUERY_RESOURCE_TYPES = 0x008 + MUI_FILEINFO_VERSION = 0x001 + + MUI_FULL_LANGUAGE = 0x01 + MUI_PARTIAL_LANGUAGE = 0x02 + MUI_LIP_LANGUAGE = 0x04 + MUI_LANGUAGE_INSTALLED = 0x20 + MUI_LANGUAGE_LICENSED = 0x40 +) + +// FILE_INFO_BY_HANDLE_CLASS constants for SetFileInformationByHandle/GetFileInformationByHandleEx +const ( + FileBasicInfo = 0 + FileStandardInfo = 1 + FileNameInfo = 2 + FileRenameInfo = 3 + FileDispositionInfo = 4 + FileAllocationInfo = 5 + FileEndOfFileInfo = 6 + FileStreamInfo = 7 + FileCompressionInfo = 8 + FileAttributeTagInfo = 9 + FileIdBothDirectoryInfo = 10 + FileIdBothDirectoryRestartInfo = 11 + FileIoPriorityHintInfo = 12 + FileRemoteProtocolInfo = 13 + FileFullDirectoryInfo = 14 + FileFullDirectoryRestartInfo = 15 + FileStorageInfo = 16 + FileAlignmentInfo = 17 + FileIdInfo = 18 + FileIdExtdDirectoryInfo = 19 + FileIdExtdDirectoryRestartInfo = 20 + FileDispositionInfoEx = 21 + FileRenameInfoEx = 22 + FileCaseSensitiveInfo = 23 + FileNormalizedNameInfo = 24 +) + +// LoadLibrary flags for determining from where to search for a DLL +const ( + DONT_RESOLVE_DLL_REFERENCES = 0x1 + LOAD_LIBRARY_AS_DATAFILE = 0x2 + LOAD_WITH_ALTERED_SEARCH_PATH = 0x8 + LOAD_IGNORE_CODE_AUTHZ_LEVEL = 0x10 + LOAD_LIBRARY_AS_IMAGE_RESOURCE = 0x20 + LOAD_LIBRARY_AS_DATAFILE_EXCLUSIVE = 0x40 + LOAD_LIBRARY_REQUIRE_SIGNED_TARGET = 0x80 + LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR = 0x100 + LOAD_LIBRARY_SEARCH_APPLICATION_DIR = 0x200 + LOAD_LIBRARY_SEARCH_USER_DIRS = 0x400 + LOAD_LIBRARY_SEARCH_SYSTEM32 = 0x800 + LOAD_LIBRARY_SEARCH_DEFAULT_DIRS = 0x1000 + LOAD_LIBRARY_SAFE_CURRENT_DIRS = 0x00002000 + LOAD_LIBRARY_SEARCH_SYSTEM32_NO_FORWARDER = 0x00004000 + LOAD_LIBRARY_OS_INTEGRITY_CONTINUITY = 0x00008000 +) + +// RegNotifyChangeKeyValue notifyFilter flags. +const ( + // REG_NOTIFY_CHANGE_NAME notifies the caller if a subkey is added or deleted. + REG_NOTIFY_CHANGE_NAME = 0x00000001 + + // REG_NOTIFY_CHANGE_ATTRIBUTES notifies the caller of changes to the attributes of the key, such as the security descriptor information. + REG_NOTIFY_CHANGE_ATTRIBUTES = 0x00000002 + + // REG_NOTIFY_CHANGE_LAST_SET notifies the caller of changes to a value of the key. This can include adding or deleting a value, or changing an existing value. + REG_NOTIFY_CHANGE_LAST_SET = 0x00000004 + + // REG_NOTIFY_CHANGE_SECURITY notifies the caller of changes to the security descriptor of the key. 
+ REG_NOTIFY_CHANGE_SECURITY = 0x00000008 + + // REG_NOTIFY_THREAD_AGNOSTIC indicates that the lifetime of the registration must not be tied to the lifetime of the thread issuing the RegNotifyChangeKeyValue call. Note: This flag value is only supported in Windows 8 and later. + REG_NOTIFY_THREAD_AGNOSTIC = 0x10000000 +) + +type CommTimeouts struct { + ReadIntervalTimeout uint32 + ReadTotalTimeoutMultiplier uint32 + ReadTotalTimeoutConstant uint32 + WriteTotalTimeoutMultiplier uint32 + WriteTotalTimeoutConstant uint32 +} + +// NTUnicodeString is a UTF-16 string for NT native APIs, corresponding to UNICODE_STRING. +type NTUnicodeString struct { + Length uint16 + MaximumLength uint16 + Buffer *uint16 +} + +// NTString is an ANSI string for NT native APIs, corresponding to STRING. +type NTString struct { + Length uint16 + MaximumLength uint16 + Buffer *byte +} + +type LIST_ENTRY struct { + Flink *LIST_ENTRY + Blink *LIST_ENTRY +} + +type RUNTIME_FUNCTION struct { + BeginAddress uint32 + EndAddress uint32 + UnwindData uint32 +} + +type LDR_DATA_TABLE_ENTRY struct { + reserved1 [2]uintptr + InMemoryOrderLinks LIST_ENTRY + reserved2 [2]uintptr + DllBase uintptr + reserved3 [2]uintptr + FullDllName NTUnicodeString + reserved4 [8]byte + reserved5 [3]uintptr + reserved6 uintptr + TimeDateStamp uint32 +} + +type PEB_LDR_DATA struct { + reserved1 [8]byte + reserved2 [3]uintptr + InMemoryOrderModuleList LIST_ENTRY +} + +type CURDIR struct { + DosPath NTUnicodeString + Handle Handle +} + +type RTL_DRIVE_LETTER_CURDIR struct { + Flags uint16 + Length uint16 + TimeStamp uint32 + DosPath NTString +} + +type RTL_USER_PROCESS_PARAMETERS struct { + MaximumLength, Length uint32 + + Flags, DebugFlags uint32 + + ConsoleHandle Handle + ConsoleFlags uint32 + StandardInput, StandardOutput, StandardError Handle + + CurrentDirectory CURDIR + DllPath NTUnicodeString + ImagePathName NTUnicodeString + CommandLine NTUnicodeString + Environment unsafe.Pointer + + StartingX, StartingY, CountX, CountY, CountCharsX, CountCharsY, FillAttribute uint32 + + WindowFlags, ShowWindowFlags uint32 + WindowTitle, DesktopInfo, ShellInfo, RuntimeData NTUnicodeString + CurrentDirectories [32]RTL_DRIVE_LETTER_CURDIR + + EnvironmentSize, EnvironmentVersion uintptr + + PackageDependencyData unsafe.Pointer + ProcessGroupId uint32 + LoaderThreads uint32 + + RedirectionDllName NTUnicodeString + HeapPartitionName NTUnicodeString + DefaultThreadpoolCpuSetMasks uintptr + DefaultThreadpoolCpuSetMaskCount uint32 +} + +type PEB struct { + reserved1 [2]byte + BeingDebugged byte + BitField byte + reserved3 uintptr + ImageBaseAddress uintptr + Ldr *PEB_LDR_DATA + ProcessParameters *RTL_USER_PROCESS_PARAMETERS + reserved4 [3]uintptr + AtlThunkSListPtr uintptr + reserved5 uintptr + reserved6 uint32 + reserved7 uintptr + reserved8 uint32 + AtlThunkSListPtr32 uint32 + reserved9 [45]uintptr + reserved10 [96]byte + PostProcessInitRoutine uintptr + reserved11 [128]byte + reserved12 [1]uintptr + SessionId uint32 +} + +type OBJECT_ATTRIBUTES struct { + Length uint32 + RootDirectory Handle + ObjectName *NTUnicodeString + Attributes uint32 + SecurityDescriptor *SECURITY_DESCRIPTOR + SecurityQoS *SECURITY_QUALITY_OF_SERVICE +} + +// Values for the Attributes member of OBJECT_ATTRIBUTES. 
+const ( + OBJ_INHERIT = 0x00000002 + OBJ_PERMANENT = 0x00000010 + OBJ_EXCLUSIVE = 0x00000020 + OBJ_CASE_INSENSITIVE = 0x00000040 + OBJ_OPENIF = 0x00000080 + OBJ_OPENLINK = 0x00000100 + OBJ_KERNEL_HANDLE = 0x00000200 + OBJ_FORCE_ACCESS_CHECK = 0x00000400 + OBJ_IGNORE_IMPERSONATED_DEVICEMAP = 0x00000800 + OBJ_DONT_REPARSE = 0x00001000 + OBJ_VALID_ATTRIBUTES = 0x00001FF2 +) + +type IO_STATUS_BLOCK struct { + Status NTStatus + Information uintptr +} + +type RTLP_CURDIR_REF struct { + RefCount int32 + Handle Handle +} + +type RTL_RELATIVE_NAME struct { + RelativeName NTUnicodeString + ContainingDirectory Handle + CurDirRef *RTLP_CURDIR_REF +} + +const ( + // CreateDisposition flags for NtCreateFile and NtCreateNamedPipeFile. + FILE_SUPERSEDE = 0x00000000 + FILE_OPEN = 0x00000001 + FILE_CREATE = 0x00000002 + FILE_OPEN_IF = 0x00000003 + FILE_OVERWRITE = 0x00000004 + FILE_OVERWRITE_IF = 0x00000005 + FILE_MAXIMUM_DISPOSITION = 0x00000005 + + // CreateOptions flags for NtCreateFile and NtCreateNamedPipeFile. + FILE_DIRECTORY_FILE = 0x00000001 + FILE_WRITE_THROUGH = 0x00000002 + FILE_SEQUENTIAL_ONLY = 0x00000004 + FILE_NO_INTERMEDIATE_BUFFERING = 0x00000008 + FILE_SYNCHRONOUS_IO_ALERT = 0x00000010 + FILE_SYNCHRONOUS_IO_NONALERT = 0x00000020 + FILE_NON_DIRECTORY_FILE = 0x00000040 + FILE_CREATE_TREE_CONNECTION = 0x00000080 + FILE_COMPLETE_IF_OPLOCKED = 0x00000100 + FILE_NO_EA_KNOWLEDGE = 0x00000200 + FILE_OPEN_REMOTE_INSTANCE = 0x00000400 + FILE_RANDOM_ACCESS = 0x00000800 + FILE_DELETE_ON_CLOSE = 0x00001000 + FILE_OPEN_BY_FILE_ID = 0x00002000 + FILE_OPEN_FOR_BACKUP_INTENT = 0x00004000 + FILE_NO_COMPRESSION = 0x00008000 + FILE_OPEN_REQUIRING_OPLOCK = 0x00010000 + FILE_DISALLOW_EXCLUSIVE = 0x00020000 + FILE_RESERVE_OPFILTER = 0x00100000 + FILE_OPEN_REPARSE_POINT = 0x00200000 + FILE_OPEN_NO_RECALL = 0x00400000 + FILE_OPEN_FOR_FREE_SPACE_QUERY = 0x00800000 + + // Parameter constants for NtCreateNamedPipeFile. 
+ + FILE_PIPE_BYTE_STREAM_TYPE = 0x00000000 + FILE_PIPE_MESSAGE_TYPE = 0x00000001 + + FILE_PIPE_ACCEPT_REMOTE_CLIENTS = 0x00000000 + FILE_PIPE_REJECT_REMOTE_CLIENTS = 0x00000002 + + FILE_PIPE_TYPE_VALID_MASK = 0x00000003 + + FILE_PIPE_BYTE_STREAM_MODE = 0x00000000 + FILE_PIPE_MESSAGE_MODE = 0x00000001 + + FILE_PIPE_QUEUE_OPERATION = 0x00000000 + FILE_PIPE_COMPLETE_OPERATION = 0x00000001 + + FILE_PIPE_INBOUND = 0x00000000 + FILE_PIPE_OUTBOUND = 0x00000001 + FILE_PIPE_FULL_DUPLEX = 0x00000002 + + FILE_PIPE_DISCONNECTED_STATE = 0x00000001 + FILE_PIPE_LISTENING_STATE = 0x00000002 + FILE_PIPE_CONNECTED_STATE = 0x00000003 + FILE_PIPE_CLOSING_STATE = 0x00000004 + + FILE_PIPE_CLIENT_END = 0x00000000 + FILE_PIPE_SERVER_END = 0x00000001 +) + +const ( + // FileInformationClass for NtSetInformationFile + FileBasicInformation = 4 + FileRenameInformation = 10 + FileDispositionInformation = 13 + FilePositionInformation = 14 + FileEndOfFileInformation = 20 + FileValidDataLengthInformation = 39 + FileShortNameInformation = 40 + FileIoPriorityHintInformation = 43 + FileReplaceCompletionInformation = 61 + FileDispositionInformationEx = 64 + FileCaseSensitiveInformation = 71 + FileLinkInformation = 72 + FileCaseSensitiveInformationForceAccessCheck = 75 + FileKnownFolderInformation = 76 + + // Flags for FILE_RENAME_INFORMATION + FILE_RENAME_REPLACE_IF_EXISTS = 0x00000001 + FILE_RENAME_POSIX_SEMANTICS = 0x00000002 + FILE_RENAME_SUPPRESS_PIN_STATE_INHERITANCE = 0x00000004 + FILE_RENAME_SUPPRESS_STORAGE_RESERVE_INHERITANCE = 0x00000008 + FILE_RENAME_NO_INCREASE_AVAILABLE_SPACE = 0x00000010 + FILE_RENAME_NO_DECREASE_AVAILABLE_SPACE = 0x00000020 + FILE_RENAME_PRESERVE_AVAILABLE_SPACE = 0x00000030 + FILE_RENAME_IGNORE_READONLY_ATTRIBUTE = 0x00000040 + FILE_RENAME_FORCE_RESIZE_TARGET_SR = 0x00000080 + FILE_RENAME_FORCE_RESIZE_SOURCE_SR = 0x00000100 + FILE_RENAME_FORCE_RESIZE_SR = 0x00000180 + + // Flags for FILE_DISPOSITION_INFORMATION_EX + FILE_DISPOSITION_DO_NOT_DELETE = 0x00000000 + FILE_DISPOSITION_DELETE = 0x00000001 + FILE_DISPOSITION_POSIX_SEMANTICS = 0x00000002 + FILE_DISPOSITION_FORCE_IMAGE_SECTION_CHECK = 0x00000004 + FILE_DISPOSITION_ON_CLOSE = 0x00000008 + FILE_DISPOSITION_IGNORE_READONLY_ATTRIBUTE = 0x00000010 + + // Flags for FILE_CASE_SENSITIVE_INFORMATION + FILE_CS_FLAG_CASE_SENSITIVE_DIR = 0x00000001 + + // Flags for FILE_LINK_INFORMATION + FILE_LINK_REPLACE_IF_EXISTS = 0x00000001 + FILE_LINK_POSIX_SEMANTICS = 0x00000002 + FILE_LINK_SUPPRESS_STORAGE_RESERVE_INHERITANCE = 0x00000008 + FILE_LINK_NO_INCREASE_AVAILABLE_SPACE = 0x00000010 + FILE_LINK_NO_DECREASE_AVAILABLE_SPACE = 0x00000020 + FILE_LINK_PRESERVE_AVAILABLE_SPACE = 0x00000030 + FILE_LINK_IGNORE_READONLY_ATTRIBUTE = 0x00000040 + FILE_LINK_FORCE_RESIZE_TARGET_SR = 0x00000080 + FILE_LINK_FORCE_RESIZE_SOURCE_SR = 0x00000100 + FILE_LINK_FORCE_RESIZE_SR = 0x00000180 +) + +// ProcessInformationClasses for NtQueryInformationProcess and NtSetInformationProcess. 
+const ( + ProcessBasicInformation = iota + ProcessQuotaLimits + ProcessIoCounters + ProcessVmCounters + ProcessTimes + ProcessBasePriority + ProcessRaisePriority + ProcessDebugPort + ProcessExceptionPort + ProcessAccessToken + ProcessLdtInformation + ProcessLdtSize + ProcessDefaultHardErrorMode + ProcessIoPortHandlers + ProcessPooledUsageAndLimits + ProcessWorkingSetWatch + ProcessUserModeIOPL + ProcessEnableAlignmentFaultFixup + ProcessPriorityClass + ProcessWx86Information + ProcessHandleCount + ProcessAffinityMask + ProcessPriorityBoost + ProcessDeviceMap + ProcessSessionInformation + ProcessForegroundInformation + ProcessWow64Information + ProcessImageFileName + ProcessLUIDDeviceMapsEnabled + ProcessBreakOnTermination + ProcessDebugObjectHandle + ProcessDebugFlags + ProcessHandleTracing + ProcessIoPriority + ProcessExecuteFlags + ProcessTlsInformation + ProcessCookie + ProcessImageInformation + ProcessCycleTime + ProcessPagePriority + ProcessInstrumentationCallback + ProcessThreadStackAllocation + ProcessWorkingSetWatchEx + ProcessImageFileNameWin32 + ProcessImageFileMapping + ProcessAffinityUpdateMode + ProcessMemoryAllocationMode + ProcessGroupInformation + ProcessTokenVirtualizationEnabled + ProcessConsoleHostProcess + ProcessWindowInformation + ProcessHandleInformation + ProcessMitigationPolicy + ProcessDynamicFunctionTableInformation + ProcessHandleCheckingMode + ProcessKeepAliveCount + ProcessRevokeFileHandles + ProcessWorkingSetControl + ProcessHandleTable + ProcessCheckStackExtentsMode + ProcessCommandLineInformation + ProcessProtectionInformation + ProcessMemoryExhaustion + ProcessFaultInformation + ProcessTelemetryIdInformation + ProcessCommitReleaseInformation + ProcessDefaultCpuSetsInformation + ProcessAllowedCpuSetsInformation + ProcessSubsystemProcess + ProcessJobMemoryInformation + ProcessInPrivate + ProcessRaiseUMExceptionOnInvalidHandleClose + ProcessIumChallengeResponse + ProcessChildProcessInformation + ProcessHighGraphicsPriorityInformation + ProcessSubsystemInformation + ProcessEnergyValues + ProcessActivityThrottleState + ProcessActivityThrottlePolicy + ProcessWin32kSyscallFilterInformation + ProcessDisableSystemAllowedCpuSets + ProcessWakeInformation + ProcessEnergyTrackingState + ProcessManageWritesToExecutableMemory + ProcessCaptureTrustletLiveDump + ProcessTelemetryCoverage + ProcessEnclaveInformation + ProcessEnableReadWriteVmLogging + ProcessUptimeInformation + ProcessImageSection + ProcessDebugAuthInformation + ProcessSystemResourceManagement + ProcessSequenceNumber + ProcessLoaderDetour + ProcessSecurityDomainInformation + ProcessCombineSecurityDomainsInformation + ProcessEnableLogging + ProcessLeapSecondInformation + ProcessFiberShadowStackAllocation + ProcessFreeFiberShadowStackAllocation + ProcessAltSystemCallInformation + ProcessDynamicEHContinuationTargets + ProcessDynamicEnforcedCetCompatibleRanges +) + +type PROCESS_BASIC_INFORMATION struct { + ExitStatus NTStatus + PebBaseAddress *PEB + AffinityMask uintptr + BasePriority int32 + UniqueProcessId uintptr + InheritedFromUniqueProcessId uintptr +} + +type SYSTEM_PROCESS_INFORMATION struct { + NextEntryOffset uint32 + NumberOfThreads uint32 + WorkingSetPrivateSize int64 + HardFaultCount uint32 + NumberOfThreadsHighWatermark uint32 + CycleTime uint64 + CreateTime int64 + UserTime int64 + KernelTime int64 + ImageName NTUnicodeString + BasePriority int32 + UniqueProcessID uintptr + InheritedFromUniqueProcessID uintptr + HandleCount uint32 + SessionID uint32 + UniqueProcessKey *uint32 + PeakVirtualSize 
uintptr + VirtualSize uintptr + PageFaultCount uint32 + PeakWorkingSetSize uintptr + WorkingSetSize uintptr + QuotaPeakPagedPoolUsage uintptr + QuotaPagedPoolUsage uintptr + QuotaPeakNonPagedPoolUsage uintptr + QuotaNonPagedPoolUsage uintptr + PagefileUsage uintptr + PeakPagefileUsage uintptr + PrivatePageCount uintptr + ReadOperationCount int64 + WriteOperationCount int64 + OtherOperationCount int64 + ReadTransferCount int64 + WriteTransferCount int64 + OtherTransferCount int64 +} + +// SystemInformationClasses for NtQuerySystemInformation and NtSetSystemInformation +const ( + SystemBasicInformation = iota + SystemProcessorInformation + SystemPerformanceInformation + SystemTimeOfDayInformation + SystemPathInformation + SystemProcessInformation + SystemCallCountInformation + SystemDeviceInformation + SystemProcessorPerformanceInformation + SystemFlagsInformation + SystemCallTimeInformation + SystemModuleInformation + SystemLocksInformation + SystemStackTraceInformation + SystemPagedPoolInformation + SystemNonPagedPoolInformation + SystemHandleInformation + SystemObjectInformation + SystemPageFileInformation + SystemVdmInstemulInformation + SystemVdmBopInformation + SystemFileCacheInformation + SystemPoolTagInformation + SystemInterruptInformation + SystemDpcBehaviorInformation + SystemFullMemoryInformation + SystemLoadGdiDriverInformation + SystemUnloadGdiDriverInformation + SystemTimeAdjustmentInformation + SystemSummaryMemoryInformation + SystemMirrorMemoryInformation + SystemPerformanceTraceInformation + systemObsolete0 + SystemExceptionInformation + SystemCrashDumpStateInformation + SystemKernelDebuggerInformation + SystemContextSwitchInformation + SystemRegistryQuotaInformation + SystemExtendServiceTableInformation + SystemPrioritySeperation + SystemVerifierAddDriverInformation + SystemVerifierRemoveDriverInformation + SystemProcessorIdleInformation + SystemLegacyDriverInformation + SystemCurrentTimeZoneInformation + SystemLookasideInformation + SystemTimeSlipNotification + SystemSessionCreate + SystemSessionDetach + SystemSessionInformation + SystemRangeStartInformation + SystemVerifierInformation + SystemVerifierThunkExtend + SystemSessionProcessInformation + SystemLoadGdiDriverInSystemSpace + SystemNumaProcessorMap + SystemPrefetcherInformation + SystemExtendedProcessInformation + SystemRecommendedSharedDataAlignment + SystemComPlusPackage + SystemNumaAvailableMemory + SystemProcessorPowerInformation + SystemEmulationBasicInformation + SystemEmulationProcessorInformation + SystemExtendedHandleInformation + SystemLostDelayedWriteInformation + SystemBigPoolInformation + SystemSessionPoolTagInformation + SystemSessionMappedViewInformation + SystemHotpatchInformation + SystemObjectSecurityMode + SystemWatchdogTimerHandler + SystemWatchdogTimerInformation + SystemLogicalProcessorInformation + SystemWow64SharedInformationObsolete + SystemRegisterFirmwareTableInformationHandler + SystemFirmwareTableInformation + SystemModuleInformationEx + SystemVerifierTriageInformation + SystemSuperfetchInformation + SystemMemoryListInformation + SystemFileCacheInformationEx + SystemThreadPriorityClientIdInformation + SystemProcessorIdleCycleTimeInformation + SystemVerifierCancellationInformation + SystemProcessorPowerInformationEx + SystemRefTraceInformation + SystemSpecialPoolInformation + SystemProcessIdInformation + SystemErrorPortInformation + SystemBootEnvironmentInformation + SystemHypervisorInformation + SystemVerifierInformationEx + SystemTimeZoneInformation + 
SystemImageFileExecutionOptionsInformation + SystemCoverageInformation + SystemPrefetchPatchInformation + SystemVerifierFaultsInformation + SystemSystemPartitionInformation + SystemSystemDiskInformation + SystemProcessorPerformanceDistribution + SystemNumaProximityNodeInformation + SystemDynamicTimeZoneInformation + SystemCodeIntegrityInformation + SystemProcessorMicrocodeUpdateInformation + SystemProcessorBrandString + SystemVirtualAddressInformation + SystemLogicalProcessorAndGroupInformation + SystemProcessorCycleTimeInformation + SystemStoreInformation + SystemRegistryAppendString + SystemAitSamplingValue + SystemVhdBootInformation + SystemCpuQuotaInformation + SystemNativeBasicInformation + systemSpare1 + SystemLowPriorityIoInformation + SystemTpmBootEntropyInformation + SystemVerifierCountersInformation + SystemPagedPoolInformationEx + SystemSystemPtesInformationEx + SystemNodeDistanceInformation + SystemAcpiAuditInformation + SystemBasicPerformanceInformation + SystemQueryPerformanceCounterInformation + SystemSessionBigPoolInformation + SystemBootGraphicsInformation + SystemScrubPhysicalMemoryInformation + SystemBadPageInformation + SystemProcessorProfileControlArea + SystemCombinePhysicalMemoryInformation + SystemEntropyInterruptTimingCallback + SystemConsoleInformation + SystemPlatformBinaryInformation + SystemThrottleNotificationInformation + SystemHypervisorProcessorCountInformation + SystemDeviceDataInformation + SystemDeviceDataEnumerationInformation + SystemMemoryTopologyInformation + SystemMemoryChannelInformation + SystemBootLogoInformation + SystemProcessorPerformanceInformationEx + systemSpare0 + SystemSecureBootPolicyInformation + SystemPageFileInformationEx + SystemSecureBootInformation + SystemEntropyInterruptTimingRawInformation + SystemPortableWorkspaceEfiLauncherInformation + SystemFullProcessInformation + SystemKernelDebuggerInformationEx + SystemBootMetadataInformation + SystemSoftRebootInformation + SystemElamCertificateInformation + SystemOfflineDumpConfigInformation + SystemProcessorFeaturesInformation + SystemRegistryReconciliationInformation + SystemEdidInformation + SystemManufacturingInformation + SystemEnergyEstimationConfigInformation + SystemHypervisorDetailInformation + SystemProcessorCycleStatsInformation + SystemVmGenerationCountInformation + SystemTrustedPlatformModuleInformation + SystemKernelDebuggerFlags + SystemCodeIntegrityPolicyInformation + SystemIsolatedUserModeInformation + SystemHardwareSecurityTestInterfaceResultsInformation + SystemSingleModuleInformation + SystemAllowedCpuSetsInformation + SystemDmaProtectionInformation + SystemInterruptCpuSetsInformation + SystemSecureBootPolicyFullInformation + SystemCodeIntegrityPolicyFullInformation + SystemAffinitizedInterruptProcessorInformation + SystemRootSiloInformation +) + +type RTL_PROCESS_MODULE_INFORMATION struct { + Section Handle + MappedBase uintptr + ImageBase uintptr + ImageSize uint32 + Flags uint32 + LoadOrderIndex uint16 + InitOrderIndex uint16 + LoadCount uint16 + OffsetToFileName uint16 + FullPathName [256]byte +} + +type RTL_PROCESS_MODULES struct { + NumberOfModules uint32 + Modules [1]RTL_PROCESS_MODULE_INFORMATION +} + +// Constants for LocalAlloc flags. 
+const ( + LMEM_FIXED = 0x0 + LMEM_MOVEABLE = 0x2 + LMEM_NOCOMPACT = 0x10 + LMEM_NODISCARD = 0x20 + LMEM_ZEROINIT = 0x40 + LMEM_MODIFY = 0x80 + LMEM_DISCARDABLE = 0xf00 + LMEM_VALID_FLAGS = 0xf72 + LMEM_INVALID_HANDLE = 0x8000 + LHND = LMEM_MOVEABLE | LMEM_ZEROINIT + LPTR = LMEM_FIXED | LMEM_ZEROINIT + NONZEROLHND = LMEM_MOVEABLE + NONZEROLPTR = LMEM_FIXED +) + +// Constants for the CreateNamedPipe-family of functions. +const ( + PIPE_ACCESS_INBOUND = 0x1 + PIPE_ACCESS_OUTBOUND = 0x2 + PIPE_ACCESS_DUPLEX = 0x3 + + PIPE_CLIENT_END = 0x0 + PIPE_SERVER_END = 0x1 + + PIPE_WAIT = 0x0 + PIPE_NOWAIT = 0x1 + PIPE_READMODE_BYTE = 0x0 + PIPE_READMODE_MESSAGE = 0x2 + PIPE_TYPE_BYTE = 0x0 + PIPE_TYPE_MESSAGE = 0x4 + PIPE_ACCEPT_REMOTE_CLIENTS = 0x0 + PIPE_REJECT_REMOTE_CLIENTS = 0x8 + + PIPE_UNLIMITED_INSTANCES = 255 +) + +// Constants for security attributes when opening named pipes. +const ( + SECURITY_ANONYMOUS = SecurityAnonymous << 16 + SECURITY_IDENTIFICATION = SecurityIdentification << 16 + SECURITY_IMPERSONATION = SecurityImpersonation << 16 + SECURITY_DELEGATION = SecurityDelegation << 16 + + SECURITY_CONTEXT_TRACKING = 0x40000 + SECURITY_EFFECTIVE_ONLY = 0x80000 + + SECURITY_SQOS_PRESENT = 0x100000 + SECURITY_VALID_SQOS_FLAGS = 0x1f0000 +) + +// ResourceID represents a 16-bit resource identifier, traditionally created with the MAKEINTRESOURCE macro. +type ResourceID uint16 + +// ResourceIDOrString must be either a ResourceID, to specify a resource or resource type by ID, +// or a string, to specify a resource or resource type by name. +type ResourceIDOrString interface{} + +// Predefined resource names and types. +var ( + // Predefined names. + CREATEPROCESS_MANIFEST_RESOURCE_ID ResourceID = 1 + ISOLATIONAWARE_MANIFEST_RESOURCE_ID ResourceID = 2 + ISOLATIONAWARE_NOSTATICIMPORT_MANIFEST_RESOURCE_ID ResourceID = 3 + ISOLATIONPOLICY_MANIFEST_RESOURCE_ID ResourceID = 4 + ISOLATIONPOLICY_BROWSER_MANIFEST_RESOURCE_ID ResourceID = 5 + MINIMUM_RESERVED_MANIFEST_RESOURCE_ID ResourceID = 1 // inclusive + MAXIMUM_RESERVED_MANIFEST_RESOURCE_ID ResourceID = 16 // inclusive + + // Predefined types. 
+ RT_CURSOR ResourceID = 1 + RT_BITMAP ResourceID = 2 + RT_ICON ResourceID = 3 + RT_MENU ResourceID = 4 + RT_DIALOG ResourceID = 5 + RT_STRING ResourceID = 6 + RT_FONTDIR ResourceID = 7 + RT_FONT ResourceID = 8 + RT_ACCELERATOR ResourceID = 9 + RT_RCDATA ResourceID = 10 + RT_MESSAGETABLE ResourceID = 11 + RT_GROUP_CURSOR ResourceID = 12 + RT_GROUP_ICON ResourceID = 14 + RT_VERSION ResourceID = 16 + RT_DLGINCLUDE ResourceID = 17 + RT_PLUGPLAY ResourceID = 19 + RT_VXD ResourceID = 20 + RT_ANICURSOR ResourceID = 21 + RT_ANIICON ResourceID = 22 + RT_HTML ResourceID = 23 + RT_MANIFEST ResourceID = 24 +) + +type VS_FIXEDFILEINFO struct { + Signature uint32 + StrucVersion uint32 + FileVersionMS uint32 + FileVersionLS uint32 + ProductVersionMS uint32 + ProductVersionLS uint32 + FileFlagsMask uint32 + FileFlags uint32 + FileOS uint32 + FileType uint32 + FileSubtype uint32 + FileDateMS uint32 + FileDateLS uint32 +} + +type COAUTHIDENTITY struct { + User *uint16 + UserLength uint32 + Domain *uint16 + DomainLength uint32 + Password *uint16 + PasswordLength uint32 + Flags uint32 +} + +type COAUTHINFO struct { + AuthnSvc uint32 + AuthzSvc uint32 + ServerPrincName *uint16 + AuthnLevel uint32 + ImpersonationLevel uint32 + AuthIdentityData *COAUTHIDENTITY + Capabilities uint32 +} + +type COSERVERINFO struct { + Reserved1 uint32 + Aame *uint16 + AuthInfo *COAUTHINFO + Reserved2 uint32 +} + +type BIND_OPTS3 struct { + CbStruct uint32 + Flags uint32 + Mode uint32 + TickCountDeadline uint32 + TrackFlags uint32 + ClassContext uint32 + Locale uint32 + ServerInfo *COSERVERINFO + Hwnd HWND +} + +const ( + CLSCTX_INPROC_SERVER = 0x1 + CLSCTX_INPROC_HANDLER = 0x2 + CLSCTX_LOCAL_SERVER = 0x4 + CLSCTX_INPROC_SERVER16 = 0x8 + CLSCTX_REMOTE_SERVER = 0x10 + CLSCTX_INPROC_HANDLER16 = 0x20 + CLSCTX_RESERVED1 = 0x40 + CLSCTX_RESERVED2 = 0x80 + CLSCTX_RESERVED3 = 0x100 + CLSCTX_RESERVED4 = 0x200 + CLSCTX_NO_CODE_DOWNLOAD = 0x400 + CLSCTX_RESERVED5 = 0x800 + CLSCTX_NO_CUSTOM_MARSHAL = 0x1000 + CLSCTX_ENABLE_CODE_DOWNLOAD = 0x2000 + CLSCTX_NO_FAILURE_LOG = 0x4000 + CLSCTX_DISABLE_AAA = 0x8000 + CLSCTX_ENABLE_AAA = 0x10000 + CLSCTX_FROM_DEFAULT_CONTEXT = 0x20000 + CLSCTX_ACTIVATE_32_BIT_SERVER = 0x40000 + CLSCTX_ACTIVATE_64_BIT_SERVER = 0x80000 + CLSCTX_ENABLE_CLOAKING = 0x100000 + CLSCTX_APPCONTAINER = 0x400000 + CLSCTX_ACTIVATE_AAA_AS_IU = 0x800000 + CLSCTX_PS_DLL = 0x80000000 + + COINIT_MULTITHREADED = 0x0 + COINIT_APARTMENTTHREADED = 0x2 + COINIT_DISABLE_OLE1DDE = 0x4 + COINIT_SPEED_OVER_MEMORY = 0x8 +) + +// Flag for QueryFullProcessImageName. 
+const PROCESS_NAME_NATIVE = 1 + +type ModuleInfo struct { + BaseOfDll uintptr + SizeOfImage uint32 + EntryPoint uintptr +} + +const ALL_PROCESSOR_GROUPS = 0xFFFF + +type Rect struct { + Left int32 + Top int32 + Right int32 + Bottom int32 +} + +type GUIThreadInfo struct { + Size uint32 + Flags uint32 + Active HWND + Focus HWND + Capture HWND + MenuOwner HWND + MoveSize HWND + CaretHandle HWND + CaretRect Rect +} + +const ( + DWMWA_NCRENDERING_ENABLED = 1 + DWMWA_NCRENDERING_POLICY = 2 + DWMWA_TRANSITIONS_FORCEDISABLED = 3 + DWMWA_ALLOW_NCPAINT = 4 + DWMWA_CAPTION_BUTTON_BOUNDS = 5 + DWMWA_NONCLIENT_RTL_LAYOUT = 6 + DWMWA_FORCE_ICONIC_REPRESENTATION = 7 + DWMWA_FLIP3D_POLICY = 8 + DWMWA_EXTENDED_FRAME_BOUNDS = 9 + DWMWA_HAS_ICONIC_BITMAP = 10 + DWMWA_DISALLOW_PEEK = 11 + DWMWA_EXCLUDED_FROM_PEEK = 12 + DWMWA_CLOAK = 13 + DWMWA_CLOAKED = 14 + DWMWA_FREEZE_REPRESENTATION = 15 + DWMWA_PASSIVE_UPDATE_MODE = 16 + DWMWA_USE_HOSTBACKDROPBRUSH = 17 + DWMWA_USE_IMMERSIVE_DARK_MODE = 20 + DWMWA_WINDOW_CORNER_PREFERENCE = 33 + DWMWA_BORDER_COLOR = 34 + DWMWA_CAPTION_COLOR = 35 + DWMWA_TEXT_COLOR = 36 + DWMWA_VISIBLE_FRAME_BORDER_THICKNESS = 37 +) + +type WSAQUERYSET struct { + Size uint32 + ServiceInstanceName *uint16 + ServiceClassId *GUID + Version *WSAVersion + Comment *uint16 + NameSpace uint32 + NSProviderId *GUID + Context *uint16 + NumberOfProtocols uint32 + AfpProtocols *AFProtocols + QueryString *uint16 + NumberOfCsAddrs uint32 + SaBuffer *CSAddrInfo + OutputFlags uint32 + Blob *BLOB +} + +type WSAVersion struct { + Version uint32 + EnumerationOfComparison int32 +} + +type AFProtocols struct { + AddressFamily int32 + Protocol int32 +} + +type CSAddrInfo struct { + LocalAddr SocketAddress + RemoteAddr SocketAddress + SocketType int32 + Protocol int32 +} + +type BLOB struct { + Size uint32 + BlobData *byte +} diff --git a/vendor/golang.org/x/sys/windows/types_windows_386.go b/vendor/golang.org/x/sys/windows/types_windows_386.go new file mode 100644 index 000000000..8bce3e2fc --- /dev/null +++ b/vendor/golang.org/x/sys/windows/types_windows_386.go @@ -0,0 +1,35 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +type WSAData struct { + Version uint16 + HighVersion uint16 + Description [WSADESCRIPTION_LEN + 1]byte + SystemStatus [WSASYS_STATUS_LEN + 1]byte + MaxSockets uint16 + MaxUdpDg uint16 + VendorInfo *byte +} + +type Servent struct { + Name *byte + Aliases **byte + Port uint16 + Proto *byte +} + +type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { + PerProcessUserTimeLimit int64 + PerJobUserTimeLimit int64 + LimitFlags uint32 + MinimumWorkingSetSize uintptr + MaximumWorkingSetSize uintptr + ActiveProcessLimit uint32 + Affinity uintptr + PriorityClass uint32 + SchedulingClass uint32 + _ uint32 // pad to 8 byte boundary +} diff --git a/vendor/golang.org/x/sys/windows/types_windows_amd64.go b/vendor/golang.org/x/sys/windows/types_windows_amd64.go new file mode 100644 index 000000000..fdddc0c70 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/types_windows_amd64.go @@ -0,0 +1,34 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package windows + +type WSAData struct { + Version uint16 + HighVersion uint16 + MaxSockets uint16 + MaxUdpDg uint16 + VendorInfo *byte + Description [WSADESCRIPTION_LEN + 1]byte + SystemStatus [WSASYS_STATUS_LEN + 1]byte +} + +type Servent struct { + Name *byte + Aliases **byte + Proto *byte + Port uint16 +} + +type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { + PerProcessUserTimeLimit int64 + PerJobUserTimeLimit int64 + LimitFlags uint32 + MinimumWorkingSetSize uintptr + MaximumWorkingSetSize uintptr + ActiveProcessLimit uint32 + Affinity uintptr + PriorityClass uint32 + SchedulingClass uint32 +} diff --git a/vendor/golang.org/x/sys/windows/types_windows_arm.go b/vendor/golang.org/x/sys/windows/types_windows_arm.go new file mode 100644 index 000000000..321872c3e --- /dev/null +++ b/vendor/golang.org/x/sys/windows/types_windows_arm.go @@ -0,0 +1,35 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +type WSAData struct { + Version uint16 + HighVersion uint16 + Description [WSADESCRIPTION_LEN + 1]byte + SystemStatus [WSASYS_STATUS_LEN + 1]byte + MaxSockets uint16 + MaxUdpDg uint16 + VendorInfo *byte +} + +type Servent struct { + Name *byte + Aliases **byte + Port uint16 + Proto *byte +} + +type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { + PerProcessUserTimeLimit int64 + PerJobUserTimeLimit int64 + LimitFlags uint32 + MinimumWorkingSetSize uintptr + MaximumWorkingSetSize uintptr + ActiveProcessLimit uint32 + Affinity uintptr + PriorityClass uint32 + SchedulingClass uint32 + _ uint32 // pad to 8 byte boundary +} diff --git a/vendor/golang.org/x/sys/windows/types_windows_arm64.go b/vendor/golang.org/x/sys/windows/types_windows_arm64.go new file mode 100644 index 000000000..fdddc0c70 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/types_windows_arm64.go @@ -0,0 +1,34 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +type WSAData struct { + Version uint16 + HighVersion uint16 + MaxSockets uint16 + MaxUdpDg uint16 + VendorInfo *byte + Description [WSADESCRIPTION_LEN + 1]byte + SystemStatus [WSASYS_STATUS_LEN + 1]byte +} + +type Servent struct { + Name *byte + Aliases **byte + Proto *byte + Port uint16 +} + +type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { + PerProcessUserTimeLimit int64 + PerJobUserTimeLimit int64 + LimitFlags uint32 + MinimumWorkingSetSize uintptr + MaximumWorkingSetSize uintptr + ActiveProcessLimit uint32 + Affinity uintptr + PriorityClass uint32 + SchedulingClass uint32 +} diff --git a/vendor/golang.org/x/sys/windows/zerrors_windows.go b/vendor/golang.org/x/sys/windows/zerrors_windows.go new file mode 100644 index 000000000..0cf658fbd --- /dev/null +++ b/vendor/golang.org/x/sys/windows/zerrors_windows.go @@ -0,0 +1,9468 @@ +// Code generated by 'mkerrors.bash'; DO NOT EDIT. 
+ +package windows + +import "syscall" + +const ( + FACILITY_NULL = 0 + FACILITY_RPC = 1 + FACILITY_DISPATCH = 2 + FACILITY_STORAGE = 3 + FACILITY_ITF = 4 + FACILITY_WIN32 = 7 + FACILITY_WINDOWS = 8 + FACILITY_SSPI = 9 + FACILITY_SECURITY = 9 + FACILITY_CONTROL = 10 + FACILITY_CERT = 11 + FACILITY_INTERNET = 12 + FACILITY_MEDIASERVER = 13 + FACILITY_MSMQ = 14 + FACILITY_SETUPAPI = 15 + FACILITY_SCARD = 16 + FACILITY_COMPLUS = 17 + FACILITY_AAF = 18 + FACILITY_URT = 19 + FACILITY_ACS = 20 + FACILITY_DPLAY = 21 + FACILITY_UMI = 22 + FACILITY_SXS = 23 + FACILITY_WINDOWS_CE = 24 + FACILITY_HTTP = 25 + FACILITY_USERMODE_COMMONLOG = 26 + FACILITY_WER = 27 + FACILITY_USERMODE_FILTER_MANAGER = 31 + FACILITY_BACKGROUNDCOPY = 32 + FACILITY_CONFIGURATION = 33 + FACILITY_WIA = 33 + FACILITY_STATE_MANAGEMENT = 34 + FACILITY_METADIRECTORY = 35 + FACILITY_WINDOWSUPDATE = 36 + FACILITY_DIRECTORYSERVICE = 37 + FACILITY_GRAPHICS = 38 + FACILITY_SHELL = 39 + FACILITY_NAP = 39 + FACILITY_TPM_SERVICES = 40 + FACILITY_TPM_SOFTWARE = 41 + FACILITY_UI = 42 + FACILITY_XAML = 43 + FACILITY_ACTION_QUEUE = 44 + FACILITY_PLA = 48 + FACILITY_WINDOWS_SETUP = 48 + FACILITY_FVE = 49 + FACILITY_FWP = 50 + FACILITY_WINRM = 51 + FACILITY_NDIS = 52 + FACILITY_USERMODE_HYPERVISOR = 53 + FACILITY_CMI = 54 + FACILITY_USERMODE_VIRTUALIZATION = 55 + FACILITY_USERMODE_VOLMGR = 56 + FACILITY_BCD = 57 + FACILITY_USERMODE_VHD = 58 + FACILITY_USERMODE_HNS = 59 + FACILITY_SDIAG = 60 + FACILITY_WEBSERVICES = 61 + FACILITY_WINPE = 61 + FACILITY_WPN = 62 + FACILITY_WINDOWS_STORE = 63 + FACILITY_INPUT = 64 + FACILITY_EAP = 66 + FACILITY_WINDOWS_DEFENDER = 80 + FACILITY_OPC = 81 + FACILITY_XPS = 82 + FACILITY_MBN = 84 + FACILITY_POWERSHELL = 84 + FACILITY_RAS = 83 + FACILITY_P2P_INT = 98 + FACILITY_P2P = 99 + FACILITY_DAF = 100 + FACILITY_BLUETOOTH_ATT = 101 + FACILITY_AUDIO = 102 + FACILITY_STATEREPOSITORY = 103 + FACILITY_VISUALCPP = 109 + FACILITY_SCRIPT = 112 + FACILITY_PARSE = 113 + FACILITY_BLB = 120 + FACILITY_BLB_CLI = 121 + FACILITY_WSBAPP = 122 + FACILITY_BLBUI = 128 + FACILITY_USN = 129 + FACILITY_USERMODE_VOLSNAP = 130 + FACILITY_TIERING = 131 + FACILITY_WSB_ONLINE = 133 + FACILITY_ONLINE_ID = 134 + FACILITY_DEVICE_UPDATE_AGENT = 135 + FACILITY_DRVSERVICING = 136 + FACILITY_DLS = 153 + FACILITY_DELIVERY_OPTIMIZATION = 208 + FACILITY_USERMODE_SPACES = 231 + FACILITY_USER_MODE_SECURITY_CORE = 232 + FACILITY_USERMODE_LICENSING = 234 + FACILITY_SOS = 160 + FACILITY_DEBUGGERS = 176 + FACILITY_SPP = 256 + FACILITY_RESTORE = 256 + FACILITY_DMSERVER = 256 + FACILITY_DEPLOYMENT_SERVICES_SERVER = 257 + FACILITY_DEPLOYMENT_SERVICES_IMAGING = 258 + FACILITY_DEPLOYMENT_SERVICES_MANAGEMENT = 259 + FACILITY_DEPLOYMENT_SERVICES_UTIL = 260 + FACILITY_DEPLOYMENT_SERVICES_BINLSVC = 261 + FACILITY_DEPLOYMENT_SERVICES_PXE = 263 + FACILITY_DEPLOYMENT_SERVICES_TFTP = 264 + FACILITY_DEPLOYMENT_SERVICES_TRANSPORT_MANAGEMENT = 272 + FACILITY_DEPLOYMENT_SERVICES_DRIVER_PROVISIONING = 278 + FACILITY_DEPLOYMENT_SERVICES_MULTICAST_SERVER = 289 + FACILITY_DEPLOYMENT_SERVICES_MULTICAST_CLIENT = 290 + FACILITY_DEPLOYMENT_SERVICES_CONTENT_PROVIDER = 293 + FACILITY_LINGUISTIC_SERVICES = 305 + FACILITY_AUDIOSTREAMING = 1094 + FACILITY_ACCELERATOR = 1536 + FACILITY_WMAAECMA = 1996 + FACILITY_DIRECTMUSIC = 2168 + FACILITY_DIRECT3D10 = 2169 + FACILITY_DXGI = 2170 + FACILITY_DXGI_DDI = 2171 + FACILITY_DIRECT3D11 = 2172 + FACILITY_DIRECT3D11_DEBUG = 2173 + FACILITY_DIRECT3D12 = 2174 + FACILITY_DIRECT3D12_DEBUG = 2175 + FACILITY_LEAP = 2184 + FACILITY_AUDCLNT = 2185 + 
FACILITY_WINCODEC_DWRITE_DWM = 2200 + FACILITY_WINML = 2192 + FACILITY_DIRECT2D = 2201 + FACILITY_DEFRAG = 2304 + FACILITY_USERMODE_SDBUS = 2305 + FACILITY_JSCRIPT = 2306 + FACILITY_PIDGENX = 2561 + FACILITY_EAS = 85 + FACILITY_WEB = 885 + FACILITY_WEB_SOCKET = 886 + FACILITY_MOBILE = 1793 + FACILITY_SQLITE = 1967 + FACILITY_UTC = 1989 + FACILITY_WEP = 2049 + FACILITY_SYNCENGINE = 2050 + FACILITY_XBOX = 2339 + FACILITY_GAME = 2340 + FACILITY_PIX = 2748 + ERROR_SUCCESS syscall.Errno = 0 + NO_ERROR = 0 + SEC_E_OK Handle = 0x00000000 + ERROR_INVALID_FUNCTION syscall.Errno = 1 + ERROR_FILE_NOT_FOUND syscall.Errno = 2 + ERROR_PATH_NOT_FOUND syscall.Errno = 3 + ERROR_TOO_MANY_OPEN_FILES syscall.Errno = 4 + ERROR_ACCESS_DENIED syscall.Errno = 5 + ERROR_INVALID_HANDLE syscall.Errno = 6 + ERROR_ARENA_TRASHED syscall.Errno = 7 + ERROR_NOT_ENOUGH_MEMORY syscall.Errno = 8 + ERROR_INVALID_BLOCK syscall.Errno = 9 + ERROR_BAD_ENVIRONMENT syscall.Errno = 10 + ERROR_BAD_FORMAT syscall.Errno = 11 + ERROR_INVALID_ACCESS syscall.Errno = 12 + ERROR_INVALID_DATA syscall.Errno = 13 + ERROR_OUTOFMEMORY syscall.Errno = 14 + ERROR_INVALID_DRIVE syscall.Errno = 15 + ERROR_CURRENT_DIRECTORY syscall.Errno = 16 + ERROR_NOT_SAME_DEVICE syscall.Errno = 17 + ERROR_NO_MORE_FILES syscall.Errno = 18 + ERROR_WRITE_PROTECT syscall.Errno = 19 + ERROR_BAD_UNIT syscall.Errno = 20 + ERROR_NOT_READY syscall.Errno = 21 + ERROR_BAD_COMMAND syscall.Errno = 22 + ERROR_CRC syscall.Errno = 23 + ERROR_BAD_LENGTH syscall.Errno = 24 + ERROR_SEEK syscall.Errno = 25 + ERROR_NOT_DOS_DISK syscall.Errno = 26 + ERROR_SECTOR_NOT_FOUND syscall.Errno = 27 + ERROR_OUT_OF_PAPER syscall.Errno = 28 + ERROR_WRITE_FAULT syscall.Errno = 29 + ERROR_READ_FAULT syscall.Errno = 30 + ERROR_GEN_FAILURE syscall.Errno = 31 + ERROR_SHARING_VIOLATION syscall.Errno = 32 + ERROR_LOCK_VIOLATION syscall.Errno = 33 + ERROR_WRONG_DISK syscall.Errno = 34 + ERROR_SHARING_BUFFER_EXCEEDED syscall.Errno = 36 + ERROR_HANDLE_EOF syscall.Errno = 38 + ERROR_HANDLE_DISK_FULL syscall.Errno = 39 + ERROR_NOT_SUPPORTED syscall.Errno = 50 + ERROR_REM_NOT_LIST syscall.Errno = 51 + ERROR_DUP_NAME syscall.Errno = 52 + ERROR_BAD_NETPATH syscall.Errno = 53 + ERROR_NETWORK_BUSY syscall.Errno = 54 + ERROR_DEV_NOT_EXIST syscall.Errno = 55 + ERROR_TOO_MANY_CMDS syscall.Errno = 56 + ERROR_ADAP_HDW_ERR syscall.Errno = 57 + ERROR_BAD_NET_RESP syscall.Errno = 58 + ERROR_UNEXP_NET_ERR syscall.Errno = 59 + ERROR_BAD_REM_ADAP syscall.Errno = 60 + ERROR_PRINTQ_FULL syscall.Errno = 61 + ERROR_NO_SPOOL_SPACE syscall.Errno = 62 + ERROR_PRINT_CANCELLED syscall.Errno = 63 + ERROR_NETNAME_DELETED syscall.Errno = 64 + ERROR_NETWORK_ACCESS_DENIED syscall.Errno = 65 + ERROR_BAD_DEV_TYPE syscall.Errno = 66 + ERROR_BAD_NET_NAME syscall.Errno = 67 + ERROR_TOO_MANY_NAMES syscall.Errno = 68 + ERROR_TOO_MANY_SESS syscall.Errno = 69 + ERROR_SHARING_PAUSED syscall.Errno = 70 + ERROR_REQ_NOT_ACCEP syscall.Errno = 71 + ERROR_REDIR_PAUSED syscall.Errno = 72 + ERROR_FILE_EXISTS syscall.Errno = 80 + ERROR_CANNOT_MAKE syscall.Errno = 82 + ERROR_FAIL_I24 syscall.Errno = 83 + ERROR_OUT_OF_STRUCTURES syscall.Errno = 84 + ERROR_ALREADY_ASSIGNED syscall.Errno = 85 + ERROR_INVALID_PASSWORD syscall.Errno = 86 + ERROR_INVALID_PARAMETER syscall.Errno = 87 + ERROR_NET_WRITE_FAULT syscall.Errno = 88 + ERROR_NO_PROC_SLOTS syscall.Errno = 89 + ERROR_TOO_MANY_SEMAPHORES syscall.Errno = 100 + ERROR_EXCL_SEM_ALREADY_OWNED syscall.Errno = 101 + ERROR_SEM_IS_SET syscall.Errno = 102 + ERROR_TOO_MANY_SEM_REQUESTS syscall.Errno = 103 + 
ERROR_INVALID_AT_INTERRUPT_TIME syscall.Errno = 104 + ERROR_SEM_OWNER_DIED syscall.Errno = 105 + ERROR_SEM_USER_LIMIT syscall.Errno = 106 + ERROR_DISK_CHANGE syscall.Errno = 107 + ERROR_DRIVE_LOCKED syscall.Errno = 108 + ERROR_BROKEN_PIPE syscall.Errno = 109 + ERROR_OPEN_FAILED syscall.Errno = 110 + ERROR_BUFFER_OVERFLOW syscall.Errno = 111 + ERROR_DISK_FULL syscall.Errno = 112 + ERROR_NO_MORE_SEARCH_HANDLES syscall.Errno = 113 + ERROR_INVALID_TARGET_HANDLE syscall.Errno = 114 + ERROR_INVALID_CATEGORY syscall.Errno = 117 + ERROR_INVALID_VERIFY_SWITCH syscall.Errno = 118 + ERROR_BAD_DRIVER_LEVEL syscall.Errno = 119 + ERROR_CALL_NOT_IMPLEMENTED syscall.Errno = 120 + ERROR_SEM_TIMEOUT syscall.Errno = 121 + ERROR_INSUFFICIENT_BUFFER syscall.Errno = 122 + ERROR_INVALID_NAME syscall.Errno = 123 + ERROR_INVALID_LEVEL syscall.Errno = 124 + ERROR_NO_VOLUME_LABEL syscall.Errno = 125 + ERROR_MOD_NOT_FOUND syscall.Errno = 126 + ERROR_PROC_NOT_FOUND syscall.Errno = 127 + ERROR_WAIT_NO_CHILDREN syscall.Errno = 128 + ERROR_CHILD_NOT_COMPLETE syscall.Errno = 129 + ERROR_DIRECT_ACCESS_HANDLE syscall.Errno = 130 + ERROR_NEGATIVE_SEEK syscall.Errno = 131 + ERROR_SEEK_ON_DEVICE syscall.Errno = 132 + ERROR_IS_JOIN_TARGET syscall.Errno = 133 + ERROR_IS_JOINED syscall.Errno = 134 + ERROR_IS_SUBSTED syscall.Errno = 135 + ERROR_NOT_JOINED syscall.Errno = 136 + ERROR_NOT_SUBSTED syscall.Errno = 137 + ERROR_JOIN_TO_JOIN syscall.Errno = 138 + ERROR_SUBST_TO_SUBST syscall.Errno = 139 + ERROR_JOIN_TO_SUBST syscall.Errno = 140 + ERROR_SUBST_TO_JOIN syscall.Errno = 141 + ERROR_BUSY_DRIVE syscall.Errno = 142 + ERROR_SAME_DRIVE syscall.Errno = 143 + ERROR_DIR_NOT_ROOT syscall.Errno = 144 + ERROR_DIR_NOT_EMPTY syscall.Errno = 145 + ERROR_IS_SUBST_PATH syscall.Errno = 146 + ERROR_IS_JOIN_PATH syscall.Errno = 147 + ERROR_PATH_BUSY syscall.Errno = 148 + ERROR_IS_SUBST_TARGET syscall.Errno = 149 + ERROR_SYSTEM_TRACE syscall.Errno = 150 + ERROR_INVALID_EVENT_COUNT syscall.Errno = 151 + ERROR_TOO_MANY_MUXWAITERS syscall.Errno = 152 + ERROR_INVALID_LIST_FORMAT syscall.Errno = 153 + ERROR_LABEL_TOO_LONG syscall.Errno = 154 + ERROR_TOO_MANY_TCBS syscall.Errno = 155 + ERROR_SIGNAL_REFUSED syscall.Errno = 156 + ERROR_DISCARDED syscall.Errno = 157 + ERROR_NOT_LOCKED syscall.Errno = 158 + ERROR_BAD_THREADID_ADDR syscall.Errno = 159 + ERROR_BAD_ARGUMENTS syscall.Errno = 160 + ERROR_BAD_PATHNAME syscall.Errno = 161 + ERROR_SIGNAL_PENDING syscall.Errno = 162 + ERROR_MAX_THRDS_REACHED syscall.Errno = 164 + ERROR_LOCK_FAILED syscall.Errno = 167 + ERROR_BUSY syscall.Errno = 170 + ERROR_DEVICE_SUPPORT_IN_PROGRESS syscall.Errno = 171 + ERROR_CANCEL_VIOLATION syscall.Errno = 173 + ERROR_ATOMIC_LOCKS_NOT_SUPPORTED syscall.Errno = 174 + ERROR_INVALID_SEGMENT_NUMBER syscall.Errno = 180 + ERROR_INVALID_ORDINAL syscall.Errno = 182 + ERROR_ALREADY_EXISTS syscall.Errno = 183 + ERROR_INVALID_FLAG_NUMBER syscall.Errno = 186 + ERROR_SEM_NOT_FOUND syscall.Errno = 187 + ERROR_INVALID_STARTING_CODESEG syscall.Errno = 188 + ERROR_INVALID_STACKSEG syscall.Errno = 189 + ERROR_INVALID_MODULETYPE syscall.Errno = 190 + ERROR_INVALID_EXE_SIGNATURE syscall.Errno = 191 + ERROR_EXE_MARKED_INVALID syscall.Errno = 192 + ERROR_BAD_EXE_FORMAT syscall.Errno = 193 + ERROR_ITERATED_DATA_EXCEEDS_64k syscall.Errno = 194 + ERROR_INVALID_MINALLOCSIZE syscall.Errno = 195 + ERROR_DYNLINK_FROM_INVALID_RING syscall.Errno = 196 + ERROR_IOPL_NOT_ENABLED syscall.Errno = 197 + ERROR_INVALID_SEGDPL syscall.Errno = 198 + ERROR_AUTODATASEG_EXCEEDS_64k syscall.Errno = 199 + 
ERROR_RING2SEG_MUST_BE_MOVABLE syscall.Errno = 200 + ERROR_RELOC_CHAIN_XEEDS_SEGLIM syscall.Errno = 201 + ERROR_INFLOOP_IN_RELOC_CHAIN syscall.Errno = 202 + ERROR_ENVVAR_NOT_FOUND syscall.Errno = 203 + ERROR_NO_SIGNAL_SENT syscall.Errno = 205 + ERROR_FILENAME_EXCED_RANGE syscall.Errno = 206 + ERROR_RING2_STACK_IN_USE syscall.Errno = 207 + ERROR_META_EXPANSION_TOO_LONG syscall.Errno = 208 + ERROR_INVALID_SIGNAL_NUMBER syscall.Errno = 209 + ERROR_THREAD_1_INACTIVE syscall.Errno = 210 + ERROR_LOCKED syscall.Errno = 212 + ERROR_TOO_MANY_MODULES syscall.Errno = 214 + ERROR_NESTING_NOT_ALLOWED syscall.Errno = 215 + ERROR_EXE_MACHINE_TYPE_MISMATCH syscall.Errno = 216 + ERROR_EXE_CANNOT_MODIFY_SIGNED_BINARY syscall.Errno = 217 + ERROR_EXE_CANNOT_MODIFY_STRONG_SIGNED_BINARY syscall.Errno = 218 + ERROR_FILE_CHECKED_OUT syscall.Errno = 220 + ERROR_CHECKOUT_REQUIRED syscall.Errno = 221 + ERROR_BAD_FILE_TYPE syscall.Errno = 222 + ERROR_FILE_TOO_LARGE syscall.Errno = 223 + ERROR_FORMS_AUTH_REQUIRED syscall.Errno = 224 + ERROR_VIRUS_INFECTED syscall.Errno = 225 + ERROR_VIRUS_DELETED syscall.Errno = 226 + ERROR_PIPE_LOCAL syscall.Errno = 229 + ERROR_BAD_PIPE syscall.Errno = 230 + ERROR_PIPE_BUSY syscall.Errno = 231 + ERROR_NO_DATA syscall.Errno = 232 + ERROR_PIPE_NOT_CONNECTED syscall.Errno = 233 + ERROR_MORE_DATA syscall.Errno = 234 + ERROR_NO_WORK_DONE syscall.Errno = 235 + ERROR_VC_DISCONNECTED syscall.Errno = 240 + ERROR_INVALID_EA_NAME syscall.Errno = 254 + ERROR_EA_LIST_INCONSISTENT syscall.Errno = 255 + WAIT_TIMEOUT syscall.Errno = 258 + ERROR_NO_MORE_ITEMS syscall.Errno = 259 + ERROR_CANNOT_COPY syscall.Errno = 266 + ERROR_DIRECTORY syscall.Errno = 267 + ERROR_EAS_DIDNT_FIT syscall.Errno = 275 + ERROR_EA_FILE_CORRUPT syscall.Errno = 276 + ERROR_EA_TABLE_FULL syscall.Errno = 277 + ERROR_INVALID_EA_HANDLE syscall.Errno = 278 + ERROR_EAS_NOT_SUPPORTED syscall.Errno = 282 + ERROR_NOT_OWNER syscall.Errno = 288 + ERROR_TOO_MANY_POSTS syscall.Errno = 298 + ERROR_PARTIAL_COPY syscall.Errno = 299 + ERROR_OPLOCK_NOT_GRANTED syscall.Errno = 300 + ERROR_INVALID_OPLOCK_PROTOCOL syscall.Errno = 301 + ERROR_DISK_TOO_FRAGMENTED syscall.Errno = 302 + ERROR_DELETE_PENDING syscall.Errno = 303 + ERROR_INCOMPATIBLE_WITH_GLOBAL_SHORT_NAME_REGISTRY_SETTING syscall.Errno = 304 + ERROR_SHORT_NAMES_NOT_ENABLED_ON_VOLUME syscall.Errno = 305 + ERROR_SECURITY_STREAM_IS_INCONSISTENT syscall.Errno = 306 + ERROR_INVALID_LOCK_RANGE syscall.Errno = 307 + ERROR_IMAGE_SUBSYSTEM_NOT_PRESENT syscall.Errno = 308 + ERROR_NOTIFICATION_GUID_ALREADY_DEFINED syscall.Errno = 309 + ERROR_INVALID_EXCEPTION_HANDLER syscall.Errno = 310 + ERROR_DUPLICATE_PRIVILEGES syscall.Errno = 311 + ERROR_NO_RANGES_PROCESSED syscall.Errno = 312 + ERROR_NOT_ALLOWED_ON_SYSTEM_FILE syscall.Errno = 313 + ERROR_DISK_RESOURCES_EXHAUSTED syscall.Errno = 314 + ERROR_INVALID_TOKEN syscall.Errno = 315 + ERROR_DEVICE_FEATURE_NOT_SUPPORTED syscall.Errno = 316 + ERROR_MR_MID_NOT_FOUND syscall.Errno = 317 + ERROR_SCOPE_NOT_FOUND syscall.Errno = 318 + ERROR_UNDEFINED_SCOPE syscall.Errno = 319 + ERROR_INVALID_CAP syscall.Errno = 320 + ERROR_DEVICE_UNREACHABLE syscall.Errno = 321 + ERROR_DEVICE_NO_RESOURCES syscall.Errno = 322 + ERROR_DATA_CHECKSUM_ERROR syscall.Errno = 323 + ERROR_INTERMIXED_KERNEL_EA_OPERATION syscall.Errno = 324 + ERROR_FILE_LEVEL_TRIM_NOT_SUPPORTED syscall.Errno = 326 + ERROR_OFFSET_ALIGNMENT_VIOLATION syscall.Errno = 327 + ERROR_INVALID_FIELD_IN_PARAMETER_LIST syscall.Errno = 328 + ERROR_OPERATION_IN_PROGRESS syscall.Errno = 329 + ERROR_BAD_DEVICE_PATH 
syscall.Errno = 330 + ERROR_TOO_MANY_DESCRIPTORS syscall.Errno = 331 + ERROR_SCRUB_DATA_DISABLED syscall.Errno = 332 + ERROR_NOT_REDUNDANT_STORAGE syscall.Errno = 333 + ERROR_RESIDENT_FILE_NOT_SUPPORTED syscall.Errno = 334 + ERROR_COMPRESSED_FILE_NOT_SUPPORTED syscall.Errno = 335 + ERROR_DIRECTORY_NOT_SUPPORTED syscall.Errno = 336 + ERROR_NOT_READ_FROM_COPY syscall.Errno = 337 + ERROR_FT_WRITE_FAILURE syscall.Errno = 338 + ERROR_FT_DI_SCAN_REQUIRED syscall.Errno = 339 + ERROR_INVALID_KERNEL_INFO_VERSION syscall.Errno = 340 + ERROR_INVALID_PEP_INFO_VERSION syscall.Errno = 341 + ERROR_OBJECT_NOT_EXTERNALLY_BACKED syscall.Errno = 342 + ERROR_EXTERNAL_BACKING_PROVIDER_UNKNOWN syscall.Errno = 343 + ERROR_COMPRESSION_NOT_BENEFICIAL syscall.Errno = 344 + ERROR_STORAGE_TOPOLOGY_ID_MISMATCH syscall.Errno = 345 + ERROR_BLOCKED_BY_PARENTAL_CONTROLS syscall.Errno = 346 + ERROR_BLOCK_TOO_MANY_REFERENCES syscall.Errno = 347 + ERROR_MARKED_TO_DISALLOW_WRITES syscall.Errno = 348 + ERROR_ENCLAVE_FAILURE syscall.Errno = 349 + ERROR_FAIL_NOACTION_REBOOT syscall.Errno = 350 + ERROR_FAIL_SHUTDOWN syscall.Errno = 351 + ERROR_FAIL_RESTART syscall.Errno = 352 + ERROR_MAX_SESSIONS_REACHED syscall.Errno = 353 + ERROR_NETWORK_ACCESS_DENIED_EDP syscall.Errno = 354 + ERROR_DEVICE_HINT_NAME_BUFFER_TOO_SMALL syscall.Errno = 355 + ERROR_EDP_POLICY_DENIES_OPERATION syscall.Errno = 356 + ERROR_EDP_DPL_POLICY_CANT_BE_SATISFIED syscall.Errno = 357 + ERROR_CLOUD_FILE_SYNC_ROOT_METADATA_CORRUPT syscall.Errno = 358 + ERROR_DEVICE_IN_MAINTENANCE syscall.Errno = 359 + ERROR_NOT_SUPPORTED_ON_DAX syscall.Errno = 360 + ERROR_DAX_MAPPING_EXISTS syscall.Errno = 361 + ERROR_CLOUD_FILE_PROVIDER_NOT_RUNNING syscall.Errno = 362 + ERROR_CLOUD_FILE_METADATA_CORRUPT syscall.Errno = 363 + ERROR_CLOUD_FILE_METADATA_TOO_LARGE syscall.Errno = 364 + ERROR_CLOUD_FILE_PROPERTY_BLOB_TOO_LARGE syscall.Errno = 365 + ERROR_CLOUD_FILE_PROPERTY_BLOB_CHECKSUM_MISMATCH syscall.Errno = 366 + ERROR_CHILD_PROCESS_BLOCKED syscall.Errno = 367 + ERROR_STORAGE_LOST_DATA_PERSISTENCE syscall.Errno = 368 + ERROR_FILE_SYSTEM_VIRTUALIZATION_UNAVAILABLE syscall.Errno = 369 + ERROR_FILE_SYSTEM_VIRTUALIZATION_METADATA_CORRUPT syscall.Errno = 370 + ERROR_FILE_SYSTEM_VIRTUALIZATION_BUSY syscall.Errno = 371 + ERROR_FILE_SYSTEM_VIRTUALIZATION_PROVIDER_UNKNOWN syscall.Errno = 372 + ERROR_GDI_HANDLE_LEAK syscall.Errno = 373 + ERROR_CLOUD_FILE_TOO_MANY_PROPERTY_BLOBS syscall.Errno = 374 + ERROR_CLOUD_FILE_PROPERTY_VERSION_NOT_SUPPORTED syscall.Errno = 375 + ERROR_NOT_A_CLOUD_FILE syscall.Errno = 376 + ERROR_CLOUD_FILE_NOT_IN_SYNC syscall.Errno = 377 + ERROR_CLOUD_FILE_ALREADY_CONNECTED syscall.Errno = 378 + ERROR_CLOUD_FILE_NOT_SUPPORTED syscall.Errno = 379 + ERROR_CLOUD_FILE_INVALID_REQUEST syscall.Errno = 380 + ERROR_CLOUD_FILE_READ_ONLY_VOLUME syscall.Errno = 381 + ERROR_CLOUD_FILE_CONNECTED_PROVIDER_ONLY syscall.Errno = 382 + ERROR_CLOUD_FILE_VALIDATION_FAILED syscall.Errno = 383 + ERROR_SMB1_NOT_AVAILABLE syscall.Errno = 384 + ERROR_FILE_SYSTEM_VIRTUALIZATION_INVALID_OPERATION syscall.Errno = 385 + ERROR_CLOUD_FILE_AUTHENTICATION_FAILED syscall.Errno = 386 + ERROR_CLOUD_FILE_INSUFFICIENT_RESOURCES syscall.Errno = 387 + ERROR_CLOUD_FILE_NETWORK_UNAVAILABLE syscall.Errno = 388 + ERROR_CLOUD_FILE_UNSUCCESSFUL syscall.Errno = 389 + ERROR_CLOUD_FILE_NOT_UNDER_SYNC_ROOT syscall.Errno = 390 + ERROR_CLOUD_FILE_IN_USE syscall.Errno = 391 + ERROR_CLOUD_FILE_PINNED syscall.Errno = 392 + ERROR_CLOUD_FILE_REQUEST_ABORTED syscall.Errno = 393 + ERROR_CLOUD_FILE_PROPERTY_CORRUPT 
syscall.Errno = 394 + ERROR_CLOUD_FILE_ACCESS_DENIED syscall.Errno = 395 + ERROR_CLOUD_FILE_INCOMPATIBLE_HARDLINKS syscall.Errno = 396 + ERROR_CLOUD_FILE_PROPERTY_LOCK_CONFLICT syscall.Errno = 397 + ERROR_CLOUD_FILE_REQUEST_CANCELED syscall.Errno = 398 + ERROR_EXTERNAL_SYSKEY_NOT_SUPPORTED syscall.Errno = 399 + ERROR_THREAD_MODE_ALREADY_BACKGROUND syscall.Errno = 400 + ERROR_THREAD_MODE_NOT_BACKGROUND syscall.Errno = 401 + ERROR_PROCESS_MODE_ALREADY_BACKGROUND syscall.Errno = 402 + ERROR_PROCESS_MODE_NOT_BACKGROUND syscall.Errno = 403 + ERROR_CLOUD_FILE_PROVIDER_TERMINATED syscall.Errno = 404 + ERROR_NOT_A_CLOUD_SYNC_ROOT syscall.Errno = 405 + ERROR_FILE_PROTECTED_UNDER_DPL syscall.Errno = 406 + ERROR_VOLUME_NOT_CLUSTER_ALIGNED syscall.Errno = 407 + ERROR_NO_PHYSICALLY_ALIGNED_FREE_SPACE_FOUND syscall.Errno = 408 + ERROR_APPX_FILE_NOT_ENCRYPTED syscall.Errno = 409 + ERROR_RWRAW_ENCRYPTED_FILE_NOT_ENCRYPTED syscall.Errno = 410 + ERROR_RWRAW_ENCRYPTED_INVALID_EDATAINFO_FILEOFFSET syscall.Errno = 411 + ERROR_RWRAW_ENCRYPTED_INVALID_EDATAINFO_FILERANGE syscall.Errno = 412 + ERROR_RWRAW_ENCRYPTED_INVALID_EDATAINFO_PARAMETER syscall.Errno = 413 + ERROR_LINUX_SUBSYSTEM_NOT_PRESENT syscall.Errno = 414 + ERROR_FT_READ_FAILURE syscall.Errno = 415 + ERROR_STORAGE_RESERVE_ID_INVALID syscall.Errno = 416 + ERROR_STORAGE_RESERVE_DOES_NOT_EXIST syscall.Errno = 417 + ERROR_STORAGE_RESERVE_ALREADY_EXISTS syscall.Errno = 418 + ERROR_STORAGE_RESERVE_NOT_EMPTY syscall.Errno = 419 + ERROR_NOT_A_DAX_VOLUME syscall.Errno = 420 + ERROR_NOT_DAX_MAPPABLE syscall.Errno = 421 + ERROR_TIME_SENSITIVE_THREAD syscall.Errno = 422 + ERROR_DPL_NOT_SUPPORTED_FOR_USER syscall.Errno = 423 + ERROR_CASE_DIFFERING_NAMES_IN_DIR syscall.Errno = 424 + ERROR_FILE_NOT_SUPPORTED syscall.Errno = 425 + ERROR_CLOUD_FILE_REQUEST_TIMEOUT syscall.Errno = 426 + ERROR_NO_TASK_QUEUE syscall.Errno = 427 + ERROR_SRC_SRV_DLL_LOAD_FAILED syscall.Errno = 428 + ERROR_NOT_SUPPORTED_WITH_BTT syscall.Errno = 429 + ERROR_ENCRYPTION_DISABLED syscall.Errno = 430 + ERROR_ENCRYPTING_METADATA_DISALLOWED syscall.Errno = 431 + ERROR_CANT_CLEAR_ENCRYPTION_FLAG syscall.Errno = 432 + ERROR_NO_SUCH_DEVICE syscall.Errno = 433 + ERROR_CAPAUTHZ_NOT_DEVUNLOCKED syscall.Errno = 450 + ERROR_CAPAUTHZ_CHANGE_TYPE syscall.Errno = 451 + ERROR_CAPAUTHZ_NOT_PROVISIONED syscall.Errno = 452 + ERROR_CAPAUTHZ_NOT_AUTHORIZED syscall.Errno = 453 + ERROR_CAPAUTHZ_NO_POLICY syscall.Errno = 454 + ERROR_CAPAUTHZ_DB_CORRUPTED syscall.Errno = 455 + ERROR_CAPAUTHZ_SCCD_INVALID_CATALOG syscall.Errno = 456 + ERROR_CAPAUTHZ_SCCD_NO_AUTH_ENTITY syscall.Errno = 457 + ERROR_CAPAUTHZ_SCCD_PARSE_ERROR syscall.Errno = 458 + ERROR_CAPAUTHZ_SCCD_DEV_MODE_REQUIRED syscall.Errno = 459 + ERROR_CAPAUTHZ_SCCD_NO_CAPABILITY_MATCH syscall.Errno = 460 + ERROR_PNP_QUERY_REMOVE_DEVICE_TIMEOUT syscall.Errno = 480 + ERROR_PNP_QUERY_REMOVE_RELATED_DEVICE_TIMEOUT syscall.Errno = 481 + ERROR_PNP_QUERY_REMOVE_UNRELATED_DEVICE_TIMEOUT syscall.Errno = 482 + ERROR_DEVICE_HARDWARE_ERROR syscall.Errno = 483 + ERROR_INVALID_ADDRESS syscall.Errno = 487 + ERROR_VRF_CFG_ENABLED syscall.Errno = 1183 + ERROR_PARTITION_TERMINATING syscall.Errno = 1184 + ERROR_USER_PROFILE_LOAD syscall.Errno = 500 + ERROR_ARITHMETIC_OVERFLOW syscall.Errno = 534 + ERROR_PIPE_CONNECTED syscall.Errno = 535 + ERROR_PIPE_LISTENING syscall.Errno = 536 + ERROR_VERIFIER_STOP syscall.Errno = 537 + ERROR_ABIOS_ERROR syscall.Errno = 538 + ERROR_WX86_WARNING syscall.Errno = 539 + ERROR_WX86_ERROR syscall.Errno = 540 + ERROR_TIMER_NOT_CANCELED syscall.Errno = 
541 + ERROR_UNWIND syscall.Errno = 542 + ERROR_BAD_STACK syscall.Errno = 543 + ERROR_INVALID_UNWIND_TARGET syscall.Errno = 544 + ERROR_INVALID_PORT_ATTRIBUTES syscall.Errno = 545 + ERROR_PORT_MESSAGE_TOO_LONG syscall.Errno = 546 + ERROR_INVALID_QUOTA_LOWER syscall.Errno = 547 + ERROR_DEVICE_ALREADY_ATTACHED syscall.Errno = 548 + ERROR_INSTRUCTION_MISALIGNMENT syscall.Errno = 549 + ERROR_PROFILING_NOT_STARTED syscall.Errno = 550 + ERROR_PROFILING_NOT_STOPPED syscall.Errno = 551 + ERROR_COULD_NOT_INTERPRET syscall.Errno = 552 + ERROR_PROFILING_AT_LIMIT syscall.Errno = 553 + ERROR_CANT_WAIT syscall.Errno = 554 + ERROR_CANT_TERMINATE_SELF syscall.Errno = 555 + ERROR_UNEXPECTED_MM_CREATE_ERR syscall.Errno = 556 + ERROR_UNEXPECTED_MM_MAP_ERROR syscall.Errno = 557 + ERROR_UNEXPECTED_MM_EXTEND_ERR syscall.Errno = 558 + ERROR_BAD_FUNCTION_TABLE syscall.Errno = 559 + ERROR_NO_GUID_TRANSLATION syscall.Errno = 560 + ERROR_INVALID_LDT_SIZE syscall.Errno = 561 + ERROR_INVALID_LDT_OFFSET syscall.Errno = 563 + ERROR_INVALID_LDT_DESCRIPTOR syscall.Errno = 564 + ERROR_TOO_MANY_THREADS syscall.Errno = 565 + ERROR_THREAD_NOT_IN_PROCESS syscall.Errno = 566 + ERROR_PAGEFILE_QUOTA_EXCEEDED syscall.Errno = 567 + ERROR_LOGON_SERVER_CONFLICT syscall.Errno = 568 + ERROR_SYNCHRONIZATION_REQUIRED syscall.Errno = 569 + ERROR_NET_OPEN_FAILED syscall.Errno = 570 + ERROR_IO_PRIVILEGE_FAILED syscall.Errno = 571 + ERROR_CONTROL_C_EXIT syscall.Errno = 572 + ERROR_MISSING_SYSTEMFILE syscall.Errno = 573 + ERROR_UNHANDLED_EXCEPTION syscall.Errno = 574 + ERROR_APP_INIT_FAILURE syscall.Errno = 575 + ERROR_PAGEFILE_CREATE_FAILED syscall.Errno = 576 + ERROR_INVALID_IMAGE_HASH syscall.Errno = 577 + ERROR_NO_PAGEFILE syscall.Errno = 578 + ERROR_ILLEGAL_FLOAT_CONTEXT syscall.Errno = 579 + ERROR_NO_EVENT_PAIR syscall.Errno = 580 + ERROR_DOMAIN_CTRLR_CONFIG_ERROR syscall.Errno = 581 + ERROR_ILLEGAL_CHARACTER syscall.Errno = 582 + ERROR_UNDEFINED_CHARACTER syscall.Errno = 583 + ERROR_FLOPPY_VOLUME syscall.Errno = 584 + ERROR_BIOS_FAILED_TO_CONNECT_INTERRUPT syscall.Errno = 585 + ERROR_BACKUP_CONTROLLER syscall.Errno = 586 + ERROR_MUTANT_LIMIT_EXCEEDED syscall.Errno = 587 + ERROR_FS_DRIVER_REQUIRED syscall.Errno = 588 + ERROR_CANNOT_LOAD_REGISTRY_FILE syscall.Errno = 589 + ERROR_DEBUG_ATTACH_FAILED syscall.Errno = 590 + ERROR_SYSTEM_PROCESS_TERMINATED syscall.Errno = 591 + ERROR_DATA_NOT_ACCEPTED syscall.Errno = 592 + ERROR_VDM_HARD_ERROR syscall.Errno = 593 + ERROR_DRIVER_CANCEL_TIMEOUT syscall.Errno = 594 + ERROR_REPLY_MESSAGE_MISMATCH syscall.Errno = 595 + ERROR_LOST_WRITEBEHIND_DATA syscall.Errno = 596 + ERROR_CLIENT_SERVER_PARAMETERS_INVALID syscall.Errno = 597 + ERROR_NOT_TINY_STREAM syscall.Errno = 598 + ERROR_STACK_OVERFLOW_READ syscall.Errno = 599 + ERROR_CONVERT_TO_LARGE syscall.Errno = 600 + ERROR_FOUND_OUT_OF_SCOPE syscall.Errno = 601 + ERROR_ALLOCATE_BUCKET syscall.Errno = 602 + ERROR_MARSHALL_OVERFLOW syscall.Errno = 603 + ERROR_INVALID_VARIANT syscall.Errno = 604 + ERROR_BAD_COMPRESSION_BUFFER syscall.Errno = 605 + ERROR_AUDIT_FAILED syscall.Errno = 606 + ERROR_TIMER_RESOLUTION_NOT_SET syscall.Errno = 607 + ERROR_INSUFFICIENT_LOGON_INFO syscall.Errno = 608 + ERROR_BAD_DLL_ENTRYPOINT syscall.Errno = 609 + ERROR_BAD_SERVICE_ENTRYPOINT syscall.Errno = 610 + ERROR_IP_ADDRESS_CONFLICT1 syscall.Errno = 611 + ERROR_IP_ADDRESS_CONFLICT2 syscall.Errno = 612 + ERROR_REGISTRY_QUOTA_LIMIT syscall.Errno = 613 + ERROR_NO_CALLBACK_ACTIVE syscall.Errno = 614 + ERROR_PWD_TOO_SHORT syscall.Errno = 615 + ERROR_PWD_TOO_RECENT syscall.Errno = 
616 + ERROR_PWD_HISTORY_CONFLICT syscall.Errno = 617 + ERROR_UNSUPPORTED_COMPRESSION syscall.Errno = 618 + ERROR_INVALID_HW_PROFILE syscall.Errno = 619 + ERROR_INVALID_PLUGPLAY_DEVICE_PATH syscall.Errno = 620 + ERROR_QUOTA_LIST_INCONSISTENT syscall.Errno = 621 + ERROR_EVALUATION_EXPIRATION syscall.Errno = 622 + ERROR_ILLEGAL_DLL_RELOCATION syscall.Errno = 623 + ERROR_DLL_INIT_FAILED_LOGOFF syscall.Errno = 624 + ERROR_VALIDATE_CONTINUE syscall.Errno = 625 + ERROR_NO_MORE_MATCHES syscall.Errno = 626 + ERROR_RANGE_LIST_CONFLICT syscall.Errno = 627 + ERROR_SERVER_SID_MISMATCH syscall.Errno = 628 + ERROR_CANT_ENABLE_DENY_ONLY syscall.Errno = 629 + ERROR_FLOAT_MULTIPLE_FAULTS syscall.Errno = 630 + ERROR_FLOAT_MULTIPLE_TRAPS syscall.Errno = 631 + ERROR_NOINTERFACE syscall.Errno = 632 + ERROR_DRIVER_FAILED_SLEEP syscall.Errno = 633 + ERROR_CORRUPT_SYSTEM_FILE syscall.Errno = 634 + ERROR_COMMITMENT_MINIMUM syscall.Errno = 635 + ERROR_PNP_RESTART_ENUMERATION syscall.Errno = 636 + ERROR_SYSTEM_IMAGE_BAD_SIGNATURE syscall.Errno = 637 + ERROR_PNP_REBOOT_REQUIRED syscall.Errno = 638 + ERROR_INSUFFICIENT_POWER syscall.Errno = 639 + ERROR_MULTIPLE_FAULT_VIOLATION syscall.Errno = 640 + ERROR_SYSTEM_SHUTDOWN syscall.Errno = 641 + ERROR_PORT_NOT_SET syscall.Errno = 642 + ERROR_DS_VERSION_CHECK_FAILURE syscall.Errno = 643 + ERROR_RANGE_NOT_FOUND syscall.Errno = 644 + ERROR_NOT_SAFE_MODE_DRIVER syscall.Errno = 646 + ERROR_FAILED_DRIVER_ENTRY syscall.Errno = 647 + ERROR_DEVICE_ENUMERATION_ERROR syscall.Errno = 648 + ERROR_MOUNT_POINT_NOT_RESOLVED syscall.Errno = 649 + ERROR_INVALID_DEVICE_OBJECT_PARAMETER syscall.Errno = 650 + ERROR_MCA_OCCURED syscall.Errno = 651 + ERROR_DRIVER_DATABASE_ERROR syscall.Errno = 652 + ERROR_SYSTEM_HIVE_TOO_LARGE syscall.Errno = 653 + ERROR_DRIVER_FAILED_PRIOR_UNLOAD syscall.Errno = 654 + ERROR_VOLSNAP_PREPARE_HIBERNATE syscall.Errno = 655 + ERROR_HIBERNATION_FAILURE syscall.Errno = 656 + ERROR_PWD_TOO_LONG syscall.Errno = 657 + ERROR_FILE_SYSTEM_LIMITATION syscall.Errno = 665 + ERROR_ASSERTION_FAILURE syscall.Errno = 668 + ERROR_ACPI_ERROR syscall.Errno = 669 + ERROR_WOW_ASSERTION syscall.Errno = 670 + ERROR_PNP_BAD_MPS_TABLE syscall.Errno = 671 + ERROR_PNP_TRANSLATION_FAILED syscall.Errno = 672 + ERROR_PNP_IRQ_TRANSLATION_FAILED syscall.Errno = 673 + ERROR_PNP_INVALID_ID syscall.Errno = 674 + ERROR_WAKE_SYSTEM_DEBUGGER syscall.Errno = 675 + ERROR_HANDLES_CLOSED syscall.Errno = 676 + ERROR_EXTRANEOUS_INFORMATION syscall.Errno = 677 + ERROR_RXACT_COMMIT_NECESSARY syscall.Errno = 678 + ERROR_MEDIA_CHECK syscall.Errno = 679 + ERROR_GUID_SUBSTITUTION_MADE syscall.Errno = 680 + ERROR_STOPPED_ON_SYMLINK syscall.Errno = 681 + ERROR_LONGJUMP syscall.Errno = 682 + ERROR_PLUGPLAY_QUERY_VETOED syscall.Errno = 683 + ERROR_UNWIND_CONSOLIDATE syscall.Errno = 684 + ERROR_REGISTRY_HIVE_RECOVERED syscall.Errno = 685 + ERROR_DLL_MIGHT_BE_INSECURE syscall.Errno = 686 + ERROR_DLL_MIGHT_BE_INCOMPATIBLE syscall.Errno = 687 + ERROR_DBG_EXCEPTION_NOT_HANDLED syscall.Errno = 688 + ERROR_DBG_REPLY_LATER syscall.Errno = 689 + ERROR_DBG_UNABLE_TO_PROVIDE_HANDLE syscall.Errno = 690 + ERROR_DBG_TERMINATE_THREAD syscall.Errno = 691 + ERROR_DBG_TERMINATE_PROCESS syscall.Errno = 692 + ERROR_DBG_CONTROL_C syscall.Errno = 693 + ERROR_DBG_PRINTEXCEPTION_C syscall.Errno = 694 + ERROR_DBG_RIPEXCEPTION syscall.Errno = 695 + ERROR_DBG_CONTROL_BREAK syscall.Errno = 696 + ERROR_DBG_COMMAND_EXCEPTION syscall.Errno = 697 + ERROR_OBJECT_NAME_EXISTS syscall.Errno = 698 + ERROR_THREAD_WAS_SUSPENDED syscall.Errno = 699 + 
ERROR_IMAGE_NOT_AT_BASE syscall.Errno = 700 + ERROR_RXACT_STATE_CREATED syscall.Errno = 701 + ERROR_SEGMENT_NOTIFICATION syscall.Errno = 702 + ERROR_BAD_CURRENT_DIRECTORY syscall.Errno = 703 + ERROR_FT_READ_RECOVERY_FROM_BACKUP syscall.Errno = 704 + ERROR_FT_WRITE_RECOVERY syscall.Errno = 705 + ERROR_IMAGE_MACHINE_TYPE_MISMATCH syscall.Errno = 706 + ERROR_RECEIVE_PARTIAL syscall.Errno = 707 + ERROR_RECEIVE_EXPEDITED syscall.Errno = 708 + ERROR_RECEIVE_PARTIAL_EXPEDITED syscall.Errno = 709 + ERROR_EVENT_DONE syscall.Errno = 710 + ERROR_EVENT_PENDING syscall.Errno = 711 + ERROR_CHECKING_FILE_SYSTEM syscall.Errno = 712 + ERROR_FATAL_APP_EXIT syscall.Errno = 713 + ERROR_PREDEFINED_HANDLE syscall.Errno = 714 + ERROR_WAS_UNLOCKED syscall.Errno = 715 + ERROR_SERVICE_NOTIFICATION syscall.Errno = 716 + ERROR_WAS_LOCKED syscall.Errno = 717 + ERROR_LOG_HARD_ERROR syscall.Errno = 718 + ERROR_ALREADY_WIN32 syscall.Errno = 719 + ERROR_IMAGE_MACHINE_TYPE_MISMATCH_EXE syscall.Errno = 720 + ERROR_NO_YIELD_PERFORMED syscall.Errno = 721 + ERROR_TIMER_RESUME_IGNORED syscall.Errno = 722 + ERROR_ARBITRATION_UNHANDLED syscall.Errno = 723 + ERROR_CARDBUS_NOT_SUPPORTED syscall.Errno = 724 + ERROR_MP_PROCESSOR_MISMATCH syscall.Errno = 725 + ERROR_HIBERNATED syscall.Errno = 726 + ERROR_RESUME_HIBERNATION syscall.Errno = 727 + ERROR_FIRMWARE_UPDATED syscall.Errno = 728 + ERROR_DRIVERS_LEAKING_LOCKED_PAGES syscall.Errno = 729 + ERROR_WAKE_SYSTEM syscall.Errno = 730 + ERROR_WAIT_1 syscall.Errno = 731 + ERROR_WAIT_2 syscall.Errno = 732 + ERROR_WAIT_3 syscall.Errno = 733 + ERROR_WAIT_63 syscall.Errno = 734 + ERROR_ABANDONED_WAIT_0 syscall.Errno = 735 + ERROR_ABANDONED_WAIT_63 syscall.Errno = 736 + ERROR_USER_APC syscall.Errno = 737 + ERROR_KERNEL_APC syscall.Errno = 738 + ERROR_ALERTED syscall.Errno = 739 + ERROR_ELEVATION_REQUIRED syscall.Errno = 740 + ERROR_REPARSE syscall.Errno = 741 + ERROR_OPLOCK_BREAK_IN_PROGRESS syscall.Errno = 742 + ERROR_VOLUME_MOUNTED syscall.Errno = 743 + ERROR_RXACT_COMMITTED syscall.Errno = 744 + ERROR_NOTIFY_CLEANUP syscall.Errno = 745 + ERROR_PRIMARY_TRANSPORT_CONNECT_FAILED syscall.Errno = 746 + ERROR_PAGE_FAULT_TRANSITION syscall.Errno = 747 + ERROR_PAGE_FAULT_DEMAND_ZERO syscall.Errno = 748 + ERROR_PAGE_FAULT_COPY_ON_WRITE syscall.Errno = 749 + ERROR_PAGE_FAULT_GUARD_PAGE syscall.Errno = 750 + ERROR_PAGE_FAULT_PAGING_FILE syscall.Errno = 751 + ERROR_CACHE_PAGE_LOCKED syscall.Errno = 752 + ERROR_CRASH_DUMP syscall.Errno = 753 + ERROR_BUFFER_ALL_ZEROS syscall.Errno = 754 + ERROR_REPARSE_OBJECT syscall.Errno = 755 + ERROR_RESOURCE_REQUIREMENTS_CHANGED syscall.Errno = 756 + ERROR_TRANSLATION_COMPLETE syscall.Errno = 757 + ERROR_NOTHING_TO_TERMINATE syscall.Errno = 758 + ERROR_PROCESS_NOT_IN_JOB syscall.Errno = 759 + ERROR_PROCESS_IN_JOB syscall.Errno = 760 + ERROR_VOLSNAP_HIBERNATE_READY syscall.Errno = 761 + ERROR_FSFILTER_OP_COMPLETED_SUCCESSFULLY syscall.Errno = 762 + ERROR_INTERRUPT_VECTOR_ALREADY_CONNECTED syscall.Errno = 763 + ERROR_INTERRUPT_STILL_CONNECTED syscall.Errno = 764 + ERROR_WAIT_FOR_OPLOCK syscall.Errno = 765 + ERROR_DBG_EXCEPTION_HANDLED syscall.Errno = 766 + ERROR_DBG_CONTINUE syscall.Errno = 767 + ERROR_CALLBACK_POP_STACK syscall.Errno = 768 + ERROR_COMPRESSION_DISABLED syscall.Errno = 769 + ERROR_CANTFETCHBACKWARDS syscall.Errno = 770 + ERROR_CANTSCROLLBACKWARDS syscall.Errno = 771 + ERROR_ROWSNOTRELEASED syscall.Errno = 772 + ERROR_BAD_ACCESSOR_FLAGS syscall.Errno = 773 + ERROR_ERRORS_ENCOUNTERED syscall.Errno = 774 + ERROR_NOT_CAPABLE syscall.Errno = 775 + 
ERROR_REQUEST_OUT_OF_SEQUENCE syscall.Errno = 776 + ERROR_VERSION_PARSE_ERROR syscall.Errno = 777 + ERROR_BADSTARTPOSITION syscall.Errno = 778 + ERROR_MEMORY_HARDWARE syscall.Errno = 779 + ERROR_DISK_REPAIR_DISABLED syscall.Errno = 780 + ERROR_INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE syscall.Errno = 781 + ERROR_SYSTEM_POWERSTATE_TRANSITION syscall.Errno = 782 + ERROR_SYSTEM_POWERSTATE_COMPLEX_TRANSITION syscall.Errno = 783 + ERROR_MCA_EXCEPTION syscall.Errno = 784 + ERROR_ACCESS_AUDIT_BY_POLICY syscall.Errno = 785 + ERROR_ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY syscall.Errno = 786 + ERROR_ABANDON_HIBERFILE syscall.Errno = 787 + ERROR_LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED syscall.Errno = 788 + ERROR_LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR syscall.Errno = 789 + ERROR_LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR syscall.Errno = 790 + ERROR_BAD_MCFG_TABLE syscall.Errno = 791 + ERROR_DISK_REPAIR_REDIRECTED syscall.Errno = 792 + ERROR_DISK_REPAIR_UNSUCCESSFUL syscall.Errno = 793 + ERROR_CORRUPT_LOG_OVERFULL syscall.Errno = 794 + ERROR_CORRUPT_LOG_CORRUPTED syscall.Errno = 795 + ERROR_CORRUPT_LOG_UNAVAILABLE syscall.Errno = 796 + ERROR_CORRUPT_LOG_DELETED_FULL syscall.Errno = 797 + ERROR_CORRUPT_LOG_CLEARED syscall.Errno = 798 + ERROR_ORPHAN_NAME_EXHAUSTED syscall.Errno = 799 + ERROR_OPLOCK_SWITCHED_TO_NEW_HANDLE syscall.Errno = 800 + ERROR_CANNOT_GRANT_REQUESTED_OPLOCK syscall.Errno = 801 + ERROR_CANNOT_BREAK_OPLOCK syscall.Errno = 802 + ERROR_OPLOCK_HANDLE_CLOSED syscall.Errno = 803 + ERROR_NO_ACE_CONDITION syscall.Errno = 804 + ERROR_INVALID_ACE_CONDITION syscall.Errno = 805 + ERROR_FILE_HANDLE_REVOKED syscall.Errno = 806 + ERROR_IMAGE_AT_DIFFERENT_BASE syscall.Errno = 807 + ERROR_ENCRYPTED_IO_NOT_POSSIBLE syscall.Errno = 808 + ERROR_FILE_METADATA_OPTIMIZATION_IN_PROGRESS syscall.Errno = 809 + ERROR_QUOTA_ACTIVITY syscall.Errno = 810 + ERROR_HANDLE_REVOKED syscall.Errno = 811 + ERROR_CALLBACK_INVOKE_INLINE syscall.Errno = 812 + ERROR_CPU_SET_INVALID syscall.Errno = 813 + ERROR_ENCLAVE_NOT_TERMINATED syscall.Errno = 814 + ERROR_ENCLAVE_VIOLATION syscall.Errno = 815 + ERROR_EA_ACCESS_DENIED syscall.Errno = 994 + ERROR_OPERATION_ABORTED syscall.Errno = 995 + ERROR_IO_INCOMPLETE syscall.Errno = 996 + ERROR_IO_PENDING syscall.Errno = 997 + ERROR_NOACCESS syscall.Errno = 998 + ERROR_SWAPERROR syscall.Errno = 999 + ERROR_STACK_OVERFLOW syscall.Errno = 1001 + ERROR_INVALID_MESSAGE syscall.Errno = 1002 + ERROR_CAN_NOT_COMPLETE syscall.Errno = 1003 + ERROR_INVALID_FLAGS syscall.Errno = 1004 + ERROR_UNRECOGNIZED_VOLUME syscall.Errno = 1005 + ERROR_FILE_INVALID syscall.Errno = 1006 + ERROR_FULLSCREEN_MODE syscall.Errno = 1007 + ERROR_NO_TOKEN syscall.Errno = 1008 + ERROR_BADDB syscall.Errno = 1009 + ERROR_BADKEY syscall.Errno = 1010 + ERROR_CANTOPEN syscall.Errno = 1011 + ERROR_CANTREAD syscall.Errno = 1012 + ERROR_CANTWRITE syscall.Errno = 1013 + ERROR_REGISTRY_RECOVERED syscall.Errno = 1014 + ERROR_REGISTRY_CORRUPT syscall.Errno = 1015 + ERROR_REGISTRY_IO_FAILED syscall.Errno = 1016 + ERROR_NOT_REGISTRY_FILE syscall.Errno = 1017 + ERROR_KEY_DELETED syscall.Errno = 1018 + ERROR_NO_LOG_SPACE syscall.Errno = 1019 + ERROR_KEY_HAS_CHILDREN syscall.Errno = 1020 + ERROR_CHILD_MUST_BE_VOLATILE syscall.Errno = 1021 + ERROR_NOTIFY_ENUM_DIR syscall.Errno = 1022 + ERROR_DEPENDENT_SERVICES_RUNNING syscall.Errno = 1051 + ERROR_INVALID_SERVICE_CONTROL syscall.Errno = 1052 + ERROR_SERVICE_REQUEST_TIMEOUT syscall.Errno = 1053 + ERROR_SERVICE_NO_THREAD syscall.Errno = 1054 + ERROR_SERVICE_DATABASE_LOCKED 
syscall.Errno = 1055 + ERROR_SERVICE_ALREADY_RUNNING syscall.Errno = 1056 + ERROR_INVALID_SERVICE_ACCOUNT syscall.Errno = 1057 + ERROR_SERVICE_DISABLED syscall.Errno = 1058 + ERROR_CIRCULAR_DEPENDENCY syscall.Errno = 1059 + ERROR_SERVICE_DOES_NOT_EXIST syscall.Errno = 1060 + ERROR_SERVICE_CANNOT_ACCEPT_CTRL syscall.Errno = 1061 + ERROR_SERVICE_NOT_ACTIVE syscall.Errno = 1062 + ERROR_FAILED_SERVICE_CONTROLLER_CONNECT syscall.Errno = 1063 + ERROR_EXCEPTION_IN_SERVICE syscall.Errno = 1064 + ERROR_DATABASE_DOES_NOT_EXIST syscall.Errno = 1065 + ERROR_SERVICE_SPECIFIC_ERROR syscall.Errno = 1066 + ERROR_PROCESS_ABORTED syscall.Errno = 1067 + ERROR_SERVICE_DEPENDENCY_FAIL syscall.Errno = 1068 + ERROR_SERVICE_LOGON_FAILED syscall.Errno = 1069 + ERROR_SERVICE_START_HANG syscall.Errno = 1070 + ERROR_INVALID_SERVICE_LOCK syscall.Errno = 1071 + ERROR_SERVICE_MARKED_FOR_DELETE syscall.Errno = 1072 + ERROR_SERVICE_EXISTS syscall.Errno = 1073 + ERROR_ALREADY_RUNNING_LKG syscall.Errno = 1074 + ERROR_SERVICE_DEPENDENCY_DELETED syscall.Errno = 1075 + ERROR_BOOT_ALREADY_ACCEPTED syscall.Errno = 1076 + ERROR_SERVICE_NEVER_STARTED syscall.Errno = 1077 + ERROR_DUPLICATE_SERVICE_NAME syscall.Errno = 1078 + ERROR_DIFFERENT_SERVICE_ACCOUNT syscall.Errno = 1079 + ERROR_CANNOT_DETECT_DRIVER_FAILURE syscall.Errno = 1080 + ERROR_CANNOT_DETECT_PROCESS_ABORT syscall.Errno = 1081 + ERROR_NO_RECOVERY_PROGRAM syscall.Errno = 1082 + ERROR_SERVICE_NOT_IN_EXE syscall.Errno = 1083 + ERROR_NOT_SAFEBOOT_SERVICE syscall.Errno = 1084 + ERROR_END_OF_MEDIA syscall.Errno = 1100 + ERROR_FILEMARK_DETECTED syscall.Errno = 1101 + ERROR_BEGINNING_OF_MEDIA syscall.Errno = 1102 + ERROR_SETMARK_DETECTED syscall.Errno = 1103 + ERROR_NO_DATA_DETECTED syscall.Errno = 1104 + ERROR_PARTITION_FAILURE syscall.Errno = 1105 + ERROR_INVALID_BLOCK_LENGTH syscall.Errno = 1106 + ERROR_DEVICE_NOT_PARTITIONED syscall.Errno = 1107 + ERROR_UNABLE_TO_LOCK_MEDIA syscall.Errno = 1108 + ERROR_UNABLE_TO_UNLOAD_MEDIA syscall.Errno = 1109 + ERROR_MEDIA_CHANGED syscall.Errno = 1110 + ERROR_BUS_RESET syscall.Errno = 1111 + ERROR_NO_MEDIA_IN_DRIVE syscall.Errno = 1112 + ERROR_NO_UNICODE_TRANSLATION syscall.Errno = 1113 + ERROR_DLL_INIT_FAILED syscall.Errno = 1114 + ERROR_SHUTDOWN_IN_PROGRESS syscall.Errno = 1115 + ERROR_NO_SHUTDOWN_IN_PROGRESS syscall.Errno = 1116 + ERROR_IO_DEVICE syscall.Errno = 1117 + ERROR_SERIAL_NO_DEVICE syscall.Errno = 1118 + ERROR_IRQ_BUSY syscall.Errno = 1119 + ERROR_MORE_WRITES syscall.Errno = 1120 + ERROR_COUNTER_TIMEOUT syscall.Errno = 1121 + ERROR_FLOPPY_ID_MARK_NOT_FOUND syscall.Errno = 1122 + ERROR_FLOPPY_WRONG_CYLINDER syscall.Errno = 1123 + ERROR_FLOPPY_UNKNOWN_ERROR syscall.Errno = 1124 + ERROR_FLOPPY_BAD_REGISTERS syscall.Errno = 1125 + ERROR_DISK_RECALIBRATE_FAILED syscall.Errno = 1126 + ERROR_DISK_OPERATION_FAILED syscall.Errno = 1127 + ERROR_DISK_RESET_FAILED syscall.Errno = 1128 + ERROR_EOM_OVERFLOW syscall.Errno = 1129 + ERROR_NOT_ENOUGH_SERVER_MEMORY syscall.Errno = 1130 + ERROR_POSSIBLE_DEADLOCK syscall.Errno = 1131 + ERROR_MAPPED_ALIGNMENT syscall.Errno = 1132 + ERROR_SET_POWER_STATE_VETOED syscall.Errno = 1140 + ERROR_SET_POWER_STATE_FAILED syscall.Errno = 1141 + ERROR_TOO_MANY_LINKS syscall.Errno = 1142 + ERROR_OLD_WIN_VERSION syscall.Errno = 1150 + ERROR_APP_WRONG_OS syscall.Errno = 1151 + ERROR_SINGLE_INSTANCE_APP syscall.Errno = 1152 + ERROR_RMODE_APP syscall.Errno = 1153 + ERROR_INVALID_DLL syscall.Errno = 1154 + ERROR_NO_ASSOCIATION syscall.Errno = 1155 + ERROR_DDE_FAIL syscall.Errno = 1156 + ERROR_DLL_NOT_FOUND 
syscall.Errno = 1157 + ERROR_NO_MORE_USER_HANDLES syscall.Errno = 1158 + ERROR_MESSAGE_SYNC_ONLY syscall.Errno = 1159 + ERROR_SOURCE_ELEMENT_EMPTY syscall.Errno = 1160 + ERROR_DESTINATION_ELEMENT_FULL syscall.Errno = 1161 + ERROR_ILLEGAL_ELEMENT_ADDRESS syscall.Errno = 1162 + ERROR_MAGAZINE_NOT_PRESENT syscall.Errno = 1163 + ERROR_DEVICE_REINITIALIZATION_NEEDED syscall.Errno = 1164 + ERROR_DEVICE_REQUIRES_CLEANING syscall.Errno = 1165 + ERROR_DEVICE_DOOR_OPEN syscall.Errno = 1166 + ERROR_DEVICE_NOT_CONNECTED syscall.Errno = 1167 + ERROR_NOT_FOUND syscall.Errno = 1168 + ERROR_NO_MATCH syscall.Errno = 1169 + ERROR_SET_NOT_FOUND syscall.Errno = 1170 + ERROR_POINT_NOT_FOUND syscall.Errno = 1171 + ERROR_NO_TRACKING_SERVICE syscall.Errno = 1172 + ERROR_NO_VOLUME_ID syscall.Errno = 1173 + ERROR_UNABLE_TO_REMOVE_REPLACED syscall.Errno = 1175 + ERROR_UNABLE_TO_MOVE_REPLACEMENT syscall.Errno = 1176 + ERROR_UNABLE_TO_MOVE_REPLACEMENT_2 syscall.Errno = 1177 + ERROR_JOURNAL_DELETE_IN_PROGRESS syscall.Errno = 1178 + ERROR_JOURNAL_NOT_ACTIVE syscall.Errno = 1179 + ERROR_POTENTIAL_FILE_FOUND syscall.Errno = 1180 + ERROR_JOURNAL_ENTRY_DELETED syscall.Errno = 1181 + ERROR_SHUTDOWN_IS_SCHEDULED syscall.Errno = 1190 + ERROR_SHUTDOWN_USERS_LOGGED_ON syscall.Errno = 1191 + ERROR_BAD_DEVICE syscall.Errno = 1200 + ERROR_CONNECTION_UNAVAIL syscall.Errno = 1201 + ERROR_DEVICE_ALREADY_REMEMBERED syscall.Errno = 1202 + ERROR_NO_NET_OR_BAD_PATH syscall.Errno = 1203 + ERROR_BAD_PROVIDER syscall.Errno = 1204 + ERROR_CANNOT_OPEN_PROFILE syscall.Errno = 1205 + ERROR_BAD_PROFILE syscall.Errno = 1206 + ERROR_NOT_CONTAINER syscall.Errno = 1207 + ERROR_EXTENDED_ERROR syscall.Errno = 1208 + ERROR_INVALID_GROUPNAME syscall.Errno = 1209 + ERROR_INVALID_COMPUTERNAME syscall.Errno = 1210 + ERROR_INVALID_EVENTNAME syscall.Errno = 1211 + ERROR_INVALID_DOMAINNAME syscall.Errno = 1212 + ERROR_INVALID_SERVICENAME syscall.Errno = 1213 + ERROR_INVALID_NETNAME syscall.Errno = 1214 + ERROR_INVALID_SHARENAME syscall.Errno = 1215 + ERROR_INVALID_PASSWORDNAME syscall.Errno = 1216 + ERROR_INVALID_MESSAGENAME syscall.Errno = 1217 + ERROR_INVALID_MESSAGEDEST syscall.Errno = 1218 + ERROR_SESSION_CREDENTIAL_CONFLICT syscall.Errno = 1219 + ERROR_REMOTE_SESSION_LIMIT_EXCEEDED syscall.Errno = 1220 + ERROR_DUP_DOMAINNAME syscall.Errno = 1221 + ERROR_NO_NETWORK syscall.Errno = 1222 + ERROR_CANCELLED syscall.Errno = 1223 + ERROR_USER_MAPPED_FILE syscall.Errno = 1224 + ERROR_CONNECTION_REFUSED syscall.Errno = 1225 + ERROR_GRACEFUL_DISCONNECT syscall.Errno = 1226 + ERROR_ADDRESS_ALREADY_ASSOCIATED syscall.Errno = 1227 + ERROR_ADDRESS_NOT_ASSOCIATED syscall.Errno = 1228 + ERROR_CONNECTION_INVALID syscall.Errno = 1229 + ERROR_CONNECTION_ACTIVE syscall.Errno = 1230 + ERROR_NETWORK_UNREACHABLE syscall.Errno = 1231 + ERROR_HOST_UNREACHABLE syscall.Errno = 1232 + ERROR_PROTOCOL_UNREACHABLE syscall.Errno = 1233 + ERROR_PORT_UNREACHABLE syscall.Errno = 1234 + ERROR_REQUEST_ABORTED syscall.Errno = 1235 + ERROR_CONNECTION_ABORTED syscall.Errno = 1236 + ERROR_RETRY syscall.Errno = 1237 + ERROR_CONNECTION_COUNT_LIMIT syscall.Errno = 1238 + ERROR_LOGIN_TIME_RESTRICTION syscall.Errno = 1239 + ERROR_LOGIN_WKSTA_RESTRICTION syscall.Errno = 1240 + ERROR_INCORRECT_ADDRESS syscall.Errno = 1241 + ERROR_ALREADY_REGISTERED syscall.Errno = 1242 + ERROR_SERVICE_NOT_FOUND syscall.Errno = 1243 + ERROR_NOT_AUTHENTICATED syscall.Errno = 1244 + ERROR_NOT_LOGGED_ON syscall.Errno = 1245 + ERROR_CONTINUE syscall.Errno = 1246 + ERROR_ALREADY_INITIALIZED syscall.Errno = 1247 + 
ERROR_NO_MORE_DEVICES syscall.Errno = 1248 + ERROR_NO_SUCH_SITE syscall.Errno = 1249 + ERROR_DOMAIN_CONTROLLER_EXISTS syscall.Errno = 1250 + ERROR_ONLY_IF_CONNECTED syscall.Errno = 1251 + ERROR_OVERRIDE_NOCHANGES syscall.Errno = 1252 + ERROR_BAD_USER_PROFILE syscall.Errno = 1253 + ERROR_NOT_SUPPORTED_ON_SBS syscall.Errno = 1254 + ERROR_SERVER_SHUTDOWN_IN_PROGRESS syscall.Errno = 1255 + ERROR_HOST_DOWN syscall.Errno = 1256 + ERROR_NON_ACCOUNT_SID syscall.Errno = 1257 + ERROR_NON_DOMAIN_SID syscall.Errno = 1258 + ERROR_APPHELP_BLOCK syscall.Errno = 1259 + ERROR_ACCESS_DISABLED_BY_POLICY syscall.Errno = 1260 + ERROR_REG_NAT_CONSUMPTION syscall.Errno = 1261 + ERROR_CSCSHARE_OFFLINE syscall.Errno = 1262 + ERROR_PKINIT_FAILURE syscall.Errno = 1263 + ERROR_SMARTCARD_SUBSYSTEM_FAILURE syscall.Errno = 1264 + ERROR_DOWNGRADE_DETECTED syscall.Errno = 1265 + ERROR_MACHINE_LOCKED syscall.Errno = 1271 + ERROR_SMB_GUEST_LOGON_BLOCKED syscall.Errno = 1272 + ERROR_CALLBACK_SUPPLIED_INVALID_DATA syscall.Errno = 1273 + ERROR_SYNC_FOREGROUND_REFRESH_REQUIRED syscall.Errno = 1274 + ERROR_DRIVER_BLOCKED syscall.Errno = 1275 + ERROR_INVALID_IMPORT_OF_NON_DLL syscall.Errno = 1276 + ERROR_ACCESS_DISABLED_WEBBLADE syscall.Errno = 1277 + ERROR_ACCESS_DISABLED_WEBBLADE_TAMPER syscall.Errno = 1278 + ERROR_RECOVERY_FAILURE syscall.Errno = 1279 + ERROR_ALREADY_FIBER syscall.Errno = 1280 + ERROR_ALREADY_THREAD syscall.Errno = 1281 + ERROR_STACK_BUFFER_OVERRUN syscall.Errno = 1282 + ERROR_PARAMETER_QUOTA_EXCEEDED syscall.Errno = 1283 + ERROR_DEBUGGER_INACTIVE syscall.Errno = 1284 + ERROR_DELAY_LOAD_FAILED syscall.Errno = 1285 + ERROR_VDM_DISALLOWED syscall.Errno = 1286 + ERROR_UNIDENTIFIED_ERROR syscall.Errno = 1287 + ERROR_INVALID_CRUNTIME_PARAMETER syscall.Errno = 1288 + ERROR_BEYOND_VDL syscall.Errno = 1289 + ERROR_INCOMPATIBLE_SERVICE_SID_TYPE syscall.Errno = 1290 + ERROR_DRIVER_PROCESS_TERMINATED syscall.Errno = 1291 + ERROR_IMPLEMENTATION_LIMIT syscall.Errno = 1292 + ERROR_PROCESS_IS_PROTECTED syscall.Errno = 1293 + ERROR_SERVICE_NOTIFY_CLIENT_LAGGING syscall.Errno = 1294 + ERROR_DISK_QUOTA_EXCEEDED syscall.Errno = 1295 + ERROR_CONTENT_BLOCKED syscall.Errno = 1296 + ERROR_INCOMPATIBLE_SERVICE_PRIVILEGE syscall.Errno = 1297 + ERROR_APP_HANG syscall.Errno = 1298 + ERROR_INVALID_LABEL syscall.Errno = 1299 + ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300 + ERROR_SOME_NOT_MAPPED syscall.Errno = 1301 + ERROR_NO_QUOTAS_FOR_ACCOUNT syscall.Errno = 1302 + ERROR_LOCAL_USER_SESSION_KEY syscall.Errno = 1303 + ERROR_NULL_LM_PASSWORD syscall.Errno = 1304 + ERROR_UNKNOWN_REVISION syscall.Errno = 1305 + ERROR_REVISION_MISMATCH syscall.Errno = 1306 + ERROR_INVALID_OWNER syscall.Errno = 1307 + ERROR_INVALID_PRIMARY_GROUP syscall.Errno = 1308 + ERROR_NO_IMPERSONATION_TOKEN syscall.Errno = 1309 + ERROR_CANT_DISABLE_MANDATORY syscall.Errno = 1310 + ERROR_NO_LOGON_SERVERS syscall.Errno = 1311 + ERROR_NO_SUCH_LOGON_SESSION syscall.Errno = 1312 + ERROR_NO_SUCH_PRIVILEGE syscall.Errno = 1313 + ERROR_PRIVILEGE_NOT_HELD syscall.Errno = 1314 + ERROR_INVALID_ACCOUNT_NAME syscall.Errno = 1315 + ERROR_USER_EXISTS syscall.Errno = 1316 + ERROR_NO_SUCH_USER syscall.Errno = 1317 + ERROR_GROUP_EXISTS syscall.Errno = 1318 + ERROR_NO_SUCH_GROUP syscall.Errno = 1319 + ERROR_MEMBER_IN_GROUP syscall.Errno = 1320 + ERROR_MEMBER_NOT_IN_GROUP syscall.Errno = 1321 + ERROR_LAST_ADMIN syscall.Errno = 1322 + ERROR_WRONG_PASSWORD syscall.Errno = 1323 + ERROR_ILL_FORMED_PASSWORD syscall.Errno = 1324 + ERROR_PASSWORD_RESTRICTION syscall.Errno = 1325 + 
ERROR_LOGON_FAILURE syscall.Errno = 1326 + ERROR_ACCOUNT_RESTRICTION syscall.Errno = 1327 + ERROR_INVALID_LOGON_HOURS syscall.Errno = 1328 + ERROR_INVALID_WORKSTATION syscall.Errno = 1329 + ERROR_PASSWORD_EXPIRED syscall.Errno = 1330 + ERROR_ACCOUNT_DISABLED syscall.Errno = 1331 + ERROR_NONE_MAPPED syscall.Errno = 1332 + ERROR_TOO_MANY_LUIDS_REQUESTED syscall.Errno = 1333 + ERROR_LUIDS_EXHAUSTED syscall.Errno = 1334 + ERROR_INVALID_SUB_AUTHORITY syscall.Errno = 1335 + ERROR_INVALID_ACL syscall.Errno = 1336 + ERROR_INVALID_SID syscall.Errno = 1337 + ERROR_INVALID_SECURITY_DESCR syscall.Errno = 1338 + ERROR_BAD_INHERITANCE_ACL syscall.Errno = 1340 + ERROR_SERVER_DISABLED syscall.Errno = 1341 + ERROR_SERVER_NOT_DISABLED syscall.Errno = 1342 + ERROR_INVALID_ID_AUTHORITY syscall.Errno = 1343 + ERROR_ALLOTTED_SPACE_EXCEEDED syscall.Errno = 1344 + ERROR_INVALID_GROUP_ATTRIBUTES syscall.Errno = 1345 + ERROR_BAD_IMPERSONATION_LEVEL syscall.Errno = 1346 + ERROR_CANT_OPEN_ANONYMOUS syscall.Errno = 1347 + ERROR_BAD_VALIDATION_CLASS syscall.Errno = 1348 + ERROR_BAD_TOKEN_TYPE syscall.Errno = 1349 + ERROR_NO_SECURITY_ON_OBJECT syscall.Errno = 1350 + ERROR_CANT_ACCESS_DOMAIN_INFO syscall.Errno = 1351 + ERROR_INVALID_SERVER_STATE syscall.Errno = 1352 + ERROR_INVALID_DOMAIN_STATE syscall.Errno = 1353 + ERROR_INVALID_DOMAIN_ROLE syscall.Errno = 1354 + ERROR_NO_SUCH_DOMAIN syscall.Errno = 1355 + ERROR_DOMAIN_EXISTS syscall.Errno = 1356 + ERROR_DOMAIN_LIMIT_EXCEEDED syscall.Errno = 1357 + ERROR_INTERNAL_DB_CORRUPTION syscall.Errno = 1358 + ERROR_INTERNAL_ERROR syscall.Errno = 1359 + ERROR_GENERIC_NOT_MAPPED syscall.Errno = 1360 + ERROR_BAD_DESCRIPTOR_FORMAT syscall.Errno = 1361 + ERROR_NOT_LOGON_PROCESS syscall.Errno = 1362 + ERROR_LOGON_SESSION_EXISTS syscall.Errno = 1363 + ERROR_NO_SUCH_PACKAGE syscall.Errno = 1364 + ERROR_BAD_LOGON_SESSION_STATE syscall.Errno = 1365 + ERROR_LOGON_SESSION_COLLISION syscall.Errno = 1366 + ERROR_INVALID_LOGON_TYPE syscall.Errno = 1367 + ERROR_CANNOT_IMPERSONATE syscall.Errno = 1368 + ERROR_RXACT_INVALID_STATE syscall.Errno = 1369 + ERROR_RXACT_COMMIT_FAILURE syscall.Errno = 1370 + ERROR_SPECIAL_ACCOUNT syscall.Errno = 1371 + ERROR_SPECIAL_GROUP syscall.Errno = 1372 + ERROR_SPECIAL_USER syscall.Errno = 1373 + ERROR_MEMBERS_PRIMARY_GROUP syscall.Errno = 1374 + ERROR_TOKEN_ALREADY_IN_USE syscall.Errno = 1375 + ERROR_NO_SUCH_ALIAS syscall.Errno = 1376 + ERROR_MEMBER_NOT_IN_ALIAS syscall.Errno = 1377 + ERROR_MEMBER_IN_ALIAS syscall.Errno = 1378 + ERROR_ALIAS_EXISTS syscall.Errno = 1379 + ERROR_LOGON_NOT_GRANTED syscall.Errno = 1380 + ERROR_TOO_MANY_SECRETS syscall.Errno = 1381 + ERROR_SECRET_TOO_LONG syscall.Errno = 1382 + ERROR_INTERNAL_DB_ERROR syscall.Errno = 1383 + ERROR_TOO_MANY_CONTEXT_IDS syscall.Errno = 1384 + ERROR_LOGON_TYPE_NOT_GRANTED syscall.Errno = 1385 + ERROR_NT_CROSS_ENCRYPTION_REQUIRED syscall.Errno = 1386 + ERROR_NO_SUCH_MEMBER syscall.Errno = 1387 + ERROR_INVALID_MEMBER syscall.Errno = 1388 + ERROR_TOO_MANY_SIDS syscall.Errno = 1389 + ERROR_LM_CROSS_ENCRYPTION_REQUIRED syscall.Errno = 1390 + ERROR_NO_INHERITANCE syscall.Errno = 1391 + ERROR_FILE_CORRUPT syscall.Errno = 1392 + ERROR_DISK_CORRUPT syscall.Errno = 1393 + ERROR_NO_USER_SESSION_KEY syscall.Errno = 1394 + ERROR_LICENSE_QUOTA_EXCEEDED syscall.Errno = 1395 + ERROR_WRONG_TARGET_NAME syscall.Errno = 1396 + ERROR_MUTUAL_AUTH_FAILED syscall.Errno = 1397 + ERROR_TIME_SKEW syscall.Errno = 1398 + ERROR_CURRENT_DOMAIN_NOT_ALLOWED syscall.Errno = 1399 + ERROR_INVALID_WINDOW_HANDLE syscall.Errno = 1400 + 
ERROR_INVALID_MENU_HANDLE syscall.Errno = 1401 + ERROR_INVALID_CURSOR_HANDLE syscall.Errno = 1402 + ERROR_INVALID_ACCEL_HANDLE syscall.Errno = 1403 + ERROR_INVALID_HOOK_HANDLE syscall.Errno = 1404 + ERROR_INVALID_DWP_HANDLE syscall.Errno = 1405 + ERROR_TLW_WITH_WSCHILD syscall.Errno = 1406 + ERROR_CANNOT_FIND_WND_CLASS syscall.Errno = 1407 + ERROR_WINDOW_OF_OTHER_THREAD syscall.Errno = 1408 + ERROR_HOTKEY_ALREADY_REGISTERED syscall.Errno = 1409 + ERROR_CLASS_ALREADY_EXISTS syscall.Errno = 1410 + ERROR_CLASS_DOES_NOT_EXIST syscall.Errno = 1411 + ERROR_CLASS_HAS_WINDOWS syscall.Errno = 1412 + ERROR_INVALID_INDEX syscall.Errno = 1413 + ERROR_INVALID_ICON_HANDLE syscall.Errno = 1414 + ERROR_PRIVATE_DIALOG_INDEX syscall.Errno = 1415 + ERROR_LISTBOX_ID_NOT_FOUND syscall.Errno = 1416 + ERROR_NO_WILDCARD_CHARACTERS syscall.Errno = 1417 + ERROR_CLIPBOARD_NOT_OPEN syscall.Errno = 1418 + ERROR_HOTKEY_NOT_REGISTERED syscall.Errno = 1419 + ERROR_WINDOW_NOT_DIALOG syscall.Errno = 1420 + ERROR_CONTROL_ID_NOT_FOUND syscall.Errno = 1421 + ERROR_INVALID_COMBOBOX_MESSAGE syscall.Errno = 1422 + ERROR_WINDOW_NOT_COMBOBOX syscall.Errno = 1423 + ERROR_INVALID_EDIT_HEIGHT syscall.Errno = 1424 + ERROR_DC_NOT_FOUND syscall.Errno = 1425 + ERROR_INVALID_HOOK_FILTER syscall.Errno = 1426 + ERROR_INVALID_FILTER_PROC syscall.Errno = 1427 + ERROR_HOOK_NEEDS_HMOD syscall.Errno = 1428 + ERROR_GLOBAL_ONLY_HOOK syscall.Errno = 1429 + ERROR_JOURNAL_HOOK_SET syscall.Errno = 1430 + ERROR_HOOK_NOT_INSTALLED syscall.Errno = 1431 + ERROR_INVALID_LB_MESSAGE syscall.Errno = 1432 + ERROR_SETCOUNT_ON_BAD_LB syscall.Errno = 1433 + ERROR_LB_WITHOUT_TABSTOPS syscall.Errno = 1434 + ERROR_DESTROY_OBJECT_OF_OTHER_THREAD syscall.Errno = 1435 + ERROR_CHILD_WINDOW_MENU syscall.Errno = 1436 + ERROR_NO_SYSTEM_MENU syscall.Errno = 1437 + ERROR_INVALID_MSGBOX_STYLE syscall.Errno = 1438 + ERROR_INVALID_SPI_VALUE syscall.Errno = 1439 + ERROR_SCREEN_ALREADY_LOCKED syscall.Errno = 1440 + ERROR_HWNDS_HAVE_DIFF_PARENT syscall.Errno = 1441 + ERROR_NOT_CHILD_WINDOW syscall.Errno = 1442 + ERROR_INVALID_GW_COMMAND syscall.Errno = 1443 + ERROR_INVALID_THREAD_ID syscall.Errno = 1444 + ERROR_NON_MDICHILD_WINDOW syscall.Errno = 1445 + ERROR_POPUP_ALREADY_ACTIVE syscall.Errno = 1446 + ERROR_NO_SCROLLBARS syscall.Errno = 1447 + ERROR_INVALID_SCROLLBAR_RANGE syscall.Errno = 1448 + ERROR_INVALID_SHOWWIN_COMMAND syscall.Errno = 1449 + ERROR_NO_SYSTEM_RESOURCES syscall.Errno = 1450 + ERROR_NONPAGED_SYSTEM_RESOURCES syscall.Errno = 1451 + ERROR_PAGED_SYSTEM_RESOURCES syscall.Errno = 1452 + ERROR_WORKING_SET_QUOTA syscall.Errno = 1453 + ERROR_PAGEFILE_QUOTA syscall.Errno = 1454 + ERROR_COMMITMENT_LIMIT syscall.Errno = 1455 + ERROR_MENU_ITEM_NOT_FOUND syscall.Errno = 1456 + ERROR_INVALID_KEYBOARD_HANDLE syscall.Errno = 1457 + ERROR_HOOK_TYPE_NOT_ALLOWED syscall.Errno = 1458 + ERROR_REQUIRES_INTERACTIVE_WINDOWSTATION syscall.Errno = 1459 + ERROR_TIMEOUT syscall.Errno = 1460 + ERROR_INVALID_MONITOR_HANDLE syscall.Errno = 1461 + ERROR_INCORRECT_SIZE syscall.Errno = 1462 + ERROR_SYMLINK_CLASS_DISABLED syscall.Errno = 1463 + ERROR_SYMLINK_NOT_SUPPORTED syscall.Errno = 1464 + ERROR_XML_PARSE_ERROR syscall.Errno = 1465 + ERROR_XMLDSIG_ERROR syscall.Errno = 1466 + ERROR_RESTART_APPLICATION syscall.Errno = 1467 + ERROR_WRONG_COMPARTMENT syscall.Errno = 1468 + ERROR_AUTHIP_FAILURE syscall.Errno = 1469 + ERROR_NO_NVRAM_RESOURCES syscall.Errno = 1470 + ERROR_NOT_GUI_PROCESS syscall.Errno = 1471 + ERROR_EVENTLOG_FILE_CORRUPT syscall.Errno = 1500 + ERROR_EVENTLOG_CANT_START 
syscall.Errno = 1501
	[remaining vendored Windows error constants, one `+ NAME syscall.Errno = VALUE` line each: installer (ERROR_INSTALL_*), RPC (RPC_S_*, RPC_X_*, EPT_S_*), print spooler, WMI, removable storage, Secure Boot, cluster (ERROR_CLUSTER_*), EFS, CLFS log (ERROR_LOG_*), transaction manager (ERROR_TRANSACTION_*, ERROR_TM_*), Terminal Services (ERROR_CTX_*), FRS (FRS_ERR_*), Directory Service (ERROR_DS_*), and DNS (DNS_ERROR_*, DNS_STATUS_*) definitions covering values 1502 through 9714]
	DNS_WARNING_PTR_CREATE_FAILED
syscall.Errno = 9715 + DNS_WARNING_DOMAIN_UNDELETED syscall.Errno = 9716 + DNS_ERROR_DS_UNAVAILABLE syscall.Errno = 9717 + DNS_ERROR_DS_ZONE_ALREADY_EXISTS syscall.Errno = 9718 + DNS_ERROR_NO_BOOTFILE_IF_DS_ZONE syscall.Errno = 9719 + DNS_ERROR_NODE_IS_DNAME syscall.Errno = 9720 + DNS_ERROR_DNAME_COLLISION syscall.Errno = 9721 + DNS_ERROR_ALIAS_LOOP syscall.Errno = 9722 + DNS_ERROR_OPERATION_BASE syscall.Errno = 9750 + DNS_INFO_AXFR_COMPLETE syscall.Errno = 9751 + DNS_ERROR_AXFR syscall.Errno = 9752 + DNS_INFO_ADDED_LOCAL_WINS syscall.Errno = 9753 + DNS_ERROR_SECURE_BASE syscall.Errno = 9800 + DNS_STATUS_CONTINUE_NEEDED syscall.Errno = 9801 + DNS_ERROR_SETUP_BASE syscall.Errno = 9850 + DNS_ERROR_NO_TCPIP syscall.Errno = 9851 + DNS_ERROR_NO_DNS_SERVERS syscall.Errno = 9852 + DNS_ERROR_DP_BASE syscall.Errno = 9900 + DNS_ERROR_DP_DOES_NOT_EXIST syscall.Errno = 9901 + DNS_ERROR_DP_ALREADY_EXISTS syscall.Errno = 9902 + DNS_ERROR_DP_NOT_ENLISTED syscall.Errno = 9903 + DNS_ERROR_DP_ALREADY_ENLISTED syscall.Errno = 9904 + DNS_ERROR_DP_NOT_AVAILABLE syscall.Errno = 9905 + DNS_ERROR_DP_FSMO_ERROR syscall.Errno = 9906 + DNS_ERROR_RRL_NOT_ENABLED syscall.Errno = 9911 + DNS_ERROR_RRL_INVALID_WINDOW_SIZE syscall.Errno = 9912 + DNS_ERROR_RRL_INVALID_IPV4_PREFIX syscall.Errno = 9913 + DNS_ERROR_RRL_INVALID_IPV6_PREFIX syscall.Errno = 9914 + DNS_ERROR_RRL_INVALID_TC_RATE syscall.Errno = 9915 + DNS_ERROR_RRL_INVALID_LEAK_RATE syscall.Errno = 9916 + DNS_ERROR_RRL_LEAK_RATE_LESSTHAN_TC_RATE syscall.Errno = 9917 + DNS_ERROR_VIRTUALIZATION_INSTANCE_ALREADY_EXISTS syscall.Errno = 9921 + DNS_ERROR_VIRTUALIZATION_INSTANCE_DOES_NOT_EXIST syscall.Errno = 9922 + DNS_ERROR_VIRTUALIZATION_TREE_LOCKED syscall.Errno = 9923 + DNS_ERROR_INVAILD_VIRTUALIZATION_INSTANCE_NAME syscall.Errno = 9924 + DNS_ERROR_DEFAULT_VIRTUALIZATION_INSTANCE syscall.Errno = 9925 + DNS_ERROR_ZONESCOPE_ALREADY_EXISTS syscall.Errno = 9951 + DNS_ERROR_ZONESCOPE_DOES_NOT_EXIST syscall.Errno = 9952 + DNS_ERROR_DEFAULT_ZONESCOPE syscall.Errno = 9953 + DNS_ERROR_INVALID_ZONESCOPE_NAME syscall.Errno = 9954 + DNS_ERROR_NOT_ALLOWED_WITH_ZONESCOPES syscall.Errno = 9955 + DNS_ERROR_LOAD_ZONESCOPE_FAILED syscall.Errno = 9956 + DNS_ERROR_ZONESCOPE_FILE_WRITEBACK_FAILED syscall.Errno = 9957 + DNS_ERROR_INVALID_SCOPE_NAME syscall.Errno = 9958 + DNS_ERROR_SCOPE_DOES_NOT_EXIST syscall.Errno = 9959 + DNS_ERROR_DEFAULT_SCOPE syscall.Errno = 9960 + DNS_ERROR_INVALID_SCOPE_OPERATION syscall.Errno = 9961 + DNS_ERROR_SCOPE_LOCKED syscall.Errno = 9962 + DNS_ERROR_SCOPE_ALREADY_EXISTS syscall.Errno = 9963 + DNS_ERROR_POLICY_ALREADY_EXISTS syscall.Errno = 9971 + DNS_ERROR_POLICY_DOES_NOT_EXIST syscall.Errno = 9972 + DNS_ERROR_POLICY_INVALID_CRITERIA syscall.Errno = 9973 + DNS_ERROR_POLICY_INVALID_SETTINGS syscall.Errno = 9974 + DNS_ERROR_CLIENT_SUBNET_IS_ACCESSED syscall.Errno = 9975 + DNS_ERROR_CLIENT_SUBNET_DOES_NOT_EXIST syscall.Errno = 9976 + DNS_ERROR_CLIENT_SUBNET_ALREADY_EXISTS syscall.Errno = 9977 + DNS_ERROR_SUBNET_DOES_NOT_EXIST syscall.Errno = 9978 + DNS_ERROR_SUBNET_ALREADY_EXISTS syscall.Errno = 9979 + DNS_ERROR_POLICY_LOCKED syscall.Errno = 9980 + DNS_ERROR_POLICY_INVALID_WEIGHT syscall.Errno = 9981 + DNS_ERROR_POLICY_INVALID_NAME syscall.Errno = 9982 + DNS_ERROR_POLICY_MISSING_CRITERIA syscall.Errno = 9983 + DNS_ERROR_INVALID_CLIENT_SUBNET_NAME syscall.Errno = 9984 + DNS_ERROR_POLICY_PROCESSING_ORDER_INVALID syscall.Errno = 9985 + DNS_ERROR_POLICY_SCOPE_MISSING syscall.Errno = 9986 + DNS_ERROR_POLICY_SCOPE_NOT_ALLOWED syscall.Errno = 9987 + 
DNS_ERROR_SERVERSCOPE_IS_REFERENCED syscall.Errno = 9988 + DNS_ERROR_ZONESCOPE_IS_REFERENCED syscall.Errno = 9989 + DNS_ERROR_POLICY_INVALID_CRITERIA_CLIENT_SUBNET syscall.Errno = 9990 + DNS_ERROR_POLICY_INVALID_CRITERIA_TRANSPORT_PROTOCOL syscall.Errno = 9991 + DNS_ERROR_POLICY_INVALID_CRITERIA_NETWORK_PROTOCOL syscall.Errno = 9992 + DNS_ERROR_POLICY_INVALID_CRITERIA_INTERFACE syscall.Errno = 9993 + DNS_ERROR_POLICY_INVALID_CRITERIA_FQDN syscall.Errno = 9994 + DNS_ERROR_POLICY_INVALID_CRITERIA_QUERY_TYPE syscall.Errno = 9995 + DNS_ERROR_POLICY_INVALID_CRITERIA_TIME_OF_DAY syscall.Errno = 9996 + WSABASEERR syscall.Errno = 10000 + WSAEINTR syscall.Errno = 10004 + WSAEBADF syscall.Errno = 10009 + WSAEACCES syscall.Errno = 10013 + WSAEFAULT syscall.Errno = 10014 + WSAEINVAL syscall.Errno = 10022 + WSAEMFILE syscall.Errno = 10024 + WSAEWOULDBLOCK syscall.Errno = 10035 + WSAEINPROGRESS syscall.Errno = 10036 + WSAEALREADY syscall.Errno = 10037 + WSAENOTSOCK syscall.Errno = 10038 + WSAEDESTADDRREQ syscall.Errno = 10039 + WSAEMSGSIZE syscall.Errno = 10040 + WSAEPROTOTYPE syscall.Errno = 10041 + WSAENOPROTOOPT syscall.Errno = 10042 + WSAEPROTONOSUPPORT syscall.Errno = 10043 + WSAESOCKTNOSUPPORT syscall.Errno = 10044 + WSAEOPNOTSUPP syscall.Errno = 10045 + WSAEPFNOSUPPORT syscall.Errno = 10046 + WSAEAFNOSUPPORT syscall.Errno = 10047 + WSAEADDRINUSE syscall.Errno = 10048 + WSAEADDRNOTAVAIL syscall.Errno = 10049 + WSAENETDOWN syscall.Errno = 10050 + WSAENETUNREACH syscall.Errno = 10051 + WSAENETRESET syscall.Errno = 10052 + WSAECONNABORTED syscall.Errno = 10053 + WSAECONNRESET syscall.Errno = 10054 + WSAENOBUFS syscall.Errno = 10055 + WSAEISCONN syscall.Errno = 10056 + WSAENOTCONN syscall.Errno = 10057 + WSAESHUTDOWN syscall.Errno = 10058 + WSAETOOMANYREFS syscall.Errno = 10059 + WSAETIMEDOUT syscall.Errno = 10060 + WSAECONNREFUSED syscall.Errno = 10061 + WSAELOOP syscall.Errno = 10062 + WSAENAMETOOLONG syscall.Errno = 10063 + WSAEHOSTDOWN syscall.Errno = 10064 + WSAEHOSTUNREACH syscall.Errno = 10065 + WSAENOTEMPTY syscall.Errno = 10066 + WSAEPROCLIM syscall.Errno = 10067 + WSAEUSERS syscall.Errno = 10068 + WSAEDQUOT syscall.Errno = 10069 + WSAESTALE syscall.Errno = 10070 + WSAEREMOTE syscall.Errno = 10071 + WSASYSNOTREADY syscall.Errno = 10091 + WSAVERNOTSUPPORTED syscall.Errno = 10092 + WSANOTINITIALISED syscall.Errno = 10093 + WSAEDISCON syscall.Errno = 10101 + WSAENOMORE syscall.Errno = 10102 + WSAECANCELLED syscall.Errno = 10103 + WSAEINVALIDPROCTABLE syscall.Errno = 10104 + WSAEINVALIDPROVIDER syscall.Errno = 10105 + WSAEPROVIDERFAILEDINIT syscall.Errno = 10106 + WSASYSCALLFAILURE syscall.Errno = 10107 + WSASERVICE_NOT_FOUND syscall.Errno = 10108 + WSATYPE_NOT_FOUND syscall.Errno = 10109 + WSA_E_NO_MORE syscall.Errno = 10110 + WSA_E_CANCELLED syscall.Errno = 10111 + WSAEREFUSED syscall.Errno = 10112 + WSAHOST_NOT_FOUND syscall.Errno = 11001 + WSATRY_AGAIN syscall.Errno = 11002 + WSANO_RECOVERY syscall.Errno = 11003 + WSANO_DATA syscall.Errno = 11004 + WSA_QOS_RECEIVERS syscall.Errno = 11005 + WSA_QOS_SENDERS syscall.Errno = 11006 + WSA_QOS_NO_SENDERS syscall.Errno = 11007 + WSA_QOS_NO_RECEIVERS syscall.Errno = 11008 + WSA_QOS_REQUEST_CONFIRMED syscall.Errno = 11009 + WSA_QOS_ADMISSION_FAILURE syscall.Errno = 11010 + WSA_QOS_POLICY_FAILURE syscall.Errno = 11011 + WSA_QOS_BAD_STYLE syscall.Errno = 11012 + WSA_QOS_BAD_OBJECT syscall.Errno = 11013 + WSA_QOS_TRAFFIC_CTRL_ERROR syscall.Errno = 11014 + WSA_QOS_GENERIC_ERROR syscall.Errno = 11015 + WSA_QOS_ESERVICETYPE syscall.Errno = 11016 + 
WSA_QOS_EFLOWSPEC syscall.Errno = 11017 + WSA_QOS_EPROVSPECBUF syscall.Errno = 11018 + WSA_QOS_EFILTERSTYLE syscall.Errno = 11019 + WSA_QOS_EFILTERTYPE syscall.Errno = 11020 + WSA_QOS_EFILTERCOUNT syscall.Errno = 11021 + WSA_QOS_EOBJLENGTH syscall.Errno = 11022 + WSA_QOS_EFLOWCOUNT syscall.Errno = 11023 + WSA_QOS_EUNKOWNPSOBJ syscall.Errno = 11024 + WSA_QOS_EPOLICYOBJ syscall.Errno = 11025 + WSA_QOS_EFLOWDESC syscall.Errno = 11026 + WSA_QOS_EPSFLOWSPEC syscall.Errno = 11027 + WSA_QOS_EPSFILTERSPEC syscall.Errno = 11028 + WSA_QOS_ESDMODEOBJ syscall.Errno = 11029 + WSA_QOS_ESHAPERATEOBJ syscall.Errno = 11030 + WSA_QOS_RESERVED_PETYPE syscall.Errno = 11031 + WSA_SECURE_HOST_NOT_FOUND syscall.Errno = 11032 + WSA_IPSEC_NAME_POLICY_ERROR syscall.Errno = 11033 + ERROR_IPSEC_QM_POLICY_EXISTS syscall.Errno = 13000 + ERROR_IPSEC_QM_POLICY_NOT_FOUND syscall.Errno = 13001 + ERROR_IPSEC_QM_POLICY_IN_USE syscall.Errno = 13002 + ERROR_IPSEC_MM_POLICY_EXISTS syscall.Errno = 13003 + ERROR_IPSEC_MM_POLICY_NOT_FOUND syscall.Errno = 13004 + ERROR_IPSEC_MM_POLICY_IN_USE syscall.Errno = 13005 + ERROR_IPSEC_MM_FILTER_EXISTS syscall.Errno = 13006 + ERROR_IPSEC_MM_FILTER_NOT_FOUND syscall.Errno = 13007 + ERROR_IPSEC_TRANSPORT_FILTER_EXISTS syscall.Errno = 13008 + ERROR_IPSEC_TRANSPORT_FILTER_NOT_FOUND syscall.Errno = 13009 + ERROR_IPSEC_MM_AUTH_EXISTS syscall.Errno = 13010 + ERROR_IPSEC_MM_AUTH_NOT_FOUND syscall.Errno = 13011 + ERROR_IPSEC_MM_AUTH_IN_USE syscall.Errno = 13012 + ERROR_IPSEC_DEFAULT_MM_POLICY_NOT_FOUND syscall.Errno = 13013 + ERROR_IPSEC_DEFAULT_MM_AUTH_NOT_FOUND syscall.Errno = 13014 + ERROR_IPSEC_DEFAULT_QM_POLICY_NOT_FOUND syscall.Errno = 13015 + ERROR_IPSEC_TUNNEL_FILTER_EXISTS syscall.Errno = 13016 + ERROR_IPSEC_TUNNEL_FILTER_NOT_FOUND syscall.Errno = 13017 + ERROR_IPSEC_MM_FILTER_PENDING_DELETION syscall.Errno = 13018 + ERROR_IPSEC_TRANSPORT_FILTER_PENDING_DELETION syscall.Errno = 13019 + ERROR_IPSEC_TUNNEL_FILTER_PENDING_DELETION syscall.Errno = 13020 + ERROR_IPSEC_MM_POLICY_PENDING_DELETION syscall.Errno = 13021 + ERROR_IPSEC_MM_AUTH_PENDING_DELETION syscall.Errno = 13022 + ERROR_IPSEC_QM_POLICY_PENDING_DELETION syscall.Errno = 13023 + WARNING_IPSEC_MM_POLICY_PRUNED syscall.Errno = 13024 + WARNING_IPSEC_QM_POLICY_PRUNED syscall.Errno = 13025 + ERROR_IPSEC_IKE_NEG_STATUS_BEGIN syscall.Errno = 13800 + ERROR_IPSEC_IKE_AUTH_FAIL syscall.Errno = 13801 + ERROR_IPSEC_IKE_ATTRIB_FAIL syscall.Errno = 13802 + ERROR_IPSEC_IKE_NEGOTIATION_PENDING syscall.Errno = 13803 + ERROR_IPSEC_IKE_GENERAL_PROCESSING_ERROR syscall.Errno = 13804 + ERROR_IPSEC_IKE_TIMED_OUT syscall.Errno = 13805 + ERROR_IPSEC_IKE_NO_CERT syscall.Errno = 13806 + ERROR_IPSEC_IKE_SA_DELETED syscall.Errno = 13807 + ERROR_IPSEC_IKE_SA_REAPED syscall.Errno = 13808 + ERROR_IPSEC_IKE_MM_ACQUIRE_DROP syscall.Errno = 13809 + ERROR_IPSEC_IKE_QM_ACQUIRE_DROP syscall.Errno = 13810 + ERROR_IPSEC_IKE_QUEUE_DROP_MM syscall.Errno = 13811 + ERROR_IPSEC_IKE_QUEUE_DROP_NO_MM syscall.Errno = 13812 + ERROR_IPSEC_IKE_DROP_NO_RESPONSE syscall.Errno = 13813 + ERROR_IPSEC_IKE_MM_DELAY_DROP syscall.Errno = 13814 + ERROR_IPSEC_IKE_QM_DELAY_DROP syscall.Errno = 13815 + ERROR_IPSEC_IKE_ERROR syscall.Errno = 13816 + ERROR_IPSEC_IKE_CRL_FAILED syscall.Errno = 13817 + ERROR_IPSEC_IKE_INVALID_KEY_USAGE syscall.Errno = 13818 + ERROR_IPSEC_IKE_INVALID_CERT_TYPE syscall.Errno = 13819 + ERROR_IPSEC_IKE_NO_PRIVATE_KEY syscall.Errno = 13820 + ERROR_IPSEC_IKE_SIMULTANEOUS_REKEY syscall.Errno = 13821 + ERROR_IPSEC_IKE_DH_FAIL syscall.Errno = 13822 + 
ERROR_IPSEC_IKE_CRITICAL_PAYLOAD_NOT_RECOGNIZED syscall.Errno = 13823 + ERROR_IPSEC_IKE_INVALID_HEADER syscall.Errno = 13824 + ERROR_IPSEC_IKE_NO_POLICY syscall.Errno = 13825 + ERROR_IPSEC_IKE_INVALID_SIGNATURE syscall.Errno = 13826 + ERROR_IPSEC_IKE_KERBEROS_ERROR syscall.Errno = 13827 + ERROR_IPSEC_IKE_NO_PUBLIC_KEY syscall.Errno = 13828 + ERROR_IPSEC_IKE_PROCESS_ERR syscall.Errno = 13829 + ERROR_IPSEC_IKE_PROCESS_ERR_SA syscall.Errno = 13830 + ERROR_IPSEC_IKE_PROCESS_ERR_PROP syscall.Errno = 13831 + ERROR_IPSEC_IKE_PROCESS_ERR_TRANS syscall.Errno = 13832 + ERROR_IPSEC_IKE_PROCESS_ERR_KE syscall.Errno = 13833 + ERROR_IPSEC_IKE_PROCESS_ERR_ID syscall.Errno = 13834 + ERROR_IPSEC_IKE_PROCESS_ERR_CERT syscall.Errno = 13835 + ERROR_IPSEC_IKE_PROCESS_ERR_CERT_REQ syscall.Errno = 13836 + ERROR_IPSEC_IKE_PROCESS_ERR_HASH syscall.Errno = 13837 + ERROR_IPSEC_IKE_PROCESS_ERR_SIG syscall.Errno = 13838 + ERROR_IPSEC_IKE_PROCESS_ERR_NONCE syscall.Errno = 13839 + ERROR_IPSEC_IKE_PROCESS_ERR_NOTIFY syscall.Errno = 13840 + ERROR_IPSEC_IKE_PROCESS_ERR_DELETE syscall.Errno = 13841 + ERROR_IPSEC_IKE_PROCESS_ERR_VENDOR syscall.Errno = 13842 + ERROR_IPSEC_IKE_INVALID_PAYLOAD syscall.Errno = 13843 + ERROR_IPSEC_IKE_LOAD_SOFT_SA syscall.Errno = 13844 + ERROR_IPSEC_IKE_SOFT_SA_TORN_DOWN syscall.Errno = 13845 + ERROR_IPSEC_IKE_INVALID_COOKIE syscall.Errno = 13846 + ERROR_IPSEC_IKE_NO_PEER_CERT syscall.Errno = 13847 + ERROR_IPSEC_IKE_PEER_CRL_FAILED syscall.Errno = 13848 + ERROR_IPSEC_IKE_POLICY_CHANGE syscall.Errno = 13849 + ERROR_IPSEC_IKE_NO_MM_POLICY syscall.Errno = 13850 + ERROR_IPSEC_IKE_NOTCBPRIV syscall.Errno = 13851 + ERROR_IPSEC_IKE_SECLOADFAIL syscall.Errno = 13852 + ERROR_IPSEC_IKE_FAILSSPINIT syscall.Errno = 13853 + ERROR_IPSEC_IKE_FAILQUERYSSP syscall.Errno = 13854 + ERROR_IPSEC_IKE_SRVACQFAIL syscall.Errno = 13855 + ERROR_IPSEC_IKE_SRVQUERYCRED syscall.Errno = 13856 + ERROR_IPSEC_IKE_GETSPIFAIL syscall.Errno = 13857 + ERROR_IPSEC_IKE_INVALID_FILTER syscall.Errno = 13858 + ERROR_IPSEC_IKE_OUT_OF_MEMORY syscall.Errno = 13859 + ERROR_IPSEC_IKE_ADD_UPDATE_KEY_FAILED syscall.Errno = 13860 + ERROR_IPSEC_IKE_INVALID_POLICY syscall.Errno = 13861 + ERROR_IPSEC_IKE_UNKNOWN_DOI syscall.Errno = 13862 + ERROR_IPSEC_IKE_INVALID_SITUATION syscall.Errno = 13863 + ERROR_IPSEC_IKE_DH_FAILURE syscall.Errno = 13864 + ERROR_IPSEC_IKE_INVALID_GROUP syscall.Errno = 13865 + ERROR_IPSEC_IKE_ENCRYPT syscall.Errno = 13866 + ERROR_IPSEC_IKE_DECRYPT syscall.Errno = 13867 + ERROR_IPSEC_IKE_POLICY_MATCH syscall.Errno = 13868 + ERROR_IPSEC_IKE_UNSUPPORTED_ID syscall.Errno = 13869 + ERROR_IPSEC_IKE_INVALID_HASH syscall.Errno = 13870 + ERROR_IPSEC_IKE_INVALID_HASH_ALG syscall.Errno = 13871 + ERROR_IPSEC_IKE_INVALID_HASH_SIZE syscall.Errno = 13872 + ERROR_IPSEC_IKE_INVALID_ENCRYPT_ALG syscall.Errno = 13873 + ERROR_IPSEC_IKE_INVALID_AUTH_ALG syscall.Errno = 13874 + ERROR_IPSEC_IKE_INVALID_SIG syscall.Errno = 13875 + ERROR_IPSEC_IKE_LOAD_FAILED syscall.Errno = 13876 + ERROR_IPSEC_IKE_RPC_DELETE syscall.Errno = 13877 + ERROR_IPSEC_IKE_BENIGN_REINIT syscall.Errno = 13878 + ERROR_IPSEC_IKE_INVALID_RESPONDER_LIFETIME_NOTIFY syscall.Errno = 13879 + ERROR_IPSEC_IKE_INVALID_MAJOR_VERSION syscall.Errno = 13880 + ERROR_IPSEC_IKE_INVALID_CERT_KEYLEN syscall.Errno = 13881 + ERROR_IPSEC_IKE_MM_LIMIT syscall.Errno = 13882 + ERROR_IPSEC_IKE_NEGOTIATION_DISABLED syscall.Errno = 13883 + ERROR_IPSEC_IKE_QM_LIMIT syscall.Errno = 13884 + ERROR_IPSEC_IKE_MM_EXPIRED syscall.Errno = 13885 + ERROR_IPSEC_IKE_PEER_MM_ASSUMED_INVALID syscall.Errno = 13886 + 
ERROR_IPSEC_IKE_CERT_CHAIN_POLICY_MISMATCH syscall.Errno = 13887 + ERROR_IPSEC_IKE_UNEXPECTED_MESSAGE_ID syscall.Errno = 13888 + ERROR_IPSEC_IKE_INVALID_AUTH_PAYLOAD syscall.Errno = 13889 + ERROR_IPSEC_IKE_DOS_COOKIE_SENT syscall.Errno = 13890 + ERROR_IPSEC_IKE_SHUTTING_DOWN syscall.Errno = 13891 + ERROR_IPSEC_IKE_CGA_AUTH_FAILED syscall.Errno = 13892 + ERROR_IPSEC_IKE_PROCESS_ERR_NATOA syscall.Errno = 13893 + ERROR_IPSEC_IKE_INVALID_MM_FOR_QM syscall.Errno = 13894 + ERROR_IPSEC_IKE_QM_EXPIRED syscall.Errno = 13895 + ERROR_IPSEC_IKE_TOO_MANY_FILTERS syscall.Errno = 13896 + ERROR_IPSEC_IKE_NEG_STATUS_END syscall.Errno = 13897 + ERROR_IPSEC_IKE_KILL_DUMMY_NAP_TUNNEL syscall.Errno = 13898 + ERROR_IPSEC_IKE_INNER_IP_ASSIGNMENT_FAILURE syscall.Errno = 13899 + ERROR_IPSEC_IKE_REQUIRE_CP_PAYLOAD_MISSING syscall.Errno = 13900 + ERROR_IPSEC_KEY_MODULE_IMPERSONATION_NEGOTIATION_PENDING syscall.Errno = 13901 + ERROR_IPSEC_IKE_COEXISTENCE_SUPPRESS syscall.Errno = 13902 + ERROR_IPSEC_IKE_RATELIMIT_DROP syscall.Errno = 13903 + ERROR_IPSEC_IKE_PEER_DOESNT_SUPPORT_MOBIKE syscall.Errno = 13904 + ERROR_IPSEC_IKE_AUTHORIZATION_FAILURE syscall.Errno = 13905 + ERROR_IPSEC_IKE_STRONG_CRED_AUTHORIZATION_FAILURE syscall.Errno = 13906 + ERROR_IPSEC_IKE_AUTHORIZATION_FAILURE_WITH_OPTIONAL_RETRY syscall.Errno = 13907 + ERROR_IPSEC_IKE_STRONG_CRED_AUTHORIZATION_AND_CERTMAP_FAILURE syscall.Errno = 13908 + ERROR_IPSEC_IKE_NEG_STATUS_EXTENDED_END syscall.Errno = 13909 + ERROR_IPSEC_BAD_SPI syscall.Errno = 13910 + ERROR_IPSEC_SA_LIFETIME_EXPIRED syscall.Errno = 13911 + ERROR_IPSEC_WRONG_SA syscall.Errno = 13912 + ERROR_IPSEC_REPLAY_CHECK_FAILED syscall.Errno = 13913 + ERROR_IPSEC_INVALID_PACKET syscall.Errno = 13914 + ERROR_IPSEC_INTEGRITY_CHECK_FAILED syscall.Errno = 13915 + ERROR_IPSEC_CLEAR_TEXT_DROP syscall.Errno = 13916 + ERROR_IPSEC_AUTH_FIREWALL_DROP syscall.Errno = 13917 + ERROR_IPSEC_THROTTLE_DROP syscall.Errno = 13918 + ERROR_IPSEC_DOSP_BLOCK syscall.Errno = 13925 + ERROR_IPSEC_DOSP_RECEIVED_MULTICAST syscall.Errno = 13926 + ERROR_IPSEC_DOSP_INVALID_PACKET syscall.Errno = 13927 + ERROR_IPSEC_DOSP_STATE_LOOKUP_FAILED syscall.Errno = 13928 + ERROR_IPSEC_DOSP_MAX_ENTRIES syscall.Errno = 13929 + ERROR_IPSEC_DOSP_KEYMOD_NOT_ALLOWED syscall.Errno = 13930 + ERROR_IPSEC_DOSP_NOT_INSTALLED syscall.Errno = 13931 + ERROR_IPSEC_DOSP_MAX_PER_IP_RATELIMIT_QUEUES syscall.Errno = 13932 + ERROR_SXS_SECTION_NOT_FOUND syscall.Errno = 14000 + ERROR_SXS_CANT_GEN_ACTCTX syscall.Errno = 14001 + ERROR_SXS_INVALID_ACTCTXDATA_FORMAT syscall.Errno = 14002 + ERROR_SXS_ASSEMBLY_NOT_FOUND syscall.Errno = 14003 + ERROR_SXS_MANIFEST_FORMAT_ERROR syscall.Errno = 14004 + ERROR_SXS_MANIFEST_PARSE_ERROR syscall.Errno = 14005 + ERROR_SXS_ACTIVATION_CONTEXT_DISABLED syscall.Errno = 14006 + ERROR_SXS_KEY_NOT_FOUND syscall.Errno = 14007 + ERROR_SXS_VERSION_CONFLICT syscall.Errno = 14008 + ERROR_SXS_WRONG_SECTION_TYPE syscall.Errno = 14009 + ERROR_SXS_THREAD_QUERIES_DISABLED syscall.Errno = 14010 + ERROR_SXS_PROCESS_DEFAULT_ALREADY_SET syscall.Errno = 14011 + ERROR_SXS_UNKNOWN_ENCODING_GROUP syscall.Errno = 14012 + ERROR_SXS_UNKNOWN_ENCODING syscall.Errno = 14013 + ERROR_SXS_INVALID_XML_NAMESPACE_URI syscall.Errno = 14014 + ERROR_SXS_ROOT_MANIFEST_DEPENDENCY_NOT_INSTALLED syscall.Errno = 14015 + ERROR_SXS_LEAF_MANIFEST_DEPENDENCY_NOT_INSTALLED syscall.Errno = 14016 + ERROR_SXS_INVALID_ASSEMBLY_IDENTITY_ATTRIBUTE syscall.Errno = 14017 + ERROR_SXS_MANIFEST_MISSING_REQUIRED_DEFAULT_NAMESPACE syscall.Errno = 14018 + 
ERROR_SXS_MANIFEST_INVALID_REQUIRED_DEFAULT_NAMESPACE syscall.Errno = 14019 + ERROR_SXS_PRIVATE_MANIFEST_CROSS_PATH_WITH_REPARSE_POINT syscall.Errno = 14020 + ERROR_SXS_DUPLICATE_DLL_NAME syscall.Errno = 14021 + ERROR_SXS_DUPLICATE_WINDOWCLASS_NAME syscall.Errno = 14022 + ERROR_SXS_DUPLICATE_CLSID syscall.Errno = 14023 + ERROR_SXS_DUPLICATE_IID syscall.Errno = 14024 + ERROR_SXS_DUPLICATE_TLBID syscall.Errno = 14025 + ERROR_SXS_DUPLICATE_PROGID syscall.Errno = 14026 + ERROR_SXS_DUPLICATE_ASSEMBLY_NAME syscall.Errno = 14027 + ERROR_SXS_FILE_HASH_MISMATCH syscall.Errno = 14028 + ERROR_SXS_POLICY_PARSE_ERROR syscall.Errno = 14029 + ERROR_SXS_XML_E_MISSINGQUOTE syscall.Errno = 14030 + ERROR_SXS_XML_E_COMMENTSYNTAX syscall.Errno = 14031 + ERROR_SXS_XML_E_BADSTARTNAMECHAR syscall.Errno = 14032 + ERROR_SXS_XML_E_BADNAMECHAR syscall.Errno = 14033 + ERROR_SXS_XML_E_BADCHARINSTRING syscall.Errno = 14034 + ERROR_SXS_XML_E_XMLDECLSYNTAX syscall.Errno = 14035 + ERROR_SXS_XML_E_BADCHARDATA syscall.Errno = 14036 + ERROR_SXS_XML_E_MISSINGWHITESPACE syscall.Errno = 14037 + ERROR_SXS_XML_E_EXPECTINGTAGEND syscall.Errno = 14038 + ERROR_SXS_XML_E_MISSINGSEMICOLON syscall.Errno = 14039 + ERROR_SXS_XML_E_UNBALANCEDPAREN syscall.Errno = 14040 + ERROR_SXS_XML_E_INTERNALERROR syscall.Errno = 14041 + ERROR_SXS_XML_E_UNEXPECTED_WHITESPACE syscall.Errno = 14042 + ERROR_SXS_XML_E_INCOMPLETE_ENCODING syscall.Errno = 14043 + ERROR_SXS_XML_E_MISSING_PAREN syscall.Errno = 14044 + ERROR_SXS_XML_E_EXPECTINGCLOSEQUOTE syscall.Errno = 14045 + ERROR_SXS_XML_E_MULTIPLE_COLONS syscall.Errno = 14046 + ERROR_SXS_XML_E_INVALID_DECIMAL syscall.Errno = 14047 + ERROR_SXS_XML_E_INVALID_HEXIDECIMAL syscall.Errno = 14048 + ERROR_SXS_XML_E_INVALID_UNICODE syscall.Errno = 14049 + ERROR_SXS_XML_E_WHITESPACEORQUESTIONMARK syscall.Errno = 14050 + ERROR_SXS_XML_E_UNEXPECTEDENDTAG syscall.Errno = 14051 + ERROR_SXS_XML_E_UNCLOSEDTAG syscall.Errno = 14052 + ERROR_SXS_XML_E_DUPLICATEATTRIBUTE syscall.Errno = 14053 + ERROR_SXS_XML_E_MULTIPLEROOTS syscall.Errno = 14054 + ERROR_SXS_XML_E_INVALIDATROOTLEVEL syscall.Errno = 14055 + ERROR_SXS_XML_E_BADXMLDECL syscall.Errno = 14056 + ERROR_SXS_XML_E_MISSINGROOT syscall.Errno = 14057 + ERROR_SXS_XML_E_UNEXPECTEDEOF syscall.Errno = 14058 + ERROR_SXS_XML_E_BADPEREFINSUBSET syscall.Errno = 14059 + ERROR_SXS_XML_E_UNCLOSEDSTARTTAG syscall.Errno = 14060 + ERROR_SXS_XML_E_UNCLOSEDENDTAG syscall.Errno = 14061 + ERROR_SXS_XML_E_UNCLOSEDSTRING syscall.Errno = 14062 + ERROR_SXS_XML_E_UNCLOSEDCOMMENT syscall.Errno = 14063 + ERROR_SXS_XML_E_UNCLOSEDDECL syscall.Errno = 14064 + ERROR_SXS_XML_E_UNCLOSEDCDATA syscall.Errno = 14065 + ERROR_SXS_XML_E_RESERVEDNAMESPACE syscall.Errno = 14066 + ERROR_SXS_XML_E_INVALIDENCODING syscall.Errno = 14067 + ERROR_SXS_XML_E_INVALIDSWITCH syscall.Errno = 14068 + ERROR_SXS_XML_E_BADXMLCASE syscall.Errno = 14069 + ERROR_SXS_XML_E_INVALID_STANDALONE syscall.Errno = 14070 + ERROR_SXS_XML_E_UNEXPECTED_STANDALONE syscall.Errno = 14071 + ERROR_SXS_XML_E_INVALID_VERSION syscall.Errno = 14072 + ERROR_SXS_XML_E_MISSINGEQUALS syscall.Errno = 14073 + ERROR_SXS_PROTECTION_RECOVERY_FAILED syscall.Errno = 14074 + ERROR_SXS_PROTECTION_PUBLIC_KEY_TOO_SHORT syscall.Errno = 14075 + ERROR_SXS_PROTECTION_CATALOG_NOT_VALID syscall.Errno = 14076 + ERROR_SXS_UNTRANSLATABLE_HRESULT syscall.Errno = 14077 + ERROR_SXS_PROTECTION_CATALOG_FILE_MISSING syscall.Errno = 14078 + ERROR_SXS_MISSING_ASSEMBLY_IDENTITY_ATTRIBUTE syscall.Errno = 14079 + ERROR_SXS_INVALID_ASSEMBLY_IDENTITY_ATTRIBUTE_NAME syscall.Errno = 14080 
+ ERROR_SXS_ASSEMBLY_MISSING syscall.Errno = 14081 + ERROR_SXS_CORRUPT_ACTIVATION_STACK syscall.Errno = 14082 + ERROR_SXS_CORRUPTION syscall.Errno = 14083 + ERROR_SXS_EARLY_DEACTIVATION syscall.Errno = 14084 + ERROR_SXS_INVALID_DEACTIVATION syscall.Errno = 14085 + ERROR_SXS_MULTIPLE_DEACTIVATION syscall.Errno = 14086 + ERROR_SXS_PROCESS_TERMINATION_REQUESTED syscall.Errno = 14087 + ERROR_SXS_RELEASE_ACTIVATION_CONTEXT syscall.Errno = 14088 + ERROR_SXS_SYSTEM_DEFAULT_ACTIVATION_CONTEXT_EMPTY syscall.Errno = 14089 + ERROR_SXS_INVALID_IDENTITY_ATTRIBUTE_VALUE syscall.Errno = 14090 + ERROR_SXS_INVALID_IDENTITY_ATTRIBUTE_NAME syscall.Errno = 14091 + ERROR_SXS_IDENTITY_DUPLICATE_ATTRIBUTE syscall.Errno = 14092 + ERROR_SXS_IDENTITY_PARSE_ERROR syscall.Errno = 14093 + ERROR_MALFORMED_SUBSTITUTION_STRING syscall.Errno = 14094 + ERROR_SXS_INCORRECT_PUBLIC_KEY_TOKEN syscall.Errno = 14095 + ERROR_UNMAPPED_SUBSTITUTION_STRING syscall.Errno = 14096 + ERROR_SXS_ASSEMBLY_NOT_LOCKED syscall.Errno = 14097 + ERROR_SXS_COMPONENT_STORE_CORRUPT syscall.Errno = 14098 + ERROR_ADVANCED_INSTALLER_FAILED syscall.Errno = 14099 + ERROR_XML_ENCODING_MISMATCH syscall.Errno = 14100 + ERROR_SXS_MANIFEST_IDENTITY_SAME_BUT_CONTENTS_DIFFERENT syscall.Errno = 14101 + ERROR_SXS_IDENTITIES_DIFFERENT syscall.Errno = 14102 + ERROR_SXS_ASSEMBLY_IS_NOT_A_DEPLOYMENT syscall.Errno = 14103 + ERROR_SXS_FILE_NOT_PART_OF_ASSEMBLY syscall.Errno = 14104 + ERROR_SXS_MANIFEST_TOO_BIG syscall.Errno = 14105 + ERROR_SXS_SETTING_NOT_REGISTERED syscall.Errno = 14106 + ERROR_SXS_TRANSACTION_CLOSURE_INCOMPLETE syscall.Errno = 14107 + ERROR_SMI_PRIMITIVE_INSTALLER_FAILED syscall.Errno = 14108 + ERROR_GENERIC_COMMAND_FAILED syscall.Errno = 14109 + ERROR_SXS_FILE_HASH_MISSING syscall.Errno = 14110 + ERROR_SXS_DUPLICATE_ACTIVATABLE_CLASS syscall.Errno = 14111 + ERROR_EVT_INVALID_CHANNEL_PATH syscall.Errno = 15000 + ERROR_EVT_INVALID_QUERY syscall.Errno = 15001 + ERROR_EVT_PUBLISHER_METADATA_NOT_FOUND syscall.Errno = 15002 + ERROR_EVT_EVENT_TEMPLATE_NOT_FOUND syscall.Errno = 15003 + ERROR_EVT_INVALID_PUBLISHER_NAME syscall.Errno = 15004 + ERROR_EVT_INVALID_EVENT_DATA syscall.Errno = 15005 + ERROR_EVT_CHANNEL_NOT_FOUND syscall.Errno = 15007 + ERROR_EVT_MALFORMED_XML_TEXT syscall.Errno = 15008 + ERROR_EVT_SUBSCRIPTION_TO_DIRECT_CHANNEL syscall.Errno = 15009 + ERROR_EVT_CONFIGURATION_ERROR syscall.Errno = 15010 + ERROR_EVT_QUERY_RESULT_STALE syscall.Errno = 15011 + ERROR_EVT_QUERY_RESULT_INVALID_POSITION syscall.Errno = 15012 + ERROR_EVT_NON_VALIDATING_MSXML syscall.Errno = 15013 + ERROR_EVT_FILTER_ALREADYSCOPED syscall.Errno = 15014 + ERROR_EVT_FILTER_NOTELTSET syscall.Errno = 15015 + ERROR_EVT_FILTER_INVARG syscall.Errno = 15016 + ERROR_EVT_FILTER_INVTEST syscall.Errno = 15017 + ERROR_EVT_FILTER_INVTYPE syscall.Errno = 15018 + ERROR_EVT_FILTER_PARSEERR syscall.Errno = 15019 + ERROR_EVT_FILTER_UNSUPPORTEDOP syscall.Errno = 15020 + ERROR_EVT_FILTER_UNEXPECTEDTOKEN syscall.Errno = 15021 + ERROR_EVT_INVALID_OPERATION_OVER_ENABLED_DIRECT_CHANNEL syscall.Errno = 15022 + ERROR_EVT_INVALID_CHANNEL_PROPERTY_VALUE syscall.Errno = 15023 + ERROR_EVT_INVALID_PUBLISHER_PROPERTY_VALUE syscall.Errno = 15024 + ERROR_EVT_CHANNEL_CANNOT_ACTIVATE syscall.Errno = 15025 + ERROR_EVT_FILTER_TOO_COMPLEX syscall.Errno = 15026 + ERROR_EVT_MESSAGE_NOT_FOUND syscall.Errno = 15027 + ERROR_EVT_MESSAGE_ID_NOT_FOUND syscall.Errno = 15028 + ERROR_EVT_UNRESOLVED_VALUE_INSERT syscall.Errno = 15029 + ERROR_EVT_UNRESOLVED_PARAMETER_INSERT syscall.Errno = 15030 + ERROR_EVT_MAX_INSERTS_REACHED 
syscall.Errno = 15031 + ERROR_EVT_EVENT_DEFINITION_NOT_FOUND syscall.Errno = 15032 + ERROR_EVT_MESSAGE_LOCALE_NOT_FOUND syscall.Errno = 15033 + ERROR_EVT_VERSION_TOO_OLD syscall.Errno = 15034 + ERROR_EVT_VERSION_TOO_NEW syscall.Errno = 15035 + ERROR_EVT_CANNOT_OPEN_CHANNEL_OF_QUERY syscall.Errno = 15036 + ERROR_EVT_PUBLISHER_DISABLED syscall.Errno = 15037 + ERROR_EVT_FILTER_OUT_OF_RANGE syscall.Errno = 15038 + ERROR_EC_SUBSCRIPTION_CANNOT_ACTIVATE syscall.Errno = 15080 + ERROR_EC_LOG_DISABLED syscall.Errno = 15081 + ERROR_EC_CIRCULAR_FORWARDING syscall.Errno = 15082 + ERROR_EC_CREDSTORE_FULL syscall.Errno = 15083 + ERROR_EC_CRED_NOT_FOUND syscall.Errno = 15084 + ERROR_EC_NO_ACTIVE_CHANNEL syscall.Errno = 15085 + ERROR_MUI_FILE_NOT_FOUND syscall.Errno = 15100 + ERROR_MUI_INVALID_FILE syscall.Errno = 15101 + ERROR_MUI_INVALID_RC_CONFIG syscall.Errno = 15102 + ERROR_MUI_INVALID_LOCALE_NAME syscall.Errno = 15103 + ERROR_MUI_INVALID_ULTIMATEFALLBACK_NAME syscall.Errno = 15104 + ERROR_MUI_FILE_NOT_LOADED syscall.Errno = 15105 + ERROR_RESOURCE_ENUM_USER_STOP syscall.Errno = 15106 + ERROR_MUI_INTLSETTINGS_UILANG_NOT_INSTALLED syscall.Errno = 15107 + ERROR_MUI_INTLSETTINGS_INVALID_LOCALE_NAME syscall.Errno = 15108 + ERROR_MRM_RUNTIME_NO_DEFAULT_OR_NEUTRAL_RESOURCE syscall.Errno = 15110 + ERROR_MRM_INVALID_PRICONFIG syscall.Errno = 15111 + ERROR_MRM_INVALID_FILE_TYPE syscall.Errno = 15112 + ERROR_MRM_UNKNOWN_QUALIFIER syscall.Errno = 15113 + ERROR_MRM_INVALID_QUALIFIER_VALUE syscall.Errno = 15114 + ERROR_MRM_NO_CANDIDATE syscall.Errno = 15115 + ERROR_MRM_NO_MATCH_OR_DEFAULT_CANDIDATE syscall.Errno = 15116 + ERROR_MRM_RESOURCE_TYPE_MISMATCH syscall.Errno = 15117 + ERROR_MRM_DUPLICATE_MAP_NAME syscall.Errno = 15118 + ERROR_MRM_DUPLICATE_ENTRY syscall.Errno = 15119 + ERROR_MRM_INVALID_RESOURCE_IDENTIFIER syscall.Errno = 15120 + ERROR_MRM_FILEPATH_TOO_LONG syscall.Errno = 15121 + ERROR_MRM_UNSUPPORTED_DIRECTORY_TYPE syscall.Errno = 15122 + ERROR_MRM_INVALID_PRI_FILE syscall.Errno = 15126 + ERROR_MRM_NAMED_RESOURCE_NOT_FOUND syscall.Errno = 15127 + ERROR_MRM_MAP_NOT_FOUND syscall.Errno = 15135 + ERROR_MRM_UNSUPPORTED_PROFILE_TYPE syscall.Errno = 15136 + ERROR_MRM_INVALID_QUALIFIER_OPERATOR syscall.Errno = 15137 + ERROR_MRM_INDETERMINATE_QUALIFIER_VALUE syscall.Errno = 15138 + ERROR_MRM_AUTOMERGE_ENABLED syscall.Errno = 15139 + ERROR_MRM_TOO_MANY_RESOURCES syscall.Errno = 15140 + ERROR_MRM_UNSUPPORTED_FILE_TYPE_FOR_MERGE syscall.Errno = 15141 + ERROR_MRM_UNSUPPORTED_FILE_TYPE_FOR_LOAD_UNLOAD_PRI_FILE syscall.Errno = 15142 + ERROR_MRM_NO_CURRENT_VIEW_ON_THREAD syscall.Errno = 15143 + ERROR_DIFFERENT_PROFILE_RESOURCE_MANAGER_EXIST syscall.Errno = 15144 + ERROR_OPERATION_NOT_ALLOWED_FROM_SYSTEM_COMPONENT syscall.Errno = 15145 + ERROR_MRM_DIRECT_REF_TO_NON_DEFAULT_RESOURCE syscall.Errno = 15146 + ERROR_MRM_GENERATION_COUNT_MISMATCH syscall.Errno = 15147 + ERROR_PRI_MERGE_VERSION_MISMATCH syscall.Errno = 15148 + ERROR_PRI_MERGE_MISSING_SCHEMA syscall.Errno = 15149 + ERROR_PRI_MERGE_LOAD_FILE_FAILED syscall.Errno = 15150 + ERROR_PRI_MERGE_ADD_FILE_FAILED syscall.Errno = 15151 + ERROR_PRI_MERGE_WRITE_FILE_FAILED syscall.Errno = 15152 + ERROR_PRI_MERGE_MULTIPLE_PACKAGE_FAMILIES_NOT_ALLOWED syscall.Errno = 15153 + ERROR_PRI_MERGE_MULTIPLE_MAIN_PACKAGES_NOT_ALLOWED syscall.Errno = 15154 + ERROR_PRI_MERGE_BUNDLE_PACKAGES_NOT_ALLOWED syscall.Errno = 15155 + ERROR_PRI_MERGE_MAIN_PACKAGE_REQUIRED syscall.Errno = 15156 + ERROR_PRI_MERGE_RESOURCE_PACKAGE_REQUIRED syscall.Errno = 15157 + ERROR_PRI_MERGE_INVALID_FILE_NAME 
syscall.Errno = 15158 + ERROR_MRM_PACKAGE_NOT_FOUND syscall.Errno = 15159 + ERROR_MRM_MISSING_DEFAULT_LANGUAGE syscall.Errno = 15160 + ERROR_MCA_INVALID_CAPABILITIES_STRING syscall.Errno = 15200 + ERROR_MCA_INVALID_VCP_VERSION syscall.Errno = 15201 + ERROR_MCA_MONITOR_VIOLATES_MCCS_SPECIFICATION syscall.Errno = 15202 + ERROR_MCA_MCCS_VERSION_MISMATCH syscall.Errno = 15203 + ERROR_MCA_UNSUPPORTED_MCCS_VERSION syscall.Errno = 15204 + ERROR_MCA_INTERNAL_ERROR syscall.Errno = 15205 + ERROR_MCA_INVALID_TECHNOLOGY_TYPE_RETURNED syscall.Errno = 15206 + ERROR_MCA_UNSUPPORTED_COLOR_TEMPERATURE syscall.Errno = 15207 + ERROR_AMBIGUOUS_SYSTEM_DEVICE syscall.Errno = 15250 + ERROR_SYSTEM_DEVICE_NOT_FOUND syscall.Errno = 15299 + ERROR_HASH_NOT_SUPPORTED syscall.Errno = 15300 + ERROR_HASH_NOT_PRESENT syscall.Errno = 15301 + ERROR_SECONDARY_IC_PROVIDER_NOT_REGISTERED syscall.Errno = 15321 + ERROR_GPIO_CLIENT_INFORMATION_INVALID syscall.Errno = 15322 + ERROR_GPIO_VERSION_NOT_SUPPORTED syscall.Errno = 15323 + ERROR_GPIO_INVALID_REGISTRATION_PACKET syscall.Errno = 15324 + ERROR_GPIO_OPERATION_DENIED syscall.Errno = 15325 + ERROR_GPIO_INCOMPATIBLE_CONNECT_MODE syscall.Errno = 15326 + ERROR_GPIO_INTERRUPT_ALREADY_UNMASKED syscall.Errno = 15327 + ERROR_CANNOT_SWITCH_RUNLEVEL syscall.Errno = 15400 + ERROR_INVALID_RUNLEVEL_SETTING syscall.Errno = 15401 + ERROR_RUNLEVEL_SWITCH_TIMEOUT syscall.Errno = 15402 + ERROR_RUNLEVEL_SWITCH_AGENT_TIMEOUT syscall.Errno = 15403 + ERROR_RUNLEVEL_SWITCH_IN_PROGRESS syscall.Errno = 15404 + ERROR_SERVICES_FAILED_AUTOSTART syscall.Errno = 15405 + ERROR_COM_TASK_STOP_PENDING syscall.Errno = 15501 + ERROR_INSTALL_OPEN_PACKAGE_FAILED syscall.Errno = 15600 + ERROR_INSTALL_PACKAGE_NOT_FOUND syscall.Errno = 15601 + ERROR_INSTALL_INVALID_PACKAGE syscall.Errno = 15602 + ERROR_INSTALL_RESOLVE_DEPENDENCY_FAILED syscall.Errno = 15603 + ERROR_INSTALL_OUT_OF_DISK_SPACE syscall.Errno = 15604 + ERROR_INSTALL_NETWORK_FAILURE syscall.Errno = 15605 + ERROR_INSTALL_REGISTRATION_FAILURE syscall.Errno = 15606 + ERROR_INSTALL_DEREGISTRATION_FAILURE syscall.Errno = 15607 + ERROR_INSTALL_CANCEL syscall.Errno = 15608 + ERROR_INSTALL_FAILED syscall.Errno = 15609 + ERROR_REMOVE_FAILED syscall.Errno = 15610 + ERROR_PACKAGE_ALREADY_EXISTS syscall.Errno = 15611 + ERROR_NEEDS_REMEDIATION syscall.Errno = 15612 + ERROR_INSTALL_PREREQUISITE_FAILED syscall.Errno = 15613 + ERROR_PACKAGE_REPOSITORY_CORRUPTED syscall.Errno = 15614 + ERROR_INSTALL_POLICY_FAILURE syscall.Errno = 15615 + ERROR_PACKAGE_UPDATING syscall.Errno = 15616 + ERROR_DEPLOYMENT_BLOCKED_BY_POLICY syscall.Errno = 15617 + ERROR_PACKAGES_IN_USE syscall.Errno = 15618 + ERROR_RECOVERY_FILE_CORRUPT syscall.Errno = 15619 + ERROR_INVALID_STAGED_SIGNATURE syscall.Errno = 15620 + ERROR_DELETING_EXISTING_APPLICATIONDATA_STORE_FAILED syscall.Errno = 15621 + ERROR_INSTALL_PACKAGE_DOWNGRADE syscall.Errno = 15622 + ERROR_SYSTEM_NEEDS_REMEDIATION syscall.Errno = 15623 + ERROR_APPX_INTEGRITY_FAILURE_CLR_NGEN syscall.Errno = 15624 + ERROR_RESILIENCY_FILE_CORRUPT syscall.Errno = 15625 + ERROR_INSTALL_FIREWALL_SERVICE_NOT_RUNNING syscall.Errno = 15626 + ERROR_PACKAGE_MOVE_FAILED syscall.Errno = 15627 + ERROR_INSTALL_VOLUME_NOT_EMPTY syscall.Errno = 15628 + ERROR_INSTALL_VOLUME_OFFLINE syscall.Errno = 15629 + ERROR_INSTALL_VOLUME_CORRUPT syscall.Errno = 15630 + ERROR_NEEDS_REGISTRATION syscall.Errno = 15631 + ERROR_INSTALL_WRONG_PROCESSOR_ARCHITECTURE syscall.Errno = 15632 + ERROR_DEV_SIDELOAD_LIMIT_EXCEEDED syscall.Errno = 15633 + 
ERROR_INSTALL_OPTIONAL_PACKAGE_REQUIRES_MAIN_PACKAGE syscall.Errno = 15634 + ERROR_PACKAGE_NOT_SUPPORTED_ON_FILESYSTEM syscall.Errno = 15635 + ERROR_PACKAGE_MOVE_BLOCKED_BY_STREAMING syscall.Errno = 15636 + ERROR_INSTALL_OPTIONAL_PACKAGE_APPLICATIONID_NOT_UNIQUE syscall.Errno = 15637 + ERROR_PACKAGE_STAGING_ONHOLD syscall.Errno = 15638 + ERROR_INSTALL_INVALID_RELATED_SET_UPDATE syscall.Errno = 15639 + ERROR_INSTALL_OPTIONAL_PACKAGE_REQUIRES_MAIN_PACKAGE_FULLTRUST_CAPABILITY syscall.Errno = 15640 + ERROR_DEPLOYMENT_BLOCKED_BY_USER_LOG_OFF syscall.Errno = 15641 + ERROR_PROVISION_OPTIONAL_PACKAGE_REQUIRES_MAIN_PACKAGE_PROVISIONED syscall.Errno = 15642 + ERROR_PACKAGES_REPUTATION_CHECK_FAILED syscall.Errno = 15643 + ERROR_PACKAGES_REPUTATION_CHECK_TIMEDOUT syscall.Errno = 15644 + ERROR_DEPLOYMENT_OPTION_NOT_SUPPORTED syscall.Errno = 15645 + ERROR_APPINSTALLER_ACTIVATION_BLOCKED syscall.Errno = 15646 + ERROR_REGISTRATION_FROM_REMOTE_DRIVE_NOT_SUPPORTED syscall.Errno = 15647 + ERROR_APPX_RAW_DATA_WRITE_FAILED syscall.Errno = 15648 + ERROR_DEPLOYMENT_BLOCKED_BY_VOLUME_POLICY_PACKAGE syscall.Errno = 15649 + ERROR_DEPLOYMENT_BLOCKED_BY_VOLUME_POLICY_MACHINE syscall.Errno = 15650 + ERROR_DEPLOYMENT_BLOCKED_BY_PROFILE_POLICY syscall.Errno = 15651 + ERROR_DEPLOYMENT_FAILED_CONFLICTING_MUTABLE_PACKAGE_DIRECTORY syscall.Errno = 15652 + ERROR_SINGLETON_RESOURCE_INSTALLED_IN_ACTIVE_USER syscall.Errno = 15653 + ERROR_DIFFERENT_VERSION_OF_PACKAGED_SERVICE_INSTALLED syscall.Errno = 15654 + ERROR_SERVICE_EXISTS_AS_NON_PACKAGED_SERVICE syscall.Errno = 15655 + ERROR_PACKAGED_SERVICE_REQUIRES_ADMIN_PRIVILEGES syscall.Errno = 15656 + APPMODEL_ERROR_NO_PACKAGE syscall.Errno = 15700 + APPMODEL_ERROR_PACKAGE_RUNTIME_CORRUPT syscall.Errno = 15701 + APPMODEL_ERROR_PACKAGE_IDENTITY_CORRUPT syscall.Errno = 15702 + APPMODEL_ERROR_NO_APPLICATION syscall.Errno = 15703 + APPMODEL_ERROR_DYNAMIC_PROPERTY_READ_FAILED syscall.Errno = 15704 + APPMODEL_ERROR_DYNAMIC_PROPERTY_INVALID syscall.Errno = 15705 + APPMODEL_ERROR_PACKAGE_NOT_AVAILABLE syscall.Errno = 15706 + APPMODEL_ERROR_NO_MUTABLE_DIRECTORY syscall.Errno = 15707 + ERROR_STATE_LOAD_STORE_FAILED syscall.Errno = 15800 + ERROR_STATE_GET_VERSION_FAILED syscall.Errno = 15801 + ERROR_STATE_SET_VERSION_FAILED syscall.Errno = 15802 + ERROR_STATE_STRUCTURED_RESET_FAILED syscall.Errno = 15803 + ERROR_STATE_OPEN_CONTAINER_FAILED syscall.Errno = 15804 + ERROR_STATE_CREATE_CONTAINER_FAILED syscall.Errno = 15805 + ERROR_STATE_DELETE_CONTAINER_FAILED syscall.Errno = 15806 + ERROR_STATE_READ_SETTING_FAILED syscall.Errno = 15807 + ERROR_STATE_WRITE_SETTING_FAILED syscall.Errno = 15808 + ERROR_STATE_DELETE_SETTING_FAILED syscall.Errno = 15809 + ERROR_STATE_QUERY_SETTING_FAILED syscall.Errno = 15810 + ERROR_STATE_READ_COMPOSITE_SETTING_FAILED syscall.Errno = 15811 + ERROR_STATE_WRITE_COMPOSITE_SETTING_FAILED syscall.Errno = 15812 + ERROR_STATE_ENUMERATE_CONTAINER_FAILED syscall.Errno = 15813 + ERROR_STATE_ENUMERATE_SETTINGS_FAILED syscall.Errno = 15814 + ERROR_STATE_COMPOSITE_SETTING_VALUE_SIZE_LIMIT_EXCEEDED syscall.Errno = 15815 + ERROR_STATE_SETTING_VALUE_SIZE_LIMIT_EXCEEDED syscall.Errno = 15816 + ERROR_STATE_SETTING_NAME_SIZE_LIMIT_EXCEEDED syscall.Errno = 15817 + ERROR_STATE_CONTAINER_NAME_SIZE_LIMIT_EXCEEDED syscall.Errno = 15818 + ERROR_API_UNAVAILABLE syscall.Errno = 15841 + STORE_ERROR_UNLICENSED syscall.Errno = 15861 + STORE_ERROR_UNLICENSED_USER syscall.Errno = 15862 + STORE_ERROR_PENDING_COM_TRANSACTION syscall.Errno = 15863 + STORE_ERROR_LICENSE_REVOKED syscall.Errno = 15864 
+ SEVERITY_SUCCESS syscall.Errno = 0 + SEVERITY_ERROR syscall.Errno = 1 + FACILITY_NT_BIT = 0x10000000 + E_NOT_SET = ERROR_NOT_FOUND + E_NOT_VALID_STATE = ERROR_INVALID_STATE + E_NOT_SUFFICIENT_BUFFER = ERROR_INSUFFICIENT_BUFFER + E_TIME_SENSITIVE_THREAD = ERROR_TIME_SENSITIVE_THREAD + E_NO_TASK_QUEUE = ERROR_NO_TASK_QUEUE + NOERROR syscall.Errno = 0 + E_UNEXPECTED Handle = 0x8000FFFF + E_NOTIMPL Handle = 0x80004001 + E_OUTOFMEMORY Handle = 0x8007000E + E_INVALIDARG Handle = 0x80070057 + E_NOINTERFACE Handle = 0x80004002 + E_POINTER Handle = 0x80004003 + E_HANDLE Handle = 0x80070006 + E_ABORT Handle = 0x80004004 + E_FAIL Handle = 0x80004005 + E_ACCESSDENIED Handle = 0x80070005 + E_PENDING Handle = 0x8000000A + E_BOUNDS Handle = 0x8000000B + E_CHANGED_STATE Handle = 0x8000000C + E_ILLEGAL_STATE_CHANGE Handle = 0x8000000D + E_ILLEGAL_METHOD_CALL Handle = 0x8000000E + RO_E_METADATA_NAME_NOT_FOUND Handle = 0x8000000F + RO_E_METADATA_NAME_IS_NAMESPACE Handle = 0x80000010 + RO_E_METADATA_INVALID_TYPE_FORMAT Handle = 0x80000011 + RO_E_INVALID_METADATA_FILE Handle = 0x80000012 + RO_E_CLOSED Handle = 0x80000013 + RO_E_EXCLUSIVE_WRITE Handle = 0x80000014 + RO_E_CHANGE_NOTIFICATION_IN_PROGRESS Handle = 0x80000015 + RO_E_ERROR_STRING_NOT_FOUND Handle = 0x80000016 + E_STRING_NOT_NULL_TERMINATED Handle = 0x80000017 + E_ILLEGAL_DELEGATE_ASSIGNMENT Handle = 0x80000018 + E_ASYNC_OPERATION_NOT_STARTED Handle = 0x80000019 + E_APPLICATION_EXITING Handle = 0x8000001A + E_APPLICATION_VIEW_EXITING Handle = 0x8000001B + RO_E_MUST_BE_AGILE Handle = 0x8000001C + RO_E_UNSUPPORTED_FROM_MTA Handle = 0x8000001D + RO_E_COMMITTED Handle = 0x8000001E + RO_E_BLOCKED_CROSS_ASTA_CALL Handle = 0x8000001F + RO_E_CANNOT_ACTIVATE_FULL_TRUST_SERVER Handle = 0x80000020 + RO_E_CANNOT_ACTIVATE_UNIVERSAL_APPLICATION_SERVER Handle = 0x80000021 + CO_E_INIT_TLS Handle = 0x80004006 + CO_E_INIT_SHARED_ALLOCATOR Handle = 0x80004007 + CO_E_INIT_MEMORY_ALLOCATOR Handle = 0x80004008 + CO_E_INIT_CLASS_CACHE Handle = 0x80004009 + CO_E_INIT_RPC_CHANNEL Handle = 0x8000400A + CO_E_INIT_TLS_SET_CHANNEL_CONTROL Handle = 0x8000400B + CO_E_INIT_TLS_CHANNEL_CONTROL Handle = 0x8000400C + CO_E_INIT_UNACCEPTED_USER_ALLOCATOR Handle = 0x8000400D + CO_E_INIT_SCM_MUTEX_EXISTS Handle = 0x8000400E + CO_E_INIT_SCM_FILE_MAPPING_EXISTS Handle = 0x8000400F + CO_E_INIT_SCM_MAP_VIEW_OF_FILE Handle = 0x80004010 + CO_E_INIT_SCM_EXEC_FAILURE Handle = 0x80004011 + CO_E_INIT_ONLY_SINGLE_THREADED Handle = 0x80004012 + CO_E_CANT_REMOTE Handle = 0x80004013 + CO_E_BAD_SERVER_NAME Handle = 0x80004014 + CO_E_WRONG_SERVER_IDENTITY Handle = 0x80004015 + CO_E_OLE1DDE_DISABLED Handle = 0x80004016 + CO_E_RUNAS_SYNTAX Handle = 0x80004017 + CO_E_CREATEPROCESS_FAILURE Handle = 0x80004018 + CO_E_RUNAS_CREATEPROCESS_FAILURE Handle = 0x80004019 + CO_E_RUNAS_LOGON_FAILURE Handle = 0x8000401A + CO_E_LAUNCH_PERMSSION_DENIED Handle = 0x8000401B + CO_E_START_SERVICE_FAILURE Handle = 0x8000401C + CO_E_REMOTE_COMMUNICATION_FAILURE Handle = 0x8000401D + CO_E_SERVER_START_TIMEOUT Handle = 0x8000401E + CO_E_CLSREG_INCONSISTENT Handle = 0x8000401F + CO_E_IIDREG_INCONSISTENT Handle = 0x80004020 + CO_E_NOT_SUPPORTED Handle = 0x80004021 + CO_E_RELOAD_DLL Handle = 0x80004022 + CO_E_MSI_ERROR Handle = 0x80004023 + CO_E_ATTEMPT_TO_CREATE_OUTSIDE_CLIENT_CONTEXT Handle = 0x80004024 + CO_E_SERVER_PAUSED Handle = 0x80004025 + CO_E_SERVER_NOT_PAUSED Handle = 0x80004026 + CO_E_CLASS_DISABLED Handle = 0x80004027 + CO_E_CLRNOTAVAILABLE Handle = 0x80004028 + CO_E_ASYNC_WORK_REJECTED Handle = 0x80004029 + 
CO_E_SERVER_INIT_TIMEOUT Handle = 0x8000402A + CO_E_NO_SECCTX_IN_ACTIVATE Handle = 0x8000402B + CO_E_TRACKER_CONFIG Handle = 0x80004030 + CO_E_THREADPOOL_CONFIG Handle = 0x80004031 + CO_E_SXS_CONFIG Handle = 0x80004032 + CO_E_MALFORMED_SPN Handle = 0x80004033 + CO_E_UNREVOKED_REGISTRATION_ON_APARTMENT_SHUTDOWN Handle = 0x80004034 + CO_E_PREMATURE_STUB_RUNDOWN Handle = 0x80004035 + S_OK Handle = 0 + S_FALSE Handle = 1 + OLE_E_FIRST Handle = 0x80040000 + OLE_E_LAST Handle = 0x800400FF + OLE_S_FIRST Handle = 0x00040000 + OLE_S_LAST Handle = 0x000400FF + OLE_E_OLEVERB Handle = 0x80040000 + OLE_E_ADVF Handle = 0x80040001 + OLE_E_ENUM_NOMORE Handle = 0x80040002 + OLE_E_ADVISENOTSUPPORTED Handle = 0x80040003 + OLE_E_NOCONNECTION Handle = 0x80040004 + OLE_E_NOTRUNNING Handle = 0x80040005 + OLE_E_NOCACHE Handle = 0x80040006 + OLE_E_BLANK Handle = 0x80040007 + OLE_E_CLASSDIFF Handle = 0x80040008 + OLE_E_CANT_GETMONIKER Handle = 0x80040009 + OLE_E_CANT_BINDTOSOURCE Handle = 0x8004000A + OLE_E_STATIC Handle = 0x8004000B + OLE_E_PROMPTSAVECANCELLED Handle = 0x8004000C + OLE_E_INVALIDRECT Handle = 0x8004000D + OLE_E_WRONGCOMPOBJ Handle = 0x8004000E + OLE_E_INVALIDHWND Handle = 0x8004000F + OLE_E_NOT_INPLACEACTIVE Handle = 0x80040010 + OLE_E_CANTCONVERT Handle = 0x80040011 + OLE_E_NOSTORAGE Handle = 0x80040012 + DV_E_FORMATETC Handle = 0x80040064 + DV_E_DVTARGETDEVICE Handle = 0x80040065 + DV_E_STGMEDIUM Handle = 0x80040066 + DV_E_STATDATA Handle = 0x80040067 + DV_E_LINDEX Handle = 0x80040068 + DV_E_TYMED Handle = 0x80040069 + DV_E_CLIPFORMAT Handle = 0x8004006A + DV_E_DVASPECT Handle = 0x8004006B + DV_E_DVTARGETDEVICE_SIZE Handle = 0x8004006C + DV_E_NOIVIEWOBJECT Handle = 0x8004006D + DRAGDROP_E_FIRST syscall.Errno = 0x80040100 + DRAGDROP_E_LAST syscall.Errno = 0x8004010F + DRAGDROP_S_FIRST syscall.Errno = 0x00040100 + DRAGDROP_S_LAST syscall.Errno = 0x0004010F + DRAGDROP_E_NOTREGISTERED Handle = 0x80040100 + DRAGDROP_E_ALREADYREGISTERED Handle = 0x80040101 + DRAGDROP_E_INVALIDHWND Handle = 0x80040102 + DRAGDROP_E_CONCURRENT_DRAG_ATTEMPTED Handle = 0x80040103 + CLASSFACTORY_E_FIRST syscall.Errno = 0x80040110 + CLASSFACTORY_E_LAST syscall.Errno = 0x8004011F + CLASSFACTORY_S_FIRST syscall.Errno = 0x00040110 + CLASSFACTORY_S_LAST syscall.Errno = 0x0004011F + CLASS_E_NOAGGREGATION Handle = 0x80040110 + CLASS_E_CLASSNOTAVAILABLE Handle = 0x80040111 + CLASS_E_NOTLICENSED Handle = 0x80040112 + MARSHAL_E_FIRST syscall.Errno = 0x80040120 + MARSHAL_E_LAST syscall.Errno = 0x8004012F + MARSHAL_S_FIRST syscall.Errno = 0x00040120 + MARSHAL_S_LAST syscall.Errno = 0x0004012F + DATA_E_FIRST syscall.Errno = 0x80040130 + DATA_E_LAST syscall.Errno = 0x8004013F + DATA_S_FIRST syscall.Errno = 0x00040130 + DATA_S_LAST syscall.Errno = 0x0004013F + VIEW_E_FIRST syscall.Errno = 0x80040140 + VIEW_E_LAST syscall.Errno = 0x8004014F + VIEW_S_FIRST syscall.Errno = 0x00040140 + VIEW_S_LAST syscall.Errno = 0x0004014F + VIEW_E_DRAW Handle = 0x80040140 + REGDB_E_FIRST syscall.Errno = 0x80040150 + REGDB_E_LAST syscall.Errno = 0x8004015F + REGDB_S_FIRST syscall.Errno = 0x00040150 + REGDB_S_LAST syscall.Errno = 0x0004015F + REGDB_E_READREGDB Handle = 0x80040150 + REGDB_E_WRITEREGDB Handle = 0x80040151 + REGDB_E_KEYMISSING Handle = 0x80040152 + REGDB_E_INVALIDVALUE Handle = 0x80040153 + REGDB_E_CLASSNOTREG Handle = 0x80040154 + REGDB_E_IIDNOTREG Handle = 0x80040155 + REGDB_E_BADTHREADINGMODEL Handle = 0x80040156 + REGDB_E_PACKAGEPOLICYVIOLATION Handle = 0x80040157 + CAT_E_FIRST syscall.Errno = 0x80040160 + CAT_E_LAST syscall.Errno = 
0x80040161 + CAT_E_CATIDNOEXIST Handle = 0x80040160 + CAT_E_NODESCRIPTION Handle = 0x80040161 + CS_E_FIRST syscall.Errno = 0x80040164 + CS_E_LAST syscall.Errno = 0x8004016F + CS_E_PACKAGE_NOTFOUND Handle = 0x80040164 + CS_E_NOT_DELETABLE Handle = 0x80040165 + CS_E_CLASS_NOTFOUND Handle = 0x80040166 + CS_E_INVALID_VERSION Handle = 0x80040167 + CS_E_NO_CLASSSTORE Handle = 0x80040168 + CS_E_OBJECT_NOTFOUND Handle = 0x80040169 + CS_E_OBJECT_ALREADY_EXISTS Handle = 0x8004016A + CS_E_INVALID_PATH Handle = 0x8004016B + CS_E_NETWORK_ERROR Handle = 0x8004016C + CS_E_ADMIN_LIMIT_EXCEEDED Handle = 0x8004016D + CS_E_SCHEMA_MISMATCH Handle = 0x8004016E + CS_E_INTERNAL_ERROR Handle = 0x8004016F + CACHE_E_FIRST syscall.Errno = 0x80040170 + CACHE_E_LAST syscall.Errno = 0x8004017F + CACHE_S_FIRST syscall.Errno = 0x00040170 + CACHE_S_LAST syscall.Errno = 0x0004017F + CACHE_E_NOCACHE_UPDATED Handle = 0x80040170 + OLEOBJ_E_FIRST syscall.Errno = 0x80040180 + OLEOBJ_E_LAST syscall.Errno = 0x8004018F + OLEOBJ_S_FIRST syscall.Errno = 0x00040180 + OLEOBJ_S_LAST syscall.Errno = 0x0004018F + OLEOBJ_E_NOVERBS Handle = 0x80040180 + OLEOBJ_E_INVALIDVERB Handle = 0x80040181 + CLIENTSITE_E_FIRST syscall.Errno = 0x80040190 + CLIENTSITE_E_LAST syscall.Errno = 0x8004019F + CLIENTSITE_S_FIRST syscall.Errno = 0x00040190 + CLIENTSITE_S_LAST syscall.Errno = 0x0004019F + INPLACE_E_NOTUNDOABLE Handle = 0x800401A0 + INPLACE_E_NOTOOLSPACE Handle = 0x800401A1 + INPLACE_E_FIRST syscall.Errno = 0x800401A0 + INPLACE_E_LAST syscall.Errno = 0x800401AF + INPLACE_S_FIRST syscall.Errno = 0x000401A0 + INPLACE_S_LAST syscall.Errno = 0x000401AF + ENUM_E_FIRST syscall.Errno = 0x800401B0 + ENUM_E_LAST syscall.Errno = 0x800401BF + ENUM_S_FIRST syscall.Errno = 0x000401B0 + ENUM_S_LAST syscall.Errno = 0x000401BF + CONVERT10_E_FIRST syscall.Errno = 0x800401C0 + CONVERT10_E_LAST syscall.Errno = 0x800401CF + CONVERT10_S_FIRST syscall.Errno = 0x000401C0 + CONVERT10_S_LAST syscall.Errno = 0x000401CF + CONVERT10_E_OLESTREAM_GET Handle = 0x800401C0 + CONVERT10_E_OLESTREAM_PUT Handle = 0x800401C1 + CONVERT10_E_OLESTREAM_FMT Handle = 0x800401C2 + CONVERT10_E_OLESTREAM_BITMAP_TO_DIB Handle = 0x800401C3 + CONVERT10_E_STG_FMT Handle = 0x800401C4 + CONVERT10_E_STG_NO_STD_STREAM Handle = 0x800401C5 + CONVERT10_E_STG_DIB_TO_BITMAP Handle = 0x800401C6 + CLIPBRD_E_FIRST syscall.Errno = 0x800401D0 + CLIPBRD_E_LAST syscall.Errno = 0x800401DF + CLIPBRD_S_FIRST syscall.Errno = 0x000401D0 + CLIPBRD_S_LAST syscall.Errno = 0x000401DF + CLIPBRD_E_CANT_OPEN Handle = 0x800401D0 + CLIPBRD_E_CANT_EMPTY Handle = 0x800401D1 + CLIPBRD_E_CANT_SET Handle = 0x800401D2 + CLIPBRD_E_BAD_DATA Handle = 0x800401D3 + CLIPBRD_E_CANT_CLOSE Handle = 0x800401D4 + MK_E_FIRST syscall.Errno = 0x800401E0 + MK_E_LAST syscall.Errno = 0x800401EF + MK_S_FIRST syscall.Errno = 0x000401E0 + MK_S_LAST syscall.Errno = 0x000401EF + MK_E_CONNECTMANUALLY Handle = 0x800401E0 + MK_E_EXCEEDEDDEADLINE Handle = 0x800401E1 + MK_E_NEEDGENERIC Handle = 0x800401E2 + MK_E_UNAVAILABLE Handle = 0x800401E3 + MK_E_SYNTAX Handle = 0x800401E4 + MK_E_NOOBJECT Handle = 0x800401E5 + MK_E_INVALIDEXTENSION Handle = 0x800401E6 + MK_E_INTERMEDIATEINTERFACENOTSUPPORTED Handle = 0x800401E7 + MK_E_NOTBINDABLE Handle = 0x800401E8 + MK_E_NOTBOUND Handle = 0x800401E9 + MK_E_CANTOPENFILE Handle = 0x800401EA + MK_E_MUSTBOTHERUSER Handle = 0x800401EB + MK_E_NOINVERSE Handle = 0x800401EC + MK_E_NOSTORAGE Handle = 0x800401ED + MK_E_NOPREFIX Handle = 0x800401EE + MK_E_ENUMERATION_FAILED Handle = 0x800401EF + CO_E_FIRST syscall.Errno = 
[auto-generated hunk: Windows error-code constants (Handle and syscall.Errno values) from the vendored golang.org/x/sys/windows package — the CO_E_*, EVENT_E_*, TPC_E_*, XACT_*, CONTEXT_E_*, OLE_S_*, SCHED_*, APPX_E_*, DISP_E_*, TYPE_E_*, STG_*, RPC_E_*, NTE_*, SEC_E_*/SEC_I_*, CRYPT_E_*, OSS_*, CERTSRV_E_*, XENROLL_E_*, TRUST_E_*, MSSIPOTF_E_*, CERT_E_*, SPAPI_E_*, SCARD_*, COMADMIN_E_*, COMQC_E_*, WER_*, ERROR_FLT_*, DWM_*, ERROR_MONITOR_*, ERROR_GRAPHICS_*, NAP_E_*, and TPM_E_* families, one constant per added line]
= 0x80280033 + TPM_E_BAD_TYPE Handle = 0x80280034 + TPM_E_INVALID_RESOURCE Handle = 0x80280035 + TPM_E_NOTFIPS Handle = 0x80280036 + TPM_E_INVALID_FAMILY Handle = 0x80280037 + TPM_E_NO_NV_PERMISSION Handle = 0x80280038 + TPM_E_REQUIRES_SIGN Handle = 0x80280039 + TPM_E_KEY_NOTSUPPORTED Handle = 0x8028003A + TPM_E_AUTH_CONFLICT Handle = 0x8028003B + TPM_E_AREA_LOCKED Handle = 0x8028003C + TPM_E_BAD_LOCALITY Handle = 0x8028003D + TPM_E_READ_ONLY Handle = 0x8028003E + TPM_E_PER_NOWRITE Handle = 0x8028003F + TPM_E_FAMILYCOUNT Handle = 0x80280040 + TPM_E_WRITE_LOCKED Handle = 0x80280041 + TPM_E_BAD_ATTRIBUTES Handle = 0x80280042 + TPM_E_INVALID_STRUCTURE Handle = 0x80280043 + TPM_E_KEY_OWNER_CONTROL Handle = 0x80280044 + TPM_E_BAD_COUNTER Handle = 0x80280045 + TPM_E_NOT_FULLWRITE Handle = 0x80280046 + TPM_E_CONTEXT_GAP Handle = 0x80280047 + TPM_E_MAXNVWRITES Handle = 0x80280048 + TPM_E_NOOPERATOR Handle = 0x80280049 + TPM_E_RESOURCEMISSING Handle = 0x8028004A + TPM_E_DELEGATE_LOCK Handle = 0x8028004B + TPM_E_DELEGATE_FAMILY Handle = 0x8028004C + TPM_E_DELEGATE_ADMIN Handle = 0x8028004D + TPM_E_TRANSPORT_NOTEXCLUSIVE Handle = 0x8028004E + TPM_E_OWNER_CONTROL Handle = 0x8028004F + TPM_E_DAA_RESOURCES Handle = 0x80280050 + TPM_E_DAA_INPUT_DATA0 Handle = 0x80280051 + TPM_E_DAA_INPUT_DATA1 Handle = 0x80280052 + TPM_E_DAA_ISSUER_SETTINGS Handle = 0x80280053 + TPM_E_DAA_TPM_SETTINGS Handle = 0x80280054 + TPM_E_DAA_STAGE Handle = 0x80280055 + TPM_E_DAA_ISSUER_VALIDITY Handle = 0x80280056 + TPM_E_DAA_WRONG_W Handle = 0x80280057 + TPM_E_BAD_HANDLE Handle = 0x80280058 + TPM_E_BAD_DELEGATE Handle = 0x80280059 + TPM_E_BADCONTEXT Handle = 0x8028005A + TPM_E_TOOMANYCONTEXTS Handle = 0x8028005B + TPM_E_MA_TICKET_SIGNATURE Handle = 0x8028005C + TPM_E_MA_DESTINATION Handle = 0x8028005D + TPM_E_MA_SOURCE Handle = 0x8028005E + TPM_E_MA_AUTHORITY Handle = 0x8028005F + TPM_E_PERMANENTEK Handle = 0x80280061 + TPM_E_BAD_SIGNATURE Handle = 0x80280062 + TPM_E_NOCONTEXTSPACE Handle = 0x80280063 + TPM_20_E_ASYMMETRIC Handle = 0x80280081 + TPM_20_E_ATTRIBUTES Handle = 0x80280082 + TPM_20_E_HASH Handle = 0x80280083 + TPM_20_E_VALUE Handle = 0x80280084 + TPM_20_E_HIERARCHY Handle = 0x80280085 + TPM_20_E_KEY_SIZE Handle = 0x80280087 + TPM_20_E_MGF Handle = 0x80280088 + TPM_20_E_MODE Handle = 0x80280089 + TPM_20_E_TYPE Handle = 0x8028008A + TPM_20_E_HANDLE Handle = 0x8028008B + TPM_20_E_KDF Handle = 0x8028008C + TPM_20_E_RANGE Handle = 0x8028008D + TPM_20_E_AUTH_FAIL Handle = 0x8028008E + TPM_20_E_NONCE Handle = 0x8028008F + TPM_20_E_PP Handle = 0x80280090 + TPM_20_E_SCHEME Handle = 0x80280092 + TPM_20_E_SIZE Handle = 0x80280095 + TPM_20_E_SYMMETRIC Handle = 0x80280096 + TPM_20_E_TAG Handle = 0x80280097 + TPM_20_E_SELECTOR Handle = 0x80280098 + TPM_20_E_INSUFFICIENT Handle = 0x8028009A + TPM_20_E_SIGNATURE Handle = 0x8028009B + TPM_20_E_KEY Handle = 0x8028009C + TPM_20_E_POLICY_FAIL Handle = 0x8028009D + TPM_20_E_INTEGRITY Handle = 0x8028009F + TPM_20_E_TICKET Handle = 0x802800A0 + TPM_20_E_RESERVED_BITS Handle = 0x802800A1 + TPM_20_E_BAD_AUTH Handle = 0x802800A2 + TPM_20_E_EXPIRED Handle = 0x802800A3 + TPM_20_E_POLICY_CC Handle = 0x802800A4 + TPM_20_E_BINDING Handle = 0x802800A5 + TPM_20_E_CURVE Handle = 0x802800A6 + TPM_20_E_ECC_POINT Handle = 0x802800A7 + TPM_20_E_INITIALIZE Handle = 0x80280100 + TPM_20_E_FAILURE Handle = 0x80280101 + TPM_20_E_SEQUENCE Handle = 0x80280103 + TPM_20_E_PRIVATE Handle = 0x8028010B + TPM_20_E_HMAC Handle = 0x80280119 + TPM_20_E_DISABLED Handle = 0x80280120 + TPM_20_E_EXCLUSIVE Handle = 0x80280121 
+ TPM_20_E_ECC_CURVE Handle = 0x80280123 + TPM_20_E_AUTH_TYPE Handle = 0x80280124 + TPM_20_E_AUTH_MISSING Handle = 0x80280125 + TPM_20_E_POLICY Handle = 0x80280126 + TPM_20_E_PCR Handle = 0x80280127 + TPM_20_E_PCR_CHANGED Handle = 0x80280128 + TPM_20_E_UPGRADE Handle = 0x8028012D + TPM_20_E_TOO_MANY_CONTEXTS Handle = 0x8028012E + TPM_20_E_AUTH_UNAVAILABLE Handle = 0x8028012F + TPM_20_E_REBOOT Handle = 0x80280130 + TPM_20_E_UNBALANCED Handle = 0x80280131 + TPM_20_E_COMMAND_SIZE Handle = 0x80280142 + TPM_20_E_COMMAND_CODE Handle = 0x80280143 + TPM_20_E_AUTHSIZE Handle = 0x80280144 + TPM_20_E_AUTH_CONTEXT Handle = 0x80280145 + TPM_20_E_NV_RANGE Handle = 0x80280146 + TPM_20_E_NV_SIZE Handle = 0x80280147 + TPM_20_E_NV_LOCKED Handle = 0x80280148 + TPM_20_E_NV_AUTHORIZATION Handle = 0x80280149 + TPM_20_E_NV_UNINITIALIZED Handle = 0x8028014A + TPM_20_E_NV_SPACE Handle = 0x8028014B + TPM_20_E_NV_DEFINED Handle = 0x8028014C + TPM_20_E_BAD_CONTEXT Handle = 0x80280150 + TPM_20_E_CPHASH Handle = 0x80280151 + TPM_20_E_PARENT Handle = 0x80280152 + TPM_20_E_NEEDS_TEST Handle = 0x80280153 + TPM_20_E_NO_RESULT Handle = 0x80280154 + TPM_20_E_SENSITIVE Handle = 0x80280155 + TPM_E_COMMAND_BLOCKED Handle = 0x80280400 + TPM_E_INVALID_HANDLE Handle = 0x80280401 + TPM_E_DUPLICATE_VHANDLE Handle = 0x80280402 + TPM_E_EMBEDDED_COMMAND_BLOCKED Handle = 0x80280403 + TPM_E_EMBEDDED_COMMAND_UNSUPPORTED Handle = 0x80280404 + TPM_E_RETRY Handle = 0x80280800 + TPM_E_NEEDS_SELFTEST Handle = 0x80280801 + TPM_E_DOING_SELFTEST Handle = 0x80280802 + TPM_E_DEFEND_LOCK_RUNNING Handle = 0x80280803 + TPM_20_E_CONTEXT_GAP Handle = 0x80280901 + TPM_20_E_OBJECT_MEMORY Handle = 0x80280902 + TPM_20_E_SESSION_MEMORY Handle = 0x80280903 + TPM_20_E_MEMORY Handle = 0x80280904 + TPM_20_E_SESSION_HANDLES Handle = 0x80280905 + TPM_20_E_OBJECT_HANDLES Handle = 0x80280906 + TPM_20_E_LOCALITY Handle = 0x80280907 + TPM_20_E_YIELDED Handle = 0x80280908 + TPM_20_E_CANCELED Handle = 0x80280909 + TPM_20_E_TESTING Handle = 0x8028090A + TPM_20_E_NV_RATE Handle = 0x80280920 + TPM_20_E_LOCKOUT Handle = 0x80280921 + TPM_20_E_RETRY Handle = 0x80280922 + TPM_20_E_NV_UNAVAILABLE Handle = 0x80280923 + TBS_E_INTERNAL_ERROR Handle = 0x80284001 + TBS_E_BAD_PARAMETER Handle = 0x80284002 + TBS_E_INVALID_OUTPUT_POINTER Handle = 0x80284003 + TBS_E_INVALID_CONTEXT Handle = 0x80284004 + TBS_E_INSUFFICIENT_BUFFER Handle = 0x80284005 + TBS_E_IOERROR Handle = 0x80284006 + TBS_E_INVALID_CONTEXT_PARAM Handle = 0x80284007 + TBS_E_SERVICE_NOT_RUNNING Handle = 0x80284008 + TBS_E_TOO_MANY_TBS_CONTEXTS Handle = 0x80284009 + TBS_E_TOO_MANY_RESOURCES Handle = 0x8028400A + TBS_E_SERVICE_START_PENDING Handle = 0x8028400B + TBS_E_PPI_NOT_SUPPORTED Handle = 0x8028400C + TBS_E_COMMAND_CANCELED Handle = 0x8028400D + TBS_E_BUFFER_TOO_LARGE Handle = 0x8028400E + TBS_E_TPM_NOT_FOUND Handle = 0x8028400F + TBS_E_SERVICE_DISABLED Handle = 0x80284010 + TBS_E_NO_EVENT_LOG Handle = 0x80284011 + TBS_E_ACCESS_DENIED Handle = 0x80284012 + TBS_E_PROVISIONING_NOT_ALLOWED Handle = 0x80284013 + TBS_E_PPI_FUNCTION_UNSUPPORTED Handle = 0x80284014 + TBS_E_OWNERAUTH_NOT_FOUND Handle = 0x80284015 + TBS_E_PROVISIONING_INCOMPLETE Handle = 0x80284016 + TPMAPI_E_INVALID_STATE Handle = 0x80290100 + TPMAPI_E_NOT_ENOUGH_DATA Handle = 0x80290101 + TPMAPI_E_TOO_MUCH_DATA Handle = 0x80290102 + TPMAPI_E_INVALID_OUTPUT_POINTER Handle = 0x80290103 + TPMAPI_E_INVALID_PARAMETER Handle = 0x80290104 + TPMAPI_E_OUT_OF_MEMORY Handle = 0x80290105 + TPMAPI_E_BUFFER_TOO_SMALL Handle = 0x80290106 + TPMAPI_E_INTERNAL_ERROR Handle = 
0x80290107 + TPMAPI_E_ACCESS_DENIED Handle = 0x80290108 + TPMAPI_E_AUTHORIZATION_FAILED Handle = 0x80290109 + TPMAPI_E_INVALID_CONTEXT_HANDLE Handle = 0x8029010A + TPMAPI_E_TBS_COMMUNICATION_ERROR Handle = 0x8029010B + TPMAPI_E_TPM_COMMAND_ERROR Handle = 0x8029010C + TPMAPI_E_MESSAGE_TOO_LARGE Handle = 0x8029010D + TPMAPI_E_INVALID_ENCODING Handle = 0x8029010E + TPMAPI_E_INVALID_KEY_SIZE Handle = 0x8029010F + TPMAPI_E_ENCRYPTION_FAILED Handle = 0x80290110 + TPMAPI_E_INVALID_KEY_PARAMS Handle = 0x80290111 + TPMAPI_E_INVALID_MIGRATION_AUTHORIZATION_BLOB Handle = 0x80290112 + TPMAPI_E_INVALID_PCR_INDEX Handle = 0x80290113 + TPMAPI_E_INVALID_DELEGATE_BLOB Handle = 0x80290114 + TPMAPI_E_INVALID_CONTEXT_PARAMS Handle = 0x80290115 + TPMAPI_E_INVALID_KEY_BLOB Handle = 0x80290116 + TPMAPI_E_INVALID_PCR_DATA Handle = 0x80290117 + TPMAPI_E_INVALID_OWNER_AUTH Handle = 0x80290118 + TPMAPI_E_FIPS_RNG_CHECK_FAILED Handle = 0x80290119 + TPMAPI_E_EMPTY_TCG_LOG Handle = 0x8029011A + TPMAPI_E_INVALID_TCG_LOG_ENTRY Handle = 0x8029011B + TPMAPI_E_TCG_SEPARATOR_ABSENT Handle = 0x8029011C + TPMAPI_E_TCG_INVALID_DIGEST_ENTRY Handle = 0x8029011D + TPMAPI_E_POLICY_DENIES_OPERATION Handle = 0x8029011E + TPMAPI_E_NV_BITS_NOT_DEFINED Handle = 0x8029011F + TPMAPI_E_NV_BITS_NOT_READY Handle = 0x80290120 + TPMAPI_E_SEALING_KEY_NOT_AVAILABLE Handle = 0x80290121 + TPMAPI_E_NO_AUTHORIZATION_CHAIN_FOUND Handle = 0x80290122 + TPMAPI_E_SVN_COUNTER_NOT_AVAILABLE Handle = 0x80290123 + TPMAPI_E_OWNER_AUTH_NOT_NULL Handle = 0x80290124 + TPMAPI_E_ENDORSEMENT_AUTH_NOT_NULL Handle = 0x80290125 + TPMAPI_E_AUTHORIZATION_REVOKED Handle = 0x80290126 + TPMAPI_E_MALFORMED_AUTHORIZATION_KEY Handle = 0x80290127 + TPMAPI_E_AUTHORIZING_KEY_NOT_SUPPORTED Handle = 0x80290128 + TPMAPI_E_INVALID_AUTHORIZATION_SIGNATURE Handle = 0x80290129 + TPMAPI_E_MALFORMED_AUTHORIZATION_POLICY Handle = 0x8029012A + TPMAPI_E_MALFORMED_AUTHORIZATION_OTHER Handle = 0x8029012B + TPMAPI_E_SEALING_KEY_CHANGED Handle = 0x8029012C + TBSIMP_E_BUFFER_TOO_SMALL Handle = 0x80290200 + TBSIMP_E_CLEANUP_FAILED Handle = 0x80290201 + TBSIMP_E_INVALID_CONTEXT_HANDLE Handle = 0x80290202 + TBSIMP_E_INVALID_CONTEXT_PARAM Handle = 0x80290203 + TBSIMP_E_TPM_ERROR Handle = 0x80290204 + TBSIMP_E_HASH_BAD_KEY Handle = 0x80290205 + TBSIMP_E_DUPLICATE_VHANDLE Handle = 0x80290206 + TBSIMP_E_INVALID_OUTPUT_POINTER Handle = 0x80290207 + TBSIMP_E_INVALID_PARAMETER Handle = 0x80290208 + TBSIMP_E_RPC_INIT_FAILED Handle = 0x80290209 + TBSIMP_E_SCHEDULER_NOT_RUNNING Handle = 0x8029020A + TBSIMP_E_COMMAND_CANCELED Handle = 0x8029020B + TBSIMP_E_OUT_OF_MEMORY Handle = 0x8029020C + TBSIMP_E_LIST_NO_MORE_ITEMS Handle = 0x8029020D + TBSIMP_E_LIST_NOT_FOUND Handle = 0x8029020E + TBSIMP_E_NOT_ENOUGH_SPACE Handle = 0x8029020F + TBSIMP_E_NOT_ENOUGH_TPM_CONTEXTS Handle = 0x80290210 + TBSIMP_E_COMMAND_FAILED Handle = 0x80290211 + TBSIMP_E_UNKNOWN_ORDINAL Handle = 0x80290212 + TBSIMP_E_RESOURCE_EXPIRED Handle = 0x80290213 + TBSIMP_E_INVALID_RESOURCE Handle = 0x80290214 + TBSIMP_E_NOTHING_TO_UNLOAD Handle = 0x80290215 + TBSIMP_E_HASH_TABLE_FULL Handle = 0x80290216 + TBSIMP_E_TOO_MANY_TBS_CONTEXTS Handle = 0x80290217 + TBSIMP_E_TOO_MANY_RESOURCES Handle = 0x80290218 + TBSIMP_E_PPI_NOT_SUPPORTED Handle = 0x80290219 + TBSIMP_E_TPM_INCOMPATIBLE Handle = 0x8029021A + TBSIMP_E_NO_EVENT_LOG Handle = 0x8029021B + TPM_E_PPI_ACPI_FAILURE Handle = 0x80290300 + TPM_E_PPI_USER_ABORT Handle = 0x80290301 + TPM_E_PPI_BIOS_FAILURE Handle = 0x80290302 + TPM_E_PPI_NOT_SUPPORTED Handle = 0x80290303 + TPM_E_PPI_BLOCKED_IN_BIOS 
Handle = 0x80290304 + TPM_E_PCP_ERROR_MASK Handle = 0x80290400 + TPM_E_PCP_DEVICE_NOT_READY Handle = 0x80290401 + TPM_E_PCP_INVALID_HANDLE Handle = 0x80290402 + TPM_E_PCP_INVALID_PARAMETER Handle = 0x80290403 + TPM_E_PCP_FLAG_NOT_SUPPORTED Handle = 0x80290404 + TPM_E_PCP_NOT_SUPPORTED Handle = 0x80290405 + TPM_E_PCP_BUFFER_TOO_SMALL Handle = 0x80290406 + TPM_E_PCP_INTERNAL_ERROR Handle = 0x80290407 + TPM_E_PCP_AUTHENTICATION_FAILED Handle = 0x80290408 + TPM_E_PCP_AUTHENTICATION_IGNORED Handle = 0x80290409 + TPM_E_PCP_POLICY_NOT_FOUND Handle = 0x8029040A + TPM_E_PCP_PROFILE_NOT_FOUND Handle = 0x8029040B + TPM_E_PCP_VALIDATION_FAILED Handle = 0x8029040C + TPM_E_PCP_WRONG_PARENT Handle = 0x8029040E + TPM_E_KEY_NOT_LOADED Handle = 0x8029040F + TPM_E_NO_KEY_CERTIFICATION Handle = 0x80290410 + TPM_E_KEY_NOT_FINALIZED Handle = 0x80290411 + TPM_E_ATTESTATION_CHALLENGE_NOT_SET Handle = 0x80290412 + TPM_E_NOT_PCR_BOUND Handle = 0x80290413 + TPM_E_KEY_ALREADY_FINALIZED Handle = 0x80290414 + TPM_E_KEY_USAGE_POLICY_NOT_SUPPORTED Handle = 0x80290415 + TPM_E_KEY_USAGE_POLICY_INVALID Handle = 0x80290416 + TPM_E_SOFT_KEY_ERROR Handle = 0x80290417 + TPM_E_KEY_NOT_AUTHENTICATED Handle = 0x80290418 + TPM_E_PCP_KEY_NOT_AIK Handle = 0x80290419 + TPM_E_KEY_NOT_SIGNING_KEY Handle = 0x8029041A + TPM_E_LOCKED_OUT Handle = 0x8029041B + TPM_E_CLAIM_TYPE_NOT_SUPPORTED Handle = 0x8029041C + TPM_E_VERSION_NOT_SUPPORTED Handle = 0x8029041D + TPM_E_BUFFER_LENGTH_MISMATCH Handle = 0x8029041E + TPM_E_PCP_IFX_RSA_KEY_CREATION_BLOCKED Handle = 0x8029041F + TPM_E_PCP_TICKET_MISSING Handle = 0x80290420 + TPM_E_PCP_RAW_POLICY_NOT_SUPPORTED Handle = 0x80290421 + TPM_E_PCP_KEY_HANDLE_INVALIDATED Handle = 0x80290422 + TPM_E_PCP_UNSUPPORTED_PSS_SALT Handle = 0x40290423 + TPM_E_ZERO_EXHAUST_ENABLED Handle = 0x80290500 + PLA_E_DCS_NOT_FOUND Handle = 0x80300002 + PLA_E_DCS_IN_USE Handle = 0x803000AA + PLA_E_TOO_MANY_FOLDERS Handle = 0x80300045 + PLA_E_NO_MIN_DISK Handle = 0x80300070 + PLA_E_DCS_ALREADY_EXISTS Handle = 0x803000B7 + PLA_S_PROPERTY_IGNORED Handle = 0x00300100 + PLA_E_PROPERTY_CONFLICT Handle = 0x80300101 + PLA_E_DCS_SINGLETON_REQUIRED Handle = 0x80300102 + PLA_E_CREDENTIALS_REQUIRED Handle = 0x80300103 + PLA_E_DCS_NOT_RUNNING Handle = 0x80300104 + PLA_E_CONFLICT_INCL_EXCL_API Handle = 0x80300105 + PLA_E_NETWORK_EXE_NOT_VALID Handle = 0x80300106 + PLA_E_EXE_ALREADY_CONFIGURED Handle = 0x80300107 + PLA_E_EXE_PATH_NOT_VALID Handle = 0x80300108 + PLA_E_DC_ALREADY_EXISTS Handle = 0x80300109 + PLA_E_DCS_START_WAIT_TIMEOUT Handle = 0x8030010A + PLA_E_DC_START_WAIT_TIMEOUT Handle = 0x8030010B + PLA_E_REPORT_WAIT_TIMEOUT Handle = 0x8030010C + PLA_E_NO_DUPLICATES Handle = 0x8030010D + PLA_E_EXE_FULL_PATH_REQUIRED Handle = 0x8030010E + PLA_E_INVALID_SESSION_NAME Handle = 0x8030010F + PLA_E_PLA_CHANNEL_NOT_ENABLED Handle = 0x80300110 + PLA_E_TASKSCHED_CHANNEL_NOT_ENABLED Handle = 0x80300111 + PLA_E_RULES_MANAGER_FAILED Handle = 0x80300112 + PLA_E_CABAPI_FAILURE Handle = 0x80300113 + FVE_E_LOCKED_VOLUME Handle = 0x80310000 + FVE_E_NOT_ENCRYPTED Handle = 0x80310001 + FVE_E_NO_TPM_BIOS Handle = 0x80310002 + FVE_E_NO_MBR_METRIC Handle = 0x80310003 + FVE_E_NO_BOOTSECTOR_METRIC Handle = 0x80310004 + FVE_E_NO_BOOTMGR_METRIC Handle = 0x80310005 + FVE_E_WRONG_BOOTMGR Handle = 0x80310006 + FVE_E_SECURE_KEY_REQUIRED Handle = 0x80310007 + FVE_E_NOT_ACTIVATED Handle = 0x80310008 + FVE_E_ACTION_NOT_ALLOWED Handle = 0x80310009 + FVE_E_AD_SCHEMA_NOT_INSTALLED Handle = 0x8031000A + FVE_E_AD_INVALID_DATATYPE Handle = 0x8031000B + 
FVE_E_AD_INVALID_DATASIZE Handle = 0x8031000C + FVE_E_AD_NO_VALUES Handle = 0x8031000D + FVE_E_AD_ATTR_NOT_SET Handle = 0x8031000E + FVE_E_AD_GUID_NOT_FOUND Handle = 0x8031000F + FVE_E_BAD_INFORMATION Handle = 0x80310010 + FVE_E_TOO_SMALL Handle = 0x80310011 + FVE_E_SYSTEM_VOLUME Handle = 0x80310012 + FVE_E_FAILED_WRONG_FS Handle = 0x80310013 + FVE_E_BAD_PARTITION_SIZE Handle = 0x80310014 + FVE_E_NOT_SUPPORTED Handle = 0x80310015 + FVE_E_BAD_DATA Handle = 0x80310016 + FVE_E_VOLUME_NOT_BOUND Handle = 0x80310017 + FVE_E_TPM_NOT_OWNED Handle = 0x80310018 + FVE_E_NOT_DATA_VOLUME Handle = 0x80310019 + FVE_E_AD_INSUFFICIENT_BUFFER Handle = 0x8031001A + FVE_E_CONV_READ Handle = 0x8031001B + FVE_E_CONV_WRITE Handle = 0x8031001C + FVE_E_KEY_REQUIRED Handle = 0x8031001D + FVE_E_CLUSTERING_NOT_SUPPORTED Handle = 0x8031001E + FVE_E_VOLUME_BOUND_ALREADY Handle = 0x8031001F + FVE_E_OS_NOT_PROTECTED Handle = 0x80310020 + FVE_E_PROTECTION_DISABLED Handle = 0x80310021 + FVE_E_RECOVERY_KEY_REQUIRED Handle = 0x80310022 + FVE_E_FOREIGN_VOLUME Handle = 0x80310023 + FVE_E_OVERLAPPED_UPDATE Handle = 0x80310024 + FVE_E_TPM_SRK_AUTH_NOT_ZERO Handle = 0x80310025 + FVE_E_FAILED_SECTOR_SIZE Handle = 0x80310026 + FVE_E_FAILED_AUTHENTICATION Handle = 0x80310027 + FVE_E_NOT_OS_VOLUME Handle = 0x80310028 + FVE_E_AUTOUNLOCK_ENABLED Handle = 0x80310029 + FVE_E_WRONG_BOOTSECTOR Handle = 0x8031002A + FVE_E_WRONG_SYSTEM_FS Handle = 0x8031002B + FVE_E_POLICY_PASSWORD_REQUIRED Handle = 0x8031002C + FVE_E_CANNOT_SET_FVEK_ENCRYPTED Handle = 0x8031002D + FVE_E_CANNOT_ENCRYPT_NO_KEY Handle = 0x8031002E + FVE_E_BOOTABLE_CDDVD Handle = 0x80310030 + FVE_E_PROTECTOR_EXISTS Handle = 0x80310031 + FVE_E_RELATIVE_PATH Handle = 0x80310032 + FVE_E_PROTECTOR_NOT_FOUND Handle = 0x80310033 + FVE_E_INVALID_KEY_FORMAT Handle = 0x80310034 + FVE_E_INVALID_PASSWORD_FORMAT Handle = 0x80310035 + FVE_E_FIPS_RNG_CHECK_FAILED Handle = 0x80310036 + FVE_E_FIPS_PREVENTS_RECOVERY_PASSWORD Handle = 0x80310037 + FVE_E_FIPS_PREVENTS_EXTERNAL_KEY_EXPORT Handle = 0x80310038 + FVE_E_NOT_DECRYPTED Handle = 0x80310039 + FVE_E_INVALID_PROTECTOR_TYPE Handle = 0x8031003A + FVE_E_NO_PROTECTORS_TO_TEST Handle = 0x8031003B + FVE_E_KEYFILE_NOT_FOUND Handle = 0x8031003C + FVE_E_KEYFILE_INVALID Handle = 0x8031003D + FVE_E_KEYFILE_NO_VMK Handle = 0x8031003E + FVE_E_TPM_DISABLED Handle = 0x8031003F + FVE_E_NOT_ALLOWED_IN_SAFE_MODE Handle = 0x80310040 + FVE_E_TPM_INVALID_PCR Handle = 0x80310041 + FVE_E_TPM_NO_VMK Handle = 0x80310042 + FVE_E_PIN_INVALID Handle = 0x80310043 + FVE_E_AUTH_INVALID_APPLICATION Handle = 0x80310044 + FVE_E_AUTH_INVALID_CONFIG Handle = 0x80310045 + FVE_E_FIPS_DISABLE_PROTECTION_NOT_ALLOWED Handle = 0x80310046 + FVE_E_FS_NOT_EXTENDED Handle = 0x80310047 + FVE_E_FIRMWARE_TYPE_NOT_SUPPORTED Handle = 0x80310048 + FVE_E_NO_LICENSE Handle = 0x80310049 + FVE_E_NOT_ON_STACK Handle = 0x8031004A + FVE_E_FS_MOUNTED Handle = 0x8031004B + FVE_E_TOKEN_NOT_IMPERSONATED Handle = 0x8031004C + FVE_E_DRY_RUN_FAILED Handle = 0x8031004D + FVE_E_REBOOT_REQUIRED Handle = 0x8031004E + FVE_E_DEBUGGER_ENABLED Handle = 0x8031004F + FVE_E_RAW_ACCESS Handle = 0x80310050 + FVE_E_RAW_BLOCKED Handle = 0x80310051 + FVE_E_BCD_APPLICATIONS_PATH_INCORRECT Handle = 0x80310052 + FVE_E_NOT_ALLOWED_IN_VERSION Handle = 0x80310053 + FVE_E_NO_AUTOUNLOCK_MASTER_KEY Handle = 0x80310054 + FVE_E_MOR_FAILED Handle = 0x80310055 + FVE_E_HIDDEN_VOLUME Handle = 0x80310056 + FVE_E_TRANSIENT_STATE Handle = 0x80310057 + FVE_E_PUBKEY_NOT_ALLOWED Handle = 0x80310058 + FVE_E_VOLUME_HANDLE_OPEN Handle = 
0x80310059 + FVE_E_NO_FEATURE_LICENSE Handle = 0x8031005A + FVE_E_INVALID_STARTUP_OPTIONS Handle = 0x8031005B + FVE_E_POLICY_RECOVERY_PASSWORD_NOT_ALLOWED Handle = 0x8031005C + FVE_E_POLICY_RECOVERY_PASSWORD_REQUIRED Handle = 0x8031005D + FVE_E_POLICY_RECOVERY_KEY_NOT_ALLOWED Handle = 0x8031005E + FVE_E_POLICY_RECOVERY_KEY_REQUIRED Handle = 0x8031005F + FVE_E_POLICY_STARTUP_PIN_NOT_ALLOWED Handle = 0x80310060 + FVE_E_POLICY_STARTUP_PIN_REQUIRED Handle = 0x80310061 + FVE_E_POLICY_STARTUP_KEY_NOT_ALLOWED Handle = 0x80310062 + FVE_E_POLICY_STARTUP_KEY_REQUIRED Handle = 0x80310063 + FVE_E_POLICY_STARTUP_PIN_KEY_NOT_ALLOWED Handle = 0x80310064 + FVE_E_POLICY_STARTUP_PIN_KEY_REQUIRED Handle = 0x80310065 + FVE_E_POLICY_STARTUP_TPM_NOT_ALLOWED Handle = 0x80310066 + FVE_E_POLICY_STARTUP_TPM_REQUIRED Handle = 0x80310067 + FVE_E_POLICY_INVALID_PIN_LENGTH Handle = 0x80310068 + FVE_E_KEY_PROTECTOR_NOT_SUPPORTED Handle = 0x80310069 + FVE_E_POLICY_PASSPHRASE_NOT_ALLOWED Handle = 0x8031006A + FVE_E_POLICY_PASSPHRASE_REQUIRED Handle = 0x8031006B + FVE_E_FIPS_PREVENTS_PASSPHRASE Handle = 0x8031006C + FVE_E_OS_VOLUME_PASSPHRASE_NOT_ALLOWED Handle = 0x8031006D + FVE_E_INVALID_BITLOCKER_OID Handle = 0x8031006E + FVE_E_VOLUME_TOO_SMALL Handle = 0x8031006F + FVE_E_DV_NOT_SUPPORTED_ON_FS Handle = 0x80310070 + FVE_E_DV_NOT_ALLOWED_BY_GP Handle = 0x80310071 + FVE_E_POLICY_USER_CERTIFICATE_NOT_ALLOWED Handle = 0x80310072 + FVE_E_POLICY_USER_CERTIFICATE_REQUIRED Handle = 0x80310073 + FVE_E_POLICY_USER_CERT_MUST_BE_HW Handle = 0x80310074 + FVE_E_POLICY_USER_CONFIGURE_FDV_AUTOUNLOCK_NOT_ALLOWED Handle = 0x80310075 + FVE_E_POLICY_USER_CONFIGURE_RDV_AUTOUNLOCK_NOT_ALLOWED Handle = 0x80310076 + FVE_E_POLICY_USER_CONFIGURE_RDV_NOT_ALLOWED Handle = 0x80310077 + FVE_E_POLICY_USER_ENABLE_RDV_NOT_ALLOWED Handle = 0x80310078 + FVE_E_POLICY_USER_DISABLE_RDV_NOT_ALLOWED Handle = 0x80310079 + FVE_E_POLICY_INVALID_PASSPHRASE_LENGTH Handle = 0x80310080 + FVE_E_POLICY_PASSPHRASE_TOO_SIMPLE Handle = 0x80310081 + FVE_E_RECOVERY_PARTITION Handle = 0x80310082 + FVE_E_POLICY_CONFLICT_FDV_RK_OFF_AUK_ON Handle = 0x80310083 + FVE_E_POLICY_CONFLICT_RDV_RK_OFF_AUK_ON Handle = 0x80310084 + FVE_E_NON_BITLOCKER_OID Handle = 0x80310085 + FVE_E_POLICY_PROHIBITS_SELFSIGNED Handle = 0x80310086 + FVE_E_POLICY_CONFLICT_RO_AND_STARTUP_KEY_REQUIRED Handle = 0x80310087 + FVE_E_CONV_RECOVERY_FAILED Handle = 0x80310088 + FVE_E_VIRTUALIZED_SPACE_TOO_BIG Handle = 0x80310089 + FVE_E_POLICY_CONFLICT_OSV_RP_OFF_ADB_ON Handle = 0x80310090 + FVE_E_POLICY_CONFLICT_FDV_RP_OFF_ADB_ON Handle = 0x80310091 + FVE_E_POLICY_CONFLICT_RDV_RP_OFF_ADB_ON Handle = 0x80310092 + FVE_E_NON_BITLOCKER_KU Handle = 0x80310093 + FVE_E_PRIVATEKEY_AUTH_FAILED Handle = 0x80310094 + FVE_E_REMOVAL_OF_DRA_FAILED Handle = 0x80310095 + FVE_E_OPERATION_NOT_SUPPORTED_ON_VISTA_VOLUME Handle = 0x80310096 + FVE_E_CANT_LOCK_AUTOUNLOCK_ENABLED_VOLUME Handle = 0x80310097 + FVE_E_FIPS_HASH_KDF_NOT_ALLOWED Handle = 0x80310098 + FVE_E_ENH_PIN_INVALID Handle = 0x80310099 + FVE_E_INVALID_PIN_CHARS Handle = 0x8031009A + FVE_E_INVALID_DATUM_TYPE Handle = 0x8031009B + FVE_E_EFI_ONLY Handle = 0x8031009C + FVE_E_MULTIPLE_NKP_CERTS Handle = 0x8031009D + FVE_E_REMOVAL_OF_NKP_FAILED Handle = 0x8031009E + FVE_E_INVALID_NKP_CERT Handle = 0x8031009F + FVE_E_NO_EXISTING_PIN Handle = 0x803100A0 + FVE_E_PROTECTOR_CHANGE_PIN_MISMATCH Handle = 0x803100A1 + FVE_E_PIN_PROTECTOR_CHANGE_BY_STD_USER_DISALLOWED Handle = 0x803100A2 + FVE_E_PROTECTOR_CHANGE_MAX_PIN_CHANGE_ATTEMPTS_REACHED Handle = 0x803100A3 + 
FVE_E_POLICY_PASSPHRASE_REQUIRES_ASCII Handle = 0x803100A4 + FVE_E_FULL_ENCRYPTION_NOT_ALLOWED_ON_TP_STORAGE Handle = 0x803100A5 + FVE_E_WIPE_NOT_ALLOWED_ON_TP_STORAGE Handle = 0x803100A6 + FVE_E_KEY_LENGTH_NOT_SUPPORTED_BY_EDRIVE Handle = 0x803100A7 + FVE_E_NO_EXISTING_PASSPHRASE Handle = 0x803100A8 + FVE_E_PROTECTOR_CHANGE_PASSPHRASE_MISMATCH Handle = 0x803100A9 + FVE_E_PASSPHRASE_TOO_LONG Handle = 0x803100AA + FVE_E_NO_PASSPHRASE_WITH_TPM Handle = 0x803100AB + FVE_E_NO_TPM_WITH_PASSPHRASE Handle = 0x803100AC + FVE_E_NOT_ALLOWED_ON_CSV_STACK Handle = 0x803100AD + FVE_E_NOT_ALLOWED_ON_CLUSTER Handle = 0x803100AE + FVE_E_EDRIVE_NO_FAILOVER_TO_SW Handle = 0x803100AF + FVE_E_EDRIVE_BAND_IN_USE Handle = 0x803100B0 + FVE_E_EDRIVE_DISALLOWED_BY_GP Handle = 0x803100B1 + FVE_E_EDRIVE_INCOMPATIBLE_VOLUME Handle = 0x803100B2 + FVE_E_NOT_ALLOWED_TO_UPGRADE_WHILE_CONVERTING Handle = 0x803100B3 + FVE_E_EDRIVE_DV_NOT_SUPPORTED Handle = 0x803100B4 + FVE_E_NO_PREBOOT_KEYBOARD_DETECTED Handle = 0x803100B5 + FVE_E_NO_PREBOOT_KEYBOARD_OR_WINRE_DETECTED Handle = 0x803100B6 + FVE_E_POLICY_REQUIRES_STARTUP_PIN_ON_TOUCH_DEVICE Handle = 0x803100B7 + FVE_E_POLICY_REQUIRES_RECOVERY_PASSWORD_ON_TOUCH_DEVICE Handle = 0x803100B8 + FVE_E_WIPE_CANCEL_NOT_APPLICABLE Handle = 0x803100B9 + FVE_E_SECUREBOOT_DISABLED Handle = 0x803100BA + FVE_E_SECUREBOOT_CONFIGURATION_INVALID Handle = 0x803100BB + FVE_E_EDRIVE_DRY_RUN_FAILED Handle = 0x803100BC + FVE_E_SHADOW_COPY_PRESENT Handle = 0x803100BD + FVE_E_POLICY_INVALID_ENHANCED_BCD_SETTINGS Handle = 0x803100BE + FVE_E_EDRIVE_INCOMPATIBLE_FIRMWARE Handle = 0x803100BF + FVE_E_PROTECTOR_CHANGE_MAX_PASSPHRASE_CHANGE_ATTEMPTS_REACHED Handle = 0x803100C0 + FVE_E_PASSPHRASE_PROTECTOR_CHANGE_BY_STD_USER_DISALLOWED Handle = 0x803100C1 + FVE_E_LIVEID_ACCOUNT_SUSPENDED Handle = 0x803100C2 + FVE_E_LIVEID_ACCOUNT_BLOCKED Handle = 0x803100C3 + FVE_E_NOT_PROVISIONED_ON_ALL_VOLUMES Handle = 0x803100C4 + FVE_E_DE_FIXED_DATA_NOT_SUPPORTED Handle = 0x803100C5 + FVE_E_DE_HARDWARE_NOT_COMPLIANT Handle = 0x803100C6 + FVE_E_DE_WINRE_NOT_CONFIGURED Handle = 0x803100C7 + FVE_E_DE_PROTECTION_SUSPENDED Handle = 0x803100C8 + FVE_E_DE_OS_VOLUME_NOT_PROTECTED Handle = 0x803100C9 + FVE_E_DE_DEVICE_LOCKEDOUT Handle = 0x803100CA + FVE_E_DE_PROTECTION_NOT_YET_ENABLED Handle = 0x803100CB + FVE_E_INVALID_PIN_CHARS_DETAILED Handle = 0x803100CC + FVE_E_DEVICE_LOCKOUT_COUNTER_UNAVAILABLE Handle = 0x803100CD + FVE_E_DEVICELOCKOUT_COUNTER_MISMATCH Handle = 0x803100CE + FVE_E_BUFFER_TOO_LARGE Handle = 0x803100CF + FVE_E_NO_SUCH_CAPABILITY_ON_TARGET Handle = 0x803100D0 + FVE_E_DE_PREVENTED_FOR_OS Handle = 0x803100D1 + FVE_E_DE_VOLUME_OPTED_OUT Handle = 0x803100D2 + FVE_E_DE_VOLUME_NOT_SUPPORTED Handle = 0x803100D3 + FVE_E_EOW_NOT_SUPPORTED_IN_VERSION Handle = 0x803100D4 + FVE_E_ADBACKUP_NOT_ENABLED Handle = 0x803100D5 + FVE_E_VOLUME_EXTEND_PREVENTS_EOW_DECRYPT Handle = 0x803100D6 + FVE_E_NOT_DE_VOLUME Handle = 0x803100D7 + FVE_E_PROTECTION_CANNOT_BE_DISABLED Handle = 0x803100D8 + FVE_E_OSV_KSR_NOT_ALLOWED Handle = 0x803100D9 + FVE_E_AD_BACKUP_REQUIRED_POLICY_NOT_SET_OS_DRIVE Handle = 0x803100DA + FVE_E_AD_BACKUP_REQUIRED_POLICY_NOT_SET_FIXED_DRIVE Handle = 0x803100DB + FVE_E_AD_BACKUP_REQUIRED_POLICY_NOT_SET_REMOVABLE_DRIVE Handle = 0x803100DC + FVE_E_KEY_ROTATION_NOT_SUPPORTED Handle = 0x803100DD + FVE_E_EXECUTE_REQUEST_SENT_TOO_SOON Handle = 0x803100DE + FVE_E_KEY_ROTATION_NOT_ENABLED Handle = 0x803100DF + FVE_E_DEVICE_NOT_JOINED Handle = 0x803100E0 + FWP_E_CALLOUT_NOT_FOUND Handle = 0x80320001 + 
FWP_E_CONDITION_NOT_FOUND Handle = 0x80320002 + FWP_E_FILTER_NOT_FOUND Handle = 0x80320003 + FWP_E_LAYER_NOT_FOUND Handle = 0x80320004 + FWP_E_PROVIDER_NOT_FOUND Handle = 0x80320005 + FWP_E_PROVIDER_CONTEXT_NOT_FOUND Handle = 0x80320006 + FWP_E_SUBLAYER_NOT_FOUND Handle = 0x80320007 + FWP_E_NOT_FOUND Handle = 0x80320008 + FWP_E_ALREADY_EXISTS Handle = 0x80320009 + FWP_E_IN_USE Handle = 0x8032000A + FWP_E_DYNAMIC_SESSION_IN_PROGRESS Handle = 0x8032000B + FWP_E_WRONG_SESSION Handle = 0x8032000C + FWP_E_NO_TXN_IN_PROGRESS Handle = 0x8032000D + FWP_E_TXN_IN_PROGRESS Handle = 0x8032000E + FWP_E_TXN_ABORTED Handle = 0x8032000F + FWP_E_SESSION_ABORTED Handle = 0x80320010 + FWP_E_INCOMPATIBLE_TXN Handle = 0x80320011 + FWP_E_TIMEOUT Handle = 0x80320012 + FWP_E_NET_EVENTS_DISABLED Handle = 0x80320013 + FWP_E_INCOMPATIBLE_LAYER Handle = 0x80320014 + FWP_E_KM_CLIENTS_ONLY Handle = 0x80320015 + FWP_E_LIFETIME_MISMATCH Handle = 0x80320016 + FWP_E_BUILTIN_OBJECT Handle = 0x80320017 + FWP_E_TOO_MANY_CALLOUTS Handle = 0x80320018 + FWP_E_NOTIFICATION_DROPPED Handle = 0x80320019 + FWP_E_TRAFFIC_MISMATCH Handle = 0x8032001A + FWP_E_INCOMPATIBLE_SA_STATE Handle = 0x8032001B + FWP_E_NULL_POINTER Handle = 0x8032001C + FWP_E_INVALID_ENUMERATOR Handle = 0x8032001D + FWP_E_INVALID_FLAGS Handle = 0x8032001E + FWP_E_INVALID_NET_MASK Handle = 0x8032001F + FWP_E_INVALID_RANGE Handle = 0x80320020 + FWP_E_INVALID_INTERVAL Handle = 0x80320021 + FWP_E_ZERO_LENGTH_ARRAY Handle = 0x80320022 + FWP_E_NULL_DISPLAY_NAME Handle = 0x80320023 + FWP_E_INVALID_ACTION_TYPE Handle = 0x80320024 + FWP_E_INVALID_WEIGHT Handle = 0x80320025 + FWP_E_MATCH_TYPE_MISMATCH Handle = 0x80320026 + FWP_E_TYPE_MISMATCH Handle = 0x80320027 + FWP_E_OUT_OF_BOUNDS Handle = 0x80320028 + FWP_E_RESERVED Handle = 0x80320029 + FWP_E_DUPLICATE_CONDITION Handle = 0x8032002A + FWP_E_DUPLICATE_KEYMOD Handle = 0x8032002B + FWP_E_ACTION_INCOMPATIBLE_WITH_LAYER Handle = 0x8032002C + FWP_E_ACTION_INCOMPATIBLE_WITH_SUBLAYER Handle = 0x8032002D + FWP_E_CONTEXT_INCOMPATIBLE_WITH_LAYER Handle = 0x8032002E + FWP_E_CONTEXT_INCOMPATIBLE_WITH_CALLOUT Handle = 0x8032002F + FWP_E_INCOMPATIBLE_AUTH_METHOD Handle = 0x80320030 + FWP_E_INCOMPATIBLE_DH_GROUP Handle = 0x80320031 + FWP_E_EM_NOT_SUPPORTED Handle = 0x80320032 + FWP_E_NEVER_MATCH Handle = 0x80320033 + FWP_E_PROVIDER_CONTEXT_MISMATCH Handle = 0x80320034 + FWP_E_INVALID_PARAMETER Handle = 0x80320035 + FWP_E_TOO_MANY_SUBLAYERS Handle = 0x80320036 + FWP_E_CALLOUT_NOTIFICATION_FAILED Handle = 0x80320037 + FWP_E_INVALID_AUTH_TRANSFORM Handle = 0x80320038 + FWP_E_INVALID_CIPHER_TRANSFORM Handle = 0x80320039 + FWP_E_INCOMPATIBLE_CIPHER_TRANSFORM Handle = 0x8032003A + FWP_E_INVALID_TRANSFORM_COMBINATION Handle = 0x8032003B + FWP_E_DUPLICATE_AUTH_METHOD Handle = 0x8032003C + FWP_E_INVALID_TUNNEL_ENDPOINT Handle = 0x8032003D + FWP_E_L2_DRIVER_NOT_READY Handle = 0x8032003E + FWP_E_KEY_DICTATOR_ALREADY_REGISTERED Handle = 0x8032003F + FWP_E_KEY_DICTATION_INVALID_KEYING_MATERIAL Handle = 0x80320040 + FWP_E_CONNECTIONS_DISABLED Handle = 0x80320041 + FWP_E_INVALID_DNS_NAME Handle = 0x80320042 + FWP_E_STILL_ON Handle = 0x80320043 + FWP_E_IKEEXT_NOT_RUNNING Handle = 0x80320044 + FWP_E_DROP_NOICMP Handle = 0x80320104 + WS_S_ASYNC Handle = 0x003D0000 + WS_S_END Handle = 0x003D0001 + WS_E_INVALID_FORMAT Handle = 0x803D0000 + WS_E_OBJECT_FAULTED Handle = 0x803D0001 + WS_E_NUMERIC_OVERFLOW Handle = 0x803D0002 + WS_E_INVALID_OPERATION Handle = 0x803D0003 + WS_E_OPERATION_ABORTED Handle = 0x803D0004 + WS_E_ENDPOINT_ACCESS_DENIED Handle = 
0x803D0005 + WS_E_OPERATION_TIMED_OUT Handle = 0x803D0006 + WS_E_OPERATION_ABANDONED Handle = 0x803D0007 + WS_E_QUOTA_EXCEEDED Handle = 0x803D0008 + WS_E_NO_TRANSLATION_AVAILABLE Handle = 0x803D0009 + WS_E_SECURITY_VERIFICATION_FAILURE Handle = 0x803D000A + WS_E_ADDRESS_IN_USE Handle = 0x803D000B + WS_E_ADDRESS_NOT_AVAILABLE Handle = 0x803D000C + WS_E_ENDPOINT_NOT_FOUND Handle = 0x803D000D + WS_E_ENDPOINT_NOT_AVAILABLE Handle = 0x803D000E + WS_E_ENDPOINT_FAILURE Handle = 0x803D000F + WS_E_ENDPOINT_UNREACHABLE Handle = 0x803D0010 + WS_E_ENDPOINT_ACTION_NOT_SUPPORTED Handle = 0x803D0011 + WS_E_ENDPOINT_TOO_BUSY Handle = 0x803D0012 + WS_E_ENDPOINT_FAULT_RECEIVED Handle = 0x803D0013 + WS_E_ENDPOINT_DISCONNECTED Handle = 0x803D0014 + WS_E_PROXY_FAILURE Handle = 0x803D0015 + WS_E_PROXY_ACCESS_DENIED Handle = 0x803D0016 + WS_E_NOT_SUPPORTED Handle = 0x803D0017 + WS_E_PROXY_REQUIRES_BASIC_AUTH Handle = 0x803D0018 + WS_E_PROXY_REQUIRES_DIGEST_AUTH Handle = 0x803D0019 + WS_E_PROXY_REQUIRES_NTLM_AUTH Handle = 0x803D001A + WS_E_PROXY_REQUIRES_NEGOTIATE_AUTH Handle = 0x803D001B + WS_E_SERVER_REQUIRES_BASIC_AUTH Handle = 0x803D001C + WS_E_SERVER_REQUIRES_DIGEST_AUTH Handle = 0x803D001D + WS_E_SERVER_REQUIRES_NTLM_AUTH Handle = 0x803D001E + WS_E_SERVER_REQUIRES_NEGOTIATE_AUTH Handle = 0x803D001F + WS_E_INVALID_ENDPOINT_URL Handle = 0x803D0020 + WS_E_OTHER Handle = 0x803D0021 + WS_E_SECURITY_TOKEN_EXPIRED Handle = 0x803D0022 + WS_E_SECURITY_SYSTEM_FAILURE Handle = 0x803D0023 + ERROR_NDIS_INTERFACE_CLOSING syscall.Errno = 0x80340002 + ERROR_NDIS_BAD_VERSION syscall.Errno = 0x80340004 + ERROR_NDIS_BAD_CHARACTERISTICS syscall.Errno = 0x80340005 + ERROR_NDIS_ADAPTER_NOT_FOUND syscall.Errno = 0x80340006 + ERROR_NDIS_OPEN_FAILED syscall.Errno = 0x80340007 + ERROR_NDIS_DEVICE_FAILED syscall.Errno = 0x80340008 + ERROR_NDIS_MULTICAST_FULL syscall.Errno = 0x80340009 + ERROR_NDIS_MULTICAST_EXISTS syscall.Errno = 0x8034000A + ERROR_NDIS_MULTICAST_NOT_FOUND syscall.Errno = 0x8034000B + ERROR_NDIS_REQUEST_ABORTED syscall.Errno = 0x8034000C + ERROR_NDIS_RESET_IN_PROGRESS syscall.Errno = 0x8034000D + ERROR_NDIS_NOT_SUPPORTED syscall.Errno = 0x803400BB + ERROR_NDIS_INVALID_PACKET syscall.Errno = 0x8034000F + ERROR_NDIS_ADAPTER_NOT_READY syscall.Errno = 0x80340011 + ERROR_NDIS_INVALID_LENGTH syscall.Errno = 0x80340014 + ERROR_NDIS_INVALID_DATA syscall.Errno = 0x80340015 + ERROR_NDIS_BUFFER_TOO_SHORT syscall.Errno = 0x80340016 + ERROR_NDIS_INVALID_OID syscall.Errno = 0x80340017 + ERROR_NDIS_ADAPTER_REMOVED syscall.Errno = 0x80340018 + ERROR_NDIS_UNSUPPORTED_MEDIA syscall.Errno = 0x80340019 + ERROR_NDIS_GROUP_ADDRESS_IN_USE syscall.Errno = 0x8034001A + ERROR_NDIS_FILE_NOT_FOUND syscall.Errno = 0x8034001B + ERROR_NDIS_ERROR_READING_FILE syscall.Errno = 0x8034001C + ERROR_NDIS_ALREADY_MAPPED syscall.Errno = 0x8034001D + ERROR_NDIS_RESOURCE_CONFLICT syscall.Errno = 0x8034001E + ERROR_NDIS_MEDIA_DISCONNECTED syscall.Errno = 0x8034001F + ERROR_NDIS_INVALID_ADDRESS syscall.Errno = 0x80340022 + ERROR_NDIS_INVALID_DEVICE_REQUEST syscall.Errno = 0x80340010 + ERROR_NDIS_PAUSED syscall.Errno = 0x8034002A + ERROR_NDIS_INTERFACE_NOT_FOUND syscall.Errno = 0x8034002B + ERROR_NDIS_UNSUPPORTED_REVISION syscall.Errno = 0x8034002C + ERROR_NDIS_INVALID_PORT syscall.Errno = 0x8034002D + ERROR_NDIS_INVALID_PORT_STATE syscall.Errno = 0x8034002E + ERROR_NDIS_LOW_POWER_STATE syscall.Errno = 0x8034002F + ERROR_NDIS_REINIT_REQUIRED syscall.Errno = 0x80340030 + ERROR_NDIS_NO_QUEUES syscall.Errno = 0x80340031 + ERROR_NDIS_DOT11_AUTO_CONFIG_ENABLED 
syscall.Errno = 0x80342000 + ERROR_NDIS_DOT11_MEDIA_IN_USE syscall.Errno = 0x80342001 + ERROR_NDIS_DOT11_POWER_STATE_INVALID syscall.Errno = 0x80342002 + ERROR_NDIS_PM_WOL_PATTERN_LIST_FULL syscall.Errno = 0x80342003 + ERROR_NDIS_PM_PROTOCOL_OFFLOAD_LIST_FULL syscall.Errno = 0x80342004 + ERROR_NDIS_DOT11_AP_CHANNEL_CURRENTLY_NOT_AVAILABLE syscall.Errno = 0x80342005 + ERROR_NDIS_DOT11_AP_BAND_CURRENTLY_NOT_AVAILABLE syscall.Errno = 0x80342006 + ERROR_NDIS_DOT11_AP_CHANNEL_NOT_ALLOWED syscall.Errno = 0x80342007 + ERROR_NDIS_DOT11_AP_BAND_NOT_ALLOWED syscall.Errno = 0x80342008 + ERROR_NDIS_INDICATION_REQUIRED syscall.Errno = 0x00340001 + ERROR_NDIS_OFFLOAD_POLICY syscall.Errno = 0xC034100F + ERROR_NDIS_OFFLOAD_CONNECTION_REJECTED syscall.Errno = 0xC0341012 + ERROR_NDIS_OFFLOAD_PATH_REJECTED syscall.Errno = 0xC0341013 + ERROR_HV_INVALID_HYPERCALL_CODE syscall.Errno = 0xC0350002 + ERROR_HV_INVALID_HYPERCALL_INPUT syscall.Errno = 0xC0350003 + ERROR_HV_INVALID_ALIGNMENT syscall.Errno = 0xC0350004 + ERROR_HV_INVALID_PARAMETER syscall.Errno = 0xC0350005 + ERROR_HV_ACCESS_DENIED syscall.Errno = 0xC0350006 + ERROR_HV_INVALID_PARTITION_STATE syscall.Errno = 0xC0350007 + ERROR_HV_OPERATION_DENIED syscall.Errno = 0xC0350008 + ERROR_HV_UNKNOWN_PROPERTY syscall.Errno = 0xC0350009 + ERROR_HV_PROPERTY_VALUE_OUT_OF_RANGE syscall.Errno = 0xC035000A + ERROR_HV_INSUFFICIENT_MEMORY syscall.Errno = 0xC035000B + ERROR_HV_PARTITION_TOO_DEEP syscall.Errno = 0xC035000C + ERROR_HV_INVALID_PARTITION_ID syscall.Errno = 0xC035000D + ERROR_HV_INVALID_VP_INDEX syscall.Errno = 0xC035000E + ERROR_HV_INVALID_PORT_ID syscall.Errno = 0xC0350011 + ERROR_HV_INVALID_CONNECTION_ID syscall.Errno = 0xC0350012 + ERROR_HV_INSUFFICIENT_BUFFERS syscall.Errno = 0xC0350013 + ERROR_HV_NOT_ACKNOWLEDGED syscall.Errno = 0xC0350014 + ERROR_HV_INVALID_VP_STATE syscall.Errno = 0xC0350015 + ERROR_HV_ACKNOWLEDGED syscall.Errno = 0xC0350016 + ERROR_HV_INVALID_SAVE_RESTORE_STATE syscall.Errno = 0xC0350017 + ERROR_HV_INVALID_SYNIC_STATE syscall.Errno = 0xC0350018 + ERROR_HV_OBJECT_IN_USE syscall.Errno = 0xC0350019 + ERROR_HV_INVALID_PROXIMITY_DOMAIN_INFO syscall.Errno = 0xC035001A + ERROR_HV_NO_DATA syscall.Errno = 0xC035001B + ERROR_HV_INACTIVE syscall.Errno = 0xC035001C + ERROR_HV_NO_RESOURCES syscall.Errno = 0xC035001D + ERROR_HV_FEATURE_UNAVAILABLE syscall.Errno = 0xC035001E + ERROR_HV_INSUFFICIENT_BUFFER syscall.Errno = 0xC0350033 + ERROR_HV_INSUFFICIENT_DEVICE_DOMAINS syscall.Errno = 0xC0350038 + ERROR_HV_CPUID_FEATURE_VALIDATION syscall.Errno = 0xC035003C + ERROR_HV_CPUID_XSAVE_FEATURE_VALIDATION syscall.Errno = 0xC035003D + ERROR_HV_PROCESSOR_STARTUP_TIMEOUT syscall.Errno = 0xC035003E + ERROR_HV_SMX_ENABLED syscall.Errno = 0xC035003F + ERROR_HV_INVALID_LP_INDEX syscall.Errno = 0xC0350041 + ERROR_HV_INVALID_REGISTER_VALUE syscall.Errno = 0xC0350050 + ERROR_HV_INVALID_VTL_STATE syscall.Errno = 0xC0350051 + ERROR_HV_NX_NOT_DETECTED syscall.Errno = 0xC0350055 + ERROR_HV_INVALID_DEVICE_ID syscall.Errno = 0xC0350057 + ERROR_HV_INVALID_DEVICE_STATE syscall.Errno = 0xC0350058 + ERROR_HV_PENDING_PAGE_REQUESTS syscall.Errno = 0x00350059 + ERROR_HV_PAGE_REQUEST_INVALID syscall.Errno = 0xC0350060 + ERROR_HV_INVALID_CPU_GROUP_ID syscall.Errno = 0xC035006F + ERROR_HV_INVALID_CPU_GROUP_STATE syscall.Errno = 0xC0350070 + ERROR_HV_OPERATION_FAILED syscall.Errno = 0xC0350071 + ERROR_HV_NOT_ALLOWED_WITH_NESTED_VIRT_ACTIVE syscall.Errno = 0xC0350072 + ERROR_HV_INSUFFICIENT_ROOT_MEMORY syscall.Errno = 0xC0350073 + ERROR_HV_NOT_PRESENT syscall.Errno = 0xC0351000 + 
ERROR_VID_DUPLICATE_HANDLER syscall.Errno = 0xC0370001 + ERROR_VID_TOO_MANY_HANDLERS syscall.Errno = 0xC0370002 + ERROR_VID_QUEUE_FULL syscall.Errno = 0xC0370003 + ERROR_VID_HANDLER_NOT_PRESENT syscall.Errno = 0xC0370004 + ERROR_VID_INVALID_OBJECT_NAME syscall.Errno = 0xC0370005 + ERROR_VID_PARTITION_NAME_TOO_LONG syscall.Errno = 0xC0370006 + ERROR_VID_MESSAGE_QUEUE_NAME_TOO_LONG syscall.Errno = 0xC0370007 + ERROR_VID_PARTITION_ALREADY_EXISTS syscall.Errno = 0xC0370008 + ERROR_VID_PARTITION_DOES_NOT_EXIST syscall.Errno = 0xC0370009 + ERROR_VID_PARTITION_NAME_NOT_FOUND syscall.Errno = 0xC037000A + ERROR_VID_MESSAGE_QUEUE_ALREADY_EXISTS syscall.Errno = 0xC037000B + ERROR_VID_EXCEEDED_MBP_ENTRY_MAP_LIMIT syscall.Errno = 0xC037000C + ERROR_VID_MB_STILL_REFERENCED syscall.Errno = 0xC037000D + ERROR_VID_CHILD_GPA_PAGE_SET_CORRUPTED syscall.Errno = 0xC037000E + ERROR_VID_INVALID_NUMA_SETTINGS syscall.Errno = 0xC037000F + ERROR_VID_INVALID_NUMA_NODE_INDEX syscall.Errno = 0xC0370010 + ERROR_VID_NOTIFICATION_QUEUE_ALREADY_ASSOCIATED syscall.Errno = 0xC0370011 + ERROR_VID_INVALID_MEMORY_BLOCK_HANDLE syscall.Errno = 0xC0370012 + ERROR_VID_PAGE_RANGE_OVERFLOW syscall.Errno = 0xC0370013 + ERROR_VID_INVALID_MESSAGE_QUEUE_HANDLE syscall.Errno = 0xC0370014 + ERROR_VID_INVALID_GPA_RANGE_HANDLE syscall.Errno = 0xC0370015 + ERROR_VID_NO_MEMORY_BLOCK_NOTIFICATION_QUEUE syscall.Errno = 0xC0370016 + ERROR_VID_MEMORY_BLOCK_LOCK_COUNT_EXCEEDED syscall.Errno = 0xC0370017 + ERROR_VID_INVALID_PPM_HANDLE syscall.Errno = 0xC0370018 + ERROR_VID_MBPS_ARE_LOCKED syscall.Errno = 0xC0370019 + ERROR_VID_MESSAGE_QUEUE_CLOSED syscall.Errno = 0xC037001A + ERROR_VID_VIRTUAL_PROCESSOR_LIMIT_EXCEEDED syscall.Errno = 0xC037001B + ERROR_VID_STOP_PENDING syscall.Errno = 0xC037001C + ERROR_VID_INVALID_PROCESSOR_STATE syscall.Errno = 0xC037001D + ERROR_VID_EXCEEDED_KM_CONTEXT_COUNT_LIMIT syscall.Errno = 0xC037001E + ERROR_VID_KM_INTERFACE_ALREADY_INITIALIZED syscall.Errno = 0xC037001F + ERROR_VID_MB_PROPERTY_ALREADY_SET_RESET syscall.Errno = 0xC0370020 + ERROR_VID_MMIO_RANGE_DESTROYED syscall.Errno = 0xC0370021 + ERROR_VID_INVALID_CHILD_GPA_PAGE_SET syscall.Errno = 0xC0370022 + ERROR_VID_RESERVE_PAGE_SET_IS_BEING_USED syscall.Errno = 0xC0370023 + ERROR_VID_RESERVE_PAGE_SET_TOO_SMALL syscall.Errno = 0xC0370024 + ERROR_VID_MBP_ALREADY_LOCKED_USING_RESERVED_PAGE syscall.Errno = 0xC0370025 + ERROR_VID_MBP_COUNT_EXCEEDED_LIMIT syscall.Errno = 0xC0370026 + ERROR_VID_SAVED_STATE_CORRUPT syscall.Errno = 0xC0370027 + ERROR_VID_SAVED_STATE_UNRECOGNIZED_ITEM syscall.Errno = 0xC0370028 + ERROR_VID_SAVED_STATE_INCOMPATIBLE syscall.Errno = 0xC0370029 + ERROR_VID_VTL_ACCESS_DENIED syscall.Errno = 0xC037002A + ERROR_VMCOMPUTE_TERMINATED_DURING_START syscall.Errno = 0xC0370100 + ERROR_VMCOMPUTE_IMAGE_MISMATCH syscall.Errno = 0xC0370101 + ERROR_VMCOMPUTE_HYPERV_NOT_INSTALLED syscall.Errno = 0xC0370102 + ERROR_VMCOMPUTE_OPERATION_PENDING syscall.Errno = 0xC0370103 + ERROR_VMCOMPUTE_TOO_MANY_NOTIFICATIONS syscall.Errno = 0xC0370104 + ERROR_VMCOMPUTE_INVALID_STATE syscall.Errno = 0xC0370105 + ERROR_VMCOMPUTE_UNEXPECTED_EXIT syscall.Errno = 0xC0370106 + ERROR_VMCOMPUTE_TERMINATED syscall.Errno = 0xC0370107 + ERROR_VMCOMPUTE_CONNECT_FAILED syscall.Errno = 0xC0370108 + ERROR_VMCOMPUTE_TIMEOUT syscall.Errno = 0xC0370109 + ERROR_VMCOMPUTE_CONNECTION_CLOSED syscall.Errno = 0xC037010A + ERROR_VMCOMPUTE_UNKNOWN_MESSAGE syscall.Errno = 0xC037010B + ERROR_VMCOMPUTE_UNSUPPORTED_PROTOCOL_VERSION syscall.Errno = 0xC037010C + ERROR_VMCOMPUTE_INVALID_JSON syscall.Errno = 
0xC037010D + ERROR_VMCOMPUTE_SYSTEM_NOT_FOUND syscall.Errno = 0xC037010E + ERROR_VMCOMPUTE_SYSTEM_ALREADY_EXISTS syscall.Errno = 0xC037010F + ERROR_VMCOMPUTE_SYSTEM_ALREADY_STOPPED syscall.Errno = 0xC0370110 + ERROR_VMCOMPUTE_PROTOCOL_ERROR syscall.Errno = 0xC0370111 + ERROR_VMCOMPUTE_INVALID_LAYER syscall.Errno = 0xC0370112 + ERROR_VMCOMPUTE_WINDOWS_INSIDER_REQUIRED syscall.Errno = 0xC0370113 + HCS_E_TERMINATED_DURING_START Handle = 0x80370100 + HCS_E_IMAGE_MISMATCH Handle = 0x80370101 + HCS_E_HYPERV_NOT_INSTALLED Handle = 0x80370102 + HCS_E_INVALID_STATE Handle = 0x80370105 + HCS_E_UNEXPECTED_EXIT Handle = 0x80370106 + HCS_E_TERMINATED Handle = 0x80370107 + HCS_E_CONNECT_FAILED Handle = 0x80370108 + HCS_E_CONNECTION_TIMEOUT Handle = 0x80370109 + HCS_E_CONNECTION_CLOSED Handle = 0x8037010A + HCS_E_UNKNOWN_MESSAGE Handle = 0x8037010B + HCS_E_UNSUPPORTED_PROTOCOL_VERSION Handle = 0x8037010C + HCS_E_INVALID_JSON Handle = 0x8037010D + HCS_E_SYSTEM_NOT_FOUND Handle = 0x8037010E + HCS_E_SYSTEM_ALREADY_EXISTS Handle = 0x8037010F + HCS_E_SYSTEM_ALREADY_STOPPED Handle = 0x80370110 + HCS_E_PROTOCOL_ERROR Handle = 0x80370111 + HCS_E_INVALID_LAYER Handle = 0x80370112 + HCS_E_WINDOWS_INSIDER_REQUIRED Handle = 0x80370113 + HCS_E_SERVICE_NOT_AVAILABLE Handle = 0x80370114 + HCS_E_OPERATION_NOT_STARTED Handle = 0x80370115 + HCS_E_OPERATION_ALREADY_STARTED Handle = 0x80370116 + HCS_E_OPERATION_PENDING Handle = 0x80370117 + HCS_E_OPERATION_TIMEOUT Handle = 0x80370118 + HCS_E_OPERATION_SYSTEM_CALLBACK_ALREADY_SET Handle = 0x80370119 + HCS_E_OPERATION_RESULT_ALLOCATION_FAILED Handle = 0x8037011A + HCS_E_ACCESS_DENIED Handle = 0x8037011B + HCS_E_GUEST_CRITICAL_ERROR Handle = 0x8037011C + ERROR_VNET_VIRTUAL_SWITCH_NAME_NOT_FOUND syscall.Errno = 0xC0370200 + ERROR_VID_REMOTE_NODE_PARENT_GPA_PAGES_USED syscall.Errno = 0x80370001 + WHV_E_UNKNOWN_CAPABILITY Handle = 0x80370300 + WHV_E_INSUFFICIENT_BUFFER Handle = 0x80370301 + WHV_E_UNKNOWN_PROPERTY Handle = 0x80370302 + WHV_E_UNSUPPORTED_HYPERVISOR_CONFIG Handle = 0x80370303 + WHV_E_INVALID_PARTITION_CONFIG Handle = 0x80370304 + WHV_E_GPA_RANGE_NOT_FOUND Handle = 0x80370305 + WHV_E_VP_ALREADY_EXISTS Handle = 0x80370306 + WHV_E_VP_DOES_NOT_EXIST Handle = 0x80370307 + WHV_E_INVALID_VP_STATE Handle = 0x80370308 + WHV_E_INVALID_VP_REGISTER_NAME Handle = 0x80370309 + ERROR_VSMB_SAVED_STATE_FILE_NOT_FOUND syscall.Errno = 0xC0370400 + ERROR_VSMB_SAVED_STATE_CORRUPT syscall.Errno = 0xC0370401 + ERROR_VOLMGR_INCOMPLETE_REGENERATION syscall.Errno = 0x80380001 + ERROR_VOLMGR_INCOMPLETE_DISK_MIGRATION syscall.Errno = 0x80380002 + ERROR_VOLMGR_DATABASE_FULL syscall.Errno = 0xC0380001 + ERROR_VOLMGR_DISK_CONFIGURATION_CORRUPTED syscall.Errno = 0xC0380002 + ERROR_VOLMGR_DISK_CONFIGURATION_NOT_IN_SYNC syscall.Errno = 0xC0380003 + ERROR_VOLMGR_PACK_CONFIG_UPDATE_FAILED syscall.Errno = 0xC0380004 + ERROR_VOLMGR_DISK_CONTAINS_NON_SIMPLE_VOLUME syscall.Errno = 0xC0380005 + ERROR_VOLMGR_DISK_DUPLICATE syscall.Errno = 0xC0380006 + ERROR_VOLMGR_DISK_DYNAMIC syscall.Errno = 0xC0380007 + ERROR_VOLMGR_DISK_ID_INVALID syscall.Errno = 0xC0380008 + ERROR_VOLMGR_DISK_INVALID syscall.Errno = 0xC0380009 + ERROR_VOLMGR_DISK_LAST_VOTER syscall.Errno = 0xC038000A + ERROR_VOLMGR_DISK_LAYOUT_INVALID syscall.Errno = 0xC038000B + ERROR_VOLMGR_DISK_LAYOUT_NON_BASIC_BETWEEN_BASIC_PARTITIONS syscall.Errno = 0xC038000C + ERROR_VOLMGR_DISK_LAYOUT_NOT_CYLINDER_ALIGNED syscall.Errno = 0xC038000D + ERROR_VOLMGR_DISK_LAYOUT_PARTITIONS_TOO_SMALL syscall.Errno = 0xC038000E + 
ERROR_VOLMGR_DISK_LAYOUT_PRIMARY_BETWEEN_LOGICAL_PARTITIONS syscall.Errno = 0xC038000F + ERROR_VOLMGR_DISK_LAYOUT_TOO_MANY_PARTITIONS syscall.Errno = 0xC0380010 + ERROR_VOLMGR_DISK_MISSING syscall.Errno = 0xC0380011 + ERROR_VOLMGR_DISK_NOT_EMPTY syscall.Errno = 0xC0380012 + ERROR_VOLMGR_DISK_NOT_ENOUGH_SPACE syscall.Errno = 0xC0380013 + ERROR_VOLMGR_DISK_REVECTORING_FAILED syscall.Errno = 0xC0380014 + ERROR_VOLMGR_DISK_SECTOR_SIZE_INVALID syscall.Errno = 0xC0380015 + ERROR_VOLMGR_DISK_SET_NOT_CONTAINED syscall.Errno = 0xC0380016 + ERROR_VOLMGR_DISK_USED_BY_MULTIPLE_MEMBERS syscall.Errno = 0xC0380017 + ERROR_VOLMGR_DISK_USED_BY_MULTIPLE_PLEXES syscall.Errno = 0xC0380018 + ERROR_VOLMGR_DYNAMIC_DISK_NOT_SUPPORTED syscall.Errno = 0xC0380019 + ERROR_VOLMGR_EXTENT_ALREADY_USED syscall.Errno = 0xC038001A + ERROR_VOLMGR_EXTENT_NOT_CONTIGUOUS syscall.Errno = 0xC038001B + ERROR_VOLMGR_EXTENT_NOT_IN_PUBLIC_REGION syscall.Errno = 0xC038001C + ERROR_VOLMGR_EXTENT_NOT_SECTOR_ALIGNED syscall.Errno = 0xC038001D + ERROR_VOLMGR_EXTENT_OVERLAPS_EBR_PARTITION syscall.Errno = 0xC038001E + ERROR_VOLMGR_EXTENT_VOLUME_LENGTHS_DO_NOT_MATCH syscall.Errno = 0xC038001F + ERROR_VOLMGR_FAULT_TOLERANT_NOT_SUPPORTED syscall.Errno = 0xC0380020 + ERROR_VOLMGR_INTERLEAVE_LENGTH_INVALID syscall.Errno = 0xC0380021 + ERROR_VOLMGR_MAXIMUM_REGISTERED_USERS syscall.Errno = 0xC0380022 + ERROR_VOLMGR_MEMBER_IN_SYNC syscall.Errno = 0xC0380023 + ERROR_VOLMGR_MEMBER_INDEX_DUPLICATE syscall.Errno = 0xC0380024 + ERROR_VOLMGR_MEMBER_INDEX_INVALID syscall.Errno = 0xC0380025 + ERROR_VOLMGR_MEMBER_MISSING syscall.Errno = 0xC0380026 + ERROR_VOLMGR_MEMBER_NOT_DETACHED syscall.Errno = 0xC0380027 + ERROR_VOLMGR_MEMBER_REGENERATING syscall.Errno = 0xC0380028 + ERROR_VOLMGR_ALL_DISKS_FAILED syscall.Errno = 0xC0380029 + ERROR_VOLMGR_NO_REGISTERED_USERS syscall.Errno = 0xC038002A + ERROR_VOLMGR_NO_SUCH_USER syscall.Errno = 0xC038002B + ERROR_VOLMGR_NOTIFICATION_RESET syscall.Errno = 0xC038002C + ERROR_VOLMGR_NUMBER_OF_MEMBERS_INVALID syscall.Errno = 0xC038002D + ERROR_VOLMGR_NUMBER_OF_PLEXES_INVALID syscall.Errno = 0xC038002E + ERROR_VOLMGR_PACK_DUPLICATE syscall.Errno = 0xC038002F + ERROR_VOLMGR_PACK_ID_INVALID syscall.Errno = 0xC0380030 + ERROR_VOLMGR_PACK_INVALID syscall.Errno = 0xC0380031 + ERROR_VOLMGR_PACK_NAME_INVALID syscall.Errno = 0xC0380032 + ERROR_VOLMGR_PACK_OFFLINE syscall.Errno = 0xC0380033 + ERROR_VOLMGR_PACK_HAS_QUORUM syscall.Errno = 0xC0380034 + ERROR_VOLMGR_PACK_WITHOUT_QUORUM syscall.Errno = 0xC0380035 + ERROR_VOLMGR_PARTITION_STYLE_INVALID syscall.Errno = 0xC0380036 + ERROR_VOLMGR_PARTITION_UPDATE_FAILED syscall.Errno = 0xC0380037 + ERROR_VOLMGR_PLEX_IN_SYNC syscall.Errno = 0xC0380038 + ERROR_VOLMGR_PLEX_INDEX_DUPLICATE syscall.Errno = 0xC0380039 + ERROR_VOLMGR_PLEX_INDEX_INVALID syscall.Errno = 0xC038003A + ERROR_VOLMGR_PLEX_LAST_ACTIVE syscall.Errno = 0xC038003B + ERROR_VOLMGR_PLEX_MISSING syscall.Errno = 0xC038003C + ERROR_VOLMGR_PLEX_REGENERATING syscall.Errno = 0xC038003D + ERROR_VOLMGR_PLEX_TYPE_INVALID syscall.Errno = 0xC038003E + ERROR_VOLMGR_PLEX_NOT_RAID5 syscall.Errno = 0xC038003F + ERROR_VOLMGR_PLEX_NOT_SIMPLE syscall.Errno = 0xC0380040 + ERROR_VOLMGR_STRUCTURE_SIZE_INVALID syscall.Errno = 0xC0380041 + ERROR_VOLMGR_TOO_MANY_NOTIFICATION_REQUESTS syscall.Errno = 0xC0380042 + ERROR_VOLMGR_TRANSACTION_IN_PROGRESS syscall.Errno = 0xC0380043 + ERROR_VOLMGR_UNEXPECTED_DISK_LAYOUT_CHANGE syscall.Errno = 0xC0380044 + ERROR_VOLMGR_VOLUME_CONTAINS_MISSING_DISK syscall.Errno = 0xC0380045 + ERROR_VOLMGR_VOLUME_ID_INVALID 
syscall.Errno = 0xC0380046 + ERROR_VOLMGR_VOLUME_LENGTH_INVALID syscall.Errno = 0xC0380047 + ERROR_VOLMGR_VOLUME_LENGTH_NOT_SECTOR_SIZE_MULTIPLE syscall.Errno = 0xC0380048 + ERROR_VOLMGR_VOLUME_NOT_MIRRORED syscall.Errno = 0xC0380049 + ERROR_VOLMGR_VOLUME_NOT_RETAINED syscall.Errno = 0xC038004A + ERROR_VOLMGR_VOLUME_OFFLINE syscall.Errno = 0xC038004B + ERROR_VOLMGR_VOLUME_RETAINED syscall.Errno = 0xC038004C + ERROR_VOLMGR_NUMBER_OF_EXTENTS_INVALID syscall.Errno = 0xC038004D + ERROR_VOLMGR_DIFFERENT_SECTOR_SIZE syscall.Errno = 0xC038004E + ERROR_VOLMGR_BAD_BOOT_DISK syscall.Errno = 0xC038004F + ERROR_VOLMGR_PACK_CONFIG_OFFLINE syscall.Errno = 0xC0380050 + ERROR_VOLMGR_PACK_CONFIG_ONLINE syscall.Errno = 0xC0380051 + ERROR_VOLMGR_NOT_PRIMARY_PACK syscall.Errno = 0xC0380052 + ERROR_VOLMGR_PACK_LOG_UPDATE_FAILED syscall.Errno = 0xC0380053 + ERROR_VOLMGR_NUMBER_OF_DISKS_IN_PLEX_INVALID syscall.Errno = 0xC0380054 + ERROR_VOLMGR_NUMBER_OF_DISKS_IN_MEMBER_INVALID syscall.Errno = 0xC0380055 + ERROR_VOLMGR_VOLUME_MIRRORED syscall.Errno = 0xC0380056 + ERROR_VOLMGR_PLEX_NOT_SIMPLE_SPANNED syscall.Errno = 0xC0380057 + ERROR_VOLMGR_NO_VALID_LOG_COPIES syscall.Errno = 0xC0380058 + ERROR_VOLMGR_PRIMARY_PACK_PRESENT syscall.Errno = 0xC0380059 + ERROR_VOLMGR_NUMBER_OF_DISKS_INVALID syscall.Errno = 0xC038005A + ERROR_VOLMGR_MIRROR_NOT_SUPPORTED syscall.Errno = 0xC038005B + ERROR_VOLMGR_RAID5_NOT_SUPPORTED syscall.Errno = 0xC038005C + ERROR_BCD_NOT_ALL_ENTRIES_IMPORTED syscall.Errno = 0x80390001 + ERROR_BCD_TOO_MANY_ELEMENTS syscall.Errno = 0xC0390002 + ERROR_BCD_NOT_ALL_ENTRIES_SYNCHRONIZED syscall.Errno = 0x80390003 + ERROR_VHD_DRIVE_FOOTER_MISSING syscall.Errno = 0xC03A0001 + ERROR_VHD_DRIVE_FOOTER_CHECKSUM_MISMATCH syscall.Errno = 0xC03A0002 + ERROR_VHD_DRIVE_FOOTER_CORRUPT syscall.Errno = 0xC03A0003 + ERROR_VHD_FORMAT_UNKNOWN syscall.Errno = 0xC03A0004 + ERROR_VHD_FORMAT_UNSUPPORTED_VERSION syscall.Errno = 0xC03A0005 + ERROR_VHD_SPARSE_HEADER_CHECKSUM_MISMATCH syscall.Errno = 0xC03A0006 + ERROR_VHD_SPARSE_HEADER_UNSUPPORTED_VERSION syscall.Errno = 0xC03A0007 + ERROR_VHD_SPARSE_HEADER_CORRUPT syscall.Errno = 0xC03A0008 + ERROR_VHD_BLOCK_ALLOCATION_FAILURE syscall.Errno = 0xC03A0009 + ERROR_VHD_BLOCK_ALLOCATION_TABLE_CORRUPT syscall.Errno = 0xC03A000A + ERROR_VHD_INVALID_BLOCK_SIZE syscall.Errno = 0xC03A000B + ERROR_VHD_BITMAP_MISMATCH syscall.Errno = 0xC03A000C + ERROR_VHD_PARENT_VHD_NOT_FOUND syscall.Errno = 0xC03A000D + ERROR_VHD_CHILD_PARENT_ID_MISMATCH syscall.Errno = 0xC03A000E + ERROR_VHD_CHILD_PARENT_TIMESTAMP_MISMATCH syscall.Errno = 0xC03A000F + ERROR_VHD_METADATA_READ_FAILURE syscall.Errno = 0xC03A0010 + ERROR_VHD_METADATA_WRITE_FAILURE syscall.Errno = 0xC03A0011 + ERROR_VHD_INVALID_SIZE syscall.Errno = 0xC03A0012 + ERROR_VHD_INVALID_FILE_SIZE syscall.Errno = 0xC03A0013 + ERROR_VIRTDISK_PROVIDER_NOT_FOUND syscall.Errno = 0xC03A0014 + ERROR_VIRTDISK_NOT_VIRTUAL_DISK syscall.Errno = 0xC03A0015 + ERROR_VHD_PARENT_VHD_ACCESS_DENIED syscall.Errno = 0xC03A0016 + ERROR_VHD_CHILD_PARENT_SIZE_MISMATCH syscall.Errno = 0xC03A0017 + ERROR_VHD_DIFFERENCING_CHAIN_CYCLE_DETECTED syscall.Errno = 0xC03A0018 + ERROR_VHD_DIFFERENCING_CHAIN_ERROR_IN_PARENT syscall.Errno = 0xC03A0019 + ERROR_VIRTUAL_DISK_LIMITATION syscall.Errno = 0xC03A001A + ERROR_VHD_INVALID_TYPE syscall.Errno = 0xC03A001B + ERROR_VHD_INVALID_STATE syscall.Errno = 0xC03A001C + ERROR_VIRTDISK_UNSUPPORTED_DISK_SECTOR_SIZE syscall.Errno = 0xC03A001D + ERROR_VIRTDISK_DISK_ALREADY_OWNED syscall.Errno = 0xC03A001E + 
ERROR_VIRTDISK_DISK_ONLINE_AND_WRITABLE syscall.Errno = 0xC03A001F + ERROR_CTLOG_TRACKING_NOT_INITIALIZED syscall.Errno = 0xC03A0020 + ERROR_CTLOG_LOGFILE_SIZE_EXCEEDED_MAXSIZE syscall.Errno = 0xC03A0021 + ERROR_CTLOG_VHD_CHANGED_OFFLINE syscall.Errno = 0xC03A0022 + ERROR_CTLOG_INVALID_TRACKING_STATE syscall.Errno = 0xC03A0023 + ERROR_CTLOG_INCONSISTENT_TRACKING_FILE syscall.Errno = 0xC03A0024 + ERROR_VHD_RESIZE_WOULD_TRUNCATE_DATA syscall.Errno = 0xC03A0025 + ERROR_VHD_COULD_NOT_COMPUTE_MINIMUM_VIRTUAL_SIZE syscall.Errno = 0xC03A0026 + ERROR_VHD_ALREADY_AT_OR_BELOW_MINIMUM_VIRTUAL_SIZE syscall.Errno = 0xC03A0027 + ERROR_VHD_METADATA_FULL syscall.Errno = 0xC03A0028 + ERROR_VHD_INVALID_CHANGE_TRACKING_ID syscall.Errno = 0xC03A0029 + ERROR_VHD_CHANGE_TRACKING_DISABLED syscall.Errno = 0xC03A002A + ERROR_VHD_MISSING_CHANGE_TRACKING_INFORMATION syscall.Errno = 0xC03A0030 + ERROR_QUERY_STORAGE_ERROR syscall.Errno = 0x803A0001 + HCN_E_NETWORK_NOT_FOUND Handle = 0x803B0001 + HCN_E_ENDPOINT_NOT_FOUND Handle = 0x803B0002 + HCN_E_LAYER_NOT_FOUND Handle = 0x803B0003 + HCN_E_SWITCH_NOT_FOUND Handle = 0x803B0004 + HCN_E_SUBNET_NOT_FOUND Handle = 0x803B0005 + HCN_E_ADAPTER_NOT_FOUND Handle = 0x803B0006 + HCN_E_PORT_NOT_FOUND Handle = 0x803B0007 + HCN_E_POLICY_NOT_FOUND Handle = 0x803B0008 + HCN_E_VFP_PORTSETTING_NOT_FOUND Handle = 0x803B0009 + HCN_E_INVALID_NETWORK Handle = 0x803B000A + HCN_E_INVALID_NETWORK_TYPE Handle = 0x803B000B + HCN_E_INVALID_ENDPOINT Handle = 0x803B000C + HCN_E_INVALID_POLICY Handle = 0x803B000D + HCN_E_INVALID_POLICY_TYPE Handle = 0x803B000E + HCN_E_INVALID_REMOTE_ENDPOINT_OPERATION Handle = 0x803B000F + HCN_E_NETWORK_ALREADY_EXISTS Handle = 0x803B0010 + HCN_E_LAYER_ALREADY_EXISTS Handle = 0x803B0011 + HCN_E_POLICY_ALREADY_EXISTS Handle = 0x803B0012 + HCN_E_PORT_ALREADY_EXISTS Handle = 0x803B0013 + HCN_E_ENDPOINT_ALREADY_ATTACHED Handle = 0x803B0014 + HCN_E_REQUEST_UNSUPPORTED Handle = 0x803B0015 + HCN_E_MAPPING_NOT_SUPPORTED Handle = 0x803B0016 + HCN_E_DEGRADED_OPERATION Handle = 0x803B0017 + HCN_E_SHARED_SWITCH_MODIFICATION Handle = 0x803B0018 + HCN_E_GUID_CONVERSION_FAILURE Handle = 0x803B0019 + HCN_E_REGKEY_FAILURE Handle = 0x803B001A + HCN_E_INVALID_JSON Handle = 0x803B001B + HCN_E_INVALID_JSON_REFERENCE Handle = 0x803B001C + HCN_E_ENDPOINT_SHARING_DISABLED Handle = 0x803B001D + HCN_E_INVALID_IP Handle = 0x803B001E + HCN_E_SWITCH_EXTENSION_NOT_FOUND Handle = 0x803B001F + HCN_E_MANAGER_STOPPED Handle = 0x803B0020 + GCN_E_MODULE_NOT_FOUND Handle = 0x803B0021 + GCN_E_NO_REQUEST_HANDLERS Handle = 0x803B0022 + GCN_E_REQUEST_UNSUPPORTED Handle = 0x803B0023 + GCN_E_RUNTIMEKEYS_FAILED Handle = 0x803B0024 + GCN_E_NETADAPTER_TIMEOUT Handle = 0x803B0025 + GCN_E_NETADAPTER_NOT_FOUND Handle = 0x803B0026 + GCN_E_NETCOMPARTMENT_NOT_FOUND Handle = 0x803B0027 + GCN_E_NETINTERFACE_NOT_FOUND Handle = 0x803B0028 + GCN_E_DEFAULTNAMESPACE_EXISTS Handle = 0x803B0029 + HCN_E_ICS_DISABLED Handle = 0x803B002A + HCN_E_ENDPOINT_NAMESPACE_ALREADY_EXISTS Handle = 0x803B002B + HCN_E_ENTITY_HAS_REFERENCES Handle = 0x803B002C + HCN_E_INVALID_INTERNAL_PORT Handle = 0x803B002D + HCN_E_NAMESPACE_ATTACH_FAILED Handle = 0x803B002E + HCN_E_ADDR_INVALID_OR_RESERVED Handle = 0x803B002F + SDIAG_E_CANCELLED syscall.Errno = 0x803C0100 + SDIAG_E_SCRIPT syscall.Errno = 0x803C0101 + SDIAG_E_POWERSHELL syscall.Errno = 0x803C0102 + SDIAG_E_MANAGEDHOST syscall.Errno = 0x803C0103 + SDIAG_E_NOVERIFIER syscall.Errno = 0x803C0104 + SDIAG_S_CANNOTRUN syscall.Errno = 0x003C0105 + SDIAG_E_DISABLED syscall.Errno = 0x803C0106 + 
SDIAG_E_TRUST syscall.Errno = 0x803C0107 + SDIAG_E_CANNOTRUN syscall.Errno = 0x803C0108 + SDIAG_E_VERSION syscall.Errno = 0x803C0109 + SDIAG_E_RESOURCE syscall.Errno = 0x803C010A + SDIAG_E_ROOTCAUSE syscall.Errno = 0x803C010B + WPN_E_CHANNEL_CLOSED Handle = 0x803E0100 + WPN_E_CHANNEL_REQUEST_NOT_COMPLETE Handle = 0x803E0101 + WPN_E_INVALID_APP Handle = 0x803E0102 + WPN_E_OUTSTANDING_CHANNEL_REQUEST Handle = 0x803E0103 + WPN_E_DUPLICATE_CHANNEL Handle = 0x803E0104 + WPN_E_PLATFORM_UNAVAILABLE Handle = 0x803E0105 + WPN_E_NOTIFICATION_POSTED Handle = 0x803E0106 + WPN_E_NOTIFICATION_HIDDEN Handle = 0x803E0107 + WPN_E_NOTIFICATION_NOT_POSTED Handle = 0x803E0108 + WPN_E_CLOUD_DISABLED Handle = 0x803E0109 + WPN_E_CLOUD_INCAPABLE Handle = 0x803E0110 + WPN_E_CLOUD_AUTH_UNAVAILABLE Handle = 0x803E011A + WPN_E_CLOUD_SERVICE_UNAVAILABLE Handle = 0x803E011B + WPN_E_FAILED_LOCK_SCREEN_UPDATE_INTIALIZATION Handle = 0x803E011C + WPN_E_NOTIFICATION_DISABLED Handle = 0x803E0111 + WPN_E_NOTIFICATION_INCAPABLE Handle = 0x803E0112 + WPN_E_INTERNET_INCAPABLE Handle = 0x803E0113 + WPN_E_NOTIFICATION_TYPE_DISABLED Handle = 0x803E0114 + WPN_E_NOTIFICATION_SIZE Handle = 0x803E0115 + WPN_E_TAG_SIZE Handle = 0x803E0116 + WPN_E_ACCESS_DENIED Handle = 0x803E0117 + WPN_E_DUPLICATE_REGISTRATION Handle = 0x803E0118 + WPN_E_PUSH_NOTIFICATION_INCAPABLE Handle = 0x803E0119 + WPN_E_DEV_ID_SIZE Handle = 0x803E0120 + WPN_E_TAG_ALPHANUMERIC Handle = 0x803E012A + WPN_E_INVALID_HTTP_STATUS_CODE Handle = 0x803E012B + WPN_E_OUT_OF_SESSION Handle = 0x803E0200 + WPN_E_POWER_SAVE Handle = 0x803E0201 + WPN_E_IMAGE_NOT_FOUND_IN_CACHE Handle = 0x803E0202 + WPN_E_ALL_URL_NOT_COMPLETED Handle = 0x803E0203 + WPN_E_INVALID_CLOUD_IMAGE Handle = 0x803E0204 + WPN_E_NOTIFICATION_ID_MATCHED Handle = 0x803E0205 + WPN_E_CALLBACK_ALREADY_REGISTERED Handle = 0x803E0206 + WPN_E_TOAST_NOTIFICATION_DROPPED Handle = 0x803E0207 + WPN_E_STORAGE_LOCKED Handle = 0x803E0208 + WPN_E_GROUP_SIZE Handle = 0x803E0209 + WPN_E_GROUP_ALPHANUMERIC Handle = 0x803E020A + WPN_E_CLOUD_DISABLED_FOR_APP Handle = 0x803E020B + E_MBN_CONTEXT_NOT_ACTIVATED Handle = 0x80548201 + E_MBN_BAD_SIM Handle = 0x80548202 + E_MBN_DATA_CLASS_NOT_AVAILABLE Handle = 0x80548203 + E_MBN_INVALID_ACCESS_STRING Handle = 0x80548204 + E_MBN_MAX_ACTIVATED_CONTEXTS Handle = 0x80548205 + E_MBN_PACKET_SVC_DETACHED Handle = 0x80548206 + E_MBN_PROVIDER_NOT_VISIBLE Handle = 0x80548207 + E_MBN_RADIO_POWER_OFF Handle = 0x80548208 + E_MBN_SERVICE_NOT_ACTIVATED Handle = 0x80548209 + E_MBN_SIM_NOT_INSERTED Handle = 0x8054820A + E_MBN_VOICE_CALL_IN_PROGRESS Handle = 0x8054820B + E_MBN_INVALID_CACHE Handle = 0x8054820C + E_MBN_NOT_REGISTERED Handle = 0x8054820D + E_MBN_PROVIDERS_NOT_FOUND Handle = 0x8054820E + E_MBN_PIN_NOT_SUPPORTED Handle = 0x8054820F + E_MBN_PIN_REQUIRED Handle = 0x80548210 + E_MBN_PIN_DISABLED Handle = 0x80548211 + E_MBN_FAILURE Handle = 0x80548212 + E_MBN_INVALID_PROFILE Handle = 0x80548218 + E_MBN_DEFAULT_PROFILE_EXIST Handle = 0x80548219 + E_MBN_SMS_ENCODING_NOT_SUPPORTED Handle = 0x80548220 + E_MBN_SMS_FILTER_NOT_SUPPORTED Handle = 0x80548221 + E_MBN_SMS_INVALID_MEMORY_INDEX Handle = 0x80548222 + E_MBN_SMS_LANG_NOT_SUPPORTED Handle = 0x80548223 + E_MBN_SMS_MEMORY_FAILURE Handle = 0x80548224 + E_MBN_SMS_NETWORK_TIMEOUT Handle = 0x80548225 + E_MBN_SMS_UNKNOWN_SMSC_ADDRESS Handle = 0x80548226 + E_MBN_SMS_FORMAT_NOT_SUPPORTED Handle = 0x80548227 + E_MBN_SMS_OPERATION_NOT_ALLOWED Handle = 0x80548228 + E_MBN_SMS_MEMORY_FULL Handle = 0x80548229 + PEER_E_IPV6_NOT_INSTALLED Handle = 0x80630001 + 
PEER_E_NOT_INITIALIZED Handle = 0x80630002 + PEER_E_CANNOT_START_SERVICE Handle = 0x80630003 + PEER_E_NOT_LICENSED Handle = 0x80630004 + PEER_E_INVALID_GRAPH Handle = 0x80630010 + PEER_E_DBNAME_CHANGED Handle = 0x80630011 + PEER_E_DUPLICATE_GRAPH Handle = 0x80630012 + PEER_E_GRAPH_NOT_READY Handle = 0x80630013 + PEER_E_GRAPH_SHUTTING_DOWN Handle = 0x80630014 + PEER_E_GRAPH_IN_USE Handle = 0x80630015 + PEER_E_INVALID_DATABASE Handle = 0x80630016 + PEER_E_TOO_MANY_ATTRIBUTES Handle = 0x80630017 + PEER_E_CONNECTION_NOT_FOUND Handle = 0x80630103 + PEER_E_CONNECT_SELF Handle = 0x80630106 + PEER_E_ALREADY_LISTENING Handle = 0x80630107 + PEER_E_NODE_NOT_FOUND Handle = 0x80630108 + PEER_E_CONNECTION_FAILED Handle = 0x80630109 + PEER_E_CONNECTION_NOT_AUTHENTICATED Handle = 0x8063010A + PEER_E_CONNECTION_REFUSED Handle = 0x8063010B + PEER_E_CLASSIFIER_TOO_LONG Handle = 0x80630201 + PEER_E_TOO_MANY_IDENTITIES Handle = 0x80630202 + PEER_E_NO_KEY_ACCESS Handle = 0x80630203 + PEER_E_GROUPS_EXIST Handle = 0x80630204 + PEER_E_RECORD_NOT_FOUND Handle = 0x80630301 + PEER_E_DATABASE_ACCESSDENIED Handle = 0x80630302 + PEER_E_DBINITIALIZATION_FAILED Handle = 0x80630303 + PEER_E_MAX_RECORD_SIZE_EXCEEDED Handle = 0x80630304 + PEER_E_DATABASE_ALREADY_PRESENT Handle = 0x80630305 + PEER_E_DATABASE_NOT_PRESENT Handle = 0x80630306 + PEER_E_IDENTITY_NOT_FOUND Handle = 0x80630401 + PEER_E_EVENT_HANDLE_NOT_FOUND Handle = 0x80630501 + PEER_E_INVALID_SEARCH Handle = 0x80630601 + PEER_E_INVALID_ATTRIBUTES Handle = 0x80630602 + PEER_E_INVITATION_NOT_TRUSTED Handle = 0x80630701 + PEER_E_CHAIN_TOO_LONG Handle = 0x80630703 + PEER_E_INVALID_TIME_PERIOD Handle = 0x80630705 + PEER_E_CIRCULAR_CHAIN_DETECTED Handle = 0x80630706 + PEER_E_CERT_STORE_CORRUPTED Handle = 0x80630801 + PEER_E_NO_CLOUD Handle = 0x80631001 + PEER_E_CLOUD_NAME_AMBIGUOUS Handle = 0x80631005 + PEER_E_INVALID_RECORD Handle = 0x80632010 + PEER_E_NOT_AUTHORIZED Handle = 0x80632020 + PEER_E_PASSWORD_DOES_NOT_MEET_POLICY Handle = 0x80632021 + PEER_E_DEFERRED_VALIDATION Handle = 0x80632030 + PEER_E_INVALID_GROUP_PROPERTIES Handle = 0x80632040 + PEER_E_INVALID_PEER_NAME Handle = 0x80632050 + PEER_E_INVALID_CLASSIFIER Handle = 0x80632060 + PEER_E_INVALID_FRIENDLY_NAME Handle = 0x80632070 + PEER_E_INVALID_ROLE_PROPERTY Handle = 0x80632071 + PEER_E_INVALID_CLASSIFIER_PROPERTY Handle = 0x80632072 + PEER_E_INVALID_RECORD_EXPIRATION Handle = 0x80632080 + PEER_E_INVALID_CREDENTIAL_INFO Handle = 0x80632081 + PEER_E_INVALID_CREDENTIAL Handle = 0x80632082 + PEER_E_INVALID_RECORD_SIZE Handle = 0x80632083 + PEER_E_UNSUPPORTED_VERSION Handle = 0x80632090 + PEER_E_GROUP_NOT_READY Handle = 0x80632091 + PEER_E_GROUP_IN_USE Handle = 0x80632092 + PEER_E_INVALID_GROUP Handle = 0x80632093 + PEER_E_NO_MEMBERS_FOUND Handle = 0x80632094 + PEER_E_NO_MEMBER_CONNECTIONS Handle = 0x80632095 + PEER_E_UNABLE_TO_LISTEN Handle = 0x80632096 + PEER_E_IDENTITY_DELETED Handle = 0x806320A0 + PEER_E_SERVICE_NOT_AVAILABLE Handle = 0x806320A1 + PEER_E_CONTACT_NOT_FOUND Handle = 0x80636001 + PEER_S_GRAPH_DATA_CREATED Handle = 0x00630001 + PEER_S_NO_EVENT_DATA Handle = 0x00630002 + PEER_S_ALREADY_CONNECTED Handle = 0x00632000 + PEER_S_SUBSCRIPTION_EXISTS Handle = 0x00636000 + PEER_S_NO_CONNECTIVITY Handle = 0x00630005 + PEER_S_ALREADY_A_MEMBER Handle = 0x00630006 + PEER_E_CANNOT_CONVERT_PEER_NAME Handle = 0x80634001 + PEER_E_INVALID_PEER_HOST_NAME Handle = 0x80634002 + PEER_E_NO_MORE Handle = 0x80634003 + PEER_E_PNRP_DUPLICATE_PEER_NAME Handle = 0x80634005 + PEER_E_INVITE_CANCELLED Handle = 0x80637000 + 
PEER_E_INVITE_RESPONSE_NOT_AVAILABLE Handle = 0x80637001 + PEER_E_NOT_SIGNED_IN Handle = 0x80637003 + PEER_E_PRIVACY_DECLINED Handle = 0x80637004 + PEER_E_TIMEOUT Handle = 0x80637005 + PEER_E_INVALID_ADDRESS Handle = 0x80637007 + PEER_E_FW_EXCEPTION_DISABLED Handle = 0x80637008 + PEER_E_FW_BLOCKED_BY_POLICY Handle = 0x80637009 + PEER_E_FW_BLOCKED_BY_SHIELDS_UP Handle = 0x8063700A + PEER_E_FW_DECLINED Handle = 0x8063700B + UI_E_CREATE_FAILED Handle = 0x802A0001 + UI_E_SHUTDOWN_CALLED Handle = 0x802A0002 + UI_E_ILLEGAL_REENTRANCY Handle = 0x802A0003 + UI_E_OBJECT_SEALED Handle = 0x802A0004 + UI_E_VALUE_NOT_SET Handle = 0x802A0005 + UI_E_VALUE_NOT_DETERMINED Handle = 0x802A0006 + UI_E_INVALID_OUTPUT Handle = 0x802A0007 + UI_E_BOOLEAN_EXPECTED Handle = 0x802A0008 + UI_E_DIFFERENT_OWNER Handle = 0x802A0009 + UI_E_AMBIGUOUS_MATCH Handle = 0x802A000A + UI_E_FP_OVERFLOW Handle = 0x802A000B + UI_E_WRONG_THREAD Handle = 0x802A000C + UI_E_STORYBOARD_ACTIVE Handle = 0x802A0101 + UI_E_STORYBOARD_NOT_PLAYING Handle = 0x802A0102 + UI_E_START_KEYFRAME_AFTER_END Handle = 0x802A0103 + UI_E_END_KEYFRAME_NOT_DETERMINED Handle = 0x802A0104 + UI_E_LOOPS_OVERLAP Handle = 0x802A0105 + UI_E_TRANSITION_ALREADY_USED Handle = 0x802A0106 + UI_E_TRANSITION_NOT_IN_STORYBOARD Handle = 0x802A0107 + UI_E_TRANSITION_ECLIPSED Handle = 0x802A0108 + UI_E_TIME_BEFORE_LAST_UPDATE Handle = 0x802A0109 + UI_E_TIMER_CLIENT_ALREADY_CONNECTED Handle = 0x802A010A + UI_E_INVALID_DIMENSION Handle = 0x802A010B + UI_E_PRIMITIVE_OUT_OF_BOUNDS Handle = 0x802A010C + UI_E_WINDOW_CLOSED Handle = 0x802A0201 + E_BLUETOOTH_ATT_INVALID_HANDLE Handle = 0x80650001 + E_BLUETOOTH_ATT_READ_NOT_PERMITTED Handle = 0x80650002 + E_BLUETOOTH_ATT_WRITE_NOT_PERMITTED Handle = 0x80650003 + E_BLUETOOTH_ATT_INVALID_PDU Handle = 0x80650004 + E_BLUETOOTH_ATT_INSUFFICIENT_AUTHENTICATION Handle = 0x80650005 + E_BLUETOOTH_ATT_REQUEST_NOT_SUPPORTED Handle = 0x80650006 + E_BLUETOOTH_ATT_INVALID_OFFSET Handle = 0x80650007 + E_BLUETOOTH_ATT_INSUFFICIENT_AUTHORIZATION Handle = 0x80650008 + E_BLUETOOTH_ATT_PREPARE_QUEUE_FULL Handle = 0x80650009 + E_BLUETOOTH_ATT_ATTRIBUTE_NOT_FOUND Handle = 0x8065000A + E_BLUETOOTH_ATT_ATTRIBUTE_NOT_LONG Handle = 0x8065000B + E_BLUETOOTH_ATT_INSUFFICIENT_ENCRYPTION_KEY_SIZE Handle = 0x8065000C + E_BLUETOOTH_ATT_INVALID_ATTRIBUTE_VALUE_LENGTH Handle = 0x8065000D + E_BLUETOOTH_ATT_UNLIKELY Handle = 0x8065000E + E_BLUETOOTH_ATT_INSUFFICIENT_ENCRYPTION Handle = 0x8065000F + E_BLUETOOTH_ATT_UNSUPPORTED_GROUP_TYPE Handle = 0x80650010 + E_BLUETOOTH_ATT_INSUFFICIENT_RESOURCES Handle = 0x80650011 + E_BLUETOOTH_ATT_UNKNOWN_ERROR Handle = 0x80651000 + E_AUDIO_ENGINE_NODE_NOT_FOUND Handle = 0x80660001 + E_HDAUDIO_EMPTY_CONNECTION_LIST Handle = 0x80660002 + E_HDAUDIO_CONNECTION_LIST_NOT_SUPPORTED Handle = 0x80660003 + E_HDAUDIO_NO_LOGICAL_DEVICES_CREATED Handle = 0x80660004 + E_HDAUDIO_NULL_LINKED_LIST_ENTRY Handle = 0x80660005 + STATEREPOSITORY_E_CONCURRENCY_LOCKING_FAILURE Handle = 0x80670001 + STATEREPOSITORY_E_STATEMENT_INPROGRESS Handle = 0x80670002 + STATEREPOSITORY_E_CONFIGURATION_INVALID Handle = 0x80670003 + STATEREPOSITORY_E_UNKNOWN_SCHEMA_VERSION Handle = 0x80670004 + STATEREPOSITORY_ERROR_DICTIONARY_CORRUPTED Handle = 0x80670005 + STATEREPOSITORY_E_BLOCKED Handle = 0x80670006 + STATEREPOSITORY_E_BUSY_RETRY Handle = 0x80670007 + STATEREPOSITORY_E_BUSY_RECOVERY_RETRY Handle = 0x80670008 + STATEREPOSITORY_E_LOCKED_RETRY Handle = 0x80670009 + STATEREPOSITORY_E_LOCKED_SHAREDCACHE_RETRY Handle = 0x8067000A + STATEREPOSITORY_E_TRANSACTION_REQUIRED 
Handle = 0x8067000B + STATEREPOSITORY_E_BUSY_TIMEOUT_EXCEEDED Handle = 0x8067000C + STATEREPOSITORY_E_BUSY_RECOVERY_TIMEOUT_EXCEEDED Handle = 0x8067000D + STATEREPOSITORY_E_LOCKED_TIMEOUT_EXCEEDED Handle = 0x8067000E + STATEREPOSITORY_E_LOCKED_SHAREDCACHE_TIMEOUT_EXCEEDED Handle = 0x8067000F + STATEREPOSITORY_E_SERVICE_STOP_IN_PROGRESS Handle = 0x80670010 + STATEREPOSTORY_E_NESTED_TRANSACTION_NOT_SUPPORTED Handle = 0x80670011 + STATEREPOSITORY_ERROR_CACHE_CORRUPTED Handle = 0x80670012 + STATEREPOSITORY_TRANSACTION_CALLER_ID_CHANGED Handle = 0x00670013 + STATEREPOSITORY_TRANSACTION_IN_PROGRESS Handle = 0x00670014 + ERROR_SPACES_POOL_WAS_DELETED Handle = 0x00E70001 + ERROR_SPACES_FAULT_DOMAIN_TYPE_INVALID Handle = 0x80E70001 + ERROR_SPACES_INTERNAL_ERROR Handle = 0x80E70002 + ERROR_SPACES_RESILIENCY_TYPE_INVALID Handle = 0x80E70003 + ERROR_SPACES_DRIVE_SECTOR_SIZE_INVALID Handle = 0x80E70004 + ERROR_SPACES_DRIVE_REDUNDANCY_INVALID Handle = 0x80E70006 + ERROR_SPACES_NUMBER_OF_DATA_COPIES_INVALID Handle = 0x80E70007 + ERROR_SPACES_PARITY_LAYOUT_INVALID Handle = 0x80E70008 + ERROR_SPACES_INTERLEAVE_LENGTH_INVALID Handle = 0x80E70009 + ERROR_SPACES_NUMBER_OF_COLUMNS_INVALID Handle = 0x80E7000A + ERROR_SPACES_NOT_ENOUGH_DRIVES Handle = 0x80E7000B + ERROR_SPACES_EXTENDED_ERROR Handle = 0x80E7000C + ERROR_SPACES_PROVISIONING_TYPE_INVALID Handle = 0x80E7000D + ERROR_SPACES_ALLOCATION_SIZE_INVALID Handle = 0x80E7000E + ERROR_SPACES_ENCLOSURE_AWARE_INVALID Handle = 0x80E7000F + ERROR_SPACES_WRITE_CACHE_SIZE_INVALID Handle = 0x80E70010 + ERROR_SPACES_NUMBER_OF_GROUPS_INVALID Handle = 0x80E70011 + ERROR_SPACES_DRIVE_OPERATIONAL_STATE_INVALID Handle = 0x80E70012 + ERROR_SPACES_ENTRY_INCOMPLETE Handle = 0x80E70013 + ERROR_SPACES_ENTRY_INVALID Handle = 0x80E70014 + ERROR_VOLSNAP_BOOTFILE_NOT_VALID Handle = 0x80820001 + ERROR_VOLSNAP_ACTIVATION_TIMEOUT Handle = 0x80820002 + ERROR_TIERING_NOT_SUPPORTED_ON_VOLUME Handle = 0x80830001 + ERROR_TIERING_VOLUME_DISMOUNT_IN_PROGRESS Handle = 0x80830002 + ERROR_TIERING_STORAGE_TIER_NOT_FOUND Handle = 0x80830003 + ERROR_TIERING_INVALID_FILE_ID Handle = 0x80830004 + ERROR_TIERING_WRONG_CLUSTER_NODE Handle = 0x80830005 + ERROR_TIERING_ALREADY_PROCESSING Handle = 0x80830006 + ERROR_TIERING_CANNOT_PIN_OBJECT Handle = 0x80830007 + ERROR_TIERING_FILE_IS_NOT_PINNED Handle = 0x80830008 + ERROR_NOT_A_TIERED_VOLUME Handle = 0x80830009 + ERROR_ATTRIBUTE_NOT_PRESENT Handle = 0x8083000A + ERROR_SECCORE_INVALID_COMMAND Handle = 0xC0E80000 + ERROR_NO_APPLICABLE_APP_LICENSES_FOUND Handle = 0xC0EA0001 + ERROR_CLIP_LICENSE_NOT_FOUND Handle = 0xC0EA0002 + ERROR_CLIP_DEVICE_LICENSE_MISSING Handle = 0xC0EA0003 + ERROR_CLIP_LICENSE_INVALID_SIGNATURE Handle = 0xC0EA0004 + ERROR_CLIP_KEYHOLDER_LICENSE_MISSING_OR_INVALID Handle = 0xC0EA0005 + ERROR_CLIP_LICENSE_EXPIRED Handle = 0xC0EA0006 + ERROR_CLIP_LICENSE_SIGNED_BY_UNKNOWN_SOURCE Handle = 0xC0EA0007 + ERROR_CLIP_LICENSE_NOT_SIGNED Handle = 0xC0EA0008 + ERROR_CLIP_LICENSE_HARDWARE_ID_OUT_OF_TOLERANCE Handle = 0xC0EA0009 + ERROR_CLIP_LICENSE_DEVICE_ID_MISMATCH Handle = 0xC0EA000A + DXGI_STATUS_OCCLUDED Handle = 0x087A0001 + DXGI_STATUS_CLIPPED Handle = 0x087A0002 + DXGI_STATUS_NO_REDIRECTION Handle = 0x087A0004 + DXGI_STATUS_NO_DESKTOP_ACCESS Handle = 0x087A0005 + DXGI_STATUS_GRAPHICS_VIDPN_SOURCE_IN_USE Handle = 0x087A0006 + DXGI_STATUS_MODE_CHANGED Handle = 0x087A0007 + DXGI_STATUS_MODE_CHANGE_IN_PROGRESS Handle = 0x087A0008 + DXGI_ERROR_INVALID_CALL Handle = 0x887A0001 + DXGI_ERROR_NOT_FOUND Handle = 0x887A0002 + DXGI_ERROR_MORE_DATA 
Handle = 0x887A0003 + DXGI_ERROR_UNSUPPORTED Handle = 0x887A0004 + DXGI_ERROR_DEVICE_REMOVED Handle = 0x887A0005 + DXGI_ERROR_DEVICE_HUNG Handle = 0x887A0006 + DXGI_ERROR_DEVICE_RESET Handle = 0x887A0007 + DXGI_ERROR_WAS_STILL_DRAWING Handle = 0x887A000A + DXGI_ERROR_FRAME_STATISTICS_DISJOINT Handle = 0x887A000B + DXGI_ERROR_GRAPHICS_VIDPN_SOURCE_IN_USE Handle = 0x887A000C + DXGI_ERROR_DRIVER_INTERNAL_ERROR Handle = 0x887A0020 + DXGI_ERROR_NONEXCLUSIVE Handle = 0x887A0021 + DXGI_ERROR_NOT_CURRENTLY_AVAILABLE Handle = 0x887A0022 + DXGI_ERROR_REMOTE_CLIENT_DISCONNECTED Handle = 0x887A0023 + DXGI_ERROR_REMOTE_OUTOFMEMORY Handle = 0x887A0024 + DXGI_ERROR_ACCESS_LOST Handle = 0x887A0026 + DXGI_ERROR_WAIT_TIMEOUT Handle = 0x887A0027 + DXGI_ERROR_SESSION_DISCONNECTED Handle = 0x887A0028 + DXGI_ERROR_RESTRICT_TO_OUTPUT_STALE Handle = 0x887A0029 + DXGI_ERROR_CANNOT_PROTECT_CONTENT Handle = 0x887A002A + DXGI_ERROR_ACCESS_DENIED Handle = 0x887A002B + DXGI_ERROR_NAME_ALREADY_EXISTS Handle = 0x887A002C + DXGI_ERROR_SDK_COMPONENT_MISSING Handle = 0x887A002D + DXGI_ERROR_NOT_CURRENT Handle = 0x887A002E + DXGI_ERROR_HW_PROTECTION_OUTOFMEMORY Handle = 0x887A0030 + DXGI_ERROR_DYNAMIC_CODE_POLICY_VIOLATION Handle = 0x887A0031 + DXGI_ERROR_NON_COMPOSITED_UI Handle = 0x887A0032 + DXGI_STATUS_UNOCCLUDED Handle = 0x087A0009 + DXGI_STATUS_DDA_WAS_STILL_DRAWING Handle = 0x087A000A + DXGI_ERROR_MODE_CHANGE_IN_PROGRESS Handle = 0x887A0025 + DXGI_STATUS_PRESENT_REQUIRED Handle = 0x087A002F + DXGI_ERROR_CACHE_CORRUPT Handle = 0x887A0033 + DXGI_ERROR_CACHE_FULL Handle = 0x887A0034 + DXGI_ERROR_CACHE_HASH_COLLISION Handle = 0x887A0035 + DXGI_ERROR_ALREADY_EXISTS Handle = 0x887A0036 + DXGI_DDI_ERR_WASSTILLDRAWING Handle = 0x887B0001 + DXGI_DDI_ERR_UNSUPPORTED Handle = 0x887B0002 + DXGI_DDI_ERR_NONEXCLUSIVE Handle = 0x887B0003 + D3D10_ERROR_TOO_MANY_UNIQUE_STATE_OBJECTS Handle = 0x88790001 + D3D10_ERROR_FILE_NOT_FOUND Handle = 0x88790002 + D3D11_ERROR_TOO_MANY_UNIQUE_STATE_OBJECTS Handle = 0x887C0001 + D3D11_ERROR_FILE_NOT_FOUND Handle = 0x887C0002 + D3D11_ERROR_TOO_MANY_UNIQUE_VIEW_OBJECTS Handle = 0x887C0003 + D3D11_ERROR_DEFERRED_CONTEXT_MAP_WITHOUT_INITIAL_DISCARD Handle = 0x887C0004 + D3D12_ERROR_ADAPTER_NOT_FOUND Handle = 0x887E0001 + D3D12_ERROR_DRIVER_VERSION_MISMATCH Handle = 0x887E0002 + D2DERR_WRONG_STATE Handle = 0x88990001 + D2DERR_NOT_INITIALIZED Handle = 0x88990002 + D2DERR_UNSUPPORTED_OPERATION Handle = 0x88990003 + D2DERR_SCANNER_FAILED Handle = 0x88990004 + D2DERR_SCREEN_ACCESS_DENIED Handle = 0x88990005 + D2DERR_DISPLAY_STATE_INVALID Handle = 0x88990006 + D2DERR_ZERO_VECTOR Handle = 0x88990007 + D2DERR_INTERNAL_ERROR Handle = 0x88990008 + D2DERR_DISPLAY_FORMAT_NOT_SUPPORTED Handle = 0x88990009 + D2DERR_INVALID_CALL Handle = 0x8899000A + D2DERR_NO_HARDWARE_DEVICE Handle = 0x8899000B + D2DERR_RECREATE_TARGET Handle = 0x8899000C + D2DERR_TOO_MANY_SHADER_ELEMENTS Handle = 0x8899000D + D2DERR_SHADER_COMPILE_FAILED Handle = 0x8899000E + D2DERR_MAX_TEXTURE_SIZE_EXCEEDED Handle = 0x8899000F + D2DERR_UNSUPPORTED_VERSION Handle = 0x88990010 + D2DERR_BAD_NUMBER Handle = 0x88990011 + D2DERR_WRONG_FACTORY Handle = 0x88990012 + D2DERR_LAYER_ALREADY_IN_USE Handle = 0x88990013 + D2DERR_POP_CALL_DID_NOT_MATCH_PUSH Handle = 0x88990014 + D2DERR_WRONG_RESOURCE_DOMAIN Handle = 0x88990015 + D2DERR_PUSH_POP_UNBALANCED Handle = 0x88990016 + D2DERR_RENDER_TARGET_HAS_LAYER_OR_CLIPRECT Handle = 0x88990017 + D2DERR_INCOMPATIBLE_BRUSH_TYPES Handle = 0x88990018 + D2DERR_WIN32_ERROR Handle = 0x88990019 + 
D2DERR_TARGET_NOT_GDI_COMPATIBLE Handle = 0x8899001A + D2DERR_TEXT_EFFECT_IS_WRONG_TYPE Handle = 0x8899001B + D2DERR_TEXT_RENDERER_NOT_RELEASED Handle = 0x8899001C + D2DERR_EXCEEDS_MAX_BITMAP_SIZE Handle = 0x8899001D + D2DERR_INVALID_GRAPH_CONFIGURATION Handle = 0x8899001E + D2DERR_INVALID_INTERNAL_GRAPH_CONFIGURATION Handle = 0x8899001F + D2DERR_CYCLIC_GRAPH Handle = 0x88990020 + D2DERR_BITMAP_CANNOT_DRAW Handle = 0x88990021 + D2DERR_OUTSTANDING_BITMAP_REFERENCES Handle = 0x88990022 + D2DERR_ORIGINAL_TARGET_NOT_BOUND Handle = 0x88990023 + D2DERR_INVALID_TARGET Handle = 0x88990024 + D2DERR_BITMAP_BOUND_AS_TARGET Handle = 0x88990025 + D2DERR_INSUFFICIENT_DEVICE_CAPABILITIES Handle = 0x88990026 + D2DERR_INTERMEDIATE_TOO_LARGE Handle = 0x88990027 + D2DERR_EFFECT_IS_NOT_REGISTERED Handle = 0x88990028 + D2DERR_INVALID_PROPERTY Handle = 0x88990029 + D2DERR_NO_SUBPROPERTIES Handle = 0x8899002A + D2DERR_PRINT_JOB_CLOSED Handle = 0x8899002B + D2DERR_PRINT_FORMAT_NOT_SUPPORTED Handle = 0x8899002C + D2DERR_TOO_MANY_TRANSFORM_INPUTS Handle = 0x8899002D + D2DERR_INVALID_GLYPH_IMAGE Handle = 0x8899002E + DWRITE_E_FILEFORMAT Handle = 0x88985000 + DWRITE_E_UNEXPECTED Handle = 0x88985001 + DWRITE_E_NOFONT Handle = 0x88985002 + DWRITE_E_FILENOTFOUND Handle = 0x88985003 + DWRITE_E_FILEACCESS Handle = 0x88985004 + DWRITE_E_FONTCOLLECTIONOBSOLETE Handle = 0x88985005 + DWRITE_E_ALREADYREGISTERED Handle = 0x88985006 + DWRITE_E_CACHEFORMAT Handle = 0x88985007 + DWRITE_E_CACHEVERSION Handle = 0x88985008 + DWRITE_E_UNSUPPORTEDOPERATION Handle = 0x88985009 + DWRITE_E_TEXTRENDERERINCOMPATIBLE Handle = 0x8898500A + DWRITE_E_FLOWDIRECTIONCONFLICTS Handle = 0x8898500B + DWRITE_E_NOCOLOR Handle = 0x8898500C + DWRITE_E_REMOTEFONT Handle = 0x8898500D + DWRITE_E_DOWNLOADCANCELLED Handle = 0x8898500E + DWRITE_E_DOWNLOADFAILED Handle = 0x8898500F + DWRITE_E_TOOMANYDOWNLOADS Handle = 0x88985010 + WINCODEC_ERR_WRONGSTATE Handle = 0x88982F04 + WINCODEC_ERR_VALUEOUTOFRANGE Handle = 0x88982F05 + WINCODEC_ERR_UNKNOWNIMAGEFORMAT Handle = 0x88982F07 + WINCODEC_ERR_UNSUPPORTEDVERSION Handle = 0x88982F0B + WINCODEC_ERR_NOTINITIALIZED Handle = 0x88982F0C + WINCODEC_ERR_ALREADYLOCKED Handle = 0x88982F0D + WINCODEC_ERR_PROPERTYNOTFOUND Handle = 0x88982F40 + WINCODEC_ERR_PROPERTYNOTSUPPORTED Handle = 0x88982F41 + WINCODEC_ERR_PROPERTYSIZE Handle = 0x88982F42 + WINCODEC_ERR_CODECPRESENT Handle = 0x88982F43 + WINCODEC_ERR_CODECNOTHUMBNAIL Handle = 0x88982F44 + WINCODEC_ERR_PALETTEUNAVAILABLE Handle = 0x88982F45 + WINCODEC_ERR_CODECTOOMANYSCANLINES Handle = 0x88982F46 + WINCODEC_ERR_INTERNALERROR Handle = 0x88982F48 + WINCODEC_ERR_SOURCERECTDOESNOTMATCHDIMENSIONS Handle = 0x88982F49 + WINCODEC_ERR_COMPONENTNOTFOUND Handle = 0x88982F50 + WINCODEC_ERR_IMAGESIZEOUTOFRANGE Handle = 0x88982F51 + WINCODEC_ERR_TOOMUCHMETADATA Handle = 0x88982F52 + WINCODEC_ERR_BADIMAGE Handle = 0x88982F60 + WINCODEC_ERR_BADHEADER Handle = 0x88982F61 + WINCODEC_ERR_FRAMEMISSING Handle = 0x88982F62 + WINCODEC_ERR_BADMETADATAHEADER Handle = 0x88982F63 + WINCODEC_ERR_BADSTREAMDATA Handle = 0x88982F70 + WINCODEC_ERR_STREAMWRITE Handle = 0x88982F71 + WINCODEC_ERR_STREAMREAD Handle = 0x88982F72 + WINCODEC_ERR_STREAMNOTAVAILABLE Handle = 0x88982F73 + WINCODEC_ERR_UNSUPPORTEDPIXELFORMAT Handle = 0x88982F80 + WINCODEC_ERR_UNSUPPORTEDOPERATION Handle = 0x88982F81 + WINCODEC_ERR_INVALIDREGISTRATION Handle = 0x88982F8A + WINCODEC_ERR_COMPONENTINITIALIZEFAILURE Handle = 0x88982F8B + WINCODEC_ERR_INSUFFICIENTBUFFER Handle = 0x88982F8C + WINCODEC_ERR_DUPLICATEMETADATAPRESENT Handle 
= 0x88982F8D + WINCODEC_ERR_PROPERTYUNEXPECTEDTYPE Handle = 0x88982F8E + WINCODEC_ERR_UNEXPECTEDSIZE Handle = 0x88982F8F + WINCODEC_ERR_INVALIDQUERYREQUEST Handle = 0x88982F90 + WINCODEC_ERR_UNEXPECTEDMETADATATYPE Handle = 0x88982F91 + WINCODEC_ERR_REQUESTONLYVALIDATMETADATAROOT Handle = 0x88982F92 + WINCODEC_ERR_INVALIDQUERYCHARACTER Handle = 0x88982F93 + WINCODEC_ERR_WIN32ERROR Handle = 0x88982F94 + WINCODEC_ERR_INVALIDPROGRESSIVELEVEL Handle = 0x88982F95 + WINCODEC_ERR_INVALIDJPEGSCANINDEX Handle = 0x88982F96 + MILERR_OBJECTBUSY Handle = 0x88980001 + MILERR_INSUFFICIENTBUFFER Handle = 0x88980002 + MILERR_WIN32ERROR Handle = 0x88980003 + MILERR_SCANNER_FAILED Handle = 0x88980004 + MILERR_SCREENACCESSDENIED Handle = 0x88980005 + MILERR_DISPLAYSTATEINVALID Handle = 0x88980006 + MILERR_NONINVERTIBLEMATRIX Handle = 0x88980007 + MILERR_ZEROVECTOR Handle = 0x88980008 + MILERR_TERMINATED Handle = 0x88980009 + MILERR_BADNUMBER Handle = 0x8898000A + MILERR_INTERNALERROR Handle = 0x88980080 + MILERR_DISPLAYFORMATNOTSUPPORTED Handle = 0x88980084 + MILERR_INVALIDCALL Handle = 0x88980085 + MILERR_ALREADYLOCKED Handle = 0x88980086 + MILERR_NOTLOCKED Handle = 0x88980087 + MILERR_DEVICECANNOTRENDERTEXT Handle = 0x88980088 + MILERR_GLYPHBITMAPMISSED Handle = 0x88980089 + MILERR_MALFORMEDGLYPHCACHE Handle = 0x8898008A + MILERR_GENERIC_IGNORE Handle = 0x8898008B + MILERR_MALFORMED_GUIDELINE_DATA Handle = 0x8898008C + MILERR_NO_HARDWARE_DEVICE Handle = 0x8898008D + MILERR_NEED_RECREATE_AND_PRESENT Handle = 0x8898008E + MILERR_ALREADY_INITIALIZED Handle = 0x8898008F + MILERR_MISMATCHED_SIZE Handle = 0x88980090 + MILERR_NO_REDIRECTION_SURFACE_AVAILABLE Handle = 0x88980091 + MILERR_REMOTING_NOT_SUPPORTED Handle = 0x88980092 + MILERR_QUEUED_PRESENT_NOT_SUPPORTED Handle = 0x88980093 + MILERR_NOT_QUEUING_PRESENTS Handle = 0x88980094 + MILERR_NO_REDIRECTION_SURFACE_RETRY_LATER Handle = 0x88980095 + MILERR_TOOMANYSHADERELEMNTS Handle = 0x88980096 + MILERR_MROW_READLOCK_FAILED Handle = 0x88980097 + MILERR_MROW_UPDATE_FAILED Handle = 0x88980098 + MILERR_SHADER_COMPILE_FAILED Handle = 0x88980099 + MILERR_MAX_TEXTURE_SIZE_EXCEEDED Handle = 0x8898009A + MILERR_QPC_TIME_WENT_BACKWARD Handle = 0x8898009B + MILERR_DXGI_ENUMERATION_OUT_OF_SYNC Handle = 0x8898009D + MILERR_ADAPTER_NOT_FOUND Handle = 0x8898009E + MILERR_COLORSPACE_NOT_SUPPORTED Handle = 0x8898009F + MILERR_PREFILTER_NOT_SUPPORTED Handle = 0x889800A0 + MILERR_DISPLAYID_ACCESS_DENIED Handle = 0x889800A1 + UCEERR_INVALIDPACKETHEADER Handle = 0x88980400 + UCEERR_UNKNOWNPACKET Handle = 0x88980401 + UCEERR_ILLEGALPACKET Handle = 0x88980402 + UCEERR_MALFORMEDPACKET Handle = 0x88980403 + UCEERR_ILLEGALHANDLE Handle = 0x88980404 + UCEERR_HANDLELOOKUPFAILED Handle = 0x88980405 + UCEERR_RENDERTHREADFAILURE Handle = 0x88980406 + UCEERR_CTXSTACKFRSTTARGETNULL Handle = 0x88980407 + UCEERR_CONNECTIONIDLOOKUPFAILED Handle = 0x88980408 + UCEERR_BLOCKSFULL Handle = 0x88980409 + UCEERR_MEMORYFAILURE Handle = 0x8898040A + UCEERR_PACKETRECORDOUTOFRANGE Handle = 0x8898040B + UCEERR_ILLEGALRECORDTYPE Handle = 0x8898040C + UCEERR_OUTOFHANDLES Handle = 0x8898040D + UCEERR_UNCHANGABLE_UPDATE_ATTEMPTED Handle = 0x8898040E + UCEERR_NO_MULTIPLE_WORKER_THREADS Handle = 0x8898040F + UCEERR_REMOTINGNOTSUPPORTED Handle = 0x88980410 + UCEERR_MISSINGENDCOMMAND Handle = 0x88980411 + UCEERR_MISSINGBEGINCOMMAND Handle = 0x88980412 + UCEERR_CHANNELSYNCTIMEDOUT Handle = 0x88980413 + UCEERR_CHANNELSYNCABANDONED Handle = 0x88980414 + UCEERR_UNSUPPORTEDTRANSPORTVERSION Handle = 0x88980415 + 
UCEERR_TRANSPORTUNAVAILABLE Handle = 0x88980416 + UCEERR_FEEDBACK_UNSUPPORTED Handle = 0x88980417 + UCEERR_COMMANDTRANSPORTDENIED Handle = 0x88980418 + UCEERR_GRAPHICSSTREAMUNAVAILABLE Handle = 0x88980419 + UCEERR_GRAPHICSSTREAMALREADYOPEN Handle = 0x88980420 + UCEERR_TRANSPORTDISCONNECTED Handle = 0x88980421 + UCEERR_TRANSPORTOVERLOADED Handle = 0x88980422 + UCEERR_PARTITION_ZOMBIED Handle = 0x88980423 + MILAVERR_NOCLOCK Handle = 0x88980500 + MILAVERR_NOMEDIATYPE Handle = 0x88980501 + MILAVERR_NOVIDEOMIXER Handle = 0x88980502 + MILAVERR_NOVIDEOPRESENTER Handle = 0x88980503 + MILAVERR_NOREADYFRAMES Handle = 0x88980504 + MILAVERR_MODULENOTLOADED Handle = 0x88980505 + MILAVERR_WMPFACTORYNOTREGISTERED Handle = 0x88980506 + MILAVERR_INVALIDWMPVERSION Handle = 0x88980507 + MILAVERR_INSUFFICIENTVIDEORESOURCES Handle = 0x88980508 + MILAVERR_VIDEOACCELERATIONNOTAVAILABLE Handle = 0x88980509 + MILAVERR_REQUESTEDTEXTURETOOBIG Handle = 0x8898050A + MILAVERR_SEEKFAILED Handle = 0x8898050B + MILAVERR_UNEXPECTEDWMPFAILURE Handle = 0x8898050C + MILAVERR_MEDIAPLAYERCLOSED Handle = 0x8898050D + MILAVERR_UNKNOWNHARDWAREERROR Handle = 0x8898050E + MILEFFECTSERR_UNKNOWNPROPERTY Handle = 0x8898060E + MILEFFECTSERR_EFFECTNOTPARTOFGROUP Handle = 0x8898060F + MILEFFECTSERR_NOINPUTSOURCEATTACHED Handle = 0x88980610 + MILEFFECTSERR_CONNECTORNOTCONNECTED Handle = 0x88980611 + MILEFFECTSERR_CONNECTORNOTASSOCIATEDWITHEFFECT Handle = 0x88980612 + MILEFFECTSERR_RESERVED Handle = 0x88980613 + MILEFFECTSERR_CYCLEDETECTED Handle = 0x88980614 + MILEFFECTSERR_EFFECTINMORETHANONEGRAPH Handle = 0x88980615 + MILEFFECTSERR_EFFECTALREADYINAGRAPH Handle = 0x88980616 + MILEFFECTSERR_EFFECTHASNOCHILDREN Handle = 0x88980617 + MILEFFECTSERR_ALREADYATTACHEDTOLISTENER Handle = 0x88980618 + MILEFFECTSERR_NOTAFFINETRANSFORM Handle = 0x88980619 + MILEFFECTSERR_EMPTYBOUNDS Handle = 0x8898061A + MILEFFECTSERR_OUTPUTSIZETOOLARGE Handle = 0x8898061B + DWMERR_STATE_TRANSITION_FAILED Handle = 0x88980700 + DWMERR_THEME_FAILED Handle = 0x88980701 + DWMERR_CATASTROPHIC_FAILURE Handle = 0x88980702 + DCOMPOSITION_ERROR_WINDOW_ALREADY_COMPOSED Handle = 0x88980800 + DCOMPOSITION_ERROR_SURFACE_BEING_RENDERED Handle = 0x88980801 + DCOMPOSITION_ERROR_SURFACE_NOT_BEING_RENDERED Handle = 0x88980802 + ONL_E_INVALID_AUTHENTICATION_TARGET Handle = 0x80860001 + ONL_E_ACCESS_DENIED_BY_TOU Handle = 0x80860002 + ONL_E_INVALID_APPLICATION Handle = 0x80860003 + ONL_E_PASSWORD_UPDATE_REQUIRED Handle = 0x80860004 + ONL_E_ACCOUNT_UPDATE_REQUIRED Handle = 0x80860005 + ONL_E_FORCESIGNIN Handle = 0x80860006 + ONL_E_ACCOUNT_LOCKED Handle = 0x80860007 + ONL_E_PARENTAL_CONSENT_REQUIRED Handle = 0x80860008 + ONL_E_EMAIL_VERIFICATION_REQUIRED Handle = 0x80860009 + ONL_E_ACCOUNT_SUSPENDED_COMPROIMISE Handle = 0x8086000A + ONL_E_ACCOUNT_SUSPENDED_ABUSE Handle = 0x8086000B + ONL_E_ACTION_REQUIRED Handle = 0x8086000C + ONL_CONNECTION_COUNT_LIMIT Handle = 0x8086000D + ONL_E_CONNECTED_ACCOUNT_CAN_NOT_SIGNOUT Handle = 0x8086000E + ONL_E_USER_AUTHENTICATION_REQUIRED Handle = 0x8086000F + ONL_E_REQUEST_THROTTLED Handle = 0x80860010 + FA_E_MAX_PERSISTED_ITEMS_REACHED Handle = 0x80270220 + FA_E_HOMEGROUP_NOT_AVAILABLE Handle = 0x80270222 + E_MONITOR_RESOLUTION_TOO_LOW Handle = 0x80270250 + E_ELEVATED_ACTIVATION_NOT_SUPPORTED Handle = 0x80270251 + E_UAC_DISABLED Handle = 0x80270252 + E_FULL_ADMIN_NOT_SUPPORTED Handle = 0x80270253 + E_APPLICATION_NOT_REGISTERED Handle = 0x80270254 + E_MULTIPLE_EXTENSIONS_FOR_APPLICATION Handle = 0x80270255 + E_MULTIPLE_PACKAGES_FOR_FAMILY Handle = 
0x80270256 + E_APPLICATION_MANAGER_NOT_RUNNING Handle = 0x80270257 + S_STORE_LAUNCHED_FOR_REMEDIATION Handle = 0x00270258 + S_APPLICATION_ACTIVATION_ERROR_HANDLED_BY_DIALOG Handle = 0x00270259 + E_APPLICATION_ACTIVATION_TIMED_OUT Handle = 0x8027025A + E_APPLICATION_ACTIVATION_EXEC_FAILURE Handle = 0x8027025B + E_APPLICATION_TEMPORARY_LICENSE_ERROR Handle = 0x8027025C + E_APPLICATION_TRIAL_LICENSE_EXPIRED Handle = 0x8027025D + E_SKYDRIVE_ROOT_TARGET_FILE_SYSTEM_NOT_SUPPORTED Handle = 0x80270260 + E_SKYDRIVE_ROOT_TARGET_OVERLAP Handle = 0x80270261 + E_SKYDRIVE_ROOT_TARGET_CANNOT_INDEX Handle = 0x80270262 + E_SKYDRIVE_FILE_NOT_UPLOADED Handle = 0x80270263 + E_SKYDRIVE_UPDATE_AVAILABILITY_FAIL Handle = 0x80270264 + E_SKYDRIVE_ROOT_TARGET_VOLUME_ROOT_NOT_SUPPORTED Handle = 0x80270265 + E_SYNCENGINE_FILE_SIZE_OVER_LIMIT Handle = 0x8802B001 + E_SYNCENGINE_FILE_SIZE_EXCEEDS_REMAINING_QUOTA Handle = 0x8802B002 + E_SYNCENGINE_UNSUPPORTED_FILE_NAME Handle = 0x8802B003 + E_SYNCENGINE_FOLDER_ITEM_COUNT_LIMIT_EXCEEDED Handle = 0x8802B004 + E_SYNCENGINE_FILE_SYNC_PARTNER_ERROR Handle = 0x8802B005 + E_SYNCENGINE_SYNC_PAUSED_BY_SERVICE Handle = 0x8802B006 + E_SYNCENGINE_FILE_IDENTIFIER_UNKNOWN Handle = 0x8802C002 + E_SYNCENGINE_SERVICE_AUTHENTICATION_FAILED Handle = 0x8802C003 + E_SYNCENGINE_UNKNOWN_SERVICE_ERROR Handle = 0x8802C004 + E_SYNCENGINE_SERVICE_RETURNED_UNEXPECTED_SIZE Handle = 0x8802C005 + E_SYNCENGINE_REQUEST_BLOCKED_BY_SERVICE Handle = 0x8802C006 + E_SYNCENGINE_REQUEST_BLOCKED_DUE_TO_CLIENT_ERROR Handle = 0x8802C007 + E_SYNCENGINE_FOLDER_INACCESSIBLE Handle = 0x8802D001 + E_SYNCENGINE_UNSUPPORTED_FOLDER_NAME Handle = 0x8802D002 + E_SYNCENGINE_UNSUPPORTED_MARKET Handle = 0x8802D003 + E_SYNCENGINE_PATH_LENGTH_LIMIT_EXCEEDED Handle = 0x8802D004 + E_SYNCENGINE_REMOTE_PATH_LENGTH_LIMIT_EXCEEDED Handle = 0x8802D005 + E_SYNCENGINE_CLIENT_UPDATE_NEEDED Handle = 0x8802D006 + E_SYNCENGINE_PROXY_AUTHENTICATION_REQUIRED Handle = 0x8802D007 + E_SYNCENGINE_STORAGE_SERVICE_PROVISIONING_FAILED Handle = 0x8802D008 + E_SYNCENGINE_UNSUPPORTED_REPARSE_POINT Handle = 0x8802D009 + E_SYNCENGINE_STORAGE_SERVICE_BLOCKED Handle = 0x8802D00A + E_SYNCENGINE_FOLDER_IN_REDIRECTION Handle = 0x8802D00B + EAS_E_POLICY_NOT_MANAGED_BY_OS Handle = 0x80550001 + EAS_E_POLICY_COMPLIANT_WITH_ACTIONS Handle = 0x80550002 + EAS_E_REQUESTED_POLICY_NOT_ENFORCEABLE Handle = 0x80550003 + EAS_E_CURRENT_USER_HAS_BLANK_PASSWORD Handle = 0x80550004 + EAS_E_REQUESTED_POLICY_PASSWORD_EXPIRATION_INCOMPATIBLE Handle = 0x80550005 + EAS_E_USER_CANNOT_CHANGE_PASSWORD Handle = 0x80550006 + EAS_E_ADMINS_HAVE_BLANK_PASSWORD Handle = 0x80550007 + EAS_E_ADMINS_CANNOT_CHANGE_PASSWORD Handle = 0x80550008 + EAS_E_LOCAL_CONTROLLED_USERS_CANNOT_CHANGE_PASSWORD Handle = 0x80550009 + EAS_E_PASSWORD_POLICY_NOT_ENFORCEABLE_FOR_CONNECTED_ADMINS Handle = 0x8055000A + EAS_E_CONNECTED_ADMINS_NEED_TO_CHANGE_PASSWORD Handle = 0x8055000B + EAS_E_PASSWORD_POLICY_NOT_ENFORCEABLE_FOR_CURRENT_CONNECTED_USER Handle = 0x8055000C + EAS_E_CURRENT_CONNECTED_USER_NEED_TO_CHANGE_PASSWORD Handle = 0x8055000D + WEB_E_UNSUPPORTED_FORMAT Handle = 0x83750001 + WEB_E_INVALID_XML Handle = 0x83750002 + WEB_E_MISSING_REQUIRED_ELEMENT Handle = 0x83750003 + WEB_E_MISSING_REQUIRED_ATTRIBUTE Handle = 0x83750004 + WEB_E_UNEXPECTED_CONTENT Handle = 0x83750005 + WEB_E_RESOURCE_TOO_LARGE Handle = 0x83750006 + WEB_E_INVALID_JSON_STRING Handle = 0x83750007 + WEB_E_INVALID_JSON_NUMBER Handle = 0x83750008 + WEB_E_JSON_VALUE_NOT_FOUND Handle = 0x83750009 + HTTP_E_STATUS_UNEXPECTED Handle = 0x80190001 + 
HTTP_E_STATUS_UNEXPECTED_REDIRECTION Handle = 0x80190003 + HTTP_E_STATUS_UNEXPECTED_CLIENT_ERROR Handle = 0x80190004 + HTTP_E_STATUS_UNEXPECTED_SERVER_ERROR Handle = 0x80190005 + HTTP_E_STATUS_AMBIGUOUS Handle = 0x8019012C + HTTP_E_STATUS_MOVED Handle = 0x8019012D + HTTP_E_STATUS_REDIRECT Handle = 0x8019012E + HTTP_E_STATUS_REDIRECT_METHOD Handle = 0x8019012F + HTTP_E_STATUS_NOT_MODIFIED Handle = 0x80190130 + HTTP_E_STATUS_USE_PROXY Handle = 0x80190131 + HTTP_E_STATUS_REDIRECT_KEEP_VERB Handle = 0x80190133 + HTTP_E_STATUS_BAD_REQUEST Handle = 0x80190190 + HTTP_E_STATUS_DENIED Handle = 0x80190191 + HTTP_E_STATUS_PAYMENT_REQ Handle = 0x80190192 + HTTP_E_STATUS_FORBIDDEN Handle = 0x80190193 + HTTP_E_STATUS_NOT_FOUND Handle = 0x80190194 + HTTP_E_STATUS_BAD_METHOD Handle = 0x80190195 + HTTP_E_STATUS_NONE_ACCEPTABLE Handle = 0x80190196 + HTTP_E_STATUS_PROXY_AUTH_REQ Handle = 0x80190197 + HTTP_E_STATUS_REQUEST_TIMEOUT Handle = 0x80190198 + HTTP_E_STATUS_CONFLICT Handle = 0x80190199 + HTTP_E_STATUS_GONE Handle = 0x8019019A + HTTP_E_STATUS_LENGTH_REQUIRED Handle = 0x8019019B + HTTP_E_STATUS_PRECOND_FAILED Handle = 0x8019019C + HTTP_E_STATUS_REQUEST_TOO_LARGE Handle = 0x8019019D + HTTP_E_STATUS_URI_TOO_LONG Handle = 0x8019019E + HTTP_E_STATUS_UNSUPPORTED_MEDIA Handle = 0x8019019F + HTTP_E_STATUS_RANGE_NOT_SATISFIABLE Handle = 0x801901A0 + HTTP_E_STATUS_EXPECTATION_FAILED Handle = 0x801901A1 + HTTP_E_STATUS_SERVER_ERROR Handle = 0x801901F4 + HTTP_E_STATUS_NOT_SUPPORTED Handle = 0x801901F5 + HTTP_E_STATUS_BAD_GATEWAY Handle = 0x801901F6 + HTTP_E_STATUS_SERVICE_UNAVAIL Handle = 0x801901F7 + HTTP_E_STATUS_GATEWAY_TIMEOUT Handle = 0x801901F8 + HTTP_E_STATUS_VERSION_NOT_SUP Handle = 0x801901F9 + E_INVALID_PROTOCOL_OPERATION Handle = 0x83760001 + E_INVALID_PROTOCOL_FORMAT Handle = 0x83760002 + E_PROTOCOL_EXTENSIONS_NOT_SUPPORTED Handle = 0x83760003 + E_SUBPROTOCOL_NOT_SUPPORTED Handle = 0x83760004 + E_PROTOCOL_VERSION_NOT_SUPPORTED Handle = 0x83760005 + INPUT_E_OUT_OF_ORDER Handle = 0x80400000 + INPUT_E_REENTRANCY Handle = 0x80400001 + INPUT_E_MULTIMODAL Handle = 0x80400002 + INPUT_E_PACKET Handle = 0x80400003 + INPUT_E_FRAME Handle = 0x80400004 + INPUT_E_HISTORY Handle = 0x80400005 + INPUT_E_DEVICE_INFO Handle = 0x80400006 + INPUT_E_TRANSFORM Handle = 0x80400007 + INPUT_E_DEVICE_PROPERTY Handle = 0x80400008 + INET_E_INVALID_URL Handle = 0x800C0002 + INET_E_NO_SESSION Handle = 0x800C0003 + INET_E_CANNOT_CONNECT Handle = 0x800C0004 + INET_E_RESOURCE_NOT_FOUND Handle = 0x800C0005 + INET_E_OBJECT_NOT_FOUND Handle = 0x800C0006 + INET_E_DATA_NOT_AVAILABLE Handle = 0x800C0007 + INET_E_DOWNLOAD_FAILURE Handle = 0x800C0008 + INET_E_AUTHENTICATION_REQUIRED Handle = 0x800C0009 + INET_E_NO_VALID_MEDIA Handle = 0x800C000A + INET_E_CONNECTION_TIMEOUT Handle = 0x800C000B + INET_E_INVALID_REQUEST Handle = 0x800C000C + INET_E_UNKNOWN_PROTOCOL Handle = 0x800C000D + INET_E_SECURITY_PROBLEM Handle = 0x800C000E + INET_E_CANNOT_LOAD_DATA Handle = 0x800C000F + INET_E_CANNOT_INSTANTIATE_OBJECT Handle = 0x800C0010 + INET_E_INVALID_CERTIFICATE Handle = 0x800C0019 + INET_E_REDIRECT_FAILED Handle = 0x800C0014 + INET_E_REDIRECT_TO_DIR Handle = 0x800C0015 + ERROR_DBG_CREATE_PROCESS_FAILURE_LOCKDOWN Handle = 0x80B00001 + ERROR_DBG_ATTACH_PROCESS_FAILURE_LOCKDOWN Handle = 0x80B00002 + ERROR_DBG_CONNECT_SERVER_FAILURE_LOCKDOWN Handle = 0x80B00003 + ERROR_DBG_START_SERVER_FAILURE_LOCKDOWN Handle = 0x80B00004 + ERROR_IO_PREEMPTED Handle = 0x89010001 + JSCRIPT_E_CANTEXECUTE Handle = 0x89020001 + WEP_E_NOT_PROVISIONED_ON_ALL_VOLUMES Handle = 
0x88010001 + WEP_E_FIXED_DATA_NOT_SUPPORTED Handle = 0x88010002 + WEP_E_HARDWARE_NOT_COMPLIANT Handle = 0x88010003 + WEP_E_LOCK_NOT_CONFIGURED Handle = 0x88010004 + WEP_E_PROTECTION_SUSPENDED Handle = 0x88010005 + WEP_E_NO_LICENSE Handle = 0x88010006 + WEP_E_OS_NOT_PROTECTED Handle = 0x88010007 + WEP_E_UNEXPECTED_FAIL Handle = 0x88010008 + WEP_E_BUFFER_TOO_LARGE Handle = 0x88010009 + ERROR_SVHDX_ERROR_STORED Handle = 0xC05C0000 + ERROR_SVHDX_ERROR_NOT_AVAILABLE Handle = 0xC05CFF00 + ERROR_SVHDX_UNIT_ATTENTION_AVAILABLE Handle = 0xC05CFF01 + ERROR_SVHDX_UNIT_ATTENTION_CAPACITY_DATA_CHANGED Handle = 0xC05CFF02 + ERROR_SVHDX_UNIT_ATTENTION_RESERVATIONS_PREEMPTED Handle = 0xC05CFF03 + ERROR_SVHDX_UNIT_ATTENTION_RESERVATIONS_RELEASED Handle = 0xC05CFF04 + ERROR_SVHDX_UNIT_ATTENTION_REGISTRATIONS_PREEMPTED Handle = 0xC05CFF05 + ERROR_SVHDX_UNIT_ATTENTION_OPERATING_DEFINITION_CHANGED Handle = 0xC05CFF06 + ERROR_SVHDX_RESERVATION_CONFLICT Handle = 0xC05CFF07 + ERROR_SVHDX_WRONG_FILE_TYPE Handle = 0xC05CFF08 + ERROR_SVHDX_VERSION_MISMATCH Handle = 0xC05CFF09 + ERROR_VHD_SHARED Handle = 0xC05CFF0A + ERROR_SVHDX_NO_INITIATOR Handle = 0xC05CFF0B + ERROR_VHDSET_BACKING_STORAGE_NOT_FOUND Handle = 0xC05CFF0C + ERROR_SMB_NO_PREAUTH_INTEGRITY_HASH_OVERLAP Handle = 0xC05D0000 + ERROR_SMB_BAD_CLUSTER_DIALECT Handle = 0xC05D0001 + WININET_E_OUT_OF_HANDLES Handle = 0x80072EE1 + WININET_E_TIMEOUT Handle = 0x80072EE2 + WININET_E_EXTENDED_ERROR Handle = 0x80072EE3 + WININET_E_INTERNAL_ERROR Handle = 0x80072EE4 + WININET_E_INVALID_URL Handle = 0x80072EE5 + WININET_E_UNRECOGNIZED_SCHEME Handle = 0x80072EE6 + WININET_E_NAME_NOT_RESOLVED Handle = 0x80072EE7 + WININET_E_PROTOCOL_NOT_FOUND Handle = 0x80072EE8 + WININET_E_INVALID_OPTION Handle = 0x80072EE9 + WININET_E_BAD_OPTION_LENGTH Handle = 0x80072EEA + WININET_E_OPTION_NOT_SETTABLE Handle = 0x80072EEB + WININET_E_SHUTDOWN Handle = 0x80072EEC + WININET_E_INCORRECT_USER_NAME Handle = 0x80072EED + WININET_E_INCORRECT_PASSWORD Handle = 0x80072EEE + WININET_E_LOGIN_FAILURE Handle = 0x80072EEF + WININET_E_INVALID_OPERATION Handle = 0x80072EF0 + WININET_E_OPERATION_CANCELLED Handle = 0x80072EF1 + WININET_E_INCORRECT_HANDLE_TYPE Handle = 0x80072EF2 + WININET_E_INCORRECT_HANDLE_STATE Handle = 0x80072EF3 + WININET_E_NOT_PROXY_REQUEST Handle = 0x80072EF4 + WININET_E_REGISTRY_VALUE_NOT_FOUND Handle = 0x80072EF5 + WININET_E_BAD_REGISTRY_PARAMETER Handle = 0x80072EF6 + WININET_E_NO_DIRECT_ACCESS Handle = 0x80072EF7 + WININET_E_NO_CONTEXT Handle = 0x80072EF8 + WININET_E_NO_CALLBACK Handle = 0x80072EF9 + WININET_E_REQUEST_PENDING Handle = 0x80072EFA + WININET_E_INCORRECT_FORMAT Handle = 0x80072EFB + WININET_E_ITEM_NOT_FOUND Handle = 0x80072EFC + WININET_E_CANNOT_CONNECT Handle = 0x80072EFD + WININET_E_CONNECTION_ABORTED Handle = 0x80072EFE + WININET_E_CONNECTION_RESET Handle = 0x80072EFF + WININET_E_FORCE_RETRY Handle = 0x80072F00 + WININET_E_INVALID_PROXY_REQUEST Handle = 0x80072F01 + WININET_E_NEED_UI Handle = 0x80072F02 + WININET_E_HANDLE_EXISTS Handle = 0x80072F04 + WININET_E_SEC_CERT_DATE_INVALID Handle = 0x80072F05 + WININET_E_SEC_CERT_CN_INVALID Handle = 0x80072F06 + WININET_E_HTTP_TO_HTTPS_ON_REDIR Handle = 0x80072F07 + WININET_E_HTTPS_TO_HTTP_ON_REDIR Handle = 0x80072F08 + WININET_E_MIXED_SECURITY Handle = 0x80072F09 + WININET_E_CHG_POST_IS_NON_SECURE Handle = 0x80072F0A + WININET_E_POST_IS_NON_SECURE Handle = 0x80072F0B + WININET_E_CLIENT_AUTH_CERT_NEEDED Handle = 0x80072F0C + WININET_E_INVALID_CA Handle = 0x80072F0D + WININET_E_CLIENT_AUTH_NOT_SETUP Handle = 0x80072F0E 
+ WININET_E_ASYNC_THREAD_FAILED Handle = 0x80072F0F + WININET_E_REDIRECT_SCHEME_CHANGE Handle = 0x80072F10 + WININET_E_DIALOG_PENDING Handle = 0x80072F11 + WININET_E_RETRY_DIALOG Handle = 0x80072F12 + WININET_E_NO_NEW_CONTAINERS Handle = 0x80072F13 + WININET_E_HTTPS_HTTP_SUBMIT_REDIR Handle = 0x80072F14 + WININET_E_SEC_CERT_ERRORS Handle = 0x80072F17 + WININET_E_SEC_CERT_REV_FAILED Handle = 0x80072F19 + WININET_E_HEADER_NOT_FOUND Handle = 0x80072F76 + WININET_E_DOWNLEVEL_SERVER Handle = 0x80072F77 + WININET_E_INVALID_SERVER_RESPONSE Handle = 0x80072F78 + WININET_E_INVALID_HEADER Handle = 0x80072F79 + WININET_E_INVALID_QUERY_REQUEST Handle = 0x80072F7A + WININET_E_HEADER_ALREADY_EXISTS Handle = 0x80072F7B + WININET_E_REDIRECT_FAILED Handle = 0x80072F7C + WININET_E_SECURITY_CHANNEL_ERROR Handle = 0x80072F7D + WININET_E_UNABLE_TO_CACHE_FILE Handle = 0x80072F7E + WININET_E_TCPIP_NOT_INSTALLED Handle = 0x80072F7F + WININET_E_DISCONNECTED Handle = 0x80072F83 + WININET_E_SERVER_UNREACHABLE Handle = 0x80072F84 + WININET_E_PROXY_SERVER_UNREACHABLE Handle = 0x80072F85 + WININET_E_BAD_AUTO_PROXY_SCRIPT Handle = 0x80072F86 + WININET_E_UNABLE_TO_DOWNLOAD_SCRIPT Handle = 0x80072F87 + WININET_E_SEC_INVALID_CERT Handle = 0x80072F89 + WININET_E_SEC_CERT_REVOKED Handle = 0x80072F8A + WININET_E_FAILED_DUETOSECURITYCHECK Handle = 0x80072F8B + WININET_E_NOT_INITIALIZED Handle = 0x80072F8C + WININET_E_LOGIN_FAILURE_DISPLAY_ENTITY_BODY Handle = 0x80072F8E + WININET_E_DECODING_FAILED Handle = 0x80072F8F + WININET_E_NOT_REDIRECTED Handle = 0x80072F80 + WININET_E_COOKIE_NEEDS_CONFIRMATION Handle = 0x80072F81 + WININET_E_COOKIE_DECLINED Handle = 0x80072F82 + WININET_E_REDIRECT_NEEDS_CONFIRMATION Handle = 0x80072F88 + SQLITE_E_ERROR Handle = 0x87AF0001 + SQLITE_E_INTERNAL Handle = 0x87AF0002 + SQLITE_E_PERM Handle = 0x87AF0003 + SQLITE_E_ABORT Handle = 0x87AF0004 + SQLITE_E_BUSY Handle = 0x87AF0005 + SQLITE_E_LOCKED Handle = 0x87AF0006 + SQLITE_E_NOMEM Handle = 0x87AF0007 + SQLITE_E_READONLY Handle = 0x87AF0008 + SQLITE_E_INTERRUPT Handle = 0x87AF0009 + SQLITE_E_IOERR Handle = 0x87AF000A + SQLITE_E_CORRUPT Handle = 0x87AF000B + SQLITE_E_NOTFOUND Handle = 0x87AF000C + SQLITE_E_FULL Handle = 0x87AF000D + SQLITE_E_CANTOPEN Handle = 0x87AF000E + SQLITE_E_PROTOCOL Handle = 0x87AF000F + SQLITE_E_EMPTY Handle = 0x87AF0010 + SQLITE_E_SCHEMA Handle = 0x87AF0011 + SQLITE_E_TOOBIG Handle = 0x87AF0012 + SQLITE_E_CONSTRAINT Handle = 0x87AF0013 + SQLITE_E_MISMATCH Handle = 0x87AF0014 + SQLITE_E_MISUSE Handle = 0x87AF0015 + SQLITE_E_NOLFS Handle = 0x87AF0016 + SQLITE_E_AUTH Handle = 0x87AF0017 + SQLITE_E_FORMAT Handle = 0x87AF0018 + SQLITE_E_RANGE Handle = 0x87AF0019 + SQLITE_E_NOTADB Handle = 0x87AF001A + SQLITE_E_NOTICE Handle = 0x87AF001B + SQLITE_E_WARNING Handle = 0x87AF001C + SQLITE_E_ROW Handle = 0x87AF0064 + SQLITE_E_DONE Handle = 0x87AF0065 + SQLITE_E_IOERR_READ Handle = 0x87AF010A + SQLITE_E_IOERR_SHORT_READ Handle = 0x87AF020A + SQLITE_E_IOERR_WRITE Handle = 0x87AF030A + SQLITE_E_IOERR_FSYNC Handle = 0x87AF040A + SQLITE_E_IOERR_DIR_FSYNC Handle = 0x87AF050A + SQLITE_E_IOERR_TRUNCATE Handle = 0x87AF060A + SQLITE_E_IOERR_FSTAT Handle = 0x87AF070A + SQLITE_E_IOERR_UNLOCK Handle = 0x87AF080A + SQLITE_E_IOERR_RDLOCK Handle = 0x87AF090A + SQLITE_E_IOERR_DELETE Handle = 0x87AF0A0A + SQLITE_E_IOERR_BLOCKED Handle = 0x87AF0B0A + SQLITE_E_IOERR_NOMEM Handle = 0x87AF0C0A + SQLITE_E_IOERR_ACCESS Handle = 0x87AF0D0A + SQLITE_E_IOERR_CHECKRESERVEDLOCK Handle = 0x87AF0E0A + SQLITE_E_IOERR_LOCK Handle = 0x87AF0F0A + SQLITE_E_IOERR_CLOSE 
Handle = 0x87AF100A + SQLITE_E_IOERR_DIR_CLOSE Handle = 0x87AF110A + SQLITE_E_IOERR_SHMOPEN Handle = 0x87AF120A + SQLITE_E_IOERR_SHMSIZE Handle = 0x87AF130A + SQLITE_E_IOERR_SHMLOCK Handle = 0x87AF140A + SQLITE_E_IOERR_SHMMAP Handle = 0x87AF150A + SQLITE_E_IOERR_SEEK Handle = 0x87AF160A + SQLITE_E_IOERR_DELETE_NOENT Handle = 0x87AF170A + SQLITE_E_IOERR_MMAP Handle = 0x87AF180A + SQLITE_E_IOERR_GETTEMPPATH Handle = 0x87AF190A + SQLITE_E_IOERR_CONVPATH Handle = 0x87AF1A0A + SQLITE_E_IOERR_VNODE Handle = 0x87AF1A02 + SQLITE_E_IOERR_AUTH Handle = 0x87AF1A03 + SQLITE_E_LOCKED_SHAREDCACHE Handle = 0x87AF0106 + SQLITE_E_BUSY_RECOVERY Handle = 0x87AF0105 + SQLITE_E_BUSY_SNAPSHOT Handle = 0x87AF0205 + SQLITE_E_CANTOPEN_NOTEMPDIR Handle = 0x87AF010E + SQLITE_E_CANTOPEN_ISDIR Handle = 0x87AF020E + SQLITE_E_CANTOPEN_FULLPATH Handle = 0x87AF030E + SQLITE_E_CANTOPEN_CONVPATH Handle = 0x87AF040E + SQLITE_E_CORRUPT_VTAB Handle = 0x87AF010B + SQLITE_E_READONLY_RECOVERY Handle = 0x87AF0108 + SQLITE_E_READONLY_CANTLOCK Handle = 0x87AF0208 + SQLITE_E_READONLY_ROLLBACK Handle = 0x87AF0308 + SQLITE_E_READONLY_DBMOVED Handle = 0x87AF0408 + SQLITE_E_ABORT_ROLLBACK Handle = 0x87AF0204 + SQLITE_E_CONSTRAINT_CHECK Handle = 0x87AF0113 + SQLITE_E_CONSTRAINT_COMMITHOOK Handle = 0x87AF0213 + SQLITE_E_CONSTRAINT_FOREIGNKEY Handle = 0x87AF0313 + SQLITE_E_CONSTRAINT_FUNCTION Handle = 0x87AF0413 + SQLITE_E_CONSTRAINT_NOTNULL Handle = 0x87AF0513 + SQLITE_E_CONSTRAINT_PRIMARYKEY Handle = 0x87AF0613 + SQLITE_E_CONSTRAINT_TRIGGER Handle = 0x87AF0713 + SQLITE_E_CONSTRAINT_UNIQUE Handle = 0x87AF0813 + SQLITE_E_CONSTRAINT_VTAB Handle = 0x87AF0913 + SQLITE_E_CONSTRAINT_ROWID Handle = 0x87AF0A13 + SQLITE_E_NOTICE_RECOVER_WAL Handle = 0x87AF011B + SQLITE_E_NOTICE_RECOVER_ROLLBACK Handle = 0x87AF021B + SQLITE_E_WARNING_AUTOINDEX Handle = 0x87AF011C + UTC_E_TOGGLE_TRACE_STARTED Handle = 0x87C51001 + UTC_E_ALTERNATIVE_TRACE_CANNOT_PREEMPT Handle = 0x87C51002 + UTC_E_AOT_NOT_RUNNING Handle = 0x87C51003 + UTC_E_SCRIPT_TYPE_INVALID Handle = 0x87C51004 + UTC_E_SCENARIODEF_NOT_FOUND Handle = 0x87C51005 + UTC_E_TRACEPROFILE_NOT_FOUND Handle = 0x87C51006 + UTC_E_FORWARDER_ALREADY_ENABLED Handle = 0x87C51007 + UTC_E_FORWARDER_ALREADY_DISABLED Handle = 0x87C51008 + UTC_E_EVENTLOG_ENTRY_MALFORMED Handle = 0x87C51009 + UTC_E_DIAGRULES_SCHEMAVERSION_MISMATCH Handle = 0x87C5100A + UTC_E_SCRIPT_TERMINATED Handle = 0x87C5100B + UTC_E_INVALID_CUSTOM_FILTER Handle = 0x87C5100C + UTC_E_TRACE_NOT_RUNNING Handle = 0x87C5100D + UTC_E_REESCALATED_TOO_QUICKLY Handle = 0x87C5100E + UTC_E_ESCALATION_ALREADY_RUNNING Handle = 0x87C5100F + UTC_E_PERFTRACK_ALREADY_TRACING Handle = 0x87C51010 + UTC_E_REACHED_MAX_ESCALATIONS Handle = 0x87C51011 + UTC_E_FORWARDER_PRODUCER_MISMATCH Handle = 0x87C51012 + UTC_E_INTENTIONAL_SCRIPT_FAILURE Handle = 0x87C51013 + UTC_E_SQM_INIT_FAILED Handle = 0x87C51014 + UTC_E_NO_WER_LOGGER_SUPPORTED Handle = 0x87C51015 + UTC_E_TRACERS_DONT_EXIST Handle = 0x87C51016 + UTC_E_WINRT_INIT_FAILED Handle = 0x87C51017 + UTC_E_SCENARIODEF_SCHEMAVERSION_MISMATCH Handle = 0x87C51018 + UTC_E_INVALID_FILTER Handle = 0x87C51019 + UTC_E_EXE_TERMINATED Handle = 0x87C5101A + UTC_E_ESCALATION_NOT_AUTHORIZED Handle = 0x87C5101B + UTC_E_SETUP_NOT_AUTHORIZED Handle = 0x87C5101C + UTC_E_CHILD_PROCESS_FAILED Handle = 0x87C5101D + UTC_E_COMMAND_LINE_NOT_AUTHORIZED Handle = 0x87C5101E + UTC_E_CANNOT_LOAD_SCENARIO_EDITOR_XML Handle = 0x87C5101F + UTC_E_ESCALATION_TIMED_OUT Handle = 0x87C51020 + UTC_E_SETUP_TIMED_OUT Handle = 0x87C51021 + UTC_E_TRIGGER_MISMATCH 
Handle = 0x87C51022 + UTC_E_TRIGGER_NOT_FOUND Handle = 0x87C51023 + UTC_E_SIF_NOT_SUPPORTED Handle = 0x87C51024 + UTC_E_DELAY_TERMINATED Handle = 0x87C51025 + UTC_E_DEVICE_TICKET_ERROR Handle = 0x87C51026 + UTC_E_TRACE_BUFFER_LIMIT_EXCEEDED Handle = 0x87C51027 + UTC_E_API_RESULT_UNAVAILABLE Handle = 0x87C51028 + UTC_E_RPC_TIMEOUT Handle = 0x87C51029 + UTC_E_RPC_WAIT_FAILED Handle = 0x87C5102A + UTC_E_API_BUSY Handle = 0x87C5102B + UTC_E_TRACE_MIN_DURATION_REQUIREMENT_NOT_MET Handle = 0x87C5102C + UTC_E_EXCLUSIVITY_NOT_AVAILABLE Handle = 0x87C5102D + UTC_E_GETFILE_FILE_PATH_NOT_APPROVED Handle = 0x87C5102E + UTC_E_ESCALATION_DIRECTORY_ALREADY_EXISTS Handle = 0x87C5102F + UTC_E_TIME_TRIGGER_ON_START_INVALID Handle = 0x87C51030 + UTC_E_TIME_TRIGGER_ONLY_VALID_ON_SINGLE_TRANSITION Handle = 0x87C51031 + UTC_E_TIME_TRIGGER_INVALID_TIME_RANGE Handle = 0x87C51032 + UTC_E_MULTIPLE_TIME_TRIGGER_ON_SINGLE_STATE Handle = 0x87C51033 + UTC_E_BINARY_MISSING Handle = 0x87C51034 + UTC_E_NETWORK_CAPTURE_NOT_ALLOWED Handle = 0x87C51035 + UTC_E_FAILED_TO_RESOLVE_CONTAINER_ID Handle = 0x87C51036 + UTC_E_UNABLE_TO_RESOLVE_SESSION Handle = 0x87C51037 + UTC_E_THROTTLED Handle = 0x87C51038 + UTC_E_UNAPPROVED_SCRIPT Handle = 0x87C51039 + UTC_E_SCRIPT_MISSING Handle = 0x87C5103A + UTC_E_SCENARIO_THROTTLED Handle = 0x87C5103B + UTC_E_API_NOT_SUPPORTED Handle = 0x87C5103C + UTC_E_GETFILE_EXTERNAL_PATH_NOT_APPROVED Handle = 0x87C5103D + UTC_E_TRY_GET_SCENARIO_TIMEOUT_EXCEEDED Handle = 0x87C5103E + UTC_E_CERT_REV_FAILED Handle = 0x87C5103F + UTC_E_FAILED_TO_START_NDISCAP Handle = 0x87C51040 + UTC_E_KERNELDUMP_LIMIT_REACHED Handle = 0x87C51041 + UTC_E_MISSING_AGGREGATE_EVENT_TAG Handle = 0x87C51042 + UTC_E_INVALID_AGGREGATION_STRUCT Handle = 0x87C51043 + UTC_E_ACTION_NOT_SUPPORTED_IN_DESTINATION Handle = 0x87C51044 + UTC_E_FILTER_MISSING_ATTRIBUTE Handle = 0x87C51045 + UTC_E_FILTER_INVALID_TYPE Handle = 0x87C51046 + UTC_E_FILTER_VARIABLE_NOT_FOUND Handle = 0x87C51047 + UTC_E_FILTER_FUNCTION_RESTRICTED Handle = 0x87C51048 + UTC_E_FILTER_VERSION_MISMATCH Handle = 0x87C51049 + UTC_E_FILTER_INVALID_FUNCTION Handle = 0x87C51050 + UTC_E_FILTER_INVALID_FUNCTION_PARAMS Handle = 0x87C51051 + UTC_E_FILTER_INVALID_COMMAND Handle = 0x87C51052 + UTC_E_FILTER_ILLEGAL_EVAL Handle = 0x87C51053 + UTC_E_TTTRACER_RETURNED_ERROR Handle = 0x87C51054 + UTC_E_AGENT_DIAGNOSTICS_TOO_LARGE Handle = 0x87C51055 + UTC_E_FAILED_TO_RECEIVE_AGENT_DIAGNOSTICS Handle = 0x87C51056 + UTC_E_SCENARIO_HAS_NO_ACTIONS Handle = 0x87C51057 + UTC_E_TTTRACER_STORAGE_FULL Handle = 0x87C51058 + UTC_E_INSUFFICIENT_SPACE_TO_START_TRACE Handle = 0x87C51059 + UTC_E_ESCALATION_CANCELLED_AT_SHUTDOWN Handle = 0x87C5105A + UTC_E_GETFILEINFOACTION_FILE_NOT_APPROVED Handle = 0x87C5105B + UTC_E_SETREGKEYACTION_TYPE_NOT_APPROVED Handle = 0x87C5105C + WINML_ERR_INVALID_DEVICE Handle = 0x88900001 + WINML_ERR_INVALID_BINDING Handle = 0x88900002 + WINML_ERR_VALUE_NOTFOUND Handle = 0x88900003 + WINML_ERR_SIZE_MISMATCH Handle = 0x88900004 + STATUS_WAIT_0 NTStatus = 0x00000000 + STATUS_SUCCESS NTStatus = 0x00000000 + STATUS_WAIT_1 NTStatus = 0x00000001 + STATUS_WAIT_2 NTStatus = 0x00000002 + STATUS_WAIT_3 NTStatus = 0x00000003 + STATUS_WAIT_63 NTStatus = 0x0000003F + STATUS_ABANDONED NTStatus = 0x00000080 + STATUS_ABANDONED_WAIT_0 NTStatus = 0x00000080 + STATUS_ABANDONED_WAIT_63 NTStatus = 0x000000BF + STATUS_USER_APC NTStatus = 0x000000C0 + STATUS_ALREADY_COMPLETE NTStatus = 0x000000FF + STATUS_KERNEL_APC NTStatus = 0x00000100 + STATUS_ALERTED NTStatus = 0x00000101 + STATUS_TIMEOUT 
NTStatus = 0x00000102 + STATUS_PENDING NTStatus = 0x00000103 + STATUS_REPARSE NTStatus = 0x00000104 + STATUS_MORE_ENTRIES NTStatus = 0x00000105 + STATUS_NOT_ALL_ASSIGNED NTStatus = 0x00000106 + STATUS_SOME_NOT_MAPPED NTStatus = 0x00000107 + STATUS_OPLOCK_BREAK_IN_PROGRESS NTStatus = 0x00000108 + STATUS_VOLUME_MOUNTED NTStatus = 0x00000109 + STATUS_RXACT_COMMITTED NTStatus = 0x0000010A + STATUS_NOTIFY_CLEANUP NTStatus = 0x0000010B + STATUS_NOTIFY_ENUM_DIR NTStatus = 0x0000010C + STATUS_NO_QUOTAS_FOR_ACCOUNT NTStatus = 0x0000010D + STATUS_PRIMARY_TRANSPORT_CONNECT_FAILED NTStatus = 0x0000010E + STATUS_PAGE_FAULT_TRANSITION NTStatus = 0x00000110 + STATUS_PAGE_FAULT_DEMAND_ZERO NTStatus = 0x00000111 + STATUS_PAGE_FAULT_COPY_ON_WRITE NTStatus = 0x00000112 + STATUS_PAGE_FAULT_GUARD_PAGE NTStatus = 0x00000113 + STATUS_PAGE_FAULT_PAGING_FILE NTStatus = 0x00000114 + STATUS_CACHE_PAGE_LOCKED NTStatus = 0x00000115 + STATUS_CRASH_DUMP NTStatus = 0x00000116 + STATUS_BUFFER_ALL_ZEROS NTStatus = 0x00000117 + STATUS_REPARSE_OBJECT NTStatus = 0x00000118 + STATUS_RESOURCE_REQUIREMENTS_CHANGED NTStatus = 0x00000119 + STATUS_TRANSLATION_COMPLETE NTStatus = 0x00000120 + STATUS_DS_MEMBERSHIP_EVALUATED_LOCALLY NTStatus = 0x00000121 + STATUS_NOTHING_TO_TERMINATE NTStatus = 0x00000122 + STATUS_PROCESS_NOT_IN_JOB NTStatus = 0x00000123 + STATUS_PROCESS_IN_JOB NTStatus = 0x00000124 + STATUS_VOLSNAP_HIBERNATE_READY NTStatus = 0x00000125 + STATUS_FSFILTER_OP_COMPLETED_SUCCESSFULLY NTStatus = 0x00000126 + STATUS_INTERRUPT_VECTOR_ALREADY_CONNECTED NTStatus = 0x00000127 + STATUS_INTERRUPT_STILL_CONNECTED NTStatus = 0x00000128 + STATUS_PROCESS_CLONED NTStatus = 0x00000129 + STATUS_FILE_LOCKED_WITH_ONLY_READERS NTStatus = 0x0000012A + STATUS_FILE_LOCKED_WITH_WRITERS NTStatus = 0x0000012B + STATUS_VALID_IMAGE_HASH NTStatus = 0x0000012C + STATUS_VALID_CATALOG_HASH NTStatus = 0x0000012D + STATUS_VALID_STRONG_CODE_HASH NTStatus = 0x0000012E + STATUS_GHOSTED NTStatus = 0x0000012F + STATUS_DATA_OVERWRITTEN NTStatus = 0x00000130 + STATUS_RESOURCEMANAGER_READ_ONLY NTStatus = 0x00000202 + STATUS_RING_PREVIOUSLY_EMPTY NTStatus = 0x00000210 + STATUS_RING_PREVIOUSLY_FULL NTStatus = 0x00000211 + STATUS_RING_PREVIOUSLY_ABOVE_QUOTA NTStatus = 0x00000212 + STATUS_RING_NEWLY_EMPTY NTStatus = 0x00000213 + STATUS_RING_SIGNAL_OPPOSITE_ENDPOINT NTStatus = 0x00000214 + STATUS_OPLOCK_SWITCHED_TO_NEW_HANDLE NTStatus = 0x00000215 + STATUS_OPLOCK_HANDLE_CLOSED NTStatus = 0x00000216 + STATUS_WAIT_FOR_OPLOCK NTStatus = 0x00000367 + STATUS_REPARSE_GLOBAL NTStatus = 0x00000368 + STATUS_FLT_IO_COMPLETE NTStatus = 0x001C0001 + STATUS_OBJECT_NAME_EXISTS NTStatus = 0x40000000 + STATUS_THREAD_WAS_SUSPENDED NTStatus = 0x40000001 + STATUS_WORKING_SET_LIMIT_RANGE NTStatus = 0x40000002 + STATUS_IMAGE_NOT_AT_BASE NTStatus = 0x40000003 + STATUS_RXACT_STATE_CREATED NTStatus = 0x40000004 + STATUS_SEGMENT_NOTIFICATION NTStatus = 0x40000005 + STATUS_LOCAL_USER_SESSION_KEY NTStatus = 0x40000006 + STATUS_BAD_CURRENT_DIRECTORY NTStatus = 0x40000007 + STATUS_SERIAL_MORE_WRITES NTStatus = 0x40000008 + STATUS_REGISTRY_RECOVERED NTStatus = 0x40000009 + STATUS_FT_READ_RECOVERY_FROM_BACKUP NTStatus = 0x4000000A + STATUS_FT_WRITE_RECOVERY NTStatus = 0x4000000B + STATUS_SERIAL_COUNTER_TIMEOUT NTStatus = 0x4000000C + STATUS_NULL_LM_PASSWORD NTStatus = 0x4000000D + STATUS_IMAGE_MACHINE_TYPE_MISMATCH NTStatus = 0x4000000E + STATUS_RECEIVE_PARTIAL NTStatus = 0x4000000F + STATUS_RECEIVE_EXPEDITED NTStatus = 0x40000010 + STATUS_RECEIVE_PARTIAL_EXPEDITED NTStatus = 0x40000011 + 
STATUS_EVENT_DONE NTStatus = 0x40000012 + STATUS_EVENT_PENDING NTStatus = 0x40000013 + STATUS_CHECKING_FILE_SYSTEM NTStatus = 0x40000014 + STATUS_FATAL_APP_EXIT NTStatus = 0x40000015 + STATUS_PREDEFINED_HANDLE NTStatus = 0x40000016 + STATUS_WAS_UNLOCKED NTStatus = 0x40000017 + STATUS_SERVICE_NOTIFICATION NTStatus = 0x40000018 + STATUS_WAS_LOCKED NTStatus = 0x40000019 + STATUS_LOG_HARD_ERROR NTStatus = 0x4000001A + STATUS_ALREADY_WIN32 NTStatus = 0x4000001B + STATUS_WX86_UNSIMULATE NTStatus = 0x4000001C + STATUS_WX86_CONTINUE NTStatus = 0x4000001D + STATUS_WX86_SINGLE_STEP NTStatus = 0x4000001E + STATUS_WX86_BREAKPOINT NTStatus = 0x4000001F + STATUS_WX86_EXCEPTION_CONTINUE NTStatus = 0x40000020 + STATUS_WX86_EXCEPTION_LASTCHANCE NTStatus = 0x40000021 + STATUS_WX86_EXCEPTION_CHAIN NTStatus = 0x40000022 + STATUS_IMAGE_MACHINE_TYPE_MISMATCH_EXE NTStatus = 0x40000023 + STATUS_NO_YIELD_PERFORMED NTStatus = 0x40000024 + STATUS_TIMER_RESUME_IGNORED NTStatus = 0x40000025 + STATUS_ARBITRATION_UNHANDLED NTStatus = 0x40000026 + STATUS_CARDBUS_NOT_SUPPORTED NTStatus = 0x40000027 + STATUS_WX86_CREATEWX86TIB NTStatus = 0x40000028 + STATUS_MP_PROCESSOR_MISMATCH NTStatus = 0x40000029 + STATUS_HIBERNATED NTStatus = 0x4000002A + STATUS_RESUME_HIBERNATION NTStatus = 0x4000002B + STATUS_FIRMWARE_UPDATED NTStatus = 0x4000002C + STATUS_DRIVERS_LEAKING_LOCKED_PAGES NTStatus = 0x4000002D + STATUS_MESSAGE_RETRIEVED NTStatus = 0x4000002E + STATUS_SYSTEM_POWERSTATE_TRANSITION NTStatus = 0x4000002F + STATUS_ALPC_CHECK_COMPLETION_LIST NTStatus = 0x40000030 + STATUS_SYSTEM_POWERSTATE_COMPLEX_TRANSITION NTStatus = 0x40000031 + STATUS_ACCESS_AUDIT_BY_POLICY NTStatus = 0x40000032 + STATUS_ABANDON_HIBERFILE NTStatus = 0x40000033 + STATUS_BIZRULES_NOT_ENABLED NTStatus = 0x40000034 + STATUS_FT_READ_FROM_COPY NTStatus = 0x40000035 + STATUS_IMAGE_AT_DIFFERENT_BASE NTStatus = 0x40000036 + STATUS_PATCH_DEFERRED NTStatus = 0x40000037 + STATUS_HEURISTIC_DAMAGE_POSSIBLE NTStatus = 0x40190001 + STATUS_GUARD_PAGE_VIOLATION NTStatus = 0x80000001 + STATUS_DATATYPE_MISALIGNMENT NTStatus = 0x80000002 + STATUS_BREAKPOINT NTStatus = 0x80000003 + STATUS_SINGLE_STEP NTStatus = 0x80000004 + STATUS_BUFFER_OVERFLOW NTStatus = 0x80000005 + STATUS_NO_MORE_FILES NTStatus = 0x80000006 + STATUS_WAKE_SYSTEM_DEBUGGER NTStatus = 0x80000007 + STATUS_HANDLES_CLOSED NTStatus = 0x8000000A + STATUS_NO_INHERITANCE NTStatus = 0x8000000B + STATUS_GUID_SUBSTITUTION_MADE NTStatus = 0x8000000C + STATUS_PARTIAL_COPY NTStatus = 0x8000000D + STATUS_DEVICE_PAPER_EMPTY NTStatus = 0x8000000E + STATUS_DEVICE_POWERED_OFF NTStatus = 0x8000000F + STATUS_DEVICE_OFF_LINE NTStatus = 0x80000010 + STATUS_DEVICE_BUSY NTStatus = 0x80000011 + STATUS_NO_MORE_EAS NTStatus = 0x80000012 + STATUS_INVALID_EA_NAME NTStatus = 0x80000013 + STATUS_EA_LIST_INCONSISTENT NTStatus = 0x80000014 + STATUS_INVALID_EA_FLAG NTStatus = 0x80000015 + STATUS_VERIFY_REQUIRED NTStatus = 0x80000016 + STATUS_EXTRANEOUS_INFORMATION NTStatus = 0x80000017 + STATUS_RXACT_COMMIT_NECESSARY NTStatus = 0x80000018 + STATUS_NO_MORE_ENTRIES NTStatus = 0x8000001A + STATUS_FILEMARK_DETECTED NTStatus = 0x8000001B + STATUS_MEDIA_CHANGED NTStatus = 0x8000001C + STATUS_BUS_RESET NTStatus = 0x8000001D + STATUS_END_OF_MEDIA NTStatus = 0x8000001E + STATUS_BEGINNING_OF_MEDIA NTStatus = 0x8000001F + STATUS_MEDIA_CHECK NTStatus = 0x80000020 + STATUS_SETMARK_DETECTED NTStatus = 0x80000021 + STATUS_NO_DATA_DETECTED NTStatus = 0x80000022 + STATUS_REDIRECTOR_HAS_OPEN_HANDLES NTStatus = 0x80000023 + STATUS_SERVER_HAS_OPEN_HANDLES 
[auto-generated vendored code hunk: `+` additions of NTStatus constant declarations (Windows NT status codes, apparently from a golang.org/x/sys/windows update), one `STATUS_<NAME> NTStatus = 0x<VALUE>` entry per line. The hunk covers the warning-severity codes beginning at STATUS_ALREADY_DISCONNECTED (0x80000025), the error-severity codes beginning at STATUS_UNSUCCESSFUL (0xC0000001), and the facility-specific ranges for cluster, filter manager (FLT), FVE, cloud files, ACPI, terminal services (CTX/RDP), PnP, MUI, SxS, transactions (TxF/TM/RM), and CLFS logging, ending at STATUS_LOG_PINNED_ARCHIVE_TAIL (0xC01A0018). The individual generated declarations, which were collapsed onto a few very long lines in extraction, are unchanged from upstream.]
STATUS_LOG_RECORD_NONEXISTENT NTStatus = 0xC01A0019 + STATUS_LOG_RECORDS_RESERVED_INVALID NTStatus = 0xC01A001A + STATUS_LOG_SPACE_RESERVED_INVALID NTStatus = 0xC01A001B + STATUS_LOG_TAIL_INVALID NTStatus = 0xC01A001C + STATUS_LOG_FULL NTStatus = 0xC01A001D + STATUS_LOG_MULTIPLEXED NTStatus = 0xC01A001E + STATUS_LOG_DEDICATED NTStatus = 0xC01A001F + STATUS_LOG_ARCHIVE_NOT_IN_PROGRESS NTStatus = 0xC01A0020 + STATUS_LOG_ARCHIVE_IN_PROGRESS NTStatus = 0xC01A0021 + STATUS_LOG_EPHEMERAL NTStatus = 0xC01A0022 + STATUS_LOG_NOT_ENOUGH_CONTAINERS NTStatus = 0xC01A0023 + STATUS_LOG_CLIENT_ALREADY_REGISTERED NTStatus = 0xC01A0024 + STATUS_LOG_CLIENT_NOT_REGISTERED NTStatus = 0xC01A0025 + STATUS_LOG_FULL_HANDLER_IN_PROGRESS NTStatus = 0xC01A0026 + STATUS_LOG_CONTAINER_READ_FAILED NTStatus = 0xC01A0027 + STATUS_LOG_CONTAINER_WRITE_FAILED NTStatus = 0xC01A0028 + STATUS_LOG_CONTAINER_OPEN_FAILED NTStatus = 0xC01A0029 + STATUS_LOG_CONTAINER_STATE_INVALID NTStatus = 0xC01A002A + STATUS_LOG_STATE_INVALID NTStatus = 0xC01A002B + STATUS_LOG_PINNED NTStatus = 0xC01A002C + STATUS_LOG_METADATA_FLUSH_FAILED NTStatus = 0xC01A002D + STATUS_LOG_INCONSISTENT_SECURITY NTStatus = 0xC01A002E + STATUS_LOG_APPENDED_FLUSH_FAILED NTStatus = 0xC01A002F + STATUS_LOG_PINNED_RESERVATION NTStatus = 0xC01A0030 + STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD NTStatus = 0xC01B00EA + STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD_RECOVERED NTStatus = 0x801B00EB + STATUS_VIDEO_DRIVER_DEBUG_REPORT_REQUEST NTStatus = 0x401B00EC + STATUS_MONITOR_NO_DESCRIPTOR NTStatus = 0xC01D0001 + STATUS_MONITOR_UNKNOWN_DESCRIPTOR_FORMAT NTStatus = 0xC01D0002 + STATUS_MONITOR_INVALID_DESCRIPTOR_CHECKSUM NTStatus = 0xC01D0003 + STATUS_MONITOR_INVALID_STANDARD_TIMING_BLOCK NTStatus = 0xC01D0004 + STATUS_MONITOR_WMI_DATABLOCK_REGISTRATION_FAILED NTStatus = 0xC01D0005 + STATUS_MONITOR_INVALID_SERIAL_NUMBER_MONDSC_BLOCK NTStatus = 0xC01D0006 + STATUS_MONITOR_INVALID_USER_FRIENDLY_MONDSC_BLOCK NTStatus = 0xC01D0007 + STATUS_MONITOR_NO_MORE_DESCRIPTOR_DATA NTStatus = 0xC01D0008 + STATUS_MONITOR_INVALID_DETAILED_TIMING_BLOCK NTStatus = 0xC01D0009 + STATUS_MONITOR_INVALID_MANUFACTURE_DATE NTStatus = 0xC01D000A + STATUS_GRAPHICS_NOT_EXCLUSIVE_MODE_OWNER NTStatus = 0xC01E0000 + STATUS_GRAPHICS_INSUFFICIENT_DMA_BUFFER NTStatus = 0xC01E0001 + STATUS_GRAPHICS_INVALID_DISPLAY_ADAPTER NTStatus = 0xC01E0002 + STATUS_GRAPHICS_ADAPTER_WAS_RESET NTStatus = 0xC01E0003 + STATUS_GRAPHICS_INVALID_DRIVER_MODEL NTStatus = 0xC01E0004 + STATUS_GRAPHICS_PRESENT_MODE_CHANGED NTStatus = 0xC01E0005 + STATUS_GRAPHICS_PRESENT_OCCLUDED NTStatus = 0xC01E0006 + STATUS_GRAPHICS_PRESENT_DENIED NTStatus = 0xC01E0007 + STATUS_GRAPHICS_CANNOTCOLORCONVERT NTStatus = 0xC01E0008 + STATUS_GRAPHICS_DRIVER_MISMATCH NTStatus = 0xC01E0009 + STATUS_GRAPHICS_PARTIAL_DATA_POPULATED NTStatus = 0x401E000A + STATUS_GRAPHICS_PRESENT_REDIRECTION_DISABLED NTStatus = 0xC01E000B + STATUS_GRAPHICS_PRESENT_UNOCCLUDED NTStatus = 0xC01E000C + STATUS_GRAPHICS_WINDOWDC_NOT_AVAILABLE NTStatus = 0xC01E000D + STATUS_GRAPHICS_WINDOWLESS_PRESENT_DISABLED NTStatus = 0xC01E000E + STATUS_GRAPHICS_PRESENT_INVALID_WINDOW NTStatus = 0xC01E000F + STATUS_GRAPHICS_PRESENT_BUFFER_NOT_BOUND NTStatus = 0xC01E0010 + STATUS_GRAPHICS_VAIL_STATE_CHANGED NTStatus = 0xC01E0011 + STATUS_GRAPHICS_INDIRECT_DISPLAY_ABANDON_SWAPCHAIN NTStatus = 0xC01E0012 + STATUS_GRAPHICS_INDIRECT_DISPLAY_DEVICE_STOPPED NTStatus = 0xC01E0013 + STATUS_GRAPHICS_NO_VIDEO_MEMORY NTStatus = 0xC01E0100 + STATUS_GRAPHICS_CANT_LOCK_MEMORY NTStatus = 0xC01E0101 + 
STATUS_GRAPHICS_ALLOCATION_BUSY NTStatus = 0xC01E0102 + STATUS_GRAPHICS_TOO_MANY_REFERENCES NTStatus = 0xC01E0103 + STATUS_GRAPHICS_TRY_AGAIN_LATER NTStatus = 0xC01E0104 + STATUS_GRAPHICS_TRY_AGAIN_NOW NTStatus = 0xC01E0105 + STATUS_GRAPHICS_ALLOCATION_INVALID NTStatus = 0xC01E0106 + STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNAVAILABLE NTStatus = 0xC01E0107 + STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNSUPPORTED NTStatus = 0xC01E0108 + STATUS_GRAPHICS_CANT_EVICT_PINNED_ALLOCATION NTStatus = 0xC01E0109 + STATUS_GRAPHICS_INVALID_ALLOCATION_USAGE NTStatus = 0xC01E0110 + STATUS_GRAPHICS_CANT_RENDER_LOCKED_ALLOCATION NTStatus = 0xC01E0111 + STATUS_GRAPHICS_ALLOCATION_CLOSED NTStatus = 0xC01E0112 + STATUS_GRAPHICS_INVALID_ALLOCATION_INSTANCE NTStatus = 0xC01E0113 + STATUS_GRAPHICS_INVALID_ALLOCATION_HANDLE NTStatus = 0xC01E0114 + STATUS_GRAPHICS_WRONG_ALLOCATION_DEVICE NTStatus = 0xC01E0115 + STATUS_GRAPHICS_ALLOCATION_CONTENT_LOST NTStatus = 0xC01E0116 + STATUS_GRAPHICS_GPU_EXCEPTION_ON_DEVICE NTStatus = 0xC01E0200 + STATUS_GRAPHICS_SKIP_ALLOCATION_PREPARATION NTStatus = 0x401E0201 + STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY NTStatus = 0xC01E0300 + STATUS_GRAPHICS_VIDPN_TOPOLOGY_NOT_SUPPORTED NTStatus = 0xC01E0301 + STATUS_GRAPHICS_VIDPN_TOPOLOGY_CURRENTLY_NOT_SUPPORTED NTStatus = 0xC01E0302 + STATUS_GRAPHICS_INVALID_VIDPN NTStatus = 0xC01E0303 + STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE NTStatus = 0xC01E0304 + STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET NTStatus = 0xC01E0305 + STATUS_GRAPHICS_VIDPN_MODALITY_NOT_SUPPORTED NTStatus = 0xC01E0306 + STATUS_GRAPHICS_MODE_NOT_PINNED NTStatus = 0x401E0307 + STATUS_GRAPHICS_INVALID_VIDPN_SOURCEMODESET NTStatus = 0xC01E0308 + STATUS_GRAPHICS_INVALID_VIDPN_TARGETMODESET NTStatus = 0xC01E0309 + STATUS_GRAPHICS_INVALID_FREQUENCY NTStatus = 0xC01E030A + STATUS_GRAPHICS_INVALID_ACTIVE_REGION NTStatus = 0xC01E030B + STATUS_GRAPHICS_INVALID_TOTAL_REGION NTStatus = 0xC01E030C + STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE_MODE NTStatus = 0xC01E0310 + STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET_MODE NTStatus = 0xC01E0311 + STATUS_GRAPHICS_PINNED_MODE_MUST_REMAIN_IN_SET NTStatus = 0xC01E0312 + STATUS_GRAPHICS_PATH_ALREADY_IN_TOPOLOGY NTStatus = 0xC01E0313 + STATUS_GRAPHICS_MODE_ALREADY_IN_MODESET NTStatus = 0xC01E0314 + STATUS_GRAPHICS_INVALID_VIDEOPRESENTSOURCESET NTStatus = 0xC01E0315 + STATUS_GRAPHICS_INVALID_VIDEOPRESENTTARGETSET NTStatus = 0xC01E0316 + STATUS_GRAPHICS_SOURCE_ALREADY_IN_SET NTStatus = 0xC01E0317 + STATUS_GRAPHICS_TARGET_ALREADY_IN_SET NTStatus = 0xC01E0318 + STATUS_GRAPHICS_INVALID_VIDPN_PRESENT_PATH NTStatus = 0xC01E0319 + STATUS_GRAPHICS_NO_RECOMMENDED_VIDPN_TOPOLOGY NTStatus = 0xC01E031A + STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGESET NTStatus = 0xC01E031B + STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE NTStatus = 0xC01E031C + STATUS_GRAPHICS_FREQUENCYRANGE_NOT_IN_SET NTStatus = 0xC01E031D + STATUS_GRAPHICS_NO_PREFERRED_MODE NTStatus = 0x401E031E + STATUS_GRAPHICS_FREQUENCYRANGE_ALREADY_IN_SET NTStatus = 0xC01E031F + STATUS_GRAPHICS_STALE_MODESET NTStatus = 0xC01E0320 + STATUS_GRAPHICS_INVALID_MONITOR_SOURCEMODESET NTStatus = 0xC01E0321 + STATUS_GRAPHICS_INVALID_MONITOR_SOURCE_MODE NTStatus = 0xC01E0322 + STATUS_GRAPHICS_NO_RECOMMENDED_FUNCTIONAL_VIDPN NTStatus = 0xC01E0323 + STATUS_GRAPHICS_MODE_ID_MUST_BE_UNIQUE NTStatus = 0xC01E0324 + STATUS_GRAPHICS_EMPTY_ADAPTER_MONITOR_MODE_SUPPORT_INTERSECTION NTStatus = 0xC01E0325 + STATUS_GRAPHICS_VIDEO_PRESENT_TARGETS_LESS_THAN_SOURCES NTStatus = 0xC01E0326 + STATUS_GRAPHICS_PATH_NOT_IN_TOPOLOGY 
NTStatus = 0xC01E0327 + STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_SOURCE NTStatus = 0xC01E0328 + STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_TARGET NTStatus = 0xC01E0329 + STATUS_GRAPHICS_INVALID_MONITORDESCRIPTORSET NTStatus = 0xC01E032A + STATUS_GRAPHICS_INVALID_MONITORDESCRIPTOR NTStatus = 0xC01E032B + STATUS_GRAPHICS_MONITORDESCRIPTOR_NOT_IN_SET NTStatus = 0xC01E032C + STATUS_GRAPHICS_MONITORDESCRIPTOR_ALREADY_IN_SET NTStatus = 0xC01E032D + STATUS_GRAPHICS_MONITORDESCRIPTOR_ID_MUST_BE_UNIQUE NTStatus = 0xC01E032E + STATUS_GRAPHICS_INVALID_VIDPN_TARGET_SUBSET_TYPE NTStatus = 0xC01E032F + STATUS_GRAPHICS_RESOURCES_NOT_RELATED NTStatus = 0xC01E0330 + STATUS_GRAPHICS_SOURCE_ID_MUST_BE_UNIQUE NTStatus = 0xC01E0331 + STATUS_GRAPHICS_TARGET_ID_MUST_BE_UNIQUE NTStatus = 0xC01E0332 + STATUS_GRAPHICS_NO_AVAILABLE_VIDPN_TARGET NTStatus = 0xC01E0333 + STATUS_GRAPHICS_MONITOR_COULD_NOT_BE_ASSOCIATED_WITH_ADAPTER NTStatus = 0xC01E0334 + STATUS_GRAPHICS_NO_VIDPNMGR NTStatus = 0xC01E0335 + STATUS_GRAPHICS_NO_ACTIVE_VIDPN NTStatus = 0xC01E0336 + STATUS_GRAPHICS_STALE_VIDPN_TOPOLOGY NTStatus = 0xC01E0337 + STATUS_GRAPHICS_MONITOR_NOT_CONNECTED NTStatus = 0xC01E0338 + STATUS_GRAPHICS_SOURCE_NOT_IN_TOPOLOGY NTStatus = 0xC01E0339 + STATUS_GRAPHICS_INVALID_PRIMARYSURFACE_SIZE NTStatus = 0xC01E033A + STATUS_GRAPHICS_INVALID_VISIBLEREGION_SIZE NTStatus = 0xC01E033B + STATUS_GRAPHICS_INVALID_STRIDE NTStatus = 0xC01E033C + STATUS_GRAPHICS_INVALID_PIXELFORMAT NTStatus = 0xC01E033D + STATUS_GRAPHICS_INVALID_COLORBASIS NTStatus = 0xC01E033E + STATUS_GRAPHICS_INVALID_PIXELVALUEACCESSMODE NTStatus = 0xC01E033F + STATUS_GRAPHICS_TARGET_NOT_IN_TOPOLOGY NTStatus = 0xC01E0340 + STATUS_GRAPHICS_NO_DISPLAY_MODE_MANAGEMENT_SUPPORT NTStatus = 0xC01E0341 + STATUS_GRAPHICS_VIDPN_SOURCE_IN_USE NTStatus = 0xC01E0342 + STATUS_GRAPHICS_CANT_ACCESS_ACTIVE_VIDPN NTStatus = 0xC01E0343 + STATUS_GRAPHICS_INVALID_PATH_IMPORTANCE_ORDINAL NTStatus = 0xC01E0344 + STATUS_GRAPHICS_INVALID_PATH_CONTENT_GEOMETRY_TRANSFORMATION NTStatus = 0xC01E0345 + STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_SUPPORTED NTStatus = 0xC01E0346 + STATUS_GRAPHICS_INVALID_GAMMA_RAMP NTStatus = 0xC01E0347 + STATUS_GRAPHICS_GAMMA_RAMP_NOT_SUPPORTED NTStatus = 0xC01E0348 + STATUS_GRAPHICS_MULTISAMPLING_NOT_SUPPORTED NTStatus = 0xC01E0349 + STATUS_GRAPHICS_MODE_NOT_IN_MODESET NTStatus = 0xC01E034A + STATUS_GRAPHICS_DATASET_IS_EMPTY NTStatus = 0x401E034B + STATUS_GRAPHICS_NO_MORE_ELEMENTS_IN_DATASET NTStatus = 0x401E034C + STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY_RECOMMENDATION_REASON NTStatus = 0xC01E034D + STATUS_GRAPHICS_INVALID_PATH_CONTENT_TYPE NTStatus = 0xC01E034E + STATUS_GRAPHICS_INVALID_COPYPROTECTION_TYPE NTStatus = 0xC01E034F + STATUS_GRAPHICS_UNASSIGNED_MODESET_ALREADY_EXISTS NTStatus = 0xC01E0350 + STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_PINNED NTStatus = 0x401E0351 + STATUS_GRAPHICS_INVALID_SCANLINE_ORDERING NTStatus = 0xC01E0352 + STATUS_GRAPHICS_TOPOLOGY_CHANGES_NOT_ALLOWED NTStatus = 0xC01E0353 + STATUS_GRAPHICS_NO_AVAILABLE_IMPORTANCE_ORDINALS NTStatus = 0xC01E0354 + STATUS_GRAPHICS_INCOMPATIBLE_PRIVATE_FORMAT NTStatus = 0xC01E0355 + STATUS_GRAPHICS_INVALID_MODE_PRUNING_ALGORITHM NTStatus = 0xC01E0356 + STATUS_GRAPHICS_INVALID_MONITOR_CAPABILITY_ORIGIN NTStatus = 0xC01E0357 + STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE_CONSTRAINT NTStatus = 0xC01E0358 + STATUS_GRAPHICS_MAX_NUM_PATHS_REACHED NTStatus = 0xC01E0359 + STATUS_GRAPHICS_CANCEL_VIDPN_TOPOLOGY_AUGMENTATION NTStatus = 0xC01E035A + 
STATUS_GRAPHICS_INVALID_CLIENT_TYPE NTStatus = 0xC01E035B + STATUS_GRAPHICS_CLIENTVIDPN_NOT_SET NTStatus = 0xC01E035C + STATUS_GRAPHICS_SPECIFIED_CHILD_ALREADY_CONNECTED NTStatus = 0xC01E0400 + STATUS_GRAPHICS_CHILD_DESCRIPTOR_NOT_SUPPORTED NTStatus = 0xC01E0401 + STATUS_GRAPHICS_UNKNOWN_CHILD_STATUS NTStatus = 0x401E042F + STATUS_GRAPHICS_NOT_A_LINKED_ADAPTER NTStatus = 0xC01E0430 + STATUS_GRAPHICS_LEADLINK_NOT_ENUMERATED NTStatus = 0xC01E0431 + STATUS_GRAPHICS_CHAINLINKS_NOT_ENUMERATED NTStatus = 0xC01E0432 + STATUS_GRAPHICS_ADAPTER_CHAIN_NOT_READY NTStatus = 0xC01E0433 + STATUS_GRAPHICS_CHAINLINKS_NOT_STARTED NTStatus = 0xC01E0434 + STATUS_GRAPHICS_CHAINLINKS_NOT_POWERED_ON NTStatus = 0xC01E0435 + STATUS_GRAPHICS_INCONSISTENT_DEVICE_LINK_STATE NTStatus = 0xC01E0436 + STATUS_GRAPHICS_LEADLINK_START_DEFERRED NTStatus = 0x401E0437 + STATUS_GRAPHICS_NOT_POST_DEVICE_DRIVER NTStatus = 0xC01E0438 + STATUS_GRAPHICS_POLLING_TOO_FREQUENTLY NTStatus = 0x401E0439 + STATUS_GRAPHICS_START_DEFERRED NTStatus = 0x401E043A + STATUS_GRAPHICS_ADAPTER_ACCESS_NOT_EXCLUDED NTStatus = 0xC01E043B + STATUS_GRAPHICS_DEPENDABLE_CHILD_STATUS NTStatus = 0x401E043C + STATUS_GRAPHICS_OPM_NOT_SUPPORTED NTStatus = 0xC01E0500 + STATUS_GRAPHICS_COPP_NOT_SUPPORTED NTStatus = 0xC01E0501 + STATUS_GRAPHICS_UAB_NOT_SUPPORTED NTStatus = 0xC01E0502 + STATUS_GRAPHICS_OPM_INVALID_ENCRYPTED_PARAMETERS NTStatus = 0xC01E0503 + STATUS_GRAPHICS_OPM_NO_PROTECTED_OUTPUTS_EXIST NTStatus = 0xC01E0505 + STATUS_GRAPHICS_OPM_INTERNAL_ERROR NTStatus = 0xC01E050B + STATUS_GRAPHICS_OPM_INVALID_HANDLE NTStatus = 0xC01E050C + STATUS_GRAPHICS_PVP_INVALID_CERTIFICATE_LENGTH NTStatus = 0xC01E050E + STATUS_GRAPHICS_OPM_SPANNING_MODE_ENABLED NTStatus = 0xC01E050F + STATUS_GRAPHICS_OPM_THEATER_MODE_ENABLED NTStatus = 0xC01E0510 + STATUS_GRAPHICS_PVP_HFS_FAILED NTStatus = 0xC01E0511 + STATUS_GRAPHICS_OPM_INVALID_SRM NTStatus = 0xC01E0512 + STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_HDCP NTStatus = 0xC01E0513 + STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_ACP NTStatus = 0xC01E0514 + STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_CGMSA NTStatus = 0xC01E0515 + STATUS_GRAPHICS_OPM_HDCP_SRM_NEVER_SET NTStatus = 0xC01E0516 + STATUS_GRAPHICS_OPM_RESOLUTION_TOO_HIGH NTStatus = 0xC01E0517 + STATUS_GRAPHICS_OPM_ALL_HDCP_HARDWARE_ALREADY_IN_USE NTStatus = 0xC01E0518 + STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_NO_LONGER_EXISTS NTStatus = 0xC01E051A + STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_COPP_SEMANTICS NTStatus = 0xC01E051C + STATUS_GRAPHICS_OPM_INVALID_INFORMATION_REQUEST NTStatus = 0xC01E051D + STATUS_GRAPHICS_OPM_DRIVER_INTERNAL_ERROR NTStatus = 0xC01E051E + STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_OPM_SEMANTICS NTStatus = 0xC01E051F + STATUS_GRAPHICS_OPM_SIGNALING_NOT_SUPPORTED NTStatus = 0xC01E0520 + STATUS_GRAPHICS_OPM_INVALID_CONFIGURATION_REQUEST NTStatus = 0xC01E0521 + STATUS_GRAPHICS_I2C_NOT_SUPPORTED NTStatus = 0xC01E0580 + STATUS_GRAPHICS_I2C_DEVICE_DOES_NOT_EXIST NTStatus = 0xC01E0581 + STATUS_GRAPHICS_I2C_ERROR_TRANSMITTING_DATA NTStatus = 0xC01E0582 + STATUS_GRAPHICS_I2C_ERROR_RECEIVING_DATA NTStatus = 0xC01E0583 + STATUS_GRAPHICS_DDCCI_VCP_NOT_SUPPORTED NTStatus = 0xC01E0584 + STATUS_GRAPHICS_DDCCI_INVALID_DATA NTStatus = 0xC01E0585 + STATUS_GRAPHICS_DDCCI_MONITOR_RETURNED_INVALID_TIMING_STATUS_BYTE NTStatus = 0xC01E0586 + STATUS_GRAPHICS_DDCCI_INVALID_CAPABILITIES_STRING NTStatus = 0xC01E0587 + STATUS_GRAPHICS_MCA_INTERNAL_ERROR NTStatus = 0xC01E0588 + STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_COMMAND NTStatus = 0xC01E0589 + 
STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_LENGTH NTStatus = 0xC01E058A + STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_CHECKSUM NTStatus = 0xC01E058B + STATUS_GRAPHICS_INVALID_PHYSICAL_MONITOR_HANDLE NTStatus = 0xC01E058C + STATUS_GRAPHICS_MONITOR_NO_LONGER_EXISTS NTStatus = 0xC01E058D + STATUS_GRAPHICS_ONLY_CONSOLE_SESSION_SUPPORTED NTStatus = 0xC01E05E0 + STATUS_GRAPHICS_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME NTStatus = 0xC01E05E1 + STATUS_GRAPHICS_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP NTStatus = 0xC01E05E2 + STATUS_GRAPHICS_MIRRORING_DEVICES_NOT_SUPPORTED NTStatus = 0xC01E05E3 + STATUS_GRAPHICS_INVALID_POINTER NTStatus = 0xC01E05E4 + STATUS_GRAPHICS_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE NTStatus = 0xC01E05E5 + STATUS_GRAPHICS_PARAMETER_ARRAY_TOO_SMALL NTStatus = 0xC01E05E6 + STATUS_GRAPHICS_INTERNAL_ERROR NTStatus = 0xC01E05E7 + STATUS_GRAPHICS_SESSION_TYPE_CHANGE_IN_PROGRESS NTStatus = 0xC01E05E8 + STATUS_FVE_LOCKED_VOLUME NTStatus = 0xC0210000 + STATUS_FVE_NOT_ENCRYPTED NTStatus = 0xC0210001 + STATUS_FVE_BAD_INFORMATION NTStatus = 0xC0210002 + STATUS_FVE_TOO_SMALL NTStatus = 0xC0210003 + STATUS_FVE_FAILED_WRONG_FS NTStatus = 0xC0210004 + STATUS_FVE_BAD_PARTITION_SIZE NTStatus = 0xC0210005 + STATUS_FVE_FS_NOT_EXTENDED NTStatus = 0xC0210006 + STATUS_FVE_FS_MOUNTED NTStatus = 0xC0210007 + STATUS_FVE_NO_LICENSE NTStatus = 0xC0210008 + STATUS_FVE_ACTION_NOT_ALLOWED NTStatus = 0xC0210009 + STATUS_FVE_BAD_DATA NTStatus = 0xC021000A + STATUS_FVE_VOLUME_NOT_BOUND NTStatus = 0xC021000B + STATUS_FVE_NOT_DATA_VOLUME NTStatus = 0xC021000C + STATUS_FVE_CONV_READ_ERROR NTStatus = 0xC021000D + STATUS_FVE_CONV_WRITE_ERROR NTStatus = 0xC021000E + STATUS_FVE_OVERLAPPED_UPDATE NTStatus = 0xC021000F + STATUS_FVE_FAILED_SECTOR_SIZE NTStatus = 0xC0210010 + STATUS_FVE_FAILED_AUTHENTICATION NTStatus = 0xC0210011 + STATUS_FVE_NOT_OS_VOLUME NTStatus = 0xC0210012 + STATUS_FVE_KEYFILE_NOT_FOUND NTStatus = 0xC0210013 + STATUS_FVE_KEYFILE_INVALID NTStatus = 0xC0210014 + STATUS_FVE_KEYFILE_NO_VMK NTStatus = 0xC0210015 + STATUS_FVE_TPM_DISABLED NTStatus = 0xC0210016 + STATUS_FVE_TPM_SRK_AUTH_NOT_ZERO NTStatus = 0xC0210017 + STATUS_FVE_TPM_INVALID_PCR NTStatus = 0xC0210018 + STATUS_FVE_TPM_NO_VMK NTStatus = 0xC0210019 + STATUS_FVE_PIN_INVALID NTStatus = 0xC021001A + STATUS_FVE_AUTH_INVALID_APPLICATION NTStatus = 0xC021001B + STATUS_FVE_AUTH_INVALID_CONFIG NTStatus = 0xC021001C + STATUS_FVE_DEBUGGER_ENABLED NTStatus = 0xC021001D + STATUS_FVE_DRY_RUN_FAILED NTStatus = 0xC021001E + STATUS_FVE_BAD_METADATA_POINTER NTStatus = 0xC021001F + STATUS_FVE_OLD_METADATA_COPY NTStatus = 0xC0210020 + STATUS_FVE_REBOOT_REQUIRED NTStatus = 0xC0210021 + STATUS_FVE_RAW_ACCESS NTStatus = 0xC0210022 + STATUS_FVE_RAW_BLOCKED NTStatus = 0xC0210023 + STATUS_FVE_NO_AUTOUNLOCK_MASTER_KEY NTStatus = 0xC0210024 + STATUS_FVE_MOR_FAILED NTStatus = 0xC0210025 + STATUS_FVE_NO_FEATURE_LICENSE NTStatus = 0xC0210026 + STATUS_FVE_POLICY_USER_DISABLE_RDV_NOT_ALLOWED NTStatus = 0xC0210027 + STATUS_FVE_CONV_RECOVERY_FAILED NTStatus = 0xC0210028 + STATUS_FVE_VIRTUALIZED_SPACE_TOO_BIG NTStatus = 0xC0210029 + STATUS_FVE_INVALID_DATUM_TYPE NTStatus = 0xC021002A + STATUS_FVE_VOLUME_TOO_SMALL NTStatus = 0xC0210030 + STATUS_FVE_ENH_PIN_INVALID NTStatus = 0xC0210031 + STATUS_FVE_FULL_ENCRYPTION_NOT_ALLOWED_ON_TP_STORAGE NTStatus = 0xC0210032 + STATUS_FVE_WIPE_NOT_ALLOWED_ON_TP_STORAGE NTStatus = 0xC0210033 + STATUS_FVE_NOT_ALLOWED_ON_CSV_STACK NTStatus = 0xC0210034 + STATUS_FVE_NOT_ALLOWED_ON_CLUSTER NTStatus = 0xC0210035 + 
STATUS_FVE_NOT_ALLOWED_TO_UPGRADE_WHILE_CONVERTING NTStatus = 0xC0210036 + STATUS_FVE_WIPE_CANCEL_NOT_APPLICABLE NTStatus = 0xC0210037 + STATUS_FVE_EDRIVE_DRY_RUN_FAILED NTStatus = 0xC0210038 + STATUS_FVE_SECUREBOOT_DISABLED NTStatus = 0xC0210039 + STATUS_FVE_SECUREBOOT_CONFIG_CHANGE NTStatus = 0xC021003A + STATUS_FVE_DEVICE_LOCKEDOUT NTStatus = 0xC021003B + STATUS_FVE_VOLUME_EXTEND_PREVENTS_EOW_DECRYPT NTStatus = 0xC021003C + STATUS_FVE_NOT_DE_VOLUME NTStatus = 0xC021003D + STATUS_FVE_PROTECTION_DISABLED NTStatus = 0xC021003E + STATUS_FVE_PROTECTION_CANNOT_BE_DISABLED NTStatus = 0xC021003F + STATUS_FVE_OSV_KSR_NOT_ALLOWED NTStatus = 0xC0210040 + STATUS_FWP_CALLOUT_NOT_FOUND NTStatus = 0xC0220001 + STATUS_FWP_CONDITION_NOT_FOUND NTStatus = 0xC0220002 + STATUS_FWP_FILTER_NOT_FOUND NTStatus = 0xC0220003 + STATUS_FWP_LAYER_NOT_FOUND NTStatus = 0xC0220004 + STATUS_FWP_PROVIDER_NOT_FOUND NTStatus = 0xC0220005 + STATUS_FWP_PROVIDER_CONTEXT_NOT_FOUND NTStatus = 0xC0220006 + STATUS_FWP_SUBLAYER_NOT_FOUND NTStatus = 0xC0220007 + STATUS_FWP_NOT_FOUND NTStatus = 0xC0220008 + STATUS_FWP_ALREADY_EXISTS NTStatus = 0xC0220009 + STATUS_FWP_IN_USE NTStatus = 0xC022000A + STATUS_FWP_DYNAMIC_SESSION_IN_PROGRESS NTStatus = 0xC022000B + STATUS_FWP_WRONG_SESSION NTStatus = 0xC022000C + STATUS_FWP_NO_TXN_IN_PROGRESS NTStatus = 0xC022000D + STATUS_FWP_TXN_IN_PROGRESS NTStatus = 0xC022000E + STATUS_FWP_TXN_ABORTED NTStatus = 0xC022000F + STATUS_FWP_SESSION_ABORTED NTStatus = 0xC0220010 + STATUS_FWP_INCOMPATIBLE_TXN NTStatus = 0xC0220011 + STATUS_FWP_TIMEOUT NTStatus = 0xC0220012 + STATUS_FWP_NET_EVENTS_DISABLED NTStatus = 0xC0220013 + STATUS_FWP_INCOMPATIBLE_LAYER NTStatus = 0xC0220014 + STATUS_FWP_KM_CLIENTS_ONLY NTStatus = 0xC0220015 + STATUS_FWP_LIFETIME_MISMATCH NTStatus = 0xC0220016 + STATUS_FWP_BUILTIN_OBJECT NTStatus = 0xC0220017 + STATUS_FWP_TOO_MANY_CALLOUTS NTStatus = 0xC0220018 + STATUS_FWP_NOTIFICATION_DROPPED NTStatus = 0xC0220019 + STATUS_FWP_TRAFFIC_MISMATCH NTStatus = 0xC022001A + STATUS_FWP_INCOMPATIBLE_SA_STATE NTStatus = 0xC022001B + STATUS_FWP_NULL_POINTER NTStatus = 0xC022001C + STATUS_FWP_INVALID_ENUMERATOR NTStatus = 0xC022001D + STATUS_FWP_INVALID_FLAGS NTStatus = 0xC022001E + STATUS_FWP_INVALID_NET_MASK NTStatus = 0xC022001F + STATUS_FWP_INVALID_RANGE NTStatus = 0xC0220020 + STATUS_FWP_INVALID_INTERVAL NTStatus = 0xC0220021 + STATUS_FWP_ZERO_LENGTH_ARRAY NTStatus = 0xC0220022 + STATUS_FWP_NULL_DISPLAY_NAME NTStatus = 0xC0220023 + STATUS_FWP_INVALID_ACTION_TYPE NTStatus = 0xC0220024 + STATUS_FWP_INVALID_WEIGHT NTStatus = 0xC0220025 + STATUS_FWP_MATCH_TYPE_MISMATCH NTStatus = 0xC0220026 + STATUS_FWP_TYPE_MISMATCH NTStatus = 0xC0220027 + STATUS_FWP_OUT_OF_BOUNDS NTStatus = 0xC0220028 + STATUS_FWP_RESERVED NTStatus = 0xC0220029 + STATUS_FWP_DUPLICATE_CONDITION NTStatus = 0xC022002A + STATUS_FWP_DUPLICATE_KEYMOD NTStatus = 0xC022002B + STATUS_FWP_ACTION_INCOMPATIBLE_WITH_LAYER NTStatus = 0xC022002C + STATUS_FWP_ACTION_INCOMPATIBLE_WITH_SUBLAYER NTStatus = 0xC022002D + STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_LAYER NTStatus = 0xC022002E + STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_CALLOUT NTStatus = 0xC022002F + STATUS_FWP_INCOMPATIBLE_AUTH_METHOD NTStatus = 0xC0220030 + STATUS_FWP_INCOMPATIBLE_DH_GROUP NTStatus = 0xC0220031 + STATUS_FWP_EM_NOT_SUPPORTED NTStatus = 0xC0220032 + STATUS_FWP_NEVER_MATCH NTStatus = 0xC0220033 + STATUS_FWP_PROVIDER_CONTEXT_MISMATCH NTStatus = 0xC0220034 + STATUS_FWP_INVALID_PARAMETER NTStatus = 0xC0220035 + STATUS_FWP_TOO_MANY_SUBLAYERS NTStatus = 0xC0220036 + 
STATUS_FWP_CALLOUT_NOTIFICATION_FAILED NTStatus = 0xC0220037 + STATUS_FWP_INVALID_AUTH_TRANSFORM NTStatus = 0xC0220038 + STATUS_FWP_INVALID_CIPHER_TRANSFORM NTStatus = 0xC0220039 + STATUS_FWP_INCOMPATIBLE_CIPHER_TRANSFORM NTStatus = 0xC022003A + STATUS_FWP_INVALID_TRANSFORM_COMBINATION NTStatus = 0xC022003B + STATUS_FWP_DUPLICATE_AUTH_METHOD NTStatus = 0xC022003C + STATUS_FWP_INVALID_TUNNEL_ENDPOINT NTStatus = 0xC022003D + STATUS_FWP_L2_DRIVER_NOT_READY NTStatus = 0xC022003E + STATUS_FWP_KEY_DICTATOR_ALREADY_REGISTERED NTStatus = 0xC022003F + STATUS_FWP_KEY_DICTATION_INVALID_KEYING_MATERIAL NTStatus = 0xC0220040 + STATUS_FWP_CONNECTIONS_DISABLED NTStatus = 0xC0220041 + STATUS_FWP_INVALID_DNS_NAME NTStatus = 0xC0220042 + STATUS_FWP_STILL_ON NTStatus = 0xC0220043 + STATUS_FWP_IKEEXT_NOT_RUNNING NTStatus = 0xC0220044 + STATUS_FWP_TCPIP_NOT_READY NTStatus = 0xC0220100 + STATUS_FWP_INJECT_HANDLE_CLOSING NTStatus = 0xC0220101 + STATUS_FWP_INJECT_HANDLE_STALE NTStatus = 0xC0220102 + STATUS_FWP_CANNOT_PEND NTStatus = 0xC0220103 + STATUS_FWP_DROP_NOICMP NTStatus = 0xC0220104 + STATUS_NDIS_CLOSING NTStatus = 0xC0230002 + STATUS_NDIS_BAD_VERSION NTStatus = 0xC0230004 + STATUS_NDIS_BAD_CHARACTERISTICS NTStatus = 0xC0230005 + STATUS_NDIS_ADAPTER_NOT_FOUND NTStatus = 0xC0230006 + STATUS_NDIS_OPEN_FAILED NTStatus = 0xC0230007 + STATUS_NDIS_DEVICE_FAILED NTStatus = 0xC0230008 + STATUS_NDIS_MULTICAST_FULL NTStatus = 0xC0230009 + STATUS_NDIS_MULTICAST_EXISTS NTStatus = 0xC023000A + STATUS_NDIS_MULTICAST_NOT_FOUND NTStatus = 0xC023000B + STATUS_NDIS_REQUEST_ABORTED NTStatus = 0xC023000C + STATUS_NDIS_RESET_IN_PROGRESS NTStatus = 0xC023000D + STATUS_NDIS_NOT_SUPPORTED NTStatus = 0xC02300BB + STATUS_NDIS_INVALID_PACKET NTStatus = 0xC023000F + STATUS_NDIS_ADAPTER_NOT_READY NTStatus = 0xC0230011 + STATUS_NDIS_INVALID_LENGTH NTStatus = 0xC0230014 + STATUS_NDIS_INVALID_DATA NTStatus = 0xC0230015 + STATUS_NDIS_BUFFER_TOO_SHORT NTStatus = 0xC0230016 + STATUS_NDIS_INVALID_OID NTStatus = 0xC0230017 + STATUS_NDIS_ADAPTER_REMOVED NTStatus = 0xC0230018 + STATUS_NDIS_UNSUPPORTED_MEDIA NTStatus = 0xC0230019 + STATUS_NDIS_GROUP_ADDRESS_IN_USE NTStatus = 0xC023001A + STATUS_NDIS_FILE_NOT_FOUND NTStatus = 0xC023001B + STATUS_NDIS_ERROR_READING_FILE NTStatus = 0xC023001C + STATUS_NDIS_ALREADY_MAPPED NTStatus = 0xC023001D + STATUS_NDIS_RESOURCE_CONFLICT NTStatus = 0xC023001E + STATUS_NDIS_MEDIA_DISCONNECTED NTStatus = 0xC023001F + STATUS_NDIS_INVALID_ADDRESS NTStatus = 0xC0230022 + STATUS_NDIS_INVALID_DEVICE_REQUEST NTStatus = 0xC0230010 + STATUS_NDIS_PAUSED NTStatus = 0xC023002A + STATUS_NDIS_INTERFACE_NOT_FOUND NTStatus = 0xC023002B + STATUS_NDIS_UNSUPPORTED_REVISION NTStatus = 0xC023002C + STATUS_NDIS_INVALID_PORT NTStatus = 0xC023002D + STATUS_NDIS_INVALID_PORT_STATE NTStatus = 0xC023002E + STATUS_NDIS_LOW_POWER_STATE NTStatus = 0xC023002F + STATUS_NDIS_REINIT_REQUIRED NTStatus = 0xC0230030 + STATUS_NDIS_NO_QUEUES NTStatus = 0xC0230031 + STATUS_NDIS_DOT11_AUTO_CONFIG_ENABLED NTStatus = 0xC0232000 + STATUS_NDIS_DOT11_MEDIA_IN_USE NTStatus = 0xC0232001 + STATUS_NDIS_DOT11_POWER_STATE_INVALID NTStatus = 0xC0232002 + STATUS_NDIS_PM_WOL_PATTERN_LIST_FULL NTStatus = 0xC0232003 + STATUS_NDIS_PM_PROTOCOL_OFFLOAD_LIST_FULL NTStatus = 0xC0232004 + STATUS_NDIS_DOT11_AP_CHANNEL_CURRENTLY_NOT_AVAILABLE NTStatus = 0xC0232005 + STATUS_NDIS_DOT11_AP_BAND_CURRENTLY_NOT_AVAILABLE NTStatus = 0xC0232006 + STATUS_NDIS_DOT11_AP_CHANNEL_NOT_ALLOWED NTStatus = 0xC0232007 + STATUS_NDIS_DOT11_AP_BAND_NOT_ALLOWED NTStatus = 0xC0232008 + 
STATUS_NDIS_INDICATION_REQUIRED NTStatus = 0x40230001 + STATUS_NDIS_OFFLOAD_POLICY NTStatus = 0xC023100F + STATUS_NDIS_OFFLOAD_CONNECTION_REJECTED NTStatus = 0xC0231012 + STATUS_NDIS_OFFLOAD_PATH_REJECTED NTStatus = 0xC0231013 + STATUS_TPM_ERROR_MASK NTStatus = 0xC0290000 + STATUS_TPM_AUTHFAIL NTStatus = 0xC0290001 + STATUS_TPM_BADINDEX NTStatus = 0xC0290002 + STATUS_TPM_BAD_PARAMETER NTStatus = 0xC0290003 + STATUS_TPM_AUDITFAILURE NTStatus = 0xC0290004 + STATUS_TPM_CLEAR_DISABLED NTStatus = 0xC0290005 + STATUS_TPM_DEACTIVATED NTStatus = 0xC0290006 + STATUS_TPM_DISABLED NTStatus = 0xC0290007 + STATUS_TPM_DISABLED_CMD NTStatus = 0xC0290008 + STATUS_TPM_FAIL NTStatus = 0xC0290009 + STATUS_TPM_BAD_ORDINAL NTStatus = 0xC029000A + STATUS_TPM_INSTALL_DISABLED NTStatus = 0xC029000B + STATUS_TPM_INVALID_KEYHANDLE NTStatus = 0xC029000C + STATUS_TPM_KEYNOTFOUND NTStatus = 0xC029000D + STATUS_TPM_INAPPROPRIATE_ENC NTStatus = 0xC029000E + STATUS_TPM_MIGRATEFAIL NTStatus = 0xC029000F + STATUS_TPM_INVALID_PCR_INFO NTStatus = 0xC0290010 + STATUS_TPM_NOSPACE NTStatus = 0xC0290011 + STATUS_TPM_NOSRK NTStatus = 0xC0290012 + STATUS_TPM_NOTSEALED_BLOB NTStatus = 0xC0290013 + STATUS_TPM_OWNER_SET NTStatus = 0xC0290014 + STATUS_TPM_RESOURCES NTStatus = 0xC0290015 + STATUS_TPM_SHORTRANDOM NTStatus = 0xC0290016 + STATUS_TPM_SIZE NTStatus = 0xC0290017 + STATUS_TPM_WRONGPCRVAL NTStatus = 0xC0290018 + STATUS_TPM_BAD_PARAM_SIZE NTStatus = 0xC0290019 + STATUS_TPM_SHA_THREAD NTStatus = 0xC029001A + STATUS_TPM_SHA_ERROR NTStatus = 0xC029001B + STATUS_TPM_FAILEDSELFTEST NTStatus = 0xC029001C + STATUS_TPM_AUTH2FAIL NTStatus = 0xC029001D + STATUS_TPM_BADTAG NTStatus = 0xC029001E + STATUS_TPM_IOERROR NTStatus = 0xC029001F + STATUS_TPM_ENCRYPT_ERROR NTStatus = 0xC0290020 + STATUS_TPM_DECRYPT_ERROR NTStatus = 0xC0290021 + STATUS_TPM_INVALID_AUTHHANDLE NTStatus = 0xC0290022 + STATUS_TPM_NO_ENDORSEMENT NTStatus = 0xC0290023 + STATUS_TPM_INVALID_KEYUSAGE NTStatus = 0xC0290024 + STATUS_TPM_WRONG_ENTITYTYPE NTStatus = 0xC0290025 + STATUS_TPM_INVALID_POSTINIT NTStatus = 0xC0290026 + STATUS_TPM_INAPPROPRIATE_SIG NTStatus = 0xC0290027 + STATUS_TPM_BAD_KEY_PROPERTY NTStatus = 0xC0290028 + STATUS_TPM_BAD_MIGRATION NTStatus = 0xC0290029 + STATUS_TPM_BAD_SCHEME NTStatus = 0xC029002A + STATUS_TPM_BAD_DATASIZE NTStatus = 0xC029002B + STATUS_TPM_BAD_MODE NTStatus = 0xC029002C + STATUS_TPM_BAD_PRESENCE NTStatus = 0xC029002D + STATUS_TPM_BAD_VERSION NTStatus = 0xC029002E + STATUS_TPM_NO_WRAP_TRANSPORT NTStatus = 0xC029002F + STATUS_TPM_AUDITFAIL_UNSUCCESSFUL NTStatus = 0xC0290030 + STATUS_TPM_AUDITFAIL_SUCCESSFUL NTStatus = 0xC0290031 + STATUS_TPM_NOTRESETABLE NTStatus = 0xC0290032 + STATUS_TPM_NOTLOCAL NTStatus = 0xC0290033 + STATUS_TPM_BAD_TYPE NTStatus = 0xC0290034 + STATUS_TPM_INVALID_RESOURCE NTStatus = 0xC0290035 + STATUS_TPM_NOTFIPS NTStatus = 0xC0290036 + STATUS_TPM_INVALID_FAMILY NTStatus = 0xC0290037 + STATUS_TPM_NO_NV_PERMISSION NTStatus = 0xC0290038 + STATUS_TPM_REQUIRES_SIGN NTStatus = 0xC0290039 + STATUS_TPM_KEY_NOTSUPPORTED NTStatus = 0xC029003A + STATUS_TPM_AUTH_CONFLICT NTStatus = 0xC029003B + STATUS_TPM_AREA_LOCKED NTStatus = 0xC029003C + STATUS_TPM_BAD_LOCALITY NTStatus = 0xC029003D + STATUS_TPM_READ_ONLY NTStatus = 0xC029003E + STATUS_TPM_PER_NOWRITE NTStatus = 0xC029003F + STATUS_TPM_FAMILYCOUNT NTStatus = 0xC0290040 + STATUS_TPM_WRITE_LOCKED NTStatus = 0xC0290041 + STATUS_TPM_BAD_ATTRIBUTES NTStatus = 0xC0290042 + STATUS_TPM_INVALID_STRUCTURE NTStatus = 0xC0290043 + STATUS_TPM_KEY_OWNER_CONTROL NTStatus = 0xC0290044 + 
STATUS_TPM_BAD_COUNTER NTStatus = 0xC0290045 + STATUS_TPM_NOT_FULLWRITE NTStatus = 0xC0290046 + STATUS_TPM_CONTEXT_GAP NTStatus = 0xC0290047 + STATUS_TPM_MAXNVWRITES NTStatus = 0xC0290048 + STATUS_TPM_NOOPERATOR NTStatus = 0xC0290049 + STATUS_TPM_RESOURCEMISSING NTStatus = 0xC029004A + STATUS_TPM_DELEGATE_LOCK NTStatus = 0xC029004B + STATUS_TPM_DELEGATE_FAMILY NTStatus = 0xC029004C + STATUS_TPM_DELEGATE_ADMIN NTStatus = 0xC029004D + STATUS_TPM_TRANSPORT_NOTEXCLUSIVE NTStatus = 0xC029004E + STATUS_TPM_OWNER_CONTROL NTStatus = 0xC029004F + STATUS_TPM_DAA_RESOURCES NTStatus = 0xC0290050 + STATUS_TPM_DAA_INPUT_DATA0 NTStatus = 0xC0290051 + STATUS_TPM_DAA_INPUT_DATA1 NTStatus = 0xC0290052 + STATUS_TPM_DAA_ISSUER_SETTINGS NTStatus = 0xC0290053 + STATUS_TPM_DAA_TPM_SETTINGS NTStatus = 0xC0290054 + STATUS_TPM_DAA_STAGE NTStatus = 0xC0290055 + STATUS_TPM_DAA_ISSUER_VALIDITY NTStatus = 0xC0290056 + STATUS_TPM_DAA_WRONG_W NTStatus = 0xC0290057 + STATUS_TPM_BAD_HANDLE NTStatus = 0xC0290058 + STATUS_TPM_BAD_DELEGATE NTStatus = 0xC0290059 + STATUS_TPM_BADCONTEXT NTStatus = 0xC029005A + STATUS_TPM_TOOMANYCONTEXTS NTStatus = 0xC029005B + STATUS_TPM_MA_TICKET_SIGNATURE NTStatus = 0xC029005C + STATUS_TPM_MA_DESTINATION NTStatus = 0xC029005D + STATUS_TPM_MA_SOURCE NTStatus = 0xC029005E + STATUS_TPM_MA_AUTHORITY NTStatus = 0xC029005F + STATUS_TPM_PERMANENTEK NTStatus = 0xC0290061 + STATUS_TPM_BAD_SIGNATURE NTStatus = 0xC0290062 + STATUS_TPM_NOCONTEXTSPACE NTStatus = 0xC0290063 + STATUS_TPM_20_E_ASYMMETRIC NTStatus = 0xC0290081 + STATUS_TPM_20_E_ATTRIBUTES NTStatus = 0xC0290082 + STATUS_TPM_20_E_HASH NTStatus = 0xC0290083 + STATUS_TPM_20_E_VALUE NTStatus = 0xC0290084 + STATUS_TPM_20_E_HIERARCHY NTStatus = 0xC0290085 + STATUS_TPM_20_E_KEY_SIZE NTStatus = 0xC0290087 + STATUS_TPM_20_E_MGF NTStatus = 0xC0290088 + STATUS_TPM_20_E_MODE NTStatus = 0xC0290089 + STATUS_TPM_20_E_TYPE NTStatus = 0xC029008A + STATUS_TPM_20_E_HANDLE NTStatus = 0xC029008B + STATUS_TPM_20_E_KDF NTStatus = 0xC029008C + STATUS_TPM_20_E_RANGE NTStatus = 0xC029008D + STATUS_TPM_20_E_AUTH_FAIL NTStatus = 0xC029008E + STATUS_TPM_20_E_NONCE NTStatus = 0xC029008F + STATUS_TPM_20_E_PP NTStatus = 0xC0290090 + STATUS_TPM_20_E_SCHEME NTStatus = 0xC0290092 + STATUS_TPM_20_E_SIZE NTStatus = 0xC0290095 + STATUS_TPM_20_E_SYMMETRIC NTStatus = 0xC0290096 + STATUS_TPM_20_E_TAG NTStatus = 0xC0290097 + STATUS_TPM_20_E_SELECTOR NTStatus = 0xC0290098 + STATUS_TPM_20_E_INSUFFICIENT NTStatus = 0xC029009A + STATUS_TPM_20_E_SIGNATURE NTStatus = 0xC029009B + STATUS_TPM_20_E_KEY NTStatus = 0xC029009C + STATUS_TPM_20_E_POLICY_FAIL NTStatus = 0xC029009D + STATUS_TPM_20_E_INTEGRITY NTStatus = 0xC029009F + STATUS_TPM_20_E_TICKET NTStatus = 0xC02900A0 + STATUS_TPM_20_E_RESERVED_BITS NTStatus = 0xC02900A1 + STATUS_TPM_20_E_BAD_AUTH NTStatus = 0xC02900A2 + STATUS_TPM_20_E_EXPIRED NTStatus = 0xC02900A3 + STATUS_TPM_20_E_POLICY_CC NTStatus = 0xC02900A4 + STATUS_TPM_20_E_BINDING NTStatus = 0xC02900A5 + STATUS_TPM_20_E_CURVE NTStatus = 0xC02900A6 + STATUS_TPM_20_E_ECC_POINT NTStatus = 0xC02900A7 + STATUS_TPM_20_E_INITIALIZE NTStatus = 0xC0290100 + STATUS_TPM_20_E_FAILURE NTStatus = 0xC0290101 + STATUS_TPM_20_E_SEQUENCE NTStatus = 0xC0290103 + STATUS_TPM_20_E_PRIVATE NTStatus = 0xC029010B + STATUS_TPM_20_E_HMAC NTStatus = 0xC0290119 + STATUS_TPM_20_E_DISABLED NTStatus = 0xC0290120 + STATUS_TPM_20_E_EXCLUSIVE NTStatus = 0xC0290121 + STATUS_TPM_20_E_ECC_CURVE NTStatus = 0xC0290123 + STATUS_TPM_20_E_AUTH_TYPE NTStatus = 0xC0290124 + STATUS_TPM_20_E_AUTH_MISSING NTStatus = 0xC0290125 + 
STATUS_TPM_20_E_POLICY NTStatus = 0xC0290126 + STATUS_TPM_20_E_PCR NTStatus = 0xC0290127 + STATUS_TPM_20_E_PCR_CHANGED NTStatus = 0xC0290128 + STATUS_TPM_20_E_UPGRADE NTStatus = 0xC029012D + STATUS_TPM_20_E_TOO_MANY_CONTEXTS NTStatus = 0xC029012E + STATUS_TPM_20_E_AUTH_UNAVAILABLE NTStatus = 0xC029012F + STATUS_TPM_20_E_REBOOT NTStatus = 0xC0290130 + STATUS_TPM_20_E_UNBALANCED NTStatus = 0xC0290131 + STATUS_TPM_20_E_COMMAND_SIZE NTStatus = 0xC0290142 + STATUS_TPM_20_E_COMMAND_CODE NTStatus = 0xC0290143 + STATUS_TPM_20_E_AUTHSIZE NTStatus = 0xC0290144 + STATUS_TPM_20_E_AUTH_CONTEXT NTStatus = 0xC0290145 + STATUS_TPM_20_E_NV_RANGE NTStatus = 0xC0290146 + STATUS_TPM_20_E_NV_SIZE NTStatus = 0xC0290147 + STATUS_TPM_20_E_NV_LOCKED NTStatus = 0xC0290148 + STATUS_TPM_20_E_NV_AUTHORIZATION NTStatus = 0xC0290149 + STATUS_TPM_20_E_NV_UNINITIALIZED NTStatus = 0xC029014A + STATUS_TPM_20_E_NV_SPACE NTStatus = 0xC029014B + STATUS_TPM_20_E_NV_DEFINED NTStatus = 0xC029014C + STATUS_TPM_20_E_BAD_CONTEXT NTStatus = 0xC0290150 + STATUS_TPM_20_E_CPHASH NTStatus = 0xC0290151 + STATUS_TPM_20_E_PARENT NTStatus = 0xC0290152 + STATUS_TPM_20_E_NEEDS_TEST NTStatus = 0xC0290153 + STATUS_TPM_20_E_NO_RESULT NTStatus = 0xC0290154 + STATUS_TPM_20_E_SENSITIVE NTStatus = 0xC0290155 + STATUS_TPM_COMMAND_BLOCKED NTStatus = 0xC0290400 + STATUS_TPM_INVALID_HANDLE NTStatus = 0xC0290401 + STATUS_TPM_DUPLICATE_VHANDLE NTStatus = 0xC0290402 + STATUS_TPM_EMBEDDED_COMMAND_BLOCKED NTStatus = 0xC0290403 + STATUS_TPM_EMBEDDED_COMMAND_UNSUPPORTED NTStatus = 0xC0290404 + STATUS_TPM_RETRY NTStatus = 0xC0290800 + STATUS_TPM_NEEDS_SELFTEST NTStatus = 0xC0290801 + STATUS_TPM_DOING_SELFTEST NTStatus = 0xC0290802 + STATUS_TPM_DEFEND_LOCK_RUNNING NTStatus = 0xC0290803 + STATUS_TPM_COMMAND_CANCELED NTStatus = 0xC0291001 + STATUS_TPM_TOO_MANY_CONTEXTS NTStatus = 0xC0291002 + STATUS_TPM_NOT_FOUND NTStatus = 0xC0291003 + STATUS_TPM_ACCESS_DENIED NTStatus = 0xC0291004 + STATUS_TPM_INSUFFICIENT_BUFFER NTStatus = 0xC0291005 + STATUS_TPM_PPI_FUNCTION_UNSUPPORTED NTStatus = 0xC0291006 + STATUS_PCP_ERROR_MASK NTStatus = 0xC0292000 + STATUS_PCP_DEVICE_NOT_READY NTStatus = 0xC0292001 + STATUS_PCP_INVALID_HANDLE NTStatus = 0xC0292002 + STATUS_PCP_INVALID_PARAMETER NTStatus = 0xC0292003 + STATUS_PCP_FLAG_NOT_SUPPORTED NTStatus = 0xC0292004 + STATUS_PCP_NOT_SUPPORTED NTStatus = 0xC0292005 + STATUS_PCP_BUFFER_TOO_SMALL NTStatus = 0xC0292006 + STATUS_PCP_INTERNAL_ERROR NTStatus = 0xC0292007 + STATUS_PCP_AUTHENTICATION_FAILED NTStatus = 0xC0292008 + STATUS_PCP_AUTHENTICATION_IGNORED NTStatus = 0xC0292009 + STATUS_PCP_POLICY_NOT_FOUND NTStatus = 0xC029200A + STATUS_PCP_PROFILE_NOT_FOUND NTStatus = 0xC029200B + STATUS_PCP_VALIDATION_FAILED NTStatus = 0xC029200C + STATUS_PCP_DEVICE_NOT_FOUND NTStatus = 0xC029200D + STATUS_PCP_WRONG_PARENT NTStatus = 0xC029200E + STATUS_PCP_KEY_NOT_LOADED NTStatus = 0xC029200F + STATUS_PCP_NO_KEY_CERTIFICATION NTStatus = 0xC0292010 + STATUS_PCP_KEY_NOT_FINALIZED NTStatus = 0xC0292011 + STATUS_PCP_ATTESTATION_CHALLENGE_NOT_SET NTStatus = 0xC0292012 + STATUS_PCP_NOT_PCR_BOUND NTStatus = 0xC0292013 + STATUS_PCP_KEY_ALREADY_FINALIZED NTStatus = 0xC0292014 + STATUS_PCP_KEY_USAGE_POLICY_NOT_SUPPORTED NTStatus = 0xC0292015 + STATUS_PCP_KEY_USAGE_POLICY_INVALID NTStatus = 0xC0292016 + STATUS_PCP_SOFT_KEY_ERROR NTStatus = 0xC0292017 + STATUS_PCP_KEY_NOT_AUTHENTICATED NTStatus = 0xC0292018 + STATUS_PCP_KEY_NOT_AIK NTStatus = 0xC0292019 + STATUS_PCP_KEY_NOT_SIGNING_KEY NTStatus = 0xC029201A + STATUS_PCP_LOCKED_OUT NTStatus = 0xC029201B + 
STATUS_PCP_CLAIM_TYPE_NOT_SUPPORTED NTStatus = 0xC029201C + STATUS_PCP_TPM_VERSION_NOT_SUPPORTED NTStatus = 0xC029201D + STATUS_PCP_BUFFER_LENGTH_MISMATCH NTStatus = 0xC029201E + STATUS_PCP_IFX_RSA_KEY_CREATION_BLOCKED NTStatus = 0xC029201F + STATUS_PCP_TICKET_MISSING NTStatus = 0xC0292020 + STATUS_PCP_RAW_POLICY_NOT_SUPPORTED NTStatus = 0xC0292021 + STATUS_PCP_KEY_HANDLE_INVALIDATED NTStatus = 0xC0292022 + STATUS_PCP_UNSUPPORTED_PSS_SALT NTStatus = 0x40292023 + STATUS_RTPM_CONTEXT_CONTINUE NTStatus = 0x00293000 + STATUS_RTPM_CONTEXT_COMPLETE NTStatus = 0x00293001 + STATUS_RTPM_NO_RESULT NTStatus = 0xC0293002 + STATUS_RTPM_PCR_READ_INCOMPLETE NTStatus = 0xC0293003 + STATUS_RTPM_INVALID_CONTEXT NTStatus = 0xC0293004 + STATUS_RTPM_UNSUPPORTED_CMD NTStatus = 0xC0293005 + STATUS_TPM_ZERO_EXHAUST_ENABLED NTStatus = 0xC0294000 + STATUS_HV_INVALID_HYPERCALL_CODE NTStatus = 0xC0350002 + STATUS_HV_INVALID_HYPERCALL_INPUT NTStatus = 0xC0350003 + STATUS_HV_INVALID_ALIGNMENT NTStatus = 0xC0350004 + STATUS_HV_INVALID_PARAMETER NTStatus = 0xC0350005 + STATUS_HV_ACCESS_DENIED NTStatus = 0xC0350006 + STATUS_HV_INVALID_PARTITION_STATE NTStatus = 0xC0350007 + STATUS_HV_OPERATION_DENIED NTStatus = 0xC0350008 + STATUS_HV_UNKNOWN_PROPERTY NTStatus = 0xC0350009 + STATUS_HV_PROPERTY_VALUE_OUT_OF_RANGE NTStatus = 0xC035000A + STATUS_HV_INSUFFICIENT_MEMORY NTStatus = 0xC035000B + STATUS_HV_PARTITION_TOO_DEEP NTStatus = 0xC035000C + STATUS_HV_INVALID_PARTITION_ID NTStatus = 0xC035000D + STATUS_HV_INVALID_VP_INDEX NTStatus = 0xC035000E + STATUS_HV_INVALID_PORT_ID NTStatus = 0xC0350011 + STATUS_HV_INVALID_CONNECTION_ID NTStatus = 0xC0350012 + STATUS_HV_INSUFFICIENT_BUFFERS NTStatus = 0xC0350013 + STATUS_HV_NOT_ACKNOWLEDGED NTStatus = 0xC0350014 + STATUS_HV_INVALID_VP_STATE NTStatus = 0xC0350015 + STATUS_HV_ACKNOWLEDGED NTStatus = 0xC0350016 + STATUS_HV_INVALID_SAVE_RESTORE_STATE NTStatus = 0xC0350017 + STATUS_HV_INVALID_SYNIC_STATE NTStatus = 0xC0350018 + STATUS_HV_OBJECT_IN_USE NTStatus = 0xC0350019 + STATUS_HV_INVALID_PROXIMITY_DOMAIN_INFO NTStatus = 0xC035001A + STATUS_HV_NO_DATA NTStatus = 0xC035001B + STATUS_HV_INACTIVE NTStatus = 0xC035001C + STATUS_HV_NO_RESOURCES NTStatus = 0xC035001D + STATUS_HV_FEATURE_UNAVAILABLE NTStatus = 0xC035001E + STATUS_HV_INSUFFICIENT_BUFFER NTStatus = 0xC0350033 + STATUS_HV_INSUFFICIENT_DEVICE_DOMAINS NTStatus = 0xC0350038 + STATUS_HV_CPUID_FEATURE_VALIDATION_ERROR NTStatus = 0xC035003C + STATUS_HV_CPUID_XSAVE_FEATURE_VALIDATION_ERROR NTStatus = 0xC035003D + STATUS_HV_PROCESSOR_STARTUP_TIMEOUT NTStatus = 0xC035003E + STATUS_HV_SMX_ENABLED NTStatus = 0xC035003F + STATUS_HV_INVALID_LP_INDEX NTStatus = 0xC0350041 + STATUS_HV_INVALID_REGISTER_VALUE NTStatus = 0xC0350050 + STATUS_HV_INVALID_VTL_STATE NTStatus = 0xC0350051 + STATUS_HV_NX_NOT_DETECTED NTStatus = 0xC0350055 + STATUS_HV_INVALID_DEVICE_ID NTStatus = 0xC0350057 + STATUS_HV_INVALID_DEVICE_STATE NTStatus = 0xC0350058 + STATUS_HV_PENDING_PAGE_REQUESTS NTStatus = 0x00350059 + STATUS_HV_PAGE_REQUEST_INVALID NTStatus = 0xC0350060 + STATUS_HV_INVALID_CPU_GROUP_ID NTStatus = 0xC035006F + STATUS_HV_INVALID_CPU_GROUP_STATE NTStatus = 0xC0350070 + STATUS_HV_OPERATION_FAILED NTStatus = 0xC0350071 + STATUS_HV_NOT_ALLOWED_WITH_NESTED_VIRT_ACTIVE NTStatus = 0xC0350072 + STATUS_HV_INSUFFICIENT_ROOT_MEMORY NTStatus = 0xC0350073 + STATUS_HV_NOT_PRESENT NTStatus = 0xC0351000 + STATUS_VID_DUPLICATE_HANDLER NTStatus = 0xC0370001 + STATUS_VID_TOO_MANY_HANDLERS NTStatus = 0xC0370002 + STATUS_VID_QUEUE_FULL NTStatus = 0xC0370003 + 
STATUS_VID_HANDLER_NOT_PRESENT NTStatus = 0xC0370004 + STATUS_VID_INVALID_OBJECT_NAME NTStatus = 0xC0370005 + STATUS_VID_PARTITION_NAME_TOO_LONG NTStatus = 0xC0370006 + STATUS_VID_MESSAGE_QUEUE_NAME_TOO_LONG NTStatus = 0xC0370007 + STATUS_VID_PARTITION_ALREADY_EXISTS NTStatus = 0xC0370008 + STATUS_VID_PARTITION_DOES_NOT_EXIST NTStatus = 0xC0370009 + STATUS_VID_PARTITION_NAME_NOT_FOUND NTStatus = 0xC037000A + STATUS_VID_MESSAGE_QUEUE_ALREADY_EXISTS NTStatus = 0xC037000B + STATUS_VID_EXCEEDED_MBP_ENTRY_MAP_LIMIT NTStatus = 0xC037000C + STATUS_VID_MB_STILL_REFERENCED NTStatus = 0xC037000D + STATUS_VID_CHILD_GPA_PAGE_SET_CORRUPTED NTStatus = 0xC037000E + STATUS_VID_INVALID_NUMA_SETTINGS NTStatus = 0xC037000F + STATUS_VID_INVALID_NUMA_NODE_INDEX NTStatus = 0xC0370010 + STATUS_VID_NOTIFICATION_QUEUE_ALREADY_ASSOCIATED NTStatus = 0xC0370011 + STATUS_VID_INVALID_MEMORY_BLOCK_HANDLE NTStatus = 0xC0370012 + STATUS_VID_PAGE_RANGE_OVERFLOW NTStatus = 0xC0370013 + STATUS_VID_INVALID_MESSAGE_QUEUE_HANDLE NTStatus = 0xC0370014 + STATUS_VID_INVALID_GPA_RANGE_HANDLE NTStatus = 0xC0370015 + STATUS_VID_NO_MEMORY_BLOCK_NOTIFICATION_QUEUE NTStatus = 0xC0370016 + STATUS_VID_MEMORY_BLOCK_LOCK_COUNT_EXCEEDED NTStatus = 0xC0370017 + STATUS_VID_INVALID_PPM_HANDLE NTStatus = 0xC0370018 + STATUS_VID_MBPS_ARE_LOCKED NTStatus = 0xC0370019 + STATUS_VID_MESSAGE_QUEUE_CLOSED NTStatus = 0xC037001A + STATUS_VID_VIRTUAL_PROCESSOR_LIMIT_EXCEEDED NTStatus = 0xC037001B + STATUS_VID_STOP_PENDING NTStatus = 0xC037001C + STATUS_VID_INVALID_PROCESSOR_STATE NTStatus = 0xC037001D + STATUS_VID_EXCEEDED_KM_CONTEXT_COUNT_LIMIT NTStatus = 0xC037001E + STATUS_VID_KM_INTERFACE_ALREADY_INITIALIZED NTStatus = 0xC037001F + STATUS_VID_MB_PROPERTY_ALREADY_SET_RESET NTStatus = 0xC0370020 + STATUS_VID_MMIO_RANGE_DESTROYED NTStatus = 0xC0370021 + STATUS_VID_INVALID_CHILD_GPA_PAGE_SET NTStatus = 0xC0370022 + STATUS_VID_RESERVE_PAGE_SET_IS_BEING_USED NTStatus = 0xC0370023 + STATUS_VID_RESERVE_PAGE_SET_TOO_SMALL NTStatus = 0xC0370024 + STATUS_VID_MBP_ALREADY_LOCKED_USING_RESERVED_PAGE NTStatus = 0xC0370025 + STATUS_VID_MBP_COUNT_EXCEEDED_LIMIT NTStatus = 0xC0370026 + STATUS_VID_SAVED_STATE_CORRUPT NTStatus = 0xC0370027 + STATUS_VID_SAVED_STATE_UNRECOGNIZED_ITEM NTStatus = 0xC0370028 + STATUS_VID_SAVED_STATE_INCOMPATIBLE NTStatus = 0xC0370029 + STATUS_VID_VTL_ACCESS_DENIED NTStatus = 0xC037002A + STATUS_VID_REMOTE_NODE_PARENT_GPA_PAGES_USED NTStatus = 0x80370001 + STATUS_IPSEC_BAD_SPI NTStatus = 0xC0360001 + STATUS_IPSEC_SA_LIFETIME_EXPIRED NTStatus = 0xC0360002 + STATUS_IPSEC_WRONG_SA NTStatus = 0xC0360003 + STATUS_IPSEC_REPLAY_CHECK_FAILED NTStatus = 0xC0360004 + STATUS_IPSEC_INVALID_PACKET NTStatus = 0xC0360005 + STATUS_IPSEC_INTEGRITY_CHECK_FAILED NTStatus = 0xC0360006 + STATUS_IPSEC_CLEAR_TEXT_DROP NTStatus = 0xC0360007 + STATUS_IPSEC_AUTH_FIREWALL_DROP NTStatus = 0xC0360008 + STATUS_IPSEC_THROTTLE_DROP NTStatus = 0xC0360009 + STATUS_IPSEC_DOSP_BLOCK NTStatus = 0xC0368000 + STATUS_IPSEC_DOSP_RECEIVED_MULTICAST NTStatus = 0xC0368001 + STATUS_IPSEC_DOSP_INVALID_PACKET NTStatus = 0xC0368002 + STATUS_IPSEC_DOSP_STATE_LOOKUP_FAILED NTStatus = 0xC0368003 + STATUS_IPSEC_DOSP_MAX_ENTRIES NTStatus = 0xC0368004 + STATUS_IPSEC_DOSP_KEYMOD_NOT_ALLOWED NTStatus = 0xC0368005 + STATUS_IPSEC_DOSP_MAX_PER_IP_RATELIMIT_QUEUES NTStatus = 0xC0368006 + STATUS_VOLMGR_INCOMPLETE_REGENERATION NTStatus = 0x80380001 + STATUS_VOLMGR_INCOMPLETE_DISK_MIGRATION NTStatus = 0x80380002 + STATUS_VOLMGR_DATABASE_FULL NTStatus = 0xC0380001 + 
STATUS_VOLMGR_DISK_CONFIGURATION_CORRUPTED NTStatus = 0xC0380002 + STATUS_VOLMGR_DISK_CONFIGURATION_NOT_IN_SYNC NTStatus = 0xC0380003 + STATUS_VOLMGR_PACK_CONFIG_UPDATE_FAILED NTStatus = 0xC0380004 + STATUS_VOLMGR_DISK_CONTAINS_NON_SIMPLE_VOLUME NTStatus = 0xC0380005 + STATUS_VOLMGR_DISK_DUPLICATE NTStatus = 0xC0380006 + STATUS_VOLMGR_DISK_DYNAMIC NTStatus = 0xC0380007 + STATUS_VOLMGR_DISK_ID_INVALID NTStatus = 0xC0380008 + STATUS_VOLMGR_DISK_INVALID NTStatus = 0xC0380009 + STATUS_VOLMGR_DISK_LAST_VOTER NTStatus = 0xC038000A + STATUS_VOLMGR_DISK_LAYOUT_INVALID NTStatus = 0xC038000B + STATUS_VOLMGR_DISK_LAYOUT_NON_BASIC_BETWEEN_BASIC_PARTITIONS NTStatus = 0xC038000C + STATUS_VOLMGR_DISK_LAYOUT_NOT_CYLINDER_ALIGNED NTStatus = 0xC038000D + STATUS_VOLMGR_DISK_LAYOUT_PARTITIONS_TOO_SMALL NTStatus = 0xC038000E + STATUS_VOLMGR_DISK_LAYOUT_PRIMARY_BETWEEN_LOGICAL_PARTITIONS NTStatus = 0xC038000F + STATUS_VOLMGR_DISK_LAYOUT_TOO_MANY_PARTITIONS NTStatus = 0xC0380010 + STATUS_VOLMGR_DISK_MISSING NTStatus = 0xC0380011 + STATUS_VOLMGR_DISK_NOT_EMPTY NTStatus = 0xC0380012 + STATUS_VOLMGR_DISK_NOT_ENOUGH_SPACE NTStatus = 0xC0380013 + STATUS_VOLMGR_DISK_REVECTORING_FAILED NTStatus = 0xC0380014 + STATUS_VOLMGR_DISK_SECTOR_SIZE_INVALID NTStatus = 0xC0380015 + STATUS_VOLMGR_DISK_SET_NOT_CONTAINED NTStatus = 0xC0380016 + STATUS_VOLMGR_DISK_USED_BY_MULTIPLE_MEMBERS NTStatus = 0xC0380017 + STATUS_VOLMGR_DISK_USED_BY_MULTIPLE_PLEXES NTStatus = 0xC0380018 + STATUS_VOLMGR_DYNAMIC_DISK_NOT_SUPPORTED NTStatus = 0xC0380019 + STATUS_VOLMGR_EXTENT_ALREADY_USED NTStatus = 0xC038001A + STATUS_VOLMGR_EXTENT_NOT_CONTIGUOUS NTStatus = 0xC038001B + STATUS_VOLMGR_EXTENT_NOT_IN_PUBLIC_REGION NTStatus = 0xC038001C + STATUS_VOLMGR_EXTENT_NOT_SECTOR_ALIGNED NTStatus = 0xC038001D + STATUS_VOLMGR_EXTENT_OVERLAPS_EBR_PARTITION NTStatus = 0xC038001E + STATUS_VOLMGR_EXTENT_VOLUME_LENGTHS_DO_NOT_MATCH NTStatus = 0xC038001F + STATUS_VOLMGR_FAULT_TOLERANT_NOT_SUPPORTED NTStatus = 0xC0380020 + STATUS_VOLMGR_INTERLEAVE_LENGTH_INVALID NTStatus = 0xC0380021 + STATUS_VOLMGR_MAXIMUM_REGISTERED_USERS NTStatus = 0xC0380022 + STATUS_VOLMGR_MEMBER_IN_SYNC NTStatus = 0xC0380023 + STATUS_VOLMGR_MEMBER_INDEX_DUPLICATE NTStatus = 0xC0380024 + STATUS_VOLMGR_MEMBER_INDEX_INVALID NTStatus = 0xC0380025 + STATUS_VOLMGR_MEMBER_MISSING NTStatus = 0xC0380026 + STATUS_VOLMGR_MEMBER_NOT_DETACHED NTStatus = 0xC0380027 + STATUS_VOLMGR_MEMBER_REGENERATING NTStatus = 0xC0380028 + STATUS_VOLMGR_ALL_DISKS_FAILED NTStatus = 0xC0380029 + STATUS_VOLMGR_NO_REGISTERED_USERS NTStatus = 0xC038002A + STATUS_VOLMGR_NO_SUCH_USER NTStatus = 0xC038002B + STATUS_VOLMGR_NOTIFICATION_RESET NTStatus = 0xC038002C + STATUS_VOLMGR_NUMBER_OF_MEMBERS_INVALID NTStatus = 0xC038002D + STATUS_VOLMGR_NUMBER_OF_PLEXES_INVALID NTStatus = 0xC038002E + STATUS_VOLMGR_PACK_DUPLICATE NTStatus = 0xC038002F + STATUS_VOLMGR_PACK_ID_INVALID NTStatus = 0xC0380030 + STATUS_VOLMGR_PACK_INVALID NTStatus = 0xC0380031 + STATUS_VOLMGR_PACK_NAME_INVALID NTStatus = 0xC0380032 + STATUS_VOLMGR_PACK_OFFLINE NTStatus = 0xC0380033 + STATUS_VOLMGR_PACK_HAS_QUORUM NTStatus = 0xC0380034 + STATUS_VOLMGR_PACK_WITHOUT_QUORUM NTStatus = 0xC0380035 + STATUS_VOLMGR_PARTITION_STYLE_INVALID NTStatus = 0xC0380036 + STATUS_VOLMGR_PARTITION_UPDATE_FAILED NTStatus = 0xC0380037 + STATUS_VOLMGR_PLEX_IN_SYNC NTStatus = 0xC0380038 + STATUS_VOLMGR_PLEX_INDEX_DUPLICATE NTStatus = 0xC0380039 + STATUS_VOLMGR_PLEX_INDEX_INVALID NTStatus = 0xC038003A + STATUS_VOLMGR_PLEX_LAST_ACTIVE NTStatus = 0xC038003B + STATUS_VOLMGR_PLEX_MISSING NTStatus 
= 0xC038003C + STATUS_VOLMGR_PLEX_REGENERATING NTStatus = 0xC038003D + STATUS_VOLMGR_PLEX_TYPE_INVALID NTStatus = 0xC038003E + STATUS_VOLMGR_PLEX_NOT_RAID5 NTStatus = 0xC038003F + STATUS_VOLMGR_PLEX_NOT_SIMPLE NTStatus = 0xC0380040 + STATUS_VOLMGR_STRUCTURE_SIZE_INVALID NTStatus = 0xC0380041 + STATUS_VOLMGR_TOO_MANY_NOTIFICATION_REQUESTS NTStatus = 0xC0380042 + STATUS_VOLMGR_TRANSACTION_IN_PROGRESS NTStatus = 0xC0380043 + STATUS_VOLMGR_UNEXPECTED_DISK_LAYOUT_CHANGE NTStatus = 0xC0380044 + STATUS_VOLMGR_VOLUME_CONTAINS_MISSING_DISK NTStatus = 0xC0380045 + STATUS_VOLMGR_VOLUME_ID_INVALID NTStatus = 0xC0380046 + STATUS_VOLMGR_VOLUME_LENGTH_INVALID NTStatus = 0xC0380047 + STATUS_VOLMGR_VOLUME_LENGTH_NOT_SECTOR_SIZE_MULTIPLE NTStatus = 0xC0380048 + STATUS_VOLMGR_VOLUME_NOT_MIRRORED NTStatus = 0xC0380049 + STATUS_VOLMGR_VOLUME_NOT_RETAINED NTStatus = 0xC038004A + STATUS_VOLMGR_VOLUME_OFFLINE NTStatus = 0xC038004B + STATUS_VOLMGR_VOLUME_RETAINED NTStatus = 0xC038004C + STATUS_VOLMGR_NUMBER_OF_EXTENTS_INVALID NTStatus = 0xC038004D + STATUS_VOLMGR_DIFFERENT_SECTOR_SIZE NTStatus = 0xC038004E + STATUS_VOLMGR_BAD_BOOT_DISK NTStatus = 0xC038004F + STATUS_VOLMGR_PACK_CONFIG_OFFLINE NTStatus = 0xC0380050 + STATUS_VOLMGR_PACK_CONFIG_ONLINE NTStatus = 0xC0380051 + STATUS_VOLMGR_NOT_PRIMARY_PACK NTStatus = 0xC0380052 + STATUS_VOLMGR_PACK_LOG_UPDATE_FAILED NTStatus = 0xC0380053 + STATUS_VOLMGR_NUMBER_OF_DISKS_IN_PLEX_INVALID NTStatus = 0xC0380054 + STATUS_VOLMGR_NUMBER_OF_DISKS_IN_MEMBER_INVALID NTStatus = 0xC0380055 + STATUS_VOLMGR_VOLUME_MIRRORED NTStatus = 0xC0380056 + STATUS_VOLMGR_PLEX_NOT_SIMPLE_SPANNED NTStatus = 0xC0380057 + STATUS_VOLMGR_NO_VALID_LOG_COPIES NTStatus = 0xC0380058 + STATUS_VOLMGR_PRIMARY_PACK_PRESENT NTStatus = 0xC0380059 + STATUS_VOLMGR_NUMBER_OF_DISKS_INVALID NTStatus = 0xC038005A + STATUS_VOLMGR_MIRROR_NOT_SUPPORTED NTStatus = 0xC038005B + STATUS_VOLMGR_RAID5_NOT_SUPPORTED NTStatus = 0xC038005C + STATUS_BCD_NOT_ALL_ENTRIES_IMPORTED NTStatus = 0x80390001 + STATUS_BCD_TOO_MANY_ELEMENTS NTStatus = 0xC0390002 + STATUS_BCD_NOT_ALL_ENTRIES_SYNCHRONIZED NTStatus = 0x80390003 + STATUS_VHD_DRIVE_FOOTER_MISSING NTStatus = 0xC03A0001 + STATUS_VHD_DRIVE_FOOTER_CHECKSUM_MISMATCH NTStatus = 0xC03A0002 + STATUS_VHD_DRIVE_FOOTER_CORRUPT NTStatus = 0xC03A0003 + STATUS_VHD_FORMAT_UNKNOWN NTStatus = 0xC03A0004 + STATUS_VHD_FORMAT_UNSUPPORTED_VERSION NTStatus = 0xC03A0005 + STATUS_VHD_SPARSE_HEADER_CHECKSUM_MISMATCH NTStatus = 0xC03A0006 + STATUS_VHD_SPARSE_HEADER_UNSUPPORTED_VERSION NTStatus = 0xC03A0007 + STATUS_VHD_SPARSE_HEADER_CORRUPT NTStatus = 0xC03A0008 + STATUS_VHD_BLOCK_ALLOCATION_FAILURE NTStatus = 0xC03A0009 + STATUS_VHD_BLOCK_ALLOCATION_TABLE_CORRUPT NTStatus = 0xC03A000A + STATUS_VHD_INVALID_BLOCK_SIZE NTStatus = 0xC03A000B + STATUS_VHD_BITMAP_MISMATCH NTStatus = 0xC03A000C + STATUS_VHD_PARENT_VHD_NOT_FOUND NTStatus = 0xC03A000D + STATUS_VHD_CHILD_PARENT_ID_MISMATCH NTStatus = 0xC03A000E + STATUS_VHD_CHILD_PARENT_TIMESTAMP_MISMATCH NTStatus = 0xC03A000F + STATUS_VHD_METADATA_READ_FAILURE NTStatus = 0xC03A0010 + STATUS_VHD_METADATA_WRITE_FAILURE NTStatus = 0xC03A0011 + STATUS_VHD_INVALID_SIZE NTStatus = 0xC03A0012 + STATUS_VHD_INVALID_FILE_SIZE NTStatus = 0xC03A0013 + STATUS_VIRTDISK_PROVIDER_NOT_FOUND NTStatus = 0xC03A0014 + STATUS_VIRTDISK_NOT_VIRTUAL_DISK NTStatus = 0xC03A0015 + STATUS_VHD_PARENT_VHD_ACCESS_DENIED NTStatus = 0xC03A0016 + STATUS_VHD_CHILD_PARENT_SIZE_MISMATCH NTStatus = 0xC03A0017 + STATUS_VHD_DIFFERENCING_CHAIN_CYCLE_DETECTED NTStatus = 0xC03A0018 + 
STATUS_VHD_DIFFERENCING_CHAIN_ERROR_IN_PARENT NTStatus = 0xC03A0019 + STATUS_VIRTUAL_DISK_LIMITATION NTStatus = 0xC03A001A + STATUS_VHD_INVALID_TYPE NTStatus = 0xC03A001B + STATUS_VHD_INVALID_STATE NTStatus = 0xC03A001C + STATUS_VIRTDISK_UNSUPPORTED_DISK_SECTOR_SIZE NTStatus = 0xC03A001D + STATUS_VIRTDISK_DISK_ALREADY_OWNED NTStatus = 0xC03A001E + STATUS_VIRTDISK_DISK_ONLINE_AND_WRITABLE NTStatus = 0xC03A001F + STATUS_CTLOG_TRACKING_NOT_INITIALIZED NTStatus = 0xC03A0020 + STATUS_CTLOG_LOGFILE_SIZE_EXCEEDED_MAXSIZE NTStatus = 0xC03A0021 + STATUS_CTLOG_VHD_CHANGED_OFFLINE NTStatus = 0xC03A0022 + STATUS_CTLOG_INVALID_TRACKING_STATE NTStatus = 0xC03A0023 + STATUS_CTLOG_INCONSISTENT_TRACKING_FILE NTStatus = 0xC03A0024 + STATUS_VHD_METADATA_FULL NTStatus = 0xC03A0028 + STATUS_VHD_INVALID_CHANGE_TRACKING_ID NTStatus = 0xC03A0029 + STATUS_VHD_CHANGE_TRACKING_DISABLED NTStatus = 0xC03A002A + STATUS_VHD_MISSING_CHANGE_TRACKING_INFORMATION NTStatus = 0xC03A0030 + STATUS_VHD_RESIZE_WOULD_TRUNCATE_DATA NTStatus = 0xC03A0031 + STATUS_VHD_COULD_NOT_COMPUTE_MINIMUM_VIRTUAL_SIZE NTStatus = 0xC03A0032 + STATUS_VHD_ALREADY_AT_OR_BELOW_MINIMUM_VIRTUAL_SIZE NTStatus = 0xC03A0033 + STATUS_QUERY_STORAGE_ERROR NTStatus = 0x803A0001 + STATUS_GDI_HANDLE_LEAK NTStatus = 0x803F0001 + STATUS_RKF_KEY_NOT_FOUND NTStatus = 0xC0400001 + STATUS_RKF_DUPLICATE_KEY NTStatus = 0xC0400002 + STATUS_RKF_BLOB_FULL NTStatus = 0xC0400003 + STATUS_RKF_STORE_FULL NTStatus = 0xC0400004 + STATUS_RKF_FILE_BLOCKED NTStatus = 0xC0400005 + STATUS_RKF_ACTIVE_KEY NTStatus = 0xC0400006 + STATUS_RDBSS_RESTART_OPERATION NTStatus = 0xC0410001 + STATUS_RDBSS_CONTINUE_OPERATION NTStatus = 0xC0410002 + STATUS_RDBSS_POST_OPERATION NTStatus = 0xC0410003 + STATUS_RDBSS_RETRY_LOOKUP NTStatus = 0xC0410004 + STATUS_BTH_ATT_INVALID_HANDLE NTStatus = 0xC0420001 + STATUS_BTH_ATT_READ_NOT_PERMITTED NTStatus = 0xC0420002 + STATUS_BTH_ATT_WRITE_NOT_PERMITTED NTStatus = 0xC0420003 + STATUS_BTH_ATT_INVALID_PDU NTStatus = 0xC0420004 + STATUS_BTH_ATT_INSUFFICIENT_AUTHENTICATION NTStatus = 0xC0420005 + STATUS_BTH_ATT_REQUEST_NOT_SUPPORTED NTStatus = 0xC0420006 + STATUS_BTH_ATT_INVALID_OFFSET NTStatus = 0xC0420007 + STATUS_BTH_ATT_INSUFFICIENT_AUTHORIZATION NTStatus = 0xC0420008 + STATUS_BTH_ATT_PREPARE_QUEUE_FULL NTStatus = 0xC0420009 + STATUS_BTH_ATT_ATTRIBUTE_NOT_FOUND NTStatus = 0xC042000A + STATUS_BTH_ATT_ATTRIBUTE_NOT_LONG NTStatus = 0xC042000B + STATUS_BTH_ATT_INSUFFICIENT_ENCRYPTION_KEY_SIZE NTStatus = 0xC042000C + STATUS_BTH_ATT_INVALID_ATTRIBUTE_VALUE_LENGTH NTStatus = 0xC042000D + STATUS_BTH_ATT_UNLIKELY NTStatus = 0xC042000E + STATUS_BTH_ATT_INSUFFICIENT_ENCRYPTION NTStatus = 0xC042000F + STATUS_BTH_ATT_UNSUPPORTED_GROUP_TYPE NTStatus = 0xC0420010 + STATUS_BTH_ATT_INSUFFICIENT_RESOURCES NTStatus = 0xC0420011 + STATUS_BTH_ATT_UNKNOWN_ERROR NTStatus = 0xC0421000 + STATUS_SECUREBOOT_ROLLBACK_DETECTED NTStatus = 0xC0430001 + STATUS_SECUREBOOT_POLICY_VIOLATION NTStatus = 0xC0430002 + STATUS_SECUREBOOT_INVALID_POLICY NTStatus = 0xC0430003 + STATUS_SECUREBOOT_POLICY_PUBLISHER_NOT_FOUND NTStatus = 0xC0430004 + STATUS_SECUREBOOT_POLICY_NOT_SIGNED NTStatus = 0xC0430005 + STATUS_SECUREBOOT_NOT_ENABLED NTStatus = 0x80430006 + STATUS_SECUREBOOT_FILE_REPLACED NTStatus = 0xC0430007 + STATUS_SECUREBOOT_POLICY_NOT_AUTHORIZED NTStatus = 0xC0430008 + STATUS_SECUREBOOT_POLICY_UNKNOWN NTStatus = 0xC0430009 + STATUS_SECUREBOOT_POLICY_MISSING_ANTIROLLBACKVERSION NTStatus = 0xC043000A + STATUS_SECUREBOOT_PLATFORM_ID_MISMATCH NTStatus = 0xC043000B + 
STATUS_SECUREBOOT_POLICY_ROLLBACK_DETECTED NTStatus = 0xC043000C + STATUS_SECUREBOOT_POLICY_UPGRADE_MISMATCH NTStatus = 0xC043000D + STATUS_SECUREBOOT_REQUIRED_POLICY_FILE_MISSING NTStatus = 0xC043000E + STATUS_SECUREBOOT_NOT_BASE_POLICY NTStatus = 0xC043000F + STATUS_SECUREBOOT_NOT_SUPPLEMENTAL_POLICY NTStatus = 0xC0430010 + STATUS_PLATFORM_MANIFEST_NOT_AUTHORIZED NTStatus = 0xC0EB0001 + STATUS_PLATFORM_MANIFEST_INVALID NTStatus = 0xC0EB0002 + STATUS_PLATFORM_MANIFEST_FILE_NOT_AUTHORIZED NTStatus = 0xC0EB0003 + STATUS_PLATFORM_MANIFEST_CATALOG_NOT_AUTHORIZED NTStatus = 0xC0EB0004 + STATUS_PLATFORM_MANIFEST_BINARY_ID_NOT_FOUND NTStatus = 0xC0EB0005 + STATUS_PLATFORM_MANIFEST_NOT_ACTIVE NTStatus = 0xC0EB0006 + STATUS_PLATFORM_MANIFEST_NOT_SIGNED NTStatus = 0xC0EB0007 + STATUS_SYSTEM_INTEGRITY_ROLLBACK_DETECTED NTStatus = 0xC0E90001 + STATUS_SYSTEM_INTEGRITY_POLICY_VIOLATION NTStatus = 0xC0E90002 + STATUS_SYSTEM_INTEGRITY_INVALID_POLICY NTStatus = 0xC0E90003 + STATUS_SYSTEM_INTEGRITY_POLICY_NOT_SIGNED NTStatus = 0xC0E90004 + STATUS_SYSTEM_INTEGRITY_TOO_MANY_POLICIES NTStatus = 0xC0E90005 + STATUS_SYSTEM_INTEGRITY_SUPPLEMENTAL_POLICY_NOT_AUTHORIZED NTStatus = 0xC0E90006 + STATUS_NO_APPLICABLE_APP_LICENSES_FOUND NTStatus = 0xC0EA0001 + STATUS_CLIP_LICENSE_NOT_FOUND NTStatus = 0xC0EA0002 + STATUS_CLIP_DEVICE_LICENSE_MISSING NTStatus = 0xC0EA0003 + STATUS_CLIP_LICENSE_INVALID_SIGNATURE NTStatus = 0xC0EA0004 + STATUS_CLIP_KEYHOLDER_LICENSE_MISSING_OR_INVALID NTStatus = 0xC0EA0005 + STATUS_CLIP_LICENSE_EXPIRED NTStatus = 0xC0EA0006 + STATUS_CLIP_LICENSE_SIGNED_BY_UNKNOWN_SOURCE NTStatus = 0xC0EA0007 + STATUS_CLIP_LICENSE_NOT_SIGNED NTStatus = 0xC0EA0008 + STATUS_CLIP_LICENSE_HARDWARE_ID_OUT_OF_TOLERANCE NTStatus = 0xC0EA0009 + STATUS_CLIP_LICENSE_DEVICE_ID_MISMATCH NTStatus = 0xC0EA000A + STATUS_AUDIO_ENGINE_NODE_NOT_FOUND NTStatus = 0xC0440001 + STATUS_HDAUDIO_EMPTY_CONNECTION_LIST NTStatus = 0xC0440002 + STATUS_HDAUDIO_CONNECTION_LIST_NOT_SUPPORTED NTStatus = 0xC0440003 + STATUS_HDAUDIO_NO_LOGICAL_DEVICES_CREATED NTStatus = 0xC0440004 + STATUS_HDAUDIO_NULL_LINKED_LIST_ENTRY NTStatus = 0xC0440005 + STATUS_SPACES_REPAIRED NTStatus = 0x00E70000 + STATUS_SPACES_PAUSE NTStatus = 0x00E70001 + STATUS_SPACES_COMPLETE NTStatus = 0x00E70002 + STATUS_SPACES_REDIRECT NTStatus = 0x00E70003 + STATUS_SPACES_FAULT_DOMAIN_TYPE_INVALID NTStatus = 0xC0E70001 + STATUS_SPACES_RESILIENCY_TYPE_INVALID NTStatus = 0xC0E70003 + STATUS_SPACES_DRIVE_SECTOR_SIZE_INVALID NTStatus = 0xC0E70004 + STATUS_SPACES_DRIVE_REDUNDANCY_INVALID NTStatus = 0xC0E70006 + STATUS_SPACES_NUMBER_OF_DATA_COPIES_INVALID NTStatus = 0xC0E70007 + STATUS_SPACES_INTERLEAVE_LENGTH_INVALID NTStatus = 0xC0E70009 + STATUS_SPACES_NUMBER_OF_COLUMNS_INVALID NTStatus = 0xC0E7000A + STATUS_SPACES_NOT_ENOUGH_DRIVES NTStatus = 0xC0E7000B + STATUS_SPACES_EXTENDED_ERROR NTStatus = 0xC0E7000C + STATUS_SPACES_PROVISIONING_TYPE_INVALID NTStatus = 0xC0E7000D + STATUS_SPACES_ALLOCATION_SIZE_INVALID NTStatus = 0xC0E7000E + STATUS_SPACES_ENCLOSURE_AWARE_INVALID NTStatus = 0xC0E7000F + STATUS_SPACES_WRITE_CACHE_SIZE_INVALID NTStatus = 0xC0E70010 + STATUS_SPACES_NUMBER_OF_GROUPS_INVALID NTStatus = 0xC0E70011 + STATUS_SPACES_DRIVE_OPERATIONAL_STATE_INVALID NTStatus = 0xC0E70012 + STATUS_SPACES_UPDATE_COLUMN_STATE NTStatus = 0xC0E70013 + STATUS_SPACES_MAP_REQUIRED NTStatus = 0xC0E70014 + STATUS_SPACES_UNSUPPORTED_VERSION NTStatus = 0xC0E70015 + STATUS_SPACES_CORRUPT_METADATA NTStatus = 0xC0E70016 + STATUS_SPACES_DRT_FULL NTStatus = 0xC0E70017 + STATUS_SPACES_INCONSISTENCY 
NTStatus = 0xC0E70018 + STATUS_SPACES_LOG_NOT_READY NTStatus = 0xC0E70019 + STATUS_SPACES_NO_REDUNDANCY NTStatus = 0xC0E7001A + STATUS_SPACES_DRIVE_NOT_READY NTStatus = 0xC0E7001B + STATUS_SPACES_DRIVE_SPLIT NTStatus = 0xC0E7001C + STATUS_SPACES_DRIVE_LOST_DATA NTStatus = 0xC0E7001D + STATUS_SPACES_ENTRY_INCOMPLETE NTStatus = 0xC0E7001E + STATUS_SPACES_ENTRY_INVALID NTStatus = 0xC0E7001F + STATUS_SPACES_MARK_DIRTY NTStatus = 0xC0E70020 + STATUS_VOLSNAP_BOOTFILE_NOT_VALID NTStatus = 0xC0500003 + STATUS_VOLSNAP_ACTIVATION_TIMEOUT NTStatus = 0xC0500004 + STATUS_IO_PREEMPTED NTStatus = 0xC0510001 + STATUS_SVHDX_ERROR_STORED NTStatus = 0xC05C0000 + STATUS_SVHDX_ERROR_NOT_AVAILABLE NTStatus = 0xC05CFF00 + STATUS_SVHDX_UNIT_ATTENTION_AVAILABLE NTStatus = 0xC05CFF01 + STATUS_SVHDX_UNIT_ATTENTION_CAPACITY_DATA_CHANGED NTStatus = 0xC05CFF02 + STATUS_SVHDX_UNIT_ATTENTION_RESERVATIONS_PREEMPTED NTStatus = 0xC05CFF03 + STATUS_SVHDX_UNIT_ATTENTION_RESERVATIONS_RELEASED NTStatus = 0xC05CFF04 + STATUS_SVHDX_UNIT_ATTENTION_REGISTRATIONS_PREEMPTED NTStatus = 0xC05CFF05 + STATUS_SVHDX_UNIT_ATTENTION_OPERATING_DEFINITION_CHANGED NTStatus = 0xC05CFF06 + STATUS_SVHDX_RESERVATION_CONFLICT NTStatus = 0xC05CFF07 + STATUS_SVHDX_WRONG_FILE_TYPE NTStatus = 0xC05CFF08 + STATUS_SVHDX_VERSION_MISMATCH NTStatus = 0xC05CFF09 + STATUS_VHD_SHARED NTStatus = 0xC05CFF0A + STATUS_SVHDX_NO_INITIATOR NTStatus = 0xC05CFF0B + STATUS_VHDSET_BACKING_STORAGE_NOT_FOUND NTStatus = 0xC05CFF0C + STATUS_SMB_NO_PREAUTH_INTEGRITY_HASH_OVERLAP NTStatus = 0xC05D0000 + STATUS_SMB_BAD_CLUSTER_DIALECT NTStatus = 0xC05D0001 + STATUS_SMB_GUEST_LOGON_BLOCKED NTStatus = 0xC05D0002 + STATUS_SECCORE_INVALID_COMMAND NTStatus = 0xC0E80000 + STATUS_VSM_NOT_INITIALIZED NTStatus = 0xC0450000 + STATUS_VSM_DMA_PROTECTION_NOT_IN_USE NTStatus = 0xC0450001 + STATUS_APPEXEC_CONDITION_NOT_SATISFIED NTStatus = 0xC0EC0000 + STATUS_APPEXEC_HANDLE_INVALIDATED NTStatus = 0xC0EC0001 + STATUS_APPEXEC_INVALID_HOST_GENERATION NTStatus = 0xC0EC0002 + STATUS_APPEXEC_UNEXPECTED_PROCESS_REGISTRATION NTStatus = 0xC0EC0003 + STATUS_APPEXEC_INVALID_HOST_STATE NTStatus = 0xC0EC0004 + STATUS_APPEXEC_NO_DONOR NTStatus = 0xC0EC0005 + STATUS_APPEXEC_HOST_ID_MISMATCH NTStatus = 0xC0EC0006 + STATUS_APPEXEC_UNKNOWN_USER NTStatus = 0xC0EC0007 +) diff --git a/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go b/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go new file mode 100644 index 000000000..6048ac679 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go @@ -0,0 +1,149 @@ +// Code generated by 'mkknownfolderids.bash'; DO NOT EDIT. 
+ +package windows + +type KNOWNFOLDERID GUID + +var ( + FOLDERID_NetworkFolder = &KNOWNFOLDERID{0xd20beec4, 0x5ca8, 0x4905, [8]byte{0xae, 0x3b, 0xbf, 0x25, 0x1e, 0xa0, 0x9b, 0x53}} + FOLDERID_ComputerFolder = &KNOWNFOLDERID{0x0ac0837c, 0xbbf8, 0x452a, [8]byte{0x85, 0x0d, 0x79, 0xd0, 0x8e, 0x66, 0x7c, 0xa7}} + FOLDERID_InternetFolder = &KNOWNFOLDERID{0x4d9f7874, 0x4e0c, 0x4904, [8]byte{0x96, 0x7b, 0x40, 0xb0, 0xd2, 0x0c, 0x3e, 0x4b}} + FOLDERID_ControlPanelFolder = &KNOWNFOLDERID{0x82a74aeb, 0xaeb4, 0x465c, [8]byte{0xa0, 0x14, 0xd0, 0x97, 0xee, 0x34, 0x6d, 0x63}} + FOLDERID_PrintersFolder = &KNOWNFOLDERID{0x76fc4e2d, 0xd6ad, 0x4519, [8]byte{0xa6, 0x63, 0x37, 0xbd, 0x56, 0x06, 0x81, 0x85}} + FOLDERID_SyncManagerFolder = &KNOWNFOLDERID{0x43668bf8, 0xc14e, 0x49b2, [8]byte{0x97, 0xc9, 0x74, 0x77, 0x84, 0xd7, 0x84, 0xb7}} + FOLDERID_SyncSetupFolder = &KNOWNFOLDERID{0x0f214138, 0xb1d3, 0x4a90, [8]byte{0xbb, 0xa9, 0x27, 0xcb, 0xc0, 0xc5, 0x38, 0x9a}} + FOLDERID_ConflictFolder = &KNOWNFOLDERID{0x4bfefb45, 0x347d, 0x4006, [8]byte{0xa5, 0xbe, 0xac, 0x0c, 0xb0, 0x56, 0x71, 0x92}} + FOLDERID_SyncResultsFolder = &KNOWNFOLDERID{0x289a9a43, 0xbe44, 0x4057, [8]byte{0xa4, 0x1b, 0x58, 0x7a, 0x76, 0xd7, 0xe7, 0xf9}} + FOLDERID_RecycleBinFolder = &KNOWNFOLDERID{0xb7534046, 0x3ecb, 0x4c18, [8]byte{0xbe, 0x4e, 0x64, 0xcd, 0x4c, 0xb7, 0xd6, 0xac}} + FOLDERID_ConnectionsFolder = &KNOWNFOLDERID{0x6f0cd92b, 0x2e97, 0x45d1, [8]byte{0x88, 0xff, 0xb0, 0xd1, 0x86, 0xb8, 0xde, 0xdd}} + FOLDERID_Fonts = &KNOWNFOLDERID{0xfd228cb7, 0xae11, 0x4ae3, [8]byte{0x86, 0x4c, 0x16, 0xf3, 0x91, 0x0a, 0xb8, 0xfe}} + FOLDERID_Desktop = &KNOWNFOLDERID{0xb4bfcc3a, 0xdb2c, 0x424c, [8]byte{0xb0, 0x29, 0x7f, 0xe9, 0x9a, 0x87, 0xc6, 0x41}} + FOLDERID_Startup = &KNOWNFOLDERID{0xb97d20bb, 0xf46a, 0x4c97, [8]byte{0xba, 0x10, 0x5e, 0x36, 0x08, 0x43, 0x08, 0x54}} + FOLDERID_Programs = &KNOWNFOLDERID{0xa77f5d77, 0x2e2b, 0x44c3, [8]byte{0xa6, 0xa2, 0xab, 0xa6, 0x01, 0x05, 0x4a, 0x51}} + FOLDERID_StartMenu = &KNOWNFOLDERID{0x625b53c3, 0xab48, 0x4ec1, [8]byte{0xba, 0x1f, 0xa1, 0xef, 0x41, 0x46, 0xfc, 0x19}} + FOLDERID_Recent = &KNOWNFOLDERID{0xae50c081, 0xebd2, 0x438a, [8]byte{0x86, 0x55, 0x8a, 0x09, 0x2e, 0x34, 0x98, 0x7a}} + FOLDERID_SendTo = &KNOWNFOLDERID{0x8983036c, 0x27c0, 0x404b, [8]byte{0x8f, 0x08, 0x10, 0x2d, 0x10, 0xdc, 0xfd, 0x74}} + FOLDERID_Documents = &KNOWNFOLDERID{0xfdd39ad0, 0x238f, 0x46af, [8]byte{0xad, 0xb4, 0x6c, 0x85, 0x48, 0x03, 0x69, 0xc7}} + FOLDERID_Favorites = &KNOWNFOLDERID{0x1777f761, 0x68ad, 0x4d8a, [8]byte{0x87, 0xbd, 0x30, 0xb7, 0x59, 0xfa, 0x33, 0xdd}} + FOLDERID_NetHood = &KNOWNFOLDERID{0xc5abbf53, 0xe17f, 0x4121, [8]byte{0x89, 0x00, 0x86, 0x62, 0x6f, 0xc2, 0xc9, 0x73}} + FOLDERID_PrintHood = &KNOWNFOLDERID{0x9274bd8d, 0xcfd1, 0x41c3, [8]byte{0xb3, 0x5e, 0xb1, 0x3f, 0x55, 0xa7, 0x58, 0xf4}} + FOLDERID_Templates = &KNOWNFOLDERID{0xa63293e8, 0x664e, 0x48db, [8]byte{0xa0, 0x79, 0xdf, 0x75, 0x9e, 0x05, 0x09, 0xf7}} + FOLDERID_CommonStartup = &KNOWNFOLDERID{0x82a5ea35, 0xd9cd, 0x47c5, [8]byte{0x96, 0x29, 0xe1, 0x5d, 0x2f, 0x71, 0x4e, 0x6e}} + FOLDERID_CommonPrograms = &KNOWNFOLDERID{0x0139d44e, 0x6afe, 0x49f2, [8]byte{0x86, 0x90, 0x3d, 0xaf, 0xca, 0xe6, 0xff, 0xb8}} + FOLDERID_CommonStartMenu = &KNOWNFOLDERID{0xa4115719, 0xd62e, 0x491d, [8]byte{0xaa, 0x7c, 0xe7, 0x4b, 0x8b, 0xe3, 0xb0, 0x67}} + FOLDERID_PublicDesktop = &KNOWNFOLDERID{0xc4aa340d, 0xf20f, 0x4863, [8]byte{0xaf, 0xef, 0xf8, 0x7e, 0xf2, 0xe6, 0xba, 0x25}} + FOLDERID_ProgramData = &KNOWNFOLDERID{0x62ab5d82, 0xfdc1, 0x4dc3, [8]byte{0xa9, 0xdd, 0x07, 0x0d, 0x1d, 
0x49, 0x5d, 0x97}} + FOLDERID_CommonTemplates = &KNOWNFOLDERID{0xb94237e7, 0x57ac, 0x4347, [8]byte{0x91, 0x51, 0xb0, 0x8c, 0x6c, 0x32, 0xd1, 0xf7}} + FOLDERID_PublicDocuments = &KNOWNFOLDERID{0xed4824af, 0xdce4, 0x45a8, [8]byte{0x81, 0xe2, 0xfc, 0x79, 0x65, 0x08, 0x36, 0x34}} + FOLDERID_RoamingAppData = &KNOWNFOLDERID{0x3eb685db, 0x65f9, 0x4cf6, [8]byte{0xa0, 0x3a, 0xe3, 0xef, 0x65, 0x72, 0x9f, 0x3d}} + FOLDERID_LocalAppData = &KNOWNFOLDERID{0xf1b32785, 0x6fba, 0x4fcf, [8]byte{0x9d, 0x55, 0x7b, 0x8e, 0x7f, 0x15, 0x70, 0x91}} + FOLDERID_LocalAppDataLow = &KNOWNFOLDERID{0xa520a1a4, 0x1780, 0x4ff6, [8]byte{0xbd, 0x18, 0x16, 0x73, 0x43, 0xc5, 0xaf, 0x16}} + FOLDERID_InternetCache = &KNOWNFOLDERID{0x352481e8, 0x33be, 0x4251, [8]byte{0xba, 0x85, 0x60, 0x07, 0xca, 0xed, 0xcf, 0x9d}} + FOLDERID_Cookies = &KNOWNFOLDERID{0x2b0f765d, 0xc0e9, 0x4171, [8]byte{0x90, 0x8e, 0x08, 0xa6, 0x11, 0xb8, 0x4f, 0xf6}} + FOLDERID_History = &KNOWNFOLDERID{0xd9dc8a3b, 0xb784, 0x432e, [8]byte{0xa7, 0x81, 0x5a, 0x11, 0x30, 0xa7, 0x59, 0x63}} + FOLDERID_System = &KNOWNFOLDERID{0x1ac14e77, 0x02e7, 0x4e5d, [8]byte{0xb7, 0x44, 0x2e, 0xb1, 0xae, 0x51, 0x98, 0xb7}} + FOLDERID_SystemX86 = &KNOWNFOLDERID{0xd65231b0, 0xb2f1, 0x4857, [8]byte{0xa4, 0xce, 0xa8, 0xe7, 0xc6, 0xea, 0x7d, 0x27}} + FOLDERID_Windows = &KNOWNFOLDERID{0xf38bf404, 0x1d43, 0x42f2, [8]byte{0x93, 0x05, 0x67, 0xde, 0x0b, 0x28, 0xfc, 0x23}} + FOLDERID_Profile = &KNOWNFOLDERID{0x5e6c858f, 0x0e22, 0x4760, [8]byte{0x9a, 0xfe, 0xea, 0x33, 0x17, 0xb6, 0x71, 0x73}} + FOLDERID_Pictures = &KNOWNFOLDERID{0x33e28130, 0x4e1e, 0x4676, [8]byte{0x83, 0x5a, 0x98, 0x39, 0x5c, 0x3b, 0xc3, 0xbb}} + FOLDERID_ProgramFilesX86 = &KNOWNFOLDERID{0x7c5a40ef, 0xa0fb, 0x4bfc, [8]byte{0x87, 0x4a, 0xc0, 0xf2, 0xe0, 0xb9, 0xfa, 0x8e}} + FOLDERID_ProgramFilesCommonX86 = &KNOWNFOLDERID{0xde974d24, 0xd9c6, 0x4d3e, [8]byte{0xbf, 0x91, 0xf4, 0x45, 0x51, 0x20, 0xb9, 0x17}} + FOLDERID_ProgramFilesX64 = &KNOWNFOLDERID{0x6d809377, 0x6af0, 0x444b, [8]byte{0x89, 0x57, 0xa3, 0x77, 0x3f, 0x02, 0x20, 0x0e}} + FOLDERID_ProgramFilesCommonX64 = &KNOWNFOLDERID{0x6365d5a7, 0x0f0d, 0x45e5, [8]byte{0x87, 0xf6, 0x0d, 0xa5, 0x6b, 0x6a, 0x4f, 0x7d}} + FOLDERID_ProgramFiles = &KNOWNFOLDERID{0x905e63b6, 0xc1bf, 0x494e, [8]byte{0xb2, 0x9c, 0x65, 0xb7, 0x32, 0xd3, 0xd2, 0x1a}} + FOLDERID_ProgramFilesCommon = &KNOWNFOLDERID{0xf7f1ed05, 0x9f6d, 0x47a2, [8]byte{0xaa, 0xae, 0x29, 0xd3, 0x17, 0xc6, 0xf0, 0x66}} + FOLDERID_UserProgramFiles = &KNOWNFOLDERID{0x5cd7aee2, 0x2219, 0x4a67, [8]byte{0xb8, 0x5d, 0x6c, 0x9c, 0xe1, 0x56, 0x60, 0xcb}} + FOLDERID_UserProgramFilesCommon = &KNOWNFOLDERID{0xbcbd3057, 0xca5c, 0x4622, [8]byte{0xb4, 0x2d, 0xbc, 0x56, 0xdb, 0x0a, 0xe5, 0x16}} + FOLDERID_AdminTools = &KNOWNFOLDERID{0x724ef170, 0xa42d, 0x4fef, [8]byte{0x9f, 0x26, 0xb6, 0x0e, 0x84, 0x6f, 0xba, 0x4f}} + FOLDERID_CommonAdminTools = &KNOWNFOLDERID{0xd0384e7d, 0xbac3, 0x4797, [8]byte{0x8f, 0x14, 0xcb, 0xa2, 0x29, 0xb3, 0x92, 0xb5}} + FOLDERID_Music = &KNOWNFOLDERID{0x4bd8d571, 0x6d19, 0x48d3, [8]byte{0xbe, 0x97, 0x42, 0x22, 0x20, 0x08, 0x0e, 0x43}} + FOLDERID_Videos = &KNOWNFOLDERID{0x18989b1d, 0x99b5, 0x455b, [8]byte{0x84, 0x1c, 0xab, 0x7c, 0x74, 0xe4, 0xdd, 0xfc}} + FOLDERID_Ringtones = &KNOWNFOLDERID{0xc870044b, 0xf49e, 0x4126, [8]byte{0xa9, 0xc3, 0xb5, 0x2a, 0x1f, 0xf4, 0x11, 0xe8}} + FOLDERID_PublicPictures = &KNOWNFOLDERID{0xb6ebfb86, 0x6907, 0x413c, [8]byte{0x9a, 0xf7, 0x4f, 0xc2, 0xab, 0xf0, 0x7c, 0xc5}} + FOLDERID_PublicMusic = &KNOWNFOLDERID{0x3214fab5, 0x9757, 0x4298, [8]byte{0xbb, 0x61, 0x92, 0xa9, 0xde, 0xaa, 0x44, 
0xff}} + FOLDERID_PublicVideos = &KNOWNFOLDERID{0x2400183a, 0x6185, 0x49fb, [8]byte{0xa2, 0xd8, 0x4a, 0x39, 0x2a, 0x60, 0x2b, 0xa3}} + FOLDERID_PublicRingtones = &KNOWNFOLDERID{0xe555ab60, 0x153b, 0x4d17, [8]byte{0x9f, 0x04, 0xa5, 0xfe, 0x99, 0xfc, 0x15, 0xec}} + FOLDERID_ResourceDir = &KNOWNFOLDERID{0x8ad10c31, 0x2adb, 0x4296, [8]byte{0xa8, 0xf7, 0xe4, 0x70, 0x12, 0x32, 0xc9, 0x72}} + FOLDERID_LocalizedResourcesDir = &KNOWNFOLDERID{0x2a00375e, 0x224c, 0x49de, [8]byte{0xb8, 0xd1, 0x44, 0x0d, 0xf7, 0xef, 0x3d, 0xdc}} + FOLDERID_CommonOEMLinks = &KNOWNFOLDERID{0xc1bae2d0, 0x10df, 0x4334, [8]byte{0xbe, 0xdd, 0x7a, 0xa2, 0x0b, 0x22, 0x7a, 0x9d}} + FOLDERID_CDBurning = &KNOWNFOLDERID{0x9e52ab10, 0xf80d, 0x49df, [8]byte{0xac, 0xb8, 0x43, 0x30, 0xf5, 0x68, 0x78, 0x55}} + FOLDERID_UserProfiles = &KNOWNFOLDERID{0x0762d272, 0xc50a, 0x4bb0, [8]byte{0xa3, 0x82, 0x69, 0x7d, 0xcd, 0x72, 0x9b, 0x80}} + FOLDERID_Playlists = &KNOWNFOLDERID{0xde92c1c7, 0x837f, 0x4f69, [8]byte{0xa3, 0xbb, 0x86, 0xe6, 0x31, 0x20, 0x4a, 0x23}} + FOLDERID_SamplePlaylists = &KNOWNFOLDERID{0x15ca69b3, 0x30ee, 0x49c1, [8]byte{0xac, 0xe1, 0x6b, 0x5e, 0xc3, 0x72, 0xaf, 0xb5}} + FOLDERID_SampleMusic = &KNOWNFOLDERID{0xb250c668, 0xf57d, 0x4ee1, [8]byte{0xa6, 0x3c, 0x29, 0x0e, 0xe7, 0xd1, 0xaa, 0x1f}} + FOLDERID_SamplePictures = &KNOWNFOLDERID{0xc4900540, 0x2379, 0x4c75, [8]byte{0x84, 0x4b, 0x64, 0xe6, 0xfa, 0xf8, 0x71, 0x6b}} + FOLDERID_SampleVideos = &KNOWNFOLDERID{0x859ead94, 0x2e85, 0x48ad, [8]byte{0xa7, 0x1a, 0x09, 0x69, 0xcb, 0x56, 0xa6, 0xcd}} + FOLDERID_PhotoAlbums = &KNOWNFOLDERID{0x69d2cf90, 0xfc33, 0x4fb7, [8]byte{0x9a, 0x0c, 0xeb, 0xb0, 0xf0, 0xfc, 0xb4, 0x3c}} + FOLDERID_Public = &KNOWNFOLDERID{0xdfdf76a2, 0xc82a, 0x4d63, [8]byte{0x90, 0x6a, 0x56, 0x44, 0xac, 0x45, 0x73, 0x85}} + FOLDERID_ChangeRemovePrograms = &KNOWNFOLDERID{0xdf7266ac, 0x9274, 0x4867, [8]byte{0x8d, 0x55, 0x3b, 0xd6, 0x61, 0xde, 0x87, 0x2d}} + FOLDERID_AppUpdates = &KNOWNFOLDERID{0xa305ce99, 0xf527, 0x492b, [8]byte{0x8b, 0x1a, 0x7e, 0x76, 0xfa, 0x98, 0xd6, 0xe4}} + FOLDERID_AddNewPrograms = &KNOWNFOLDERID{0xde61d971, 0x5ebc, 0x4f02, [8]byte{0xa3, 0xa9, 0x6c, 0x82, 0x89, 0x5e, 0x5c, 0x04}} + FOLDERID_Downloads = &KNOWNFOLDERID{0x374de290, 0x123f, 0x4565, [8]byte{0x91, 0x64, 0x39, 0xc4, 0x92, 0x5e, 0x46, 0x7b}} + FOLDERID_PublicDownloads = &KNOWNFOLDERID{0x3d644c9b, 0x1fb8, 0x4f30, [8]byte{0x9b, 0x45, 0xf6, 0x70, 0x23, 0x5f, 0x79, 0xc0}} + FOLDERID_SavedSearches = &KNOWNFOLDERID{0x7d1d3a04, 0xdebb, 0x4115, [8]byte{0x95, 0xcf, 0x2f, 0x29, 0xda, 0x29, 0x20, 0xda}} + FOLDERID_QuickLaunch = &KNOWNFOLDERID{0x52a4f021, 0x7b75, 0x48a9, [8]byte{0x9f, 0x6b, 0x4b, 0x87, 0xa2, 0x10, 0xbc, 0x8f}} + FOLDERID_Contacts = &KNOWNFOLDERID{0x56784854, 0xc6cb, 0x462b, [8]byte{0x81, 0x69, 0x88, 0xe3, 0x50, 0xac, 0xb8, 0x82}} + FOLDERID_SidebarParts = &KNOWNFOLDERID{0xa75d362e, 0x50fc, 0x4fb7, [8]byte{0xac, 0x2c, 0xa8, 0xbe, 0xaa, 0x31, 0x44, 0x93}} + FOLDERID_SidebarDefaultParts = &KNOWNFOLDERID{0x7b396e54, 0x9ec5, 0x4300, [8]byte{0xbe, 0x0a, 0x24, 0x82, 0xeb, 0xae, 0x1a, 0x26}} + FOLDERID_PublicGameTasks = &KNOWNFOLDERID{0xdebf2536, 0xe1a8, 0x4c59, [8]byte{0xb6, 0xa2, 0x41, 0x45, 0x86, 0x47, 0x6a, 0xea}} + FOLDERID_GameTasks = &KNOWNFOLDERID{0x054fae61, 0x4dd8, 0x4787, [8]byte{0x80, 0xb6, 0x09, 0x02, 0x20, 0xc4, 0xb7, 0x00}} + FOLDERID_SavedGames = &KNOWNFOLDERID{0x4c5c32ff, 0xbb9d, 0x43b0, [8]byte{0xb5, 0xb4, 0x2d, 0x72, 0xe5, 0x4e, 0xaa, 0xa4}} + FOLDERID_Games = &KNOWNFOLDERID{0xcac52c1a, 0xb53d, 0x4edc, [8]byte{0x92, 0xd7, 0x6b, 0x2e, 0x8a, 0xc1, 0x94, 0x34}} + 
FOLDERID_SEARCH_MAPI = &KNOWNFOLDERID{0x98ec0e18, 0x2098, 0x4d44, [8]byte{0x86, 0x44, 0x66, 0x97, 0x93, 0x15, 0xa2, 0x81}} + FOLDERID_SEARCH_CSC = &KNOWNFOLDERID{0xee32e446, 0x31ca, 0x4aba, [8]byte{0x81, 0x4f, 0xa5, 0xeb, 0xd2, 0xfd, 0x6d, 0x5e}} + FOLDERID_Links = &KNOWNFOLDERID{0xbfb9d5e0, 0xc6a9, 0x404c, [8]byte{0xb2, 0xb2, 0xae, 0x6d, 0xb6, 0xaf, 0x49, 0x68}} + FOLDERID_UsersFiles = &KNOWNFOLDERID{0xf3ce0f7c, 0x4901, 0x4acc, [8]byte{0x86, 0x48, 0xd5, 0xd4, 0x4b, 0x04, 0xef, 0x8f}} + FOLDERID_UsersLibraries = &KNOWNFOLDERID{0xa302545d, 0xdeff, 0x464b, [8]byte{0xab, 0xe8, 0x61, 0xc8, 0x64, 0x8d, 0x93, 0x9b}} + FOLDERID_SearchHome = &KNOWNFOLDERID{0x190337d1, 0xb8ca, 0x4121, [8]byte{0xa6, 0x39, 0x6d, 0x47, 0x2d, 0x16, 0x97, 0x2a}} + FOLDERID_OriginalImages = &KNOWNFOLDERID{0x2c36c0aa, 0x5812, 0x4b87, [8]byte{0xbf, 0xd0, 0x4c, 0xd0, 0xdf, 0xb1, 0x9b, 0x39}} + FOLDERID_DocumentsLibrary = &KNOWNFOLDERID{0x7b0db17d, 0x9cd2, 0x4a93, [8]byte{0x97, 0x33, 0x46, 0xcc, 0x89, 0x02, 0x2e, 0x7c}} + FOLDERID_MusicLibrary = &KNOWNFOLDERID{0x2112ab0a, 0xc86a, 0x4ffe, [8]byte{0xa3, 0x68, 0x0d, 0xe9, 0x6e, 0x47, 0x01, 0x2e}} + FOLDERID_PicturesLibrary = &KNOWNFOLDERID{0xa990ae9f, 0xa03b, 0x4e80, [8]byte{0x94, 0xbc, 0x99, 0x12, 0xd7, 0x50, 0x41, 0x04}} + FOLDERID_VideosLibrary = &KNOWNFOLDERID{0x491e922f, 0x5643, 0x4af4, [8]byte{0xa7, 0xeb, 0x4e, 0x7a, 0x13, 0x8d, 0x81, 0x74}} + FOLDERID_RecordedTVLibrary = &KNOWNFOLDERID{0x1a6fdba2, 0xf42d, 0x4358, [8]byte{0xa7, 0x98, 0xb7, 0x4d, 0x74, 0x59, 0x26, 0xc5}} + FOLDERID_HomeGroup = &KNOWNFOLDERID{0x52528a6b, 0xb9e3, 0x4add, [8]byte{0xb6, 0x0d, 0x58, 0x8c, 0x2d, 0xba, 0x84, 0x2d}} + FOLDERID_HomeGroupCurrentUser = &KNOWNFOLDERID{0x9b74b6a3, 0x0dfd, 0x4f11, [8]byte{0x9e, 0x78, 0x5f, 0x78, 0x00, 0xf2, 0xe7, 0x72}} + FOLDERID_DeviceMetadataStore = &KNOWNFOLDERID{0x5ce4a5e9, 0xe4eb, 0x479d, [8]byte{0xb8, 0x9f, 0x13, 0x0c, 0x02, 0x88, 0x61, 0x55}} + FOLDERID_Libraries = &KNOWNFOLDERID{0x1b3ea5dc, 0xb587, 0x4786, [8]byte{0xb4, 0xef, 0xbd, 0x1d, 0xc3, 0x32, 0xae, 0xae}} + FOLDERID_PublicLibraries = &KNOWNFOLDERID{0x48daf80b, 0xe6cf, 0x4f4e, [8]byte{0xb8, 0x00, 0x0e, 0x69, 0xd8, 0x4e, 0xe3, 0x84}} + FOLDERID_UserPinned = &KNOWNFOLDERID{0x9e3995ab, 0x1f9c, 0x4f13, [8]byte{0xb8, 0x27, 0x48, 0xb2, 0x4b, 0x6c, 0x71, 0x74}} + FOLDERID_ImplicitAppShortcuts = &KNOWNFOLDERID{0xbcb5256f, 0x79f6, 0x4cee, [8]byte{0xb7, 0x25, 0xdc, 0x34, 0xe4, 0x02, 0xfd, 0x46}} + FOLDERID_AccountPictures = &KNOWNFOLDERID{0x008ca0b1, 0x55b4, 0x4c56, [8]byte{0xb8, 0xa8, 0x4d, 0xe4, 0xb2, 0x99, 0xd3, 0xbe}} + FOLDERID_PublicUserTiles = &KNOWNFOLDERID{0x0482af6c, 0x08f1, 0x4c34, [8]byte{0x8c, 0x90, 0xe1, 0x7e, 0xc9, 0x8b, 0x1e, 0x17}} + FOLDERID_AppsFolder = &KNOWNFOLDERID{0x1e87508d, 0x89c2, 0x42f0, [8]byte{0x8a, 0x7e, 0x64, 0x5a, 0x0f, 0x50, 0xca, 0x58}} + FOLDERID_StartMenuAllPrograms = &KNOWNFOLDERID{0xf26305ef, 0x6948, 0x40b9, [8]byte{0xb2, 0x55, 0x81, 0x45, 0x3d, 0x09, 0xc7, 0x85}} + FOLDERID_CommonStartMenuPlaces = &KNOWNFOLDERID{0xa440879f, 0x87a0, 0x4f7d, [8]byte{0xb7, 0x00, 0x02, 0x07, 0xb9, 0x66, 0x19, 0x4a}} + FOLDERID_ApplicationShortcuts = &KNOWNFOLDERID{0xa3918781, 0xe5f2, 0x4890, [8]byte{0xb3, 0xd9, 0xa7, 0xe5, 0x43, 0x32, 0x32, 0x8c}} + FOLDERID_RoamingTiles = &KNOWNFOLDERID{0x00bcfc5a, 0xed94, 0x4e48, [8]byte{0x96, 0xa1, 0x3f, 0x62, 0x17, 0xf2, 0x19, 0x90}} + FOLDERID_RoamedTileImages = &KNOWNFOLDERID{0xaaa8d5a5, 0xf1d6, 0x4259, [8]byte{0xba, 0xa8, 0x78, 0xe7, 0xef, 0x60, 0x83, 0x5e}} + FOLDERID_Screenshots = &KNOWNFOLDERID{0xb7bede81, 0xdf94, 0x4682, [8]byte{0xa7, 0xd8, 0x57, 0xa5, 
0x26, 0x20, 0xb8, 0x6f}} + FOLDERID_CameraRoll = &KNOWNFOLDERID{0xab5fb87b, 0x7ce2, 0x4f83, [8]byte{0x91, 0x5d, 0x55, 0x08, 0x46, 0xc9, 0x53, 0x7b}} + FOLDERID_SkyDrive = &KNOWNFOLDERID{0xa52bba46, 0xe9e1, 0x435f, [8]byte{0xb3, 0xd9, 0x28, 0xda, 0xa6, 0x48, 0xc0, 0xf6}} + FOLDERID_OneDrive = &KNOWNFOLDERID{0xa52bba46, 0xe9e1, 0x435f, [8]byte{0xb3, 0xd9, 0x28, 0xda, 0xa6, 0x48, 0xc0, 0xf6}} + FOLDERID_SkyDriveDocuments = &KNOWNFOLDERID{0x24d89e24, 0x2f19, 0x4534, [8]byte{0x9d, 0xde, 0x6a, 0x66, 0x71, 0xfb, 0xb8, 0xfe}} + FOLDERID_SkyDrivePictures = &KNOWNFOLDERID{0x339719b5, 0x8c47, 0x4894, [8]byte{0x94, 0xc2, 0xd8, 0xf7, 0x7a, 0xdd, 0x44, 0xa6}} + FOLDERID_SkyDriveMusic = &KNOWNFOLDERID{0xc3f2459e, 0x80d6, 0x45dc, [8]byte{0xbf, 0xef, 0x1f, 0x76, 0x9f, 0x2b, 0xe7, 0x30}} + FOLDERID_SkyDriveCameraRoll = &KNOWNFOLDERID{0x767e6811, 0x49cb, 0x4273, [8]byte{0x87, 0xc2, 0x20, 0xf3, 0x55, 0xe1, 0x08, 0x5b}} + FOLDERID_SearchHistory = &KNOWNFOLDERID{0x0d4c3db6, 0x03a3, 0x462f, [8]byte{0xa0, 0xe6, 0x08, 0x92, 0x4c, 0x41, 0xb5, 0xd4}} + FOLDERID_SearchTemplates = &KNOWNFOLDERID{0x7e636bfe, 0xdfa9, 0x4d5e, [8]byte{0xb4, 0x56, 0xd7, 0xb3, 0x98, 0x51, 0xd8, 0xa9}} + FOLDERID_CameraRollLibrary = &KNOWNFOLDERID{0x2b20df75, 0x1eda, 0x4039, [8]byte{0x80, 0x97, 0x38, 0x79, 0x82, 0x27, 0xd5, 0xb7}} + FOLDERID_SavedPictures = &KNOWNFOLDERID{0x3b193882, 0xd3ad, 0x4eab, [8]byte{0x96, 0x5a, 0x69, 0x82, 0x9d, 0x1f, 0xb5, 0x9f}} + FOLDERID_SavedPicturesLibrary = &KNOWNFOLDERID{0xe25b5812, 0xbe88, 0x4bd9, [8]byte{0x94, 0xb0, 0x29, 0x23, 0x34, 0x77, 0xb6, 0xc3}} + FOLDERID_RetailDemo = &KNOWNFOLDERID{0x12d4c69e, 0x24ad, 0x4923, [8]byte{0xbe, 0x19, 0x31, 0x32, 0x1c, 0x43, 0xa7, 0x67}} + FOLDERID_Device = &KNOWNFOLDERID{0x1c2ac1dc, 0x4358, 0x4b6c, [8]byte{0x97, 0x33, 0xaf, 0x21, 0x15, 0x65, 0x76, 0xf0}} + FOLDERID_DevelopmentFiles = &KNOWNFOLDERID{0xdbe8e08e, 0x3053, 0x4bbc, [8]byte{0xb1, 0x83, 0x2a, 0x7b, 0x2b, 0x19, 0x1e, 0x59}} + FOLDERID_Objects3D = &KNOWNFOLDERID{0x31c0dd25, 0x9439, 0x4f12, [8]byte{0xbf, 0x41, 0x7f, 0xf4, 0xed, 0xa3, 0x87, 0x22}} + FOLDERID_AppCaptures = &KNOWNFOLDERID{0xedc0fe71, 0x98d8, 0x4f4a, [8]byte{0xb9, 0x20, 0xc8, 0xdc, 0x13, 0x3c, 0xb1, 0x65}} + FOLDERID_LocalDocuments = &KNOWNFOLDERID{0xf42ee2d3, 0x909f, 0x4907, [8]byte{0x88, 0x71, 0x4c, 0x22, 0xfc, 0x0b, 0xf7, 0x56}} + FOLDERID_LocalPictures = &KNOWNFOLDERID{0x0ddd015d, 0xb06c, 0x45d5, [8]byte{0x8c, 0x4c, 0xf5, 0x97, 0x13, 0x85, 0x46, 0x39}} + FOLDERID_LocalVideos = &KNOWNFOLDERID{0x35286a68, 0x3c57, 0x41a1, [8]byte{0xbb, 0xb1, 0x0e, 0xae, 0x73, 0xd7, 0x6c, 0x95}} + FOLDERID_LocalMusic = &KNOWNFOLDERID{0xa0c69a99, 0x21c8, 0x4671, [8]byte{0x87, 0x03, 0x79, 0x34, 0x16, 0x2f, 0xcf, 0x1d}} + FOLDERID_LocalDownloads = &KNOWNFOLDERID{0x7d83ee9b, 0x2244, 0x4e70, [8]byte{0xb1, 0xf5, 0x53, 0x93, 0x04, 0x2a, 0xf1, 0xe4}} + FOLDERID_RecordedCalls = &KNOWNFOLDERID{0x2f8b40c2, 0x83ed, 0x48ee, [8]byte{0xb3, 0x83, 0xa1, 0xf1, 0x57, 0xec, 0x6f, 0x9a}} + FOLDERID_AllAppMods = &KNOWNFOLDERID{0x7ad67899, 0x66af, 0x43ba, [8]byte{0x91, 0x56, 0x6a, 0xad, 0x42, 0xe6, 0xc5, 0x96}} + FOLDERID_CurrentAppMods = &KNOWNFOLDERID{0x3db40b20, 0x2a30, 0x4dbe, [8]byte{0x91, 0x7e, 0x77, 0x1d, 0xd2, 0x1d, 0xd0, 0x99}} + FOLDERID_AppDataDesktop = &KNOWNFOLDERID{0xb2c5e279, 0x7add, 0x439f, [8]byte{0xb2, 0x8c, 0xc4, 0x1f, 0xe1, 0xbb, 0xf6, 0x72}} + FOLDERID_AppDataDocuments = &KNOWNFOLDERID{0x7be16610, 0x1f7f, 0x44ac, [8]byte{0xbf, 0xf0, 0x83, 0xe1, 0x5f, 0x2f, 0xfc, 0xa1}} + FOLDERID_AppDataFavorites = &KNOWNFOLDERID{0x7cfbefbc, 0xde1f, 0x45aa, [8]byte{0xb8, 0x43, 0xa5, 
0x42, 0xac, 0x53, 0x6c, 0xc9}} + FOLDERID_AppDataProgramData = &KNOWNFOLDERID{0x559d40a3, 0xa036, 0x40fa, [8]byte{0xaf, 0x61, 0x84, 0xcb, 0x43, 0x0a, 0x4d, 0x34}} +) diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go new file mode 100644 index 000000000..a81ea2c70 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -0,0 +1,4345 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package windows + +import ( + "syscall" + "unsafe" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modCfgMgr32 = NewLazySystemDLL("CfgMgr32.dll") + modadvapi32 = NewLazySystemDLL("advapi32.dll") + modcrypt32 = NewLazySystemDLL("crypt32.dll") + moddnsapi = NewLazySystemDLL("dnsapi.dll") + moddwmapi = NewLazySystemDLL("dwmapi.dll") + modiphlpapi = NewLazySystemDLL("iphlpapi.dll") + modkernel32 = NewLazySystemDLL("kernel32.dll") + modmswsock = NewLazySystemDLL("mswsock.dll") + modnetapi32 = NewLazySystemDLL("netapi32.dll") + modntdll = NewLazySystemDLL("ntdll.dll") + modole32 = NewLazySystemDLL("ole32.dll") + modpsapi = NewLazySystemDLL("psapi.dll") + modsechost = NewLazySystemDLL("sechost.dll") + modsecur32 = NewLazySystemDLL("secur32.dll") + modsetupapi = NewLazySystemDLL("setupapi.dll") + modshell32 = NewLazySystemDLL("shell32.dll") + moduser32 = NewLazySystemDLL("user32.dll") + moduserenv = NewLazySystemDLL("userenv.dll") + modversion = NewLazySystemDLL("version.dll") + modwintrust = NewLazySystemDLL("wintrust.dll") + modws2_32 = NewLazySystemDLL("ws2_32.dll") + modwtsapi32 = NewLazySystemDLL("wtsapi32.dll") + + procCM_Get_DevNode_Status = modCfgMgr32.NewProc("CM_Get_DevNode_Status") + procCM_Get_Device_Interface_ListW = modCfgMgr32.NewProc("CM_Get_Device_Interface_ListW") + procCM_Get_Device_Interface_List_SizeW = modCfgMgr32.NewProc("CM_Get_Device_Interface_List_SizeW") + procCM_MapCrToWin32Err = modCfgMgr32.NewProc("CM_MapCrToWin32Err") + procAdjustTokenGroups = modadvapi32.NewProc("AdjustTokenGroups") + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procAllocateAndInitializeSid = modadvapi32.NewProc("AllocateAndInitializeSid") + procBuildSecurityDescriptorW = modadvapi32.NewProc("BuildSecurityDescriptorW") + procChangeServiceConfig2W = modadvapi32.NewProc("ChangeServiceConfig2W") + procChangeServiceConfigW = modadvapi32.NewProc("ChangeServiceConfigW") + procCheckTokenMembership = modadvapi32.NewProc("CheckTokenMembership") + procCloseServiceHandle = modadvapi32.NewProc("CloseServiceHandle") + procControlService = modadvapi32.NewProc("ControlService") + procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") + procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") + procConvertStringSecurityDescriptorToSecurityDescriptorW = 
modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") + procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") + procCopySid = modadvapi32.NewProc("CopySid") + procCreateProcessAsUserW = modadvapi32.NewProc("CreateProcessAsUserW") + procCreateServiceW = modadvapi32.NewProc("CreateServiceW") + procCreateWellKnownSid = modadvapi32.NewProc("CreateWellKnownSid") + procCryptAcquireContextW = modadvapi32.NewProc("CryptAcquireContextW") + procCryptGenRandom = modadvapi32.NewProc("CryptGenRandom") + procCryptReleaseContext = modadvapi32.NewProc("CryptReleaseContext") + procDeleteService = modadvapi32.NewProc("DeleteService") + procDeregisterEventSource = modadvapi32.NewProc("DeregisterEventSource") + procDuplicateTokenEx = modadvapi32.NewProc("DuplicateTokenEx") + procEnumDependentServicesW = modadvapi32.NewProc("EnumDependentServicesW") + procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") + procEqualSid = modadvapi32.NewProc("EqualSid") + procFreeSid = modadvapi32.NewProc("FreeSid") + procGetLengthSid = modadvapi32.NewProc("GetLengthSid") + procGetNamedSecurityInfoW = modadvapi32.NewProc("GetNamedSecurityInfoW") + procGetSecurityDescriptorControl = modadvapi32.NewProc("GetSecurityDescriptorControl") + procGetSecurityDescriptorDacl = modadvapi32.NewProc("GetSecurityDescriptorDacl") + procGetSecurityDescriptorGroup = modadvapi32.NewProc("GetSecurityDescriptorGroup") + procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") + procGetSecurityDescriptorOwner = modadvapi32.NewProc("GetSecurityDescriptorOwner") + procGetSecurityDescriptorRMControl = modadvapi32.NewProc("GetSecurityDescriptorRMControl") + procGetSecurityDescriptorSacl = modadvapi32.NewProc("GetSecurityDescriptorSacl") + procGetSecurityInfo = modadvapi32.NewProc("GetSecurityInfo") + procGetSidIdentifierAuthority = modadvapi32.NewProc("GetSidIdentifierAuthority") + procGetSidSubAuthority = modadvapi32.NewProc("GetSidSubAuthority") + procGetSidSubAuthorityCount = modadvapi32.NewProc("GetSidSubAuthorityCount") + procGetTokenInformation = modadvapi32.NewProc("GetTokenInformation") + procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") + procInitializeSecurityDescriptor = modadvapi32.NewProc("InitializeSecurityDescriptor") + procInitiateSystemShutdownExW = modadvapi32.NewProc("InitiateSystemShutdownExW") + procIsTokenRestricted = modadvapi32.NewProc("IsTokenRestricted") + procIsValidSecurityDescriptor = modadvapi32.NewProc("IsValidSecurityDescriptor") + procIsValidSid = modadvapi32.NewProc("IsValidSid") + procIsWellKnownSid = modadvapi32.NewProc("IsWellKnownSid") + procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procMakeAbsoluteSD = modadvapi32.NewProc("MakeAbsoluteSD") + procMakeSelfRelativeSD = modadvapi32.NewProc("MakeSelfRelativeSD") + procNotifyServiceStatusChangeW = modadvapi32.NewProc("NotifyServiceStatusChangeW") + procOpenProcessToken = modadvapi32.NewProc("OpenProcessToken") + procOpenSCManagerW = modadvapi32.NewProc("OpenSCManagerW") + procOpenServiceW = modadvapi32.NewProc("OpenServiceW") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procQueryServiceConfig2W = modadvapi32.NewProc("QueryServiceConfig2W") + procQueryServiceConfigW = modadvapi32.NewProc("QueryServiceConfigW") + procQueryServiceDynamicInformation = 
modadvapi32.NewProc("QueryServiceDynamicInformation") + procQueryServiceLockStatusW = modadvapi32.NewProc("QueryServiceLockStatusW") + procQueryServiceStatus = modadvapi32.NewProc("QueryServiceStatus") + procQueryServiceStatusEx = modadvapi32.NewProc("QueryServiceStatusEx") + procRegCloseKey = modadvapi32.NewProc("RegCloseKey") + procRegEnumKeyExW = modadvapi32.NewProc("RegEnumKeyExW") + procRegNotifyChangeKeyValue = modadvapi32.NewProc("RegNotifyChangeKeyValue") + procRegOpenKeyExW = modadvapi32.NewProc("RegOpenKeyExW") + procRegQueryInfoKeyW = modadvapi32.NewProc("RegQueryInfoKeyW") + procRegQueryValueExW = modadvapi32.NewProc("RegQueryValueExW") + procRegisterEventSourceW = modadvapi32.NewProc("RegisterEventSourceW") + procRegisterServiceCtrlHandlerExW = modadvapi32.NewProc("RegisterServiceCtrlHandlerExW") + procReportEventW = modadvapi32.NewProc("ReportEventW") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") + procSetEntriesInAclW = modadvapi32.NewProc("SetEntriesInAclW") + procSetKernelObjectSecurity = modadvapi32.NewProc("SetKernelObjectSecurity") + procSetNamedSecurityInfoW = modadvapi32.NewProc("SetNamedSecurityInfoW") + procSetSecurityDescriptorControl = modadvapi32.NewProc("SetSecurityDescriptorControl") + procSetSecurityDescriptorDacl = modadvapi32.NewProc("SetSecurityDescriptorDacl") + procSetSecurityDescriptorGroup = modadvapi32.NewProc("SetSecurityDescriptorGroup") + procSetSecurityDescriptorOwner = modadvapi32.NewProc("SetSecurityDescriptorOwner") + procSetSecurityDescriptorRMControl = modadvapi32.NewProc("SetSecurityDescriptorRMControl") + procSetSecurityDescriptorSacl = modadvapi32.NewProc("SetSecurityDescriptorSacl") + procSetSecurityInfo = modadvapi32.NewProc("SetSecurityInfo") + procSetServiceStatus = modadvapi32.NewProc("SetServiceStatus") + procSetThreadToken = modadvapi32.NewProc("SetThreadToken") + procSetTokenInformation = modadvapi32.NewProc("SetTokenInformation") + procStartServiceCtrlDispatcherW = modadvapi32.NewProc("StartServiceCtrlDispatcherW") + procStartServiceW = modadvapi32.NewProc("StartServiceW") + procCertAddCertificateContextToStore = modcrypt32.NewProc("CertAddCertificateContextToStore") + procCertCloseStore = modcrypt32.NewProc("CertCloseStore") + procCertCreateCertificateContext = modcrypt32.NewProc("CertCreateCertificateContext") + procCertDeleteCertificateFromStore = modcrypt32.NewProc("CertDeleteCertificateFromStore") + procCertDuplicateCertificateContext = modcrypt32.NewProc("CertDuplicateCertificateContext") + procCertEnumCertificatesInStore = modcrypt32.NewProc("CertEnumCertificatesInStore") + procCertFindCertificateInStore = modcrypt32.NewProc("CertFindCertificateInStore") + procCertFindChainInStore = modcrypt32.NewProc("CertFindChainInStore") + procCertFindExtension = modcrypt32.NewProc("CertFindExtension") + procCertFreeCertificateChain = modcrypt32.NewProc("CertFreeCertificateChain") + procCertFreeCertificateContext = modcrypt32.NewProc("CertFreeCertificateContext") + procCertGetCertificateChain = modcrypt32.NewProc("CertGetCertificateChain") + procCertGetNameStringW = modcrypt32.NewProc("CertGetNameStringW") + procCertOpenStore = modcrypt32.NewProc("CertOpenStore") + procCertOpenSystemStoreW = modcrypt32.NewProc("CertOpenSystemStoreW") + procCertVerifyCertificateChainPolicy = modcrypt32.NewProc("CertVerifyCertificateChainPolicy") + procCryptAcquireCertificatePrivateKey = modcrypt32.NewProc("CryptAcquireCertificatePrivateKey") + procCryptDecodeObject = modcrypt32.NewProc("CryptDecodeObject") + procCryptProtectData = 
modcrypt32.NewProc("CryptProtectData") + procCryptQueryObject = modcrypt32.NewProc("CryptQueryObject") + procCryptUnprotectData = modcrypt32.NewProc("CryptUnprotectData") + procPFXImportCertStore = modcrypt32.NewProc("PFXImportCertStore") + procDnsNameCompare_W = moddnsapi.NewProc("DnsNameCompare_W") + procDnsQuery_W = moddnsapi.NewProc("DnsQuery_W") + procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") + procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute") + procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute") + procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") + procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") + procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") + procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") + procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") + procCancelIo = modkernel32.NewProc("CancelIo") + procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procCloseHandle = modkernel32.NewProc("CloseHandle") + procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") + procCreateDirectoryW = modkernel32.NewProc("CreateDirectoryW") + procCreateEventExW = modkernel32.NewProc("CreateEventExW") + procCreateEventW = modkernel32.NewProc("CreateEventW") + procCreateFileMappingW = modkernel32.NewProc("CreateFileMappingW") + procCreateFileW = modkernel32.NewProc("CreateFileW") + procCreateHardLinkW = modkernel32.NewProc("CreateHardLinkW") + procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") + procCreateJobObjectW = modkernel32.NewProc("CreateJobObjectW") + procCreateMutexExW = modkernel32.NewProc("CreateMutexExW") + procCreateMutexW = modkernel32.NewProc("CreateMutexW") + procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") + procCreatePipe = modkernel32.NewProc("CreatePipe") + procCreateProcessW = modkernel32.NewProc("CreateProcessW") + procCreateSymbolicLinkW = modkernel32.NewProc("CreateSymbolicLinkW") + procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot") + procDefineDosDeviceW = modkernel32.NewProc("DefineDosDeviceW") + procDeleteFileW = modkernel32.NewProc("DeleteFileW") + procDeleteProcThreadAttributeList = modkernel32.NewProc("DeleteProcThreadAttributeList") + procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") + procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") + procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") + procExitProcess = modkernel32.NewProc("ExitProcess") + procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") + procFindClose = modkernel32.NewProc("FindClose") + procFindCloseChangeNotification = modkernel32.NewProc("FindCloseChangeNotification") + procFindFirstChangeNotificationW = modkernel32.NewProc("FindFirstChangeNotificationW") + procFindFirstFileW = modkernel32.NewProc("FindFirstFileW") + procFindFirstVolumeMountPointW = modkernel32.NewProc("FindFirstVolumeMountPointW") + procFindFirstVolumeW = modkernel32.NewProc("FindFirstVolumeW") + procFindNextChangeNotification = modkernel32.NewProc("FindNextChangeNotification") + procFindNextFileW = modkernel32.NewProc("FindNextFileW") + procFindNextVolumeMountPointW = modkernel32.NewProc("FindNextVolumeMountPointW") + procFindNextVolumeW = modkernel32.NewProc("FindNextVolumeW") + procFindResourceW = modkernel32.NewProc("FindResourceW") + procFindVolumeClose = modkernel32.NewProc("FindVolumeClose") + procFindVolumeMountPointClose = 
modkernel32.NewProc("FindVolumeMountPointClose") + procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers") + procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile") + procFormatMessageW = modkernel32.NewProc("FormatMessageW") + procFreeEnvironmentStringsW = modkernel32.NewProc("FreeEnvironmentStringsW") + procFreeLibrary = modkernel32.NewProc("FreeLibrary") + procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") + procGetACP = modkernel32.NewProc("GetACP") + procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") + procGetCommTimeouts = modkernel32.NewProc("GetCommTimeouts") + procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") + procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") + procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") + procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") + procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") + procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") + procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") + procGetCurrentThreadId = modkernel32.NewProc("GetCurrentThreadId") + procGetDiskFreeSpaceExW = modkernel32.NewProc("GetDiskFreeSpaceExW") + procGetDriveTypeW = modkernel32.NewProc("GetDriveTypeW") + procGetEnvironmentStringsW = modkernel32.NewProc("GetEnvironmentStringsW") + procGetEnvironmentVariableW = modkernel32.NewProc("GetEnvironmentVariableW") + procGetExitCodeProcess = modkernel32.NewProc("GetExitCodeProcess") + procGetFileAttributesExW = modkernel32.NewProc("GetFileAttributesExW") + procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW") + procGetFileInformationByHandle = modkernel32.NewProc("GetFileInformationByHandle") + procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") + procGetFileType = modkernel32.NewProc("GetFileType") + procGetFinalPathNameByHandleW = modkernel32.NewProc("GetFinalPathNameByHandleW") + procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW") + procGetLargePageMinimum = modkernel32.NewProc("GetLargePageMinimum") + procGetLastError = modkernel32.NewProc("GetLastError") + procGetLogicalDriveStringsW = modkernel32.NewProc("GetLogicalDriveStringsW") + procGetLogicalDrives = modkernel32.NewProc("GetLogicalDrives") + procGetLongPathNameW = modkernel32.NewProc("GetLongPathNameW") + procGetMaximumProcessorCount = modkernel32.NewProc("GetMaximumProcessorCount") + procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW") + procGetModuleHandleExW = modkernel32.NewProc("GetModuleHandleExW") + procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") + procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") + procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") + procGetProcAddress = modkernel32.NewProc("GetProcAddress") + procGetProcessId = modkernel32.NewProc("GetProcessId") + procGetProcessPreferredUILanguages = modkernel32.NewProc("GetProcessPreferredUILanguages") + procGetProcessShutdownParameters = modkernel32.NewProc("GetProcessShutdownParameters") + procGetProcessTimes = modkernel32.NewProc("GetProcessTimes") + procGetProcessWorkingSetSizeEx = modkernel32.NewProc("GetProcessWorkingSetSizeEx") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procGetShortPathNameW = modkernel32.NewProc("GetShortPathNameW") + procGetStartupInfoW = 
modkernel32.NewProc("GetStartupInfoW") + procGetStdHandle = modkernel32.NewProc("GetStdHandle") + procGetSystemDirectoryW = modkernel32.NewProc("GetSystemDirectoryW") + procGetSystemPreferredUILanguages = modkernel32.NewProc("GetSystemPreferredUILanguages") + procGetSystemTimeAsFileTime = modkernel32.NewProc("GetSystemTimeAsFileTime") + procGetSystemTimePreciseAsFileTime = modkernel32.NewProc("GetSystemTimePreciseAsFileTime") + procGetSystemWindowsDirectoryW = modkernel32.NewProc("GetSystemWindowsDirectoryW") + procGetTempPathW = modkernel32.NewProc("GetTempPathW") + procGetThreadPreferredUILanguages = modkernel32.NewProc("GetThreadPreferredUILanguages") + procGetTickCount64 = modkernel32.NewProc("GetTickCount64") + procGetTimeZoneInformation = modkernel32.NewProc("GetTimeZoneInformation") + procGetUserPreferredUILanguages = modkernel32.NewProc("GetUserPreferredUILanguages") + procGetVersion = modkernel32.NewProc("GetVersion") + procGetVolumeInformationByHandleW = modkernel32.NewProc("GetVolumeInformationByHandleW") + procGetVolumeInformationW = modkernel32.NewProc("GetVolumeInformationW") + procGetVolumeNameForVolumeMountPointW = modkernel32.NewProc("GetVolumeNameForVolumeMountPointW") + procGetVolumePathNameW = modkernel32.NewProc("GetVolumePathNameW") + procGetVolumePathNamesForVolumeNameW = modkernel32.NewProc("GetVolumePathNamesForVolumeNameW") + procGetWindowsDirectoryW = modkernel32.NewProc("GetWindowsDirectoryW") + procInitializeProcThreadAttributeList = modkernel32.NewProc("InitializeProcThreadAttributeList") + procIsWow64Process = modkernel32.NewProc("IsWow64Process") + procIsWow64Process2 = modkernel32.NewProc("IsWow64Process2") + procLoadLibraryExW = modkernel32.NewProc("LoadLibraryExW") + procLoadLibraryW = modkernel32.NewProc("LoadLibraryW") + procLoadResource = modkernel32.NewProc("LoadResource") + procLocalAlloc = modkernel32.NewProc("LocalAlloc") + procLocalFree = modkernel32.NewProc("LocalFree") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procLockResource = modkernel32.NewProc("LockResource") + procMapViewOfFile = modkernel32.NewProc("MapViewOfFile") + procModule32FirstW = modkernel32.NewProc("Module32FirstW") + procModule32NextW = modkernel32.NewProc("Module32NextW") + procMoveFileExW = modkernel32.NewProc("MoveFileExW") + procMoveFileW = modkernel32.NewProc("MoveFileW") + procMultiByteToWideChar = modkernel32.NewProc("MultiByteToWideChar") + procOpenEventW = modkernel32.NewProc("OpenEventW") + procOpenMutexW = modkernel32.NewProc("OpenMutexW") + procOpenProcess = modkernel32.NewProc("OpenProcess") + procOpenThread = modkernel32.NewProc("OpenThread") + procPostQueuedCompletionStatus = modkernel32.NewProc("PostQueuedCompletionStatus") + procProcess32FirstW = modkernel32.NewProc("Process32FirstW") + procProcess32NextW = modkernel32.NewProc("Process32NextW") + procProcessIdToSessionId = modkernel32.NewProc("ProcessIdToSessionId") + procPulseEvent = modkernel32.NewProc("PulseEvent") + procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW") + procQueryFullProcessImageNameW = modkernel32.NewProc("QueryFullProcessImageNameW") + procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") + procReadConsoleW = modkernel32.NewProc("ReadConsoleW") + procReadDirectoryChangesW = modkernel32.NewProc("ReadDirectoryChangesW") + procReadFile = modkernel32.NewProc("ReadFile") + procReadProcessMemory = modkernel32.NewProc("ReadProcessMemory") + procReleaseMutex = modkernel32.NewProc("ReleaseMutex") + procRemoveDirectoryW = 
modkernel32.NewProc("RemoveDirectoryW") + procResetEvent = modkernel32.NewProc("ResetEvent") + procResumeThread = modkernel32.NewProc("ResumeThread") + procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") + procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") + procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") + procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") + procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") + procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") + procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") + procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") + procSetErrorMode = modkernel32.NewProc("SetErrorMode") + procSetEvent = modkernel32.NewProc("SetEvent") + procSetFileAttributesW = modkernel32.NewProc("SetFileAttributesW") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") + procSetFilePointer = modkernel32.NewProc("SetFilePointer") + procSetFileTime = modkernel32.NewProc("SetFileTime") + procSetHandleInformation = modkernel32.NewProc("SetHandleInformation") + procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject") + procSetNamedPipeHandleState = modkernel32.NewProc("SetNamedPipeHandleState") + procSetPriorityClass = modkernel32.NewProc("SetPriorityClass") + procSetProcessPriorityBoost = modkernel32.NewProc("SetProcessPriorityBoost") + procSetProcessShutdownParameters = modkernel32.NewProc("SetProcessShutdownParameters") + procSetProcessWorkingSetSizeEx = modkernel32.NewProc("SetProcessWorkingSetSizeEx") + procSetStdHandle = modkernel32.NewProc("SetStdHandle") + procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW") + procSetVolumeMountPointW = modkernel32.NewProc("SetVolumeMountPointW") + procSizeofResource = modkernel32.NewProc("SizeofResource") + procSleepEx = modkernel32.NewProc("SleepEx") + procTerminateJobObject = modkernel32.NewProc("TerminateJobObject") + procTerminateProcess = modkernel32.NewProc("TerminateProcess") + procThread32First = modkernel32.NewProc("Thread32First") + procThread32Next = modkernel32.NewProc("Thread32Next") + procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") + procUnmapViewOfFile = modkernel32.NewProc("UnmapViewOfFile") + procUpdateProcThreadAttribute = modkernel32.NewProc("UpdateProcThreadAttribute") + procVirtualAlloc = modkernel32.NewProc("VirtualAlloc") + procVirtualFree = modkernel32.NewProc("VirtualFree") + procVirtualLock = modkernel32.NewProc("VirtualLock") + procVirtualProtect = modkernel32.NewProc("VirtualProtect") + procVirtualProtectEx = modkernel32.NewProc("VirtualProtectEx") + procVirtualQuery = modkernel32.NewProc("VirtualQuery") + procVirtualQueryEx = modkernel32.NewProc("VirtualQueryEx") + procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") + procWTSGetActiveConsoleSessionId = modkernel32.NewProc("WTSGetActiveConsoleSessionId") + procWaitForMultipleObjects = modkernel32.NewProc("WaitForMultipleObjects") + procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") + procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") + procWriteFile = modkernel32.NewProc("WriteFile") + procWriteProcessMemory = modkernel32.NewProc("WriteProcessMemory") + procAcceptEx = modmswsock.NewProc("AcceptEx") + procGetAcceptExSockaddrs = modmswsock.NewProc("GetAcceptExSockaddrs") + procTransmitFile = modmswsock.NewProc("TransmitFile") + 
procNetApiBufferFree = modnetapi32.NewProc("NetApiBufferFree") + procNetGetJoinInformation = modnetapi32.NewProc("NetGetJoinInformation") + procNetUserGetInfo = modnetapi32.NewProc("NetUserGetInfo") + procNtCreateFile = modntdll.NewProc("NtCreateFile") + procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") + procNtQueryInformationProcess = modntdll.NewProc("NtQueryInformationProcess") + procNtQuerySystemInformation = modntdll.NewProc("NtQuerySystemInformation") + procNtSetInformationFile = modntdll.NewProc("NtSetInformationFile") + procNtSetInformationProcess = modntdll.NewProc("NtSetInformationProcess") + procNtSetSystemInformation = modntdll.NewProc("NtSetSystemInformation") + procRtlAddFunctionTable = modntdll.NewProc("RtlAddFunctionTable") + procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") + procRtlDeleteFunctionTable = modntdll.NewProc("RtlDeleteFunctionTable") + procRtlDosPathNameToNtPathName_U_WithStatus = modntdll.NewProc("RtlDosPathNameToNtPathName_U_WithStatus") + procRtlDosPathNameToRelativeNtPathName_U_WithStatus = modntdll.NewProc("RtlDosPathNameToRelativeNtPathName_U_WithStatus") + procRtlGetCurrentPeb = modntdll.NewProc("RtlGetCurrentPeb") + procRtlGetNtVersionNumbers = modntdll.NewProc("RtlGetNtVersionNumbers") + procRtlGetVersion = modntdll.NewProc("RtlGetVersion") + procRtlInitString = modntdll.NewProc("RtlInitString") + procRtlInitUnicodeString = modntdll.NewProc("RtlInitUnicodeString") + procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") + procCLSIDFromString = modole32.NewProc("CLSIDFromString") + procCoCreateGuid = modole32.NewProc("CoCreateGuid") + procCoGetObject = modole32.NewProc("CoGetObject") + procCoInitializeEx = modole32.NewProc("CoInitializeEx") + procCoTaskMemFree = modole32.NewProc("CoTaskMemFree") + procCoUninitialize = modole32.NewProc("CoUninitialize") + procStringFromGUID2 = modole32.NewProc("StringFromGUID2") + procEnumProcessModules = modpsapi.NewProc("EnumProcessModules") + procEnumProcessModulesEx = modpsapi.NewProc("EnumProcessModulesEx") + procEnumProcesses = modpsapi.NewProc("EnumProcesses") + procGetModuleBaseNameW = modpsapi.NewProc("GetModuleBaseNameW") + procGetModuleFileNameExW = modpsapi.NewProc("GetModuleFileNameExW") + procGetModuleInformation = modpsapi.NewProc("GetModuleInformation") + procQueryWorkingSetEx = modpsapi.NewProc("QueryWorkingSetEx") + procSubscribeServiceChangeNotifications = modsechost.NewProc("SubscribeServiceChangeNotifications") + procUnsubscribeServiceChangeNotifications = modsechost.NewProc("UnsubscribeServiceChangeNotifications") + procGetUserNameExW = modsecur32.NewProc("GetUserNameExW") + procTranslateNameW = modsecur32.NewProc("TranslateNameW") + procSetupDiBuildDriverInfoList = modsetupapi.NewProc("SetupDiBuildDriverInfoList") + procSetupDiCallClassInstaller = modsetupapi.NewProc("SetupDiCallClassInstaller") + procSetupDiCancelDriverInfoSearch = modsetupapi.NewProc("SetupDiCancelDriverInfoSearch") + procSetupDiClassGuidsFromNameExW = modsetupapi.NewProc("SetupDiClassGuidsFromNameExW") + procSetupDiClassNameFromGuidExW = modsetupapi.NewProc("SetupDiClassNameFromGuidExW") + procSetupDiCreateDeviceInfoListExW = modsetupapi.NewProc("SetupDiCreateDeviceInfoListExW") + procSetupDiCreateDeviceInfoW = modsetupapi.NewProc("SetupDiCreateDeviceInfoW") + procSetupDiDestroyDeviceInfoList = modsetupapi.NewProc("SetupDiDestroyDeviceInfoList") + procSetupDiDestroyDriverInfoList = modsetupapi.NewProc("SetupDiDestroyDriverInfoList") + procSetupDiEnumDeviceInfo = 
modsetupapi.NewProc("SetupDiEnumDeviceInfo") + procSetupDiEnumDriverInfoW = modsetupapi.NewProc("SetupDiEnumDriverInfoW") + procSetupDiGetClassDevsExW = modsetupapi.NewProc("SetupDiGetClassDevsExW") + procSetupDiGetClassInstallParamsW = modsetupapi.NewProc("SetupDiGetClassInstallParamsW") + procSetupDiGetDeviceInfoListDetailW = modsetupapi.NewProc("SetupDiGetDeviceInfoListDetailW") + procSetupDiGetDeviceInstallParamsW = modsetupapi.NewProc("SetupDiGetDeviceInstallParamsW") + procSetupDiGetDeviceInstanceIdW = modsetupapi.NewProc("SetupDiGetDeviceInstanceIdW") + procSetupDiGetDevicePropertyW = modsetupapi.NewProc("SetupDiGetDevicePropertyW") + procSetupDiGetDeviceRegistryPropertyW = modsetupapi.NewProc("SetupDiGetDeviceRegistryPropertyW") + procSetupDiGetDriverInfoDetailW = modsetupapi.NewProc("SetupDiGetDriverInfoDetailW") + procSetupDiGetSelectedDevice = modsetupapi.NewProc("SetupDiGetSelectedDevice") + procSetupDiGetSelectedDriverW = modsetupapi.NewProc("SetupDiGetSelectedDriverW") + procSetupDiOpenDevRegKey = modsetupapi.NewProc("SetupDiOpenDevRegKey") + procSetupDiSetClassInstallParamsW = modsetupapi.NewProc("SetupDiSetClassInstallParamsW") + procSetupDiSetDeviceInstallParamsW = modsetupapi.NewProc("SetupDiSetDeviceInstallParamsW") + procSetupDiSetDeviceRegistryPropertyW = modsetupapi.NewProc("SetupDiSetDeviceRegistryPropertyW") + procSetupDiSetSelectedDevice = modsetupapi.NewProc("SetupDiSetSelectedDevice") + procSetupDiSetSelectedDriverW = modsetupapi.NewProc("SetupDiSetSelectedDriverW") + procSetupUninstallOEMInfW = modsetupapi.NewProc("SetupUninstallOEMInfW") + procCommandLineToArgvW = modshell32.NewProc("CommandLineToArgvW") + procSHGetKnownFolderPath = modshell32.NewProc("SHGetKnownFolderPath") + procShellExecuteW = modshell32.NewProc("ShellExecuteW") + procEnumChildWindows = moduser32.NewProc("EnumChildWindows") + procEnumWindows = moduser32.NewProc("EnumWindows") + procExitWindowsEx = moduser32.NewProc("ExitWindowsEx") + procGetClassNameW = moduser32.NewProc("GetClassNameW") + procGetDesktopWindow = moduser32.NewProc("GetDesktopWindow") + procGetForegroundWindow = moduser32.NewProc("GetForegroundWindow") + procGetGUIThreadInfo = moduser32.NewProc("GetGUIThreadInfo") + procGetShellWindow = moduser32.NewProc("GetShellWindow") + procGetWindowThreadProcessId = moduser32.NewProc("GetWindowThreadProcessId") + procIsWindow = moduser32.NewProc("IsWindow") + procIsWindowUnicode = moduser32.NewProc("IsWindowUnicode") + procIsWindowVisible = moduser32.NewProc("IsWindowVisible") + procMessageBoxW = moduser32.NewProc("MessageBoxW") + procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") + procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") + procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") + procGetFileVersionInfoSizeW = modversion.NewProc("GetFileVersionInfoSizeW") + procGetFileVersionInfoW = modversion.NewProc("GetFileVersionInfoW") + procVerQueryValueW = modversion.NewProc("VerQueryValueW") + procWinVerifyTrustEx = modwintrust.NewProc("WinVerifyTrustEx") + procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW") + procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW") + procWSACleanup = modws2_32.NewProc("WSACleanup") + procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW") + procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") + procWSAIoctl = modws2_32.NewProc("WSAIoctl") + procWSALookupServiceBeginW = modws2_32.NewProc("WSALookupServiceBeginW") + procWSALookupServiceEnd = 
modws2_32.NewProc("WSALookupServiceEnd") + procWSALookupServiceNextW = modws2_32.NewProc("WSALookupServiceNextW") + procWSARecv = modws2_32.NewProc("WSARecv") + procWSARecvFrom = modws2_32.NewProc("WSARecvFrom") + procWSASend = modws2_32.NewProc("WSASend") + procWSASendTo = modws2_32.NewProc("WSASendTo") + procWSASocketW = modws2_32.NewProc("WSASocketW") + procWSAStartup = modws2_32.NewProc("WSAStartup") + procbind = modws2_32.NewProc("bind") + procclosesocket = modws2_32.NewProc("closesocket") + procconnect = modws2_32.NewProc("connect") + procgethostbyname = modws2_32.NewProc("gethostbyname") + procgetpeername = modws2_32.NewProc("getpeername") + procgetprotobyname = modws2_32.NewProc("getprotobyname") + procgetservbyname = modws2_32.NewProc("getservbyname") + procgetsockname = modws2_32.NewProc("getsockname") + procgetsockopt = modws2_32.NewProc("getsockopt") + proclisten = modws2_32.NewProc("listen") + procntohs = modws2_32.NewProc("ntohs") + procrecvfrom = modws2_32.NewProc("recvfrom") + procsendto = modws2_32.NewProc("sendto") + procsetsockopt = modws2_32.NewProc("setsockopt") + procshutdown = modws2_32.NewProc("shutdown") + procsocket = modws2_32.NewProc("socket") + procWTSEnumerateSessionsW = modwtsapi32.NewProc("WTSEnumerateSessionsW") + procWTSFreeMemory = modwtsapi32.NewProc("WTSFreeMemory") + procWTSQueryUserToken = modwtsapi32.NewProc("WTSQueryUserToken") +) + +func cm_Get_DevNode_Status(status *uint32, problemNumber *uint32, devInst DEVINST, flags uint32) (ret CONFIGRET) { + r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_Status.Addr(), 4, uintptr(unsafe.Pointer(status)), uintptr(unsafe.Pointer(problemNumber)), uintptr(devInst), uintptr(flags), 0, 0) + ret = CONFIGRET(r0) + return +} + +func cm_Get_Device_Interface_List(interfaceClass *GUID, deviceID *uint16, buffer *uint16, bufferLen uint32, flags uint32) (ret CONFIGRET) { + r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_ListW.Addr(), 5, uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(flags), 0) + ret = CONFIGRET(r0) + return +} + +func cm_Get_Device_Interface_List_Size(len *uint32, interfaceClass *GUID, deviceID *uint16, flags uint32) (ret CONFIGRET) { + r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_List_SizeW.Addr(), 4, uintptr(unsafe.Pointer(len)), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(flags), 0, 0) + ret = CONFIGRET(r0) + return +} + +func cm_MapCrToWin32Err(configRet CONFIGRET, defaultWin32Error Errno) (ret Errno) { + r0, _, _ := syscall.Syscall(procCM_MapCrToWin32Err.Addr(), 2, uintptr(configRet), uintptr(defaultWin32Error), 0) + ret = Errno(r0) + return +} + +func AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, buflen uint32, prevstate *Tokengroups, returnlen *uint32) (err error) { + var _p0 uint32 + if resetToDefault { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procAdjustTokenGroups.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tokenprivileges, buflen uint32, prevstate *Tokenprivileges, returnlen *uint32) (err error) { + var _p0 uint32 + if disableAllPrivileges { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), 
uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) { + r1, _, e1 := syscall.Syscall12(procAllocateAndInitializeSid.Addr(), 11, uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) { + r0, _, _ := syscall.Syscall9(procBuildSecurityDescriptorW.Addr(), 9, uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor))) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) { + r1, _, e1 := syscall.Syscall(procChangeServiceConfig2W.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) { + r1, _, e1 := syscall.Syscall12(procChangeServiceConfigW.Addr(), 11, uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) { + r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CloseServiceHandle(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procCloseServiceHandle.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) { + r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) 
(err error) { + r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(str) + if err != nil { + return + } + return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) +} + +func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) { + r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) { + r1, _, e1 := syscall.Syscall(procCopySid.Addr(), 3, uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) { + var _p0 uint32 + if inheritHandles { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall12(procCreateProcessAsUserW.Addr(), 11, uintptr(token), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall15(procCreateServiceW.Addr(), 13, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), 0, 0) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, 
domainSid *SID, sid *SID, sizeSid *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procCreateWellKnownSid.Addr(), 4, uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procCryptAcquireContextW.Addr(), 5, uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { + r1, _, e1 := syscall.Syscall(procCryptGenRandom.Addr(), 3, uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CryptReleaseContext(provhandle Handle, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procCryptReleaseContext.Addr(), 2, uintptr(provhandle), uintptr(flags), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func DeleteService(service Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDeleteService.Addr(), 1, uintptr(service), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func DeregisterEventSource(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDeregisterEventSource.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) { + r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func EnumDependentServices(service Handle, activityState uint32, services *ENUM_SERVICE_STATUS, buffSize uint32, bytesNeeded *uint32, servicesReturned *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procEnumDependentServicesW.Addr(), 6, uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) { + r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) { + r0, _, _ := syscall.Syscall(procEqualSid.Addr(), 2, uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)), 0) + isEqual = r0 != 0 + return +} + +func FreeSid(sid *SID) (err error) { + r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + if r1 != 0 { + err = errnoErr(e1) + 
} + return +} + +func GetLengthSid(sid *SID) (len uint32) { + r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + len = uint32(r0) + return +} + +func getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { + var _p0 *uint16 + _p0, ret = syscall.UTF16PtrFromString(objectName) + if ret != nil { + return + } + return _getNamedSecurityInfo(_p0, objectType, securityInformation, owner, group, dacl, sacl, sd) +} + +func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { + r0, _, _ := syscall.Syscall9(procGetNamedSecurityInfoW.Addr(), 8, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl **ACL, daclDefaulted *bool) (err error) { + var _p0 uint32 + if *daclPresent { + _p0 = 1 + } + var _p1 uint32 + if *daclDefaulted { + _p1 = 1 + } + r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + *daclPresent = _p0 != 0 + *daclDefaulted = _p1 != 0 + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefaulted *bool) (err error) { + var _p0 uint32 + if *groupDefaulted { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0))) + *groupDefaulted = _p0 != 0 + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + len = uint32(r0) + return +} + +func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefaulted *bool) (err error) { + var _p0 uint32 + if *ownerDefaulted { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0))) + *ownerDefaulted = _p0 != 0 + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl **ACL, saclDefaulted *bool) (err error) { + var _p0 uint32 + if 
*saclPresent { + _p0 = 1 + } + var _p1 uint32 + if *saclDefaulted { + _p1 = 1 + } + r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + *saclPresent = _p0 != 0 + *saclDefaulted = _p1 != 0 + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { + r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) { + r0, _, _ := syscall.Syscall(procGetSidIdentifierAuthority.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + authority = (*SidIdentifierAuthority)(unsafe.Pointer(r0)) + return +} + +func getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) { + r0, _, _ := syscall.Syscall(procGetSidSubAuthority.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(index), 0) + subAuthority = (*uint32)(unsafe.Pointer(r0)) + return +} + +func getSidSubAuthorityCount(sid *SID) (count *uint8) { + r0, _, _ := syscall.Syscall(procGetSidSubAuthorityCount.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + count = (*uint8)(unsafe.Pointer(r0)) + return +} + +func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ImpersonateSelf(impersonationlevel uint32) (err error) { + r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) { + r1, _, e1 := syscall.Syscall(procInitializeSecurityDescriptor.Addr(), 2, uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint32, forceAppsClosed bool, rebootAfterShutdown bool, reason uint32) (err error) { + var _p0 uint32 + if forceAppsClosed { + _p0 = 1 + } + var _p1 uint32 + if rebootAfterShutdown { + _p1 = 1 + } + r1, _, e1 := syscall.Syscall6(procInitiateSystemShutdownExW.Addr(), 6, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func isTokenRestricted(tokenHandle Token) (ret bool, err error) { + r0, _, e1 := syscall.Syscall(procIsTokenRestricted.Addr(), 1, uintptr(tokenHandle), 0, 0) + ret = r0 != 0 + if !ret { + err = errnoErr(e1) + } + return +} + +func isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) { + r0, _, _ := syscall.Syscall(procIsValidSecurityDescriptor.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + isValid = r0 != 0 + return +} + +func isValidSid(sid *SID) (isValid bool) { + r0, _, 
_ := syscall.Syscall(procIsValidSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + isValid = r0 != 0 + return +} + +func isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) { + r0, _, _ := syscall.Syscall(procIsWellKnownSid.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(sidType), 0) + isWellKnown = r0 != 0 + return +} + +func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) { + r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall12(procMakeAbsoluteSD.Addr(), 11, uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procMakeSelfRelativeSD.Addr(), 3, uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) { + r0, _, _ := syscall.Syscall(procNotifyServiceStatusChangeW.Addr(), 3, uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier))) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func OpenProcessToken(process Handle, access uint32, token *Token) (err error) { + r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err 
error) { + r0, _, e1 := syscall.Syscall(procOpenSCManagerW.Addr(), 3, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procOpenServiceW.Addr(), 3, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access)) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token) (err error) { + var _p0 uint32 + if openAsSelf { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceConfig2W.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceConfigW.Addr(), 4, uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInfo unsafe.Pointer) (err error) { + err = procQueryServiceDynamicInformation.Find() + if err != nil { + return + } + r1, _, e1 := syscall.Syscall(procQueryServiceDynamicInformation.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(dynamicInfo)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceLockStatusW.Addr(), 4, uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { + r1, _, e1 := syscall.Syscall(procQueryServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(status)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceStatusEx.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func RegCloseKey(key Handle) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegCloseKey.Addr(), 1, uintptr(key), 0, 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegEnumKeyExW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), 
uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, event Handle, asynchronous bool) (regerrno error) { + var _p0 uint32 + if watchSubtree { + _p0 = 1 + } + var _p1 uint32 + if asynchronous { + _p1 = 1 + } + r0, _, _ := syscall.Syscall6(procRegNotifyChangeKeyValue.Addr(), 5, uintptr(key), uintptr(_p0), uintptr(notifyFilter), uintptr(event), uintptr(_p1), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegOpenKeyExW.Addr(), 5, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) { + r0, _, _ := syscall.Syscall12(procRegQueryInfoKeyW.Addr(), 12, uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegQueryValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procRegisterEventSourceW.Addr(), 2, uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)), 0) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, context uintptr) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procRegisterServiceCtrlHandlerExW.Addr(), 3, uintptr(unsafe.Pointer(serviceName)), uintptr(handlerProc), uintptr(context)) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procReportEventW.Addr(), 9, uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func RevertToSelf() (err error) { + r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + 
if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) { + r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) { + r1, _, e1 := syscall.Syscall(procSetKernelObjectSecurity.Addr(), 3, uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { + var _p0 *uint16 + _p0, ret = syscall.UTF16PtrFromString(objectName) + if ret != nil { + return + } + return _SetNamedSecurityInfo(_p0, objectType, securityInformation, owner, group, dacl, sacl) +} + +func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { + r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfoW.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) { + r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl *ACL, daclDefaulted bool) (err error) { + var _p0 uint32 + if daclPresent { + _p0 = 1 + } + var _p1 uint32 + if daclDefaulted { + _p1 = 1 + } + r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaulted bool) (err error) { + var _p0 uint32 + if groupDefaulted { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaulted bool) (err error) { + var _p0 uint32 + if ownerDefaulted { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) { + syscall.Syscall(procSetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) + return +} + +func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent 
bool, sacl *ACL, saclDefaulted bool) (err error) { + var _p0 uint32 + if saclPresent { + _p0 = 1 + } + var _p1 uint32 + if saclDefaulted { + _p1 = 1 + } + r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { + r0, _, _ := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) { + r1, _, e1 := syscall.Syscall(procSetServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(serviceStatus)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetThreadToken(thread *Handle, token Token) (err error) { + r1, _, e1 := syscall.Syscall(procSetThreadToken.Addr(), 2, uintptr(unsafe.Pointer(thread)), uintptr(token), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) { + r1, _, e1 := syscall.Syscall(procStartServiceCtrlDispatcherW.Addr(), 1, uintptr(unsafe.Pointer(serviceTable)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procStartServiceW.Addr(), 3, uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) { + r1, _, e1 := syscall.Syscall6(procCertAddCertificateContextToStore.Addr(), 4, uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CertCloseStore(store Handle, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procCertCloseStore.Addr(), 2, uintptr(store), uintptr(flags), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) { + r0, _, e1 := syscall.Syscall(procCertCreateCertificateContext.Addr(), 3, uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) + context = (*CertContext)(unsafe.Pointer(r0)) + if context == nil { + err = errnoErr(e1) + } + return +} + +func CertDeleteCertificateFromStore(certContext *CertContext) (err error) { + r1, _, e1 := syscall.Syscall(procCertDeleteCertificateFromStore.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CertDuplicateCertificateContext(certContext *CertContext) (dupContext *CertContext) { 
+ r0, _, _ := syscall.Syscall(procCertDuplicateCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0) + dupContext = (*CertContext)(unsafe.Pointer(r0)) + return +} + +func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) { + r0, _, e1 := syscall.Syscall(procCertEnumCertificatesInStore.Addr(), 2, uintptr(store), uintptr(unsafe.Pointer(prevContext)), 0) + context = (*CertContext)(unsafe.Pointer(r0)) + if context == nil { + err = errnoErr(e1) + } + return +} + +func CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevCertContext *CertContext) (cert *CertContext, err error) { + r0, _, e1 := syscall.Syscall6(procCertFindCertificateInStore.Addr(), 6, uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevCertContext))) + cert = (*CertContext)(unsafe.Pointer(r0)) + if cert == nil { + err = errnoErr(e1) + } + return +} + +func CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevChainContext *CertChainContext) (certchain *CertChainContext, err error) { + r0, _, e1 := syscall.Syscall6(procCertFindChainInStore.Addr(), 6, uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevChainContext))) + certchain = (*CertChainContext)(unsafe.Pointer(r0)) + if certchain == nil { + err = errnoErr(e1) + } + return +} + +func CertFindExtension(objId *byte, countExtensions uint32, extensions *CertExtension) (ret *CertExtension) { + r0, _, _ := syscall.Syscall(procCertFindExtension.Addr(), 3, uintptr(unsafe.Pointer(objId)), uintptr(countExtensions), uintptr(unsafe.Pointer(extensions))) + ret = (*CertExtension)(unsafe.Pointer(r0)) + return +} + +func CertFreeCertificateChain(ctx *CertChainContext) { + syscall.Syscall(procCertFreeCertificateChain.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) + return +} + +func CertFreeCertificateContext(ctx *CertContext) (err error) { + r1, _, e1 := syscall.Syscall(procCertFreeCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) { + r1, _, e1 := syscall.Syscall9(procCertGetCertificateChain.Addr(), 8, uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CertGetNameString(certContext *CertContext, nameType uint32, flags uint32, typePara unsafe.Pointer, name *uint16, size uint32) (chars uint32) { + r0, _, _ := syscall.Syscall6(procCertGetNameStringW.Addr(), 6, uintptr(unsafe.Pointer(certContext)), uintptr(nameType), uintptr(flags), uintptr(typePara), uintptr(unsafe.Pointer(name)), uintptr(size)) + chars = uint32(r0) + return +} + +func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCertOpenStore.Addr(), 5, uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para), 0) + handle 
= Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { + r0, _, e1 := syscall.Syscall(procCertOpenSystemStoreW.Addr(), 2, uintptr(hprov), uintptr(unsafe.Pointer(name)), 0) + store = Handle(r0) + if store == 0 { + err = errnoErr(e1) + } + return +} + +func CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) { + r1, _, e1 := syscall.Syscall6(procCertVerifyCertificateChainPolicy.Addr(), 4, uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, parameters unsafe.Pointer, cryptProvOrNCryptKey *Handle, keySpec *uint32, callerFreeProvOrNCryptKey *bool) (err error) { + var _p0 uint32 + if *callerFreeProvOrNCryptKey { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procCryptAcquireCertificatePrivateKey.Addr(), 6, uintptr(unsafe.Pointer(cert)), uintptr(flags), uintptr(parameters), uintptr(unsafe.Pointer(cryptProvOrNCryptKey)), uintptr(unsafe.Pointer(keySpec)), uintptr(unsafe.Pointer(&_p0))) + *callerFreeProvOrNCryptKey = _p0 != 0 + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte, lenEncodedBytes uint32, flags uint32, decoded unsafe.Pointer, decodedLen *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procCryptDecodeObject.Addr(), 7, uintptr(encodingType), uintptr(unsafe.Pointer(structType)), uintptr(unsafe.Pointer(encodedBytes)), uintptr(lenEncodedBytes), uintptr(flags), uintptr(decoded), uintptr(unsafe.Pointer(decodedLen)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) { + r1, _, e1 := syscall.Syscall9(procCryptProtectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentTypeFlags uint32, expectedFormatTypeFlags uint32, flags uint32, msgAndCertEncodingType *uint32, contentType *uint32, formatType *uint32, certStore *Handle, msg *Handle, context *unsafe.Pointer) (err error) { + r1, _, e1 := syscall.Syscall12(procCryptQueryObject.Addr(), 11, uintptr(objectType), uintptr(object), uintptr(expectedContentTypeFlags), uintptr(expectedFormatTypeFlags), uintptr(flags), uintptr(unsafe.Pointer(msgAndCertEncodingType)), uintptr(unsafe.Pointer(contentType)), uintptr(unsafe.Pointer(formatType)), uintptr(unsafe.Pointer(certStore)), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(context)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) { + r1, _, e1 := syscall.Syscall9(procCryptUnprotectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), 
uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (store Handle, err error) { + r0, _, e1 := syscall.Syscall(procPFXImportCertStore.Addr(), 3, uintptr(unsafe.Pointer(pfx)), uintptr(unsafe.Pointer(password)), uintptr(flags)) + store = Handle(r0) + if store == 0 { + err = errnoErr(e1) + } + return +} + +func DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) { + r0, _, _ := syscall.Syscall(procDnsNameCompare_W.Addr(), 2, uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)), 0) + same = r0 != 0 + return +} + +func DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { + var _p0 *uint16 + _p0, status = syscall.UTF16PtrFromString(name) + if status != nil { + return + } + return _DnsQuery(_p0, qtype, options, extra, qrs, pr) +} + +func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { + r0, _, _ := syscall.Syscall6(procDnsQuery_W.Addr(), 6, uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) + if r0 != 0 { + status = syscall.Errno(r0) + } + return +} + +func DnsRecordListFree(rl *DNSRecord, freetype uint32) { + syscall.Syscall(procDnsRecordListFree.Addr(), 2, uintptr(unsafe.Pointer(rl)), uintptr(freetype), 0) + return +} + +func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) { + r0, _, _ := syscall.Syscall6(procDwmGetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) { + r0, _, _ := syscall.Syscall6(procDwmSetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { + r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { + r0, _, _ := syscall.Syscall(procGetAdaptersInfo.Addr(), 2, uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) { + r0, _, _ := syscall.Syscall(procGetBestInterfaceEx.Addr(), 2, uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetIfEntry(pIfRow *MibIfRow) (errcode error) { + r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func AssignProcessToJobObject(job Handle, process Handle) (err error) { + r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + 
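+// NOTE (editorial, illustrative — not part of the upstream generated file): the
+// wrappers in this hunk all follow the same shape. Each proc* variable appears to be
+// a lazily resolved procedure in its DLL, and each function marshals its arguments
+// into uintptr values for syscall.Syscall/Syscall6/Syscall9/Syscall12. Error handling
+// follows the API's own convention, as visible above: BOOL-returning calls treat
+// r1 == 0 as failure and wrap the saved errno with errnoErr(e1); status-code calls
+// (registry, security-info, DWM, iphlpapi) treat a non-zero r0 as the error and
+// convert it with syscall.Errno(r0); handle-returning calls report an error when the
+// result is 0 or InvalidHandle.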
+func CancelIo(s Handle) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIo.Addr(), 1, uintptr(s), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CancelIoEx(s Handle, o *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(s), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CloseHandle(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) { + r1, _, e1 := syscall.Syscall(procCreateDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateEventExW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) + handle = Handle(r0) + if handle == 0 || e1 == ERROR_ALREADY_EXISTS { + err = errnoErr(e1) + } + return +} + +func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 0) + handle = Handle(r0) + if handle == 0 || e1 == ERROR_ALREADY_EXISTS { + err = errnoErr(e1) + } + return +} + +func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateFileMappingW.Addr(), 6, uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 || e1 == ERROR_ALREADY_EXISTS { + err = errnoErr(e1) + } + return +} + +func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) + } + return +} + +func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procCreateHardLinkW.Addr(), 3, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved)) + if r1&0xff == 0 { + err = errnoErr(e1) + } + return +} + +func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, threadcnt uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt), 0, 0) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func CreateJobObject(jobAttr 
*SecurityAttributes, name *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procCreateJobObjectW.Addr(), 2, uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)), 0) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateMutexExW.Addr(), 4, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) + handle = Handle(r0) + if handle == 0 || e1 == ERROR_ALREADY_EXISTS { + err = errnoErr(e1) + } + return +} + +func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) { + var _p0 uint32 + if initialOwner { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall(procCreateMutexW.Addr(), 3, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 || e1 == ERROR_ALREADY_EXISTS { + err = errnoErr(e1) + } + return +} + +func CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) + } + return +} + +func CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procCreatePipe.Addr(), 4, uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) { + var _p0 uint32 + if inheritHandles { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall12(procCreateProcessW.Addr(), 10, uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) + if r1&0xff == 0 { + err = errnoErr(e1) + } + return +} + +func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procCreateToolhelp32Snapshot.Addr(), 2, uintptr(flags), uintptr(processId), 0) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) + } + return +} + +func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, 
uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func DeleteFile(path *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDeleteFileW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func deleteProcThreadAttributeList(attrlist *ProcThreadAttributeList) { + syscall.Syscall(procDeleteProcThreadAttributeList.Addr(), 1, uintptr(unsafe.Pointer(attrlist)), 0, 0) + return +} + +func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDeleteVolumeMountPointW.Addr(), 1, uintptr(unsafe.Pointer(volumeMountPoint)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) { + var _p0 uint32 + if bInheritHandle { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall9(procDuplicateHandle.Addr(), 7, uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ExitProcess(exitcode uint32) { + syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) + return +} + +func ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} + +func FindClose(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFindClose.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func FindCloseChangeNotification(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFindCloseChangeNotification.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func FindFirstChangeNotification(path string, watchSubtree bool, notifyFilter uint32) (handle Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(path) + if err != nil { + return + } + return _FindFirstChangeNotification(_p0, watchSubtree, notifyFilter) +} + +func _FindFirstChangeNotification(path *uint16, watchSubtree bool, notifyFilter uint32) (handle Handle, err error) { + var _p1 uint32 + if watchSubtree { + _p1 = 1 + } + r0, _, e1 := syscall.Syscall(procFindFirstChangeNotificationW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(_p1), uintptr(notifyFilter)) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) + } + return +} + +func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procFindFirstFileW.Addr(), 2, 
uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)), 0) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) + } + return +} + +func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procFindFirstVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) + } + return +} + +func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procFindFirstVolumeW.Addr(), 2, uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength), 0) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) + } + return +} + +func FindNextChangeNotification(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFindNextChangeNotification.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func findNextFile1(handle Handle, data *win32finddata1) (err error) { + r1, _, e1 := syscall.Syscall(procFindNextFileW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procFindNextVolumeMountPointW.Addr(), 3, uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procFindNextVolumeW.Addr(), 3, uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, err error) { + r0, _, e1 := syscall.Syscall(procFindResourceW.Addr(), 3, uintptr(module), uintptr(name), uintptr(resType)) + resInfo = Handle(r0) + if resInfo == 0 { + err = errnoErr(e1) + } + return +} + +func FindVolumeClose(findVolume Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFindVolumeClose.Addr(), 1, uintptr(findVolume), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFindVolumeMountPointClose.Addr(), 1, uintptr(findVolumeMountPoint), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func FlushFileBuffers(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFlushFileBuffers.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func FlushViewOfFile(addr uintptr, length uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procFlushViewOfFile.Addr(), 2, uintptr(addr), uintptr(length), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) { + var _p0 *uint16 + if len(buf) > 0 { + _p0 = &buf[0] + } + r0, _, e1 := syscall.Syscall9(procFormatMessageW.Addr(), 7, uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)), 0, 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} + 
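+// Illustrative usage sketch (editorial; assumes the exported golang.org/x/sys/windows
+// API and standard Win32 constant names, none of which are defined in this hunk):
+//
+//	buf := make([]uint16, 300)
+//	flags := uint32(windows.FORMAT_MESSAGE_FROM_SYSTEM | windows.FORMAT_MESSAGE_IGNORE_INSERTS)
+//	n, err := windows.FormatMessage(flags, 0, uint32(windows.ERROR_FILE_NOT_FOUND), 0, buf, nil)
+//	if err == nil {
+//		// The first n UTF-16 code units hold the system's error message text.
+//		msg := windows.UTF16ToString(buf[:n])
+//		_ = msg
+//	}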
+func FreeEnvironmentStrings(envs *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procFreeEnvironmentStringsW.Addr(), 1, uintptr(unsafe.Pointer(envs)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func FreeLibrary(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFreeLibrary.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGenerateConsoleCtrlEvent.Addr(), 2, uintptr(ctrlEvent), uintptr(processGroupID), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetACP() (acp uint32) { + r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0) + acp = uint32(r0) + return +} + +func GetActiveProcessorCount(groupNumber uint16) (ret uint32) { + r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) + ret = uint32(r0) + return +} + +func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { + r1, _, e1 := syscall.Syscall(procGetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetCommandLine() (cmd *uint16) { + r0, _, _ := syscall.Syscall(procGetCommandLineW.Addr(), 0, 0, 0, 0) + cmd = (*uint16)(unsafe.Pointer(r0)) + return +} + +func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetComputerName(buf *uint16, n *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetComputerNameW.Addr(), 2, uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetConsoleMode(console Handle, mode *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { + r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetCurrentDirectoryW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} + +func GetCurrentProcessId() (pid uint32) { + r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0) + pid = uint32(r0) + return +} + +func GetCurrentThreadId() (id uint32) { + r0, _, _ := syscall.Syscall(procGetCurrentThreadId.Addr(), 0, 0, 0, 0) + id = uint32(r0) + return +} + +func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) { + r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetDriveType(rootPathName *uint16) (driveType uint32) { + r0, _, _ := syscall.Syscall(procGetDriveTypeW.Addr(), 
1, uintptr(unsafe.Pointer(rootPathName)), 0, 0) + driveType = uint32(r0) + return +} + +func GetEnvironmentStrings() (envs *uint16, err error) { + r0, _, e1 := syscall.Syscall(procGetEnvironmentStringsW.Addr(), 0, 0, 0, 0) + envs = (*uint16)(unsafe.Pointer(r0)) + if envs == nil { + err = errnoErr(e1) + } + return +} + +func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetEnvironmentVariableW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} + +func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetExitCodeProcess.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(exitcode)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) { + r1, _, e1 := syscall.Syscall(procGetFileAttributesExW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetFileAttributes(name *uint16) (attrs uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetFileAttributesW.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + attrs = uint32(r0) + if attrs == INVALID_FILE_ATTRIBUTES { + err = errnoErr(e1) + } + return +} + +func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) { + r1, _, e1 := syscall.Syscall(procGetFileInformationByHandle.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetFileType(filehandle Handle) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} + +func GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall6(procGetFinalPathNameByHandleW.Addr(), 4, uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags), 0, 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} + +func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall6(procGetFullPathNameW.Addr(), 4, uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)), 0, 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} + +func GetLargePageMinimum() (size uintptr) { + r0, _, _ := syscall.Syscall(procGetLargePageMinimum.Addr(), 0, 0, 0, 0) + size = uintptr(r0) + return +} + +func GetLastError() (lasterr error) { + r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0) + if r0 != 0 { + lasterr = syscall.Errno(r0) + } + return +} + +func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetLogicalDriveStringsW.Addr(), 2, uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)), 0) + n = uint32(r0) + if n == 0 { + err = 
errnoErr(e1) + } + return +} + +func GetLogicalDrives() (drivesBitMask uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetLogicalDrives.Addr(), 0, 0, 0, 0) + drivesBitMask = uint32(r0) + if drivesBitMask == 0 { + err = errnoErr(e1) + } + return +} + +func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetLongPathNameW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} + +func GetMaximumProcessorCount(groupNumber uint16) (ret uint32) { + r0, _, _ := syscall.Syscall(procGetMaximumProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) + ret = uint32(r0) + return +} + +func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} + +func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) { + r1, _, e1 := syscall.Syscall(procGetModuleHandleExW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) { + var _p0 uint32 + if wait { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procGetOverlappedResult.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetPriorityClass(process Handle) (ret uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetPriorityClass.Addr(), 1, uintptr(process), 0, 0) + ret = uint32(r0) + if ret == 0 { + err = errnoErr(e1) + } + return +} + +func GetProcAddress(module Handle, procname string) (proc uintptr, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(procname) + if err != nil { + return + } + return _GetProcAddress(module, _p0) +} + +func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) { + r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), uintptr(unsafe.Pointer(procname)), 0) + proc = uintptr(r0) + if proc == 0 { + err = errnoErr(e1) + } + return +} + +func GetProcessId(process Handle) (id uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetProcessId.Addr(), 1, uintptr(process), 0, 0) + id = uint32(r0) + 
if id == 0 { + err = errnoErr(e1) + } + return +} + +func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetProcessPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetProcessShutdownParameters.Addr(), 2, uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) { + r1, _, e1 := syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) { + syscall.Syscall6(procGetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags)), 0, 0) + return +} + +func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overlapped **Overlapped, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetShortPathNameW.Addr(), 3, uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} + +func GetStartupInfo(startupInfo *StartupInfo) (err error) { + r1, _, e1 := syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetStdHandle(stdhandle uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procGetStdHandle.Addr(), 1, uintptr(stdhandle), 0, 0) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) + } + return +} + +func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetSystemDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + len = uint32(r0) + if len == 0 { + err = errnoErr(e1) + } + return +} + +func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetSystemPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetSystemTimeAsFileTime(time *Filetime) { + syscall.Syscall(procGetSystemTimeAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) + return +} + +func GetSystemTimePreciseAsFileTime(time *Filetime) { + 
syscall.Syscall(procGetSystemTimePreciseAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) + return +} + +func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetSystemWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + len = uint32(r0) + if len == 0 { + err = errnoErr(e1) + } + return +} + +func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetTempPathW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} + +func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetThreadPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getTickCount64() (ms uint64) { + r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0) + ms = uint64(r0) + return +} + +func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetTimeZoneInformation.Addr(), 1, uintptr(unsafe.Pointer(tzi)), 0, 0) + rc = uint32(r0) + if rc == 0xffffffff { + err = errnoErr(e1) + } + return +} + +func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetUserPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetVersion() (ver uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetVersion.Addr(), 0, 0, 0, 0) + ver = uint32(r0) + if ver == 0 { + err = errnoErr(e1) + } + return +} + +func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetVolumeInformationByHandleW.Addr(), 8, uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetVolumeInformationW.Addr(), 8, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) { + r1, _, e1 := 
syscall.Syscall(procGetVolumeNameForVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetVolumePathNameW.Addr(), 3, uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetVolumePathNamesForVolumeNameW.Addr(), 4, uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + len = uint32(r0) + if len == 0 { + err = errnoErr(e1) + } + return +} + +func initializeProcThreadAttributeList(attrlist *ProcThreadAttributeList, attrcount uint32, flags uint32, size *uintptr) (err error) { + r1, _, e1 := syscall.Syscall6(procInitializeProcThreadAttributeList.Addr(), 4, uintptr(unsafe.Pointer(attrlist)), uintptr(attrcount), uintptr(flags), uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func IsWow64Process(handle Handle, isWow64 *bool) (err error) { + var _p0 uint32 + if *isWow64 { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall(procIsWow64Process.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(&_p0)), 0) + *isWow64 = _p0 != 0 + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func IsWow64Process2(handle Handle, processMachine *uint16, nativeMachine *uint16) (err error) { + err = procIsWow64Process2.Find() + if err != nil { + return + } + r1, _, e1 := syscall.Syscall(procIsWow64Process2.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(libname) + if err != nil { + return + } + return _LoadLibraryEx(_p0, zero, flags) +} + +func _LoadLibraryEx(libname *uint16, zero Handle, flags uintptr) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procLoadLibraryExW.Addr(), 3, uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags)) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func LoadLibrary(libname string) (handle Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(libname) + if err != nil { + return + } + return _LoadLibrary(_p0) +} + +func _LoadLibrary(libname *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procLoadLibraryW.Addr(), 1, uintptr(unsafe.Pointer(libname)), 0, 0) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func LoadResource(module Handle, resInfo Handle) (resData Handle, err error) { + r0, _, e1 := syscall.Syscall(procLoadResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) + resData = Handle(r0) + if resData == 0 { + err = errnoErr(e1) + } + return +} + +func LocalAlloc(flags 
uint32, length uint32) (ptr uintptr, err error) { + r0, _, e1 := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(length), 0) + ptr = uintptr(r0) + if ptr == 0 { + err = errnoErr(e1) + } + return +} + +func LocalFree(hmem Handle) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procLocalFree.Addr(), 1, uintptr(hmem), 0, 0) + handle = Handle(r0) + if handle != 0 { + err = errnoErr(e1) + } + return +} + +func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func LockResource(resData Handle) (addr uintptr, err error) { + r0, _, e1 := syscall.Syscall(procLockResource.Addr(), 1, uintptr(resData), 0, 0) + addr = uintptr(r0) + if addr == 0 { + err = errnoErr(e1) + } + return +} + +func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) { + r0, _, e1 := syscall.Syscall6(procMapViewOfFile.Addr(), 5, uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length), 0) + addr = uintptr(r0) + if addr == 0 { + err = errnoErr(e1) + } + return +} + +func Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procModule32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procModule32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func MoveFile(from *uint16, to *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procMoveFileW.Addr(), 2, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) { + r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) + nwrite = int32(r0) + if nwrite == 0 { + err = errnoErr(e1) + } + return +} + +func OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall(procOpenEventW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall(procOpenMutexW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func OpenProcess(desiredAccess 
uint32, inheritHandle bool, processId uint32) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(processId)) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall(procOpenThread.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(threadId)) + handle = Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } + return +} + +func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procPostQueuedCompletionStatus.Addr(), 4, uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procProcess32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procProcess32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procProcessIdToSessionId.Addr(), 2, uintptr(pid), uintptr(unsafe.Pointer(sessionid)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func PulseEvent(event Handle) (err error) { + r1, _, e1 := syscall.Syscall(procPulseEvent.Addr(), 1, uintptr(event), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} + +func QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryFullProcessImageNameW.Addr(), 4, uintptr(proc), uintptr(flags), uintptr(unsafe.Pointer(exeName)), uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobObjectInformation uintptr, JobObjectInformationLength uint32, retlen *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) { + r1, _, e1 := syscall.Syscall6(procReadConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, 
completionRoutine uintptr) (err error) { + var _p0 uint32 + if watchSubTree { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall9(procReadDirectoryChangesW.Addr(), 8, uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + r1, _, e1 := syscall.Syscall6(procReadFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesRead *uintptr) (err error) { + r1, _, e1 := syscall.Syscall6(procReadProcessMemory.Addr(), 5, uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesRead)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ReleaseMutex(mutex Handle) (err error) { + r1, _, e1 := syscall.Syscall(procReleaseMutex.Addr(), 1, uintptr(mutex), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func RemoveDirectory(path *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procRemoveDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ResetEvent(event Handle) (err error) { + r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ResumeThread(thread Handle) (ret uint32, err error) { + r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0) + ret = uint32(r0) + if ret == 0xffffffff { + err = errnoErr(e1) + } + return +} + +func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setConsoleCursorPosition(console Handle, position uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetConsoleMode(console Handle, mode uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(console), uintptr(mode), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetCurrentDirectory(path *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetDefaultDllDirectories(directoryFlags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetDefaultDllDirectories.Addr(), 1, uintptr(directoryFlags), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetDllDirectory(path string) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(path) + if err != nil { + return + } + return _SetDllDirectory(_p0) +} + +func _SetDllDirectory(path *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetDllDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetEndOfFile(handle Handle) (err error) { + 
r1, _, e1 := syscall.Syscall(procSetEndOfFile.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetErrorMode(mode uint32) (ret uint32) { + r0, _, _ := syscall.Syscall(procSetErrorMode.Addr(), 1, uintptr(mode), 0, 0) + ret = uint32(r0) + return +} + +func SetEvent(event Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetEvent.Addr(), 1, uintptr(event), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetFileAttributes(name *uint16, attrs uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileAttributesW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(attrs), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(handle), uintptr(flags), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inBufferLen uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) { + r0, _, e1 := syscall.Syscall6(procSetFilePointer.Addr(), 4, uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence), 0, 0) + newlowoffset = uint32(r0) + if newlowoffset == 0xffffffff { + err = errnoErr(e1) + } + return +} + +func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { + r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) { + r0, _, e1 := syscall.Syscall6(procSetInformationJobObject.Addr(), 4, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), 0, 0) + ret = int(r0) + if ret == 0 { + err = errnoErr(e1) + } + return +} + +func SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetNamedPipeHandleState.Addr(), 4, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetPriorityClass(process Handle, priorityClass uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetPriorityClass.Addr(), 2, uintptr(process), uintptr(priorityClass), 0) + if r1 == 0 { + err 
= errnoErr(e1) + } + return +} + +func SetProcessPriorityBoost(process Handle, disable bool) (err error) { + var _p0 uint32 + if disable { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall(procSetProcessPriorityBoost.Addr(), 2, uintptr(process), uintptr(_p0), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetProcessShutdownParameters(level uint32, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetProcessShutdownParameters.Addr(), 2, uintptr(level), uintptr(flags), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetStdHandle(stdhandle uint32, handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetVolumeLabelW.Addr(), 2, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetVolumeMountPointW.Addr(), 2, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SizeofResource(module Handle, resInfo Handle) (size uint32, err error) { + r0, _, e1 := syscall.Syscall(procSizeofResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) + size = uint32(r0) + if size == 0 { + err = errnoErr(e1) + } + return +} + +func SleepEx(milliseconds uint32, alertable bool) (ret uint32) { + var _p0 uint32 + if alertable { + _p0 = 1 + } + r0, _, _ := syscall.Syscall(procSleepEx.Addr(), 2, uintptr(milliseconds), uintptr(_p0), 0) + ret = uint32(r0) + return +} + +func TerminateJobObject(job Handle, exitCode uint32) (err error) { + r1, _, e1 := syscall.Syscall(procTerminateJobObject.Addr(), 2, uintptr(job), uintptr(exitCode), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func TerminateProcess(handle Handle, exitcode uint32) (err error) { + r1, _, e1 := syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procThread32First.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procThread32Next.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func UnmapViewOfFile(addr uintptr) (err 
error) { + r1, _, e1 := syscall.Syscall(procUnmapViewOfFile.Addr(), 1, uintptr(addr), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, attr uintptr, value unsafe.Pointer, size uintptr, prevvalue unsafe.Pointer, returnedsize *uintptr) (err error) { + r1, _, e1 := syscall.Syscall9(procUpdateProcThreadAttribute.Addr(), 7, uintptr(unsafe.Pointer(attrlist)), uintptr(flags), uintptr(attr), uintptr(value), uintptr(size), uintptr(prevvalue), uintptr(unsafe.Pointer(returnedsize)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) { + r0, _, e1 := syscall.Syscall6(procVirtualAlloc.Addr(), 4, uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect), 0, 0) + value = uintptr(r0) + if value == 0 { + err = errnoErr(e1) + } + return +} + +func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) { + r1, _, e1 := syscall.Syscall(procVirtualFree.Addr(), 3, uintptr(address), uintptr(size), uintptr(freetype)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func VirtualLock(addr uintptr, length uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procVirtualLock.Addr(), 2, uintptr(addr), uintptr(length), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procVirtualProtect.Addr(), 4, uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func VirtualProtectEx(process Handle, address uintptr, size uintptr, newProtect uint32, oldProtect *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procVirtualProtectEx.Addr(), 5, uintptr(process), uintptr(address), uintptr(size), uintptr(newProtect), uintptr(unsafe.Pointer(oldProtect)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procVirtualQuery.Addr(), 3, uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func VirtualQueryEx(process Handle, address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) { + r1, _, e1 := syscall.Syscall6(procVirtualQueryEx.Addr(), 4, uintptr(process), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func VirtualUnlock(addr uintptr, length uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procVirtualUnlock.Addr(), 2, uintptr(addr), uintptr(length), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func WTSGetActiveConsoleSessionId() (sessionID uint32) { + r0, _, _ := syscall.Syscall(procWTSGetActiveConsoleSessionId.Addr(), 0, 0, 0, 0) + sessionID = uint32(r0) + return +} + +func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) { + var _p0 uint32 + if waitAll { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall6(procWaitForMultipleObjects.Addr(), 4, uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds), 0, 0) + event = uint32(r0) + if event == 0xffffffff { + err = errnoErr(e1) + } + return +} + +func WaitForSingleObject(handle Handle, waitMilliseconds uint32) 
(event uint32, err error) { + r0, _, e1 := syscall.Syscall(procWaitForSingleObject.Addr(), 2, uintptr(handle), uintptr(waitMilliseconds), 0) + event = uint32(r0) + if event == 0xffffffff { + err = errnoErr(e1) + } + return +} + +func WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) { + r1, _, e1 := syscall.Syscall6(procWriteConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + r1, _, e1 := syscall.Syscall6(procWriteFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func WriteProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesWritten *uintptr) (err error) { + r1, _, e1 := syscall.Syscall6(procWriteProcessMemory.Addr(), 5, uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesWritten)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall9(procAcceptEx.Addr(), 8, uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) { + syscall.Syscall9(procGetAcceptExSockaddrs.Addr(), 8, uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)), 0) + return +} + +func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procTransmitFile.Addr(), 7, uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func NetApiBufferFree(buf *byte) (neterr error) { + r0, _, _ := syscall.Syscall(procNetApiBufferFree.Addr(), 1, uintptr(unsafe.Pointer(buf)), 0, 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + +func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) { + r0, _, _ := syscall.Syscall(procNetGetJoinInformation.Addr(), 3, uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType))) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + +func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) { + r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), 
uintptr(unsafe.Pointer(buf)), 0, 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + +func NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, allocationSize *int64, attributes uint32, share uint32, disposition uint32, options uint32, eabuffer uintptr, ealength uint32) (ntstatus error) { + r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(attributes), uintptr(share), uintptr(disposition), uintptr(options), uintptr(eabuffer), uintptr(ealength), 0) + if r0 != 0 { + ntstatus = NTStatus(r0) + } + return +} + +func NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (ntstatus error) { + r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) + if r0 != 0 { + ntstatus = NTStatus(r0) + } + return +} + +func NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32, retLen *uint32) (ntstatus error) { + r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), uintptr(unsafe.Pointer(retLen)), 0) + if r0 != 0 { + ntstatus = NTStatus(r0) + } + return +} + +func NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32, retLen *uint32) (ntstatus error) { + r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen), uintptr(unsafe.Pointer(retLen)), 0, 0) + if r0 != 0 { + ntstatus = NTStatus(r0) + } + return +} + +func NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte, inBufferLen uint32, class uint32) (ntstatus error) { + r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), uintptr(class), 0) + if r0 != 0 { + ntstatus = NTStatus(r0) + } + return +} + +func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32) (ntstatus error) { + r0, _, _ := syscall.Syscall6(procNtSetInformationProcess.Addr(), 4, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), 0, 0) + if r0 != 0 { + ntstatus = NTStatus(r0) + } + return +} + +func NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32) (ntstatus error) { + r0, _, _ := syscall.Syscall(procNtSetSystemInformation.Addr(), 3, uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen)) + if r0 != 0 { + ntstatus = NTStatus(r0) + } + return +} + +func RtlAddFunctionTable(functionTable *RUNTIME_FUNCTION, entryCount uint32, baseAddress uintptr) (ret bool) { + r0, _, _ := syscall.Syscall(procRtlAddFunctionTable.Addr(), 3, uintptr(unsafe.Pointer(functionTable)), uintptr(entryCount), uintptr(baseAddress)) + ret = r0 != 0 + return +} 
+ +func RtlDefaultNpAcl(acl **ACL) (ntstatus error) { + r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(acl)), 0, 0) + if r0 != 0 { + ntstatus = NTStatus(r0) + } + return +} + +func RtlDeleteFunctionTable(functionTable *RUNTIME_FUNCTION) (ret bool) { + r0, _, _ := syscall.Syscall(procRtlDeleteFunctionTable.Addr(), 1, uintptr(unsafe.Pointer(functionTable)), 0, 0) + ret = r0 != 0 + return +} + +func RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) { + r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0) + if r0 != 0 { + ntstatus = NTStatus(r0) + } + return +} + +func RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) { + r0, _, _ := syscall.Syscall6(procRtlDosPathNameToRelativeNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0) + if r0 != 0 { + ntstatus = NTStatus(r0) + } + return +} + +func RtlGetCurrentPeb() (peb *PEB) { + r0, _, _ := syscall.Syscall(procRtlGetCurrentPeb.Addr(), 0, 0, 0, 0) + peb = (*PEB)(unsafe.Pointer(r0)) + return +} + +func rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) { + syscall.Syscall(procRtlGetNtVersionNumbers.Addr(), 3, uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber))) + return +} + +func rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) { + r0, _, _ := syscall.Syscall(procRtlGetVersion.Addr(), 1, uintptr(unsafe.Pointer(info)), 0, 0) + if r0 != 0 { + ntstatus = NTStatus(r0) + } + return +} + +func RtlInitString(destinationString *NTString, sourceString *byte) { + syscall.Syscall(procRtlInitString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0) + return +} + +func RtlInitUnicodeString(destinationString *NTUnicodeString, sourceString *uint16) { + syscall.Syscall(procRtlInitUnicodeString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0) + return +} + +func rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) { + r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(ntstatus), 0, 0) + ret = syscall.Errno(r0) + return +} + +func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) { + r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func coCreateGuid(pguid *GUID) (ret error) { + r0, _, _ := syscall.Syscall(procCoCreateGuid.Addr(), 1, uintptr(unsafe.Pointer(pguid)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable **uintptr) (ret error) { + r0, _, _ := syscall.Syscall6(procCoGetObject.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bindOpts)), uintptr(unsafe.Pointer(guid)), uintptr(unsafe.Pointer(functionTable)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func CoInitializeEx(reserved uintptr, coInit uint32) (ret error) { 
+ r0, _, _ := syscall.Syscall(procCoInitializeEx.Addr(), 2, uintptr(reserved), uintptr(coInit), 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func CoTaskMemFree(address unsafe.Pointer) { + syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(address), 0, 0) + return +} + +func CoUninitialize() { + syscall.Syscall(procCoUninitialize.Addr(), 0, 0, 0, 0) + return +} + +func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) { + r0, _, _ := syscall.Syscall(procStringFromGUID2.Addr(), 3, uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax)) + chars = int32(r0) + return +} + +func EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procEnumProcessModules.Addr(), 4, uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *uint32, filterFlag uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procEnumProcessModulesEx.Addr(), 5, uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), uintptr(filterFlag), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) { + var _p0 *uint32 + if len(processIds) > 0 { + _p0 = &processIds[0] + } + r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(processIds)), uintptr(unsafe.Pointer(bytesReturned))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetModuleBaseNameW.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(baseName)), uintptr(size), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetModuleFileNameExW.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetModuleInformation.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(modinfo)), uintptr(cb), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func QueryWorkingSetEx(process Handle, pv uintptr, cb uint32) (err error) { + r1, _, e1 := syscall.Syscall(procQueryWorkingSetEx.Addr(), 3, uintptr(process), uintptr(pv), uintptr(cb)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SubscribeServiceChangeNotifications(service Handle, eventType uint32, callback uintptr, callbackCtx uintptr, subscription *uintptr) (ret error) { + ret = procSubscribeServiceChangeNotifications.Find() + if ret != nil { + return + } + r0, _, _ := syscall.Syscall6(procSubscribeServiceChangeNotifications.Addr(), 5, uintptr(service), uintptr(eventType), uintptr(callback), uintptr(callbackCtx), uintptr(unsafe.Pointer(subscription)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func UnsubscribeServiceChangeNotifications(subscription uintptr) (err error) { + err = procUnsubscribeServiceChangeNotifications.Find() + if err != nil { + return + } + 
syscall.Syscall(procUnsubscribeServiceChangeNotifications.Addr(), 1, uintptr(subscription), 0, 0) + return +} + +func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetUserNameExW.Addr(), 3, uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize))) + if r1&0xff == 0 { + err = errnoErr(e1) + } + return +} + +func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procTranslateNameW.Addr(), 5, uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)), 0) + if r1&0xff == 0 { + err = errnoErr(e1) + } + return +} + +func SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiBuildDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiCallClassInstaller.Addr(), 3, uintptr(installFunction), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiCancelDriverInfoSearch.Addr(), 1, uintptr(deviceInfoSet), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGuidListSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) { + r1, _, e1 := syscall.Syscall6(procSetupDiClassGuidsFromNameExW.Addr(), 6, uintptr(unsafe.Pointer(className)), uintptr(unsafe.Pointer(classGuidList)), uintptr(classGuidListSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) { + r1, _, e1 := syscall.Syscall6(procSetupDiClassNameFromGuidExW.Addr(), 6, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(className)), uintptr(classNameSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineName *uint16, reserved uintptr) (handle DevInfo, err error) { + r0, _, e1 := syscall.Syscall6(procSetupDiCreateDeviceInfoListExW.Addr(), 4, uintptr(unsafe.Pointer(classGUID)), uintptr(hwndParent), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0) + handle = DevInfo(r0) + if handle == DevInfo(InvalidHandle) { + err = errnoErr(e1) + } + return +} + +func setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUID *GUID, DeviceDescription *uint16, hwndParent uintptr, CreationFlags DICD, deviceInfoData *DevInfoData) (err error) { + r1, _, e1 := syscall.Syscall9(procSetupDiCreateDeviceInfoW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(DeviceName)), uintptr(unsafe.Pointer(classGUID)), 
uintptr(unsafe.Pointer(DeviceDescription)), uintptr(hwndParent), uintptr(CreationFlags), uintptr(unsafe.Pointer(deviceInfoData)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiDestroyDeviceInfoList.Addr(), 1, uintptr(deviceInfoSet), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiDestroyDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfoData *DevInfoData) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiEnumDeviceInfo.Addr(), 3, uintptr(deviceInfoSet), uintptr(memberIndex), uintptr(unsafe.Pointer(deviceInfoData))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex uint32, driverInfoData *DrvInfoData) (err error) { + r1, _, e1 := syscall.Syscall6(procSetupDiEnumDriverInfoW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType), uintptr(memberIndex), uintptr(unsafe.Pointer(driverInfoData)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintptr, Flags DIGCF, deviceInfoSet DevInfo, machineName *uint16, reserved uintptr) (handle DevInfo, err error) { + r0, _, e1 := syscall.Syscall9(procSetupDiGetClassDevsExW.Addr(), 7, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(Enumerator)), uintptr(hwndParent), uintptr(Flags), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0) + handle = DevInfo(r0) + if handle == DevInfo(InvalidHandle) { + err = errnoErr(e1) + } + return +} + +func SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetupDiGetClassInstallParamsW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), uintptr(unsafe.Pointer(requiredSize)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailData *DevInfoListDetailData) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInfoListDetailW.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoSetDetailData)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, instanceId *uint16, instanceIdSize uint32, instanceIdRequiredSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetupDiGetDeviceInstanceIdW.Addr(), 5, 
uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(instanceId)), uintptr(instanceIdSize), uintptr(unsafe.Pointer(instanceIdRequiredSize)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, propertyKey *DEVPROPKEY, propertyType *DEVPROPTYPE, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procSetupDiGetDevicePropertyW.Addr(), 8, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(flags), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyRegDataType *uint32, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procSetupDiGetDeviceRegistryPropertyW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyRegDataType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData, driverInfoDetailData *DrvInfoDetailData, driverInfoDetailDataSize uint32, requiredSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetupDiGetDriverInfoDetailW.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)), uintptr(unsafe.Pointer(driverInfoDetailData)), uintptr(driverInfoDetailDataSize), uintptr(unsafe.Pointer(requiredSize))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (key Handle, err error) { + r0, _, e1 := syscall.Syscall6(procSetupDiOpenDevRegKey.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(Scope), uintptr(HwProfile), uintptr(KeyType), uintptr(samDesired)) + key = Handle(r0) + if key == InvalidHandle { + err = errnoErr(e1) + } + return +} + +func SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetupDiSetClassInstallParamsW.Addr(), 4, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), 
0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiSetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffer *byte, propertyBufferSize uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetupDiSetDeviceRegistryPropertyW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) { + r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procSetupUninstallOEMInfW.Addr(), 3, uintptr(unsafe.Pointer(infFileName)), uintptr(flags), uintptr(reserved)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) { + r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0) + argv = (*[8192]*[8192]uint16)(unsafe.Pointer(r0)) + if argv == nil { + err = errnoErr(e1) + } + return +} + +func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) { + r0, _, _ := syscall.Syscall6(procSHGetKnownFolderPath.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) { + r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd)) + if r1 <= 32 { + err = errnoErr(e1) + } + return +} + +func EnumChildWindows(hwnd HWND, enumFunc uintptr, param unsafe.Pointer) { + syscall.Syscall(procEnumChildWindows.Addr(), 3, uintptr(hwnd), uintptr(enumFunc), uintptr(param)) + return +} + +func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) { + r1, _, e1 := syscall.Syscall(procEnumWindows.Addr(), 2, uintptr(enumFunc), uintptr(param), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ExitWindowsEx(flags uint32, reason uint32) (err error) { + r1, _, e1 := syscall.Syscall(procExitWindowsEx.Addr(), 2, uintptr(flags), uintptr(reason), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetClassName(hwnd HWND, className 
*uint16, maxCount int32) (copied int32, err error) { + r0, _, e1 := syscall.Syscall(procGetClassNameW.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount)) + copied = int32(r0) + if copied == 0 { + err = errnoErr(e1) + } + return +} + +func GetDesktopWindow() (hwnd HWND) { + r0, _, _ := syscall.Syscall(procGetDesktopWindow.Addr(), 0, 0, 0, 0) + hwnd = HWND(r0) + return +} + +func GetForegroundWindow() (hwnd HWND) { + r0, _, _ := syscall.Syscall(procGetForegroundWindow.Addr(), 0, 0, 0, 0) + hwnd = HWND(r0) + return +} + +func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) { + r1, _, e1 := syscall.Syscall(procGetGUIThreadInfo.Addr(), 2, uintptr(thread), uintptr(unsafe.Pointer(info)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetShellWindow() (shellWindow HWND) { + r0, _, _ := syscall.Syscall(procGetShellWindow.Addr(), 0, 0, 0, 0) + shellWindow = HWND(r0) + return +} + +func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetWindowThreadProcessId.Addr(), 2, uintptr(hwnd), uintptr(unsafe.Pointer(pid)), 0) + tid = uint32(r0) + if tid == 0 { + err = errnoErr(e1) + } + return +} + +func IsWindow(hwnd HWND) (isWindow bool) { + r0, _, _ := syscall.Syscall(procIsWindow.Addr(), 1, uintptr(hwnd), 0, 0) + isWindow = r0 != 0 + return +} + +func IsWindowUnicode(hwnd HWND) (isUnicode bool) { + r0, _, _ := syscall.Syscall(procIsWindowUnicode.Addr(), 1, uintptr(hwnd), 0, 0) + isUnicode = r0 != 0 + return +} + +func IsWindowVisible(hwnd HWND) (isVisible bool) { + r0, _, _ := syscall.Syscall(procIsWindowVisible.Addr(), 1, uintptr(hwnd), 0, 0) + isVisible = r0 != 0 + return +} + +func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { + r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) + ret = int32(r0) + if ret == 0 { + err = errnoErr(e1) + } + return +} + +func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) { + var _p0 uint32 + if inheritExisting { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall(procCreateEnvironmentBlock.Addr(), 3, uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func DestroyEnvironmentBlock(block *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDestroyEnvironmentBlock.Addr(), 1, uintptr(unsafe.Pointer(block)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetUserProfileDirectoryW.Addr(), 3, uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetFileVersionInfoSize(filename string, zeroHandle *Handle) (bufSize uint32, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(filename) + if err != nil { + return + } + return _GetFileVersionInfoSize(_p0, zeroHandle) +} + +func _GetFileVersionInfoSize(filename *uint16, zeroHandle *Handle) (bufSize uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetFileVersionInfoSizeW.Addr(), 2, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(zeroHandle)), 0) + bufSize = uint32(r0) + if bufSize == 0 { + err = errnoErr(e1) + } + return +} + +func GetFileVersionInfo(filename string, handle uint32, bufSize uint32, buffer 
unsafe.Pointer) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(filename) + if err != nil { + return + } + return _GetFileVersionInfo(_p0, handle, bufSize, buffer) +} + +func _GetFileVersionInfo(filename *uint16, handle uint32, bufSize uint32, buffer unsafe.Pointer) (err error) { + r1, _, e1 := syscall.Syscall6(procGetFileVersionInfoW.Addr(), 4, uintptr(unsafe.Pointer(filename)), uintptr(handle), uintptr(bufSize), uintptr(buffer), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func VerQueryValue(block unsafe.Pointer, subBlock string, pointerToBufferPointer unsafe.Pointer, bufSize *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(subBlock) + if err != nil { + return + } + return _VerQueryValue(block, _p0, pointerToBufferPointer, bufSize) +} + +func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPointer unsafe.Pointer, bufSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procVerQueryValueW.Addr(), 4, uintptr(block), uintptr(unsafe.Pointer(subBlock)), uintptr(pointerToBufferPointer), uintptr(unsafe.Pointer(bufSize)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) { + r0, _, _ := syscall.Syscall(procWinVerifyTrustEx.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data))) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func FreeAddrInfoW(addrinfo *AddrinfoW) { + syscall.Syscall(procFreeAddrInfoW.Addr(), 1, uintptr(unsafe.Pointer(addrinfo)), 0, 0) + return +} + +func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) { + r0, _, _ := syscall.Syscall6(procGetAddrInfoW.Addr(), 4, uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)), 0, 0) + if r0 != 0 { + sockerr = syscall.Errno(r0) + } + return +} + +func WSACleanup() (err error) { + r1, _, e1 := syscall.Syscall(procWSACleanup.Addr(), 0, 0, 0, 0) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) { + r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength))) + n = int32(r0) + if n == -1 { + err = errnoErr(e1) + } + return +} + +func WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { + var _p0 uint32 + if wait { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { + r1, _, e1 := syscall.Syscall9(procWSAIoctl.Addr(), 9, uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) (err error) { + r1, _, e1 := 
syscall.Syscall(procWSALookupServiceBeginW.Addr(), 3, uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle))) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func WSALookupServiceEnd(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procWSALookupServiceEnd.Addr(), 1, uintptr(handle), 0, 0) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WSAQUERYSET) (err error) { + r1, _, e1 := syscall.Syscall6(procWSALookupServiceNextW.Addr(), 4, uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet)), 0, 0) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSARecvFrom.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSASend.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSASendTo.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procWSASocketW.Addr(), 6, uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protoInfo)), uintptr(group), uintptr(flags)) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) + } + return +} + +func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { + r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0) + if r0 != 0 { + sockerr = syscall.Errno(r0) + } + return +} + +func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == 
socket_error { + err = errnoErr(e1) + } + return +} + +func Closesocket(s Handle) (err error) { + r1, _, e1 := syscall.Syscall(procclosesocket.Addr(), 1, uintptr(s), 0, 0) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func connect(s Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procconnect.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func GetHostByName(name string) (h *Hostent, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(name) + if err != nil { + return + } + return _GetHostByName(_p0) +} + +func _GetHostByName(name *byte) (h *Hostent, err error) { + r0, _, e1 := syscall.Syscall(procgethostbyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + h = (*Hostent)(unsafe.Pointer(r0)) + if h == nil { + err = errnoErr(e1) + } + return +} + +func getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { + r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func GetProtoByName(name string) (p *Protoent, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(name) + if err != nil { + return + } + return _GetProtoByName(_p0) +} + +func _GetProtoByName(name *byte) (p *Protoent, err error) { + r0, _, e1 := syscall.Syscall(procgetprotobyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + p = (*Protoent)(unsafe.Pointer(r0)) + if p == nil { + err = errnoErr(e1) + } + return +} + +func GetServByName(name string, proto string) (s *Servent, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(name) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(proto) + if err != nil { + return + } + return _GetServByName(_p0, _p1) +} + +func _GetServByName(name *byte, proto *byte) (s *Servent, err error) { + r0, _, e1 := syscall.Syscall(procgetservbyname.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)), 0) + s = (*Servent)(unsafe.Pointer(r0)) + if s == nil { + err = errnoErr(e1) + } + return +} + +func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { + r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) { + r1, _, e1 := syscall.Syscall6(procgetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)), 0) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func listen(s Handle, backlog int32) (err error) { + r1, _, e1 := syscall.Syscall(proclisten.Addr(), 2, uintptr(s), uintptr(backlog), 0) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func Ntohs(netshort uint16) (u uint16) { + r0, _, _ := syscall.Syscall(procntohs.Addr(), 1, uintptr(netshort), 0, 0) + u = uint16(r0) + return +} + +func recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen *int32) (n int32, err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + r0, _, e1 := syscall.Syscall6(procrecvfrom.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + n = 
int32(r0) + if n == -1 { + err = errnoErr(e1) + } + return +} + +func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) (err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + r1, _, e1 := syscall.Syscall6(procsendto.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen)) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) { + r1, _, e1 := syscall.Syscall6(procsetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen), 0) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func shutdown(s Handle, how int32) (err error) { + r1, _, e1 := syscall.Syscall(procshutdown.Addr(), 2, uintptr(s), uintptr(how), 0) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procsocket.Addr(), 3, uintptr(af), uintptr(typ), uintptr(protocol)) + handle = Handle(r0) + if handle == InvalidHandle { + err = errnoErr(e1) + } + return +} + +func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procWTSEnumerateSessionsW.Addr(), 5, uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func WTSFreeMemory(ptr uintptr) { + syscall.Syscall(procWTSFreeMemory.Addr(), 1, uintptr(ptr), 0, 0) + return +} + +func WTSQueryUserToken(session uint32, token *Token) (err error) { + r1, _, e1 := syscall.Syscall(procWTSQueryUserToken.Addr(), 2, uintptr(session), uintptr(unsafe.Pointer(token)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/tools/AUTHORS b/vendor/golang.org/x/tools/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/vendor/golang.org/x/tools/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/tools/CONTRIBUTORS b/vendor/golang.org/x/tools/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/vendor/golang.org/x/tools/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/tools/cover/profile.go b/vendor/golang.org/x/tools/cover/profile.go index 57195774c..47a9a5411 100644 --- a/vendor/golang.org/x/tools/cover/profile.go +++ b/vendor/golang.org/x/tools/cover/profile.go @@ -10,6 +10,7 @@ import ( "bufio" "errors" "fmt" + "io" "math" "os" "sort" @@ -45,14 +46,18 @@ func ParseProfiles(fileName string) ([]*Profile, error) { return nil, err } defer pf.Close() + return ParseProfilesFromReader(pf) +} - files := make(map[string]*Profile) - buf := bufio.NewReader(pf) +// ParseProfilesFromReader parses profile data from the Reader and +// returns a Profile for each source file described therein. +func ParseProfilesFromReader(rd io.Reader) ([]*Profile, error) { // First line is "mode: foo", where foo is "set", "count", or "atomic". 
// Rest of file is in the format // encoding/base64/base64.go:34.44,37.40 3 1 // where the fields are: name.go:line.column,line.column numberOfStatements count - s := bufio.NewScanner(buf) + files := make(map[string]*Profile) + s := bufio.NewScanner(rd) mode := "" for s.Scan() { line := s.Text() diff --git a/vendor/golang.org/x/xerrors/README b/vendor/golang.org/x/xerrors/README deleted file mode 100644 index aac7867a5..000000000 --- a/vendor/golang.org/x/xerrors/README +++ /dev/null @@ -1,2 +0,0 @@ -This repository holds the transition packages for the new Go 1.13 error values. -See golang.org/design/29934-error-values. diff --git a/vendor/golang.org/x/xerrors/adaptor.go b/vendor/golang.org/x/xerrors/adaptor.go deleted file mode 100644 index 4317f2483..000000000 --- a/vendor/golang.org/x/xerrors/adaptor.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xerrors - -import ( - "bytes" - "fmt" - "io" - "reflect" - "strconv" -) - -// FormatError calls the FormatError method of f with an errors.Printer -// configured according to s and verb, and writes the result to s. -func FormatError(f Formatter, s fmt.State, verb rune) { - // Assuming this function is only called from the Format method, and given - // that FormatError takes precedence over Format, it cannot be called from - // any package that supports errors.Formatter. It is therefore safe to - // disregard that State may be a specific printer implementation and use one - // of our choice instead. - - // limitations: does not support printing error as Go struct. - - var ( - sep = " " // separator before next error - p = &state{State: s} - direct = true - ) - - var err error = f - - switch verb { - // Note that this switch must match the preference order - // for ordinary string printing (%#v before %+v, and so on). - - case 'v': - if s.Flag('#') { - if stringer, ok := err.(fmt.GoStringer); ok { - io.WriteString(&p.buf, stringer.GoString()) - goto exit - } - // proceed as if it were %v - } else if s.Flag('+') { - p.printDetail = true - sep = "\n - " - } - case 's': - case 'q', 'x', 'X': - // Use an intermediate buffer in the rare cases that precision, - // truncation, or one of the alternative verbs (q, x, and X) are - // specified. - direct = false - - default: - p.buf.WriteString("%!") - p.buf.WriteRune(verb) - p.buf.WriteByte('(') - switch { - case err != nil: - p.buf.WriteString(reflect.TypeOf(f).String()) - default: - p.buf.WriteString("") - } - p.buf.WriteByte(')') - io.Copy(s, &p.buf) - return - } - -loop: - for { - switch v := err.(type) { - case Formatter: - err = v.FormatError((*printer)(p)) - case fmt.Formatter: - v.Format(p, 'v') - break loop - default: - io.WriteString(&p.buf, v.Error()) - break loop - } - if err == nil { - break - } - if p.needColon || !p.printDetail { - p.buf.WriteByte(':') - p.needColon = false - } - p.buf.WriteString(sep) - p.inDetail = false - p.needNewline = false - } - -exit: - width, okW := s.Width() - prec, okP := s.Precision() - - if !direct || (okW && width > 0) || okP { - // Construct format string from State s. 
- format := []byte{'%'} - if s.Flag('-') { - format = append(format, '-') - } - if s.Flag('+') { - format = append(format, '+') - } - if s.Flag(' ') { - format = append(format, ' ') - } - if okW { - format = strconv.AppendInt(format, int64(width), 10) - } - if okP { - format = append(format, '.') - format = strconv.AppendInt(format, int64(prec), 10) - } - format = append(format, string(verb)...) - fmt.Fprintf(s, string(format), p.buf.String()) - } else { - io.Copy(s, &p.buf) - } -} - -var detailSep = []byte("\n ") - -// state tracks error printing state. It implements fmt.State. -type state struct { - fmt.State - buf bytes.Buffer - - printDetail bool - inDetail bool - needColon bool - needNewline bool -} - -func (s *state) Write(b []byte) (n int, err error) { - if s.printDetail { - if len(b) == 0 { - return 0, nil - } - if s.inDetail && s.needColon { - s.needNewline = true - if b[0] == '\n' { - b = b[1:] - } - } - k := 0 - for i, c := range b { - if s.needNewline { - if s.inDetail && s.needColon { - s.buf.WriteByte(':') - s.needColon = false - } - s.buf.Write(detailSep) - s.needNewline = false - } - if c == '\n' { - s.buf.Write(b[k:i]) - k = i + 1 - s.needNewline = true - } - } - s.buf.Write(b[k:]) - if !s.inDetail { - s.needColon = true - } - } else if !s.inDetail { - s.buf.Write(b) - } - return len(b), nil -} - -// printer wraps a state to implement an xerrors.Printer. -type printer state - -func (s *printer) Print(args ...interface{}) { - if !s.inDetail || s.printDetail { - fmt.Fprint((*state)(s), args...) - } -} - -func (s *printer) Printf(format string, args ...interface{}) { - if !s.inDetail || s.printDetail { - fmt.Fprintf((*state)(s), format, args...) - } -} - -func (s *printer) Detail() bool { - s.inDetail = true - return s.printDetail -} diff --git a/vendor/golang.org/x/xerrors/codereview.cfg b/vendor/golang.org/x/xerrors/codereview.cfg deleted file mode 100644 index 3f8b14b64..000000000 --- a/vendor/golang.org/x/xerrors/codereview.cfg +++ /dev/null @@ -1 +0,0 @@ -issuerepo: golang/go diff --git a/vendor/golang.org/x/xerrors/doc.go b/vendor/golang.org/x/xerrors/doc.go deleted file mode 100644 index eef99d9d5..000000000 --- a/vendor/golang.org/x/xerrors/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package xerrors implements functions to manipulate errors. -// -// This package is based on the Go 2 proposal for error values: -// https://golang.org/design/29934-error-values -// -// These functions were incorporated into the standard library's errors package -// in Go 1.13: -// - Is -// - As -// - Unwrap -// -// Also, Errorf's %w verb was incorporated into fmt.Errorf. -// -// Use this package to get equivalent behavior in all supported Go versions. -// -// No other features of this package were included in Go 1.13, and at present -// there are no plans to include any of them. -package xerrors // import "golang.org/x/xerrors" diff --git a/vendor/golang.org/x/xerrors/errors.go b/vendor/golang.org/x/xerrors/errors.go deleted file mode 100644 index e88d3772d..000000000 --- a/vendor/golang.org/x/xerrors/errors.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xerrors - -import "fmt" - -// errorString is a trivial implementation of error. 
-type errorString struct { - s string - frame Frame -} - -// New returns an error that formats as the given text. -// -// The returned error contains a Frame set to the caller's location and -// implements Formatter to show this information when printed with details. -func New(text string) error { - return &errorString{text, Caller(1)} -} - -func (e *errorString) Error() string { - return e.s -} - -func (e *errorString) Format(s fmt.State, v rune) { FormatError(e, s, v) } - -func (e *errorString) FormatError(p Printer) (next error) { - p.Print(e.s) - e.frame.Format(p) - return nil -} diff --git a/vendor/golang.org/x/xerrors/fmt.go b/vendor/golang.org/x/xerrors/fmt.go deleted file mode 100644 index 829862ddf..000000000 --- a/vendor/golang.org/x/xerrors/fmt.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xerrors - -import ( - "fmt" - "strings" - "unicode" - "unicode/utf8" - - "golang.org/x/xerrors/internal" -) - -const percentBangString = "%!" - -// Errorf formats according to a format specifier and returns the string as a -// value that satisfies error. -// -// The returned error includes the file and line number of the caller when -// formatted with additional detail enabled. If the last argument is an error -// the returned error's Format method will return it if the format string ends -// with ": %s", ": %v", or ": %w". If the last argument is an error and the -// format string ends with ": %w", the returned error implements an Unwrap -// method returning it. -// -// If the format specifier includes a %w verb with an error operand in a -// position other than at the end, the returned error will still implement an -// Unwrap method returning the operand, but the error's Format method will not -// return the wrapped error. -// -// It is invalid to include more than one %w verb or to supply it with an -// operand that does not implement the error interface. The %w verb is otherwise -// a synonym for %v. -func Errorf(format string, a ...interface{}) error { - format = formatPlusW(format) - // Support a ": %[wsv]" suffix, which works well with xerrors.Formatter. - wrap := strings.HasSuffix(format, ": %w") - idx, format2, ok := parsePercentW(format) - percentWElsewhere := !wrap && idx >= 0 - if !percentWElsewhere && (wrap || strings.HasSuffix(format, ": %s") || strings.HasSuffix(format, ": %v")) { - err := errorAt(a, len(a)-1) - if err == nil { - return &noWrapError{fmt.Sprintf(format, a...), nil, Caller(1)} - } - // TODO: this is not entirely correct. The error value could be - // printed elsewhere in format if it mixes numbered with unnumbered - // substitutions. With relatively small changes to doPrintf we can - // have it optionally ignore extra arguments and pass the argument - // list in its entirety. - msg := fmt.Sprintf(format[:len(format)-len(": %s")], a[:len(a)-1]...) - frame := Frame{} - if internal.EnableTrace { - frame = Caller(1) - } - if wrap { - return &wrapError{msg, err, frame} - } - return &noWrapError{msg, err, frame} - } - // Support %w anywhere. - // TODO: don't repeat the wrapped error's message when %w occurs in the middle. - msg := fmt.Sprintf(format2, a...) - if idx < 0 { - return &noWrapError{msg, nil, Caller(1)} - } - err := errorAt(a, idx) - if !ok || err == nil { - // Too many %ws or argument of %w is not an error. Approximate the Go - // 1.13 fmt.Errorf message. 
- return &noWrapError{fmt.Sprintf("%sw(%s)", percentBangString, msg), nil, Caller(1)} - } - frame := Frame{} - if internal.EnableTrace { - frame = Caller(1) - } - return &wrapError{msg, err, frame} -} - -func errorAt(args []interface{}, i int) error { - if i < 0 || i >= len(args) { - return nil - } - err, ok := args[i].(error) - if !ok { - return nil - } - return err -} - -// formatPlusW is used to avoid the vet check that will barf at %w. -func formatPlusW(s string) string { - return s -} - -// Return the index of the only %w in format, or -1 if none. -// Also return a rewritten format string with %w replaced by %v, and -// false if there is more than one %w. -// TODO: handle "%[N]w". -func parsePercentW(format string) (idx int, newFormat string, ok bool) { - // Loosely copied from golang.org/x/tools/go/analysis/passes/printf/printf.go. - idx = -1 - ok = true - n := 0 - sz := 0 - var isW bool - for i := 0; i < len(format); i += sz { - if format[i] != '%' { - sz = 1 - continue - } - // "%%" is not a format directive. - if i+1 < len(format) && format[i+1] == '%' { - sz = 2 - continue - } - sz, isW = parsePrintfVerb(format[i:]) - if isW { - if idx >= 0 { - ok = false - } else { - idx = n - } - // "Replace" the last character, the 'w', with a 'v'. - p := i + sz - 1 - format = format[:p] + "v" + format[p+1:] - } - n++ - } - return idx, format, ok -} - -// Parse the printf verb starting with a % at s[0]. -// Return how many bytes it occupies and whether the verb is 'w'. -func parsePrintfVerb(s string) (int, bool) { - // Assume only that the directive is a sequence of non-letters followed by a single letter. - sz := 0 - var r rune - for i := 1; i < len(s); i += sz { - r, sz = utf8.DecodeRuneInString(s[i:]) - if unicode.IsLetter(r) { - return i + sz, r == 'w' - } - } - return len(s), false -} - -type noWrapError struct { - msg string - err error - frame Frame -} - -func (e *noWrapError) Error() string { - return fmt.Sprint(e) -} - -func (e *noWrapError) Format(s fmt.State, v rune) { FormatError(e, s, v) } - -func (e *noWrapError) FormatError(p Printer) (next error) { - p.Print(e.msg) - e.frame.Format(p) - return e.err -} - -type wrapError struct { - msg string - err error - frame Frame -} - -func (e *wrapError) Error() string { - return fmt.Sprint(e) -} - -func (e *wrapError) Format(s fmt.State, v rune) { FormatError(e, s, v) } - -func (e *wrapError) FormatError(p Printer) (next error) { - p.Print(e.msg) - e.frame.Format(p) - return e.err -} - -func (e *wrapError) Unwrap() error { - return e.err -} diff --git a/vendor/golang.org/x/xerrors/format.go b/vendor/golang.org/x/xerrors/format.go deleted file mode 100644 index 1bc9c26b9..000000000 --- a/vendor/golang.org/x/xerrors/format.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xerrors - -// A Formatter formats error messages. -type Formatter interface { - error - - // FormatError prints the receiver's first error and returns the next error in - // the error chain, if any. - FormatError(p Printer) (next error) -} - -// A Printer formats error messages. -// -// The most common implementation of Printer is the one provided by package fmt -// during Printf (as of Go 1.13). Localization packages such as golang.org/x/text/message -// typically provide their own implementations. -type Printer interface { - // Print appends args to the message output. 
- Print(args ...interface{}) - - // Printf writes a formatted string. - Printf(format string, args ...interface{}) - - // Detail reports whether error detail is requested. - // After the first call to Detail, all text written to the Printer - // is formatted as additional detail, or ignored when - // detail has not been requested. - // If Detail returns false, the caller can avoid printing the detail at all. - Detail() bool -} diff --git a/vendor/golang.org/x/xerrors/frame.go b/vendor/golang.org/x/xerrors/frame.go deleted file mode 100644 index 0de628ec5..000000000 --- a/vendor/golang.org/x/xerrors/frame.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xerrors - -import ( - "runtime" -) - -// A Frame contains part of a call stack. -type Frame struct { - // Make room for three PCs: the one we were asked for, what it called, - // and possibly a PC for skipPleaseUseCallersFrames. See: - // https://go.googlesource.com/go/+/032678e0fb/src/runtime/extern.go#169 - frames [3]uintptr -} - -// Caller returns a Frame that describes a frame on the caller's stack. -// The argument skip is the number of frames to skip over. -// Caller(0) returns the frame for the caller of Caller. -func Caller(skip int) Frame { - var s Frame - runtime.Callers(skip+1, s.frames[:]) - return s -} - -// location reports the file, line, and function of a frame. -// -// The returned function may be "" even if file and line are not. -func (f Frame) location() (function, file string, line int) { - frames := runtime.CallersFrames(f.frames[:]) - if _, ok := frames.Next(); !ok { - return "", "", 0 - } - fr, ok := frames.Next() - if !ok { - return "", "", 0 - } - return fr.Function, fr.File, fr.Line -} - -// Format prints the stack as error detail. -// It should be called from an error's Format implementation -// after printing any other error detail. -func (f Frame) Format(p Printer) { - if p.Detail() { - function, file, line := f.location() - if function != "" { - p.Printf("%s\n ", function) - } - if file != "" { - p.Printf("%s:%d\n", file, line) - } - } -} diff --git a/vendor/golang.org/x/xerrors/wrap.go b/vendor/golang.org/x/xerrors/wrap.go deleted file mode 100644 index 9a3b51037..000000000 --- a/vendor/golang.org/x/xerrors/wrap.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xerrors - -import ( - "reflect" -) - -// A Wrapper provides context around another error. -type Wrapper interface { - // Unwrap returns the next error in the error chain. - // If there is no next error, Unwrap returns nil. - Unwrap() error -} - -// Opaque returns an error with the same error formatting as err -// but that does not match err and cannot be unwrapped. -func Opaque(err error) error { - return noWrapper{err} -} - -type noWrapper struct { - error -} - -func (e noWrapper) FormatError(p Printer) (next error) { - if f, ok := e.error.(Formatter); ok { - return f.FormatError(p) - } - p.Print(e.error) - return nil -} - -// Unwrap returns the result of calling the Unwrap method on err, if err implements -// Unwrap. Otherwise, Unwrap returns nil. -func Unwrap(err error) error { - u, ok := err.(Wrapper) - if !ok { - return nil - } - return u.Unwrap() -} - -// Is reports whether any error in err's chain matches target. 
-// -// An error is considered to match a target if it is equal to that target or if -// it implements a method Is(error) bool such that Is(target) returns true. -func Is(err, target error) bool { - if target == nil { - return err == target - } - - isComparable := reflect.TypeOf(target).Comparable() - for { - if isComparable && err == target { - return true - } - if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) { - return true - } - // TODO: consider supporing target.Is(err). This would allow - // user-definable predicates, but also may allow for coping with sloppy - // APIs, thereby making it easier to get away with them. - if err = Unwrap(err); err == nil { - return false - } - } -} - -// As finds the first error in err's chain that matches the type to which target -// points, and if so, sets the target to its value and returns true. An error -// matches a type if it is assignable to the target type, or if it has a method -// As(interface{}) bool such that As(target) returns true. As will panic if target -// is not a non-nil pointer to a type which implements error or is of interface type. -// -// The As method should set the target to its value and return true if err -// matches the type to which target points. -func As(err error, target interface{}) bool { - if target == nil { - panic("errors: target cannot be nil") - } - val := reflect.ValueOf(target) - typ := val.Type() - if typ.Kind() != reflect.Ptr || val.IsNil() { - panic("errors: target must be a non-nil pointer") - } - if e := typ.Elem(); e.Kind() != reflect.Interface && !e.Implements(errorType) { - panic("errors: *target must be interface or implement error") - } - targetType := typ.Elem() - for err != nil { - if reflect.TypeOf(err).AssignableTo(targetType) { - val.Elem().Set(reflect.ValueOf(err)) - return true - } - if x, ok := err.(interface{ As(interface{}) bool }); ok && x.As(target) { - return true - } - err = Unwrap(err) - } - return false -} - -var errorType = reflect.TypeOf((*error)(nil)).Elem() diff --git a/vendor/google.golang.org/genproto/LICENSE b/vendor/google.golang.org/genproto/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/google.golang.org/genproto/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go new file mode 100644 index 000000000..f34a38e4e --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -0,0 +1,201 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0 +// protoc v3.12.2 +// source: google/rpc/status.proto + +package status + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The `Status` type defines a logical error model that is suitable for +// different programming environments, including REST APIs and RPC APIs. It is +// used by [gRPC](https://github.com/grpc). Each `Status` message contains +// three pieces of data: error code, error message, and error details. +// +// You can find out more about this error model and how to work with it in the +// [API Design Guide](https://cloud.google.com/apis/design/errors). +type Status struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // A developer-facing error message, which should be in English. Any + // user-facing error message should be localized and sent in the + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // A list of messages that carry the error details. There is a common set of + // message types for APIs to use. + Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` +} + +func (x *Status) Reset() { + *x = Status{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_status_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Status) ProtoMessage() {} + +func (x *Status) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_status_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Status.ProtoReflect.Descriptor instead. 
+func (*Status) Descriptor() ([]byte, []int) { + return file_google_rpc_status_proto_rawDescGZIP(), []int{0} +} + +func (x *Status) GetCode() int32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *Status) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *Status) GetDetails() []*anypb.Any { + if x != nil { + return x.Details + } + return nil +} + +var File_google_rpc_status_proto protoreflect.FileDescriptor + +var file_google_rpc_status_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, + 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x61, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x0b, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3b, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_google_rpc_status_proto_rawDescOnce sync.Once + file_google_rpc_status_proto_rawDescData = file_google_rpc_status_proto_rawDesc +) + +func file_google_rpc_status_proto_rawDescGZIP() []byte { + file_google_rpc_status_proto_rawDescOnce.Do(func() { + file_google_rpc_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_status_proto_rawDescData) + }) + return file_google_rpc_status_proto_rawDescData +} + +var file_google_rpc_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_rpc_status_proto_goTypes = []interface{}{ + (*Status)(nil), // 0: google.rpc.Status + (*anypb.Any)(nil), // 1: google.protobuf.Any +} +var file_google_rpc_status_proto_depIdxs = []int32{ + 1, // 0: google.rpc.Status.details:type_name -> google.protobuf.Any + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_rpc_status_proto_init() } +func file_google_rpc_status_proto_init() { + if File_google_rpc_status_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_rpc_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*Status); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_rpc_status_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_rpc_status_proto_goTypes, + DependencyIndexes: file_google_rpc_status_proto_depIdxs, + MessageInfos: file_google_rpc_status_proto_msgTypes, + }.Build() + File_google_rpc_status_proto = out.File + file_google_rpc_status_proto_rawDesc = nil + file_google_rpc_status_proto_goTypes = nil + file_google_rpc_status_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/AUTHORS b/vendor/google.golang.org/grpc/AUTHORS new file mode 100644 index 000000000..e491a9e7f --- /dev/null +++ b/vendor/google.golang.org/grpc/AUTHORS @@ -0,0 +1 @@ +Google Inc. diff --git a/vendor/google.golang.org/grpc/LICENSE b/vendor/google.golang.org/grpc/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/google.golang.org/grpc/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/grpc/NOTICE.txt b/vendor/google.golang.org/grpc/NOTICE.txt new file mode 100644 index 000000000..530197749 --- /dev/null +++ b/vendor/google.golang.org/grpc/NOTICE.txt @@ -0,0 +1,13 @@ +Copyright 2014 gRPC authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go new file mode 100644 index 000000000..0b206a578 --- /dev/null +++ b/vendor/google.golang.org/grpc/codes/code_string.go @@ -0,0 +1,62 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package codes + +import "strconv" + +func (c Code) String() string { + switch c { + case OK: + return "OK" + case Canceled: + return "Canceled" + case Unknown: + return "Unknown" + case InvalidArgument: + return "InvalidArgument" + case DeadlineExceeded: + return "DeadlineExceeded" + case NotFound: + return "NotFound" + case AlreadyExists: + return "AlreadyExists" + case PermissionDenied: + return "PermissionDenied" + case ResourceExhausted: + return "ResourceExhausted" + case FailedPrecondition: + return "FailedPrecondition" + case Aborted: + return "Aborted" + case OutOfRange: + return "OutOfRange" + case Unimplemented: + return "Unimplemented" + case Internal: + return "Internal" + case Unavailable: + return "Unavailable" + case DataLoss: + return "DataLoss" + case Unauthenticated: + return "Unauthenticated" + default: + return "Code(" + strconv.FormatInt(int64(c), 10) + ")" + } +} diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go new file mode 100644 index 000000000..11b106182 --- /dev/null +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -0,0 +1,244 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package codes defines the canonical error codes used by gRPC. It is +// consistent across various languages. +package codes // import "google.golang.org/grpc/codes" + +import ( + "fmt" + "strconv" +) + +// A Code is an unsigned 32-bit error code as defined in the gRPC spec. +type Code uint32 + +const ( + // OK is returned on success. + OK Code = 0 + + // Canceled indicates the operation was canceled (typically by the caller). + // + // The gRPC framework will generate this error code when cancellation + // is requested. + Canceled Code = 1 + + // Unknown error. An example of where this error may be returned is + // if a Status value received from another address space belongs to + // an error-space that is not known in this address space. Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. 
+ // + // The gRPC framework will generate this error code in the above two + // mentioned cases. + Unknown Code = 2 + + // InvalidArgument indicates client specified an invalid argument. + // Note that this differs from FailedPrecondition. It indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + // + // This error code will not be generated by the gRPC framework. + InvalidArgument Code = 3 + + // DeadlineExceeded means operation expired before completion. + // For operations that change the state of the system, this error may be + // returned even if the operation has completed successfully. For + // example, a successful response from a server could have been delayed + // long enough for the deadline to expire. + // + // The gRPC framework will generate this error code when the deadline is + // exceeded. + DeadlineExceeded Code = 4 + + // NotFound means some requested entity (e.g., file or directory) was + // not found. + // + // This error code will not be generated by the gRPC framework. + NotFound Code = 5 + + // AlreadyExists means an attempt to create an entity failed because one + // already exists. + // + // This error code will not be generated by the gRPC framework. + AlreadyExists Code = 6 + + // PermissionDenied indicates the caller does not have permission to + // execute the specified operation. It must not be used for rejections + // caused by exhausting some resource (use ResourceExhausted + // instead for those errors). It must not be + // used if the caller cannot be identified (use Unauthenticated + // instead for those errors). + // + // This error code will not be generated by the gRPC core framework, + // but expect authentication middleware to use it. + PermissionDenied Code = 7 + + // ResourceExhausted indicates some resource has been exhausted, perhaps + // a per-user quota, or perhaps the entire file system is out of space. + // + // This error code will be generated by the gRPC framework in + // out-of-memory and server overload situations, or when a message is + // larger than the configured maximum size. + ResourceExhausted Code = 8 + + // FailedPrecondition indicates operation was rejected because the + // system is not in a state required for the operation's execution. + // For example, directory to be deleted may be non-empty, an rmdir + // operation is applied to a non-directory, etc. + // + // A litmus test that may help a service implementor in deciding + // between FailedPrecondition, Aborted, and Unavailable: + // (a) Use Unavailable if the client can retry just the failing call. + // (b) Use Aborted if the client should retry at a higher-level + // (e.g., restarting a read-modify-write sequence). + // (c) Use FailedPrecondition if the client should not retry until + // the system state has been explicitly fixed. E.g., if an "rmdir" + // fails because the directory is non-empty, FailedPrecondition + // should be returned since the client should not retry unless + // they have first fixed up the directory by deleting files from it. + // (d) Use FailedPrecondition if the client performs conditional + // REST Get/Update/Delete on a resource and the resource on the + // server does not match the condition. E.g., conflicting + // read-modify-write on the same resource. + // + // This error code will not be generated by the gRPC framework. 
+ FailedPrecondition Code = 9 + + // Aborted indicates the operation was aborted, typically due to a + // concurrency issue like sequencer check failures, transaction aborts, + // etc. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + // + // This error code will not be generated by the gRPC framework. + Aborted Code = 10 + + // OutOfRange means operation was attempted past the valid range. + // E.g., seeking or reading past end of file. + // + // Unlike InvalidArgument, this error indicates a problem that may + // be fixed if the system state changes. For example, a 32-bit file + // system will generate InvalidArgument if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // OutOfRange if asked to read from an offset past the current + // file size. + // + // There is a fair bit of overlap between FailedPrecondition and + // OutOfRange. We recommend using OutOfRange (the more specific + // error) when it applies so that callers who are iterating through + // a space can easily look for an OutOfRange error to detect when + // they are done. + // + // This error code will not be generated by the gRPC framework. + OutOfRange Code = 11 + + // Unimplemented indicates operation is not implemented or not + // supported/enabled in this service. + // + // This error code will be generated by the gRPC framework. Most + // commonly, you will see this error code when a method implementation + // is missing on the server. It can also be generated for unknown + // compression algorithms or a disagreement as to whether an RPC should + // be streaming. + Unimplemented Code = 12 + + // Internal errors. Means some invariants expected by underlying + // system has been broken. If you see one of these errors, + // something is very broken. + // + // This error code will be generated by the gRPC framework in several + // internal error conditions. + Internal Code = 13 + + // Unavailable indicates the service is currently unavailable. + // This is a most likely a transient condition and may be corrected + // by retrying with a backoff. Note that it is not always safe to retry + // non-idempotent operations. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + // + // This error code will be generated by the gRPC framework during + // abrupt shutdown of a server process or network connection. + Unavailable Code = 14 + + // DataLoss indicates unrecoverable data loss or corruption. + // + // This error code will not be generated by the gRPC framework. + DataLoss Code = 15 + + // Unauthenticated indicates the request does not have valid + // authentication credentials for the operation. + // + // The gRPC framework will generate this error code when the + // authentication metadata is invalid or a Credentials callback fails, + // but also expect authentication middleware to generate it. 
+ Unauthenticated Code = 16 + + _maxCode = 17 +) + +var strToCode = map[string]Code{ + `"OK"`: OK, + `"CANCELLED"`:/* [sic] */ Canceled, + `"UNKNOWN"`: Unknown, + `"INVALID_ARGUMENT"`: InvalidArgument, + `"DEADLINE_EXCEEDED"`: DeadlineExceeded, + `"NOT_FOUND"`: NotFound, + `"ALREADY_EXISTS"`: AlreadyExists, + `"PERMISSION_DENIED"`: PermissionDenied, + `"RESOURCE_EXHAUSTED"`: ResourceExhausted, + `"FAILED_PRECONDITION"`: FailedPrecondition, + `"ABORTED"`: Aborted, + `"OUT_OF_RANGE"`: OutOfRange, + `"UNIMPLEMENTED"`: Unimplemented, + `"INTERNAL"`: Internal, + `"UNAVAILABLE"`: Unavailable, + `"DATA_LOSS"`: DataLoss, + `"UNAUTHENTICATED"`: Unauthenticated, +} + +// UnmarshalJSON unmarshals b into the Code. +func (c *Code) UnmarshalJSON(b []byte) error { + // From json.Unmarshaler: By convention, to approximate the behavior of + // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as + // a no-op. + if string(b) == "null" { + return nil + } + if c == nil { + return fmt.Errorf("nil receiver passed to UnmarshalJSON") + } + + if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { + if ci >= _maxCode { + return fmt.Errorf("invalid code: %q", ci) + } + + *c = Code(ci) + return nil + } + + if jc, ok := strToCode[string(b)]; ok { + *c = jc + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) +} diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go new file mode 100644 index 000000000..e5c6513ed --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -0,0 +1,166 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package status implements errors returned by gRPC. These errors are +// serialized and transmitted on the wire between server and client, and allow +// for additional data to be transmitted via the Details field in the status +// proto. gRPC service handlers should return an error created by this +// package, and gRPC clients should expect a corresponding error to be +// returned from the RPC call. +// +// This package upholds the invariants that a non-nil error may not +// contain an OK code, and an OK code must result in a nil error. +package status + +import ( + "errors" + "fmt" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" +) + +// Status represents an RPC status code, message, and details. It is immutable +// and should be created with New, Newf, or FromProto. +type Status struct { + s *spb.Status +} + +// New returns a Status representing c and msg. +func New(c codes.Code, msg string) *Status { + return &Status{s: &spb.Status{Code: int32(c), Message: msg}} +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). 
+func Newf(c codes.Code, format string, a ...interface{}) *Status { + return New(c, fmt.Sprintf(format, a...)) +} + +// FromProto returns a Status representing s. +func FromProto(s *spb.Status) *Status { + return &Status{s: proto.Clone(s).(*spb.Status)} +} + +// Err returns an error representing c and msg. If c is OK, returns nil. +func Err(c codes.Code, msg string) error { + return New(c, msg).Err() +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). +func Errorf(c codes.Code, format string, a ...interface{}) error { + return Err(c, fmt.Sprintf(format, a...)) +} + +// Code returns the status code contained in s. +func (s *Status) Code() codes.Code { + if s == nil || s.s == nil { + return codes.OK + } + return codes.Code(s.s.Code) +} + +// Message returns the message contained in s. +func (s *Status) Message() string { + if s == nil || s.s == nil { + return "" + } + return s.s.Message +} + +// Proto returns s's status as an spb.Status proto message. +func (s *Status) Proto() *spb.Status { + if s == nil { + return nil + } + return proto.Clone(s.s).(*spb.Status) +} + +// Err returns an immutable error representing s; returns nil if s.Code() is OK. +func (s *Status) Err() error { + if s.Code() == codes.OK { + return nil + } + return &Error{s: s} +} + +// WithDetails returns a new status with the provided details messages appended to the status. +// If any errors are encountered, it returns nil and the first error encountered. +func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { + if s.Code() == codes.OK { + return nil, errors.New("no error details for status with code OK") + } + // s.Code() != OK implies that s.Proto() != nil. + p := s.Proto() + for _, detail := range details { + any, err := ptypes.MarshalAny(detail) + if err != nil { + return nil, err + } + p.Details = append(p.Details, any) + } + return &Status{s: p}, nil +} + +// Details returns a slice of details messages attached to the status. +// If a detail cannot be decoded, the error is returned in place of the detail. +func (s *Status) Details() []interface{} { + if s == nil || s.s == nil { + return nil + } + details := make([]interface{}, 0, len(s.s.Details)) + for _, any := range s.s.Details { + detail := &ptypes.DynamicAny{} + if err := ptypes.UnmarshalAny(any, detail); err != nil { + details = append(details, err) + continue + } + details = append(details, detail.Message) + } + return details +} + +func (s *Status) String() string { + return fmt.Sprintf("rpc error: code = %s desc = %s", s.Code(), s.Message()) +} + +// Error wraps a pointer of a status proto. It implements error and Status, +// and a nil *Error should never be returned by this package. +type Error struct { + s *Status +} + +func (e *Error) Error() string { + return e.s.String() +} + +// GRPCStatus returns the Status represented by se. +func (e *Error) GRPCStatus() *Status { + return e.s +} + +// Is implements future error.Is functionality. +// A Error is equivalent if the code and message are identical. +func (e *Error) Is(target error) bool { + tse, ok := target.(*Error) + if !ok { + return false + } + return proto.Equal(e.s.s, tse.s.s) +} diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go new file mode 100644 index 000000000..6d163b6e3 --- /dev/null +++ b/vendor/google.golang.org/grpc/status/status.go @@ -0,0 +1,135 @@ +/* + * + * Copyright 2017 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package status implements errors returned by gRPC. These errors are +// serialized and transmitted on the wire between server and client, and allow +// for additional data to be transmitted via the Details field in the status +// proto. gRPC service handlers should return an error created by this +// package, and gRPC clients should expect a corresponding error to be +// returned from the RPC call. +// +// This package upholds the invariants that a non-nil error may not +// contain an OK code, and an OK code must result in a nil error. +package status + +import ( + "context" + "errors" + "fmt" + + spb "google.golang.org/genproto/googleapis/rpc/status" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/status" +) + +// Status references google.golang.org/grpc/internal/status. It represents an +// RPC status code, message, and details. It is immutable and should be +// created with New, Newf, or FromProto. +// https://godoc.org/google.golang.org/grpc/internal/status +type Status = status.Status + +// New returns a Status representing c and msg. +func New(c codes.Code, msg string) *Status { + return status.New(c, msg) +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). +func Newf(c codes.Code, format string, a ...interface{}) *Status { + return New(c, fmt.Sprintf(format, a...)) +} + +// Error returns an error representing c and msg. If c is OK, returns nil. +func Error(c codes.Code, msg string) error { + return New(c, msg).Err() +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). +func Errorf(c codes.Code, format string, a ...interface{}) error { + return Error(c, fmt.Sprintf(format, a...)) +} + +// ErrorProto returns an error representing s. If s.Code is OK, returns nil. +func ErrorProto(s *spb.Status) error { + return FromProto(s).Err() +} + +// FromProto returns a Status representing s. +func FromProto(s *spb.Status) *Status { + return status.FromProto(s) +} + +// FromError returns a Status representation of err. +// +// - If err was produced by this package or implements the method `GRPCStatus() +// *Status`, the appropriate Status is returned. +// +// - If err is nil, a Status is returned with codes.OK and no message. +// +// - Otherwise, err is an error not compatible with this package. In this +// case, a Status is returned with codes.Unknown and err's Error() message, +// and ok is false. +func FromError(err error) (s *Status, ok bool) { + if err == nil { + return nil, true + } + if se, ok := err.(interface { + GRPCStatus() *Status + }); ok { + return se.GRPCStatus(), true + } + return New(codes.Unknown, err.Error()), false +} + +// Convert is a convenience function which removes the need to handle the +// boolean return value from FromError. +func Convert(err error) *Status { + s, _ := FromError(err) + return s +} + +// Code returns the Code of the error if it is a Status error, codes.OK if err +// is nil, or codes.Unknown otherwise. 
+func Code(err error) codes.Code { + // Don't use FromError to avoid allocation of OK status. + if err == nil { + return codes.OK + } + if se, ok := err.(interface { + GRPCStatus() *Status + }); ok { + return se.GRPCStatus().Code() + } + return codes.Unknown +} + +// FromContextError converts a context error or wrapped context error into a +// Status. It returns a Status with codes.OK if err is nil, or a Status with +// codes.Unknown if err is non-nil and not a context error. +func FromContextError(err error) *Status { + if err == nil { + return nil + } + if errors.Is(err, context.DeadlineExceeded) { + return New(codes.DeadlineExceeded, err.Error()) + } + if errors.Is(err, context.Canceled) { + return New(codes.Canceled, err.Error()) + } + return New(codes.Unknown, err.Error()) +} diff --git a/vendor/golang.org/x/sys/AUTHORS b/vendor/google.golang.org/protobuf/AUTHORS similarity index 74% rename from vendor/golang.org/x/sys/AUTHORS rename to vendor/google.golang.org/protobuf/AUTHORS index 15167cd74..2b00ddba0 100644 --- a/vendor/golang.org/x/sys/AUTHORS +++ b/vendor/google.golang.org/protobuf/AUTHORS @@ -1,3 +1,3 @@ # This source code refers to The Go Authors for copyright purposes. # The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. +# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/sys/CONTRIBUTORS b/vendor/google.golang.org/protobuf/CONTRIBUTORS similarity index 70% rename from vendor/golang.org/x/sys/CONTRIBUTORS rename to vendor/google.golang.org/protobuf/CONTRIBUTORS index 1c4577e96..1fbd3e976 100644 --- a/vendor/golang.org/x/sys/CONTRIBUTORS +++ b/vendor/google.golang.org/protobuf/CONTRIBUTORS @@ -1,3 +1,3 @@ # This source code was written by the Go contributors. # The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. +# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/google.golang.org/protobuf/LICENSE b/vendor/google.golang.org/protobuf/LICENSE new file mode 100644 index 000000000..49ea0f928 --- /dev/null +++ b/vendor/google.golang.org/protobuf/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2018 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/google.golang.org/protobuf/PATENTS b/vendor/google.golang.org/protobuf/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/google.golang.org/protobuf/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go new file mode 100644 index 000000000..179d6e8fc --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -0,0 +1,770 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package prototext + +import ( + "fmt" + "unicode/utf8" + + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/encoding/text" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/set" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// Unmarshal reads the given []byte into the given proto.Message. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +func Unmarshal(b []byte, m proto.Message) error { + return UnmarshalOptions{}.Unmarshal(b, m) +} + +// UnmarshalOptions is a configurable textproto format unmarshaler. 
+type UnmarshalOptions struct { + pragma.NoUnkeyedLiterals + + // AllowPartial accepts input for messages that will result in missing + // required fields. If AllowPartial is false (the default), Unmarshal will + // return error if there are any missing required fields. + AllowPartial bool + + // DiscardUnknown specifies whether to ignore unknown fields when parsing. + // An unknown field is any field whose field name or field number does not + // resolve to any known or extension field in the message. + // By default, unmarshal rejects unknown fields as an error. + DiscardUnknown bool + + // Resolver is used for looking up types when unmarshaling + // google.protobuf.Any messages or extension fields. + // If nil, this defaults to using protoregistry.GlobalTypes. + Resolver interface { + protoregistry.MessageTypeResolver + protoregistry.ExtensionTypeResolver + } +} + +// Unmarshal reads the given []byte and populates the given proto.Message +// using options in the UnmarshalOptions object. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { + return o.unmarshal(b, m) +} + +// unmarshal is a centralized function that all unmarshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for unmarshal that do not go through this. +func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error { + proto.Reset(m) + + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + + dec := decoder{text.NewDecoder(b), o} + if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil { + return err + } + if o.AllowPartial { + return nil + } + return proto.CheckInitialized(m) +} + +type decoder struct { + *text.Decoder + opts UnmarshalOptions +} + +// newError returns an error object with position info. +func (d decoder) newError(pos int, f string, x ...interface{}) error { + line, column := d.Position(pos) + head := fmt.Sprintf("(line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unexpectedTokenError returns a syntax error for the given unexpected token. +func (d decoder) unexpectedTokenError(tok text.Token) error { + return d.syntaxError(tok.Pos(), "unexpected token: %s", tok.RawString()) +} + +// syntaxError returns a syntax error for given position. +func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { + line, column := d.Position(pos) + head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unmarshalMessage unmarshals into the given protoreflect.Message. +func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { + messageDesc := m.Descriptor() + if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { + return errors.New("no support for proto1 MessageSets") + } + + if messageDesc.FullName() == genid.Any_message_fullname { + return d.unmarshalAny(m, checkDelims) + } + + if checkDelims { + tok, err := d.Read() + if err != nil { + return err + } + + if tok.Kind() != text.MessageOpen { + return d.unexpectedTokenError(tok) + } + } + + var seenNums set.Ints + var seenOneofs set.Ints + fieldDescs := messageDesc.Fields() + + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch typ := tok.Kind(); typ { + case text.Name: + // Continue below. 
+ case text.EOF: + if checkDelims { + return text.ErrUnexpectedEOF + } + return nil + default: + if checkDelims && typ == text.MessageClose { + return nil + } + return d.unexpectedTokenError(tok) + } + + // Resolve the field descriptor. + var name pref.Name + var fd pref.FieldDescriptor + var xt pref.ExtensionType + var xtErr error + var isFieldNumberName bool + + switch tok.NameKind() { + case text.IdentName: + name = pref.Name(tok.IdentName()) + fd = fieldDescs.ByTextName(string(name)) + + case text.TypeName: + // Handle extensions only. This code path is not for Any. + xt, xtErr = d.opts.Resolver.FindExtensionByName(pref.FullName(tok.TypeName())) + + case text.FieldNumber: + isFieldNumberName = true + num := pref.FieldNumber(tok.FieldNumber()) + if !num.IsValid() { + return d.newError(tok.Pos(), "invalid field number: %d", num) + } + fd = fieldDescs.ByNumber(num) + if fd == nil { + xt, xtErr = d.opts.Resolver.FindExtensionByNumber(messageDesc.FullName(), num) + } + } + + if xt != nil { + fd = xt.TypeDescriptor() + if !messageDesc.ExtensionRanges().Has(fd.Number()) || fd.ContainingMessage().FullName() != messageDesc.FullName() { + return d.newError(tok.Pos(), "message %v cannot be extended by %v", messageDesc.FullName(), fd.FullName()) + } + } else if xtErr != nil && xtErr != protoregistry.NotFound { + return d.newError(tok.Pos(), "unable to resolve [%s]: %v", tok.RawString(), xtErr) + } + if flags.ProtoLegacy { + if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { + fd = nil // reset since the weak reference is not linked in + } + } + + // Handle unknown fields. + if fd == nil { + if d.opts.DiscardUnknown || messageDesc.ReservedNames().Has(name) { + d.skipValue() + continue + } + return d.newError(tok.Pos(), "unknown field: %v", tok.RawString()) + } + + // Handle fields identified by field number. + if isFieldNumberName { + // TODO: Add an option to permit parsing field numbers. + // + // This requires careful thought as the MarshalOptions.EmitUnknown + // option allows formatting unknown fields as the field number and the + // best-effort textual representation of the field value. In that case, + // it may not be possible to unmarshal the value from a parser that does + // have information about the unknown field. + return d.newError(tok.Pos(), "cannot specify field by number: %v", tok.RawString()) + } + + switch { + case fd.IsList(): + kind := fd.Kind() + if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } + + list := m.Mutable(fd).List() + if err := d.unmarshalList(fd, list); err != nil { + return err + } + + case fd.IsMap(): + mmap := m.Mutable(fd).Map() + if err := d.unmarshalMap(fd, mmap); err != nil { + return err + } + + default: + kind := fd.Kind() + if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } + + // If field is a oneof, check if it has already been set. 
+ if od := fd.ContainingOneof(); od != nil { + idx := uint64(od.Index()) + if seenOneofs.Has(idx) { + return d.newError(tok.Pos(), "error parsing %q, oneof %v is already set", tok.RawString(), od.FullName()) + } + seenOneofs.Set(idx) + } + + num := uint64(fd.Number()) + if seenNums.Has(num) { + return d.newError(tok.Pos(), "non-repeated field %q is repeated", tok.RawString()) + } + + if err := d.unmarshalSingular(fd, m); err != nil { + return err + } + seenNums.Set(num) + } + } + + return nil +} + +// unmarshalSingular unmarshals a non-repeated field value specified by the +// given FieldDescriptor. +func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) error { + var val pref.Value + var err error + switch fd.Kind() { + case pref.MessageKind, pref.GroupKind: + val = m.NewField(fd) + err = d.unmarshalMessage(val.Message(), true) + default: + val, err = d.unmarshalScalar(fd) + } + if err == nil { + m.Set(fd, val) + } + return err +} + +// unmarshalScalar unmarshals a scalar/enum protoreflect.Value specified by the +// given FieldDescriptor. +func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) { + tok, err := d.Read() + if err != nil { + return pref.Value{}, err + } + + if tok.Kind() != text.Scalar { + return pref.Value{}, d.unexpectedTokenError(tok) + } + + kind := fd.Kind() + switch kind { + case pref.BoolKind: + if b, ok := tok.Bool(); ok { + return pref.ValueOfBool(b), nil + } + + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + if n, ok := tok.Int32(); ok { + return pref.ValueOfInt32(n), nil + } + + case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + if n, ok := tok.Int64(); ok { + return pref.ValueOfInt64(n), nil + } + + case pref.Uint32Kind, pref.Fixed32Kind: + if n, ok := tok.Uint32(); ok { + return pref.ValueOfUint32(n), nil + } + + case pref.Uint64Kind, pref.Fixed64Kind: + if n, ok := tok.Uint64(); ok { + return pref.ValueOfUint64(n), nil + } + + case pref.FloatKind: + if n, ok := tok.Float32(); ok { + return pref.ValueOfFloat32(n), nil + } + + case pref.DoubleKind: + if n, ok := tok.Float64(); ok { + return pref.ValueOfFloat64(n), nil + } + + case pref.StringKind: + if s, ok := tok.String(); ok { + if strs.EnforceUTF8(fd) && !utf8.ValidString(s) { + return pref.Value{}, d.newError(tok.Pos(), "contains invalid UTF-8") + } + return pref.ValueOfString(s), nil + } + + case pref.BytesKind: + if b, ok := tok.String(); ok { + return pref.ValueOfBytes([]byte(b)), nil + } + + case pref.EnumKind: + if lit, ok := tok.Enum(); ok { + // Lookup EnumNumber based on name. + if enumVal := fd.Enum().Values().ByName(pref.Name(lit)); enumVal != nil { + return pref.ValueOfEnum(enumVal.Number()), nil + } + } + if num, ok := tok.Int32(); ok { + return pref.ValueOfEnum(pref.EnumNumber(num)), nil + } + + default: + panic(fmt.Sprintf("invalid scalar kind %v", kind)) + } + + return pref.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) +} + +// unmarshalList unmarshals into given protoreflect.List. A list value can +// either be in [] syntax or simply just a single scalar/message value. 
+func (d decoder) unmarshalList(fd pref.FieldDescriptor, list pref.List) error { + tok, err := d.Peek() + if err != nil { + return err + } + + switch fd.Kind() { + case pref.MessageKind, pref.GroupKind: + switch tok.Kind() { + case text.ListOpen: + d.Read() + for { + tok, err := d.Peek() + if err != nil { + return err + } + + switch tok.Kind() { + case text.ListClose: + d.Read() + return nil + case text.MessageOpen: + pval := list.NewElement() + if err := d.unmarshalMessage(pval.Message(), true); err != nil { + return err + } + list.Append(pval) + default: + return d.unexpectedTokenError(tok) + } + } + + case text.MessageOpen: + pval := list.NewElement() + if err := d.unmarshalMessage(pval.Message(), true); err != nil { + return err + } + list.Append(pval) + return nil + } + + default: + switch tok.Kind() { + case text.ListOpen: + d.Read() + for { + tok, err := d.Peek() + if err != nil { + return err + } + + switch tok.Kind() { + case text.ListClose: + d.Read() + return nil + case text.Scalar: + pval, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + list.Append(pval) + default: + return d.unexpectedTokenError(tok) + } + } + + case text.Scalar: + pval, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + list.Append(pval) + return nil + } + } + + return d.unexpectedTokenError(tok) +} + +// unmarshalMap unmarshals into given protoreflect.Map. A map value is a +// textproto message containing {key: , value: }. +func (d decoder) unmarshalMap(fd pref.FieldDescriptor, mmap pref.Map) error { + // Determine ahead whether map entry is a scalar type or a message type in + // order to call the appropriate unmarshalMapValue func inside + // unmarshalMapEntry. + var unmarshalMapValue func() (pref.Value, error) + switch fd.MapValue().Kind() { + case pref.MessageKind, pref.GroupKind: + unmarshalMapValue = func() (pref.Value, error) { + pval := mmap.NewValue() + if err := d.unmarshalMessage(pval.Message(), true); err != nil { + return pref.Value{}, err + } + return pval, nil + } + default: + unmarshalMapValue = func() (pref.Value, error) { + return d.unmarshalScalar(fd.MapValue()) + } + } + + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case text.MessageOpen: + return d.unmarshalMapEntry(fd, mmap, unmarshalMapValue) + + case text.ListOpen: + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case text.ListClose: + return nil + case text.MessageOpen: + if err := d.unmarshalMapEntry(fd, mmap, unmarshalMapValue); err != nil { + return err + } + default: + return d.unexpectedTokenError(tok) + } + } + + default: + return d.unexpectedTokenError(tok) + } +} + +// unmarshalMap unmarshals into given protoreflect.Map. A map value is a +// textproto message containing {key: , value: }. +func (d decoder) unmarshalMapEntry(fd pref.FieldDescriptor, mmap pref.Map, unmarshalMapValue func() (pref.Value, error)) error { + var key pref.MapKey + var pval pref.Value +Loop: + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case text.Name: + if tok.NameKind() != text.IdentName { + if !d.opts.DiscardUnknown { + return d.newError(tok.Pos(), "unknown map entry field %q", tok.RawString()) + } + d.skipValue() + continue Loop + } + // Continue below. 
+ case text.MessageClose: + break Loop + default: + return d.unexpectedTokenError(tok) + } + + switch name := pref.Name(tok.IdentName()); name { + case genid.MapEntry_Key_field_name: + if !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } + if key.IsValid() { + return d.newError(tok.Pos(), "map entry %q cannot be repeated", name) + } + val, err := d.unmarshalScalar(fd.MapKey()) + if err != nil { + return err + } + key = val.MapKey() + + case genid.MapEntry_Value_field_name: + if kind := fd.MapValue().Kind(); (kind != pref.MessageKind) && (kind != pref.GroupKind) { + if !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } + } + if pval.IsValid() { + return d.newError(tok.Pos(), "map entry %q cannot be repeated", name) + } + pval, err = unmarshalMapValue() + if err != nil { + return err + } + + default: + if !d.opts.DiscardUnknown { + return d.newError(tok.Pos(), "unknown map entry field %q", name) + } + d.skipValue() + } + } + + if !key.IsValid() { + key = fd.MapKey().Default().MapKey() + } + if !pval.IsValid() { + switch fd.MapValue().Kind() { + case pref.MessageKind, pref.GroupKind: + // If value field is not set for message/group types, construct an + // empty one as default. + pval = mmap.NewValue() + default: + pval = fd.MapValue().Default() + } + } + mmap.Set(key, pval) + return nil +} + +// unmarshalAny unmarshals an Any textproto. It can either be in expanded form +// or non-expanded form. +func (d decoder) unmarshalAny(m pref.Message, checkDelims bool) error { + var typeURL string + var bValue []byte + var seenTypeUrl bool + var seenValue bool + var isExpanded bool + + if checkDelims { + tok, err := d.Read() + if err != nil { + return err + } + + if tok.Kind() != text.MessageOpen { + return d.unexpectedTokenError(tok) + } + } + +Loop: + for { + // Read field name. Can only have 3 possible field names, i.e. type_url, + // value and type URL name inside []. + tok, err := d.Read() + if err != nil { + return err + } + if typ := tok.Kind(); typ != text.Name { + if checkDelims { + if typ == text.MessageClose { + break Loop + } + } else if typ == text.EOF { + break Loop + } + return d.unexpectedTokenError(tok) + } + + switch tok.NameKind() { + case text.IdentName: + // Both type_url and value fields require field separator :. 
+ if !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } + + switch name := pref.Name(tok.IdentName()); name { + case genid.Any_TypeUrl_field_name: + if seenTypeUrl { + return d.newError(tok.Pos(), "duplicate %v field", genid.Any_TypeUrl_field_fullname) + } + if isExpanded { + return d.newError(tok.Pos(), "conflict with [%s] field", typeURL) + } + tok, err := d.Read() + if err != nil { + return err + } + var ok bool + typeURL, ok = tok.String() + if !ok { + return d.newError(tok.Pos(), "invalid %v field value: %v", genid.Any_TypeUrl_field_fullname, tok.RawString()) + } + seenTypeUrl = true + + case genid.Any_Value_field_name: + if seenValue { + return d.newError(tok.Pos(), "duplicate %v field", genid.Any_Value_field_fullname) + } + if isExpanded { + return d.newError(tok.Pos(), "conflict with [%s] field", typeURL) + } + tok, err := d.Read() + if err != nil { + return err + } + s, ok := tok.String() + if !ok { + return d.newError(tok.Pos(), "invalid %v field value: %v", genid.Any_Value_field_fullname, tok.RawString()) + } + bValue = []byte(s) + seenValue = true + + default: + if !d.opts.DiscardUnknown { + return d.newError(tok.Pos(), "invalid field name %q in %v message", tok.RawString(), genid.Any_message_fullname) + } + } + + case text.TypeName: + if isExpanded { + return d.newError(tok.Pos(), "cannot have more than one type") + } + if seenTypeUrl { + return d.newError(tok.Pos(), "conflict with type_url field") + } + typeURL = tok.TypeName() + var err error + bValue, err = d.unmarshalExpandedAny(typeURL, tok.Pos()) + if err != nil { + return err + } + isExpanded = true + + default: + if !d.opts.DiscardUnknown { + return d.newError(tok.Pos(), "invalid field name %q in %v message", tok.RawString(), genid.Any_message_fullname) + } + } + } + + fds := m.Descriptor().Fields() + if len(typeURL) > 0 { + m.Set(fds.ByNumber(genid.Any_TypeUrl_field_number), pref.ValueOfString(typeURL)) + } + if len(bValue) > 0 { + m.Set(fds.ByNumber(genid.Any_Value_field_number), pref.ValueOfBytes(bValue)) + } + return nil +} + +func (d decoder) unmarshalExpandedAny(typeURL string, pos int) ([]byte, error) { + mt, err := d.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return nil, d.newError(pos, "unable to resolve message [%v]: %v", typeURL, err) + } + // Create new message for the embedded message type and unmarshal the value + // field into it. + m := mt.New() + if err := d.unmarshalMessage(m, true); err != nil { + return nil, err + } + // Serialize the embedded message and return the resulting bytes. + b, err := proto.MarshalOptions{ + AllowPartial: true, // Never check required fields inside an Any. + Deterministic: true, + }.Marshal(m.Interface()) + if err != nil { + return nil, d.newError(pos, "error in marshaling message into Any.value: %v", err) + } + return b, nil +} + +// skipValue makes the decoder parse a field value in order to advance the read +// to the next field. It relies on Read returning an error if the types are not +// in valid sequence. +func (d decoder) skipValue() error { + tok, err := d.Read() + if err != nil { + return err + } + // Only need to continue reading for messages and lists. + switch tok.Kind() { + case text.MessageOpen: + return d.skipMessageValue() + + case text.ListOpen: + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case text.ListClose: + return nil + case text.MessageOpen: + return d.skipMessageValue() + default: + // Skip items. 
This will not validate whether skipped values are + // of the same type or not, same behavior as C++ + // TextFormat::Parser::AllowUnknownField(true) version 3.8.0. + } + } + } + return nil +} + +// skipMessageValue makes the decoder parse and skip over all fields in a +// message. It assumes that the previous read type is MessageOpen. +func (d decoder) skipMessageValue() error { + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case text.MessageClose: + return nil + case text.Name: + if err := d.skipValue(); err != nil { + return err + } + } + } +} diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/doc.go b/vendor/google.golang.org/protobuf/encoding/prototext/doc.go new file mode 100644 index 000000000..162b4f98a --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/prototext/doc.go @@ -0,0 +1,7 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package prototext marshals and unmarshals protocol buffer messages as the +// textproto format. +package prototext diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go new file mode 100644 index 000000000..8d5304dc5 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -0,0 +1,371 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package prototext + +import ( + "fmt" + "strconv" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/encoding/text" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/order" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const defaultIndent = " " + +// Format formats the message as a multiline string. +// This function is only intended for human consumption and ignores errors. +// Do not depend on the output being stable. It may change over time across +// different versions of the program. +func Format(m proto.Message) string { + return MarshalOptions{Multiline: true}.Format(m) +} + +// Marshal writes the given proto.Message in textproto format using default +// options. Do not depend on the output being stable. It may change over time +// across different versions of the program. +func Marshal(m proto.Message) ([]byte, error) { + return MarshalOptions{}.Marshal(m) +} + +// MarshalOptions is a configurable text format marshaler. +type MarshalOptions struct { + pragma.NoUnkeyedLiterals + + // Multiline specifies whether the marshaler should format the output in + // indented-form with every textual element on a new line. + // If Indent is an empty string, then an arbitrary indent is chosen. + Multiline bool + + // Indent specifies the set of indentation characters to use in a multiline + // formatted output such that every entry is preceded by Indent and + // terminated by a newline. 
If non-empty, then Multiline is treated as true. + // Indent can only be composed of space or tab characters. + Indent string + + // EmitASCII specifies whether to format strings and bytes as ASCII only + // as opposed to using UTF-8 encoding when possible. + EmitASCII bool + + // allowInvalidUTF8 specifies whether to permit the encoding of strings + // with invalid UTF-8. This is unexported as it is intended to only + // be specified by the Format method. + allowInvalidUTF8 bool + + // AllowPartial allows messages that have missing required fields to marshal + // without returning an error. If AllowPartial is false (the default), + // Marshal will return error if there are any missing required fields. + AllowPartial bool + + // EmitUnknown specifies whether to emit unknown fields in the output. + // If specified, the unmarshaler may be unable to parse the output. + // The default is to exclude unknown fields. + EmitUnknown bool + + // Resolver is used for looking up types when expanding google.protobuf.Any + // messages. If nil, this defaults to using protoregistry.GlobalTypes. + Resolver interface { + protoregistry.ExtensionTypeResolver + protoregistry.MessageTypeResolver + } +} + +// Format formats the message as a string. +// This method is only intended for human consumption and ignores errors. +// Do not depend on the output being stable. It may change over time across +// different versions of the program. +func (o MarshalOptions) Format(m proto.Message) string { + if m == nil || !m.ProtoReflect().IsValid() { + return "" // invalid syntax, but okay since this is for debugging + } + o.allowInvalidUTF8 = true + o.AllowPartial = true + o.EmitUnknown = true + b, _ := o.Marshal(m) + return string(b) +} + +// Marshal writes the given proto.Message in textproto format using options in +// MarshalOptions object. Do not depend on the output being stable. It may +// change over time across different versions of the program. +func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { + return o.marshal(m) +} + +// marshal is a centralized function that all marshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for marshal that do not go through this. +func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { + var delims = [2]byte{'{', '}'} + + if o.Multiline && o.Indent == "" { + o.Indent = defaultIndent + } + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + + internalEnc, err := text.NewEncoder(o.Indent, delims, o.EmitASCII) + if err != nil { + return nil, err + } + + // Treat nil message interface as an empty message, + // in which case there is nothing to output. + if m == nil { + return []byte{}, nil + } + + enc := encoder{internalEnc, o} + err = enc.marshalMessage(m.ProtoReflect(), false) + if err != nil { + return nil, err + } + out := enc.Bytes() + if len(o.Indent) > 0 && len(out) > 0 { + out = append(out, '\n') + } + if o.AllowPartial { + return out, nil + } + return out, proto.CheckInitialized(m) +} + +type encoder struct { + *text.Encoder + opts MarshalOptions +} + +// marshalMessage marshals the given protoreflect.Message. +func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error { + messageDesc := m.Descriptor() + if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { + return errors.New("no support for proto1 MessageSets") + } + + if inclDelims { + e.StartMessage() + defer e.EndMessage() + } + + // Handle Any expansion. 
+ if messageDesc.FullName() == genid.Any_message_fullname { + if e.marshalAny(m) { + return nil + } + // If unable to expand, continue on to marshal Any as a regular message. + } + + // Marshal fields. + var err error + order.RangeFields(m, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if err = e.marshalField(fd.TextName(), v, fd); err != nil { + return false + } + return true + }) + if err != nil { + return err + } + + // Marshal unknown fields. + if e.opts.EmitUnknown { + e.marshalUnknown(m.GetUnknown()) + } + + return nil +} + +// marshalField marshals the given field with protoreflect.Value. +func (e encoder) marshalField(name string, val pref.Value, fd pref.FieldDescriptor) error { + switch { + case fd.IsList(): + return e.marshalList(name, val.List(), fd) + case fd.IsMap(): + return e.marshalMap(name, val.Map(), fd) + default: + e.WriteName(name) + return e.marshalSingular(val, fd) + } +} + +// marshalSingular marshals the given non-repeated field value. This includes +// all scalar types, enums, messages, and groups. +func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error { + kind := fd.Kind() + switch kind { + case pref.BoolKind: + e.WriteBool(val.Bool()) + + case pref.StringKind: + s := val.String() + if !e.opts.allowInvalidUTF8 && strs.EnforceUTF8(fd) && !utf8.ValidString(s) { + return errors.InvalidUTF8(string(fd.FullName())) + } + e.WriteString(s) + + case pref.Int32Kind, pref.Int64Kind, + pref.Sint32Kind, pref.Sint64Kind, + pref.Sfixed32Kind, pref.Sfixed64Kind: + e.WriteInt(val.Int()) + + case pref.Uint32Kind, pref.Uint64Kind, + pref.Fixed32Kind, pref.Fixed64Kind: + e.WriteUint(val.Uint()) + + case pref.FloatKind: + // Encoder.WriteFloat handles the special numbers NaN and infinites. + e.WriteFloat(val.Float(), 32) + + case pref.DoubleKind: + // Encoder.WriteFloat handles the special numbers NaN and infinites. + e.WriteFloat(val.Float(), 64) + + case pref.BytesKind: + e.WriteString(string(val.Bytes())) + + case pref.EnumKind: + num := val.Enum() + if desc := fd.Enum().Values().ByNumber(num); desc != nil { + e.WriteLiteral(string(desc.Name())) + } else { + // Use numeric value if there is no enum description. + e.WriteInt(int64(num)) + } + + case pref.MessageKind, pref.GroupKind: + return e.marshalMessage(val.Message(), true) + + default: + panic(fmt.Sprintf("%v has unknown kind: %v", fd.FullName(), kind)) + } + return nil +} + +// marshalList marshals the given protoreflect.List as multiple name-value fields. +func (e encoder) marshalList(name string, list pref.List, fd pref.FieldDescriptor) error { + size := list.Len() + for i := 0; i < size; i++ { + e.WriteName(name) + if err := e.marshalSingular(list.Get(i), fd); err != nil { + return err + } + } + return nil +} + +// marshalMap marshals the given protoreflect.Map as multiple name-value fields. +func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) error { + var err error + order.RangeEntries(mmap, order.GenericKeyOrder, func(key pref.MapKey, val pref.Value) bool { + e.WriteName(name) + e.StartMessage() + defer e.EndMessage() + + e.WriteName(string(genid.MapEntry_Key_field_name)) + err = e.marshalSingular(key.Value(), fd.MapKey()) + if err != nil { + return false + } + + e.WriteName(string(genid.MapEntry_Value_field_name)) + err = e.marshalSingular(val, fd.MapValue()) + if err != nil { + return false + } + return true + }) + return err +} + +// marshalUnknown parses the given []byte and marshals fields out. 
+// This function assumes proper encoding in the given []byte. +func (e encoder) marshalUnknown(b []byte) { + const dec = 10 + const hex = 16 + for len(b) > 0 { + num, wtype, n := protowire.ConsumeTag(b) + b = b[n:] + e.WriteName(strconv.FormatInt(int64(num), dec)) + + switch wtype { + case protowire.VarintType: + var v uint64 + v, n = protowire.ConsumeVarint(b) + e.WriteUint(v) + case protowire.Fixed32Type: + var v uint32 + v, n = protowire.ConsumeFixed32(b) + e.WriteLiteral("0x" + strconv.FormatUint(uint64(v), hex)) + case protowire.Fixed64Type: + var v uint64 + v, n = protowire.ConsumeFixed64(b) + e.WriteLiteral("0x" + strconv.FormatUint(v, hex)) + case protowire.BytesType: + var v []byte + v, n = protowire.ConsumeBytes(b) + e.WriteString(string(v)) + case protowire.StartGroupType: + e.StartMessage() + var v []byte + v, n = protowire.ConsumeGroup(num, b) + e.marshalUnknown(v) + e.EndMessage() + default: + panic(fmt.Sprintf("prototext: error parsing unknown field wire type: %v", wtype)) + } + + b = b[n:] + } +} + +// marshalAny marshals the given google.protobuf.Any message in expanded form. +// It returns true if it was able to marshal, else false. +func (e encoder) marshalAny(any pref.Message) bool { + // Construct the embedded message. + fds := any.Descriptor().Fields() + fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) + typeURL := any.Get(fdType).String() + mt, err := e.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return false + } + m := mt.New().Interface() + + // Unmarshal bytes into embedded message. + fdValue := fds.ByNumber(genid.Any_Value_field_number) + value := any.Get(fdValue) + err = proto.UnmarshalOptions{ + AllowPartial: true, + Resolver: e.opts.Resolver, + }.Unmarshal(value.Bytes(), m) + if err != nil { + return false + } + + // Get current encoder position. If marshaling fails, reset encoder output + // back to this position. + pos := e.Snapshot() + + // Field name is the proto field name enclosed in []. + e.WriteName("[" + typeURL + "]") + err = e.marshalMessage(m.ProtoReflect(), true) + if err != nil { + e.Reset(pos) + return false + } + return true +} diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go new file mode 100644 index 000000000..9c61112f5 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -0,0 +1,547 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protowire parses and formats the raw wire encoding. +// See https://developers.google.com/protocol-buffers/docs/encoding. +// +// For marshaling and unmarshaling entire protobuf messages, +// use the "google.golang.org/protobuf/proto" package instead. +package protowire + +import ( + "io" + "math" + "math/bits" + + "google.golang.org/protobuf/internal/errors" +) + +// Number represents the field number. +type Number int32 + +const ( + MinValidNumber Number = 1 + FirstReservedNumber Number = 19000 + LastReservedNumber Number = 19999 + MaxValidNumber Number = 1<<29 - 1 + DefaultRecursionLimit = 10000 +) + +// IsValid reports whether the field number is semantically valid. +// +// Note that while numbers within the reserved range are semantically invalid, +// they are syntactically valid in the wire format. +// Implementations may treat records with reserved field numbers as unknown. 
+func (n Number) IsValid() bool { + return MinValidNumber <= n && n < FirstReservedNumber || LastReservedNumber < n && n <= MaxValidNumber +} + +// Type represents the wire type. +type Type int8 + +const ( + VarintType Type = 0 + Fixed32Type Type = 5 + Fixed64Type Type = 1 + BytesType Type = 2 + StartGroupType Type = 3 + EndGroupType Type = 4 +) + +const ( + _ = -iota + errCodeTruncated + errCodeFieldNumber + errCodeOverflow + errCodeReserved + errCodeEndGroup + errCodeRecursionDepth +) + +var ( + errFieldNumber = errors.New("invalid field number") + errOverflow = errors.New("variable length integer overflow") + errReserved = errors.New("cannot parse reserved wire type") + errEndGroup = errors.New("mismatching end group marker") + errParse = errors.New("parse error") +) + +// ParseError converts an error code into an error value. +// This returns nil if n is a non-negative number. +func ParseError(n int) error { + if n >= 0 { + return nil + } + switch n { + case errCodeTruncated: + return io.ErrUnexpectedEOF + case errCodeFieldNumber: + return errFieldNumber + case errCodeOverflow: + return errOverflow + case errCodeReserved: + return errReserved + case errCodeEndGroup: + return errEndGroup + default: + return errParse + } +} + +// ConsumeField parses an entire field record (both tag and value) and returns +// the field number, the wire type, and the total length. +// This returns a negative length upon an error (see ParseError). +// +// The total length includes the tag header and the end group marker (if the +// field is a group). +func ConsumeField(b []byte) (Number, Type, int) { + num, typ, n := ConsumeTag(b) + if n < 0 { + return 0, 0, n // forward error code + } + m := ConsumeFieldValue(num, typ, b[n:]) + if m < 0 { + return 0, 0, m // forward error code + } + return num, typ, n + m +} + +// ConsumeFieldValue parses a field value and returns its length. +// This assumes that the field Number and wire Type have already been parsed. +// This returns a negative length upon an error (see ParseError). +// +// When parsing a group, the length includes the end group marker and +// the end group is verified to match the starting field number. +func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { + return consumeFieldValueD(num, typ, b, DefaultRecursionLimit) +} + +func consumeFieldValueD(num Number, typ Type, b []byte, depth int) (n int) { + switch typ { + case VarintType: + _, n = ConsumeVarint(b) + return n + case Fixed32Type: + _, n = ConsumeFixed32(b) + return n + case Fixed64Type: + _, n = ConsumeFixed64(b) + return n + case BytesType: + _, n = ConsumeBytes(b) + return n + case StartGroupType: + if depth < 0 { + return errCodeRecursionDepth + } + n0 := len(b) + for { + num2, typ2, n := ConsumeTag(b) + if n < 0 { + return n // forward error code + } + b = b[n:] + if typ2 == EndGroupType { + if num != num2 { + return errCodeEndGroup + } + return n0 - len(b) + } + + n = consumeFieldValueD(num2, typ2, b, depth-1) + if n < 0 { + return n // forward error code + } + b = b[n:] + } + case EndGroupType: + return errCodeEndGroup + default: + return errCodeReserved + } +} + +// AppendTag encodes num and typ as a varint-encoded tag and appends it to b. +func AppendTag(b []byte, num Number, typ Type) []byte { + return AppendVarint(b, EncodeTag(num, typ)) +} + +// ConsumeTag parses b as a varint-encoded tag, reporting its length. +// This returns a negative length upon an error (see ParseError). 
+func ConsumeTag(b []byte) (Number, Type, int) { + v, n := ConsumeVarint(b) + if n < 0 { + return 0, 0, n // forward error code + } + num, typ := DecodeTag(v) + if num < MinValidNumber { + return 0, 0, errCodeFieldNumber + } + return num, typ, n +} + +func SizeTag(num Number) int { + return SizeVarint(EncodeTag(num, 0)) // wire type has no effect on size +} + +// AppendVarint appends v to b as a varint-encoded uint64. +func AppendVarint(b []byte, v uint64) []byte { + switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +// ConsumeVarint parses b as a varint-encoded uint64, reporting its length. +// This returns a negative length upon an error (see ParseError). 
+func ConsumeVarint(b []byte) (v uint64, n int) { + var y uint64 + if len(b) <= 0 { + return 0, errCodeTruncated + } + v = uint64(b[0]) + if v < 0x80 { + return v, 1 + } + v -= 0x80 + + if len(b) <= 1 { + return 0, errCodeTruncated + } + y = uint64(b[1]) + v += y << 7 + if y < 0x80 { + return v, 2 + } + v -= 0x80 << 7 + + if len(b) <= 2 { + return 0, errCodeTruncated + } + y = uint64(b[2]) + v += y << 14 + if y < 0x80 { + return v, 3 + } + v -= 0x80 << 14 + + if len(b) <= 3 { + return 0, errCodeTruncated + } + y = uint64(b[3]) + v += y << 21 + if y < 0x80 { + return v, 4 + } + v -= 0x80 << 21 + + if len(b) <= 4 { + return 0, errCodeTruncated + } + y = uint64(b[4]) + v += y << 28 + if y < 0x80 { + return v, 5 + } + v -= 0x80 << 28 + + if len(b) <= 5 { + return 0, errCodeTruncated + } + y = uint64(b[5]) + v += y << 35 + if y < 0x80 { + return v, 6 + } + v -= 0x80 << 35 + + if len(b) <= 6 { + return 0, errCodeTruncated + } + y = uint64(b[6]) + v += y << 42 + if y < 0x80 { + return v, 7 + } + v -= 0x80 << 42 + + if len(b) <= 7 { + return 0, errCodeTruncated + } + y = uint64(b[7]) + v += y << 49 + if y < 0x80 { + return v, 8 + } + v -= 0x80 << 49 + + if len(b) <= 8 { + return 0, errCodeTruncated + } + y = uint64(b[8]) + v += y << 56 + if y < 0x80 { + return v, 9 + } + v -= 0x80 << 56 + + if len(b) <= 9 { + return 0, errCodeTruncated + } + y = uint64(b[9]) + v += y << 63 + if y < 2 { + return v, 10 + } + return 0, errCodeOverflow +} + +// SizeVarint returns the encoded size of a varint. +// The size is guaranteed to be within 1 and 10, inclusive. +func SizeVarint(v uint64) int { + // This computes 1 + (bits.Len64(v)-1)/7. + // 9/64 is a good enough approximation of 1/7 + return int(9*uint32(bits.Len64(v))+64) / 64 +} + +// AppendFixed32 appends v to b as a little-endian uint32. +func AppendFixed32(b []byte, v uint32) []byte { + return append(b, + byte(v>>0), + byte(v>>8), + byte(v>>16), + byte(v>>24)) +} + +// ConsumeFixed32 parses b as a little-endian uint32, reporting its length. +// This returns a negative length upon an error (see ParseError). +func ConsumeFixed32(b []byte) (v uint32, n int) { + if len(b) < 4 { + return 0, errCodeTruncated + } + v = uint32(b[0])<<0 | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + return v, 4 +} + +// SizeFixed32 returns the encoded size of a fixed32; which is always 4. +func SizeFixed32() int { + return 4 +} + +// AppendFixed64 appends v to b as a little-endian uint64. +func AppendFixed64(b []byte, v uint64) []byte { + return append(b, + byte(v>>0), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) +} + +// ConsumeFixed64 parses b as a little-endian uint64, reporting its length. +// This returns a negative length upon an error (see ParseError). +func ConsumeFixed64(b []byte) (v uint64, n int) { + if len(b) < 8 { + return 0, errCodeTruncated + } + v = uint64(b[0])<<0 | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + return v, 8 +} + +// SizeFixed64 returns the encoded size of a fixed64; which is always 8. +func SizeFixed64() int { + return 8 +} + +// AppendBytes appends v to b as a length-prefixed bytes value. +func AppendBytes(b []byte, v []byte) []byte { + return append(AppendVarint(b, uint64(len(v))), v...) +} + +// ConsumeBytes parses b as a length-prefixed bytes value, reporting its length. +// This returns a negative length upon an error (see ParseError). 
+func ConsumeBytes(b []byte) (v []byte, n int) { + m, n := ConsumeVarint(b) + if n < 0 { + return nil, n // forward error code + } + if m > uint64(len(b[n:])) { + return nil, errCodeTruncated + } + return b[n:][:m], n + int(m) +} + +// SizeBytes returns the encoded size of a length-prefixed bytes value, +// given only the length. +func SizeBytes(n int) int { + return SizeVarint(uint64(n)) + n +} + +// AppendString appends v to b as a length-prefixed bytes value. +func AppendString(b []byte, v string) []byte { + return append(AppendVarint(b, uint64(len(v))), v...) +} + +// ConsumeString parses b as a length-prefixed bytes value, reporting its length. +// This returns a negative length upon an error (see ParseError). +func ConsumeString(b []byte) (v string, n int) { + bb, n := ConsumeBytes(b) + return string(bb), n +} + +// AppendGroup appends v to b as group value, with a trailing end group marker. +// The value v must not contain the end marker. +func AppendGroup(b []byte, num Number, v []byte) []byte { + return AppendVarint(append(b, v...), EncodeTag(num, EndGroupType)) +} + +// ConsumeGroup parses b as a group value until the trailing end group marker, +// and verifies that the end marker matches the provided num. The value v +// does not contain the end marker, while the length does contain the end marker. +// This returns a negative length upon an error (see ParseError). +func ConsumeGroup(num Number, b []byte) (v []byte, n int) { + n = ConsumeFieldValue(num, StartGroupType, b) + if n < 0 { + return nil, n // forward error code + } + b = b[:n] + + // Truncate off end group marker, but need to handle denormalized varints. + // Assuming end marker is never 0 (which is always the case since + // EndGroupType is non-zero), we can truncate all trailing bytes where the + // lower 7 bits are all zero (implying that the varint is denormalized). + for len(b) > 0 && b[len(b)-1]&0x7f == 0 { + b = b[:len(b)-1] + } + b = b[:len(b)-SizeTag(num)] + return b, n +} + +// SizeGroup returns the encoded size of a group, given only the length. +func SizeGroup(num Number, n int) int { + return n + SizeTag(num) +} + +// DecodeTag decodes the field Number and wire Type from its unified form. +// The Number is -1 if the decoded field number overflows int32. +// Other than overflow, this does not check for field number validity. +func DecodeTag(x uint64) (Number, Type) { + // NOTE: MessageSet allows for larger field numbers than normal. + if x>>3 > uint64(math.MaxInt32) { + return -1, 0 + } + return Number(x >> 3), Type(x & 7) +} + +// EncodeTag encodes the field Number and wire Type into its unified form. +func EncodeTag(num Number, typ Type) uint64 { + return uint64(num)<<3 | uint64(typ&7) +} + +// DecodeZigZag decodes a zig-zag-encoded uint64 as an int64. +// Input: {…, 5, 3, 1, 0, 2, 4, 6, …} +// Output: {…, -3, -2, -1, 0, +1, +2, +3, …} +func DecodeZigZag(x uint64) int64 { + return int64(x>>1) ^ int64(x)<<63>>63 +} + +// EncodeZigZag encodes an int64 as a zig-zag-encoded uint64. +// Input: {…, -3, -2, -1, 0, +1, +2, +3, …} +// Output: {…, 5, 3, 1, 0, 2, 4, 6, …} +func EncodeZigZag(x int64) uint64 { + return uint64(x<<1) ^ uint64(x>>63) +} + +// DecodeBool decodes a uint64 as a bool. +// Input: { 0, 1, 2, …} +// Output: {false, true, true, …} +func DecodeBool(x uint64) bool { + return x != 0 +} + +// EncodeBool encodes a bool as a uint64. 
+// Input: {false, true} +// Output: { 0, 1} +func EncodeBool(x bool) uint64 { + if x { + return 1 + } + return 0 +} diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go new file mode 100644 index 000000000..360c63329 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go @@ -0,0 +1,318 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package descfmt provides functionality to format descriptors. +package descfmt + +import ( + "fmt" + "io" + "reflect" + "strconv" + "strings" + + "google.golang.org/protobuf/internal/detrand" + "google.golang.org/protobuf/internal/pragma" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type list interface { + Len() int + pragma.DoNotImplement +} + +func FormatList(s fmt.State, r rune, vs list) { + io.WriteString(s, formatListOpt(vs, true, r == 'v' && (s.Flag('+') || s.Flag('#')))) +} +func formatListOpt(vs list, isRoot, allowMulti bool) string { + start, end := "[", "]" + if isRoot { + var name string + switch vs.(type) { + case pref.Names: + name = "Names" + case pref.FieldNumbers: + name = "FieldNumbers" + case pref.FieldRanges: + name = "FieldRanges" + case pref.EnumRanges: + name = "EnumRanges" + case pref.FileImports: + name = "FileImports" + case pref.Descriptor: + name = reflect.ValueOf(vs).MethodByName("Get").Type().Out(0).Name() + "s" + default: + name = reflect.ValueOf(vs).Elem().Type().Name() + } + start, end = name+"{", "}" + } + + var ss []string + switch vs := vs.(type) { + case pref.Names: + for i := 0; i < vs.Len(); i++ { + ss = append(ss, fmt.Sprint(vs.Get(i))) + } + return start + joinStrings(ss, false) + end + case pref.FieldNumbers: + for i := 0; i < vs.Len(); i++ { + ss = append(ss, fmt.Sprint(vs.Get(i))) + } + return start + joinStrings(ss, false) + end + case pref.FieldRanges: + for i := 0; i < vs.Len(); i++ { + r := vs.Get(i) + if r[0]+1 == r[1] { + ss = append(ss, fmt.Sprintf("%d", r[0])) + } else { + ss = append(ss, fmt.Sprintf("%d:%d", r[0], r[1])) // enum ranges are end exclusive + } + } + return start + joinStrings(ss, false) + end + case pref.EnumRanges: + for i := 0; i < vs.Len(); i++ { + r := vs.Get(i) + if r[0] == r[1] { + ss = append(ss, fmt.Sprintf("%d", r[0])) + } else { + ss = append(ss, fmt.Sprintf("%d:%d", r[0], int64(r[1])+1)) // enum ranges are end inclusive + } + } + return start + joinStrings(ss, false) + end + case pref.FileImports: + for i := 0; i < vs.Len(); i++ { + var rs records + rs.Append(reflect.ValueOf(vs.Get(i)), "Path", "Package", "IsPublic", "IsWeak") + ss = append(ss, "{"+rs.Join()+"}") + } + return start + joinStrings(ss, allowMulti) + end + default: + _, isEnumValue := vs.(pref.EnumValueDescriptors) + for i := 0; i < vs.Len(); i++ { + m := reflect.ValueOf(vs).MethodByName("Get") + v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface() + ss = append(ss, formatDescOpt(v.(pref.Descriptor), false, allowMulti && !isEnumValue)) + } + return start + joinStrings(ss, allowMulti && isEnumValue) + end + } +} + +// descriptorAccessors is a list of accessors to print for each descriptor. +// +// Do not print all accessors since some contain redundant information, +// while others are pointers that we do not want to follow since the descriptor +// is actually a cyclic graph. +// +// Using a list allows us to print the accessors in a sensible order. 
+var descriptorAccessors = map[reflect.Type][]string{ + reflect.TypeOf((*pref.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"}, + reflect.TypeOf((*pref.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"}, + reflect.TypeOf((*pref.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"}, + reflect.TypeOf((*pref.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt + reflect.TypeOf((*pref.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"}, + reflect.TypeOf((*pref.EnumValueDescriptor)(nil)).Elem(): {"Number"}, + reflect.TypeOf((*pref.ServiceDescriptor)(nil)).Elem(): {"Methods"}, + reflect.TypeOf((*pref.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"}, +} + +func FormatDesc(s fmt.State, r rune, t pref.Descriptor) { + io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')))) +} +func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { + rv := reflect.ValueOf(t) + rt := rv.MethodByName("ProtoType").Type().In(0) + + start, end := "{", "}" + if isRoot { + start = rt.Name() + "{" + } + + _, isFile := t.(pref.FileDescriptor) + rs := records{allowMulti: allowMulti} + if t.IsPlaceholder() { + if isFile { + rs.Append(rv, "Path", "Package", "IsPlaceholder") + } else { + rs.Append(rv, "FullName", "IsPlaceholder") + } + } else { + switch { + case isFile: + rs.Append(rv, "Syntax") + case isRoot: + rs.Append(rv, "Syntax", "FullName") + default: + rs.Append(rv, "Name") + } + switch t := t.(type) { + case pref.FieldDescriptor: + for _, s := range descriptorAccessors[rt] { + switch s { + case "MapKey": + if k := t.MapKey(); k != nil { + rs.recs = append(rs.recs, [2]string{"MapKey", k.Kind().String()}) + } + case "MapValue": + if v := t.MapValue(); v != nil { + switch v.Kind() { + case pref.EnumKind: + rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Enum().FullName())}) + case pref.MessageKind, pref.GroupKind: + rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Message().FullName())}) + default: + rs.recs = append(rs.recs, [2]string{"MapValue", v.Kind().String()}) + } + } + case "ContainingOneof": + if od := t.ContainingOneof(); od != nil { + rs.recs = append(rs.recs, [2]string{"Oneof", string(od.Name())}) + } + case "ContainingMessage": + if t.IsExtension() { + rs.recs = append(rs.recs, [2]string{"Extendee", string(t.ContainingMessage().FullName())}) + } + case "Message": + if !t.IsMap() { + rs.Append(rv, s) + } + default: + rs.Append(rv, s) + } + } + case pref.OneofDescriptor: + var ss []string + fs := t.Fields() + for i := 0; i < fs.Len(); i++ { + ss = append(ss, string(fs.Get(i).Name())) + } + if len(ss) > 0 { + rs.recs = append(rs.recs, [2]string{"Fields", "[" + joinStrings(ss, false) + "]"}) + } + default: + rs.Append(rv, descriptorAccessors[rt]...) 
+ } + if rv.MethodByName("GoType").IsValid() { + rs.Append(rv, "GoType") + } + } + return start + rs.Join() + end +} + +type records struct { + recs [][2]string + allowMulti bool +} + +func (rs *records) Append(v reflect.Value, accessors ...string) { + for _, a := range accessors { + var rv reflect.Value + if m := v.MethodByName(a); m.IsValid() { + rv = m.Call(nil)[0] + } + if v.Kind() == reflect.Struct && !rv.IsValid() { + rv = v.FieldByName(a) + } + if !rv.IsValid() { + panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a)) + } + if _, ok := rv.Interface().(pref.Value); ok { + rv = rv.MethodByName("Interface").Call(nil)[0] + if !rv.IsNil() { + rv = rv.Elem() + } + } + + // Ignore zero values. + var isZero bool + switch rv.Kind() { + case reflect.Interface, reflect.Slice: + isZero = rv.IsNil() + case reflect.Bool: + isZero = rv.Bool() == false + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + isZero = rv.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + isZero = rv.Uint() == 0 + case reflect.String: + isZero = rv.String() == "" + } + if n, ok := rv.Interface().(list); ok { + isZero = n.Len() == 0 + } + if isZero { + continue + } + + // Format the value. + var s string + v := rv.Interface() + switch v := v.(type) { + case list: + s = formatListOpt(v, false, rs.allowMulti) + case pref.FieldDescriptor, pref.OneofDescriptor, pref.EnumValueDescriptor, pref.MethodDescriptor: + s = string(v.(pref.Descriptor).Name()) + case pref.Descriptor: + s = string(v.FullName()) + case string: + s = strconv.Quote(v) + case []byte: + s = fmt.Sprintf("%q", v) + default: + s = fmt.Sprint(v) + } + rs.recs = append(rs.recs, [2]string{a, s}) + } +} + +func (rs *records) Join() string { + var ss []string + + // In single line mode, simply join all records with commas. + if !rs.allowMulti { + for _, r := range rs.recs { + ss = append(ss, r[0]+formatColon(0)+r[1]) + } + return joinStrings(ss, false) + } + + // In allowMulti line mode, align single line records for more readable output. + var maxLen int + flush := func(i int) { + for _, r := range rs.recs[len(ss):i] { + ss = append(ss, r[0]+formatColon(maxLen-len(r[0]))+r[1]) + } + maxLen = 0 + } + for i, r := range rs.recs { + if isMulti := strings.Contains(r[1], "\n"); isMulti { + flush(i) + ss = append(ss, r[0]+formatColon(0)+strings.Join(strings.Split(r[1], "\n"), "\n\t")) + } else if maxLen < len(r[0]) { + maxLen = len(r[0]) + } + } + flush(len(rs.recs)) + return joinStrings(ss, true) +} + +func formatColon(padding int) string { + // Deliberately introduce instability into the debug output to + // discourage users from performing string comparisons. + // This provides us flexibility to change the output in the future. + if detrand.Bool() { + return ":" + strings.Repeat(" ", 1+padding) // use non-breaking spaces (U+00a0) + } else { + return ":" + strings.Repeat(" ", 1+padding) // use regular spaces (U+0020) + } +} + +func joinStrings(ss []string, isMulti bool) string { + if len(ss) == 0 { + return "" + } + if isMulti { + return "\n\t" + strings.Join(ss, "\n\t") + "\n" + } + return strings.Join(ss, ", ") +} diff --git a/vendor/google.golang.org/protobuf/internal/descopts/options.go b/vendor/google.golang.org/protobuf/internal/descopts/options.go new file mode 100644 index 000000000..8401be8c8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/descopts/options.go @@ -0,0 +1,29 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package descopts contains the nil pointers to concrete descriptor options. +// +// This package exists as a form of reverse dependency injection so that certain +// packages (e.g., internal/filedesc and internal/filetype can avoid a direct +// dependency on the descriptor proto package). +package descopts + +import pref "google.golang.org/protobuf/reflect/protoreflect" + +// These variables are set by the init function in descriptor.pb.go via logic +// in internal/filetype. In other words, so long as the descriptor proto package +// is linked in, these variables will be populated. +// +// Each variable is populated with a nil pointer to the options struct. +var ( + File pref.ProtoMessage + Enum pref.ProtoMessage + EnumValue pref.ProtoMessage + Message pref.ProtoMessage + Field pref.ProtoMessage + Oneof pref.ProtoMessage + ExtensionRange pref.ProtoMessage + Service pref.ProtoMessage + Method pref.ProtoMessage +) diff --git a/vendor/google.golang.org/protobuf/internal/detrand/rand.go b/vendor/google.golang.org/protobuf/internal/detrand/rand.go new file mode 100644 index 000000000..49c8676d4 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/detrand/rand.go @@ -0,0 +1,69 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package detrand provides deterministically random functionality. +// +// The pseudo-randomness of these functions is seeded by the program binary +// itself and guarantees that the output does not change within a program, +// while ensuring that the output is unstable across different builds. +package detrand + +import ( + "encoding/binary" + "hash/fnv" + "os" +) + +// Disable disables detrand such that all functions returns the zero value. +// This function is not concurrent-safe and must be called during program init. +func Disable() { + randSeed = 0 +} + +// Bool returns a deterministically random boolean. +func Bool() bool { + return randSeed%2 == 1 +} + +// Intn returns a deterministically random integer between 0 and n-1, inclusive. +func Intn(n int) int { + if n <= 0 { + panic("must be positive") + } + return int(randSeed % uint64(n)) +} + +// randSeed is a best-effort at an approximate hash of the Go binary. +var randSeed = binaryHash() + +func binaryHash() uint64 { + // Open the Go binary. + s, err := os.Executable() + if err != nil { + return 0 + } + f, err := os.Open(s) + if err != nil { + return 0 + } + defer f.Close() + + // Hash the size and several samples of the Go binary. + const numSamples = 8 + var buf [64]byte + h := fnv.New64() + fi, err := f.Stat() + if err != nil { + return 0 + } + binary.LittleEndian.PutUint64(buf[:8], uint64(fi.Size())) + h.Write(buf[:8]) + for i := int64(0); i < numSamples; i++ { + if _, err := f.ReadAt(buf[:], i*fi.Size()/numSamples); err != nil { + return 0 + } + h.Write(buf[:]) + } + return h.Sum64() +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go b/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go new file mode 100644 index 000000000..fdd9b13f2 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go @@ -0,0 +1,213 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package defval marshals and unmarshals textual forms of default values. +// +// This package handles both the form historically used in Go struct field tags +// and also the form used by google.protobuf.FieldDescriptorProto.default_value +// since they differ in superficial ways. +package defval + +import ( + "fmt" + "math" + "strconv" + + ptext "google.golang.org/protobuf/internal/encoding/text" + errors "google.golang.org/protobuf/internal/errors" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// Format is the serialization format used to represent the default value. +type Format int + +const ( + _ Format = iota + + // Descriptor uses the serialization format that protoc uses with the + // google.protobuf.FieldDescriptorProto.default_value field. + Descriptor + + // GoTag uses the historical serialization format in Go struct field tags. + GoTag +) + +// Unmarshal deserializes the default string s according to the given kind k. +// When k is an enum, a list of enum value descriptors must be provided. +func Unmarshal(s string, k pref.Kind, evs pref.EnumValueDescriptors, f Format) (pref.Value, pref.EnumValueDescriptor, error) { + switch k { + case pref.BoolKind: + if f == GoTag { + switch s { + case "1": + return pref.ValueOfBool(true), nil, nil + case "0": + return pref.ValueOfBool(false), nil, nil + } + } else { + switch s { + case "true": + return pref.ValueOfBool(true), nil, nil + case "false": + return pref.ValueOfBool(false), nil, nil + } + } + case pref.EnumKind: + if f == GoTag { + // Go tags use the numeric form of the enum value. + if n, err := strconv.ParseInt(s, 10, 32); err == nil { + if ev := evs.ByNumber(pref.EnumNumber(n)); ev != nil { + return pref.ValueOfEnum(ev.Number()), ev, nil + } + } + } else { + // Descriptor default_value use the enum identifier. + ev := evs.ByName(pref.Name(s)) + if ev != nil { + return pref.ValueOfEnum(ev.Number()), ev, nil + } + } + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + if v, err := strconv.ParseInt(s, 10, 32); err == nil { + return pref.ValueOfInt32(int32(v)), nil, nil + } + case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + if v, err := strconv.ParseInt(s, 10, 64); err == nil { + return pref.ValueOfInt64(int64(v)), nil, nil + } + case pref.Uint32Kind, pref.Fixed32Kind: + if v, err := strconv.ParseUint(s, 10, 32); err == nil { + return pref.ValueOfUint32(uint32(v)), nil, nil + } + case pref.Uint64Kind, pref.Fixed64Kind: + if v, err := strconv.ParseUint(s, 10, 64); err == nil { + return pref.ValueOfUint64(uint64(v)), nil, nil + } + case pref.FloatKind, pref.DoubleKind: + var v float64 + var err error + switch s { + case "-inf": + v = math.Inf(-1) + case "inf": + v = math.Inf(+1) + case "nan": + v = math.NaN() + default: + v, err = strconv.ParseFloat(s, 64) + } + if err == nil { + if k == pref.FloatKind { + return pref.ValueOfFloat32(float32(v)), nil, nil + } else { + return pref.ValueOfFloat64(float64(v)), nil, nil + } + } + case pref.StringKind: + // String values are already unescaped and can be used as is. + return pref.ValueOfString(s), nil, nil + case pref.BytesKind: + if b, ok := unmarshalBytes(s); ok { + return pref.ValueOfBytes(b), nil, nil + } + } + return pref.Value{}, nil, errors.New("could not parse value for %v: %q", k, s) +} + +// Marshal serializes v as the default string according to the given kind k. +// When specifying the Descriptor format for an enum kind, the associated +// enum value descriptor must be provided. 
+func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) (string, error) { + switch k { + case pref.BoolKind: + if f == GoTag { + if v.Bool() { + return "1", nil + } else { + return "0", nil + } + } else { + if v.Bool() { + return "true", nil + } else { + return "false", nil + } + } + case pref.EnumKind: + if f == GoTag { + return strconv.FormatInt(int64(v.Enum()), 10), nil + } else { + return string(ev.Name()), nil + } + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind, pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + return strconv.FormatInt(v.Int(), 10), nil + case pref.Uint32Kind, pref.Fixed32Kind, pref.Uint64Kind, pref.Fixed64Kind: + return strconv.FormatUint(v.Uint(), 10), nil + case pref.FloatKind, pref.DoubleKind: + f := v.Float() + switch { + case math.IsInf(f, -1): + return "-inf", nil + case math.IsInf(f, +1): + return "inf", nil + case math.IsNaN(f): + return "nan", nil + default: + if k == pref.FloatKind { + return strconv.FormatFloat(f, 'g', -1, 32), nil + } else { + return strconv.FormatFloat(f, 'g', -1, 64), nil + } + } + case pref.StringKind: + // String values are serialized as is without any escaping. + return v.String(), nil + case pref.BytesKind: + if s, ok := marshalBytes(v.Bytes()); ok { + return s, nil + } + } + return "", errors.New("could not format value for %v: %v", k, v) +} + +// unmarshalBytes deserializes bytes by applying C unescaping. +func unmarshalBytes(s string) ([]byte, bool) { + // Bytes values use the same escaping as the text format, + // however they lack the surrounding double quotes. + v, err := ptext.UnmarshalString(`"` + s + `"`) + if err != nil { + return nil, false + } + return []byte(v), true +} + +// marshalBytes serializes bytes by using C escaping. +// To match the exact output of protoc, this is identical to the +// CEscape function in strutil.cc of the protoc source code. +func marshalBytes(b []byte) (string, bool) { + var s []byte + for _, c := range b { + switch c { + case '\n': + s = append(s, `\n`...) + case '\r': + s = append(s, `\r`...) + case '\t': + s = append(s, `\t`...) + case '"': + s = append(s, `\"`...) + case '\'': + s = append(s, `\'`...) + case '\\': + s = append(s, `\\`...) + default: + if printableASCII := c >= 0x20 && c <= 0x7e; printableASCII { + s = append(s, c) + } else { + s = append(s, fmt.Sprintf(`\%03o`, c)...) + } + } + } + return string(s), true +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go new file mode 100644 index 000000000..c1866f3c1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go @@ -0,0 +1,241 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package messageset encodes and decodes the obsolete MessageSet wire format. +package messageset + +import ( + "math" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// The MessageSet wire format is equivalent to a message defined as follows, +// where each Item defines an extension field with a field number of 'type_id' +// and content of 'message'. MessageSet extensions must be non-repeated message +// fields. 
+// +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// } +// } +const ( + FieldItem = protowire.Number(1) + FieldTypeID = protowire.Number(2) + FieldMessage = protowire.Number(3) +) + +// ExtensionName is the field name for extensions of MessageSet. +// +// A valid MessageSet extension must be of the form: +// message MyMessage { +// extend proto2.bridge.MessageSet { +// optional MyMessage message_set_extension = 1234; +// } +// ... +// } +const ExtensionName = "message_set_extension" + +// IsMessageSet returns whether the message uses the MessageSet wire format. +func IsMessageSet(md pref.MessageDescriptor) bool { + xmd, ok := md.(interface{ IsMessageSet() bool }) + return ok && xmd.IsMessageSet() +} + +// IsMessageSetExtension reports this field properly extends a MessageSet. +func IsMessageSetExtension(fd pref.FieldDescriptor) bool { + switch { + case fd.Name() != ExtensionName: + return false + case !IsMessageSet(fd.ContainingMessage()): + return false + case fd.FullName().Parent() != fd.Message().FullName(): + return false + } + return true +} + +// SizeField returns the size of a MessageSet item field containing an extension +// with the given field number, not counting the contents of the message subfield. +func SizeField(num protowire.Number) int { + return 2*protowire.SizeTag(FieldItem) + protowire.SizeTag(FieldTypeID) + protowire.SizeVarint(uint64(num)) +} + +// Unmarshal parses a MessageSet. +// +// It calls fn with the type ID and value of each item in the MessageSet. +// Unknown fields are discarded. +// +// If wantLen is true, the item values include the varint length prefix. +// This is ugly, but simplifies the fast-path decoder in internal/impl. +func Unmarshal(b []byte, wantLen bool, fn func(typeID protowire.Number, value []byte) error) error { + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return protowire.ParseError(n) + } + b = b[n:] + if num != FieldItem || wtyp != protowire.StartGroupType { + n := protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return protowire.ParseError(n) + } + b = b[n:] + continue + } + typeID, value, n, err := ConsumeFieldValue(b, wantLen) + if err != nil { + return err + } + b = b[n:] + if typeID == 0 { + continue + } + if err := fn(typeID, value); err != nil { + return err + } + } + return nil +} + +// ConsumeFieldValue parses b as a MessageSet item field value until and including +// the trailing end group marker. It assumes the start group tag has already been parsed. +// It returns the contents of the type_id and message subfields and the total +// item length. +// +// If wantLen is true, the returned message value includes the length prefix. +func ConsumeFieldValue(b []byte, wantLen bool) (typeid protowire.Number, message []byte, n int, err error) { + ilen := len(b) + for { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return 0, nil, 0, protowire.ParseError(n) + } + b = b[n:] + switch { + case num == FieldItem && wtyp == protowire.EndGroupType: + if wantLen && len(message) == 0 { + // The message field was missing, which should never happen. + // Be prepared for this case anyway. 
+ message = protowire.AppendVarint(message, 0) + } + return typeid, message, ilen - len(b), nil + case num == FieldTypeID && wtyp == protowire.VarintType: + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, nil, 0, protowire.ParseError(n) + } + b = b[n:] + if v < 1 || v > math.MaxInt32 { + return 0, nil, 0, errors.New("invalid type_id in message set") + } + typeid = protowire.Number(v) + case num == FieldMessage && wtyp == protowire.BytesType: + m, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, nil, 0, protowire.ParseError(n) + } + if message == nil { + if wantLen { + message = b[:n:n] + } else { + message = m[:len(m):len(m)] + } + } else { + // This case should never happen in practice, but handle it for + // correctness: The MessageSet item contains multiple message + // fields, which need to be merged. + // + // In the case where we're returning the length, this becomes + // quite inefficient since we need to strip the length off + // the existing data and reconstruct it with the combined length. + if wantLen { + _, nn := protowire.ConsumeVarint(message) + m0 := message[nn:] + message = nil + message = protowire.AppendVarint(message, uint64(len(m0)+len(m))) + message = append(message, m0...) + message = append(message, m...) + } else { + message = append(message, m...) + } + } + b = b[n:] + default: + // We have no place to put it, so we just ignore unknown fields. + n := protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return 0, nil, 0, protowire.ParseError(n) + } + b = b[n:] + } + } +} + +// AppendFieldStart appends the start of a MessageSet item field containing +// an extension with the given number. The caller must add the message +// subfield (including the tag). +func AppendFieldStart(b []byte, num protowire.Number) []byte { + b = protowire.AppendTag(b, FieldItem, protowire.StartGroupType) + b = protowire.AppendTag(b, FieldTypeID, protowire.VarintType) + b = protowire.AppendVarint(b, uint64(num)) + return b +} + +// AppendFieldEnd appends the trailing end group marker for a MessageSet item field. +func AppendFieldEnd(b []byte) []byte { + return protowire.AppendTag(b, FieldItem, protowire.EndGroupType) +} + +// SizeUnknown returns the size of an unknown fields section in MessageSet format. +// +// See AppendUnknown. +func SizeUnknown(unknown []byte) (size int) { + for len(unknown) > 0 { + num, typ, n := protowire.ConsumeTag(unknown) + if n < 0 || typ != protowire.BytesType { + return 0 + } + unknown = unknown[n:] + _, n = protowire.ConsumeBytes(unknown) + if n < 0 { + return 0 + } + unknown = unknown[n:] + size += SizeField(num) + protowire.SizeTag(FieldMessage) + n + } + return size +} + +// AppendUnknown appends unknown fields to b in MessageSet format. +// +// For historic reasons, unresolved items in a MessageSet are stored in a +// message's unknown fields section in non-MessageSet format. That is, an +// unknown item with typeID T and value V appears in the unknown fields as +// a field with number T and value V. +// +// This function converts the unknown fields back into MessageSet form. 
+func AppendUnknown(b, unknown []byte) ([]byte, error) { + for len(unknown) > 0 { + num, typ, n := protowire.ConsumeTag(unknown) + if n < 0 || typ != protowire.BytesType { + return nil, errors.New("invalid data in message set unknown fields") + } + unknown = unknown[n:] + _, n = protowire.ConsumeBytes(unknown) + if n < 0 { + return nil, errors.New("invalid data in message set unknown fields") + } + b = AppendFieldStart(b, num) + b = protowire.AppendTag(b, FieldMessage, protowire.BytesType) + b = append(b, unknown[:n]...) + b = AppendFieldEnd(b) + unknown = unknown[n:] + } + return b, nil +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go new file mode 100644 index 000000000..38f1931c6 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go @@ -0,0 +1,207 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tag marshals and unmarshals the legacy struct tags as generated +// by historical versions of protoc-gen-go. +package tag + +import ( + "reflect" + "strconv" + "strings" + + defval "google.golang.org/protobuf/internal/encoding/defval" + fdesc "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/strs" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +var byteType = reflect.TypeOf(byte(0)) + +// Unmarshal decodes the tag into a prototype.Field. +// +// The goType is needed to determine the original protoreflect.Kind since the +// tag does not record sufficient information to determine that. +// The type is the underlying field type (e.g., a repeated field may be +// represented by []T, but the Go type passed in is just T). +// A list of enum value descriptors must be provided for enum fields. +// This does not populate the Enum or Message (except for weak message). +// +// This function is a best effort attempt; parsing errors are ignored. 
+func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) pref.FieldDescriptor { + f := new(fdesc.Field) + f.L0.ParentFile = fdesc.SurrogateProto2 + for len(tag) > 0 { + i := strings.IndexByte(tag, ',') + if i < 0 { + i = len(tag) + } + switch s := tag[:i]; { + case strings.HasPrefix(s, "name="): + f.L0.FullName = pref.FullName(s[len("name="):]) + case strings.Trim(s, "0123456789") == "": + n, _ := strconv.ParseUint(s, 10, 32) + f.L1.Number = pref.FieldNumber(n) + case s == "opt": + f.L1.Cardinality = pref.Optional + case s == "req": + f.L1.Cardinality = pref.Required + case s == "rep": + f.L1.Cardinality = pref.Repeated + case s == "varint": + switch goType.Kind() { + case reflect.Bool: + f.L1.Kind = pref.BoolKind + case reflect.Int32: + f.L1.Kind = pref.Int32Kind + case reflect.Int64: + f.L1.Kind = pref.Int64Kind + case reflect.Uint32: + f.L1.Kind = pref.Uint32Kind + case reflect.Uint64: + f.L1.Kind = pref.Uint64Kind + } + case s == "zigzag32": + if goType.Kind() == reflect.Int32 { + f.L1.Kind = pref.Sint32Kind + } + case s == "zigzag64": + if goType.Kind() == reflect.Int64 { + f.L1.Kind = pref.Sint64Kind + } + case s == "fixed32": + switch goType.Kind() { + case reflect.Int32: + f.L1.Kind = pref.Sfixed32Kind + case reflect.Uint32: + f.L1.Kind = pref.Fixed32Kind + case reflect.Float32: + f.L1.Kind = pref.FloatKind + } + case s == "fixed64": + switch goType.Kind() { + case reflect.Int64: + f.L1.Kind = pref.Sfixed64Kind + case reflect.Uint64: + f.L1.Kind = pref.Fixed64Kind + case reflect.Float64: + f.L1.Kind = pref.DoubleKind + } + case s == "bytes": + switch { + case goType.Kind() == reflect.String: + f.L1.Kind = pref.StringKind + case goType.Kind() == reflect.Slice && goType.Elem() == byteType: + f.L1.Kind = pref.BytesKind + default: + f.L1.Kind = pref.MessageKind + } + case s == "group": + f.L1.Kind = pref.GroupKind + case strings.HasPrefix(s, "enum="): + f.L1.Kind = pref.EnumKind + case strings.HasPrefix(s, "json="): + jsonName := s[len("json="):] + if jsonName != strs.JSONCamelCase(string(f.L0.FullName.Name())) { + f.L1.StringName.InitJSON(jsonName) + } + case s == "packed": + f.L1.HasPacked = true + f.L1.IsPacked = true + case strings.HasPrefix(s, "weak="): + f.L1.IsWeak = true + f.L1.Message = fdesc.PlaceholderMessage(pref.FullName(s[len("weak="):])) + case strings.HasPrefix(s, "def="): + // The default tag is special in that everything afterwards is the + // default regardless of the presence of commas. + s, i = tag[len("def="):], len(tag) + v, ev, _ := defval.Unmarshal(s, f.L1.Kind, evs, defval.GoTag) + f.L1.Default = fdesc.DefaultValue(v, ev) + case s == "proto3": + f.L0.ParentFile = fdesc.SurrogateProto3 + } + tag = strings.TrimPrefix(tag[i:], ",") + } + + // The generator uses the group message name instead of the field name. + // We obtain the real field name by lowercasing the group name. + if f.L1.Kind == pref.GroupKind { + f.L0.FullName = pref.FullName(strings.ToLower(string(f.L0.FullName))) + } + return f +} + +// Marshal encodes the protoreflect.FieldDescriptor as a tag. +// +// The enumName must be provided if the kind is an enum. +// Historically, the formulation of the enum "name" was the proto package +// dot-concatenated with the generated Go identifier for the enum type. +// Depending on the context on how Marshal is called, there are different ways +// through which that information is determined. As such it is the caller's +// responsibility to provide a function to obtain that information. 
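For intuition, the tag strings consumed by Unmarshal above are the legacy protobuf:"..." struct tags that protoc-gen-go emits on generated message fields. A small, hypothetical illustration (the struct and field are invented for the example) of what such a tag looks like and how it is read back with the standard reflect package:

package main

import (
	"fmt"
	"reflect"
)

// example mimics a generated message struct; the tag combines several of the
// elements handled above: wire type, field number, cardinality, name and syntax.
type example struct {
	Count int32 `protobuf:"varint,1,opt,name=count,proto3"`
}

func main() {
	f, _ := reflect.TypeOf(example{}).FieldByName("Count")
	fmt.Println(f.Tag.Get("protobuf")) // varint,1,opt,name=count,proto3
}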
+func Marshal(fd pref.FieldDescriptor, enumName string) string { + var tag []string + switch fd.Kind() { + case pref.BoolKind, pref.EnumKind, pref.Int32Kind, pref.Uint32Kind, pref.Int64Kind, pref.Uint64Kind: + tag = append(tag, "varint") + case pref.Sint32Kind: + tag = append(tag, "zigzag32") + case pref.Sint64Kind: + tag = append(tag, "zigzag64") + case pref.Sfixed32Kind, pref.Fixed32Kind, pref.FloatKind: + tag = append(tag, "fixed32") + case pref.Sfixed64Kind, pref.Fixed64Kind, pref.DoubleKind: + tag = append(tag, "fixed64") + case pref.StringKind, pref.BytesKind, pref.MessageKind: + tag = append(tag, "bytes") + case pref.GroupKind: + tag = append(tag, "group") + } + tag = append(tag, strconv.Itoa(int(fd.Number()))) + switch fd.Cardinality() { + case pref.Optional: + tag = append(tag, "opt") + case pref.Required: + tag = append(tag, "req") + case pref.Repeated: + tag = append(tag, "rep") + } + if fd.IsPacked() { + tag = append(tag, "packed") + } + name := string(fd.Name()) + if fd.Kind() == pref.GroupKind { + // The name of the FieldDescriptor for a group field is + // lowercased. To find the original capitalization, we + // look in the field's MessageType. + name = string(fd.Message().Name()) + } + tag = append(tag, "name="+name) + if jsonName := fd.JSONName(); jsonName != "" && jsonName != name && !fd.IsExtension() { + // NOTE: The jsonName != name condition is suspect, but it preserve + // the exact same semantics from the previous generator. + tag = append(tag, "json="+jsonName) + } + if fd.IsWeak() { + tag = append(tag, "weak="+string(fd.Message().FullName())) + } + // The previous implementation does not tag extension fields as proto3, + // even when the field is defined in a proto3 file. Match that behavior + // for consistency. + if fd.Syntax() == pref.Proto3 && !fd.IsExtension() { + tag = append(tag, "proto3") + } + if fd.Kind() == pref.EnumKind && enumName != "" { + tag = append(tag, "enum="+enumName) + } + if fd.ContainingOneof() != nil { + tag = append(tag, "oneof") + } + // This must appear last in the tag, since commas in strings aren't escaped. + if fd.HasDefault() { + def, _ := defval.Marshal(fd.Default(), fd.DefaultEnumValue(), fd.Kind(), defval.GoTag) + tag = append(tag, "def="+def) + } + return strings.Join(tag, ",") +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go new file mode 100644 index 000000000..37803773f --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -0,0 +1,665 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package text + +import ( + "bytes" + "fmt" + "io" + "regexp" + "strconv" + "unicode/utf8" + + "google.golang.org/protobuf/internal/errors" +) + +// Decoder is a token-based textproto decoder. +type Decoder struct { + // lastCall is last method called, either readCall or peekCall. + // Initial value is readCall. + lastCall call + + // lastToken contains the last read token. + lastToken Token + + // lastErr contains the last read error. + lastErr error + + // openStack is a stack containing the byte characters for MessageOpen and + // ListOpen kinds. The top of stack represents the message or the list that + // the current token is nested in. An empty stack means the current token is + // at the top level message. The characters '{' and '<' both represent the + // MessageOpen kind. 
+ openStack []byte + + // orig is used in reporting line and column. + orig []byte + // in contains the unconsumed input. + in []byte +} + +// NewDecoder returns a Decoder to read the given []byte. +func NewDecoder(b []byte) *Decoder { + return &Decoder{orig: b, in: b} +} + +// ErrUnexpectedEOF means that EOF was encountered in the middle of the input. +var ErrUnexpectedEOF = errors.New("%v", io.ErrUnexpectedEOF) + +// call specifies which Decoder method was invoked. +type call uint8 + +const ( + readCall call = iota + peekCall +) + +// Peek looks ahead and returns the next token and error without advancing a read. +func (d *Decoder) Peek() (Token, error) { + defer func() { d.lastCall = peekCall }() + if d.lastCall == readCall { + d.lastToken, d.lastErr = d.Read() + } + return d.lastToken, d.lastErr +} + +// Read returns the next token. +// It will return an error if there is no valid token. +func (d *Decoder) Read() (Token, error) { + defer func() { d.lastCall = readCall }() + if d.lastCall == peekCall { + return d.lastToken, d.lastErr + } + + tok, err := d.parseNext(d.lastToken.kind) + if err != nil { + return Token{}, err + } + + switch tok.kind { + case comma, semicolon: + tok, err = d.parseNext(tok.kind) + if err != nil { + return Token{}, err + } + } + d.lastToken = tok + return tok, nil +} + +const ( + mismatchedFmt = "mismatched close character %q" + unexpectedFmt = "unexpected character %q" +) + +// parseNext parses the next Token based on given last kind. +func (d *Decoder) parseNext(lastKind Kind) (Token, error) { + // Trim leading spaces. + d.consume(0) + isEOF := false + if len(d.in) == 0 { + isEOF = true + } + + switch lastKind { + case EOF: + return d.consumeToken(EOF, 0, 0), nil + + case bof: + // Start of top level message. Next token can be EOF or Name. + if isEOF { + return d.consumeToken(EOF, 0, 0), nil + } + return d.parseFieldName() + + case Name: + // Next token can be MessageOpen, ListOpen or Scalar. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case '{', '<': + d.pushOpenStack(ch) + return d.consumeToken(MessageOpen, 1, 0), nil + case '[': + d.pushOpenStack(ch) + return d.consumeToken(ListOpen, 1, 0), nil + default: + return d.parseScalar() + } + + case Scalar: + openKind, closeCh := d.currentOpenKind() + switch openKind { + case bof: + // Top level message. + // Next token can be EOF, comma, semicolon or Name. + if isEOF { + return d.consumeToken(EOF, 0, 0), nil + } + switch d.in[0] { + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + case MessageOpen: + // Next token can be MessageClose, comma, semicolon or Name. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(MessageClose, 1, 0), nil + case otherCloseChar[closeCh]: + return Token{}, d.newSyntaxError(mismatchedFmt, ch) + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + case ListOpen: + // Next token can be ListClose or comma. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case ']': + d.popOpenStack() + return d.consumeToken(ListClose, 1, 0), nil + case ',': + return d.consumeToken(comma, 1, 0), nil + default: + return Token{}, d.newSyntaxError(unexpectedFmt, ch) + } + } + + case MessageOpen: + // Next token can be MessageClose or Name. 
+ if isEOF { + return Token{}, ErrUnexpectedEOF + } + _, closeCh := d.currentOpenKind() + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(MessageClose, 1, 0), nil + case otherCloseChar[closeCh]: + return Token{}, d.newSyntaxError(mismatchedFmt, ch) + default: + return d.parseFieldName() + } + + case MessageClose: + openKind, closeCh := d.currentOpenKind() + switch openKind { + case bof: + // Top level message. + // Next token can be EOF, comma, semicolon or Name. + if isEOF { + return d.consumeToken(EOF, 0, 0), nil + } + switch ch := d.in[0]; ch { + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + case MessageOpen: + // Next token can be MessageClose, comma, semicolon or Name. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(MessageClose, 1, 0), nil + case otherCloseChar[closeCh]: + return Token{}, d.newSyntaxError(mismatchedFmt, ch) + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + case ListOpen: + // Next token can be ListClose or comma + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(ListClose, 1, 0), nil + case ',': + return d.consumeToken(comma, 1, 0), nil + default: + return Token{}, d.newSyntaxError(unexpectedFmt, ch) + } + } + + case ListOpen: + // Next token can be ListClose, MessageStart or Scalar. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case ']': + d.popOpenStack() + return d.consumeToken(ListClose, 1, 0), nil + case '{', '<': + d.pushOpenStack(ch) + return d.consumeToken(MessageOpen, 1, 0), nil + default: + return d.parseScalar() + } + + case ListClose: + openKind, closeCh := d.currentOpenKind() + switch openKind { + case bof: + // Top level message. + // Next token can be EOF, comma, semicolon or Name. + if isEOF { + return d.consumeToken(EOF, 0, 0), nil + } + switch ch := d.in[0]; ch { + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + case MessageOpen: + // Next token can be MessageClose, comma, semicolon or Name. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(MessageClose, 1, 0), nil + case otherCloseChar[closeCh]: + return Token{}, d.newSyntaxError(mismatchedFmt, ch) + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + default: + // It is not possible to have this case. Let it panic below. + } + + case comma, semicolon: + openKind, closeCh := d.currentOpenKind() + switch openKind { + case bof: + // Top level message. Next token can be EOF or Name. + if isEOF { + return d.consumeToken(EOF, 0, 0), nil + } + return d.parseFieldName() + + case MessageOpen: + // Next token can be MessageClose or Name. 
+ if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(MessageClose, 1, 0), nil + case otherCloseChar[closeCh]: + return Token{}, d.newSyntaxError(mismatchedFmt, ch) + default: + return d.parseFieldName() + } + + case ListOpen: + if lastKind == semicolon { + // It is not be possible to have this case as logic here + // should not have produced a semicolon Token when inside a + // list. Let it panic below. + break + } + // Next token can be MessageOpen or Scalar. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case '{', '<': + d.pushOpenStack(ch) + return d.consumeToken(MessageOpen, 1, 0), nil + default: + return d.parseScalar() + } + } + } + + line, column := d.Position(len(d.orig) - len(d.in)) + panic(fmt.Sprintf("Decoder.parseNext: bug at handling line %d:%d with lastKind=%v", line, column, lastKind)) +} + +var otherCloseChar = map[byte]byte{ + '}': '>', + '>': '}', +} + +// currentOpenKind indicates whether current position is inside a message, list +// or top-level message by returning MessageOpen, ListOpen or bof respectively. +// If the returned kind is either a MessageOpen or ListOpen, it also returns the +// corresponding closing character. +func (d *Decoder) currentOpenKind() (Kind, byte) { + if len(d.openStack) == 0 { + return bof, 0 + } + openCh := d.openStack[len(d.openStack)-1] + switch openCh { + case '{': + return MessageOpen, '}' + case '<': + return MessageOpen, '>' + case '[': + return ListOpen, ']' + } + panic(fmt.Sprintf("Decoder: openStack contains invalid byte %c", openCh)) +} + +func (d *Decoder) pushOpenStack(ch byte) { + d.openStack = append(d.openStack, ch) +} + +func (d *Decoder) popOpenStack() { + d.openStack = d.openStack[:len(d.openStack)-1] +} + +// parseFieldName parses field name and separator. +func (d *Decoder) parseFieldName() (tok Token, err error) { + defer func() { + if err == nil && d.tryConsumeChar(':') { + tok.attrs |= hasSeparator + } + }() + + // Extension or Any type URL. + if d.in[0] == '[' { + return d.parseTypeName() + } + + // Identifier. + if size := parseIdent(d.in, false); size > 0 { + return d.consumeToken(Name, size, uint8(IdentName)), nil + } + + // Field number. Identify if input is a valid number that is not negative + // and is decimal integer within 32-bit range. + if num := parseNumber(d.in); num.size > 0 { + if !num.neg && num.kind == numDec { + if _, err := strconv.ParseInt(string(d.in[:num.size]), 10, 32); err == nil { + return d.consumeToken(Name, num.size, uint8(FieldNumber)), nil + } + } + return Token{}, d.newSyntaxError("invalid field number: %s", d.in[:num.size]) + } + + return Token{}, d.newSyntaxError("invalid field name: %s", errRegexp.Find(d.in)) +} + +// parseTypeName parses Any type URL or extension field name. The name is +// enclosed in [ and ] characters. The C++ parser does not handle many legal URL +// strings. This implementation is more liberal and allows for the pattern +// ^[-_a-zA-Z0-9]+([./][-_a-zA-Z0-9]+)*`). Whitespaces and comments are allowed +// in between [ ], '.', '/' and the sub names. +func (d *Decoder) parseTypeName() (Token, error) { + startPos := len(d.orig) - len(d.in) + // Use alias s to advance first in order to use d.in for error handling. + // Caller already checks for [ as first character. 
+ s := consume(d.in[1:], 0) + if len(s) == 0 { + return Token{}, ErrUnexpectedEOF + } + + var name []byte + for len(s) > 0 && isTypeNameChar(s[0]) { + name = append(name, s[0]) + s = s[1:] + } + s = consume(s, 0) + + var closed bool + for len(s) > 0 && !closed { + switch { + case s[0] == ']': + s = s[1:] + closed = true + + case s[0] == '/', s[0] == '.': + if len(name) > 0 && (name[len(name)-1] == '/' || name[len(name)-1] == '.') { + return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s", + d.orig[startPos:len(d.orig)-len(s)+1]) + } + name = append(name, s[0]) + s = s[1:] + s = consume(s, 0) + for len(s) > 0 && isTypeNameChar(s[0]) { + name = append(name, s[0]) + s = s[1:] + } + s = consume(s, 0) + + default: + return Token{}, d.newSyntaxError( + "invalid type URL/extension field name: %s", d.orig[startPos:len(d.orig)-len(s)+1]) + } + } + + if !closed { + return Token{}, ErrUnexpectedEOF + } + + // First character cannot be '.'. Last character cannot be '.' or '/'. + size := len(name) + if size == 0 || name[0] == '.' || name[size-1] == '.' || name[size-1] == '/' { + return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s", + d.orig[startPos:len(d.orig)-len(s)]) + } + + d.in = s + endPos := len(d.orig) - len(d.in) + d.consume(0) + + return Token{ + kind: Name, + attrs: uint8(TypeName), + pos: startPos, + raw: d.orig[startPos:endPos], + str: string(name), + }, nil +} + +func isTypeNameChar(b byte) bool { + return (b == '-' || b == '_' || + ('0' <= b && b <= '9') || + ('a' <= b && b <= 'z') || + ('A' <= b && b <= 'Z')) +} + +func isWhiteSpace(b byte) bool { + switch b { + case ' ', '\n', '\r', '\t': + return true + default: + return false + } +} + +// parseIdent parses an unquoted proto identifier and returns size. +// If allowNeg is true, it allows '-' to be the first character in the +// identifier. This is used when parsing literal values like -infinity, etc. +// Regular expression matches an identifier: `^[_a-zA-Z][_a-zA-Z0-9]*` +func parseIdent(input []byte, allowNeg bool) int { + var size int + + s := input + if len(s) == 0 { + return 0 + } + + if allowNeg && s[0] == '-' { + s = s[1:] + size++ + if len(s) == 0 { + return 0 + } + } + + switch { + case s[0] == '_', + 'a' <= s[0] && s[0] <= 'z', + 'A' <= s[0] && s[0] <= 'Z': + s = s[1:] + size++ + default: + return 0 + } + + for len(s) > 0 && (s[0] == '_' || + 'a' <= s[0] && s[0] <= 'z' || + 'A' <= s[0] && s[0] <= 'Z' || + '0' <= s[0] && s[0] <= '9') { + s = s[1:] + size++ + } + + if len(s) > 0 && !isDelim(s[0]) { + return 0 + } + + return size +} + +// parseScalar parses for a string, literal or number value. +func (d *Decoder) parseScalar() (Token, error) { + if d.in[0] == '"' || d.in[0] == '\'' { + return d.parseStringValue() + } + + if tok, ok := d.parseLiteralValue(); ok { + return tok, nil + } + + if tok, ok := d.parseNumberValue(); ok { + return tok, nil + } + + return Token{}, d.newSyntaxError("invalid scalar value: %s", errRegexp.Find(d.in)) +} + +// parseLiteralValue parses a literal value. A literal value is used for +// bools, special floats and enums. This function simply identifies that the +// field value is a literal. +func (d *Decoder) parseLiteralValue() (Token, bool) { + size := parseIdent(d.in, true) + if size == 0 { + return Token{}, false + } + return d.consumeToken(Scalar, size, literalValue), true +} + +// consumeToken constructs a Token for given Kind from d.in and consumes given +// size-length from it. 
+func (d *Decoder) consumeToken(kind Kind, size int, attrs uint8) Token { + // Important to compute raw and pos before consuming. + tok := Token{ + kind: kind, + attrs: attrs, + pos: len(d.orig) - len(d.in), + raw: d.in[:size], + } + d.consume(size) + return tok +} + +// newSyntaxError returns a syntax error with line and column information for +// current position. +func (d *Decoder) newSyntaxError(f string, x ...interface{}) error { + e := errors.New(f, x...) + line, column := d.Position(len(d.orig) - len(d.in)) + return errors.New("syntax error (line %d:%d): %v", line, column, e) +} + +// Position returns line and column number of given index of the original input. +// It will panic if index is out of range. +func (d *Decoder) Position(idx int) (line int, column int) { + b := d.orig[:idx] + line = bytes.Count(b, []byte("\n")) + 1 + if i := bytes.LastIndexByte(b, '\n'); i >= 0 { + b = b[i+1:] + } + column = utf8.RuneCount(b) + 1 // ignore multi-rune characters + return line, column +} + +func (d *Decoder) tryConsumeChar(c byte) bool { + if len(d.in) > 0 && d.in[0] == c { + d.consume(1) + return true + } + return false +} + +// consume consumes n bytes of input and any subsequent whitespace or comments. +func (d *Decoder) consume(n int) { + d.in = consume(d.in, n) + return +} + +// consume consumes n bytes of input and any subsequent whitespace or comments. +func consume(b []byte, n int) []byte { + b = b[n:] + for len(b) > 0 { + switch b[0] { + case ' ', '\n', '\r', '\t': + b = b[1:] + case '#': + if i := bytes.IndexByte(b, '\n'); i >= 0 { + b = b[i+len("\n"):] + } else { + b = nil + } + default: + return b + } + } + return b +} + +// Any sequence that looks like a non-delimiter (for error reporting). +var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9\/]+|.)`) + +// isDelim returns true if given byte is a delimiter character. +func isDelim(c byte) bool { + return !(c == '-' || c == '+' || c == '.' || c == '_' || + ('a' <= c && c <= 'z') || + ('A' <= c && c <= 'Z') || + ('0' <= c && c <= '9')) +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go new file mode 100644 index 000000000..f2d90b789 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go @@ -0,0 +1,190 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package text + +// parseNumberValue parses a number from the input and returns a Token object. +func (d *Decoder) parseNumberValue() (Token, bool) { + in := d.in + num := parseNumber(in) + if num.size == 0 { + return Token{}, false + } + numAttrs := num.kind + if num.neg { + numAttrs |= isNegative + } + strSize := num.size + last := num.size - 1 + if num.kind == numFloat && (d.in[last] == 'f' || d.in[last] == 'F') { + strSize = last + } + tok := Token{ + kind: Scalar, + attrs: numberValue, + pos: len(d.orig) - len(d.in), + raw: d.in[:num.size], + str: string(d.in[:strSize]), + numAttrs: numAttrs, + } + d.consume(num.size) + return tok, true +} + +const ( + numDec uint8 = (1 << iota) / 2 + numHex + numOct + numFloat +) + +// number is the result of parsing out a valid number from parseNumber. It +// contains data for doing float or integer conversion via the strconv package +// in conjunction with the input bytes. 
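A few worked examples of the number grammar implemented below may help (traced by hand from the parser, so treat them as illustrative): "0x1F" parses as a numHex number of size 4, "017" as a numOct number of size 3, and "-1.5e-3f" as a negative numFloat number of size 8, where the trailing f is counted in the size but stripped from the string handed to strconv by parseNumberValue above.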
+type number struct { + kind uint8 + neg bool + size int +} + +// parseNumber constructs a number object from given input. It allows for the +// following patterns: +// integer: ^-?([1-9][0-9]*|0[xX][0-9a-fA-F]+|0[0-7]*) +// float: ^-?((0|[1-9][0-9]*)?([.][0-9]*)?([eE][+-]?[0-9]+)?[fF]?) +// It also returns the number of parsed bytes for the given number, 0 if it is +// not a number. +func parseNumber(input []byte) number { + kind := numDec + var size int + var neg bool + + s := input + if len(s) == 0 { + return number{} + } + + // Optional - + if s[0] == '-' { + neg = true + s = s[1:] + size++ + if len(s) == 0 { + return number{} + } + } + + // C++ allows for whitespace and comments in between the negative sign and + // the rest of the number. This logic currently does not but is consistent + // with v1. + + switch { + case s[0] == '0': + if len(s) > 1 { + switch { + case s[1] == 'x' || s[1] == 'X': + // Parse as hex number. + kind = numHex + n := 2 + s = s[2:] + for len(s) > 0 && (('0' <= s[0] && s[0] <= '9') || + ('a' <= s[0] && s[0] <= 'f') || + ('A' <= s[0] && s[0] <= 'F')) { + s = s[1:] + n++ + } + if n == 2 { + return number{} + } + size += n + + case '0' <= s[1] && s[1] <= '7': + // Parse as octal number. + kind = numOct + n := 2 + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '7' { + s = s[1:] + n++ + } + size += n + } + + if kind&(numHex|numOct) > 0 { + if len(s) > 0 && !isDelim(s[0]) { + return number{} + } + return number{kind: kind, neg: neg, size: size} + } + } + s = s[1:] + size++ + + case '1' <= s[0] && s[0] <= '9': + n := 1 + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + size += n + + case s[0] == '.': + // Set kind to numFloat to signify the intent to parse as float. And + // that it needs to have other digits after '.'. + kind = numFloat + + default: + return number{} + } + + // . followed by 0 or more digits. + if len(s) > 0 && s[0] == '.' { + n := 1 + s = s[1:] + // If decimal point was before any digits, it should be followed by + // other digits. + if len(s) == 0 && kind == numFloat { + return number{} + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + size += n + kind = numFloat + } + + // e or E followed by an optional - or + and 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + kind = numFloat + s = s[1:] + n := 1 + if s[0] == '+' || s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return number{} + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + size += n + } + + // Optional suffix f or F for floats. + if len(s) > 0 && (s[0] == 'f' || s[0] == 'F') { + kind = numFloat + s = s[1:] + size++ + } + + // Check that next byte is a delimiter or it is at the end. + if len(s) > 0 && !isDelim(s[0]) { + return number{} + } + + return number{kind: kind, neg: neg, size: size} +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go new file mode 100644 index 000000000..d4d349023 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go @@ -0,0 +1,161 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package text + +import ( + "bytes" + "strconv" + "strings" + "unicode" + "unicode/utf16" + "unicode/utf8" + + "google.golang.org/protobuf/internal/strs" +) + +// parseStringValue parses string field token. +// This differs from parseString since the text format allows +// multiple back-to-back string literals where they are semantically treated +// as a single large string with all values concatenated. +// +// E.g., `"foo" "bar" "baz"` => "foobarbaz" +func (d *Decoder) parseStringValue() (Token, error) { + // Note that the ending quote is sufficient to unambiguously mark the end + // of a string. Thus, the text grammar does not require intervening + // whitespace or control characters in-between strings. + // Thus, the following is valid: + // `"foo"'bar'"baz"` => "foobarbaz" + in0 := d.in + var ss []string + for len(d.in) > 0 && (d.in[0] == '"' || d.in[0] == '\'') { + s, err := d.parseString() + if err != nil { + return Token{}, err + } + ss = append(ss, s) + } + // d.in already points to the end of the value at this point. + return Token{ + kind: Scalar, + attrs: stringValue, + pos: len(d.orig) - len(in0), + raw: in0[:len(in0)-len(d.in)], + str: strings.Join(ss, ""), + }, nil +} + +// parseString parses a string value enclosed in " or '. +func (d *Decoder) parseString() (string, error) { + in := d.in + if len(in) == 0 { + return "", ErrUnexpectedEOF + } + quote := in[0] + in = in[1:] + i := indexNeedEscapeInBytes(in) + in, out := in[i:], in[:i:i] // set cap to prevent mutations + for len(in) > 0 { + switch r, n := utf8.DecodeRune(in); { + case r == utf8.RuneError && n == 1: + return "", d.newSyntaxError("invalid UTF-8 detected") + case r == 0 || r == '\n': + return "", d.newSyntaxError("invalid character %q in string", r) + case r == rune(quote): + in = in[1:] + d.consume(len(d.in) - len(in)) + return string(out), nil + case r == '\\': + if len(in) < 2 { + return "", ErrUnexpectedEOF + } + switch r := in[1]; r { + case '"', '\'', '\\', '?': + in, out = in[2:], append(out, r) + case 'a': + in, out = in[2:], append(out, '\a') + case 'b': + in, out = in[2:], append(out, '\b') + case 'n': + in, out = in[2:], append(out, '\n') + case 'r': + in, out = in[2:], append(out, '\r') + case 't': + in, out = in[2:], append(out, '\t') + case 'v': + in, out = in[2:], append(out, '\v') + case 'f': + in, out = in[2:], append(out, '\f') + case '0', '1', '2', '3', '4', '5', '6', '7': + // One, two, or three octal characters. + n := len(in[1:]) - len(bytes.TrimLeft(in[1:], "01234567")) + if n > 3 { + n = 3 + } + v, err := strconv.ParseUint(string(in[1:1+n]), 8, 8) + if err != nil { + return "", d.newSyntaxError("invalid octal escape code %q in string", in[:1+n]) + } + in, out = in[1+n:], append(out, byte(v)) + case 'x': + // One or two hexadecimal characters. 
+ n := len(in[2:]) - len(bytes.TrimLeft(in[2:], "0123456789abcdefABCDEF")) + if n > 2 { + n = 2 + } + v, err := strconv.ParseUint(string(in[2:2+n]), 16, 8) + if err != nil { + return "", d.newSyntaxError("invalid hex escape code %q in string", in[:2+n]) + } + in, out = in[2+n:], append(out, byte(v)) + case 'u', 'U': + // Four or eight hexadecimal characters + n := 6 + if r == 'U' { + n = 10 + } + if len(in) < n { + return "", ErrUnexpectedEOF + } + v, err := strconv.ParseUint(string(in[2:n]), 16, 32) + if utf8.MaxRune < v || err != nil { + return "", d.newSyntaxError("invalid Unicode escape code %q in string", in[:n]) + } + in = in[n:] + + r := rune(v) + if utf16.IsSurrogate(r) { + if len(in) < 6 { + return "", ErrUnexpectedEOF + } + v, err := strconv.ParseUint(string(in[2:6]), 16, 16) + r = utf16.DecodeRune(r, rune(v)) + if in[0] != '\\' || in[1] != 'u' || r == unicode.ReplacementChar || err != nil { + return "", d.newSyntaxError("invalid Unicode escape code %q in string", in[:6]) + } + in = in[6:] + } + out = append(out, string(r)...) + default: + return "", d.newSyntaxError("invalid escape code %q in string", in[:2]) + } + default: + i := indexNeedEscapeInBytes(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + return "", ErrUnexpectedEOF +} + +// indexNeedEscapeInString returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. +func indexNeedEscapeInBytes(b []byte) int { return indexNeedEscapeInString(strs.UnsafeString(b)) } + +// UnmarshalString returns an unescaped string given a textproto string value. +// String value needs to contain single or double quotes. This is only used by +// internal/encoding/defval package for unmarshaling bytes. +func UnmarshalString(s string) (string, error) { + d := NewDecoder([]byte(s)) + return d.parseString() +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go new file mode 100644 index 000000000..83d2b0d5a --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go @@ -0,0 +1,373 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package text + +import ( + "bytes" + "fmt" + "math" + "strconv" + "strings" + + "google.golang.org/protobuf/internal/flags" +) + +// Kind represents a token kind expressible in the textproto format. +type Kind uint8 + +// Kind values. +const ( + Invalid Kind = iota + EOF + Name // Name indicates the field name. + Scalar // Scalar are scalar values, e.g. "string", 47, ENUM_LITERAL, true. + MessageOpen + MessageClose + ListOpen + ListClose + + // comma and semi-colon are only for parsing in between values and should not be exposed. + comma + semicolon + + // bof indicates beginning of file, which is the default token + // kind at the beginning of parsing. + bof = Invalid +) + +func (t Kind) String() string { + switch t { + case Invalid: + return "" + case EOF: + return "eof" + case Scalar: + return "scalar" + case Name: + return "name" + case MessageOpen: + return "{" + case MessageClose: + return "}" + case ListOpen: + return "[" + case ListClose: + return "]" + case comma: + return "," + case semicolon: + return ";" + default: + return fmt.Sprintf("", uint8(t)) + } +} + +// NameKind represents different types of field names. +type NameKind uint8 + +// NameKind values. 
+const ( + IdentName NameKind = iota + 1 + TypeName + FieldNumber +) + +func (t NameKind) String() string { + switch t { + case IdentName: + return "IdentName" + case TypeName: + return "TypeName" + case FieldNumber: + return "FieldNumber" + default: + return fmt.Sprintf("", uint8(t)) + } +} + +// Bit mask in Token.attrs to indicate if a Name token is followed by the +// separator char ':'. The field name separator char is optional for message +// field or repeated message field, but required for all other types. Decoder +// simply indicates whether a Name token is followed by separator or not. It is +// up to the prototext package to validate. +const hasSeparator = 1 << 7 + +// Scalar value types. +const ( + numberValue = iota + 1 + stringValue + literalValue +) + +// Bit mask in Token.numAttrs to indicate that the number is a negative. +const isNegative = 1 << 7 + +// Token provides a parsed token kind and value. Values are provided by the +// different accessor methods. +type Token struct { + // Kind of the Token object. + kind Kind + // attrs contains metadata for the following Kinds: + // Name: hasSeparator bit and one of NameKind. + // Scalar: one of numberValue, stringValue, literalValue. + attrs uint8 + // numAttrs contains metadata for numberValue: + // - highest bit is whether negative or positive. + // - lower bits indicate one of numDec, numHex, numOct, numFloat. + numAttrs uint8 + // pos provides the position of the token in the original input. + pos int + // raw bytes of the serialized token. + // This is a subslice into the original input. + raw []byte + // str contains parsed string for the following: + // - stringValue of Scalar kind + // - numberValue of Scalar kind + // - TypeName of Name kind + str string +} + +// Kind returns the token kind. +func (t Token) Kind() Kind { + return t.kind +} + +// RawString returns the read value in string. +func (t Token) RawString() string { + return string(t.raw) +} + +// Pos returns the token position from the input. +func (t Token) Pos() int { + return t.pos +} + +// NameKind returns IdentName, TypeName or FieldNumber. +// It panics if type is not Name. +func (t Token) NameKind() NameKind { + if t.kind == Name { + return NameKind(t.attrs &^ hasSeparator) + } + panic(fmt.Sprintf("Token is not a Name type: %s", t.kind)) +} + +// HasSeparator returns true if the field name is followed by the separator char +// ':', else false. It panics if type is not Name. +func (t Token) HasSeparator() bool { + if t.kind == Name { + return t.attrs&hasSeparator != 0 + } + panic(fmt.Sprintf("Token is not a Name type: %s", t.kind)) +} + +// IdentName returns the value for IdentName type. +func (t Token) IdentName() string { + if t.kind == Name && t.attrs&uint8(IdentName) != 0 { + return string(t.raw) + } + panic(fmt.Sprintf("Token is not an IdentName: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator))) +} + +// TypeName returns the value for TypeName type. +func (t Token) TypeName() string { + if t.kind == Name && t.attrs&uint8(TypeName) != 0 { + return t.str + } + panic(fmt.Sprintf("Token is not a TypeName: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator))) +} + +// FieldNumber returns the value for FieldNumber type. It returns a +// non-negative int32 value. Caller will still need to validate for the correct +// field number range. 
+func (t Token) FieldNumber() int32 { + if t.kind != Name || t.attrs&uint8(FieldNumber) == 0 { + panic(fmt.Sprintf("Token is not a FieldNumber: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator))) + } + // Following should not return an error as it had already been called right + // before this Token was constructed. + num, _ := strconv.ParseInt(string(t.raw), 10, 32) + return int32(num) +} + +// String returns the string value for a Scalar type. +func (t Token) String() (string, bool) { + if t.kind != Scalar || t.attrs != stringValue { + return "", false + } + return t.str, true +} + +// Enum returns the literal value for a Scalar type for use as enum literals. +func (t Token) Enum() (string, bool) { + if t.kind != Scalar || t.attrs != literalValue || (len(t.raw) > 0 && t.raw[0] == '-') { + return "", false + } + return string(t.raw), true +} + +// Bool returns the bool value for a Scalar type. +func (t Token) Bool() (bool, bool) { + if t.kind != Scalar { + return false, false + } + switch t.attrs { + case literalValue: + if b, ok := boolLits[string(t.raw)]; ok { + return b, true + } + case numberValue: + // Unsigned integer representation of 0 or 1 is permitted: 00, 0x0, 01, + // 0x1, etc. + n, err := strconv.ParseUint(t.str, 0, 64) + if err == nil { + switch n { + case 0: + return false, true + case 1: + return true, true + } + } + } + return false, false +} + +// These exact boolean literals are the ones supported in C++. +var boolLits = map[string]bool{ + "t": true, + "true": true, + "True": true, + "f": false, + "false": false, + "False": false, +} + +// Uint64 returns the uint64 value for a Scalar type. +func (t Token) Uint64() (uint64, bool) { + if t.kind != Scalar || t.attrs != numberValue || + t.numAttrs&isNegative > 0 || t.numAttrs&numFloat > 0 { + return 0, false + } + n, err := strconv.ParseUint(t.str, 0, 64) + if err != nil { + return 0, false + } + return n, true +} + +// Uint32 returns the uint32 value for a Scalar type. +func (t Token) Uint32() (uint32, bool) { + if t.kind != Scalar || t.attrs != numberValue || + t.numAttrs&isNegative > 0 || t.numAttrs&numFloat > 0 { + return 0, false + } + n, err := strconv.ParseUint(t.str, 0, 32) + if err != nil { + return 0, false + } + return uint32(n), true +} + +// Int64 returns the int64 value for a Scalar type. +func (t Token) Int64() (int64, bool) { + if t.kind != Scalar || t.attrs != numberValue || t.numAttrs&numFloat > 0 { + return 0, false + } + if n, err := strconv.ParseInt(t.str, 0, 64); err == nil { + return n, true + } + // C++ accepts large positive hex numbers as negative values. + // This feature is here for proto1 backwards compatibility purposes. + if flags.ProtoLegacy && (t.numAttrs == numHex) { + if n, err := strconv.ParseUint(t.str, 0, 64); err == nil { + return int64(n), true + } + } + return 0, false +} + +// Int32 returns the int32 value for a Scalar type. +func (t Token) Int32() (int32, bool) { + if t.kind != Scalar || t.attrs != numberValue || t.numAttrs&numFloat > 0 { + return 0, false + } + if n, err := strconv.ParseInt(t.str, 0, 32); err == nil { + return int32(n), true + } + // C++ accepts large positive hex numbers as negative values. + // This feature is here for proto1 backwards compatibility purposes. + if flags.ProtoLegacy && (t.numAttrs == numHex) { + if n, err := strconv.ParseUint(t.str, 0, 32); err == nil { + return int32(n), true + } + } + return 0, false +} + +// Float64 returns the float64 value for a Scalar type. 
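As a rough sketch of how these pieces compose (a hypothetical helper; the text package is internal to google.golang.org/protobuf, so such code could only live inside this module), reading every token from a small input and dispatching on the accessors above:

package text

import "fmt"

// dumpTokens is a hypothetical helper that walks an input such as
// []byte(`count: 42 name: "abc"`) and prints each token.
func dumpTokens(in []byte) error {
	d := NewDecoder(in)
	for {
		tok, err := d.Read()
		if err != nil {
			return err
		}
		switch tok.Kind() {
		case EOF:
			return nil
		case Name:
			fmt.Printf("name %s (%v)\n", tok.RawString(), tok.NameKind())
		case Scalar:
			if n, ok := tok.Int32(); ok {
				fmt.Println("int32 scalar:", n)
			} else if s, ok := tok.String(); ok {
				fmt.Println("string scalar:", s)
			} else {
				fmt.Println("scalar:", tok.RawString())
			}
		}
	}
}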
+func (t Token) Float64() (float64, bool) { + if t.kind != Scalar { + return 0, false + } + switch t.attrs { + case literalValue: + if f, ok := floatLits[strings.ToLower(string(t.raw))]; ok { + return f, true + } + case numberValue: + n, err := strconv.ParseFloat(t.str, 64) + if err == nil { + return n, true + } + nerr := err.(*strconv.NumError) + if nerr.Err == strconv.ErrRange { + return n, true + } + } + return 0, false +} + +// Float32 returns the float32 value for a Scalar type. +func (t Token) Float32() (float32, bool) { + if t.kind != Scalar { + return 0, false + } + switch t.attrs { + case literalValue: + if f, ok := floatLits[strings.ToLower(string(t.raw))]; ok { + return float32(f), true + } + case numberValue: + n, err := strconv.ParseFloat(t.str, 64) + if err == nil { + // Overflows are treated as (-)infinity. + return float32(n), true + } + nerr := err.(*strconv.NumError) + if nerr.Err == strconv.ErrRange { + return float32(n), true + } + } + return 0, false +} + +// These are the supported float literals which C++ permits case-insensitive +// variants of these. +var floatLits = map[string]float64{ + "nan": math.NaN(), + "inf": math.Inf(1), + "infinity": math.Inf(1), + "-inf": math.Inf(-1), + "-infinity": math.Inf(-1), +} + +// TokenEquals returns true if given Tokens are equal, else false. +func TokenEquals(x, y Token) bool { + return x.kind == y.kind && + x.attrs == y.attrs && + x.numAttrs == y.numAttrs && + x.pos == y.pos && + bytes.Equal(x.raw, y.raw) && + x.str == y.str +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go b/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go new file mode 100644 index 000000000..0ce8d6fb8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package text implements the text format for protocol buffers. +// This package has no semantic understanding for protocol buffers and is only +// a parser and composer for the format. +// +// There is no formal specification for the protobuf text format, as such the +// C++ implementation (see google::protobuf::TextFormat) is the reference +// implementation of the text format. +// +// This package is neither a superset nor a subset of the C++ implementation. +// This implementation permits a more liberal grammar in some cases to be +// backwards compatible with the historical Go implementation. +// Future parsings unique to Go should not be added. +// Some grammars allowed by the C++ implementation are deliberately +// not implemented here because they are considered a bug by the protobuf team +// and should not be replicated. +// +// The Go implementation should implement a sufficient amount of the C++ +// grammar such that the default text serialization by C++ can be parsed by Go. +// However, just because the C++ parser accepts some input does not mean that +// the Go implementation should as well. 
+// +// The text format is almost a superset of JSON except: +// * message keys are not quoted strings, but identifiers +// * the top-level value must be a message without the delimiters +package text diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go new file mode 100644 index 000000000..da289ccce --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go @@ -0,0 +1,270 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package text + +import ( + "math" + "math/bits" + "strconv" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/internal/detrand" + "google.golang.org/protobuf/internal/errors" +) + +// encType represents an encoding type. +type encType uint8 + +const ( + _ encType = (1 << iota) / 2 + name + scalar + messageOpen + messageClose +) + +// Encoder provides methods to write out textproto constructs and values. The user is +// responsible for producing valid sequences of constructs and values. +type Encoder struct { + encoderState + + indent string + delims [2]byte + outputASCII bool +} + +type encoderState struct { + lastType encType + indents []byte + out []byte +} + +// NewEncoder returns an Encoder. +// +// If indent is a non-empty string, it causes every entry in a List or Message +// to be preceded by the indent and trailed by a newline. +// +// If delims is not the zero value, it controls the delimiter characters used +// for messages (e.g., "{}" vs "<>"). +// +// If outputASCII is true, strings will be serialized in such a way that +// multi-byte UTF-8 sequences are escaped. This property ensures that the +// overall output is ASCII (as opposed to UTF-8). +func NewEncoder(indent string, delims [2]byte, outputASCII bool) (*Encoder, error) { + e := &Encoder{} + if len(indent) > 0 { + if strings.Trim(indent, " \t") != "" { + return nil, errors.New("indent may only be composed of space and tab characters") + } + e.indent = indent + } + switch delims { + case [2]byte{0, 0}: + e.delims = [2]byte{'{', '}'} + case [2]byte{'{', '}'}, [2]byte{'<', '>'}: + e.delims = delims + default: + return nil, errors.New("delimiters may only be \"{}\" or \"<>\"") + } + e.outputASCII = outputASCII + + return e, nil +} + +// Bytes returns the content of the written bytes. +func (e *Encoder) Bytes() []byte { + return e.out +} + +// StartMessage writes out the '{' or '<' symbol. +func (e *Encoder) StartMessage() { + e.prepareNext(messageOpen) + e.out = append(e.out, e.delims[0]) +} + +// EndMessage writes out the '}' or '>' symbol. +func (e *Encoder) EndMessage() { + e.prepareNext(messageClose) + e.out = append(e.out, e.delims[1]) +} + +// WriteName writes out the field name and the separator ':'. +func (e *Encoder) WriteName(s string) { + e.prepareNext(name) + e.out = append(e.out, s...) + e.out = append(e.out, ':') +} + +// WriteBool writes out the given boolean value. +func (e *Encoder) WriteBool(b bool) { + if b { + e.WriteLiteral("true") + } else { + e.WriteLiteral("false") + } +} + +// WriteString writes out the given string value. +func (e *Encoder) WriteString(s string) { + e.prepareNext(scalar) + e.out = appendString(e.out, s, e.outputASCII) +} + +func appendString(out []byte, in string, outputASCII bool) []byte { + out = append(out, '"') + i := indexNeedEscapeInString(in) + in, out = in[i:], append(out, in[:i]...) 
+ for len(in) > 0 { + switch r, n := utf8.DecodeRuneInString(in); { + case r == utf8.RuneError && n == 1: + // We do not report invalid UTF-8 because strings in the text format + // are used to represent both the proto string and bytes type. + r = rune(in[0]) + fallthrough + case r < ' ' || r == '"' || r == '\\' || r == 0x7f: + out = append(out, '\\') + switch r { + case '"', '\\': + out = append(out, byte(r)) + case '\n': + out = append(out, 'n') + case '\r': + out = append(out, 'r') + case '\t': + out = append(out, 't') + default: + out = append(out, 'x') + out = append(out, "00"[1+(bits.Len32(uint32(r))-1)/4:]...) + out = strconv.AppendUint(out, uint64(r), 16) + } + in = in[n:] + case r >= utf8.RuneSelf && (outputASCII || r <= 0x009f): + out = append(out, '\\') + if r <= math.MaxUint16 { + out = append(out, 'u') + out = append(out, "0000"[1+(bits.Len32(uint32(r))-1)/4:]...) + out = strconv.AppendUint(out, uint64(r), 16) + } else { + out = append(out, 'U') + out = append(out, "00000000"[1+(bits.Len32(uint32(r))-1)/4:]...) + out = strconv.AppendUint(out, uint64(r), 16) + } + in = in[n:] + default: + i := indexNeedEscapeInString(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + out = append(out, '"') + return out +} + +// indexNeedEscapeInString returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. +func indexNeedEscapeInString(s string) int { + for i := 0; i < len(s); i++ { + if c := s[i]; c < ' ' || c == '"' || c == '\'' || c == '\\' || c >= 0x7f { + return i + } + } + return len(s) +} + +// WriteFloat writes out the given float value for given bitSize. +func (e *Encoder) WriteFloat(n float64, bitSize int) { + e.prepareNext(scalar) + e.out = appendFloat(e.out, n, bitSize) +} + +func appendFloat(out []byte, n float64, bitSize int) []byte { + switch { + case math.IsNaN(n): + return append(out, "nan"...) + case math.IsInf(n, +1): + return append(out, "inf"...) + case math.IsInf(n, -1): + return append(out, "-inf"...) + default: + return strconv.AppendFloat(out, n, 'g', -1, bitSize) + } +} + +// WriteInt writes out the given signed integer value. +func (e *Encoder) WriteInt(n int64) { + e.prepareNext(scalar) + e.out = append(e.out, strconv.FormatInt(n, 10)...) +} + +// WriteUint writes out the given unsigned integer value. +func (e *Encoder) WriteUint(n uint64) { + e.prepareNext(scalar) + e.out = append(e.out, strconv.FormatUint(n, 10)...) +} + +// WriteLiteral writes out the given string as a literal value without quotes. +// This is used for writing enum literal strings. +func (e *Encoder) WriteLiteral(s string) { + e.prepareNext(scalar) + e.out = append(e.out, s...) +} + +// prepareNext adds possible space and indentation for the next value based +// on last encType and indent option. It also updates e.lastType to next. +func (e *Encoder) prepareNext(next encType) { + defer func() { + e.lastType = next + }() + + // Single line. + if len(e.indent) == 0 { + // Add space after each field before the next one. + if e.lastType&(scalar|messageClose) != 0 && next == name { + e.out = append(e.out, ' ') + // Add a random extra space to make output unstable. + if detrand.Bool() { + e.out = append(e.out, ' ') + } + } + return + } + + // Multi-line. + switch { + case e.lastType == name: + e.out = append(e.out, ' ') + // Add a random extra space after name: to make output unstable. 
+ if detrand.Bool() { + e.out = append(e.out, ' ') + } + + case e.lastType == messageOpen && next != messageClose: + e.indents = append(e.indents, e.indent...) + e.out = append(e.out, '\n') + e.out = append(e.out, e.indents...) + + case e.lastType&(scalar|messageClose) != 0: + if next == messageClose { + e.indents = e.indents[:len(e.indents)-len(e.indent)] + } + e.out = append(e.out, '\n') + e.out = append(e.out, e.indents...) + } +} + +// Snapshot returns the current snapshot for use in Reset. +func (e *Encoder) Snapshot() encoderState { + return e.encoderState +} + +// Reset resets the Encoder to the given encoderState from a Snapshot. +func (e *Encoder) Reset(es encoderState) { + e.encoderState = es +} + +// AppendString appends the escaped form of the input string to b. +func AppendString(b []byte, s string) []byte { + return appendString(b, s, false) +} diff --git a/vendor/google.golang.org/protobuf/internal/errors/errors.go b/vendor/google.golang.org/protobuf/internal/errors/errors.go new file mode 100644 index 000000000..20c17b35e --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/errors/errors.go @@ -0,0 +1,89 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errors implements functions to manipulate errors. +package errors + +import ( + "errors" + "fmt" + + "google.golang.org/protobuf/internal/detrand" +) + +// Error is a sentinel matching all errors produced by this package. +var Error = errors.New("protobuf error") + +// New formats a string according to the format specifier and arguments and +// returns an error that has a "proto" prefix. +func New(f string, x ...interface{}) error { + return &prefixError{s: format(f, x...)} +} + +type prefixError struct{ s string } + +var prefix = func() string { + // Deliberately introduce instability into the error message string to + // discourage users from performing error string comparisons. + if detrand.Bool() { + return "proto: " // use non-breaking spaces (U+00a0) + } else { + return "proto: " // use regular spaces (U+0020) + } +}() + +func (e *prefixError) Error() string { + return prefix + e.s +} + +func (e *prefixError) Unwrap() error { + return Error +} + +// Wrap returns an error that has a "proto" prefix, the formatted string described +// by the format specifier and arguments, and a suffix of err. The error wraps err. +func Wrap(err error, f string, x ...interface{}) error { + return &wrapError{ + s: format(f, x...), + err: err, + } +} + +type wrapError struct { + s string + err error +} + +func (e *wrapError) Error() string { + return format("%v%v: %v", prefix, e.s, e.err) +} + +func (e *wrapError) Unwrap() error { + return e.err +} + +func (e *wrapError) Is(target error) bool { + return target == Error +} + +func format(f string, x ...interface{}) string { + // avoid "proto: " prefix when chaining + for i := 0; i < len(x); i++ { + switch e := x[i].(type) { + case *prefixError: + x[i] = e.s + case *wrapError: + x[i] = format("%v: %v", e.s, e.err) + } + } + return fmt.Sprintf(f, x...) 
+} + +func InvalidUTF8(name string) error { + return New("field %v contains invalid UTF-8", name) +} + +func RequiredNotSet(name string) error { + return New("required field %v not set", name) +} diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go new file mode 100644 index 000000000..fbcd34920 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go @@ -0,0 +1,40 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.13 +// +build !go1.13 + +package errors + +import "reflect" + +// Is is a copy of Go 1.13's errors.Is for use with older Go versions. +func Is(err, target error) bool { + if target == nil { + return err == target + } + + isComparable := reflect.TypeOf(target).Comparable() + for { + if isComparable && err == target { + return true + } + if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) { + return true + } + if err = unwrap(err); err == nil { + return false + } + } +} + +func unwrap(err error) error { + u, ok := err.(interface { + Unwrap() error + }) + if !ok { + return nil + } + return u.Unwrap() +} diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go new file mode 100644 index 000000000..5e72f1cde --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go @@ -0,0 +1,13 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.13 +// +build go1.13 + +package errors + +import "errors" + +// Is is errors.Is. +func Is(err, target error) bool { return errors.Is(err, target) } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/build.go b/vendor/google.golang.org/protobuf/internal/filedesc/build.go new file mode 100644 index 000000000..b293b6947 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/build.go @@ -0,0 +1,158 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package filedesc provides functionality for constructing descriptors. +// +// The types in this package implement interfaces in the protoreflect package +// related to protobuf descripriptors. +package filedesc + +import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/reflect/protoreflect" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" +) + +// Builder construct a protoreflect.FileDescriptor from the raw descriptor. +type Builder struct { + // GoPackagePath is the Go package path that is invoking this builder. + GoPackagePath string + + // RawDescriptor is the wire-encoded bytes of FileDescriptorProto + // and must be populated. + RawDescriptor []byte + + // NumEnums is the total number of enums declared in the file. + NumEnums int32 + // NumMessages is the total number of messages declared in the file. + // It includes the implicit message declarations for map entries. + NumMessages int32 + // NumExtensions is the total number of extensions declared in the file. 
+ NumExtensions int32 + // NumServices is the total number of services declared in the file. + NumServices int32 + + // TypeResolver resolves extension field types for descriptor options. + // If nil, it uses protoregistry.GlobalTypes. + TypeResolver interface { + preg.ExtensionTypeResolver + } + + // FileRegistry is use to lookup file, enum, and message dependencies. + // Once constructed, the file descriptor is registered here. + // If nil, it uses protoregistry.GlobalFiles. + FileRegistry interface { + FindFileByPath(string) (protoreflect.FileDescriptor, error) + FindDescriptorByName(pref.FullName) (pref.Descriptor, error) + RegisterFile(pref.FileDescriptor) error + } +} + +// resolverByIndex is an interface Builder.FileRegistry may implement. +// If so, it permits looking up an enum or message dependency based on the +// sub-list and element index into filetype.Builder.DependencyIndexes. +type resolverByIndex interface { + FindEnumByIndex(int32, int32, []Enum, []Message) pref.EnumDescriptor + FindMessageByIndex(int32, int32, []Enum, []Message) pref.MessageDescriptor +} + +// Indexes of each sub-list in filetype.Builder.DependencyIndexes. +const ( + listFieldDeps int32 = iota + listExtTargets + listExtDeps + listMethInDeps + listMethOutDeps +) + +// Out is the output of the Builder. +type Out struct { + File pref.FileDescriptor + + // Enums is all enum descriptors in "flattened ordering". + Enums []Enum + // Messages is all message descriptors in "flattened ordering". + // It includes the implicit message declarations for map entries. + Messages []Message + // Extensions is all extension descriptors in "flattened ordering". + Extensions []Extension + // Service is all service descriptors in "flattened ordering". + Services []Service +} + +// Build constructs a FileDescriptor given the parameters set in Builder. +// It assumes that the inputs are well-formed and panics if any inconsistencies +// are encountered. +// +// If NumEnums+NumMessages+NumExtensions+NumServices is zero, +// then Build automatically derives them from the raw descriptor. +func (db Builder) Build() (out Out) { + // Populate the counts if uninitialized. + if db.NumEnums+db.NumMessages+db.NumExtensions+db.NumServices == 0 { + db.unmarshalCounts(db.RawDescriptor, true) + } + + // Initialize resolvers and registries if unpopulated. + if db.TypeResolver == nil { + db.TypeResolver = preg.GlobalTypes + } + if db.FileRegistry == nil { + db.FileRegistry = preg.GlobalFiles + } + + fd := newRawFile(db) + out.File = fd + out.Enums = fd.allEnums + out.Messages = fd.allMessages + out.Extensions = fd.allExtensions + out.Services = fd.allServices + + if err := db.FileRegistry.RegisterFile(fd); err != nil { + panic(err) + } + return out +} + +// unmarshalCounts counts the number of enum, message, extension, and service +// declarations in the raw message, which is either a FileDescriptorProto +// or a MessageDescriptorProto depending on whether isFile is set. 
+func (db *Builder) unmarshalCounts(b []byte, isFile bool) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + if isFile { + switch num { + case genid.FileDescriptorProto_EnumType_field_number: + db.NumEnums++ + case genid.FileDescriptorProto_MessageType_field_number: + db.unmarshalCounts(v, false) + db.NumMessages++ + case genid.FileDescriptorProto_Extension_field_number: + db.NumExtensions++ + case genid.FileDescriptorProto_Service_field_number: + db.NumServices++ + } + } else { + switch num { + case genid.DescriptorProto_EnumType_field_number: + db.NumEnums++ + case genid.DescriptorProto_NestedType_field_number: + db.unmarshalCounts(v, false) + db.NumMessages++ + case genid.DescriptorProto_Extension_field_number: + db.NumExtensions++ + } + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go new file mode 100644 index 000000000..98ab142ae --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -0,0 +1,631 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "bytes" + "fmt" + "sync" + "sync/atomic" + + "google.golang.org/protobuf/internal/descfmt" + "google.golang.org/protobuf/internal/descopts" + "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/strs" + pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// The types in this file may have a suffix: +// • L0: Contains fields common to all descriptors (except File) and +// must be initialized up front. +// • L1: Contains fields specific to a descriptor and +// must be initialized up front. +// • L2: Contains fields that are lazily initialized when constructing +// from the raw file descriptor. When constructing as a literal, the L2 +// fields must be initialized up front. +// +// The types are exported so that packages like reflect/protodesc can +// directly construct descriptors. 
+ +type ( + File struct { + fileRaw + L1 FileL1 + + once uint32 // atomically set if L2 is valid + mu sync.Mutex // protects L2 + L2 *FileL2 + } + FileL1 struct { + Syntax pref.Syntax + Path string + Package pref.FullName + + Enums Enums + Messages Messages + Extensions Extensions + Services Services + } + FileL2 struct { + Options func() pref.ProtoMessage + Imports FileImports + Locations SourceLocations + } +) + +func (fd *File) ParentFile() pref.FileDescriptor { return fd } +func (fd *File) Parent() pref.Descriptor { return nil } +func (fd *File) Index() int { return 0 } +func (fd *File) Syntax() pref.Syntax { return fd.L1.Syntax } +func (fd *File) Name() pref.Name { return fd.L1.Package.Name() } +func (fd *File) FullName() pref.FullName { return fd.L1.Package } +func (fd *File) IsPlaceholder() bool { return false } +func (fd *File) Options() pref.ProtoMessage { + if f := fd.lazyInit().Options; f != nil { + return f() + } + return descopts.File +} +func (fd *File) Path() string { return fd.L1.Path } +func (fd *File) Package() pref.FullName { return fd.L1.Package } +func (fd *File) Imports() pref.FileImports { return &fd.lazyInit().Imports } +func (fd *File) Enums() pref.EnumDescriptors { return &fd.L1.Enums } +func (fd *File) Messages() pref.MessageDescriptors { return &fd.L1.Messages } +func (fd *File) Extensions() pref.ExtensionDescriptors { return &fd.L1.Extensions } +func (fd *File) Services() pref.ServiceDescriptors { return &fd.L1.Services } +func (fd *File) SourceLocations() pref.SourceLocations { return &fd.lazyInit().Locations } +func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } +func (fd *File) ProtoType(pref.FileDescriptor) {} +func (fd *File) ProtoInternal(pragma.DoNotImplement) {} + +func (fd *File) lazyInit() *FileL2 { + if atomic.LoadUint32(&fd.once) == 0 { + fd.lazyInitOnce() + } + return fd.L2 +} + +func (fd *File) lazyInitOnce() { + fd.mu.Lock() + if fd.L2 == nil { + fd.lazyRawInit() // recursively initializes all L2 structures + } + atomic.StoreUint32(&fd.once, 1) + fd.mu.Unlock() +} + +// GoPackagePath is a pseudo-internal API for determining the Go package path +// that this file descriptor is declared in. +// +// WARNING: This method is exempt from the compatibility promise and may be +// removed in the future without warning. 
+func (fd *File) GoPackagePath() string { + return fd.builder.GoPackagePath +} + +type ( + Enum struct { + Base + L1 EnumL1 + L2 *EnumL2 // protected by fileDesc.once + } + EnumL1 struct { + eagerValues bool // controls whether EnumL2.Values is already populated + } + EnumL2 struct { + Options func() pref.ProtoMessage + Values EnumValues + ReservedNames Names + ReservedRanges EnumRanges + } + + EnumValue struct { + Base + L1 EnumValueL1 + } + EnumValueL1 struct { + Options func() pref.ProtoMessage + Number pref.EnumNumber + } +) + +func (ed *Enum) Options() pref.ProtoMessage { + if f := ed.lazyInit().Options; f != nil { + return f() + } + return descopts.Enum +} +func (ed *Enum) Values() pref.EnumValueDescriptors { + if ed.L1.eagerValues { + return &ed.L2.Values + } + return &ed.lazyInit().Values +} +func (ed *Enum) ReservedNames() pref.Names { return &ed.lazyInit().ReservedNames } +func (ed *Enum) ReservedRanges() pref.EnumRanges { return &ed.lazyInit().ReservedRanges } +func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } +func (ed *Enum) ProtoType(pref.EnumDescriptor) {} +func (ed *Enum) lazyInit() *EnumL2 { + ed.L0.ParentFile.lazyInit() // implicitly initializes L2 + return ed.L2 +} + +func (ed *EnumValue) Options() pref.ProtoMessage { + if f := ed.L1.Options; f != nil { + return f() + } + return descopts.EnumValue +} +func (ed *EnumValue) Number() pref.EnumNumber { return ed.L1.Number } +func (ed *EnumValue) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } +func (ed *EnumValue) ProtoType(pref.EnumValueDescriptor) {} + +type ( + Message struct { + Base + L1 MessageL1 + L2 *MessageL2 // protected by fileDesc.once + } + MessageL1 struct { + Enums Enums + Messages Messages + Extensions Extensions + IsMapEntry bool // promoted from google.protobuf.MessageOptions + IsMessageSet bool // promoted from google.protobuf.MessageOptions + } + MessageL2 struct { + Options func() pref.ProtoMessage + Fields Fields + Oneofs Oneofs + ReservedNames Names + ReservedRanges FieldRanges + RequiredNumbers FieldNumbers // must be consistent with Fields.Cardinality + ExtensionRanges FieldRanges + ExtensionRangeOptions []func() pref.ProtoMessage // must be same length as ExtensionRanges + } + + Field struct { + Base + L1 FieldL1 + } + FieldL1 struct { + Options func() pref.ProtoMessage + Number pref.FieldNumber + Cardinality pref.Cardinality // must be consistent with Message.RequiredNumbers + Kind pref.Kind + StringName stringName + IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto + IsWeak bool // promoted from google.protobuf.FieldOptions + HasPacked bool // promoted from google.protobuf.FieldOptions + IsPacked bool // promoted from google.protobuf.FieldOptions + HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions + EnforceUTF8 bool // promoted from google.protobuf.FieldOptions + Default defaultValue + ContainingOneof pref.OneofDescriptor // must be consistent with Message.Oneofs.Fields + Enum pref.EnumDescriptor + Message pref.MessageDescriptor + } + + Oneof struct { + Base + L1 OneofL1 + } + OneofL1 struct { + Options func() pref.ProtoMessage + Fields OneofFields // must be consistent with Message.Fields.ContainingOneof + } +) + +func (md *Message) Options() pref.ProtoMessage { + if f := md.lazyInit().Options; f != nil { + return f() + } + return descopts.Message +} +func (md *Message) IsMapEntry() bool { return md.L1.IsMapEntry } +func (md *Message) Fields() pref.FieldDescriptors { return &md.lazyInit().Fields } +func (md 
*Message) Oneofs() pref.OneofDescriptors { return &md.lazyInit().Oneofs } +func (md *Message) ReservedNames() pref.Names { return &md.lazyInit().ReservedNames } +func (md *Message) ReservedRanges() pref.FieldRanges { return &md.lazyInit().ReservedRanges } +func (md *Message) RequiredNumbers() pref.FieldNumbers { return &md.lazyInit().RequiredNumbers } +func (md *Message) ExtensionRanges() pref.FieldRanges { return &md.lazyInit().ExtensionRanges } +func (md *Message) ExtensionRangeOptions(i int) pref.ProtoMessage { + if f := md.lazyInit().ExtensionRangeOptions[i]; f != nil { + return f() + } + return descopts.ExtensionRange +} +func (md *Message) Enums() pref.EnumDescriptors { return &md.L1.Enums } +func (md *Message) Messages() pref.MessageDescriptors { return &md.L1.Messages } +func (md *Message) Extensions() pref.ExtensionDescriptors { return &md.L1.Extensions } +func (md *Message) ProtoType(pref.MessageDescriptor) {} +func (md *Message) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } +func (md *Message) lazyInit() *MessageL2 { + md.L0.ParentFile.lazyInit() // implicitly initializes L2 + return md.L2 +} + +// IsMessageSet is a pseudo-internal API for checking whether a message +// should serialize in the proto1 message format. +// +// WARNING: This method is exempt from the compatibility promise and may be +// removed in the future without warning. +func (md *Message) IsMessageSet() bool { + return md.L1.IsMessageSet +} + +func (fd *Field) Options() pref.ProtoMessage { + if f := fd.L1.Options; f != nil { + return f() + } + return descopts.Field +} +func (fd *Field) Number() pref.FieldNumber { return fd.L1.Number } +func (fd *Field) Cardinality() pref.Cardinality { return fd.L1.Cardinality } +func (fd *Field) Kind() pref.Kind { return fd.L1.Kind } +func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } +func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } +func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } +func (fd *Field) HasPresence() bool { + return fd.L1.Cardinality != pref.Repeated && (fd.L0.ParentFile.L1.Syntax == pref.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) +} +func (fd *Field) HasOptionalKeyword() bool { + return (fd.L0.ParentFile.L1.Syntax == pref.Proto2 && fd.L1.Cardinality == pref.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional +} +func (fd *Field) IsPacked() bool { + if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != pref.Proto2 && fd.L1.Cardinality == pref.Repeated { + switch fd.L1.Kind { + case pref.StringKind, pref.BytesKind, pref.MessageKind, pref.GroupKind: + default: + return true + } + } + return fd.L1.IsPacked +} +func (fd *Field) IsExtension() bool { return false } +func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } +func (fd *Field) IsList() bool { return fd.Cardinality() == pref.Repeated && !fd.IsMap() } +func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } +func (fd *Field) MapKey() pref.FieldDescriptor { + if !fd.IsMap() { + return nil + } + return fd.Message().Fields().ByNumber(genid.MapEntry_Key_field_number) +} +func (fd *Field) MapValue() pref.FieldDescriptor { + if !fd.IsMap() { + return nil + } + return fd.Message().Fields().ByNumber(genid.MapEntry_Value_field_number) +} +func (fd *Field) HasDefault() bool { return fd.L1.Default.has } +func (fd *Field) Default() pref.Value { return fd.L1.Default.get(fd) } +func (fd *Field) DefaultEnumValue() pref.EnumValueDescriptor { return fd.L1.Default.enum } 
+func (fd *Field) ContainingOneof() pref.OneofDescriptor { return fd.L1.ContainingOneof } +func (fd *Field) ContainingMessage() pref.MessageDescriptor { + return fd.L0.Parent.(pref.MessageDescriptor) +} +func (fd *Field) Enum() pref.EnumDescriptor { + return fd.L1.Enum +} +func (fd *Field) Message() pref.MessageDescriptor { + if fd.L1.IsWeak { + if d, _ := protoregistry.GlobalFiles.FindDescriptorByName(fd.L1.Message.FullName()); d != nil { + return d.(pref.MessageDescriptor) + } + } + return fd.L1.Message +} +func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } +func (fd *Field) ProtoType(pref.FieldDescriptor) {} + +// EnforceUTF8 is a pseudo-internal API to determine whether to enforce UTF-8 +// validation for the string field. This exists for Google-internal use only +// since proto3 did not enforce UTF-8 validity prior to the open-source release. +// If this method does not exist, the default is to enforce valid UTF-8. +// +// WARNING: This method is exempt from the compatibility promise and may be +// removed in the future without warning. +func (fd *Field) EnforceUTF8() bool { + if fd.L1.HasEnforceUTF8 { + return fd.L1.EnforceUTF8 + } + return fd.L0.ParentFile.L1.Syntax == pref.Proto3 +} + +func (od *Oneof) IsSynthetic() bool { + return od.L0.ParentFile.L1.Syntax == pref.Proto3 && len(od.L1.Fields.List) == 1 && od.L1.Fields.List[0].HasOptionalKeyword() +} +func (od *Oneof) Options() pref.ProtoMessage { + if f := od.L1.Options; f != nil { + return f() + } + return descopts.Oneof +} +func (od *Oneof) Fields() pref.FieldDescriptors { return &od.L1.Fields } +func (od *Oneof) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, od) } +func (od *Oneof) ProtoType(pref.OneofDescriptor) {} + +type ( + Extension struct { + Base + L1 ExtensionL1 + L2 *ExtensionL2 // protected by fileDesc.once + } + ExtensionL1 struct { + Number pref.FieldNumber + Extendee pref.MessageDescriptor + Cardinality pref.Cardinality + Kind pref.Kind + } + ExtensionL2 struct { + Options func() pref.ProtoMessage + StringName stringName + IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto + IsPacked bool // promoted from google.protobuf.FieldOptions + Default defaultValue + Enum pref.EnumDescriptor + Message pref.MessageDescriptor + } +) + +func (xd *Extension) Options() pref.ProtoMessage { + if f := xd.lazyInit().Options; f != nil { + return f() + } + return descopts.Field +} +func (xd *Extension) Number() pref.FieldNumber { return xd.L1.Number } +func (xd *Extension) Cardinality() pref.Cardinality { return xd.L1.Cardinality } +func (xd *Extension) Kind() pref.Kind { return xd.L1.Kind } +func (xd *Extension) HasJSONName() bool { return xd.lazyInit().StringName.hasJSON } +func (xd *Extension) JSONName() string { return xd.lazyInit().StringName.getJSON(xd) } +func (xd *Extension) TextName() string { return xd.lazyInit().StringName.getText(xd) } +func (xd *Extension) HasPresence() bool { return xd.L1.Cardinality != pref.Repeated } +func (xd *Extension) HasOptionalKeyword() bool { + return (xd.L0.ParentFile.L1.Syntax == pref.Proto2 && xd.L1.Cardinality == pref.Optional) || xd.lazyInit().IsProto3Optional +} +func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked } +func (xd *Extension) IsExtension() bool { return true } +func (xd *Extension) IsWeak() bool { return false } +func (xd *Extension) IsList() bool { return xd.Cardinality() == pref.Repeated } +func (xd *Extension) IsMap() bool { return false } +func (xd *Extension) MapKey() pref.FieldDescriptor { return 
nil } +func (xd *Extension) MapValue() pref.FieldDescriptor { return nil } +func (xd *Extension) HasDefault() bool { return xd.lazyInit().Default.has } +func (xd *Extension) Default() pref.Value { return xd.lazyInit().Default.get(xd) } +func (xd *Extension) DefaultEnumValue() pref.EnumValueDescriptor { return xd.lazyInit().Default.enum } +func (xd *Extension) ContainingOneof() pref.OneofDescriptor { return nil } +func (xd *Extension) ContainingMessage() pref.MessageDescriptor { return xd.L1.Extendee } +func (xd *Extension) Enum() pref.EnumDescriptor { return xd.lazyInit().Enum } +func (xd *Extension) Message() pref.MessageDescriptor { return xd.lazyInit().Message } +func (xd *Extension) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, xd) } +func (xd *Extension) ProtoType(pref.FieldDescriptor) {} +func (xd *Extension) ProtoInternal(pragma.DoNotImplement) {} +func (xd *Extension) lazyInit() *ExtensionL2 { + xd.L0.ParentFile.lazyInit() // implicitly initializes L2 + return xd.L2 +} + +type ( + Service struct { + Base + L1 ServiceL1 + L2 *ServiceL2 // protected by fileDesc.once + } + ServiceL1 struct{} + ServiceL2 struct { + Options func() pref.ProtoMessage + Methods Methods + } + + Method struct { + Base + L1 MethodL1 + } + MethodL1 struct { + Options func() pref.ProtoMessage + Input pref.MessageDescriptor + Output pref.MessageDescriptor + IsStreamingClient bool + IsStreamingServer bool + } +) + +func (sd *Service) Options() pref.ProtoMessage { + if f := sd.lazyInit().Options; f != nil { + return f() + } + return descopts.Service +} +func (sd *Service) Methods() pref.MethodDescriptors { return &sd.lazyInit().Methods } +func (sd *Service) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, sd) } +func (sd *Service) ProtoType(pref.ServiceDescriptor) {} +func (sd *Service) ProtoInternal(pragma.DoNotImplement) {} +func (sd *Service) lazyInit() *ServiceL2 { + sd.L0.ParentFile.lazyInit() // implicitly initializes L2 + return sd.L2 +} + +func (md *Method) Options() pref.ProtoMessage { + if f := md.L1.Options; f != nil { + return f() + } + return descopts.Method +} +func (md *Method) Input() pref.MessageDescriptor { return md.L1.Input } +func (md *Method) Output() pref.MessageDescriptor { return md.L1.Output } +func (md *Method) IsStreamingClient() bool { return md.L1.IsStreamingClient } +func (md *Method) IsStreamingServer() bool { return md.L1.IsStreamingServer } +func (md *Method) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } +func (md *Method) ProtoType(pref.MethodDescriptor) {} +func (md *Method) ProtoInternal(pragma.DoNotImplement) {} + +// Surrogate files are can be used to create standalone descriptors +// where the syntax is only information derived from the parent file. 
+var ( + SurrogateProto2 = &File{L1: FileL1{Syntax: pref.Proto2}, L2: &FileL2{}} + SurrogateProto3 = &File{L1: FileL1{Syntax: pref.Proto3}, L2: &FileL2{}} +) + +type ( + Base struct { + L0 BaseL0 + } + BaseL0 struct { + FullName pref.FullName // must be populated + ParentFile *File // must be populated + Parent pref.Descriptor + Index int + } +) + +func (d *Base) Name() pref.Name { return d.L0.FullName.Name() } +func (d *Base) FullName() pref.FullName { return d.L0.FullName } +func (d *Base) ParentFile() pref.FileDescriptor { + if d.L0.ParentFile == SurrogateProto2 || d.L0.ParentFile == SurrogateProto3 { + return nil // surrogate files are not real parents + } + return d.L0.ParentFile +} +func (d *Base) Parent() pref.Descriptor { return d.L0.Parent } +func (d *Base) Index() int { return d.L0.Index } +func (d *Base) Syntax() pref.Syntax { return d.L0.ParentFile.Syntax() } +func (d *Base) IsPlaceholder() bool { return false } +func (d *Base) ProtoInternal(pragma.DoNotImplement) {} + +type stringName struct { + hasJSON bool + once sync.Once + nameJSON string + nameText string +} + +// InitJSON initializes the name. It is exported for use by other internal packages. +func (s *stringName) InitJSON(name string) { + s.hasJSON = true + s.nameJSON = name +} + +func (s *stringName) lazyInit(fd pref.FieldDescriptor) *stringName { + s.once.Do(func() { + if fd.IsExtension() { + // For extensions, JSON and text are formatted the same way. + var name string + if messageset.IsMessageSetExtension(fd) { + name = string("[" + fd.FullName().Parent() + "]") + } else { + name = string("[" + fd.FullName() + "]") + } + s.nameJSON = name + s.nameText = name + } else { + // Format the JSON name. + if !s.hasJSON { + s.nameJSON = strs.JSONCamelCase(string(fd.Name())) + } + + // Format the text name. + s.nameText = string(fd.Name()) + if fd.Kind() == pref.GroupKind { + s.nameText = string(fd.Message().Name()) + } + } + }) + return s +} + +func (s *stringName) getJSON(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameJSON } +func (s *stringName) getText(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameText } + +func DefaultValue(v pref.Value, ev pref.EnumValueDescriptor) defaultValue { + dv := defaultValue{has: v.IsValid(), val: v, enum: ev} + if b, ok := v.Interface().([]byte); ok { + // Store a copy of the default bytes, so that we can detect + // accidental mutations of the original value. + dv.bytes = append([]byte(nil), b...) + } + return dv +} + +func unmarshalDefault(b []byte, k pref.Kind, pf *File, ed pref.EnumDescriptor) defaultValue { + var evs pref.EnumValueDescriptors + if k == pref.EnumKind { + // If the enum is declared within the same file, be careful not to + // blindly call the Values method, lest we bind ourselves in a deadlock. + if e, ok := ed.(*Enum); ok && e.L0.ParentFile == pf { + evs = &e.L2.Values + } else { + evs = ed.Values() + } + + // If we are unable to resolve the enum dependency, use a placeholder + // enum value since we will not be able to parse the default value. 
+ if ed.IsPlaceholder() && pref.Name(b).IsValid() { + v := pref.ValueOfEnum(0) + ev := PlaceholderEnumValue(ed.FullName().Parent().Append(pref.Name(b))) + return DefaultValue(v, ev) + } + } + + v, ev, err := defval.Unmarshal(string(b), k, evs, defval.Descriptor) + if err != nil { + panic(err) + } + return DefaultValue(v, ev) +} + +type defaultValue struct { + has bool + val pref.Value + enum pref.EnumValueDescriptor + bytes []byte +} + +func (dv *defaultValue) get(fd pref.FieldDescriptor) pref.Value { + // Return the zero value as the default if unpopulated. + if !dv.has { + if fd.Cardinality() == pref.Repeated { + return pref.Value{} + } + switch fd.Kind() { + case pref.BoolKind: + return pref.ValueOfBool(false) + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + return pref.ValueOfInt32(0) + case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + return pref.ValueOfInt64(0) + case pref.Uint32Kind, pref.Fixed32Kind: + return pref.ValueOfUint32(0) + case pref.Uint64Kind, pref.Fixed64Kind: + return pref.ValueOfUint64(0) + case pref.FloatKind: + return pref.ValueOfFloat32(0) + case pref.DoubleKind: + return pref.ValueOfFloat64(0) + case pref.StringKind: + return pref.ValueOfString("") + case pref.BytesKind: + return pref.ValueOfBytes(nil) + case pref.EnumKind: + if evs := fd.Enum().Values(); evs.Len() > 0 { + return pref.ValueOfEnum(evs.Get(0).Number()) + } + return pref.ValueOfEnum(0) + } + } + + if len(dv.bytes) > 0 && !bytes.Equal(dv.bytes, dv.val.Bytes()) { + // TODO: Avoid panic if we're running with the race detector + // and instead spawn a goroutine that periodically resets + // this value back to the original to induce a race. + panic(fmt.Sprintf("detected mutation on the default bytes for %v", fd.FullName())) + } + return dv.val +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go new file mode 100644 index 000000000..66e1fee52 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -0,0 +1,471 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "sync" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// fileRaw is a data struct used when initializing a file descriptor from +// a raw FileDescriptorProto. +type fileRaw struct { + builder Builder + allEnums []Enum + allMessages []Message + allExtensions []Extension + allServices []Service +} + +func newRawFile(db Builder) *File { + fd := &File{fileRaw: fileRaw{builder: db}} + fd.initDecls(db.NumEnums, db.NumMessages, db.NumExtensions, db.NumServices) + fd.unmarshalSeed(db.RawDescriptor) + + // Extended message targets are eagerly resolved since registration + // needs this information at program init time. + for i := range fd.allExtensions { + xd := &fd.allExtensions[i] + xd.L1.Extendee = fd.resolveMessageDependency(xd.L1.Extendee, listExtTargets, int32(i)) + } + + fd.checkDecls() + return fd +} + +// initDecls pre-allocates slices for the exact number of enums, messages +// (including map entries), extensions, and services declared in the proto file. +// This is done to avoid regrowing the slice, which would change the address +// for any previously seen declaration. 
+// +// The alloc methods "allocates" slices by pulling from the capacity. +func (fd *File) initDecls(numEnums, numMessages, numExtensions, numServices int32) { + fd.allEnums = make([]Enum, 0, numEnums) + fd.allMessages = make([]Message, 0, numMessages) + fd.allExtensions = make([]Extension, 0, numExtensions) + fd.allServices = make([]Service, 0, numServices) +} + +func (fd *File) allocEnums(n int) []Enum { + total := len(fd.allEnums) + es := fd.allEnums[total : total+n] + fd.allEnums = fd.allEnums[:total+n] + return es +} +func (fd *File) allocMessages(n int) []Message { + total := len(fd.allMessages) + ms := fd.allMessages[total : total+n] + fd.allMessages = fd.allMessages[:total+n] + return ms +} +func (fd *File) allocExtensions(n int) []Extension { + total := len(fd.allExtensions) + xs := fd.allExtensions[total : total+n] + fd.allExtensions = fd.allExtensions[:total+n] + return xs +} +func (fd *File) allocServices(n int) []Service { + total := len(fd.allServices) + xs := fd.allServices[total : total+n] + fd.allServices = fd.allServices[:total+n] + return xs +} + +// checkDecls performs a sanity check that the expected number of expected +// declarations matches the number that were found in the descriptor proto. +func (fd *File) checkDecls() { + switch { + case len(fd.allEnums) != cap(fd.allEnums): + case len(fd.allMessages) != cap(fd.allMessages): + case len(fd.allExtensions) != cap(fd.allExtensions): + case len(fd.allServices) != cap(fd.allServices): + default: + return + } + panic("mismatching cardinality") +} + +func (fd *File) unmarshalSeed(b []byte) { + sb := getBuilder() + defer putBuilder(sb) + + var prevField pref.FieldNumber + var numEnums, numMessages, numExtensions, numServices int + var posEnums, posMessages, posExtensions, posServices int + b0 := b + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FileDescriptorProto_Syntax_field_number: + switch string(v) { + case "proto2": + fd.L1.Syntax = pref.Proto2 + case "proto3": + fd.L1.Syntax = pref.Proto3 + default: + panic("invalid syntax") + } + case genid.FileDescriptorProto_Name_field_number: + fd.L1.Path = sb.MakeString(v) + case genid.FileDescriptorProto_Package_field_number: + fd.L1.Package = pref.FullName(sb.MakeString(v)) + case genid.FileDescriptorProto_EnumType_field_number: + if prevField != genid.FileDescriptorProto_EnumType_field_number { + if numEnums > 0 { + panic("non-contiguous repeated field") + } + posEnums = len(b0) - len(b) - n - m + } + numEnums++ + case genid.FileDescriptorProto_MessageType_field_number: + if prevField != genid.FileDescriptorProto_MessageType_field_number { + if numMessages > 0 { + panic("non-contiguous repeated field") + } + posMessages = len(b0) - len(b) - n - m + } + numMessages++ + case genid.FileDescriptorProto_Extension_field_number: + if prevField != genid.FileDescriptorProto_Extension_field_number { + if numExtensions > 0 { + panic("non-contiguous repeated field") + } + posExtensions = len(b0) - len(b) - n - m + } + numExtensions++ + case genid.FileDescriptorProto_Service_field_number: + if prevField != genid.FileDescriptorProto_Service_field_number { + if numServices > 0 { + panic("non-contiguous repeated field") + } + posServices = len(b0) - len(b) - n - m + } + numServices++ + } + prevField = num + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + prevField = -1 // ignore known field numbers of unknown wire type + } + 
} + + // If syntax is missing, it is assumed to be proto2. + if fd.L1.Syntax == 0 { + fd.L1.Syntax = pref.Proto2 + } + + // Must allocate all declarations before parsing each descriptor type + // to ensure we handled all descriptors in "flattened ordering". + if numEnums > 0 { + fd.L1.Enums.List = fd.allocEnums(numEnums) + } + if numMessages > 0 { + fd.L1.Messages.List = fd.allocMessages(numMessages) + } + if numExtensions > 0 { + fd.L1.Extensions.List = fd.allocExtensions(numExtensions) + } + if numServices > 0 { + fd.L1.Services.List = fd.allocServices(numServices) + } + + if numEnums > 0 { + b := b0[posEnums:] + for i := range fd.L1.Enums.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + fd.L1.Enums.List[i].unmarshalSeed(v, sb, fd, fd, i) + b = b[n+m:] + } + } + if numMessages > 0 { + b := b0[posMessages:] + for i := range fd.L1.Messages.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + fd.L1.Messages.List[i].unmarshalSeed(v, sb, fd, fd, i) + b = b[n+m:] + } + } + if numExtensions > 0 { + b := b0[posExtensions:] + for i := range fd.L1.Extensions.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + fd.L1.Extensions.List[i].unmarshalSeed(v, sb, fd, fd, i) + b = b[n+m:] + } + } + if numServices > 0 { + b := b0[posServices:] + for i := range fd.L1.Services.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + fd.L1.Services.List[i].unmarshalSeed(v, sb, fd, fd, i) + b = b[n+m:] + } + } +} + +func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + ed.L0.ParentFile = pf + ed.L0.Parent = pd + ed.L0.Index = i + + var numValues int + for b := b; len(b) > 0; { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.EnumDescriptorProto_Name_field_number: + ed.L0.FullName = appendFullName(sb, pd.FullName(), v) + case genid.EnumDescriptorProto_Value_field_number: + numValues++ + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + + // Only construct enum value descriptors for top-level enums since + // they are needed for registration. 
+ if pd != pf { + return + } + ed.L1.eagerValues = true + ed.L2 = new(EnumL2) + ed.L2.Values.List = make([]EnumValue, numValues) + for i := 0; len(b) > 0; { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.EnumDescriptorProto_Value_field_number: + ed.L2.Values.List[i].unmarshalFull(v, sb, pf, ed, i) + i++ + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + md.L0.ParentFile = pf + md.L0.Parent = pd + md.L0.Index = i + + var prevField pref.FieldNumber + var numEnums, numMessages, numExtensions int + var posEnums, posMessages, posExtensions int + b0 := b + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.DescriptorProto_Name_field_number: + md.L0.FullName = appendFullName(sb, pd.FullName(), v) + case genid.DescriptorProto_EnumType_field_number: + if prevField != genid.DescriptorProto_EnumType_field_number { + if numEnums > 0 { + panic("non-contiguous repeated field") + } + posEnums = len(b0) - len(b) - n - m + } + numEnums++ + case genid.DescriptorProto_NestedType_field_number: + if prevField != genid.DescriptorProto_NestedType_field_number { + if numMessages > 0 { + panic("non-contiguous repeated field") + } + posMessages = len(b0) - len(b) - n - m + } + numMessages++ + case genid.DescriptorProto_Extension_field_number: + if prevField != genid.DescriptorProto_Extension_field_number { + if numExtensions > 0 { + panic("non-contiguous repeated field") + } + posExtensions = len(b0) - len(b) - n - m + } + numExtensions++ + case genid.DescriptorProto_Options_field_number: + md.unmarshalSeedOptions(v) + } + prevField = num + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + prevField = -1 // ignore known field numbers of unknown wire type + } + } + + // Must allocate all declarations before parsing each descriptor type + // to ensure we handled all descriptors in "flattened ordering". 
+ if numEnums > 0 { + md.L1.Enums.List = pf.allocEnums(numEnums) + } + if numMessages > 0 { + md.L1.Messages.List = pf.allocMessages(numMessages) + } + if numExtensions > 0 { + md.L1.Extensions.List = pf.allocExtensions(numExtensions) + } + + if numEnums > 0 { + b := b0[posEnums:] + for i := range md.L1.Enums.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + md.L1.Enums.List[i].unmarshalSeed(v, sb, pf, md, i) + b = b[n+m:] + } + } + if numMessages > 0 { + b := b0[posMessages:] + for i := range md.L1.Messages.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + md.L1.Messages.List[i].unmarshalSeed(v, sb, pf, md, i) + b = b[n+m:] + } + } + if numExtensions > 0 { + b := b0[posExtensions:] + for i := range md.L1.Extensions.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + md.L1.Extensions.List[i].unmarshalSeed(v, sb, pf, md, i) + b = b[n+m:] + } + } +} + +func (md *Message) unmarshalSeedOptions(b []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.MessageOptions_MapEntry_field_number: + md.L1.IsMapEntry = protowire.DecodeBool(v) + case genid.MessageOptions_MessageSetWireFormat_field_number: + md.L1.IsMessageSet = protowire.DecodeBool(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + xd.L0.ParentFile = pf + xd.L0.Parent = pd + xd.L0.Index = i + + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FieldDescriptorProto_Number_field_number: + xd.L1.Number = pref.FieldNumber(v) + case genid.FieldDescriptorProto_Label_field_number: + xd.L1.Cardinality = pref.Cardinality(v) + case genid.FieldDescriptorProto_Type_field_number: + xd.L1.Kind = pref.Kind(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldDescriptorProto_Name_field_number: + xd.L0.FullName = appendFullName(sb, pd.FullName(), v) + case genid.FieldDescriptorProto_Extendee_field_number: + xd.L1.Extendee = PlaceholderMessage(makeFullName(sb, v)) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + sd.L0.ParentFile = pf + sd.L0.Parent = pd + sd.L0.Index = i + + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.ServiceDescriptorProto_Name_field_number: + sd.L0.FullName = appendFullName(sb, pd.FullName(), v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +var nameBuilderPool = sync.Pool{ + New: func() interface{} { return new(strs.Builder) }, +} + +func getBuilder() *strs.Builder { + return nameBuilderPool.Get().(*strs.Builder) +} +func putBuilder(b *strs.Builder) { + nameBuilderPool.Put(b) +} + +// makeFullName converts b to a protoreflect.FullName, +// where b must start with a leading dot. +func makeFullName(sb *strs.Builder, b []byte) pref.FullName { + if len(b) == 0 || b[0] != '.' 
{ + panic("name reference must be fully qualified") + } + return pref.FullName(sb.MakeString(b[1:])) +} + +func appendFullName(sb *strs.Builder, prefix pref.FullName, suffix []byte) pref.FullName { + return sb.AppendFullName(prefix, pref.Name(strs.UnsafeString(suffix))) +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go new file mode 100644 index 000000000..198451e3e --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -0,0 +1,704 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "reflect" + "sync" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/descopts" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +func (fd *File) lazyRawInit() { + fd.unmarshalFull(fd.builder.RawDescriptor) + fd.resolveMessages() + fd.resolveExtensions() + fd.resolveServices() +} + +func (file *File) resolveMessages() { + var depIdx int32 + for i := range file.allMessages { + md := &file.allMessages[i] + + // Resolve message field dependencies. + for j := range md.L2.Fields.List { + fd := &md.L2.Fields.List[j] + + // Weak fields are resolved upon actual use. + if fd.L1.IsWeak { + continue + } + + // Resolve message field dependency. + switch fd.L1.Kind { + case pref.EnumKind: + fd.L1.Enum = file.resolveEnumDependency(fd.L1.Enum, listFieldDeps, depIdx) + depIdx++ + case pref.MessageKind, pref.GroupKind: + fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx) + depIdx++ + } + + // Default is resolved here since it depends on Enum being resolved. + if v := fd.L1.Default.val; v.IsValid() { + fd.L1.Default = unmarshalDefault(v.Bytes(), fd.L1.Kind, file, fd.L1.Enum) + } + } + } +} + +func (file *File) resolveExtensions() { + var depIdx int32 + for i := range file.allExtensions { + xd := &file.allExtensions[i] + + // Resolve extension field dependency. + switch xd.L1.Kind { + case pref.EnumKind: + xd.L2.Enum = file.resolveEnumDependency(xd.L2.Enum, listExtDeps, depIdx) + depIdx++ + case pref.MessageKind, pref.GroupKind: + xd.L2.Message = file.resolveMessageDependency(xd.L2.Message, listExtDeps, depIdx) + depIdx++ + } + + // Default is resolved here since it depends on Enum being resolved. + if v := xd.L2.Default.val; v.IsValid() { + xd.L2.Default = unmarshalDefault(v.Bytes(), xd.L1.Kind, file, xd.L2.Enum) + } + } +} + +func (file *File) resolveServices() { + var depIdx int32 + for i := range file.allServices { + sd := &file.allServices[i] + + // Resolve method dependencies. 
+ for j := range sd.L2.Methods.List { + md := &sd.L2.Methods.List[j] + md.L1.Input = file.resolveMessageDependency(md.L1.Input, listMethInDeps, depIdx) + md.L1.Output = file.resolveMessageDependency(md.L1.Output, listMethOutDeps, depIdx) + depIdx++ + } + } +} + +func (file *File) resolveEnumDependency(ed pref.EnumDescriptor, i, j int32) pref.EnumDescriptor { + r := file.builder.FileRegistry + if r, ok := r.(resolverByIndex); ok { + if ed2 := r.FindEnumByIndex(i, j, file.allEnums, file.allMessages); ed2 != nil { + return ed2 + } + } + for i := range file.allEnums { + if ed2 := &file.allEnums[i]; ed2.L0.FullName == ed.FullName() { + return ed2 + } + } + if d, _ := r.FindDescriptorByName(ed.FullName()); d != nil { + return d.(pref.EnumDescriptor) + } + return ed +} + +func (file *File) resolveMessageDependency(md pref.MessageDescriptor, i, j int32) pref.MessageDescriptor { + r := file.builder.FileRegistry + if r, ok := r.(resolverByIndex); ok { + if md2 := r.FindMessageByIndex(i, j, file.allEnums, file.allMessages); md2 != nil { + return md2 + } + } + for i := range file.allMessages { + if md2 := &file.allMessages[i]; md2.L0.FullName == md.FullName() { + return md2 + } + } + if d, _ := r.FindDescriptorByName(md.FullName()); d != nil { + return d.(pref.MessageDescriptor) + } + return md +} + +func (fd *File) unmarshalFull(b []byte) { + sb := getBuilder() + defer putBuilder(sb) + + var enumIdx, messageIdx, extensionIdx, serviceIdx int + var rawOptions []byte + fd.L2 = new(FileL2) + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FileDescriptorProto_PublicDependency_field_number: + fd.L2.Imports[v].IsPublic = true + case genid.FileDescriptorProto_WeakDependency_field_number: + fd.L2.Imports[v].IsWeak = true + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FileDescriptorProto_Dependency_field_number: + path := sb.MakeString(v) + imp, _ := fd.builder.FileRegistry.FindFileByPath(path) + if imp == nil { + imp = PlaceholderFile(path) + } + fd.L2.Imports = append(fd.L2.Imports, pref.FileImport{FileDescriptor: imp}) + case genid.FileDescriptorProto_EnumType_field_number: + fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb) + enumIdx++ + case genid.FileDescriptorProto_MessageType_field_number: + fd.L1.Messages.List[messageIdx].unmarshalFull(v, sb) + messageIdx++ + case genid.FileDescriptorProto_Extension_field_number: + fd.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb) + extensionIdx++ + case genid.FileDescriptorProto_Service_field_number: + fd.L1.Services.List[serviceIdx].unmarshalFull(v, sb) + serviceIdx++ + case genid.FileDescriptorProto_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + fd.L2.Options = fd.builder.optionsUnmarshaler(&descopts.File, rawOptions) +} + +func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) { + var rawValues [][]byte + var rawOptions []byte + if !ed.L1.eagerValues { + ed.L2 = new(EnumL2) + } + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.EnumDescriptorProto_Value_field_number: + rawValues = append(rawValues, v) + case genid.EnumDescriptorProto_ReservedName_field_number: + ed.L2.ReservedNames.List = 
append(ed.L2.ReservedNames.List, pref.Name(sb.MakeString(v))) + case genid.EnumDescriptorProto_ReservedRange_field_number: + ed.L2.ReservedRanges.List = append(ed.L2.ReservedRanges.List, unmarshalEnumReservedRange(v)) + case genid.EnumDescriptorProto_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + if !ed.L1.eagerValues && len(rawValues) > 0 { + ed.L2.Values.List = make([]EnumValue, len(rawValues)) + for i, b := range rawValues { + ed.L2.Values.List[i].unmarshalFull(b, sb, ed.L0.ParentFile, ed, i) + } + } + ed.L2.Options = ed.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Enum, rawOptions) +} + +func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.EnumDescriptorProto_EnumReservedRange_Start_field_number: + r[0] = pref.EnumNumber(v) + case genid.EnumDescriptorProto_EnumReservedRange_End_field_number: + r[1] = pref.EnumNumber(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + return r +} + +func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + vd.L0.ParentFile = pf + vd.L0.Parent = pd + vd.L0.Index = i + + var rawOptions []byte + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.EnumValueDescriptorProto_Number_field_number: + vd.L1.Number = pref.EnumNumber(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.EnumValueDescriptorProto_Name_field_number: + // NOTE: Enum values are in the same scope as the enum parent. 
+ vd.L0.FullName = appendFullName(sb, pd.Parent().FullName(), v) + case genid.EnumValueDescriptorProto_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + vd.L1.Options = pf.builder.optionsUnmarshaler(&descopts.EnumValue, rawOptions) +} + +func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) { + var rawFields, rawOneofs [][]byte + var enumIdx, messageIdx, extensionIdx int + var rawOptions []byte + md.L2 = new(MessageL2) + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.DescriptorProto_Field_field_number: + rawFields = append(rawFields, v) + case genid.DescriptorProto_OneofDecl_field_number: + rawOneofs = append(rawOneofs, v) + case genid.DescriptorProto_ReservedName_field_number: + md.L2.ReservedNames.List = append(md.L2.ReservedNames.List, pref.Name(sb.MakeString(v))) + case genid.DescriptorProto_ReservedRange_field_number: + md.L2.ReservedRanges.List = append(md.L2.ReservedRanges.List, unmarshalMessageReservedRange(v)) + case genid.DescriptorProto_ExtensionRange_field_number: + r, rawOptions := unmarshalMessageExtensionRange(v) + opts := md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.ExtensionRange, rawOptions) + md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, r) + md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, opts) + case genid.DescriptorProto_EnumType_field_number: + md.L1.Enums.List[enumIdx].unmarshalFull(v, sb) + enumIdx++ + case genid.DescriptorProto_NestedType_field_number: + md.L1.Messages.List[messageIdx].unmarshalFull(v, sb) + messageIdx++ + case genid.DescriptorProto_Extension_field_number: + md.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb) + extensionIdx++ + case genid.DescriptorProto_Options_field_number: + md.unmarshalOptions(v) + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + if len(rawFields) > 0 || len(rawOneofs) > 0 { + md.L2.Fields.List = make([]Field, len(rawFields)) + md.L2.Oneofs.List = make([]Oneof, len(rawOneofs)) + for i, b := range rawFields { + fd := &md.L2.Fields.List[i] + fd.unmarshalFull(b, sb, md.L0.ParentFile, md, i) + if fd.L1.Cardinality == pref.Required { + md.L2.RequiredNumbers.List = append(md.L2.RequiredNumbers.List, fd.L1.Number) + } + } + for i, b := range rawOneofs { + od := &md.L2.Oneofs.List[i] + od.unmarshalFull(b, sb, md.L0.ParentFile, md, i) + } + } + md.L2.Options = md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Message, rawOptions) +} + +func (md *Message) unmarshalOptions(b []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.MessageOptions_MapEntry_field_number: + md.L1.IsMapEntry = protowire.DecodeBool(v) + case genid.MessageOptions_MessageSetWireFormat_field_number: + md.L1.IsMessageSet = protowire.DecodeBool(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.DescriptorProto_ReservedRange_Start_field_number: + 
r[0] = pref.FieldNumber(v) + case genid.DescriptorProto_ReservedRange_End_field_number: + r[1] = pref.FieldNumber(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + return r +} + +func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.DescriptorProto_ExtensionRange_Start_field_number: + r[0] = pref.FieldNumber(v) + case genid.DescriptorProto_ExtensionRange_End_field_number: + r[1] = pref.FieldNumber(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.DescriptorProto_ExtensionRange_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + return r, rawOptions +} + +func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + fd.L0.ParentFile = pf + fd.L0.Parent = pd + fd.L0.Index = i + + var rawTypeName []byte + var rawOptions []byte + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FieldDescriptorProto_Number_field_number: + fd.L1.Number = pref.FieldNumber(v) + case genid.FieldDescriptorProto_Label_field_number: + fd.L1.Cardinality = pref.Cardinality(v) + case genid.FieldDescriptorProto_Type_field_number: + fd.L1.Kind = pref.Kind(v) + case genid.FieldDescriptorProto_OneofIndex_field_number: + // In Message.unmarshalFull, we allocate slices for both + // the field and oneof descriptors before unmarshaling either + // of them. This ensures pointers to slice elements are stable. 
+ od := &pd.(*Message).L2.Oneofs.List[v] + od.L1.Fields.List = append(od.L1.Fields.List, fd) + if fd.L1.ContainingOneof != nil { + panic("oneof type already set") + } + fd.L1.ContainingOneof = od + case genid.FieldDescriptorProto_Proto3Optional_field_number: + fd.L1.IsProto3Optional = protowire.DecodeBool(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldDescriptorProto_Name_field_number: + fd.L0.FullName = appendFullName(sb, pd.FullName(), v) + case genid.FieldDescriptorProto_JsonName_field_number: + fd.L1.StringName.InitJSON(sb.MakeString(v)) + case genid.FieldDescriptorProto_DefaultValue_field_number: + fd.L1.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages + case genid.FieldDescriptorProto_TypeName_field_number: + rawTypeName = v + case genid.FieldDescriptorProto_Options_field_number: + fd.unmarshalOptions(v) + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + if rawTypeName != nil { + name := makeFullName(sb, rawTypeName) + switch fd.L1.Kind { + case pref.EnumKind: + fd.L1.Enum = PlaceholderEnum(name) + case pref.MessageKind, pref.GroupKind: + fd.L1.Message = PlaceholderMessage(name) + } + } + fd.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Field, rawOptions) +} + +func (fd *Field) unmarshalOptions(b []byte) { + const FieldOptions_EnforceUTF8 = 13 + + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FieldOptions_Packed_field_number: + fd.L1.HasPacked = true + fd.L1.IsPacked = protowire.DecodeBool(v) + case genid.FieldOptions_Weak_field_number: + fd.L1.IsWeak = protowire.DecodeBool(v) + case FieldOptions_EnforceUTF8: + fd.L1.HasEnforceUTF8 = true + fd.L1.EnforceUTF8 = protowire.DecodeBool(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + od.L0.ParentFile = pf + od.L0.Parent = pd + od.L0.Index = i + + var rawOptions []byte + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.OneofDescriptorProto_Name_field_number: + od.L0.FullName = appendFullName(sb, pd.FullName(), v) + case genid.OneofDescriptorProto_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + od.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Oneof, rawOptions) +} + +func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { + var rawTypeName []byte + var rawOptions []byte + xd.L2 = new(ExtensionL2) + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FieldDescriptorProto_Proto3Optional_field_number: + xd.L2.IsProto3Optional = protowire.DecodeBool(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldDescriptorProto_JsonName_field_number: + xd.L2.StringName.InitJSON(sb.MakeString(v)) + case genid.FieldDescriptorProto_DefaultValue_field_number: + xd.L2.Default.val = pref.ValueOfBytes(v) // temporarily store 
as bytes; later resolved in resolveExtensions + case genid.FieldDescriptorProto_TypeName_field_number: + rawTypeName = v + case genid.FieldDescriptorProto_Options_field_number: + xd.unmarshalOptions(v) + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + if rawTypeName != nil { + name := makeFullName(sb, rawTypeName) + switch xd.L1.Kind { + case pref.EnumKind: + xd.L2.Enum = PlaceholderEnum(name) + case pref.MessageKind, pref.GroupKind: + xd.L2.Message = PlaceholderMessage(name) + } + } + xd.L2.Options = xd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Field, rawOptions) +} + +func (xd *Extension) unmarshalOptions(b []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FieldOptions_Packed_field_number: + xd.L2.IsPacked = protowire.DecodeBool(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) { + var rawMethods [][]byte + var rawOptions []byte + sd.L2 = new(ServiceL2) + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.ServiceDescriptorProto_Method_field_number: + rawMethods = append(rawMethods, v) + case genid.ServiceDescriptorProto_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + if len(rawMethods) > 0 { + sd.L2.Methods.List = make([]Method, len(rawMethods)) + for i, b := range rawMethods { + sd.L2.Methods.List[i].unmarshalFull(b, sb, sd.L0.ParentFile, sd, i) + } + } + sd.L2.Options = sd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Service, rawOptions) +} + +func (md *Method) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + md.L0.ParentFile = pf + md.L0.Parent = pd + md.L0.Index = i + + var rawOptions []byte + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.MethodDescriptorProto_ClientStreaming_field_number: + md.L1.IsStreamingClient = protowire.DecodeBool(v) + case genid.MethodDescriptorProto_ServerStreaming_field_number: + md.L1.IsStreamingServer = protowire.DecodeBool(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.MethodDescriptorProto_Name_field_number: + md.L0.FullName = appendFullName(sb, pd.FullName(), v) + case genid.MethodDescriptorProto_InputType_field_number: + md.L1.Input = PlaceholderMessage(makeFullName(sb, v)) + case genid.MethodDescriptorProto_OutputType_field_number: + md.L1.Output = PlaceholderMessage(makeFullName(sb, v)) + case genid.MethodDescriptorProto_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + md.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Method, rawOptions) +} + +// appendOptions appends src to dst, where the returned slice is never nil. +// This is necessary to distinguish between empty and unpopulated options. +func appendOptions(dst, src []byte) []byte { + if dst == nil { + dst = []byte{} + } + return append(dst, src...) 
+} + +// optionsUnmarshaler constructs a lazy unmarshal function for an options message. +// +// The type of message to unmarshal to is passed as a pointer since the +// vars in descopts may not yet be populated at the time this function is called. +func (db *Builder) optionsUnmarshaler(p *pref.ProtoMessage, b []byte) func() pref.ProtoMessage { + if b == nil { + return nil + } + var opts pref.ProtoMessage + var once sync.Once + return func() pref.ProtoMessage { + once.Do(func() { + if *p == nil { + panic("Descriptor.Options called without importing the descriptor package") + } + opts = reflect.New(reflect.TypeOf(*p).Elem()).Interface().(pref.ProtoMessage) + if err := (proto.UnmarshalOptions{ + AllowPartial: true, + Resolver: db.TypeResolver, + }).Unmarshal(b, opts); err != nil { + panic(err) + } + }) + return opts + } +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go new file mode 100644 index 000000000..aa294fff9 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go @@ -0,0 +1,450 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "fmt" + "math" + "sort" + "sync" + + "google.golang.org/protobuf/internal/genid" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/descfmt" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type FileImports []pref.FileImport + +func (p *FileImports) Len() int { return len(*p) } +func (p *FileImports) Get(i int) pref.FileImport { return (*p)[i] } +func (p *FileImports) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *FileImports) ProtoInternal(pragma.DoNotImplement) {} + +type Names struct { + List []pref.Name + once sync.Once + has map[pref.Name]int // protected by once +} + +func (p *Names) Len() int { return len(p.List) } +func (p *Names) Get(i int) pref.Name { return p.List[i] } +func (p *Names) Has(s pref.Name) bool { return p.lazyInit().has[s] > 0 } +func (p *Names) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *Names) ProtoInternal(pragma.DoNotImplement) {} +func (p *Names) lazyInit() *Names { + p.once.Do(func() { + if len(p.List) > 0 { + p.has = make(map[pref.Name]int, len(p.List)) + for _, s := range p.List { + p.has[s] = p.has[s] + 1 + } + } + }) + return p +} + +// CheckValid reports any errors with the set of names with an error message +// that completes the sentence: "ranges is invalid because it has ..." +func (p *Names) CheckValid() error { + for s, n := range p.lazyInit().has { + switch { + case n > 1: + return errors.New("duplicate name: %q", s) + case false && !s.IsValid(): + // NOTE: The C++ implementation does not validate the identifier. + // See https://github.com/protocolbuffers/protobuf/issues/6335. 
+ return errors.New("invalid name: %q", s) + } + } + return nil +} + +type EnumRanges struct { + List [][2]pref.EnumNumber // start inclusive; end inclusive + once sync.Once + sorted [][2]pref.EnumNumber // protected by once +} + +func (p *EnumRanges) Len() int { return len(p.List) } +func (p *EnumRanges) Get(i int) [2]pref.EnumNumber { return p.List[i] } +func (p *EnumRanges) Has(n pref.EnumNumber) bool { + for ls := p.lazyInit().sorted; len(ls) > 0; { + i := len(ls) / 2 + switch r := enumRange(ls[i]); { + case n < r.Start(): + ls = ls[:i] // search lower + case n > r.End(): + ls = ls[i+1:] // search upper + default: + return true + } + } + return false +} +func (p *EnumRanges) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *EnumRanges) ProtoInternal(pragma.DoNotImplement) {} +func (p *EnumRanges) lazyInit() *EnumRanges { + p.once.Do(func() { + p.sorted = append(p.sorted, p.List...) + sort.Slice(p.sorted, func(i, j int) bool { + return p.sorted[i][0] < p.sorted[j][0] + }) + }) + return p +} + +// CheckValid reports any errors with the set of names with an error message +// that completes the sentence: "ranges is invalid because it has ..." +func (p *EnumRanges) CheckValid() error { + var rp enumRange + for i, r := range p.lazyInit().sorted { + r := enumRange(r) + switch { + case !(r.Start() <= r.End()): + return errors.New("invalid range: %v", r) + case !(rp.End() < r.Start()) && i > 0: + return errors.New("overlapping ranges: %v with %v", rp, r) + } + rp = r + } + return nil +} + +type enumRange [2]protoreflect.EnumNumber + +func (r enumRange) Start() protoreflect.EnumNumber { return r[0] } // inclusive +func (r enumRange) End() protoreflect.EnumNumber { return r[1] } // inclusive +func (r enumRange) String() string { + if r.Start() == r.End() { + return fmt.Sprintf("%d", r.Start()) + } + return fmt.Sprintf("%d to %d", r.Start(), r.End()) +} + +type FieldRanges struct { + List [][2]pref.FieldNumber // start inclusive; end exclusive + once sync.Once + sorted [][2]pref.FieldNumber // protected by once +} + +func (p *FieldRanges) Len() int { return len(p.List) } +func (p *FieldRanges) Get(i int) [2]pref.FieldNumber { return p.List[i] } +func (p *FieldRanges) Has(n pref.FieldNumber) bool { + for ls := p.lazyInit().sorted; len(ls) > 0; { + i := len(ls) / 2 + switch r := fieldRange(ls[i]); { + case n < r.Start(): + ls = ls[:i] // search lower + case n > r.End(): + ls = ls[i+1:] // search upper + default: + return true + } + } + return false +} +func (p *FieldRanges) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *FieldRanges) ProtoInternal(pragma.DoNotImplement) {} +func (p *FieldRanges) lazyInit() *FieldRanges { + p.once.Do(func() { + p.sorted = append(p.sorted, p.List...) + sort.Slice(p.sorted, func(i, j int) bool { + return p.sorted[i][0] < p.sorted[j][0] + }) + }) + return p +} + +// CheckValid reports any errors with the set of ranges with an error message +// that completes the sentence: "ranges is invalid because it has ..." 
+func (p *FieldRanges) CheckValid(isMessageSet bool) error { + var rp fieldRange + for i, r := range p.lazyInit().sorted { + r := fieldRange(r) + switch { + case !isValidFieldNumber(r.Start(), isMessageSet): + return errors.New("invalid field number: %d", r.Start()) + case !isValidFieldNumber(r.End(), isMessageSet): + return errors.New("invalid field number: %d", r.End()) + case !(r.Start() <= r.End()): + return errors.New("invalid range: %v", r) + case !(rp.End() < r.Start()) && i > 0: + return errors.New("overlapping ranges: %v with %v", rp, r) + } + rp = r + } + return nil +} + +// isValidFieldNumber reports whether the field number is valid. +// Unlike the FieldNumber.IsValid method, it allows ranges that cover the +// reserved number range. +func isValidFieldNumber(n protoreflect.FieldNumber, isMessageSet bool) bool { + return protowire.MinValidNumber <= n && (n <= protowire.MaxValidNumber || isMessageSet) +} + +// CheckOverlap reports an error if p and q overlap. +func (p *FieldRanges) CheckOverlap(q *FieldRanges) error { + rps := p.lazyInit().sorted + rqs := q.lazyInit().sorted + for pi, qi := 0, 0; pi < len(rps) && qi < len(rqs); { + rp := fieldRange(rps[pi]) + rq := fieldRange(rqs[qi]) + if !(rp.End() < rq.Start() || rq.End() < rp.Start()) { + return errors.New("overlapping ranges: %v with %v", rp, rq) + } + if rp.Start() < rq.Start() { + pi++ + } else { + qi++ + } + } + return nil +} + +type fieldRange [2]protoreflect.FieldNumber + +func (r fieldRange) Start() protoreflect.FieldNumber { return r[0] } // inclusive +func (r fieldRange) End() protoreflect.FieldNumber { return r[1] - 1 } // inclusive +func (r fieldRange) String() string { + if r.Start() == r.End() { + return fmt.Sprintf("%d", r.Start()) + } + return fmt.Sprintf("%d to %d", r.Start(), r.End()) +} + +type FieldNumbers struct { + List []pref.FieldNumber + once sync.Once + has map[pref.FieldNumber]struct{} // protected by once +} + +func (p *FieldNumbers) Len() int { return len(p.List) } +func (p *FieldNumbers) Get(i int) pref.FieldNumber { return p.List[i] } +func (p *FieldNumbers) Has(n pref.FieldNumber) bool { + p.once.Do(func() { + if len(p.List) > 0 { + p.has = make(map[pref.FieldNumber]struct{}, len(p.List)) + for _, n := range p.List { + p.has[n] = struct{}{} + } + } + }) + _, ok := p.has[n] + return ok +} +func (p *FieldNumbers) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *FieldNumbers) ProtoInternal(pragma.DoNotImplement) {} + +type OneofFields struct { + List []pref.FieldDescriptor + once sync.Once + byName map[pref.Name]pref.FieldDescriptor // protected by once + byJSON map[string]pref.FieldDescriptor // protected by once + byText map[string]pref.FieldDescriptor // protected by once + byNum map[pref.FieldNumber]pref.FieldDescriptor // protected by once +} + +func (p *OneofFields) Len() int { return len(p.List) } +func (p *OneofFields) Get(i int) pref.FieldDescriptor { return p.List[i] } +func (p *OneofFields) ByName(s pref.Name) pref.FieldDescriptor { return p.lazyInit().byName[s] } +func (p *OneofFields) ByJSONName(s string) pref.FieldDescriptor { return p.lazyInit().byJSON[s] } +func (p *OneofFields) ByTextName(s string) pref.FieldDescriptor { return p.lazyInit().byText[s] } +func (p *OneofFields) ByNumber(n pref.FieldNumber) pref.FieldDescriptor { return p.lazyInit().byNum[n] } +func (p *OneofFields) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *OneofFields) ProtoInternal(pragma.DoNotImplement) {} + +func (p *OneofFields) lazyInit() *OneofFields { + 
p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[pref.Name]pref.FieldDescriptor, len(p.List)) + p.byJSON = make(map[string]pref.FieldDescriptor, len(p.List)) + p.byText = make(map[string]pref.FieldDescriptor, len(p.List)) + p.byNum = make(map[pref.FieldNumber]pref.FieldDescriptor, len(p.List)) + for _, f := range p.List { + // Field names and numbers are guaranteed to be unique. + p.byName[f.Name()] = f + p.byJSON[f.JSONName()] = f + p.byText[f.TextName()] = f + p.byNum[f.Number()] = f + } + } + }) + return p +} + +type SourceLocations struct { + // List is a list of SourceLocations. + // The SourceLocation.Next field does not need to be populated + // as it will be lazily populated upon first need. + List []pref.SourceLocation + + // File is the parent file descriptor that these locations are relative to. + // If non-nil, ByDescriptor verifies that the provided descriptor + // is a child of this file descriptor. + File pref.FileDescriptor + + once sync.Once + byPath map[pathKey]int +} + +func (p *SourceLocations) Len() int { return len(p.List) } +func (p *SourceLocations) Get(i int) pref.SourceLocation { return p.lazyInit().List[i] } +func (p *SourceLocations) byKey(k pathKey) pref.SourceLocation { + if i, ok := p.lazyInit().byPath[k]; ok { + return p.List[i] + } + return pref.SourceLocation{} +} +func (p *SourceLocations) ByPath(path pref.SourcePath) pref.SourceLocation { + return p.byKey(newPathKey(path)) +} +func (p *SourceLocations) ByDescriptor(desc pref.Descriptor) pref.SourceLocation { + if p.File != nil && desc != nil && p.File != desc.ParentFile() { + return pref.SourceLocation{} // mismatching parent files + } + var pathArr [16]int32 + path := pathArr[:0] + for { + switch desc.(type) { + case pref.FileDescriptor: + // Reverse the path since it was constructed in reverse. 
+ for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 { + path[i], path[j] = path[j], path[i] + } + return p.byKey(newPathKey(path)) + case pref.MessageDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_MessageType_field_number)) + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_NestedType_field_number)) + default: + return pref.SourceLocation{} + } + case pref.FieldDescriptor: + isExtension := desc.(pref.FieldDescriptor).IsExtension() + path = append(path, int32(desc.Index())) + desc = desc.Parent() + if isExtension { + switch desc.(type) { + case pref.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_Extension_field_number)) + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_Extension_field_number)) + default: + return pref.SourceLocation{} + } + } else { + switch desc.(type) { + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_Field_field_number)) + default: + return pref.SourceLocation{} + } + } + case pref.OneofDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_OneofDecl_field_number)) + default: + return pref.SourceLocation{} + } + case pref.EnumDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_EnumType_field_number)) + case pref.MessageDescriptor: + path = append(path, int32(genid.DescriptorProto_EnumType_field_number)) + default: + return pref.SourceLocation{} + } + case pref.EnumValueDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.EnumDescriptor: + path = append(path, int32(genid.EnumDescriptorProto_Value_field_number)) + default: + return pref.SourceLocation{} + } + case pref.ServiceDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.FileDescriptor: + path = append(path, int32(genid.FileDescriptorProto_Service_field_number)) + default: + return pref.SourceLocation{} + } + case pref.MethodDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case pref.ServiceDescriptor: + path = append(path, int32(genid.ServiceDescriptorProto_Method_field_number)) + default: + return pref.SourceLocation{} + } + default: + return pref.SourceLocation{} + } + } +} +func (p *SourceLocations) lazyInit() *SourceLocations { + p.once.Do(func() { + if len(p.List) > 0 { + // Collect all the indexes for a given path. + pathIdxs := make(map[pathKey][]int, len(p.List)) + for i, l := range p.List { + k := newPathKey(l.Path) + pathIdxs[k] = append(pathIdxs[k], i) + } + + // Update the next index for all locations. + p.byPath = make(map[pathKey]int, len(p.List)) + for k, idxs := range pathIdxs { + for i := 0; i < len(idxs)-1; i++ { + p.List[idxs[i]].Next = idxs[i+1] + } + p.List[idxs[len(idxs)-1]].Next = 0 + p.byPath[k] = idxs[0] // record the first location for this path + } + } + }) + return p +} +func (p *SourceLocations) ProtoInternal(pragma.DoNotImplement) {} + +// pathKey is a comparable representation of protoreflect.SourcePath. 
+type pathKey struct { + arr [16]uint8 // first n-1 path segments; last element is the length + str string // used if the path does not fit in arr +} + +func newPathKey(p pref.SourcePath) (k pathKey) { + if len(p) < len(k.arr) { + for i, ps := range p { + if ps < 0 || math.MaxUint8 <= ps { + return pathKey{str: p.String()} + } + k.arr[i] = uint8(ps) + } + k.arr[len(k.arr)-1] = uint8(len(p)) + return k + } + return pathKey{str: p.String()} +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go new file mode 100644 index 000000000..30db19fdc --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go @@ -0,0 +1,356 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package filedesc + +import ( + "fmt" + "sync" + + "google.golang.org/protobuf/internal/descfmt" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" +) + +type Enums struct { + List []Enum + once sync.Once + byName map[protoreflect.Name]*Enum // protected by once +} + +func (p *Enums) Len() int { + return len(p.List) +} +func (p *Enums) Get(i int) protoreflect.EnumDescriptor { + return &p.List[i] +} +func (p *Enums) ByName(s protoreflect.Name) protoreflect.EnumDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Enums) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Enums) ProtoInternal(pragma.DoNotImplement) {} +func (p *Enums) lazyInit() *Enums { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Enum, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} + +type EnumValues struct { + List []EnumValue + once sync.Once + byName map[protoreflect.Name]*EnumValue // protected by once + byNum map[protoreflect.EnumNumber]*EnumValue // protected by once +} + +func (p *EnumValues) Len() int { + return len(p.List) +} +func (p *EnumValues) Get(i int) protoreflect.EnumValueDescriptor { + return &p.List[i] +} +func (p *EnumValues) ByName(s protoreflect.Name) protoreflect.EnumValueDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *EnumValues) ByNumber(n protoreflect.EnumNumber) protoreflect.EnumValueDescriptor { + if d := p.lazyInit().byNum[n]; d != nil { + return d + } + return nil +} +func (p *EnumValues) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *EnumValues) ProtoInternal(pragma.DoNotImplement) {} +func (p *EnumValues) lazyInit() *EnumValues { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*EnumValue, len(p.List)) + p.byNum = make(map[protoreflect.EnumNumber]*EnumValue, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + if _, ok := p.byNum[d.Number()]; !ok { + p.byNum[d.Number()] = d + } + } + } + }) + return p +} + +type Messages struct { + List []Message + once sync.Once + byName map[protoreflect.Name]*Message // protected by once +} + +func (p *Messages) Len() int { + return len(p.List) +} +func (p *Messages) Get(i int) protoreflect.MessageDescriptor { + return &p.List[i] +} +func (p *Messages) ByName(s 
protoreflect.Name) protoreflect.MessageDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Messages) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Messages) ProtoInternal(pragma.DoNotImplement) {} +func (p *Messages) lazyInit() *Messages { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Message, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} + +type Fields struct { + List []Field + once sync.Once + byName map[protoreflect.Name]*Field // protected by once + byJSON map[string]*Field // protected by once + byText map[string]*Field // protected by once + byNum map[protoreflect.FieldNumber]*Field // protected by once +} + +func (p *Fields) Len() int { + return len(p.List) +} +func (p *Fields) Get(i int) protoreflect.FieldDescriptor { + return &p.List[i] +} +func (p *Fields) ByName(s protoreflect.Name) protoreflect.FieldDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Fields) ByJSONName(s string) protoreflect.FieldDescriptor { + if d := p.lazyInit().byJSON[s]; d != nil { + return d + } + return nil +} +func (p *Fields) ByTextName(s string) protoreflect.FieldDescriptor { + if d := p.lazyInit().byText[s]; d != nil { + return d + } + return nil +} +func (p *Fields) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor { + if d := p.lazyInit().byNum[n]; d != nil { + return d + } + return nil +} +func (p *Fields) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Fields) ProtoInternal(pragma.DoNotImplement) {} +func (p *Fields) lazyInit() *Fields { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Field, len(p.List)) + p.byJSON = make(map[string]*Field, len(p.List)) + p.byText = make(map[string]*Field, len(p.List)) + p.byNum = make(map[protoreflect.FieldNumber]*Field, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + if _, ok := p.byJSON[d.JSONName()]; !ok { + p.byJSON[d.JSONName()] = d + } + if _, ok := p.byText[d.TextName()]; !ok { + p.byText[d.TextName()] = d + } + if _, ok := p.byNum[d.Number()]; !ok { + p.byNum[d.Number()] = d + } + } + } + }) + return p +} + +type Oneofs struct { + List []Oneof + once sync.Once + byName map[protoreflect.Name]*Oneof // protected by once +} + +func (p *Oneofs) Len() int { + return len(p.List) +} +func (p *Oneofs) Get(i int) protoreflect.OneofDescriptor { + return &p.List[i] +} +func (p *Oneofs) ByName(s protoreflect.Name) protoreflect.OneofDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Oneofs) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Oneofs) ProtoInternal(pragma.DoNotImplement) {} +func (p *Oneofs) lazyInit() *Oneofs { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Oneof, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} + +type Extensions struct { + List []Extension + once sync.Once + byName map[protoreflect.Name]*Extension // protected by once +} + +func (p *Extensions) Len() int { + return len(p.List) +} +func (p *Extensions) Get(i int) protoreflect.ExtensionDescriptor { + return &p.List[i] +} +func (p *Extensions) ByName(s 
protoreflect.Name) protoreflect.ExtensionDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Extensions) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Extensions) ProtoInternal(pragma.DoNotImplement) {} +func (p *Extensions) lazyInit() *Extensions { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Extension, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} + +type Services struct { + List []Service + once sync.Once + byName map[protoreflect.Name]*Service // protected by once +} + +func (p *Services) Len() int { + return len(p.List) +} +func (p *Services) Get(i int) protoreflect.ServiceDescriptor { + return &p.List[i] +} +func (p *Services) ByName(s protoreflect.Name) protoreflect.ServiceDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Services) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Services) ProtoInternal(pragma.DoNotImplement) {} +func (p *Services) lazyInit() *Services { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Service, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} + +type Methods struct { + List []Method + once sync.Once + byName map[protoreflect.Name]*Method // protected by once +} + +func (p *Methods) Len() int { + return len(p.List) +} +func (p *Methods) Get(i int) protoreflect.MethodDescriptor { + return &p.List[i] +} +func (p *Methods) ByName(s protoreflect.Name) protoreflect.MethodDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Methods) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Methods) ProtoInternal(pragma.DoNotImplement) {} +func (p *Methods) lazyInit() *Methods { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Method, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go new file mode 100644 index 000000000..dbf2c605b --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go @@ -0,0 +1,107 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "google.golang.org/protobuf/internal/descopts" + "google.golang.org/protobuf/internal/pragma" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +var ( + emptyNames = new(Names) + emptyEnumRanges = new(EnumRanges) + emptyFieldRanges = new(FieldRanges) + emptyFieldNumbers = new(FieldNumbers) + emptySourceLocations = new(SourceLocations) + + emptyFiles = new(FileImports) + emptyMessages = new(Messages) + emptyFields = new(Fields) + emptyOneofs = new(Oneofs) + emptyEnums = new(Enums) + emptyEnumValues = new(EnumValues) + emptyExtensions = new(Extensions) + emptyServices = new(Services) +) + +// PlaceholderFile is a placeholder, representing only the file path. 
+type PlaceholderFile string + +func (f PlaceholderFile) ParentFile() pref.FileDescriptor { return f } +func (f PlaceholderFile) Parent() pref.Descriptor { return nil } +func (f PlaceholderFile) Index() int { return 0 } +func (f PlaceholderFile) Syntax() pref.Syntax { return 0 } +func (f PlaceholderFile) Name() pref.Name { return "" } +func (f PlaceholderFile) FullName() pref.FullName { return "" } +func (f PlaceholderFile) IsPlaceholder() bool { return true } +func (f PlaceholderFile) Options() pref.ProtoMessage { return descopts.File } +func (f PlaceholderFile) Path() string { return string(f) } +func (f PlaceholderFile) Package() pref.FullName { return "" } +func (f PlaceholderFile) Imports() pref.FileImports { return emptyFiles } +func (f PlaceholderFile) Messages() pref.MessageDescriptors { return emptyMessages } +func (f PlaceholderFile) Enums() pref.EnumDescriptors { return emptyEnums } +func (f PlaceholderFile) Extensions() pref.ExtensionDescriptors { return emptyExtensions } +func (f PlaceholderFile) Services() pref.ServiceDescriptors { return emptyServices } +func (f PlaceholderFile) SourceLocations() pref.SourceLocations { return emptySourceLocations } +func (f PlaceholderFile) ProtoType(pref.FileDescriptor) { return } +func (f PlaceholderFile) ProtoInternal(pragma.DoNotImplement) { return } + +// PlaceholderEnum is a placeholder, representing only the full name. +type PlaceholderEnum pref.FullName + +func (e PlaceholderEnum) ParentFile() pref.FileDescriptor { return nil } +func (e PlaceholderEnum) Parent() pref.Descriptor { return nil } +func (e PlaceholderEnum) Index() int { return 0 } +func (e PlaceholderEnum) Syntax() pref.Syntax { return 0 } +func (e PlaceholderEnum) Name() pref.Name { return pref.FullName(e).Name() } +func (e PlaceholderEnum) FullName() pref.FullName { return pref.FullName(e) } +func (e PlaceholderEnum) IsPlaceholder() bool { return true } +func (e PlaceholderEnum) Options() pref.ProtoMessage { return descopts.Enum } +func (e PlaceholderEnum) Values() pref.EnumValueDescriptors { return emptyEnumValues } +func (e PlaceholderEnum) ReservedNames() pref.Names { return emptyNames } +func (e PlaceholderEnum) ReservedRanges() pref.EnumRanges { return emptyEnumRanges } +func (e PlaceholderEnum) ProtoType(pref.EnumDescriptor) { return } +func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return } + +// PlaceholderEnumValue is a placeholder, representing only the full name. +type PlaceholderEnumValue pref.FullName + +func (e PlaceholderEnumValue) ParentFile() pref.FileDescriptor { return nil } +func (e PlaceholderEnumValue) Parent() pref.Descriptor { return nil } +func (e PlaceholderEnumValue) Index() int { return 0 } +func (e PlaceholderEnumValue) Syntax() pref.Syntax { return 0 } +func (e PlaceholderEnumValue) Name() pref.Name { return pref.FullName(e).Name() } +func (e PlaceholderEnumValue) FullName() pref.FullName { return pref.FullName(e) } +func (e PlaceholderEnumValue) IsPlaceholder() bool { return true } +func (e PlaceholderEnumValue) Options() pref.ProtoMessage { return descopts.EnumValue } +func (e PlaceholderEnumValue) Number() pref.EnumNumber { return 0 } +func (e PlaceholderEnumValue) ProtoType(pref.EnumValueDescriptor) { return } +func (e PlaceholderEnumValue) ProtoInternal(pragma.DoNotImplement) { return } + +// PlaceholderMessage is a placeholder, representing only the full name. 
+type PlaceholderMessage pref.FullName + +func (m PlaceholderMessage) ParentFile() pref.FileDescriptor { return nil } +func (m PlaceholderMessage) Parent() pref.Descriptor { return nil } +func (m PlaceholderMessage) Index() int { return 0 } +func (m PlaceholderMessage) Syntax() pref.Syntax { return 0 } +func (m PlaceholderMessage) Name() pref.Name { return pref.FullName(m).Name() } +func (m PlaceholderMessage) FullName() pref.FullName { return pref.FullName(m) } +func (m PlaceholderMessage) IsPlaceholder() bool { return true } +func (m PlaceholderMessage) Options() pref.ProtoMessage { return descopts.Message } +func (m PlaceholderMessage) IsMapEntry() bool { return false } +func (m PlaceholderMessage) Fields() pref.FieldDescriptors { return emptyFields } +func (m PlaceholderMessage) Oneofs() pref.OneofDescriptors { return emptyOneofs } +func (m PlaceholderMessage) ReservedNames() pref.Names { return emptyNames } +func (m PlaceholderMessage) ReservedRanges() pref.FieldRanges { return emptyFieldRanges } +func (m PlaceholderMessage) RequiredNumbers() pref.FieldNumbers { return emptyFieldNumbers } +func (m PlaceholderMessage) ExtensionRanges() pref.FieldRanges { return emptyFieldRanges } +func (m PlaceholderMessage) ExtensionRangeOptions(int) pref.ProtoMessage { panic("index out of range") } +func (m PlaceholderMessage) Messages() pref.MessageDescriptors { return emptyMessages } +func (m PlaceholderMessage) Enums() pref.EnumDescriptors { return emptyEnums } +func (m PlaceholderMessage) Extensions() pref.ExtensionDescriptors { return emptyExtensions } +func (m PlaceholderMessage) ProtoType(pref.MessageDescriptor) { return } +func (m PlaceholderMessage) ProtoInternal(pragma.DoNotImplement) { return } diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go new file mode 100644 index 000000000..0a0dd35de --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go @@ -0,0 +1,297 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package filetype provides functionality for wrapping descriptors +// with Go type information. +package filetype + +import ( + "reflect" + + "google.golang.org/protobuf/internal/descopts" + fdesc "google.golang.org/protobuf/internal/filedesc" + pimpl "google.golang.org/protobuf/internal/impl" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" +) + +// Builder constructs type descriptors from a raw file descriptor +// and associated Go types for each enum and message declaration. +// +// +// Flattened Ordering +// +// The protobuf type system represents declarations as a tree. Certain nodes in +// the tree require us to either associate it with a concrete Go type or to +// resolve a dependency, which is information that must be provided separately +// since it cannot be derived from the file descriptor alone. +// +// However, representing a tree as Go literals is difficult to simply do in a +// space and time efficient way. Thus, we store them as a flattened list of +// objects where the serialization order from the tree-based form is important. 
+// +// The "flattened ordering" is defined as a tree traversal of all enum, message, +// extension, and service declarations using the following algorithm: +// +// def VisitFileDecls(fd): +// for e in fd.Enums: yield e +// for m in fd.Messages: yield m +// for x in fd.Extensions: yield x +// for s in fd.Services: yield s +// for m in fd.Messages: yield from VisitMessageDecls(m) +// +// def VisitMessageDecls(md): +// for e in md.Enums: yield e +// for m in md.Messages: yield m +// for x in md.Extensions: yield x +// for m in md.Messages: yield from VisitMessageDecls(m) +// +// The traversal starts at the root file descriptor and yields each direct +// declaration within each node before traversing into sub-declarations +// that children themselves may have. +type Builder struct { + // File is the underlying file descriptor builder. + File fdesc.Builder + + // GoTypes is a unique set of the Go types for all declarations and + // dependencies. Each type is represented as a zero value of the Go type. + // + // Declarations are Go types generated for enums and messages directly + // declared (not publicly imported) in the proto source file. + // Messages for map entries are accounted for, but represented by nil. + // Enum declarations in "flattened ordering" come first, followed by + // message declarations in "flattened ordering". + // + // Dependencies are Go types for enums or messages referenced by + // message fields (excluding weak fields), for parent extended messages of + // extension fields, for enums or messages referenced by extension fields, + // and for input and output messages referenced by service methods. + // Dependencies must come after declarations, but the ordering of + // dependencies themselves is unspecified. + GoTypes []interface{} + + // DependencyIndexes is an ordered list of indexes into GoTypes for the + // dependencies of messages, extensions, or services. + // + // There are 5 sub-lists in "flattened ordering" concatenated back-to-back: + // 0. Message field dependencies: list of the enum or message type + // referred to by every message field. + // 1. Extension field targets: list of the extended parent message of + // every extension. + // 2. Extension field dependencies: list of the enum or message type + // referred to by every extension field. + // 3. Service method inputs: list of the input message type + // referred to by every service method. + // 4. Service method outputs: list of the output message type + // referred to by every service method. + // + // The offset into DependencyIndexes for the start of each sub-list + // is appended to the end in reverse order. + DependencyIndexes []int32 + + // EnumInfos is a list of enum infos in "flattened ordering". + EnumInfos []pimpl.EnumInfo + + // MessageInfos is a list of message infos in "flattened ordering". + // If provided, the GoType and PBType for each element is populated. + // + // Requirement: len(MessageInfos) == len(Build.Messages) + MessageInfos []pimpl.MessageInfo + + // ExtensionInfos is a list of extension infos in "flattened ordering". + // Each element is initialized and registered with the protoregistry package. + // + // Requirement: len(LegacyExtensions) == len(Build.Extensions) + ExtensionInfos []pimpl.ExtensionInfo + + // TypeRegistry is the registry to register each type descriptor. + // If nil, it uses protoregistry.GlobalTypes. 
+ TypeRegistry interface { + RegisterMessage(pref.MessageType) error + RegisterEnum(pref.EnumType) error + RegisterExtension(pref.ExtensionType) error + } +} + +// Out is the output of the builder. +type Out struct { + File pref.FileDescriptor +} + +func (tb Builder) Build() (out Out) { + // Replace the resolver with one that resolves dependencies by index, + // which is faster and more reliable than relying on the global registry. + if tb.File.FileRegistry == nil { + tb.File.FileRegistry = preg.GlobalFiles + } + tb.File.FileRegistry = &resolverByIndex{ + goTypes: tb.GoTypes, + depIdxs: tb.DependencyIndexes, + fileRegistry: tb.File.FileRegistry, + } + + // Initialize registry if unpopulated. + if tb.TypeRegistry == nil { + tb.TypeRegistry = preg.GlobalTypes + } + + fbOut := tb.File.Build() + out.File = fbOut.File + + // Process enums. + enumGoTypes := tb.GoTypes[:len(fbOut.Enums)] + if len(tb.EnumInfos) != len(fbOut.Enums) { + panic("mismatching enum lengths") + } + if len(fbOut.Enums) > 0 { + for i := range fbOut.Enums { + tb.EnumInfos[i] = pimpl.EnumInfo{ + GoReflectType: reflect.TypeOf(enumGoTypes[i]), + Desc: &fbOut.Enums[i], + } + // Register enum types. + if err := tb.TypeRegistry.RegisterEnum(&tb.EnumInfos[i]); err != nil { + panic(err) + } + } + } + + // Process messages. + messageGoTypes := tb.GoTypes[len(fbOut.Enums):][:len(fbOut.Messages)] + if len(tb.MessageInfos) != len(fbOut.Messages) { + panic("mismatching message lengths") + } + if len(fbOut.Messages) > 0 { + for i := range fbOut.Messages { + if messageGoTypes[i] == nil { + continue // skip map entry + } + + tb.MessageInfos[i].GoReflectType = reflect.TypeOf(messageGoTypes[i]) + tb.MessageInfos[i].Desc = &fbOut.Messages[i] + + // Register message types. + if err := tb.TypeRegistry.RegisterMessage(&tb.MessageInfos[i]); err != nil { + panic(err) + } + } + + // As a special-case for descriptor.proto, + // locally register concrete message type for the options. + if out.File.Path() == "google/protobuf/descriptor.proto" && out.File.Package() == "google.protobuf" { + for i := range fbOut.Messages { + switch fbOut.Messages[i].Name() { + case "FileOptions": + descopts.File = messageGoTypes[i].(pref.ProtoMessage) + case "EnumOptions": + descopts.Enum = messageGoTypes[i].(pref.ProtoMessage) + case "EnumValueOptions": + descopts.EnumValue = messageGoTypes[i].(pref.ProtoMessage) + case "MessageOptions": + descopts.Message = messageGoTypes[i].(pref.ProtoMessage) + case "FieldOptions": + descopts.Field = messageGoTypes[i].(pref.ProtoMessage) + case "OneofOptions": + descopts.Oneof = messageGoTypes[i].(pref.ProtoMessage) + case "ExtensionRangeOptions": + descopts.ExtensionRange = messageGoTypes[i].(pref.ProtoMessage) + case "ServiceOptions": + descopts.Service = messageGoTypes[i].(pref.ProtoMessage) + case "MethodOptions": + descopts.Method = messageGoTypes[i].(pref.ProtoMessage) + } + } + } + } + + // Process extensions. + if len(tb.ExtensionInfos) != len(fbOut.Extensions) { + panic("mismatching extension lengths") + } + var depIdx int32 + for i := range fbOut.Extensions { + // For enum and message kinds, determine the referent Go type so + // that we can construct their constructors. 
+ const listExtDeps = 2 + var goType reflect.Type + switch fbOut.Extensions[i].L1.Kind { + case pref.EnumKind: + j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx) + goType = reflect.TypeOf(tb.GoTypes[j]) + depIdx++ + case pref.MessageKind, pref.GroupKind: + j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx) + goType = reflect.TypeOf(tb.GoTypes[j]) + depIdx++ + default: + goType = goTypeForPBKind[fbOut.Extensions[i].L1.Kind] + } + if fbOut.Extensions[i].IsList() { + goType = reflect.SliceOf(goType) + } + + pimpl.InitExtensionInfo(&tb.ExtensionInfos[i], &fbOut.Extensions[i], goType) + + // Register extension types. + if err := tb.TypeRegistry.RegisterExtension(&tb.ExtensionInfos[i]); err != nil { + panic(err) + } + } + + return out +} + +var goTypeForPBKind = map[pref.Kind]reflect.Type{ + pref.BoolKind: reflect.TypeOf(bool(false)), + pref.Int32Kind: reflect.TypeOf(int32(0)), + pref.Sint32Kind: reflect.TypeOf(int32(0)), + pref.Sfixed32Kind: reflect.TypeOf(int32(0)), + pref.Int64Kind: reflect.TypeOf(int64(0)), + pref.Sint64Kind: reflect.TypeOf(int64(0)), + pref.Sfixed64Kind: reflect.TypeOf(int64(0)), + pref.Uint32Kind: reflect.TypeOf(uint32(0)), + pref.Fixed32Kind: reflect.TypeOf(uint32(0)), + pref.Uint64Kind: reflect.TypeOf(uint64(0)), + pref.Fixed64Kind: reflect.TypeOf(uint64(0)), + pref.FloatKind: reflect.TypeOf(float32(0)), + pref.DoubleKind: reflect.TypeOf(float64(0)), + pref.StringKind: reflect.TypeOf(string("")), + pref.BytesKind: reflect.TypeOf([]byte(nil)), +} + +type depIdxs []int32 + +// Get retrieves the jth element of the ith sub-list. +func (x depIdxs) Get(i, j int32) int32 { + return x[x[int32(len(x))-i-1]+j] +} + +type ( + resolverByIndex struct { + goTypes []interface{} + depIdxs depIdxs + fileRegistry + } + fileRegistry interface { + FindFileByPath(string) (pref.FileDescriptor, error) + FindDescriptorByName(pref.FullName) (pref.Descriptor, error) + RegisterFile(pref.FileDescriptor) error + } +) + +func (r *resolverByIndex) FindEnumByIndex(i, j int32, es []fdesc.Enum, ms []fdesc.Message) pref.EnumDescriptor { + if depIdx := int(r.depIdxs.Get(i, j)); int(depIdx) < len(es)+len(ms) { + return &es[depIdx] + } else { + return pimpl.Export{}.EnumDescriptorOf(r.goTypes[depIdx]) + } +} + +func (r *resolverByIndex) FindMessageByIndex(i, j int32, es []fdesc.Enum, ms []fdesc.Message) pref.MessageDescriptor { + if depIdx := int(r.depIdxs.Get(i, j)); depIdx < len(es)+len(ms) { + return &ms[depIdx-len(es)] + } else { + return pimpl.Export{}.MessageDescriptorOf(r.goTypes[depIdx]) + } +} diff --git a/vendor/google.golang.org/protobuf/internal/flags/flags.go b/vendor/google.golang.org/protobuf/internal/flags/flags.go new file mode 100644 index 000000000..58372dd34 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/flags/flags.go @@ -0,0 +1,24 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package flags provides a set of flags controlled by build tags. +package flags + +// ProtoLegacy specifies whether to enable support for legacy functionality +// such as MessageSets, weak fields, and various other obscure behavior +// that is necessary to maintain backwards compatibility with proto1 or +// the pre-release variants of proto2 and proto3. +// +// This is disabled by default unless built with the "protolegacy" tag. +// +// WARNING: The compatibility agreement covers nothing provided by this flag. 
+// As such, functionality may suddenly be removed or changed at our discretion. +const ProtoLegacy = protoLegacy + +// LazyUnmarshalExtensions specifies whether to lazily unmarshal extensions. +// +// Lazy extension unmarshaling validates the contents of message-valued +// extension fields at unmarshal time, but defers creating the message +// structure until the extension is first accessed. +const LazyUnmarshalExtensions = ProtoLegacy diff --git a/vendor/golang.org/x/xerrors/internal/internal.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go similarity index 57% rename from vendor/golang.org/x/xerrors/internal/internal.go rename to vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go index 89f4eca5d..bda8e8cf3 100644 --- a/vendor/golang.org/x/xerrors/internal/internal.go +++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go @@ -2,7 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package internal +//go:build !protolegacy +// +build !protolegacy -// EnableTrace indicates whether stack information should be recorded in errors. -var EnableTrace = true +package flags + +const protoLegacy = false diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go new file mode 100644 index 000000000..6d8d9bd6b --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go @@ -0,0 +1,10 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build protolegacy +// +build protolegacy + +package flags + +const protoLegacy = true diff --git a/vendor/google.golang.org/protobuf/internal/genid/any_gen.go b/vendor/google.golang.org/protobuf/internal/genid/any_gen.go new file mode 100644 index 000000000..e6f7d47ab --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/any_gen.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_any_proto = "google/protobuf/any.proto" + +// Names for google.protobuf.Any. +const ( + Any_message_name protoreflect.Name = "Any" + Any_message_fullname protoreflect.FullName = "google.protobuf.Any" +) + +// Field names for google.protobuf.Any. +const ( + Any_TypeUrl_field_name protoreflect.Name = "type_url" + Any_Value_field_name protoreflect.Name = "value" + + Any_TypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Any.type_url" + Any_Value_field_fullname protoreflect.FullName = "google.protobuf.Any.value" +) + +// Field numbers for google.protobuf.Any. +const ( + Any_TypeUrl_field_number protoreflect.FieldNumber = 1 + Any_Value_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go new file mode 100644 index 000000000..df8f91850 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go @@ -0,0 +1,106 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_api_proto = "google/protobuf/api.proto" + +// Names for google.protobuf.Api. +const ( + Api_message_name protoreflect.Name = "Api" + Api_message_fullname protoreflect.FullName = "google.protobuf.Api" +) + +// Field names for google.protobuf.Api. +const ( + Api_Name_field_name protoreflect.Name = "name" + Api_Methods_field_name protoreflect.Name = "methods" + Api_Options_field_name protoreflect.Name = "options" + Api_Version_field_name protoreflect.Name = "version" + Api_SourceContext_field_name protoreflect.Name = "source_context" + Api_Mixins_field_name protoreflect.Name = "mixins" + Api_Syntax_field_name protoreflect.Name = "syntax" + + Api_Name_field_fullname protoreflect.FullName = "google.protobuf.Api.name" + Api_Methods_field_fullname protoreflect.FullName = "google.protobuf.Api.methods" + Api_Options_field_fullname protoreflect.FullName = "google.protobuf.Api.options" + Api_Version_field_fullname protoreflect.FullName = "google.protobuf.Api.version" + Api_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Api.source_context" + Api_Mixins_field_fullname protoreflect.FullName = "google.protobuf.Api.mixins" + Api_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Api.syntax" +) + +// Field numbers for google.protobuf.Api. +const ( + Api_Name_field_number protoreflect.FieldNumber = 1 + Api_Methods_field_number protoreflect.FieldNumber = 2 + Api_Options_field_number protoreflect.FieldNumber = 3 + Api_Version_field_number protoreflect.FieldNumber = 4 + Api_SourceContext_field_number protoreflect.FieldNumber = 5 + Api_Mixins_field_number protoreflect.FieldNumber = 6 + Api_Syntax_field_number protoreflect.FieldNumber = 7 +) + +// Names for google.protobuf.Method. +const ( + Method_message_name protoreflect.Name = "Method" + Method_message_fullname protoreflect.FullName = "google.protobuf.Method" +) + +// Field names for google.protobuf.Method. +const ( + Method_Name_field_name protoreflect.Name = "name" + Method_RequestTypeUrl_field_name protoreflect.Name = "request_type_url" + Method_RequestStreaming_field_name protoreflect.Name = "request_streaming" + Method_ResponseTypeUrl_field_name protoreflect.Name = "response_type_url" + Method_ResponseStreaming_field_name protoreflect.Name = "response_streaming" + Method_Options_field_name protoreflect.Name = "options" + Method_Syntax_field_name protoreflect.Name = "syntax" + + Method_Name_field_fullname protoreflect.FullName = "google.protobuf.Method.name" + Method_RequestTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.request_type_url" + Method_RequestStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.request_streaming" + Method_ResponseTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.response_type_url" + Method_ResponseStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.response_streaming" + Method_Options_field_fullname protoreflect.FullName = "google.protobuf.Method.options" + Method_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Method.syntax" +) + +// Field numbers for google.protobuf.Method. 
+const ( + Method_Name_field_number protoreflect.FieldNumber = 1 + Method_RequestTypeUrl_field_number protoreflect.FieldNumber = 2 + Method_RequestStreaming_field_number protoreflect.FieldNumber = 3 + Method_ResponseTypeUrl_field_number protoreflect.FieldNumber = 4 + Method_ResponseStreaming_field_number protoreflect.FieldNumber = 5 + Method_Options_field_number protoreflect.FieldNumber = 6 + Method_Syntax_field_number protoreflect.FieldNumber = 7 +) + +// Names for google.protobuf.Mixin. +const ( + Mixin_message_name protoreflect.Name = "Mixin" + Mixin_message_fullname protoreflect.FullName = "google.protobuf.Mixin" +) + +// Field names for google.protobuf.Mixin. +const ( + Mixin_Name_field_name protoreflect.Name = "name" + Mixin_Root_field_name protoreflect.Name = "root" + + Mixin_Name_field_fullname protoreflect.FullName = "google.protobuf.Mixin.name" + Mixin_Root_field_fullname protoreflect.FullName = "google.protobuf.Mixin.root" +) + +// Field numbers for google.protobuf.Mixin. +const ( + Mixin_Name_field_number protoreflect.FieldNumber = 1 + Mixin_Root_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go new file mode 100644 index 000000000..e3cdf1c20 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -0,0 +1,829 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_descriptor_proto = "google/protobuf/descriptor.proto" + +// Names for google.protobuf.FileDescriptorSet. +const ( + FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet" + FileDescriptorSet_message_fullname protoreflect.FullName = "google.protobuf.FileDescriptorSet" +) + +// Field names for google.protobuf.FileDescriptorSet. +const ( + FileDescriptorSet_File_field_name protoreflect.Name = "file" + + FileDescriptorSet_File_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorSet.file" +) + +// Field numbers for google.protobuf.FileDescriptorSet. +const ( + FileDescriptorSet_File_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.FileDescriptorProto. +const ( + FileDescriptorProto_message_name protoreflect.Name = "FileDescriptorProto" + FileDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto" +) + +// Field names for google.protobuf.FileDescriptorProto. 
+const ( + FileDescriptorProto_Name_field_name protoreflect.Name = "name" + FileDescriptorProto_Package_field_name protoreflect.Name = "package" + FileDescriptorProto_Dependency_field_name protoreflect.Name = "dependency" + FileDescriptorProto_PublicDependency_field_name protoreflect.Name = "public_dependency" + FileDescriptorProto_WeakDependency_field_name protoreflect.Name = "weak_dependency" + FileDescriptorProto_MessageType_field_name protoreflect.Name = "message_type" + FileDescriptorProto_EnumType_field_name protoreflect.Name = "enum_type" + FileDescriptorProto_Service_field_name protoreflect.Name = "service" + FileDescriptorProto_Extension_field_name protoreflect.Name = "extension" + FileDescriptorProto_Options_field_name protoreflect.Name = "options" + FileDescriptorProto_SourceCodeInfo_field_name protoreflect.Name = "source_code_info" + FileDescriptorProto_Syntax_field_name protoreflect.Name = "syntax" + + FileDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.name" + FileDescriptorProto_Package_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.package" + FileDescriptorProto_Dependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.dependency" + FileDescriptorProto_PublicDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.public_dependency" + FileDescriptorProto_WeakDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.weak_dependency" + FileDescriptorProto_MessageType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.message_type" + FileDescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.enum_type" + FileDescriptorProto_Service_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.service" + FileDescriptorProto_Extension_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.extension" + FileDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.options" + FileDescriptorProto_SourceCodeInfo_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.source_code_info" + FileDescriptorProto_Syntax_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.syntax" +) + +// Field numbers for google.protobuf.FileDescriptorProto. +const ( + FileDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + FileDescriptorProto_Package_field_number protoreflect.FieldNumber = 2 + FileDescriptorProto_Dependency_field_number protoreflect.FieldNumber = 3 + FileDescriptorProto_PublicDependency_field_number protoreflect.FieldNumber = 10 + FileDescriptorProto_WeakDependency_field_number protoreflect.FieldNumber = 11 + FileDescriptorProto_MessageType_field_number protoreflect.FieldNumber = 4 + FileDescriptorProto_EnumType_field_number protoreflect.FieldNumber = 5 + FileDescriptorProto_Service_field_number protoreflect.FieldNumber = 6 + FileDescriptorProto_Extension_field_number protoreflect.FieldNumber = 7 + FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 + FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9 + FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12 +) + +// Names for google.protobuf.DescriptorProto. 
+const ( + DescriptorProto_message_name protoreflect.Name = "DescriptorProto" + DescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto" +) + +// Field names for google.protobuf.DescriptorProto. +const ( + DescriptorProto_Name_field_name protoreflect.Name = "name" + DescriptorProto_Field_field_name protoreflect.Name = "field" + DescriptorProto_Extension_field_name protoreflect.Name = "extension" + DescriptorProto_NestedType_field_name protoreflect.Name = "nested_type" + DescriptorProto_EnumType_field_name protoreflect.Name = "enum_type" + DescriptorProto_ExtensionRange_field_name protoreflect.Name = "extension_range" + DescriptorProto_OneofDecl_field_name protoreflect.Name = "oneof_decl" + DescriptorProto_Options_field_name protoreflect.Name = "options" + DescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range" + DescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name" + + DescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.name" + DescriptorProto_Field_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.field" + DescriptorProto_Extension_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.extension" + DescriptorProto_NestedType_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.nested_type" + DescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.enum_type" + DescriptorProto_ExtensionRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.extension_range" + DescriptorProto_OneofDecl_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.oneof_decl" + DescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.options" + DescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_range" + DescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_name" +) + +// Field numbers for google.protobuf.DescriptorProto. +const ( + DescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + DescriptorProto_Field_field_number protoreflect.FieldNumber = 2 + DescriptorProto_Extension_field_number protoreflect.FieldNumber = 6 + DescriptorProto_NestedType_field_number protoreflect.FieldNumber = 3 + DescriptorProto_EnumType_field_number protoreflect.FieldNumber = 4 + DescriptorProto_ExtensionRange_field_number protoreflect.FieldNumber = 5 + DescriptorProto_OneofDecl_field_number protoreflect.FieldNumber = 8 + DescriptorProto_Options_field_number protoreflect.FieldNumber = 7 + DescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 9 + DescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 10 +) + +// Names for google.protobuf.DescriptorProto.ExtensionRange. +const ( + DescriptorProto_ExtensionRange_message_name protoreflect.Name = "ExtensionRange" + DescriptorProto_ExtensionRange_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange" +) + +// Field names for google.protobuf.DescriptorProto.ExtensionRange. 
+const ( + DescriptorProto_ExtensionRange_Start_field_name protoreflect.Name = "start" + DescriptorProto_ExtensionRange_End_field_name protoreflect.Name = "end" + DescriptorProto_ExtensionRange_Options_field_name protoreflect.Name = "options" + + DescriptorProto_ExtensionRange_Start_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.start" + DescriptorProto_ExtensionRange_End_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.end" + DescriptorProto_ExtensionRange_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.options" +) + +// Field numbers for google.protobuf.DescriptorProto.ExtensionRange. +const ( + DescriptorProto_ExtensionRange_Start_field_number protoreflect.FieldNumber = 1 + DescriptorProto_ExtensionRange_End_field_number protoreflect.FieldNumber = 2 + DescriptorProto_ExtensionRange_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.DescriptorProto.ReservedRange. +const ( + DescriptorProto_ReservedRange_message_name protoreflect.Name = "ReservedRange" + DescriptorProto_ReservedRange_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange" +) + +// Field names for google.protobuf.DescriptorProto.ReservedRange. +const ( + DescriptorProto_ReservedRange_Start_field_name protoreflect.Name = "start" + DescriptorProto_ReservedRange_End_field_name protoreflect.Name = "end" + + DescriptorProto_ReservedRange_Start_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange.start" + DescriptorProto_ReservedRange_End_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange.end" +) + +// Field numbers for google.protobuf.DescriptorProto.ReservedRange. +const ( + DescriptorProto_ReservedRange_Start_field_number protoreflect.FieldNumber = 1 + DescriptorProto_ReservedRange_End_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.ExtensionRangeOptions. +const ( + ExtensionRangeOptions_message_name protoreflect.Name = "ExtensionRangeOptions" + ExtensionRangeOptions_message_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions" +) + +// Field names for google.protobuf.ExtensionRangeOptions. +const ( + ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.ExtensionRangeOptions. +const ( + ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.FieldDescriptorProto. +const ( + FieldDescriptorProto_message_name protoreflect.Name = "FieldDescriptorProto" + FieldDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto" +) + +// Field names for google.protobuf.FieldDescriptorProto. 
+const ( + FieldDescriptorProto_Name_field_name protoreflect.Name = "name" + FieldDescriptorProto_Number_field_name protoreflect.Name = "number" + FieldDescriptorProto_Label_field_name protoreflect.Name = "label" + FieldDescriptorProto_Type_field_name protoreflect.Name = "type" + FieldDescriptorProto_TypeName_field_name protoreflect.Name = "type_name" + FieldDescriptorProto_Extendee_field_name protoreflect.Name = "extendee" + FieldDescriptorProto_DefaultValue_field_name protoreflect.Name = "default_value" + FieldDescriptorProto_OneofIndex_field_name protoreflect.Name = "oneof_index" + FieldDescriptorProto_JsonName_field_name protoreflect.Name = "json_name" + FieldDescriptorProto_Options_field_name protoreflect.Name = "options" + FieldDescriptorProto_Proto3Optional_field_name protoreflect.Name = "proto3_optional" + + FieldDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.name" + FieldDescriptorProto_Number_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.number" + FieldDescriptorProto_Label_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.label" + FieldDescriptorProto_Type_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.type" + FieldDescriptorProto_TypeName_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.type_name" + FieldDescriptorProto_Extendee_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.extendee" + FieldDescriptorProto_DefaultValue_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.default_value" + FieldDescriptorProto_OneofIndex_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.oneof_index" + FieldDescriptorProto_JsonName_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.json_name" + FieldDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.options" + FieldDescriptorProto_Proto3Optional_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.proto3_optional" +) + +// Field numbers for google.protobuf.FieldDescriptorProto. +const ( + FieldDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + FieldDescriptorProto_Number_field_number protoreflect.FieldNumber = 3 + FieldDescriptorProto_Label_field_number protoreflect.FieldNumber = 4 + FieldDescriptorProto_Type_field_number protoreflect.FieldNumber = 5 + FieldDescriptorProto_TypeName_field_number protoreflect.FieldNumber = 6 + FieldDescriptorProto_Extendee_field_number protoreflect.FieldNumber = 2 + FieldDescriptorProto_DefaultValue_field_number protoreflect.FieldNumber = 7 + FieldDescriptorProto_OneofIndex_field_number protoreflect.FieldNumber = 9 + FieldDescriptorProto_JsonName_field_number protoreflect.FieldNumber = 10 + FieldDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 + FieldDescriptorProto_Proto3Optional_field_number protoreflect.FieldNumber = 17 +) + +// Full and short names for google.protobuf.FieldDescriptorProto.Type. +const ( + FieldDescriptorProto_Type_enum_fullname = "google.protobuf.FieldDescriptorProto.Type" + FieldDescriptorProto_Type_enum_name = "Type" +) + +// Full and short names for google.protobuf.FieldDescriptorProto.Label. +const ( + FieldDescriptorProto_Label_enum_fullname = "google.protobuf.FieldDescriptorProto.Label" + FieldDescriptorProto_Label_enum_name = "Label" +) + +// Names for google.protobuf.OneofDescriptorProto. 
+const ( + OneofDescriptorProto_message_name protoreflect.Name = "OneofDescriptorProto" + OneofDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto" +) + +// Field names for google.protobuf.OneofDescriptorProto. +const ( + OneofDescriptorProto_Name_field_name protoreflect.Name = "name" + OneofDescriptorProto_Options_field_name protoreflect.Name = "options" + + OneofDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto.name" + OneofDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto.options" +) + +// Field numbers for google.protobuf.OneofDescriptorProto. +const ( + OneofDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + OneofDescriptorProto_Options_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.EnumDescriptorProto. +const ( + EnumDescriptorProto_message_name protoreflect.Name = "EnumDescriptorProto" + EnumDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto" +) + +// Field names for google.protobuf.EnumDescriptorProto. +const ( + EnumDescriptorProto_Name_field_name protoreflect.Name = "name" + EnumDescriptorProto_Value_field_name protoreflect.Name = "value" + EnumDescriptorProto_Options_field_name protoreflect.Name = "options" + EnumDescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range" + EnumDescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name" + + EnumDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.name" + EnumDescriptorProto_Value_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.value" + EnumDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.options" + EnumDescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_range" + EnumDescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_name" +) + +// Field numbers for google.protobuf.EnumDescriptorProto. +const ( + EnumDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + EnumDescriptorProto_Value_field_number protoreflect.FieldNumber = 2 + EnumDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 + EnumDescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 4 + EnumDescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 5 +) + +// Names for google.protobuf.EnumDescriptorProto.EnumReservedRange. +const ( + EnumDescriptorProto_EnumReservedRange_message_name protoreflect.Name = "EnumReservedRange" + EnumDescriptorProto_EnumReservedRange_message_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange" +) + +// Field names for google.protobuf.EnumDescriptorProto.EnumReservedRange. +const ( + EnumDescriptorProto_EnumReservedRange_Start_field_name protoreflect.Name = "start" + EnumDescriptorProto_EnumReservedRange_End_field_name protoreflect.Name = "end" + + EnumDescriptorProto_EnumReservedRange_Start_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange.start" + EnumDescriptorProto_EnumReservedRange_End_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange.end" +) + +// Field numbers for google.protobuf.EnumDescriptorProto.EnumReservedRange. 
+const ( + EnumDescriptorProto_EnumReservedRange_Start_field_number protoreflect.FieldNumber = 1 + EnumDescriptorProto_EnumReservedRange_End_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.EnumValueDescriptorProto. +const ( + EnumValueDescriptorProto_message_name protoreflect.Name = "EnumValueDescriptorProto" + EnumValueDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto" +) + +// Field names for google.protobuf.EnumValueDescriptorProto. +const ( + EnumValueDescriptorProto_Name_field_name protoreflect.Name = "name" + EnumValueDescriptorProto_Number_field_name protoreflect.Name = "number" + EnumValueDescriptorProto_Options_field_name protoreflect.Name = "options" + + EnumValueDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.name" + EnumValueDescriptorProto_Number_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.number" + EnumValueDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.options" +) + +// Field numbers for google.protobuf.EnumValueDescriptorProto. +const ( + EnumValueDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + EnumValueDescriptorProto_Number_field_number protoreflect.FieldNumber = 2 + EnumValueDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.ServiceDescriptorProto. +const ( + ServiceDescriptorProto_message_name protoreflect.Name = "ServiceDescriptorProto" + ServiceDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto" +) + +// Field names for google.protobuf.ServiceDescriptorProto. +const ( + ServiceDescriptorProto_Name_field_name protoreflect.Name = "name" + ServiceDescriptorProto_Method_field_name protoreflect.Name = "method" + ServiceDescriptorProto_Options_field_name protoreflect.Name = "options" + + ServiceDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.name" + ServiceDescriptorProto_Method_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.method" + ServiceDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.options" +) + +// Field numbers for google.protobuf.ServiceDescriptorProto. +const ( + ServiceDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + ServiceDescriptorProto_Method_field_number protoreflect.FieldNumber = 2 + ServiceDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.MethodDescriptorProto. +const ( + MethodDescriptorProto_message_name protoreflect.Name = "MethodDescriptorProto" + MethodDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto" +) + +// Field names for google.protobuf.MethodDescriptorProto. 
+const ( + MethodDescriptorProto_Name_field_name protoreflect.Name = "name" + MethodDescriptorProto_InputType_field_name protoreflect.Name = "input_type" + MethodDescriptorProto_OutputType_field_name protoreflect.Name = "output_type" + MethodDescriptorProto_Options_field_name protoreflect.Name = "options" + MethodDescriptorProto_ClientStreaming_field_name protoreflect.Name = "client_streaming" + MethodDescriptorProto_ServerStreaming_field_name protoreflect.Name = "server_streaming" + + MethodDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.name" + MethodDescriptorProto_InputType_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.input_type" + MethodDescriptorProto_OutputType_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.output_type" + MethodDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.options" + MethodDescriptorProto_ClientStreaming_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.client_streaming" + MethodDescriptorProto_ServerStreaming_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.server_streaming" +) + +// Field numbers for google.protobuf.MethodDescriptorProto. +const ( + MethodDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + MethodDescriptorProto_InputType_field_number protoreflect.FieldNumber = 2 + MethodDescriptorProto_OutputType_field_number protoreflect.FieldNumber = 3 + MethodDescriptorProto_Options_field_number protoreflect.FieldNumber = 4 + MethodDescriptorProto_ClientStreaming_field_number protoreflect.FieldNumber = 5 + MethodDescriptorProto_ServerStreaming_field_number protoreflect.FieldNumber = 6 +) + +// Names for google.protobuf.FileOptions. +const ( + FileOptions_message_name protoreflect.Name = "FileOptions" + FileOptions_message_fullname protoreflect.FullName = "google.protobuf.FileOptions" +) + +// Field names for google.protobuf.FileOptions. 
+const (
+	FileOptions_JavaPackage_field_name protoreflect.Name = "java_package"
+	FileOptions_JavaOuterClassname_field_name protoreflect.Name = "java_outer_classname"
+	FileOptions_JavaMultipleFiles_field_name protoreflect.Name = "java_multiple_files"
+	FileOptions_JavaGenerateEqualsAndHash_field_name protoreflect.Name = "java_generate_equals_and_hash"
+	FileOptions_JavaStringCheckUtf8_field_name protoreflect.Name = "java_string_check_utf8"
+	FileOptions_OptimizeFor_field_name protoreflect.Name = "optimize_for"
+	FileOptions_GoPackage_field_name protoreflect.Name = "go_package"
+	FileOptions_CcGenericServices_field_name protoreflect.Name = "cc_generic_services"
+	FileOptions_JavaGenericServices_field_name protoreflect.Name = "java_generic_services"
+	FileOptions_PyGenericServices_field_name protoreflect.Name = "py_generic_services"
+	FileOptions_PhpGenericServices_field_name protoreflect.Name = "php_generic_services"
+	FileOptions_Deprecated_field_name protoreflect.Name = "deprecated"
+	FileOptions_CcEnableArenas_field_name protoreflect.Name = "cc_enable_arenas"
+	FileOptions_ObjcClassPrefix_field_name protoreflect.Name = "objc_class_prefix"
+	FileOptions_CsharpNamespace_field_name protoreflect.Name = "csharp_namespace"
+	FileOptions_SwiftPrefix_field_name protoreflect.Name = "swift_prefix"
+	FileOptions_PhpClassPrefix_field_name protoreflect.Name = "php_class_prefix"
+	FileOptions_PhpNamespace_field_name protoreflect.Name = "php_namespace"
+	FileOptions_PhpMetadataNamespace_field_name protoreflect.Name = "php_metadata_namespace"
+	FileOptions_RubyPackage_field_name protoreflect.Name = "ruby_package"
+	FileOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
+
+	FileOptions_JavaPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_package"
+	FileOptions_JavaOuterClassname_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_outer_classname"
+	FileOptions_JavaMultipleFiles_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_multiple_files"
+	FileOptions_JavaGenerateEqualsAndHash_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generate_equals_and_hash"
+	FileOptions_JavaStringCheckUtf8_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_string_check_utf8"
+	FileOptions_OptimizeFor_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.optimize_for"
+	FileOptions_GoPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.go_package"
+	FileOptions_CcGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_generic_services"
+	FileOptions_JavaGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generic_services"
+	FileOptions_PyGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.py_generic_services"
+	FileOptions_PhpGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_generic_services"
+	FileOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.deprecated"
+	FileOptions_CcEnableArenas_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_enable_arenas"
+	FileOptions_ObjcClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.objc_class_prefix"
+	FileOptions_CsharpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.csharp_namespace"
+	FileOptions_SwiftPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.swift_prefix"
+	FileOptions_PhpClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_class_prefix"
+	FileOptions_PhpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_namespace"
+	FileOptions_PhpMetadataNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_metadata_namespace"
+	FileOptions_RubyPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.ruby_package"
+	FileOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.uninterpreted_option"
+)
+
+// Field numbers for google.protobuf.FileOptions.
+const (
+	FileOptions_JavaPackage_field_number protoreflect.FieldNumber = 1
+	FileOptions_JavaOuterClassname_field_number protoreflect.FieldNumber = 8
+	FileOptions_JavaMultipleFiles_field_number protoreflect.FieldNumber = 10
+	FileOptions_JavaGenerateEqualsAndHash_field_number protoreflect.FieldNumber = 20
+	FileOptions_JavaStringCheckUtf8_field_number protoreflect.FieldNumber = 27
+	FileOptions_OptimizeFor_field_number protoreflect.FieldNumber = 9
+	FileOptions_GoPackage_field_number protoreflect.FieldNumber = 11
+	FileOptions_CcGenericServices_field_number protoreflect.FieldNumber = 16
+	FileOptions_JavaGenericServices_field_number protoreflect.FieldNumber = 17
+	FileOptions_PyGenericServices_field_number protoreflect.FieldNumber = 18
+	FileOptions_PhpGenericServices_field_number protoreflect.FieldNumber = 42
+	FileOptions_Deprecated_field_number protoreflect.FieldNumber = 23
+	FileOptions_CcEnableArenas_field_number protoreflect.FieldNumber = 31
+	FileOptions_ObjcClassPrefix_field_number protoreflect.FieldNumber = 36
+	FileOptions_CsharpNamespace_field_number protoreflect.FieldNumber = 37
+	FileOptions_SwiftPrefix_field_number protoreflect.FieldNumber = 39
+	FileOptions_PhpClassPrefix_field_number protoreflect.FieldNumber = 40
+	FileOptions_PhpNamespace_field_number protoreflect.FieldNumber = 41
+	FileOptions_PhpMetadataNamespace_field_number protoreflect.FieldNumber = 44
+	FileOptions_RubyPackage_field_number protoreflect.FieldNumber = 45
+	FileOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
+)
+
+// Full and short names for google.protobuf.FileOptions.OptimizeMode.
+const (
+	FileOptions_OptimizeMode_enum_fullname = "google.protobuf.FileOptions.OptimizeMode"
+	FileOptions_OptimizeMode_enum_name = "OptimizeMode"
+)
+
+// Names for google.protobuf.MessageOptions.
+const (
+	MessageOptions_message_name protoreflect.Name = "MessageOptions"
+	MessageOptions_message_fullname protoreflect.FullName = "google.protobuf.MessageOptions"
+)
+
+// Field names for google.protobuf.MessageOptions.
+const ( + MessageOptions_MessageSetWireFormat_field_name protoreflect.Name = "message_set_wire_format" + MessageOptions_NoStandardDescriptorAccessor_field_name protoreflect.Name = "no_standard_descriptor_accessor" + MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" + MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" + MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" + MessageOptions_NoStandardDescriptorAccessor_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.no_standard_descriptor_accessor" + MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" + MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" + MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.MessageOptions. +const ( + MessageOptions_MessageSetWireFormat_field_number protoreflect.FieldNumber = 1 + MessageOptions_NoStandardDescriptorAccessor_field_number protoreflect.FieldNumber = 2 + MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 + MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.FieldOptions. +const ( + FieldOptions_message_name protoreflect.Name = "FieldOptions" + FieldOptions_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions" +) + +// Field names for google.protobuf.FieldOptions. +const ( + FieldOptions_Ctype_field_name protoreflect.Name = "ctype" + FieldOptions_Packed_field_name protoreflect.Name = "packed" + FieldOptions_Jstype_field_name protoreflect.Name = "jstype" + FieldOptions_Lazy_field_name protoreflect.Name = "lazy" + FieldOptions_Deprecated_field_name protoreflect.Name = "deprecated" + FieldOptions_Weak_field_name protoreflect.Name = "weak" + FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" + FieldOptions_Packed_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.packed" + FieldOptions_Jstype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.jstype" + FieldOptions_Lazy_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.lazy" + FieldOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.deprecated" + FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak" + FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.FieldOptions. 
+const ( + FieldOptions_Ctype_field_number protoreflect.FieldNumber = 1 + FieldOptions_Packed_field_number protoreflect.FieldNumber = 2 + FieldOptions_Jstype_field_number protoreflect.FieldNumber = 6 + FieldOptions_Lazy_field_number protoreflect.FieldNumber = 5 + FieldOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + FieldOptions_Weak_field_number protoreflect.FieldNumber = 10 + FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Full and short names for google.protobuf.FieldOptions.CType. +const ( + FieldOptions_CType_enum_fullname = "google.protobuf.FieldOptions.CType" + FieldOptions_CType_enum_name = "CType" +) + +// Full and short names for google.protobuf.FieldOptions.JSType. +const ( + FieldOptions_JSType_enum_fullname = "google.protobuf.FieldOptions.JSType" + FieldOptions_JSType_enum_name = "JSType" +) + +// Names for google.protobuf.OneofOptions. +const ( + OneofOptions_message_name protoreflect.Name = "OneofOptions" + OneofOptions_message_fullname protoreflect.FullName = "google.protobuf.OneofOptions" +) + +// Field names for google.protobuf.OneofOptions. +const ( + OneofOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + OneofOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.OneofOptions. +const ( + OneofOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.EnumOptions. +const ( + EnumOptions_message_name protoreflect.Name = "EnumOptions" + EnumOptions_message_fullname protoreflect.FullName = "google.protobuf.EnumOptions" +) + +// Field names for google.protobuf.EnumOptions. +const ( + EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" + EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" + EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" + EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.EnumOptions. +const ( + EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 + EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.EnumValueOptions. +const ( + EnumValueOptions_message_name protoreflect.Name = "EnumValueOptions" + EnumValueOptions_message_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions" +) + +// Field names for google.protobuf.EnumValueOptions. +const ( + EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated" + EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.EnumValueOptions. 
+const ( + EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1 + EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.ServiceOptions. +const ( + ServiceOptions_message_name protoreflect.Name = "ServiceOptions" + ServiceOptions_message_fullname protoreflect.FullName = "google.protobuf.ServiceOptions" +) + +// Field names for google.protobuf.ServiceOptions. +const ( + ServiceOptions_Deprecated_field_name protoreflect.Name = "deprecated" + ServiceOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + ServiceOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.deprecated" + ServiceOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.ServiceOptions. +const ( + ServiceOptions_Deprecated_field_number protoreflect.FieldNumber = 33 + ServiceOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.MethodOptions. +const ( + MethodOptions_message_name protoreflect.Name = "MethodOptions" + MethodOptions_message_fullname protoreflect.FullName = "google.protobuf.MethodOptions" +) + +// Field names for google.protobuf.MethodOptions. +const ( + MethodOptions_Deprecated_field_name protoreflect.Name = "deprecated" + MethodOptions_IdempotencyLevel_field_name protoreflect.Name = "idempotency_level" + MethodOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + MethodOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.deprecated" + MethodOptions_IdempotencyLevel_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.idempotency_level" + MethodOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.MethodOptions. +const ( + MethodOptions_Deprecated_field_number protoreflect.FieldNumber = 33 + MethodOptions_IdempotencyLevel_field_number protoreflect.FieldNumber = 34 + MethodOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Full and short names for google.protobuf.MethodOptions.IdempotencyLevel. +const ( + MethodOptions_IdempotencyLevel_enum_fullname = "google.protobuf.MethodOptions.IdempotencyLevel" + MethodOptions_IdempotencyLevel_enum_name = "IdempotencyLevel" +) + +// Names for google.protobuf.UninterpretedOption. +const ( + UninterpretedOption_message_name protoreflect.Name = "UninterpretedOption" + UninterpretedOption_message_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption" +) + +// Field names for google.protobuf.UninterpretedOption. 
+const ( + UninterpretedOption_Name_field_name protoreflect.Name = "name" + UninterpretedOption_IdentifierValue_field_name protoreflect.Name = "identifier_value" + UninterpretedOption_PositiveIntValue_field_name protoreflect.Name = "positive_int_value" + UninterpretedOption_NegativeIntValue_field_name protoreflect.Name = "negative_int_value" + UninterpretedOption_DoubleValue_field_name protoreflect.Name = "double_value" + UninterpretedOption_StringValue_field_name protoreflect.Name = "string_value" + UninterpretedOption_AggregateValue_field_name protoreflect.Name = "aggregate_value" + + UninterpretedOption_Name_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.name" + UninterpretedOption_IdentifierValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.identifier_value" + UninterpretedOption_PositiveIntValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.positive_int_value" + UninterpretedOption_NegativeIntValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.negative_int_value" + UninterpretedOption_DoubleValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.double_value" + UninterpretedOption_StringValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.string_value" + UninterpretedOption_AggregateValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.aggregate_value" +) + +// Field numbers for google.protobuf.UninterpretedOption. +const ( + UninterpretedOption_Name_field_number protoreflect.FieldNumber = 2 + UninterpretedOption_IdentifierValue_field_number protoreflect.FieldNumber = 3 + UninterpretedOption_PositiveIntValue_field_number protoreflect.FieldNumber = 4 + UninterpretedOption_NegativeIntValue_field_number protoreflect.FieldNumber = 5 + UninterpretedOption_DoubleValue_field_number protoreflect.FieldNumber = 6 + UninterpretedOption_StringValue_field_number protoreflect.FieldNumber = 7 + UninterpretedOption_AggregateValue_field_number protoreflect.FieldNumber = 8 +) + +// Names for google.protobuf.UninterpretedOption.NamePart. +const ( + UninterpretedOption_NamePart_message_name protoreflect.Name = "NamePart" + UninterpretedOption_NamePart_message_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart" +) + +// Field names for google.protobuf.UninterpretedOption.NamePart. +const ( + UninterpretedOption_NamePart_NamePart_field_name protoreflect.Name = "name_part" + UninterpretedOption_NamePart_IsExtension_field_name protoreflect.Name = "is_extension" + + UninterpretedOption_NamePart_NamePart_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart.name_part" + UninterpretedOption_NamePart_IsExtension_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart.is_extension" +) + +// Field numbers for google.protobuf.UninterpretedOption.NamePart. +const ( + UninterpretedOption_NamePart_NamePart_field_number protoreflect.FieldNumber = 1 + UninterpretedOption_NamePart_IsExtension_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.SourceCodeInfo. +const ( + SourceCodeInfo_message_name protoreflect.Name = "SourceCodeInfo" + SourceCodeInfo_message_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo" +) + +// Field names for google.protobuf.SourceCodeInfo. 
+const ( + SourceCodeInfo_Location_field_name protoreflect.Name = "location" + + SourceCodeInfo_Location_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.location" +) + +// Field numbers for google.protobuf.SourceCodeInfo. +const ( + SourceCodeInfo_Location_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.SourceCodeInfo.Location. +const ( + SourceCodeInfo_Location_message_name protoreflect.Name = "Location" + SourceCodeInfo_Location_message_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location" +) + +// Field names for google.protobuf.SourceCodeInfo.Location. +const ( + SourceCodeInfo_Location_Path_field_name protoreflect.Name = "path" + SourceCodeInfo_Location_Span_field_name protoreflect.Name = "span" + SourceCodeInfo_Location_LeadingComments_field_name protoreflect.Name = "leading_comments" + SourceCodeInfo_Location_TrailingComments_field_name protoreflect.Name = "trailing_comments" + SourceCodeInfo_Location_LeadingDetachedComments_field_name protoreflect.Name = "leading_detached_comments" + + SourceCodeInfo_Location_Path_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.path" + SourceCodeInfo_Location_Span_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.span" + SourceCodeInfo_Location_LeadingComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.leading_comments" + SourceCodeInfo_Location_TrailingComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.trailing_comments" + SourceCodeInfo_Location_LeadingDetachedComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.leading_detached_comments" +) + +// Field numbers for google.protobuf.SourceCodeInfo.Location. +const ( + SourceCodeInfo_Location_Path_field_number protoreflect.FieldNumber = 1 + SourceCodeInfo_Location_Span_field_number protoreflect.FieldNumber = 2 + SourceCodeInfo_Location_LeadingComments_field_number protoreflect.FieldNumber = 3 + SourceCodeInfo_Location_TrailingComments_field_number protoreflect.FieldNumber = 4 + SourceCodeInfo_Location_LeadingDetachedComments_field_number protoreflect.FieldNumber = 6 +) + +// Names for google.protobuf.GeneratedCodeInfo. +const ( + GeneratedCodeInfo_message_name protoreflect.Name = "GeneratedCodeInfo" + GeneratedCodeInfo_message_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo" +) + +// Field names for google.protobuf.GeneratedCodeInfo. +const ( + GeneratedCodeInfo_Annotation_field_name protoreflect.Name = "annotation" + + GeneratedCodeInfo_Annotation_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.annotation" +) + +// Field numbers for google.protobuf.GeneratedCodeInfo. +const ( + GeneratedCodeInfo_Annotation_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.GeneratedCodeInfo.Annotation. +const ( + GeneratedCodeInfo_Annotation_message_name protoreflect.Name = "Annotation" + GeneratedCodeInfo_Annotation_message_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation" +) + +// Field names for google.protobuf.GeneratedCodeInfo.Annotation. 
+const ( + GeneratedCodeInfo_Annotation_Path_field_name protoreflect.Name = "path" + GeneratedCodeInfo_Annotation_SourceFile_field_name protoreflect.Name = "source_file" + GeneratedCodeInfo_Annotation_Begin_field_name protoreflect.Name = "begin" + GeneratedCodeInfo_Annotation_End_field_name protoreflect.Name = "end" + + GeneratedCodeInfo_Annotation_Path_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.path" + GeneratedCodeInfo_Annotation_SourceFile_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.source_file" + GeneratedCodeInfo_Annotation_Begin_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.begin" + GeneratedCodeInfo_Annotation_End_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.end" +) + +// Field numbers for google.protobuf.GeneratedCodeInfo.Annotation. +const ( + GeneratedCodeInfo_Annotation_Path_field_number protoreflect.FieldNumber = 1 + GeneratedCodeInfo_Annotation_SourceFile_field_number protoreflect.FieldNumber = 2 + GeneratedCodeInfo_Annotation_Begin_field_number protoreflect.FieldNumber = 3 + GeneratedCodeInfo_Annotation_End_field_number protoreflect.FieldNumber = 4 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go new file mode 100644 index 000000000..45ccd0121 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/doc.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package genid contains constants for declarations in descriptor.proto +// and the well-known types. +package genid + +import protoreflect "google.golang.org/protobuf/reflect/protoreflect" + +const GoogleProtobuf_package protoreflect.FullName = "google.protobuf" diff --git a/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go b/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go new file mode 100644 index 000000000..b070ef4fd --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_duration_proto = "google/protobuf/duration.proto" + +// Names for google.protobuf.Duration. +const ( + Duration_message_name protoreflect.Name = "Duration" + Duration_message_fullname protoreflect.FullName = "google.protobuf.Duration" +) + +// Field names for google.protobuf.Duration. +const ( + Duration_Seconds_field_name protoreflect.Name = "seconds" + Duration_Nanos_field_name protoreflect.Name = "nanos" + + Duration_Seconds_field_fullname protoreflect.FullName = "google.protobuf.Duration.seconds" + Duration_Nanos_field_fullname protoreflect.FullName = "google.protobuf.Duration.nanos" +) + +// Field numbers for google.protobuf.Duration. 
+const ( + Duration_Seconds_field_number protoreflect.FieldNumber = 1 + Duration_Nanos_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go b/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go new file mode 100644 index 000000000..762abb34a --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go @@ -0,0 +1,19 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_empty_proto = "google/protobuf/empty.proto" + +// Names for google.protobuf.Empty. +const ( + Empty_message_name protoreflect.Name = "Empty" + Empty_message_fullname protoreflect.FullName = "google.protobuf.Empty" +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go b/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go new file mode 100644 index 000000000..70bed453f --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go @@ -0,0 +1,31 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_field_mask_proto = "google/protobuf/field_mask.proto" + +// Names for google.protobuf.FieldMask. +const ( + FieldMask_message_name protoreflect.Name = "FieldMask" + FieldMask_message_fullname protoreflect.FullName = "google.protobuf.FieldMask" +) + +// Field names for google.protobuf.FieldMask. +const ( + FieldMask_Paths_field_name protoreflect.Name = "paths" + + FieldMask_Paths_field_fullname protoreflect.FullName = "google.protobuf.FieldMask.paths" +) + +// Field numbers for google.protobuf.FieldMask. +const ( + FieldMask_Paths_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/goname.go b/vendor/google.golang.org/protobuf/internal/genid/goname.go new file mode 100644 index 000000000..693d2e9e1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/goname.go @@ -0,0 +1,25 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package genid + +// Go names of implementation-specific struct fields in generated messages. +const ( + State_goname = "state" + + SizeCache_goname = "sizeCache" + SizeCacheA_goname = "XXX_sizecache" + + WeakFields_goname = "weakFields" + WeakFieldsA_goname = "XXX_weak" + + UnknownFields_goname = "unknownFields" + UnknownFieldsA_goname = "XXX_unrecognized" + + ExtensionFields_goname = "extensionFields" + ExtensionFieldsA_goname = "XXX_InternalExtensions" + ExtensionFieldsB_goname = "XXX_extensions" + + WeakFieldPrefix_goname = "XXX_weak_" +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go new file mode 100644 index 000000000..8f9ea02ff --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go @@ -0,0 +1,16 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package genid + +import protoreflect "google.golang.org/protobuf/reflect/protoreflect" + +// Generic field names and numbers for synthetic map entry messages. +const ( + MapEntry_Key_field_name protoreflect.Name = "key" + MapEntry_Value_field_name protoreflect.Name = "value" + + MapEntry_Key_field_number protoreflect.FieldNumber = 1 + MapEntry_Value_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go b/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go new file mode 100644 index 000000000..3e99ae16c --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go @@ -0,0 +1,31 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_source_context_proto = "google/protobuf/source_context.proto" + +// Names for google.protobuf.SourceContext. +const ( + SourceContext_message_name protoreflect.Name = "SourceContext" + SourceContext_message_fullname protoreflect.FullName = "google.protobuf.SourceContext" +) + +// Field names for google.protobuf.SourceContext. +const ( + SourceContext_FileName_field_name protoreflect.Name = "file_name" + + SourceContext_FileName_field_fullname protoreflect.FullName = "google.protobuf.SourceContext.file_name" +) + +// Field numbers for google.protobuf.SourceContext. +const ( + SourceContext_FileName_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go new file mode 100644 index 000000000..1a38944b2 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go @@ -0,0 +1,116 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_struct_proto = "google/protobuf/struct.proto" + +// Full and short names for google.protobuf.NullValue. +const ( + NullValue_enum_fullname = "google.protobuf.NullValue" + NullValue_enum_name = "NullValue" +) + +// Names for google.protobuf.Struct. +const ( + Struct_message_name protoreflect.Name = "Struct" + Struct_message_fullname protoreflect.FullName = "google.protobuf.Struct" +) + +// Field names for google.protobuf.Struct. +const ( + Struct_Fields_field_name protoreflect.Name = "fields" + + Struct_Fields_field_fullname protoreflect.FullName = "google.protobuf.Struct.fields" +) + +// Field numbers for google.protobuf.Struct. +const ( + Struct_Fields_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.Struct.FieldsEntry. +const ( + Struct_FieldsEntry_message_name protoreflect.Name = "FieldsEntry" + Struct_FieldsEntry_message_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry" +) + +// Field names for google.protobuf.Struct.FieldsEntry. 
+const ( + Struct_FieldsEntry_Key_field_name protoreflect.Name = "key" + Struct_FieldsEntry_Value_field_name protoreflect.Name = "value" + + Struct_FieldsEntry_Key_field_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry.key" + Struct_FieldsEntry_Value_field_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry.value" +) + +// Field numbers for google.protobuf.Struct.FieldsEntry. +const ( + Struct_FieldsEntry_Key_field_number protoreflect.FieldNumber = 1 + Struct_FieldsEntry_Value_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.Value. +const ( + Value_message_name protoreflect.Name = "Value" + Value_message_fullname protoreflect.FullName = "google.protobuf.Value" +) + +// Field names for google.protobuf.Value. +const ( + Value_NullValue_field_name protoreflect.Name = "null_value" + Value_NumberValue_field_name protoreflect.Name = "number_value" + Value_StringValue_field_name protoreflect.Name = "string_value" + Value_BoolValue_field_name protoreflect.Name = "bool_value" + Value_StructValue_field_name protoreflect.Name = "struct_value" + Value_ListValue_field_name protoreflect.Name = "list_value" + + Value_NullValue_field_fullname protoreflect.FullName = "google.protobuf.Value.null_value" + Value_NumberValue_field_fullname protoreflect.FullName = "google.protobuf.Value.number_value" + Value_StringValue_field_fullname protoreflect.FullName = "google.protobuf.Value.string_value" + Value_BoolValue_field_fullname protoreflect.FullName = "google.protobuf.Value.bool_value" + Value_StructValue_field_fullname protoreflect.FullName = "google.protobuf.Value.struct_value" + Value_ListValue_field_fullname protoreflect.FullName = "google.protobuf.Value.list_value" +) + +// Field numbers for google.protobuf.Value. +const ( + Value_NullValue_field_number protoreflect.FieldNumber = 1 + Value_NumberValue_field_number protoreflect.FieldNumber = 2 + Value_StringValue_field_number protoreflect.FieldNumber = 3 + Value_BoolValue_field_number protoreflect.FieldNumber = 4 + Value_StructValue_field_number protoreflect.FieldNumber = 5 + Value_ListValue_field_number protoreflect.FieldNumber = 6 +) + +// Oneof names for google.protobuf.Value. +const ( + Value_Kind_oneof_name protoreflect.Name = "kind" + + Value_Kind_oneof_fullname protoreflect.FullName = "google.protobuf.Value.kind" +) + +// Names for google.protobuf.ListValue. +const ( + ListValue_message_name protoreflect.Name = "ListValue" + ListValue_message_fullname protoreflect.FullName = "google.protobuf.ListValue" +) + +// Field names for google.protobuf.ListValue. +const ( + ListValue_Values_field_name protoreflect.Name = "values" + + ListValue_Values_field_fullname protoreflect.FullName = "google.protobuf.ListValue.values" +) + +// Field numbers for google.protobuf.ListValue. +const ( + ListValue_Values_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go b/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go new file mode 100644 index 000000000..f5cd5634c --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. 
+ +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_timestamp_proto = "google/protobuf/timestamp.proto" + +// Names for google.protobuf.Timestamp. +const ( + Timestamp_message_name protoreflect.Name = "Timestamp" + Timestamp_message_fullname protoreflect.FullName = "google.protobuf.Timestamp" +) + +// Field names for google.protobuf.Timestamp. +const ( + Timestamp_Seconds_field_name protoreflect.Name = "seconds" + Timestamp_Nanos_field_name protoreflect.Name = "nanos" + + Timestamp_Seconds_field_fullname protoreflect.FullName = "google.protobuf.Timestamp.seconds" + Timestamp_Nanos_field_fullname protoreflect.FullName = "google.protobuf.Timestamp.nanos" +) + +// Field numbers for google.protobuf.Timestamp. +const ( + Timestamp_Seconds_field_number protoreflect.FieldNumber = 1 + Timestamp_Nanos_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go new file mode 100644 index 000000000..3bc710138 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go @@ -0,0 +1,184 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_type_proto = "google/protobuf/type.proto" + +// Full and short names for google.protobuf.Syntax. +const ( + Syntax_enum_fullname = "google.protobuf.Syntax" + Syntax_enum_name = "Syntax" +) + +// Names for google.protobuf.Type. +const ( + Type_message_name protoreflect.Name = "Type" + Type_message_fullname protoreflect.FullName = "google.protobuf.Type" +) + +// Field names for google.protobuf.Type. +const ( + Type_Name_field_name protoreflect.Name = "name" + Type_Fields_field_name protoreflect.Name = "fields" + Type_Oneofs_field_name protoreflect.Name = "oneofs" + Type_Options_field_name protoreflect.Name = "options" + Type_SourceContext_field_name protoreflect.Name = "source_context" + Type_Syntax_field_name protoreflect.Name = "syntax" + + Type_Name_field_fullname protoreflect.FullName = "google.protobuf.Type.name" + Type_Fields_field_fullname protoreflect.FullName = "google.protobuf.Type.fields" + Type_Oneofs_field_fullname protoreflect.FullName = "google.protobuf.Type.oneofs" + Type_Options_field_fullname protoreflect.FullName = "google.protobuf.Type.options" + Type_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Type.source_context" + Type_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Type.syntax" +) + +// Field numbers for google.protobuf.Type. +const ( + Type_Name_field_number protoreflect.FieldNumber = 1 + Type_Fields_field_number protoreflect.FieldNumber = 2 + Type_Oneofs_field_number protoreflect.FieldNumber = 3 + Type_Options_field_number protoreflect.FieldNumber = 4 + Type_SourceContext_field_number protoreflect.FieldNumber = 5 + Type_Syntax_field_number protoreflect.FieldNumber = 6 +) + +// Names for google.protobuf.Field. +const ( + Field_message_name protoreflect.Name = "Field" + Field_message_fullname protoreflect.FullName = "google.protobuf.Field" +) + +// Field names for google.protobuf.Field. 
+const ( + Field_Kind_field_name protoreflect.Name = "kind" + Field_Cardinality_field_name protoreflect.Name = "cardinality" + Field_Number_field_name protoreflect.Name = "number" + Field_Name_field_name protoreflect.Name = "name" + Field_TypeUrl_field_name protoreflect.Name = "type_url" + Field_OneofIndex_field_name protoreflect.Name = "oneof_index" + Field_Packed_field_name protoreflect.Name = "packed" + Field_Options_field_name protoreflect.Name = "options" + Field_JsonName_field_name protoreflect.Name = "json_name" + Field_DefaultValue_field_name protoreflect.Name = "default_value" + + Field_Kind_field_fullname protoreflect.FullName = "google.protobuf.Field.kind" + Field_Cardinality_field_fullname protoreflect.FullName = "google.protobuf.Field.cardinality" + Field_Number_field_fullname protoreflect.FullName = "google.protobuf.Field.number" + Field_Name_field_fullname protoreflect.FullName = "google.protobuf.Field.name" + Field_TypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Field.type_url" + Field_OneofIndex_field_fullname protoreflect.FullName = "google.protobuf.Field.oneof_index" + Field_Packed_field_fullname protoreflect.FullName = "google.protobuf.Field.packed" + Field_Options_field_fullname protoreflect.FullName = "google.protobuf.Field.options" + Field_JsonName_field_fullname protoreflect.FullName = "google.protobuf.Field.json_name" + Field_DefaultValue_field_fullname protoreflect.FullName = "google.protobuf.Field.default_value" +) + +// Field numbers for google.protobuf.Field. +const ( + Field_Kind_field_number protoreflect.FieldNumber = 1 + Field_Cardinality_field_number protoreflect.FieldNumber = 2 + Field_Number_field_number protoreflect.FieldNumber = 3 + Field_Name_field_number protoreflect.FieldNumber = 4 + Field_TypeUrl_field_number protoreflect.FieldNumber = 6 + Field_OneofIndex_field_number protoreflect.FieldNumber = 7 + Field_Packed_field_number protoreflect.FieldNumber = 8 + Field_Options_field_number protoreflect.FieldNumber = 9 + Field_JsonName_field_number protoreflect.FieldNumber = 10 + Field_DefaultValue_field_number protoreflect.FieldNumber = 11 +) + +// Full and short names for google.protobuf.Field.Kind. +const ( + Field_Kind_enum_fullname = "google.protobuf.Field.Kind" + Field_Kind_enum_name = "Kind" +) + +// Full and short names for google.protobuf.Field.Cardinality. +const ( + Field_Cardinality_enum_fullname = "google.protobuf.Field.Cardinality" + Field_Cardinality_enum_name = "Cardinality" +) + +// Names for google.protobuf.Enum. +const ( + Enum_message_name protoreflect.Name = "Enum" + Enum_message_fullname protoreflect.FullName = "google.protobuf.Enum" +) + +// Field names for google.protobuf.Enum. +const ( + Enum_Name_field_name protoreflect.Name = "name" + Enum_Enumvalue_field_name protoreflect.Name = "enumvalue" + Enum_Options_field_name protoreflect.Name = "options" + Enum_SourceContext_field_name protoreflect.Name = "source_context" + Enum_Syntax_field_name protoreflect.Name = "syntax" + + Enum_Name_field_fullname protoreflect.FullName = "google.protobuf.Enum.name" + Enum_Enumvalue_field_fullname protoreflect.FullName = "google.protobuf.Enum.enumvalue" + Enum_Options_field_fullname protoreflect.FullName = "google.protobuf.Enum.options" + Enum_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Enum.source_context" + Enum_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Enum.syntax" +) + +// Field numbers for google.protobuf.Enum. 
+const ( + Enum_Name_field_number protoreflect.FieldNumber = 1 + Enum_Enumvalue_field_number protoreflect.FieldNumber = 2 + Enum_Options_field_number protoreflect.FieldNumber = 3 + Enum_SourceContext_field_number protoreflect.FieldNumber = 4 + Enum_Syntax_field_number protoreflect.FieldNumber = 5 +) + +// Names for google.protobuf.EnumValue. +const ( + EnumValue_message_name protoreflect.Name = "EnumValue" + EnumValue_message_fullname protoreflect.FullName = "google.protobuf.EnumValue" +) + +// Field names for google.protobuf.EnumValue. +const ( + EnumValue_Name_field_name protoreflect.Name = "name" + EnumValue_Number_field_name protoreflect.Name = "number" + EnumValue_Options_field_name protoreflect.Name = "options" + + EnumValue_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.name" + EnumValue_Number_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.number" + EnumValue_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.options" +) + +// Field numbers for google.protobuf.EnumValue. +const ( + EnumValue_Name_field_number protoreflect.FieldNumber = 1 + EnumValue_Number_field_number protoreflect.FieldNumber = 2 + EnumValue_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.Option. +const ( + Option_message_name protoreflect.Name = "Option" + Option_message_fullname protoreflect.FullName = "google.protobuf.Option" +) + +// Field names for google.protobuf.Option. +const ( + Option_Name_field_name protoreflect.Name = "name" + Option_Value_field_name protoreflect.Name = "value" + + Option_Name_field_fullname protoreflect.FullName = "google.protobuf.Option.name" + Option_Value_field_fullname protoreflect.FullName = "google.protobuf.Option.value" +) + +// Field numbers for google.protobuf.Option. +const ( + Option_Name_field_number protoreflect.FieldNumber = 1 + Option_Value_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go new file mode 100644 index 000000000..429384b85 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go @@ -0,0 +1,13 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package genid + +import protoreflect "google.golang.org/protobuf/reflect/protoreflect" + +// Generic field name and number for messages in wrappers.proto. +const ( + WrapperValue_Value_field_name protoreflect.Name = "value" + WrapperValue_Value_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go new file mode 100644 index 000000000..72527d2ab --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go @@ -0,0 +1,175 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_wrappers_proto = "google/protobuf/wrappers.proto" + +// Names for google.protobuf.DoubleValue. 
+const ( + DoubleValue_message_name protoreflect.Name = "DoubleValue" + DoubleValue_message_fullname protoreflect.FullName = "google.protobuf.DoubleValue" +) + +// Field names for google.protobuf.DoubleValue. +const ( + DoubleValue_Value_field_name protoreflect.Name = "value" + + DoubleValue_Value_field_fullname protoreflect.FullName = "google.protobuf.DoubleValue.value" +) + +// Field numbers for google.protobuf.DoubleValue. +const ( + DoubleValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.FloatValue. +const ( + FloatValue_message_name protoreflect.Name = "FloatValue" + FloatValue_message_fullname protoreflect.FullName = "google.protobuf.FloatValue" +) + +// Field names for google.protobuf.FloatValue. +const ( + FloatValue_Value_field_name protoreflect.Name = "value" + + FloatValue_Value_field_fullname protoreflect.FullName = "google.protobuf.FloatValue.value" +) + +// Field numbers for google.protobuf.FloatValue. +const ( + FloatValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.Int64Value. +const ( + Int64Value_message_name protoreflect.Name = "Int64Value" + Int64Value_message_fullname protoreflect.FullName = "google.protobuf.Int64Value" +) + +// Field names for google.protobuf.Int64Value. +const ( + Int64Value_Value_field_name protoreflect.Name = "value" + + Int64Value_Value_field_fullname protoreflect.FullName = "google.protobuf.Int64Value.value" +) + +// Field numbers for google.protobuf.Int64Value. +const ( + Int64Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.UInt64Value. +const ( + UInt64Value_message_name protoreflect.Name = "UInt64Value" + UInt64Value_message_fullname protoreflect.FullName = "google.protobuf.UInt64Value" +) + +// Field names for google.protobuf.UInt64Value. +const ( + UInt64Value_Value_field_name protoreflect.Name = "value" + + UInt64Value_Value_field_fullname protoreflect.FullName = "google.protobuf.UInt64Value.value" +) + +// Field numbers for google.protobuf.UInt64Value. +const ( + UInt64Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.Int32Value. +const ( + Int32Value_message_name protoreflect.Name = "Int32Value" + Int32Value_message_fullname protoreflect.FullName = "google.protobuf.Int32Value" +) + +// Field names for google.protobuf.Int32Value. +const ( + Int32Value_Value_field_name protoreflect.Name = "value" + + Int32Value_Value_field_fullname protoreflect.FullName = "google.protobuf.Int32Value.value" +) + +// Field numbers for google.protobuf.Int32Value. +const ( + Int32Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.UInt32Value. +const ( + UInt32Value_message_name protoreflect.Name = "UInt32Value" + UInt32Value_message_fullname protoreflect.FullName = "google.protobuf.UInt32Value" +) + +// Field names for google.protobuf.UInt32Value. +const ( + UInt32Value_Value_field_name protoreflect.Name = "value" + + UInt32Value_Value_field_fullname protoreflect.FullName = "google.protobuf.UInt32Value.value" +) + +// Field numbers for google.protobuf.UInt32Value. +const ( + UInt32Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.BoolValue. +const ( + BoolValue_message_name protoreflect.Name = "BoolValue" + BoolValue_message_fullname protoreflect.FullName = "google.protobuf.BoolValue" +) + +// Field names for google.protobuf.BoolValue. 
+const ( + BoolValue_Value_field_name protoreflect.Name = "value" + + BoolValue_Value_field_fullname protoreflect.FullName = "google.protobuf.BoolValue.value" +) + +// Field numbers for google.protobuf.BoolValue. +const ( + BoolValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.StringValue. +const ( + StringValue_message_name protoreflect.Name = "StringValue" + StringValue_message_fullname protoreflect.FullName = "google.protobuf.StringValue" +) + +// Field names for google.protobuf.StringValue. +const ( + StringValue_Value_field_name protoreflect.Name = "value" + + StringValue_Value_field_fullname protoreflect.FullName = "google.protobuf.StringValue.value" +) + +// Field numbers for google.protobuf.StringValue. +const ( + StringValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.BytesValue. +const ( + BytesValue_message_name protoreflect.Name = "BytesValue" + BytesValue_message_fullname protoreflect.FullName = "google.protobuf.BytesValue" +) + +// Field names for google.protobuf.BytesValue. +const ( + BytesValue_Value_field_name protoreflect.Name = "value" + + BytesValue_Value_field_fullname protoreflect.FullName = "google.protobuf.BytesValue.value" +) + +// Field numbers for google.protobuf.BytesValue. +const ( + BytesValue_Value_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go new file mode 100644 index 000000000..abee5f30e --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go @@ -0,0 +1,177 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + "strconv" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// Export is a zero-length named type that exists only to export a set of +// functions that we do not want to appear in godoc. +type Export struct{} + +// NewError formats a string according to the format specifier and arguments and +// returns an error that has a "proto" prefix. +func (Export) NewError(f string, x ...interface{}) error { + return errors.New(f, x...) +} + +// enum is any enum type generated by protoc-gen-go +// and must be a named int32 type. +type enum = interface{} + +// EnumOf returns the protoreflect.Enum interface over e. +// It returns nil if e is nil. +func (Export) EnumOf(e enum) pref.Enum { + switch e := e.(type) { + case nil: + return nil + case pref.Enum: + return e + default: + return legacyWrapEnum(reflect.ValueOf(e)) + } +} + +// EnumDescriptorOf returns the protoreflect.EnumDescriptor for e. +// It returns nil if e is nil. +func (Export) EnumDescriptorOf(e enum) pref.EnumDescriptor { + switch e := e.(type) { + case nil: + return nil + case pref.Enum: + return e.Descriptor() + default: + return LegacyLoadEnumDesc(reflect.TypeOf(e)) + } +} + +// EnumTypeOf returns the protoreflect.EnumType for e. +// It returns nil if e is nil. 
+func (Export) EnumTypeOf(e enum) pref.EnumType { + switch e := e.(type) { + case nil: + return nil + case pref.Enum: + return e.Type() + default: + return legacyLoadEnumType(reflect.TypeOf(e)) + } +} + +// EnumStringOf returns the enum value as a string, either as the name if +// the number is resolvable, or the number formatted as a string. +func (Export) EnumStringOf(ed pref.EnumDescriptor, n pref.EnumNumber) string { + ev := ed.Values().ByNumber(n) + if ev != nil { + return string(ev.Name()) + } + return strconv.Itoa(int(n)) +} + +// message is any message type generated by protoc-gen-go +// and must be a pointer to a named struct type. +type message = interface{} + +// legacyMessageWrapper wraps a v2 message as a v1 message. +type legacyMessageWrapper struct{ m pref.ProtoMessage } + +func (m legacyMessageWrapper) Reset() { proto.Reset(m.m) } +func (m legacyMessageWrapper) String() string { return Export{}.MessageStringOf(m.m) } +func (m legacyMessageWrapper) ProtoMessage() {} + +// ProtoMessageV1Of converts either a v1 or v2 message to a v1 message. +// It returns nil if m is nil. +func (Export) ProtoMessageV1Of(m message) piface.MessageV1 { + switch mv := m.(type) { + case nil: + return nil + case piface.MessageV1: + return mv + case unwrapper: + return Export{}.ProtoMessageV1Of(mv.protoUnwrap()) + case pref.ProtoMessage: + return legacyMessageWrapper{mv} + default: + panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m)) + } +} + +func (Export) protoMessageV2Of(m message) pref.ProtoMessage { + switch mv := m.(type) { + case nil: + return nil + case pref.ProtoMessage: + return mv + case legacyMessageWrapper: + return mv.m + case piface.MessageV1: + return nil + default: + panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m)) + } +} + +// ProtoMessageV2Of converts either a v1 or v2 message to a v2 message. +// It returns nil if m is nil. +func (Export) ProtoMessageV2Of(m message) pref.ProtoMessage { + if m == nil { + return nil + } + if mv := (Export{}).protoMessageV2Of(m); mv != nil { + return mv + } + return legacyWrapMessage(reflect.ValueOf(m)).Interface() +} + +// MessageOf returns the protoreflect.Message interface over m. +// It returns nil if m is nil. +func (Export) MessageOf(m message) pref.Message { + if m == nil { + return nil + } + if mv := (Export{}).protoMessageV2Of(m); mv != nil { + return mv.ProtoReflect() + } + return legacyWrapMessage(reflect.ValueOf(m)) +} + +// MessageDescriptorOf returns the protoreflect.MessageDescriptor for m. +// It returns nil if m is nil. +func (Export) MessageDescriptorOf(m message) pref.MessageDescriptor { + if m == nil { + return nil + } + if mv := (Export{}).protoMessageV2Of(m); mv != nil { + return mv.ProtoReflect().Descriptor() + } + return LegacyLoadMessageDesc(reflect.TypeOf(m)) +} + +// MessageTypeOf returns the protoreflect.MessageType for m. +// It returns nil if m is nil. +func (Export) MessageTypeOf(m message) pref.MessageType { + if m == nil { + return nil + } + if mv := (Export{}).protoMessageV2Of(m); mv != nil { + return mv.ProtoReflect().Type() + } + return legacyLoadMessageType(reflect.TypeOf(m), "") +} + +// MessageStringOf returns the message value as a string, +// which is the message serialized in the protobuf text format. 
+func (Export) MessageStringOf(m pref.ProtoMessage) string { + return prototext.MarshalOptions{Multiline: false}.Format(m) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go new file mode 100644 index 000000000..b82341e57 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go @@ -0,0 +1,141 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "sync" + + "google.golang.org/protobuf/internal/errors" + pref "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +func (mi *MessageInfo) checkInitialized(in piface.CheckInitializedInput) (piface.CheckInitializedOutput, error) { + var p pointer + if ms, ok := in.Message.(*messageState); ok { + p = ms.pointer() + } else { + p = in.Message.(*messageReflectWrapper).pointer() + } + return piface.CheckInitializedOutput{}, mi.checkInitializedPointer(p) +} + +func (mi *MessageInfo) checkInitializedPointer(p pointer) error { + mi.init() + if !mi.needsInitCheck { + return nil + } + if p.IsNil() { + for _, f := range mi.orderedCoderFields { + if f.isRequired { + return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName())) + } + } + return nil + } + if mi.extensionOffset.IsValid() { + e := p.Apply(mi.extensionOffset).Extensions() + if err := mi.isInitExtensions(e); err != nil { + return err + } + } + for _, f := range mi.orderedCoderFields { + if !f.isRequired && f.funcs.isInit == nil { + continue + } + fptr := p.Apply(f.offset) + if f.isPointer && fptr.Elem().IsNil() { + if f.isRequired { + return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName())) + } + continue + } + if f.funcs.isInit == nil { + continue + } + if err := f.funcs.isInit(fptr, f); err != nil { + return err + } + } + return nil +} + +func (mi *MessageInfo) isInitExtensions(ext *map[int32]ExtensionField) error { + if ext == nil { + return nil + } + for _, x := range *ext { + ei := getExtensionFieldInfo(x.Type()) + if ei.funcs.isInit == nil { + continue + } + v := x.Value() + if !v.IsValid() { + continue + } + if err := ei.funcs.isInit(v); err != nil { + return err + } + } + return nil +} + +var ( + needsInitCheckMu sync.Mutex + needsInitCheckMap sync.Map +) + +// needsInitCheck reports whether a message needs to be checked for partial initialization. +// +// It returns true if the message transitively includes any required or extension fields. +func needsInitCheck(md pref.MessageDescriptor) bool { + if v, ok := needsInitCheckMap.Load(md); ok { + if has, ok := v.(bool); ok { + return has + } + } + needsInitCheckMu.Lock() + defer needsInitCheckMu.Unlock() + return needsInitCheckLocked(md) +} + +func needsInitCheckLocked(md pref.MessageDescriptor) (has bool) { + if v, ok := needsInitCheckMap.Load(md); ok { + // If has is true, we've previously determined that this message + // needs init checks. + // + // If has is false, we've previously determined that it can never + // be uninitialized. + // + // If has is not a bool, we've just encountered a cycle in the + // message graph. In this case, it is safe to return false: If + // the message does have required fields, we'll detect them later + // in the graph traversal. 
+ has, ok := v.(bool) + return ok && has + } + needsInitCheckMap.Store(md, struct{}{}) // avoid cycles while descending into this message + defer func() { + needsInitCheckMap.Store(md, has) + }() + if md.RequiredNumbers().Len() > 0 { + return true + } + if md.ExtensionRanges().Len() > 0 { + return true + } + for i := 0; i < md.Fields().Len(); i++ { + fd := md.Fields().Get(i) + // Map keys are never messages, so just consider the map value. + if fd.IsMap() { + fd = fd.MapValue() + } + fmd := fd.Message() + if fmd != nil && needsInitCheckLocked(fmd) { + return true + } + } + return false +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go new file mode 100644 index 000000000..08d35170b --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -0,0 +1,223 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "sync" + "sync/atomic" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type extensionFieldInfo struct { + wiretag uint64 + tagsize int + unmarshalNeedsValue bool + funcs valueCoderFuncs + validation validationInfo +} + +var legacyExtensionFieldInfoCache sync.Map // map[protoreflect.ExtensionType]*extensionFieldInfo + +func getExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { + if xi, ok := xt.(*ExtensionInfo); ok { + xi.lazyInit() + return xi.info + } + return legacyLoadExtensionFieldInfo(xt) +} + +// legacyLoadExtensionFieldInfo dynamically loads a *ExtensionInfo for xt. +func legacyLoadExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { + if xi, ok := legacyExtensionFieldInfoCache.Load(xt); ok { + return xi.(*extensionFieldInfo) + } + e := makeExtensionFieldInfo(xt.TypeDescriptor()) + if e, ok := legacyMessageTypeCache.LoadOrStore(xt, e); ok { + return e.(*extensionFieldInfo) + } + return e +} + +func makeExtensionFieldInfo(xd pref.ExtensionDescriptor) *extensionFieldInfo { + var wiretag uint64 + if !xd.IsPacked() { + wiretag = protowire.EncodeTag(xd.Number(), wireTypes[xd.Kind()]) + } else { + wiretag = protowire.EncodeTag(xd.Number(), protowire.BytesType) + } + e := &extensionFieldInfo{ + wiretag: wiretag, + tagsize: protowire.SizeVarint(wiretag), + funcs: encoderFuncsForValue(xd), + } + // Does the unmarshal function need a value passed to it? + // This is true for composite types, where we pass in a message, list, or map to fill in, + // and for enums, where we pass in a prototype value to specify the concrete enum type. + switch xd.Kind() { + case pref.MessageKind, pref.GroupKind, pref.EnumKind: + e.unmarshalNeedsValue = true + default: + if xd.Cardinality() == pref.Repeated { + e.unmarshalNeedsValue = true + } + } + return e +} + +type lazyExtensionValue struct { + atomicOnce uint32 // atomically set if value is valid + mu sync.Mutex + xi *extensionFieldInfo + value pref.Value + b []byte + fn func() pref.Value +} + +type ExtensionField struct { + typ pref.ExtensionType + + // value is either the value of GetValue, + // or a *lazyExtensionValue that then returns the value of GetValue. 
+ value pref.Value + lazy *lazyExtensionValue +} + +func (f *ExtensionField) appendLazyBytes(xt pref.ExtensionType, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, b []byte) { + if f.lazy == nil { + f.lazy = &lazyExtensionValue{xi: xi} + } + f.typ = xt + f.lazy.xi = xi + f.lazy.b = protowire.AppendTag(f.lazy.b, num, wtyp) + f.lazy.b = append(f.lazy.b, b...) +} + +func (f *ExtensionField) canLazy(xt pref.ExtensionType) bool { + if f.typ == nil { + return true + } + if f.typ == xt && f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0 { + return true + } + return false +} + +func (f *ExtensionField) lazyInit() { + f.lazy.mu.Lock() + defer f.lazy.mu.Unlock() + if atomic.LoadUint32(&f.lazy.atomicOnce) == 1 { + return + } + if f.lazy.xi != nil { + b := f.lazy.b + val := f.typ.New() + for len(b) > 0 { + var tag uint64 + if b[0] < 0x80 { + tag = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + tag, n = protowire.ConsumeVarint(b) + if n < 0 { + panic(errors.New("bad tag in lazy extension decoding")) + } + b = b[n:] + } + num := protowire.Number(tag >> 3) + wtyp := protowire.Type(tag & 7) + var out unmarshalOutput + var err error + val, out, err = f.lazy.xi.funcs.unmarshal(b, val, num, wtyp, lazyUnmarshalOptions) + if err != nil { + panic(errors.New("decode failure in lazy extension decoding: %v", err)) + } + b = b[out.n:] + } + f.lazy.value = val + } else { + f.lazy.value = f.lazy.fn() + } + f.lazy.xi = nil + f.lazy.fn = nil + f.lazy.b = nil + atomic.StoreUint32(&f.lazy.atomicOnce, 1) +} + +// Set sets the type and value of the extension field. +// This must not be called concurrently. +func (f *ExtensionField) Set(t pref.ExtensionType, v pref.Value) { + f.typ = t + f.value = v + f.lazy = nil +} + +// SetLazy sets the type and a value that is to be lazily evaluated upon first use. +// This must not be called concurrently. +func (f *ExtensionField) SetLazy(t pref.ExtensionType, fn func() pref.Value) { + f.typ = t + f.lazy = &lazyExtensionValue{fn: fn} +} + +// Value returns the value of the extension field. +// This may be called concurrently. +func (f *ExtensionField) Value() pref.Value { + if f.lazy != nil { + if atomic.LoadUint32(&f.lazy.atomicOnce) == 0 { + f.lazyInit() + } + return f.lazy.value + } + return f.value +} + +// Type returns the type of the extension field. +// This may be called concurrently. +func (f ExtensionField) Type() pref.ExtensionType { + return f.typ +} + +// IsSet returns whether the extension field is set. +// This may be called concurrently. +func (f ExtensionField) IsSet() bool { + return f.typ != nil +} + +// IsLazy reports whether a field is lazily encoded. +// It is exported for testing. 
+func IsLazy(m pref.Message, fd pref.FieldDescriptor) bool { + var mi *MessageInfo + var p pointer + switch m := m.(type) { + case *messageState: + mi = m.messageInfo() + p = m.pointer() + case *messageReflectWrapper: + mi = m.messageInfo() + p = m.pointer() + default: + return false + } + xd, ok := fd.(pref.ExtensionTypeDescriptor) + if !ok { + return false + } + xt := xd.Type() + ext := mi.extensionMap(p) + if ext == nil { + return false + } + f, ok := (*ext)[int32(fd.Number())] + if !ok { + return false + } + return f.typ == xt && f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0 +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go new file mode 100644 index 000000000..cb4b482d1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -0,0 +1,830 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + "sync" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +type errInvalidUTF8 struct{} + +func (errInvalidUTF8) Error() string { return "string field contains invalid UTF-8" } +func (errInvalidUTF8) InvalidUTF8() bool { return true } +func (errInvalidUTF8) Unwrap() error { return errors.Error } + +// initOneofFieldCoders initializes the fast-path functions for the fields in a oneof. +// +// For size, marshal, and isInit operations, functions are set only on the first field +// in the oneof. The functions are called when the oneof is non-nil, and will dispatch +// to the appropriate field-specific function as necessary. +// +// The unmarshal function is set on each field individually as usual. +func (mi *MessageInfo) initOneofFieldCoders(od pref.OneofDescriptor, si structInfo) { + fs := si.oneofsByName[od.Name()] + ft := fs.Type + oneofFields := make(map[reflect.Type]*coderFieldInfo) + needIsInit := false + fields := od.Fields() + for i, lim := 0, fields.Len(); i < lim; i++ { + fd := od.Fields().Get(i) + num := fd.Number() + // Make a copy of the original coderFieldInfo for use in unmarshaling. + // + // oneofFields[oneofType].funcs.marshal is the field-specific marshal function. + // + // mi.coderFields[num].marshal is set on only the first field in the oneof, + // and dispatches to the field-specific marshaler in oneofFields. 
+ cf := *mi.coderFields[num] + ot := si.oneofWrappersByNumber[num] + cf.ft = ot.Field(0).Type + cf.mi, cf.funcs = fieldCoder(fd, cf.ft) + oneofFields[ot] = &cf + if cf.funcs.isInit != nil { + needIsInit = true + } + mi.coderFields[num].funcs.unmarshal = func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + var vw reflect.Value // pointer to wrapper type + vi := p.AsValueOf(ft).Elem() // oneof field value of interface kind + if !vi.IsNil() && !vi.Elem().IsNil() && vi.Elem().Elem().Type() == ot { + vw = vi.Elem() + } else { + vw = reflect.New(ot) + } + out, err := cf.funcs.unmarshal(b, pointerOfValue(vw).Apply(zeroOffset), wtyp, &cf, opts) + if err != nil { + return out, err + } + vi.Set(vw) + return out, nil + } + } + getInfo := func(p pointer) (pointer, *coderFieldInfo) { + v := p.AsValueOf(ft).Elem() + if v.IsNil() { + return pointer{}, nil + } + v = v.Elem() // interface -> *struct + if v.IsNil() { + return pointer{}, nil + } + return pointerOfValue(v).Apply(zeroOffset), oneofFields[v.Elem().Type()] + } + first := mi.coderFields[od.Fields().Get(0).Number()] + first.funcs.size = func(p pointer, _ *coderFieldInfo, opts marshalOptions) int { + p, info := getInfo(p) + if info == nil || info.funcs.size == nil { + return 0 + } + return info.funcs.size(p, info, opts) + } + first.funcs.marshal = func(b []byte, p pointer, _ *coderFieldInfo, opts marshalOptions) ([]byte, error) { + p, info := getInfo(p) + if info == nil || info.funcs.marshal == nil { + return b, nil + } + return info.funcs.marshal(b, p, info, opts) + } + first.funcs.merge = func(dst, src pointer, _ *coderFieldInfo, opts mergeOptions) { + srcp, srcinfo := getInfo(src) + if srcinfo == nil || srcinfo.funcs.merge == nil { + return + } + dstp, dstinfo := getInfo(dst) + if dstinfo != srcinfo { + dst.AsValueOf(ft).Elem().Set(reflect.New(src.AsValueOf(ft).Elem().Elem().Elem().Type())) + dstp = pointerOfValue(dst.AsValueOf(ft).Elem().Elem()).Apply(zeroOffset) + } + srcinfo.funcs.merge(dstp, srcp, srcinfo, opts) + } + if needIsInit { + first.funcs.isInit = func(p pointer, _ *coderFieldInfo) error { + p, info := getInfo(p) + if info == nil || info.funcs.isInit == nil { + return nil + } + return info.funcs.isInit(p, info) + } + } +} + +func makeWeakMessageFieldCoder(fd pref.FieldDescriptor) pointerCoderFuncs { + var once sync.Once + var messageType pref.MessageType + lazyInit := func() { + once.Do(func() { + messageName := fd.Message().FullName() + messageType, _ = preg.GlobalTypes.FindMessageByName(messageName) + }) + } + + return pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + m, ok := p.WeakFields().get(f.num) + if !ok { + return 0 + } + lazyInit() + if messageType == nil { + panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) + } + return sizeMessage(m, f.tagsize, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + m, ok := p.WeakFields().get(f.num) + if !ok { + return b, nil + } + lazyInit() + if messageType == nil { + panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) + } + return appendMessage(b, m, f.wiretag, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + fs := p.WeakFields() + m, ok := fs.get(f.num) + if !ok { + lazyInit() + if messageType == nil { + return unmarshalOutput{}, errUnknown + } + m = 
messageType.New().Interface() + fs.set(f.num, m) + } + return consumeMessage(b, m, wtyp, opts) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + m, ok := p.WeakFields().get(f.num) + if !ok { + return nil + } + return proto.CheckInitialized(m) + }, + merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + sm, ok := src.WeakFields().get(f.num) + if !ok { + return + } + dm, ok := dst.WeakFields().get(f.num) + if !ok { + lazyInit() + if messageType == nil { + panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) + } + dm = messageType.New().Interface() + dst.WeakFields().set(f.num, dm) + } + opts.Merge(dm, sm) + }, + } +} + +func makeMessageFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { + if mi := getMessageInfo(ft); mi != nil { + funcs := pointerCoderFuncs{ + size: sizeMessageInfo, + marshal: appendMessageInfo, + unmarshal: consumeMessageInfo, + merge: mergeMessage, + } + if needsInitCheck(mi.Desc) { + funcs.isInit = isInitMessageInfo + } + return funcs + } else { + return pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + m := asMessage(p.AsValueOf(ft).Elem()) + return sizeMessage(m, f.tagsize, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + m := asMessage(p.AsValueOf(ft).Elem()) + return appendMessage(b, m, f.wiretag, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + mp := p.AsValueOf(ft).Elem() + if mp.IsNil() { + mp.Set(reflect.New(ft.Elem())) + } + return consumeMessage(b, asMessage(mp), wtyp, opts) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + m := asMessage(p.AsValueOf(ft).Elem()) + return proto.CheckInitialized(m) + }, + merge: mergeMessage, + } + } +} + +func sizeMessageInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return protowire.SizeBytes(f.mi.sizePointer(p.Elem(), opts)) + f.tagsize +} + +func appendMessageInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(f.mi.sizePointer(p.Elem(), opts))) + return f.mi.marshalAppendPointer(b, p.Elem(), opts) +} + +func consumeMessageInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + if p.Elem().IsNil() { + p.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + o, err := f.mi.unmarshalPointer(v, p.Elem(), 0, opts) + if err != nil { + return out, err + } + out.n = n + out.initialized = o.initialized + return out, nil +} + +func isInitMessageInfo(p pointer, f *coderFieldInfo) error { + return f.mi.checkInitializedPointer(p.Elem()) +} + +func sizeMessage(m proto.Message, tagsize int, _ marshalOptions) int { + return protowire.SizeBytes(proto.Size(m)) + tagsize +} + +func appendMessage(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(proto.Size(m))) + return opts.Options().MarshalAppend(b, m) +} + +func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := 
protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + Buf: v, + Message: m.ProtoReflect(), + }) + if err != nil { + return out, err + } + out.n = n + out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + return out, nil +} + +func sizeMessageValue(v pref.Value, tagsize int, opts marshalOptions) int { + m := v.Message().Interface() + return sizeMessage(m, tagsize, opts) +} + +func appendMessageValue(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + m := v.Message().Interface() + return appendMessage(b, m, wiretag, opts) +} + +func consumeMessageValue(b []byte, v pref.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) { + m := v.Message().Interface() + out, err := consumeMessage(b, m, wtyp, opts) + return v, out, err +} + +func isInitMessageValue(v pref.Value) error { + m := v.Message().Interface() + return proto.CheckInitialized(m) +} + +var coderMessageValue = valueCoderFuncs{ + size: sizeMessageValue, + marshal: appendMessageValue, + unmarshal: consumeMessageValue, + isInit: isInitMessageValue, + merge: mergeMessageValue, +} + +func sizeGroupValue(v pref.Value, tagsize int, opts marshalOptions) int { + m := v.Message().Interface() + return sizeGroup(m, tagsize, opts) +} + +func appendGroupValue(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + m := v.Message().Interface() + return appendGroup(b, m, wiretag, opts) +} + +func consumeGroupValue(b []byte, v pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) { + m := v.Message().Interface() + out, err := consumeGroup(b, m, num, wtyp, opts) + return v, out, err +} + +var coderGroupValue = valueCoderFuncs{ + size: sizeGroupValue, + marshal: appendGroupValue, + unmarshal: consumeGroupValue, + isInit: isInitMessageValue, + merge: mergeMessageValue, +} + +func makeGroupFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { + num := fd.Number() + if mi := getMessageInfo(ft); mi != nil { + funcs := pointerCoderFuncs{ + size: sizeGroupType, + marshal: appendGroupType, + unmarshal: consumeGroupType, + merge: mergeMessage, + } + if needsInitCheck(mi.Desc) { + funcs.isInit = isInitMessageInfo + } + return funcs + } else { + return pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + m := asMessage(p.AsValueOf(ft).Elem()) + return sizeGroup(m, f.tagsize, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + m := asMessage(p.AsValueOf(ft).Elem()) + return appendGroup(b, m, f.wiretag, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + mp := p.AsValueOf(ft).Elem() + if mp.IsNil() { + mp.Set(reflect.New(ft.Elem())) + } + return consumeGroup(b, asMessage(mp), num, wtyp, opts) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + m := asMessage(p.AsValueOf(ft).Elem()) + return proto.CheckInitialized(m) + }, + merge: mergeMessage, + } + } +} + +func sizeGroupType(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return 2*f.tagsize + f.mi.sizePointer(p.Elem(), opts) +} + +func appendGroupType(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, f.wiretag) // start group + b, err := f.mi.marshalAppendPointer(b, p.Elem(), opts) + b 
= protowire.AppendVarint(b, f.wiretag+1) // end group + return b, err +} + +func consumeGroupType(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.StartGroupType { + return out, errUnknown + } + if p.Elem().IsNil() { + p.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + return f.mi.unmarshalPointer(b, p.Elem(), f.num, opts) +} + +func sizeGroup(m proto.Message, tagsize int, _ marshalOptions) int { + return 2*tagsize + proto.Size(m) +} + +func appendGroup(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) // start group + b, err := opts.Options().MarshalAppend(b, m) + b = protowire.AppendVarint(b, wiretag+1) // end group + return b, err +} + +func consumeGroup(b []byte, m proto.Message, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.StartGroupType { + return out, errUnknown + } + b, n := protowire.ConsumeGroup(num, b) + if n < 0 { + return out, errDecode + } + o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + Buf: b, + Message: m.ProtoReflect(), + }) + if err != nil { + return out, err + } + out.n = n + out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + return out, nil +} + +func makeMessageSliceFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { + if mi := getMessageInfo(ft); mi != nil { + funcs := pointerCoderFuncs{ + size: sizeMessageSliceInfo, + marshal: appendMessageSliceInfo, + unmarshal: consumeMessageSliceInfo, + merge: mergeMessageSlice, + } + if needsInitCheck(mi.Desc) { + funcs.isInit = isInitMessageSliceInfo + } + return funcs + } + return pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return sizeMessageSlice(p, ft, f.tagsize, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + return appendMessageSlice(b, p, f.wiretag, ft, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + return consumeMessageSlice(b, p, ft, wtyp, opts) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + return isInitMessageSlice(p, ft) + }, + merge: mergeMessageSlice, + } +} + +func sizeMessageSliceInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int { + s := p.PointerSlice() + n := 0 + for _, v := range s { + n += protowire.SizeBytes(f.mi.sizePointer(v, opts)) + f.tagsize + } + return n +} + +func appendMessageSliceInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.PointerSlice() + var err error + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + siz := f.mi.sizePointer(v, opts) + b = protowire.AppendVarint(b, uint64(siz)) + b, err = f.mi.marshalAppendPointer(b, v, opts) + if err != nil { + return b, err + } + } + return b, nil +} + +func consumeMessageSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + m := reflect.New(f.mi.GoReflectType.Elem()).Interface() + mp := pointerOfIface(m) + o, err := f.mi.unmarshalPointer(v, mp, 0, opts) + if err != nil { + return out, err + } + p.AppendPointerSlice(mp) + out.n = n + out.initialized 
= o.initialized + return out, nil +} + +func isInitMessageSliceInfo(p pointer, f *coderFieldInfo) error { + s := p.PointerSlice() + for _, v := range s { + if err := f.mi.checkInitializedPointer(v); err != nil { + return err + } + } + return nil +} + +func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, _ marshalOptions) int { + s := p.PointerSlice() + n := 0 + for _, v := range s { + m := asMessage(v.AsValueOf(goType.Elem())) + n += protowire.SizeBytes(proto.Size(m)) + tagsize + } + return n +} + +func appendMessageSlice(b []byte, p pointer, wiretag uint64, goType reflect.Type, opts marshalOptions) ([]byte, error) { + s := p.PointerSlice() + var err error + for _, v := range s { + m := asMessage(v.AsValueOf(goType.Elem())) + b = protowire.AppendVarint(b, wiretag) + siz := proto.Size(m) + b = protowire.AppendVarint(b, uint64(siz)) + b, err = opts.Options().MarshalAppend(b, m) + if err != nil { + return b, err + } + } + return b, nil +} + +func consumeMessageSlice(b []byte, p pointer, goType reflect.Type, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + mp := reflect.New(goType.Elem()) + o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + Buf: v, + Message: asMessage(mp).ProtoReflect(), + }) + if err != nil { + return out, err + } + p.AppendPointerSlice(pointerOfValue(mp)) + out.n = n + out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + return out, nil +} + +func isInitMessageSlice(p pointer, goType reflect.Type) error { + s := p.PointerSlice() + for _, v := range s { + m := asMessage(v.AsValueOf(goType.Elem())) + if err := proto.CheckInitialized(m); err != nil { + return err + } + } + return nil +} + +// Slices of messages + +func sizeMessageSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int { + list := listv.List() + n := 0 + for i, llen := 0, list.Len(); i < llen; i++ { + m := list.Get(i).Message().Interface() + n += protowire.SizeBytes(proto.Size(m)) + tagsize + } + return n +} + +func appendMessageSliceValue(b []byte, listv pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + mopts := opts.Options() + for i, llen := 0, list.Len(); i < llen; i++ { + m := list.Get(i).Message().Interface() + b = protowire.AppendVarint(b, wiretag) + siz := proto.Size(m) + b = protowire.AppendVarint(b, uint64(siz)) + var err error + b, err = mopts.MarshalAppend(b, m) + if err != nil { + return b, err + } + } + return b, nil +} + +func consumeMessageSliceValue(b []byte, listv pref.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ pref.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp != protowire.BytesType { + return pref.Value{}, out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return pref.Value{}, out, errDecode + } + m := list.NewElement() + o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + Buf: v, + Message: m.Message(), + }) + if err != nil { + return pref.Value{}, out, err + } + list.Append(m) + out.n = n + out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + return listv, out, nil +} + +func isInitMessageSliceValue(listv pref.Value) error { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + m := list.Get(i).Message().Interface() + if err := proto.CheckInitialized(m); err != nil { + return err + } + } + return nil +} + +var 
coderMessageSliceValue = valueCoderFuncs{ + size: sizeMessageSliceValue, + marshal: appendMessageSliceValue, + unmarshal: consumeMessageSliceValue, + isInit: isInitMessageSliceValue, + merge: mergeMessageListValue, +} + +func sizeGroupSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int { + list := listv.List() + n := 0 + for i, llen := 0, list.Len(); i < llen; i++ { + m := list.Get(i).Message().Interface() + n += 2*tagsize + proto.Size(m) + } + return n +} + +func appendGroupSliceValue(b []byte, listv pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + mopts := opts.Options() + for i, llen := 0, list.Len(); i < llen; i++ { + m := list.Get(i).Message().Interface() + b = protowire.AppendVarint(b, wiretag) // start group + var err error + b, err = mopts.MarshalAppend(b, m) + if err != nil { + return b, err + } + b = protowire.AppendVarint(b, wiretag+1) // end group + } + return b, nil +} + +func consumeGroupSliceValue(b []byte, listv pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ pref.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp != protowire.StartGroupType { + return pref.Value{}, out, errUnknown + } + b, n := protowire.ConsumeGroup(num, b) + if n < 0 { + return pref.Value{}, out, errDecode + } + m := list.NewElement() + o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + Buf: b, + Message: m.Message(), + }) + if err != nil { + return pref.Value{}, out, err + } + list.Append(m) + out.n = n + out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + return listv, out, nil +} + +var coderGroupSliceValue = valueCoderFuncs{ + size: sizeGroupSliceValue, + marshal: appendGroupSliceValue, + unmarshal: consumeGroupSliceValue, + isInit: isInitMessageSliceValue, + merge: mergeMessageListValue, +} + +func makeGroupSliceFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { + num := fd.Number() + if mi := getMessageInfo(ft); mi != nil { + funcs := pointerCoderFuncs{ + size: sizeGroupSliceInfo, + marshal: appendGroupSliceInfo, + unmarshal: consumeGroupSliceInfo, + merge: mergeMessageSlice, + } + if needsInitCheck(mi.Desc) { + funcs.isInit = isInitMessageSliceInfo + } + return funcs + } + return pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return sizeGroupSlice(p, ft, f.tagsize, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + return appendGroupSlice(b, p, f.wiretag, ft, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + return consumeGroupSlice(b, p, num, wtyp, ft, opts) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + return isInitMessageSlice(p, ft) + }, + merge: mergeMessageSlice, + } +} + +func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, _ marshalOptions) int { + s := p.PointerSlice() + n := 0 + for _, v := range s { + m := asMessage(v.AsValueOf(messageType.Elem())) + n += 2*tagsize + proto.Size(m) + } + return n +} + +func appendGroupSlice(b []byte, p pointer, wiretag uint64, messageType reflect.Type, opts marshalOptions) ([]byte, error) { + s := p.PointerSlice() + var err error + for _, v := range s { + m := asMessage(v.AsValueOf(messageType.Elem())) + b = protowire.AppendVarint(b, wiretag) // start group + b, err = opts.Options().MarshalAppend(b, m) + if err != nil { + return b, err + } + b = protowire.AppendVarint(b, 
wiretag+1) // end group + } + return b, nil +} + +func consumeGroupSlice(b []byte, p pointer, num protowire.Number, wtyp protowire.Type, goType reflect.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.StartGroupType { + return out, errUnknown + } + b, n := protowire.ConsumeGroup(num, b) + if n < 0 { + return out, errDecode + } + mp := reflect.New(goType.Elem()) + o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + Buf: b, + Message: asMessage(mp).ProtoReflect(), + }) + if err != nil { + return out, err + } + p.AppendPointerSlice(pointerOfValue(mp)) + out.n = n + out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + return out, nil +} + +func sizeGroupSliceInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int { + s := p.PointerSlice() + n := 0 + for _, v := range s { + n += 2*f.tagsize + f.mi.sizePointer(v, opts) + } + return n +} + +func appendGroupSliceInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.PointerSlice() + var err error + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) // start group + b, err = f.mi.marshalAppendPointer(b, v, opts) + if err != nil { + return b, err + } + b = protowire.AppendVarint(b, f.wiretag+1) // end group + } + return b, nil +} + +func consumeGroupSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + if wtyp != protowire.StartGroupType { + return unmarshalOutput{}, errUnknown + } + m := reflect.New(f.mi.GoReflectType.Elem()).Interface() + mp := pointerOfIface(m) + out, err := f.mi.unmarshalPointer(b, mp, f.num, opts) + if err != nil { + return out, err + } + p.AppendPointerSlice(mp) + return out, nil +} + +func asMessage(v reflect.Value) pref.ProtoMessage { + if m, ok := v.Interface().(pref.ProtoMessage); ok { + return m + } + return legacyWrapMessage(v).Interface() +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go new file mode 100644 index 000000000..1a509b63e --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go @@ -0,0 +1,5637 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package impl + +import ( + "math" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// sizeBool returns the size of wire encoding a bool pointer as a Bool. +func sizeBool(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Bool() + return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) +} + +// appendBool wire encodes a bool pointer as a Bool. +func appendBool(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Bool() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v)) + return b, nil +} + +// consumeBool wire decodes a bool pointer as a Bool. 
+func consumeBool(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *p.Bool() = protowire.DecodeBool(v) + out.n = n + return out, nil +} + +var coderBool = pointerCoderFuncs{ + size: sizeBool, + marshal: appendBool, + unmarshal: consumeBool, + merge: mergeBool, +} + +// sizeBoolNoZero returns the size of wire encoding a bool pointer as a Bool. +// The zero value is not encoded. +func sizeBoolNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Bool() + if v == false { + return 0 + } + return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) +} + +// appendBoolNoZero wire encodes a bool pointer as a Bool. +// The zero value is not encoded. +func appendBoolNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Bool() + if v == false { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v)) + return b, nil +} + +var coderBoolNoZero = pointerCoderFuncs{ + size: sizeBoolNoZero, + marshal: appendBoolNoZero, + unmarshal: consumeBool, + merge: mergeBoolNoZero, +} + +// sizeBoolPtr returns the size of wire encoding a *bool pointer as a Bool. +// It panics if the pointer is nil. +func sizeBoolPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := **p.BoolPtr() + return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) +} + +// appendBoolPtr wire encodes a *bool pointer as a Bool. +// It panics if the pointer is nil. +func appendBoolPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.BoolPtr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v)) + return b, nil +} + +// consumeBoolPtr wire decodes a *bool pointer as a Bool. +func consumeBoolPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + vp := p.BoolPtr() + if *vp == nil { + *vp = new(bool) + } + **vp = protowire.DecodeBool(v) + out.n = n + return out, nil +} + +var coderBoolPtr = pointerCoderFuncs{ + size: sizeBoolPtr, + marshal: appendBoolPtr, + unmarshal: consumeBoolPtr, + merge: mergeBoolPtr, +} + +// sizeBoolSlice returns the size of wire encoding a []bool pointer as a repeated Bool. +func sizeBoolSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.BoolSlice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) + } + return size +} + +// appendBoolSlice encodes a []bool pointer as a repeated Bool. 
+func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.BoolSlice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v)) + } + return b, nil +} + +// consumeBoolSlice wire decodes a []bool pointer as a repeated Bool. +func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.BoolSlice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + s = append(s, protowire.DecodeBool(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *sp = append(*sp, protowire.DecodeBool(v)) + out.n = n + return out, nil +} + +var coderBoolSlice = pointerCoderFuncs{ + size: sizeBoolSlice, + marshal: appendBoolSlice, + unmarshal: consumeBoolSlice, + merge: mergeBoolSlice, +} + +// sizeBoolPackedSlice returns the size of wire encoding a []bool pointer as a packed repeated Bool. +func sizeBoolPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.BoolSlice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeBool(v)) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendBoolPackedSlice encodes a []bool pointer as a packed repeated Bool. +func appendBoolPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.BoolSlice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeBool(v)) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, protowire.EncodeBool(v)) + } + return b, nil +} + +var coderBoolPackedSlice = pointerCoderFuncs{ + size: sizeBoolPackedSlice, + marshal: appendBoolPackedSlice, + unmarshal: consumeBoolSlice, + merge: mergeBoolSlice, +} + +// sizeBoolValue returns the size of wire encoding a bool value as a Bool. +func sizeBoolValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeVarint(protowire.EncodeBool(v.Bool())) +} + +// appendBoolValue encodes a bool value as a Bool. +func appendBoolValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) + return b, nil +} + +// consumeBoolValue decodes a bool value as a Bool. 
+func consumeBoolValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfBool(protowire.DecodeBool(v)), out, nil +} + +var coderBoolValue = valueCoderFuncs{ + size: sizeBoolValue, + marshal: appendBoolValue, + unmarshal: consumeBoolValue, + merge: mergeScalarValue, +} + +// sizeBoolSliceValue returns the size of wire encoding a []bool value as a repeated Bool. +func sizeBoolSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(protowire.EncodeBool(v.Bool())) + } + return size +} + +// appendBoolSliceValue encodes a []bool value as a repeated Bool. +func appendBoolSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) + } + return b, nil +} + +// consumeBoolSliceValue wire decodes a []bool value as a repeated Bool. +func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) + out.n = n + return listv, out, nil +} + +var coderBoolSliceValue = valueCoderFuncs{ + size: sizeBoolSliceValue, + marshal: appendBoolSliceValue, + unmarshal: consumeBoolSliceValue, + merge: mergeListValue, +} + +// sizeBoolPackedSliceValue returns the size of wire encoding a []bool value as a packed repeated Bool. 
+func sizeBoolPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeBool(v.Bool())) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendBoolPackedSliceValue encodes a []bool value as a packed repeated Bool. +func appendBoolPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeBool(v.Bool())) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) + } + return b, nil +} + +var coderBoolPackedSliceValue = valueCoderFuncs{ + size: sizeBoolPackedSliceValue, + marshal: appendBoolPackedSliceValue, + unmarshal: consumeBoolSliceValue, + merge: mergeListValue, +} + +// sizeEnumValue returns the size of wire encoding a value as a Enum. +func sizeEnumValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeVarint(uint64(v.Enum())) +} + +// appendEnumValue encodes a value as a Enum. +func appendEnumValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(v.Enum())) + return b, nil +} + +// consumeEnumValue decodes a value as a Enum. +func consumeEnumValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), out, nil +} + +var coderEnumValue = valueCoderFuncs{ + size: sizeEnumValue, + marshal: appendEnumValue, + unmarshal: consumeEnumValue, + merge: mergeScalarValue, +} + +// sizeEnumSliceValue returns the size of wire encoding a [] value as a repeated Enum. +func sizeEnumSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(uint64(v.Enum())) + } + return size +} + +// appendEnumSliceValue encodes a [] value as a repeated Enum. +func appendEnumSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(v.Enum())) + } + return b, nil +} + +// consumeEnumSliceValue wire decodes a [] value as a repeated Enum. 
+func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) + out.n = n + return listv, out, nil +} + +var coderEnumSliceValue = valueCoderFuncs{ + size: sizeEnumSliceValue, + marshal: appendEnumSliceValue, + unmarshal: consumeEnumSliceValue, + merge: mergeListValue, +} + +// sizeEnumPackedSliceValue returns the size of wire encoding a [] value as a packed repeated Enum. +func sizeEnumPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(v.Enum())) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendEnumPackedSliceValue encodes a [] value as a packed repeated Enum. +func appendEnumPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(v.Enum())) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, uint64(v.Enum())) + } + return b, nil +} + +var coderEnumPackedSliceValue = valueCoderFuncs{ + size: sizeEnumPackedSliceValue, + marshal: appendEnumPackedSliceValue, + unmarshal: consumeEnumSliceValue, + merge: mergeListValue, +} + +// sizeInt32 returns the size of wire encoding a int32 pointer as a Int32. +func sizeInt32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Int32() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt32 wire encodes a int32 pointer as a Int32. +func appendInt32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeInt32 wire decodes a int32 pointer as a Int32. 
+func consumeInt32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *p.Int32() = int32(v) + out.n = n + return out, nil +} + +var coderInt32 = pointerCoderFuncs{ + size: sizeInt32, + marshal: appendInt32, + unmarshal: consumeInt32, + merge: mergeInt32, +} + +// sizeInt32NoZero returns the size of wire encoding a int32 pointer as a Int32. +// The zero value is not encoded. +func sizeInt32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Int32() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt32NoZero wire encodes a int32 pointer as a Int32. +// The zero value is not encoded. +func appendInt32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int32() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +var coderInt32NoZero = pointerCoderFuncs{ + size: sizeInt32NoZero, + marshal: appendInt32NoZero, + unmarshal: consumeInt32, + merge: mergeInt32NoZero, +} + +// sizeInt32Ptr returns the size of wire encoding a *int32 pointer as a Int32. +// It panics if the pointer is nil. +func sizeInt32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := **p.Int32Ptr() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt32Ptr wire encodes a *int32 pointer as a Int32. +// It panics if the pointer is nil. +func appendInt32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Int32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeInt32Ptr wire decodes a *int32 pointer as a Int32. +func consumeInt32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + vp := p.Int32Ptr() + if *vp == nil { + *vp = new(int32) + } + **vp = int32(v) + out.n = n + return out, nil +} + +var coderInt32Ptr = pointerCoderFuncs{ + size: sizeInt32Ptr, + marshal: appendInt32Ptr, + unmarshal: consumeInt32Ptr, + merge: mergeInt32Ptr, +} + +// sizeInt32Slice returns the size of wire encoding a []int32 pointer as a repeated Int32. +func sizeInt32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int32Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(uint64(v)) + } + return size +} + +// appendInt32Slice encodes a []int32 pointer as a repeated Int32. 
+func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +// consumeInt32Slice wire decodes a []int32 pointer as a repeated Int32. +func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int32Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + s = append(s, int32(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *sp = append(*sp, int32(v)) + out.n = n + return out, nil +} + +var coderInt32Slice = pointerCoderFuncs{ + size: sizeInt32Slice, + marshal: appendInt32Slice, + unmarshal: consumeInt32Slice, + merge: mergeInt32Slice, +} + +// sizeInt32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Int32. +func sizeInt32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendInt32PackedSlice encodes a []int32 pointer as a packed repeated Int32. +func appendInt32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +var coderInt32PackedSlice = pointerCoderFuncs{ + size: sizeInt32PackedSlice, + marshal: appendInt32PackedSlice, + unmarshal: consumeInt32Slice, + merge: mergeInt32Slice, +} + +// sizeInt32Value returns the size of wire encoding a int32 value as a Int32. +func sizeInt32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeVarint(uint64(int32(v.Int()))) +} + +// appendInt32Value encodes a int32 value as a Int32. +func appendInt32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(int32(v.Int()))) + return b, nil +} + +// consumeInt32Value decodes a int32 value as a Int32. 
+func consumeInt32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfInt32(int32(v)), out, nil +} + +var coderInt32Value = valueCoderFuncs{ + size: sizeInt32Value, + marshal: appendInt32Value, + unmarshal: consumeInt32Value, + merge: mergeScalarValue, +} + +// sizeInt32SliceValue returns the size of wire encoding a []int32 value as a repeated Int32. +func sizeInt32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(uint64(int32(v.Int()))) + } + return size +} + +// appendInt32SliceValue encodes a []int32 value as a repeated Int32. +func appendInt32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(int32(v.Int()))) + } + return b, nil +} + +// consumeInt32SliceValue wire decodes a []int32 value as a repeated Int32. +func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + out.n = n + return listv, out, nil +} + +var coderInt32SliceValue = valueCoderFuncs{ + size: sizeInt32SliceValue, + marshal: appendInt32SliceValue, + unmarshal: consumeInt32SliceValue, + merge: mergeListValue, +} + +// sizeInt32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Int32. 
+func sizeInt32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(int32(v.Int()))) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendInt32PackedSliceValue encodes a []int32 value as a packed repeated Int32. +func appendInt32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(int32(v.Int()))) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, uint64(int32(v.Int()))) + } + return b, nil +} + +var coderInt32PackedSliceValue = valueCoderFuncs{ + size: sizeInt32PackedSliceValue, + marshal: appendInt32PackedSliceValue, + unmarshal: consumeInt32SliceValue, + merge: mergeListValue, +} + +// sizeSint32 returns the size of wire encoding a int32 pointer as a Sint32. +func sizeSint32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Int32() + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) +} + +// appendSint32 wire encodes a int32 pointer as a Sint32. +func appendSint32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) + return b, nil +} + +// consumeSint32 wire decodes a int32 pointer as a Sint32. +func consumeSint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *p.Int32() = int32(protowire.DecodeZigZag(v & math.MaxUint32)) + out.n = n + return out, nil +} + +var coderSint32 = pointerCoderFuncs{ + size: sizeSint32, + marshal: appendSint32, + unmarshal: consumeSint32, + merge: mergeInt32, +} + +// sizeSint32NoZero returns the size of wire encoding a int32 pointer as a Sint32. +// The zero value is not encoded. +func sizeSint32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Int32() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) +} + +// appendSint32NoZero wire encodes a int32 pointer as a Sint32. +// The zero value is not encoded. +func appendSint32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int32() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) + return b, nil +} + +var coderSint32NoZero = pointerCoderFuncs{ + size: sizeSint32NoZero, + marshal: appendSint32NoZero, + unmarshal: consumeSint32, + merge: mergeInt32NoZero, +} + +// sizeSint32Ptr returns the size of wire encoding a *int32 pointer as a Sint32. +// It panics if the pointer is nil. 
+func sizeSint32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := **p.Int32Ptr() + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) +} + +// appendSint32Ptr wire encodes a *int32 pointer as a Sint32. +// It panics if the pointer is nil. +func appendSint32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Int32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) + return b, nil +} + +// consumeSint32Ptr wire decodes a *int32 pointer as a Sint32. +func consumeSint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + vp := p.Int32Ptr() + if *vp == nil { + *vp = new(int32) + } + **vp = int32(protowire.DecodeZigZag(v & math.MaxUint32)) + out.n = n + return out, nil +} + +var coderSint32Ptr = pointerCoderFuncs{ + size: sizeSint32Ptr, + marshal: appendSint32Ptr, + unmarshal: consumeSint32Ptr, + merge: mergeInt32Ptr, +} + +// sizeSint32Slice returns the size of wire encoding a []int32 pointer as a repeated Sint32. +func sizeSint32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int32Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) + } + return size +} + +// appendSint32Slice encodes a []int32 pointer as a repeated Sint32. +func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) + } + return b, nil +} + +// consumeSint32Slice wire decodes a []int32 pointer as a repeated Sint32. 
+func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int32Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + s = append(s, int32(protowire.DecodeZigZag(v&math.MaxUint32))) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *sp = append(*sp, int32(protowire.DecodeZigZag(v&math.MaxUint32))) + out.n = n + return out, nil +} + +var coderSint32Slice = pointerCoderFuncs{ + size: sizeSint32Slice, + marshal: appendSint32Slice, + unmarshal: consumeSint32Slice, + merge: mergeInt32Slice, +} + +// sizeSint32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sint32. +func sizeSint32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendSint32PackedSlice encodes a []int32 pointer as a packed repeated Sint32. +func appendSint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) + } + return b, nil +} + +var coderSint32PackedSlice = pointerCoderFuncs{ + size: sizeSint32PackedSlice, + marshal: appendSint32PackedSlice, + unmarshal: consumeSint32Slice, + merge: mergeInt32Slice, +} + +// sizeSint32Value returns the size of wire encoding a int32 value as a Sint32. +func sizeSint32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) +} + +// appendSint32Value encodes a int32 value as a Sint32. +func appendSint32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) + return b, nil +} + +// consumeSint32Value decodes a int32 value as a Sint32. 
+func consumeSint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), out, nil +} + +var coderSint32Value = valueCoderFuncs{ + size: sizeSint32Value, + marshal: appendSint32Value, + unmarshal: consumeSint32Value, + merge: mergeScalarValue, +} + +// sizeSint32SliceValue returns the size of wire encoding a []int32 value as a repeated Sint32. +func sizeSint32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) + } + return size +} + +// appendSint32SliceValue encodes a []int32 value as a repeated Sint32. +func appendSint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) + } + return b, nil +} + +// consumeSint32SliceValue wire decodes a []int32 value as a repeated Sint32. +func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) + out.n = n + return listv, out, nil +} + +var coderSint32SliceValue = valueCoderFuncs{ + size: sizeSint32SliceValue, + marshal: appendSint32SliceValue, + unmarshal: consumeSint32SliceValue, + merge: mergeListValue, +} + +// sizeSint32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sint32. 
+func sizeSint32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendSint32PackedSliceValue encodes a []int32 value as a packed repeated Sint32. +func appendSint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) + } + return b, nil +} + +var coderSint32PackedSliceValue = valueCoderFuncs{ + size: sizeSint32PackedSliceValue, + marshal: appendSint32PackedSliceValue, + unmarshal: consumeSint32SliceValue, + merge: mergeListValue, +} + +// sizeUint32 returns the size of wire encoding a uint32 pointer as a Uint32. +func sizeUint32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Uint32() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendUint32 wire encodes a uint32 pointer as a Uint32. +func appendUint32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Uint32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeUint32 wire decodes a uint32 pointer as a Uint32. +func consumeUint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *p.Uint32() = uint32(v) + out.n = n + return out, nil +} + +var coderUint32 = pointerCoderFuncs{ + size: sizeUint32, + marshal: appendUint32, + unmarshal: consumeUint32, + merge: mergeUint32, +} + +// sizeUint32NoZero returns the size of wire encoding a uint32 pointer as a Uint32. +// The zero value is not encoded. +func sizeUint32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Uint32() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendUint32NoZero wire encodes a uint32 pointer as a Uint32. +// The zero value is not encoded. +func appendUint32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Uint32() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +var coderUint32NoZero = pointerCoderFuncs{ + size: sizeUint32NoZero, + marshal: appendUint32NoZero, + unmarshal: consumeUint32, + merge: mergeUint32NoZero, +} + +// sizeUint32Ptr returns the size of wire encoding a *uint32 pointer as a Uint32. +// It panics if the pointer is nil. 
+func sizeUint32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := **p.Uint32Ptr() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendUint32Ptr wire encodes a *uint32 pointer as a Uint32. +// It panics if the pointer is nil. +func appendUint32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Uint32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeUint32Ptr wire decodes a *uint32 pointer as a Uint32. +func consumeUint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + vp := p.Uint32Ptr() + if *vp == nil { + *vp = new(uint32) + } + **vp = uint32(v) + out.n = n + return out, nil +} + +var coderUint32Ptr = pointerCoderFuncs{ + size: sizeUint32Ptr, + marshal: appendUint32Ptr, + unmarshal: consumeUint32Ptr, + merge: mergeUint32Ptr, +} + +// sizeUint32Slice returns the size of wire encoding a []uint32 pointer as a repeated Uint32. +func sizeUint32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Uint32Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(uint64(v)) + } + return size +} + +// appendUint32Slice encodes a []uint32 pointer as a repeated Uint32. +func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Uint32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +// consumeUint32Slice wire decodes a []uint32 pointer as a repeated Uint32. +func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Uint32Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + s = append(s, uint32(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *sp = append(*sp, uint32(v)) + out.n = n + return out, nil +} + +var coderUint32Slice = pointerCoderFuncs{ + size: sizeUint32Slice, + marshal: appendUint32Slice, + unmarshal: consumeUint32Slice, + merge: mergeUint32Slice, +} + +// sizeUint32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Uint32. 
+func sizeUint32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Uint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendUint32PackedSlice encodes a []uint32 pointer as a packed repeated Uint32. +func appendUint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Uint32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +var coderUint32PackedSlice = pointerCoderFuncs{ + size: sizeUint32PackedSlice, + marshal: appendUint32PackedSlice, + unmarshal: consumeUint32Slice, + merge: mergeUint32Slice, +} + +// sizeUint32Value returns the size of wire encoding a uint32 value as a Uint32. +func sizeUint32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeVarint(uint64(uint32(v.Uint()))) +} + +// appendUint32Value encodes a uint32 value as a Uint32. +func appendUint32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) + return b, nil +} + +// consumeUint32Value decodes a uint32 value as a Uint32. +func consumeUint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfUint32(uint32(v)), out, nil +} + +var coderUint32Value = valueCoderFuncs{ + size: sizeUint32Value, + marshal: appendUint32Value, + unmarshal: consumeUint32Value, + merge: mergeScalarValue, +} + +// sizeUint32SliceValue returns the size of wire encoding a []uint32 value as a repeated Uint32. +func sizeUint32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(uint64(uint32(v.Uint()))) + } + return size +} + +// appendUint32SliceValue encodes a []uint32 value as a repeated Uint32. +func appendUint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) + } + return b, nil +} + +// consumeUint32SliceValue wire decodes a []uint32 value as a repeated Uint32. 
+func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + out.n = n + return listv, out, nil +} + +var coderUint32SliceValue = valueCoderFuncs{ + size: sizeUint32SliceValue, + marshal: appendUint32SliceValue, + unmarshal: consumeUint32SliceValue, + merge: mergeListValue, +} + +// sizeUint32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Uint32. +func sizeUint32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(uint32(v.Uint()))) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendUint32PackedSliceValue encodes a []uint32 value as a packed repeated Uint32. +func appendUint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(uint32(v.Uint()))) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) + } + return b, nil +} + +var coderUint32PackedSliceValue = valueCoderFuncs{ + size: sizeUint32PackedSliceValue, + marshal: appendUint32PackedSliceValue, + unmarshal: consumeUint32SliceValue, + merge: mergeListValue, +} + +// sizeInt64 returns the size of wire encoding a int64 pointer as a Int64. +func sizeInt64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Int64() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt64 wire encodes a int64 pointer as a Int64. +func appendInt64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeInt64 wire decodes a int64 pointer as a Int64. 
+func consumeInt64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *p.Int64() = int64(v) + out.n = n + return out, nil +} + +var coderInt64 = pointerCoderFuncs{ + size: sizeInt64, + marshal: appendInt64, + unmarshal: consumeInt64, + merge: mergeInt64, +} + +// sizeInt64NoZero returns the size of wire encoding a int64 pointer as a Int64. +// The zero value is not encoded. +func sizeInt64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Int64() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt64NoZero wire encodes a int64 pointer as a Int64. +// The zero value is not encoded. +func appendInt64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int64() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +var coderInt64NoZero = pointerCoderFuncs{ + size: sizeInt64NoZero, + marshal: appendInt64NoZero, + unmarshal: consumeInt64, + merge: mergeInt64NoZero, +} + +// sizeInt64Ptr returns the size of wire encoding a *int64 pointer as a Int64. +// It panics if the pointer is nil. +func sizeInt64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := **p.Int64Ptr() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt64Ptr wire encodes a *int64 pointer as a Int64. +// It panics if the pointer is nil. +func appendInt64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Int64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeInt64Ptr wire decodes a *int64 pointer as a Int64. +func consumeInt64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + vp := p.Int64Ptr() + if *vp == nil { + *vp = new(int64) + } + **vp = int64(v) + out.n = n + return out, nil +} + +var coderInt64Ptr = pointerCoderFuncs{ + size: sizeInt64Ptr, + marshal: appendInt64Ptr, + unmarshal: consumeInt64Ptr, + merge: mergeInt64Ptr, +} + +// sizeInt64Slice returns the size of wire encoding a []int64 pointer as a repeated Int64. +func sizeInt64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int64Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(uint64(v)) + } + return size +} + +// appendInt64Slice encodes a []int64 pointer as a repeated Int64. 
+func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +// consumeInt64Slice wire decodes a []int64 pointer as a repeated Int64. +func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int64Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + s = append(s, int64(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *sp = append(*sp, int64(v)) + out.n = n + return out, nil +} + +var coderInt64Slice = pointerCoderFuncs{ + size: sizeInt64Slice, + marshal: appendInt64Slice, + unmarshal: consumeInt64Slice, + merge: mergeInt64Slice, +} + +// sizeInt64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Int64. +func sizeInt64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendInt64PackedSlice encodes a []int64 pointer as a packed repeated Int64. +func appendInt64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +var coderInt64PackedSlice = pointerCoderFuncs{ + size: sizeInt64PackedSlice, + marshal: appendInt64PackedSlice, + unmarshal: consumeInt64Slice, + merge: mergeInt64Slice, +} + +// sizeInt64Value returns the size of wire encoding a int64 value as a Int64. +func sizeInt64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeVarint(uint64(v.Int())) +} + +// appendInt64Value encodes a int64 value as a Int64. +func appendInt64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(v.Int())) + return b, nil +} + +// consumeInt64Value decodes a int64 value as a Int64. 
+func consumeInt64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfInt64(int64(v)), out, nil +} + +var coderInt64Value = valueCoderFuncs{ + size: sizeInt64Value, + marshal: appendInt64Value, + unmarshal: consumeInt64Value, + merge: mergeScalarValue, +} + +// sizeInt64SliceValue returns the size of wire encoding a []int64 value as a repeated Int64. +func sizeInt64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(uint64(v.Int())) + } + return size +} + +// appendInt64SliceValue encodes a []int64 value as a repeated Int64. +func appendInt64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(v.Int())) + } + return b, nil +} + +// consumeInt64SliceValue wire decodes a []int64 value as a repeated Int64. +func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + out.n = n + return listv, out, nil +} + +var coderInt64SliceValue = valueCoderFuncs{ + size: sizeInt64SliceValue, + marshal: appendInt64SliceValue, + unmarshal: consumeInt64SliceValue, + merge: mergeListValue, +} + +// sizeInt64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Int64. 
+func sizeInt64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(v.Int())) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendInt64PackedSliceValue encodes a []int64 value as a packed repeated Int64. +func appendInt64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(v.Int())) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, uint64(v.Int())) + } + return b, nil +} + +var coderInt64PackedSliceValue = valueCoderFuncs{ + size: sizeInt64PackedSliceValue, + marshal: appendInt64PackedSliceValue, + unmarshal: consumeInt64SliceValue, + merge: mergeListValue, +} + +// sizeSint64 returns the size of wire encoding a int64 pointer as a Sint64. +func sizeSint64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Int64() + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) +} + +// appendSint64 wire encodes a int64 pointer as a Sint64. +func appendSint64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) + return b, nil +} + +// consumeSint64 wire decodes a int64 pointer as a Sint64. +func consumeSint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *p.Int64() = protowire.DecodeZigZag(v) + out.n = n + return out, nil +} + +var coderSint64 = pointerCoderFuncs{ + size: sizeSint64, + marshal: appendSint64, + unmarshal: consumeSint64, + merge: mergeInt64, +} + +// sizeSint64NoZero returns the size of wire encoding a int64 pointer as a Sint64. +// The zero value is not encoded. +func sizeSint64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Int64() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) +} + +// appendSint64NoZero wire encodes a int64 pointer as a Sint64. +// The zero value is not encoded. +func appendSint64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int64() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) + return b, nil +} + +var coderSint64NoZero = pointerCoderFuncs{ + size: sizeSint64NoZero, + marshal: appendSint64NoZero, + unmarshal: consumeSint64, + merge: mergeInt64NoZero, +} + +// sizeSint64Ptr returns the size of wire encoding a *int64 pointer as a Sint64. +// It panics if the pointer is nil. 
+func sizeSint64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := **p.Int64Ptr() + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) +} + +// appendSint64Ptr wire encodes a *int64 pointer as a Sint64. +// It panics if the pointer is nil. +func appendSint64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Int64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) + return b, nil +} + +// consumeSint64Ptr wire decodes a *int64 pointer as a Sint64. +func consumeSint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + vp := p.Int64Ptr() + if *vp == nil { + *vp = new(int64) + } + **vp = protowire.DecodeZigZag(v) + out.n = n + return out, nil +} + +var coderSint64Ptr = pointerCoderFuncs{ + size: sizeSint64Ptr, + marshal: appendSint64Ptr, + unmarshal: consumeSint64Ptr, + merge: mergeInt64Ptr, +} + +// sizeSint64Slice returns the size of wire encoding a []int64 pointer as a repeated Sint64. +func sizeSint64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int64Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) + } + return size +} + +// appendSint64Slice encodes a []int64 pointer as a repeated Sint64. +func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) + } + return b, nil +} + +// consumeSint64Slice wire decodes a []int64 pointer as a repeated Sint64. +func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int64Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + s = append(s, protowire.DecodeZigZag(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *sp = append(*sp, protowire.DecodeZigZag(v)) + out.n = n + return out, nil +} + +var coderSint64Slice = pointerCoderFuncs{ + size: sizeSint64Slice, + marshal: appendSint64Slice, + unmarshal: consumeSint64Slice, + merge: mergeInt64Slice, +} + +// sizeSint64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sint64. 
+func sizeSint64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeZigZag(v)) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendSint64PackedSlice encodes a []int64 pointer as a packed repeated Sint64. +func appendSint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeZigZag(v)) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) + } + return b, nil +} + +var coderSint64PackedSlice = pointerCoderFuncs{ + size: sizeSint64PackedSlice, + marshal: appendSint64PackedSlice, + unmarshal: consumeSint64Slice, + merge: mergeInt64Slice, +} + +// sizeSint64Value returns the size of wire encoding a int64 value as a Sint64. +func sizeSint64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) +} + +// appendSint64Value encodes a int64 value as a Sint64. +func appendSint64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) + return b, nil +} + +// consumeSint64Value decodes a int64 value as a Sint64. +func consumeSint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), out, nil +} + +var coderSint64Value = valueCoderFuncs{ + size: sizeSint64Value, + marshal: appendSint64Value, + unmarshal: consumeSint64Value, + merge: mergeScalarValue, +} + +// sizeSint64SliceValue returns the size of wire encoding a []int64 value as a repeated Sint64. +func sizeSint64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) + } + return size +} + +// appendSint64SliceValue encodes a []int64 value as a repeated Sint64. +func appendSint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) + } + return b, nil +} + +// consumeSint64SliceValue wire decodes a []int64 value as a repeated Sint64. 
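+// A repeated scalar may arrive either packed (a single BytesType record whose
+// payload is a run of varints) or unpacked (one VarintType record per element),
+// so the decoder accepts both wire types and appends every element it finds.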
+func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) + out.n = n + return listv, out, nil +} + +var coderSint64SliceValue = valueCoderFuncs{ + size: sizeSint64SliceValue, + marshal: appendSint64SliceValue, + unmarshal: consumeSint64SliceValue, + merge: mergeListValue, +} + +// sizeSint64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sint64. +func sizeSint64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendSint64PackedSliceValue encodes a []int64 value as a packed repeated Sint64. +func appendSint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) + } + return b, nil +} + +var coderSint64PackedSliceValue = valueCoderFuncs{ + size: sizeSint64PackedSliceValue, + marshal: appendSint64PackedSliceValue, + unmarshal: consumeSint64SliceValue, + merge: mergeListValue, +} + +// sizeUint64 returns the size of wire encoding a uint64 pointer as a Uint64. +func sizeUint64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Uint64() + return f.tagsize + protowire.SizeVarint(v) +} + +// appendUint64 wire encodes a uint64 pointer as a Uint64. +func appendUint64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Uint64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, v) + return b, nil +} + +// consumeUint64 wire decodes a uint64 pointer as a Uint64. 
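+// One- and two-byte varints (values below 16384) are decoded inline as a fast
+// path; anything longer falls back to protowire.ConsumeVarint.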
+func consumeUint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *p.Uint64() = v + out.n = n + return out, nil +} + +var coderUint64 = pointerCoderFuncs{ + size: sizeUint64, + marshal: appendUint64, + unmarshal: consumeUint64, + merge: mergeUint64, +} + +// sizeUint64NoZero returns the size of wire encoding a uint64 pointer as a Uint64. +// The zero value is not encoded. +func sizeUint64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Uint64() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(v) +} + +// appendUint64NoZero wire encodes a uint64 pointer as a Uint64. +// The zero value is not encoded. +func appendUint64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Uint64() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, v) + return b, nil +} + +var coderUint64NoZero = pointerCoderFuncs{ + size: sizeUint64NoZero, + marshal: appendUint64NoZero, + unmarshal: consumeUint64, + merge: mergeUint64NoZero, +} + +// sizeUint64Ptr returns the size of wire encoding a *uint64 pointer as a Uint64. +// It panics if the pointer is nil. +func sizeUint64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := **p.Uint64Ptr() + return f.tagsize + protowire.SizeVarint(v) +} + +// appendUint64Ptr wire encodes a *uint64 pointer as a Uint64. +// It panics if the pointer is nil. +func appendUint64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Uint64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, v) + return b, nil +} + +// consumeUint64Ptr wire decodes a *uint64 pointer as a Uint64. +func consumeUint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + vp := p.Uint64Ptr() + if *vp == nil { + *vp = new(uint64) + } + **vp = v + out.n = n + return out, nil +} + +var coderUint64Ptr = pointerCoderFuncs{ + size: sizeUint64Ptr, + marshal: appendUint64Ptr, + unmarshal: consumeUint64Ptr, + merge: mergeUint64Ptr, +} + +// sizeUint64Slice returns the size of wire encoding a []uint64 pointer as a repeated Uint64. +func sizeUint64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Uint64Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(v) + } + return size +} + +// appendUint64Slice encodes a []uint64 pointer as a repeated Uint64. 
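+// Unpacked encoding repeats the field tag before every element, so n elements
+// cost n*(tagsize + varint size); the packed variants below pay the tag once.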
+func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Uint64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, v) + } + return b, nil +} + +// consumeUint64Slice wire decodes a []uint64 pointer as a repeated Uint64. +func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Uint64Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + s = append(s, v) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, errDecode + } + *sp = append(*sp, v) + out.n = n + return out, nil +} + +var coderUint64Slice = pointerCoderFuncs{ + size: sizeUint64Slice, + marshal: appendUint64Slice, + unmarshal: consumeUint64Slice, + merge: mergeUint64Slice, +} + +// sizeUint64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Uint64. +func sizeUint64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Uint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(v) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendUint64PackedSlice encodes a []uint64 pointer as a packed repeated Uint64. +func appendUint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Uint64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(v) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, v) + } + return b, nil +} + +var coderUint64PackedSlice = pointerCoderFuncs{ + size: sizeUint64PackedSlice, + marshal: appendUint64PackedSlice, + unmarshal: consumeUint64Slice, + merge: mergeUint64Slice, +} + +// sizeUint64Value returns the size of wire encoding a uint64 value as a Uint64. +func sizeUint64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeVarint(v.Uint()) +} + +// appendUint64Value encodes a uint64 value as a Uint64. +func appendUint64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, v.Uint()) + return b, nil +} + +// consumeUint64Value decodes a uint64 value as a Uint64. 
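+// The *Value coders operate on protoreflect.Value instead of a typed pointer;
+// they typically back fields accessed reflectively, such as extension fields.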
+func consumeUint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfUint64(v), out, nil +} + +var coderUint64Value = valueCoderFuncs{ + size: sizeUint64Value, + marshal: appendUint64Value, + unmarshal: consumeUint64Value, + merge: mergeScalarValue, +} + +// sizeUint64SliceValue returns the size of wire encoding a []uint64 value as a repeated Uint64. +func sizeUint64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(v.Uint()) + } + return size +} + +// appendUint64SliceValue encodes a []uint64 value as a repeated Uint64. +func appendUint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, v.Uint()) + } + return b, nil +} + +// consumeUint64SliceValue wire decodes a []uint64 value as a repeated Uint64. +func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfUint64(v)) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfUint64(v)) + out.n = n + return listv, out, nil +} + +var coderUint64SliceValue = valueCoderFuncs{ + size: sizeUint64SliceValue, + marshal: appendUint64SliceValue, + unmarshal: consumeUint64SliceValue, + merge: mergeListValue, +} + +// sizeUint64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Uint64. 
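+// The packed size is the tag once plus protowire.SizeBytes(n), where n is the
+// sum of the element varint sizes and SizeBytes(n) = SizeVarint(uint64(n)) + n.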
+func sizeUint64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(v.Uint()) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendUint64PackedSliceValue encodes a []uint64 value as a packed repeated Uint64. +func appendUint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(v.Uint()) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, v.Uint()) + } + return b, nil +} + +var coderUint64PackedSliceValue = valueCoderFuncs{ + size: sizeUint64PackedSliceValue, + marshal: appendUint64PackedSliceValue, + unmarshal: consumeUint64SliceValue, + merge: mergeListValue, +} + +// sizeSfixed32 returns the size of wire encoding a int32 pointer as a Sfixed32. +func sizeSfixed32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed32() +} + +// appendSfixed32 wire encodes a int32 pointer as a Sfixed32. +func appendSfixed32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, uint32(v)) + return b, nil +} + +// consumeSfixed32 wire decodes a int32 pointer as a Sfixed32. +func consumeSfixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + *p.Int32() = int32(v) + out.n = n + return out, nil +} + +var coderSfixed32 = pointerCoderFuncs{ + size: sizeSfixed32, + marshal: appendSfixed32, + unmarshal: consumeSfixed32, + merge: mergeInt32, +} + +// sizeSfixed32NoZero returns the size of wire encoding a int32 pointer as a Sfixed32. +// The zero value is not encoded. +func sizeSfixed32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Int32() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeFixed32() +} + +// appendSfixed32NoZero wire encodes a int32 pointer as a Sfixed32. +// The zero value is not encoded. +func appendSfixed32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int32() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, uint32(v)) + return b, nil +} + +var coderSfixed32NoZero = pointerCoderFuncs{ + size: sizeSfixed32NoZero, + marshal: appendSfixed32NoZero, + unmarshal: consumeSfixed32, + merge: mergeInt32NoZero, +} + +// sizeSfixed32Ptr returns the size of wire encoding a *int32 pointer as a Sfixed32. +// It panics if the pointer is nil. +func sizeSfixed32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed32() +} + +// appendSfixed32Ptr wire encodes a *int32 pointer as a Sfixed32. +// It panics if the pointer is nil. 
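+// Sfixed32 is a fixed-width encoding: a Fixed32Type record carrying the value
+// as 4 little-endian bytes, so every element costs tagsize+4 regardless of
+// magnitude. The Ptr coders typically back fields with explicit presence
+// (a nil pointer means unset), which is why nil is treated as a programming
+// error here rather than skipped.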
+func appendSfixed32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Int32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, uint32(v)) + return b, nil +} + +// consumeSfixed32Ptr wire decodes a *int32 pointer as a Sfixed32. +func consumeSfixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + vp := p.Int32Ptr() + if *vp == nil { + *vp = new(int32) + } + **vp = int32(v) + out.n = n + return out, nil +} + +var coderSfixed32Ptr = pointerCoderFuncs{ + size: sizeSfixed32Ptr, + marshal: appendSfixed32Ptr, + unmarshal: consumeSfixed32Ptr, + merge: mergeInt32Ptr, +} + +// sizeSfixed32Slice returns the size of wire encoding a []int32 pointer as a repeated Sfixed32. +func sizeSfixed32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int32Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed32()) + return size +} + +// appendSfixed32Slice encodes a []int32 pointer as a repeated Sfixed32. +func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, uint32(v)) + } + return b, nil +} + +// consumeSfixed32Slice wire decodes a []int32 pointer as a repeated Sfixed32. +func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int32Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + s = append(s, int32(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + *sp = append(*sp, int32(v)) + out.n = n + return out, nil +} + +var coderSfixed32Slice = pointerCoderFuncs{ + size: sizeSfixed32Slice, + marshal: appendSfixed32Slice, + unmarshal: consumeSfixed32Slice, + merge: mergeInt32Slice, +} + +// sizeSfixed32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sfixed32. +func sizeSfixed32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int32Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed32() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendSfixed32PackedSlice encodes a []int32 pointer as a packed repeated Sfixed32. +func appendSfixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed32(b, uint32(v)) + } + return b, nil +} + +var coderSfixed32PackedSlice = pointerCoderFuncs{ + size: sizeSfixed32PackedSlice, + marshal: appendSfixed32PackedSlice, + unmarshal: consumeSfixed32Slice, + merge: mergeInt32Slice, +} + +// sizeSfixed32Value returns the size of wire encoding a int32 value as a Sfixed32. 
+func sizeSfixed32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeFixed32() +} + +// appendSfixed32Value encodes a int32 value as a Sfixed32. +func appendSfixed32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, uint32(v.Int())) + return b, nil +} + +// consumeSfixed32Value decodes a int32 value as a Sfixed32. +func consumeSfixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfInt32(int32(v)), out, nil +} + +var coderSfixed32Value = valueCoderFuncs{ + size: sizeSfixed32Value, + marshal: appendSfixed32Value, + unmarshal: consumeSfixed32Value, + merge: mergeScalarValue, +} + +// sizeSfixed32SliceValue returns the size of wire encoding a []int32 value as a repeated Sfixed32. +func sizeSfixed32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed32()) + return size +} + +// appendSfixed32SliceValue encodes a []int32 value as a repeated Sfixed32. +func appendSfixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, uint32(v.Int())) + } + return b, nil +} + +// consumeSfixed32SliceValue wire decodes a []int32 value as a repeated Sfixed32. +func consumeSfixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + out.n = n + return listv, out, nil +} + +var coderSfixed32SliceValue = valueCoderFuncs{ + size: sizeSfixed32SliceValue, + marshal: appendSfixed32SliceValue, + unmarshal: consumeSfixed32SliceValue, + merge: mergeListValue, +} + +// sizeSfixed32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sfixed32. +func sizeSfixed32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed32() + return tagsize + protowire.SizeBytes(n) +} + +// appendSfixed32PackedSliceValue encodes a []int32 value as a packed repeated Sfixed32. 
+func appendSfixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed32(b, uint32(v.Int())) + } + return b, nil +} + +var coderSfixed32PackedSliceValue = valueCoderFuncs{ + size: sizeSfixed32PackedSliceValue, + marshal: appendSfixed32PackedSliceValue, + unmarshal: consumeSfixed32SliceValue, + merge: mergeListValue, +} + +// sizeFixed32 returns the size of wire encoding a uint32 pointer as a Fixed32. +func sizeFixed32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed32() +} + +// appendFixed32 wire encodes a uint32 pointer as a Fixed32. +func appendFixed32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Uint32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, v) + return b, nil +} + +// consumeFixed32 wire decodes a uint32 pointer as a Fixed32. +func consumeFixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + *p.Uint32() = v + out.n = n + return out, nil +} + +var coderFixed32 = pointerCoderFuncs{ + size: sizeFixed32, + marshal: appendFixed32, + unmarshal: consumeFixed32, + merge: mergeUint32, +} + +// sizeFixed32NoZero returns the size of wire encoding a uint32 pointer as a Fixed32. +// The zero value is not encoded. +func sizeFixed32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Uint32() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeFixed32() +} + +// appendFixed32NoZero wire encodes a uint32 pointer as a Fixed32. +// The zero value is not encoded. +func appendFixed32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Uint32() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, v) + return b, nil +} + +var coderFixed32NoZero = pointerCoderFuncs{ + size: sizeFixed32NoZero, + marshal: appendFixed32NoZero, + unmarshal: consumeFixed32, + merge: mergeUint32NoZero, +} + +// sizeFixed32Ptr returns the size of wire encoding a *uint32 pointer as a Fixed32. +// It panics if the pointer is nil. +func sizeFixed32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed32() +} + +// appendFixed32Ptr wire encodes a *uint32 pointer as a Fixed32. +// It panics if the pointer is nil. +func appendFixed32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Uint32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, v) + return b, nil +} + +// consumeFixed32Ptr wire decodes a *uint32 pointer as a Fixed32. 
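+// Decoding allocates the target *uint32 when it is nil, so field presence is
+// recorded even when the decoded value is zero.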
+func consumeFixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + vp := p.Uint32Ptr() + if *vp == nil { + *vp = new(uint32) + } + **vp = v + out.n = n + return out, nil +} + +var coderFixed32Ptr = pointerCoderFuncs{ + size: sizeFixed32Ptr, + marshal: appendFixed32Ptr, + unmarshal: consumeFixed32Ptr, + merge: mergeUint32Ptr, +} + +// sizeFixed32Slice returns the size of wire encoding a []uint32 pointer as a repeated Fixed32. +func sizeFixed32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Uint32Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed32()) + return size +} + +// appendFixed32Slice encodes a []uint32 pointer as a repeated Fixed32. +func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Uint32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, v) + } + return b, nil +} + +// consumeFixed32Slice wire decodes a []uint32 pointer as a repeated Fixed32. +func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Uint32Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + s = append(s, v) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + *sp = append(*sp, v) + out.n = n + return out, nil +} + +var coderFixed32Slice = pointerCoderFuncs{ + size: sizeFixed32Slice, + marshal: appendFixed32Slice, + unmarshal: consumeFixed32Slice, + merge: mergeUint32Slice, +} + +// sizeFixed32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Fixed32. +func sizeFixed32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Uint32Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed32() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendFixed32PackedSlice encodes a []uint32 pointer as a packed repeated Fixed32. +func appendFixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Uint32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed32(b, v) + } + return b, nil +} + +var coderFixed32PackedSlice = pointerCoderFuncs{ + size: sizeFixed32PackedSlice, + marshal: appendFixed32PackedSlice, + unmarshal: consumeFixed32Slice, + merge: mergeUint32Slice, +} + +// sizeFixed32Value returns the size of wire encoding a uint32 value as a Fixed32. +func sizeFixed32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeFixed32() +} + +// appendFixed32Value encodes a uint32 value as a Fixed32. 
+func appendFixed32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, uint32(v.Uint())) + return b, nil +} + +// consumeFixed32Value decodes a uint32 value as a Fixed32. +func consumeFixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfUint32(uint32(v)), out, nil +} + +var coderFixed32Value = valueCoderFuncs{ + size: sizeFixed32Value, + marshal: appendFixed32Value, + unmarshal: consumeFixed32Value, + merge: mergeScalarValue, +} + +// sizeFixed32SliceValue returns the size of wire encoding a []uint32 value as a repeated Fixed32. +func sizeFixed32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed32()) + return size +} + +// appendFixed32SliceValue encodes a []uint32 value as a repeated Fixed32. +func appendFixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, uint32(v.Uint())) + } + return b, nil +} + +// consumeFixed32SliceValue wire decodes a []uint32 value as a repeated Fixed32. +func consumeFixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + out.n = n + return listv, out, nil +} + +var coderFixed32SliceValue = valueCoderFuncs{ + size: sizeFixed32SliceValue, + marshal: appendFixed32SliceValue, + unmarshal: consumeFixed32SliceValue, + merge: mergeListValue, +} + +// sizeFixed32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Fixed32. +func sizeFixed32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed32() + return tagsize + protowire.SizeBytes(n) +} + +// appendFixed32PackedSliceValue encodes a []uint32 value as a packed repeated Fixed32. 
+func appendFixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed32(b, uint32(v.Uint())) + } + return b, nil +} + +var coderFixed32PackedSliceValue = valueCoderFuncs{ + size: sizeFixed32PackedSliceValue, + marshal: appendFixed32PackedSliceValue, + unmarshal: consumeFixed32SliceValue, + merge: mergeListValue, +} + +// sizeFloat returns the size of wire encoding a float32 pointer as a Float. +func sizeFloat(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed32() +} + +// appendFloat wire encodes a float32 pointer as a Float. +func appendFloat(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Float32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(v)) + return b, nil +} + +// consumeFloat wire decodes a float32 pointer as a Float. +func consumeFloat(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + *p.Float32() = math.Float32frombits(v) + out.n = n + return out, nil +} + +var coderFloat = pointerCoderFuncs{ + size: sizeFloat, + marshal: appendFloat, + unmarshal: consumeFloat, + merge: mergeFloat32, +} + +// sizeFloatNoZero returns the size of wire encoding a float32 pointer as a Float. +// The zero value is not encoded. +func sizeFloatNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Float32() + if v == 0 && !math.Signbit(float64(v)) { + return 0 + } + return f.tagsize + protowire.SizeFixed32() +} + +// appendFloatNoZero wire encodes a float32 pointer as a Float. +// The zero value is not encoded. +func appendFloatNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Float32() + if v == 0 && !math.Signbit(float64(v)) { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(v)) + return b, nil +} + +var coderFloatNoZero = pointerCoderFuncs{ + size: sizeFloatNoZero, + marshal: appendFloatNoZero, + unmarshal: consumeFloat, + merge: mergeFloat32NoZero, +} + +// sizeFloatPtr returns the size of wire encoding a *float32 pointer as a Float. +// It panics if the pointer is nil. +func sizeFloatPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed32() +} + +// appendFloatPtr wire encodes a *float32 pointer as a Float. +// It panics if the pointer is nil. +func appendFloatPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Float32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(v)) + return b, nil +} + +// consumeFloatPtr wire decodes a *float32 pointer as a Float. 
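+// Float values travel as their IEEE-754 bit pattern inside a Fixed32 record;
+// math.Float32frombits restores the value exactly, including negative zero,
+// which the NoZero variants above deliberately keep via their Signbit check.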
+func consumeFloatPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + vp := p.Float32Ptr() + if *vp == nil { + *vp = new(float32) + } + **vp = math.Float32frombits(v) + out.n = n + return out, nil +} + +var coderFloatPtr = pointerCoderFuncs{ + size: sizeFloatPtr, + marshal: appendFloatPtr, + unmarshal: consumeFloatPtr, + merge: mergeFloat32Ptr, +} + +// sizeFloatSlice returns the size of wire encoding a []float32 pointer as a repeated Float. +func sizeFloatSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Float32Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed32()) + return size +} + +// appendFloatSlice encodes a []float32 pointer as a repeated Float. +func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Float32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(v)) + } + return b, nil +} + +// consumeFloatSlice wire decodes a []float32 pointer as a repeated Float. +func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Float32Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + s = append(s, math.Float32frombits(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, errDecode + } + *sp = append(*sp, math.Float32frombits(v)) + out.n = n + return out, nil +} + +var coderFloatSlice = pointerCoderFuncs{ + size: sizeFloatSlice, + marshal: appendFloatSlice, + unmarshal: consumeFloatSlice, + merge: mergeFloat32Slice, +} + +// sizeFloatPackedSlice returns the size of wire encoding a []float32 pointer as a packed repeated Float. +func sizeFloatPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Float32Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed32() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendFloatPackedSlice encodes a []float32 pointer as a packed repeated Float. +func appendFloatPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Float32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed32(b, math.Float32bits(v)) + } + return b, nil +} + +var coderFloatPackedSlice = pointerCoderFuncs{ + size: sizeFloatPackedSlice, + marshal: appendFloatPackedSlice, + unmarshal: consumeFloatSlice, + merge: mergeFloat32Slice, +} + +// sizeFloatValue returns the size of wire encoding a float32 value as a Float. +func sizeFloatValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeFixed32() +} + +// appendFloatValue encodes a float32 value as a Float. 
+func appendFloatValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) + return b, nil +} + +// consumeFloatValue decodes a float32 value as a Float. +func consumeFloatValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), out, nil +} + +var coderFloatValue = valueCoderFuncs{ + size: sizeFloatValue, + marshal: appendFloatValue, + unmarshal: consumeFloatValue, + merge: mergeScalarValue, +} + +// sizeFloatSliceValue returns the size of wire encoding a []float32 value as a repeated Float. +func sizeFloatSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed32()) + return size +} + +// appendFloatSliceValue encodes a []float32 value as a repeated Float. +func appendFloatSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) + } + return b, nil +} + +// consumeFloatSliceValue wire decodes a []float32 value as a repeated Float. +func consumeFloatSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) + out.n = n + return listv, out, nil +} + +var coderFloatSliceValue = valueCoderFuncs{ + size: sizeFloatSliceValue, + marshal: appendFloatSliceValue, + unmarshal: consumeFloatSliceValue, + merge: mergeListValue, +} + +// sizeFloatPackedSliceValue returns the size of wire encoding a []float32 value as a packed repeated Float. +func sizeFloatPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed32() + return tagsize + protowire.SizeBytes(n) +} + +// appendFloatPackedSliceValue encodes a []float32 value as a packed repeated Float. 
+func appendFloatPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) + } + return b, nil +} + +var coderFloatPackedSliceValue = valueCoderFuncs{ + size: sizeFloatPackedSliceValue, + marshal: appendFloatPackedSliceValue, + unmarshal: consumeFloatSliceValue, + merge: mergeListValue, +} + +// sizeSfixed64 returns the size of wire encoding a int64 pointer as a Sfixed64. +func sizeSfixed64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed64() +} + +// appendSfixed64 wire encodes a int64 pointer as a Sfixed64. +func appendSfixed64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, uint64(v)) + return b, nil +} + +// consumeSfixed64 wire decodes a int64 pointer as a Sfixed64. +func consumeSfixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + *p.Int64() = int64(v) + out.n = n + return out, nil +} + +var coderSfixed64 = pointerCoderFuncs{ + size: sizeSfixed64, + marshal: appendSfixed64, + unmarshal: consumeSfixed64, + merge: mergeInt64, +} + +// sizeSfixed64NoZero returns the size of wire encoding a int64 pointer as a Sfixed64. +// The zero value is not encoded. +func sizeSfixed64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Int64() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeFixed64() +} + +// appendSfixed64NoZero wire encodes a int64 pointer as a Sfixed64. +// The zero value is not encoded. +func appendSfixed64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Int64() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, uint64(v)) + return b, nil +} + +var coderSfixed64NoZero = pointerCoderFuncs{ + size: sizeSfixed64NoZero, + marshal: appendSfixed64NoZero, + unmarshal: consumeSfixed64, + merge: mergeInt64NoZero, +} + +// sizeSfixed64Ptr returns the size of wire encoding a *int64 pointer as a Sfixed64. +// It panics if the pointer is nil. +func sizeSfixed64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed64() +} + +// appendSfixed64Ptr wire encodes a *int64 pointer as a Sfixed64. +// It panics if the pointer is nil. +func appendSfixed64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Int64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, uint64(v)) + return b, nil +} + +// consumeSfixed64Ptr wire decodes a *int64 pointer as a Sfixed64. 
+func consumeSfixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + vp := p.Int64Ptr() + if *vp == nil { + *vp = new(int64) + } + **vp = int64(v) + out.n = n + return out, nil +} + +var coderSfixed64Ptr = pointerCoderFuncs{ + size: sizeSfixed64Ptr, + marshal: appendSfixed64Ptr, + unmarshal: consumeSfixed64Ptr, + merge: mergeInt64Ptr, +} + +// sizeSfixed64Slice returns the size of wire encoding a []int64 pointer as a repeated Sfixed64. +func sizeSfixed64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int64Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed64()) + return size +} + +// appendSfixed64Slice encodes a []int64 pointer as a repeated Sfixed64. +func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, uint64(v)) + } + return b, nil +} + +// consumeSfixed64Slice wire decodes a []int64 pointer as a repeated Sfixed64. +func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int64Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + s = append(s, int64(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + *sp = append(*sp, int64(v)) + out.n = n + return out, nil +} + +var coderSfixed64Slice = pointerCoderFuncs{ + size: sizeSfixed64Slice, + marshal: appendSfixed64Slice, + unmarshal: consumeSfixed64Slice, + merge: mergeInt64Slice, +} + +// sizeSfixed64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sfixed64. +func sizeSfixed64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Int64Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed64() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendSfixed64PackedSlice encodes a []int64 pointer as a packed repeated Sfixed64. +func appendSfixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed64(b, uint64(v)) + } + return b, nil +} + +var coderSfixed64PackedSlice = pointerCoderFuncs{ + size: sizeSfixed64PackedSlice, + marshal: appendSfixed64PackedSlice, + unmarshal: consumeSfixed64Slice, + merge: mergeInt64Slice, +} + +// sizeSfixed64Value returns the size of wire encoding a int64 value as a Sfixed64. +func sizeSfixed64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeFixed64() +} + +// appendSfixed64Value encodes a int64 value as a Sfixed64. 
+func appendSfixed64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, uint64(v.Int())) + return b, nil +} + +// consumeSfixed64Value decodes a int64 value as a Sfixed64. +func consumeSfixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfInt64(int64(v)), out, nil +} + +var coderSfixed64Value = valueCoderFuncs{ + size: sizeSfixed64Value, + marshal: appendSfixed64Value, + unmarshal: consumeSfixed64Value, + merge: mergeScalarValue, +} + +// sizeSfixed64SliceValue returns the size of wire encoding a []int64 value as a repeated Sfixed64. +func sizeSfixed64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed64()) + return size +} + +// appendSfixed64SliceValue encodes a []int64 value as a repeated Sfixed64. +func appendSfixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, uint64(v.Int())) + } + return b, nil +} + +// consumeSfixed64SliceValue wire decodes a []int64 value as a repeated Sfixed64. +func consumeSfixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + out.n = n + return listv, out, nil +} + +var coderSfixed64SliceValue = valueCoderFuncs{ + size: sizeSfixed64SliceValue, + marshal: appendSfixed64SliceValue, + unmarshal: consumeSfixed64SliceValue, + merge: mergeListValue, +} + +// sizeSfixed64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sfixed64. +func sizeSfixed64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed64() + return tagsize + protowire.SizeBytes(n) +} + +// appendSfixed64PackedSliceValue encodes a []int64 value as a packed repeated Sfixed64. 
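+// For fixed-width kinds the packed payload length is simply llen*8 bytes, so
+// no per-element sizing pass is needed before writing the length prefix.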
+func appendSfixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed64(b, uint64(v.Int())) + } + return b, nil +} + +var coderSfixed64PackedSliceValue = valueCoderFuncs{ + size: sizeSfixed64PackedSliceValue, + marshal: appendSfixed64PackedSliceValue, + unmarshal: consumeSfixed64SliceValue, + merge: mergeListValue, +} + +// sizeFixed64 returns the size of wire encoding a uint64 pointer as a Fixed64. +func sizeFixed64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed64() +} + +// appendFixed64 wire encodes a uint64 pointer as a Fixed64. +func appendFixed64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Uint64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, v) + return b, nil +} + +// consumeFixed64 wire decodes a uint64 pointer as a Fixed64. +func consumeFixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + *p.Uint64() = v + out.n = n + return out, nil +} + +var coderFixed64 = pointerCoderFuncs{ + size: sizeFixed64, + marshal: appendFixed64, + unmarshal: consumeFixed64, + merge: mergeUint64, +} + +// sizeFixed64NoZero returns the size of wire encoding a uint64 pointer as a Fixed64. +// The zero value is not encoded. +func sizeFixed64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Uint64() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeFixed64() +} + +// appendFixed64NoZero wire encodes a uint64 pointer as a Fixed64. +// The zero value is not encoded. +func appendFixed64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Uint64() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, v) + return b, nil +} + +var coderFixed64NoZero = pointerCoderFuncs{ + size: sizeFixed64NoZero, + marshal: appendFixed64NoZero, + unmarshal: consumeFixed64, + merge: mergeUint64NoZero, +} + +// sizeFixed64Ptr returns the size of wire encoding a *uint64 pointer as a Fixed64. +// It panics if the pointer is nil. +func sizeFixed64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed64() +} + +// appendFixed64Ptr wire encodes a *uint64 pointer as a Fixed64. +// It panics if the pointer is nil. +func appendFixed64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Uint64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, v) + return b, nil +} + +// consumeFixed64Ptr wire decodes a *uint64 pointer as a Fixed64. 
+func consumeFixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + vp := p.Uint64Ptr() + if *vp == nil { + *vp = new(uint64) + } + **vp = v + out.n = n + return out, nil +} + +var coderFixed64Ptr = pointerCoderFuncs{ + size: sizeFixed64Ptr, + marshal: appendFixed64Ptr, + unmarshal: consumeFixed64Ptr, + merge: mergeUint64Ptr, +} + +// sizeFixed64Slice returns the size of wire encoding a []uint64 pointer as a repeated Fixed64. +func sizeFixed64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Uint64Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed64()) + return size +} + +// appendFixed64Slice encodes a []uint64 pointer as a repeated Fixed64. +func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Uint64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, v) + } + return b, nil +} + +// consumeFixed64Slice wire decodes a []uint64 pointer as a repeated Fixed64. +func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Uint64Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + s = append(s, v) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + *sp = append(*sp, v) + out.n = n + return out, nil +} + +var coderFixed64Slice = pointerCoderFuncs{ + size: sizeFixed64Slice, + marshal: appendFixed64Slice, + unmarshal: consumeFixed64Slice, + merge: mergeUint64Slice, +} + +// sizeFixed64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Fixed64. +func sizeFixed64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Uint64Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed64() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendFixed64PackedSlice encodes a []uint64 pointer as a packed repeated Fixed64. +func appendFixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Uint64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed64(b, v) + } + return b, nil +} + +var coderFixed64PackedSlice = pointerCoderFuncs{ + size: sizeFixed64PackedSlice, + marshal: appendFixed64PackedSlice, + unmarshal: consumeFixed64Slice, + merge: mergeUint64Slice, +} + +// sizeFixed64Value returns the size of wire encoding a uint64 value as a Fixed64. +func sizeFixed64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeFixed64() +} + +// appendFixed64Value encodes a uint64 value as a Fixed64. 
+func appendFixed64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, v.Uint()) + return b, nil +} + +// consumeFixed64Value decodes a uint64 value as a Fixed64. +func consumeFixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfUint64(v), out, nil +} + +var coderFixed64Value = valueCoderFuncs{ + size: sizeFixed64Value, + marshal: appendFixed64Value, + unmarshal: consumeFixed64Value, + merge: mergeScalarValue, +} + +// sizeFixed64SliceValue returns the size of wire encoding a []uint64 value as a repeated Fixed64. +func sizeFixed64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed64()) + return size +} + +// appendFixed64SliceValue encodes a []uint64 value as a repeated Fixed64. +func appendFixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, v.Uint()) + } + return b, nil +} + +// consumeFixed64SliceValue wire decodes a []uint64 value as a repeated Fixed64. +func consumeFixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfUint64(v)) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfUint64(v)) + out.n = n + return listv, out, nil +} + +var coderFixed64SliceValue = valueCoderFuncs{ + size: sizeFixed64SliceValue, + marshal: appendFixed64SliceValue, + unmarshal: consumeFixed64SliceValue, + merge: mergeListValue, +} + +// sizeFixed64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Fixed64. +func sizeFixed64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed64() + return tagsize + protowire.SizeBytes(n) +} + +// appendFixed64PackedSliceValue encodes a []uint64 value as a packed repeated Fixed64. 
+func appendFixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed64(b, v.Uint()) + } + return b, nil +} + +var coderFixed64PackedSliceValue = valueCoderFuncs{ + size: sizeFixed64PackedSliceValue, + marshal: appendFixed64PackedSliceValue, + unmarshal: consumeFixed64SliceValue, + merge: mergeListValue, +} + +// sizeDouble returns the size of wire encoding a float64 pointer as a Double. +func sizeDouble(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed64() +} + +// appendDouble wire encodes a float64 pointer as a Double. +func appendDouble(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Float64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v)) + return b, nil +} + +// consumeDouble wire decodes a float64 pointer as a Double. +func consumeDouble(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + *p.Float64() = math.Float64frombits(v) + out.n = n + return out, nil +} + +var coderDouble = pointerCoderFuncs{ + size: sizeDouble, + marshal: appendDouble, + unmarshal: consumeDouble, + merge: mergeFloat64, +} + +// sizeDoubleNoZero returns the size of wire encoding a float64 pointer as a Double. +// The zero value is not encoded. +func sizeDoubleNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Float64() + if v == 0 && !math.Signbit(float64(v)) { + return 0 + } + return f.tagsize + protowire.SizeFixed64() +} + +// appendDoubleNoZero wire encodes a float64 pointer as a Double. +// The zero value is not encoded. +func appendDoubleNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Float64() + if v == 0 && !math.Signbit(float64(v)) { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v)) + return b, nil +} + +var coderDoubleNoZero = pointerCoderFuncs{ + size: sizeDoubleNoZero, + marshal: appendDoubleNoZero, + unmarshal: consumeDouble, + merge: mergeFloat64NoZero, +} + +// sizeDoublePtr returns the size of wire encoding a *float64 pointer as a Double. +// It panics if the pointer is nil. +func sizeDoublePtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed64() +} + +// appendDoublePtr wire encodes a *float64 pointer as a Double. +// It panics if the pointer is nil. +func appendDoublePtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.Float64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v)) + return b, nil +} + +// consumeDoublePtr wire decodes a *float64 pointer as a Double. 
+func consumeDoublePtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + vp := p.Float64Ptr() + if *vp == nil { + *vp = new(float64) + } + **vp = math.Float64frombits(v) + out.n = n + return out, nil +} + +var coderDoublePtr = pointerCoderFuncs{ + size: sizeDoublePtr, + marshal: appendDoublePtr, + unmarshal: consumeDoublePtr, + merge: mergeFloat64Ptr, +} + +// sizeDoubleSlice returns the size of wire encoding a []float64 pointer as a repeated Double. +func sizeDoubleSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Float64Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed64()) + return size +} + +// appendDoubleSlice encodes a []float64 pointer as a repeated Double. +func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Float64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v)) + } + return b, nil +} + +// consumeDoubleSlice wire decodes a []float64 pointer as a repeated Double. +func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Float64Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + s = append(s, math.Float64frombits(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, errDecode + } + *sp = append(*sp, math.Float64frombits(v)) + out.n = n + return out, nil +} + +var coderDoubleSlice = pointerCoderFuncs{ + size: sizeDoubleSlice, + marshal: appendDoubleSlice, + unmarshal: consumeDoubleSlice, + merge: mergeFloat64Slice, +} + +// sizeDoublePackedSlice returns the size of wire encoding a []float64 pointer as a packed repeated Double. +func sizeDoublePackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.Float64Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed64() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendDoublePackedSlice encodes a []float64 pointer as a packed repeated Double. +func appendDoublePackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.Float64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed64(b, math.Float64bits(v)) + } + return b, nil +} + +var coderDoublePackedSlice = pointerCoderFuncs{ + size: sizeDoublePackedSlice, + marshal: appendDoublePackedSlice, + unmarshal: consumeDoubleSlice, + merge: mergeFloat64Slice, +} + +// sizeDoubleValue returns the size of wire encoding a float64 value as a Double. +func sizeDoubleValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeFixed64() +} + +// appendDoubleValue encodes a float64 value as a Double. 
+func appendDoubleValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) + return b, nil +} + +// consumeDoubleValue decodes a float64 value as a Double. +func consumeDoubleValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfFloat64(math.Float64frombits(v)), out, nil +} + +var coderDoubleValue = valueCoderFuncs{ + size: sizeDoubleValue, + marshal: appendDoubleValue, + unmarshal: consumeDoubleValue, + merge: mergeScalarValue, +} + +// sizeDoubleSliceValue returns the size of wire encoding a []float64 value as a repeated Double. +func sizeDoubleSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed64()) + return size +} + +// appendDoubleSliceValue encodes a []float64 value as a repeated Double. +func appendDoubleSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) + } + return b, nil +} + +// consumeDoubleSliceValue wire decodes a []float64 value as a repeated Double. +func consumeDoubleSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) + out.n = n + return listv, out, nil +} + +var coderDoubleSliceValue = valueCoderFuncs{ + size: sizeDoubleSliceValue, + marshal: appendDoubleSliceValue, + unmarshal: consumeDoubleSliceValue, + merge: mergeListValue, +} + +// sizeDoublePackedSliceValue returns the size of wire encoding a []float64 value as a packed repeated Double. +func sizeDoublePackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed64() + return tagsize + protowire.SizeBytes(n) +} + +// appendDoublePackedSliceValue encodes a []float64 value as a packed repeated Double. 
+func appendDoublePackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) + } + return b, nil +} + +var coderDoublePackedSliceValue = valueCoderFuncs{ + size: sizeDoublePackedSliceValue, + marshal: appendDoublePackedSliceValue, + unmarshal: consumeDoubleSliceValue, + merge: mergeListValue, +} + +// sizeString returns the size of wire encoding a string pointer as a String. +func sizeString(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.String() + return f.tagsize + protowire.SizeBytes(len(v)) +} + +// appendString wire encodes a string pointer as a String. +func appendString(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.String() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + return b, nil +} + +// consumeString wire decodes a string pointer as a String. +func consumeString(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + *p.String() = string(v) + out.n = n + return out, nil +} + +var coderString = pointerCoderFuncs{ + size: sizeString, + marshal: appendString, + unmarshal: consumeString, + merge: mergeString, +} + +// appendStringValidateUTF8 wire encodes a string pointer as a String. +func appendStringValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.String() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + if !utf8.ValidString(v) { + return b, errInvalidUTF8{} + } + return b, nil +} + +// consumeStringValidateUTF8 wire decodes a string pointer as a String. +func consumeStringValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + if !utf8.Valid(v) { + return out, errInvalidUTF8{} + } + *p.String() = string(v) + out.n = n + return out, nil +} + +var coderStringValidateUTF8 = pointerCoderFuncs{ + size: sizeString, + marshal: appendStringValidateUTF8, + unmarshal: consumeStringValidateUTF8, + merge: mergeString, +} + +// sizeStringNoZero returns the size of wire encoding a string pointer as a String. +// The zero value is not encoded. +func sizeStringNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.String() + if len(v) == 0 { + return 0 + } + return f.tagsize + protowire.SizeBytes(len(v)) +} + +// appendStringNoZero wire encodes a string pointer as a String. +// The zero value is not encoded. 
+func appendStringNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.String() + if len(v) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + return b, nil +} + +var coderStringNoZero = pointerCoderFuncs{ + size: sizeStringNoZero, + marshal: appendStringNoZero, + unmarshal: consumeString, + merge: mergeStringNoZero, +} + +// appendStringNoZeroValidateUTF8 wire encodes a string pointer as a String. +// The zero value is not encoded. +func appendStringNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.String() + if len(v) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + if !utf8.ValidString(v) { + return b, errInvalidUTF8{} + } + return b, nil +} + +var coderStringNoZeroValidateUTF8 = pointerCoderFuncs{ + size: sizeStringNoZero, + marshal: appendStringNoZeroValidateUTF8, + unmarshal: consumeStringValidateUTF8, + merge: mergeStringNoZero, +} + +// sizeStringPtr returns the size of wire encoding a *string pointer as a String. +// It panics if the pointer is nil. +func sizeStringPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := **p.StringPtr() + return f.tagsize + protowire.SizeBytes(len(v)) +} + +// appendStringPtr wire encodes a *string pointer as a String. +// It panics if the pointer is nil. +func appendStringPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.StringPtr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + return b, nil +} + +// consumeStringPtr wire decodes a *string pointer as a String. +func consumeStringPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + vp := p.StringPtr() + if *vp == nil { + *vp = new(string) + } + **vp = string(v) + out.n = n + return out, nil +} + +var coderStringPtr = pointerCoderFuncs{ + size: sizeStringPtr, + marshal: appendStringPtr, + unmarshal: consumeStringPtr, + merge: mergeStringPtr, +} + +// appendStringPtrValidateUTF8 wire encodes a *string pointer as a String. +// It panics if the pointer is nil. +func appendStringPtrValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := **p.StringPtr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + if !utf8.ValidString(v) { + return b, errInvalidUTF8{} + } + return b, nil +} + +// consumeStringPtrValidateUTF8 wire decodes a *string pointer as a String. +func consumeStringPtrValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + if !utf8.Valid(v) { + return out, errInvalidUTF8{} + } + vp := p.StringPtr() + if *vp == nil { + *vp = new(string) + } + **vp = string(v) + out.n = n + return out, nil +} + +var coderStringPtrValidateUTF8 = pointerCoderFuncs{ + size: sizeStringPtr, + marshal: appendStringPtrValidateUTF8, + unmarshal: consumeStringPtrValidateUTF8, + merge: mergeStringPtr, +} + +// sizeStringSlice returns the size of wire encoding a []string pointer as a repeated String. 
+func sizeStringSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.StringSlice() + for _, v := range s { + size += f.tagsize + protowire.SizeBytes(len(v)) + } + return size +} + +// appendStringSlice encodes a []string pointer as a repeated String. +func appendStringSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.StringSlice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + } + return b, nil +} + +// consumeStringSlice wire decodes a []string pointer as a repeated String. +func consumeStringSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.StringSlice() + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + *sp = append(*sp, string(v)) + out.n = n + return out, nil +} + +var coderStringSlice = pointerCoderFuncs{ + size: sizeStringSlice, + marshal: appendStringSlice, + unmarshal: consumeStringSlice, + merge: mergeStringSlice, +} + +// appendStringSliceValidateUTF8 encodes a []string pointer as a repeated String. +func appendStringSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.StringSlice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + if !utf8.ValidString(v) { + return b, errInvalidUTF8{} + } + } + return b, nil +} + +// consumeStringSliceValidateUTF8 wire decodes a []string pointer as a repeated String. +func consumeStringSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + if !utf8.Valid(v) { + return out, errInvalidUTF8{} + } + sp := p.StringSlice() + *sp = append(*sp, string(v)) + out.n = n + return out, nil +} + +var coderStringSliceValidateUTF8 = pointerCoderFuncs{ + size: sizeStringSlice, + marshal: appendStringSliceValidateUTF8, + unmarshal: consumeStringSliceValidateUTF8, + merge: mergeStringSlice, +} + +// sizeStringValue returns the size of wire encoding a string value as a String. +func sizeStringValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeBytes(len(v.String())) +} + +// appendStringValue encodes a string value as a String. +func appendStringValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendString(b, v.String()) + return b, nil +} + +// consumeStringValue decodes a string value as a String. +func consumeStringValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfString(string(v)), out, nil +} + +var coderStringValue = valueCoderFuncs{ + size: sizeStringValue, + marshal: appendStringValue, + unmarshal: consumeStringValue, + merge: mergeScalarValue, +} + +// appendStringValueValidateUTF8 encodes a string value as a String. 
+func appendStringValueValidateUTF8(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendString(b, v.String()) + if !utf8.ValidString(v.String()) { + return b, errInvalidUTF8{} + } + return b, nil +} + +// consumeStringValueValidateUTF8 decodes a string value as a String. +func consumeStringValueValidateUTF8(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + if !utf8.Valid(v) { + return protoreflect.Value{}, out, errInvalidUTF8{} + } + out.n = n + return protoreflect.ValueOfString(string(v)), out, nil +} + +var coderStringValueValidateUTF8 = valueCoderFuncs{ + size: sizeStringValue, + marshal: appendStringValueValidateUTF8, + unmarshal: consumeStringValueValidateUTF8, + merge: mergeScalarValue, +} + +// sizeStringSliceValue returns the size of wire encoding a []string value as a repeated String. +func sizeStringSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeBytes(len(v.String())) + } + return size +} + +// appendStringSliceValue encodes a []string value as a repeated String. +func appendStringSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendString(b, v.String()) + } + return b, nil +} + +// consumeStringSliceValue wire decodes a []string value as a repeated String. +func consumeStringSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp != protowire.BytesType { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfString(string(v))) + out.n = n + return listv, out, nil +} + +var coderStringSliceValue = valueCoderFuncs{ + size: sizeStringSliceValue, + marshal: appendStringSliceValue, + unmarshal: consumeStringSliceValue, + merge: mergeListValue, +} + +// sizeBytes returns the size of wire encoding a []byte pointer as a Bytes. +func sizeBytes(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Bytes() + return f.tagsize + protowire.SizeBytes(len(v)) +} + +// appendBytes wire encodes a []byte pointer as a Bytes. +func appendBytes(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Bytes() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + return b, nil +} + +// consumeBytes wire decodes a []byte pointer as a Bytes. +func consumeBytes(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + *p.Bytes() = append(emptyBuf[:], v...) 
+ out.n = n + return out, nil +} + +var coderBytes = pointerCoderFuncs{ + size: sizeBytes, + marshal: appendBytes, + unmarshal: consumeBytes, + merge: mergeBytes, +} + +// appendBytesValidateUTF8 wire encodes a []byte pointer as a Bytes. +func appendBytesValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Bytes() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + if !utf8.Valid(v) { + return b, errInvalidUTF8{} + } + return b, nil +} + +// consumeBytesValidateUTF8 wire decodes a []byte pointer as a Bytes. +func consumeBytesValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + if !utf8.Valid(v) { + return out, errInvalidUTF8{} + } + *p.Bytes() = append(emptyBuf[:], v...) + out.n = n + return out, nil +} + +var coderBytesValidateUTF8 = pointerCoderFuncs{ + size: sizeBytes, + marshal: appendBytesValidateUTF8, + unmarshal: consumeBytesValidateUTF8, + merge: mergeBytes, +} + +// sizeBytesNoZero returns the size of wire encoding a []byte pointer as a Bytes. +// The zero value is not encoded. +func sizeBytesNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + v := *p.Bytes() + if len(v) == 0 { + return 0 + } + return f.tagsize + protowire.SizeBytes(len(v)) +} + +// appendBytesNoZero wire encodes a []byte pointer as a Bytes. +// The zero value is not encoded. +func appendBytesNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Bytes() + if len(v) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + return b, nil +} + +// consumeBytesNoZero wire decodes a []byte pointer as a Bytes. +// The zero value is not decoded. +func consumeBytesNoZero(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + *p.Bytes() = append(([]byte)(nil), v...) + out.n = n + return out, nil +} + +var coderBytesNoZero = pointerCoderFuncs{ + size: sizeBytesNoZero, + marshal: appendBytesNoZero, + unmarshal: consumeBytesNoZero, + merge: mergeBytesNoZero, +} + +// appendBytesNoZeroValidateUTF8 wire encodes a []byte pointer as a Bytes. +// The zero value is not encoded. +func appendBytesNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := *p.Bytes() + if len(v) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + if !utf8.Valid(v) { + return b, errInvalidUTF8{} + } + return b, nil +} + +// consumeBytesNoZeroValidateUTF8 wire decodes a []byte pointer as a Bytes. +func consumeBytesNoZeroValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + if !utf8.Valid(v) { + return out, errInvalidUTF8{} + } + *p.Bytes() = append(([]byte)(nil), v...) 
+ out.n = n + return out, nil +} + +var coderBytesNoZeroValidateUTF8 = pointerCoderFuncs{ + size: sizeBytesNoZero, + marshal: appendBytesNoZeroValidateUTF8, + unmarshal: consumeBytesNoZeroValidateUTF8, + merge: mergeBytesNoZero, +} + +// sizeBytesSlice returns the size of wire encoding a [][]byte pointer as a repeated Bytes. +func sizeBytesSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := *p.BytesSlice() + for _, v := range s { + size += f.tagsize + protowire.SizeBytes(len(v)) + } + return size +} + +// appendBytesSlice encodes a [][]byte pointer as a repeated Bytes. +func appendBytesSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.BytesSlice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + } + return b, nil +} + +// consumeBytesSlice wire decodes a [][]byte pointer as a repeated Bytes. +func consumeBytesSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.BytesSlice() + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + *sp = append(*sp, append(emptyBuf[:], v...)) + out.n = n + return out, nil +} + +var coderBytesSlice = pointerCoderFuncs{ + size: sizeBytesSlice, + marshal: appendBytesSlice, + unmarshal: consumeBytesSlice, + merge: mergeBytesSlice, +} + +// appendBytesSliceValidateUTF8 encodes a [][]byte pointer as a repeated Bytes. +func appendBytesSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := *p.BytesSlice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + if !utf8.Valid(v) { + return b, errInvalidUTF8{} + } + } + return b, nil +} + +// consumeBytesSliceValidateUTF8 wire decodes a [][]byte pointer as a repeated Bytes. +func consumeBytesSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + if !utf8.Valid(v) { + return out, errInvalidUTF8{} + } + sp := p.BytesSlice() + *sp = append(*sp, append(emptyBuf[:], v...)) + out.n = n + return out, nil +} + +var coderBytesSliceValidateUTF8 = pointerCoderFuncs{ + size: sizeBytesSlice, + marshal: appendBytesSliceValidateUTF8, + unmarshal: consumeBytesSliceValidateUTF8, + merge: mergeBytesSlice, +} + +// sizeBytesValue returns the size of wire encoding a []byte value as a Bytes. +func sizeBytesValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { + return tagsize + protowire.SizeBytes(len(v.Bytes())) +} + +// appendBytesValue encodes a []byte value as a Bytes. +func appendBytesValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendBytes(b, v.Bytes()) + return b, nil +} + +// consumeBytesValue decodes a []byte value as a Bytes. 
+func consumeBytesValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + out.n = n + return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), out, nil +} + +var coderBytesValue = valueCoderFuncs{ + size: sizeBytesValue, + marshal: appendBytesValue, + unmarshal: consumeBytesValue, + merge: mergeBytesValue, +} + +// sizeBytesSliceValue returns the size of wire encoding a [][]byte value as a repeated Bytes. +func sizeBytesSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeBytes(len(v.Bytes())) + } + return size +} + +// appendBytesSliceValue encodes a [][]byte value as a repeated Bytes. +func appendBytesSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendBytes(b, v.Bytes()) + } + return b, nil +} + +// consumeBytesSliceValue wire decodes a [][]byte value as a repeated Bytes. +func consumeBytesSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp != protowire.BytesType { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, errDecode + } + list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...))) + out.n = n + return listv, out, nil +} + +var coderBytesSliceValue = valueCoderFuncs{ + size: sizeBytesSliceValue, + marshal: appendBytesSliceValue, + unmarshal: consumeBytesSliceValue, + merge: mergeBytesListValue, +} + +// We append to an empty array rather than a nil []byte to get non-nil zero-length byte slices. +var emptyBuf [0]byte + +var wireTypes = map[protoreflect.Kind]protowire.Type{ + protoreflect.BoolKind: protowire.VarintType, + protoreflect.EnumKind: protowire.VarintType, + protoreflect.Int32Kind: protowire.VarintType, + protoreflect.Sint32Kind: protowire.VarintType, + protoreflect.Uint32Kind: protowire.VarintType, + protoreflect.Int64Kind: protowire.VarintType, + protoreflect.Sint64Kind: protowire.VarintType, + protoreflect.Uint64Kind: protowire.VarintType, + protoreflect.Sfixed32Kind: protowire.Fixed32Type, + protoreflect.Fixed32Kind: protowire.Fixed32Type, + protoreflect.FloatKind: protowire.Fixed32Type, + protoreflect.Sfixed64Kind: protowire.Fixed64Type, + protoreflect.Fixed64Kind: protowire.Fixed64Type, + protoreflect.DoubleKind: protowire.Fixed64Type, + protoreflect.StringKind: protowire.BytesType, + protoreflect.BytesKind: protowire.BytesType, + protoreflect.MessageKind: protowire.BytesType, + protoreflect.GroupKind: protowire.StartGroupType, +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go new file mode 100644 index 000000000..c1245fef4 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -0,0 +1,388 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "reflect" + "sort" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/genid" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type mapInfo struct { + goType reflect.Type + keyWiretag uint64 + valWiretag uint64 + keyFuncs valueCoderFuncs + valFuncs valueCoderFuncs + keyZero pref.Value + keyKind pref.Kind + conv *mapConverter +} + +func encoderFuncsForMap(fd pref.FieldDescriptor, ft reflect.Type) (valueMessage *MessageInfo, funcs pointerCoderFuncs) { + // TODO: Consider generating specialized map coders. + keyField := fd.MapKey() + valField := fd.MapValue() + keyWiretag := protowire.EncodeTag(1, wireTypes[keyField.Kind()]) + valWiretag := protowire.EncodeTag(2, wireTypes[valField.Kind()]) + keyFuncs := encoderFuncsForValue(keyField) + valFuncs := encoderFuncsForValue(valField) + conv := newMapConverter(ft, fd) + + mapi := &mapInfo{ + goType: ft, + keyWiretag: keyWiretag, + valWiretag: valWiretag, + keyFuncs: keyFuncs, + valFuncs: valFuncs, + keyZero: keyField.Default(), + keyKind: keyField.Kind(), + conv: conv, + } + if valField.Kind() == pref.MessageKind { + valueMessage = getMessageInfo(ft.Elem()) + } + + funcs = pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return sizeMap(p.AsValueOf(ft).Elem(), mapi, f, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + return appendMap(b, p.AsValueOf(ft).Elem(), mapi, f, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + mp := p.AsValueOf(ft) + if mp.Elem().IsNil() { + mp.Elem().Set(reflect.MakeMap(mapi.goType)) + } + if f.mi == nil { + return consumeMap(b, mp.Elem(), wtyp, mapi, f, opts) + } else { + return consumeMapOfMessage(b, mp.Elem(), wtyp, mapi, f, opts) + } + }, + } + switch valField.Kind() { + case pref.MessageKind: + funcs.merge = mergeMapOfMessage + case pref.BytesKind: + funcs.merge = mergeMapOfBytes + default: + funcs.merge = mergeMap + } + if valFuncs.isInit != nil { + funcs.isInit = func(p pointer, f *coderFieldInfo) error { + return isInitMap(p.AsValueOf(ft).Elem(), mapi, f) + } + } + return valueMessage, funcs +} + +const ( + mapKeyTagSize = 1 // field 1, tag size 1. + mapValTagSize = 1 // field 2, tag size 2. 
+) + +func sizeMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) int { + if mapv.Len() == 0 { + return 0 + } + n := 0 + iter := mapRange(mapv) + for iter.Next() { + key := mapi.conv.keyConv.PBValueOf(iter.Key()).MapKey() + keySize := mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) + var valSize int + value := mapi.conv.valConv.PBValueOf(iter.Value()) + if f.mi == nil { + valSize = mapi.valFuncs.size(value, mapValTagSize, opts) + } else { + p := pointerOfValue(iter.Value()) + valSize += mapValTagSize + valSize += protowire.SizeBytes(f.mi.sizePointer(p, opts)) + } + n += f.tagsize + protowire.SizeBytes(keySize+valSize) + } + return n +} + +func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + var ( + key = mapi.keyZero + val = mapi.conv.valConv.New() + ) + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return out, errDecode + } + if num > protowire.MaxValidNumber { + return out, errDecode + } + b = b[n:] + err := errUnknown + switch num { + case genid.MapEntry_Key_field_number: + var v pref.Value + var o unmarshalOutput + v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts) + if err != nil { + break + } + key = v + n = o.n + case genid.MapEntry_Value_field_number: + var v pref.Value + var o unmarshalOutput + v, o, err = mapi.valFuncs.unmarshal(b, val, num, wtyp, opts) + if err != nil { + break + } + val = v + n = o.n + } + if err == errUnknown { + n = protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return out, errDecode + } + } else if err != nil { + return out, err + } + b = b[n:] + } + mapv.SetMapIndex(mapi.conv.keyConv.GoValueOf(key), mapi.conv.valConv.GoValueOf(val)) + out.n = n + return out, nil +} + +func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + var ( + key = mapi.keyZero + val = reflect.New(f.mi.GoReflectType.Elem()) + ) + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return out, errDecode + } + if num > protowire.MaxValidNumber { + return out, errDecode + } + b = b[n:] + err := errUnknown + switch num { + case 1: + var v pref.Value + var o unmarshalOutput + v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts) + if err != nil { + break + } + key = v + n = o.n + case 2: + if wtyp != protowire.BytesType { + break + } + var v []byte + v, n = protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + var o unmarshalOutput + o, err = f.mi.unmarshalPointer(v, pointerOfValue(val), 0, opts) + if o.initialized { + // Consider this map item initialized so long as we see + // an initialized value. 
+ out.initialized = true + } + } + if err == errUnknown { + n = protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return out, errDecode + } + } else if err != nil { + return out, err + } + b = b[n:] + } + mapv.SetMapIndex(mapi.conv.keyConv.GoValueOf(key), val) + out.n = n + return out, nil +} + +func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + if f.mi == nil { + key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey() + val := mapi.conv.valConv.PBValueOf(valrv) + size := 0 + size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) + size += mapi.valFuncs.size(val, mapValTagSize, opts) + b = protowire.AppendVarint(b, uint64(size)) + b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts) + if err != nil { + return nil, err + } + return mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts) + } else { + key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey() + val := pointerOfValue(valrv) + valSize := f.mi.sizePointer(val, opts) + size := 0 + size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) + size += mapValTagSize + protowire.SizeBytes(valSize) + b = protowire.AppendVarint(b, uint64(size)) + b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts) + if err != nil { + return nil, err + } + b = protowire.AppendVarint(b, mapi.valWiretag) + b = protowire.AppendVarint(b, uint64(valSize)) + return f.mi.marshalAppendPointer(b, val, opts) + } +} + +func appendMap(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + if mapv.Len() == 0 { + return b, nil + } + if opts.Deterministic() { + return appendMapDeterministic(b, mapv, mapi, f, opts) + } + iter := mapRange(mapv) + for iter.Next() { + var err error + b = protowire.AppendVarint(b, f.wiretag) + b, err = appendMapItem(b, iter.Key(), iter.Value(), mapi, f, opts) + if err != nil { + return b, err + } + } + return b, nil +} + +func appendMapDeterministic(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + keys := mapv.MapKeys() + sort.Slice(keys, func(i, j int) bool { + switch keys[i].Kind() { + case reflect.Bool: + return !keys[i].Bool() && keys[j].Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return keys[i].Int() < keys[j].Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return keys[i].Uint() < keys[j].Uint() + case reflect.Float32, reflect.Float64: + return keys[i].Float() < keys[j].Float() + case reflect.String: + return keys[i].String() < keys[j].String() + default: + panic("invalid kind: " + keys[i].Kind().String()) + } + }) + for _, key := range keys { + var err error + b = protowire.AppendVarint(b, f.wiretag) + b, err = appendMapItem(b, key, mapv.MapIndex(key), mapi, f, opts) + if err != nil { + return b, err + } + } + return b, nil +} + +func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error { + if mi := f.mi; mi != nil { + mi.init() + if !mi.needsInitCheck { + return nil + } + iter := mapRange(mapv) + for iter.Next() { + val := pointerOfValue(iter.Value()) + if err := mi.checkInitializedPointer(val); err != nil { + return err + } + } + } else { + iter := mapRange(mapv) + for iter.Next() { + val := mapi.conv.valConv.PBValueOf(iter.Value()) + if err := mapi.valFuncs.isInit(val); err != nil { + return err + } + } + } + return nil +} + +func mergeMap(dst, src pointer, f *coderFieldInfo, opts 
mergeOptions) { + dstm := dst.AsValueOf(f.ft).Elem() + srcm := src.AsValueOf(f.ft).Elem() + if srcm.Len() == 0 { + return + } + if dstm.IsNil() { + dstm.Set(reflect.MakeMap(f.ft)) + } + iter := mapRange(srcm) + for iter.Next() { + dstm.SetMapIndex(iter.Key(), iter.Value()) + } +} + +func mergeMapOfBytes(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + dstm := dst.AsValueOf(f.ft).Elem() + srcm := src.AsValueOf(f.ft).Elem() + if srcm.Len() == 0 { + return + } + if dstm.IsNil() { + dstm.Set(reflect.MakeMap(f.ft)) + } + iter := mapRange(srcm) + for iter.Next() { + dstm.SetMapIndex(iter.Key(), reflect.ValueOf(append(emptyBuf[:], iter.Value().Bytes()...))) + } +} + +func mergeMapOfMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + dstm := dst.AsValueOf(f.ft).Elem() + srcm := src.AsValueOf(f.ft).Elem() + if srcm.Len() == 0 { + return + } + if dstm.IsNil() { + dstm.Set(reflect.MakeMap(f.ft)) + } + iter := mapRange(srcm) + for iter.Next() { + val := reflect.New(f.ft.Elem().Elem()) + if f.mi != nil { + f.mi.mergePointer(pointerOfValue(val), pointerOfValue(iter.Value()), opts) + } else { + opts.Merge(asMessage(val), asMessage(iter.Value())) + } + dstm.SetMapIndex(iter.Key(), val) + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go new file mode 100644 index 000000000..4b15493f2 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go @@ -0,0 +1,38 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.12 +// +build !go1.12 + +package impl + +import "reflect" + +type mapIter struct { + v reflect.Value + keys []reflect.Value +} + +// mapRange provides a less-efficient equivalent to +// the Go 1.12 reflect.Value.MapRange method. +func mapRange(v reflect.Value) *mapIter { + return &mapIter{v: v} +} + +func (i *mapIter) Next() bool { + if i.keys == nil { + i.keys = i.v.MapKeys() + } else { + i.keys = i.keys[1:] + } + return len(i.keys) > 0 +} + +func (i *mapIter) Key() reflect.Value { + return i.keys[0] +} + +func (i *mapIter) Value() reflect.Value { + return i.v.MapIndex(i.keys[0]) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go new file mode 100644 index 000000000..0b31b66ea --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go @@ -0,0 +1,12 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.12 +// +build go1.12 + +package impl + +import "reflect" + +func mapRange(v reflect.Value) *reflect.MapIter { return v.MapRange() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go new file mode 100644 index 000000000..cd40527ff --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -0,0 +1,217 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impl + +import ( + "fmt" + "reflect" + "sort" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/order" + pref "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// coderMessageInfo contains per-message information used by the fast-path functions. +// This is a different type from MessageInfo to keep MessageInfo as general-purpose as +// possible. +type coderMessageInfo struct { + methods piface.Methods + + orderedCoderFields []*coderFieldInfo + denseCoderFields []*coderFieldInfo + coderFields map[protowire.Number]*coderFieldInfo + sizecacheOffset offset + unknownOffset offset + unknownPtrKind bool + extensionOffset offset + needsInitCheck bool + isMessageSet bool + numRequiredFields uint8 +} + +type coderFieldInfo struct { + funcs pointerCoderFuncs // fast-path per-field functions + mi *MessageInfo // field's message + ft reflect.Type + validation validationInfo // information used by message validation + num pref.FieldNumber // field number + offset offset // struct field offset + wiretag uint64 // field tag (number + wire type) + tagsize int // size of the varint-encoded tag + isPointer bool // true if IsNil may be called on the struct field + isRequired bool // true if field is required +} + +func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { + mi.sizecacheOffset = invalidOffset + mi.unknownOffset = invalidOffset + mi.extensionOffset = invalidOffset + + if si.sizecacheOffset.IsValid() && si.sizecacheType == sizecacheType { + mi.sizecacheOffset = si.sizecacheOffset + } + if si.unknownOffset.IsValid() && (si.unknownType == unknownFieldsAType || si.unknownType == unknownFieldsBType) { + mi.unknownOffset = si.unknownOffset + mi.unknownPtrKind = si.unknownType.Kind() == reflect.Ptr + } + if si.extensionOffset.IsValid() && si.extensionType == extensionFieldsType { + mi.extensionOffset = si.extensionOffset + } + + mi.coderFields = make(map[protowire.Number]*coderFieldInfo) + fields := mi.Desc.Fields() + preallocFields := make([]coderFieldInfo, fields.Len()) + for i := 0; i < fields.Len(); i++ { + fd := fields.Get(i) + + fs := si.fieldsByNumber[fd.Number()] + isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() + if isOneof { + fs = si.oneofsByName[fd.ContainingOneof().Name()] + } + ft := fs.Type + var wiretag uint64 + if !fd.IsPacked() { + wiretag = protowire.EncodeTag(fd.Number(), wireTypes[fd.Kind()]) + } else { + wiretag = protowire.EncodeTag(fd.Number(), protowire.BytesType) + } + var fieldOffset offset + var funcs pointerCoderFuncs + var childMessage *MessageInfo + switch { + case ft == nil: + // This never occurs for generated message types. + // It implies that a hand-crafted type has missing Go fields + // for specific protobuf message fields. 
+ funcs = pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return 0 + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + return nil, nil + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + panic("missing Go struct field for " + string(fd.FullName())) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + panic("missing Go struct field for " + string(fd.FullName())) + }, + merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + panic("missing Go struct field for " + string(fd.FullName())) + }, + } + case isOneof: + fieldOffset = offsetOf(fs, mi.Exporter) + case fd.IsWeak(): + fieldOffset = si.weakOffset + funcs = makeWeakMessageFieldCoder(fd) + default: + fieldOffset = offsetOf(fs, mi.Exporter) + childMessage, funcs = fieldCoder(fd, ft) + } + cf := &preallocFields[i] + *cf = coderFieldInfo{ + num: fd.Number(), + offset: fieldOffset, + wiretag: wiretag, + ft: ft, + tagsize: protowire.SizeVarint(wiretag), + funcs: funcs, + mi: childMessage, + validation: newFieldValidationInfo(mi, si, fd, ft), + isPointer: fd.Cardinality() == pref.Repeated || fd.HasPresence(), + isRequired: fd.Cardinality() == pref.Required, + } + mi.orderedCoderFields = append(mi.orderedCoderFields, cf) + mi.coderFields[cf.num] = cf + } + for i, oneofs := 0, mi.Desc.Oneofs(); i < oneofs.Len(); i++ { + if od := oneofs.Get(i); !od.IsSynthetic() { + mi.initOneofFieldCoders(od, si) + } + } + if messageset.IsMessageSet(mi.Desc) { + if !mi.extensionOffset.IsValid() { + panic(fmt.Sprintf("%v: MessageSet with no extensions field", mi.Desc.FullName())) + } + if !mi.unknownOffset.IsValid() { + panic(fmt.Sprintf("%v: MessageSet with no unknown field", mi.Desc.FullName())) + } + mi.isMessageSet = true + } + sort.Slice(mi.orderedCoderFields, func(i, j int) bool { + return mi.orderedCoderFields[i].num < mi.orderedCoderFields[j].num + }) + + var maxDense pref.FieldNumber + for _, cf := range mi.orderedCoderFields { + if cf.num >= 16 && cf.num >= 2*maxDense { + break + } + maxDense = cf.num + } + mi.denseCoderFields = make([]*coderFieldInfo, maxDense+1) + for _, cf := range mi.orderedCoderFields { + if int(cf.num) >= len(mi.denseCoderFields) { + break + } + mi.denseCoderFields[cf.num] = cf + } + + // To preserve compatibility with historic wire output, marshal oneofs last. + if mi.Desc.Oneofs().Len() > 0 { + sort.Slice(mi.orderedCoderFields, func(i, j int) bool { + fi := fields.ByNumber(mi.orderedCoderFields[i].num) + fj := fields.ByNumber(mi.orderedCoderFields[j].num) + return order.LegacyFieldOrder(fi, fj) + }) + } + + mi.needsInitCheck = needsInitCheck(mi.Desc) + if mi.methods.Marshal == nil && mi.methods.Size == nil { + mi.methods.Flags |= piface.SupportMarshalDeterministic + mi.methods.Marshal = mi.marshal + mi.methods.Size = mi.size + } + if mi.methods.Unmarshal == nil { + mi.methods.Flags |= piface.SupportUnmarshalDiscardUnknown + mi.methods.Unmarshal = mi.unmarshal + } + if mi.methods.CheckInitialized == nil { + mi.methods.CheckInitialized = mi.checkInitialized + } + if mi.methods.Merge == nil { + mi.methods.Merge = mi.merge + } +} + +// getUnknownBytes returns a *[]byte for the unknown fields. +// It is the caller's responsibility to check whether the pointer is nil. +// This function is specially designed to be inlineable. 
+func (mi *MessageInfo) getUnknownBytes(p pointer) *[]byte { + if mi.unknownPtrKind { + return *p.Apply(mi.unknownOffset).BytesPtr() + } else { + return p.Apply(mi.unknownOffset).Bytes() + } +} + +// mutableUnknownBytes returns a *[]byte for the unknown fields. +// The returned pointer is guaranteed to not be nil. +func (mi *MessageInfo) mutableUnknownBytes(p pointer) *[]byte { + if mi.unknownPtrKind { + bp := p.Apply(mi.unknownOffset).BytesPtr() + if *bp == nil { + *bp = new([]byte) + } + return *bp + } else { + return p.Apply(mi.unknownOffset).Bytes() + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go new file mode 100644 index 000000000..b7a23faf1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go @@ -0,0 +1,123 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "sort" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" +) + +func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int) { + if !flags.ProtoLegacy { + return 0 + } + + ext := *p.Apply(mi.extensionOffset).Extensions() + for _, x := range ext { + xi := getExtensionFieldInfo(x.Type()) + if xi.funcs.size == nil { + continue + } + num, _ := protowire.DecodeTag(xi.wiretag) + size += messageset.SizeField(num) + size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts) + } + + if u := mi.getUnknownBytes(p); u != nil { + size += messageset.SizeUnknown(*u) + } + + return size +} + +func marshalMessageSet(mi *MessageInfo, b []byte, p pointer, opts marshalOptions) ([]byte, error) { + if !flags.ProtoLegacy { + return b, errors.New("no support for message_set_wire_format") + } + + ext := *p.Apply(mi.extensionOffset).Extensions() + switch len(ext) { + case 0: + case 1: + // Fast-path for one extension: Don't bother sorting the keys. + for _, x := range ext { + var err error + b, err = marshalMessageSetField(mi, b, x, opts) + if err != nil { + return b, err + } + } + default: + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. 
+ keys := make([]int, 0, len(ext)) + for k := range ext { + keys = append(keys, int(k)) + } + sort.Ints(keys) + for _, k := range keys { + var err error + b, err = marshalMessageSetField(mi, b, ext[int32(k)], opts) + if err != nil { + return b, err + } + } + } + + if u := mi.getUnknownBytes(p); u != nil { + var err error + b, err = messageset.AppendUnknown(b, *u) + if err != nil { + return b, err + } + } + + return b, nil +} + +func marshalMessageSetField(mi *MessageInfo, b []byte, x ExtensionField, opts marshalOptions) ([]byte, error) { + xi := getExtensionFieldInfo(x.Type()) + num, _ := protowire.DecodeTag(xi.wiretag) + b = messageset.AppendFieldStart(b, num) + b, err := xi.funcs.marshal(b, x.Value(), protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType), opts) + if err != nil { + return b, err + } + b = messageset.AppendFieldEnd(b) + return b, nil +} + +func unmarshalMessageSet(mi *MessageInfo, b []byte, p pointer, opts unmarshalOptions) (out unmarshalOutput, err error) { + if !flags.ProtoLegacy { + return out, errors.New("no support for message_set_wire_format") + } + + ep := p.Apply(mi.extensionOffset).Extensions() + if *ep == nil { + *ep = make(map[int32]ExtensionField) + } + ext := *ep + initialized := true + err = messageset.Unmarshal(b, true, func(num protowire.Number, v []byte) error { + o, err := mi.unmarshalExtension(v, num, protowire.BytesType, ext, opts) + if err == errUnknown { + u := mi.mutableUnknownBytes(p) + *u = protowire.AppendTag(*u, num, protowire.BytesType) + *u = append(*u, v...) + return nil + } + if !o.initialized { + initialized = false + } + return err + }) + out.n = len(b) + out.initialized = initialized + return out, err +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go new file mode 100644 index 000000000..145c577bd --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go @@ -0,0 +1,210 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build purego || appengine +// +build purego appengine + +package impl + +import ( + "reflect" + + "google.golang.org/protobuf/encoding/protowire" +) + +func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := p.v.Elem().Int() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := p.v.Elem().Int() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return out, errDecode + } + p.v.Elem().SetInt(int64(v)) + out.n = n + return out, nil +} + +func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + dst.v.Elem().Set(src.v.Elem()) +} + +var coderEnum = pointerCoderFuncs{ + size: sizeEnum, + marshal: appendEnum, + unmarshal: consumeEnum, + merge: mergeEnum, +} + +func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + if p.v.Elem().Int() == 0 { + return 0 + } + return sizeEnum(p, f, opts) +} + +func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + if p.v.Elem().Int() == 0 { + return b, nil + } + return appendEnum(b, p, f, opts) +} + +func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + if src.v.Elem().Int() != 0 { + dst.v.Elem().Set(src.v.Elem()) + } +} + +var coderEnumNoZero = pointerCoderFuncs{ + size: sizeEnumNoZero, + marshal: appendEnumNoZero, + unmarshal: consumeEnum, + merge: mergeEnumNoZero, +} + +func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return sizeEnum(pointer{p.v.Elem()}, f, opts) +} + +func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + return appendEnum(b, pointer{p.v.Elem()}, f, opts) +} + +func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + if p.v.Elem().IsNil() { + p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem())) + } + return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts) +} + +func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + if !src.v.Elem().IsNil() { + v := reflect.New(dst.v.Type().Elem().Elem()) + v.Elem().Set(src.v.Elem().Elem()) + dst.v.Elem().Set(v) + } +} + +var coderEnumPtr = pointerCoderFuncs{ + size: sizeEnumPtr, + marshal: appendEnumPtr, + unmarshal: consumeEnumPtr, + merge: mergeEnumPtr, +} + +func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := p.v.Elem() + for i, llen := 0, s.Len(); i < llen; i++ { + size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize + } + return size +} + +func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.v.Elem() + for i, llen := 0, s.Len(); i < llen; i++ { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) + } + return b, nil +} + +func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + s := p.v.Elem() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n 
< 0 { + return out, errDecode + } + for len(b) > 0 { + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return out, errDecode + } + rv := reflect.New(s.Type().Elem()).Elem() + rv.SetInt(int64(v)) + s.Set(reflect.Append(s, rv)) + b = b[n:] + } + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return out, errDecode + } + rv := reflect.New(s.Type().Elem()).Elem() + rv.SetInt(int64(v)) + s.Set(reflect.Append(s, rv)) + out.n = n + return out, nil +} + +func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem())) +} + +var coderEnumSlice = pointerCoderFuncs{ + size: sizeEnumSlice, + marshal: appendEnumSlice, + unmarshal: consumeEnumSlice, + merge: mergeEnumSlice, +} + +func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := p.v.Elem() + llen := s.Len() + if llen == 0 { + return 0 + } + n := 0 + for i := 0; i < llen; i++ { + n += protowire.SizeVarint(uint64(s.Index(i).Int())) + } + return f.tagsize + protowire.SizeBytes(n) +} + +func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.v.Elem() + llen := s.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for i := 0; i < llen; i++ { + n += protowire.SizeVarint(uint64(s.Index(i).Int())) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) + } + return b, nil +} + +var coderEnumPackedSlice = pointerCoderFuncs{ + size: sizeEnumPackedSlice, + marshal: appendEnumPackedSlice, + unmarshal: consumeEnumSlice, + merge: mergeEnumSlice, +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go new file mode 100644 index 000000000..e89971238 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go @@ -0,0 +1,557 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/strs" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// pointerCoderFuncs is a set of pointer encoding functions. +type pointerCoderFuncs struct { + mi *MessageInfo + size func(p pointer, f *coderFieldInfo, opts marshalOptions) int + marshal func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) + unmarshal func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) + isInit func(p pointer, f *coderFieldInfo) error + merge func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) +} + +// valueCoderFuncs is a set of protoreflect.Value encoding functions. 
+type valueCoderFuncs struct { + size func(v pref.Value, tagsize int, opts marshalOptions) int + marshal func(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) + unmarshal func(b []byte, v pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) + isInit func(v pref.Value) error + merge func(dst, src pref.Value, opts mergeOptions) pref.Value +} + +// fieldCoder returns pointer functions for a field, used for operating on +// struct fields. +func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) { + switch { + case fd.IsMap(): + return encoderFuncsForMap(fd, ft) + case fd.Cardinality() == pref.Repeated && !fd.IsPacked(): + // Repeated fields (not packed). + if ft.Kind() != reflect.Slice { + break + } + ft := ft.Elem() + switch fd.Kind() { + case pref.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBoolSlice + } + case pref.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnumSlice + } + case pref.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32Slice + } + case pref.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32Slice + } + case pref.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32Slice + } + case pref.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64Slice + } + case pref.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64Slice + } + case pref.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64Slice + } + case pref.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32Slice + } + case pref.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32Slice + } + case pref.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloatSlice + } + case pref.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64Slice + } + case pref.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64Slice + } + case pref.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDoubleSlice + } + case pref.StringKind: + if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { + return nil, coderStringSliceValidateUTF8 + } + if ft.Kind() == reflect.String { + return nil, coderStringSlice + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 && strs.EnforceUTF8(fd) { + return nil, coderBytesSliceValidateUTF8 + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytesSlice + } + case pref.BytesKind: + if ft.Kind() == reflect.String { + return nil, coderStringSlice + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytesSlice + } + case pref.MessageKind: + return getMessageInfo(ft), makeMessageSliceFieldCoder(fd, ft) + case pref.GroupKind: + return getMessageInfo(ft), makeGroupSliceFieldCoder(fd, ft) + } + case fd.Cardinality() == pref.Repeated && fd.IsPacked(): + // Packed repeated fields. + // + // Only repeated fields of primitive numeric types + // (Varint, Fixed32, or Fixed64 wire type) can be packed. 
+ if ft.Kind() != reflect.Slice { + break + } + ft := ft.Elem() + switch fd.Kind() { + case pref.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBoolPackedSlice + } + case pref.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnumPackedSlice + } + case pref.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32PackedSlice + } + case pref.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32PackedSlice + } + case pref.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32PackedSlice + } + case pref.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64PackedSlice + } + case pref.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64PackedSlice + } + case pref.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64PackedSlice + } + case pref.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32PackedSlice + } + case pref.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32PackedSlice + } + case pref.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloatPackedSlice + } + case pref.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64PackedSlice + } + case pref.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64PackedSlice + } + case pref.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDoublePackedSlice + } + } + case fd.Kind() == pref.MessageKind: + return getMessageInfo(ft), makeMessageFieldCoder(fd, ft) + case fd.Kind() == pref.GroupKind: + return getMessageInfo(ft), makeGroupFieldCoder(fd, ft) + case fd.Syntax() == pref.Proto3 && fd.ContainingOneof() == nil: + // Populated oneof fields always encode even if set to the zero value, + // which normally are not encoded in proto3. 
+ switch fd.Kind() { + case pref.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBoolNoZero + } + case pref.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnumNoZero + } + case pref.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32NoZero + } + case pref.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32NoZero + } + case pref.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32NoZero + } + case pref.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64NoZero + } + case pref.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64NoZero + } + case pref.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64NoZero + } + case pref.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32NoZero + } + case pref.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32NoZero + } + case pref.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloatNoZero + } + case pref.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64NoZero + } + case pref.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64NoZero + } + case pref.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDoubleNoZero + } + case pref.StringKind: + if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { + return nil, coderStringNoZeroValidateUTF8 + } + if ft.Kind() == reflect.String { + return nil, coderStringNoZero + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 && strs.EnforceUTF8(fd) { + return nil, coderBytesNoZeroValidateUTF8 + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytesNoZero + } + case pref.BytesKind: + if ft.Kind() == reflect.String { + return nil, coderStringNoZero + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytesNoZero + } + } + case ft.Kind() == reflect.Ptr: + ft := ft.Elem() + switch fd.Kind() { + case pref.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBoolPtr + } + case pref.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnumPtr + } + case pref.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32Ptr + } + case pref.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32Ptr + } + case pref.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32Ptr + } + case pref.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64Ptr + } + case pref.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64Ptr + } + case pref.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64Ptr + } + case pref.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32Ptr + } + case pref.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32Ptr + } + case pref.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloatPtr + } + case pref.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64Ptr + } + case pref.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64Ptr + } + case pref.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDoublePtr + } + case pref.StringKind: + if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { + return nil, coderStringPtrValidateUTF8 + } + if ft.Kind() == reflect.String { + return nil, coderStringPtr + } + case 
pref.BytesKind: + if ft.Kind() == reflect.String { + return nil, coderStringPtr + } + } + default: + switch fd.Kind() { + case pref.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBool + } + case pref.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnum + } + case pref.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32 + } + case pref.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32 + } + case pref.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32 + } + case pref.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64 + } + case pref.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64 + } + case pref.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64 + } + case pref.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32 + } + case pref.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32 + } + case pref.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloat + } + case pref.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64 + } + case pref.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64 + } + case pref.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDouble + } + case pref.StringKind: + if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { + return nil, coderStringValidateUTF8 + } + if ft.Kind() == reflect.String { + return nil, coderString + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 && strs.EnforceUTF8(fd) { + return nil, coderBytesValidateUTF8 + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytes + } + case pref.BytesKind: + if ft.Kind() == reflect.String { + return nil, coderString + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytes + } + } + } + panic(fmt.Sprintf("invalid type: no encoder for %v %v %v/%v", fd.FullName(), fd.Cardinality(), fd.Kind(), ft)) +} + +// encoderFuncsForValue returns value functions for a field, used for +// extension values and map encoding. +func encoderFuncsForValue(fd pref.FieldDescriptor) valueCoderFuncs { + switch { + case fd.Cardinality() == pref.Repeated && !fd.IsPacked(): + switch fd.Kind() { + case pref.BoolKind: + return coderBoolSliceValue + case pref.EnumKind: + return coderEnumSliceValue + case pref.Int32Kind: + return coderInt32SliceValue + case pref.Sint32Kind: + return coderSint32SliceValue + case pref.Uint32Kind: + return coderUint32SliceValue + case pref.Int64Kind: + return coderInt64SliceValue + case pref.Sint64Kind: + return coderSint64SliceValue + case pref.Uint64Kind: + return coderUint64SliceValue + case pref.Sfixed32Kind: + return coderSfixed32SliceValue + case pref.Fixed32Kind: + return coderFixed32SliceValue + case pref.FloatKind: + return coderFloatSliceValue + case pref.Sfixed64Kind: + return coderSfixed64SliceValue + case pref.Fixed64Kind: + return coderFixed64SliceValue + case pref.DoubleKind: + return coderDoubleSliceValue + case pref.StringKind: + // We don't have a UTF-8 validating coder for repeated string fields. + // Value coders are used for extensions and maps. + // Extensions are never proto3, and maps never contain lists. 
+ return coderStringSliceValue + case pref.BytesKind: + return coderBytesSliceValue + case pref.MessageKind: + return coderMessageSliceValue + case pref.GroupKind: + return coderGroupSliceValue + } + case fd.Cardinality() == pref.Repeated && fd.IsPacked(): + switch fd.Kind() { + case pref.BoolKind: + return coderBoolPackedSliceValue + case pref.EnumKind: + return coderEnumPackedSliceValue + case pref.Int32Kind: + return coderInt32PackedSliceValue + case pref.Sint32Kind: + return coderSint32PackedSliceValue + case pref.Uint32Kind: + return coderUint32PackedSliceValue + case pref.Int64Kind: + return coderInt64PackedSliceValue + case pref.Sint64Kind: + return coderSint64PackedSliceValue + case pref.Uint64Kind: + return coderUint64PackedSliceValue + case pref.Sfixed32Kind: + return coderSfixed32PackedSliceValue + case pref.Fixed32Kind: + return coderFixed32PackedSliceValue + case pref.FloatKind: + return coderFloatPackedSliceValue + case pref.Sfixed64Kind: + return coderSfixed64PackedSliceValue + case pref.Fixed64Kind: + return coderFixed64PackedSliceValue + case pref.DoubleKind: + return coderDoublePackedSliceValue + } + default: + switch fd.Kind() { + default: + case pref.BoolKind: + return coderBoolValue + case pref.EnumKind: + return coderEnumValue + case pref.Int32Kind: + return coderInt32Value + case pref.Sint32Kind: + return coderSint32Value + case pref.Uint32Kind: + return coderUint32Value + case pref.Int64Kind: + return coderInt64Value + case pref.Sint64Kind: + return coderSint64Value + case pref.Uint64Kind: + return coderUint64Value + case pref.Sfixed32Kind: + return coderSfixed32Value + case pref.Fixed32Kind: + return coderFixed32Value + case pref.FloatKind: + return coderFloatValue + case pref.Sfixed64Kind: + return coderSfixed64Value + case pref.Fixed64Kind: + return coderFixed64Value + case pref.DoubleKind: + return coderDoubleValue + case pref.StringKind: + if strs.EnforceUTF8(fd) { + return coderStringValueValidateUTF8 + } + return coderStringValue + case pref.BytesKind: + return coderBytesValue + case pref.MessageKind: + return coderMessageValue + case pref.GroupKind: + return coderGroupValue + } + } + panic(fmt.Sprintf("invalid field: no encoder for %v %v %v", fd.FullName(), fd.Cardinality(), fd.Kind())) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go new file mode 100644 index 000000000..757642e23 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go @@ -0,0 +1,18 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego && !appengine +// +build !purego,!appengine + +package impl + +// When using unsafe pointers, we can just treat enum values as int32s. + +var ( + coderEnumNoZero = coderInt32NoZero + coderEnum = coderInt32 + coderEnumPtr = coderInt32Ptr + coderEnumSlice = coderInt32Slice + coderEnumPackedSlice = coderInt32PackedSlice +) diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go new file mode 100644 index 000000000..acd61bb50 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -0,0 +1,496 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impl + +import ( + "fmt" + "reflect" + + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// unwrapper unwraps the value to the underlying value. +// This is implemented by List and Map. +type unwrapper interface { + protoUnwrap() interface{} +} + +// A Converter coverts to/from Go reflect.Value types and protobuf protoreflect.Value types. +type Converter interface { + // PBValueOf converts a reflect.Value to a protoreflect.Value. + PBValueOf(reflect.Value) pref.Value + + // GoValueOf converts a protoreflect.Value to a reflect.Value. + GoValueOf(pref.Value) reflect.Value + + // IsValidPB returns whether a protoreflect.Value is compatible with this type. + IsValidPB(pref.Value) bool + + // IsValidGo returns whether a reflect.Value is compatible with this type. + IsValidGo(reflect.Value) bool + + // New returns a new field value. + // For scalars, it returns the default value of the field. + // For composite types, it returns a new mutable value. + New() pref.Value + + // Zero returns a new field value. + // For scalars, it returns the default value of the field. + // For composite types, it returns an immutable, empty value. + Zero() pref.Value +} + +// NewConverter matches a Go type with a protobuf field and returns a Converter +// that converts between the two. Enums must be a named int32 kind that +// implements protoreflect.Enum, and messages must be pointer to a named +// struct type that implements protoreflect.ProtoMessage. +// +// This matcher deliberately supports a wider range of Go types than what +// protoc-gen-go historically generated to be able to automatically wrap some +// v1 messages generated by other forks of protoc-gen-go. +func NewConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { + switch { + case fd.IsList(): + return newListConverter(t, fd) + case fd.IsMap(): + return newMapConverter(t, fd) + default: + return newSingularConverter(t, fd) + } + panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) +} + +var ( + boolType = reflect.TypeOf(bool(false)) + int32Type = reflect.TypeOf(int32(0)) + int64Type = reflect.TypeOf(int64(0)) + uint32Type = reflect.TypeOf(uint32(0)) + uint64Type = reflect.TypeOf(uint64(0)) + float32Type = reflect.TypeOf(float32(0)) + float64Type = reflect.TypeOf(float64(0)) + stringType = reflect.TypeOf(string("")) + bytesType = reflect.TypeOf([]byte(nil)) + byteType = reflect.TypeOf(byte(0)) +) + +var ( + boolZero = pref.ValueOfBool(false) + int32Zero = pref.ValueOfInt32(0) + int64Zero = pref.ValueOfInt64(0) + uint32Zero = pref.ValueOfUint32(0) + uint64Zero = pref.ValueOfUint64(0) + float32Zero = pref.ValueOfFloat32(0) + float64Zero = pref.ValueOfFloat64(0) + stringZero = pref.ValueOfString("") + bytesZero = pref.ValueOfBytes(nil) +) + +func newSingularConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { + defVal := func(fd pref.FieldDescriptor, zero pref.Value) pref.Value { + if fd.Cardinality() == pref.Repeated { + // Default isn't defined for repeated fields. 
+ return zero + } + return fd.Default() + } + switch fd.Kind() { + case pref.BoolKind: + if t.Kind() == reflect.Bool { + return &boolConverter{t, defVal(fd, boolZero)} + } + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + if t.Kind() == reflect.Int32 { + return &int32Converter{t, defVal(fd, int32Zero)} + } + case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + if t.Kind() == reflect.Int64 { + return &int64Converter{t, defVal(fd, int64Zero)} + } + case pref.Uint32Kind, pref.Fixed32Kind: + if t.Kind() == reflect.Uint32 { + return &uint32Converter{t, defVal(fd, uint32Zero)} + } + case pref.Uint64Kind, pref.Fixed64Kind: + if t.Kind() == reflect.Uint64 { + return &uint64Converter{t, defVal(fd, uint64Zero)} + } + case pref.FloatKind: + if t.Kind() == reflect.Float32 { + return &float32Converter{t, defVal(fd, float32Zero)} + } + case pref.DoubleKind: + if t.Kind() == reflect.Float64 { + return &float64Converter{t, defVal(fd, float64Zero)} + } + case pref.StringKind: + if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) { + return &stringConverter{t, defVal(fd, stringZero)} + } + case pref.BytesKind: + if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) { + return &bytesConverter{t, defVal(fd, bytesZero)} + } + case pref.EnumKind: + // Handle enums, which must be a named int32 type. + if t.Kind() == reflect.Int32 { + return newEnumConverter(t, fd) + } + case pref.MessageKind, pref.GroupKind: + return newMessageConverter(t) + } + panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) +} + +type boolConverter struct { + goType reflect.Type + def pref.Value +} + +func (c *boolConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfBool(v.Bool()) +} +func (c *boolConverter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(v.Bool()).Convert(c.goType) +} +func (c *boolConverter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(bool) + return ok +} +func (c *boolConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *boolConverter) New() pref.Value { return c.def } +func (c *boolConverter) Zero() pref.Value { return c.def } + +type int32Converter struct { + goType reflect.Type + def pref.Value +} + +func (c *int32Converter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfInt32(int32(v.Int())) +} +func (c *int32Converter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(int32(v.Int())).Convert(c.goType) +} +func (c *int32Converter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(int32) + return ok +} +func (c *int32Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *int32Converter) New() pref.Value { return c.def } +func (c *int32Converter) Zero() pref.Value { return c.def } + +type int64Converter struct { + goType reflect.Type + def pref.Value +} + +func (c *int64Converter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfInt64(int64(v.Int())) +} +func (c *int64Converter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(int64(v.Int())).Convert(c.goType) +} +func (c *int64Converter) IsValidPB(v 
pref.Value) bool { + _, ok := v.Interface().(int64) + return ok +} +func (c *int64Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *int64Converter) New() pref.Value { return c.def } +func (c *int64Converter) Zero() pref.Value { return c.def } + +type uint32Converter struct { + goType reflect.Type + def pref.Value +} + +func (c *uint32Converter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfUint32(uint32(v.Uint())) +} +func (c *uint32Converter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(uint32(v.Uint())).Convert(c.goType) +} +func (c *uint32Converter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(uint32) + return ok +} +func (c *uint32Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *uint32Converter) New() pref.Value { return c.def } +func (c *uint32Converter) Zero() pref.Value { return c.def } + +type uint64Converter struct { + goType reflect.Type + def pref.Value +} + +func (c *uint64Converter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfUint64(uint64(v.Uint())) +} +func (c *uint64Converter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(uint64(v.Uint())).Convert(c.goType) +} +func (c *uint64Converter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(uint64) + return ok +} +func (c *uint64Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *uint64Converter) New() pref.Value { return c.def } +func (c *uint64Converter) Zero() pref.Value { return c.def } + +type float32Converter struct { + goType reflect.Type + def pref.Value +} + +func (c *float32Converter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfFloat32(float32(v.Float())) +} +func (c *float32Converter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(float32(v.Float())).Convert(c.goType) +} +func (c *float32Converter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(float32) + return ok +} +func (c *float32Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *float32Converter) New() pref.Value { return c.def } +func (c *float32Converter) Zero() pref.Value { return c.def } + +type float64Converter struct { + goType reflect.Type + def pref.Value +} + +func (c *float64Converter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfFloat64(float64(v.Float())) +} +func (c *float64Converter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(float64(v.Float())).Convert(c.goType) +} +func (c *float64Converter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(float64) + return ok +} +func (c *float64Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *float64Converter) New() pref.Value { return c.def } +func (c *float64Converter) Zero() pref.Value { return c.def } + +type stringConverter struct { + goType reflect.Type + def pref.Value +} + +func (c *stringConverter) PBValueOf(v reflect.Value) pref.Value { + if 
v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfString(v.Convert(stringType).String()) +} +func (c *stringConverter) GoValueOf(v pref.Value) reflect.Value { + // pref.Value.String never panics, so we go through an interface + // conversion here to check the type. + s := v.Interface().(string) + if c.goType.Kind() == reflect.Slice && s == "" { + return reflect.Zero(c.goType) // ensure empty string is []byte(nil) + } + return reflect.ValueOf(s).Convert(c.goType) +} +func (c *stringConverter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(string) + return ok +} +func (c *stringConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *stringConverter) New() pref.Value { return c.def } +func (c *stringConverter) Zero() pref.Value { return c.def } + +type bytesConverter struct { + goType reflect.Type + def pref.Value +} + +func (c *bytesConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + if c.goType.Kind() == reflect.String && v.Len() == 0 { + return pref.ValueOfBytes(nil) // ensure empty string is []byte(nil) + } + return pref.ValueOfBytes(v.Convert(bytesType).Bytes()) +} +func (c *bytesConverter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(v.Bytes()).Convert(c.goType) +} +func (c *bytesConverter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().([]byte) + return ok +} +func (c *bytesConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *bytesConverter) New() pref.Value { return c.def } +func (c *bytesConverter) Zero() pref.Value { return c.def } + +type enumConverter struct { + goType reflect.Type + def pref.Value +} + +func newEnumConverter(goType reflect.Type, fd pref.FieldDescriptor) Converter { + var def pref.Value + if fd.Cardinality() == pref.Repeated { + def = pref.ValueOfEnum(fd.Enum().Values().Get(0).Number()) + } else { + def = fd.Default() + } + return &enumConverter{goType, def} +} + +func (c *enumConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfEnum(pref.EnumNumber(v.Int())) +} + +func (c *enumConverter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(v.Enum()).Convert(c.goType) +} + +func (c *enumConverter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(pref.EnumNumber) + return ok +} + +func (c *enumConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} + +func (c *enumConverter) New() pref.Value { + return c.def +} + +func (c *enumConverter) Zero() pref.Value { + return c.def +} + +type messageConverter struct { + goType reflect.Type +} + +func newMessageConverter(goType reflect.Type) Converter { + return &messageConverter{goType} +} + +func (c *messageConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + if c.isNonPointer() { + if v.CanAddr() { + v = v.Addr() // T => *T + } else { + v = reflect.Zero(reflect.PtrTo(v.Type())) + } + } + if m, ok := v.Interface().(pref.ProtoMessage); ok { + return pref.ValueOfMessage(m.ProtoReflect()) + } + return pref.ValueOfMessage(legacyWrapMessage(v)) +} + +func (c *messageConverter) GoValueOf(v pref.Value) reflect.Value { + m := v.Message() + 
var rv reflect.Value + if u, ok := m.(unwrapper); ok { + rv = reflect.ValueOf(u.protoUnwrap()) + } else { + rv = reflect.ValueOf(m.Interface()) + } + if c.isNonPointer() { + if rv.Type() != reflect.PtrTo(c.goType) { + panic(fmt.Sprintf("invalid type: got %v, want %v", rv.Type(), reflect.PtrTo(c.goType))) + } + if !rv.IsNil() { + rv = rv.Elem() // *T => T + } else { + rv = reflect.Zero(rv.Type().Elem()) + } + } + if rv.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", rv.Type(), c.goType)) + } + return rv +} + +func (c *messageConverter) IsValidPB(v pref.Value) bool { + m := v.Message() + var rv reflect.Value + if u, ok := m.(unwrapper); ok { + rv = reflect.ValueOf(u.protoUnwrap()) + } else { + rv = reflect.ValueOf(m.Interface()) + } + if c.isNonPointer() { + return rv.Type() == reflect.PtrTo(c.goType) + } + return rv.Type() == c.goType +} + +func (c *messageConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} + +func (c *messageConverter) New() pref.Value { + if c.isNonPointer() { + return c.PBValueOf(reflect.New(c.goType).Elem()) + } + return c.PBValueOf(reflect.New(c.goType.Elem())) +} + +func (c *messageConverter) Zero() pref.Value { + return c.PBValueOf(reflect.Zero(c.goType)) +} + +// isNonPointer reports whether the type is a non-pointer type. +// This never occurs for generated message types. +func (c *messageConverter) isNonPointer() bool { + return c.goType.Kind() != reflect.Ptr +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go new file mode 100644 index 000000000..6fccab520 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go @@ -0,0 +1,141 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impl + +import ( + "fmt" + "reflect" + + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +func newListConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { + switch { + case t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Slice: + return &listPtrConverter{t, newSingularConverter(t.Elem().Elem(), fd)} + case t.Kind() == reflect.Slice: + return &listConverter{t, newSingularConverter(t.Elem(), fd)} + } + panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) +} + +type listConverter struct { + goType reflect.Type // []T + c Converter +} + +func (c *listConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + pv := reflect.New(c.goType) + pv.Elem().Set(v) + return pref.ValueOfList(&listReflect{pv, c.c}) +} + +func (c *listConverter) GoValueOf(v pref.Value) reflect.Value { + rv := v.List().(*listReflect).v + if rv.IsNil() { + return reflect.Zero(c.goType) + } + return rv.Elem() +} + +func (c *listConverter) IsValidPB(v pref.Value) bool { + list, ok := v.Interface().(*listReflect) + if !ok { + return false + } + return list.v.Type().Elem() == c.goType +} + +func (c *listConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} + +func (c *listConverter) New() pref.Value { + return pref.ValueOfList(&listReflect{reflect.New(c.goType), c.c}) +} + +func (c *listConverter) Zero() pref.Value { + return pref.ValueOfList(&listReflect{reflect.Zero(reflect.PtrTo(c.goType)), c.c}) +} + +type listPtrConverter struct { + goType reflect.Type // *[]T + c Converter +} + +func (c *listPtrConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfList(&listReflect{v, c.c}) +} + +func (c *listPtrConverter) GoValueOf(v pref.Value) reflect.Value { + return v.List().(*listReflect).v +} + +func (c *listPtrConverter) IsValidPB(v pref.Value) bool { + list, ok := v.Interface().(*listReflect) + if !ok { + return false + } + return list.v.Type() == c.goType +} + +func (c *listPtrConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} + +func (c *listPtrConverter) New() pref.Value { + return c.PBValueOf(reflect.New(c.goType.Elem())) +} + +func (c *listPtrConverter) Zero() pref.Value { + return c.PBValueOf(reflect.Zero(c.goType)) +} + +type listReflect struct { + v reflect.Value // *[]T + conv Converter +} + +func (ls *listReflect) Len() int { + if ls.v.IsNil() { + return 0 + } + return ls.v.Elem().Len() +} +func (ls *listReflect) Get(i int) pref.Value { + return ls.conv.PBValueOf(ls.v.Elem().Index(i)) +} +func (ls *listReflect) Set(i int, v pref.Value) { + ls.v.Elem().Index(i).Set(ls.conv.GoValueOf(v)) +} +func (ls *listReflect) Append(v pref.Value) { + ls.v.Elem().Set(reflect.Append(ls.v.Elem(), ls.conv.GoValueOf(v))) +} +func (ls *listReflect) AppendMutable() pref.Value { + if _, ok := ls.conv.(*messageConverter); !ok { + panic("invalid AppendMutable on list with non-message type") + } + v := ls.NewElement() + ls.Append(v) + return v +} +func (ls *listReflect) Truncate(i int) { + ls.v.Elem().Set(ls.v.Elem().Slice(0, i)) +} +func (ls *listReflect) NewElement() pref.Value { + return ls.conv.New() +} +func (ls *listReflect) IsValid() bool { + return !ls.v.IsNil() +} +func (ls *listReflect) protoUnwrap() interface{} { + return ls.v.Interface() +} diff --git 
a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go new file mode 100644 index 000000000..de06b2593 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go @@ -0,0 +1,121 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type mapConverter struct { + goType reflect.Type // map[K]V + keyConv, valConv Converter +} + +func newMapConverter(t reflect.Type, fd pref.FieldDescriptor) *mapConverter { + if t.Kind() != reflect.Map { + panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) + } + return &mapConverter{ + goType: t, + keyConv: newSingularConverter(t.Key(), fd.MapKey()), + valConv: newSingularConverter(t.Elem(), fd.MapValue()), + } +} + +func (c *mapConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfMap(&mapReflect{v, c.keyConv, c.valConv}) +} + +func (c *mapConverter) GoValueOf(v pref.Value) reflect.Value { + return v.Map().(*mapReflect).v +} + +func (c *mapConverter) IsValidPB(v pref.Value) bool { + mapv, ok := v.Interface().(*mapReflect) + if !ok { + return false + } + return mapv.v.Type() == c.goType +} + +func (c *mapConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} + +func (c *mapConverter) New() pref.Value { + return c.PBValueOf(reflect.MakeMap(c.goType)) +} + +func (c *mapConverter) Zero() pref.Value { + return c.PBValueOf(reflect.Zero(c.goType)) +} + +type mapReflect struct { + v reflect.Value // map[K]V + keyConv Converter + valConv Converter +} + +func (ms *mapReflect) Len() int { + return ms.v.Len() +} +func (ms *mapReflect) Has(k pref.MapKey) bool { + rk := ms.keyConv.GoValueOf(k.Value()) + rv := ms.v.MapIndex(rk) + return rv.IsValid() +} +func (ms *mapReflect) Get(k pref.MapKey) pref.Value { + rk := ms.keyConv.GoValueOf(k.Value()) + rv := ms.v.MapIndex(rk) + if !rv.IsValid() { + return pref.Value{} + } + return ms.valConv.PBValueOf(rv) +} +func (ms *mapReflect) Set(k pref.MapKey, v pref.Value) { + rk := ms.keyConv.GoValueOf(k.Value()) + rv := ms.valConv.GoValueOf(v) + ms.v.SetMapIndex(rk, rv) +} +func (ms *mapReflect) Clear(k pref.MapKey) { + rk := ms.keyConv.GoValueOf(k.Value()) + ms.v.SetMapIndex(rk, reflect.Value{}) +} +func (ms *mapReflect) Mutable(k pref.MapKey) pref.Value { + if _, ok := ms.valConv.(*messageConverter); !ok { + panic("invalid Mutable on map with non-message value type") + } + v := ms.Get(k) + if !v.IsValid() { + v = ms.NewValue() + ms.Set(k, v) + } + return v +} +func (ms *mapReflect) Range(f func(pref.MapKey, pref.Value) bool) { + iter := mapRange(ms.v) + for iter.Next() { + k := ms.keyConv.PBValueOf(iter.Key()).MapKey() + v := ms.valConv.PBValueOf(iter.Value()) + if !f(k, v) { + return + } + } +} +func (ms *mapReflect) NewValue() pref.Value { + return ms.valConv.New() +} +func (ms *mapReflect) IsValid() bool { + return !ms.v.IsNil() +} +func (ms *mapReflect) protoUnwrap() interface{} { + return ms.v.Interface() +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go new file mode 100644 index 000000000..c65b0325c --- /dev/null +++ 
b/vendor/google.golang.org/protobuf/internal/impl/decode.go @@ -0,0 +1,284 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "math/bits" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +var errDecode = errors.New("cannot parse invalid wire-format data") +var errRecursionDepth = errors.New("exceeded maximum recursion depth") + +type unmarshalOptions struct { + flags protoiface.UnmarshalInputFlags + resolver interface { + FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) + FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) + } + depth int +} + +func (o unmarshalOptions) Options() proto.UnmarshalOptions { + return proto.UnmarshalOptions{ + Merge: true, + AllowPartial: true, + DiscardUnknown: o.DiscardUnknown(), + Resolver: o.resolver, + } +} + +func (o unmarshalOptions) DiscardUnknown() bool { return o.flags&piface.UnmarshalDiscardUnknown != 0 } + +func (o unmarshalOptions) IsDefault() bool { + return o.flags == 0 && o.resolver == preg.GlobalTypes +} + +var lazyUnmarshalOptions = unmarshalOptions{ + resolver: preg.GlobalTypes, + depth: protowire.DefaultRecursionLimit, +} + +type unmarshalOutput struct { + n int // number of bytes consumed + initialized bool +} + +// unmarshal is protoreflect.Methods.Unmarshal. +func (mi *MessageInfo) unmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) { + var p pointer + if ms, ok := in.Message.(*messageState); ok { + p = ms.pointer() + } else { + p = in.Message.(*messageReflectWrapper).pointer() + } + out, err := mi.unmarshalPointer(in.Buf, p, 0, unmarshalOptions{ + flags: in.Flags, + resolver: in.Resolver, + depth: in.Depth, + }) + var flags piface.UnmarshalOutputFlags + if out.initialized { + flags |= piface.UnmarshalInitialized + } + return piface.UnmarshalOutput{ + Flags: flags, + }, err +} + +// errUnknown is returned during unmarshaling to indicate a parse error that +// should result in a field being placed in the unknown fields section (for example, +// when the wire type doesn't match) as opposed to the entire unmarshal operation +// failing (for example, when a field extends past the available input). +// +// This is a sentinel error which should never be visible to the user. +var errUnknown = errors.New("unknown") + +func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) { + mi.init() + opts.depth-- + if opts.depth < 0 { + return out, errRecursionDepth + } + if flags.ProtoLegacy && mi.isMessageSet { + return unmarshalMessageSet(mi, b, p, opts) + } + initialized := true + var requiredMask uint64 + var exts *map[int32]ExtensionField + start := len(b) + for len(b) > 0 { + // Parse the tag (field number and wire type). 
+ var tag uint64 + if b[0] < 0x80 { + tag = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + tag, n = protowire.ConsumeVarint(b) + if n < 0 { + return out, errDecode + } + b = b[n:] + } + var num protowire.Number + if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) { + return out, errDecode + } else { + num = protowire.Number(n) + } + wtyp := protowire.Type(tag & 7) + + if wtyp == protowire.EndGroupType { + if num != groupTag { + return out, errDecode + } + groupTag = 0 + break + } + + var f *coderFieldInfo + if int(num) < len(mi.denseCoderFields) { + f = mi.denseCoderFields[num] + } else { + f = mi.coderFields[num] + } + var n int + err := errUnknown + switch { + case f != nil: + if f.funcs.unmarshal == nil { + break + } + var o unmarshalOutput + o, err = f.funcs.unmarshal(b, p.Apply(f.offset), wtyp, f, opts) + n = o.n + if err != nil { + break + } + requiredMask |= f.validation.requiredBit + if f.funcs.isInit != nil && !o.initialized { + initialized = false + } + default: + // Possible extension. + if exts == nil && mi.extensionOffset.IsValid() { + exts = p.Apply(mi.extensionOffset).Extensions() + if *exts == nil { + *exts = make(map[int32]ExtensionField) + } + } + if exts == nil { + break + } + var o unmarshalOutput + o, err = mi.unmarshalExtension(b, num, wtyp, *exts, opts) + if err != nil { + break + } + n = o.n + if !o.initialized { + initialized = false + } + } + if err != nil { + if err != errUnknown { + return out, err + } + n = protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return out, errDecode + } + if !opts.DiscardUnknown() && mi.unknownOffset.IsValid() { + u := mi.mutableUnknownBytes(p) + *u = protowire.AppendTag(*u, num, wtyp) + *u = append(*u, b[:n]...) + } + } + b = b[n:] + } + if groupTag != 0 { + return out, errDecode + } + if mi.numRequiredFields > 0 && bits.OnesCount64(requiredMask) != int(mi.numRequiredFields) { + initialized = false + } + if initialized { + out.initialized = true + } + out.n = start - len(b) + return out, nil +} + +func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp protowire.Type, exts map[int32]ExtensionField, opts unmarshalOptions) (out unmarshalOutput, err error) { + x := exts[int32(num)] + xt := x.Type() + if xt == nil { + var err error + xt, err = opts.resolver.FindExtensionByNumber(mi.Desc.FullName(), num) + if err != nil { + if err == preg.NotFound { + return out, errUnknown + } + return out, errors.New("%v: unable to resolve extension %v: %v", mi.Desc.FullName(), num, err) + } + } + xi := getExtensionFieldInfo(xt) + if xi.funcs.unmarshal == nil { + return out, errUnknown + } + if flags.LazyUnmarshalExtensions { + if opts.IsDefault() && x.canLazy(xt) { + out, valid := skipExtension(b, xi, num, wtyp, opts) + switch valid { + case ValidationValid: + if out.initialized { + x.appendLazyBytes(xt, xi, num, wtyp, b[:out.n]) + exts[int32(num)] = x + return out, nil + } + case ValidationInvalid: + return out, errDecode + case ValidationUnknown: + } + } + } + ival := x.Value() + if !ival.IsValid() && xi.unmarshalNeedsValue { + // Create a new message, list, or map value to fill in. + // For enums, create a prototype value to let the unmarshal func know the + // concrete type. 
+ ival = xt.New() + } + v, out, err := xi.funcs.unmarshal(b, ival, num, wtyp, opts) + if err != nil { + return out, err + } + if xi.funcs.isInit == nil { + out.initialized = true + } + x.Set(xt, v) + exts[int32(num)] = x + return out, nil +} + +func skipExtension(b []byte, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, _ ValidationStatus) { + if xi.validation.mi == nil { + return out, ValidationUnknown + } + xi.validation.mi.init() + switch xi.validation.typ { + case validationTypeMessage: + if wtyp != protowire.BytesType { + return out, ValidationUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, ValidationUnknown + } + out, st := xi.validation.mi.validate(v, 0, opts) + out.n = n + return out, st + case validationTypeGroup: + if wtyp != protowire.StartGroupType { + return out, ValidationUnknown + } + out, st := xi.validation.mi.validate(b, num, opts) + return out, st + default: + return out, ValidationUnknown + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go new file mode 100644 index 000000000..845c67d6e --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -0,0 +1,201 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "math" + "sort" + "sync/atomic" + + "google.golang.org/protobuf/internal/flags" + proto "google.golang.org/protobuf/proto" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +type marshalOptions struct { + flags piface.MarshalInputFlags +} + +func (o marshalOptions) Options() proto.MarshalOptions { + return proto.MarshalOptions{ + AllowPartial: true, + Deterministic: o.Deterministic(), + UseCachedSize: o.UseCachedSize(), + } +} + +func (o marshalOptions) Deterministic() bool { return o.flags&piface.MarshalDeterministic != 0 } +func (o marshalOptions) UseCachedSize() bool { return o.flags&piface.MarshalUseCachedSize != 0 } + +// size is protoreflect.Methods.Size. 
+func (mi *MessageInfo) size(in piface.SizeInput) piface.SizeOutput { + var p pointer + if ms, ok := in.Message.(*messageState); ok { + p = ms.pointer() + } else { + p = in.Message.(*messageReflectWrapper).pointer() + } + size := mi.sizePointer(p, marshalOptions{ + flags: in.Flags, + }) + return piface.SizeOutput{Size: size} +} + +func (mi *MessageInfo) sizePointer(p pointer, opts marshalOptions) (size int) { + mi.init() + if p.IsNil() { + return 0 + } + if opts.UseCachedSize() && mi.sizecacheOffset.IsValid() { + if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size >= 0 { + return int(size) + } + } + return mi.sizePointerSlow(p, opts) +} + +func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int) { + if flags.ProtoLegacy && mi.isMessageSet { + size = sizeMessageSet(mi, p, opts) + if mi.sizecacheOffset.IsValid() { + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) + } + return size + } + if mi.extensionOffset.IsValid() { + e := p.Apply(mi.extensionOffset).Extensions() + size += mi.sizeExtensions(e, opts) + } + for _, f := range mi.orderedCoderFields { + if f.funcs.size == nil { + continue + } + fptr := p.Apply(f.offset) + if f.isPointer && fptr.Elem().IsNil() { + continue + } + size += f.funcs.size(fptr, f, opts) + } + if mi.unknownOffset.IsValid() { + if u := mi.getUnknownBytes(p); u != nil { + size += len(*u) + } + } + if mi.sizecacheOffset.IsValid() { + if size > math.MaxInt32 { + // The size is too large for the int32 sizecache field. + // We will need to recompute the size when encoding; + // unfortunately expensive, but better than invalid output. + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), -1) + } else { + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) + } + } + return size +} + +// marshal is protoreflect.Methods.Marshal. +func (mi *MessageInfo) marshal(in piface.MarshalInput) (out piface.MarshalOutput, err error) { + var p pointer + if ms, ok := in.Message.(*messageState); ok { + p = ms.pointer() + } else { + p = in.Message.(*messageReflectWrapper).pointer() + } + b, err := mi.marshalAppendPointer(in.Buf, p, marshalOptions{ + flags: in.Flags, + }) + return piface.MarshalOutput{Buf: b}, err +} + +func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOptions) ([]byte, error) { + mi.init() + if p.IsNil() { + return b, nil + } + if flags.ProtoLegacy && mi.isMessageSet { + return marshalMessageSet(mi, b, p, opts) + } + var err error + // The old marshaler encodes extensions at beginning. + if mi.extensionOffset.IsValid() { + e := p.Apply(mi.extensionOffset).Extensions() + // TODO: Special handling for MessageSet? + b, err = mi.appendExtensions(b, e, opts) + if err != nil { + return b, err + } + } + for _, f := range mi.orderedCoderFields { + if f.funcs.marshal == nil { + continue + } + fptr := p.Apply(f.offset) + if f.isPointer && fptr.Elem().IsNil() { + continue + } + b, err = f.funcs.marshal(b, fptr, f, opts) + if err != nil { + return b, err + } + } + if mi.unknownOffset.IsValid() && !mi.isMessageSet { + if u := mi.getUnknownBytes(p); u != nil { + b = append(b, (*u)...) 
+ } + } + return b, nil +} + +func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) { + if ext == nil { + return 0 + } + for _, x := range *ext { + xi := getExtensionFieldInfo(x.Type()) + if xi.funcs.size == nil { + continue + } + n += xi.funcs.size(x.Value(), xi.tagsize, opts) + } + return n +} + +func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, opts marshalOptions) ([]byte, error) { + if ext == nil { + return b, nil + } + + switch len(*ext) { + case 0: + return b, nil + case 1: + // Fast-path for one extension: Don't bother sorting the keys. + var err error + for _, x := range *ext { + xi := getExtensionFieldInfo(x.Type()) + b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) + } + return b, err + default: + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(*ext)) + for k := range *ext { + keys = append(keys, int(k)) + } + sort.Ints(keys) + var err error + for _, k := range keys { + x := (*ext)[int32(k)] + xi := getExtensionFieldInfo(x.Type()) + b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) + if err != nil { + return b, err + } + } + return b, nil + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/enum.go b/vendor/google.golang.org/protobuf/internal/impl/enum.go new file mode 100644 index 000000000..8c1eab4bf --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/enum.go @@ -0,0 +1,21 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "reflect" + + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type EnumInfo struct { + GoReflectType reflect.Type // int32 kind + Desc pref.EnumDescriptor +} + +func (t *EnumInfo) New(n pref.EnumNumber) pref.Enum { + return reflect.ValueOf(n).Convert(t.GoReflectType).Interface().(pref.Enum) +} +func (t *EnumInfo) Descriptor() pref.EnumDescriptor { return t.Desc } diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go new file mode 100644 index 000000000..e904fd993 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/extension.go @@ -0,0 +1,156 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "reflect" + "sync" + "sync/atomic" + + pref "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// ExtensionInfo implements ExtensionType. +// +// This type contains a number of exported fields for legacy compatibility. +// The only non-deprecated use of this type is through the methods of the +// ExtensionType interface. +type ExtensionInfo struct { + // An ExtensionInfo may exist in several stages of initialization. + // + // extensionInfoUninitialized: Some or all of the legacy exported + // fields may be set, but none of the unexported fields have been + // initialized. This is the starting state for an ExtensionInfo + // in legacy generated code. + // + // extensionInfoDescInit: The desc field is set, but other unexported fields + // may not be initialized. Legacy exported fields may or may not be set. + // This is the starting state for an ExtensionInfo in newly generated code. 
+ // + // extensionInfoFullInit: The ExtensionInfo is fully initialized. + // This state is only entered after lazy initialization is complete. + init uint32 + mu sync.Mutex + + goType reflect.Type + desc extensionTypeDescriptor + conv Converter + info *extensionFieldInfo // for fast-path method implementations + + // ExtendedType is a typed nil-pointer to the parent message type that + // is being extended. It is possible for this to be unpopulated in v2 + // since the message may no longer implement the MessageV1 interface. + // + // Deprecated: Use the ExtendedType method instead. + ExtendedType piface.MessageV1 + + // ExtensionType is the zero value of the extension type. + // + // For historical reasons, reflect.TypeOf(ExtensionType) and the + // type returned by InterfaceOf may not be identical. + // + // Deprecated: Use InterfaceOf(xt.Zero()) instead. + ExtensionType interface{} + + // Field is the field number of the extension. + // + // Deprecated: Use the Descriptor().Number method instead. + Field int32 + + // Name is the fully qualified name of extension. + // + // Deprecated: Use the Descriptor().FullName method instead. + Name string + + // Tag is the protobuf struct tag used in the v1 API. + // + // Deprecated: Do not use. + Tag string + + // Filename is the proto filename in which the extension is defined. + // + // Deprecated: Use Descriptor().ParentFile().Path() instead. + Filename string +} + +// Stages of initialization: See the ExtensionInfo.init field. +const ( + extensionInfoUninitialized = 0 + extensionInfoDescInit = 1 + extensionInfoFullInit = 2 +) + +func InitExtensionInfo(xi *ExtensionInfo, xd pref.ExtensionDescriptor, goType reflect.Type) { + xi.goType = goType + xi.desc = extensionTypeDescriptor{xd, xi} + xi.init = extensionInfoDescInit +} + +func (xi *ExtensionInfo) New() pref.Value { + return xi.lazyInit().New() +} +func (xi *ExtensionInfo) Zero() pref.Value { + return xi.lazyInit().Zero() +} +func (xi *ExtensionInfo) ValueOf(v interface{}) pref.Value { + return xi.lazyInit().PBValueOf(reflect.ValueOf(v)) +} +func (xi *ExtensionInfo) InterfaceOf(v pref.Value) interface{} { + return xi.lazyInit().GoValueOf(v).Interface() +} +func (xi *ExtensionInfo) IsValidValue(v pref.Value) bool { + return xi.lazyInit().IsValidPB(v) +} +func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool { + return xi.lazyInit().IsValidGo(reflect.ValueOf(v)) +} +func (xi *ExtensionInfo) TypeDescriptor() pref.ExtensionTypeDescriptor { + if atomic.LoadUint32(&xi.init) < extensionInfoDescInit { + xi.lazyInitSlow() + } + return &xi.desc +} + +func (xi *ExtensionInfo) lazyInit() Converter { + if atomic.LoadUint32(&xi.init) < extensionInfoFullInit { + xi.lazyInitSlow() + } + return xi.conv +} + +func (xi *ExtensionInfo) lazyInitSlow() { + xi.mu.Lock() + defer xi.mu.Unlock() + + if xi.init == extensionInfoFullInit { + return + } + defer atomic.StoreUint32(&xi.init, extensionInfoFullInit) + + if xi.desc.ExtensionDescriptor == nil { + xi.initFromLegacy() + } + if !xi.desc.ExtensionDescriptor.IsPlaceholder() { + if xi.ExtensionType == nil { + xi.initToLegacy() + } + xi.conv = NewConverter(xi.goType, xi.desc.ExtensionDescriptor) + xi.info = makeExtensionFieldInfo(xi.desc.ExtensionDescriptor) + xi.info.validation = newValidationInfo(xi.desc.ExtensionDescriptor, xi.goType) + } +} + +type extensionTypeDescriptor struct { + pref.ExtensionDescriptor + xi *ExtensionInfo +} + +func (xtd *extensionTypeDescriptor) Type() pref.ExtensionType { + return xtd.xi +} +func (xtd *extensionTypeDescriptor) 
Descriptor() pref.ExtensionDescriptor { + return xtd.ExtensionDescriptor +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go new file mode 100644 index 000000000..f7d7ffb51 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go @@ -0,0 +1,219 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + "strings" + "sync" + + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// legacyEnumName returns the name of enums used in legacy code. +// It is neither the protobuf full name nor the qualified Go name, +// but rather an odd hybrid of both. +func legacyEnumName(ed pref.EnumDescriptor) string { + var protoPkg string + enumName := string(ed.FullName()) + if fd := ed.ParentFile(); fd != nil { + protoPkg = string(fd.Package()) + enumName = strings.TrimPrefix(enumName, protoPkg+".") + } + if protoPkg == "" { + return strs.GoCamelCase(enumName) + } + return protoPkg + "." + strs.GoCamelCase(enumName) +} + +// legacyWrapEnum wraps v as a protoreflect.Enum, +// where v must be a int32 kind and not implement the v2 API already. +func legacyWrapEnum(v reflect.Value) pref.Enum { + et := legacyLoadEnumType(v.Type()) + return et.New(pref.EnumNumber(v.Int())) +} + +var legacyEnumTypeCache sync.Map // map[reflect.Type]protoreflect.EnumType + +// legacyLoadEnumType dynamically loads a protoreflect.EnumType for t, +// where t must be an int32 kind and not implement the v2 API already. +func legacyLoadEnumType(t reflect.Type) pref.EnumType { + // Fast-path: check if a EnumType is cached for this concrete type. + if et, ok := legacyEnumTypeCache.Load(t); ok { + return et.(pref.EnumType) + } + + // Slow-path: derive enum descriptor and initialize EnumType. 
+ var et pref.EnumType + ed := LegacyLoadEnumDesc(t) + et = &legacyEnumType{ + desc: ed, + goType: t, + } + if et, ok := legacyEnumTypeCache.LoadOrStore(t, et); ok { + return et.(pref.EnumType) + } + return et +} + +type legacyEnumType struct { + desc pref.EnumDescriptor + goType reflect.Type + m sync.Map // map[protoreflect.EnumNumber]proto.Enum +} + +func (t *legacyEnumType) New(n pref.EnumNumber) pref.Enum { + if e, ok := t.m.Load(n); ok { + return e.(pref.Enum) + } + e := &legacyEnumWrapper{num: n, pbTyp: t, goTyp: t.goType} + t.m.Store(n, e) + return e +} +func (t *legacyEnumType) Descriptor() pref.EnumDescriptor { + return t.desc +} + +type legacyEnumWrapper struct { + num pref.EnumNumber + pbTyp pref.EnumType + goTyp reflect.Type +} + +func (e *legacyEnumWrapper) Descriptor() pref.EnumDescriptor { + return e.pbTyp.Descriptor() +} +func (e *legacyEnumWrapper) Type() pref.EnumType { + return e.pbTyp +} +func (e *legacyEnumWrapper) Number() pref.EnumNumber { + return e.num +} +func (e *legacyEnumWrapper) ProtoReflect() pref.Enum { + return e +} +func (e *legacyEnumWrapper) protoUnwrap() interface{} { + v := reflect.New(e.goTyp).Elem() + v.SetInt(int64(e.num)) + return v.Interface() +} + +var ( + _ pref.Enum = (*legacyEnumWrapper)(nil) + _ unwrapper = (*legacyEnumWrapper)(nil) +) + +var legacyEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor + +// LegacyLoadEnumDesc returns an EnumDescriptor derived from the Go type, +// which must be an int32 kind and not implement the v2 API already. +// +// This is exported for testing purposes. +func LegacyLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { + // Fast-path: check if an EnumDescriptor is cached for this concrete type. + if ed, ok := legacyEnumDescCache.Load(t); ok { + return ed.(pref.EnumDescriptor) + } + + // Slow-path: initialize EnumDescriptor from the raw descriptor. + ev := reflect.Zero(t).Interface() + if _, ok := ev.(pref.Enum); ok { + panic(fmt.Sprintf("%v already implements proto.Enum", t)) + } + edV1, ok := ev.(enumV1) + if !ok { + return aberrantLoadEnumDesc(t) + } + b, idxs := edV1.EnumDescriptor() + + var ed pref.EnumDescriptor + if len(idxs) == 1 { + ed = legacyLoadFileDesc(b).Enums().Get(idxs[0]) + } else { + md := legacyLoadFileDesc(b).Messages().Get(idxs[0]) + for _, i := range idxs[1 : len(idxs)-1] { + md = md.Messages().Get(i) + } + ed = md.Enums().Get(idxs[len(idxs)-1]) + } + if ed, ok := legacyEnumDescCache.LoadOrStore(t, ed); ok { + return ed.(protoreflect.EnumDescriptor) + } + return ed +} + +var aberrantEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor + +// aberrantLoadEnumDesc returns an EnumDescriptor derived from the Go type, +// which must not implement protoreflect.Enum or enumV1. +// +// If the type does not implement enumV1, then there is no reliable +// way to derive the original protobuf type information. +// We are unable to use the global enum registry since it is +// unfortunately keyed by the protobuf full name, which we also do not know. +// Thus, this produces some bogus enum descriptor based on the Go type name. +func aberrantLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { + // Fast-path: check if an EnumDescriptor is cached for this concrete type. + if ed, ok := aberrantEnumDescCache.Load(t); ok { + return ed.(pref.EnumDescriptor) + } + + // Slow-path: construct a bogus, but unique EnumDescriptor. 
+ ed := &filedesc.Enum{L2: new(filedesc.EnumL2)} + ed.L0.FullName = AberrantDeriveFullName(t) // e.g., github_com.user.repo.MyEnum + ed.L0.ParentFile = filedesc.SurrogateProto3 + ed.L2.Values.List = append(ed.L2.Values.List, filedesc.EnumValue{}) + + // TODO: Use the presence of a UnmarshalJSON method to determine proto2? + + vd := &ed.L2.Values.List[0] + vd.L0.FullName = ed.L0.FullName + "_UNKNOWN" // e.g., github_com.user.repo.MyEnum_UNKNOWN + vd.L0.ParentFile = ed.L0.ParentFile + vd.L0.Parent = ed + + // TODO: We could use the String method to obtain some enum value names by + // starting at 0 and print the enum until it produces invalid identifiers. + // An exhaustive query is clearly impractical, but can be best-effort. + + if ed, ok := aberrantEnumDescCache.LoadOrStore(t, ed); ok { + return ed.(pref.EnumDescriptor) + } + return ed +} + +// AberrantDeriveFullName derives a fully qualified protobuf name for the given Go type +// The provided name is not guaranteed to be stable nor universally unique. +// It should be sufficiently unique within a program. +// +// This is exported for testing purposes. +func AberrantDeriveFullName(t reflect.Type) pref.FullName { + sanitize := func(r rune) rune { + switch { + case r == '/': + return '.' + case 'a' <= r && r <= 'z', 'A' <= r && r <= 'Z', '0' <= r && r <= '9': + return r + default: + return '_' + } + } + prefix := strings.Map(sanitize, t.PkgPath()) + suffix := strings.Map(sanitize, t.Name()) + if suffix == "" { + suffix = fmt.Sprintf("UnknownX%X", reflect.ValueOf(t).Pointer()) + } + + ss := append(strings.Split(prefix, "."), suffix) + for i, s := range ss { + if s == "" || ('0' <= s[0] && s[0] <= '9') { + ss[i] = "x" + s + } + } + return pref.FullName(strings.Join(ss, ".")) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go new file mode 100644 index 000000000..e3fb0b578 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go @@ -0,0 +1,92 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "encoding/binary" + "encoding/json" + "hash/crc32" + "math" + "reflect" + + "google.golang.org/protobuf/internal/errors" + pref "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// These functions exist to support exported APIs in generated protobufs. +// While these are deprecated, they cannot be removed for compatibility reasons. + +// LegacyEnumName returns the name of enums used in legacy code. +func (Export) LegacyEnumName(ed pref.EnumDescriptor) string { + return legacyEnumName(ed) +} + +// LegacyMessageTypeOf returns the protoreflect.MessageType for m, +// with name used as the message name if necessary. +func (Export) LegacyMessageTypeOf(m piface.MessageV1, name pref.FullName) pref.MessageType { + if mv := (Export{}).protoMessageV2Of(m); mv != nil { + return mv.ProtoReflect().Type() + } + return legacyLoadMessageType(reflect.TypeOf(m), name) +} + +// UnmarshalJSONEnum unmarshals an enum from a JSON-encoded input. +// The input can either be a string representing the enum value by name, +// or a number representing the enum number itself. 
+func (Export) UnmarshalJSONEnum(ed pref.EnumDescriptor, b []byte) (pref.EnumNumber, error) { + if b[0] == '"' { + var name pref.Name + if err := json.Unmarshal(b, &name); err != nil { + return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b) + } + ev := ed.Values().ByName(name) + if ev == nil { + return 0, errors.New("invalid value for enum %v: %s", ed.FullName(), name) + } + return ev.Number(), nil + } else { + var num pref.EnumNumber + if err := json.Unmarshal(b, &num); err != nil { + return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b) + } + return num, nil + } +} + +// CompressGZIP compresses the input as a GZIP-encoded file. +// The current implementation does no compression. +func (Export) CompressGZIP(in []byte) (out []byte) { + // RFC 1952, section 2.3.1. + var gzipHeader = [10]byte{0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff} + + // RFC 1951, section 3.2.4. + var blockHeader [5]byte + const maxBlockSize = math.MaxUint16 + numBlocks := 1 + len(in)/maxBlockSize + + // RFC 1952, section 2.3.1. + var gzipFooter [8]byte + binary.LittleEndian.PutUint32(gzipFooter[0:4], crc32.ChecksumIEEE(in)) + binary.LittleEndian.PutUint32(gzipFooter[4:8], uint32(len(in))) + + // Encode the input without compression using raw DEFLATE blocks. + out = make([]byte, 0, len(gzipHeader)+len(blockHeader)*numBlocks+len(in)+len(gzipFooter)) + out = append(out, gzipHeader[:]...) + for blockHeader[0] == 0 { + blockSize := maxBlockSize + if blockSize > len(in) { + blockHeader[0] = 0x01 // final bit per RFC 1951, section 3.2.3. + blockSize = len(in) + } + binary.LittleEndian.PutUint16(blockHeader[1:3], uint16(blockSize)^0x0000) + binary.LittleEndian.PutUint16(blockHeader[3:5], uint16(blockSize)^0xffff) + out = append(out, blockHeader[:]...) + out = append(out, in[:blockSize]...) + in = in[blockSize:] + } + out = append(out, gzipFooter[:]...) + return out +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go new file mode 100644 index 000000000..49e723161 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -0,0 +1,176 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "reflect" + + "google.golang.org/protobuf/internal/descopts" + "google.golang.org/protobuf/internal/encoding/messageset" + ptag "google.golang.org/protobuf/internal/encoding/tag" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/pragma" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +func (xi *ExtensionInfo) initToLegacy() { + xd := xi.desc + var parent piface.MessageV1 + messageName := xd.ContainingMessage().FullName() + if mt, _ := preg.GlobalTypes.FindMessageByName(messageName); mt != nil { + // Create a new parent message and unwrap it if possible. + mv := mt.New().Interface() + t := reflect.TypeOf(mv) + if mv, ok := mv.(unwrapper); ok { + t = reflect.TypeOf(mv.protoUnwrap()) + } + + // Check whether the message implements the legacy v1 Message interface. 
+ mz := reflect.Zero(t).Interface() + if mz, ok := mz.(piface.MessageV1); ok { + parent = mz + } + } + + // Determine the v1 extension type, which is unfortunately not the same as + // the v2 ExtensionType.GoType. + extType := xi.goType + switch extType.Kind() { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + extType = reflect.PtrTo(extType) // T -> *T for singular scalar fields + } + + // Reconstruct the legacy enum full name. + var enumName string + if xd.Kind() == pref.EnumKind { + enumName = legacyEnumName(xd.Enum()) + } + + // Derive the proto file that the extension was declared within. + var filename string + if fd := xd.ParentFile(); fd != nil { + filename = fd.Path() + } + + // For MessageSet extensions, the name used is the parent message. + name := xd.FullName() + if messageset.IsMessageSetExtension(xd) { + name = name.Parent() + } + + xi.ExtendedType = parent + xi.ExtensionType = reflect.Zero(extType).Interface() + xi.Field = int32(xd.Number()) + xi.Name = string(name) + xi.Tag = ptag.Marshal(xd, enumName) + xi.Filename = filename +} + +// initFromLegacy initializes an ExtensionInfo from +// the contents of the deprecated exported fields of the type. +func (xi *ExtensionInfo) initFromLegacy() { + // The v1 API returns "type incomplete" descriptors where only the + // field number is specified. In such a case, use a placeholder. + if xi.ExtendedType == nil || xi.ExtensionType == nil { + xd := placeholderExtension{ + name: pref.FullName(xi.Name), + number: pref.FieldNumber(xi.Field), + } + xi.desc = extensionTypeDescriptor{xd, xi} + return + } + + // Resolve enum or message dependencies. + var ed pref.EnumDescriptor + var md pref.MessageDescriptor + t := reflect.TypeOf(xi.ExtensionType) + isOptional := t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct + isRepeated := t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 + if isOptional || isRepeated { + t = t.Elem() + } + switch v := reflect.Zero(t).Interface().(type) { + case pref.Enum: + ed = v.Descriptor() + case enumV1: + ed = LegacyLoadEnumDesc(t) + case pref.ProtoMessage: + md = v.ProtoReflect().Descriptor() + case messageV1: + md = LegacyLoadMessageDesc(t) + } + + // Derive basic field information from the struct tag. + var evs pref.EnumValueDescriptors + if ed != nil { + evs = ed.Values() + } + fd := ptag.Unmarshal(xi.Tag, t, evs).(*filedesc.Field) + + // Construct a v2 ExtensionType. + xd := &filedesc.Extension{L2: new(filedesc.ExtensionL2)} + xd.L0.ParentFile = filedesc.SurrogateProto2 + xd.L0.FullName = pref.FullName(xi.Name) + xd.L1.Number = pref.FieldNumber(xi.Field) + xd.L1.Cardinality = fd.L1.Cardinality + xd.L1.Kind = fd.L1.Kind + xd.L2.IsPacked = fd.L1.IsPacked + xd.L2.Default = fd.L1.Default + xd.L1.Extendee = Export{}.MessageDescriptorOf(xi.ExtendedType) + xd.L2.Enum = ed + xd.L2.Message = md + + // Derive real extension field name for MessageSets. 
+ if messageset.IsMessageSet(xd.L1.Extendee) && md.FullName() == xd.L0.FullName { + xd.L0.FullName = xd.L0.FullName.Append(messageset.ExtensionName) + } + + tt := reflect.TypeOf(xi.ExtensionType) + if isOptional { + tt = tt.Elem() + } + xi.goType = tt + xi.desc = extensionTypeDescriptor{xd, xi} +} + +type placeholderExtension struct { + name pref.FullName + number pref.FieldNumber +} + +func (x placeholderExtension) ParentFile() pref.FileDescriptor { return nil } +func (x placeholderExtension) Parent() pref.Descriptor { return nil } +func (x placeholderExtension) Index() int { return 0 } +func (x placeholderExtension) Syntax() pref.Syntax { return 0 } +func (x placeholderExtension) Name() pref.Name { return x.name.Name() } +func (x placeholderExtension) FullName() pref.FullName { return x.name } +func (x placeholderExtension) IsPlaceholder() bool { return true } +func (x placeholderExtension) Options() pref.ProtoMessage { return descopts.Field } +func (x placeholderExtension) Number() pref.FieldNumber { return x.number } +func (x placeholderExtension) Cardinality() pref.Cardinality { return 0 } +func (x placeholderExtension) Kind() pref.Kind { return 0 } +func (x placeholderExtension) HasJSONName() bool { return false } +func (x placeholderExtension) JSONName() string { return "[" + string(x.name) + "]" } +func (x placeholderExtension) TextName() string { return "[" + string(x.name) + "]" } +func (x placeholderExtension) HasPresence() bool { return false } +func (x placeholderExtension) HasOptionalKeyword() bool { return false } +func (x placeholderExtension) IsExtension() bool { return true } +func (x placeholderExtension) IsWeak() bool { return false } +func (x placeholderExtension) IsPacked() bool { return false } +func (x placeholderExtension) IsList() bool { return false } +func (x placeholderExtension) IsMap() bool { return false } +func (x placeholderExtension) MapKey() pref.FieldDescriptor { return nil } +func (x placeholderExtension) MapValue() pref.FieldDescriptor { return nil } +func (x placeholderExtension) HasDefault() bool { return false } +func (x placeholderExtension) Default() pref.Value { return pref.Value{} } +func (x placeholderExtension) DefaultEnumValue() pref.EnumValueDescriptor { return nil } +func (x placeholderExtension) ContainingOneof() pref.OneofDescriptor { return nil } +func (x placeholderExtension) ContainingMessage() pref.MessageDescriptor { return nil } +func (x placeholderExtension) Enum() pref.EnumDescriptor { return nil } +func (x placeholderExtension) Message() pref.MessageDescriptor { return nil } +func (x placeholderExtension) ProtoType(pref.FieldDescriptor) { return } +func (x placeholderExtension) ProtoInternal(pragma.DoNotImplement) { return } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go new file mode 100644 index 000000000..9ab091086 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go @@ -0,0 +1,81 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impl + +import ( + "bytes" + "compress/gzip" + "io/ioutil" + "sync" + + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// Every enum and message type generated by protoc-gen-go since commit 2fc053c5 +// on February 25th, 2016 has had a method to get the raw descriptor. +// Types that were not generated by protoc-gen-go or were generated prior +// to that version are not supported. +// +// The []byte returned is the encoded form of a FileDescriptorProto message +// compressed using GZIP. The []int is the path from the top-level file +// to the specific message or enum declaration. +type ( + enumV1 interface { + EnumDescriptor() ([]byte, []int) + } + messageV1 interface { + Descriptor() ([]byte, []int) + } +) + +var legacyFileDescCache sync.Map // map[*byte]protoreflect.FileDescriptor + +// legacyLoadFileDesc unmarshals b as a compressed FileDescriptorProto message. +// +// This assumes that b is immutable and that b does not refer to part of a +// concatenated series of GZIP files (which would require shenanigans that +// rely on the concatenation properties of both protobufs and GZIP). +// File descriptors generated by protoc-gen-go do not rely on that property. +func legacyLoadFileDesc(b []byte) protoreflect.FileDescriptor { + // Fast-path: check whether we already have a cached file descriptor. + if fd, ok := legacyFileDescCache.Load(&b[0]); ok { + return fd.(protoreflect.FileDescriptor) + } + + // Slow-path: decompress and unmarshal the file descriptor proto. + zr, err := gzip.NewReader(bytes.NewReader(b)) + if err != nil { + panic(err) + } + b2, err := ioutil.ReadAll(zr) + if err != nil { + panic(err) + } + + fd := filedesc.Builder{ + RawDescriptor: b2, + FileRegistry: resolverOnly{protoregistry.GlobalFiles}, // do not register back to global registry + }.Build().File + if fd, ok := legacyFileDescCache.LoadOrStore(&b[0], fd); ok { + return fd.(protoreflect.FileDescriptor) + } + return fd +} + +type resolverOnly struct { + reg *protoregistry.Files +} + +func (r resolverOnly) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { + return r.reg.FindFileByPath(path) +} +func (r resolverOnly) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) { + return r.reg.FindDescriptorByName(name) +} +func (resolverOnly) RegisterFile(protoreflect.FileDescriptor) error { + return nil +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go new file mode 100644 index 000000000..029feeefd --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -0,0 +1,565 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impl + +import ( + "fmt" + "reflect" + "strings" + "sync" + + "google.golang.org/protobuf/internal/descopts" + ptag "google.golang.org/protobuf/internal/encoding/tag" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" + pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// legacyWrapMessage wraps v as a protoreflect.Message, +// where v must be a *struct kind and not implement the v2 API already. +func legacyWrapMessage(v reflect.Value) pref.Message { + t := v.Type() + if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { + return aberrantMessage{v: v} + } + mt := legacyLoadMessageInfo(t, "") + return mt.MessageOf(v.Interface()) +} + +// legacyLoadMessageType dynamically loads a protoreflect.Type for t, +// where t must be not implement the v2 API already. +// The provided name is used if it cannot be determined from the message. +func legacyLoadMessageType(t reflect.Type, name pref.FullName) protoreflect.MessageType { + if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { + return aberrantMessageType{t} + } + return legacyLoadMessageInfo(t, name) +} + +var legacyMessageTypeCache sync.Map // map[reflect.Type]*MessageInfo + +// legacyLoadMessageInfo dynamically loads a *MessageInfo for t, +// where t must be a *struct kind and not implement the v2 API already. +// The provided name is used if it cannot be determined from the message. +func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { + // Fast-path: check if a MessageInfo is cached for this concrete type. + if mt, ok := legacyMessageTypeCache.Load(t); ok { + return mt.(*MessageInfo) + } + + // Slow-path: derive message descriptor and initialize MessageInfo. + mi := &MessageInfo{ + Desc: legacyLoadMessageDesc(t, name), + GoReflectType: t, + } + + var hasMarshal, hasUnmarshal bool + v := reflect.Zero(t).Interface() + if _, hasMarshal = v.(legacyMarshaler); hasMarshal { + mi.methods.Marshal = legacyMarshal + + // We have no way to tell whether the type's Marshal method + // supports deterministic serialization or not, but this + // preserves the v1 implementation's behavior of always + // calling Marshal methods when present. + mi.methods.Flags |= piface.SupportMarshalDeterministic + } + if _, hasUnmarshal = v.(legacyUnmarshaler); hasUnmarshal { + mi.methods.Unmarshal = legacyUnmarshal + } + if _, hasMerge := v.(legacyMerger); hasMerge || (hasMarshal && hasUnmarshal) { + mi.methods.Merge = legacyMerge + } + + if mi, ok := legacyMessageTypeCache.LoadOrStore(t, mi); ok { + return mi.(*MessageInfo) + } + return mi +} + +var legacyMessageDescCache sync.Map // map[reflect.Type]protoreflect.MessageDescriptor + +// LegacyLoadMessageDesc returns an MessageDescriptor derived from the Go type, +// which should be a *struct kind and must not implement the v2 API already. +// +// This is exported for testing purposes. +func LegacyLoadMessageDesc(t reflect.Type) pref.MessageDescriptor { + return legacyLoadMessageDesc(t, "") +} +func legacyLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescriptor { + // Fast-path: check if a MessageDescriptor is cached for this concrete type. 
+ if mi, ok := legacyMessageDescCache.Load(t); ok { + return mi.(pref.MessageDescriptor) + } + + // Slow-path: initialize MessageDescriptor from the raw descriptor. + mv := reflect.Zero(t).Interface() + if _, ok := mv.(pref.ProtoMessage); ok { + panic(fmt.Sprintf("%v already implements proto.Message", t)) + } + mdV1, ok := mv.(messageV1) + if !ok { + return aberrantLoadMessageDesc(t, name) + } + + // If this is a dynamic message type where there isn't a 1-1 mapping between + // Go and protobuf types, calling the Descriptor method on the zero value of + // the message type isn't likely to work. If it panics, swallow the panic and + // continue as if the Descriptor method wasn't present. + b, idxs := func() ([]byte, []int) { + defer func() { + recover() + }() + return mdV1.Descriptor() + }() + if b == nil { + return aberrantLoadMessageDesc(t, name) + } + + // If the Go type has no fields, then this might be a proto3 empty message + // from before the size cache was added. If there are any fields, check to + // see that at least one of them looks like something we generated. + if t.Elem().Kind() == reflect.Struct { + if nfield := t.Elem().NumField(); nfield > 0 { + hasProtoField := false + for i := 0; i < nfield; i++ { + f := t.Elem().Field(i) + if f.Tag.Get("protobuf") != "" || f.Tag.Get("protobuf_oneof") != "" || strings.HasPrefix(f.Name, "XXX_") { + hasProtoField = true + break + } + } + if !hasProtoField { + return aberrantLoadMessageDesc(t, name) + } + } + } + + md := legacyLoadFileDesc(b).Messages().Get(idxs[0]) + for _, i := range idxs[1:] { + md = md.Messages().Get(i) + } + if name != "" && md.FullName() != name { + panic(fmt.Sprintf("mismatching message name: got %v, want %v", md.FullName(), name)) + } + if md, ok := legacyMessageDescCache.LoadOrStore(t, md); ok { + return md.(protoreflect.MessageDescriptor) + } + return md +} + +var ( + aberrantMessageDescLock sync.Mutex + aberrantMessageDescCache map[reflect.Type]protoreflect.MessageDescriptor +) + +// aberrantLoadMessageDesc returns an MessageDescriptor derived from the Go type, +// which must not implement protoreflect.ProtoMessage or messageV1. +// +// This is a best-effort derivation of the message descriptor using the protobuf +// tags on the struct fields. +func aberrantLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescriptor { + aberrantMessageDescLock.Lock() + defer aberrantMessageDescLock.Unlock() + if aberrantMessageDescCache == nil { + aberrantMessageDescCache = make(map[reflect.Type]protoreflect.MessageDescriptor) + } + return aberrantLoadMessageDescReentrant(t, name) +} +func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.MessageDescriptor { + // Fast-path: check if an MessageDescriptor is cached for this concrete type. + if md, ok := aberrantMessageDescCache[t]; ok { + return md + } + + // Slow-path: construct a descriptor from the Go struct type (best-effort). + // Cache the MessageDescriptor early on so that we can resolve internal + // cyclic references. + md := &filedesc.Message{L2: new(filedesc.MessageL2)} + md.L0.FullName = aberrantDeriveMessageName(t, name) + md.L0.ParentFile = filedesc.SurrogateProto2 + aberrantMessageDescCache[t] = md + + if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { + return md + } + + // Try to determine if the message is using proto3 by checking scalars. 
+ for i := 0; i < t.Elem().NumField(); i++ { + f := t.Elem().Field(i) + if tag := f.Tag.Get("protobuf"); tag != "" { + switch f.Type.Kind() { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + md.L0.ParentFile = filedesc.SurrogateProto3 + } + for _, s := range strings.Split(tag, ",") { + if s == "proto3" { + md.L0.ParentFile = filedesc.SurrogateProto3 + } + } + } + } + + // Obtain a list of oneof wrapper types. + var oneofWrappers []reflect.Type + for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} { + if fn, ok := t.MethodByName(method); ok { + for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { + if vs, ok := v.Interface().([]interface{}); ok { + for _, v := range vs { + oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) + } + } + } + } + } + + // Obtain a list of the extension ranges. + if fn, ok := t.MethodByName("ExtensionRangeArray"); ok { + vs := fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0] + for i := 0; i < vs.Len(); i++ { + v := vs.Index(i) + md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, [2]pref.FieldNumber{ + pref.FieldNumber(v.FieldByName("Start").Int()), + pref.FieldNumber(v.FieldByName("End").Int() + 1), + }) + md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, nil) + } + } + + // Derive the message fields by inspecting the struct fields. + for i := 0; i < t.Elem().NumField(); i++ { + f := t.Elem().Field(i) + if tag := f.Tag.Get("protobuf"); tag != "" { + tagKey := f.Tag.Get("protobuf_key") + tagVal := f.Tag.Get("protobuf_val") + aberrantAppendField(md, f.Type, tag, tagKey, tagVal) + } + if tag := f.Tag.Get("protobuf_oneof"); tag != "" { + n := len(md.L2.Oneofs.List) + md.L2.Oneofs.List = append(md.L2.Oneofs.List, filedesc.Oneof{}) + od := &md.L2.Oneofs.List[n] + od.L0.FullName = md.FullName().Append(pref.Name(tag)) + od.L0.ParentFile = md.L0.ParentFile + od.L0.Parent = md + od.L0.Index = n + + for _, t := range oneofWrappers { + if t.Implements(f.Type) { + f := t.Elem().Field(0) + if tag := f.Tag.Get("protobuf"); tag != "" { + aberrantAppendField(md, f.Type, tag, "", "") + fd := &md.L2.Fields.List[len(md.L2.Fields.List)-1] + fd.L1.ContainingOneof = od + od.L1.Fields.List = append(od.L1.Fields.List, fd) + } + } + } + } + } + + return md +} + +func aberrantDeriveMessageName(t reflect.Type, name pref.FullName) pref.FullName { + if name.IsValid() { + return name + } + func() { + defer func() { recover() }() // swallow possible nil panics + if m, ok := reflect.Zero(t).Interface().(interface{ XXX_MessageName() string }); ok { + name = pref.FullName(m.XXX_MessageName()) + } + }() + if name.IsValid() { + return name + } + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + return AberrantDeriveFullName(t) +} + +func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, tagVal string) { + t := goType + isOptional := t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct + isRepeated := t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 + if isOptional || isRepeated { + t = t.Elem() + } + fd := ptag.Unmarshal(tag, t, placeholderEnumValues{}).(*filedesc.Field) + + // Append field descriptor to the message. 
+ n := len(md.L2.Fields.List) + md.L2.Fields.List = append(md.L2.Fields.List, *fd) + fd = &md.L2.Fields.List[n] + fd.L0.FullName = md.FullName().Append(fd.Name()) + fd.L0.ParentFile = md.L0.ParentFile + fd.L0.Parent = md + fd.L0.Index = n + + if fd.L1.IsWeak || fd.L1.HasPacked { + fd.L1.Options = func() pref.ProtoMessage { + opts := descopts.Field.ProtoReflect().New() + if fd.L1.IsWeak { + opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true)) + } + if fd.L1.HasPacked { + opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.IsPacked)) + } + return opts.Interface() + } + } + + // Populate Enum and Message. + if fd.Enum() == nil && fd.Kind() == pref.EnumKind { + switch v := reflect.Zero(t).Interface().(type) { + case pref.Enum: + fd.L1.Enum = v.Descriptor() + default: + fd.L1.Enum = LegacyLoadEnumDesc(t) + } + } + if fd.Message() == nil && (fd.Kind() == pref.MessageKind || fd.Kind() == pref.GroupKind) { + switch v := reflect.Zero(t).Interface().(type) { + case pref.ProtoMessage: + fd.L1.Message = v.ProtoReflect().Descriptor() + case messageV1: + fd.L1.Message = LegacyLoadMessageDesc(t) + default: + if t.Kind() == reflect.Map { + n := len(md.L1.Messages.List) + md.L1.Messages.List = append(md.L1.Messages.List, filedesc.Message{L2: new(filedesc.MessageL2)}) + md2 := &md.L1.Messages.List[n] + md2.L0.FullName = md.FullName().Append(pref.Name(strs.MapEntryName(string(fd.Name())))) + md2.L0.ParentFile = md.L0.ParentFile + md2.L0.Parent = md + md2.L0.Index = n + + md2.L1.IsMapEntry = true + md2.L2.Options = func() pref.ProtoMessage { + opts := descopts.Message.ProtoReflect().New() + opts.Set(opts.Descriptor().Fields().ByName("map_entry"), protoreflect.ValueOfBool(true)) + return opts.Interface() + } + + aberrantAppendField(md2, t.Key(), tagKey, "", "") + aberrantAppendField(md2, t.Elem(), tagVal, "", "") + + fd.L1.Message = md2 + break + } + fd.L1.Message = aberrantLoadMessageDescReentrant(t, "") + } + } +} + +type placeholderEnumValues struct { + protoreflect.EnumValueDescriptors +} + +func (placeholderEnumValues) ByNumber(n pref.EnumNumber) pref.EnumValueDescriptor { + return filedesc.PlaceholderEnumValue(pref.FullName(fmt.Sprintf("UNKNOWN_%d", n))) +} + +// legacyMarshaler is the proto.Marshaler interface superseded by protoiface.Methoder. +type legacyMarshaler interface { + Marshal() ([]byte, error) +} + +// legacyUnmarshaler is the proto.Unmarshaler interface superseded by protoiface.Methoder. +type legacyUnmarshaler interface { + Unmarshal([]byte) error +} + +// legacyMerger is the proto.Merger interface superseded by protoiface.Methoder. +type legacyMerger interface { + Merge(protoiface.MessageV1) +} + +var aberrantProtoMethods = &piface.Methods{ + Marshal: legacyMarshal, + Unmarshal: legacyUnmarshal, + Merge: legacyMerge, + + // We have no way to tell whether the type's Marshal method + // supports deterministic serialization or not, but this + // preserves the v1 implementation's behavior of always + // calling Marshal methods when present. + Flags: piface.SupportMarshalDeterministic, +} + +func legacyMarshal(in piface.MarshalInput) (piface.MarshalOutput, error) { + v := in.Message.(unwrapper).protoUnwrap() + marshaler, ok := v.(legacyMarshaler) + if !ok { + return piface.MarshalOutput{}, errors.New("%T does not implement Marshal", v) + } + out, err := marshaler.Marshal() + if in.Buf != nil { + out = append(in.Buf, out...) 
+ } + return piface.MarshalOutput{ + Buf: out, + }, err +} + +func legacyUnmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) { + v := in.Message.(unwrapper).protoUnwrap() + unmarshaler, ok := v.(legacyUnmarshaler) + if !ok { + return piface.UnmarshalOutput{}, errors.New("%T does not implement Unmarshal", v) + } + return piface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf) +} + +func legacyMerge(in piface.MergeInput) piface.MergeOutput { + // Check whether this supports the legacy merger. + dstv := in.Destination.(unwrapper).protoUnwrap() + merger, ok := dstv.(legacyMerger) + if ok { + merger.Merge(Export{}.ProtoMessageV1Of(in.Source)) + return piface.MergeOutput{Flags: piface.MergeComplete} + } + + // If legacy merger is unavailable, implement merge in terms of + // a marshal and unmarshal operation. + srcv := in.Source.(unwrapper).protoUnwrap() + marshaler, ok := srcv.(legacyMarshaler) + if !ok { + return piface.MergeOutput{} + } + dstv = in.Destination.(unwrapper).protoUnwrap() + unmarshaler, ok := dstv.(legacyUnmarshaler) + if !ok { + return piface.MergeOutput{} + } + if !in.Source.IsValid() { + // Legacy Marshal methods may not function on nil messages. + // Check for a typed nil source only after we confirm that + // legacy Marshal/Unmarshal methods are present, for + // consistency. + return piface.MergeOutput{Flags: piface.MergeComplete} + } + b, err := marshaler.Marshal() + if err != nil { + return piface.MergeOutput{} + } + err = unmarshaler.Unmarshal(b) + if err != nil { + return piface.MergeOutput{} + } + return piface.MergeOutput{Flags: piface.MergeComplete} +} + +// aberrantMessageType implements MessageType for all types other than pointer-to-struct. +type aberrantMessageType struct { + t reflect.Type +} + +func (mt aberrantMessageType) New() pref.Message { + if mt.t.Kind() == reflect.Ptr { + return aberrantMessage{reflect.New(mt.t.Elem())} + } + return aberrantMessage{reflect.Zero(mt.t)} +} +func (mt aberrantMessageType) Zero() pref.Message { + return aberrantMessage{reflect.Zero(mt.t)} +} +func (mt aberrantMessageType) GoType() reflect.Type { + return mt.t +} +func (mt aberrantMessageType) Descriptor() pref.MessageDescriptor { + return LegacyLoadMessageDesc(mt.t) +} + +// aberrantMessage implements Message for all types other than pointer-to-struct. +// +// When the underlying type implements legacyMarshaler or legacyUnmarshaler, +// the aberrant Message can be marshaled or unmarshaled. Otherwise, there is +// not much that can be done with values of this type. +type aberrantMessage struct { + v reflect.Value +} + +// Reset implements the v1 proto.Message.Reset method. 
+func (m aberrantMessage) Reset() { + if mr, ok := m.v.Interface().(interface{ Reset() }); ok { + mr.Reset() + return + } + if m.v.Kind() == reflect.Ptr && !m.v.IsNil() { + m.v.Elem().Set(reflect.Zero(m.v.Type().Elem())) + } +} + +func (m aberrantMessage) ProtoReflect() pref.Message { + return m +} + +func (m aberrantMessage) Descriptor() pref.MessageDescriptor { + return LegacyLoadMessageDesc(m.v.Type()) +} +func (m aberrantMessage) Type() pref.MessageType { + return aberrantMessageType{m.v.Type()} +} +func (m aberrantMessage) New() pref.Message { + if m.v.Type().Kind() == reflect.Ptr { + return aberrantMessage{reflect.New(m.v.Type().Elem())} + } + return aberrantMessage{reflect.Zero(m.v.Type())} +} +func (m aberrantMessage) Interface() pref.ProtoMessage { + return m +} +func (m aberrantMessage) Range(f func(pref.FieldDescriptor, pref.Value) bool) { + return +} +func (m aberrantMessage) Has(pref.FieldDescriptor) bool { + return false +} +func (m aberrantMessage) Clear(pref.FieldDescriptor) { + panic("invalid Message.Clear on " + string(m.Descriptor().FullName())) +} +func (m aberrantMessage) Get(fd pref.FieldDescriptor) pref.Value { + if fd.Default().IsValid() { + return fd.Default() + } + panic("invalid Message.Get on " + string(m.Descriptor().FullName())) +} +func (m aberrantMessage) Set(pref.FieldDescriptor, pref.Value) { + panic("invalid Message.Set on " + string(m.Descriptor().FullName())) +} +func (m aberrantMessage) Mutable(pref.FieldDescriptor) pref.Value { + panic("invalid Message.Mutable on " + string(m.Descriptor().FullName())) +} +func (m aberrantMessage) NewField(pref.FieldDescriptor) pref.Value { + panic("invalid Message.NewField on " + string(m.Descriptor().FullName())) +} +func (m aberrantMessage) WhichOneof(pref.OneofDescriptor) pref.FieldDescriptor { + panic("invalid Message.WhichOneof descriptor on " + string(m.Descriptor().FullName())) +} +func (m aberrantMessage) GetUnknown() pref.RawFields { + return nil +} +func (m aberrantMessage) SetUnknown(pref.RawFields) { + // SetUnknown discards its input on messages which don't support unknown field storage. +} +func (m aberrantMessage) IsValid() bool { + if m.v.Kind() == reflect.Ptr { + return !m.v.IsNil() + } + return false +} +func (m aberrantMessage) ProtoMethods() *piface.Methods { + return aberrantProtoMethods +} +func (m aberrantMessage) protoUnwrap() interface{} { + return m.v.Interface() +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go new file mode 100644 index 000000000..c65bbc044 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go @@ -0,0 +1,176 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +type mergeOptions struct{} + +func (o mergeOptions) Merge(dst, src proto.Message) { + proto.Merge(dst, src) +} + +// merge is protoreflect.Methods.Merge. 
+func (mi *MessageInfo) merge(in piface.MergeInput) piface.MergeOutput { + dp, ok := mi.getPointer(in.Destination) + if !ok { + return piface.MergeOutput{} + } + sp, ok := mi.getPointer(in.Source) + if !ok { + return piface.MergeOutput{} + } + mi.mergePointer(dp, sp, mergeOptions{}) + return piface.MergeOutput{Flags: piface.MergeComplete} +} + +func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { + mi.init() + if dst.IsNil() { + panic(fmt.Sprintf("invalid value: merging into nil message")) + } + if src.IsNil() { + return + } + for _, f := range mi.orderedCoderFields { + if f.funcs.merge == nil { + continue + } + sfptr := src.Apply(f.offset) + if f.isPointer && sfptr.Elem().IsNil() { + continue + } + f.funcs.merge(dst.Apply(f.offset), sfptr, f, opts) + } + if mi.extensionOffset.IsValid() { + sext := src.Apply(mi.extensionOffset).Extensions() + dext := dst.Apply(mi.extensionOffset).Extensions() + if *dext == nil { + *dext = make(map[int32]ExtensionField) + } + for num, sx := range *sext { + xt := sx.Type() + xi := getExtensionFieldInfo(xt) + if xi.funcs.merge == nil { + continue + } + dx := (*dext)[num] + var dv pref.Value + if dx.Type() == sx.Type() { + dv = dx.Value() + } + if !dv.IsValid() && xi.unmarshalNeedsValue { + dv = xt.New() + } + dv = xi.funcs.merge(dv, sx.Value(), opts) + dx.Set(sx.Type(), dv) + (*dext)[num] = dx + } + } + if mi.unknownOffset.IsValid() { + su := mi.getUnknownBytes(src) + if su != nil && len(*su) > 0 { + du := mi.mutableUnknownBytes(dst) + *du = append(*du, *su...) + } + } +} + +func mergeScalarValue(dst, src pref.Value, opts mergeOptions) pref.Value { + return src +} + +func mergeBytesValue(dst, src pref.Value, opts mergeOptions) pref.Value { + return pref.ValueOfBytes(append(emptyBuf[:], src.Bytes()...)) +} + +func mergeListValue(dst, src pref.Value, opts mergeOptions) pref.Value { + dstl := dst.List() + srcl := src.List() + for i, llen := 0, srcl.Len(); i < llen; i++ { + dstl.Append(srcl.Get(i)) + } + return dst +} + +func mergeBytesListValue(dst, src pref.Value, opts mergeOptions) pref.Value { + dstl := dst.List() + srcl := src.List() + for i, llen := 0, srcl.Len(); i < llen; i++ { + sb := srcl.Get(i).Bytes() + db := append(emptyBuf[:], sb...) 
+ dstl.Append(pref.ValueOfBytes(db)) + } + return dst +} + +func mergeMessageListValue(dst, src pref.Value, opts mergeOptions) pref.Value { + dstl := dst.List() + srcl := src.List() + for i, llen := 0, srcl.Len(); i < llen; i++ { + sm := srcl.Get(i).Message() + dm := proto.Clone(sm.Interface()).ProtoReflect() + dstl.Append(pref.ValueOfMessage(dm)) + } + return dst +} + +func mergeMessageValue(dst, src pref.Value, opts mergeOptions) pref.Value { + opts.Merge(dst.Message().Interface(), src.Message().Interface()) + return dst +} + +func mergeMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + if f.mi != nil { + if dst.Elem().IsNil() { + dst.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + f.mi.mergePointer(dst.Elem(), src.Elem(), opts) + } else { + dm := dst.AsValueOf(f.ft).Elem() + sm := src.AsValueOf(f.ft).Elem() + if dm.IsNil() { + dm.Set(reflect.New(f.ft.Elem())) + } + opts.Merge(asMessage(dm), asMessage(sm)) + } +} + +func mergeMessageSlice(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + for _, sp := range src.PointerSlice() { + dm := reflect.New(f.ft.Elem().Elem()) + if f.mi != nil { + f.mi.mergePointer(pointerOfValue(dm), sp, opts) + } else { + opts.Merge(asMessage(dm), asMessage(sp.AsValueOf(f.ft.Elem().Elem()))) + } + dst.AppendPointerSlice(pointerOfValue(dm)) + } +} + +func mergeBytes(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Bytes() = append(emptyBuf[:], *src.Bytes()...) +} + +func mergeBytesNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Bytes() + if len(v) > 0 { + *dst.Bytes() = append(emptyBuf[:], v...) + } +} + +func mergeBytesSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.BytesSlice() + for _, v := range *src.BytesSlice() { + *ds = append(*ds, append(emptyBuf[:], v...)) + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge_gen.go b/vendor/google.golang.org/protobuf/internal/impl/merge_gen.go new file mode 100644 index 000000000..8816c274d --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/merge_gen.go @@ -0,0 +1,209 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package impl + +import () + +func mergeBool(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Bool() = *src.Bool() +} + +func mergeBoolNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Bool() + if v != false { + *dst.Bool() = v + } +} + +func mergeBoolPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.BoolPtr() + if p != nil { + v := *p + *dst.BoolPtr() = &v + } +} + +func mergeBoolSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.BoolSlice() + ss := src.BoolSlice() + *ds = append(*ds, *ss...) +} + +func mergeInt32(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Int32() = *src.Int32() +} + +func mergeInt32NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Int32() + if v != 0 { + *dst.Int32() = v + } +} + +func mergeInt32Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Int32Ptr() + if p != nil { + v := *p + *dst.Int32Ptr() = &v + } +} + +func mergeInt32Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Int32Slice() + ss := src.Int32Slice() + *ds = append(*ds, *ss...) 
+} + +func mergeUint32(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Uint32() = *src.Uint32() +} + +func mergeUint32NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Uint32() + if v != 0 { + *dst.Uint32() = v + } +} + +func mergeUint32Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Uint32Ptr() + if p != nil { + v := *p + *dst.Uint32Ptr() = &v + } +} + +func mergeUint32Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Uint32Slice() + ss := src.Uint32Slice() + *ds = append(*ds, *ss...) +} + +func mergeInt64(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Int64() = *src.Int64() +} + +func mergeInt64NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Int64() + if v != 0 { + *dst.Int64() = v + } +} + +func mergeInt64Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Int64Ptr() + if p != nil { + v := *p + *dst.Int64Ptr() = &v + } +} + +func mergeInt64Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Int64Slice() + ss := src.Int64Slice() + *ds = append(*ds, *ss...) +} + +func mergeUint64(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Uint64() = *src.Uint64() +} + +func mergeUint64NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Uint64() + if v != 0 { + *dst.Uint64() = v + } +} + +func mergeUint64Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Uint64Ptr() + if p != nil { + v := *p + *dst.Uint64Ptr() = &v + } +} + +func mergeUint64Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Uint64Slice() + ss := src.Uint64Slice() + *ds = append(*ds, *ss...) +} + +func mergeFloat32(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Float32() = *src.Float32() +} + +func mergeFloat32NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Float32() + if v != 0 { + *dst.Float32() = v + } +} + +func mergeFloat32Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Float32Ptr() + if p != nil { + v := *p + *dst.Float32Ptr() = &v + } +} + +func mergeFloat32Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Float32Slice() + ss := src.Float32Slice() + *ds = append(*ds, *ss...) +} + +func mergeFloat64(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Float64() = *src.Float64() +} + +func mergeFloat64NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Float64() + if v != 0 { + *dst.Float64() = v + } +} + +func mergeFloat64Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Float64Ptr() + if p != nil { + v := *p + *dst.Float64Ptr() = &v + } +} + +func mergeFloat64Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Float64Slice() + ss := src.Float64Slice() + *ds = append(*ds, *ss...) +} + +func mergeString(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.String() = *src.String() +} + +func mergeStringNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.String() + if v != "" { + *dst.String() = v + } +} + +func mergeStringPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.StringPtr() + if p != nil { + v := *p + *dst.StringPtr() = &v + } +} + +func mergeStringSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.StringSlice() + ss := src.StringSlice() + *ds = append(*ds, *ss...) 
+} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go new file mode 100644 index 000000000..a104e28e8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -0,0 +1,276 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/reflect/protoreflect" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" +) + +// MessageInfo provides protobuf related functionality for a given Go type +// that represents a message. A given instance of MessageInfo is tied to +// exactly one Go type, which must be a pointer to a struct type. +// +// The exported fields must be populated before any methods are called +// and cannot be mutated after set. +type MessageInfo struct { + // GoReflectType is the underlying message Go type and must be populated. + GoReflectType reflect.Type // pointer to struct + + // Desc is the underlying message descriptor type and must be populated. + Desc pref.MessageDescriptor + + // Exporter must be provided in a purego environment in order to provide + // access to unexported fields. + Exporter exporter + + // OneofWrappers is list of pointers to oneof wrapper struct types. + OneofWrappers []interface{} + + initMu sync.Mutex // protects all unexported fields + initDone uint32 + + reflectMessageInfo // for reflection implementation + coderMessageInfo // for fast-path method implementations +} + +// exporter is a function that returns a reference to the ith field of v, +// where v is a pointer to a struct. It returns nil if it does not support +// exporting the requested field (e.g., already exported). +type exporter func(v interface{}, i int) interface{} + +// getMessageInfo returns the MessageInfo for any message type that +// is generated by our implementation of protoc-gen-go (for v2 and on). +// If it is unable to obtain a MessageInfo, it returns nil. +func getMessageInfo(mt reflect.Type) *MessageInfo { + m, ok := reflect.Zero(mt).Interface().(pref.ProtoMessage) + if !ok { + return nil + } + mr, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *MessageInfo }) + if !ok { + return nil + } + return mr.ProtoMessageInfo() +} + +func (mi *MessageInfo) init() { + // This function is called in the hot path. Inline the sync.Once logic, + // since allocating a closure for Once.Do is expensive. + // Keep init small to ensure that it can be inlined. + if atomic.LoadUint32(&mi.initDone) == 0 { + mi.initOnce() + } +} + +func (mi *MessageInfo) initOnce() { + mi.initMu.Lock() + defer mi.initMu.Unlock() + if mi.initDone == 1 { + return + } + + t := mi.GoReflectType + if t.Kind() != reflect.Ptr && t.Elem().Kind() != reflect.Struct { + panic(fmt.Sprintf("got %v, want *struct kind", t)) + } + t = t.Elem() + + si := mi.makeStructInfo(t) + mi.makeReflectFuncs(t, si) + mi.makeCoderMethods(t, si) + + atomic.StoreUint32(&mi.initDone, 1) +} + +// getPointer returns the pointer for a message, which should be of +// the type of the MessageInfo. If the message is of a different type, +// it returns ok==false. 
+func (mi *MessageInfo) getPointer(m pref.Message) (p pointer, ok bool) { + switch m := m.(type) { + case *messageState: + return m.pointer(), m.messageInfo() == mi + case *messageReflectWrapper: + return m.pointer(), m.messageInfo() == mi + } + return pointer{}, false +} + +type ( + SizeCache = int32 + WeakFields = map[int32]protoreflect.ProtoMessage + UnknownFields = unknownFieldsA // TODO: switch to unknownFieldsB + unknownFieldsA = []byte + unknownFieldsB = *[]byte + ExtensionFields = map[int32]ExtensionField +) + +var ( + sizecacheType = reflect.TypeOf(SizeCache(0)) + weakFieldsType = reflect.TypeOf(WeakFields(nil)) + unknownFieldsAType = reflect.TypeOf(unknownFieldsA(nil)) + unknownFieldsBType = reflect.TypeOf(unknownFieldsB(nil)) + extensionFieldsType = reflect.TypeOf(ExtensionFields(nil)) +) + +type structInfo struct { + sizecacheOffset offset + sizecacheType reflect.Type + weakOffset offset + weakType reflect.Type + unknownOffset offset + unknownType reflect.Type + extensionOffset offset + extensionType reflect.Type + + fieldsByNumber map[pref.FieldNumber]reflect.StructField + oneofsByName map[pref.Name]reflect.StructField + oneofWrappersByType map[reflect.Type]pref.FieldNumber + oneofWrappersByNumber map[pref.FieldNumber]reflect.Type +} + +func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { + si := structInfo{ + sizecacheOffset: invalidOffset, + weakOffset: invalidOffset, + unknownOffset: invalidOffset, + extensionOffset: invalidOffset, + + fieldsByNumber: map[pref.FieldNumber]reflect.StructField{}, + oneofsByName: map[pref.Name]reflect.StructField{}, + oneofWrappersByType: map[reflect.Type]pref.FieldNumber{}, + oneofWrappersByNumber: map[pref.FieldNumber]reflect.Type{}, + } + +fieldLoop: + for i := 0; i < t.NumField(); i++ { + switch f := t.Field(i); f.Name { + case genid.SizeCache_goname, genid.SizeCacheA_goname: + if f.Type == sizecacheType { + si.sizecacheOffset = offsetOf(f, mi.Exporter) + si.sizecacheType = f.Type + } + case genid.WeakFields_goname, genid.WeakFieldsA_goname: + if f.Type == weakFieldsType { + si.weakOffset = offsetOf(f, mi.Exporter) + si.weakType = f.Type + } + case genid.UnknownFields_goname, genid.UnknownFieldsA_goname: + if f.Type == unknownFieldsAType || f.Type == unknownFieldsBType { + si.unknownOffset = offsetOf(f, mi.Exporter) + si.unknownType = f.Type + } + case genid.ExtensionFields_goname, genid.ExtensionFieldsA_goname, genid.ExtensionFieldsB_goname: + if f.Type == extensionFieldsType { + si.extensionOffset = offsetOf(f, mi.Exporter) + si.extensionType = f.Type + } + default: + for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { + if len(s) > 0 && strings.Trim(s, "0123456789") == "" { + n, _ := strconv.ParseUint(s, 10, 64) + si.fieldsByNumber[pref.FieldNumber(n)] = f + continue fieldLoop + } + } + if s := f.Tag.Get("protobuf_oneof"); len(s) > 0 { + si.oneofsByName[pref.Name(s)] = f + continue fieldLoop + } + } + } + + // Derive a mapping of oneof wrappers to fields. 
+ oneofWrappers := mi.OneofWrappers + for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} { + if fn, ok := reflect.PtrTo(t).MethodByName(method); ok { + for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { + if vs, ok := v.Interface().([]interface{}); ok { + oneofWrappers = vs + } + } + } + } + for _, v := range oneofWrappers { + tf := reflect.TypeOf(v).Elem() + f := tf.Field(0) + for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { + if len(s) > 0 && strings.Trim(s, "0123456789") == "" { + n, _ := strconv.ParseUint(s, 10, 64) + si.oneofWrappersByType[tf] = pref.FieldNumber(n) + si.oneofWrappersByNumber[pref.FieldNumber(n)] = tf + break + } + } + } + + return si +} + +func (mi *MessageInfo) New() protoreflect.Message { + return mi.MessageOf(reflect.New(mi.GoReflectType.Elem()).Interface()) +} +func (mi *MessageInfo) Zero() protoreflect.Message { + return mi.MessageOf(reflect.Zero(mi.GoReflectType).Interface()) +} +func (mi *MessageInfo) Descriptor() protoreflect.MessageDescriptor { + return mi.Desc +} +func (mi *MessageInfo) Enum(i int) protoreflect.EnumType { + mi.init() + fd := mi.Desc.Fields().Get(i) + return Export{}.EnumTypeOf(mi.fieldTypes[fd.Number()]) +} +func (mi *MessageInfo) Message(i int) protoreflect.MessageType { + mi.init() + fd := mi.Desc.Fields().Get(i) + switch { + case fd.IsWeak(): + mt, _ := preg.GlobalTypes.FindMessageByName(fd.Message().FullName()) + return mt + case fd.IsMap(): + return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]} + default: + return Export{}.MessageTypeOf(mi.fieldTypes[fd.Number()]) + } +} + +type mapEntryType struct { + desc protoreflect.MessageDescriptor + valType interface{} // zero value of enum or message type +} + +func (mt mapEntryType) New() protoreflect.Message { + return nil +} +func (mt mapEntryType) Zero() protoreflect.Message { + return nil +} +func (mt mapEntryType) Descriptor() protoreflect.MessageDescriptor { + return mt.desc +} +func (mt mapEntryType) Enum(i int) protoreflect.EnumType { + fd := mt.desc.Fields().Get(i) + if fd.Enum() == nil { + return nil + } + return Export{}.EnumTypeOf(mt.valType) +} +func (mt mapEntryType) Message(i int) protoreflect.MessageType { + fd := mt.desc.Fields().Get(i) + if fd.Message() == nil { + return nil + } + return Export{}.MessageTypeOf(mt.valType) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go new file mode 100644 index 000000000..9488b7261 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -0,0 +1,465 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + + "google.golang.org/protobuf/internal/detrand" + "google.golang.org/protobuf/internal/pragma" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type reflectMessageInfo struct { + fields map[pref.FieldNumber]*fieldInfo + oneofs map[pref.Name]*oneofInfo + + // fieldTypes contains the zero value of an enum or message field. + // For lists, it contains the element type. + // For maps, it contains the entry value type. + fieldTypes map[pref.FieldNumber]interface{} + + // denseFields is a subset of fields where: + // 0 < fieldDesc.Number() < len(denseFields) + // It provides faster access to the fieldInfo, but may be incomplete. 
+ denseFields []*fieldInfo + + // rangeInfos is a list of all fields (not belonging to a oneof) and oneofs. + rangeInfos []interface{} // either *fieldInfo or *oneofInfo + + getUnknown func(pointer) pref.RawFields + setUnknown func(pointer, pref.RawFields) + extensionMap func(pointer) *extensionMap + + nilMessage atomicNilMessage +} + +// makeReflectFuncs generates the set of functions to support reflection. +func (mi *MessageInfo) makeReflectFuncs(t reflect.Type, si structInfo) { + mi.makeKnownFieldsFunc(si) + mi.makeUnknownFieldsFunc(t, si) + mi.makeExtensionFieldsFunc(t, si) + mi.makeFieldTypes(si) +} + +// makeKnownFieldsFunc generates functions for operations that can be performed +// on each protobuf message field. It takes in a reflect.Type representing the +// Go struct and matches message fields with struct fields. +// +// This code assumes that the struct is well-formed and panics if there are +// any discrepancies. +func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { + mi.fields = map[pref.FieldNumber]*fieldInfo{} + md := mi.Desc + fds := md.Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + fs := si.fieldsByNumber[fd.Number()] + isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() + if isOneof { + fs = si.oneofsByName[fd.ContainingOneof().Name()] + } + var fi fieldInfo + switch { + case fs.Type == nil: + fi = fieldInfoForMissing(fd) // never occurs for officially generated message types + case isOneof: + fi = fieldInfoForOneof(fd, fs, mi.Exporter, si.oneofWrappersByNumber[fd.Number()]) + case fd.IsMap(): + fi = fieldInfoForMap(fd, fs, mi.Exporter) + case fd.IsList(): + fi = fieldInfoForList(fd, fs, mi.Exporter) + case fd.IsWeak(): + fi = fieldInfoForWeakMessage(fd, si.weakOffset) + case fd.Message() != nil: + fi = fieldInfoForMessage(fd, fs, mi.Exporter) + default: + fi = fieldInfoForScalar(fd, fs, mi.Exporter) + } + mi.fields[fd.Number()] = &fi + } + + mi.oneofs = map[pref.Name]*oneofInfo{} + for i := 0; i < md.Oneofs().Len(); i++ { + od := md.Oneofs().Get(i) + mi.oneofs[od.Name()] = makeOneofInfo(od, si, mi.Exporter) + } + + mi.denseFields = make([]*fieldInfo, fds.Len()*2) + for i := 0; i < fds.Len(); i++ { + if fd := fds.Get(i); int(fd.Number()) < len(mi.denseFields) { + mi.denseFields[fd.Number()] = mi.fields[fd.Number()] + } + } + + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil && !od.IsSynthetic() { + mi.rangeInfos = append(mi.rangeInfos, mi.oneofs[od.Name()]) + i += od.Fields().Len() + } else { + mi.rangeInfos = append(mi.rangeInfos, mi.fields[fd.Number()]) + i++ + } + } + + // Introduce instability to iteration order, but keep it deterministic. + if len(mi.rangeInfos) > 1 && detrand.Bool() { + i := detrand.Intn(len(mi.rangeInfos) - 1) + mi.rangeInfos[i], mi.rangeInfos[i+1] = mi.rangeInfos[i+1], mi.rangeInfos[i] + } +} + +func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { + switch { + case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsAType: + // Handle as []byte. + mi.getUnknown = func(p pointer) pref.RawFields { + if p.IsNil() { + return nil + } + return *p.Apply(mi.unknownOffset).Bytes() + } + mi.setUnknown = func(p pointer, b pref.RawFields) { + if p.IsNil() { + panic("invalid SetUnknown on nil Message") + } + *p.Apply(mi.unknownOffset).Bytes() = b + } + case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsBType: + // Handle as *[]byte. 
+ mi.getUnknown = func(p pointer) pref.RawFields { + if p.IsNil() { + return nil + } + bp := p.Apply(mi.unknownOffset).BytesPtr() + if *bp == nil { + return nil + } + return **bp + } + mi.setUnknown = func(p pointer, b pref.RawFields) { + if p.IsNil() { + panic("invalid SetUnknown on nil Message") + } + bp := p.Apply(mi.unknownOffset).BytesPtr() + if *bp == nil { + *bp = new([]byte) + } + **bp = b + } + default: + mi.getUnknown = func(pointer) pref.RawFields { + return nil + } + mi.setUnknown = func(p pointer, _ pref.RawFields) { + if p.IsNil() { + panic("invalid SetUnknown on nil Message") + } + } + } +} + +func (mi *MessageInfo) makeExtensionFieldsFunc(t reflect.Type, si structInfo) { + if si.extensionOffset.IsValid() { + mi.extensionMap = func(p pointer) *extensionMap { + if p.IsNil() { + return (*extensionMap)(nil) + } + v := p.Apply(si.extensionOffset).AsValueOf(extensionFieldsType) + return (*extensionMap)(v.Interface().(*map[int32]ExtensionField)) + } + } else { + mi.extensionMap = func(pointer) *extensionMap { + return (*extensionMap)(nil) + } + } +} +func (mi *MessageInfo) makeFieldTypes(si structInfo) { + md := mi.Desc + fds := md.Fields() + for i := 0; i < fds.Len(); i++ { + var ft reflect.Type + fd := fds.Get(i) + fs := si.fieldsByNumber[fd.Number()] + isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() + if isOneof { + fs = si.oneofsByName[fd.ContainingOneof().Name()] + } + var isMessage bool + switch { + case fs.Type == nil: + continue // never occurs for officially generated message types + case isOneof: + if fd.Enum() != nil || fd.Message() != nil { + ft = si.oneofWrappersByNumber[fd.Number()].Field(0).Type + } + case fd.IsMap(): + if fd.MapValue().Enum() != nil || fd.MapValue().Message() != nil { + ft = fs.Type.Elem() + } + isMessage = fd.MapValue().Message() != nil + case fd.IsList(): + if fd.Enum() != nil || fd.Message() != nil { + ft = fs.Type.Elem() + } + isMessage = fd.Message() != nil + case fd.Enum() != nil: + ft = fs.Type + if fd.HasPresence() && ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + case fd.Message() != nil: + ft = fs.Type + if fd.IsWeak() { + ft = nil + } + isMessage = true + } + if isMessage && ft != nil && ft.Kind() != reflect.Ptr { + ft = reflect.PtrTo(ft) // never occurs for officially generated message types + } + if ft != nil { + if mi.fieldTypes == nil { + mi.fieldTypes = make(map[pref.FieldNumber]interface{}) + } + mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface() + } + } +} + +type extensionMap map[int32]ExtensionField + +func (m *extensionMap) Range(f func(pref.FieldDescriptor, pref.Value) bool) { + if m != nil { + for _, x := range *m { + xd := x.Type().TypeDescriptor() + v := x.Value() + if xd.IsList() && v.List().Len() == 0 { + continue + } + if !f(xd, v) { + return + } + } + } +} +func (m *extensionMap) Has(xt pref.ExtensionType) (ok bool) { + if m == nil { + return false + } + xd := xt.TypeDescriptor() + x, ok := (*m)[int32(xd.Number())] + if !ok { + return false + } + switch { + case xd.IsList(): + return x.Value().List().Len() > 0 + case xd.IsMap(): + return x.Value().Map().Len() > 0 + case xd.Message() != nil: + return x.Value().Message().IsValid() + } + return true +} +func (m *extensionMap) Clear(xt pref.ExtensionType) { + delete(*m, int32(xt.TypeDescriptor().Number())) +} +func (m *extensionMap) Get(xt pref.ExtensionType) pref.Value { + xd := xt.TypeDescriptor() + if m != nil { + if x, ok := (*m)[int32(xd.Number())]; ok { + return x.Value() + } + } + return xt.Zero() +} +func (m *extensionMap) 
Set(xt pref.ExtensionType, v pref.Value) { + xd := xt.TypeDescriptor() + isValid := true + switch { + case !xt.IsValidValue(v): + isValid = false + case xd.IsList(): + isValid = v.List().IsValid() + case xd.IsMap(): + isValid = v.Map().IsValid() + case xd.Message() != nil: + isValid = v.Message().IsValid() + } + if !isValid { + panic(fmt.Sprintf("%v: assigning invalid value", xt.TypeDescriptor().FullName())) + } + + if *m == nil { + *m = make(map[int32]ExtensionField) + } + var x ExtensionField + x.Set(xt, v) + (*m)[int32(xd.Number())] = x +} +func (m *extensionMap) Mutable(xt pref.ExtensionType) pref.Value { + xd := xt.TypeDescriptor() + if xd.Kind() != pref.MessageKind && xd.Kind() != pref.GroupKind && !xd.IsList() && !xd.IsMap() { + panic("invalid Mutable on field with non-composite type") + } + if x, ok := (*m)[int32(xd.Number())]; ok { + return x.Value() + } + v := xt.New() + m.Set(xt, v) + return v +} + +// MessageState is a data structure that is nested as the first field in a +// concrete message. It provides a way to implement the ProtoReflect method +// in an allocation-free way without needing to have a shadow Go type generated +// for every message type. This technique only works using unsafe. +// +// +// Example generated code: +// +// type M struct { +// state protoimpl.MessageState +// +// Field1 int32 +// Field2 string +// Field3 *BarMessage +// ... +// } +// +// func (m *M) ProtoReflect() protoreflect.Message { +// mi := &file_fizz_buzz_proto_msgInfos[5] +// if protoimpl.UnsafeEnabled && m != nil { +// ms := protoimpl.X.MessageStateOf(Pointer(m)) +// if ms.LoadMessageInfo() == nil { +// ms.StoreMessageInfo(mi) +// } +// return ms +// } +// return mi.MessageOf(m) +// } +// +// The MessageState type holds a *MessageInfo, which must be atomically set to +// the message info associated with a given message instance. +// By unsafely converting a *M into a *MessageState, the MessageState object +// has access to all the information needed to implement protobuf reflection. +// It has access to the message info as its first field, and a pointer to the +// MessageState is identical to a pointer to the concrete message value. +// +// +// Requirements: +// • The type M must implement protoreflect.ProtoMessage. +// • The address of m must not be nil. +// • The address of m and the address of m.state must be equal, +// even though they are different Go types. +type MessageState struct { + pragma.NoUnkeyedLiterals + pragma.DoNotCompare + pragma.DoNotCopy + + atomicMessageInfo *MessageInfo +} + +type messageState MessageState + +var ( + _ pref.Message = (*messageState)(nil) + _ unwrapper = (*messageState)(nil) +) + +// messageDataType is a tuple of a pointer to the message data and +// a pointer to the message type. It is a generalized way of providing a +// reflective view over a message instance. The disadvantage of this approach +// is the need to allocate this tuple of 16B. +type messageDataType struct { + p pointer + mi *MessageInfo +} + +type ( + messageReflectWrapper messageDataType + messageIfaceWrapper messageDataType +) + +var ( + _ pref.Message = (*messageReflectWrapper)(nil) + _ unwrapper = (*messageReflectWrapper)(nil) + _ pref.ProtoMessage = (*messageIfaceWrapper)(nil) + _ unwrapper = (*messageIfaceWrapper)(nil) +) + +// MessageOf returns a reflective view over a message. The input must be a +// pointer to a named Go struct. If the provided type has a ProtoReflect method, +// it must be implemented by calling this method. 
+func (mi *MessageInfo) MessageOf(m interface{}) pref.Message { + if reflect.TypeOf(m) != mi.GoReflectType { + panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType)) + } + p := pointerOfIface(m) + if p.IsNil() { + return mi.nilMessage.Init(mi) + } + return &messageReflectWrapper{p, mi} +} + +func (m *messageReflectWrapper) pointer() pointer { return m.p } +func (m *messageReflectWrapper) messageInfo() *MessageInfo { return m.mi } + +// Reset implements the v1 proto.Message.Reset method. +func (m *messageIfaceWrapper) Reset() { + if mr, ok := m.protoUnwrap().(interface{ Reset() }); ok { + mr.Reset() + return + } + rv := reflect.ValueOf(m.protoUnwrap()) + if rv.Kind() == reflect.Ptr && !rv.IsNil() { + rv.Elem().Set(reflect.Zero(rv.Type().Elem())) + } +} +func (m *messageIfaceWrapper) ProtoReflect() pref.Message { + return (*messageReflectWrapper)(m) +} +func (m *messageIfaceWrapper) protoUnwrap() interface{} { + return m.p.AsIfaceOf(m.mi.GoReflectType.Elem()) +} + +// checkField verifies that the provided field descriptor is valid. +// Exactly one of the returned values is populated. +func (mi *MessageInfo) checkField(fd pref.FieldDescriptor) (*fieldInfo, pref.ExtensionType) { + var fi *fieldInfo + if n := fd.Number(); 0 < n && int(n) < len(mi.denseFields) { + fi = mi.denseFields[n] + } else { + fi = mi.fields[n] + } + if fi != nil { + if fi.fieldDesc != fd { + if got, want := fd.FullName(), fi.fieldDesc.FullName(); got != want { + panic(fmt.Sprintf("mismatching field: got %v, want %v", got, want)) + } + panic(fmt.Sprintf("mismatching field: %v", fd.FullName())) + } + return fi, nil + } + + if fd.IsExtension() { + if got, want := fd.ContainingMessage().FullName(), mi.Desc.FullName(); got != want { + // TODO: Should this be exact containing message descriptor match? + panic(fmt.Sprintf("extension %v has mismatching containing message: got %v, want %v", fd.FullName(), got, want)) + } + if !mi.Desc.ExtensionRanges().Has(fd.Number()) { + panic(fmt.Sprintf("extension %v extends %v outside the extension range", fd.FullName(), mi.Desc.FullName())) + } + xtd, ok := fd.(pref.ExtensionTypeDescriptor) + if !ok { + panic(fmt.Sprintf("extension %v does not implement protoreflect.ExtensionTypeDescriptor", fd.FullName())) + } + return nil, xtd.Type() + } + panic(fmt.Sprintf("field %v is invalid", fd.FullName())) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go new file mode 100644 index 000000000..343cf8721 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -0,0 +1,543 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "math" + "reflect" + "sync" + + "google.golang.org/protobuf/internal/flags" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" +) + +type fieldInfo struct { + fieldDesc pref.FieldDescriptor + + // These fields are used for protobuf reflection support. + has func(pointer) bool + clear func(pointer) + get func(pointer) pref.Value + set func(pointer, pref.Value) + mutable func(pointer) pref.Value + newMessage func() pref.Message + newField func() pref.Value +} + +func fieldInfoForMissing(fd pref.FieldDescriptor) fieldInfo { + // This never occurs for generated message types. 
+ // It implies that a hand-crafted type has missing Go fields + // for specific protobuf message fields. + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + return false + }, + clear: func(p pointer) { + panic("missing Go struct field for " + string(fd.FullName())) + }, + get: func(p pointer) pref.Value { + return fd.Default() + }, + set: func(p pointer, v pref.Value) { + panic("missing Go struct field for " + string(fd.FullName())) + }, + mutable: func(p pointer) pref.Value { + panic("missing Go struct field for " + string(fd.FullName())) + }, + newMessage: func() pref.Message { + panic("missing Go struct field for " + string(fd.FullName())) + }, + newField: func() pref.Value { + if v := fd.Default(); v.IsValid() { + return v + } + panic("missing Go struct field for " + string(fd.FullName())) + }, + } +} + +func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x exporter, ot reflect.Type) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Interface { + panic(fmt.Sprintf("field %v has invalid type: got %v, want interface kind", fd.FullName(), ft)) + } + if ot.Kind() != reflect.Struct { + panic(fmt.Sprintf("field %v has invalid type: got %v, want struct kind", fd.FullName(), ot)) + } + if !reflect.PtrTo(ot).Implements(ft) { + panic(fmt.Sprintf("field %v has invalid type: %v does not implement %v", fd.FullName(), ot, ft)) + } + conv := NewConverter(ot.Field(0).Type, fd) + isMessage := fd.Message() != nil + + // TODO: Implement unsafe fast path? + fieldOffset := offsetOf(fs, x) + return fieldInfo{ + // NOTE: The logic below intentionally assumes that oneof fields are + // well-formatted. That is, the oneof interface never contains a + // typed nil pointer to one of the wrapper structs. + + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { + return false + } + return true + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() || rv.Elem().Type().Elem() != ot { + // NOTE: We intentionally don't check for rv.Elem().IsNil() + // so that (*OneofWrapperType)(nil) gets cleared to nil. 
+ return + } + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) pref.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { + return conv.Zero() + } + rv = rv.Elem().Elem().Field(0) + return conv.PBValueOf(rv) + }, + set: func(p pointer, v pref.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { + rv.Set(reflect.New(ot)) + } + rv = rv.Elem().Elem().Field(0) + rv.Set(conv.GoValueOf(v)) + }, + mutable: func(p pointer) pref.Value { + if !isMessage { + panic(fmt.Sprintf("field %v with invalid Mutable call on field with non-composite type", fd.FullName())) + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { + rv.Set(reflect.New(ot)) + } + rv = rv.Elem().Elem().Field(0) + if rv.Kind() == reflect.Ptr && rv.IsNil() { + rv.Set(conv.GoValueOf(pref.ValueOfMessage(conv.New().Message()))) + } + return conv.PBValueOf(rv) + }, + newMessage: func() pref.Message { + return conv.New().Message() + }, + newField: func() pref.Value { + return conv.New() + }, + } +} + +func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Map { + panic(fmt.Sprintf("field %v has invalid type: got %v, want map kind", fd.FullName(), ft)) + } + conv := NewConverter(ft, fd) + + // TODO: Implement unsafe fast path? + fieldOffset := offsetOf(fs, x) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return rv.Len() > 0 + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) pref.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.Len() == 0 { + return conv.Zero() + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v pref.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + pv := conv.GoValueOf(v) + if pv.IsNil() { + panic(fmt.Sprintf("map field %v cannot be set with read-only value", fd.FullName())) + } + rv.Set(pv) + }, + mutable: func(p pointer) pref.Value { + v := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if v.IsNil() { + v.Set(reflect.MakeMap(fs.Type)) + } + return conv.PBValueOf(v) + }, + newField: func() pref.Value { + return conv.New() + }, + } +} + +func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Slice { + panic(fmt.Sprintf("field %v has invalid type: got %v, want slice kind", fd.FullName(), ft)) + } + conv := NewConverter(reflect.PtrTo(ft), fd) + + // TODO: Implement unsafe fast path? 
+ fieldOffset := offsetOf(fs, x) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return rv.Len() > 0 + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) pref.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type) + if rv.Elem().Len() == 0 { + return conv.Zero() + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v pref.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + pv := conv.GoValueOf(v) + if pv.IsNil() { + panic(fmt.Sprintf("list field %v cannot be set with read-only value", fd.FullName())) + } + rv.Set(pv.Elem()) + }, + mutable: func(p pointer) pref.Value { + v := p.Apply(fieldOffset).AsValueOf(fs.Type) + return conv.PBValueOf(v) + }, + newField: func() pref.Value { + return conv.New() + }, + } +} + +var ( + nilBytes = reflect.ValueOf([]byte(nil)) + emptyBytes = reflect.ValueOf([]byte{}) +) + +func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { + ft := fs.Type + nullable := fd.HasPresence() + isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 + if nullable { + if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice { + // This never occurs for generated message types. + // Despite the protobuf type system specifying presence, + // the Go field type cannot represent it. + nullable = false + } + if ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + } + conv := NewConverter(ft, fd) + + // TODO: Implement unsafe fast path? + fieldOffset := offsetOf(fs, x) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if nullable { + return !rv.IsNil() + } + switch rv.Kind() { + case reflect.Bool: + return rv.Bool() + case reflect.Int32, reflect.Int64: + return rv.Int() != 0 + case reflect.Uint32, reflect.Uint64: + return rv.Uint() != 0 + case reflect.Float32, reflect.Float64: + return rv.Float() != 0 || math.Signbit(rv.Float()) + case reflect.String, reflect.Slice: + return rv.Len() > 0 + default: + panic(fmt.Sprintf("field %v has invalid type: %v", fd.FullName(), rv.Type())) // should never happen + } + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) pref.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if nullable { + if rv.IsNil() { + return conv.Zero() + } + if rv.Kind() == reflect.Ptr { + rv = rv.Elem() + } + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v pref.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if nullable && rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(ft)) + } + rv = rv.Elem() + } + rv.Set(conv.GoValueOf(v)) + if isBytes && rv.Len() == 0 { + if nullable { + rv.Set(emptyBytes) // preserve presence + } else { + rv.Set(nilBytes) // do not preserve presence + } + } + }, + newField: func() pref.Value { + return conv.New() + }, + } +} + +func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldInfo { + if !flags.ProtoLegacy { + panic("no support for proto1 weak fields") + } + + var once sync.Once + var messageType pref.MessageType + lazyInit := func() { + once.Do(func() { + messageName := fd.Message().FullName() + 
messageType, _ = preg.GlobalTypes.FindMessageByName(messageName) + if messageType == nil { + panic(fmt.Sprintf("weak message %v for field %v is not linked in", messageName, fd.FullName())) + } + }) + } + + num := fd.Number() + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + _, ok := p.Apply(weakOffset).WeakFields().get(num) + return ok + }, + clear: func(p pointer) { + p.Apply(weakOffset).WeakFields().clear(num) + }, + get: func(p pointer) pref.Value { + lazyInit() + if p.IsNil() { + return pref.ValueOfMessage(messageType.Zero()) + } + m, ok := p.Apply(weakOffset).WeakFields().get(num) + if !ok { + return pref.ValueOfMessage(messageType.Zero()) + } + return pref.ValueOfMessage(m.ProtoReflect()) + }, + set: func(p pointer, v pref.Value) { + lazyInit() + m := v.Message() + if m.Descriptor() != messageType.Descriptor() { + if got, want := m.Descriptor().FullName(), messageType.Descriptor().FullName(); got != want { + panic(fmt.Sprintf("field %v has mismatching message descriptor: got %v, want %v", fd.FullName(), got, want)) + } + panic(fmt.Sprintf("field %v has mismatching message descriptor: %v", fd.FullName(), m.Descriptor().FullName())) + } + p.Apply(weakOffset).WeakFields().set(num, m.Interface()) + }, + mutable: func(p pointer) pref.Value { + lazyInit() + fs := p.Apply(weakOffset).WeakFields() + m, ok := fs.get(num) + if !ok { + m = messageType.New().Interface() + fs.set(num, m) + } + return pref.ValueOfMessage(m.ProtoReflect()) + }, + newMessage: func() pref.Message { + lazyInit() + return messageType.New() + }, + newField: func() pref.Value { + lazyInit() + return pref.ValueOfMessage(messageType.New()) + }, + } +} + +func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { + ft := fs.Type + conv := NewConverter(ft, fd) + + // TODO: Implement unsafe fast path? 
+ fieldOffset := offsetOf(fs, x) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if fs.Type.Kind() != reflect.Ptr { + return !isZero(rv) + } + return !rv.IsNil() + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) pref.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return conv.PBValueOf(rv) + }, + set: func(p pointer, v pref.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(conv.GoValueOf(v)) + if fs.Type.Kind() == reflect.Ptr && rv.IsNil() { + panic(fmt.Sprintf("field %v has invalid nil pointer", fd.FullName())) + } + }, + mutable: func(p pointer) pref.Value { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if fs.Type.Kind() == reflect.Ptr && rv.IsNil() { + rv.Set(conv.GoValueOf(conv.New())) + } + return conv.PBValueOf(rv) + }, + newMessage: func() pref.Message { + return conv.New().Message() + }, + newField: func() pref.Value { + return conv.New() + }, + } +} + +type oneofInfo struct { + oneofDesc pref.OneofDescriptor + which func(pointer) pref.FieldNumber +} + +func makeOneofInfo(od pref.OneofDescriptor, si structInfo, x exporter) *oneofInfo { + oi := &oneofInfo{oneofDesc: od} + if od.IsSynthetic() { + fs := si.fieldsByNumber[od.Fields().Get(0).Number()] + fieldOffset := offsetOf(fs, x) + oi.which = func(p pointer) pref.FieldNumber { + if p.IsNil() { + return 0 + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { // valid on either *T or []byte + return 0 + } + return od.Fields().Get(0).Number() + } + } else { + fs := si.oneofsByName[od.Name()] + fieldOffset := offsetOf(fs, x) + oi.which = func(p pointer) pref.FieldNumber { + if p.IsNil() { + return 0 + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { + return 0 + } + rv = rv.Elem() + if rv.IsNil() { + return 0 + } + return si.oneofWrappersByType[rv.Type().Elem()] + } + } + return oi +} + +// isZero is identical to reflect.Value.IsZero. +// TODO: Remove this when Go1.13 is the minimally supported Go version. 
+func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return math.Float64bits(v.Float()) == 0 + case reflect.Complex64, reflect.Complex128: + c := v.Complex() + return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0 + case reflect.Array: + for i := 0; i < v.Len(); i++ { + if !isZero(v.Index(i)) { + return false + } + } + return true + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: + return v.IsNil() + case reflect.String: + return v.Len() == 0 + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + if !isZero(v.Field(i)) { + return false + } + } + return true + default: + panic(&reflect.ValueError{"reflect.Value.IsZero", v.Kind()}) + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go new file mode 100644 index 000000000..741d6e5b6 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go @@ -0,0 +1,249 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package impl + +import ( + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +func (m *messageState) Descriptor() protoreflect.MessageDescriptor { + return m.messageInfo().Desc +} +func (m *messageState) Type() protoreflect.MessageType { + return m.messageInfo() +} +func (m *messageState) New() protoreflect.Message { + return m.messageInfo().New() +} +func (m *messageState) Interface() protoreflect.ProtoMessage { + return m.protoUnwrap().(protoreflect.ProtoMessage) +} +func (m *messageState) protoUnwrap() interface{} { + return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) +} +func (m *messageState) ProtoMethods() *protoiface.Methods { + m.messageInfo().init() + return &m.messageInfo().methods +} + +// ProtoMessageInfo is a pseudo-internal API for allowing the v1 code +// to be able to retrieve a v2 MessageInfo struct. +// +// WARNING: This method is exempt from the compatibility promise and +// may be removed in the future without warning. 
+func (m *messageState) ProtoMessageInfo() *MessageInfo { + return m.messageInfo() +} + +func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + m.messageInfo().init() + for _, ri := range m.messageInfo().rangeInfos { + switch ri := ri.(type) { + case *fieldInfo: + if ri.has(m.pointer()) { + if !f(ri.fieldDesc, ri.get(m.pointer())) { + return + } + } + case *oneofInfo: + if n := ri.which(m.pointer()); n > 0 { + fi := m.messageInfo().fields[n] + if !f(fi.fieldDesc, fi.get(m.pointer())) { + return + } + } + } + } + m.messageInfo().extensionMap(m.pointer()).Range(f) +} +func (m *messageState) Has(fd protoreflect.FieldDescriptor) bool { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.has(m.pointer()) + } else { + return m.messageInfo().extensionMap(m.pointer()).Has(xt) + } +} +func (m *messageState) Clear(fd protoreflect.FieldDescriptor) { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + fi.clear(m.pointer()) + } else { + m.messageInfo().extensionMap(m.pointer()).Clear(xt) + } +} +func (m *messageState) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.get(m.pointer()) + } else { + return m.messageInfo().extensionMap(m.pointer()).Get(xt) + } +} +func (m *messageState) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + fi.set(m.pointer(), v) + } else { + m.messageInfo().extensionMap(m.pointer()).Set(xt, v) + } +} +func (m *messageState) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.mutable(m.pointer()) + } else { + return m.messageInfo().extensionMap(m.pointer()).Mutable(xt) + } +} +func (m *messageState) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.newField() + } else { + return xt.New() + } +} +func (m *messageState) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + m.messageInfo().init() + if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { + return od.Fields().ByNumber(oi.which(m.pointer())) + } + panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName())) +} +func (m *messageState) GetUnknown() protoreflect.RawFields { + m.messageInfo().init() + return m.messageInfo().getUnknown(m.pointer()) +} +func (m *messageState) SetUnknown(b protoreflect.RawFields) { + m.messageInfo().init() + m.messageInfo().setUnknown(m.pointer(), b) +} +func (m *messageState) IsValid() bool { + return !m.pointer().IsNil() +} + +func (m *messageReflectWrapper) Descriptor() protoreflect.MessageDescriptor { + return m.messageInfo().Desc +} +func (m *messageReflectWrapper) Type() protoreflect.MessageType { + return m.messageInfo() +} +func (m *messageReflectWrapper) New() protoreflect.Message { + return m.messageInfo().New() +} +func (m *messageReflectWrapper) Interface() protoreflect.ProtoMessage { + if m, ok := m.protoUnwrap().(protoreflect.ProtoMessage); ok { + return m + } + return (*messageIfaceWrapper)(m) +} +func (m *messageReflectWrapper) protoUnwrap() interface{} { + return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) +} +func (m 
*messageReflectWrapper) ProtoMethods() *protoiface.Methods { + m.messageInfo().init() + return &m.messageInfo().methods +} + +// ProtoMessageInfo is a pseudo-internal API for allowing the v1 code +// to be able to retrieve a v2 MessageInfo struct. +// +// WARNING: This method is exempt from the compatibility promise and +// may be removed in the future without warning. +func (m *messageReflectWrapper) ProtoMessageInfo() *MessageInfo { + return m.messageInfo() +} + +func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + m.messageInfo().init() + for _, ri := range m.messageInfo().rangeInfos { + switch ri := ri.(type) { + case *fieldInfo: + if ri.has(m.pointer()) { + if !f(ri.fieldDesc, ri.get(m.pointer())) { + return + } + } + case *oneofInfo: + if n := ri.which(m.pointer()); n > 0 { + fi := m.messageInfo().fields[n] + if !f(fi.fieldDesc, fi.get(m.pointer())) { + return + } + } + } + } + m.messageInfo().extensionMap(m.pointer()).Range(f) +} +func (m *messageReflectWrapper) Has(fd protoreflect.FieldDescriptor) bool { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.has(m.pointer()) + } else { + return m.messageInfo().extensionMap(m.pointer()).Has(xt) + } +} +func (m *messageReflectWrapper) Clear(fd protoreflect.FieldDescriptor) { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + fi.clear(m.pointer()) + } else { + m.messageInfo().extensionMap(m.pointer()).Clear(xt) + } +} +func (m *messageReflectWrapper) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.get(m.pointer()) + } else { + return m.messageInfo().extensionMap(m.pointer()).Get(xt) + } +} +func (m *messageReflectWrapper) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + fi.set(m.pointer(), v) + } else { + m.messageInfo().extensionMap(m.pointer()).Set(xt, v) + } +} +func (m *messageReflectWrapper) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.mutable(m.pointer()) + } else { + return m.messageInfo().extensionMap(m.pointer()).Mutable(xt) + } +} +func (m *messageReflectWrapper) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.newField() + } else { + return xt.New() + } +} +func (m *messageReflectWrapper) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + m.messageInfo().init() + if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { + return od.Fields().ByNumber(oi.which(m.pointer())) + } + panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName())) +} +func (m *messageReflectWrapper) GetUnknown() protoreflect.RawFields { + m.messageInfo().init() + return m.messageInfo().getUnknown(m.pointer()) +} +func (m *messageReflectWrapper) SetUnknown(b protoreflect.RawFields) { + m.messageInfo().init() + m.messageInfo().setUnknown(m.pointer(), b) +} +func (m *messageReflectWrapper) IsValid() bool { + return !m.pointer().IsNil() +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go new file mode 100644 index 
000000000..4c491bdf4 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -0,0 +1,179 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build purego || appengine +// +build purego appengine + +package impl + +import ( + "fmt" + "reflect" + "sync" +) + +const UnsafeEnabled = false + +// Pointer is an opaque pointer type. +type Pointer interface{} + +// offset represents the offset to a struct field, accessible from a pointer. +// The offset is the field index into a struct. +type offset struct { + index int + export exporter +} + +// offsetOf returns a field offset for the struct field. +func offsetOf(f reflect.StructField, x exporter) offset { + if len(f.Index) != 1 { + panic("embedded structs are not supported") + } + if f.PkgPath == "" { + return offset{index: f.Index[0]} // field is already exported + } + if x == nil { + panic("exporter must be provided for unexported field") + } + return offset{index: f.Index[0], export: x} +} + +// IsValid reports whether the offset is valid. +func (f offset) IsValid() bool { return f.index >= 0 } + +// invalidOffset is an invalid field offset. +var invalidOffset = offset{index: -1} + +// zeroOffset is a noop when calling pointer.Apply. +var zeroOffset = offset{index: 0} + +// pointer is an abstract representation of a pointer to a struct or field. +type pointer struct{ v reflect.Value } + +// pointerOf returns p as a pointer. +func pointerOf(p Pointer) pointer { + return pointerOfIface(p) +} + +// pointerOfValue returns v as a pointer. +func pointerOfValue(v reflect.Value) pointer { + return pointer{v: v} +} + +// pointerOfIface returns the pointer portion of an interface. +func pointerOfIface(v interface{}) pointer { + return pointer{v: reflect.ValueOf(v)} +} + +// IsNil reports whether the pointer is nil. +func (p pointer) IsNil() bool { + return p.v.IsNil() +} + +// Apply adds an offset to the pointer to derive a new pointer +// to a specified field. The current pointer must be pointing at a struct. +func (p pointer) Apply(f offset) pointer { + if f.export != nil { + if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() { + return pointer{v: v} + } + } + return pointer{v: p.v.Elem().Field(f.index).Addr()} +} + +// AsValueOf treats p as a pointer to an object of type t and returns the value. +// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t)) +func (p pointer) AsValueOf(t reflect.Type) reflect.Value { + if got := p.v.Type().Elem(); got != t { + panic(fmt.Sprintf("invalid type: got %v, want %v", got, t)) + } + return p.v +} + +// AsIfaceOf treats p as a pointer to an object of type t and returns the value. 
+// It is equivalent to p.AsValueOf(t).Interface() +func (p pointer) AsIfaceOf(t reflect.Type) interface{} { + return p.AsValueOf(t).Interface() +} + +func (p pointer) Bool() *bool { return p.v.Interface().(*bool) } +func (p pointer) BoolPtr() **bool { return p.v.Interface().(**bool) } +func (p pointer) BoolSlice() *[]bool { return p.v.Interface().(*[]bool) } +func (p pointer) Int32() *int32 { return p.v.Interface().(*int32) } +func (p pointer) Int32Ptr() **int32 { return p.v.Interface().(**int32) } +func (p pointer) Int32Slice() *[]int32 { return p.v.Interface().(*[]int32) } +func (p pointer) Int64() *int64 { return p.v.Interface().(*int64) } +func (p pointer) Int64Ptr() **int64 { return p.v.Interface().(**int64) } +func (p pointer) Int64Slice() *[]int64 { return p.v.Interface().(*[]int64) } +func (p pointer) Uint32() *uint32 { return p.v.Interface().(*uint32) } +func (p pointer) Uint32Ptr() **uint32 { return p.v.Interface().(**uint32) } +func (p pointer) Uint32Slice() *[]uint32 { return p.v.Interface().(*[]uint32) } +func (p pointer) Uint64() *uint64 { return p.v.Interface().(*uint64) } +func (p pointer) Uint64Ptr() **uint64 { return p.v.Interface().(**uint64) } +func (p pointer) Uint64Slice() *[]uint64 { return p.v.Interface().(*[]uint64) } +func (p pointer) Float32() *float32 { return p.v.Interface().(*float32) } +func (p pointer) Float32Ptr() **float32 { return p.v.Interface().(**float32) } +func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) } +func (p pointer) Float64() *float64 { return p.v.Interface().(*float64) } +func (p pointer) Float64Ptr() **float64 { return p.v.Interface().(**float64) } +func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) } +func (p pointer) String() *string { return p.v.Interface().(*string) } +func (p pointer) StringPtr() **string { return p.v.Interface().(**string) } +func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) } +func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) } +func (p pointer) BytesPtr() **[]byte { return p.v.Interface().(**[]byte) } +func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) } +func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) } +func (p pointer) Extensions() *map[int32]ExtensionField { + return p.v.Interface().(*map[int32]ExtensionField) +} + +func (p pointer) Elem() pointer { + return pointer{v: p.v.Elem()} +} + +// PointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) PointerSlice() []pointer { + // TODO: reconsider this + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s +} + +// AppendPointerSlice appends v to p, which must be a []*T. +func (p pointer) AppendPointerSlice(v pointer) { + sp := p.v.Elem() + sp.Set(reflect.Append(sp, v.v)) +} + +// SetPointer sets *p to v. 
+func (p pointer) SetPointer(v pointer) { + p.v.Elem().Set(v.v) +} + +func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") } +func (ms *messageState) pointer() pointer { panic("not supported") } +func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") } +func (ms *messageState) LoadMessageInfo() *MessageInfo { panic("not supported") } +func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") } + +type atomicNilMessage struct { + once sync.Once + m messageReflectWrapper +} + +func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper { + m.once.Do(func() { + m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface()) + m.m.mi = mi + }) + return &m.m +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go new file mode 100644 index 000000000..ee0e0573e --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -0,0 +1,175 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego && !appengine +// +build !purego,!appengine + +package impl + +import ( + "reflect" + "sync/atomic" + "unsafe" +) + +const UnsafeEnabled = true + +// Pointer is an opaque pointer type. +type Pointer unsafe.Pointer + +// offset represents the offset to a struct field, accessible from a pointer. +// The offset is the byte offset to the field from the start of the struct. +type offset uintptr + +// offsetOf returns a field offset for the struct field. +func offsetOf(f reflect.StructField, x exporter) offset { + return offset(f.Offset) +} + +// IsValid reports whether the offset is valid. +func (f offset) IsValid() bool { return f != invalidOffset } + +// invalidOffset is an invalid field offset. +var invalidOffset = ^offset(0) + +// zeroOffset is a noop when calling pointer.Apply. +var zeroOffset = offset(0) + +// pointer is a pointer to a message struct or field. +type pointer struct{ p unsafe.Pointer } + +// pointerOf returns p as a pointer. +func pointerOf(p Pointer) pointer { + return pointer{p: unsafe.Pointer(p)} +} + +// pointerOfValue returns v as a pointer. +func pointerOfValue(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} +} + +// pointerOfIface returns the pointer portion of an interface. +func pointerOfIface(v interface{}) pointer { + type ifaceHeader struct { + Type unsafe.Pointer + Data unsafe.Pointer + } + return pointer{p: (*ifaceHeader)(unsafe.Pointer(&v)).Data} +} + +// IsNil reports whether the pointer is nil. +func (p pointer) IsNil() bool { + return p.p == nil +} + +// Apply adds an offset to the pointer to derive a new pointer +// to a specified field. The pointer must be valid and pointing at a struct. +func (p pointer) Apply(f offset) pointer { + if p.IsNil() { + panic("invalid nil pointer") + } + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} +} + +// AsValueOf treats p as a pointer to an object of type t and returns the value. +// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t)) +func (p pointer) AsValueOf(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} + +// AsIfaceOf treats p as a pointer to an object of type t and returns the value. 
+// It is equivalent to p.AsValueOf(t).Interface() +func (p pointer) AsIfaceOf(t reflect.Type) interface{} { + // TODO: Use tricky unsafe magic to directly create ifaceHeader. + return p.AsValueOf(t).Interface() +} + +func (p pointer) Bool() *bool { return (*bool)(p.p) } +func (p pointer) BoolPtr() **bool { return (**bool)(p.p) } +func (p pointer) BoolSlice() *[]bool { return (*[]bool)(p.p) } +func (p pointer) Int32() *int32 { return (*int32)(p.p) } +func (p pointer) Int32Ptr() **int32 { return (**int32)(p.p) } +func (p pointer) Int32Slice() *[]int32 { return (*[]int32)(p.p) } +func (p pointer) Int64() *int64 { return (*int64)(p.p) } +func (p pointer) Int64Ptr() **int64 { return (**int64)(p.p) } +func (p pointer) Int64Slice() *[]int64 { return (*[]int64)(p.p) } +func (p pointer) Uint32() *uint32 { return (*uint32)(p.p) } +func (p pointer) Uint32Ptr() **uint32 { return (**uint32)(p.p) } +func (p pointer) Uint32Slice() *[]uint32 { return (*[]uint32)(p.p) } +func (p pointer) Uint64() *uint64 { return (*uint64)(p.p) } +func (p pointer) Uint64Ptr() **uint64 { return (**uint64)(p.p) } +func (p pointer) Uint64Slice() *[]uint64 { return (*[]uint64)(p.p) } +func (p pointer) Float32() *float32 { return (*float32)(p.p) } +func (p pointer) Float32Ptr() **float32 { return (**float32)(p.p) } +func (p pointer) Float32Slice() *[]float32 { return (*[]float32)(p.p) } +func (p pointer) Float64() *float64 { return (*float64)(p.p) } +func (p pointer) Float64Ptr() **float64 { return (**float64)(p.p) } +func (p pointer) Float64Slice() *[]float64 { return (*[]float64)(p.p) } +func (p pointer) String() *string { return (*string)(p.p) } +func (p pointer) StringPtr() **string { return (**string)(p.p) } +func (p pointer) StringSlice() *[]string { return (*[]string)(p.p) } +func (p pointer) Bytes() *[]byte { return (*[]byte)(p.p) } +func (p pointer) BytesPtr() **[]byte { return (**[]byte)(p.p) } +func (p pointer) BytesSlice() *[][]byte { return (*[][]byte)(p.p) } +func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) } +func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) } + +func (p pointer) Elem() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} +} + +// PointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) PointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) +} + +// AppendPointerSlice appends v to p, which must be a []*T. +func (p pointer) AppendPointerSlice(v pointer) { + *(*[]pointer)(p.p) = append(*(*[]pointer)(p.p), v) +} + +// SetPointer sets *p to v. +func (p pointer) SetPointer(v pointer) { + *(*unsafe.Pointer)(p.p) = (unsafe.Pointer)(v.p) +} + +// Static check that MessageState does not exceed the size of a pointer. +const _ = uint(unsafe.Sizeof(unsafe.Pointer(nil)) - unsafe.Sizeof(MessageState{})) + +func (Export) MessageStateOf(p Pointer) *messageState { + // Super-tricky - see documentation on MessageState. + return (*messageState)(unsafe.Pointer(p)) +} +func (ms *messageState) pointer() pointer { + // Super-tricky - see documentation on MessageState. 
+ return pointer{p: unsafe.Pointer(ms)} +} +func (ms *messageState) messageInfo() *MessageInfo { + mi := ms.LoadMessageInfo() + if mi == nil { + panic("invalid nil message info; this suggests memory corruption due to a race or shallow copy on the message struct") + } + return mi +} +func (ms *messageState) LoadMessageInfo() *MessageInfo { + return (*MessageInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&ms.atomicMessageInfo)))) +} +func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&ms.atomicMessageInfo)), unsafe.Pointer(mi)) +} + +type atomicNilMessage struct{ p unsafe.Pointer } // p is a *messageReflectWrapper + +func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper { + if p := atomic.LoadPointer(&m.p); p != nil { + return (*messageReflectWrapper)(p) + } + w := &messageReflectWrapper{mi: mi} + atomic.CompareAndSwapPointer(&m.p, nil, (unsafe.Pointer)(w)) + return (*messageReflectWrapper)(atomic.LoadPointer(&m.p)) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go new file mode 100644 index 000000000..08cfb6054 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go @@ -0,0 +1,576 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "math" + "math/bits" + "reflect" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// ValidationStatus is the result of validating the wire-format encoding of a message. +type ValidationStatus int + +const ( + // ValidationUnknown indicates that unmarshaling the message might succeed or fail. + // The validator was unable to render a judgement. + // + // The only causes of this status are an aberrant message type appearing somewhere + // in the message or a failure in the extension resolver. + ValidationUnknown ValidationStatus = iota + 1 + + // ValidationInvalid indicates that unmarshaling the message will fail. + ValidationInvalid + + // ValidationValid indicates that unmarshaling the message will succeed. + ValidationValid +) + +func (v ValidationStatus) String() string { + switch v { + case ValidationUnknown: + return "ValidationUnknown" + case ValidationInvalid: + return "ValidationInvalid" + case ValidationValid: + return "ValidationValid" + default: + return fmt.Sprintf("ValidationStatus(%d)", int(v)) + } +} + +// Validate determines whether the contents of the buffer are a valid wire encoding +// of the message type. +// +// This function is exposed for testing. 
+func Validate(mt pref.MessageType, in piface.UnmarshalInput) (out piface.UnmarshalOutput, _ ValidationStatus) { + mi, ok := mt.(*MessageInfo) + if !ok { + return out, ValidationUnknown + } + if in.Resolver == nil { + in.Resolver = preg.GlobalTypes + } + o, st := mi.validate(in.Buf, 0, unmarshalOptions{ + flags: in.Flags, + resolver: in.Resolver, + }) + if o.initialized { + out.Flags |= piface.UnmarshalInitialized + } + return out, st +} + +type validationInfo struct { + mi *MessageInfo + typ validationType + keyType, valType validationType + + // For non-required fields, requiredBit is 0. + // + // For required fields, requiredBit's nth bit is set, where n is a + // unique index in the range [0, MessageInfo.numRequiredFields). + // + // If there are more than 64 required fields, requiredBit is 0. + requiredBit uint64 +} + +type validationType uint8 + +const ( + validationTypeOther validationType = iota + validationTypeMessage + validationTypeGroup + validationTypeMap + validationTypeRepeatedVarint + validationTypeRepeatedFixed32 + validationTypeRepeatedFixed64 + validationTypeVarint + validationTypeFixed32 + validationTypeFixed64 + validationTypeBytes + validationTypeUTF8String + validationTypeMessageSetItem +) + +func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd pref.FieldDescriptor, ft reflect.Type) validationInfo { + var vi validationInfo + switch { + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): + switch fd.Kind() { + case pref.MessageKind: + vi.typ = validationTypeMessage + if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok { + vi.mi = getMessageInfo(ot.Field(0).Type) + } + case pref.GroupKind: + vi.typ = validationTypeGroup + if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok { + vi.mi = getMessageInfo(ot.Field(0).Type) + } + case pref.StringKind: + if strs.EnforceUTF8(fd) { + vi.typ = validationTypeUTF8String + } + } + default: + vi = newValidationInfo(fd, ft) + } + if fd.Cardinality() == pref.Required { + // Avoid overflow. The required field check is done with a 64-bit mask, with + // any message containing more than 64 required fields always reported as + // potentially uninitialized, so it is not important to get a precise count + // of the required fields past 64. 
+ if mi.numRequiredFields < math.MaxUint8 { + mi.numRequiredFields++ + vi.requiredBit = 1 << (mi.numRequiredFields - 1) + } + } + return vi +} + +func newValidationInfo(fd pref.FieldDescriptor, ft reflect.Type) validationInfo { + var vi validationInfo + switch { + case fd.IsList(): + switch fd.Kind() { + case pref.MessageKind: + vi.typ = validationTypeMessage + if ft.Kind() == reflect.Slice { + vi.mi = getMessageInfo(ft.Elem()) + } + case pref.GroupKind: + vi.typ = validationTypeGroup + if ft.Kind() == reflect.Slice { + vi.mi = getMessageInfo(ft.Elem()) + } + case pref.StringKind: + vi.typ = validationTypeBytes + if strs.EnforceUTF8(fd) { + vi.typ = validationTypeUTF8String + } + default: + switch wireTypes[fd.Kind()] { + case protowire.VarintType: + vi.typ = validationTypeRepeatedVarint + case protowire.Fixed32Type: + vi.typ = validationTypeRepeatedFixed32 + case protowire.Fixed64Type: + vi.typ = validationTypeRepeatedFixed64 + } + } + case fd.IsMap(): + vi.typ = validationTypeMap + switch fd.MapKey().Kind() { + case pref.StringKind: + if strs.EnforceUTF8(fd) { + vi.keyType = validationTypeUTF8String + } + } + switch fd.MapValue().Kind() { + case pref.MessageKind: + vi.valType = validationTypeMessage + if ft.Kind() == reflect.Map { + vi.mi = getMessageInfo(ft.Elem()) + } + case pref.StringKind: + if strs.EnforceUTF8(fd) { + vi.valType = validationTypeUTF8String + } + } + default: + switch fd.Kind() { + case pref.MessageKind: + vi.typ = validationTypeMessage + if !fd.IsWeak() { + vi.mi = getMessageInfo(ft) + } + case pref.GroupKind: + vi.typ = validationTypeGroup + vi.mi = getMessageInfo(ft) + case pref.StringKind: + vi.typ = validationTypeBytes + if strs.EnforceUTF8(fd) { + vi.typ = validationTypeUTF8String + } + default: + switch wireTypes[fd.Kind()] { + case protowire.VarintType: + vi.typ = validationTypeVarint + case protowire.Fixed32Type: + vi.typ = validationTypeFixed32 + case protowire.Fixed64Type: + vi.typ = validationTypeFixed64 + case protowire.BytesType: + vi.typ = validationTypeBytes + } + } + } + return vi +} + +func (mi *MessageInfo) validate(b []byte, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, result ValidationStatus) { + mi.init() + type validationState struct { + typ validationType + keyType, valType validationType + endGroup protowire.Number + mi *MessageInfo + tail []byte + requiredMask uint64 + } + + // Pre-allocate some slots to avoid repeated slice reallocation. + states := make([]validationState, 0, 16) + states = append(states, validationState{ + typ: validationTypeMessage, + mi: mi, + }) + if groupTag > 0 { + states[0].typ = validationTypeGroup + states[0].endGroup = groupTag + } + initialized := true + start := len(b) +State: + for len(states) > 0 { + st := &states[len(states)-1] + for len(b) > 0 { + // Parse the tag (field number and wire type). 
+ var tag uint64 + if b[0] < 0x80 { + tag = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + tag, n = protowire.ConsumeVarint(b) + if n < 0 { + return out, ValidationInvalid + } + b = b[n:] + } + var num protowire.Number + if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) { + return out, ValidationInvalid + } else { + num = protowire.Number(n) + } + wtyp := protowire.Type(tag & 7) + + if wtyp == protowire.EndGroupType { + if st.endGroup == num { + goto PopState + } + return out, ValidationInvalid + } + var vi validationInfo + switch { + case st.typ == validationTypeMap: + switch num { + case genid.MapEntry_Key_field_number: + vi.typ = st.keyType + case genid.MapEntry_Value_field_number: + vi.typ = st.valType + vi.mi = st.mi + vi.requiredBit = 1 + } + case flags.ProtoLegacy && st.mi.isMessageSet: + switch num { + case messageset.FieldItem: + vi.typ = validationTypeMessageSetItem + } + default: + var f *coderFieldInfo + if int(num) < len(st.mi.denseCoderFields) { + f = st.mi.denseCoderFields[num] + } else { + f = st.mi.coderFields[num] + } + if f != nil { + vi = f.validation + if vi.typ == validationTypeMessage && vi.mi == nil { + // Probable weak field. + // + // TODO: Consider storing the results of this lookup somewhere + // rather than recomputing it on every validation. + fd := st.mi.Desc.Fields().ByNumber(num) + if fd == nil || !fd.IsWeak() { + break + } + messageName := fd.Message().FullName() + messageType, err := preg.GlobalTypes.FindMessageByName(messageName) + switch err { + case nil: + vi.mi, _ = messageType.(*MessageInfo) + case preg.NotFound: + vi.typ = validationTypeBytes + default: + return out, ValidationUnknown + } + } + break + } + // Possible extension field. + // + // TODO: We should return ValidationUnknown when: + // 1. The resolver is not frozen. (More extensions may be added to it.) + // 2. The resolver returns preg.NotFound. + // In this case, a type added to the resolver in the future could cause + // unmarshaling to begin failing. Supporting this requires some way to + // determine if the resolver is frozen. + xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), num) + if err != nil && err != preg.NotFound { + return out, ValidationUnknown + } + if err == nil { + vi = getExtensionFieldInfo(xt).validation + } + } + if vi.requiredBit != 0 { + // Check that the field has a compatible wire type. + // We only need to consider non-repeated field types, + // since repeated fields (and maps) can never be required. 
+ ok := false + switch vi.typ { + case validationTypeVarint: + ok = wtyp == protowire.VarintType + case validationTypeFixed32: + ok = wtyp == protowire.Fixed32Type + case validationTypeFixed64: + ok = wtyp == protowire.Fixed64Type + case validationTypeBytes, validationTypeUTF8String, validationTypeMessage: + ok = wtyp == protowire.BytesType + case validationTypeGroup: + ok = wtyp == protowire.StartGroupType + } + if ok { + st.requiredMask |= vi.requiredBit + } + } + + switch wtyp { + case protowire.VarintType: + if len(b) >= 10 { + switch { + case b[0] < 0x80: + b = b[1:] + case b[1] < 0x80: + b = b[2:] + case b[2] < 0x80: + b = b[3:] + case b[3] < 0x80: + b = b[4:] + case b[4] < 0x80: + b = b[5:] + case b[5] < 0x80: + b = b[6:] + case b[6] < 0x80: + b = b[7:] + case b[7] < 0x80: + b = b[8:] + case b[8] < 0x80: + b = b[9:] + case b[9] < 0x80 && b[9] < 2: + b = b[10:] + default: + return out, ValidationInvalid + } + } else { + switch { + case len(b) > 0 && b[0] < 0x80: + b = b[1:] + case len(b) > 1 && b[1] < 0x80: + b = b[2:] + case len(b) > 2 && b[2] < 0x80: + b = b[3:] + case len(b) > 3 && b[3] < 0x80: + b = b[4:] + case len(b) > 4 && b[4] < 0x80: + b = b[5:] + case len(b) > 5 && b[5] < 0x80: + b = b[6:] + case len(b) > 6 && b[6] < 0x80: + b = b[7:] + case len(b) > 7 && b[7] < 0x80: + b = b[8:] + case len(b) > 8 && b[8] < 0x80: + b = b[9:] + case len(b) > 9 && b[9] < 2: + b = b[10:] + default: + return out, ValidationInvalid + } + } + continue State + case protowire.BytesType: + var size uint64 + if len(b) >= 1 && b[0] < 0x80 { + size = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + size = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + size, n = protowire.ConsumeVarint(b) + if n < 0 { + return out, ValidationInvalid + } + b = b[n:] + } + if size > uint64(len(b)) { + return out, ValidationInvalid + } + v := b[:size] + b = b[size:] + switch vi.typ { + case validationTypeMessage: + if vi.mi == nil { + return out, ValidationUnknown + } + vi.mi.init() + fallthrough + case validationTypeMap: + if vi.mi != nil { + vi.mi.init() + } + states = append(states, validationState{ + typ: vi.typ, + keyType: vi.keyType, + valType: vi.valType, + mi: vi.mi, + tail: b, + }) + b = v + continue State + case validationTypeRepeatedVarint: + // Packed field. + for len(v) > 0 { + _, n := protowire.ConsumeVarint(v) + if n < 0 { + return out, ValidationInvalid + } + v = v[n:] + } + case validationTypeRepeatedFixed32: + // Packed field. + if len(v)%4 != 0 { + return out, ValidationInvalid + } + case validationTypeRepeatedFixed64: + // Packed field. 
+ if len(v)%8 != 0 { + return out, ValidationInvalid + } + case validationTypeUTF8String: + if !utf8.Valid(v) { + return out, ValidationInvalid + } + } + case protowire.Fixed32Type: + if len(b) < 4 { + return out, ValidationInvalid + } + b = b[4:] + case protowire.Fixed64Type: + if len(b) < 8 { + return out, ValidationInvalid + } + b = b[8:] + case protowire.StartGroupType: + switch { + case vi.typ == validationTypeGroup: + if vi.mi == nil { + return out, ValidationUnknown + } + vi.mi.init() + states = append(states, validationState{ + typ: validationTypeGroup, + mi: vi.mi, + endGroup: num, + }) + continue State + case flags.ProtoLegacy && vi.typ == validationTypeMessageSetItem: + typeid, v, n, err := messageset.ConsumeFieldValue(b, false) + if err != nil { + return out, ValidationInvalid + } + xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), typeid) + switch { + case err == preg.NotFound: + b = b[n:] + case err != nil: + return out, ValidationUnknown + default: + xvi := getExtensionFieldInfo(xt).validation + if xvi.mi != nil { + xvi.mi.init() + } + states = append(states, validationState{ + typ: xvi.typ, + mi: xvi.mi, + tail: b[n:], + }) + b = v + continue State + } + default: + n := protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return out, ValidationInvalid + } + b = b[n:] + } + default: + return out, ValidationInvalid + } + } + if st.endGroup != 0 { + return out, ValidationInvalid + } + if len(b) != 0 { + return out, ValidationInvalid + } + b = st.tail + PopState: + numRequiredFields := 0 + switch st.typ { + case validationTypeMessage, validationTypeGroup: + numRequiredFields = int(st.mi.numRequiredFields) + case validationTypeMap: + // If this is a map field with a message value that contains + // required fields, require that the value be present. + if st.mi != nil && st.mi.numRequiredFields > 0 { + numRequiredFields = 1 + } + } + // If there are more than 64 required fields, this check will + // always fail and we will report that the message is potentially + // uninitialized. + if numRequiredFields > 0 && bits.OnesCount64(st.requiredMask) != numRequiredFields { + initialized = false + } + states = states[:len(states)-1] + } + out.n = start - len(b) + if initialized { + out.initialized = true + } + return out, ValidationValid +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/weak.go b/vendor/google.golang.org/protobuf/internal/impl/weak.go new file mode 100644 index 000000000..009cbefd1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/weak.go @@ -0,0 +1,74 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + + pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// weakFields adds methods to the exported WeakFields type for internal use. +// +// The exported type is an alias to an unnamed type, so methods can't be +// defined directly on it. 
+type weakFields WeakFields + +func (w weakFields) get(num pref.FieldNumber) (pref.ProtoMessage, bool) { + m, ok := w[int32(num)] + return m, ok +} + +func (w *weakFields) set(num pref.FieldNumber, m pref.ProtoMessage) { + if *w == nil { + *w = make(weakFields) + } + (*w)[int32(num)] = m +} + +func (w *weakFields) clear(num pref.FieldNumber) { + delete(*w, int32(num)) +} + +func (Export) HasWeak(w WeakFields, num pref.FieldNumber) bool { + _, ok := w[int32(num)] + return ok +} + +func (Export) ClearWeak(w *WeakFields, num pref.FieldNumber) { + delete(*w, int32(num)) +} + +func (Export) GetWeak(w WeakFields, num pref.FieldNumber, name pref.FullName) pref.ProtoMessage { + if m, ok := w[int32(num)]; ok { + return m + } + mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) + if mt == nil { + panic(fmt.Sprintf("message %v for weak field is not linked in", name)) + } + return mt.Zero().Interface() +} + +func (Export) SetWeak(w *WeakFields, num pref.FieldNumber, name pref.FullName, m pref.ProtoMessage) { + if m != nil { + mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) + if mt == nil { + panic(fmt.Sprintf("message %v for weak field is not linked in", name)) + } + if mt != m.ProtoReflect().Type() { + panic(fmt.Sprintf("invalid message type for weak field: got %T, want %T", m, mt.Zero().Interface())) + } + } + if m == nil || !m.ProtoReflect().IsValid() { + delete(*w, int32(num)) + return + } + if *w == nil { + *w = make(weakFields) + } + (*w)[int32(num)] = m +} diff --git a/vendor/google.golang.org/protobuf/internal/order/order.go b/vendor/google.golang.org/protobuf/internal/order/order.go new file mode 100644 index 000000000..2a24953f6 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/order/order.go @@ -0,0 +1,89 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package order + +import ( + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// FieldOrder specifies the ordering to visit message fields. +// It is a function that reports whether x is ordered before y. +type FieldOrder func(x, y pref.FieldDescriptor) bool + +var ( + // AnyFieldOrder specifies no specific field ordering. + AnyFieldOrder FieldOrder = nil + + // LegacyFieldOrder sorts fields in the same ordering as emitted by + // wire serialization in the github.com/golang/protobuf implementation. + LegacyFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + ox, oy := x.ContainingOneof(), y.ContainingOneof() + inOneof := func(od pref.OneofDescriptor) bool { + return od != nil && !od.IsSynthetic() + } + + // Extension fields sort before non-extension fields. + if x.IsExtension() != y.IsExtension() { + return x.IsExtension() && !y.IsExtension() + } + // Fields not within a oneof sort before those within a oneof. + if inOneof(ox) != inOneof(oy) { + return !inOneof(ox) && inOneof(oy) + } + // Fields in disjoint oneof sets are sorted by declaration index. + if ox != nil && oy != nil && ox != oy { + return ox.Index() < oy.Index() + } + // Fields sorted by field number. + return x.Number() < y.Number() + } + + // NumberFieldOrder sorts fields by their field number. + NumberFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + return x.Number() < y.Number() + } + + // IndexNameFieldOrder sorts non-extension fields before extension fields. + // Non-extensions are sorted according to their declaration index. + // Extensions are sorted according to their full name. 
+	IndexNameFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool {
+		// Non-extension fields sort before extension fields.
+		if x.IsExtension() != y.IsExtension() {
+			return !x.IsExtension() && y.IsExtension()
+		}
+		// Extensions sorted by fullname.
+		if x.IsExtension() && y.IsExtension() {
+			return x.FullName() < y.FullName()
+		}
+		// Non-extensions sorted by declaration index.
+		return x.Index() < y.Index()
+	}
+)
+
+// KeyOrder specifies the ordering to visit map entries.
+// It is a function that reports whether x is ordered before y.
+type KeyOrder func(x, y pref.MapKey) bool
+
+var (
+	// AnyKeyOrder specifies no specific key ordering.
+	AnyKeyOrder KeyOrder = nil
+
+	// GenericKeyOrder sorts false before true, numeric keys in ascending order,
+	// and strings in lexicographical ordering according to UTF-8 codepoints.
+	GenericKeyOrder KeyOrder = func(x, y pref.MapKey) bool {
+		switch x.Interface().(type) {
+		case bool:
+			return !x.Bool() && y.Bool()
+		case int32, int64:
+			return x.Int() < y.Int()
+		case uint32, uint64:
+			return x.Uint() < y.Uint()
+		case string:
+			return x.String() < y.String()
+		default:
+			panic("invalid map key type")
+		}
+	}
+)
diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go
new file mode 100644
index 000000000..c8090e0c5
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/order/range.go
@@ -0,0 +1,115 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package order provides ordered access to messages and maps.
+package order
+
+import (
+	"sort"
+	"sync"
+
+	pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type messageField struct {
+	fd pref.FieldDescriptor
+	v  pref.Value
+}
+
+var messageFieldPool = sync.Pool{
+	New: func() interface{} { return new([]messageField) },
+}
+
+type (
+	// FieldRanger is an interface for visiting all fields in a message.
+	// The protoreflect.Message type implements this interface.
+	FieldRanger interface{ Range(VisitField) }
+	// VisitField is called every time a message field is visited.
+	VisitField = func(pref.FieldDescriptor, pref.Value) bool
+)
+
+// RangeFields iterates over the fields of fs according to the specified order.
+func RangeFields(fs FieldRanger, less FieldOrder, fn VisitField) {
+	if less == nil {
+		fs.Range(fn)
+		return
+	}
+
+	// Obtain a pre-allocated scratch buffer.
+	p := messageFieldPool.Get().(*[]messageField)
+	fields := (*p)[:0]
+	defer func() {
+		if cap(fields) < 1024 {
+			*p = fields
+			messageFieldPool.Put(p)
+		}
+	}()
+
+	// Collect all fields in the message and sort them.
+	fs.Range(func(fd pref.FieldDescriptor, v pref.Value) bool {
+		fields = append(fields, messageField{fd, v})
+		return true
+	})
+	sort.Slice(fields, func(i, j int) bool {
+		return less(fields[i].fd, fields[j].fd)
+	})
+
+	// Visit the fields in the specified ordering.
+	for _, f := range fields {
+		if !fn(f.fd, f.v) {
+			return
+		}
+	}
+}
+
+type mapEntry struct {
+	k pref.MapKey
+	v pref.Value
+}
+
+var mapEntryPool = sync.Pool{
+	New: func() interface{} { return new([]mapEntry) },
+}
+
+type (
+	// EntryRanger is an interface for visiting all fields in a message.
+	// The protoreflect.Map type implements this interface.
+	EntryRanger interface{ Range(VisitEntry) }
+	// VisitEntry is called every time a map entry is visited.
+	VisitEntry = func(pref.MapKey, pref.Value) bool
+)
+
+// RangeEntries iterates over the entries of es according to the specified order.
+func RangeEntries(es EntryRanger, less KeyOrder, fn VisitEntry) {
+	if less == nil {
+		es.Range(fn)
+		return
+	}
+
+	// Obtain a pre-allocated scratch buffer.
+	p := mapEntryPool.Get().(*[]mapEntry)
+	entries := (*p)[:0]
+	defer func() {
+		if cap(entries) < 1024 {
+			*p = entries
+			mapEntryPool.Put(p)
+		}
+	}()
+
+	// Collect all entries in the map and sort them.
+	es.Range(func(k pref.MapKey, v pref.Value) bool {
+		entries = append(entries, mapEntry{k, v})
+		return true
+	})
+	sort.Slice(entries, func(i, j int) bool {
+		return less(entries[i].k, entries[j].k)
+	})
+
+	// Visit the entries in the specified ordering.
+	for _, e := range entries {
+		if !fn(e.k, e.v) {
+			return
+		}
+	}
+}
diff --git a/vendor/google.golang.org/protobuf/internal/pragma/pragma.go b/vendor/google.golang.org/protobuf/internal/pragma/pragma.go
new file mode 100644
index 000000000..49dc4fcd9
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/pragma/pragma.go
@@ -0,0 +1,29 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pragma provides types that can be embedded into a struct to
+// statically enforce or prevent certain language properties.
+package pragma
+
+import "sync"
+
+// NoUnkeyedLiterals can be embedded in a struct to prevent unkeyed literals.
+type NoUnkeyedLiterals struct{}
+
+// DoNotImplement can be embedded in an interface to prevent trivial
+// implementations of the interface.
+//
+// This is useful to prevent unauthorized implementations of an interface
+// so that it can be extended in the future for any protobuf language changes.
+type DoNotImplement interface{ ProtoInternal(DoNotImplement) }
+
+// DoNotCompare can be embedded in a struct to prevent comparability.
+type DoNotCompare [0]func()
+
+// DoNotCopy can be embedded in a struct to help prevent shallow copies.
+// This does not rely on a Go language feature, but rather a special case
+// within the vet checker.
+//
+// See https://golang.org/issues/8005.
+type DoNotCopy [0]sync.Mutex
diff --git a/vendor/google.golang.org/protobuf/internal/set/ints.go b/vendor/google.golang.org/protobuf/internal/set/ints.go
new file mode 100644
index 000000000..d3d7f89ab
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/set/ints.go
@@ -0,0 +1,58 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package set provides simple set data structures for uint64s.
+package set
+
+import "math/bits"
+
+// int64s represents a set of integers within the range of 0..63.
+type int64s uint64
+
+func (bs *int64s) Len() int {
+	return bits.OnesCount64(uint64(*bs))
+}
+func (bs *int64s) Has(n uint64) bool {
+	return uint64(*bs)&(uint64(1)<<n) > 0
+}
+func (bs *int64s) Set(n uint64) {
+	*(*uint64)(bs) |= uint64(1) << n
+}
+func (bs *int64s) Clear(n uint64) {
+	*(*uint64)(bs) &^= uint64(1) << n
+}
+
+// Ints represents a set of integers within the range of 0..math.MaxUint64.
+type Ints struct { + lo int64s + hi map[uint64]struct{} +} + +func (bs *Ints) Len() int { + return bs.lo.Len() + len(bs.hi) +} +func (bs *Ints) Has(n uint64) bool { + if n < 64 { + return bs.lo.Has(n) + } + _, ok := bs.hi[n] + return ok +} +func (bs *Ints) Set(n uint64) { + if n < 64 { + bs.lo.Set(n) + return + } + if bs.hi == nil { + bs.hi = make(map[uint64]struct{}) + } + bs.hi[n] = struct{}{} +} +func (bs *Ints) Clear(n uint64) { + if n < 64 { + bs.lo.Clear(n) + return + } + delete(bs.hi, n) +} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings.go b/vendor/google.golang.org/protobuf/internal/strs/strings.go new file mode 100644 index 000000000..0b74e7658 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/strs/strings.go @@ -0,0 +1,196 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package strs provides string manipulation functionality specific to protobuf. +package strs + +import ( + "go/token" + "strings" + "unicode" + "unicode/utf8" + + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// EnforceUTF8 reports whether to enforce strict UTF-8 validation. +func EnforceUTF8(fd protoreflect.FieldDescriptor) bool { + if flags.ProtoLegacy { + if fd, ok := fd.(interface{ EnforceUTF8() bool }); ok { + return fd.EnforceUTF8() + } + } + return fd.Syntax() == protoreflect.Proto3 +} + +// GoCamelCase camel-cases a protobuf name for use as a Go identifier. +// +// If there is an interior underscore followed by a lower case letter, +// drop the underscore and convert the letter to upper case. +func GoCamelCase(s string) string { + // Invariant: if the next letter is lower case, it must be converted + // to upper case. + // That is, we process a word at a time, where words are marked by _ or + // upper case letter. Digits are treated as words. + var b []byte + for i := 0; i < len(s); i++ { + c := s[i] + switch { + case c == '.' && i+1 < len(s) && isASCIILower(s[i+1]): + // Skip over '.' in ".{{lowercase}}". + case c == '.': + b = append(b, '_') // convert '.' to '_' + case c == '_' && (i == 0 || s[i-1] == '.'): + // Convert initial '_' to ensure we start with a capital letter. + // Do the same for '_' after '.' to match historic behavior. + b = append(b, 'X') // convert '_' to 'X' + case c == '_' && i+1 < len(s) && isASCIILower(s[i+1]): + // Skip over '_' in "_{{lowercase}}". + case isASCIIDigit(c): + b = append(b, c) + default: + // Assume we have a letter now - if not, it's a bogus identifier. + // The next word is a sequence of characters that must start upper case. + if isASCIILower(c) { + c -= 'a' - 'A' // convert lowercase to uppercase + } + b = append(b, c) + + // Accept lower case sequence that follows. + for ; i+1 < len(s) && isASCIILower(s[i+1]); i++ { + b = append(b, s[i+1]) + } + } + } + return string(b) +} + +// GoSanitized converts a string to a valid Go identifier. +func GoSanitized(s string) string { + // Sanitize the input to the set of valid characters, + // which must be '_' or be in the Unicode L or N categories. + s = strings.Map(func(r rune) rune { + if unicode.IsLetter(r) || unicode.IsDigit(r) { + return r + } + return '_' + }, s) + + // Prepend '_' in the event of a Go keyword conflict or if + // the identifier is invalid (does not start in the Unicode L category). 
+ r, _ := utf8.DecodeRuneInString(s) + if token.Lookup(s).IsKeyword() || !unicode.IsLetter(r) { + return "_" + s + } + return s +} + +// JSONCamelCase converts a snake_case identifier to a camelCase identifier, +// according to the protobuf JSON specification. +func JSONCamelCase(s string) string { + var b []byte + var wasUnderscore bool + for i := 0; i < len(s); i++ { // proto identifiers are always ASCII + c := s[i] + if c != '_' { + if wasUnderscore && isASCIILower(c) { + c -= 'a' - 'A' // convert to uppercase + } + b = append(b, c) + } + wasUnderscore = c == '_' + } + return string(b) +} + +// JSONSnakeCase converts a camelCase identifier to a snake_case identifier, +// according to the protobuf JSON specification. +func JSONSnakeCase(s string) string { + var b []byte + for i := 0; i < len(s); i++ { // proto identifiers are always ASCII + c := s[i] + if isASCIIUpper(c) { + b = append(b, '_') + c += 'a' - 'A' // convert to lowercase + } + b = append(b, c) + } + return string(b) +} + +// MapEntryName derives the name of the map entry message given the field name. +// See protoc v3.8.0: src/google/protobuf/descriptor.cc:254-276,6057 +func MapEntryName(s string) string { + var b []byte + upperNext := true + for _, c := range s { + switch { + case c == '_': + upperNext = true + case upperNext: + b = append(b, byte(unicode.ToUpper(c))) + upperNext = false + default: + b = append(b, byte(c)) + } + } + b = append(b, "Entry"...) + return string(b) +} + +// EnumValueName derives the camel-cased enum value name. +// See protoc v3.8.0: src/google/protobuf/descriptor.cc:297-313 +func EnumValueName(s string) string { + var b []byte + upperNext := true + for _, c := range s { + switch { + case c == '_': + upperNext = true + case upperNext: + b = append(b, byte(unicode.ToUpper(c))) + upperNext = false + default: + b = append(b, byte(unicode.ToLower(c))) + upperNext = false + } + } + return string(b) +} + +// TrimEnumPrefix trims the enum name prefix from an enum value name, +// where the prefix is all lowercase without underscores. +// See protoc v3.8.0: src/google/protobuf/descriptor.cc:330-375 +func TrimEnumPrefix(s, prefix string) string { + s0 := s // original input + for len(s) > 0 && len(prefix) > 0 { + if s[0] == '_' { + s = s[1:] + continue + } + if unicode.ToLower(rune(s[0])) != rune(prefix[0]) { + return s0 // no prefix match + } + s, prefix = s[1:], prefix[1:] + } + if len(prefix) > 0 { + return s0 // no prefix match + } + s = strings.TrimLeft(s, "_") + if len(s) == 0 { + return s0 // avoid returning empty string + } + return s +} + +func isASCIILower(c byte) bool { + return 'a' <= c && c <= 'z' +} +func isASCIIUpper(c byte) bool { + return 'A' <= c && c <= 'Z' +} +func isASCIIDigit(c byte) bool { + return '0' <= c && c <= '9' +} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go new file mode 100644 index 000000000..a1f6f3338 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go @@ -0,0 +1,28 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build purego || appengine +// +build purego appengine + +package strs + +import pref "google.golang.org/protobuf/reflect/protoreflect" + +func UnsafeString(b []byte) string { + return string(b) +} + +func UnsafeBytes(s string) []byte { + return []byte(s) +} + +type Builder struct{} + +func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { + return prefix.Append(name) +} + +func (*Builder) MakeString(b []byte) string { + return string(b) +} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go new file mode 100644 index 000000000..56a8a4ed3 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go @@ -0,0 +1,95 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego && !appengine +// +build !purego,!appengine + +package strs + +import ( + "unsafe" + + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type ( + stringHeader struct { + Data unsafe.Pointer + Len int + } + sliceHeader struct { + Data unsafe.Pointer + Len int + Cap int + } +) + +// UnsafeString returns an unsafe string reference of b. +// The caller must treat the input slice as immutable. +// +// WARNING: Use carefully. The returned result must not leak to the end user +// unless the input slice is provably immutable. +func UnsafeString(b []byte) (s string) { + src := (*sliceHeader)(unsafe.Pointer(&b)) + dst := (*stringHeader)(unsafe.Pointer(&s)) + dst.Data = src.Data + dst.Len = src.Len + return s +} + +// UnsafeBytes returns an unsafe bytes slice reference of s. +// The caller must treat returned slice as immutable. +// +// WARNING: Use carefully. The returned result must not leak to the end user. +func UnsafeBytes(s string) (b []byte) { + src := (*stringHeader)(unsafe.Pointer(&s)) + dst := (*sliceHeader)(unsafe.Pointer(&b)) + dst.Data = src.Data + dst.Len = src.Len + dst.Cap = src.Len + return b +} + +// Builder builds a set of strings with shared lifetime. +// This differs from strings.Builder, which is for building a single string. +type Builder struct { + buf []byte +} + +// AppendFullName is equivalent to protoreflect.FullName.Append, +// but optimized for large batches where each name has a shared lifetime. +func (sb *Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { + n := len(prefix) + len(".") + len(name) + if len(prefix) == 0 { + n -= len(".") + } + sb.grow(n) + sb.buf = append(sb.buf, prefix...) + sb.buf = append(sb.buf, '.') + sb.buf = append(sb.buf, name...) + return pref.FullName(sb.last(n)) +} + +// MakeString is equivalent to string(b), but optimized for large batches +// with a shared lifetime. +func (sb *Builder) MakeString(b []byte) string { + sb.grow(len(b)) + sb.buf = append(sb.buf, b...) + return sb.last(len(b)) +} + +func (sb *Builder) grow(n int) { + if cap(sb.buf)-len(sb.buf) >= n { + return + } + + // Unlike strings.Builder, we do not need to copy over the contents + // of the old buffer since our builder provides no API for + // retrieving previously created strings. 
+ sb.buf = make([]byte, 2*(cap(sb.buf)+n)) +} + +func (sb *Builder) last(n int) string { + return UnsafeString(sb.buf[len(sb.buf)-n:]) +} diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go new file mode 100644 index 000000000..3d40d5249 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -0,0 +1,79 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package version records versioning information about this module. +package version + +import ( + "fmt" + "strings" +) + +// These constants determine the current version of this module. +// +// +// For our release process, we enforce the following rules: +// * Tagged releases use a tag that is identical to String. +// * Tagged releases never reference a commit where the String +// contains "devel". +// * The set of all commits in this repository where String +// does not contain "devel" must have a unique String. +// +// +// Steps for tagging a new release: +// 1. Create a new CL. +// +// 2. Update Minor, Patch, and/or PreRelease as necessary. +// PreRelease must not contain the string "devel". +// +// 3. Since the last released minor version, have there been any changes to +// generator that relies on new functionality in the runtime? +// If yes, then increment RequiredGenerated. +// +// 4. Since the last released minor version, have there been any changes to +// the runtime that removes support for old .pb.go source code? +// If yes, then increment SupportMinimum. +// +// 5. Send out the CL for review and submit it. +// Note that the next CL in step 8 must be submitted after this CL +// without any other CLs in-between. +// +// 6. Tag a new version, where the tag is is the current String. +// +// 7. Write release notes for all notable changes +// between this release and the last release. +// +// 8. Create a new CL. +// +// 9. Update PreRelease to include the string "devel". +// For example: "" -> "devel" or "rc.1" -> "rc.1.devel" +// +// 10. Send out the CL for review and submit it. +const ( + Major = 1 + Minor = 28 + Patch = 0 + PreRelease = "" +) + +// String formats the version string for this module in semver format. +// +// Examples: +// v1.20.1 +// v1.21.0-rc.1 +func String() string { + v := fmt.Sprintf("v%d.%d.%d", Major, Minor, Patch) + if PreRelease != "" { + v += "-" + PreRelease + + // TODO: Add metadata about the commit or build hash. + // See https://golang.org/issue/29814 + // See https://golang.org/issue/33533 + var metadata string + if strings.Contains(PreRelease, "devel") && metadata != "" { + v += "+" + metadata + } + } + return v +} diff --git a/vendor/google.golang.org/protobuf/proto/checkinit.go b/vendor/google.golang.org/protobuf/proto/checkinit.go new file mode 100644 index 000000000..3e9a6a2f6 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/checkinit.go @@ -0,0 +1,71 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +// CheckInitialized returns an error if any required fields in m are not set. 
+func CheckInitialized(m Message) error { + // Treat a nil message interface as an "untyped" empty message, + // which we assume to have no required fields. + if m == nil { + return nil + } + + return checkInitialized(m.ProtoReflect()) +} + +// CheckInitialized returns an error if any required fields in m are not set. +func checkInitialized(m protoreflect.Message) error { + if methods := protoMethods(m); methods != nil && methods.CheckInitialized != nil { + _, err := methods.CheckInitialized(protoiface.CheckInitializedInput{ + Message: m, + }) + return err + } + return checkInitializedSlow(m) +} + +func checkInitializedSlow(m protoreflect.Message) error { + md := m.Descriptor() + fds := md.Fields() + for i, nums := 0, md.RequiredNumbers(); i < nums.Len(); i++ { + fd := fds.ByNumber(nums.Get(i)) + if !m.Has(fd) { + return errors.RequiredNotSet(string(fd.FullName())) + } + } + var err error + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + switch { + case fd.IsList(): + if fd.Message() == nil { + return true + } + for i, list := 0, v.List(); i < list.Len() && err == nil; i++ { + err = checkInitialized(list.Get(i).Message()) + } + case fd.IsMap(): + if fd.MapValue().Message() == nil { + return true + } + v.Map().Range(func(key protoreflect.MapKey, v protoreflect.Value) bool { + err = checkInitialized(v.Message()) + return err == nil + }) + default: + if fd.Message() == nil { + return true + } + err = checkInitialized(v.Message()) + } + return err == nil + }) + return err +} diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go new file mode 100644 index 000000000..11bf7173b --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -0,0 +1,293 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" +) + +// UnmarshalOptions configures the unmarshaler. +// +// Example usage: +// err := UnmarshalOptions{DiscardUnknown: true}.Unmarshal(b, m) +type UnmarshalOptions struct { + pragma.NoUnkeyedLiterals + + // Merge merges the input into the destination message. + // The default behavior is to always reset the message before unmarshaling, + // unless Merge is specified. + Merge bool + + // AllowPartial accepts input for messages that will result in missing + // required fields. If AllowPartial is false (the default), Unmarshal will + // return an error if there are any missing required fields. + AllowPartial bool + + // If DiscardUnknown is set, unknown fields are ignored. + DiscardUnknown bool + + // Resolver is used for looking up types when unmarshaling extension fields. + // If nil, this defaults to using protoregistry.GlobalTypes. 
+ Resolver interface { + FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) + FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) + } + + // RecursionLimit limits how deeply messages may be nested. + // If zero, a default limit is applied. + RecursionLimit int +} + +// Unmarshal parses the wire-format message in b and places the result in m. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +func Unmarshal(b []byte, m Message) error { + _, err := UnmarshalOptions{RecursionLimit: protowire.DefaultRecursionLimit}.unmarshal(b, m.ProtoReflect()) + return err +} + +// Unmarshal parses the wire-format message in b and places the result in m. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } + _, err := o.unmarshal(b, m.ProtoReflect()) + return err +} + +// UnmarshalState parses a wire-format message and places the result in m. +// +// This method permits fine-grained control over the unmarshaler. +// Most users should use Unmarshal instead. +func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } + return o.unmarshal(in.Buf, in.Message) +} + +// unmarshal is a centralized function that all unmarshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for unmarshal that do not go through this. +func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out protoiface.UnmarshalOutput, err error) { + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + if !o.Merge { + Reset(m.Interface()) + } + allowPartial := o.AllowPartial + o.Merge = true + o.AllowPartial = true + methods := protoMethods(m) + if methods != nil && methods.Unmarshal != nil && + !(o.DiscardUnknown && methods.Flags&protoiface.SupportUnmarshalDiscardUnknown == 0) { + in := protoiface.UnmarshalInput{ + Message: m, + Buf: b, + Resolver: o.Resolver, + Depth: o.RecursionLimit, + } + if o.DiscardUnknown { + in.Flags |= protoiface.UnmarshalDiscardUnknown + } + out, err = methods.Unmarshal(in) + } else { + o.RecursionLimit-- + if o.RecursionLimit < 0 { + return out, errors.New("exceeded max recursion depth") + } + err = o.unmarshalMessageSlow(b, m) + } + if err != nil { + return out, err + } + if allowPartial || (out.Flags&protoiface.UnmarshalInitialized != 0) { + return out, nil + } + return out, checkInitialized(m) +} + +func (o UnmarshalOptions) unmarshalMessage(b []byte, m protoreflect.Message) error { + _, err := o.unmarshal(b, m) + return err +} + +func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) error { + md := m.Descriptor() + if messageset.IsMessageSet(md) { + return o.unmarshalMessageSet(b, m) + } + fields := md.Fields() + for len(b) > 0 { + // Parse the tag (field number and wire type). + num, wtyp, tagLen := protowire.ConsumeTag(b) + if tagLen < 0 { + return errDecode + } + if num > protowire.MaxValidNumber { + return errDecode + } + + // Find the field descriptor for this field number. 
+ fd := fields.ByNumber(num) + if fd == nil && md.ExtensionRanges().Has(num) { + extType, err := o.Resolver.FindExtensionByNumber(md.FullName(), num) + if err != nil && err != protoregistry.NotFound { + return errors.New("%v: unable to resolve extension %v: %v", md.FullName(), num, err) + } + if extType != nil { + fd = extType.TypeDescriptor() + } + } + var err error + if fd == nil { + err = errUnknown + } else if flags.ProtoLegacy { + if fd.IsWeak() && fd.Message().IsPlaceholder() { + err = errUnknown // weak referent is not linked in + } + } + + // Parse the field value. + var valLen int + switch { + case err != nil: + case fd.IsList(): + valLen, err = o.unmarshalList(b[tagLen:], wtyp, m.Mutable(fd).List(), fd) + case fd.IsMap(): + valLen, err = o.unmarshalMap(b[tagLen:], wtyp, m.Mutable(fd).Map(), fd) + default: + valLen, err = o.unmarshalSingular(b[tagLen:], wtyp, m, fd) + } + if err != nil { + if err != errUnknown { + return err + } + valLen = protowire.ConsumeFieldValue(num, wtyp, b[tagLen:]) + if valLen < 0 { + return errDecode + } + if !o.DiscardUnknown { + m.SetUnknown(append(m.GetUnknown(), b[:tagLen+valLen]...)) + } + } + b = b[tagLen+valLen:] + } + return nil +} + +func (o UnmarshalOptions) unmarshalSingular(b []byte, wtyp protowire.Type, m protoreflect.Message, fd protoreflect.FieldDescriptor) (n int, err error) { + v, n, err := o.unmarshalScalar(b, wtyp, fd) + if err != nil { + return 0, err + } + switch fd.Kind() { + case protoreflect.GroupKind, protoreflect.MessageKind: + m2 := m.Mutable(fd).Message() + if err := o.unmarshalMessage(v.Bytes(), m2); err != nil { + return n, err + } + default: + // Non-message scalars replace the previous value. + m.Set(fd, v) + } + return n, nil +} + +func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv protoreflect.Map, fd protoreflect.FieldDescriptor) (n int, err error) { + if wtyp != protowire.BytesType { + return 0, errUnknown + } + b, n = protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + var ( + keyField = fd.MapKey() + valField = fd.MapValue() + key protoreflect.Value + val protoreflect.Value + haveKey bool + haveVal bool + ) + switch valField.Kind() { + case protoreflect.GroupKind, protoreflect.MessageKind: + val = mapv.NewValue() + } + // Map entries are represented as a two-element message with fields + // containing the key and value. + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return 0, errDecode + } + if num > protowire.MaxValidNumber { + return 0, errDecode + } + b = b[n:] + err = errUnknown + switch num { + case genid.MapEntry_Key_field_number: + key, n, err = o.unmarshalScalar(b, wtyp, keyField) + if err != nil { + break + } + haveKey = true + case genid.MapEntry_Value_field_number: + var v protoreflect.Value + v, n, err = o.unmarshalScalar(b, wtyp, valField) + if err != nil { + break + } + switch valField.Kind() { + case protoreflect.GroupKind, protoreflect.MessageKind: + if err := o.unmarshalMessage(v.Bytes(), val.Message()); err != nil { + return 0, err + } + default: + val = v + } + haveVal = true + } + if err == errUnknown { + n = protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return 0, errDecode + } + } else if err != nil { + return 0, err + } + b = b[n:] + } + // Every map entry should have entries for key and value, but this is not strictly required. 
+ if !haveKey { + key = keyField.Default() + } + if !haveVal { + switch valField.Kind() { + case protoreflect.GroupKind, protoreflect.MessageKind: + default: + val = valField.Default() + } + } + mapv.Set(key.MapKey(), val) + return n, nil +} + +// errUnknown is used internally to indicate fields which should be added +// to the unknown field set of a message. It is never returned from an exported +// function. +var errUnknown = errors.New("BUG: internal error (unknown)") + +var errDecode = errors.New("cannot parse invalid wire-format data") diff --git a/vendor/google.golang.org/protobuf/proto/decode_gen.go b/vendor/google.golang.org/protobuf/proto/decode_gen.go new file mode 100644 index 000000000..301eeb20f --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/decode_gen.go @@ -0,0 +1,603 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package proto + +import ( + "math" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// unmarshalScalar decodes a value of the given kind. +// +// Message values are decoded into a []byte which aliases the input data. +func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd protoreflect.FieldDescriptor) (val protoreflect.Value, n int, err error) { + switch fd.Kind() { + case protoreflect.BoolKind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfBool(protowire.DecodeBool(v)), n, nil + case protoreflect.EnumKind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), n, nil + case protoreflect.Int32Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfInt32(int32(v)), n, nil + case protoreflect.Sint32Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), n, nil + case protoreflect.Uint32Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfUint32(uint32(v)), n, nil + case protoreflect.Int64Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfInt64(int64(v)), n, nil + case protoreflect.Sint64Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), n, nil + case protoreflect.Uint64Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfUint64(v), n, nil + case 
protoreflect.Sfixed32Kind: + if wtyp != protowire.Fixed32Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfInt32(int32(v)), n, nil + case protoreflect.Fixed32Kind: + if wtyp != protowire.Fixed32Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfUint32(uint32(v)), n, nil + case protoreflect.FloatKind: + if wtyp != protowire.Fixed32Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), n, nil + case protoreflect.Sfixed64Kind: + if wtyp != protowire.Fixed64Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfInt64(int64(v)), n, nil + case protoreflect.Fixed64Kind: + if wtyp != protowire.Fixed64Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfUint64(v), n, nil + case protoreflect.DoubleKind: + if wtyp != protowire.Fixed64Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfFloat64(math.Float64frombits(v)), n, nil + case protoreflect.StringKind: + if wtyp != protowire.BytesType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return val, 0, errDecode + } + if strs.EnforceUTF8(fd) && !utf8.Valid(v) { + return protoreflect.Value{}, 0, errors.InvalidUTF8(string(fd.FullName())) + } + return protoreflect.ValueOfString(string(v)), n, nil + case protoreflect.BytesKind: + if wtyp != protowire.BytesType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), n, nil + case protoreflect.MessageKind: + if wtyp != protowire.BytesType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfBytes(v), n, nil + case protoreflect.GroupKind: + if wtyp != protowire.StartGroupType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeGroup(fd.Number(), b) + if n < 0 { + return val, 0, errDecode + } + return protoreflect.ValueOfBytes(v), n, nil + default: + return val, 0, errUnknown + } +} + +func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list protoreflect.List, fd protoreflect.FieldDescriptor) (n int, err error) { + switch fd.Kind() { + case protoreflect.BoolKind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) + return n, nil + case protoreflect.EnumKind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, errDecode + } 
+ buf = buf[n:] + list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) + return n, nil + case protoreflect.Int32Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt32(int32(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + return n, nil + case protoreflect.Sint32Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) + return n, nil + case protoreflect.Uint32Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfUint32(uint32(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + return n, nil + case protoreflect.Int64Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt64(int64(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + return n, nil + case protoreflect.Sint64Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) + return n, nil + case protoreflect.Uint64Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfUint64(v)) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := 
protowire.ConsumeVarint(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfUint64(v)) + return n, nil + case protoreflect.Sfixed32Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed32(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt32(int32(v))) + } + return n, nil + } + if wtyp != protowire.Fixed32Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + return n, nil + case protoreflect.Fixed32Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed32(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfUint32(uint32(v))) + } + return n, nil + } + if wtyp != protowire.Fixed32Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + return n, nil + case protoreflect.FloatKind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed32(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) + } + return n, nil + } + if wtyp != protowire.Fixed32Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) + return n, nil + case protoreflect.Sfixed64Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed64(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt64(int64(v))) + } + return n, nil + } + if wtyp != protowire.Fixed64Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + return n, nil + case protoreflect.Fixed64Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed64(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfUint64(v)) + } + return n, nil + } + if wtyp != protowire.Fixed64Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfUint64(v)) + return n, nil + case protoreflect.DoubleKind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed64(buf) + if n < 0 { + return 0, errDecode + } + buf = buf[n:] + list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) + } + return n, nil + } + if wtyp != protowire.Fixed64Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) + return n, nil + case protoreflect.StringKind: + if wtyp != protowire.BytesType { + return 0, 
errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + if strs.EnforceUTF8(fd) && !utf8.Valid(v) { + return 0, errors.InvalidUTF8(string(fd.FullName())) + } + list.Append(protoreflect.ValueOfString(string(v))) + return n, nil + case protoreflect.BytesKind: + if wtyp != protowire.BytesType { + return 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...))) + return n, nil + case protoreflect.MessageKind: + if wtyp != protowire.BytesType { + return 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, errDecode + } + m := list.NewElement() + if err := o.unmarshalMessage(v, m.Message()); err != nil { + return 0, err + } + list.Append(m) + return n, nil + case protoreflect.GroupKind: + if wtyp != protowire.StartGroupType { + return 0, errUnknown + } + v, n := protowire.ConsumeGroup(fd.Number(), b) + if n < 0 { + return 0, errDecode + } + m := list.NewElement() + if err := o.unmarshalMessage(v, m.Message()); err != nil { + return 0, err + } + list.Append(m) + return n, nil + default: + return 0, errUnknown + } +} + +// We append to an empty array rather than a nil []byte to get non-nil zero-length byte slices. +var emptyBuf [0]byte diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go new file mode 100644 index 000000000..c52d8c4ab --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/doc.go @@ -0,0 +1,94 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package proto provides functions operating on protocol buffer messages. +// +// For documentation on protocol buffers in general, see: +// +// https://developers.google.com/protocol-buffers +// +// For a tutorial on using protocol buffers with Go, see: +// +// https://developers.google.com/protocol-buffers/docs/gotutorial +// +// For a guide to generated Go protocol buffer code, see: +// +// https://developers.google.com/protocol-buffers/docs/reference/go-generated +// +// +// Binary serialization +// +// This package contains functions to convert to and from the wire format, +// an efficient binary serialization of protocol buffers. +// +// • Size reports the size of a message in the wire format. +// +// • Marshal converts a message to the wire format. +// The MarshalOptions type provides more control over wire marshaling. +// +// • Unmarshal converts a message from the wire format. +// The UnmarshalOptions type provides more control over wire unmarshaling. +// +// +// Basic message operations +// +// • Clone makes a deep copy of a message. +// +// • Merge merges the content of a message into another. +// +// • Equal compares two messages. For more control over comparisons +// and detailed reporting of differences, see package +// "google.golang.org/protobuf/testing/protocmp". +// +// • Reset clears the content of a message. +// +// • CheckInitialized reports whether all required fields in a message are set. +// +// +// Optional scalar constructors +// +// The API for some generated messages represents optional scalar fields +// as pointers to a value. For example, an optional string field has the +// Go type *string. 
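As a brief illustration of the pointer-typed optional fields just described, a caller would typically populate them with the helper constructors enumerated next. This is only a sketch; pb.Profile and its Name/Age fields are hypothetical generated names, not part of this patch.

    // Hypothetical proto2 message:
    //   message Profile { optional string name = 1; optional int32 age = 2; }
    p := &pb.Profile{
        Name: proto.String("alice"), // *string
        Age:  proto.Int32(30),       // *int32
    }
    _ = p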
+// +// • Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, and String +// take a value and return a pointer to a new instance of it, +// to simplify construction of optional field values. +// +// Generated enum types usually have an Enum method which performs the +// same operation. +// +// Optional scalar fields are only supported in proto2. +// +// +// Extension accessors +// +// • HasExtension, GetExtension, SetExtension, and ClearExtension +// access extension field values in a protocol buffer message. +// +// Extension fields are only supported in proto2. +// +// +// Related packages +// +// • Package "google.golang.org/protobuf/encoding/protojson" converts messages to +// and from JSON. +// +// • Package "google.golang.org/protobuf/encoding/prototext" converts messages to +// and from the text format. +// +// • Package "google.golang.org/protobuf/reflect/protoreflect" provides a +// reflection interface for protocol buffer data types. +// +// • Package "google.golang.org/protobuf/testing/protocmp" provides features +// to compare protocol buffer messages with the "github.com/google/go-cmp/cmp" +// package. +// +// • Package "google.golang.org/protobuf/types/dynamicpb" provides a dynamic +// message type, suitable for working with messages where the protocol buffer +// type is only known at runtime. +// +// This module contains additional packages for more specialized use cases. +// Consult the individual package documentation for details. +package proto diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go new file mode 100644 index 000000000..d18239c23 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/encode.go @@ -0,0 +1,319 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/order" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +// MarshalOptions configures the marshaler. +// +// Example usage: +// b, err := MarshalOptions{Deterministic: true}.Marshal(m) +type MarshalOptions struct { + pragma.NoUnkeyedLiterals + + // AllowPartial allows messages that have missing required fields to marshal + // without returning an error. If AllowPartial is false (the default), + // Marshal will return an error if there are any missing required fields. + AllowPartial bool + + // Deterministic controls whether the same message will always be + // serialized to the same bytes within the same binary. + // + // Setting this option guarantees that repeated serialization of + // the same message will return the same bytes, and that different + // processes of the same binary (which may be executing on different + // machines) will serialize equal messages to the same bytes. + // It has no effect on the resulting size of the encoded message compared + // to a non-deterministic marshal. + // + // Note that the deterministic serialization is NOT canonical across + // languages. It is not guaranteed to remain stable over time. It is + // unstable across different builds with schema changes due to unknown + // fields. Users who need canonical serialization (e.g., persistent + // storage in a canonical form, fingerprinting, etc.) 
must define + // their own canonicalization specification and implement their own + // serializer rather than relying on this API. + // + // If deterministic serialization is requested, map entries will be + // sorted by keys in lexographical order. This is an implementation + // detail and subject to change. + Deterministic bool + + // UseCachedSize indicates that the result of a previous Size call + // may be reused. + // + // Setting this option asserts that: + // + // 1. Size has previously been called on this message with identical + // options (except for UseCachedSize itself). + // + // 2. The message and all its submessages have not changed in any + // way since the Size call. + // + // If either of these invariants is violated, + // the results are undefined and may include panics or corrupted output. + // + // Implementations MAY take this option into account to provide + // better performance, but there is no guarantee that they will do so. + // There is absolutely no guarantee that Size followed by Marshal with + // UseCachedSize set will perform equivalently to Marshal alone. + UseCachedSize bool +} + +// Marshal returns the wire-format encoding of m. +func Marshal(m Message) ([]byte, error) { + // Treat nil message interface as an empty message; nothing to output. + if m == nil { + return nil, nil + } + + out, err := MarshalOptions{}.marshal(nil, m.ProtoReflect()) + if len(out.Buf) == 0 && err == nil { + out.Buf = emptyBytesForMessage(m) + } + return out.Buf, err +} + +// Marshal returns the wire-format encoding of m. +func (o MarshalOptions) Marshal(m Message) ([]byte, error) { + // Treat nil message interface as an empty message; nothing to output. + if m == nil { + return nil, nil + } + + out, err := o.marshal(nil, m.ProtoReflect()) + if len(out.Buf) == 0 && err == nil { + out.Buf = emptyBytesForMessage(m) + } + return out.Buf, err +} + +// emptyBytesForMessage returns a nil buffer if and only if m is invalid, +// otherwise it returns a non-nil empty buffer. +// +// This is to assist the edge-case where user-code does the following: +// m1.OptionalBytes, _ = proto.Marshal(m2) +// where they expect the proto2 "optional_bytes" field to be populated +// if any only if m2 is a valid message. +func emptyBytesForMessage(m Message) []byte { + if m == nil || !m.ProtoReflect().IsValid() { + return nil + } + return emptyBuf[:] +} + +// MarshalAppend appends the wire-format encoding of m to b, +// returning the result. +func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) { + // Treat nil message interface as an empty message; nothing to append. + if m == nil { + return b, nil + } + + out, err := o.marshal(b, m.ProtoReflect()) + return out.Buf, err +} + +// MarshalState returns the wire-format encoding of a message. +// +// This method permits fine-grained control over the marshaler. +// Most users should use Marshal instead. +func (o MarshalOptions) MarshalState(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) { + return o.marshal(in.Buf, in.Message) +} + +// marshal is a centralized function that all marshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for marshal that do not go through this. 
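The options described above are usually combined as in the following minimal sketch, which mirrors the deterministic-marshal example given in the MarshalOptions doc comment; m stands for any generated message and is assumed to exist.

    opts := proto.MarshalOptions{Deterministic: true, AllowPartial: true}
    buf, err := opts.Marshal(m) // repeated calls yield identical bytes within the same binary
    if err != nil {
        // handle marshal error
    }
    buf, err = opts.MarshalAppend(buf, m) // append a second encoding to the same buffer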
+func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoiface.MarshalOutput, err error) { + allowPartial := o.AllowPartial + o.AllowPartial = true + if methods := protoMethods(m); methods != nil && methods.Marshal != nil && + !(o.Deterministic && methods.Flags&protoiface.SupportMarshalDeterministic == 0) { + in := protoiface.MarshalInput{ + Message: m, + Buf: b, + } + if o.Deterministic { + in.Flags |= protoiface.MarshalDeterministic + } + if o.UseCachedSize { + in.Flags |= protoiface.MarshalUseCachedSize + } + if methods.Size != nil { + sout := methods.Size(protoiface.SizeInput{ + Message: m, + Flags: in.Flags, + }) + if cap(b) < len(b)+sout.Size { + in.Buf = make([]byte, len(b), growcap(cap(b), len(b)+sout.Size)) + copy(in.Buf, b) + } + in.Flags |= protoiface.MarshalUseCachedSize + } + out, err = methods.Marshal(in) + } else { + out.Buf, err = o.marshalMessageSlow(b, m) + } + if err != nil { + return out, err + } + if allowPartial { + return out, nil + } + return out, checkInitialized(m) +} + +func (o MarshalOptions) marshalMessage(b []byte, m protoreflect.Message) ([]byte, error) { + out, err := o.marshal(b, m) + return out.Buf, err +} + +// growcap scales up the capacity of a slice. +// +// Given a slice with a current capacity of oldcap and a desired +// capacity of wantcap, growcap returns a new capacity >= wantcap. +// +// The algorithm is mostly identical to the one used by append as of Go 1.14. +func growcap(oldcap, wantcap int) (newcap int) { + if wantcap > oldcap*2 { + newcap = wantcap + } else if oldcap < 1024 { + // The Go 1.14 runtime takes this case when len(s) < 1024, + // not when cap(s) < 1024. The difference doesn't seem + // significant here. + newcap = oldcap * 2 + } else { + newcap = oldcap + for 0 < newcap && newcap < wantcap { + newcap += newcap / 4 + } + if newcap <= 0 { + newcap = wantcap + } + } + return newcap +} + +func (o MarshalOptions) marshalMessageSlow(b []byte, m protoreflect.Message) ([]byte, error) { + if messageset.IsMessageSet(m.Descriptor()) { + return o.marshalMessageSet(b, m) + } + fieldOrder := order.AnyFieldOrder + if o.Deterministic { + // TODO: This should use a more natural ordering like NumberFieldOrder, + // but doing so breaks golden tests that make invalid assumption about + // output stability of this implementation. + fieldOrder = order.LegacyFieldOrder + } + var err error + order.RangeFields(m, fieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + b, err = o.marshalField(b, fd, v) + return err == nil + }) + if err != nil { + return b, err + } + b = append(b, m.GetUnknown()...) 
+ return b, nil +} + +func (o MarshalOptions) marshalField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) { + switch { + case fd.IsList(): + return o.marshalList(b, fd, value.List()) + case fd.IsMap(): + return o.marshalMap(b, fd, value.Map()) + default: + b = protowire.AppendTag(b, fd.Number(), wireTypes[fd.Kind()]) + return o.marshalSingular(b, fd, value) + } +} + +func (o MarshalOptions) marshalList(b []byte, fd protoreflect.FieldDescriptor, list protoreflect.List) ([]byte, error) { + if fd.IsPacked() && list.Len() > 0 { + b = protowire.AppendTag(b, fd.Number(), protowire.BytesType) + b, pos := appendSpeculativeLength(b) + for i, llen := 0, list.Len(); i < llen; i++ { + var err error + b, err = o.marshalSingular(b, fd, list.Get(i)) + if err != nil { + return b, err + } + } + b = finishSpeculativeLength(b, pos) + return b, nil + } + + kind := fd.Kind() + for i, llen := 0, list.Len(); i < llen; i++ { + var err error + b = protowire.AppendTag(b, fd.Number(), wireTypes[kind]) + b, err = o.marshalSingular(b, fd, list.Get(i)) + if err != nil { + return b, err + } + } + return b, nil +} + +func (o MarshalOptions) marshalMap(b []byte, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) ([]byte, error) { + keyf := fd.MapKey() + valf := fd.MapValue() + keyOrder := order.AnyKeyOrder + if o.Deterministic { + keyOrder = order.GenericKeyOrder + } + var err error + order.RangeEntries(mapv, keyOrder, func(key protoreflect.MapKey, value protoreflect.Value) bool { + b = protowire.AppendTag(b, fd.Number(), protowire.BytesType) + var pos int + b, pos = appendSpeculativeLength(b) + + b, err = o.marshalField(b, keyf, key.Value()) + if err != nil { + return false + } + b, err = o.marshalField(b, valf, value) + if err != nil { + return false + } + b = finishSpeculativeLength(b, pos) + return true + }) + return b, err +} + +// When encoding length-prefixed fields, we speculatively set aside some number of bytes +// for the length, encode the data, and then encode the length (shifting the data if necessary +// to make room). +const speculativeLength = 1 + +func appendSpeculativeLength(b []byte) ([]byte, int) { + pos := len(b) + b = append(b, "\x00\x00\x00\x00"[:speculativeLength]...) + return b, pos +} + +func finishSpeculativeLength(b []byte, pos int) []byte { + mlen := len(b) - pos - speculativeLength + msiz := protowire.SizeVarint(uint64(mlen)) + if msiz != speculativeLength { + for i := 0; i < msiz-speculativeLength; i++ { + b = append(b, 0) + } + copy(b[pos+msiz:], b[pos+speculativeLength:]) + b = b[:pos+msiz+mlen] + } + protowire.AppendVarint(b[:pos], uint64(mlen)) + return b +} diff --git a/vendor/google.golang.org/protobuf/proto/encode_gen.go b/vendor/google.golang.org/protobuf/proto/encode_gen.go new file mode 100644 index 000000000..185dacfb4 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/encode_gen.go @@ -0,0 +1,97 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. 
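For contrast with the speculative one-byte length reservation used by appendSpeculativeLength/finishSpeculativeLength above, this sketch shows the straightforward way to emit a length-delimited field with protowire when the payload is already in hand; field number 1 is arbitrary.

    b := protowire.AppendTag(nil, 1, protowire.BytesType)
    b = protowire.AppendBytes(b, []byte("hello")) // writes the varint length, then the payload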
+ +package proto + +import ( + "math" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" +) + +var wireTypes = map[protoreflect.Kind]protowire.Type{ + protoreflect.BoolKind: protowire.VarintType, + protoreflect.EnumKind: protowire.VarintType, + protoreflect.Int32Kind: protowire.VarintType, + protoreflect.Sint32Kind: protowire.VarintType, + protoreflect.Uint32Kind: protowire.VarintType, + protoreflect.Int64Kind: protowire.VarintType, + protoreflect.Sint64Kind: protowire.VarintType, + protoreflect.Uint64Kind: protowire.VarintType, + protoreflect.Sfixed32Kind: protowire.Fixed32Type, + protoreflect.Fixed32Kind: protowire.Fixed32Type, + protoreflect.FloatKind: protowire.Fixed32Type, + protoreflect.Sfixed64Kind: protowire.Fixed64Type, + protoreflect.Fixed64Kind: protowire.Fixed64Type, + protoreflect.DoubleKind: protowire.Fixed64Type, + protoreflect.StringKind: protowire.BytesType, + protoreflect.BytesKind: protowire.BytesType, + protoreflect.MessageKind: protowire.BytesType, + protoreflect.GroupKind: protowire.StartGroupType, +} + +func (o MarshalOptions) marshalSingular(b []byte, fd protoreflect.FieldDescriptor, v protoreflect.Value) ([]byte, error) { + switch fd.Kind() { + case protoreflect.BoolKind: + b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) + case protoreflect.EnumKind: + b = protowire.AppendVarint(b, uint64(v.Enum())) + case protoreflect.Int32Kind: + b = protowire.AppendVarint(b, uint64(int32(v.Int()))) + case protoreflect.Sint32Kind: + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) + case protoreflect.Uint32Kind: + b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) + case protoreflect.Int64Kind: + b = protowire.AppendVarint(b, uint64(v.Int())) + case protoreflect.Sint64Kind: + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) + case protoreflect.Uint64Kind: + b = protowire.AppendVarint(b, v.Uint()) + case protoreflect.Sfixed32Kind: + b = protowire.AppendFixed32(b, uint32(v.Int())) + case protoreflect.Fixed32Kind: + b = protowire.AppendFixed32(b, uint32(v.Uint())) + case protoreflect.FloatKind: + b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) + case protoreflect.Sfixed64Kind: + b = protowire.AppendFixed64(b, uint64(v.Int())) + case protoreflect.Fixed64Kind: + b = protowire.AppendFixed64(b, v.Uint()) + case protoreflect.DoubleKind: + b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) + case protoreflect.StringKind: + if strs.EnforceUTF8(fd) && !utf8.ValidString(v.String()) { + return b, errors.InvalidUTF8(string(fd.FullName())) + } + b = protowire.AppendString(b, v.String()) + case protoreflect.BytesKind: + b = protowire.AppendBytes(b, v.Bytes()) + case protoreflect.MessageKind: + var pos int + var err error + b, pos = appendSpeculativeLength(b) + b, err = o.marshalMessage(b, v.Message()) + if err != nil { + return b, err + } + b = finishSpeculativeLength(b, pos) + case protoreflect.GroupKind: + var err error + b, err = o.marshalMessage(b, v.Message()) + if err != nil { + return b, err + } + b = protowire.AppendVarint(b, protowire.EncodeTag(fd.Number(), protowire.EndGroupType)) + default: + return b, errors.New("invalid kind %v", fd.Kind()) + } + return b, nil +} diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go new file mode 100644 index 000000000..4dba2b969 --- /dev/null 
+++ b/vendor/google.golang.org/protobuf/proto/equal.go @@ -0,0 +1,167 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "bytes" + "math" + "reflect" + + "google.golang.org/protobuf/encoding/protowire" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// Equal reports whether two messages are equal. +// If two messages marshal to the same bytes under deterministic serialization, +// then Equal is guaranteed to report true. +// +// Two messages are equal if they belong to the same message descriptor, +// have the same set of populated known and extension field values, +// and the same set of unknown fields values. If either of the top-level +// messages are invalid, then Equal reports true only if both are invalid. +// +// Scalar values are compared with the equivalent of the == operator in Go, +// except bytes values which are compared using bytes.Equal and +// floating point values which specially treat NaNs as equal. +// Message values are compared by recursively calling Equal. +// Lists are equal if each element value is also equal. +// Maps are equal if they have the same set of keys, where the pair of values +// for each key is also equal. +func Equal(x, y Message) bool { + if x == nil || y == nil { + return x == nil && y == nil + } + mx := x.ProtoReflect() + my := y.ProtoReflect() + if mx.IsValid() != my.IsValid() { + return false + } + return equalMessage(mx, my) +} + +// equalMessage compares two messages. +func equalMessage(mx, my pref.Message) bool { + if mx.Descriptor() != my.Descriptor() { + return false + } + + nx := 0 + equal := true + mx.Range(func(fd pref.FieldDescriptor, vx pref.Value) bool { + nx++ + vy := my.Get(fd) + equal = my.Has(fd) && equalField(fd, vx, vy) + return equal + }) + if !equal { + return false + } + ny := 0 + my.Range(func(fd pref.FieldDescriptor, vx pref.Value) bool { + ny++ + return true + }) + if nx != ny { + return false + } + + return equalUnknown(mx.GetUnknown(), my.GetUnknown()) +} + +// equalField compares two fields. +func equalField(fd pref.FieldDescriptor, x, y pref.Value) bool { + switch { + case fd.IsList(): + return equalList(fd, x.List(), y.List()) + case fd.IsMap(): + return equalMap(fd, x.Map(), y.Map()) + default: + return equalValue(fd, x, y) + } +} + +// equalMap compares two maps. +func equalMap(fd pref.FieldDescriptor, x, y pref.Map) bool { + if x.Len() != y.Len() { + return false + } + equal := true + x.Range(func(k pref.MapKey, vx pref.Value) bool { + vy := y.Get(k) + equal = y.Has(k) && equalValue(fd.MapValue(), vx, vy) + return equal + }) + return equal +} + +// equalList compares two lists. +func equalList(fd pref.FieldDescriptor, x, y pref.List) bool { + if x.Len() != y.Len() { + return false + } + for i := x.Len() - 1; i >= 0; i-- { + if !equalValue(fd, x.Get(i), y.Get(i)) { + return false + } + } + return true +} + +// equalValue compares two singular values. 
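A small sketch of the comparison semantics documented on Equal: NaN floating-point values compare equal to each other and messages are compared field by field. pb.Sample, with an optional float64 Value field, is a hypothetical generated message used only for illustration.

    a := &pb.Sample{Value: proto.Float64(math.NaN())}
    b := &pb.Sample{Value: proto.Float64(math.NaN())}
    fmt.Println(proto.Equal(a, b)) // true: NaN is treated as equal to NaN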
+func equalValue(fd pref.FieldDescriptor, x, y pref.Value) bool { + switch fd.Kind() { + case pref.BoolKind: + return x.Bool() == y.Bool() + case pref.EnumKind: + return x.Enum() == y.Enum() + case pref.Int32Kind, pref.Sint32Kind, + pref.Int64Kind, pref.Sint64Kind, + pref.Sfixed32Kind, pref.Sfixed64Kind: + return x.Int() == y.Int() + case pref.Uint32Kind, pref.Uint64Kind, + pref.Fixed32Kind, pref.Fixed64Kind: + return x.Uint() == y.Uint() + case pref.FloatKind, pref.DoubleKind: + fx := x.Float() + fy := y.Float() + if math.IsNaN(fx) || math.IsNaN(fy) { + return math.IsNaN(fx) && math.IsNaN(fy) + } + return fx == fy + case pref.StringKind: + return x.String() == y.String() + case pref.BytesKind: + return bytes.Equal(x.Bytes(), y.Bytes()) + case pref.MessageKind, pref.GroupKind: + return equalMessage(x.Message(), y.Message()) + default: + return x.Interface() == y.Interface() + } +} + +// equalUnknown compares unknown fields by direct comparison on the raw bytes +// of each individual field number. +func equalUnknown(x, y pref.RawFields) bool { + if len(x) != len(y) { + return false + } + if bytes.Equal([]byte(x), []byte(y)) { + return true + } + + mx := make(map[pref.FieldNumber]pref.RawFields) + my := make(map[pref.FieldNumber]pref.RawFields) + for len(x) > 0 { + fnum, _, n := protowire.ConsumeField(x) + mx[fnum] = append(mx[fnum], x[:n]...) + x = x[n:] + } + for len(y) > 0 { + fnum, _, n := protowire.ConsumeField(y) + my[fnum] = append(my[fnum], y[:n]...) + y = y[n:] + } + return reflect.DeepEqual(mx, my) +} diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go new file mode 100644 index 000000000..5f293cda8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -0,0 +1,92 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" +) + +// HasExtension reports whether an extension field is populated. +// It returns false if m is invalid or if xt does not extend m. +func HasExtension(m Message, xt protoreflect.ExtensionType) bool { + // Treat nil message interface as an empty message; no populated fields. + if m == nil { + return false + } + + // As a special-case, we reports invalid or mismatching descriptors + // as always not being populated (since they aren't). + if xt == nil || m.ProtoReflect().Descriptor() != xt.TypeDescriptor().ContainingMessage() { + return false + } + + return m.ProtoReflect().Has(xt.TypeDescriptor()) +} + +// ClearExtension clears an extension field such that subsequent +// HasExtension calls return false. +// It panics if m is invalid or if xt does not extend m. +func ClearExtension(m Message, xt protoreflect.ExtensionType) { + m.ProtoReflect().Clear(xt.TypeDescriptor()) +} + +// GetExtension retrieves the value for an extension field. +// If the field is unpopulated, it returns the default value for +// scalars and an immutable, empty value for lists or messages. +// It panics if xt does not extend m. +func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} { + // Treat nil message interface as an empty message; return the default. + if m == nil { + return xt.InterfaceOf(xt.Zero()) + } + + return xt.InterfaceOf(m.ProtoReflect().Get(xt.TypeDescriptor())) +} + +// SetExtension stores the value of an extension field. 
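The extension accessors above are used roughly as follows; pb.E_Priority is a hypothetical extension type extending a hypothetical pb.Order message, chosen only to make the calls concrete.

    proto.SetExtension(order, pb.E_Priority, int32(5))
    if proto.HasExtension(order, pb.E_Priority) {
        v := proto.GetExtension(order, pb.E_Priority).(int32)
        _ = v
    }
    proto.ClearExtension(order, pb.E_Priority)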
+// It panics if m is invalid, xt does not extend m, or if type of v +// is invalid for the specified extension field. +func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) { + xd := xt.TypeDescriptor() + pv := xt.ValueOf(v) + + // Specially treat an invalid list, map, or message as clear. + isValid := true + switch { + case xd.IsList(): + isValid = pv.List().IsValid() + case xd.IsMap(): + isValid = pv.Map().IsValid() + case xd.Message() != nil: + isValid = pv.Message().IsValid() + } + if !isValid { + m.ProtoReflect().Clear(xd) + return + } + + m.ProtoReflect().Set(xd, pv) +} + +// RangeExtensions iterates over every populated extension field in m in an +// undefined order, calling f for each extension type and value encountered. +// It returns immediately if f returns false. +// While iterating, mutating operations may only be performed +// on the current extension field. +func RangeExtensions(m Message, f func(protoreflect.ExtensionType, interface{}) bool) { + // Treat nil message interface as an empty message; nothing to range over. + if m == nil { + return + } + + m.ProtoReflect().Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + xt := fd.(protoreflect.ExtensionTypeDescriptor).Type() + vi := xt.InterfaceOf(v) + return f(xt, vi) + } + return true + }) +} diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go new file mode 100644 index 000000000..d761ab331 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/merge.go @@ -0,0 +1,139 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "fmt" + + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +// Merge merges src into dst, which must be a message with the same descriptor. +// +// Populated scalar fields in src are copied to dst, while populated +// singular messages in src are merged into dst by recursively calling Merge. +// The elements of every list field in src is appended to the corresponded +// list fields in dst. The entries of every map field in src is copied into +// the corresponding map field in dst, possibly replacing existing entries. +// The unknown fields of src are appended to the unknown fields of dst. +// +// It is semantically equivalent to unmarshaling the encoded form of src +// into dst with the UnmarshalOptions.Merge option specified. +func Merge(dst, src Message) { + // TODO: Should nil src be treated as semantically equivalent to a + // untyped, read-only, empty message? What about a nil dst? + + dstMsg, srcMsg := dst.ProtoReflect(), src.ProtoReflect() + if dstMsg.Descriptor() != srcMsg.Descriptor() { + if got, want := dstMsg.Descriptor().FullName(), srcMsg.Descriptor().FullName(); got != want { + panic(fmt.Sprintf("descriptor mismatch: %v != %v", got, want)) + } + panic("descriptor mismatch") + } + mergeOptions{}.mergeMessage(dstMsg, srcMsg) +} + +// Clone returns a deep copy of m. +// If the top-level message is invalid, it returns an invalid message as well. 
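A sketch of the Merge semantics described above, assuming a hypothetical generated message pb.Config with a scalar Name field and a repeated Tags field: populated scalars in src overwrite dst, while repeated elements are appended.

    dst := &pb.Config{Name: proto.String("old"), Tags: []string{"a"}}
    src := &pb.Config{Name: proto.String("new"), Tags: []string{"b"}}
    proto.Merge(dst, src)
    // dst.Name now points to "new"; dst.Tags is []string{"a", "b"}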
+func Clone(m Message) Message { + // NOTE: Most usages of Clone assume the following properties: + // t := reflect.TypeOf(m) + // t == reflect.TypeOf(m.ProtoReflect().New().Interface()) + // t == reflect.TypeOf(m.ProtoReflect().Type().Zero().Interface()) + // + // Embedding protobuf messages breaks this since the parent type will have + // a forwarded ProtoReflect method, but the Interface method will return + // the underlying embedded message type. + if m == nil { + return nil + } + src := m.ProtoReflect() + if !src.IsValid() { + return src.Type().Zero().Interface() + } + dst := src.New() + mergeOptions{}.mergeMessage(dst, src) + return dst.Interface() +} + +// mergeOptions provides a namespace for merge functions, and can be +// exported in the future if we add user-visible merge options. +type mergeOptions struct{} + +func (o mergeOptions) mergeMessage(dst, src protoreflect.Message) { + methods := protoMethods(dst) + if methods != nil && methods.Merge != nil { + in := protoiface.MergeInput{ + Destination: dst, + Source: src, + } + out := methods.Merge(in) + if out.Flags&protoiface.MergeComplete != 0 { + return + } + } + + if !dst.IsValid() { + panic(fmt.Sprintf("cannot merge into invalid %v message", dst.Descriptor().FullName())) + } + + src.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + switch { + case fd.IsList(): + o.mergeList(dst.Mutable(fd).List(), v.List(), fd) + case fd.IsMap(): + o.mergeMap(dst.Mutable(fd).Map(), v.Map(), fd.MapValue()) + case fd.Message() != nil: + o.mergeMessage(dst.Mutable(fd).Message(), v.Message()) + case fd.Kind() == protoreflect.BytesKind: + dst.Set(fd, o.cloneBytes(v)) + default: + dst.Set(fd, v) + } + return true + }) + + if len(src.GetUnknown()) > 0 { + dst.SetUnknown(append(dst.GetUnknown(), src.GetUnknown()...)) + } +} + +func (o mergeOptions) mergeList(dst, src protoreflect.List, fd protoreflect.FieldDescriptor) { + // Merge semantics appends to the end of the existing list. + for i, n := 0, src.Len(); i < n; i++ { + switch v := src.Get(i); { + case fd.Message() != nil: + dstv := dst.NewElement() + o.mergeMessage(dstv.Message(), v.Message()) + dst.Append(dstv) + case fd.Kind() == protoreflect.BytesKind: + dst.Append(o.cloneBytes(v)) + default: + dst.Append(v) + } + } +} + +func (o mergeOptions) mergeMap(dst, src protoreflect.Map, fd protoreflect.FieldDescriptor) { + // Merge semantics replaces, rather than merges into existing entries. + src.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + switch { + case fd.Message() != nil: + dstv := dst.NewValue() + o.mergeMessage(dstv.Message(), v.Message()) + dst.Set(k, dstv) + case fd.Kind() == protoreflect.BytesKind: + dst.Set(k, o.cloneBytes(v)) + default: + dst.Set(k, v) + } + return true + }) +} + +func (o mergeOptions) cloneBytes(v protoreflect.Value) protoreflect.Value { + return protoreflect.ValueOfBytes(append([]byte{}, v.Bytes()...)) +} diff --git a/vendor/google.golang.org/protobuf/proto/messageset.go b/vendor/google.golang.org/protobuf/proto/messageset.go new file mode 100644 index 000000000..312d5d45c --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/messageset.go @@ -0,0 +1,93 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
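Clone, defined earlier in merge.go, yields an independent deep copy, as the following sketch illustrates with the same hypothetical pb.Sample message, here given a repeated Tags field.

    orig := &pb.Sample{Tags: []string{"a"}}
    cp := proto.Clone(orig).(*pb.Sample)
    cp.Tags = append(cp.Tags, "b") // leaves orig.Tags untouched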
+ +package proto + +import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/order" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +func (o MarshalOptions) sizeMessageSet(m protoreflect.Message) (size int) { + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + size += messageset.SizeField(fd.Number()) + size += protowire.SizeTag(messageset.FieldMessage) + size += protowire.SizeBytes(o.size(v.Message())) + return true + }) + size += messageset.SizeUnknown(m.GetUnknown()) + return size +} + +func (o MarshalOptions) marshalMessageSet(b []byte, m protoreflect.Message) ([]byte, error) { + if !flags.ProtoLegacy { + return b, errors.New("no support for message_set_wire_format") + } + fieldOrder := order.AnyFieldOrder + if o.Deterministic { + fieldOrder = order.NumberFieldOrder + } + var err error + order.RangeFields(m, fieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + b, err = o.marshalMessageSetField(b, fd, v) + return err == nil + }) + if err != nil { + return b, err + } + return messageset.AppendUnknown(b, m.GetUnknown()) +} + +func (o MarshalOptions) marshalMessageSetField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) { + b = messageset.AppendFieldStart(b, fd.Number()) + b = protowire.AppendTag(b, messageset.FieldMessage, protowire.BytesType) + b = protowire.AppendVarint(b, uint64(o.Size(value.Message().Interface()))) + b, err := o.marshalMessage(b, value.Message()) + if err != nil { + return b, err + } + b = messageset.AppendFieldEnd(b) + return b, nil +} + +func (o UnmarshalOptions) unmarshalMessageSet(b []byte, m protoreflect.Message) error { + if !flags.ProtoLegacy { + return errors.New("no support for message_set_wire_format") + } + return messageset.Unmarshal(b, false, func(num protowire.Number, v []byte) error { + err := o.unmarshalMessageSetField(m, num, v) + if err == errUnknown { + unknown := m.GetUnknown() + unknown = protowire.AppendTag(unknown, num, protowire.BytesType) + unknown = protowire.AppendBytes(unknown, v) + m.SetUnknown(unknown) + return nil + } + return err + }) +} + +func (o UnmarshalOptions) unmarshalMessageSetField(m protoreflect.Message, num protowire.Number, v []byte) error { + md := m.Descriptor() + if !md.ExtensionRanges().Has(num) { + return errUnknown + } + xt, err := o.Resolver.FindExtensionByNumber(md.FullName(), num) + if err == protoregistry.NotFound { + return errUnknown + } + if err != nil { + return errors.New("%v: unable to resolve extension %v: %v", md.FullName(), num, err) + } + xd := xt.TypeDescriptor() + if err := o.unmarshalMessage(v, m.Mutable(xd).Message()); err != nil { + return err + } + return nil +} diff --git a/vendor/google.golang.org/protobuf/proto/proto.go b/vendor/google.golang.org/protobuf/proto/proto.go new file mode 100644 index 000000000..1f0d183b1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/proto.go @@ -0,0 +1,43 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package proto + +import ( + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// Message is the top-level interface that all messages must implement. +// It provides access to a reflective view of a message. +// Any implementation of this interface may be used with all functions in the +// protobuf module that accept a Message, except where otherwise specified. +// +// This is the v2 interface definition for protobuf messages. +// The v1 interface definition is "github.com/golang/protobuf/proto".Message. +// +// To convert a v1 message to a v2 message, +// use "github.com/golang/protobuf/proto".MessageV2. +// To convert a v2 message to a v1 message, +// use "github.com/golang/protobuf/proto".MessageV1. +type Message = protoreflect.ProtoMessage + +// Error matches all errors produced by packages in the protobuf module. +// +// That is, errors.Is(err, Error) reports whether an error is produced +// by this module. +var Error error + +func init() { + Error = errors.Error +} + +// MessageName returns the full name of m. +// If m is nil, it returns an empty string. +func MessageName(m Message) protoreflect.FullName { + if m == nil { + return "" + } + return m.ProtoReflect().Descriptor().FullName() +} diff --git a/vendor/google.golang.org/protobuf/proto/proto_methods.go b/vendor/google.golang.org/protobuf/proto/proto_methods.go new file mode 100644 index 000000000..465e057b3 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/proto_methods.go @@ -0,0 +1,20 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The protoreflect build tag disables use of fast-path methods. +//go:build !protoreflect +// +build !protoreflect + +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +const hasProtoMethods = true + +func protoMethods(m protoreflect.Message) *protoiface.Methods { + return m.ProtoMethods() +} diff --git a/vendor/google.golang.org/protobuf/proto/proto_reflect.go b/vendor/google.golang.org/protobuf/proto/proto_reflect.go new file mode 100644 index 000000000..494d6ceef --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/proto_reflect.go @@ -0,0 +1,20 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The protoreflect build tag disables use of fast-path methods. +//go:build protoreflect +// +build protoreflect + +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +const hasProtoMethods = false + +func protoMethods(m protoreflect.Message) *protoiface.Methods { + return nil +} diff --git a/vendor/google.golang.org/protobuf/proto/reset.go b/vendor/google.golang.org/protobuf/proto/reset.go new file mode 100644 index 000000000..3d7f89436 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/reset.go @@ -0,0 +1,43 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "fmt" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +// Reset clears every field in the message. 
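The Error sentinel and MessageName helper introduced in proto.go above are typically used like this; data and msg are assumed to exist and msg is any generated message.

    if err := proto.Unmarshal(data, msg); err != nil && errors.Is(err, proto.Error) {
        // the failure originated inside a protobuf package
    }
    fmt.Println(proto.MessageName(msg)) // e.g. "mypkg.MyMessage"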
+// The resulting message shares no observable memory with its previous state +// other than the memory for the message itself. +func Reset(m Message) { + if mr, ok := m.(interface{ Reset() }); ok && hasProtoMethods { + mr.Reset() + return + } + resetMessage(m.ProtoReflect()) +} + +func resetMessage(m protoreflect.Message) { + if !m.IsValid() { + panic(fmt.Sprintf("cannot reset invalid %v message", m.Descriptor().FullName())) + } + + // Clear all known fields. + fds := m.Descriptor().Fields() + for i := 0; i < fds.Len(); i++ { + m.Clear(fds.Get(i)) + } + + // Clear extension fields. + m.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + m.Clear(fd) + return true + }) + + // Clear unknown fields. + m.SetUnknown(nil) +} diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go new file mode 100644 index 000000000..554b9c6c0 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/size.go @@ -0,0 +1,97 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +// Size returns the size in bytes of the wire-format encoding of m. +func Size(m Message) int { + return MarshalOptions{}.Size(m) +} + +// Size returns the size in bytes of the wire-format encoding of m. +func (o MarshalOptions) Size(m Message) int { + // Treat a nil message interface as an empty message; nothing to output. + if m == nil { + return 0 + } + + return o.size(m.ProtoReflect()) +} + +// size is a centralized function that all size operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for size that do not go through this. +func (o MarshalOptions) size(m protoreflect.Message) (size int) { + methods := protoMethods(m) + if methods != nil && methods.Size != nil { + out := methods.Size(protoiface.SizeInput{ + Message: m, + }) + return out.Size + } + if methods != nil && methods.Marshal != nil { + // This is not efficient, but we don't have any choice. + // This case is mainly used for legacy types with a Marshal method. 
+ out, _ := methods.Marshal(protoiface.MarshalInput{ + Message: m, + }) + return len(out.Buf) + } + return o.sizeMessageSlow(m) +} + +func (o MarshalOptions) sizeMessageSlow(m protoreflect.Message) (size int) { + if messageset.IsMessageSet(m.Descriptor()) { + return o.sizeMessageSet(m) + } + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + size += o.sizeField(fd, v) + return true + }) + size += len(m.GetUnknown()) + return size +} + +func (o MarshalOptions) sizeField(fd protoreflect.FieldDescriptor, value protoreflect.Value) (size int) { + num := fd.Number() + switch { + case fd.IsList(): + return o.sizeList(num, fd, value.List()) + case fd.IsMap(): + return o.sizeMap(num, fd, value.Map()) + default: + return protowire.SizeTag(num) + o.sizeSingular(num, fd.Kind(), value) + } +} + +func (o MarshalOptions) sizeList(num protowire.Number, fd protoreflect.FieldDescriptor, list protoreflect.List) (size int) { + if fd.IsPacked() && list.Len() > 0 { + content := 0 + for i, llen := 0, list.Len(); i < llen; i++ { + content += o.sizeSingular(num, fd.Kind(), list.Get(i)) + } + return protowire.SizeTag(num) + protowire.SizeBytes(content) + } + + for i, llen := 0, list.Len(); i < llen; i++ { + size += protowire.SizeTag(num) + o.sizeSingular(num, fd.Kind(), list.Get(i)) + } + return size +} + +func (o MarshalOptions) sizeMap(num protowire.Number, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) (size int) { + mapv.Range(func(key protoreflect.MapKey, value protoreflect.Value) bool { + size += protowire.SizeTag(num) + size += protowire.SizeBytes(o.sizeField(fd.MapKey(), key.Value()) + o.sizeField(fd.MapValue(), value)) + return true + }) + return size +} diff --git a/vendor/google.golang.org/protobuf/proto/size_gen.go b/vendor/google.golang.org/protobuf/proto/size_gen.go new file mode 100644 index 000000000..3cf61a824 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/size_gen.go @@ -0,0 +1,55 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. 
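A minimal sketch of the Size/Marshal pairing that UseCachedSize (documented in encode.go above) is intended for; m is any generated message and is assumed to exist.

    opts := proto.MarshalOptions{UseCachedSize: true}
    n := opts.Size(m)         // Size must run first, with otherwise identical options
    buf := make([]byte, 0, n) // preallocate exactly enough
    buf, err := opts.MarshalAppend(buf, m)
    _ = err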
+ +package proto + +import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/reflect/protoreflect" +) + +func (o MarshalOptions) sizeSingular(num protowire.Number, kind protoreflect.Kind, v protoreflect.Value) int { + switch kind { + case protoreflect.BoolKind: + return protowire.SizeVarint(protowire.EncodeBool(v.Bool())) + case protoreflect.EnumKind: + return protowire.SizeVarint(uint64(v.Enum())) + case protoreflect.Int32Kind: + return protowire.SizeVarint(uint64(int32(v.Int()))) + case protoreflect.Sint32Kind: + return protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) + case protoreflect.Uint32Kind: + return protowire.SizeVarint(uint64(uint32(v.Uint()))) + case protoreflect.Int64Kind: + return protowire.SizeVarint(uint64(v.Int())) + case protoreflect.Sint64Kind: + return protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) + case protoreflect.Uint64Kind: + return protowire.SizeVarint(v.Uint()) + case protoreflect.Sfixed32Kind: + return protowire.SizeFixed32() + case protoreflect.Fixed32Kind: + return protowire.SizeFixed32() + case protoreflect.FloatKind: + return protowire.SizeFixed32() + case protoreflect.Sfixed64Kind: + return protowire.SizeFixed64() + case protoreflect.Fixed64Kind: + return protowire.SizeFixed64() + case protoreflect.DoubleKind: + return protowire.SizeFixed64() + case protoreflect.StringKind: + return protowire.SizeBytes(len(v.String())) + case protoreflect.BytesKind: + return protowire.SizeBytes(len(v.Bytes())) + case protoreflect.MessageKind: + return protowire.SizeBytes(o.size(v.Message())) + case protoreflect.GroupKind: + return protowire.SizeGroup(num, o.size(v.Message())) + default: + return 0 + } +} diff --git a/vendor/google.golang.org/protobuf/proto/wrappers.go b/vendor/google.golang.org/protobuf/proto/wrappers.go new file mode 100644 index 000000000..653b12c3a --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/wrappers.go @@ -0,0 +1,29 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +// Bool stores v in a new bool value and returns a pointer to it. +func Bool(v bool) *bool { return &v } + +// Int32 stores v in a new int32 value and returns a pointer to it. +func Int32(v int32) *int32 { return &v } + +// Int64 stores v in a new int64 value and returns a pointer to it. +func Int64(v int64) *int64 { return &v } + +// Float32 stores v in a new float32 value and returns a pointer to it. +func Float32(v float32) *float32 { return &v } + +// Float64 stores v in a new float64 value and returns a pointer to it. +func Float64(v float64) *float64 { return &v } + +// Uint32 stores v in a new uint32 value and returns a pointer to it. +func Uint32(v uint32) *uint32 { return &v } + +// Uint64 stores v in a new uint64 value and returns a pointer to it. +func Uint64(v uint64) *uint64 { return &v } + +// String stores v in a new string value and returns a pointer to it. +func String(v string) *string { return &v } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go new file mode 100644 index 000000000..e4dfb1205 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -0,0 +1,276 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package protodesc provides functionality for converting +// FileDescriptorProto messages to/from protoreflect.FileDescriptor values. +// +// The google.protobuf.FileDescriptorProto is a protobuf message that describes +// the type information for a .proto file in a form that is easily serializable. +// The protoreflect.FileDescriptor is a more structured representation of +// the FileDescriptorProto message where references and remote dependencies +// can be directly followed. +package protodesc + +import ( + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + "google.golang.org/protobuf/types/descriptorpb" +) + +// Resolver is the resolver used by NewFile to resolve dependencies. +// The enums and messages provided must belong to some parent file, +// which is also registered. +// +// It is implemented by protoregistry.Files. +type Resolver interface { + FindFileByPath(string) (protoreflect.FileDescriptor, error) + FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) +} + +// FileOptions configures the construction of file descriptors. +type FileOptions struct { + pragma.NoUnkeyedLiterals + + // AllowUnresolvable configures New to permissively allow unresolvable + // file, enum, or message dependencies. Unresolved dependencies are replaced + // by placeholder equivalents. + // + // The following dependencies may be left unresolved: + // • Resolving an imported file. + // • Resolving the type for a message field or extension field. + // If the kind of the field is unknown, then a placeholder is used for both + // the Enum and Message accessors on the protoreflect.FieldDescriptor. + // • Resolving an enum value set as the default for an optional enum field. + // If unresolvable, the protoreflect.FieldDescriptor.Default is set to the + // first value in the associated enum (or zero if the also enum dependency + // is also unresolvable). The protoreflect.FieldDescriptor.DefaultEnumValue + // is populated with a placeholder. + // • Resolving the extended message type for an extension field. + // • Resolving the input or output message type for a service method. + // + // If the unresolved dependency uses a relative name, + // then the placeholder will contain an invalid FullName with a "*." prefix, + // indicating that the starting prefix of the full name is unknown. + AllowUnresolvable bool +} + +// NewFile creates a new protoreflect.FileDescriptor from the provided +// file descriptor message. See FileOptions.New for more information. +func NewFile(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) { + return FileOptions{}.New(fd, r) +} + +// NewFiles creates a new protoregistry.Files from the provided +// FileDescriptorSet message. See FileOptions.NewFiles for more information. +func NewFiles(fd *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) { + return FileOptions{}.NewFiles(fd) +} + +// New creates a new protoreflect.FileDescriptor from the provided +// file descriptor message. The file must represent a valid proto file according +// to protobuf semantics. The returned descriptor is a deep copy of the input. +// +// Any imported files, enum types, or message types referenced in the file are +// resolved using the provided registry. 
When looking up an import file path, +// the path must be unique. The newly created file descriptor is not registered +// back into the provided file registry. +func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) { + if r == nil { + r = (*protoregistry.Files)(nil) // empty resolver + } + + // Handle the file descriptor content. + f := &filedesc.File{L2: &filedesc.FileL2{}} + switch fd.GetSyntax() { + case "proto2", "": + f.L1.Syntax = protoreflect.Proto2 + case "proto3": + f.L1.Syntax = protoreflect.Proto3 + default: + return nil, errors.New("invalid syntax: %q", fd.GetSyntax()) + } + f.L1.Path = fd.GetName() + if f.L1.Path == "" { + return nil, errors.New("file path must be populated") + } + f.L1.Package = protoreflect.FullName(fd.GetPackage()) + if !f.L1.Package.IsValid() && f.L1.Package != "" { + return nil, errors.New("invalid package: %q", f.L1.Package) + } + if opts := fd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.FileOptions) + f.L2.Options = func() protoreflect.ProtoMessage { return opts } + } + + f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency())) + for _, i := range fd.GetPublicDependency() { + if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsPublic { + return nil, errors.New("invalid or duplicate public import index: %d", i) + } + f.L2.Imports[i].IsPublic = true + } + for _, i := range fd.GetWeakDependency() { + if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsWeak { + return nil, errors.New("invalid or duplicate weak import index: %d", i) + } + f.L2.Imports[i].IsWeak = true + } + imps := importSet{f.Path(): true} + for i, path := range fd.GetDependency() { + imp := &f.L2.Imports[i] + f, err := r.FindFileByPath(path) + if err == protoregistry.NotFound && (o.AllowUnresolvable || imp.IsWeak) { + f = filedesc.PlaceholderFile(path) + } else if err != nil { + return nil, errors.New("could not resolve import %q: %v", path, err) + } + imp.FileDescriptor = f + + if imps[imp.Path()] { + return nil, errors.New("already imported %q", path) + } + imps[imp.Path()] = true + } + for i := range fd.GetDependency() { + imp := &f.L2.Imports[i] + imps.importPublic(imp.Imports()) + } + + // Handle source locations. + f.L2.Locations.File = f + for _, loc := range fd.GetSourceCodeInfo().GetLocation() { + var l protoreflect.SourceLocation + // TODO: Validate that the path points to an actual declaration? + l.Path = protoreflect.SourcePath(loc.GetPath()) + s := loc.GetSpan() + switch len(s) { + case 3: + l.StartLine, l.StartColumn, l.EndLine, l.EndColumn = int(s[0]), int(s[1]), int(s[0]), int(s[2]) + case 4: + l.StartLine, l.StartColumn, l.EndLine, l.EndColumn = int(s[0]), int(s[1]), int(s[2]), int(s[3]) + default: + return nil, errors.New("invalid span: %v", s) + } + // TODO: Validate that the span information is sensible? + // See https://github.com/protocolbuffers/protobuf/issues/6378. + if false && (l.EndLine < l.StartLine || l.StartLine < 0 || l.StartColumn < 0 || l.EndColumn < 0 || + (l.StartLine == l.EndLine && l.EndColumn <= l.StartColumn)) { + return nil, errors.New("invalid span: %v", s) + } + l.LeadingDetachedComments = loc.GetLeadingDetachedComments() + l.LeadingComments = loc.GetLeadingComments() + l.TrailingComments = loc.GetTrailingComments() + f.L2.Locations.List = append(f.L2.Locations.List, l) + } + + // Step 1: Allocate and derive the names for all declarations. 
+ // This copies all fields from the descriptor proto except: + // google.protobuf.FieldDescriptorProto.type_name + // google.protobuf.FieldDescriptorProto.default_value + // google.protobuf.FieldDescriptorProto.oneof_index + // google.protobuf.FieldDescriptorProto.extendee + // google.protobuf.MethodDescriptorProto.input + // google.protobuf.MethodDescriptorProto.output + var err error + sb := new(strs.Builder) + r1 := make(descsByName) + if f.L1.Enums.List, err = r1.initEnumDeclarations(fd.GetEnumType(), f, sb); err != nil { + return nil, err + } + if f.L1.Messages.List, err = r1.initMessagesDeclarations(fd.GetMessageType(), f, sb); err != nil { + return nil, err + } + if f.L1.Extensions.List, err = r1.initExtensionDeclarations(fd.GetExtension(), f, sb); err != nil { + return nil, err + } + if f.L1.Services.List, err = r1.initServiceDeclarations(fd.GetService(), f, sb); err != nil { + return nil, err + } + + // Step 2: Resolve every dependency reference not handled by step 1. + r2 := &resolver{local: r1, remote: r, imports: imps, allowUnresolvable: o.AllowUnresolvable} + if err := r2.resolveMessageDependencies(f.L1.Messages.List, fd.GetMessageType()); err != nil { + return nil, err + } + if err := r2.resolveExtensionDependencies(f.L1.Extensions.List, fd.GetExtension()); err != nil { + return nil, err + } + if err := r2.resolveServiceDependencies(f.L1.Services.List, fd.GetService()); err != nil { + return nil, err + } + + // Step 3: Validate every enum, message, and extension declaration. + if err := validateEnumDeclarations(f.L1.Enums.List, fd.GetEnumType()); err != nil { + return nil, err + } + if err := validateMessageDeclarations(f.L1.Messages.List, fd.GetMessageType()); err != nil { + return nil, err + } + if err := validateExtensionDeclarations(f.L1.Extensions.List, fd.GetExtension()); err != nil { + return nil, err + } + + return f, nil +} + +type importSet map[string]bool + +func (is importSet) importPublic(imps protoreflect.FileImports) { + for i := 0; i < imps.Len(); i++ { + if imp := imps.Get(i); imp.IsPublic { + is[imp.Path()] = true + is.importPublic(imp.Imports()) + } + } +} + +// NewFiles creates a new protoregistry.Files from the provided +// FileDescriptorSet message. The descriptor set must include only +// valid files according to protobuf semantics. The returned descriptors +// are a deep copy of the input. +func (o FileOptions) NewFiles(fds *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) { + files := make(map[string]*descriptorpb.FileDescriptorProto) + for _, fd := range fds.File { + if _, ok := files[fd.GetName()]; ok { + return nil, errors.New("file appears multiple times: %q", fd.GetName()) + } + files[fd.GetName()] = fd + } + r := &protoregistry.Files{} + for _, fd := range files { + if err := o.addFileDeps(r, fd, files); err != nil { + return nil, err + } + } + return r, nil +} +func (o FileOptions) addFileDeps(r *protoregistry.Files, fd *descriptorpb.FileDescriptorProto, files map[string]*descriptorpb.FileDescriptorProto) error { + // Set the entry to nil while descending into a file's dependencies to detect cycles. + files[fd.GetName()] = nil + for _, dep := range fd.Dependency { + depfd, ok := files[dep] + if depfd == nil { + if ok { + return errors.New("import cycle in file: %q", dep) + } + continue + } + if err := o.addFileDeps(r, depfd, files); err != nil { + return err + } + } + // Delete the entry once dependencies are processed. 
+ delete(files, fd.GetName()) + f, err := o.New(fd, r) + if err != nil { + return err + } + return r.RegisterFile(f) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go new file mode 100644 index 000000000..37efda1af --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -0,0 +1,248 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protodesc + +import ( + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + + "google.golang.org/protobuf/types/descriptorpb" +) + +type descsByName map[protoreflect.FullName]protoreflect.Descriptor + +func (r descsByName) initEnumDeclarations(eds []*descriptorpb.EnumDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (es []filedesc.Enum, err error) { + es = make([]filedesc.Enum, len(eds)) // allocate up-front to ensure stable pointers + for i, ed := range eds { + e := &es[i] + e.L2 = new(filedesc.EnumL2) + if e.L0, err = r.makeBase(e, parent, ed.GetName(), i, sb); err != nil { + return nil, err + } + if opts := ed.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.EnumOptions) + e.L2.Options = func() protoreflect.ProtoMessage { return opts } + } + for _, s := range ed.GetReservedName() { + e.L2.ReservedNames.List = append(e.L2.ReservedNames.List, protoreflect.Name(s)) + } + for _, rr := range ed.GetReservedRange() { + e.L2.ReservedRanges.List = append(e.L2.ReservedRanges.List, [2]protoreflect.EnumNumber{ + protoreflect.EnumNumber(rr.GetStart()), + protoreflect.EnumNumber(rr.GetEnd()), + }) + } + if e.L2.Values.List, err = r.initEnumValuesFromDescriptorProto(ed.GetValue(), e, sb); err != nil { + return nil, err + } + } + return es, nil +} + +func (r descsByName) initEnumValuesFromDescriptorProto(vds []*descriptorpb.EnumValueDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (vs []filedesc.EnumValue, err error) { + vs = make([]filedesc.EnumValue, len(vds)) // allocate up-front to ensure stable pointers + for i, vd := range vds { + v := &vs[i] + if v.L0, err = r.makeBase(v, parent, vd.GetName(), i, sb); err != nil { + return nil, err + } + if opts := vd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.EnumValueOptions) + v.L1.Options = func() protoreflect.ProtoMessage { return opts } + } + v.L1.Number = protoreflect.EnumNumber(vd.GetNumber()) + } + return vs, nil +} + +func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ms []filedesc.Message, err error) { + ms = make([]filedesc.Message, len(mds)) // allocate up-front to ensure stable pointers + for i, md := range mds { + m := &ms[i] + m.L2 = new(filedesc.MessageL2) + if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { + return nil, err + } + if opts := md.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.MessageOptions) + m.L2.Options = func() protoreflect.ProtoMessage { return opts } + m.L1.IsMapEntry = opts.GetMapEntry() + m.L1.IsMessageSet = opts.GetMessageSetWireFormat() + } + for _, s := range md.GetReservedName() { + m.L2.ReservedNames.List = append(m.L2.ReservedNames.List, 
protoreflect.Name(s)) + } + for _, rr := range md.GetReservedRange() { + m.L2.ReservedRanges.List = append(m.L2.ReservedRanges.List, [2]protoreflect.FieldNumber{ + protoreflect.FieldNumber(rr.GetStart()), + protoreflect.FieldNumber(rr.GetEnd()), + }) + } + for _, xr := range md.GetExtensionRange() { + m.L2.ExtensionRanges.List = append(m.L2.ExtensionRanges.List, [2]protoreflect.FieldNumber{ + protoreflect.FieldNumber(xr.GetStart()), + protoreflect.FieldNumber(xr.GetEnd()), + }) + var optsFunc func() protoreflect.ProtoMessage + if opts := xr.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.ExtensionRangeOptions) + optsFunc = func() protoreflect.ProtoMessage { return opts } + } + m.L2.ExtensionRangeOptions = append(m.L2.ExtensionRangeOptions, optsFunc) + } + if m.L2.Fields.List, err = r.initFieldsFromDescriptorProto(md.GetField(), m, sb); err != nil { + return nil, err + } + if m.L2.Oneofs.List, err = r.initOneofsFromDescriptorProto(md.GetOneofDecl(), m, sb); err != nil { + return nil, err + } + if m.L1.Enums.List, err = r.initEnumDeclarations(md.GetEnumType(), m, sb); err != nil { + return nil, err + } + if m.L1.Messages.List, err = r.initMessagesDeclarations(md.GetNestedType(), m, sb); err != nil { + return nil, err + } + if m.L1.Extensions.List, err = r.initExtensionDeclarations(md.GetExtension(), m, sb); err != nil { + return nil, err + } + } + return ms, nil +} + +func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (fs []filedesc.Field, err error) { + fs = make([]filedesc.Field, len(fds)) // allocate up-front to ensure stable pointers + for i, fd := range fds { + f := &fs[i] + if f.L0, err = r.makeBase(f, parent, fd.GetName(), i, sb); err != nil { + return nil, err + } + f.L1.IsProto3Optional = fd.GetProto3Optional() + if opts := fd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.FieldOptions) + f.L1.Options = func() protoreflect.ProtoMessage { return opts } + f.L1.IsWeak = opts.GetWeak() + f.L1.HasPacked = opts.Packed != nil + f.L1.IsPacked = opts.GetPacked() + } + f.L1.Number = protoreflect.FieldNumber(fd.GetNumber()) + f.L1.Cardinality = protoreflect.Cardinality(fd.GetLabel()) + if fd.Type != nil { + f.L1.Kind = protoreflect.Kind(fd.GetType()) + } + if fd.JsonName != nil { + f.L1.StringName.InitJSON(fd.GetJsonName()) + } + } + return fs, nil +} + +func (r descsByName) initOneofsFromDescriptorProto(ods []*descriptorpb.OneofDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (os []filedesc.Oneof, err error) { + os = make([]filedesc.Oneof, len(ods)) // allocate up-front to ensure stable pointers + for i, od := range ods { + o := &os[i] + if o.L0, err = r.makeBase(o, parent, od.GetName(), i, sb); err != nil { + return nil, err + } + if opts := od.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.OneofOptions) + o.L1.Options = func() protoreflect.ProtoMessage { return opts } + } + } + return os, nil +} + +func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (xs []filedesc.Extension, err error) { + xs = make([]filedesc.Extension, len(xds)) // allocate up-front to ensure stable pointers + for i, xd := range xds { + x := &xs[i] + x.L2 = new(filedesc.ExtensionL2) + if x.L0, err = r.makeBase(x, parent, xd.GetName(), i, sb); err != nil { + return nil, err + } + if opts := xd.GetOptions(); opts != nil { + opts = 
proto.Clone(opts).(*descriptorpb.FieldOptions) + x.L2.Options = func() protoreflect.ProtoMessage { return opts } + x.L2.IsPacked = opts.GetPacked() + } + x.L1.Number = protoreflect.FieldNumber(xd.GetNumber()) + x.L1.Cardinality = protoreflect.Cardinality(xd.GetLabel()) + if xd.Type != nil { + x.L1.Kind = protoreflect.Kind(xd.GetType()) + } + if xd.JsonName != nil { + x.L2.StringName.InitJSON(xd.GetJsonName()) + } + } + return xs, nil +} + +func (r descsByName) initServiceDeclarations(sds []*descriptorpb.ServiceDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ss []filedesc.Service, err error) { + ss = make([]filedesc.Service, len(sds)) // allocate up-front to ensure stable pointers + for i, sd := range sds { + s := &ss[i] + s.L2 = new(filedesc.ServiceL2) + if s.L0, err = r.makeBase(s, parent, sd.GetName(), i, sb); err != nil { + return nil, err + } + if opts := sd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.ServiceOptions) + s.L2.Options = func() protoreflect.ProtoMessage { return opts } + } + if s.L2.Methods.List, err = r.initMethodsFromDescriptorProto(sd.GetMethod(), s, sb); err != nil { + return nil, err + } + } + return ss, nil +} + +func (r descsByName) initMethodsFromDescriptorProto(mds []*descriptorpb.MethodDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ms []filedesc.Method, err error) { + ms = make([]filedesc.Method, len(mds)) // allocate up-front to ensure stable pointers + for i, md := range mds { + m := &ms[i] + if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { + return nil, err + } + if opts := md.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.MethodOptions) + m.L1.Options = func() protoreflect.ProtoMessage { return opts } + } + m.L1.IsStreamingClient = md.GetClientStreaming() + m.L1.IsStreamingServer = md.GetServerStreaming() + } + return ms, nil +} + +func (r descsByName) makeBase(child, parent protoreflect.Descriptor, name string, idx int, sb *strs.Builder) (filedesc.BaseL0, error) { + if !protoreflect.Name(name).IsValid() { + return filedesc.BaseL0{}, errors.New("descriptor %q has an invalid nested name: %q", parent.FullName(), name) + } + + // Derive the full name of the child. + // Note that enum values are a sibling to the enum parent in the namespace. + var fullName protoreflect.FullName + if _, ok := parent.(protoreflect.EnumDescriptor); ok { + fullName = sb.AppendFullName(parent.FullName().Parent(), protoreflect.Name(name)) + } else { + fullName = sb.AppendFullName(parent.FullName(), protoreflect.Name(name)) + } + if _, ok := r[fullName]; ok { + return filedesc.BaseL0{}, errors.New("descriptor %q already declared", fullName) + } + r[fullName] = child + + // TODO: Verify that the full name does not already exist in the resolver? + // This is not as critical since most usages of NewFile will register + // the created file back into the registry, which will perform this check. + + return filedesc.BaseL0{ + FullName: fullName, + ParentFile: parent.ParentFile().(*filedesc.File), + Parent: parent, + Index: idx, + }, nil +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go new file mode 100644 index 000000000..cebb36cda --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -0,0 +1,286 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protodesc + +import ( + "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + "google.golang.org/protobuf/types/descriptorpb" +) + +// resolver is a wrapper around a local registry of declarations within the file +// and the remote resolver. The remote resolver is restricted to only return +// descriptors that have been imported. +type resolver struct { + local descsByName + remote Resolver + imports importSet + + allowUnresolvable bool +} + +func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) (err error) { + for i, md := range mds { + m := &ms[i] + for j, fd := range md.GetField() { + f := &m.L2.Fields.List[j] + if f.L1.Cardinality == protoreflect.Required { + m.L2.RequiredNumbers.List = append(m.L2.RequiredNumbers.List, f.L1.Number) + } + if fd.OneofIndex != nil { + k := int(fd.GetOneofIndex()) + if !(0 <= k && k < len(md.GetOneofDecl())) { + return errors.New("message field %q has an invalid oneof index: %d", f.FullName(), k) + } + o := &m.L2.Oneofs.List[k] + f.L1.ContainingOneof = o + o.L1.Fields.List = append(o.L1.Fields.List, f) + } + + if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil { + return errors.New("message field %q cannot resolve type: %v", f.FullName(), err) + } + if fd.DefaultValue != nil { + v, ev, err := unmarshalDefault(fd.GetDefaultValue(), f, r.allowUnresolvable) + if err != nil { + return errors.New("message field %q has invalid default: %v", f.FullName(), err) + } + f.L1.Default = filedesc.DefaultValue(v, ev) + } + } + + if err := r.resolveMessageDependencies(m.L1.Messages.List, md.GetNestedType()); err != nil { + return err + } + if err := r.resolveExtensionDependencies(m.L1.Extensions.List, md.GetExtension()); err != nil { + return err + } + } + return nil +} + +func (r *resolver) resolveExtensionDependencies(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) (err error) { + for i, xd := range xds { + x := &xs[i] + if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee()), false); err != nil { + return errors.New("extension field %q cannot resolve extendee: %v", x.FullName(), err) + } + if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), partialName(xd.GetTypeName()), false); err != nil { + return errors.New("extension field %q cannot resolve type: %v", x.FullName(), err) + } + if xd.DefaultValue != nil { + v, ev, err := unmarshalDefault(xd.GetDefaultValue(), x, r.allowUnresolvable) + if err != nil { + return errors.New("extension field %q has invalid default: %v", x.FullName(), err) + } + x.L2.Default = filedesc.DefaultValue(v, ev) + } + } + return nil +} + +func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*descriptorpb.ServiceDescriptorProto) (err error) { + for i, sd := range sds { + s := &ss[i] + for j, md := range sd.GetMethod() { + m := &s.L2.Methods.List[j] + m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType()), false) + if err != nil { + return errors.New("service method %q cannot resolve input: %v", m.FullName(), err) + } + 
m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType()), false) + if err != nil { + return errors.New("service method %q cannot resolve output: %v", m.FullName(), err) + } + } + } + return nil +} + +// findTarget finds an enum or message descriptor if k is an enum, message, +// group, or unknown. If unknown, and the name could be resolved, the kind +// returned kind is set based on the type of the resolved descriptor. +func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) { + switch k { + case protoreflect.EnumKind: + ed, err := r.findEnumDescriptor(scope, ref, isWeak) + if err != nil { + return 0, nil, nil, err + } + return k, ed, nil, nil + case protoreflect.MessageKind, protoreflect.GroupKind: + md, err := r.findMessageDescriptor(scope, ref, isWeak) + if err != nil { + return 0, nil, nil, err + } + return k, nil, md, nil + case 0: + // Handle unspecified kinds (possible with parsers that operate + // on a per-file basis without knowledge of dependencies). + d, err := r.findDescriptor(scope, ref) + if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + return k, filedesc.PlaceholderEnum(ref.FullName()), filedesc.PlaceholderMessage(ref.FullName()), nil + } else if err == protoregistry.NotFound { + return 0, nil, nil, errors.New("%q not found", ref.FullName()) + } else if err != nil { + return 0, nil, nil, err + } + switch d := d.(type) { + case protoreflect.EnumDescriptor: + return protoreflect.EnumKind, d, nil, nil + case protoreflect.MessageDescriptor: + return protoreflect.MessageKind, nil, d, nil + default: + return 0, nil, nil, errors.New("unknown kind") + } + default: + if ref != "" { + return 0, nil, nil, errors.New("target name cannot be specified for %v", k) + } + if !k.IsValid() { + return 0, nil, nil, errors.New("invalid kind: %d", k) + } + return k, nil, nil, nil + } +} + +// findDescriptor finds the descriptor by name, +// which may be a relative name within some scope. +// +// Suppose the scope was "fizz.buzz" and the reference was "Foo.Bar", +// then the following full names are searched: +// * fizz.buzz.Foo.Bar +// * fizz.Foo.Bar +// * Foo.Bar +func (r *resolver) findDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.Descriptor, error) { + if !ref.IsValid() { + return nil, errors.New("invalid name reference: %q", ref) + } + if ref.IsFull() { + scope, ref = "", ref[1:] + } + var foundButNotImported protoreflect.Descriptor + for { + // Derive the full name to search. + s := protoreflect.FullName(ref) + if scope != "" { + s = scope + "." + s + } + + // Check the current file for the descriptor. + if d, ok := r.local[s]; ok { + return d, nil + } + + // Check the remote registry for the descriptor. + d, err := r.remote.FindDescriptorByName(s) + if err == nil { + // Only allow descriptors covered by one of the imports. + if r.imports[d.ParentFile().Path()] { + return d, nil + } + foundButNotImported = d + } else if err != protoregistry.NotFound { + return nil, errors.Wrap(err, "%q", s) + } + + // Continue on at a higher level of scoping. 
+ if scope == "" { + if d := foundButNotImported; d != nil { + return nil, errors.New("resolved %q, but %q is not imported", d.FullName(), d.ParentFile().Path()) + } + return nil, protoregistry.NotFound + } + scope = scope.Parent() + } +} + +func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.EnumDescriptor, error) { + d, err := r.findDescriptor(scope, ref) + if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + return filedesc.PlaceholderEnum(ref.FullName()), nil + } else if err == protoregistry.NotFound { + return nil, errors.New("%q not found", ref.FullName()) + } else if err != nil { + return nil, err + } + ed, ok := d.(protoreflect.EnumDescriptor) + if !ok { + return nil, errors.New("resolved %q, but it is not an enum", d.FullName()) + } + return ed, nil +} + +func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.MessageDescriptor, error) { + d, err := r.findDescriptor(scope, ref) + if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + return filedesc.PlaceholderMessage(ref.FullName()), nil + } else if err == protoregistry.NotFound { + return nil, errors.New("%q not found", ref.FullName()) + } else if err != nil { + return nil, err + } + md, ok := d.(protoreflect.MessageDescriptor) + if !ok { + return nil, errors.New("resolved %q, but it is not an message", d.FullName()) + } + return md, nil +} + +// partialName is the partial name. A leading dot means that the name is full, +// otherwise the name is relative to some current scope. +// See google.protobuf.FieldDescriptorProto.type_name. +type partialName string + +func (s partialName) IsFull() bool { + return len(s) > 0 && s[0] == '.' +} + +func (s partialName) IsValid() bool { + if s.IsFull() { + return protoreflect.FullName(s[1:]).IsValid() + } + return protoreflect.FullName(s).IsValid() +} + +const unknownPrefix = "*." + +// FullName converts the partial name to a full name on a best-effort basis. +// If relative, it creates an invalid full name, using a "*." prefix +// to indicate that the start of the full name is unknown. 
+func (s partialName) FullName() protoreflect.FullName { + if s.IsFull() { + return protoreflect.FullName(s[1:]) + } + return protoreflect.FullName(unknownPrefix + s) +} + +func unmarshalDefault(s string, fd protoreflect.FieldDescriptor, allowUnresolvable bool) (protoreflect.Value, protoreflect.EnumValueDescriptor, error) { + var evs protoreflect.EnumValueDescriptors + if fd.Enum() != nil { + evs = fd.Enum().Values() + } + v, ev, err := defval.Unmarshal(s, fd.Kind(), evs, defval.Descriptor) + if err != nil && allowUnresolvable && evs != nil && protoreflect.Name(s).IsValid() { + v = protoreflect.ValueOfEnum(0) + if evs.Len() > 0 { + v = protoreflect.ValueOfEnum(evs.Get(0).Number()) + } + ev = filedesc.PlaceholderEnumValue(fd.Enum().FullName().Parent().Append(protoreflect.Name(s))) + } else if err != nil { + return v, ev, err + } + if fd.Syntax() == protoreflect.Proto3 { + return v, ev, errors.New("cannot be specified under proto3 semantics") + } + if fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind || fd.Cardinality() == protoreflect.Repeated { + return v, ev, errors.New("cannot be specified on composite types") + } + return v, ev, nil +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go new file mode 100644 index 000000000..9af1d5648 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -0,0 +1,374 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protodesc + +import ( + "strings" + "unicode" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" + + "google.golang.org/protobuf/types/descriptorpb" +) + +func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescriptorProto) error { + for i, ed := range eds { + e := &es[i] + if err := e.L2.ReservedNames.CheckValid(); err != nil { + return errors.New("enum %q reserved names has %v", e.FullName(), err) + } + if err := e.L2.ReservedRanges.CheckValid(); err != nil { + return errors.New("enum %q reserved ranges has %v", e.FullName(), err) + } + if len(ed.GetValue()) == 0 { + return errors.New("enum %q must contain at least one value declaration", e.FullName()) + } + allowAlias := ed.GetOptions().GetAllowAlias() + foundAlias := false + for i := 0; i < e.Values().Len(); i++ { + v1 := e.Values().Get(i) + if v2 := e.Values().ByNumber(v1.Number()); v1 != v2 { + foundAlias = true + if !allowAlias { + return errors.New("enum %q has conflicting non-aliased values on number %d: %q with %q", e.FullName(), v1.Number(), v1.Name(), v2.Name()) + } + } + } + if allowAlias && !foundAlias { + return errors.New("enum %q allows aliases, but none were found", e.FullName()) + } + if e.Syntax() == protoreflect.Proto3 { + if v := e.Values().Get(0); v.Number() != 0 { + return errors.New("enum %q using proto3 semantics must have zero number for the first value", v.FullName()) + } + // Verify that value names in proto3 do not conflict if the + // case-insensitive prefix is removed. 
+ // See protoc v3.8.0: src/google/protobuf/descriptor.cc:4991-5055 + names := map[string]protoreflect.EnumValueDescriptor{} + prefix := strings.Replace(strings.ToLower(string(e.Name())), "_", "", -1) + for i := 0; i < e.Values().Len(); i++ { + v1 := e.Values().Get(i) + s := strs.EnumValueName(strs.TrimEnumPrefix(string(v1.Name()), prefix)) + if v2, ok := names[s]; ok && v1.Number() != v2.Number() { + return errors.New("enum %q using proto3 semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name()) + } + names[s] = v1 + } + } + + for j, vd := range ed.GetValue() { + v := &e.L2.Values.List[j] + if vd.Number == nil { + return errors.New("enum value %q must have a specified number", v.FullName()) + } + if e.L2.ReservedNames.Has(v.Name()) { + return errors.New("enum value %q must not use reserved name", v.FullName()) + } + if e.L2.ReservedRanges.Has(v.Number()) { + return errors.New("enum value %q must not use reserved number %d", v.FullName(), v.Number()) + } + } + } + return nil +} + +func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error { + for i, md := range mds { + m := &ms[i] + + // Handle the message descriptor itself. + isMessageSet := md.GetOptions().GetMessageSetWireFormat() + if err := m.L2.ReservedNames.CheckValid(); err != nil { + return errors.New("message %q reserved names has %v", m.FullName(), err) + } + if err := m.L2.ReservedRanges.CheckValid(isMessageSet); err != nil { + return errors.New("message %q reserved ranges has %v", m.FullName(), err) + } + if err := m.L2.ExtensionRanges.CheckValid(isMessageSet); err != nil { + return errors.New("message %q extension ranges has %v", m.FullName(), err) + } + if err := (*filedesc.FieldRanges).CheckOverlap(&m.L2.ReservedRanges, &m.L2.ExtensionRanges); err != nil { + return errors.New("message %q reserved and extension ranges has %v", m.FullName(), err) + } + for i := 0; i < m.Fields().Len(); i++ { + f1 := m.Fields().Get(i) + if f2 := m.Fields().ByNumber(f1.Number()); f1 != f2 { + return errors.New("message %q has conflicting fields: %q with %q", m.FullName(), f1.Name(), f2.Name()) + } + } + if isMessageSet && !flags.ProtoLegacy { + return errors.New("message %q is a MessageSet, which is a legacy proto1 feature that is no longer supported", m.FullName()) + } + if isMessageSet && (m.Syntax() != protoreflect.Proto2 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { + return errors.New("message %q is an invalid proto1 MessageSet", m.FullName()) + } + if m.Syntax() == protoreflect.Proto3 { + if m.ExtensionRanges().Len() > 0 { + return errors.New("message %q using proto3 semantics cannot have extension ranges", m.FullName()) + } + // Verify that field names in proto3 do not conflict if lowercased + // with all underscores removed. 
+ // See protoc v3.8.0: src/google/protobuf/descriptor.cc:5830-5847 + names := map[string]protoreflect.FieldDescriptor{} + for i := 0; i < m.Fields().Len(); i++ { + f1 := m.Fields().Get(i) + s := strings.Replace(strings.ToLower(string(f1.Name())), "_", "", -1) + if f2, ok := names[s]; ok { + return errors.New("message %q using proto3 semantics has conflict: %q with %q", m.FullName(), f1.Name(), f2.Name()) + } + names[s] = f1 + } + } + + for j, fd := range md.GetField() { + f := &m.L2.Fields.List[j] + if m.L2.ReservedNames.Has(f.Name()) { + return errors.New("message field %q must not use reserved name", f.FullName()) + } + if !f.Number().IsValid() { + return errors.New("message field %q has an invalid number: %d", f.FullName(), f.Number()) + } + if !f.Cardinality().IsValid() { + return errors.New("message field %q has an invalid cardinality: %d", f.FullName(), f.Cardinality()) + } + if m.L2.ReservedRanges.Has(f.Number()) { + return errors.New("message field %q must not use reserved number %d", f.FullName(), f.Number()) + } + if m.L2.ExtensionRanges.Has(f.Number()) { + return errors.New("message field %q with number %d in extension range", f.FullName(), f.Number()) + } + if fd.Extendee != nil { + return errors.New("message field %q may not have extendee: %q", f.FullName(), fd.GetExtendee()) + } + if f.L1.IsProto3Optional { + if f.Syntax() != protoreflect.Proto3 { + return errors.New("message field %q under proto3 optional semantics must be specified in the proto3 syntax", f.FullName()) + } + if f.Cardinality() != protoreflect.Optional { + return errors.New("message field %q under proto3 optional semantics must have optional cardinality", f.FullName()) + } + if f.ContainingOneof() != nil && f.ContainingOneof().Fields().Len() != 1 { + return errors.New("message field %q under proto3 optional semantics must be within a single element oneof", f.FullName()) + } + } + if f.IsWeak() && !flags.ProtoLegacy { + return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName()) + } + if f.IsWeak() && (f.Syntax() != protoreflect.Proto2 || !isOptionalMessage(f) || f.ContainingOneof() != nil) { + return errors.New("message field %q may only be weak for an optional message", f.FullName()) + } + if f.IsPacked() && !isPackable(f) { + return errors.New("message field %q is not packable", f.FullName()) + } + if err := checkValidGroup(f); err != nil { + return errors.New("message field %q is an invalid group: %v", f.FullName(), err) + } + if err := checkValidMap(f); err != nil { + return errors.New("message field %q is an invalid map: %v", f.FullName(), err) + } + if f.Syntax() == protoreflect.Proto3 { + if f.Cardinality() == protoreflect.Required { + return errors.New("message field %q using proto3 semantics cannot be required", f.FullName()) + } + if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().Syntax() != protoreflect.Proto3 { + return errors.New("message field %q using proto3 semantics may only depend on a proto3 enum", f.FullName()) + } + } + } + seenSynthetic := false // synthetic oneofs for proto3 optional must come after real oneofs + for j := range md.GetOneofDecl() { + o := &m.L2.Oneofs.List[j] + if o.Fields().Len() == 0 { + return errors.New("message oneof %q must contain at least one field declaration", o.FullName()) + } + if n := o.Fields().Len(); n-1 != (o.Fields().Get(n-1).Index() - o.Fields().Get(0).Index()) { + return errors.New("message oneof %q must have consecutively declared fields", o.FullName()) + } + + if 
o.IsSynthetic() { + seenSynthetic = true + continue + } + if !o.IsSynthetic() && seenSynthetic { + return errors.New("message oneof %q must be declared before synthetic oneofs", o.FullName()) + } + + for i := 0; i < o.Fields().Len(); i++ { + f := o.Fields().Get(i) + if f.Cardinality() != protoreflect.Optional { + return errors.New("message field %q belongs in a oneof and must be optional", f.FullName()) + } + if f.IsWeak() { + return errors.New("message field %q belongs in a oneof and must not be a weak reference", f.FullName()) + } + } + } + + if err := validateEnumDeclarations(m.L1.Enums.List, md.GetEnumType()); err != nil { + return err + } + if err := validateMessageDeclarations(m.L1.Messages.List, md.GetNestedType()); err != nil { + return err + } + if err := validateExtensionDeclarations(m.L1.Extensions.List, md.GetExtension()); err != nil { + return err + } + } + return nil +} + +func validateExtensionDeclarations(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error { + for i, xd := range xds { + x := &xs[i] + // NOTE: Avoid using the IsValid method since extensions to MessageSet + // may have a field number higher than normal. This check only verifies + // that the number is not negative or reserved. We check again later + // if we know that the extendee is definitely not a MessageSet. + if n := x.Number(); n < 0 || (protowire.FirstReservedNumber <= n && n <= protowire.LastReservedNumber) { + return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number()) + } + if !x.Cardinality().IsValid() || x.Cardinality() == protoreflect.Required { + return errors.New("extension field %q has an invalid cardinality: %d", x.FullName(), x.Cardinality()) + } + if xd.JsonName != nil { + // A bug in older versions of protoc would always populate the + // "json_name" option for extensions when it is meaningless. + // When it did so, it would always use the camel-cased field name. 
+ if xd.GetJsonName() != strs.JSONCamelCase(string(x.Name())) { + return errors.New("extension field %q may not have an explicitly set JSON name: %q", x.FullName(), xd.GetJsonName()) + } + } + if xd.OneofIndex != nil { + return errors.New("extension field %q may not be part of a oneof", x.FullName()) + } + if md := x.ContainingMessage(); !md.IsPlaceholder() { + if !md.ExtensionRanges().Has(x.Number()) { + return errors.New("extension field %q extends %q with non-extension field number: %d", x.FullName(), md.FullName(), x.Number()) + } + isMessageSet := md.Options().(*descriptorpb.MessageOptions).GetMessageSetWireFormat() + if isMessageSet && !isOptionalMessage(x) { + return errors.New("extension field %q extends MessageSet and must be an optional message", x.FullName()) + } + if !isMessageSet && !x.Number().IsValid() { + return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number()) + } + } + if xd.GetOptions().GetWeak() { + return errors.New("extension field %q cannot be a weak reference", x.FullName()) + } + if x.IsPacked() && !isPackable(x) { + return errors.New("extension field %q is not packable", x.FullName()) + } + if err := checkValidGroup(x); err != nil { + return errors.New("extension field %q is an invalid group: %v", x.FullName(), err) + } + if md := x.Message(); md != nil && md.IsMapEntry() { + return errors.New("extension field %q cannot be a map entry", x.FullName()) + } + if x.Syntax() == protoreflect.Proto3 { + switch x.ContainingMessage().FullName() { + case (*descriptorpb.FileOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.EnumOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.EnumValueOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.MessageOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.FieldOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.OneofOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.ExtensionRangeOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.ServiceOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.MethodOptions)(nil).ProtoReflect().Descriptor().FullName(): + default: + return errors.New("extension field %q cannot be declared in proto3 unless extended descriptor options", x.FullName()) + } + } + } + return nil +} + +// isOptionalMessage reports whether this is an optional message. +// If the kind is unknown, it is assumed to be a message. +func isOptionalMessage(fd protoreflect.FieldDescriptor) bool { + return (fd.Kind() == 0 || fd.Kind() == protoreflect.MessageKind) && fd.Cardinality() == protoreflect.Optional +} + +// isPackable checks whether the pack option can be specified. +func isPackable(fd protoreflect.FieldDescriptor) bool { + switch fd.Kind() { + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: + return false + } + return fd.IsList() +} + +// checkValidGroup reports whether fd is a valid group according to the same +// rules that protoc imposes. 
+func checkValidGroup(fd protoreflect.FieldDescriptor) error { + md := fd.Message() + switch { + case fd.Kind() != protoreflect.GroupKind: + return nil + case fd.Syntax() != protoreflect.Proto2: + return errors.New("invalid under proto2 semantics") + case md == nil || md.IsPlaceholder(): + return errors.New("message must be resolvable") + case fd.FullName().Parent() != md.FullName().Parent(): + return errors.New("message and field must be declared in the same scope") + case !unicode.IsUpper(rune(md.Name()[0])): + return errors.New("message name must start with an uppercase") + case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))): + return errors.New("field name must be lowercased form of the message name") + } + return nil +} + +// checkValidMap checks whether the field is a valid map according to the same +// rules that protoc imposes. +// See protoc v3.8.0: src/google/protobuf/descriptor.cc:6045-6115 +func checkValidMap(fd protoreflect.FieldDescriptor) error { + md := fd.Message() + switch { + case md == nil || !md.IsMapEntry(): + return nil + case fd.FullName().Parent() != md.FullName().Parent(): + return errors.New("message and field must be declared in the same scope") + case md.Name() != protoreflect.Name(strs.MapEntryName(string(fd.Name()))): + return errors.New("incorrect implicit map entry name") + case fd.Cardinality() != protoreflect.Repeated: + return errors.New("field must be repeated") + case md.Fields().Len() != 2: + return errors.New("message must have exactly two fields") + case md.ExtensionRanges().Len() > 0: + return errors.New("message must not have any extension ranges") + case md.Enums().Len()+md.Messages().Len()+md.Extensions().Len() > 0: + return errors.New("message must not have any nested declarations") + } + kf := md.Fields().Get(0) + vf := md.Fields().Get(1) + switch { + case kf.Name() != genid.MapEntry_Key_field_name || kf.Number() != genid.MapEntry_Key_field_number || kf.Cardinality() != protoreflect.Optional || kf.ContainingOneof() != nil || kf.HasDefault(): + return errors.New("invalid key field") + case vf.Name() != genid.MapEntry_Value_field_name || vf.Number() != genid.MapEntry_Value_field_number || vf.Cardinality() != protoreflect.Optional || vf.ContainingOneof() != nil || vf.HasDefault(): + return errors.New("invalid value field") + } + switch kf.Kind() { + case protoreflect.BoolKind: // bool + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: // int32 + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: // int64 + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: // uint32 + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: // uint64 + case protoreflect.StringKind: // string + default: + return errors.New("invalid key kind: %v", kf.Kind()) + } + if e := vf.Enum(); e != nil && e.Values().Len() > 0 && e.Values().Get(0).Number() != 0 { + return errors.New("map enum value must have zero number for the first value") + } + return nil +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go new file mode 100644 index 000000000..a7c5ceffc --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go @@ -0,0 +1,252 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
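+
+// Illustrative sketch of the descriptor-to-proto direction implemented in this
+// file; m stands for any generated message value (hypothetical here):
+//
+//	md := m.ProtoReflect().Descriptor()           // protoreflect.MessageDescriptor
+//	fdp := ToFileDescriptorProto(md.ParentFile()) // *descriptorpb.FileDescriptorProto
+//	dp := ToDescriptorProto(md)                   // *descriptorpb.DescriptorProto
+//	_, _ = fdp.GetName(), dp.GetName()            // the .proto path and the message name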
+ +package protodesc + +import ( + "fmt" + "strings" + + "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + + "google.golang.org/protobuf/types/descriptorpb" +) + +// ToFileDescriptorProto copies a protoreflect.FileDescriptor into a +// google.protobuf.FileDescriptorProto message. +func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto { + p := &descriptorpb.FileDescriptorProto{ + Name: proto.String(file.Path()), + Options: proto.Clone(file.Options()).(*descriptorpb.FileOptions), + } + if file.Package() != "" { + p.Package = proto.String(string(file.Package())) + } + for i, imports := 0, file.Imports(); i < imports.Len(); i++ { + imp := imports.Get(i) + p.Dependency = append(p.Dependency, imp.Path()) + if imp.IsPublic { + p.PublicDependency = append(p.PublicDependency, int32(i)) + } + if imp.IsWeak { + p.WeakDependency = append(p.WeakDependency, int32(i)) + } + } + for i, locs := 0, file.SourceLocations(); i < locs.Len(); i++ { + loc := locs.Get(i) + l := &descriptorpb.SourceCodeInfo_Location{} + l.Path = append(l.Path, loc.Path...) + if loc.StartLine == loc.EndLine { + l.Span = []int32{int32(loc.StartLine), int32(loc.StartColumn), int32(loc.EndColumn)} + } else { + l.Span = []int32{int32(loc.StartLine), int32(loc.StartColumn), int32(loc.EndLine), int32(loc.EndColumn)} + } + l.LeadingDetachedComments = append([]string(nil), loc.LeadingDetachedComments...) + if loc.LeadingComments != "" { + l.LeadingComments = proto.String(loc.LeadingComments) + } + if loc.TrailingComments != "" { + l.TrailingComments = proto.String(loc.TrailingComments) + } + if p.SourceCodeInfo == nil { + p.SourceCodeInfo = &descriptorpb.SourceCodeInfo{} + } + p.SourceCodeInfo.Location = append(p.SourceCodeInfo.Location, l) + + } + for i, messages := 0, file.Messages(); i < messages.Len(); i++ { + p.MessageType = append(p.MessageType, ToDescriptorProto(messages.Get(i))) + } + for i, enums := 0, file.Enums(); i < enums.Len(); i++ { + p.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i))) + } + for i, services := 0, file.Services(); i < services.Len(); i++ { + p.Service = append(p.Service, ToServiceDescriptorProto(services.Get(i))) + } + for i, exts := 0, file.Extensions(); i < exts.Len(); i++ { + p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i))) + } + if syntax := file.Syntax(); syntax != protoreflect.Proto2 { + p.Syntax = proto.String(file.Syntax().String()) + } + return p +} + +// ToDescriptorProto copies a protoreflect.MessageDescriptor into a +// google.protobuf.DescriptorProto message. 
+func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto { + p := &descriptorpb.DescriptorProto{ + Name: proto.String(string(message.Name())), + Options: proto.Clone(message.Options()).(*descriptorpb.MessageOptions), + } + for i, fields := 0, message.Fields(); i < fields.Len(); i++ { + p.Field = append(p.Field, ToFieldDescriptorProto(fields.Get(i))) + } + for i, exts := 0, message.Extensions(); i < exts.Len(); i++ { + p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i))) + } + for i, messages := 0, message.Messages(); i < messages.Len(); i++ { + p.NestedType = append(p.NestedType, ToDescriptorProto(messages.Get(i))) + } + for i, enums := 0, message.Enums(); i < enums.Len(); i++ { + p.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i))) + } + for i, xranges := 0, message.ExtensionRanges(); i < xranges.Len(); i++ { + xrange := xranges.Get(i) + p.ExtensionRange = append(p.ExtensionRange, &descriptorpb.DescriptorProto_ExtensionRange{ + Start: proto.Int32(int32(xrange[0])), + End: proto.Int32(int32(xrange[1])), + Options: proto.Clone(message.ExtensionRangeOptions(i)).(*descriptorpb.ExtensionRangeOptions), + }) + } + for i, oneofs := 0, message.Oneofs(); i < oneofs.Len(); i++ { + p.OneofDecl = append(p.OneofDecl, ToOneofDescriptorProto(oneofs.Get(i))) + } + for i, ranges := 0, message.ReservedRanges(); i < ranges.Len(); i++ { + rrange := ranges.Get(i) + p.ReservedRange = append(p.ReservedRange, &descriptorpb.DescriptorProto_ReservedRange{ + Start: proto.Int32(int32(rrange[0])), + End: proto.Int32(int32(rrange[1])), + }) + } + for i, names := 0, message.ReservedNames(); i < names.Len(); i++ { + p.ReservedName = append(p.ReservedName, string(names.Get(i))) + } + return p +} + +// ToFieldDescriptorProto copies a protoreflect.FieldDescriptor into a +// google.protobuf.FieldDescriptorProto message. +func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto { + p := &descriptorpb.FieldDescriptorProto{ + Name: proto.String(string(field.Name())), + Number: proto.Int32(int32(field.Number())), + Label: descriptorpb.FieldDescriptorProto_Label(field.Cardinality()).Enum(), + Options: proto.Clone(field.Options()).(*descriptorpb.FieldOptions), + } + if field.IsExtension() { + p.Extendee = fullNameOf(field.ContainingMessage()) + } + if field.Kind().IsValid() { + p.Type = descriptorpb.FieldDescriptorProto_Type(field.Kind()).Enum() + } + if field.Enum() != nil { + p.TypeName = fullNameOf(field.Enum()) + } + if field.Message() != nil { + p.TypeName = fullNameOf(field.Message()) + } + if field.HasJSONName() { + // A bug in older versions of protoc would always populate the + // "json_name" option for extensions when it is meaningless. + // When it did so, it would always use the camel-cased field name. 
+ if field.IsExtension() { + p.JsonName = proto.String(strs.JSONCamelCase(string(field.Name()))) + } else { + p.JsonName = proto.String(field.JSONName()) + } + } + if field.Syntax() == protoreflect.Proto3 && field.HasOptionalKeyword() { + p.Proto3Optional = proto.Bool(true) + } + if field.HasDefault() { + def, err := defval.Marshal(field.Default(), field.DefaultEnumValue(), field.Kind(), defval.Descriptor) + if err != nil && field.DefaultEnumValue() != nil { + def = string(field.DefaultEnumValue().Name()) // occurs for unresolved enum values + } else if err != nil { + panic(fmt.Sprintf("%v: %v", field.FullName(), err)) + } + p.DefaultValue = proto.String(def) + } + if oneof := field.ContainingOneof(); oneof != nil { + p.OneofIndex = proto.Int32(int32(oneof.Index())) + } + return p +} + +// ToOneofDescriptorProto copies a protoreflect.OneofDescriptor into a +// google.protobuf.OneofDescriptorProto message. +func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto { + return &descriptorpb.OneofDescriptorProto{ + Name: proto.String(string(oneof.Name())), + Options: proto.Clone(oneof.Options()).(*descriptorpb.OneofOptions), + } +} + +// ToEnumDescriptorProto copies a protoreflect.EnumDescriptor into a +// google.protobuf.EnumDescriptorProto message. +func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto { + p := &descriptorpb.EnumDescriptorProto{ + Name: proto.String(string(enum.Name())), + Options: proto.Clone(enum.Options()).(*descriptorpb.EnumOptions), + } + for i, values := 0, enum.Values(); i < values.Len(); i++ { + p.Value = append(p.Value, ToEnumValueDescriptorProto(values.Get(i))) + } + for i, ranges := 0, enum.ReservedRanges(); i < ranges.Len(); i++ { + rrange := ranges.Get(i) + p.ReservedRange = append(p.ReservedRange, &descriptorpb.EnumDescriptorProto_EnumReservedRange{ + Start: proto.Int32(int32(rrange[0])), + End: proto.Int32(int32(rrange[1])), + }) + } + for i, names := 0, enum.ReservedNames(); i < names.Len(); i++ { + p.ReservedName = append(p.ReservedName, string(names.Get(i))) + } + return p +} + +// ToEnumValueDescriptorProto copies a protoreflect.EnumValueDescriptor into a +// google.protobuf.EnumValueDescriptorProto message. +func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto { + return &descriptorpb.EnumValueDescriptorProto{ + Name: proto.String(string(value.Name())), + Number: proto.Int32(int32(value.Number())), + Options: proto.Clone(value.Options()).(*descriptorpb.EnumValueOptions), + } +} + +// ToServiceDescriptorProto copies a protoreflect.ServiceDescriptor into a +// google.protobuf.ServiceDescriptorProto message. +func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto { + p := &descriptorpb.ServiceDescriptorProto{ + Name: proto.String(string(service.Name())), + Options: proto.Clone(service.Options()).(*descriptorpb.ServiceOptions), + } + for i, methods := 0, service.Methods(); i < methods.Len(); i++ { + p.Method = append(p.Method, ToMethodDescriptorProto(methods.Get(i))) + } + return p +} + +// ToMethodDescriptorProto copies a protoreflect.MethodDescriptor into a +// google.protobuf.MethodDescriptorProto message. 
+func ToMethodDescriptorProto(method protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto { + p := &descriptorpb.MethodDescriptorProto{ + Name: proto.String(string(method.Name())), + InputType: fullNameOf(method.Input()), + OutputType: fullNameOf(method.Output()), + Options: proto.Clone(method.Options()).(*descriptorpb.MethodOptions), + } + if method.IsStreamingClient() { + p.ClientStreaming = proto.Bool(true) + } + if method.IsStreamingServer() { + p.ServerStreaming = proto.Bool(true) + } + return p +} + +func fullNameOf(d protoreflect.Descriptor) *string { + if d == nil { + return nil + } + if strings.HasPrefix(string(d.FullName()), unknownPrefix) { + return proto.String(string(d.FullName()[len(unknownPrefix):])) + } + return proto.String("." + string(d.FullName())) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go new file mode 100644 index 000000000..d5d5af6eb --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go @@ -0,0 +1,78 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +import ( + "google.golang.org/protobuf/internal/pragma" +) + +// The following types are used by the fast-path Message.ProtoMethods method. +// +// To avoid polluting the public protoreflect API with types used only by +// low-level implementations, the canonical definitions of these types are +// in the runtime/protoiface package. The definitions here and in protoiface +// must be kept in sync. +type ( + methods = struct { + pragma.NoUnkeyedLiterals + Flags supportFlags + Size func(sizeInput) sizeOutput + Marshal func(marshalInput) (marshalOutput, error) + Unmarshal func(unmarshalInput) (unmarshalOutput, error) + Merge func(mergeInput) mergeOutput + CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error) + } + supportFlags = uint64 + sizeInput = struct { + pragma.NoUnkeyedLiterals + Message Message + Flags uint8 + } + sizeOutput = struct { + pragma.NoUnkeyedLiterals + Size int + } + marshalInput = struct { + pragma.NoUnkeyedLiterals + Message Message + Buf []byte + Flags uint8 + } + marshalOutput = struct { + pragma.NoUnkeyedLiterals + Buf []byte + } + unmarshalInput = struct { + pragma.NoUnkeyedLiterals + Message Message + Buf []byte + Flags uint8 + Resolver interface { + FindExtensionByName(field FullName) (ExtensionType, error) + FindExtensionByNumber(message FullName, field FieldNumber) (ExtensionType, error) + } + Depth int + } + unmarshalOutput = struct { + pragma.NoUnkeyedLiterals + Flags uint8 + } + mergeInput = struct { + pragma.NoUnkeyedLiterals + Source Message + Destination Message + } + mergeOutput = struct { + pragma.NoUnkeyedLiterals + Flags uint8 + } + checkInitializedInput = struct { + pragma.NoUnkeyedLiterals + Message Message + } + checkInitializedOutput = struct { + pragma.NoUnkeyedLiterals + } +) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go new file mode 100644 index 000000000..dd85915bd --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go @@ -0,0 +1,504 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
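+
+// Illustrative sketch of the value interfaces documented below; examplepb and
+// its Hello message with a string field "name" are hypothetical generated types:
+//
+//	m := &examplepb.Hello{}
+//	rm := m.ProtoReflect()                        // protoreflect.Message
+//	fd := rm.Descriptor().Fields().ByName("name") // protoreflect.FieldDescriptor
+//	rm.Set(fd, protoreflect.ValueOfString("world"))
+//	_ = rm.Get(fd).String()                       // "world"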
+ +// Package protoreflect provides interfaces to dynamically manipulate messages. +// +// This package includes type descriptors which describe the structure of types +// defined in proto source files and value interfaces which provide the +// ability to examine and manipulate the contents of messages. +// +// +// Protocol Buffer Descriptors +// +// Protobuf descriptors (e.g., EnumDescriptor or MessageDescriptor) +// are immutable objects that represent protobuf type information. +// They are wrappers around the messages declared in descriptor.proto. +// Protobuf descriptors alone lack any information regarding Go types. +// +// Enums and messages generated by this module implement Enum and ProtoMessage, +// where the Descriptor and ProtoReflect.Descriptor accessors respectively +// return the protobuf descriptor for the values. +// +// The protobuf descriptor interfaces are not meant to be implemented by +// user code since they might need to be extended in the future to support +// additions to the protobuf language. +// The "google.golang.org/protobuf/reflect/protodesc" package converts between +// google.protobuf.DescriptorProto messages and protobuf descriptors. +// +// +// Go Type Descriptors +// +// A type descriptor (e.g., EnumType or MessageType) is a constructor for +// a concrete Go type that represents the associated protobuf descriptor. +// There is commonly a one-to-one relationship between protobuf descriptors and +// Go type descriptors, but it can potentially be a one-to-many relationship. +// +// Enums and messages generated by this module implement Enum and ProtoMessage, +// where the Type and ProtoReflect.Type accessors respectively +// return the protobuf descriptor for the values. +// +// The "google.golang.org/protobuf/types/dynamicpb" package can be used to +// create Go type descriptors from protobuf descriptors. +// +// +// Value Interfaces +// +// The Enum and Message interfaces provide a reflective view over an +// enum or message instance. For enums, it provides the ability to retrieve +// the enum value number for any concrete enum type. For messages, it provides +// the ability to access or manipulate fields of the message. +// +// To convert a proto.Message to a protoreflect.Message, use the +// former's ProtoReflect method. Since the ProtoReflect method is new to the +// v2 message interface, it may not be present on older message implementations. +// The "github.com/golang/protobuf/proto".MessageReflect function can be used +// to obtain a reflective view on older messages. +// +// +// Relationships +// +// The following diagrams demonstrate the relationships between +// various types declared in this package. +// +// +// ┌───────────────────────────────────┐ +// V │ +// ┌────────────── New(n) ─────────────┐ │ +// │ │ │ +// │ ┌──── Descriptor() ──┐ │ ┌── Number() ──┐ │ +// │ │ V V │ V │ +// ╔════════════╗ ╔════════════════╗ ╔════════╗ ╔════════════╗ +// ║ EnumType ║ ║ EnumDescriptor ║ ║ Enum ║ ║ EnumNumber ║ +// ╚════════════╝ ╚════════════════╝ ╚════════╝ ╚════════════╝ +// Λ Λ │ │ +// │ └─── Descriptor() ──┘ │ +// │ │ +// └────────────────── Type() ───────┘ +// +// • An EnumType describes a concrete Go enum type. +// It has an EnumDescriptor and can construct an Enum instance. +// +// • An EnumDescriptor describes an abstract protobuf enum type. +// +// • An Enum is a concrete enum instance. Generated enums implement Enum. 
+// +// +// ┌──────────────── New() ─────────────────┐ +// │ │ +// │ ┌─── Descriptor() ─────┐ │ ┌── Interface() ───┐ +// │ │ V V │ V +// ╔═════════════╗ ╔═══════════════════╗ ╔═════════╗ ╔══════════════╗ +// ║ MessageType ║ ║ MessageDescriptor ║ ║ Message ║ ║ ProtoMessage ║ +// ╚═════════════╝ ╚═══════════════════╝ ╚═════════╝ ╚══════════════╝ +// Λ Λ │ │ Λ │ +// │ └──── Descriptor() ────┘ │ └─ ProtoReflect() ─┘ +// │ │ +// └─────────────────── Type() ─────────┘ +// +// • A MessageType describes a concrete Go message type. +// It has a MessageDescriptor and can construct a Message instance. +// +// • A MessageDescriptor describes an abstract protobuf message type. +// +// • A Message is a concrete message instance. Generated messages implement +// ProtoMessage, which can convert to/from a Message. +// +// +// ┌── TypeDescriptor() ──┐ ┌───── Descriptor() ─────┐ +// │ V │ V +// ╔═══════════════╗ ╔═════════════════════════╗ ╔═════════════════════╗ +// ║ ExtensionType ║ ║ ExtensionTypeDescriptor ║ ║ ExtensionDescriptor ║ +// ╚═══════════════╝ ╚═════════════════════════╝ ╚═════════════════════╝ +// Λ │ │ Λ │ Λ +// └─────── Type() ───────┘ │ └─── may implement ────┘ │ +// │ │ +// └────── implements ────────┘ +// +// • An ExtensionType describes a concrete Go implementation of an extension. +// It has an ExtensionTypeDescriptor and can convert to/from +// abstract Values and Go values. +// +// • An ExtensionTypeDescriptor is an ExtensionDescriptor +// which also has an ExtensionType. +// +// • An ExtensionDescriptor describes an abstract protobuf extension field and +// may not always be an ExtensionTypeDescriptor. +package protoreflect + +import ( + "fmt" + "strings" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/pragma" +) + +type doNotImplement pragma.DoNotImplement + +// ProtoMessage is the top-level interface that all proto messages implement. +// This is declared in the protoreflect package to avoid a cyclic dependency; +// use the proto.Message type instead, which aliases this type. +type ProtoMessage interface{ ProtoReflect() Message } + +// Syntax is the language version of the proto file. +type Syntax syntax + +type syntax int8 // keep exact type opaque as the int type may change + +const ( + Proto2 Syntax = 2 + Proto3 Syntax = 3 +) + +// IsValid reports whether the syntax is valid. +func (s Syntax) IsValid() bool { + switch s { + case Proto2, Proto3: + return true + default: + return false + } +} + +// String returns s as a proto source identifier (e.g., "proto2"). +func (s Syntax) String() string { + switch s { + case Proto2: + return "proto2" + case Proto3: + return "proto3" + default: + return fmt.Sprintf("", s) + } +} + +// GoString returns s as a Go source identifier (e.g., "Proto2"). +func (s Syntax) GoString() string { + switch s { + case Proto2: + return "Proto2" + case Proto3: + return "Proto3" + default: + return fmt.Sprintf("Syntax(%d)", s) + } +} + +// Cardinality determines whether a field is optional, required, or repeated. +type Cardinality cardinality + +type cardinality int8 // keep exact type opaque as the int type may change + +// Constants as defined by the google.protobuf.Cardinality enumeration. +const ( + Optional Cardinality = 1 // appears zero or one times + Required Cardinality = 2 // appears exactly one time; invalid with Proto3 + Repeated Cardinality = 3 // appears zero or more times +) + +// IsValid reports whether the cardinality is valid. 
+func (c Cardinality) IsValid() bool { + switch c { + case Optional, Required, Repeated: + return true + default: + return false + } +} + +// String returns c as a proto source identifier (e.g., "optional"). +func (c Cardinality) String() string { + switch c { + case Optional: + return "optional" + case Required: + return "required" + case Repeated: + return "repeated" + default: + return fmt.Sprintf("", c) + } +} + +// GoString returns c as a Go source identifier (e.g., "Optional"). +func (c Cardinality) GoString() string { + switch c { + case Optional: + return "Optional" + case Required: + return "Required" + case Repeated: + return "Repeated" + default: + return fmt.Sprintf("Cardinality(%d)", c) + } +} + +// Kind indicates the basic proto kind of a field. +type Kind kind + +type kind int8 // keep exact type opaque as the int type may change + +// Constants as defined by the google.protobuf.Field.Kind enumeration. +const ( + BoolKind Kind = 8 + EnumKind Kind = 14 + Int32Kind Kind = 5 + Sint32Kind Kind = 17 + Uint32Kind Kind = 13 + Int64Kind Kind = 3 + Sint64Kind Kind = 18 + Uint64Kind Kind = 4 + Sfixed32Kind Kind = 15 + Fixed32Kind Kind = 7 + FloatKind Kind = 2 + Sfixed64Kind Kind = 16 + Fixed64Kind Kind = 6 + DoubleKind Kind = 1 + StringKind Kind = 9 + BytesKind Kind = 12 + MessageKind Kind = 11 + GroupKind Kind = 10 +) + +// IsValid reports whether the kind is valid. +func (k Kind) IsValid() bool { + switch k { + case BoolKind, EnumKind, + Int32Kind, Sint32Kind, Uint32Kind, + Int64Kind, Sint64Kind, Uint64Kind, + Sfixed32Kind, Fixed32Kind, FloatKind, + Sfixed64Kind, Fixed64Kind, DoubleKind, + StringKind, BytesKind, MessageKind, GroupKind: + return true + default: + return false + } +} + +// String returns k as a proto source identifier (e.g., "bool"). +func (k Kind) String() string { + switch k { + case BoolKind: + return "bool" + case EnumKind: + return "enum" + case Int32Kind: + return "int32" + case Sint32Kind: + return "sint32" + case Uint32Kind: + return "uint32" + case Int64Kind: + return "int64" + case Sint64Kind: + return "sint64" + case Uint64Kind: + return "uint64" + case Sfixed32Kind: + return "sfixed32" + case Fixed32Kind: + return "fixed32" + case FloatKind: + return "float" + case Sfixed64Kind: + return "sfixed64" + case Fixed64Kind: + return "fixed64" + case DoubleKind: + return "double" + case StringKind: + return "string" + case BytesKind: + return "bytes" + case MessageKind: + return "message" + case GroupKind: + return "group" + default: + return fmt.Sprintf("", k) + } +} + +// GoString returns k as a Go source identifier (e.g., "BoolKind"). +func (k Kind) GoString() string { + switch k { + case BoolKind: + return "BoolKind" + case EnumKind: + return "EnumKind" + case Int32Kind: + return "Int32Kind" + case Sint32Kind: + return "Sint32Kind" + case Uint32Kind: + return "Uint32Kind" + case Int64Kind: + return "Int64Kind" + case Sint64Kind: + return "Sint64Kind" + case Uint64Kind: + return "Uint64Kind" + case Sfixed32Kind: + return "Sfixed32Kind" + case Fixed32Kind: + return "Fixed32Kind" + case FloatKind: + return "FloatKind" + case Sfixed64Kind: + return "Sfixed64Kind" + case Fixed64Kind: + return "Fixed64Kind" + case DoubleKind: + return "DoubleKind" + case StringKind: + return "StringKind" + case BytesKind: + return "BytesKind" + case MessageKind: + return "MessageKind" + case GroupKind: + return "GroupKind" + default: + return fmt.Sprintf("Kind(%d)", k) + } +} + +// FieldNumber is the field number in a message. 
+type FieldNumber = protowire.Number + +// FieldNumbers represent a list of field numbers. +type FieldNumbers interface { + // Len reports the number of fields in the list. + Len() int + // Get returns the ith field number. It panics if out of bounds. + Get(i int) FieldNumber + // Has reports whether n is within the list of fields. + Has(n FieldNumber) bool + + doNotImplement +} + +// FieldRanges represent a list of field number ranges. +type FieldRanges interface { + // Len reports the number of ranges in the list. + Len() int + // Get returns the ith range. It panics if out of bounds. + Get(i int) [2]FieldNumber // start inclusive; end exclusive + // Has reports whether n is within any of the ranges. + Has(n FieldNumber) bool + + doNotImplement +} + +// EnumNumber is the numeric value for an enum. +type EnumNumber int32 + +// EnumRanges represent a list of enum number ranges. +type EnumRanges interface { + // Len reports the number of ranges in the list. + Len() int + // Get returns the ith range. It panics if out of bounds. + Get(i int) [2]EnumNumber // start inclusive; end inclusive + // Has reports whether n is within any of the ranges. + Has(n EnumNumber) bool + + doNotImplement +} + +// Name is the short name for a proto declaration. This is not the name +// as used in Go source code, which might not be identical to the proto name. +type Name string // e.g., "Kind" + +// IsValid reports whether s is a syntactically valid name. +// An empty name is invalid. +func (s Name) IsValid() bool { + return consumeIdent(string(s)) == len(s) +} + +// Names represent a list of names. +type Names interface { + // Len reports the number of names in the list. + Len() int + // Get returns the ith name. It panics if out of bounds. + Get(i int) Name + // Has reports whether s matches any names in the list. + Has(s Name) bool + + doNotImplement +} + +// FullName is a qualified name that uniquely identifies a proto declaration. +// A qualified name is the concatenation of the proto package along with the +// fully-declared name (i.e., name of parent preceding the name of the child), +// with a '.' delimiter placed between each Name. +// +// This should not have any leading or trailing dots. +type FullName string // e.g., "google.protobuf.Field.Kind" + +// IsValid reports whether s is a syntactically valid full name. +// An empty full name is invalid. +func (s FullName) IsValid() bool { + i := consumeIdent(string(s)) + if i < 0 { + return false + } + for len(s) > i { + if s[i] != '.' { + return false + } + i++ + n := consumeIdent(string(s[i:])) + if n < 0 { + return false + } + i += n + } + return true +} + +func consumeIdent(s string) (i int) { + if len(s) == 0 || !isLetter(s[i]) { + return -1 + } + i++ + for len(s) > i && isLetterDigit(s[i]) { + i++ + } + return i +} +func isLetter(c byte) bool { + return c == '_' || ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') +} +func isLetterDigit(c byte) bool { + return isLetter(c) || ('0' <= c && c <= '9') +} + +// Name returns the short name, which is the last identifier segment. +// A single segment FullName is the Name itself. +func (n FullName) Name() Name { + if i := strings.LastIndexByte(string(n), '.'); i >= 0 { + return Name(n[i+1:]) + } + return Name(n) +} + +// Parent returns the full name with the trailing identifier removed. +// A single segment FullName has no parent. 
+func (n FullName) Parent() FullName { + if i := strings.LastIndexByte(string(n), '.'); i >= 0 { + return n[:i] + } + return "" +} + +// Append returns the qualified name appended with the provided short name. +// +// Invariant: n == n.Parent().Append(n.Name()) // assuming n is valid +func (n FullName) Append(s Name) FullName { + if n == "" { + return FullName(s) + } + return n + "." + FullName(s) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go new file mode 100644 index 000000000..121ba3a07 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go @@ -0,0 +1,128 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +import ( + "strconv" +) + +// SourceLocations is a list of source locations. +type SourceLocations interface { + // Len reports the number of source locations in the proto file. + Len() int + // Get returns the ith SourceLocation. It panics if out of bounds. + Get(int) SourceLocation + + // ByPath returns the SourceLocation for the given path, + // returning the first location if multiple exist for the same path. + // If multiple locations exist for the same path, + // then SourceLocation.Next index can be used to identify the + // index of the next SourceLocation. + // If no location exists for this path, it returns the zero value. + ByPath(path SourcePath) SourceLocation + + // ByDescriptor returns the SourceLocation for the given descriptor, + // returning the first location if multiple exist for the same path. + // If no location exists for this descriptor, it returns the zero value. + ByDescriptor(desc Descriptor) SourceLocation + + doNotImplement +} + +// SourceLocation describes a source location and +// corresponds with the google.protobuf.SourceCodeInfo.Location message. +type SourceLocation struct { + // Path is the path to the declaration from the root file descriptor. + // The contents of this slice must not be mutated. + Path SourcePath + + // StartLine and StartColumn are the zero-indexed starting location + // in the source file for the declaration. + StartLine, StartColumn int + // EndLine and EndColumn are the zero-indexed ending location + // in the source file for the declaration. + // In the descriptor.proto, the end line may be omitted if it is identical + // to the start line. Here, it is always populated. + EndLine, EndColumn int + + // LeadingDetachedComments are the leading detached comments + // for the declaration. The contents of this slice must not be mutated. + LeadingDetachedComments []string + // LeadingComments is the leading attached comment for the declaration. + LeadingComments string + // TrailingComments is the trailing attached comment for the declaration. + TrailingComments string + + // Next is an index into SourceLocations for the next source location that + // has the same Path. It is zero if there is no next location. + Next int +} + +// SourcePath identifies part of a file descriptor for a source location. +// The SourcePath is a sequence of either field numbers or indexes into +// a repeated field that form a path starting from the root file descriptor. +// +// See google.protobuf.SourceCodeInfo.Location.path. +type SourcePath []int32 + +// Equal reports whether p1 equals p2. 
+func (p1 SourcePath) Equal(p2 SourcePath) bool { + if len(p1) != len(p2) { + return false + } + for i := range p1 { + if p1[i] != p2[i] { + return false + } + } + return true +} + +// String formats the path in a humanly readable manner. +// The output is guaranteed to be deterministic, +// making it suitable for use as a key into a Go map. +// It is not guaranteed to be stable as the exact output could change +// in a future version of this module. +// +// Example output: +// .message_type[6].nested_type[15].field[3] +func (p SourcePath) String() string { + b := p.appendFileDescriptorProto(nil) + for _, i := range p { + b = append(b, '.') + b = strconv.AppendInt(b, int64(i), 10) + } + return string(b) +} + +type appendFunc func(*SourcePath, []byte) []byte + +func (p *SourcePath) appendSingularField(b []byte, name string, f appendFunc) []byte { + if len(*p) == 0 { + return b + } + b = append(b, '.') + b = append(b, name...) + *p = (*p)[1:] + if f != nil { + b = f(p, b) + } + return b +} + +func (p *SourcePath) appendRepeatedField(b []byte, name string, f appendFunc) []byte { + b = p.appendSingularField(b, name, nil) + if len(*p) == 0 || (*p)[0] < 0 { + return b + } + b = append(b, '[') + b = strconv.AppendUint(b, uint64((*p)[0]), 10) + b = append(b, ']') + *p = (*p)[1:] + if f != nil { + b = f(p, b) + } + return b +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go new file mode 100644 index 000000000..b03c1223c --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -0,0 +1,461 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. 
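As a quick illustration of the SourcePath and FullName helpers defined above, the following sketch renders a path into google.protobuf.FileDescriptorProto symbolically and splits a qualified proto name; the concrete path and name are arbitrary examples:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	// Field numbers and repeated-field indexes into FileDescriptorProto:
	// 4 = message_type (index 0), then 2 = field (index 1).
	p := protoreflect.SourcePath{4, 0, 2, 1}
	fmt.Println(p.String()) // .message_type[0].field[1]

	// FullName helpers split and rejoin dot-delimited proto names.
	n := protoreflect.FullName("google.protobuf.Field.Kind")
	fmt.Println(n.Parent(), n.Name())             // google.protobuf.Field Kind
	fmt.Println(n == n.Parent().Append(n.Name())) // true
}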
+ +package protoreflect + +func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendSingularField(b, "package", nil) + case 3: + b = p.appendRepeatedField(b, "dependency", nil) + case 10: + b = p.appendRepeatedField(b, "public_dependency", nil) + case 11: + b = p.appendRepeatedField(b, "weak_dependency", nil) + case 4: + b = p.appendRepeatedField(b, "message_type", (*SourcePath).appendDescriptorProto) + case 5: + b = p.appendRepeatedField(b, "enum_type", (*SourcePath).appendEnumDescriptorProto) + case 6: + b = p.appendRepeatedField(b, "service", (*SourcePath).appendServiceDescriptorProto) + case 7: + b = p.appendRepeatedField(b, "extension", (*SourcePath).appendFieldDescriptorProto) + case 8: + b = p.appendSingularField(b, "options", (*SourcePath).appendFileOptions) + case 9: + b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo) + case 12: + b = p.appendSingularField(b, "syntax", nil) + } + return b +} + +func (p *SourcePath) appendDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendRepeatedField(b, "field", (*SourcePath).appendFieldDescriptorProto) + case 6: + b = p.appendRepeatedField(b, "extension", (*SourcePath).appendFieldDescriptorProto) + case 3: + b = p.appendRepeatedField(b, "nested_type", (*SourcePath).appendDescriptorProto) + case 4: + b = p.appendRepeatedField(b, "enum_type", (*SourcePath).appendEnumDescriptorProto) + case 5: + b = p.appendRepeatedField(b, "extension_range", (*SourcePath).appendDescriptorProto_ExtensionRange) + case 8: + b = p.appendRepeatedField(b, "oneof_decl", (*SourcePath).appendOneofDescriptorProto) + case 7: + b = p.appendSingularField(b, "options", (*SourcePath).appendMessageOptions) + case 9: + b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendDescriptorProto_ReservedRange) + case 10: + b = p.appendRepeatedField(b, "reserved_name", nil) + } + return b +} + +func (p *SourcePath) appendEnumDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendRepeatedField(b, "value", (*SourcePath).appendEnumValueDescriptorProto) + case 3: + b = p.appendSingularField(b, "options", (*SourcePath).appendEnumOptions) + case 4: + b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendEnumDescriptorProto_EnumReservedRange) + case 5: + b = p.appendRepeatedField(b, "reserved_name", nil) + } + return b +} + +func (p *SourcePath) appendServiceDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendRepeatedField(b, "method", (*SourcePath).appendMethodDescriptorProto) + case 3: + b = p.appendSingularField(b, "options", (*SourcePath).appendServiceOptions) + } + return b +} + +func (p *SourcePath) appendFieldDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 3: + b = p.appendSingularField(b, "number", nil) + case 4: + b = p.appendSingularField(b, "label", nil) + case 5: + b = p.appendSingularField(b, "type", nil) + case 6: + b = p.appendSingularField(b, "type_name", nil) + case 2: + b = p.appendSingularField(b, "extendee", nil) + case 7: + b = 
p.appendSingularField(b, "default_value", nil) + case 9: + b = p.appendSingularField(b, "oneof_index", nil) + case 10: + b = p.appendSingularField(b, "json_name", nil) + case 8: + b = p.appendSingularField(b, "options", (*SourcePath).appendFieldOptions) + case 17: + b = p.appendSingularField(b, "proto3_optional", nil) + } + return b +} + +func (p *SourcePath) appendFileOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "java_package", nil) + case 8: + b = p.appendSingularField(b, "java_outer_classname", nil) + case 10: + b = p.appendSingularField(b, "java_multiple_files", nil) + case 20: + b = p.appendSingularField(b, "java_generate_equals_and_hash", nil) + case 27: + b = p.appendSingularField(b, "java_string_check_utf8", nil) + case 9: + b = p.appendSingularField(b, "optimize_for", nil) + case 11: + b = p.appendSingularField(b, "go_package", nil) + case 16: + b = p.appendSingularField(b, "cc_generic_services", nil) + case 17: + b = p.appendSingularField(b, "java_generic_services", nil) + case 18: + b = p.appendSingularField(b, "py_generic_services", nil) + case 42: + b = p.appendSingularField(b, "php_generic_services", nil) + case 23: + b = p.appendSingularField(b, "deprecated", nil) + case 31: + b = p.appendSingularField(b, "cc_enable_arenas", nil) + case 36: + b = p.appendSingularField(b, "objc_class_prefix", nil) + case 37: + b = p.appendSingularField(b, "csharp_namespace", nil) + case 39: + b = p.appendSingularField(b, "swift_prefix", nil) + case 40: + b = p.appendSingularField(b, "php_class_prefix", nil) + case 41: + b = p.appendSingularField(b, "php_namespace", nil) + case 44: + b = p.appendSingularField(b, "php_metadata_namespace", nil) + case 45: + b = p.appendSingularField(b, "ruby_package", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendSourceCodeInfo(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendRepeatedField(b, "location", (*SourcePath).appendSourceCodeInfo_Location) + } + return b +} + +func (p *SourcePath) appendDescriptorProto_ExtensionRange(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "start", nil) + case 2: + b = p.appendSingularField(b, "end", nil) + case 3: + b = p.appendSingularField(b, "options", (*SourcePath).appendExtensionRangeOptions) + } + return b +} + +func (p *SourcePath) appendOneofDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendSingularField(b, "options", (*SourcePath).appendOneofOptions) + } + return b +} + +func (p *SourcePath) appendMessageOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "message_set_wire_format", nil) + case 2: + b = p.appendSingularField(b, "no_standard_descriptor_accessor", nil) + case 3: + b = p.appendSingularField(b, "deprecated", nil) + case 7: + b = p.appendSingularField(b, "map_entry", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendDescriptorProto_ReservedRange(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "start", nil) + case 2: + b = p.appendSingularField(b, 
"end", nil) + } + return b +} + +func (p *SourcePath) appendEnumValueDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendSingularField(b, "number", nil) + case 3: + b = p.appendSingularField(b, "options", (*SourcePath).appendEnumValueOptions) + } + return b +} + +func (p *SourcePath) appendEnumOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 2: + b = p.appendSingularField(b, "allow_alias", nil) + case 3: + b = p.appendSingularField(b, "deprecated", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendEnumDescriptorProto_EnumReservedRange(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "start", nil) + case 2: + b = p.appendSingularField(b, "end", nil) + } + return b +} + +func (p *SourcePath) appendMethodDescriptorProto(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name", nil) + case 2: + b = p.appendSingularField(b, "input_type", nil) + case 3: + b = p.appendSingularField(b, "output_type", nil) + case 4: + b = p.appendSingularField(b, "options", (*SourcePath).appendMethodOptions) + case 5: + b = p.appendSingularField(b, "client_streaming", nil) + case 6: + b = p.appendSingularField(b, "server_streaming", nil) + } + return b +} + +func (p *SourcePath) appendServiceOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 33: + b = p.appendSingularField(b, "deprecated", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendFieldOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "ctype", nil) + case 2: + b = p.appendSingularField(b, "packed", nil) + case 6: + b = p.appendSingularField(b, "jstype", nil) + case 5: + b = p.appendSingularField(b, "lazy", nil) + case 3: + b = p.appendSingularField(b, "deprecated", nil) + case 10: + b = p.appendSingularField(b, "weak", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendUninterpretedOption(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 2: + b = p.appendRepeatedField(b, "name", (*SourcePath).appendUninterpretedOption_NamePart) + case 3: + b = p.appendSingularField(b, "identifier_value", nil) + case 4: + b = p.appendSingularField(b, "positive_int_value", nil) + case 5: + b = p.appendSingularField(b, "negative_int_value", nil) + case 6: + b = p.appendSingularField(b, "double_value", nil) + case 7: + b = p.appendSingularField(b, "string_value", nil) + case 8: + b = p.appendSingularField(b, "aggregate_value", nil) + } + return b +} + +func (p *SourcePath) appendSourceCodeInfo_Location(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendRepeatedField(b, "path", nil) + case 2: + b = p.appendRepeatedField(b, "span", nil) + case 3: + b = p.appendSingularField(b, "leading_comments", nil) + case 4: + b = p.appendSingularField(b, "trailing_comments", nil) + case 6: + b = p.appendRepeatedField(b, "leading_detached_comments", nil) + } + return b +} + +func (p *SourcePath) 
appendExtensionRangeOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendOneofOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendEnumValueOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "deprecated", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendMethodOptions(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 33: + b = p.appendSingularField(b, "deprecated", nil) + case 34: + b = p.appendSingularField(b, "idempotency_level", nil) + case 999: + b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + } + return b +} + +func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "name_part", nil) + case 2: + b = p.appendSingularField(b, "is_extension", nil) + } + return b +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go new file mode 100644 index 000000000..8e53c44a9 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -0,0 +1,665 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +// Descriptor provides a set of accessors that are common to every descriptor. +// Each descriptor type wraps the equivalent google.protobuf.XXXDescriptorProto, +// but provides efficient lookup and immutability. +// +// Each descriptor is comparable. Equality implies that the two types are +// exactly identical. However, it is possible for the same semantically +// identical proto type to be represented by multiple type descriptors. +// +// For example, suppose we have t1 and t2 which are both MessageDescriptors. +// If t1 == t2, then the types are definitely equal and all accessors return +// the same information. However, if t1 != t2, then it is still possible that +// they still represent the same proto type (e.g., t1.FullName == t2.FullName). +// This can occur if a descriptor type is created dynamically, or multiple +// versions of the same proto type are accidentally linked into the Go binary. +type Descriptor interface { + // ParentFile returns the parent file descriptor that this descriptor + // is declared within. The parent file for the file descriptor is itself. + // + // Support for this functionality is optional and may return nil. + ParentFile() FileDescriptor + + // Parent returns the parent containing this descriptor declaration. 
+ // The following shows the mapping from child type to possible parent types: + // + // ╔═════════════════════╤═══════════════════════════════════╗ + // ║ Child type │ Possible parent types ║ + // ╠═════════════════════╪═══════════════════════════════════╣ + // ║ FileDescriptor │ nil ║ + // ║ MessageDescriptor │ FileDescriptor, MessageDescriptor ║ + // ║ FieldDescriptor │ FileDescriptor, MessageDescriptor ║ + // ║ OneofDescriptor │ MessageDescriptor ║ + // ║ EnumDescriptor │ FileDescriptor, MessageDescriptor ║ + // ║ EnumValueDescriptor │ EnumDescriptor ║ + // ║ ServiceDescriptor │ FileDescriptor ║ + // ║ MethodDescriptor │ ServiceDescriptor ║ + // ╚═════════════════════╧═══════════════════════════════════╝ + // + // Support for this functionality is optional and may return nil. + Parent() Descriptor + + // Index returns the index of this descriptor within its parent. + // It returns 0 if the descriptor does not have a parent or if the parent + // is unknown. + Index() int + + // Syntax is the protobuf syntax. + Syntax() Syntax // e.g., Proto2 or Proto3 + + // Name is the short name of the declaration (i.e., FullName.Name). + Name() Name // e.g., "Any" + + // FullName is the fully-qualified name of the declaration. + // + // The FullName is a concatenation of the full name of the type that this + // type is declared within and the declaration name. For example, + // field "foo_field" in message "proto.package.MyMessage" is + // uniquely identified as "proto.package.MyMessage.foo_field". + // Enum values are an exception to the rule (see EnumValueDescriptor). + FullName() FullName // e.g., "google.protobuf.Any" + + // IsPlaceholder reports whether type information is missing since a + // dependency is not resolved, in which case only name information is known. + // + // Placeholder types may only be returned by the following accessors + // as a result of unresolved dependencies or weak imports: + // + // ╔═══════════════════════════════════╤═════════════════════╗ + // ║ Accessor │ Descriptor ║ + // ╠═══════════════════════════════════╪═════════════════════╣ + // ║ FileImports.FileDescriptor │ FileDescriptor ║ + // ║ FieldDescriptor.Enum │ EnumDescriptor ║ + // ║ FieldDescriptor.Message │ MessageDescriptor ║ + // ║ FieldDescriptor.DefaultEnumValue │ EnumValueDescriptor ║ + // ║ FieldDescriptor.ContainingMessage │ MessageDescriptor ║ + // ║ MethodDescriptor.Input │ MessageDescriptor ║ + // ║ MethodDescriptor.Output │ MessageDescriptor ║ + // ╚═══════════════════════════════════╧═════════════════════╝ + // + // If true, only Name and FullName are valid. + // For FileDescriptor, the Path is also valid. + IsPlaceholder() bool + + // Options returns the descriptor options. The caller must not modify + // the returned value. + // + // To avoid a dependency cycle, this function returns a proto.Message value. 
+ // The proto message type returned for each descriptor type is as follows: + // ╔═════════════════════╤══════════════════════════════════════════╗ + // ║ Go type │ Protobuf message type ║ + // ╠═════════════════════╪══════════════════════════════════════════╣ + // ║ FileDescriptor │ google.protobuf.FileOptions ║ + // ║ EnumDescriptor │ google.protobuf.EnumOptions ║ + // ║ EnumValueDescriptor │ google.protobuf.EnumValueOptions ║ + // ║ MessageDescriptor │ google.protobuf.MessageOptions ║ + // ║ FieldDescriptor │ google.protobuf.FieldOptions ║ + // ║ OneofDescriptor │ google.protobuf.OneofOptions ║ + // ║ ServiceDescriptor │ google.protobuf.ServiceOptions ║ + // ║ MethodDescriptor │ google.protobuf.MethodOptions ║ + // ╚═════════════════════╧══════════════════════════════════════════╝ + // + // This method returns a typed nil-pointer if no options are present. + // The caller must import the descriptorpb package to use this. + Options() ProtoMessage + + doNotImplement +} + +// FileDescriptor describes the types in a complete proto file and +// corresponds with the google.protobuf.FileDescriptorProto message. +// +// Top-level declarations: +// EnumDescriptor, MessageDescriptor, FieldDescriptor, and/or ServiceDescriptor. +type FileDescriptor interface { + Descriptor // Descriptor.FullName is identical to Package + + // Path returns the file name, relative to the source tree root. + Path() string // e.g., "path/to/file.proto" + // Package returns the protobuf package namespace. + Package() FullName // e.g., "google.protobuf" + + // Imports is a list of imported proto files. + Imports() FileImports + + // Enums is a list of the top-level enum declarations. + Enums() EnumDescriptors + // Messages is a list of the top-level message declarations. + Messages() MessageDescriptors + // Extensions is a list of the top-level extension declarations. + Extensions() ExtensionDescriptors + // Services is a list of the top-level service declarations. + Services() ServiceDescriptors + + // SourceLocations is a list of source locations. + SourceLocations() SourceLocations + + isFileDescriptor +} +type isFileDescriptor interface{ ProtoType(FileDescriptor) } + +// FileImports is a list of file imports. +type FileImports interface { + // Len reports the number of files imported by this proto file. + Len() int + // Get returns the ith FileImport. It panics if out of bounds. + Get(i int) FileImport + + doNotImplement +} + +// FileImport is the declaration for a proto file import. +type FileImport struct { + // FileDescriptor is the file type for the given import. + // It is a placeholder descriptor if IsWeak is set or if a dependency has + // not been regenerated to implement the new reflection APIs. + FileDescriptor + + // IsPublic reports whether this is a public import, which causes this file + // to alias declarations within the imported file. The intended use cases + // for this feature is the ability to move proto files without breaking + // existing dependencies. + // + // The current file and the imported file must be within proto package. + IsPublic bool + + // IsWeak reports whether this is a weak import, which does not impose + // a direct dependency on the target file. + // + // Weak imports are a legacy proto1 feature. Equivalent behavior is + // achieved using proto2 extension fields or proto3 Any messages. + IsWeak bool +} + +// MessageDescriptor describes a message and +// corresponds with the google.protobuf.DescriptorProto message. 
+// +// Nested declarations: +// FieldDescriptor, OneofDescriptor, FieldDescriptor, EnumDescriptor, +// and/or MessageDescriptor. +type MessageDescriptor interface { + Descriptor + + // IsMapEntry indicates that this is an auto-generated message type to + // represent the entry type for a map field. + // + // Map entry messages have only two fields: + // • a "key" field with a field number of 1 + // • a "value" field with a field number of 2 + // The key and value types are determined by these two fields. + // + // If IsMapEntry is true, it implies that FieldDescriptor.IsMap is true + // for some field with this message type. + IsMapEntry() bool + + // Fields is a list of nested field declarations. + Fields() FieldDescriptors + // Oneofs is a list of nested oneof declarations. + Oneofs() OneofDescriptors + + // ReservedNames is a list of reserved field names. + ReservedNames() Names + // ReservedRanges is a list of reserved ranges of field numbers. + ReservedRanges() FieldRanges + // RequiredNumbers is a list of required field numbers. + // In Proto3, it is always an empty list. + RequiredNumbers() FieldNumbers + // ExtensionRanges is the field ranges used for extension fields. + // In Proto3, it is always an empty ranges. + ExtensionRanges() FieldRanges + // ExtensionRangeOptions returns the ith extension range options. + // + // To avoid a dependency cycle, this method returns a proto.Message value, + // which always contains a google.protobuf.ExtensionRangeOptions message. + // This method returns a typed nil-pointer if no options are present. + // The caller must import the descriptorpb package to use this. + ExtensionRangeOptions(i int) ProtoMessage + + // Enums is a list of nested enum declarations. + Enums() EnumDescriptors + // Messages is a list of nested message declarations. + Messages() MessageDescriptors + // Extensions is a list of nested extension declarations. + Extensions() ExtensionDescriptors + + isMessageDescriptor +} +type isMessageDescriptor interface{ ProtoType(MessageDescriptor) } + +// MessageType encapsulates a MessageDescriptor with a concrete Go implementation. +// It is recommended that implementations of this interface also implement the +// MessageFieldTypes interface. +type MessageType interface { + // New returns a newly allocated empty message. + // It may return nil for synthetic messages representing a map entry. + New() Message + + // Zero returns an empty, read-only message. + // It may return nil for synthetic messages representing a map entry. + Zero() Message + + // Descriptor returns the message descriptor. + // + // Invariant: t.Descriptor() == t.New().Descriptor() + Descriptor() MessageDescriptor +} + +// MessageFieldTypes extends a MessageType by providing type information +// regarding enums and messages referenced by the message fields. +type MessageFieldTypes interface { + MessageType + + // Enum returns the EnumType for the ith field in Descriptor.Fields. + // It returns nil if the ith field is not an enum kind. + // It panics if out of bounds. + // + // Invariant: mt.Enum(i).Descriptor() == mt.Descriptor().Fields(i).Enum() + Enum(i int) EnumType + + // Message returns the MessageType for the ith field in Descriptor.Fields. + // It returns nil if the ith field is not a message or group kind. + // It panics if out of bounds. + // + // Invariant: mt.Message(i).Descriptor() == mt.Descriptor().Fields(i).Message() + Message(i int) MessageType +} + +// MessageDescriptors is a list of message declarations. 
+type MessageDescriptors interface { + // Len reports the number of messages. + Len() int + // Get returns the ith MessageDescriptor. It panics if out of bounds. + Get(i int) MessageDescriptor + // ByName returns the MessageDescriptor for a message named s. + // It returns nil if not found. + ByName(s Name) MessageDescriptor + + doNotImplement +} + +// FieldDescriptor describes a field within a message and +// corresponds with the google.protobuf.FieldDescriptorProto message. +// +// It is used for both normal fields defined within the parent message +// (e.g., MessageDescriptor.Fields) and fields that extend some remote message +// (e.g., FileDescriptor.Extensions or MessageDescriptor.Extensions). +type FieldDescriptor interface { + Descriptor + + // Number reports the unique number for this field. + Number() FieldNumber + // Cardinality reports the cardinality for this field. + Cardinality() Cardinality + // Kind reports the basic kind for this field. + Kind() Kind + + // HasJSONName reports whether this field has an explicitly set JSON name. + HasJSONName() bool + + // JSONName reports the name used for JSON serialization. + // It is usually the camel-cased form of the field name. + // Extension fields are represented by the full name surrounded by brackets. + JSONName() string + + // TextName reports the name used for text serialization. + // It is usually the name of the field, except that groups use the name + // of the inlined message, and extension fields are represented by the + // full name surrounded by brackets. + TextName() string + + // HasPresence reports whether the field distinguishes between unpopulated + // and default values. + HasPresence() bool + + // IsExtension reports whether this is an extension field. If false, + // then Parent and ContainingMessage refer to the same message. + // Otherwise, ContainingMessage and Parent likely differ. + IsExtension() bool + + // HasOptionalKeyword reports whether the "optional" keyword was explicitly + // specified in the source .proto file. + HasOptionalKeyword() bool + + // IsWeak reports whether this is a weak field, which does not impose a + // direct dependency on the target type. + // If true, then Message returns a placeholder type. + IsWeak() bool + + // IsPacked reports whether repeated primitive numeric kinds should be + // serialized using a packed encoding. + // If true, then it implies Cardinality is Repeated. + IsPacked() bool + + // IsList reports whether this field represents a list, + // where the value type for the associated field is a List. + // It is equivalent to checking whether Cardinality is Repeated and + // that IsMap reports false. + IsList() bool + + // IsMap reports whether this field represents a map, + // where the value type for the associated field is a Map. + // It is equivalent to checking whether Cardinality is Repeated, + // that the Kind is MessageKind, and that Message.IsMapEntry reports true. + IsMap() bool + + // MapKey returns the field descriptor for the key in the map entry. + // It returns nil if IsMap reports false. + MapKey() FieldDescriptor + + // MapValue returns the field descriptor for the value in the map entry. + // It returns nil if IsMap reports false. + MapValue() FieldDescriptor + + // HasDefault reports whether this field has a default value. + HasDefault() bool + + // Default returns the default value for scalar fields. + // For proto2, it is the default value as specified in the proto file, + // or the zero value if unspecified. 
+ // For proto3, it is always the zero value of the scalar. + // The Value type is determined by the Kind. + Default() Value + + // DefaultEnumValue returns the enum value descriptor for the default value + // of an enum field, and is nil for any other kind of field. + DefaultEnumValue() EnumValueDescriptor + + // ContainingOneof is the containing oneof that this field belongs to, + // and is nil if this field is not part of a oneof. + ContainingOneof() OneofDescriptor + + // ContainingMessage is the containing message that this field belongs to. + // For extension fields, this may not necessarily be the parent message + // that the field is declared within. + ContainingMessage() MessageDescriptor + + // Enum is the enum descriptor if Kind is EnumKind. + // It returns nil for any other Kind. + Enum() EnumDescriptor + + // Message is the message descriptor if Kind is + // MessageKind or GroupKind. It returns nil for any other Kind. + Message() MessageDescriptor + + isFieldDescriptor +} +type isFieldDescriptor interface{ ProtoType(FieldDescriptor) } + +// FieldDescriptors is a list of field declarations. +type FieldDescriptors interface { + // Len reports the number of fields. + Len() int + // Get returns the ith FieldDescriptor. It panics if out of bounds. + Get(i int) FieldDescriptor + // ByName returns the FieldDescriptor for a field named s. + // It returns nil if not found. + ByName(s Name) FieldDescriptor + // ByJSONName returns the FieldDescriptor for a field with s as the JSON name. + // It returns nil if not found. + ByJSONName(s string) FieldDescriptor + // ByTextName returns the FieldDescriptor for a field with s as the text name. + // It returns nil if not found. + ByTextName(s string) FieldDescriptor + // ByNumber returns the FieldDescriptor for a field numbered n. + // It returns nil if not found. + ByNumber(n FieldNumber) FieldDescriptor + + doNotImplement +} + +// OneofDescriptor describes a oneof field set within a given message and +// corresponds with the google.protobuf.OneofDescriptorProto message. +type OneofDescriptor interface { + Descriptor + + // IsSynthetic reports whether this is a synthetic oneof created to support + // proto3 optional semantics. If true, Fields contains exactly one field + // with HasOptionalKeyword specified. + IsSynthetic() bool + + // Fields is a list of fields belonging to this oneof. + Fields() FieldDescriptors + + isOneofDescriptor +} +type isOneofDescriptor interface{ ProtoType(OneofDescriptor) } + +// OneofDescriptors is a list of oneof declarations. +type OneofDescriptors interface { + // Len reports the number of oneof fields. + Len() int + // Get returns the ith OneofDescriptor. It panics if out of bounds. + Get(i int) OneofDescriptor + // ByName returns the OneofDescriptor for a oneof named s. + // It returns nil if not found. + ByName(s Name) OneofDescriptor + + doNotImplement +} + +// ExtensionDescriptor is an alias of FieldDescriptor for documentation. +type ExtensionDescriptor = FieldDescriptor + +// ExtensionTypeDescriptor is an ExtensionDescriptor with an associated ExtensionType. +type ExtensionTypeDescriptor interface { + ExtensionDescriptor + + // Type returns the associated ExtensionType. + Type() ExtensionType + + // Descriptor returns the plain ExtensionDescriptor without the + // associated ExtensionType. + Descriptor() ExtensionDescriptor +} + +// ExtensionDescriptors is a list of field declarations. +type ExtensionDescriptors interface { + // Len reports the number of fields. 
+ Len() int + // Get returns the ith ExtensionDescriptor. It panics if out of bounds. + Get(i int) ExtensionDescriptor + // ByName returns the ExtensionDescriptor for a field named s. + // It returns nil if not found. + ByName(s Name) ExtensionDescriptor + + doNotImplement +} + +// ExtensionType encapsulates an ExtensionDescriptor with a concrete +// Go implementation. The nested field descriptor must be for a extension field. +// +// While a normal field is a member of the parent message that it is declared +// within (see Descriptor.Parent), an extension field is a member of some other +// target message (see ExtensionDescriptor.Extendee) and may have no +// relationship with the parent. However, the full name of an extension field is +// relative to the parent that it is declared within. +// +// For example: +// syntax = "proto2"; +// package example; +// message FooMessage { +// extensions 100 to max; +// } +// message BarMessage { +// extends FooMessage { optional BarMessage bar_field = 100; } +// } +// +// Field "bar_field" is an extension of FooMessage, but its full name is +// "example.BarMessage.bar_field" instead of "example.FooMessage.bar_field". +type ExtensionType interface { + // New returns a new value for the field. + // For scalars, this returns the default value in native Go form. + New() Value + + // Zero returns a new value for the field. + // For scalars, this returns the default value in native Go form. + // For composite types, this returns an empty, read-only message, list, or map. + Zero() Value + + // TypeDescriptor returns the extension type descriptor. + TypeDescriptor() ExtensionTypeDescriptor + + // ValueOf wraps the input and returns it as a Value. + // ValueOf panics if the input value is invalid or not the appropriate type. + // + // ValueOf is more extensive than protoreflect.ValueOf for a given field's + // value as it has more type information available. + ValueOf(interface{}) Value + + // InterfaceOf completely unwraps the Value to the underlying Go type. + // InterfaceOf panics if the input is nil or does not represent the + // appropriate underlying Go type. For composite types, it panics if the + // value is not mutable. + // + // InterfaceOf is able to unwrap the Value further than Value.Interface + // as it has more type information available. + InterfaceOf(Value) interface{} + + // IsValidValue reports whether the Value is valid to assign to the field. + IsValidValue(Value) bool + + // IsValidInterface reports whether the input is valid to assign to the field. + IsValidInterface(interface{}) bool +} + +// EnumDescriptor describes an enum and +// corresponds with the google.protobuf.EnumDescriptorProto message. +// +// Nested declarations: +// EnumValueDescriptor. +type EnumDescriptor interface { + Descriptor + + // Values is a list of nested enum value declarations. + Values() EnumValueDescriptors + + // ReservedNames is a list of reserved enum names. + ReservedNames() Names + // ReservedRanges is a list of reserved ranges of enum numbers. + ReservedRanges() EnumRanges + + isEnumDescriptor +} +type isEnumDescriptor interface{ ProtoType(EnumDescriptor) } + +// EnumType encapsulates an EnumDescriptor with a concrete Go implementation. +type EnumType interface { + // New returns an instance of this enum type with its value set to n. + New(n EnumNumber) Enum + + // Descriptor returns the enum descriptor. + // + // Invariant: t.Descriptor() == t.New(0).Descriptor() + Descriptor() EnumDescriptor +} + +// EnumDescriptors is a list of enum declarations. 
+type EnumDescriptors interface { + // Len reports the number of enum types. + Len() int + // Get returns the ith EnumDescriptor. It panics if out of bounds. + Get(i int) EnumDescriptor + // ByName returns the EnumDescriptor for an enum named s. + // It returns nil if not found. + ByName(s Name) EnumDescriptor + + doNotImplement +} + +// EnumValueDescriptor describes an enum value and +// corresponds with the google.protobuf.EnumValueDescriptorProto message. +// +// All other proto declarations are in the namespace of the parent. +// However, enum values do not follow this rule and are within the namespace +// of the parent's parent (i.e., they are a sibling of the containing enum). +// Thus, a value named "FOO_VALUE" declared within an enum uniquely identified +// as "proto.package.MyEnum" has a full name of "proto.package.FOO_VALUE". +type EnumValueDescriptor interface { + Descriptor + + // Number returns the enum value as an integer. + Number() EnumNumber + + isEnumValueDescriptor +} +type isEnumValueDescriptor interface{ ProtoType(EnumValueDescriptor) } + +// EnumValueDescriptors is a list of enum value declarations. +type EnumValueDescriptors interface { + // Len reports the number of enum values. + Len() int + // Get returns the ith EnumValueDescriptor. It panics if out of bounds. + Get(i int) EnumValueDescriptor + // ByName returns the EnumValueDescriptor for the enum value named s. + // It returns nil if not found. + ByName(s Name) EnumValueDescriptor + // ByNumber returns the EnumValueDescriptor for the enum value numbered n. + // If multiple have the same number, the first one defined is returned + // It returns nil if not found. + ByNumber(n EnumNumber) EnumValueDescriptor + + doNotImplement +} + +// ServiceDescriptor describes a service and +// corresponds with the google.protobuf.ServiceDescriptorProto message. +// +// Nested declarations: MethodDescriptor. +type ServiceDescriptor interface { + Descriptor + + // Methods is a list of nested message declarations. + Methods() MethodDescriptors + + isServiceDescriptor +} +type isServiceDescriptor interface{ ProtoType(ServiceDescriptor) } + +// ServiceDescriptors is a list of service declarations. +type ServiceDescriptors interface { + // Len reports the number of services. + Len() int + // Get returns the ith ServiceDescriptor. It panics if out of bounds. + Get(i int) ServiceDescriptor + // ByName returns the ServiceDescriptor for a service named s. + // It returns nil if not found. + ByName(s Name) ServiceDescriptor + + doNotImplement +} + +// MethodDescriptor describes a method and +// corresponds with the google.protobuf.MethodDescriptorProto message. +type MethodDescriptor interface { + Descriptor + + // Input is the input message descriptor. + Input() MessageDescriptor + // Output is the output message descriptor. + Output() MessageDescriptor + // IsStreamingClient reports whether the client streams multiple messages. + IsStreamingClient() bool + // IsStreamingServer reports whether the server streams multiple messages. + IsStreamingServer() bool + + isMethodDescriptor +} +type isMethodDescriptor interface{ ProtoType(MethodDescriptor) } + +// MethodDescriptors is a list of method declarations. +type MethodDescriptors interface { + // Len reports the number of methods. + Len() int + // Get returns the ith MethodDescriptor. It panics if out of bounds. + Get(i int) MethodDescriptor + // ByName returns the MethodDescriptor for a service method named s. + // It returns nil if not found. 
+ ByName(s Name) MethodDescriptor + + doNotImplement +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go new file mode 100644 index 000000000..f31981077 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go @@ -0,0 +1,285 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +import "google.golang.org/protobuf/encoding/protowire" + +// Enum is a reflection interface for a concrete enum value, +// which provides type information and a getter for the enum number. +// Enum does not provide a mutable API since enums are commonly backed by +// Go constants, which are not addressable. +type Enum interface { + // Descriptor returns enum descriptor, which contains only the protobuf + // type information for the enum. + Descriptor() EnumDescriptor + + // Type returns the enum type, which encapsulates both Go and protobuf + // type information. If the Go type information is not needed, + // it is recommended that the enum descriptor be used instead. + Type() EnumType + + // Number returns the enum value as an integer. + Number() EnumNumber +} + +// Message is a reflective interface for a concrete message value, +// encapsulating both type and value information for the message. +// +// Accessor/mutators for individual fields are keyed by FieldDescriptor. +// For non-extension fields, the descriptor must exactly match the +// field known by the parent message. +// For extension fields, the descriptor must implement ExtensionTypeDescriptor, +// extend the parent message (i.e., have the same message FullName), and +// be within the parent's extension range. +// +// Each field Value can be a scalar or a composite type (Message, List, or Map). +// See Value for the Go types associated with a FieldDescriptor. +// Providing a Value that is invalid or of an incorrect type panics. +type Message interface { + // Descriptor returns message descriptor, which contains only the protobuf + // type information for the message. + Descriptor() MessageDescriptor + + // Type returns the message type, which encapsulates both Go and protobuf + // type information. If the Go type information is not needed, + // it is recommended that the message descriptor be used instead. + Type() MessageType + + // New returns a newly allocated and mutable empty message. + New() Message + + // Interface unwraps the message reflection interface and + // returns the underlying ProtoMessage interface. + Interface() ProtoMessage + + // Range iterates over every populated field in an undefined order, + // calling f for each field descriptor and value encountered. + // Range returns immediately if f returns false. + // While iterating, mutating operations may only be performed + // on the current field descriptor. + Range(f func(FieldDescriptor, Value) bool) + + // Has reports whether a field is populated. + // + // Some fields have the property of nullability where it is possible to + // distinguish between the default value of a field and whether the field + // was explicitly populated with the default value. Singular message fields, + // member fields of a oneof, and proto2 scalar fields are nullable. Such + // fields are populated only if explicitly set. 
+ // + // In other cases (aside from the nullable cases above), + // a proto3 scalar field is populated if it contains a non-zero value, and + // a repeated field is populated if it is non-empty. + Has(FieldDescriptor) bool + + // Clear clears the field such that a subsequent Has call reports false. + // + // Clearing an extension field clears both the extension type and value + // associated with the given field number. + // + // Clear is a mutating operation and unsafe for concurrent use. + Clear(FieldDescriptor) + + // Get retrieves the value for a field. + // + // For unpopulated scalars, it returns the default value, where + // the default value of a bytes scalar is guaranteed to be a copy. + // For unpopulated composite types, it returns an empty, read-only view + // of the value; to obtain a mutable reference, use Mutable. + Get(FieldDescriptor) Value + + // Set stores the value for a field. + // + // For a field belonging to a oneof, it implicitly clears any other field + // that may be currently set within the same oneof. + // For extension fields, it implicitly stores the provided ExtensionType. + // When setting a composite type, it is unspecified whether the stored value + // aliases the source's memory in any way. If the composite value is an + // empty, read-only value, then it panics. + // + // Set is a mutating operation and unsafe for concurrent use. + Set(FieldDescriptor, Value) + + // Mutable returns a mutable reference to a composite type. + // + // If the field is unpopulated, it may allocate a composite value. + // For a field belonging to a oneof, it implicitly clears any other field + // that may be currently set within the same oneof. + // For extension fields, it implicitly stores the provided ExtensionType + // if not already stored. + // It panics if the field does not contain a composite type. + // + // Mutable is a mutating operation and unsafe for concurrent use. + Mutable(FieldDescriptor) Value + + // NewField returns a new value that is assignable to the field + // for the given descriptor. For scalars, this returns the default value. + // For lists, maps, and messages, this returns a new, empty, mutable value. + NewField(FieldDescriptor) Value + + // WhichOneof reports which field within the oneof is populated, + // returning nil if none are populated. + // It panics if the oneof descriptor does not belong to this message. + WhichOneof(OneofDescriptor) FieldDescriptor + + // GetUnknown retrieves the entire list of unknown fields. + // The caller may only mutate the contents of the RawFields + // if the mutated bytes are stored back into the message with SetUnknown. + GetUnknown() RawFields + + // SetUnknown stores an entire list of unknown fields. + // The raw fields must be syntactically valid according to the wire format. + // An implementation may panic if this is not the case. + // Once stored, the caller must not mutate the content of the RawFields. + // An empty RawFields may be passed to clear the fields. + // + // SetUnknown is a mutating operation and unsafe for concurrent use. + SetUnknown(RawFields) + + // IsValid reports whether the message is valid. + // + // An invalid message is an empty, read-only value. + // + // An invalid message often corresponds to a nil pointer of the concrete + // message type, but the details are implementation dependent. + // Validity is not part of the protobuf data model, and may not + // be preserved in marshaling or other operations. 
+ IsValid() bool + + // ProtoMethods returns optional fast-path implementions of various operations. + // This method may return nil. + // + // The returned methods type is identical to + // "google.golang.org/protobuf/runtime/protoiface".Methods. + // Consult the protoiface package documentation for details. + ProtoMethods() *methods +} + +// RawFields is the raw bytes for an ordered sequence of fields. +// Each field contains both the tag (representing field number and wire type), +// and also the wire data itself. +type RawFields []byte + +// IsValid reports whether b is syntactically correct wire format. +func (b RawFields) IsValid() bool { + for len(b) > 0 { + _, _, n := protowire.ConsumeField(b) + if n < 0 { + return false + } + b = b[n:] + } + return true +} + +// List is a zero-indexed, ordered list. +// The element Value type is determined by FieldDescriptor.Kind. +// Providing a Value that is invalid or of an incorrect type panics. +type List interface { + // Len reports the number of entries in the List. + // Get, Set, and Truncate panic with out of bound indexes. + Len() int + + // Get retrieves the value at the given index. + // It never returns an invalid value. + Get(int) Value + + // Set stores a value for the given index. + // When setting a composite type, it is unspecified whether the set + // value aliases the source's memory in any way. + // + // Set is a mutating operation and unsafe for concurrent use. + Set(int, Value) + + // Append appends the provided value to the end of the list. + // When appending a composite type, it is unspecified whether the appended + // value aliases the source's memory in any way. + // + // Append is a mutating operation and unsafe for concurrent use. + Append(Value) + + // AppendMutable appends a new, empty, mutable message value to the end + // of the list and returns it. + // It panics if the list does not contain a message type. + AppendMutable() Value + + // Truncate truncates the list to a smaller length. + // + // Truncate is a mutating operation and unsafe for concurrent use. + Truncate(int) + + // NewElement returns a new value for a list element. + // For enums, this returns the first enum value. + // For other scalars, this returns the zero value. + // For messages, this returns a new, empty, mutable value. + NewElement() Value + + // IsValid reports whether the list is valid. + // + // An invalid list is an empty, read-only value. + // + // Validity is not part of the protobuf data model, and may not + // be preserved in marshaling or other operations. + IsValid() bool +} + +// Map is an unordered, associative map. +// The entry MapKey type is determined by FieldDescriptor.MapKey.Kind. +// The entry Value type is determined by FieldDescriptor.MapValue.Kind. +// Providing a MapKey or Value that is invalid or of an incorrect type panics. +type Map interface { + // Len reports the number of elements in the map. + Len() int + + // Range iterates over every map entry in an undefined order, + // calling f for each key and value encountered. + // Range calls f Len times unless f returns false, which stops iteration. + // While iterating, mutating operations may only be performed + // on the current map key. + Range(f func(MapKey, Value) bool) + + // Has reports whether an entry with the given key is in the map. + Has(MapKey) bool + + // Clear clears the entry associated with they given key. + // The operation does nothing if there is no entry associated with the key. 
+ // + // Clear is a mutating operation and unsafe for concurrent use. + Clear(MapKey) + + // Get retrieves the value for an entry with the given key. + // It returns an invalid value for non-existent entries. + Get(MapKey) Value + + // Set stores the value for an entry with the given key. + // It panics when given a key or value that is invalid or the wrong type. + // When setting a composite type, it is unspecified whether the set + // value aliases the source's memory in any way. + // + // Set is a mutating operation and unsafe for concurrent use. + Set(MapKey, Value) + + // Mutable retrieves a mutable reference to the entry for the given key. + // If no entry exists for the key, it creates a new, empty, mutable value + // and stores it as the entry for the key. + // It panics if the map value is not a message. + Mutable(MapKey) Value + + // NewValue returns a new value assignable as a map value. + // For enums, this returns the first enum value. + // For other scalars, this returns the zero value. + // For messages, this returns a new, empty, mutable value. + NewValue() Value + + // IsValid reports whether the map is valid. + // + // An invalid map is an empty, read-only value. + // + // An invalid message often corresponds to a nil Go map value, + // but the details are implementation dependent. + // Validity is not part of the protobuf data model, and may not + // be preserved in marshaling or other operations. + IsValid() bool +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go new file mode 100644 index 000000000..7ced876f4 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go @@ -0,0 +1,60 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build purego || appengine +// +build purego appengine + +package protoreflect + +import "google.golang.org/protobuf/internal/pragma" + +type valueType int + +const ( + nilType valueType = iota + boolType + int32Type + int64Type + uint32Type + uint64Type + float32Type + float64Type + stringType + bytesType + enumType + ifaceType +) + +// value is a union where only one type can be represented at a time. +// This uses a distinct field for each type. This is type safe in Go, but +// occupies more memory than necessary (72B). +type value struct { + pragma.DoNotCompare // 0B + + typ valueType // 8B + num uint64 // 8B + str string // 16B + bin []byte // 24B + iface interface{} // 16B +} + +func valueOfString(v string) Value { + return Value{typ: stringType, str: v} +} +func valueOfBytes(v []byte) Value { + return Value{typ: bytesType, bin: v} +} +func valueOfIface(v interface{}) Value { + return Value{typ: ifaceType, iface: v} +} + +func (v Value) getString() string { + return v.str +} +func (v Value) getBytes() []byte { + return v.bin +} +func (v Value) getIface() interface{} { + return v.iface +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go new file mode 100644 index 000000000..eb7764c30 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -0,0 +1,436 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package protoreflect + +import ( + "fmt" + "math" +) + +// Value is a union where only one Go type may be set at a time. +// The Value is used to represent all possible values a field may take. +// The following shows which Go type is used to represent each proto Kind: +// +// ╔════════════╤═════════════════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠════════════╪═════════════════════════════════════╣ +// ║ bool │ BoolKind ║ +// ║ int32 │ Int32Kind, Sint32Kind, Sfixed32Kind ║ +// ║ int64 │ Int64Kind, Sint64Kind, Sfixed64Kind ║ +// ║ uint32 │ Uint32Kind, Fixed32Kind ║ +// ║ uint64 │ Uint64Kind, Fixed64Kind ║ +// ║ float32 │ FloatKind ║ +// ║ float64 │ DoubleKind ║ +// ║ string │ StringKind ║ +// ║ []byte │ BytesKind ║ +// ║ EnumNumber │ EnumKind ║ +// ║ Message │ MessageKind, GroupKind ║ +// ╚════════════╧═════════════════════════════════════╝ +// +// Multiple protobuf Kinds may be represented by a single Go type if the type +// can losslessly represent the information for the proto kind. For example, +// Int64Kind, Sint64Kind, and Sfixed64Kind are all represented by int64, +// but use different integer encoding methods. +// +// The List or Map types are used if the field cardinality is repeated. +// A field is a List if FieldDescriptor.IsList reports true. +// A field is a Map if FieldDescriptor.IsMap reports true. +// +// Converting to/from a Value and a concrete Go value panics on type mismatch. +// For example, ValueOf("hello").Int() panics because this attempts to +// retrieve an int64 from a string. +// +// List, Map, and Message Values are called "composite" values. +// +// A composite Value may alias (reference) memory at some location, +// such that changes to the Value updates the that location. +// A composite value acquired with a Mutable method, such as Message.Mutable, +// always references the source object. +// +// For example: +// // Append a 0 to a "repeated int32" field. +// // Since the Value returned by Mutable is guaranteed to alias +// // the source message, modifying the Value modifies the message. +// message.Mutable(fieldDesc).(List).Append(protoreflect.ValueOfInt32(0)) +// +// // Assign [0] to a "repeated int32" field by creating a new Value, +// // modifying it, and assigning it. +// list := message.NewField(fieldDesc).(List) +// list.Append(protoreflect.ValueOfInt32(0)) +// message.Set(fieldDesc, list) +// // ERROR: Since it is not defined whether Set aliases the source, +// // appending to the List here may or may not modify the message. +// list.Append(protoreflect.ValueOfInt32(0)) +// +// Some operations, such as Message.Get, may return an "empty, read-only" +// composite Value. Modifying an empty, read-only value panics. +type Value value + +// The protoreflect API uses a custom Value union type instead of interface{} +// to keep the future open for performance optimizations. Using an interface{} +// always incurs an allocation for primitives (e.g., int64) since it needs to +// be boxed on the heap (as interfaces can only contain pointers natively). +// Instead, we represent the Value union as a flat struct that internally keeps +// track of which type is set. Using unsafe, the Value union can be reduced +// down to 24B, which is identical in size to a slice. +// +// The latest compiler (Go1.11) currently suffers from some limitations: +// • With inlining, the compiler should be able to statically prove that +// only one of these switch cases are taken and inline one specific case. +// See https://golang.org/issue/22310. 
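+
+// exampleValueRoundTrip is an illustrative sketch, not part of the upstream
+// protoreflect API: it only demonstrates how a Value is typically constructed
+// with one of the ValueOf* constructors defined below and read back through a
+// typed accessor or Interface. The typed accessors panic on a type mismatch,
+// so callers are expected to know the field's kind (or switch on the result
+// of Interface). The function name is hypothetical.
+func exampleValueRoundTrip() interface{} {
+	v := ValueOfInt32(42) // stored in the unexported num field of the union
+	if !v.IsValid() {
+		return nil // zero Value: no type has been set
+	}
+	_ = v.Int()          // typed accessor; calling, e.g., v.Bytes() here would panic
+	return v.Interface() // yields int32(42), satisfying v == ValueOf(v.Interface())
+}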
+ +// ValueOf returns a Value initialized with the concrete value stored in v. +// This panics if the type does not match one of the allowed types in the +// Value union. +func ValueOf(v interface{}) Value { + switch v := v.(type) { + case nil: + return Value{} + case bool: + return ValueOfBool(v) + case int32: + return ValueOfInt32(v) + case int64: + return ValueOfInt64(v) + case uint32: + return ValueOfUint32(v) + case uint64: + return ValueOfUint64(v) + case float32: + return ValueOfFloat32(v) + case float64: + return ValueOfFloat64(v) + case string: + return ValueOfString(v) + case []byte: + return ValueOfBytes(v) + case EnumNumber: + return ValueOfEnum(v) + case Message, List, Map: + return valueOfIface(v) + case ProtoMessage: + panic(fmt.Sprintf("invalid proto.Message(%T) type, expected a protoreflect.Message type", v)) + default: + panic(fmt.Sprintf("invalid type: %T", v)) + } +} + +// ValueOfBool returns a new boolean value. +func ValueOfBool(v bool) Value { + if v { + return Value{typ: boolType, num: 1} + } else { + return Value{typ: boolType, num: 0} + } +} + +// ValueOfInt32 returns a new int32 value. +func ValueOfInt32(v int32) Value { + return Value{typ: int32Type, num: uint64(v)} +} + +// ValueOfInt64 returns a new int64 value. +func ValueOfInt64(v int64) Value { + return Value{typ: int64Type, num: uint64(v)} +} + +// ValueOfUint32 returns a new uint32 value. +func ValueOfUint32(v uint32) Value { + return Value{typ: uint32Type, num: uint64(v)} +} + +// ValueOfUint64 returns a new uint64 value. +func ValueOfUint64(v uint64) Value { + return Value{typ: uint64Type, num: v} +} + +// ValueOfFloat32 returns a new float32 value. +func ValueOfFloat32(v float32) Value { + return Value{typ: float32Type, num: uint64(math.Float64bits(float64(v)))} +} + +// ValueOfFloat64 returns a new float64 value. +func ValueOfFloat64(v float64) Value { + return Value{typ: float64Type, num: uint64(math.Float64bits(float64(v)))} +} + +// ValueOfString returns a new string value. +func ValueOfString(v string) Value { + return valueOfString(v) +} + +// ValueOfBytes returns a new bytes value. +func ValueOfBytes(v []byte) Value { + return valueOfBytes(v[:len(v):len(v)]) +} + +// ValueOfEnum returns a new enum value. +func ValueOfEnum(v EnumNumber) Value { + return Value{typ: enumType, num: uint64(v)} +} + +// ValueOfMessage returns a new Message value. +func ValueOfMessage(v Message) Value { + return valueOfIface(v) +} + +// ValueOfList returns a new List value. +func ValueOfList(v List) Value { + return valueOfIface(v) +} + +// ValueOfMap returns a new Map value. +func ValueOfMap(v Map) Value { + return valueOfIface(v) +} + +// IsValid reports whether v is populated with a value. +func (v Value) IsValid() bool { + return v.typ != nilType +} + +// Interface returns v as an interface{}. 
+
+// Invariant: v == ValueOf(v).Interface()
+func (v Value) Interface() interface{} {
+	switch v.typ {
+	case nilType:
+		return nil
+	case boolType:
+		return v.Bool()
+	case int32Type:
+		return int32(v.Int())
+	case int64Type:
+		return int64(v.Int())
+	case uint32Type:
+		return uint32(v.Uint())
+	case uint64Type:
+		return uint64(v.Uint())
+	case float32Type:
+		return float32(v.Float())
+	case float64Type:
+		return float64(v.Float())
+	case stringType:
+		return v.String()
+	case bytesType:
+		return v.Bytes()
+	case enumType:
+		return v.Enum()
+	default:
+		return v.getIface()
+	}
+}
+
+func (v Value) typeName() string {
+	switch v.typ {
+	case nilType:
+		return "nil"
+	case boolType:
+		return "bool"
+	case int32Type:
+		return "int32"
+	case int64Type:
+		return "int64"
+	case uint32Type:
+		return "uint32"
+	case uint64Type:
+		return "uint64"
+	case float32Type:
+		return "float32"
+	case float64Type:
+		return "float64"
+	case stringType:
+		return "string"
+	case bytesType:
+		return "bytes"
+	case enumType:
+		return "enum"
+	default:
+		switch v := v.getIface().(type) {
+		case Message:
+			return "message"
+		case List:
+			return "list"
+		case Map:
+			return "map"
+		default:
+			return fmt.Sprintf("<unknown: %T>", v)
+		}
+	}
+}
+
+func (v Value) panicMessage(what string) string {
+	return fmt.Sprintf("type mismatch: cannot convert %v to %s", v.typeName(), what)
+}
+
+// Bool returns v as a bool and panics if the type is not a bool.
+func (v Value) Bool() bool {
+	switch v.typ {
+	case boolType:
+		return v.num > 0
+	default:
+		panic(v.panicMessage("bool"))
+	}
+}
+
+// Int returns v as an int64 and panics if the type is not an int32 or int64.
+func (v Value) Int() int64 {
+	switch v.typ {
+	case int32Type, int64Type:
+		return int64(v.num)
+	default:
+		panic(v.panicMessage("int"))
+	}
+}
+
+// Uint returns v as a uint64 and panics if the type is not a uint32 or uint64.
+func (v Value) Uint() uint64 {
+	switch v.typ {
+	case uint32Type, uint64Type:
+		return uint64(v.num)
+	default:
+		panic(v.panicMessage("uint"))
+	}
+}
+
+// Float returns v as a float64 and panics if the type is not a float32 or float64.
+func (v Value) Float() float64 {
+	switch v.typ {
+	case float32Type, float64Type:
+		return math.Float64frombits(uint64(v.num))
+	default:
+		panic(v.panicMessage("float"))
+	}
+}
+
+// String returns v as a string. Since this method implements fmt.Stringer,
+// this returns the formatted string value for any non-string type.
+func (v Value) String() string {
+	switch v.typ {
+	case stringType:
+		return v.getString()
+	default:
+		return fmt.Sprint(v.Interface())
+	}
+}
+
+// Bytes returns v as a []byte and panics if the type is not a []byte.
+func (v Value) Bytes() []byte {
+	switch v.typ {
+	case bytesType:
+		return v.getBytes()
+	default:
+		panic(v.panicMessage("bytes"))
+	}
+}
+
+// Enum returns v as an EnumNumber and panics if the type is not an EnumNumber.
+func (v Value) Enum() EnumNumber {
+	switch v.typ {
+	case enumType:
+		return EnumNumber(v.num)
+	default:
+		panic(v.panicMessage("enum"))
+	}
+}
+
+// Message returns v as a Message and panics if the type is not a Message.
+func (v Value) Message() Message {
+	switch vi := v.getIface().(type) {
+	case Message:
+		return vi
+	default:
+		panic(v.panicMessage("message"))
+	}
+}
+
+// List returns v as a List and panics if the type is not a List.
+func (v Value) List() List {
+	switch vi := v.getIface().(type) {
+	case List:
+		return vi
+	default:
+		panic(v.panicMessage("list"))
+	}
+}
+
+// Map returns v as a Map and panics if the type is not a Map.
+func (v Value) Map() Map { + switch vi := v.getIface().(type) { + case Map: + return vi + default: + panic(v.panicMessage("map")) + } +} + +// MapKey returns v as a MapKey and panics for invalid MapKey types. +func (v Value) MapKey() MapKey { + switch v.typ { + case boolType, int32Type, int64Type, uint32Type, uint64Type, stringType: + return MapKey(v) + default: + panic(v.panicMessage("map key")) + } +} + +// MapKey is used to index maps, where the Go type of the MapKey must match +// the specified key Kind (see MessageDescriptor.IsMapEntry). +// The following shows what Go type is used to represent each proto Kind: +// +// ╔═════════╤═════════════════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═════════╪═════════════════════════════════════╣ +// ║ bool │ BoolKind ║ +// ║ int32 │ Int32Kind, Sint32Kind, Sfixed32Kind ║ +// ║ int64 │ Int64Kind, Sint64Kind, Sfixed64Kind ║ +// ║ uint32 │ Uint32Kind, Fixed32Kind ║ +// ║ uint64 │ Uint64Kind, Fixed64Kind ║ +// ║ string │ StringKind ║ +// ╚═════════╧═════════════════════════════════════╝ +// +// A MapKey is constructed and accessed through a Value: +// k := ValueOf("hash").MapKey() // convert string to MapKey +// s := k.String() // convert MapKey to string +// +// The MapKey is a strict subset of valid types used in Value; +// converting a Value to a MapKey with an invalid type panics. +type MapKey value + +// IsValid reports whether k is populated with a value. +func (k MapKey) IsValid() bool { + return Value(k).IsValid() +} + +// Interface returns k as an interface{}. +func (k MapKey) Interface() interface{} { + return Value(k).Interface() +} + +// Bool returns k as a bool and panics if the type is not a bool. +func (k MapKey) Bool() bool { + return Value(k).Bool() +} + +// Int returns k as a int64 and panics if the type is not a int32 or int64. +func (k MapKey) Int() int64 { + return Value(k).Int() +} + +// Uint returns k as a uint64 and panics if the type is not a uint32 or uint64. +func (k MapKey) Uint() uint64 { + return Value(k).Uint() +} + +// String returns k as a string. Since this method implements fmt.Stringer, +// this returns the formatted string value for any non-string type. +func (k MapKey) String() string { + return Value(k).String() +} + +// Value returns k as a Value. +func (k MapKey) Value() Value { + return Value(k) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go new file mode 100644 index 000000000..702ddf22a --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go @@ -0,0 +1,99 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !purego && !appengine +// +build !purego,!appengine + +package protoreflect + +import ( + "unsafe" + + "google.golang.org/protobuf/internal/pragma" +) + +type ( + stringHeader struct { + Data unsafe.Pointer + Len int + } + sliceHeader struct { + Data unsafe.Pointer + Len int + Cap int + } + ifaceHeader struct { + Type unsafe.Pointer + Data unsafe.Pointer + } +) + +var ( + nilType = typeOf(nil) + boolType = typeOf(*new(bool)) + int32Type = typeOf(*new(int32)) + int64Type = typeOf(*new(int64)) + uint32Type = typeOf(*new(uint32)) + uint64Type = typeOf(*new(uint64)) + float32Type = typeOf(*new(float32)) + float64Type = typeOf(*new(float64)) + stringType = typeOf(*new(string)) + bytesType = typeOf(*new([]byte)) + enumType = typeOf(*new(EnumNumber)) +) + +// typeOf returns a pointer to the Go type information. +// The pointer is comparable and equal if and only if the types are identical. +func typeOf(t interface{}) unsafe.Pointer { + return (*ifaceHeader)(unsafe.Pointer(&t)).Type +} + +// value is a union where only one type can be represented at a time. +// The struct is 24B large on 64-bit systems and requires the minimum storage +// necessary to represent each possible type. +// +// The Go GC needs to be able to scan variables containing pointers. +// As such, pointers and non-pointers cannot be intermixed. +type value struct { + pragma.DoNotCompare // 0B + + // typ stores the type of the value as a pointer to the Go type. + typ unsafe.Pointer // 8B + + // ptr stores the data pointer for a String, Bytes, or interface value. + ptr unsafe.Pointer // 8B + + // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or + // Enum value as a raw uint64. + // + // It is also used to store the length of a String or Bytes value; + // the capacity is ignored. + num uint64 // 8B +} + +func valueOfString(v string) Value { + p := (*stringHeader)(unsafe.Pointer(&v)) + return Value{typ: stringType, ptr: p.Data, num: uint64(len(v))} +} +func valueOfBytes(v []byte) Value { + p := (*sliceHeader)(unsafe.Pointer(&v)) + return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))} +} +func valueOfIface(v interface{}) Value { + p := (*ifaceHeader)(unsafe.Pointer(&v)) + return Value{typ: p.Type, ptr: p.Data} +} + +func (v Value) getString() (x string) { + *(*stringHeader)(unsafe.Pointer(&x)) = stringHeader{Data: v.ptr, Len: int(v.num)} + return x +} +func (v Value) getBytes() (x []byte) { + *(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)} + return x +} +func (v Value) getIface() (x interface{}) { + *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} + return x +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go new file mode 100644 index 000000000..59f024c44 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -0,0 +1,880 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protoregistry provides data structures to register and lookup +// protobuf descriptor types. +// +// The Files registry contains file descriptors and provides the ability +// to iterate over the files or lookup a specific descriptor within the files. 
+// Files only contains protobuf descriptors and has no understanding of Go +// type information that may be associated with each descriptor. +// +// The Types registry contains descriptor types for which there is a known +// Go type associated with that descriptor. It provides the ability to iterate +// over the registered types or lookup a type by name. +package protoregistry + +import ( + "fmt" + "os" + "strings" + "sync" + + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// conflictPolicy configures the policy for handling registration conflicts. +// +// It can be over-written at compile time with a linker-initialized variable: +// go build -ldflags "-X google.golang.org/protobuf/reflect/protoregistry.conflictPolicy=warn" +// +// It can be over-written at program execution with an environment variable: +// GOLANG_PROTOBUF_REGISTRATION_CONFLICT=warn ./main +// +// Neither of the above are covered by the compatibility promise and +// may be removed in a future release of this module. +var conflictPolicy = "panic" // "panic" | "warn" | "ignore" + +// ignoreConflict reports whether to ignore a registration conflict +// given the descriptor being registered and the error. +// It is a variable so that the behavior is easily overridden in another file. +var ignoreConflict = func(d protoreflect.Descriptor, err error) bool { + const env = "GOLANG_PROTOBUF_REGISTRATION_CONFLICT" + const faq = "https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict" + policy := conflictPolicy + if v := os.Getenv(env); v != "" { + policy = v + } + switch policy { + case "panic": + panic(fmt.Sprintf("%v\nSee %v\n", err, faq)) + case "warn": + fmt.Fprintf(os.Stderr, "WARNING: %v\nSee %v\n\n", err, faq) + return true + case "ignore": + return true + default: + panic("invalid " + env + " value: " + os.Getenv(env)) + } +} + +var globalMutex sync.RWMutex + +// GlobalFiles is a global registry of file descriptors. +var GlobalFiles *Files = new(Files) + +// GlobalTypes is the registry used by default for type lookups +// unless a local registry is provided by the user. +var GlobalTypes *Types = new(Types) + +// NotFound is a sentinel error value to indicate that the type was not found. +// +// Since registry lookup can happen in the critical performance path, resolvers +// must return this exact error value, not an error wrapping it. +var NotFound = errors.New("not found") + +// Files is a registry for looking up or iterating over files and the +// descriptors contained within them. +// The Find and Range methods are safe for concurrent use. +type Files struct { + // The map of descsByName contains: + // EnumDescriptor + // EnumValueDescriptor + // MessageDescriptor + // ExtensionDescriptor + // ServiceDescriptor + // *packageDescriptor + // + // Note that files are stored as a slice, since a package may contain + // multiple files. Only top-level declarations are registered. + // Note that enum values are in the top-level since that are in the same + // scope as the parent enum. + descsByName map[protoreflect.FullName]interface{} + filesByPath map[string][]protoreflect.FileDescriptor + numFiles int +} + +type packageDescriptor struct { + files []protoreflect.FileDescriptor +} + +// RegisterFile registers the provided file descriptor. 
+// +// If any descriptor within the file conflicts with the descriptor of any +// previously registered file (e.g., two enums with the same full name), +// then the file is not registered and an error is returned. +// +// It is permitted for multiple files to have the same file path. +func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { + if r == GlobalFiles { + globalMutex.Lock() + defer globalMutex.Unlock() + } + if r.descsByName == nil { + r.descsByName = map[protoreflect.FullName]interface{}{ + "": &packageDescriptor{}, + } + r.filesByPath = make(map[string][]protoreflect.FileDescriptor) + } + path := file.Path() + if prev := r.filesByPath[path]; len(prev) > 0 { + r.checkGenProtoConflict(path) + err := errors.New("file %q is already registered", file.Path()) + err = amendErrorWithCaller(err, prev[0], file) + if !(r == GlobalFiles && ignoreConflict(file, err)) { + return err + } + } + + for name := file.Package(); name != ""; name = name.Parent() { + switch prev := r.descsByName[name]; prev.(type) { + case nil, *packageDescriptor: + default: + err := errors.New("file %q has a package name conflict over %v", file.Path(), name) + err = amendErrorWithCaller(err, prev, file) + if r == GlobalFiles && ignoreConflict(file, err) { + err = nil + } + return err + } + } + var err error + var hasConflict bool + rangeTopLevelDescriptors(file, func(d protoreflect.Descriptor) { + if prev := r.descsByName[d.FullName()]; prev != nil { + hasConflict = true + err = errors.New("file %q has a name conflict over %v", file.Path(), d.FullName()) + err = amendErrorWithCaller(err, prev, file) + if r == GlobalFiles && ignoreConflict(d, err) { + err = nil + } + } + }) + if hasConflict { + return err + } + + for name := file.Package(); name != ""; name = name.Parent() { + if r.descsByName[name] == nil { + r.descsByName[name] = &packageDescriptor{} + } + } + p := r.descsByName[file.Package()].(*packageDescriptor) + p.files = append(p.files, file) + rangeTopLevelDescriptors(file, func(d protoreflect.Descriptor) { + r.descsByName[d.FullName()] = d + }) + r.filesByPath[path] = append(r.filesByPath[path], file) + r.numFiles++ + return nil +} + +// Several well-known types were hosted in the google.golang.org/genproto module +// but were later moved to this module. To avoid a weak dependency on the +// genproto module (and its relatively large set of transitive dependencies), +// we rely on a registration conflict to determine whether the genproto version +// is too old (i.e., does not contain aliases to the new type declarations). 
+func (r *Files) checkGenProtoConflict(path string) { + if r != GlobalFiles { + return + } + var prevPath string + const prevModule = "google.golang.org/genproto" + const prevVersion = "cb27e3aa (May 26th, 2020)" + switch path { + case "google/protobuf/field_mask.proto": + prevPath = prevModule + "/protobuf/field_mask" + case "google/protobuf/api.proto": + prevPath = prevModule + "/protobuf/api" + case "google/protobuf/type.proto": + prevPath = prevModule + "/protobuf/ptype" + case "google/protobuf/source_context.proto": + prevPath = prevModule + "/protobuf/source_context" + default: + return + } + pkgName := strings.TrimSuffix(strings.TrimPrefix(path, "google/protobuf/"), ".proto") + pkgName = strings.Replace(pkgName, "_", "", -1) + "pb" // e.g., "field_mask" => "fieldmaskpb" + currPath := "google.golang.org/protobuf/types/known/" + pkgName + panic(fmt.Sprintf(""+ + "duplicate registration of %q\n"+ + "\n"+ + "The generated definition for this file has moved:\n"+ + "\tfrom: %q\n"+ + "\tto: %q\n"+ + "A dependency on the %q module must\n"+ + "be at version %v or higher.\n"+ + "\n"+ + "Upgrade the dependency by running:\n"+ + "\tgo get -u %v\n", + path, prevPath, currPath, prevModule, prevVersion, prevPath)) +} + +// FindDescriptorByName looks up a descriptor by the full name. +// +// This returns (nil, NotFound) if not found. +func (r *Files) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + prefix := name + suffix := nameSuffix("") + for prefix != "" { + if d, ok := r.descsByName[prefix]; ok { + switch d := d.(type) { + case protoreflect.EnumDescriptor: + if d.FullName() == name { + return d, nil + } + case protoreflect.EnumValueDescriptor: + if d.FullName() == name { + return d, nil + } + case protoreflect.MessageDescriptor: + if d.FullName() == name { + return d, nil + } + if d := findDescriptorInMessage(d, suffix); d != nil && d.FullName() == name { + return d, nil + } + case protoreflect.ExtensionDescriptor: + if d.FullName() == name { + return d, nil + } + case protoreflect.ServiceDescriptor: + if d.FullName() == name { + return d, nil + } + if d := d.Methods().ByName(suffix.Pop()); d != nil && d.FullName() == name { + return d, nil + } + } + return nil, NotFound + } + prefix = prefix.Parent() + suffix = nameSuffix(name[len(prefix)+len("."):]) + } + return nil, NotFound +} + +func findDescriptorInMessage(md protoreflect.MessageDescriptor, suffix nameSuffix) protoreflect.Descriptor { + name := suffix.Pop() + if suffix == "" { + if ed := md.Enums().ByName(name); ed != nil { + return ed + } + for i := md.Enums().Len() - 1; i >= 0; i-- { + if vd := md.Enums().Get(i).Values().ByName(name); vd != nil { + return vd + } + } + if xd := md.Extensions().ByName(name); xd != nil { + return xd + } + if fd := md.Fields().ByName(name); fd != nil { + return fd + } + if od := md.Oneofs().ByName(name); od != nil { + return od + } + } + if md := md.Messages().ByName(name); md != nil { + if suffix == "" { + return md + } + return findDescriptorInMessage(md, suffix) + } + return nil +} + +type nameSuffix string + +func (s *nameSuffix) Pop() (name protoreflect.Name) { + if i := strings.IndexByte(string(*s), '.'); i >= 0 { + name, *s = protoreflect.Name((*s)[:i]), (*s)[i+1:] + } else { + name, *s = protoreflect.Name((*s)), "" + } + return name +} + +// FindFileByPath looks up a file by the path. +// +// This returns (nil, NotFound) if not found. 
+// This returns an error if multiple files have the same path. +func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + fds := r.filesByPath[path] + switch len(fds) { + case 0: + return nil, NotFound + case 1: + return fds[0], nil + default: + return nil, errors.New("multiple files named %q", path) + } +} + +// NumFiles reports the number of registered files, +// including duplicate files with the same name. +func (r *Files) NumFiles() int { + if r == nil { + return 0 + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + return r.numFiles +} + +// RangeFiles iterates over all registered files while f returns true. +// If multiple files have the same name, RangeFiles iterates over all of them. +// The iteration order is undefined. +func (r *Files) RangeFiles(f func(protoreflect.FileDescriptor) bool) { + if r == nil { + return + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + for _, files := range r.filesByPath { + for _, file := range files { + if !f(file) { + return + } + } + } +} + +// NumFilesByPackage reports the number of registered files in a proto package. +func (r *Files) NumFilesByPackage(name protoreflect.FullName) int { + if r == nil { + return 0 + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + p, ok := r.descsByName[name].(*packageDescriptor) + if !ok { + return 0 + } + return len(p.files) +} + +// RangeFilesByPackage iterates over all registered files in a given proto package +// while f returns true. The iteration order is undefined. +func (r *Files) RangeFilesByPackage(name protoreflect.FullName, f func(protoreflect.FileDescriptor) bool) { + if r == nil { + return + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + p, ok := r.descsByName[name].(*packageDescriptor) + if !ok { + return + } + for _, file := range p.files { + if !f(file) { + return + } + } +} + +// rangeTopLevelDescriptors iterates over all top-level descriptors in a file +// which will be directly entered into the registry. +func rangeTopLevelDescriptors(fd protoreflect.FileDescriptor, f func(protoreflect.Descriptor)) { + eds := fd.Enums() + for i := eds.Len() - 1; i >= 0; i-- { + f(eds.Get(i)) + vds := eds.Get(i).Values() + for i := vds.Len() - 1; i >= 0; i-- { + f(vds.Get(i)) + } + } + mds := fd.Messages() + for i := mds.Len() - 1; i >= 0; i-- { + f(mds.Get(i)) + } + xds := fd.Extensions() + for i := xds.Len() - 1; i >= 0; i-- { + f(xds.Get(i)) + } + sds := fd.Services() + for i := sds.Len() - 1; i >= 0; i-- { + f(sds.Get(i)) + } +} + +// MessageTypeResolver is an interface for looking up messages. +// +// A compliant implementation must deterministically return the same type +// if no error is encountered. +// +// The Types type implements this interface. +type MessageTypeResolver interface { + // FindMessageByName looks up a message by its full name. + // E.g., "google.protobuf.Any" + // + // This return (nil, NotFound) if not found. + FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) + + // FindMessageByURL looks up a message by a URL identifier. + // See documentation on google.protobuf.Any.type_url for the URL format. + // + // This returns (nil, NotFound) if not found. 
+ FindMessageByURL(url string) (protoreflect.MessageType, error) +} + +// ExtensionTypeResolver is an interface for looking up extensions. +// +// A compliant implementation must deterministically return the same type +// if no error is encountered. +// +// The Types type implements this interface. +type ExtensionTypeResolver interface { + // FindExtensionByName looks up a extension field by the field's full name. + // Note that this is the full name of the field as determined by + // where the extension is declared and is unrelated to the full name of the + // message being extended. + // + // This returns (nil, NotFound) if not found. + FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) + + // FindExtensionByNumber looks up a extension field by the field number + // within some parent message, identified by full name. + // + // This returns (nil, NotFound) if not found. + FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) +} + +var ( + _ MessageTypeResolver = (*Types)(nil) + _ ExtensionTypeResolver = (*Types)(nil) +) + +// Types is a registry for looking up or iterating over descriptor types. +// The Find and Range methods are safe for concurrent use. +type Types struct { + typesByName typesByName + extensionsByMessage extensionsByMessage + + numEnums int + numMessages int + numExtensions int +} + +type ( + typesByName map[protoreflect.FullName]interface{} + extensionsByMessage map[protoreflect.FullName]extensionsByNumber + extensionsByNumber map[protoreflect.FieldNumber]protoreflect.ExtensionType +) + +// RegisterMessage registers the provided message type. +// +// If a naming conflict occurs, the type is not registered and an error is returned. +func (r *Types) RegisterMessage(mt protoreflect.MessageType) error { + // Under rare circumstances getting the descriptor might recursively + // examine the registry, so fetch it before locking. + md := mt.Descriptor() + + if r == GlobalTypes { + globalMutex.Lock() + defer globalMutex.Unlock() + } + + if err := r.register("message", md, mt); err != nil { + return err + } + r.numMessages++ + return nil +} + +// RegisterEnum registers the provided enum type. +// +// If a naming conflict occurs, the type is not registered and an error is returned. +func (r *Types) RegisterEnum(et protoreflect.EnumType) error { + // Under rare circumstances getting the descriptor might recursively + // examine the registry, so fetch it before locking. + ed := et.Descriptor() + + if r == GlobalTypes { + globalMutex.Lock() + defer globalMutex.Unlock() + } + + if err := r.register("enum", ed, et); err != nil { + return err + } + r.numEnums++ + return nil +} + +// RegisterExtension registers the provided extension type. +// +// If a naming conflict occurs, the type is not registered and an error is returned. +func (r *Types) RegisterExtension(xt protoreflect.ExtensionType) error { + // Under rare circumstances getting the descriptor might recursively + // examine the registry, so fetch it before locking. + // + // A known case where this can happen: Fetching the TypeDescriptor for a + // legacy ExtensionDesc can consult the global registry. 
+ xd := xt.TypeDescriptor() + + if r == GlobalTypes { + globalMutex.Lock() + defer globalMutex.Unlock() + } + + field := xd.Number() + message := xd.ContainingMessage().FullName() + if prev := r.extensionsByMessage[message][field]; prev != nil { + err := errors.New("extension number %d is already registered on message %v", field, message) + err = amendErrorWithCaller(err, prev, xt) + if !(r == GlobalTypes && ignoreConflict(xd, err)) { + return err + } + } + + if err := r.register("extension", xd, xt); err != nil { + return err + } + if r.extensionsByMessage == nil { + r.extensionsByMessage = make(extensionsByMessage) + } + if r.extensionsByMessage[message] == nil { + r.extensionsByMessage[message] = make(extensionsByNumber) + } + r.extensionsByMessage[message][field] = xt + r.numExtensions++ + return nil +} + +func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interface{}) error { + name := desc.FullName() + prev := r.typesByName[name] + if prev != nil { + err := errors.New("%v %v is already registered", kind, name) + err = amendErrorWithCaller(err, prev, typ) + if !(r == GlobalTypes && ignoreConflict(desc, err)) { + return err + } + } + if r.typesByName == nil { + r.typesByName = make(typesByName) + } + r.typesByName[name] = typ + return nil +} + +// FindEnumByName looks up an enum by its full name. +// E.g., "google.protobuf.Field.Kind". +// +// This returns (nil, NotFound) if not found. +func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumType, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + if v := r.typesByName[enum]; v != nil { + if et, _ := v.(protoreflect.EnumType); et != nil { + return et, nil + } + return nil, errors.New("found wrong type: got %v, want enum", typeName(v)) + } + return nil, NotFound +} + +// FindMessageByName looks up a message by its full name, +// e.g. "google.protobuf.Any". +// +// This returns (nil, NotFound) if not found. +func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + if v := r.typesByName[message]; v != nil { + if mt, _ := v.(protoreflect.MessageType); mt != nil { + return mt, nil + } + return nil, errors.New("found wrong type: got %v, want message", typeName(v)) + } + return nil, NotFound +} + +// FindMessageByURL looks up a message by a URL identifier. +// See documentation on google.protobuf.Any.type_url for the URL format. +// +// This returns (nil, NotFound) if not found. +func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { + // This function is similar to FindMessageByName but + // truncates anything before and including '/' in the URL. + if r == nil { + return nil, NotFound + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + message := protoreflect.FullName(url) + if i := strings.LastIndexByte(url, '/'); i >= 0 { + message = message[i+len("/"):] + } + + if v := r.typesByName[message]; v != nil { + if mt, _ := v.(protoreflect.MessageType); mt != nil { + return mt, nil + } + return nil, errors.New("found wrong type: got %v, want message", typeName(v)) + } + return nil, NotFound +} + +// FindExtensionByName looks up a extension field by the field's full name. 
+// Note that this is the full name of the field as determined by +// where the extension is declared and is unrelated to the full name of the +// message being extended. +// +// This returns (nil, NotFound) if not found. +func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + if v := r.typesByName[field]; v != nil { + if xt, _ := v.(protoreflect.ExtensionType); xt != nil { + return xt, nil + } + + // MessageSet extensions are special in that the name of the extension + // is the name of the message type used to extend the MessageSet. + // This naming scheme is used by text and JSON serialization. + // + // This feature is protected by the ProtoLegacy flag since MessageSets + // are a proto1 feature that is long deprecated. + if flags.ProtoLegacy { + if _, ok := v.(protoreflect.MessageType); ok { + field := field.Append(messageset.ExtensionName) + if v := r.typesByName[field]; v != nil { + if xt, _ := v.(protoreflect.ExtensionType); xt != nil { + if messageset.IsMessageSetExtension(xt.TypeDescriptor()) { + return xt, nil + } + } + } + } + } + + return nil, errors.New("found wrong type: got %v, want extension", typeName(v)) + } + return nil, NotFound +} + +// FindExtensionByNumber looks up a extension field by the field number +// within some parent message, identified by full name. +// +// This returns (nil, NotFound) if not found. +func (r *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + if xt, ok := r.extensionsByMessage[message][field]; ok { + return xt, nil + } + return nil, NotFound +} + +// NumEnums reports the number of registered enums. +func (r *Types) NumEnums() int { + if r == nil { + return 0 + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + return r.numEnums +} + +// RangeEnums iterates over all registered enums while f returns true. +// Iteration order is undefined. +func (r *Types) RangeEnums(f func(protoreflect.EnumType) bool) { + if r == nil { + return + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + for _, typ := range r.typesByName { + if et, ok := typ.(protoreflect.EnumType); ok { + if !f(et) { + return + } + } + } +} + +// NumMessages reports the number of registered messages. +func (r *Types) NumMessages() int { + if r == nil { + return 0 + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + return r.numMessages +} + +// RangeMessages iterates over all registered messages while f returns true. +// Iteration order is undefined. +func (r *Types) RangeMessages(f func(protoreflect.MessageType) bool) { + if r == nil { + return + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + for _, typ := range r.typesByName { + if mt, ok := typ.(protoreflect.MessageType); ok { + if !f(mt) { + return + } + } + } +} + +// NumExtensions reports the number of registered extensions. +func (r *Types) NumExtensions() int { + if r == nil { + return 0 + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + return r.numExtensions +} + +// RangeExtensions iterates over all registered extensions while f returns true. +// Iteration order is undefined. 
+func (r *Types) RangeExtensions(f func(protoreflect.ExtensionType) bool) { + if r == nil { + return + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + for _, typ := range r.typesByName { + if xt, ok := typ.(protoreflect.ExtensionType); ok { + if !f(xt) { + return + } + } + } +} + +// NumExtensionsByMessage reports the number of registered extensions for +// a given message type. +func (r *Types) NumExtensionsByMessage(message protoreflect.FullName) int { + if r == nil { + return 0 + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + return len(r.extensionsByMessage[message]) +} + +// RangeExtensionsByMessage iterates over all registered extensions filtered +// by a given message type while f returns true. Iteration order is undefined. +func (r *Types) RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool) { + if r == nil { + return + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + for _, xt := range r.extensionsByMessage[message] { + if !f(xt) { + return + } + } +} + +func typeName(t interface{}) string { + switch t.(type) { + case protoreflect.EnumType: + return "enum" + case protoreflect.MessageType: + return "message" + case protoreflect.ExtensionType: + return "extension" + default: + return fmt.Sprintf("%T", t) + } +} + +func amendErrorWithCaller(err error, prev, curr interface{}) error { + prevPkg := goPackage(prev) + currPkg := goPackage(curr) + if prevPkg == "" || currPkg == "" || prevPkg == currPkg { + return err + } + return errors.New("%s\n\tpreviously from: %q\n\tcurrently from: %q", err, prevPkg, currPkg) +} + +func goPackage(v interface{}) string { + switch d := v.(type) { + case protoreflect.EnumType: + v = d.Descriptor() + case protoreflect.MessageType: + v = d.Descriptor() + case protoreflect.ExtensionType: + v = d.TypeDescriptor() + } + if d, ok := v.(protoreflect.Descriptor); ok { + v = d.ParentFile() + } + if d, ok := v.(interface{ GoPackagePath() string }); ok { + return d.GoPackagePath() + } + return "" +} diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go b/vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go new file mode 100644 index 000000000..c58727675 --- /dev/null +++ b/vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoiface + +type MessageV1 interface { + Reset() + String() string + ProtoMessage() +} + +type ExtensionRangeV1 struct { + Start, End int32 // both inclusive +} diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go new file mode 100644 index 000000000..44cf467d8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go @@ -0,0 +1,168 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protoiface contains types referenced or implemented by messages. +// +// WARNING: This package should only be imported by message implementations. +// The functionality found in this package should be accessed through +// higher-level abstractions provided by the proto package. 
+package protoiface + +import ( + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// Methods is a set of optional fast-path implementations of various operations. +type Methods = struct { + pragma.NoUnkeyedLiterals + + // Flags indicate support for optional features. + Flags SupportFlags + + // Size returns the size in bytes of the wire-format encoding of a message. + // Marshal must be provided if a custom Size is provided. + Size func(SizeInput) SizeOutput + + // Marshal formats a message in the wire-format encoding to the provided buffer. + // Size should be provided if a custom Marshal is provided. + // It must not return an error for a partial message. + Marshal func(MarshalInput) (MarshalOutput, error) + + // Unmarshal parses the wire-format encoding and merges the result into a message. + // It must not reset the target message or return an error for a partial message. + Unmarshal func(UnmarshalInput) (UnmarshalOutput, error) + + // Merge merges the contents of a source message into a destination message. + Merge func(MergeInput) MergeOutput + + // CheckInitialized returns an error if any required fields in the message are not set. + CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error) +} + +// SupportFlags indicate support for optional features. +type SupportFlags = uint64 + +const ( + // SupportMarshalDeterministic reports whether MarshalOptions.Deterministic is supported. + SupportMarshalDeterministic SupportFlags = 1 << iota + + // SupportUnmarshalDiscardUnknown reports whether UnmarshalOptions.DiscardUnknown is supported. + SupportUnmarshalDiscardUnknown +) + +// SizeInput is input to the Size method. +type SizeInput = struct { + pragma.NoUnkeyedLiterals + + Message protoreflect.Message + Flags MarshalInputFlags +} + +// SizeOutput is output from the Size method. +type SizeOutput = struct { + pragma.NoUnkeyedLiterals + + Size int +} + +// MarshalInput is input to the Marshal method. +type MarshalInput = struct { + pragma.NoUnkeyedLiterals + + Message protoreflect.Message + Buf []byte // output is appended to this buffer + Flags MarshalInputFlags +} + +// MarshalOutput is output from the Marshal method. +type MarshalOutput = struct { + pragma.NoUnkeyedLiterals + + Buf []byte // contains marshaled message +} + +// MarshalInputFlags configure the marshaler. +// Most flags correspond to fields in proto.MarshalOptions. +type MarshalInputFlags = uint8 + +const ( + MarshalDeterministic MarshalInputFlags = 1 << iota + MarshalUseCachedSize +) + +// UnmarshalInput is input to the Unmarshal method. +type UnmarshalInput = struct { + pragma.NoUnkeyedLiterals + + Message protoreflect.Message + Buf []byte // input buffer + Flags UnmarshalInputFlags + Resolver interface { + FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) + FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) + } + Depth int +} + +// UnmarshalOutput is output from the Unmarshal method. +type UnmarshalOutput = struct { + pragma.NoUnkeyedLiterals + + Flags UnmarshalOutputFlags +} + +// UnmarshalInputFlags configure the unmarshaler. +// Most flags correspond to fields in proto.UnmarshalOptions. +type UnmarshalInputFlags = uint8 + +const ( + UnmarshalDiscardUnknown UnmarshalInputFlags = 1 << iota +) + +// UnmarshalOutputFlags are output from the Unmarshal method. 
+type UnmarshalOutputFlags = uint8 + +const ( + // UnmarshalInitialized may be set on return if all required fields are known to be set. + // If unset, then it does not necessarily indicate that the message is uninitialized, + // only that its status could not be confirmed. + UnmarshalInitialized UnmarshalOutputFlags = 1 << iota +) + +// MergeInput is input to the Merge method. +type MergeInput = struct { + pragma.NoUnkeyedLiterals + + Source protoreflect.Message + Destination protoreflect.Message +} + +// MergeOutput is output from the Merge method. +type MergeOutput = struct { + pragma.NoUnkeyedLiterals + + Flags MergeOutputFlags +} + +// MergeOutputFlags are output from the Merge method. +type MergeOutputFlags = uint8 + +const ( + // MergeComplete reports whether the merge was performed. + // If unset, the merger must have made no changes to the destination. + MergeComplete MergeOutputFlags = 1 << iota +) + +// CheckInitializedInput is input to the CheckInitialized method. +type CheckInitializedInput = struct { + pragma.NoUnkeyedLiterals + + Message protoreflect.Message +} + +// CheckInitializedOutput is output from the CheckInitialized method. +type CheckInitializedOutput = struct { + pragma.NoUnkeyedLiterals +} diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go new file mode 100644 index 000000000..4a1ab7fb3 --- /dev/null +++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go @@ -0,0 +1,44 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protoimpl contains the default implementation for messages +// generated by protoc-gen-go. +// +// WARNING: This package should only ever be imported by generated messages. +// The compatibility agreement covers nothing except for functionality needed +// to keep existing generated messages operational. Breakages that occur due +// to unauthorized usages of this package are not the author's responsibility. +package protoimpl + +import ( + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/filetype" + "google.golang.org/protobuf/internal/impl" +) + +// UnsafeEnabled specifies whether package unsafe can be used. +const UnsafeEnabled = impl.UnsafeEnabled + +type ( + // Types used by generated code in init functions. + DescBuilder = filedesc.Builder + TypeBuilder = filetype.Builder + + // Types used by generated code to implement EnumType, MessageType, and ExtensionType. + EnumInfo = impl.EnumInfo + MessageInfo = impl.MessageInfo + ExtensionInfo = impl.ExtensionInfo + + // Types embedded in generated messages. + MessageState = impl.MessageState + SizeCache = impl.SizeCache + WeakFields = impl.WeakFields + UnknownFields = impl.UnknownFields + ExtensionFields = impl.ExtensionFields + ExtensionFieldV1 = impl.ExtensionField + + Pointer = impl.Pointer +) + +var X impl.Export diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go new file mode 100644 index 000000000..ff094e1ba --- /dev/null +++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go @@ -0,0 +1,56 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package protoimpl + +import ( + "google.golang.org/protobuf/internal/version" +) + +const ( + // MaxVersion is the maximum supported version for generated .pb.go files. + // It is always the current version of the module. + MaxVersion = version.Minor + + // GenVersion is the runtime version required by generated .pb.go files. + // This is incremented when generated code relies on new functionality + // in the runtime. + GenVersion = 20 + + // MinVersion is the minimum supported version for generated .pb.go files. + // This is incremented when the runtime drops support for old code. + MinVersion = 0 +) + +// EnforceVersion is used by code generated by protoc-gen-go +// to statically enforce minimum and maximum versions of this package. +// A compilation failure implies either that: +// * the runtime package is too old and needs to be updated OR +// * the generated code is too old and needs to be regenerated. +// +// The runtime package can be upgraded by running: +// go get google.golang.org/protobuf +// +// The generated code can be regenerated by running: +// protoc --go_out=${PROTOC_GEN_GO_ARGS} ${PROTO_FILES} +// +// Example usage by generated code: +// const ( +// // Verify that this generated code is sufficiently up-to-date. +// _ = protoimpl.EnforceVersion(genVersion - protoimpl.MinVersion) +// // Verify that runtime/protoimpl is sufficiently up-to-date. +// _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - genVersion) +// ) +// +// The genVersion is the current minor version used to generated the code. +// This compile-time check relies on negative integer overflow of a uint +// being a compilation failure (guaranteed by the Go specification). +type EnforceVersion uint + +// This enforces the following invariant: +// MinVersion ≤ GenVersion ≤ MaxVersion +const ( + _ = EnforceVersion(GenVersion - MinVersion) + _ = EnforceVersion(MaxVersion - GenVersion) +) diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go new file mode 100644 index 000000000..abe4ab511 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -0,0 +1,3957 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// The messages in this file describe the definitions found in .proto files. +// A valid .proto file can be translated directly to a FileDescriptorProto +// without any other information (e.g. without reading its imports). + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/descriptor.proto + +package descriptorpb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 // Length-delimited aggregate. + // New in version 2. + FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 // Uses ZigZag encoding. + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 // Uses ZigZag encoding. +) + +// Enum value maps for FieldDescriptorProto_Type. 
+var ( + FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", + } + FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, + } +) + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} + +func (x FieldDescriptorProto_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() +} + +func (FieldDescriptorProto_Type) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[0] +} + +func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldDescriptorProto_Type) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(num) + return nil +} + +// Deprecated: Use FieldDescriptorProto_Type.Descriptor instead. +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{4, 0} +} + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +// Enum value maps for FieldDescriptorProto_Label. +var ( + FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", + } + FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, + } +) + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} + +func (x FieldDescriptorProto_Label) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() +} + +func (FieldDescriptorProto_Label) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[1] +} + +func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FieldDescriptorProto_Label) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(num) + return nil +} + +// Deprecated: Use FieldDescriptorProto_Label.Descriptor instead. +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 // Generate complete code for parsing, serialization, + // etc. + FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 // Use ReflectionOps to implement these methods. + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 // Generate code using MessageLite and the lite runtime. +) + +// Enum value maps for FileOptions_OptimizeMode. +var ( + FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", + } + FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, + } +) + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} + +func (x FileOptions_OptimizeMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() +} + +func (FileOptions_OptimizeMode) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[2] +} + +func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FileOptions_OptimizeMode) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(num) + return nil +} + +// Deprecated: Use FileOptions_OptimizeMode.Descriptor instead. +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{10, 0} +} + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +// Enum value maps for FieldOptions_CType. +var ( + FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", + } + FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, + } +) + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} + +func (x FieldOptions_CType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() +} + +func (FieldOptions_CType) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[3] +} + +func (x FieldOptions_CType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FieldOptions_CType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldOptions_CType(num) + return nil +} + +// Deprecated: Use FieldOptions_CType.Descriptor instead. +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 0} +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. + FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +// Enum value maps for FieldOptions_JSType. +var ( + FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", + } + FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, + } +) + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} + +func (x FieldOptions_JSType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() +} + +func (FieldOptions_JSType) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[4] +} + +func (x FieldOptions_JSType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldOptions_JSType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldOptions_JSType(num) + return nil +} + +// Deprecated: Use FieldOptions_JSType.Descriptor instead. +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 1} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. +type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 // implies idempotent + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 // idempotent, but may have side effects +) + +// Enum value maps for MethodOptions_IdempotencyLevel. 
+var ( + MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", + } + MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, + } +) + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} + +func (x MethodOptions_IdempotencyLevel) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() +} + +func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[5] +} + +func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(num) + return nil +} + +// Deprecated: Use MethodOptions_IdempotencyLevel.Descriptor instead. +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +type FileDescriptorSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` +} + +func (x *FileDescriptorSet) Reset() { + *x = FileDescriptorSet{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileDescriptorSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileDescriptorSet) ProtoMessage() {} + +func (x *FileDescriptorSet) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileDescriptorSet.ProtoReflect.Descriptor instead. +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0} +} + +func (x *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if x != nil { + return x.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // file name, relative to root of source tree + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` // e.g. "foo", "foo.bar", etc. + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. 
+ PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` +} + +func (x *FileDescriptorProto) Reset() { + *x = FileDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileDescriptorProto) ProtoMessage() {} + +func (x *FileDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileDescriptorProto.ProtoReflect.Descriptor instead. 
+func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{1} +} + +func (x *FileDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *FileDescriptorProto) GetPackage() string { + if x != nil && x.Package != nil { + return *x.Package + } + return "" +} + +func (x *FileDescriptorProto) GetDependency() []string { + if x != nil { + return x.Dependency + } + return nil +} + +func (x *FileDescriptorProto) GetPublicDependency() []int32 { + if x != nil { + return x.PublicDependency + } + return nil +} + +func (x *FileDescriptorProto) GetWeakDependency() []int32 { + if x != nil { + return x.WeakDependency + } + return nil +} + +func (x *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if x != nil { + return x.MessageType + } + return nil +} + +func (x *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if x != nil { + return x.EnumType + } + return nil +} + +func (x *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if x != nil { + return x.Service + } + return nil +} + +func (x *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if x != nil { + return x.Extension + } + return nil +} + +func (x *FileDescriptorProto) GetOptions() *FileOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if x != nil { + return x.SourceCodeInfo + } + return nil +} + +func (x *FileDescriptorProto) GetSyntax() string { + if x != nil && x.Syntax != nil { + return *x.Syntax + } + return "" +} + +// Describes a message type. +type DescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. 
+ ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` +} + +func (x *DescriptorProto) Reset() { + *x = DescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescriptorProto) ProtoMessage() {} + +func (x *DescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescriptorProto.ProtoReflect.Descriptor instead. +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{2} +} + +func (x *DescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *DescriptorProto) GetField() []*FieldDescriptorProto { + if x != nil { + return x.Field + } + return nil +} + +func (x *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if x != nil { + return x.Extension + } + return nil +} + +func (x *DescriptorProto) GetNestedType() []*DescriptorProto { + if x != nil { + return x.NestedType + } + return nil +} + +func (x *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if x != nil { + return x.EnumType + } + return nil +} + +func (x *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if x != nil { + return x.ExtensionRange + } + return nil +} + +func (x *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if x != nil { + return x.OneofDecl + } + return nil +} + +func (x *DescriptorProto) GetOptions() *MessageOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if x != nil { + return x.ReservedRange + } + return nil +} + +func (x *DescriptorProto) GetReservedName() []string { + if x != nil { + return x.ReservedName + } + return nil +} + +type ExtensionRangeOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +func (x *ExtensionRangeOptions) Reset() { + *x = ExtensionRangeOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionRangeOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionRangeOptions) ProtoMessage() {} + +func (x *ExtensionRangeOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionRangeOptions.ProtoReflect.Descriptor instead. 
+func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{3} +} + +func (x *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // If true, this is a proto3 "optional". When a proto3 field is optional, it + // tracks presence regardless of field type. + // + // When proto3_optional is true, this field must be belong to a oneof to + // signal to old proto3 clients that presence is tracked for this field. This + // oneof is known as a "synthetic" oneof, and this field must be its sole + // member (each proto3 optional field gets its own synthetic oneof). Synthetic + // oneofs exist in the descriptor only, and do not generate any API. Synthetic + // oneofs must be ordered after all "real" oneofs. + // + // For message fields, proto3_optional doesn't create any semantic change, + // since non-repeated message fields always track presence. 
However it still + // indicates the semantic detail of whether the user wrote "optional" or not. + // This can be useful for round-tripping the .proto file. For consistency we + // give message fields a synthetic oneof also, even though it is not required + // to track presence. This is especially important because the parser can't + // tell if a field is a message or an enum, so it must always create a + // synthetic oneof. + // + // Proto2 optional fields do not set this flag, because they already indicate + // optional with `LABEL_OPTIONAL`. + Proto3Optional *bool `protobuf:"varint,17,opt,name=proto3_optional,json=proto3Optional" json:"proto3_optional,omitempty"` +} + +func (x *FieldDescriptorProto) Reset() { + *x = FieldDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldDescriptorProto) ProtoMessage() {} + +func (x *FieldDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldDescriptorProto.ProtoReflect.Descriptor instead. +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{4} +} + +func (x *FieldDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *FieldDescriptorProto) GetNumber() int32 { + if x != nil && x.Number != nil { + return *x.Number + } + return 0 +} + +func (x *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if x != nil && x.Label != nil { + return *x.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (x *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if x != nil && x.Type != nil { + return *x.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (x *FieldDescriptorProto) GetTypeName() string { + if x != nil && x.TypeName != nil { + return *x.TypeName + } + return "" +} + +func (x *FieldDescriptorProto) GetExtendee() string { + if x != nil && x.Extendee != nil { + return *x.Extendee + } + return "" +} + +func (x *FieldDescriptorProto) GetDefaultValue() string { + if x != nil && x.DefaultValue != nil { + return *x.DefaultValue + } + return "" +} + +func (x *FieldDescriptorProto) GetOneofIndex() int32 { + if x != nil && x.OneofIndex != nil { + return *x.OneofIndex + } + return 0 +} + +func (x *FieldDescriptorProto) GetJsonName() string { + if x != nil && x.JsonName != nil { + return *x.JsonName + } + return "" +} + +func (x *FieldDescriptorProto) GetOptions() *FieldOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *FieldDescriptorProto) GetProto3Optional() bool { + if x != nil && x.Proto3Optional != nil { + return *x.Proto3Optional + } + return false +} + +// Describes a oneof. 
+type OneofDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` +} + +func (x *OneofDescriptorProto) Reset() { + *x = OneofDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OneofDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OneofDescriptorProto) ProtoMessage() {} + +func (x *OneofDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OneofDescriptorProto.ProtoReflect.Descriptor instead. +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{5} +} + +func (x *OneofDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *OneofDescriptorProto) GetOptions() *OneofOptions { + if x != nil { + return x.Options + } + return nil +} + +// Describes an enum type. +type EnumDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. + ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` +} + +func (x *EnumDescriptorProto) Reset() { + *x = EnumDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumDescriptorProto) ProtoMessage() {} + +func (x *EnumDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumDescriptorProto.ProtoReflect.Descriptor instead. 
+func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{6} +} + +func (x *EnumDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if x != nil { + return x.Value + } + return nil +} + +func (x *EnumDescriptorProto) GetOptions() *EnumOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if x != nil { + return x.ReservedRange + } + return nil +} + +func (x *EnumDescriptorProto) GetReservedName() []string { + if x != nil { + return x.ReservedName + } + return nil +} + +// Describes a value within an enum. +type EnumValueDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` +} + +func (x *EnumValueDescriptorProto) Reset() { + *x = EnumValueDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumValueDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumValueDescriptorProto) ProtoMessage() {} + +func (x *EnumValueDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumValueDescriptorProto.ProtoReflect.Descriptor instead. +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{7} +} + +func (x *EnumValueDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *EnumValueDescriptorProto) GetNumber() int32 { + if x != nil && x.Number != nil { + return *x.Number + } + return 0 +} + +func (x *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if x != nil { + return x.Options + } + return nil +} + +// Describes a service. 
+type ServiceDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` +} + +func (x *ServiceDescriptorProto) Reset() { + *x = ServiceDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceDescriptorProto) ProtoMessage() {} + +func (x *ServiceDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceDescriptorProto.ProtoReflect.Descriptor instead. +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{8} +} + +func (x *ServiceDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if x != nil { + return x.Method + } + return nil +} + +func (x *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if x != nil { + return x.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. + InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` +} + +// Default values for MethodDescriptorProto fields. 
+const ( + Default_MethodDescriptorProto_ClientStreaming = bool(false) + Default_MethodDescriptorProto_ServerStreaming = bool(false) +) + +func (x *MethodDescriptorProto) Reset() { + *x = MethodDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MethodDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MethodDescriptorProto) ProtoMessage() {} + +func (x *MethodDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MethodDescriptorProto.ProtoReflect.Descriptor instead. +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{9} +} + +func (x *MethodDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *MethodDescriptorProto) GetInputType() string { + if x != nil && x.InputType != nil { + return *x.InputType + } + return "" +} + +func (x *MethodDescriptorProto) GetOutputType() string { + if x != nil && x.OutputType != nil { + return *x.OutputType + } + return "" +} + +func (x *MethodDescriptorProto) GetOptions() *MethodOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *MethodDescriptorProto) GetClientStreaming() bool { + if x != nil && x.ClientStreaming != nil { + return *x.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (x *MethodDescriptorProto) GetServerStreaming() bool { + if x != nil && x.ServerStreaming != nil { + return *x.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. 
+ JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. + // + // Deprecated: Do not use. + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=1" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. 
+ ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be + // used for determining the namespace. + PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for FileOptions fields. +const ( + Default_FileOptions_JavaMultipleFiles = bool(false) + Default_FileOptions_JavaStringCheckUtf8 = bool(false) + Default_FileOptions_OptimizeFor = FileOptions_SPEED + Default_FileOptions_CcGenericServices = bool(false) + Default_FileOptions_JavaGenericServices = bool(false) + Default_FileOptions_PyGenericServices = bool(false) + Default_FileOptions_PhpGenericServices = bool(false) + Default_FileOptions_Deprecated = bool(false) + Default_FileOptions_CcEnableArenas = bool(true) +) + +func (x *FileOptions) Reset() { + *x = FileOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileOptions) ProtoMessage() {} + +func (x *FileOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileOptions.ProtoReflect.Descriptor instead. 
+func (*FileOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{10} +} + +func (x *FileOptions) GetJavaPackage() string { + if x != nil && x.JavaPackage != nil { + return *x.JavaPackage + } + return "" +} + +func (x *FileOptions) GetJavaOuterClassname() string { + if x != nil && x.JavaOuterClassname != nil { + return *x.JavaOuterClassname + } + return "" +} + +func (x *FileOptions) GetJavaMultipleFiles() bool { + if x != nil && x.JavaMultipleFiles != nil { + return *x.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +// Deprecated: Do not use. +func (x *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if x != nil && x.JavaGenerateEqualsAndHash != nil { + return *x.JavaGenerateEqualsAndHash + } + return false +} + +func (x *FileOptions) GetJavaStringCheckUtf8() bool { + if x != nil && x.JavaStringCheckUtf8 != nil { + return *x.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (x *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if x != nil && x.OptimizeFor != nil { + return *x.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (x *FileOptions) GetGoPackage() string { + if x != nil && x.GoPackage != nil { + return *x.GoPackage + } + return "" +} + +func (x *FileOptions) GetCcGenericServices() bool { + if x != nil && x.CcGenericServices != nil { + return *x.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (x *FileOptions) GetJavaGenericServices() bool { + if x != nil && x.JavaGenericServices != nil { + return *x.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (x *FileOptions) GetPyGenericServices() bool { + if x != nil && x.PyGenericServices != nil { + return *x.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (x *FileOptions) GetPhpGenericServices() bool { + if x != nil && x.PhpGenericServices != nil { + return *x.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (x *FileOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (x *FileOptions) GetCcEnableArenas() bool { + if x != nil && x.CcEnableArenas != nil { + return *x.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (x *FileOptions) GetObjcClassPrefix() string { + if x != nil && x.ObjcClassPrefix != nil { + return *x.ObjcClassPrefix + } + return "" +} + +func (x *FileOptions) GetCsharpNamespace() string { + if x != nil && x.CsharpNamespace != nil { + return *x.CsharpNamespace + } + return "" +} + +func (x *FileOptions) GetSwiftPrefix() string { + if x != nil && x.SwiftPrefix != nil { + return *x.SwiftPrefix + } + return "" +} + +func (x *FileOptions) GetPhpClassPrefix() string { + if x != nil && x.PhpClassPrefix != nil { + return *x.PhpClassPrefix + } + return "" +} + +func (x *FileOptions) GetPhpNamespace() string { + if x != nil && x.PhpNamespace != nil { + return *x.PhpNamespace + } + return "" +} + +func (x *FileOptions) GetPhpMetadataNamespace() string { + if x != nil && x.PhpMetadataNamespace != nil { + return *x.PhpMetadataNamespace + } + return "" +} + +func (x *FileOptions) GetRubyPackage() string { + if x != nil && x.RubyPackage != nil { + return *x.RubyPackage + } + return "" +} + +func (x *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type 
MessageOptions struct {
+	state           protoimpl.MessageState
+	sizeCache       protoimpl.SizeCache
+	unknownFields   protoimpl.UnknownFields
+	extensionFields protoimpl.ExtensionFields
+
+	// Set true to use the old proto1 MessageSet wire format for extensions.
+	// This is provided for backwards-compatibility with the MessageSet wire
+	// format. You should not use this for any other reason: It's less
+	// efficient, has fewer features, and is more complicated.
+	//
+	// The message must be defined exactly as follows:
+	//   message Foo {
+	//     option message_set_wire_format = true;
+	//     extensions 4 to max;
+	//   }
+	// Note that the message cannot have any defined fields; MessageSets only
+	// have extensions.
+	//
+	// All extensions of your type must be singular messages; e.g. they cannot
+	// be int32s, enums, or repeated messages.
+	//
+	// Because this is an option, the above two restrictions are not enforced by
+	// the protocol compiler.
+	MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"`
+	// Disables the generation of the standard "descriptor()" accessor, which can
+	// conflict with a field of the same name. This is meant to make migration
+	// from proto1 easier; new code should avoid fields named "descriptor".
+	NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"`
+	// Is this message deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the message, or it will be completely ignored; in the very least,
+	// this is a formalization for deprecating messages.
+	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// Whether the message is an automatically generated map entry type for the
+	// maps field.
+	//
+	// For maps fields:
+	//     map<KeyType, ValueType> map_field = 1;
+	// The parsed descriptor looks like:
+	//     message MapFieldEntry {
+	//         option map_entry = true;
+	//         optional KeyType key = 1;
+	//         optional ValueType value = 2;
+	//     }
+	//     repeated MapFieldEntry map_field = 1;
+	//
+	// Implementations may choose not to generate the map_entry=true message, but
+	// use a native map in the target language to hold the keys and values.
+	// The reflection APIs in such implementations still need to work as
+	// if the field is a repeated message field.
+	//
+	// NOTE: Do not set the option in .proto files. Always use the maps syntax
+	// instead. The option should only be implicitly set by the proto compiler
+	// parser.
+	MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+}
+
+// Default values for MessageOptions fields.
+const ( + Default_MessageOptions_MessageSetWireFormat = bool(false) + Default_MessageOptions_NoStandardDescriptorAccessor = bool(false) + Default_MessageOptions_Deprecated = bool(false) +) + +func (x *MessageOptions) Reset() { + *x = MessageOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MessageOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageOptions) ProtoMessage() {} + +func (x *MessageOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MessageOptions.ProtoReflect.Descriptor instead. +func (*MessageOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{11} +} + +func (x *MessageOptions) GetMessageSetWireFormat() bool { + if x != nil && x.MessageSetWireFormat != nil { + return *x.MessageSetWireFormat + } + return Default_MessageOptions_MessageSetWireFormat +} + +func (x *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if x != nil && x.NoStandardDescriptorAccessor != nil { + return *x.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (x *MessageOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (x *MessageOptions) GetMapEntry() bool { + if x != nil && x.MapEntry != nil { + return *x.MapEntry + } + return false +} + +func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. 
The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for FieldOptions fields. 
+const ( + Default_FieldOptions_Ctype = FieldOptions_STRING + Default_FieldOptions_Jstype = FieldOptions_JS_NORMAL + Default_FieldOptions_Lazy = bool(false) + Default_FieldOptions_Deprecated = bool(false) + Default_FieldOptions_Weak = bool(false) +) + +func (x *FieldOptions) Reset() { + *x = FieldOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldOptions) ProtoMessage() {} + +func (x *FieldOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldOptions.ProtoReflect.Descriptor instead. +func (*FieldOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12} +} + +func (x *FieldOptions) GetCtype() FieldOptions_CType { + if x != nil && x.Ctype != nil { + return *x.Ctype + } + return Default_FieldOptions_Ctype +} + +func (x *FieldOptions) GetPacked() bool { + if x != nil && x.Packed != nil { + return *x.Packed + } + return false +} + +func (x *FieldOptions) GetJstype() FieldOptions_JSType { + if x != nil && x.Jstype != nil { + return *x.Jstype + } + return Default_FieldOptions_Jstype +} + +func (x *FieldOptions) GetLazy() bool { + if x != nil && x.Lazy != nil { + return *x.Lazy + } + return Default_FieldOptions_Lazy +} + +func (x *FieldOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (x *FieldOptions) GetWeak() bool { + if x != nil && x.Weak != nil { + return *x.Weak + } + return Default_FieldOptions_Weak +} + +func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +func (x *OneofOptions) Reset() { + *x = OneofOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OneofOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OneofOptions) ProtoMessage() {} + +func (x *OneofOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OneofOptions.ProtoReflect.Descriptor instead. 
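
A related sketch for the optional fields above (same assumed packages; values are illustrative): the getter reports the effective value, while the pointer field separately tracks presence, which matters for options such as packed where "explicitly false" and "unset" differ.

    fo := &descriptorpb.FieldOptions{Packed: proto.Bool(false)} // explicitly disable packed encoding
    _ = fo.GetPacked()   // false
    _ = fo.Packed != nil // true: the field is present, not merely defaulted

    var zero descriptorpb.FieldOptions
    _ = zero.GetJstype() // FieldOptions_JS_NORMAL (Default_FieldOptions_Jstype)
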
+func (*OneofOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{13} +} + +func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for EnumOptions fields. +const ( + Default_EnumOptions_Deprecated = bool(false) +) + +func (x *EnumOptions) Reset() { + *x = EnumOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumOptions) ProtoMessage() {} + +func (x *EnumOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumOptions.ProtoReflect.Descriptor instead. +func (*EnumOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{14} +} + +func (x *EnumOptions) GetAllowAlias() bool { + if x != nil && x.AllowAlias != nil { + return *x.AllowAlias + } + return false +} + +func (x *EnumOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for EnumValueOptions fields. 
+const ( + Default_EnumValueOptions_Deprecated = bool(false) +) + +func (x *EnumValueOptions) Reset() { + *x = EnumValueOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumValueOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumValueOptions) ProtoMessage() {} + +func (x *EnumValueOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumValueOptions.ProtoReflect.Descriptor instead. +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{15} +} + +func (x *EnumValueOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for ServiceOptions fields. +const ( + Default_ServiceOptions_Deprecated = bool(false) +) + +func (x *ServiceOptions) Reset() { + *x = ServiceOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceOptions) ProtoMessage() {} + +func (x *ServiceOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceOptions.ProtoReflect.Descriptor instead. 
+func (*ServiceOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{16} +} + +func (x *ServiceOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (x *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for MethodOptions fields. +const ( + Default_MethodOptions_Deprecated = bool(false) + Default_MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN +) + +func (x *MethodOptions) Reset() { + *x = MethodOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MethodOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MethodOptions) ProtoMessage() {} + +func (x *MethodOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MethodOptions.ProtoReflect.Descriptor instead. +func (*MethodOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17} +} + +func (x *MethodOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (x *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if x != nil && x.IdempotencyLevel != nil { + return *x.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + +func (x *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. 
+type UninterpretedOption struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. + IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` +} + +func (x *UninterpretedOption) Reset() { + *x = UninterpretedOption{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UninterpretedOption) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UninterpretedOption) ProtoMessage() {} + +func (x *UninterpretedOption) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UninterpretedOption.ProtoReflect.Descriptor instead. +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{18} +} + +func (x *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if x != nil { + return x.Name + } + return nil +} + +func (x *UninterpretedOption) GetIdentifierValue() string { + if x != nil && x.IdentifierValue != nil { + return *x.IdentifierValue + } + return "" +} + +func (x *UninterpretedOption) GetPositiveIntValue() uint64 { + if x != nil && x.PositiveIntValue != nil { + return *x.PositiveIntValue + } + return 0 +} + +func (x *UninterpretedOption) GetNegativeIntValue() int64 { + if x != nil && x.NegativeIntValue != nil { + return *x.NegativeIntValue + } + return 0 +} + +func (x *UninterpretedOption) GetDoubleValue() float64 { + if x != nil && x.DoubleValue != nil { + return *x.DoubleValue + } + return 0 +} + +func (x *UninterpretedOption) GetStringValue() []byte { + if x != nil { + return x.StringValue + } + return nil +} + +func (x *UninterpretedOption) GetAggregateValue() string { + if x != nil && x.AggregateValue != nil { + return *x.AggregateValue + } + return "" +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. 
This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` +} + +func (x *SourceCodeInfo) Reset() { + *x = SourceCodeInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceCodeInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceCodeInfo) ProtoMessage() {} + +func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceCodeInfo.ProtoReflect.Descriptor instead. +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19} +} + +func (x *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if x != nil { + return x.Location + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. 
+type GeneratedCodeInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` +} + +func (x *GeneratedCodeInfo) Reset() { + *x = GeneratedCodeInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GeneratedCodeInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GeneratedCodeInfo) ProtoMessage() {} + +func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GeneratedCodeInfo.ProtoReflect.Descriptor instead. +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20} +} + +func (x *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if x != nil { + return x.Annotation + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` +} + +func (x *DescriptorProto_ExtensionRange) Reset() { + *x = DescriptorProto_ExtensionRange{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DescriptorProto_ExtensionRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} + +func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescriptorProto_ExtensionRange.ProtoReflect.Descriptor instead. +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *DescriptorProto_ExtensionRange) GetStart() int32 { + if x != nil && x.Start != nil { + return *x.Start + } + return 0 +} + +func (x *DescriptorProto_ExtensionRange) GetEnd() int32 { + if x != nil && x.End != nil { + return *x.End + } + return 0 +} + +func (x *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if x != nil { + return x.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. 
+type DescriptorProto_ReservedRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. +} + +func (x *DescriptorProto_ReservedRange) Reset() { + *x = DescriptorProto_ReservedRange{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DescriptorProto_ReservedRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescriptorProto_ReservedRange) ProtoMessage() {} + +func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescriptorProto_ReservedRange.ProtoReflect.Descriptor instead. +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{2, 1} +} + +func (x *DescriptorProto_ReservedRange) GetStart() int32 { + if x != nil && x.Start != nil { + return *x.Start + } + return 0 +} + +func (x *DescriptorProto_ReservedRange) GetEnd() int32 { + if x != nil && x.End != nil { + return *x.End + } + return 0 +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. +type EnumDescriptorProto_EnumReservedRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Inclusive. +} + +func (x *EnumDescriptorProto_EnumReservedRange) Reset() { + *x = EnumDescriptorProto_EnumReservedRange{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumDescriptorProto_EnumReservedRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} + +func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumDescriptorProto_EnumReservedRange.ProtoReflect.Descriptor instead. 
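
A small sketch of the two reserved-range shapes (assumed packages as above; numbers are made up): DescriptorProto_ReservedRange uses an exclusive End, while EnumDescriptorProto_EnumReservedRange keeps End inclusive so the full int32 domain can be represented.

    // "reserved 5 to 10;" inside a message: End is one past the last reserved number.
    msgRange := &descriptorpb.DescriptorProto_ReservedRange{
        Start: proto.Int32(5),
        End:   proto.Int32(11),
    }
    // The same declaration inside an enum: End stays inclusive.
    enumRange := &descriptorpb.EnumDescriptorProto_EnumReservedRange{
        Start: proto.Int32(5),
        End:   proto.Int32(10),
    }
    _, _ = msgRange, enumRange
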
+func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if x != nil && x.Start != nil { + return *x.Start + } + return 0 +} + +func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if x != nil && x.End != nil { + return *x.End + } + return 0 +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". +type UninterpretedOption_NamePart struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` +} + +func (x *UninterpretedOption_NamePart) Reset() { + *x = UninterpretedOption_NamePart{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UninterpretedOption_NamePart) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UninterpretedOption_NamePart) ProtoMessage() {} + +func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UninterpretedOption_NamePart.ProtoReflect.Descriptor instead. +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{18, 0} +} + +func (x *UninterpretedOption_NamePart) GetNamePart() string { + if x != nil && x.NamePart != nil { + return *x.NamePart + } + return "" +} + +func (x *UninterpretedOption_NamePart) GetIsExtension() bool { + if x != nil && x.IsExtension != nil { + return *x.IsExtension + } + return false +} + +type SourceCodeInfo_Location struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). 
+ Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` +} + +func (x *SourceCodeInfo_Location) Reset() { + *x = SourceCodeInfo_Location{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceCodeInfo_Location) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceCodeInfo_Location) ProtoMessage() {} + +func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceCodeInfo_Location.ProtoReflect.Descriptor instead. 
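
A brief sketch of reading a location (assumed packages as above; path and span values are made up): spans are zero-based, so display code typically adds 1, and a three-element span ends on its start line.

    loc := &descriptorpb.SourceCodeInfo_Location{
        Path: []int32{4, 0, 2, 0, 1}, // message_type(0).field(0).name, per the path notes above
        Span: []int32{6, 2, 21},      // start line, start column, end column (same line)
    }
    span := loc.GetSpan()
    line, column := span[0]+1, span[1]+1 // convert to 1-based for display
    _, _ = line, column
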
+func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} +} + +func (x *SourceCodeInfo_Location) GetPath() []int32 { + if x != nil { + return x.Path + } + return nil +} + +func (x *SourceCodeInfo_Location) GetSpan() []int32 { + if x != nil { + return x.Span + } + return nil +} + +func (x *SourceCodeInfo_Location) GetLeadingComments() string { + if x != nil && x.LeadingComments != nil { + return *x.LeadingComments + } + return "" +} + +func (x *SourceCodeInfo_Location) GetTrailingComments() string { + if x != nil && x.TrailingComments != nil { + return *x.TrailingComments + } + return "" +} + +func (x *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if x != nil { + return x.LeadingDetachedComments + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). + End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` +} + +func (x *GeneratedCodeInfo_Annotation) Reset() { + *x = GeneratedCodeInfo_Annotation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GeneratedCodeInfo_Annotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} + +func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GeneratedCodeInfo_Annotation.ProtoReflect.Descriptor instead. 
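
One last sketch for annotations (assumed packages as above; contents are made up): Begin and End are byte offsets into the generated file, with End one past the last relevant byte.

    generated := []byte("package foopb // ...generated source...") // stand-in for a generated file's contents
    ann := &descriptorpb.GeneratedCodeInfo_Annotation{
        SourceFile: proto.String("foo.proto"),
        Path:       []int32{4, 0}, // message_type(0)
        Begin:      proto.Int32(0),
        End:        proto.Int32(7),
    }
    annotated := generated[int(ann.GetBegin()):int(ann.GetEnd())] // "package"
    _ = annotated
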
+func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0} +} + +func (x *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if x != nil { + return x.Path + } + return nil +} + +func (x *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if x != nil && x.SourceFile != nil { + return *x.SourceFile + } + return "" +} + +func (x *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if x != nil && x.Begin != nil { + return *x.Begin + } + return 0 +} + +func (x *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if x != nil && x.End != nil { + return *x.End + } + return 0 +} + +var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor + +var file_google_protobuf_descriptor_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x22, 0x4d, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, + 0x6c, 0x65, 0x22, 0xe4, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, + 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, + 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20, + 0x03, 0x28, 0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e, + 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65, + 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e, + 0x77, 0x65, 0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43, + 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, + 0x75, 
0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, + 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, + 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, + 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, + 0x44, 0x65, 0x73, 0x63, 0x72, 
0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, + 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, + 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, + 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, + 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, + 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d, + 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x7c, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, + 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 
0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, + 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, + 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, + 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, + 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, + 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, + 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, + 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 
0x4c, 0x4f, 0x41, + 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, + 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, + 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, + 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, + 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, + 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, + 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, + 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, + 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, + 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, + 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, + 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, + 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, + 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, + 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, + 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, + 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, + 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, + 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x69, 
0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, + 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, + 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, + 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, + 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, + 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, + 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, + 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, + 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, + 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, + 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, + 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 
0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, + 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, + 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, + 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, + 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, + 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, + 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, + 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, + 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, + 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, + 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 
0x2c, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, + 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, + 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, + 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, + 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, + 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, + 0x10, 0x27, 0x22, 0xd1, 0x02, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, + 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, + 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, + 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, + 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, + 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xe2, 0x03, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, + 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, + 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, + 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, + 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, + 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, + 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, + 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, + 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, + 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, + 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, + 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, + 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, + 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, + 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, + 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 
0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, + 0x22, 0xc0, 0x01, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, + 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, + 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, + 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, + 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, + 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, + 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 
0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, + 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, + 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, + 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, + 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, + 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, + 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, + 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, + 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, + 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, + 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, + 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, + 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, + 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, + 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, + 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 
0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, + 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, + 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, + 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, + 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, + 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, + 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, + 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, + 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, + 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, + 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, + 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, + 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, + 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd1, 0x01, + 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, + 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, + 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, + 0x63, 
0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, + 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, + 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, + 0x64, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, + 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, +} + +var ( + file_google_protobuf_descriptor_proto_rawDescOnce sync.Once + file_google_protobuf_descriptor_proto_rawDescData = file_google_protobuf_descriptor_proto_rawDesc +) + +func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { + file_google_protobuf_descriptor_proto_rawDescOnce.Do(func() { + file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_descriptor_proto_rawDescData) + }) + return file_google_protobuf_descriptor_proto_rawDescData +} + +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 6) +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 27) +var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ + (FieldDescriptorProto_Type)(0), // 0: google.protobuf.FieldDescriptorProto.Type + (FieldDescriptorProto_Label)(0), // 1: google.protobuf.FieldDescriptorProto.Label + (FileOptions_OptimizeMode)(0), // 2: google.protobuf.FileOptions.OptimizeMode + (FieldOptions_CType)(0), // 3: google.protobuf.FieldOptions.CType + (FieldOptions_JSType)(0), // 4: google.protobuf.FieldOptions.JSType + (MethodOptions_IdempotencyLevel)(0), // 5: google.protobuf.MethodOptions.IdempotencyLevel + (*FileDescriptorSet)(nil), // 6: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 7: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 8: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 9: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 10: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 11: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 12: google.protobuf.EnumDescriptorProto + (*EnumValueDescriptorProto)(nil), // 13: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 14: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 15: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 16: google.protobuf.FileOptions + (*MessageOptions)(nil), // 17: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 18: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 19: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 20: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 21: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 22: 
google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 23: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 24: google.protobuf.UninterpretedOption + (*SourceCodeInfo)(nil), // 25: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 26: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 27: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 28: google.protobuf.DescriptorProto.ReservedRange + (*EnumDescriptorProto_EnumReservedRange)(nil), // 29: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*UninterpretedOption_NamePart)(nil), // 30: google.protobuf.UninterpretedOption.NamePart + (*SourceCodeInfo_Location)(nil), // 31: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 32: google.protobuf.GeneratedCodeInfo.Annotation +} +var file_google_protobuf_descriptor_proto_depIdxs = []int32{ + 7, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 8, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 12, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 14, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 10, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 16, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 25, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 10, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 10, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 8, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 12, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 27, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 11, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 17, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 28, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 24, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 1, // 16: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label + 0, // 17: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type + 18, // 18: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 19, // 19: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 13, // 20: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 20, // 21: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 29, // 22: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 21, // 23: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 15, // 24: 
google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 22, // 25: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 23, // 26: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 2, // 27: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode + 24, // 28: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 24, // 29: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 3, // 30: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType + 4, // 31: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType + 24, // 32: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 24, // 33: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 24, // 34: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 24, // 35: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 24, // 36: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 5, // 37: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 24, // 38: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 30, // 39: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 31, // 40: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 32, // 41: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 9, // 42: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 43, // [43:43] is the sub-list for method output_type + 43, // [43:43] is the sub-list for method input_type + 43, // [43:43] is the sub-list for extension type_name + 43, // [43:43] is the sub-list for extension extendee + 0, // [0:43] is the sub-list for field type_name +} + +func init() { file_google_protobuf_descriptor_proto_init() } +func file_google_protobuf_descriptor_proto_init() { + if File_google_protobuf_descriptor_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileDescriptorSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionRangeOptions); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OneofDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumValueDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MessageOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OneofOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumValueOptions); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UninterpretedOption); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceCodeInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GeneratedCodeInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DescriptorProto_ExtensionRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DescriptorProto_ReservedRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UninterpretedOption_NamePart); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceCodeInfo_Location); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GeneratedCodeInfo_Annotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, + NumEnums: 6, + NumMessages: 27, + NumExtensions: 0, + NumServices: 0, 
+ }, + GoTypes: file_google_protobuf_descriptor_proto_goTypes, + DependencyIndexes: file_google_protobuf_descriptor_proto_depIdxs, + EnumInfos: file_google_protobuf_descriptor_proto_enumTypes, + MessageInfos: file_google_protobuf_descriptor_proto_msgTypes, + }.Build() + File_google_protobuf_descriptor_proto = out.File + file_google_protobuf_descriptor_proto_rawDesc = nil + file_google_protobuf_descriptor_proto_goTypes = nil + file_google_protobuf_descriptor_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go new file mode 100644 index 000000000..8c10797b9 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -0,0 +1,498 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/any.proto + +// Package anypb contains generated types for google/protobuf/any.proto. +// +// The Any message is a dynamic representation of any other message value. +// It is functionally a tuple of the full name of the remote message type and +// the serialized bytes of the remote message value. +// +// +// Constructing an Any +// +// An Any message containing another message value is constructed using New: +// +// any, err := anypb.New(m) +// if err != nil { +// ... // handle error +// } +// ... // make use of any +// +// +// Unmarshaling an Any +// +// With a populated Any message, the underlying message can be serialized into +// a remote concrete message value in a few ways. +// +// If the exact concrete type is known, then a new (or pre-existing) instance +// of that message can be passed to the UnmarshalTo method: +// +// m := new(foopb.MyMessage) +// if err := any.UnmarshalTo(m); err != nil { +// ... // handle error +// } +// ... 
// make use of m +// +// If the exact concrete type is not known, then the UnmarshalNew method can be +// used to unmarshal the contents into a new instance of the remote message type: +// +// m, err := any.UnmarshalNew() +// if err != nil { +// ... // handle error +// } +// ... // make use of m +// +// UnmarshalNew uses the global type registry to resolve the message type and +// construct a new instance of that message to unmarshal into. In order for a +// message type to appear in the global registry, the Go type representing that +// protobuf message type must be linked into the Go binary. For messages +// generated by protoc-gen-go, this is achieved through an import of the +// generated Go package representing a .proto file. +// +// A common pattern with UnmarshalNew is to use a type switch with the resulting +// proto.Message value: +// +// switch m := m.(type) { +// case *foopb.MyMessage: +// ... // make use of m as a *foopb.MyMessage +// case *barpb.OtherMessage: +// ... // make use of m as a *barpb.OtherMessage +// case *bazpb.SomeMessage: +// ... // make use of m as a *bazpb.SomeMessage +// } +// +// This pattern ensures that the generated packages containing the message types +// listed in the case clauses are linked into the Go binary and therefore also +// registered in the global registry. +// +// +// Type checking an Any +// +// In order to type check whether an Any message represents some other message, +// then use the MessageIs method: +// +// if any.MessageIs((*foopb.MyMessage)(nil)) { +// ... // make use of any, knowing that it contains a foopb.MyMessage +// } +// +// The MessageIs method can also be used with an allocated instance of the target +// message type if the intention is to unmarshal into it if the type matches: +// +// m := new(foopb.MyMessage) +// if any.MessageIs(m) { +// if err := any.UnmarshalTo(m); err != nil { +// ... // handle error +// } +// ... // make use of m +// } +// +package anypb + +import ( + proto "google.golang.org/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoregistry "google.golang.org/protobuf/reflect/protoregistry" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + strings "strings" + sync "sync" +) + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := anypb.New(foo) +// if err != nil { +// ... +// } +// ... +// foo := &pb.Foo{} +// if err := any.UnmarshalTo(foo); err != nil { +// ... 
+// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +type Any struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. This string must contain at least + // one "/" character. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). The name should be in a canonical form + // (e.g., leading "." is not accepted). + // + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: + // + // * If no scheme is provided, `https` is assumed. + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // Must be a valid serialized protocol buffer of the above specified type. + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +// New marshals src into a new Any instance. +func New(src proto.Message) (*Any, error) { + dst := new(Any) + if err := dst.MarshalFrom(src); err != nil { + return nil, err + } + return dst, nil +} + +// MarshalFrom marshals src into dst as the underlying message +// using the provided marshal options. +// +// If no options are specified, call dst.MarshalFrom instead. 
+func MarshalFrom(dst *Any, src proto.Message, opts proto.MarshalOptions) error { + const urlPrefix = "type.googleapis.com/" + if src == nil { + return protoimpl.X.NewError("invalid nil source message") + } + b, err := opts.Marshal(src) + if err != nil { + return err + } + dst.TypeUrl = urlPrefix + string(src.ProtoReflect().Descriptor().FullName()) + dst.Value = b + return nil +} + +// UnmarshalTo unmarshals the underlying message from src into dst +// using the provided unmarshal options. +// It reports an error if dst is not of the right message type. +// +// If no options are specified, call src.UnmarshalTo instead. +func UnmarshalTo(src *Any, dst proto.Message, opts proto.UnmarshalOptions) error { + if src == nil { + return protoimpl.X.NewError("invalid nil source message") + } + if !src.MessageIs(dst) { + got := dst.ProtoReflect().Descriptor().FullName() + want := src.MessageName() + return protoimpl.X.NewError("mismatched message type: got %q, want %q", got, want) + } + return opts.Unmarshal(src.GetValue(), dst) +} + +// UnmarshalNew unmarshals the underlying message from src into dst, +// which is newly created message using a type resolved from the type URL. +// The message type is resolved according to opt.Resolver, +// which should implement protoregistry.MessageTypeResolver. +// It reports an error if the underlying message type could not be resolved. +// +// If no options are specified, call src.UnmarshalNew instead. +func UnmarshalNew(src *Any, opts proto.UnmarshalOptions) (dst proto.Message, err error) { + if src.GetTypeUrl() == "" { + return nil, protoimpl.X.NewError("invalid empty type URL") + } + if opts.Resolver == nil { + opts.Resolver = protoregistry.GlobalTypes + } + r, ok := opts.Resolver.(protoregistry.MessageTypeResolver) + if !ok { + return nil, protoregistry.NotFound + } + mt, err := r.FindMessageByURL(src.GetTypeUrl()) + if err != nil { + if err == protoregistry.NotFound { + return nil, err + } + return nil, protoimpl.X.NewError("could not resolve %q: %v", src.GetTypeUrl(), err) + } + dst = mt.New().Interface() + return dst, opts.Unmarshal(src.GetValue(), dst) +} + +// MessageIs reports whether the underlying message is of the same type as m. +func (x *Any) MessageIs(m proto.Message) bool { + if m == nil { + return false + } + url := x.GetTypeUrl() + name := string(m.ProtoReflect().Descriptor().FullName()) + if !strings.HasSuffix(url, name) { + return false + } + return len(url) == len(name) || url[len(url)-len(name)-1] == '/' +} + +// MessageName reports the full name of the underlying message, +// returning an empty string if invalid. +func (x *Any) MessageName() protoreflect.FullName { + url := x.GetTypeUrl() + name := protoreflect.FullName(url) + if i := strings.LastIndexByte(url, '/'); i >= 0 { + name = name[i+len("/"):] + } + if !name.IsValid() { + return "" + } + return name +} + +// MarshalFrom marshals m into x as the underlying message. +func (x *Any) MarshalFrom(m proto.Message) error { + return MarshalFrom(x, m, proto.MarshalOptions{}) +} + +// UnmarshalTo unmarshals the contents of the underlying message of x into m. +// It resets m before performing the unmarshal operation. +// It reports an error if m is not of the right message type. +func (x *Any) UnmarshalTo(m proto.Message) error { + return UnmarshalTo(x, m, proto.UnmarshalOptions{}) +} + +// UnmarshalNew unmarshals the contents of the underlying message of x into +// a newly allocated message of the specified type. +// It reports an error if the underlying message type could not be resolved. 
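MessageIs and UnmarshalTo combine into the check-then-unpack pattern described in the package documentation. A small sketch, assuming the Any is expected to carry a durationpb.Duration (an illustrative choice, not something the generated code mandates):

    package example

    import (
        "fmt"

        "google.golang.org/protobuf/types/known/anypb"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    // unpackDuration returns the Duration carried by a, or an error when the
    // Any holds some other message type.
    func unpackDuration(a *anypb.Any) (*durationpb.Duration, error) {
        d := new(durationpb.Duration)
        // Only attempt the unmarshal when the type URL matches the target type.
        if !a.MessageIs(d) {
            return nil, fmt.Errorf("unexpected payload type %q", a.MessageName())
        }
        if err := a.UnmarshalTo(d); err != nil {
            return nil, err
        }
        return d, nil
    }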
+func (x *Any) UnmarshalNew() (proto.Message, error) { + return UnmarshalNew(x, proto.UnmarshalOptions{}) +} + +func (x *Any) Reset() { + *x = Any{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_any_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Any) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Any) ProtoMessage() {} + +func (x *Any) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_any_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Any.ProtoReflect.Descriptor instead. +func (*Any) Descriptor() ([]byte, []int) { + return file_google_protobuf_any_proto_rawDescGZIP(), []int{0} +} + +func (x *Any) GetTypeUrl() string { + if x != nil { + return x.TypeUrl + } + return "" +} + +func (x *Any) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +var File_google_protobuf_any_proto protoreflect.FileDescriptor + +var file_google_protobuf_any_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03, + 0x41, 0x6e, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x08, 0x41, 0x6e, 0x79, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, + 0x61, 0x6e, 0x79, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, + 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_any_proto_rawDescOnce sync.Once + file_google_protobuf_any_proto_rawDescData = file_google_protobuf_any_proto_rawDesc +) + +func file_google_protobuf_any_proto_rawDescGZIP() []byte { + file_google_protobuf_any_proto_rawDescOnce.Do(func() { + file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_any_proto_rawDescData) + }) + return file_google_protobuf_any_proto_rawDescData +} + +var file_google_protobuf_any_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_any_proto_goTypes = []interface{}{ + (*Any)(nil), // 0: google.protobuf.Any +} +var file_google_protobuf_any_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the 
sub-list for field type_name +} + +func init() { file_google_protobuf_any_proto_init() } +func file_google_protobuf_any_proto_init() { + if File_google_protobuf_any_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Any); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_any_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_any_proto_goTypes, + DependencyIndexes: file_google_protobuf_any_proto_depIdxs, + MessageInfos: file_google_protobuf_any_proto_msgTypes, + }.Build() + File_google_protobuf_any_proto = out.File + file_google_protobuf_any_proto_rawDesc = nil + file_google_protobuf_any_proto_goTypes = nil + file_google_protobuf_any_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go new file mode 100644 index 000000000..a583ca2f6 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -0,0 +1,379 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/duration.proto + +// Package durationpb contains generated types for google/protobuf/duration.proto. +// +// The Duration message represents a signed span of time. +// +// +// Conversion to a Go Duration +// +// The AsDuration method can be used to convert a Duration message to a +// standard Go time.Duration value: +// +// d := dur.AsDuration() +// ... 
// make use of d as a time.Duration +// +// Converting to a time.Duration is a common operation so that the extensive +// set of time-based operations provided by the time package can be leveraged. +// See https://golang.org/pkg/time for more information. +// +// The AsDuration method performs the conversion on a best-effort basis. +// Durations with denormal values (e.g., nanoseconds beyond -99999999 and +// +99999999, inclusive; or seconds and nanoseconds with opposite signs) +// are normalized during the conversion to a time.Duration. To manually check for +// invalid Duration per the documented limitations in duration.proto, +// additionally call the CheckValid method: +// +// if err := dur.CheckValid(); err != nil { +// ... // handle error +// } +// +// Note that the documented limitations in duration.proto does not protect a +// Duration from overflowing the representable range of a time.Duration in Go. +// The AsDuration method uses saturation arithmetic such that an overflow clamps +// the resulting value to the closest representable value (e.g., math.MaxInt64 +// for positive overflow and math.MinInt64 for negative overflow). +// +// +// Conversion from a Go Duration +// +// The durationpb.New function can be used to construct a Duration message +// from a standard Go time.Duration value: +// +// dur := durationpb.New(d) +// ... // make use of d as a *durationpb.Duration +// +package durationpb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" + reflect "reflect" + sync "sync" + time "time" +) + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. 
For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +type Duration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +// New constructs a new Duration from the provided time.Duration. +func New(d time.Duration) *Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &Duration{Seconds: int64(secs), Nanos: int32(nanos)} +} + +// AsDuration converts x to a time.Duration, +// returning the closest duration value in the event of overflow. +func (x *Duration) AsDuration() time.Duration { + secs := x.GetSeconds() + nanos := x.GetNanos() + d := time.Duration(secs) * time.Second + overflow := d/time.Second != time.Duration(secs) + d += time.Duration(nanos) * time.Nanosecond + overflow = overflow || (secs < 0 && nanos < 0 && d > 0) + overflow = overflow || (secs > 0 && nanos > 0 && d < 0) + if overflow { + switch { + case secs < 0: + return time.Duration(math.MinInt64) + case secs > 0: + return time.Duration(math.MaxInt64) + } + } + return d +} + +// IsValid reports whether the duration is valid. +// It is equivalent to CheckValid == nil. +func (x *Duration) IsValid() bool { + return x.check() == 0 +} + +// CheckValid returns an error if the duration is invalid. +// In particular, it checks whether the value is within the range of +// -10000 years to +10000 years inclusive. +// An error is reported for a nil Duration. 
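A short sketch of the New/AsDuration round trip together with CheckValid; the 90-minute value is arbitrary:

    package main

    import (
        "fmt"
        "time"

        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        // Wrap a Go time.Duration in the protobuf message...
        d := durationpb.New(90 * time.Minute)
        fmt.Println(d.GetSeconds(), d.GetNanos()) // 5400 0

        // ...validate it, then convert it back.
        if err := d.CheckValid(); err != nil {
            panic(err)
        }
        fmt.Println(d.AsDuration()) // 1h30m0s
    }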
+func (x *Duration) CheckValid() error { + switch x.check() { + case invalidNil: + return protoimpl.X.NewError("invalid nil Duration") + case invalidUnderflow: + return protoimpl.X.NewError("duration (%v) exceeds -10000 years", x) + case invalidOverflow: + return protoimpl.X.NewError("duration (%v) exceeds +10000 years", x) + case invalidNanosRange: + return protoimpl.X.NewError("duration (%v) has out-of-range nanos", x) + case invalidNanosSign: + return protoimpl.X.NewError("duration (%v) has seconds and nanos with different signs", x) + default: + return nil + } +} + +const ( + _ = iota + invalidNil + invalidUnderflow + invalidOverflow + invalidNanosRange + invalidNanosSign +) + +func (x *Duration) check() uint { + const absDuration = 315576000000 // 10000yr * 365.25day/yr * 24hr/day * 60min/hr * 60sec/min + secs := x.GetSeconds() + nanos := x.GetNanos() + switch { + case x == nil: + return invalidNil + case secs < -absDuration: + return invalidUnderflow + case secs > +absDuration: + return invalidOverflow + case nanos <= -1e9 || nanos >= +1e9: + return invalidNanosRange + case (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0): + return invalidNanosSign + default: + return 0 + } +} + +func (x *Duration) Reset() { + *x = Duration{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_duration_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Duration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Duration) ProtoMessage() {} + +func (x *Duration) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_duration_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Duration.ProtoReflect.Descriptor instead. 
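The check classification above can be exercised directly. A hedged sketch constructing a Duration whose seconds and nanos disagree in sign, which falls into the invalidNanosSign case:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        // Positive seconds with negative nanos is rejected by CheckValid.
        bad := &durationpb.Duration{Seconds: 1, Nanos: -500000000}
        fmt.Println(bad.IsValid())    // false
        fmt.Println(bad.CheckValid()) // ... has seconds and nanos with different signs
    }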
+func (*Duration) Descriptor() ([]byte, []int) { + return file_google_protobuf_duration_proto_rawDescGZIP(), []int{0} +} + +func (x *Duration) GetSeconds() int64 { + if x != nil { + return x.Seconds + } + return 0 +} + +func (x *Duration) GetNanos() int32 { + if x != nil { + return x.Nanos + } + return 0 +} + +var File_google_protobuf_duration_proto protoreflect.FileDescriptor + +var file_google_protobuf_duration_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, + 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x83, 0x01, + 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x64, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, + 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, + 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_duration_proto_rawDescOnce sync.Once + file_google_protobuf_duration_proto_rawDescData = file_google_protobuf_duration_proto_rawDesc +) + +func file_google_protobuf_duration_proto_rawDescGZIP() []byte { + file_google_protobuf_duration_proto_rawDescOnce.Do(func() { + file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_duration_proto_rawDescData) + }) + return file_google_protobuf_duration_proto_rawDescData +} + +var file_google_protobuf_duration_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_duration_proto_goTypes = []interface{}{ + (*Duration)(nil), // 0: google.protobuf.Duration +} +var file_google_protobuf_duration_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_duration_proto_init() } +func file_google_protobuf_duration_proto_init() { + if File_google_protobuf_duration_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Duration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: 
protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_duration_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_duration_proto_goTypes, + DependencyIndexes: file_google_protobuf_duration_proto_depIdxs, + MessageInfos: file_google_protobuf_duration_proto_msgTypes, + }.Build() + File_google_protobuf_duration_proto = out.File + file_google_protobuf_duration_proto_rawDesc = nil + file_google_protobuf_duration_proto_goTypes = nil + file_google_protobuf_duration_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go new file mode 100644 index 000000000..c9ae92132 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -0,0 +1,390 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/timestamp.proto + +// Package timestamppb contains generated types for google/protobuf/timestamp.proto. +// +// The Timestamp message represents a timestamp, +// an instant in time since the Unix epoch (January 1st, 1970). +// +// +// Conversion to a Go Time +// +// The AsTime method can be used to convert a Timestamp message to a +// standard Go time.Time value in UTC: +// +// t := ts.AsTime() +// ... // make use of t as a time.Time +// +// Converting to a time.Time is a common operation so that the extensive +// set of time-based operations provided by the time package can be leveraged. +// See https://golang.org/pkg/time for more information. +// +// The AsTime method performs the conversion on a best-effort basis. 
Timestamps +// with denormal values (e.g., nanoseconds beyond 0 and 99999999, inclusive) +// are normalized during the conversion to a time.Time. To manually check for +// invalid Timestamps per the documented limitations in timestamp.proto, +// additionally call the CheckValid method: +// +// if err := ts.CheckValid(); err != nil { +// ... // handle error +// } +// +// +// Conversion from a Go Time +// +// The timestamppb.New function can be used to construct a Timestamp message +// from a standard Go time.Time value: +// +// ts := timestamppb.New(t) +// ... // make use of ts as a *timestamppb.Timestamp +// +// In order to construct a Timestamp representing the current time, use Now: +// +// ts := timestamppb.Now() +// ... // make use of ts as a *timestamppb.Timestamp +// +package timestamppb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + time "time" +) + +// A Timestamp represents a point in time independent of any time zone or local +// calendar, encoded as a count of seconds and fractions of seconds at +// nanosecond resolution. The count is relative to an epoch at UTC midnight on +// January 1, 1970, in the proleptic Gregorian calendar which extends the +// Gregorian calendar backwards to year one. +// +// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap +// second table is needed for interpretation, using a [24-hour linear +// smear](https://developers.google.com/time/smear). +// +// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By +// restricting to that range, we ensure that we can convert to and from [RFC +// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from Java `Instant.now()`. +// +// Instant now = Instant.now(); +// +// Timestamp timestamp = +// Timestamp.newBuilder().setSeconds(now.getEpochSecond()) +// .setNanos(now.getNano()).build(); +// +// +// Example 6: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. 
That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required. A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard +// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using +// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with +// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use +// the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D +// ) to obtain a formatter capable of generating timestamps in this format. +// +// +type Timestamp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +// Now constructs a new Timestamp from the current time. +func Now() *Timestamp { + return New(time.Now()) +} + +// New constructs a new Timestamp from the provided time.Time. +func New(t time.Time) *Timestamp { + return &Timestamp{Seconds: int64(t.Unix()), Nanos: int32(t.Nanosecond())} +} + +// AsTime converts x to a time.Time. +func (x *Timestamp) AsTime() time.Time { + return time.Unix(int64(x.GetSeconds()), int64(x.GetNanos())).UTC() +} + +// IsValid reports whether the timestamp is valid. +// It is equivalent to CheckValid == nil. +func (x *Timestamp) IsValid() bool { + return x.check() == 0 +} + +// CheckValid returns an error if the timestamp is invalid. +// In particular, it checks whether the value represents a date that is +// in the range of 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive. +// An error is reported for a nil Timestamp. 
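A brief sketch of the Timestamp conversions (Now, New, AsTime, CheckValid); the concrete time value is only an example:

    package main

    import (
        "fmt"
        "time"

        "google.golang.org/protobuf/types/known/timestamppb"
    )

    func main() {
        // Capture the current instant as a protobuf Timestamp.
        ts := timestamppb.Now()
        if err := ts.CheckValid(); err != nil {
            panic(err)
        }

        // Convert back to a Go time.Time; AsTime always returns UTC.
        fmt.Println(ts.AsTime().Format(time.RFC3339Nano))

        // Round-trip an arbitrary time.Time through New.
        then := time.Date(2017, time.January, 15, 1, 30, 15, 10000000, time.UTC)
        fmt.Println(timestamppb.New(then).AsTime()) // 2017-01-15 01:30:15.01 +0000 UTC
    }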
+func (x *Timestamp) CheckValid() error { + switch x.check() { + case invalidNil: + return protoimpl.X.NewError("invalid nil Timestamp") + case invalidUnderflow: + return protoimpl.X.NewError("timestamp (%v) before 0001-01-01", x) + case invalidOverflow: + return protoimpl.X.NewError("timestamp (%v) after 9999-12-31", x) + case invalidNanos: + return protoimpl.X.NewError("timestamp (%v) has out-of-range nanos", x) + default: + return nil + } +} + +const ( + _ = iota + invalidNil + invalidUnderflow + invalidOverflow + invalidNanos +) + +func (x *Timestamp) check() uint { + const minTimestamp = -62135596800 // Seconds between 1970-01-01T00:00:00Z and 0001-01-01T00:00:00Z, inclusive + const maxTimestamp = +253402300799 // Seconds between 1970-01-01T00:00:00Z and 9999-12-31T23:59:59Z, inclusive + secs := x.GetSeconds() + nanos := x.GetNanos() + switch { + case x == nil: + return invalidNil + case secs < minTimestamp: + return invalidUnderflow + case secs > maxTimestamp: + return invalidOverflow + case nanos < 0 || nanos >= 1e9: + return invalidNanos + default: + return 0 + } +} + +func (x *Timestamp) Reset() { + *x = Timestamp{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_timestamp_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Timestamp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Timestamp) ProtoMessage() {} + +func (x *Timestamp) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_timestamp_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Timestamp.ProtoReflect.Descriptor instead. 
+func (*Timestamp) Descriptor() ([]byte, []int) { + return file_google_protobuf_timestamp_proto_rawDescGZIP(), []int{0} +} + +func (x *Timestamp) GetSeconds() int64 { + if x != nil { + return x.Seconds + } + return 0 +} + +func (x *Timestamp) GetNanos() int32 { + if x != nil { + return x.Nanos + } + return 0 +} + +var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor + +var file_google_protobuf_timestamp_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x22, 0x3b, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, + 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, + 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, + 0x6e, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x70, 0x62, 0xf8, 0x01, 0x01, + 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, + 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_timestamp_proto_rawDescOnce sync.Once + file_google_protobuf_timestamp_proto_rawDescData = file_google_protobuf_timestamp_proto_rawDesc +) + +func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte { + file_google_protobuf_timestamp_proto_rawDescOnce.Do(func() { + file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_timestamp_proto_rawDescData) + }) + return file_google_protobuf_timestamp_proto_rawDescData +} + +var file_google_protobuf_timestamp_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_timestamp_proto_goTypes = []interface{}{ + (*Timestamp)(nil), // 0: google.protobuf.Timestamp +} +var file_google_protobuf_timestamp_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_timestamp_proto_init() } +func file_google_protobuf_timestamp_proto_init() { + if File_google_protobuf_timestamp_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Timestamp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := 
protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_timestamp_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_timestamp_proto_goTypes, + DependencyIndexes: file_google_protobuf_timestamp_proto_depIdxs, + MessageInfos: file_google_protobuf_timestamp_proto_msgTypes, + }.Build() + File_google_protobuf_timestamp_proto = out.File + file_google_protobuf_timestamp_proto_rawDesc = nil + file_google_protobuf_timestamp_proto_goTypes = nil + file_google_protobuf_timestamp_proto_depIdxs = nil +} diff --git a/vendor/gotest.tools/v3/assert/assert.go b/vendor/gotest.tools/v3/assert/assert.go index e75d1f38a..f3f805dde 100644 --- a/vendor/gotest.tools/v3/assert/assert.go +++ b/vendor/gotest.tools/v3/assert/assert.go @@ -49,7 +49,7 @@ The example below shows assert used with some common types. Comparisons -Package http://gotest.tools/assert/cmp provides +Package http://pkg.go.dev/gotest.tools/v3/assert/cmp provides many common comparisons. Additional comparisons can be written to compare values in other ways. See the example Assert (CustomComparison). @@ -58,22 +58,16 @@ Automated migration from testify gty-migrate-from-testify is a command which translates Go source code from testify assertions to the assertions provided by this package. -See http://gotest.tools/assert/cmd/gty-migrate-from-testify. +See http://pkg.go.dev/gotest.tools/v3/assert/cmd/gty-migrate-from-testify. */ package assert // import "gotest.tools/v3/assert" import ( - "fmt" - "go/ast" - "go/token" - "reflect" - gocmp "github.com/google/go-cmp/cmp" "gotest.tools/v3/assert/cmp" - "gotest.tools/v3/internal/format" - "gotest.tools/v3/internal/source" + "gotest.tools/v3/internal/assert" ) // BoolOrComparison can be a bool, or cmp.Comparison. See Assert() for usage. @@ -90,128 +84,6 @@ type helperT interface { Helper() } -const failureMessage = "assertion failed: " - -// nolint: gocyclo -func assert( - t TestingT, - failer func(), - argSelector argSelector, - comparison BoolOrComparison, - msgAndArgs ...interface{}, -) bool { - if ht, ok := t.(helperT); ok { - ht.Helper() - } - var success bool - switch check := comparison.(type) { - case bool: - if check { - return true - } - logFailureFromBool(t, msgAndArgs...) - - // Undocumented legacy comparison without Result type - case func() (success bool, message string): - success = runCompareFunc(t, check, msgAndArgs...) - - case nil: - return true - - case error: - msg := failureMsgFromError(check) - t.Log(format.WithCustomMessage(failureMessage+msg, msgAndArgs...)) - - case cmp.Comparison: - success = runComparison(t, argSelector, check, msgAndArgs...) - - case func() cmp.Result: - success = runComparison(t, argSelector, check, msgAndArgs...) 
- - default: - t.Log(fmt.Sprintf("invalid Comparison: %v (%T)", check, check)) - } - - if success { - return true - } - failer() - return false -} - -func runCompareFunc( - t TestingT, - f func() (success bool, message string), - msgAndArgs ...interface{}, -) bool { - if ht, ok := t.(helperT); ok { - ht.Helper() - } - if success, message := f(); !success { - t.Log(format.WithCustomMessage(failureMessage+message, msgAndArgs...)) - return false - } - return true -} - -func logFailureFromBool(t TestingT, msgAndArgs ...interface{}) { - if ht, ok := t.(helperT); ok { - ht.Helper() - } - const stackIndex = 3 // Assert()/Check(), assert(), formatFailureFromBool() - const comparisonArgPos = 1 - args, err := source.CallExprArgs(stackIndex) - if err != nil { - t.Log(err.Error()) - return - } - - msg, err := boolFailureMessage(args[comparisonArgPos]) - if err != nil { - t.Log(err.Error()) - msg = "expression is false" - } - - t.Log(format.WithCustomMessage(failureMessage+msg, msgAndArgs...)) -} - -func failureMsgFromError(err error) string { - // Handle errors with non-nil types - v := reflect.ValueOf(err) - if v.Kind() == reflect.Ptr && v.IsNil() { - return fmt.Sprintf("error is not nil: error has type %T", err) - } - return "error is not nil: " + err.Error() -} - -func boolFailureMessage(expr ast.Expr) (string, error) { - if binaryExpr, ok := expr.(*ast.BinaryExpr); ok && binaryExpr.Op == token.NEQ { - x, err := source.FormatNode(binaryExpr.X) - if err != nil { - return "", err - } - y, err := source.FormatNode(binaryExpr.Y) - if err != nil { - return "", err - } - return x + " is " + y, nil - } - - if unaryExpr, ok := expr.(*ast.UnaryExpr); ok && unaryExpr.Op == token.NOT { - x, err := source.FormatNode(unaryExpr.X) - if err != nil { - return "", err - } - return x + " is true", nil - } - - formatted, err := source.FormatNode(expr) - if err != nil { - return "", err - } - return "expression is false: " + formatted, nil -} - // Assert performs a comparison. If the comparison fails, the test is marked as // failed, a failure message is logged, and execution is stopped immediately. // @@ -222,7 +94,7 @@ func boolFailureMessage(expr ast.Expr) (string, error) { // cmp.Comparison // Uses cmp.Result.Success() to check for success of failure. // The comparison is responsible for producing a helpful failure message. -// http://gotest.tools/assert/cmp provides many common comparisons. +// http://pkg.go.dev/gotest.tools/v3/assert/cmp provides many common comparisons. // error // A nil value is considered success. // A non-nil error is a failure, err.Error() is used as the failure message. @@ -230,7 +102,9 @@ func Assert(t TestingT, comparison BoolOrComparison, msgAndArgs ...interface{}) if ht, ok := t.(helperT); ok { ht.Helper() } - assert(t, t.FailNow, argsFromComparisonCall, comparison, msgAndArgs...) + if !assert.Eval(t, assert.ArgsFromComparisonCall, comparison, msgAndArgs...) { + t.FailNow() + } } // Check performs a comparison. If the comparison fails the test is marked as @@ -242,7 +116,11 @@ func Check(t TestingT, comparison BoolOrComparison, msgAndArgs ...interface{}) b if ht, ok := t.(helperT); ok { ht.Helper() } - return assert(t, t.Fail, argsFromComparisonCall, comparison, msgAndArgs...) + if !assert.Eval(t, assert.ArgsFromComparisonCall, comparison, msgAndArgs...) { + t.Fail() + return false + } + return true } // NilError fails the test immediately if err is not nil. 
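The exported Assert, Check, NilError and Equal helpers keep their previous behaviour after the switch to assert.Eval. A usage sketch with made-up values:

    package example_test

    import (
        "os"
        "testing"

        "gotest.tools/v3/assert"
    )

    func TestAssertUsage(t *testing.T) {
        // NilError and Assert stop the test on failure; Check logs and continues.
        f, err := os.CreateTemp("", "example")
        assert.NilError(t, err)
        defer os.Remove(f.Name())

        assert.Check(t, f.Name() != "")
        assert.Equal(t, 1+1, 2, "basic sanity check")
        assert.Assert(t, err == nil)
    }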
@@ -251,7 +129,9 @@ func NilError(t TestingT, err error, msgAndArgs ...interface{}) { if ht, ok := t.(helperT); ok { ht.Helper() } - assert(t, t.FailNow, argsAfterT, err, msgAndArgs...) + if !assert.Eval(t, assert.ArgsAfterT, err, msgAndArgs...) { + t.FailNow() + } } // Equal uses the == operator to assert two values are equal and fails the test @@ -270,13 +150,15 @@ func Equal(t TestingT, x, y interface{}, msgAndArgs ...interface{}) { if ht, ok := t.(helperT); ok { ht.Helper() } - assert(t, t.FailNow, argsAfterT, cmp.Equal(x, y), msgAndArgs...) + if !assert.Eval(t, assert.ArgsAfterT, cmp.Equal(x, y), msgAndArgs...) { + t.FailNow() + } } // DeepEqual uses google/go-cmp (https://godoc.org/github.com/google/go-cmp/cmp) // to assert two values are equal and fails the test if they are not equal. // -// Package http://gotest.tools/assert/opt provides some additional +// Package http://pkg.go.dev/gotest.tools/v3/assert/opt provides some additional // commonly used Options. // // This is equivalent to Assert(t, cmp.DeepEqual(x, y)). @@ -284,7 +166,9 @@ func DeepEqual(t TestingT, x, y interface{}, opts ...gocmp.Option) { if ht, ok := t.(helperT); ok { ht.Helper() } - assert(t, t.FailNow, argsAfterT, cmp.DeepEqual(x, y, opts...)) + if !assert.Eval(t, assert.ArgsAfterT, cmp.DeepEqual(x, y, opts...)) { + t.FailNow() + } } // Error fails the test if err is nil, or the error message is not the expected @@ -294,7 +178,9 @@ func Error(t TestingT, err error, message string, msgAndArgs ...interface{}) { if ht, ok := t.(helperT); ok { ht.Helper() } - assert(t, t.FailNow, argsAfterT, cmp.Error(err, message), msgAndArgs...) + if !assert.Eval(t, assert.ArgsAfterT, cmp.Error(err, message), msgAndArgs...) { + t.FailNow() + } } // ErrorContains fails the test if err is nil, or the error message does not @@ -304,7 +190,9 @@ func ErrorContains(t TestingT, err error, substring string, msgAndArgs ...interf if ht, ok := t.(helperT); ok { ht.Helper() } - assert(t, t.FailNow, argsAfterT, cmp.ErrorContains(err, substring), msgAndArgs...) + if !assert.Eval(t, assert.ArgsAfterT, cmp.ErrorContains(err, substring), msgAndArgs...) { + t.FailNow() + } } // ErrorType fails the test if err is nil, or err is not the expected type. @@ -325,5 +213,7 @@ func ErrorType(t TestingT, err error, expected interface{}, msgAndArgs ...interf if ht, ok := t.(helperT); ok { ht.Helper() } - assert(t, t.FailNow, argsAfterT, cmp.ErrorType(err, expected), msgAndArgs...) + if !assert.Eval(t, assert.ArgsAfterT, cmp.ErrorType(err, expected), msgAndArgs...) { + t.FailNow() + } } diff --git a/vendor/gotest.tools/v3/assert/cmp/compare.go b/vendor/gotest.tools/v3/assert/cmp/compare.go index bb3b24f43..3c0e05ab5 100644 --- a/vendor/gotest.tools/v3/assert/cmp/compare.go +++ b/vendor/gotest.tools/v3/assert/cmp/compare.go @@ -21,7 +21,7 @@ type Comparison func() Result // and succeeds if the values are equal. // // The comparison can be customized using comparison Options. -// Package http://gotest.tools/assert/opt provides some additional +// Package http://pkg.go.dev/gotest.tools/v3/assert/opt provides some additional // commonly used Options. 
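DeepEqual continues to accept go-cmp options. A sketch passing cmpopts.EquateEmpty as one such option; the config type is invented for illustration:

    package example_test

    import (
        "testing"

        "github.com/google/go-cmp/cmp/cmpopts"
        "gotest.tools/v3/assert"
    )

    type config struct {
        Tags []string
    }

    func TestDeepEqualWithOptions(t *testing.T) {
        got := config{Tags: []string{}}
        want := config{Tags: nil}

        // EquateEmpty makes a nil slice compare equal to an empty one.
        assert.DeepEqual(t, got, want, cmpopts.EquateEmpty())
    }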
func DeepEqual(x, y interface{}, opts ...cmp.Option) Comparison { return func() (result Result) { diff --git a/vendor/gotest.tools/v3/assert/cmp/result.go b/vendor/gotest.tools/v3/assert/cmp/result.go index 385e1a087..3b48d9bf0 100644 --- a/vendor/gotest.tools/v3/assert/cmp/result.go +++ b/vendor/gotest.tools/v3/assert/cmp/result.go @@ -52,13 +52,12 @@ func ResultFromError(err error) Result { } type templatedResult struct { - success bool template string data map[string]interface{} } func (r templatedResult) Success() bool { - return r.success + return false } func (r templatedResult) FailureMessage(args []ast.Expr) string { diff --git a/vendor/gotest.tools/v3/internal/assert/assert.go b/vendor/gotest.tools/v3/internal/assert/assert.go new file mode 100644 index 000000000..8dc01f4b5 --- /dev/null +++ b/vendor/gotest.tools/v3/internal/assert/assert.go @@ -0,0 +1,143 @@ +package assert + +import ( + "fmt" + "go/ast" + "go/token" + "reflect" + + "gotest.tools/v3/assert/cmp" + "gotest.tools/v3/internal/format" + "gotest.tools/v3/internal/source" +) + +// LogT is the subset of testing.T used by the assert package. +type LogT interface { + Log(args ...interface{}) +} + +type helperT interface { + Helper() +} + +const failureMessage = "assertion failed: " + +// Eval the comparison and print a failure messages if the comparison has failed. +// nolint: gocyclo +func Eval( + t LogT, + argSelector argSelector, + comparison interface{}, + msgAndArgs ...interface{}, +) bool { + if ht, ok := t.(helperT); ok { + ht.Helper() + } + var success bool + switch check := comparison.(type) { + case bool: + if check { + return true + } + logFailureFromBool(t, msgAndArgs...) + + // Undocumented legacy comparison without Result type + case func() (success bool, message string): + success = runCompareFunc(t, check, msgAndArgs...) + + case nil: + return true + + case error: + msg := failureMsgFromError(check) + t.Log(format.WithCustomMessage(failureMessage+msg, msgAndArgs...)) + + case cmp.Comparison: + success = RunComparison(t, argSelector, check, msgAndArgs...) + + case func() cmp.Result: + success = RunComparison(t, argSelector, check, msgAndArgs...) 
+ + default: + t.Log(fmt.Sprintf("invalid Comparison: %v (%T)", check, check)) + } + return success +} + +func runCompareFunc( + t LogT, + f func() (success bool, message string), + msgAndArgs ...interface{}, +) bool { + if ht, ok := t.(helperT); ok { + ht.Helper() + } + if success, message := f(); !success { + t.Log(format.WithCustomMessage(failureMessage+message, msgAndArgs...)) + return false + } + return true +} + +func logFailureFromBool(t LogT, msgAndArgs ...interface{}) { + if ht, ok := t.(helperT); ok { + ht.Helper() + } + const stackIndex = 3 // Assert()/Check(), assert(), logFailureFromBool() + args, err := source.CallExprArgs(stackIndex) + if err != nil { + t.Log(err.Error()) + return + } + + const comparisonArgIndex = 1 // Assert(t, comparison) + if len(args) <= comparisonArgIndex { + t.Log(failureMessage + "but assert failed to find the expression to print") + return + } + + msg, err := boolFailureMessage(args[comparisonArgIndex]) + if err != nil { + t.Log(err.Error()) + msg = "expression is false" + } + + t.Log(format.WithCustomMessage(failureMessage+msg, msgAndArgs...)) +} + +func failureMsgFromError(err error) string { + // Handle errors with non-nil types + v := reflect.ValueOf(err) + if v.Kind() == reflect.Ptr && v.IsNil() { + return fmt.Sprintf("error is not nil: error has type %T", err) + } + return "error is not nil: " + err.Error() +} + +func boolFailureMessage(expr ast.Expr) (string, error) { + if binaryExpr, ok := expr.(*ast.BinaryExpr); ok && binaryExpr.Op == token.NEQ { + x, err := source.FormatNode(binaryExpr.X) + if err != nil { + return "", err + } + y, err := source.FormatNode(binaryExpr.Y) + if err != nil { + return "", err + } + return x + " is " + y, nil + } + + if unaryExpr, ok := expr.(*ast.UnaryExpr); ok && unaryExpr.Op == token.NOT { + x, err := source.FormatNode(unaryExpr.X) + if err != nil { + return "", err + } + return x + " is true", nil + } + + formatted, err := source.FormatNode(expr) + if err != nil { + return "", err + } + return "expression is false: " + formatted, nil +} diff --git a/vendor/gotest.tools/v3/assert/result.go b/vendor/gotest.tools/v3/internal/assert/result.go similarity index 69% rename from vendor/gotest.tools/v3/assert/result.go rename to vendor/gotest.tools/v3/internal/assert/result.go index d77860b58..20cd54129 100644 --- a/vendor/gotest.tools/v3/assert/result.go +++ b/vendor/gotest.tools/v3/internal/assert/result.go @@ -9,8 +9,10 @@ import ( "gotest.tools/v3/internal/source" ) -func runComparison( - t TestingT, +// RunComparison and return Comparison.Success. If the comparison fails a messages +// will be printed using t.Log. +func RunComparison( + t LogT, argSelector argSelector, f cmp.Comparison, msgAndArgs ...interface{}, @@ -26,7 +28,7 @@ func runComparison( var message string switch typed := result.(type) { case resultWithComparisonArgs: - const stackIndex = 3 // Assert/Check, assert, runComparison + const stackIndex = 3 // Assert/Check, assert, RunComparison args, err := source.CallExprArgs(stackIndex) if err != nil { t.Log(err.Error()) @@ -88,15 +90,20 @@ func isShortPrintableExpr(expr ast.Expr) bool { type argSelector func([]ast.Expr) []ast.Expr -func argsAfterT(args []ast.Expr) []ast.Expr { +// ArgsAfterT selects args starting at position 1. Used when the caller has a +// testing.T as the first argument, and the args to select should follow it. 
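The cmp.Comparison branch of Eval is the path taken by hand-written comparisons. A hedged sketch of such a comparison (isEven is an invented name) used through the public Assert entry point:

    package example_test

    import (
        "fmt"
        "testing"

        "gotest.tools/v3/assert"
        "gotest.tools/v3/assert/cmp"
    )

    // isEven is a custom comparison; a failing Result carries its own message.
    func isEven(n int) cmp.Comparison {
        return func() cmp.Result {
            if n%2 == 0 {
                return cmp.ResultSuccess
            }
            return cmp.ResultFailure(fmt.Sprintf("%d is not even", n))
        }
    }

    func TestIsEven(t *testing.T) {
        assert.Assert(t, isEven(4))
    }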
+func ArgsAfterT(args []ast.Expr) []ast.Expr { if len(args) < 1 { return nil } return args[1:] } -func argsFromComparisonCall(args []ast.Expr) []ast.Expr { - if len(args) < 1 { +// ArgsFromComparisonCall selects args from the CallExpression at position 1. +// Used when the caller has a testing.T as the first argument, and the args to +// select are passed to the cmp.Comparison at position 1. +func ArgsFromComparisonCall(args []ast.Expr) []ast.Expr { + if len(args) <= 1 { return nil } if callExpr, ok := args[1].(*ast.CallExpr); ok { @@ -104,3 +111,15 @@ func argsFromComparisonCall(args []ast.Expr) []ast.Expr { } return nil } + +// ArgsAtZeroIndex selects args from the CallExpression at position 1. +// Used when the caller accepts a single cmp.Comparison argument. +func ArgsAtZeroIndex(args []ast.Expr) []ast.Expr { + if len(args) == 0 { + return nil + } + if callExpr, ok := args[0].(*ast.CallExpr); ok { + return callExpr.Args + } + return nil +} diff --git a/vendor/gotest.tools/v3/internal/cleanup/cleanup.go b/vendor/gotest.tools/v3/internal/cleanup/cleanup.go index 7e2922f40..58206e57f 100644 --- a/vendor/gotest.tools/v3/internal/cleanup/cleanup.go +++ b/vendor/gotest.tools/v3/internal/cleanup/cleanup.go @@ -6,14 +6,17 @@ package cleanup import ( "os" "strings" - - "gotest.tools/v3/x/subtest" ) type cleanupT interface { Cleanup(f func()) } +// implemented by gotest.tools/x/subtest.TestContext +type addCleanupT interface { + AddCleanup(f func()) +} + type logT interface { Log(...interface{}) } @@ -39,7 +42,7 @@ func Cleanup(t logT, f func()) { ct.Cleanup(f) return } - if tc, ok := t.(subtest.TestContext); ok { + if tc, ok := t.(addCleanupT); ok { tc.AddCleanup(f) } } diff --git a/vendor/gotest.tools/v3/internal/source/source.go b/vendor/gotest.tools/v3/internal/source/source.go index 134afb6ba..c2eef0337 100644 --- a/vendor/gotest.tools/v3/internal/source/source.go +++ b/vendor/gotest.tools/v3/internal/source/source.go @@ -93,8 +93,9 @@ func nodePosition(fileset *token.FileSet, node ast.Node) token.Position { } // GoVersionLessThan returns true if runtime.Version() is semantically less than -// version 1.minor. -func GoVersionLessThan(minor int64) bool { +// version major.minor. Returns false if a release version can not be parsed from +// runtime.Version(). +func GoVersionLessThan(major, minor int64) bool { version := runtime.Version() // not a release version if !strings.HasPrefix(version, "go") { @@ -105,11 +106,21 @@ func GoVersionLessThan(minor int64) bool { if len(parts) < 2 { return false } - actual, err := strconv.ParseInt(parts[1], 10, 32) - return err == nil && parts[0] == "1" && actual < minor + rMajor, err := strconv.ParseInt(parts[0], 10, 32) + if err != nil { + return false + } + if rMajor != major { + return rMajor < major + } + rMinor, err := strconv.ParseInt(parts[1], 10, 32) + if err != nil { + return false + } + return rMinor < minor } -var goVersionBefore19 = GoVersionLessThan(9) +var goVersionBefore19 = GoVersionLessThan(1, 9) func getCallExprArgs(node ast.Node) ([]ast.Expr, error) { visitor := &callExprVisitor{} diff --git a/vendor/gotest.tools/v3/x/subtest/context.go b/vendor/gotest.tools/v3/x/subtest/context.go deleted file mode 100644 index 5750bf409..000000000 --- a/vendor/gotest.tools/v3/x/subtest/context.go +++ /dev/null @@ -1,84 +0,0 @@ -/*Package subtest provides a TestContext to subtests which handles cleanup, and -provides a testing.TB, and context.Context. - -This package was inspired by github.com/frankban/quicktest. 
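The reworked GoVersionLessThan compares both the major and minor components of runtime.Version(). A standalone sketch mirroring that logic outside the internal package (the lowercase name and the main wrapper are assumptions for illustration):

    package main

    import (
        "fmt"
        "runtime"
        "strconv"
        "strings"
    )

    // goVersionLessThan reports whether the running Go release is older than
    // major.minor; it returns false for non-release versions such as devel builds.
    func goVersionLessThan(major, minor int64) bool {
        version := runtime.Version()
        if !strings.HasPrefix(version, "go") {
            return false
        }
        parts := strings.Split(version[2:], ".")
        if len(parts) < 2 {
            return false
        }
        rMajor, err := strconv.ParseInt(parts[0], 10, 32)
        if err != nil {
            return false
        }
        if rMajor != major {
            return rMajor < major
        }
        rMinor, err := strconv.ParseInt(parts[1], 10, 32)
        if err != nil {
            return false
        }
        return rMinor < minor
    }

    func main() {
        fmt.Println(goVersionLessThan(1, 9)) // false on any modern toolchain
    }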
-*/ -package subtest // import "gotest.tools/v3/x/subtest" - -import ( - "context" - "testing" -) - -type testcase struct { - testing.TB - ctx context.Context - cleanupFuncs []cleanupFunc -} - -type cleanupFunc func() - -func (tc *testcase) Ctx() context.Context { - if tc.ctx == nil { - var cancel func() - tc.ctx, cancel = context.WithCancel(context.Background()) - tc.AddCleanup(cancel) - } - return tc.ctx -} - -// cleanup runs all cleanup functions. Functions are run in the opposite order -// in which they were added. Cleanup is called automatically before Run exits. -func (tc *testcase) cleanup() { - for _, f := range tc.cleanupFuncs { - // Defer all cleanup functions so they all run even if one calls - // t.FailNow() or panics. Deferring them also runs them in reverse order. - defer f() - } - tc.cleanupFuncs = nil -} - -func (tc *testcase) AddCleanup(f func()) { - tc.cleanupFuncs = append(tc.cleanupFuncs, f) -} - -func (tc *testcase) Parallel() { - tp, ok := tc.TB.(parallel) - if !ok { - panic("Parallel called with a testing.B") - } - tp.Parallel() -} - -type parallel interface { - Parallel() -} - -// Run a subtest. When subtest exits, every cleanup function added with -// TestContext.AddCleanup will be run. -func Run(t *testing.T, name string, subtest func(t TestContext)) bool { - return t.Run(name, func(t *testing.T) { - tc := &testcase{TB: t} - defer tc.cleanup() - subtest(tc) - }) -} - -// TestContext provides a testing.TB and a context.Context for a test case. -type TestContext interface { - testing.TB - // AddCleanup function which will be run when before Run returns. - // - // Deprecated: Go 1.14+ now includes a testing.TB.Cleanup(func()) which - // should be used instead. AddCleanup will be removed in a future release. - AddCleanup(f func()) - // Ctx returns a context for the test case. Multiple calls from the same subtest - // will return the same context. The context is cancelled when Run - // returns. - Ctx() context.Context - // Parallel calls t.Parallel on the testing.TB. Panics if testing.TB does - // not implement Parallel. 
- Parallel() -} - -var _ TestContext = &testcase{} diff --git a/vendor/modules.txt b/vendor/modules.txt index 0a35f8440..925f5de96 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,4 +1,12 @@ -# github.com/aws/aws-sdk-go v1.40.25 +# github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 +## explicit; go 1.16 +github.com/Azure/go-ansiterm +github.com/Azure/go-ansiterm/winterm +# github.com/Microsoft/go-winio v0.5.2 +## explicit; go 1.13 +github.com/Microsoft/go-winio +github.com/Microsoft/go-winio/pkg/guid +# github.com/aws/aws-sdk-go v1.40.44 ## explicit; go 1.11 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn @@ -53,27 +61,90 @@ github.com/aws/aws-sdk-go/service/sso github.com/aws/aws-sdk-go/service/sso/ssoiface github.com/aws/aws-sdk-go/service/sts github.com/aws/aws-sdk-go/service/sts/stsiface +# github.com/cenkalti/backoff/v4 v4.2.0 +## explicit; go 1.18 +github.com/cenkalti/backoff/v4 +# github.com/containerd/containerd v1.6.19 +## explicit; go 1.17 +github.com/containerd/containerd/errdefs +github.com/containerd/containerd/log +github.com/containerd/containerd/pkg/userns +github.com/containerd/containerd/platforms +# github.com/cpuguy83/dockercfg v0.3.1 +## explicit; go 1.13 +github.com/cpuguy83/dockercfg # github.com/cpuguy83/go-md2man/v2 v2.0.2 ## explicit; go 1.11 github.com/cpuguy83/go-md2man/v2/md2man -# github.com/davecgh/go-spew v1.1.1 +# github.com/docker/distribution v2.8.2+incompatible +## explicit +github.com/docker/distribution/digestset +github.com/docker/distribution/reference +# github.com/docker/docker v23.0.5+incompatible +## explicit +github.com/docker/docker/api +github.com/docker/docker/api/types +github.com/docker/docker/api/types/blkiodev +github.com/docker/docker/api/types/container +github.com/docker/docker/api/types/events +github.com/docker/docker/api/types/filters +github.com/docker/docker/api/types/image +github.com/docker/docker/api/types/mount +github.com/docker/docker/api/types/network +github.com/docker/docker/api/types/registry +github.com/docker/docker/api/types/strslice +github.com/docker/docker/api/types/swarm +github.com/docker/docker/api/types/swarm/runtime +github.com/docker/docker/api/types/time +github.com/docker/docker/api/types/versions +github.com/docker/docker/api/types/volume +github.com/docker/docker/client +github.com/docker/docker/errdefs +github.com/docker/docker/pkg/archive +github.com/docker/docker/pkg/idtools +github.com/docker/docker/pkg/ioutils +github.com/docker/docker/pkg/jsonmessage +github.com/docker/docker/pkg/longpath +github.com/docker/docker/pkg/pools +github.com/docker/docker/pkg/stdcopy +github.com/docker/docker/pkg/system +# github.com/docker/go-connections v0.4.0 +## explicit +github.com/docker/go-connections/nat +github.com/docker/go-connections/sockets +github.com/docker/go-connections/tlsconfig +# github.com/docker/go-units v0.5.0 ## explicit +github.com/docker/go-units +# github.com/gogo/protobuf v1.3.2 +## explicit; go 1.15 +github.com/gogo/protobuf/proto # github.com/golang/mock v1.6.0 ## explicit; go 1.11 github.com/golang/mock/gomock -# github.com/google/go-cmp v0.4.0 -## explicit; go 1.8 +# github.com/golang/protobuf v1.5.2 +## explicit; go 1.9 +github.com/golang/protobuf/proto +github.com/golang/protobuf/ptypes +github.com/golang/protobuf/ptypes/any +github.com/golang/protobuf/ptypes/duration +github.com/golang/protobuf/ptypes/timestamp +# github.com/google/go-cmp v0.5.9 +## explicit; go 1.13 github.com/google/go-cmp/cmp github.com/google/go-cmp/cmp/cmpopts 
github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/hashicorp/errwrap v1.0.0 +# github.com/google/uuid v1.3.0 ## explicit -github.com/hashicorp/errwrap -# github.com/hashicorp/go-multierror v1.0.0 +github.com/google/uuid +# github.com/hashicorp/errwrap v1.1.0 ## explicit +github.com/hashicorp/errwrap +# github.com/hashicorp/go-multierror v1.1.1 +## explicit; go 1.13 github.com/hashicorp/go-multierror # github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334 ## explicit @@ -85,6 +156,9 @@ github.com/igungor/gofakes3/backend/s3bolt github.com/igungor/gofakes3/backend/s3mem github.com/igungor/gofakes3/internal/goskipiter github.com/igungor/gofakes3/internal/s3io +# github.com/imdario/mergo v0.3.15 +## explicit; go 1.13 +github.com/imdario/mergo # github.com/jmespath/go-jmespath v0.4.0 ## explicit; go 1.14 github.com/jmespath/go-jmespath @@ -94,13 +168,46 @@ github.com/karrick/godirwalk # github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 ## explicit github.com/kballard/go-shellquote -# github.com/kr/pretty v0.3.0 -## explicit; go 1.12 +# github.com/klauspost/compress v1.11.13 +## explicit; go 1.13 +github.com/klauspost/compress/fse +github.com/klauspost/compress/huff0 +github.com/klauspost/compress/snappy +github.com/klauspost/compress/zstd +github.com/klauspost/compress/zstd/internal/xxhash +# github.com/kr/text v0.2.0 +## explicit # github.com/lanrat/extsort v1.0.0 ## explicit; go 1.13 github.com/lanrat/extsort github.com/lanrat/extsort/queue github.com/lanrat/extsort/tempfile +# github.com/magiconair/properties v1.8.7 +## explicit; go 1.19 +github.com/magiconair/properties +# github.com/moby/patternmatcher v0.5.0 +## explicit; go 1.19 +github.com/moby/patternmatcher +# github.com/moby/sys/sequential v0.5.0 +## explicit; go 1.17 +github.com/moby/sys/sequential +# github.com/moby/term v0.5.0 +## explicit; go 1.18 +github.com/moby/term +github.com/moby/term/windows +# github.com/morikuni/aec v1.0.0 +## explicit +github.com/morikuni/aec +# github.com/opencontainers/go-digest v1.0.0 +## explicit; go 1.13 +github.com/opencontainers/go-digest +# github.com/opencontainers/image-spec v1.1.0-rc2 +## explicit; go 1.17 +github.com/opencontainers/image-spec/specs-go +github.com/opencontainers/image-spec/specs-go/v1 +# github.com/opencontainers/runc v1.1.5 +## explicit; go 1.16 +github.com/opencontainers/runc/libcontainer/user # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors @@ -113,9 +220,21 @@ github.com/ryszard/goskiplist/skiplist # github.com/shabbyrobe/gocovmerge v0.0.0-20180507124511-f6ea450bfb63 ## explicit github.com/shabbyrobe/gocovmerge +# github.com/sirupsen/logrus v1.9.0 +## explicit; go 1.13 +github.com/sirupsen/logrus # github.com/termie/go-shutil v0.0.0-20140729215957-bcacb06fecae ## explicit github.com/termie/go-shutil +# github.com/testcontainers/testcontainers-go v0.21.0 +## explicit; go 1.19 +github.com/testcontainers/testcontainers-go +github.com/testcontainers/testcontainers-go/exec +github.com/testcontainers/testcontainers-go/internal +github.com/testcontainers/testcontainers-go/internal/config +github.com/testcontainers/testcontainers-go/internal/testcontainersdocker +github.com/testcontainers/testcontainers-go/internal/testcontainerssession +github.com/testcontainers/testcontainers-go/wait # github.com/urfave/cli/v2 v2.11.2 ## explicit; go 1.18 github.com/urfave/cli/v2 @@ -125,34 +244,78 @@ 
github.com/xrash/smetrics # go.etcd.io/bbolt v1.3.6 ## explicit; go 1.12 go.etcd.io/bbolt -# golang.org/x/sync v0.0.0-20210220032951-036812b2e83c +# golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea +## explicit; go 1.20 +golang.org/x/exp/constraints +golang.org/x/exp/slices +# golang.org/x/net v0.7.0 +## explicit; go 1.17 +golang.org/x/net/internal/socks +golang.org/x/net/proxy +# golang.org/x/sync v0.1.0 ## explicit golang.org/x/sync/errgroup -# golang.org/x/sys v0.0.0-20220405210540-1e041c57c461 +# golang.org/x/sys v0.8.0 ## explicit; go 1.17 +golang.org/x/sys/execabs golang.org/x/sys/internal/unsafeheader golang.org/x/sys/unix -# golang.org/x/tools v0.1.1 -## explicit; go 1.17 +golang.org/x/sys/windows +# golang.org/x/tools v0.2.0 +## explicit; go 1.18 golang.org/x/tools/cover -# golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 +# google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad +## explicit; go 1.15 +google.golang.org/genproto/googleapis/rpc/status +# google.golang.org/grpc v1.47.0 +## explicit; go 1.14 +google.golang.org/grpc/codes +google.golang.org/grpc/internal/status +google.golang.org/grpc/status +# google.golang.org/protobuf v1.28.0 ## explicit; go 1.11 -golang.org/x/xerrors -golang.org/x/xerrors/internal -# gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 -## explicit +google.golang.org/protobuf/encoding/prototext +google.golang.org/protobuf/encoding/protowire +google.golang.org/protobuf/internal/descfmt +google.golang.org/protobuf/internal/descopts +google.golang.org/protobuf/internal/detrand +google.golang.org/protobuf/internal/encoding/defval +google.golang.org/protobuf/internal/encoding/messageset +google.golang.org/protobuf/internal/encoding/tag +google.golang.org/protobuf/internal/encoding/text +google.golang.org/protobuf/internal/errors +google.golang.org/protobuf/internal/filedesc +google.golang.org/protobuf/internal/filetype +google.golang.org/protobuf/internal/flags +google.golang.org/protobuf/internal/genid +google.golang.org/protobuf/internal/impl +google.golang.org/protobuf/internal/order +google.golang.org/protobuf/internal/pragma +google.golang.org/protobuf/internal/set +google.golang.org/protobuf/internal/strs +google.golang.org/protobuf/internal/version +google.golang.org/protobuf/proto +google.golang.org/protobuf/reflect/protodesc +google.golang.org/protobuf/reflect/protoreflect +google.golang.org/protobuf/reflect/protoregistry +google.golang.org/protobuf/runtime/protoiface +google.golang.org/protobuf/runtime/protoimpl +google.golang.org/protobuf/types/descriptorpb +google.golang.org/protobuf/types/known/anypb +google.golang.org/protobuf/types/known/durationpb +google.golang.org/protobuf/types/known/timestamppb # gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce ## explicit gopkg.in/mgo.v2/bson gopkg.in/mgo.v2/internal/json -# gotest.tools/v3 v3.0.2 +# gotest.tools/v3 v3.0.3 ## explicit; go 1.11 gotest.tools/v3/assert gotest.tools/v3/assert/cmp gotest.tools/v3/fs gotest.tools/v3/icmd +gotest.tools/v3/internal/assert gotest.tools/v3/internal/cleanup gotest.tools/v3/internal/difflib gotest.tools/v3/internal/format gotest.tools/v3/internal/source -gotest.tools/v3/x/subtest From fc2fdc8e2aa70667b7049ee908b03f578d4ebe92 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Wed, 26 Jul 2023 12:57:40 +0300 Subject: [PATCH 08/50] fix: conflicts --- e2e/select_test.go | 52 - go.mod | 34 +- go.sum | 238 +- vendor/github.com/Azure/go-ansiterm/LICENSE | 21 - vendor/github.com/Azure/go-ansiterm/README.md | 12 - 
.../github.com/Azure/go-ansiterm/constants.go | 188 - .../github.com/Azure/go-ansiterm/context.go | 7 - .../Azure/go-ansiterm/csi_entry_state.go | 49 - .../Azure/go-ansiterm/csi_param_state.go | 38 - .../go-ansiterm/escape_intermediate_state.go | 36 - .../Azure/go-ansiterm/escape_state.go | 47 - .../Azure/go-ansiterm/event_handler.go | 90 - .../Azure/go-ansiterm/ground_state.go | 24 - .../Azure/go-ansiterm/osc_string_state.go | 31 - vendor/github.com/Azure/go-ansiterm/parser.go | 151 - .../go-ansiterm/parser_action_helpers.go | 99 - .../Azure/go-ansiterm/parser_actions.go | 119 - vendor/github.com/Azure/go-ansiterm/states.go | 71 - .../github.com/Azure/go-ansiterm/utilities.go | 21 - .../Azure/go-ansiterm/winterm/ansi.go | 196 - .../Azure/go-ansiterm/winterm/api.go | 327 - .../go-ansiterm/winterm/attr_translation.go | 100 - .../go-ansiterm/winterm/cursor_helpers.go | 101 - .../go-ansiterm/winterm/erase_helpers.go | 84 - .../go-ansiterm/winterm/scroll_helper.go | 118 - .../Azure/go-ansiterm/winterm/utilities.go | 9 - .../go-ansiterm/winterm/win_event_handler.go | 743 - .../github.com/Microsoft/go-winio/.gitignore | 1 - .../github.com/Microsoft/go-winio/CODEOWNERS | 1 - vendor/github.com/Microsoft/go-winio/LICENSE | 22 - .../github.com/Microsoft/go-winio/README.md | 37 - .../github.com/Microsoft/go-winio/backup.go | 280 - vendor/github.com/Microsoft/go-winio/ea.go | 137 - vendor/github.com/Microsoft/go-winio/file.go | 329 - .../github.com/Microsoft/go-winio/fileinfo.go | 73 - .../github.com/Microsoft/go-winio/hvsock.go | 316 - vendor/github.com/Microsoft/go-winio/pipe.go | 517 - .../Microsoft/go-winio/pkg/guid/guid.go | 228 - .../go-winio/pkg/guid/guid_nonwindows.go | 15 - .../go-winio/pkg/guid/guid_windows.go | 10 - .../Microsoft/go-winio/privilege.go | 203 - .../github.com/Microsoft/go-winio/reparse.go | 128 - vendor/github.com/Microsoft/go-winio/sd.go | 98 - .../github.com/Microsoft/go-winio/syscall.go | 3 - .../Microsoft/go-winio/zsyscall_windows.go | 427 - .../github.com/cenkalti/backoff/v4/.gitignore | 25 - vendor/github.com/cenkalti/backoff/v4/LICENSE | 20 - .../github.com/cenkalti/backoff/v4/README.md | 32 - .../github.com/cenkalti/backoff/v4/backoff.go | 66 - .../github.com/cenkalti/backoff/v4/context.go | 62 - .../cenkalti/backoff/v4/exponential.go | 161 - .../github.com/cenkalti/backoff/v4/retry.go | 146 - .../github.com/cenkalti/backoff/v4/ticker.go | 97 - .../github.com/cenkalti/backoff/v4/timer.go | 35 - .../github.com/cenkalti/backoff/v4/tries.go | 38 - .../github.com/containerd/containerd/LICENSE | 191 - .../github.com/containerd/containerd/NOTICE | 16 - .../containerd/containerd/errdefs/errors.go | 92 - .../containerd/containerd/errdefs/grpc.go | 147 - .../containerd/containerd/log/context.go | 69 - .../containerd/pkg/userns/userns_linux.go | 62 - .../pkg/userns/userns_unsupported.go | 26 - .../containerd/platforms/compare.go | 203 - .../containerd/platforms/cpuinfo.go | 131 - .../containerd/platforms/database.go | 116 - .../containerd/platforms/defaults.go | 27 - .../containerd/platforms/defaults_darwin.go | 45 - .../containerd/platforms/defaults_unix.go | 41 - .../containerd/platforms/defaults_windows.go | 95 - .../containerd/platforms/platforms.go | 261 - vendor/github.com/cpuguy83/dockercfg/LICENSE | 21 - .../github.com/cpuguy83/dockercfg/README.md | 8 - vendor/github.com/cpuguy83/dockercfg/auth.go | 184 - .../github.com/cpuguy83/dockercfg/config.go | 65 - vendor/github.com/cpuguy83/dockercfg/load.go | 51 - vendor/github.com/docker/distribution/LICENSE | 202 - 
.../docker/distribution/digestset/set.go | 247 - .../docker/distribution/reference/helpers.go | 42 - .../distribution/reference/normalize.go | 199 - .../distribution/reference/reference.go | 433 - .../docker/distribution/reference/regexp.go | 143 - vendor/github.com/docker/docker/AUTHORS | 2372 --- vendor/github.com/docker/docker/LICENSE | 191 - vendor/github.com/docker/docker/NOTICE | 19 - vendor/github.com/docker/docker/api/README.md | 42 - vendor/github.com/docker/docker/api/common.go | 11 - .../docker/docker/api/common_unix.go | 7 - .../docker/docker/api/common_windows.go | 8 - .../docker/docker/api/swagger-gen.yaml | 12 - .../github.com/docker/docker/api/swagger.yaml | 12152 ---------------- .../docker/docker/api/types/auth.go | 22 - .../docker/docker/api/types/blkiodev/blkio.go | 23 - .../docker/docker/api/types/client.go | 443 - .../docker/docker/api/types/configs.go | 67 - .../docker/api/types/container/config.go | 96 - .../api/types/container/container_changes.go | 20 - .../api/types/container/container_top.go | 22 - .../api/types/container/container_update.go | 16 - .../api/types/container/create_response.go | 19 - .../docker/api/types/container/deprecated.go | 16 - .../docker/api/types/container/host_config.go | 465 - .../api/types/container/hostconfig_unix.go | 42 - .../api/types/container/hostconfig_windows.go | 40 - .../api/types/container/wait_exit_error.go | 12 - .../api/types/container/wait_response.go | 18 - .../api/types/container/waitcondition.go | 22 - .../docker/docker/api/types/deprecated.go | 14 - .../docker/docker/api/types/error_response.go | 13 - .../docker/api/types/error_response_ext.go | 6 - .../docker/docker/api/types/events/events.go | 47 - .../docker/docker/api/types/filters/parse.go | 323 - .../docker/api/types/graph_driver_data.go | 23 - .../docker/docker/api/types/id_response.go | 13 - .../docker/api/types/image/image_history.go | 36 - .../api/types/image_delete_response_item.go | 15 - .../docker/docker/api/types/image_summary.go | 97 - .../docker/docker/api/types/mount/mount.go | 140 - .../docker/api/types/network/network.go | 126 - .../docker/docker/api/types/plugin.go | 203 - .../docker/docker/api/types/plugin_device.go | 25 - .../docker/docker/api/types/plugin_env.go | 25 - .../docker/api/types/plugin_interface_type.go | 21 - .../docker/docker/api/types/plugin_mount.go | 37 - .../docker/api/types/plugin_responses.go | 71 - .../docker/docker/api/types/port.go | 23 - .../docker/api/types/registry/authenticate.go | 21 - .../docker/api/types/registry/registry.go | 120 - .../api/types/service_update_response.go | 12 - .../docker/docker/api/types/stats.go | 181 - .../docker/api/types/strslice/strslice.go | 30 - .../docker/docker/api/types/swarm/common.go | 48 - .../docker/docker/api/types/swarm/config.go | 40 - .../docker/api/types/swarm/container.go | 80 - .../docker/docker/api/types/swarm/network.go | 121 - .../docker/docker/api/types/swarm/node.go | 139 - .../docker/docker/api/types/swarm/runtime.go | 27 - .../docker/api/types/swarm/runtime/gen.go | 3 - .../api/types/swarm/runtime/plugin.pb.go | 754 - .../api/types/swarm/runtime/plugin.proto | 21 - .../docker/docker/api/types/swarm/secret.go | 36 - .../docker/docker/api/types/swarm/service.go | 202 - .../docker/docker/api/types/swarm/swarm.go | 237 - .../docker/docker/api/types/swarm/task.go | 225 - .../docker/docker/api/types/time/timestamp.go | 131 - .../docker/docker/api/types/types.go | 809 - .../docker/api/types/versions/README.md | 14 - .../docker/api/types/versions/compare.go | 65 - 
.../docker/api/types/volume/cluster_volume.go | 420 - .../docker/api/types/volume/create_options.go | 29 - .../docker/api/types/volume/deprecated.go | 11 - .../docker/api/types/volume/list_response.go | 18 - .../docker/docker/api/types/volume/options.go | 8 - .../docker/docker/api/types/volume/volume.go | 75 - .../docker/api/types/volume/volume_update.go | 7 - .../github.com/docker/docker/client/README.md | 35 - .../docker/docker/client/build_cancel.go | 16 - .../docker/docker/client/build_prune.go | 45 - .../docker/docker/client/checkpoint_create.go | 14 - .../docker/docker/client/checkpoint_delete.go | 20 - .../docker/docker/client/checkpoint_list.go | 28 - .../github.com/docker/docker/client/client.go | 321 - .../docker/docker/client/client_deprecated.go | 27 - .../docker/docker/client/client_unix.go | 11 - .../docker/docker/client/client_windows.go | 8 - .../docker/docker/client/config_create.go | 25 - .../docker/docker/client/config_inspect.go | 36 - .../docker/docker/client/config_list.go | 38 - .../docker/docker/client/config_remove.go | 13 - .../docker/docker/client/config_update.go | 20 - .../docker/docker/client/container_attach.go | 59 - .../docker/docker/client/container_commit.go | 55 - .../docker/docker/client/container_copy.go | 93 - .../docker/docker/client/container_create.go | 83 - .../docker/docker/client/container_diff.go | 23 - .../docker/docker/client/container_exec.go | 66 - .../docker/docker/client/container_export.go | 19 - .../docker/docker/client/container_inspect.go | 53 - .../docker/docker/client/container_kill.go | 18 - .../docker/docker/client/container_list.go | 57 - .../docker/docker/client/container_logs.go | 80 - .../docker/docker/client/container_pause.go | 10 - .../docker/docker/client/container_prune.go | 36 - .../docker/docker/client/container_remove.go | 27 - .../docker/docker/client/container_rename.go | 15 - .../docker/docker/client/container_resize.go | 29 - .../docker/docker/client/container_restart.go | 26 - .../docker/docker/client/container_start.go | 23 - .../docker/docker/client/container_stats.go | 42 - .../docker/docker/client/container_stop.go | 30 - .../docker/docker/client/container_top.go | 28 - .../docker/docker/client/container_unpause.go | 10 - .../docker/docker/client/container_update.go | 21 - .../docker/docker/client/container_wait.go | 104 - .../docker/docker/client/disk_usage.go | 33 - .../docker/client/distribution_inspect.go | 38 - .../docker/docker/client/envvars.go | 90 - .../github.com/docker/docker/client/errors.go | 93 - .../github.com/docker/docker/client/events.go | 101 - .../github.com/docker/docker/client/hijack.go | 153 - .../docker/docker/client/image_build.go | 146 - .../docker/docker/client/image_create.go | 37 - .../docker/docker/client/image_history.go | 22 - .../docker/docker/client/image_import.go | 40 - .../docker/docker/client/image_inspect.go | 32 - .../docker/docker/client/image_list.go | 49 - .../docker/docker/client/image_load.go | 29 - .../docker/docker/client/image_prune.go | 36 - .../docker/docker/client/image_pull.go | 64 - .../docker/docker/client/image_push.go | 54 - .../docker/docker/client/image_remove.go | 31 - .../docker/docker/client/image_save.go | 21 - .../docker/docker/client/image_search.go | 53 - .../docker/docker/client/image_tag.go | 37 - .../github.com/docker/docker/client/info.go | 26 - .../docker/docker/client/interface.go | 201 - .../docker/client/interface_experimental.go | 18 - .../docker/docker/client/interface_stable.go | 10 - .../github.com/docker/docker/client/login.go | 25 - 
.../docker/docker/client/network_connect.go | 19 - .../docker/docker/client/network_create.go | 25 - .../docker/client/network_disconnect.go | 15 - .../docker/docker/client/network_inspect.go | 49 - .../docker/docker/client/network_list.go | 32 - .../docker/docker/client/network_prune.go | 36 - .../docker/docker/client/network_remove.go | 10 - .../docker/docker/client/node_inspect.go | 32 - .../docker/docker/client/node_list.go | 36 - .../docker/docker/client/node_remove.go | 20 - .../docker/docker/client/node_update.go | 17 - .../docker/docker/client/options.go | 210 - .../github.com/docker/docker/client/ping.go | 75 - .../docker/docker/client/plugin_create.go | 23 - .../docker/docker/client/plugin_disable.go | 19 - .../docker/docker/client/plugin_enable.go | 19 - .../docker/docker/client/plugin_inspect.go | 31 - .../docker/docker/client/plugin_install.go | 113 - .../docker/docker/client/plugin_list.go | 33 - .../docker/docker/client/plugin_push.go | 16 - .../docker/docker/client/plugin_remove.go | 20 - .../docker/docker/client/plugin_set.go | 12 - .../docker/docker/client/plugin_upgrade.go | 39 - .../docker/docker/client/request.go | 277 - .../docker/docker/client/secret_create.go | 25 - .../docker/docker/client/secret_inspect.go | 36 - .../docker/docker/client/secret_list.go | 38 - .../docker/docker/client/secret_remove.go | 13 - .../docker/docker/client/secret_update.go | 20 - .../docker/docker/client/service_create.go | 178 - .../docker/docker/client/service_inspect.go | 37 - .../docker/docker/client/service_list.go | 39 - .../docker/docker/client/service_logs.go | 52 - .../docker/docker/client/service_remove.go | 10 - .../docker/docker/client/service_update.go | 74 - .../docker/client/swarm_get_unlock_key.go | 21 - .../docker/docker/client/swarm_init.go | 21 - .../docker/docker/client/swarm_inspect.go | 21 - .../docker/docker/client/swarm_join.go | 14 - .../docker/docker/client/swarm_leave.go | 17 - .../docker/docker/client/swarm_unlock.go | 14 - .../docker/docker/client/swarm_update.go | 21 - .../docker/docker/client/task_inspect.go | 32 - .../docker/docker/client/task_list.go | 35 - .../docker/docker/client/task_logs.go | 51 - .../docker/docker/client/transport.go | 17 - .../github.com/docker/docker/client/utils.go | 34 - .../docker/docker/client/version.go | 21 - .../docker/docker/client/volume_create.go | 20 - .../docker/docker/client/volume_inspect.go | 38 - .../docker/docker/client/volume_list.go | 33 - .../docker/docker/client/volume_prune.go | 36 - .../docker/docker/client/volume_remove.go | 21 - .../docker/docker/client/volume_update.go | 24 - .../github.com/docker/docker/errdefs/defs.go | 69 - .../github.com/docker/docker/errdefs/doc.go | 8 - .../docker/docker/errdefs/helpers.go | 279 - .../docker/docker/errdefs/http_helpers.go | 46 - vendor/github.com/docker/docker/errdefs/is.go | 107 - .../docker/docker/pkg/archive/README.md | 1 - .../docker/docker/pkg/archive/archive.go | 1474 -- .../docker/pkg/archive/archive_linux.go | 100 - .../docker/pkg/archive/archive_other.go | 8 - .../docker/docker/pkg/archive/archive_unix.go | 125 - .../docker/pkg/archive/archive_windows.go | 67 - .../docker/docker/pkg/archive/changes.go | 442 - .../docker/pkg/archive/changes_linux.go | 286 - .../docker/pkg/archive/changes_other.go | 98 - .../docker/docker/pkg/archive/changes_unix.go | 44 - .../docker/pkg/archive/changes_windows.go | 34 - .../docker/docker/pkg/archive/copy.go | 485 - .../docker/docker/pkg/archive/copy_unix.go | 12 - .../docker/docker/pkg/archive/copy_windows.go | 9 - 
.../docker/docker/pkg/archive/diff.go | 244 - .../docker/docker/pkg/archive/diff_unix.go | 22 - .../docker/docker/pkg/archive/diff_windows.go | 6 - .../docker/docker/pkg/archive/time_linux.go | 16 - .../docker/pkg/archive/time_unsupported.go | 17 - .../docker/docker/pkg/archive/whiteouts.go | 23 - .../docker/docker/pkg/archive/wrap.go | 59 - .../docker/docker/pkg/idtools/idtools.go | 243 - .../docker/docker/pkg/idtools/idtools_unix.go | 325 - .../docker/pkg/idtools/idtools_windows.go | 34 - .../docker/pkg/idtools/usergroupadd_linux.go | 163 - .../pkg/idtools/usergroupadd_unsupported.go | 13 - .../docker/docker/pkg/idtools/utils_unix.go | 32 - .../docker/docker/pkg/ioutils/buffer.go | 51 - .../docker/docker/pkg/ioutils/bytespipe.go | 187 - .../docker/docker/pkg/ioutils/fswriters.go | 161 - .../docker/docker/pkg/ioutils/readers.go | 151 - .../docker/docker/pkg/ioutils/temp_unix.go | 11 - .../docker/docker/pkg/ioutils/temp_windows.go | 16 - .../docker/docker/pkg/ioutils/writeflusher.go | 92 - .../docker/docker/pkg/ioutils/writers.go | 66 - .../docker/pkg/jsonmessage/jsonmessage.go | 283 - .../docker/docker/pkg/longpath/longpath.go | 26 - .../docker/docker/pkg/pools/pools.go | 137 - .../docker/docker/pkg/stdcopy/stdcopy.go | 190 - .../docker/docker/pkg/system/args_windows.go | 16 - .../docker/docker/pkg/system/chtimes.go | 31 - .../docker/pkg/system/chtimes_nowindows.go | 15 - .../docker/pkg/system/chtimes_windows.go | 26 - .../docker/docker/pkg/system/errors.go | 13 - .../docker/docker/pkg/system/filesys.go | 19 - .../docker/pkg/system/filesys_deprecated.go | 35 - .../docker/docker/pkg/system/filesys_unix.go | 17 - .../docker/pkg/system/filesys_windows.go | 118 - .../docker/docker/pkg/system/image_os.go | 10 - .../docker/docker/pkg/system/init.go | 22 - .../docker/docker/pkg/system/init_windows.go | 18 - .../docker/docker/pkg/system/lstat_unix.go | 21 - .../docker/docker/pkg/system/lstat_windows.go | 14 - .../docker/docker/pkg/system/meminfo.go | 17 - .../docker/docker/pkg/system/meminfo_linux.go | 69 - .../docker/pkg/system/meminfo_unsupported.go | 9 - .../docker/pkg/system/meminfo_windows.go | 45 - .../docker/docker/pkg/system/mknod.go | 17 - .../docker/docker/pkg/system/mknod_freebsd.go | 14 - .../docker/docker/pkg/system/mknod_unix.go | 14 - .../docker/docker/pkg/system/mknod_windows.go | 11 - .../docker/docker/pkg/system/path.go | 41 - .../docker/docker/pkg/system/path_unix.go | 17 - .../docker/docker/pkg/system/path_windows.go | 48 - .../docker/docker/pkg/system/process_unix.go | 46 - .../docker/pkg/system/process_windows.go | 18 - .../docker/docker/pkg/system/stat_bsd.go | 16 - .../docker/docker/pkg/system/stat_darwin.go | 13 - .../docker/docker/pkg/system/stat_linux.go | 20 - .../docker/docker/pkg/system/stat_openbsd.go | 13 - .../docker/docker/pkg/system/stat_solaris.go | 13 - .../docker/docker/pkg/system/stat_unix.go | 67 - .../docker/docker/pkg/system/stat_windows.go | 49 - .../docker/docker/pkg/system/utimes_unix.go | 25 - .../docker/pkg/system/utimes_unsupported.go | 11 - .../docker/docker/pkg/system/xattrs_linux.go | 37 - .../docker/pkg/system/xattrs_unsupported.go | 14 - .../github.com/docker/go-connections/LICENSE | 191 - .../docker/go-connections/nat/nat.go | 242 - .../docker/go-connections/nat/parse.go | 57 - .../docker/go-connections/nat/sort.go | 96 - .../docker/go-connections/sockets/README.md | 0 .../go-connections/sockets/inmem_socket.go | 81 - .../docker/go-connections/sockets/proxy.go | 51 - .../docker/go-connections/sockets/sockets.go | 38 - 
.../go-connections/sockets/sockets_unix.go | 35 - .../go-connections/sockets/sockets_windows.go | 27 - .../go-connections/sockets/tcp_socket.go | 22 - .../go-connections/sockets/unix_socket.go | 32 - .../go-connections/tlsconfig/certpool_go17.go | 18 - .../tlsconfig/certpool_other.go | 13 - .../docker/go-connections/tlsconfig/config.go | 254 - .../tlsconfig/config_client_ciphers.go | 17 - .../tlsconfig/config_legacy_client_ciphers.go | 15 - .../docker/go-units/CONTRIBUTING.md | 67 - vendor/github.com/docker/go-units/LICENSE | 191 - vendor/github.com/docker/go-units/MAINTAINERS | 46 - vendor/github.com/docker/go-units/README.md | 16 - vendor/github.com/docker/go-units/circle.yml | 11 - vendor/github.com/docker/go-units/duration.go | 35 - vendor/github.com/docker/go-units/size.go | 154 - vendor/github.com/docker/go-units/ulimit.go | 123 - vendor/github.com/gogo/protobuf/AUTHORS | 15 - vendor/github.com/gogo/protobuf/CONTRIBUTORS | 23 - vendor/github.com/gogo/protobuf/LICENSE | 35 - .../github.com/gogo/protobuf/proto/Makefile | 43 - .../github.com/gogo/protobuf/proto/clone.go | 258 - .../gogo/protobuf/proto/custom_gogo.go | 39 - .../github.com/gogo/protobuf/proto/decode.go | 427 - .../gogo/protobuf/proto/deprecated.go | 63 - .../github.com/gogo/protobuf/proto/discard.go | 350 - .../gogo/protobuf/proto/duration.go | 100 - .../gogo/protobuf/proto/duration_gogo.go | 49 - .../github.com/gogo/protobuf/proto/encode.go | 205 - .../gogo/protobuf/proto/encode_gogo.go | 33 - .../github.com/gogo/protobuf/proto/equal.go | 300 - .../gogo/protobuf/proto/extensions.go | 605 - .../gogo/protobuf/proto/extensions_gogo.go | 389 - vendor/github.com/gogo/protobuf/proto/lib.go | 973 -- .../gogo/protobuf/proto/lib_gogo.go | 50 - .../gogo/protobuf/proto/message_set.go | 181 - .../gogo/protobuf/proto/pointer_reflect.go | 357 - .../protobuf/proto/pointer_reflect_gogo.go | 59 - .../gogo/protobuf/proto/pointer_unsafe.go | 308 - .../protobuf/proto/pointer_unsafe_gogo.go | 56 - .../gogo/protobuf/proto/properties.go | 610 - .../gogo/protobuf/proto/properties_gogo.go | 36 - .../gogo/protobuf/proto/skip_gogo.go | 119 - .../gogo/protobuf/proto/table_marshal.go | 3009 ---- .../gogo/protobuf/proto/table_marshal_gogo.go | 388 - .../gogo/protobuf/proto/table_merge.go | 676 - .../gogo/protobuf/proto/table_unmarshal.go | 2249 --- .../protobuf/proto/table_unmarshal_gogo.go | 385 - vendor/github.com/gogo/protobuf/proto/text.go | 930 -- .../gogo/protobuf/proto/text_gogo.go | 57 - .../gogo/protobuf/proto/text_parser.go | 1018 -- .../gogo/protobuf/proto/timestamp.go | 113 - .../gogo/protobuf/proto/timestamp_gogo.go | 49 - .../gogo/protobuf/proto/wrappers.go | 1888 --- .../gogo/protobuf/proto/wrappers_gogo.go | 113 - vendor/github.com/golang/protobuf/AUTHORS | 3 - .../github.com/golang/protobuf/CONTRIBUTORS | 3 - vendor/github.com/golang/protobuf/LICENSE | 28 - .../golang/protobuf/proto/buffer.go | 324 - .../golang/protobuf/proto/defaults.go | 63 - .../golang/protobuf/proto/deprecated.go | 113 - .../golang/protobuf/proto/discard.go | 58 - .../golang/protobuf/proto/extensions.go | 356 - .../golang/protobuf/proto/properties.go | 306 - .../github.com/golang/protobuf/proto/proto.go | 167 - .../golang/protobuf/proto/registry.go | 317 - .../golang/protobuf/proto/text_decode.go | 801 - .../golang/protobuf/proto/text_encode.go | 560 - .../github.com/golang/protobuf/proto/wire.go | 78 - .../golang/protobuf/proto/wrappers.go | 34 - .../github.com/golang/protobuf/ptypes/any.go | 179 - .../golang/protobuf/ptypes/any/any.pb.go | 62 - 
.../github.com/golang/protobuf/ptypes/doc.go | 10 - .../golang/protobuf/ptypes/duration.go | 76 - .../protobuf/ptypes/duration/duration.pb.go | 63 - .../golang/protobuf/ptypes/timestamp.go | 112 - .../protobuf/ptypes/timestamp/timestamp.pb.go | 64 - vendor/github.com/google/uuid/.travis.yml | 9 - vendor/github.com/google/uuid/CONTRIBUTING.md | 10 - vendor/github.com/google/uuid/CONTRIBUTORS | 9 - vendor/github.com/google/uuid/LICENSE | 27 - vendor/github.com/google/uuid/README.md | 19 - vendor/github.com/google/uuid/dce.go | 80 - vendor/github.com/google/uuid/doc.go | 12 - vendor/github.com/google/uuid/hash.go | 53 - vendor/github.com/google/uuid/marshal.go | 38 - vendor/github.com/google/uuid/node.go | 90 - vendor/github.com/google/uuid/node_js.go | 12 - vendor/github.com/google/uuid/node_net.go | 33 - vendor/github.com/google/uuid/null.go | 118 - vendor/github.com/google/uuid/sql.go | 59 - vendor/github.com/google/uuid/time.go | 123 - vendor/github.com/google/uuid/util.go | 43 - vendor/github.com/google/uuid/uuid.go | 294 - vendor/github.com/google/uuid/version1.go | 44 - vendor/github.com/google/uuid/version4.go | 76 - .../github.com/imdario/mergo/.deepsource.toml | 12 - vendor/github.com/imdario/mergo/.gitignore | 33 - vendor/github.com/imdario/mergo/.travis.yml | 12 - .../imdario/mergo/CODE_OF_CONDUCT.md | 46 - .../github.com/imdario/mergo/CONTRIBUTING.md | 112 - vendor/github.com/imdario/mergo/LICENSE | 28 - vendor/github.com/imdario/mergo/README.md | 236 - vendor/github.com/imdario/mergo/SECURITY.md | 14 - vendor/github.com/imdario/mergo/doc.go | 143 - vendor/github.com/imdario/mergo/map.go | 178 - vendor/github.com/imdario/mergo/merge.go | 409 - vendor/github.com/imdario/mergo/mergo.go | 81 - vendor/github.com/klauspost/compress/LICENSE | 28 - .../klauspost/compress/fse/README.md | 79 - .../klauspost/compress/fse/bitreader.go | 122 - .../klauspost/compress/fse/bitwriter.go | 168 - .../klauspost/compress/fse/bytereader.go | 47 - .../klauspost/compress/fse/compress.go | 684 - .../klauspost/compress/fse/decompress.go | 374 - .../github.com/klauspost/compress/fse/fse.go | 144 - .../klauspost/compress/huff0/.gitignore | 1 - .../klauspost/compress/huff0/README.md | 89 - .../klauspost/compress/huff0/bitreader.go | 329 - .../klauspost/compress/huff0/bitwriter.go | 210 - .../klauspost/compress/huff0/bytereader.go | 54 - .../klauspost/compress/huff0/compress.go | 657 - .../klauspost/compress/huff0/decompress.go | 1164 -- .../klauspost/compress/huff0/huff0.go | 273 - .../klauspost/compress/snappy/.gitignore | 16 - .../klauspost/compress/snappy/AUTHORS | 15 - .../klauspost/compress/snappy/CONTRIBUTORS | 37 - .../klauspost/compress/snappy/LICENSE | 27 - .../klauspost/compress/snappy/README | 107 - .../klauspost/compress/snappy/decode.go | 237 - .../klauspost/compress/snappy/decode_amd64.go | 14 - .../klauspost/compress/snappy/decode_amd64.s | 482 - .../klauspost/compress/snappy/decode_other.go | 115 - .../klauspost/compress/snappy/encode.go | 285 - .../klauspost/compress/snappy/encode_amd64.go | 29 - .../klauspost/compress/snappy/encode_amd64.s | 730 - .../klauspost/compress/snappy/encode_other.go | 238 - .../klauspost/compress/snappy/runbench.cmd | 2 - .../klauspost/compress/snappy/snappy.go | 98 - .../klauspost/compress/zstd/README.md | 417 - .../klauspost/compress/zstd/bitreader.go | 136 - .../klauspost/compress/zstd/bitwriter.go | 169 - .../klauspost/compress/zstd/blockdec.go | 739 - .../klauspost/compress/zstd/blockenc.go | 871 -- .../compress/zstd/blocktype_string.go | 85 - 
.../klauspost/compress/zstd/bytebuf.go | 127 - .../klauspost/compress/zstd/bytereader.go | 88 - .../klauspost/compress/zstd/decodeheader.go | 202 - .../klauspost/compress/zstd/decoder.go | 561 - .../compress/zstd/decoder_options.go | 84 - .../klauspost/compress/zstd/dict.go | 122 - .../klauspost/compress/zstd/enc_base.go | 177 - .../klauspost/compress/zstd/enc_best.go | 487 - .../klauspost/compress/zstd/enc_better.go | 1170 -- .../klauspost/compress/zstd/enc_dfast.go | 1121 -- .../klauspost/compress/zstd/enc_fast.go | 1018 -- .../klauspost/compress/zstd/encoder.go | 576 - .../compress/zstd/encoder_options.go | 312 - .../klauspost/compress/zstd/framedec.go | 494 - .../klauspost/compress/zstd/frameenc.go | 137 - .../klauspost/compress/zstd/fse_decoder.go | 385 - .../klauspost/compress/zstd/fse_encoder.go | 726 - .../klauspost/compress/zstd/fse_predefined.go | 158 - .../klauspost/compress/zstd/hash.go | 77 - .../klauspost/compress/zstd/history.go | 89 - .../compress/zstd/internal/xxhash/LICENSE.txt | 22 - .../compress/zstd/internal/xxhash/README.md | 58 - .../compress/zstd/internal/xxhash/xxhash.go | 238 - .../zstd/internal/xxhash/xxhash_amd64.go | 13 - .../zstd/internal/xxhash/xxhash_amd64.s | 215 - .../zstd/internal/xxhash/xxhash_other.go | 76 - .../zstd/internal/xxhash/xxhash_safe.go | 11 - .../klauspost/compress/zstd/seqdec.go | 492 - .../klauspost/compress/zstd/seqenc.go | 115 - .../klauspost/compress/zstd/snappy.go | 436 - .../klauspost/compress/zstd/zstd.go | 156 - .../magiconair/properties/.gitignore | 6 - .../magiconair/properties/CHANGELOG.md | 205 - .../magiconair/properties/LICENSE.md | 24 - .../magiconair/properties/README.md | 128 - .../magiconair/properties/decode.go | 289 - .../github.com/magiconair/properties/doc.go | 155 - .../magiconair/properties/integrate.go | 35 - .../github.com/magiconair/properties/lex.go | 395 - .../github.com/magiconair/properties/load.go | 293 - .../magiconair/properties/parser.go | 86 - .../magiconair/properties/properties.go | 848 -- .../magiconair/properties/rangecheck.go | 31 - vendor/github.com/moby/patternmatcher/LICENSE | 191 - vendor/github.com/moby/patternmatcher/NOTICE | 16 - .../moby/patternmatcher/patternmatcher.go | 474 - vendor/github.com/moby/sys/sequential/LICENSE | 202 - vendor/github.com/moby/sys/sequential/doc.go | 15 - .../moby/sys/sequential/sequential_unix.go | 45 - .../moby/sys/sequential/sequential_windows.go | 161 - vendor/github.com/moby/term/.gitignore | 8 - vendor/github.com/moby/term/LICENSE | 191 - vendor/github.com/moby/term/README.md | 36 - vendor/github.com/moby/term/ascii.go | 66 - vendor/github.com/moby/term/doc.go | 3 - vendor/github.com/moby/term/proxy.go | 88 - vendor/github.com/moby/term/term.go | 85 - vendor/github.com/moby/term/term_unix.go | 98 - vendor/github.com/moby/term/term_windows.go | 176 - vendor/github.com/moby/term/termios_bsd.go | 13 - vendor/github.com/moby/term/termios_nonbsd.go | 13 - vendor/github.com/moby/term/termios_unix.go | 35 - .../github.com/moby/term/termios_windows.go | 37 - .../moby/term/windows/ansi_reader.go | 252 - .../moby/term/windows/ansi_writer.go | 57 - .../github.com/moby/term/windows/console.go | 43 - vendor/github.com/moby/term/windows/doc.go | 5 - vendor/github.com/morikuni/aec/LICENSE | 21 - vendor/github.com/morikuni/aec/README.md | 178 - vendor/github.com/morikuni/aec/aec.go | 137 - vendor/github.com/morikuni/aec/ansi.go | 59 - vendor/github.com/morikuni/aec/builder.go | 388 - vendor/github.com/morikuni/aec/sample.gif | Bin 12548 -> 0 bytes 
vendor/github.com/morikuni/aec/sgr.go | 202 - .../opencontainers/go-digest/.mailmap | 4 - .../opencontainers/go-digest/.pullapprove.yml | 28 - .../opencontainers/go-digest/.travis.yml | 5 - .../opencontainers/go-digest/CONTRIBUTING.md | 72 - .../opencontainers/go-digest/LICENSE | 192 - .../opencontainers/go-digest/LICENSE.docs | 425 - .../opencontainers/go-digest/MAINTAINERS | 5 - .../opencontainers/go-digest/README.md | 96 - .../opencontainers/go-digest/algorithm.go | 193 - .../opencontainers/go-digest/digest.go | 157 - .../opencontainers/go-digest/digester.go | 40 - .../opencontainers/go-digest/doc.go | 62 - .../opencontainers/go-digest/verifiers.go | 46 - .../opencontainers/image-spec/LICENSE | 191 - .../image-spec/specs-go/v1/annotations.go | 71 - .../image-spec/specs-go/v1/artifact.go | 34 - .../image-spec/specs-go/v1/config.go | 114 - .../image-spec/specs-go/v1/descriptor.go | 72 - .../image-spec/specs-go/v1/index.go | 32 - .../image-spec/specs-go/v1/layout.go | 28 - .../image-spec/specs-go/v1/manifest.go | 38 - .../image-spec/specs-go/v1/mediatype.go | 60 - .../image-spec/specs-go/version.go | 32 - .../image-spec/specs-go/versioned.go | 23 - vendor/github.com/opencontainers/runc/LICENSE | 191 - vendor/github.com/opencontainers/runc/NOTICE | 17 - .../runc/libcontainer/user/lookup_unix.go | 157 - .../runc/libcontainer/user/user.go | 605 - .../runc/libcontainer/user/user_fuzzer.go | 43 - vendor/github.com/sirupsen/logrus/.gitignore | 4 - .../github.com/sirupsen/logrus/.golangci.yml | 40 - vendor/github.com/sirupsen/logrus/.travis.yml | 15 - .../github.com/sirupsen/logrus/CHANGELOG.md | 259 - vendor/github.com/sirupsen/logrus/LICENSE | 21 - vendor/github.com/sirupsen/logrus/README.md | 513 - vendor/github.com/sirupsen/logrus/alt_exit.go | 76 - .../github.com/sirupsen/logrus/appveyor.yml | 14 - .../github.com/sirupsen/logrus/buffer_pool.go | 43 - vendor/github.com/sirupsen/logrus/doc.go | 26 - vendor/github.com/sirupsen/logrus/entry.go | 442 - vendor/github.com/sirupsen/logrus/exported.go | 270 - .../github.com/sirupsen/logrus/formatter.go | 78 - vendor/github.com/sirupsen/logrus/hooks.go | 34 - .../sirupsen/logrus/json_formatter.go | 128 - vendor/github.com/sirupsen/logrus/logger.go | 417 - vendor/github.com/sirupsen/logrus/logrus.go | 186 - .../logrus/terminal_check_appengine.go | 11 - .../sirupsen/logrus/terminal_check_bsd.go | 13 - .../sirupsen/logrus/terminal_check_js.go | 7 - .../logrus/terminal_check_no_terminal.go | 11 - .../logrus/terminal_check_notappengine.go | 17 - .../sirupsen/logrus/terminal_check_solaris.go | 11 - .../sirupsen/logrus/terminal_check_unix.go | 13 - .../sirupsen/logrus/terminal_check_windows.go | 27 - .../sirupsen/logrus/text_formatter.go | 339 - vendor/github.com/sirupsen/logrus/writer.go | 70 - .../testcontainers-go/.gitignore | 13 - .../testcontainers-go/.golangci.yml | 16 - .../testcontainers-go/CONTRIBUTING.md | 13 - .../testcontainers/testcontainers-go/LICENSE | 21 - .../testcontainers/testcontainers-go/Makefile | 9 - .../testcontainers/testcontainers-go/Pipfile | 15 - .../testcontainers-go/Pipfile.lock | 298 - .../testcontainers-go/README.md | 17 - .../testcontainers-go/RELEASING.md | 203 - .../testcontainers-go/commons-test.mk | 21 - .../testcontainers-go/config.go | 29 - .../testcontainers-go/container.go | 263 - .../testcontainers-go/docker.go | 1404 -- .../testcontainers-go/docker_auth.go | 108 - .../testcontainers-go/docker_mounts.go | 116 - .../testcontainers-go/exec/processor.go | 44 - .../testcontainers/testcontainers-go/file.go | 141 - 
.../testcontainers-go/generic.go | 157 - .../internal/config/config.go | 100 - .../internal/testcontainersdocker/client.go | 65 - .../testcontainersdocker/docker_host.go | 264 - .../testcontainersdocker/docker_rootless.go | 141 - .../testcontainersdocker/docker_socket.go | 46 - .../internal/testcontainersdocker/images.go | 126 - .../internal/testcontainersdocker/labels.go | 9 - .../internal/testcontainerssession/session.go | 22 - .../testcontainers-go/internal/version.go | 4 - .../testcontainers-go/lifecycle.go | 379 - .../testcontainers-go/logconsumer.go | 22 - .../testcontainers-go/logger.go | 75 - .../testcontainers-go/mkdocs.yml | 106 - .../testcontainers-go/mounts.go | 116 - .../testcontainers-go/network.go | 45 - .../testcontainers-go/parallel.go | 122 - .../testcontainers-go/provider.go | 163 - .../testcontainers-go/reaper.go | 212 - .../testcontainers-go/requirements.txt | 4 - .../testcontainers-go/runtime.txt | 1 - .../testcontainers-go/testcontainer.go | 1 - .../testcontainers-go/testing.go | 42 - .../testcontainers-go/wait/all.go | 80 - .../testcontainers-go/wait/errors.go | 10 - .../testcontainers-go/wait/errors_windows.go | 9 - .../testcontainers-go/wait/exec.go | 99 - .../testcontainers-go/wait/exit.go | 89 - .../testcontainers-go/wait/health.go | 91 - .../testcontainers-go/wait/host_port.go | 204 - .../testcontainers-go/wait/http.go | 285 - .../testcontainers-go/wait/log.go | 117 - .../testcontainers-go/wait/nop.go | 69 - .../testcontainers-go/wait/sql.go | 116 - .../testcontainers-go/wait/wait.go | 62 - vendor/golang.org/x/exp/LICENSE | 27 - vendor/golang.org/x/exp/PATENTS | 22 - .../x/exp/constraints/constraints.go | 50 - vendor/golang.org/x/exp/slices/slices.go | 258 - vendor/golang.org/x/exp/slices/sort.go | 128 - vendor/golang.org/x/exp/slices/zsortfunc.go | 479 - .../golang.org/x/exp/slices/zsortordered.go | 481 - vendor/golang.org/x/net/LICENSE | 27 - vendor/golang.org/x/net/PATENTS | 22 - .../golang.org/x/net/internal/socks/client.go | 168 - .../golang.org/x/net/internal/socks/socks.go | 317 - vendor/golang.org/x/net/proxy/dial.go | 54 - vendor/golang.org/x/net/proxy/direct.go | 31 - vendor/golang.org/x/net/proxy/per_host.go | 155 - vendor/golang.org/x/net/proxy/proxy.go | 149 - vendor/golang.org/x/net/proxy/socks5.go | 42 - vendor/golang.org/x/sys/execabs/execabs.go | 102 - .../golang.org/x/sys/execabs/execabs_go118.go | 18 - .../golang.org/x/sys/execabs/execabs_go119.go | 21 - .../sys/internal/unsafeheader/unsafeheader.go | 30 - vendor/golang.org/x/sys/windows/aliases.go | 13 - .../golang.org/x/sys/windows/dll_windows.go | 416 - vendor/golang.org/x/sys/windows/empty.s | 9 - .../golang.org/x/sys/windows/env_windows.go | 54 - vendor/golang.org/x/sys/windows/eventlog.go | 21 - .../golang.org/x/sys/windows/exec_windows.go | 183 - .../x/sys/windows/memory_windows.go | 48 - vendor/golang.org/x/sys/windows/mkerrors.bash | 70 - .../x/sys/windows/mkknownfolderids.bash | 27 - vendor/golang.org/x/sys/windows/mksyscall.go | 10 - vendor/golang.org/x/sys/windows/race.go | 31 - vendor/golang.org/x/sys/windows/race0.go | 26 - .../x/sys/windows/security_windows.go | 1444 -- vendor/golang.org/x/sys/windows/service.go | 254 - .../x/sys/windows/setupapi_windows.go | 1425 -- vendor/golang.org/x/sys/windows/str.go | 23 - vendor/golang.org/x/sys/windows/syscall.go | 105 - .../x/sys/windows/syscall_windows.go | 1808 --- .../golang.org/x/sys/windows/types_windows.go | 3349 ----- .../x/sys/windows/types_windows_386.go | 35 - .../x/sys/windows/types_windows_amd64.go | 34 - 
.../x/sys/windows/types_windows_arm.go | 35 - .../x/sys/windows/types_windows_arm64.go | 34 - .../x/sys/windows/zerrors_windows.go | 9468 ------------ .../x/sys/windows/zknownfolderids_windows.go | 149 - .../x/sys/windows/zsyscall_windows.go | 4345 ------ vendor/google.golang.org/genproto/LICENSE | 202 - .../googleapis/rpc/status/status.pb.go | 201 - vendor/google.golang.org/grpc/AUTHORS | 1 - vendor/google.golang.org/grpc/LICENSE | 202 - vendor/google.golang.org/grpc/NOTICE.txt | 13 - .../grpc/codes/code_string.go | 62 - vendor/google.golang.org/grpc/codes/codes.go | 244 - .../grpc/internal/status/status.go | 166 - .../google.golang.org/grpc/status/status.go | 135 - vendor/google.golang.org/protobuf/AUTHORS | 3 - .../google.golang.org/protobuf/CONTRIBUTORS | 3 - vendor/google.golang.org/protobuf/LICENSE | 27 - vendor/google.golang.org/protobuf/PATENTS | 22 - .../protobuf/encoding/prototext/decode.go | 770 - .../protobuf/encoding/prototext/doc.go | 7 - .../protobuf/encoding/prototext/encode.go | 371 - .../protobuf/encoding/protowire/wire.go | 547 - .../protobuf/internal/descfmt/stringer.go | 318 - .../protobuf/internal/descopts/options.go | 29 - .../protobuf/internal/detrand/rand.go | 69 - .../internal/encoding/defval/default.go | 213 - .../encoding/messageset/messageset.go | 241 - .../protobuf/internal/encoding/tag/tag.go | 207 - .../protobuf/internal/encoding/text/decode.go | 665 - .../internal/encoding/text/decode_number.go | 190 - .../internal/encoding/text/decode_string.go | 161 - .../internal/encoding/text/decode_token.go | 373 - .../protobuf/internal/encoding/text/doc.go | 29 - .../protobuf/internal/encoding/text/encode.go | 270 - .../protobuf/internal/errors/errors.go | 89 - .../protobuf/internal/errors/is_go112.go | 40 - .../protobuf/internal/errors/is_go113.go | 13 - .../protobuf/internal/filedesc/build.go | 158 - .../protobuf/internal/filedesc/desc.go | 631 - .../protobuf/internal/filedesc/desc_init.go | 471 - .../protobuf/internal/filedesc/desc_lazy.go | 704 - .../protobuf/internal/filedesc/desc_list.go | 450 - .../internal/filedesc/desc_list_gen.go | 356 - .../protobuf/internal/filedesc/placeholder.go | 107 - .../protobuf/internal/filetype/build.go | 297 - .../protobuf/internal/flags/flags.go | 24 - .../internal/flags/proto_legacy_disable.go | 10 - .../internal/flags/proto_legacy_enable.go | 10 - .../protobuf/internal/genid/any_gen.go | 34 - .../protobuf/internal/genid/api_gen.go | 106 - .../protobuf/internal/genid/descriptor_gen.go | 829 -- .../protobuf/internal/genid/doc.go | 11 - .../protobuf/internal/genid/duration_gen.go | 34 - .../protobuf/internal/genid/empty_gen.go | 19 - .../protobuf/internal/genid/field_mask_gen.go | 31 - .../protobuf/internal/genid/goname.go | 25 - .../protobuf/internal/genid/map_entry.go | 16 - .../internal/genid/source_context_gen.go | 31 - .../protobuf/internal/genid/struct_gen.go | 116 - .../protobuf/internal/genid/timestamp_gen.go | 34 - .../protobuf/internal/genid/type_gen.go | 184 - .../protobuf/internal/genid/wrappers.go | 13 - .../protobuf/internal/genid/wrappers_gen.go | 175 - .../protobuf/internal/impl/api_export.go | 177 - .../protobuf/internal/impl/checkinit.go | 141 - .../protobuf/internal/impl/codec_extension.go | 223 - .../protobuf/internal/impl/codec_field.go | 830 -- .../protobuf/internal/impl/codec_gen.go | 5637 ------- .../protobuf/internal/impl/codec_map.go | 388 - .../protobuf/internal/impl/codec_map_go111.go | 38 - .../protobuf/internal/impl/codec_map_go112.go | 12 - .../protobuf/internal/impl/codec_message.go | 217 - 
.../internal/impl/codec_messageset.go | 123 - .../protobuf/internal/impl/codec_reflect.go | 210 - .../protobuf/internal/impl/codec_tables.go | 557 - .../protobuf/internal/impl/codec_unsafe.go | 18 - .../protobuf/internal/impl/convert.go | 496 - .../protobuf/internal/impl/convert_list.go | 141 - .../protobuf/internal/impl/convert_map.go | 121 - .../protobuf/internal/impl/decode.go | 284 - .../protobuf/internal/impl/encode.go | 201 - .../protobuf/internal/impl/enum.go | 21 - .../protobuf/internal/impl/extension.go | 156 - .../protobuf/internal/impl/legacy_enum.go | 219 - .../protobuf/internal/impl/legacy_export.go | 92 - .../internal/impl/legacy_extension.go | 176 - .../protobuf/internal/impl/legacy_file.go | 81 - .../protobuf/internal/impl/legacy_message.go | 565 - .../protobuf/internal/impl/merge.go | 176 - .../protobuf/internal/impl/merge_gen.go | 209 - .../protobuf/internal/impl/message.go | 276 - .../protobuf/internal/impl/message_reflect.go | 465 - .../internal/impl/message_reflect_field.go | 543 - .../internal/impl/message_reflect_gen.go | 249 - .../protobuf/internal/impl/pointer_reflect.go | 179 - .../protobuf/internal/impl/pointer_unsafe.go | 175 - .../protobuf/internal/impl/validate.go | 576 - .../protobuf/internal/impl/weak.go | 74 - .../protobuf/internal/order/order.go | 89 - .../protobuf/internal/order/range.go | 115 - .../protobuf/internal/pragma/pragma.go | 29 - .../protobuf/internal/set/ints.go | 58 - .../protobuf/internal/strs/strings.go | 196 - .../protobuf/internal/strs/strings_pure.go | 28 - .../protobuf/internal/strs/strings_unsafe.go | 95 - .../protobuf/internal/version/version.go | 79 - .../protobuf/proto/checkinit.go | 71 - .../protobuf/proto/decode.go | 293 - .../protobuf/proto/decode_gen.go | 603 - .../google.golang.org/protobuf/proto/doc.go | 94 - .../protobuf/proto/encode.go | 319 - .../protobuf/proto/encode_gen.go | 97 - .../google.golang.org/protobuf/proto/equal.go | 167 - .../protobuf/proto/extension.go | 92 - .../google.golang.org/protobuf/proto/merge.go | 139 - .../protobuf/proto/messageset.go | 93 - .../google.golang.org/protobuf/proto/proto.go | 43 - .../protobuf/proto/proto_methods.go | 20 - .../protobuf/proto/proto_reflect.go | 20 - .../google.golang.org/protobuf/proto/reset.go | 43 - .../google.golang.org/protobuf/proto/size.go | 97 - .../protobuf/proto/size_gen.go | 55 - .../protobuf/proto/wrappers.go | 29 - .../protobuf/reflect/protodesc/desc.go | 276 - .../protobuf/reflect/protodesc/desc_init.go | 248 - .../reflect/protodesc/desc_resolve.go | 286 - .../reflect/protodesc/desc_validate.go | 374 - .../protobuf/reflect/protodesc/proto.go | 252 - .../protobuf/reflect/protoreflect/methods.go | 78 - .../protobuf/reflect/protoreflect/proto.go | 504 - .../protobuf/reflect/protoreflect/source.go | 128 - .../reflect/protoreflect/source_gen.go | 461 - .../protobuf/reflect/protoreflect/type.go | 665 - .../protobuf/reflect/protoreflect/value.go | 285 - .../reflect/protoreflect/value_pure.go | 60 - .../reflect/protoreflect/value_union.go | 436 - .../reflect/protoreflect/value_unsafe.go | 99 - .../reflect/protoregistry/registry.go | 880 -- .../protobuf/runtime/protoiface/legacy.go | 15 - .../protobuf/runtime/protoiface/methods.go | 168 - .../protobuf/runtime/protoimpl/impl.go | 44 - .../protobuf/runtime/protoimpl/version.go | 56 - .../types/descriptorpb/descriptor.pb.go | 3957 ----- .../protobuf/types/known/anypb/any.pb.go | 498 - .../types/known/durationpb/duration.pb.go | 379 - .../types/known/timestamppb/timestamp.pb.go | 390 - vendor/modules.txt | 178 +- 877 
files changed, 44 insertions(+), 169870 deletions(-) delete mode 100644 vendor/github.com/Azure/go-ansiterm/LICENSE delete mode 100644 vendor/github.com/Azure/go-ansiterm/README.md delete mode 100644 vendor/github.com/Azure/go-ansiterm/constants.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/context.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/csi_entry_state.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/csi_param_state.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/escape_state.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/event_handler.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/ground_state.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/osc_string_state.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/parser.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/parser_actions.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/states.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/utilities.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/ansi.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/api.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/utilities.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go delete mode 100644 vendor/github.com/Microsoft/go-winio/.gitignore delete mode 100644 vendor/github.com/Microsoft/go-winio/CODEOWNERS delete mode 100644 vendor/github.com/Microsoft/go-winio/LICENSE delete mode 100644 vendor/github.com/Microsoft/go-winio/README.md delete mode 100644 vendor/github.com/Microsoft/go-winio/backup.go delete mode 100644 vendor/github.com/Microsoft/go-winio/ea.go delete mode 100644 vendor/github.com/Microsoft/go-winio/file.go delete mode 100644 vendor/github.com/Microsoft/go-winio/fileinfo.go delete mode 100644 vendor/github.com/Microsoft/go-winio/hvsock.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pipe.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go delete mode 100644 vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go delete mode 100644 vendor/github.com/Microsoft/go-winio/privilege.go delete mode 100644 vendor/github.com/Microsoft/go-winio/reparse.go delete mode 100644 vendor/github.com/Microsoft/go-winio/sd.go delete mode 100644 vendor/github.com/Microsoft/go-winio/syscall.go delete mode 100644 vendor/github.com/Microsoft/go-winio/zsyscall_windows.go delete mode 100644 vendor/github.com/cenkalti/backoff/v4/.gitignore delete mode 100644 vendor/github.com/cenkalti/backoff/v4/LICENSE delete mode 100644 vendor/github.com/cenkalti/backoff/v4/README.md delete mode 100644 vendor/github.com/cenkalti/backoff/v4/backoff.go delete mode 100644 vendor/github.com/cenkalti/backoff/v4/context.go delete mode 100644 vendor/github.com/cenkalti/backoff/v4/exponential.go delete mode 100644 vendor/github.com/cenkalti/backoff/v4/retry.go delete mode 100644 
vendor/github.com/cenkalti/backoff/v4/ticker.go delete mode 100644 vendor/github.com/cenkalti/backoff/v4/timer.go delete mode 100644 vendor/github.com/cenkalti/backoff/v4/tries.go delete mode 100644 vendor/github.com/containerd/containerd/LICENSE delete mode 100644 vendor/github.com/containerd/containerd/NOTICE delete mode 100644 vendor/github.com/containerd/containerd/errdefs/errors.go delete mode 100644 vendor/github.com/containerd/containerd/errdefs/grpc.go delete mode 100644 vendor/github.com/containerd/containerd/log/context.go delete mode 100644 vendor/github.com/containerd/containerd/pkg/userns/userns_linux.go delete mode 100644 vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go delete mode 100644 vendor/github.com/containerd/containerd/platforms/compare.go delete mode 100644 vendor/github.com/containerd/containerd/platforms/cpuinfo.go delete mode 100644 vendor/github.com/containerd/containerd/platforms/database.go delete mode 100644 vendor/github.com/containerd/containerd/platforms/defaults.go delete mode 100644 vendor/github.com/containerd/containerd/platforms/defaults_darwin.go delete mode 100644 vendor/github.com/containerd/containerd/platforms/defaults_unix.go delete mode 100644 vendor/github.com/containerd/containerd/platforms/defaults_windows.go delete mode 100644 vendor/github.com/containerd/containerd/platforms/platforms.go delete mode 100644 vendor/github.com/cpuguy83/dockercfg/LICENSE delete mode 100644 vendor/github.com/cpuguy83/dockercfg/README.md delete mode 100644 vendor/github.com/cpuguy83/dockercfg/auth.go delete mode 100644 vendor/github.com/cpuguy83/dockercfg/config.go delete mode 100644 vendor/github.com/cpuguy83/dockercfg/load.go delete mode 100644 vendor/github.com/docker/distribution/LICENSE delete mode 100644 vendor/github.com/docker/distribution/digestset/set.go delete mode 100644 vendor/github.com/docker/distribution/reference/helpers.go delete mode 100644 vendor/github.com/docker/distribution/reference/normalize.go delete mode 100644 vendor/github.com/docker/distribution/reference/reference.go delete mode 100644 vendor/github.com/docker/distribution/reference/regexp.go delete mode 100644 vendor/github.com/docker/docker/AUTHORS delete mode 100644 vendor/github.com/docker/docker/LICENSE delete mode 100644 vendor/github.com/docker/docker/NOTICE delete mode 100644 vendor/github.com/docker/docker/api/README.md delete mode 100644 vendor/github.com/docker/docker/api/common.go delete mode 100644 vendor/github.com/docker/docker/api/common_unix.go delete mode 100644 vendor/github.com/docker/docker/api/common_windows.go delete mode 100644 vendor/github.com/docker/docker/api/swagger-gen.yaml delete mode 100644 vendor/github.com/docker/docker/api/swagger.yaml delete mode 100644 vendor/github.com/docker/docker/api/types/auth.go delete mode 100644 vendor/github.com/docker/docker/api/types/blkiodev/blkio.go delete mode 100644 vendor/github.com/docker/docker/api/types/client.go delete mode 100644 vendor/github.com/docker/docker/api/types/configs.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/config.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/container_changes.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/container_top.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/container_update.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/create_response.go delete mode 100644 
vendor/github.com/docker/docker/api/types/container/deprecated.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/host_config.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/wait_exit_error.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/wait_response.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/waitcondition.go delete mode 100644 vendor/github.com/docker/docker/api/types/deprecated.go delete mode 100644 vendor/github.com/docker/docker/api/types/error_response.go delete mode 100644 vendor/github.com/docker/docker/api/types/error_response_ext.go delete mode 100644 vendor/github.com/docker/docker/api/types/events/events.go delete mode 100644 vendor/github.com/docker/docker/api/types/filters/parse.go delete mode 100644 vendor/github.com/docker/docker/api/types/graph_driver_data.go delete mode 100644 vendor/github.com/docker/docker/api/types/id_response.go delete mode 100644 vendor/github.com/docker/docker/api/types/image/image_history.go delete mode 100644 vendor/github.com/docker/docker/api/types/image_delete_response_item.go delete mode 100644 vendor/github.com/docker/docker/api/types/image_summary.go delete mode 100644 vendor/github.com/docker/docker/api/types/mount/mount.go delete mode 100644 vendor/github.com/docker/docker/api/types/network/network.go delete mode 100644 vendor/github.com/docker/docker/api/types/plugin.go delete mode 100644 vendor/github.com/docker/docker/api/types/plugin_device.go delete mode 100644 vendor/github.com/docker/docker/api/types/plugin_env.go delete mode 100644 vendor/github.com/docker/docker/api/types/plugin_interface_type.go delete mode 100644 vendor/github.com/docker/docker/api/types/plugin_mount.go delete mode 100644 vendor/github.com/docker/docker/api/types/plugin_responses.go delete mode 100644 vendor/github.com/docker/docker/api/types/port.go delete mode 100644 vendor/github.com/docker/docker/api/types/registry/authenticate.go delete mode 100644 vendor/github.com/docker/docker/api/types/registry/registry.go delete mode 100644 vendor/github.com/docker/docker/api/types/service_update_response.go delete mode 100644 vendor/github.com/docker/docker/api/types/stats.go delete mode 100644 vendor/github.com/docker/docker/api/types/strslice/strslice.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/common.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/config.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/container.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/network.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/node.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/secret.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/service.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/swarm.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/task.go delete mode 100644 
vendor/github.com/docker/docker/api/types/time/timestamp.go delete mode 100644 vendor/github.com/docker/docker/api/types/types.go delete mode 100644 vendor/github.com/docker/docker/api/types/versions/README.md delete mode 100644 vendor/github.com/docker/docker/api/types/versions/compare.go delete mode 100644 vendor/github.com/docker/docker/api/types/volume/cluster_volume.go delete mode 100644 vendor/github.com/docker/docker/api/types/volume/create_options.go delete mode 100644 vendor/github.com/docker/docker/api/types/volume/deprecated.go delete mode 100644 vendor/github.com/docker/docker/api/types/volume/list_response.go delete mode 100644 vendor/github.com/docker/docker/api/types/volume/options.go delete mode 100644 vendor/github.com/docker/docker/api/types/volume/volume.go delete mode 100644 vendor/github.com/docker/docker/api/types/volume/volume_update.go delete mode 100644 vendor/github.com/docker/docker/client/README.md delete mode 100644 vendor/github.com/docker/docker/client/build_cancel.go delete mode 100644 vendor/github.com/docker/docker/client/build_prune.go delete mode 100644 vendor/github.com/docker/docker/client/checkpoint_create.go delete mode 100644 vendor/github.com/docker/docker/client/checkpoint_delete.go delete mode 100644 vendor/github.com/docker/docker/client/checkpoint_list.go delete mode 100644 vendor/github.com/docker/docker/client/client.go delete mode 100644 vendor/github.com/docker/docker/client/client_deprecated.go delete mode 100644 vendor/github.com/docker/docker/client/client_unix.go delete mode 100644 vendor/github.com/docker/docker/client/client_windows.go delete mode 100644 vendor/github.com/docker/docker/client/config_create.go delete mode 100644 vendor/github.com/docker/docker/client/config_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/config_list.go delete mode 100644 vendor/github.com/docker/docker/client/config_remove.go delete mode 100644 vendor/github.com/docker/docker/client/config_update.go delete mode 100644 vendor/github.com/docker/docker/client/container_attach.go delete mode 100644 vendor/github.com/docker/docker/client/container_commit.go delete mode 100644 vendor/github.com/docker/docker/client/container_copy.go delete mode 100644 vendor/github.com/docker/docker/client/container_create.go delete mode 100644 vendor/github.com/docker/docker/client/container_diff.go delete mode 100644 vendor/github.com/docker/docker/client/container_exec.go delete mode 100644 vendor/github.com/docker/docker/client/container_export.go delete mode 100644 vendor/github.com/docker/docker/client/container_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/container_kill.go delete mode 100644 vendor/github.com/docker/docker/client/container_list.go delete mode 100644 vendor/github.com/docker/docker/client/container_logs.go delete mode 100644 vendor/github.com/docker/docker/client/container_pause.go delete mode 100644 vendor/github.com/docker/docker/client/container_prune.go delete mode 100644 vendor/github.com/docker/docker/client/container_remove.go delete mode 100644 vendor/github.com/docker/docker/client/container_rename.go delete mode 100644 vendor/github.com/docker/docker/client/container_resize.go delete mode 100644 vendor/github.com/docker/docker/client/container_restart.go delete mode 100644 vendor/github.com/docker/docker/client/container_start.go delete mode 100644 vendor/github.com/docker/docker/client/container_stats.go delete mode 100644 vendor/github.com/docker/docker/client/container_stop.go delete mode 100644 
vendor/github.com/docker/docker/client/container_top.go delete mode 100644 vendor/github.com/docker/docker/client/container_unpause.go delete mode 100644 vendor/github.com/docker/docker/client/container_update.go delete mode 100644 vendor/github.com/docker/docker/client/container_wait.go delete mode 100644 vendor/github.com/docker/docker/client/disk_usage.go delete mode 100644 vendor/github.com/docker/docker/client/distribution_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/envvars.go delete mode 100644 vendor/github.com/docker/docker/client/errors.go delete mode 100644 vendor/github.com/docker/docker/client/events.go delete mode 100644 vendor/github.com/docker/docker/client/hijack.go delete mode 100644 vendor/github.com/docker/docker/client/image_build.go delete mode 100644 vendor/github.com/docker/docker/client/image_create.go delete mode 100644 vendor/github.com/docker/docker/client/image_history.go delete mode 100644 vendor/github.com/docker/docker/client/image_import.go delete mode 100644 vendor/github.com/docker/docker/client/image_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/image_list.go delete mode 100644 vendor/github.com/docker/docker/client/image_load.go delete mode 100644 vendor/github.com/docker/docker/client/image_prune.go delete mode 100644 vendor/github.com/docker/docker/client/image_pull.go delete mode 100644 vendor/github.com/docker/docker/client/image_push.go delete mode 100644 vendor/github.com/docker/docker/client/image_remove.go delete mode 100644 vendor/github.com/docker/docker/client/image_save.go delete mode 100644 vendor/github.com/docker/docker/client/image_search.go delete mode 100644 vendor/github.com/docker/docker/client/image_tag.go delete mode 100644 vendor/github.com/docker/docker/client/info.go delete mode 100644 vendor/github.com/docker/docker/client/interface.go delete mode 100644 vendor/github.com/docker/docker/client/interface_experimental.go delete mode 100644 vendor/github.com/docker/docker/client/interface_stable.go delete mode 100644 vendor/github.com/docker/docker/client/login.go delete mode 100644 vendor/github.com/docker/docker/client/network_connect.go delete mode 100644 vendor/github.com/docker/docker/client/network_create.go delete mode 100644 vendor/github.com/docker/docker/client/network_disconnect.go delete mode 100644 vendor/github.com/docker/docker/client/network_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/network_list.go delete mode 100644 vendor/github.com/docker/docker/client/network_prune.go delete mode 100644 vendor/github.com/docker/docker/client/network_remove.go delete mode 100644 vendor/github.com/docker/docker/client/node_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/node_list.go delete mode 100644 vendor/github.com/docker/docker/client/node_remove.go delete mode 100644 vendor/github.com/docker/docker/client/node_update.go delete mode 100644 vendor/github.com/docker/docker/client/options.go delete mode 100644 vendor/github.com/docker/docker/client/ping.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_create.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_disable.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_enable.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_install.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_list.go delete mode 100644 
vendor/github.com/docker/docker/client/plugin_push.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_remove.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_set.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_upgrade.go delete mode 100644 vendor/github.com/docker/docker/client/request.go delete mode 100644 vendor/github.com/docker/docker/client/secret_create.go delete mode 100644 vendor/github.com/docker/docker/client/secret_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/secret_list.go delete mode 100644 vendor/github.com/docker/docker/client/secret_remove.go delete mode 100644 vendor/github.com/docker/docker/client/secret_update.go delete mode 100644 vendor/github.com/docker/docker/client/service_create.go delete mode 100644 vendor/github.com/docker/docker/client/service_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/service_list.go delete mode 100644 vendor/github.com/docker/docker/client/service_logs.go delete mode 100644 vendor/github.com/docker/docker/client/service_remove.go delete mode 100644 vendor/github.com/docker/docker/client/service_update.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_get_unlock_key.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_init.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_join.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_leave.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_unlock.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_update.go delete mode 100644 vendor/github.com/docker/docker/client/task_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/task_list.go delete mode 100644 vendor/github.com/docker/docker/client/task_logs.go delete mode 100644 vendor/github.com/docker/docker/client/transport.go delete mode 100644 vendor/github.com/docker/docker/client/utils.go delete mode 100644 vendor/github.com/docker/docker/client/version.go delete mode 100644 vendor/github.com/docker/docker/client/volume_create.go delete mode 100644 vendor/github.com/docker/docker/client/volume_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/volume_list.go delete mode 100644 vendor/github.com/docker/docker/client/volume_prune.go delete mode 100644 vendor/github.com/docker/docker/client/volume_remove.go delete mode 100644 vendor/github.com/docker/docker/client/volume_update.go delete mode 100644 vendor/github.com/docker/docker/errdefs/defs.go delete mode 100644 vendor/github.com/docker/docker/errdefs/doc.go delete mode 100644 vendor/github.com/docker/docker/errdefs/helpers.go delete mode 100644 vendor/github.com/docker/docker/errdefs/http_helpers.go delete mode 100644 vendor/github.com/docker/docker/errdefs/is.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/README.md delete mode 100644 vendor/github.com/docker/docker/pkg/archive/archive.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_linux.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_other.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/changes.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_linux.go delete mode 100644 
vendor/github.com/docker/docker/pkg/archive/changes_other.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/copy.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/copy_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/copy_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/diff.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/diff_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/diff_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/time_linux.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/time_unsupported.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/whiteouts.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/wrap.go delete mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools.go delete mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go delete mode 100644 vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go delete mode 100644 vendor/github.com/docker/docker/pkg/idtools/utils_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/ioutils/buffer.go delete mode 100644 vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go delete mode 100644 vendor/github.com/docker/docker/pkg/ioutils/fswriters.go delete mode 100644 vendor/github.com/docker/docker/pkg/ioutils/readers.go delete mode 100644 vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go delete mode 100644 vendor/github.com/docker/docker/pkg/ioutils/writers.go delete mode 100644 vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go delete mode 100644 vendor/github.com/docker/docker/pkg/longpath/longpath.go delete mode 100644 vendor/github.com/docker/docker/pkg/pools/pools.go delete mode 100644 vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/args_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/errors.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/filesys.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/filesys_deprecated.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/filesys_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/filesys_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/image_os.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/init.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/init_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/lstat_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/lstat_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo.go delete mode 100644 
vendor/github.com/docker/docker/pkg/system/meminfo_linux.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/mknod.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/mknod_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/mknod_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/path.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/path_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/path_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/process_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/process_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/stat_bsd.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/stat_darwin.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/stat_linux.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/stat_openbsd.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/stat_solaris.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/stat_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/stat_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/utimes_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/xattrs_linux.go delete mode 100644 vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go delete mode 100644 vendor/github.com/docker/go-connections/LICENSE delete mode 100644 vendor/github.com/docker/go-connections/nat/nat.go delete mode 100644 vendor/github.com/docker/go-connections/nat/parse.go delete mode 100644 vendor/github.com/docker/go-connections/nat/sort.go delete mode 100644 vendor/github.com/docker/go-connections/sockets/README.md delete mode 100644 vendor/github.com/docker/go-connections/sockets/inmem_socket.go delete mode 100644 vendor/github.com/docker/go-connections/sockets/proxy.go delete mode 100644 vendor/github.com/docker/go-connections/sockets/sockets.go delete mode 100644 vendor/github.com/docker/go-connections/sockets/sockets_unix.go delete mode 100644 vendor/github.com/docker/go-connections/sockets/sockets_windows.go delete mode 100644 vendor/github.com/docker/go-connections/sockets/tcp_socket.go delete mode 100644 vendor/github.com/docker/go-connections/sockets/unix_socket.go delete mode 100644 vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go delete mode 100644 vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go delete mode 100644 vendor/github.com/docker/go-connections/tlsconfig/config.go delete mode 100644 vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go delete mode 100644 vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go delete mode 100644 vendor/github.com/docker/go-units/CONTRIBUTING.md delete mode 100644 vendor/github.com/docker/go-units/LICENSE delete mode 100644 vendor/github.com/docker/go-units/MAINTAINERS delete mode 100644 vendor/github.com/docker/go-units/README.md delete mode 100644 vendor/github.com/docker/go-units/circle.yml delete mode 100644 vendor/github.com/docker/go-units/duration.go delete mode 100644 
vendor/github.com/docker/go-units/size.go delete mode 100644 vendor/github.com/docker/go-units/ulimit.go delete mode 100644 vendor/github.com/gogo/protobuf/AUTHORS delete mode 100644 vendor/github.com/gogo/protobuf/CONTRIBUTORS delete mode 100644 vendor/github.com/gogo/protobuf/LICENSE delete mode 100644 vendor/github.com/gogo/protobuf/proto/Makefile delete mode 100644 vendor/github.com/gogo/protobuf/proto/clone.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/custom_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/decode.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/deprecated.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/discard.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/duration.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/duration_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/encode.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/encode_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/equal.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/extensions.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/extensions_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/lib.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/lib_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/message_set.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/properties.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/properties_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/skip_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/table_marshal.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/table_merge.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/table_unmarshal.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/text.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/text_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/text_parser.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/wrappers.go delete mode 100644 vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go delete mode 100644 vendor/github.com/golang/protobuf/AUTHORS delete mode 100644 vendor/github.com/golang/protobuf/CONTRIBUTORS delete mode 100644 vendor/github.com/golang/protobuf/LICENSE delete mode 100644 vendor/github.com/golang/protobuf/proto/buffer.go delete mode 100644 vendor/github.com/golang/protobuf/proto/defaults.go delete mode 100644 vendor/github.com/golang/protobuf/proto/deprecated.go delete mode 100644 vendor/github.com/golang/protobuf/proto/discard.go delete mode 100644 vendor/github.com/golang/protobuf/proto/extensions.go delete mode 100644 vendor/github.com/golang/protobuf/proto/properties.go delete mode 100644 vendor/github.com/golang/protobuf/proto/proto.go delete mode 100644 
vendor/github.com/golang/protobuf/proto/registry.go delete mode 100644 vendor/github.com/golang/protobuf/proto/text_decode.go delete mode 100644 vendor/github.com/golang/protobuf/proto/text_encode.go delete mode 100644 vendor/github.com/golang/protobuf/proto/wire.go delete mode 100644 vendor/github.com/golang/protobuf/proto/wrappers.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/any.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.pb.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/doc.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/duration.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go delete mode 100644 vendor/github.com/google/uuid/.travis.yml delete mode 100644 vendor/github.com/google/uuid/CONTRIBUTING.md delete mode 100644 vendor/github.com/google/uuid/CONTRIBUTORS delete mode 100644 vendor/github.com/google/uuid/LICENSE delete mode 100644 vendor/github.com/google/uuid/README.md delete mode 100644 vendor/github.com/google/uuid/dce.go delete mode 100644 vendor/github.com/google/uuid/doc.go delete mode 100644 vendor/github.com/google/uuid/hash.go delete mode 100644 vendor/github.com/google/uuid/marshal.go delete mode 100644 vendor/github.com/google/uuid/node.go delete mode 100644 vendor/github.com/google/uuid/node_js.go delete mode 100644 vendor/github.com/google/uuid/node_net.go delete mode 100644 vendor/github.com/google/uuid/null.go delete mode 100644 vendor/github.com/google/uuid/sql.go delete mode 100644 vendor/github.com/google/uuid/time.go delete mode 100644 vendor/github.com/google/uuid/util.go delete mode 100644 vendor/github.com/google/uuid/uuid.go delete mode 100644 vendor/github.com/google/uuid/version1.go delete mode 100644 vendor/github.com/google/uuid/version4.go delete mode 100644 vendor/github.com/imdario/mergo/.deepsource.toml delete mode 100644 vendor/github.com/imdario/mergo/.gitignore delete mode 100644 vendor/github.com/imdario/mergo/.travis.yml delete mode 100644 vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md delete mode 100644 vendor/github.com/imdario/mergo/CONTRIBUTING.md delete mode 100644 vendor/github.com/imdario/mergo/LICENSE delete mode 100644 vendor/github.com/imdario/mergo/README.md delete mode 100644 vendor/github.com/imdario/mergo/SECURITY.md delete mode 100644 vendor/github.com/imdario/mergo/doc.go delete mode 100644 vendor/github.com/imdario/mergo/map.go delete mode 100644 vendor/github.com/imdario/mergo/merge.go delete mode 100644 vendor/github.com/imdario/mergo/mergo.go delete mode 100644 vendor/github.com/klauspost/compress/LICENSE delete mode 100644 vendor/github.com/klauspost/compress/fse/README.md delete mode 100644 vendor/github.com/klauspost/compress/fse/bitreader.go delete mode 100644 vendor/github.com/klauspost/compress/fse/bitwriter.go delete mode 100644 vendor/github.com/klauspost/compress/fse/bytereader.go delete mode 100644 vendor/github.com/klauspost/compress/fse/compress.go delete mode 100644 vendor/github.com/klauspost/compress/fse/decompress.go delete mode 100644 vendor/github.com/klauspost/compress/fse/fse.go delete mode 100644 vendor/github.com/klauspost/compress/huff0/.gitignore delete mode 100644 vendor/github.com/klauspost/compress/huff0/README.md delete mode 100644 vendor/github.com/klauspost/compress/huff0/bitreader.go delete mode 100644 
vendor/github.com/klauspost/compress/huff0/bitwriter.go delete mode 100644 vendor/github.com/klauspost/compress/huff0/bytereader.go delete mode 100644 vendor/github.com/klauspost/compress/huff0/compress.go delete mode 100644 vendor/github.com/klauspost/compress/huff0/decompress.go delete mode 100644 vendor/github.com/klauspost/compress/huff0/huff0.go delete mode 100644 vendor/github.com/klauspost/compress/snappy/.gitignore delete mode 100644 vendor/github.com/klauspost/compress/snappy/AUTHORS delete mode 100644 vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS delete mode 100644 vendor/github.com/klauspost/compress/snappy/LICENSE delete mode 100644 vendor/github.com/klauspost/compress/snappy/README delete mode 100644 vendor/github.com/klauspost/compress/snappy/decode.go delete mode 100644 vendor/github.com/klauspost/compress/snappy/decode_amd64.go delete mode 100644 vendor/github.com/klauspost/compress/snappy/decode_amd64.s delete mode 100644 vendor/github.com/klauspost/compress/snappy/decode_other.go delete mode 100644 vendor/github.com/klauspost/compress/snappy/encode.go delete mode 100644 vendor/github.com/klauspost/compress/snappy/encode_amd64.go delete mode 100644 vendor/github.com/klauspost/compress/snappy/encode_amd64.s delete mode 100644 vendor/github.com/klauspost/compress/snappy/encode_other.go delete mode 100644 vendor/github.com/klauspost/compress/snappy/runbench.cmd delete mode 100644 vendor/github.com/klauspost/compress/snappy/snappy.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/README.md delete mode 100644 vendor/github.com/klauspost/compress/zstd/bitreader.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/bitwriter.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/blockdec.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/blockenc.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/blocktype_string.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/bytebuf.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/bytereader.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/decodeheader.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/decoder.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/decoder_options.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/dict.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/enc_base.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/enc_best.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/enc_better.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/enc_dfast.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/enc_fast.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/encoder.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/encoder_options.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/framedec.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/frameenc.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/fse_encoder.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/fse_predefined.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/hash.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/history.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt delete mode 100644 
vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md delete mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s delete mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/seqenc.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/snappy.go delete mode 100644 vendor/github.com/klauspost/compress/zstd/zstd.go delete mode 100644 vendor/github.com/magiconair/properties/.gitignore delete mode 100644 vendor/github.com/magiconair/properties/CHANGELOG.md delete mode 100644 vendor/github.com/magiconair/properties/LICENSE.md delete mode 100644 vendor/github.com/magiconair/properties/README.md delete mode 100644 vendor/github.com/magiconair/properties/decode.go delete mode 100644 vendor/github.com/magiconair/properties/doc.go delete mode 100644 vendor/github.com/magiconair/properties/integrate.go delete mode 100644 vendor/github.com/magiconair/properties/lex.go delete mode 100644 vendor/github.com/magiconair/properties/load.go delete mode 100644 vendor/github.com/magiconair/properties/parser.go delete mode 100644 vendor/github.com/magiconair/properties/properties.go delete mode 100644 vendor/github.com/magiconair/properties/rangecheck.go delete mode 100644 vendor/github.com/moby/patternmatcher/LICENSE delete mode 100644 vendor/github.com/moby/patternmatcher/NOTICE delete mode 100644 vendor/github.com/moby/patternmatcher/patternmatcher.go delete mode 100644 vendor/github.com/moby/sys/sequential/LICENSE delete mode 100644 vendor/github.com/moby/sys/sequential/doc.go delete mode 100644 vendor/github.com/moby/sys/sequential/sequential_unix.go delete mode 100644 vendor/github.com/moby/sys/sequential/sequential_windows.go delete mode 100644 vendor/github.com/moby/term/.gitignore delete mode 100644 vendor/github.com/moby/term/LICENSE delete mode 100644 vendor/github.com/moby/term/README.md delete mode 100644 vendor/github.com/moby/term/ascii.go delete mode 100644 vendor/github.com/moby/term/doc.go delete mode 100644 vendor/github.com/moby/term/proxy.go delete mode 100644 vendor/github.com/moby/term/term.go delete mode 100644 vendor/github.com/moby/term/term_unix.go delete mode 100644 vendor/github.com/moby/term/term_windows.go delete mode 100644 vendor/github.com/moby/term/termios_bsd.go delete mode 100644 vendor/github.com/moby/term/termios_nonbsd.go delete mode 100644 vendor/github.com/moby/term/termios_unix.go delete mode 100644 vendor/github.com/moby/term/termios_windows.go delete mode 100644 vendor/github.com/moby/term/windows/ansi_reader.go delete mode 100644 vendor/github.com/moby/term/windows/ansi_writer.go delete mode 100644 vendor/github.com/moby/term/windows/console.go delete mode 100644 vendor/github.com/moby/term/windows/doc.go delete mode 100644 vendor/github.com/morikuni/aec/LICENSE delete mode 100644 vendor/github.com/morikuni/aec/README.md delete mode 100644 vendor/github.com/morikuni/aec/aec.go delete mode 100644 vendor/github.com/morikuni/aec/ansi.go delete mode 100644 vendor/github.com/morikuni/aec/builder.go delete mode 100644 vendor/github.com/morikuni/aec/sample.gif delete mode 100644 
vendor/github.com/morikuni/aec/sgr.go delete mode 100644 vendor/github.com/opencontainers/go-digest/.mailmap delete mode 100644 vendor/github.com/opencontainers/go-digest/.pullapprove.yml delete mode 100644 vendor/github.com/opencontainers/go-digest/.travis.yml delete mode 100644 vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md delete mode 100644 vendor/github.com/opencontainers/go-digest/LICENSE delete mode 100644 vendor/github.com/opencontainers/go-digest/LICENSE.docs delete mode 100644 vendor/github.com/opencontainers/go-digest/MAINTAINERS delete mode 100644 vendor/github.com/opencontainers/go-digest/README.md delete mode 100644 vendor/github.com/opencontainers/go-digest/algorithm.go delete mode 100644 vendor/github.com/opencontainers/go-digest/digest.go delete mode 100644 vendor/github.com/opencontainers/go-digest/digester.go delete mode 100644 vendor/github.com/opencontainers/go-digest/doc.go delete mode 100644 vendor/github.com/opencontainers/go-digest/verifiers.go delete mode 100644 vendor/github.com/opencontainers/image-spec/LICENSE delete mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go delete mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go delete mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go delete mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go delete mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go delete mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go delete mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go delete mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go delete mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/version.go delete mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/versioned.go delete mode 100644 vendor/github.com/opencontainers/runc/LICENSE delete mode 100644 vendor/github.com/opencontainers/runc/NOTICE delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/user.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go delete mode 100644 vendor/github.com/sirupsen/logrus/.gitignore delete mode 100644 vendor/github.com/sirupsen/logrus/.golangci.yml delete mode 100644 vendor/github.com/sirupsen/logrus/.travis.yml delete mode 100644 vendor/github.com/sirupsen/logrus/CHANGELOG.md delete mode 100644 vendor/github.com/sirupsen/logrus/LICENSE delete mode 100644 vendor/github.com/sirupsen/logrus/README.md delete mode 100644 vendor/github.com/sirupsen/logrus/alt_exit.go delete mode 100644 vendor/github.com/sirupsen/logrus/appveyor.yml delete mode 100644 vendor/github.com/sirupsen/logrus/buffer_pool.go delete mode 100644 vendor/github.com/sirupsen/logrus/doc.go delete mode 100644 vendor/github.com/sirupsen/logrus/entry.go delete mode 100644 vendor/github.com/sirupsen/logrus/exported.go delete mode 100644 vendor/github.com/sirupsen/logrus/formatter.go delete mode 100644 vendor/github.com/sirupsen/logrus/hooks.go delete mode 100644 vendor/github.com/sirupsen/logrus/json_formatter.go delete mode 100644 vendor/github.com/sirupsen/logrus/logger.go delete mode 100644 vendor/github.com/sirupsen/logrus/logrus.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_appengine.go delete mode 100644 
vendor/github.com/sirupsen/logrus/terminal_check_bsd.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_js.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_solaris.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_unix.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_windows.go delete mode 100644 vendor/github.com/sirupsen/logrus/text_formatter.go delete mode 100644 vendor/github.com/sirupsen/logrus/writer.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/.gitignore delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/.golangci.yml delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/CONTRIBUTING.md delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/LICENSE delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/Makefile delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/Pipfile delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/Pipfile.lock delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/README.md delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/RELEASING.md delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/commons-test.mk delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/config.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/container.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/docker.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/docker_auth.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/docker_mounts.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/exec/processor.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/file.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/generic.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/internal/config/config.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/client.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/docker_host.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/docker_rootless.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/docker_socket.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/images.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/labels.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/internal/testcontainerssession/session.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/internal/version.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/lifecycle.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/logconsumer.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/logger.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/mkdocs.yml delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/mounts.go delete mode 100644 
vendor/github.com/testcontainers/testcontainers-go/network.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/parallel.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/provider.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/reaper.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/requirements.txt delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/runtime.txt delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/testcontainer.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/testing.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/all.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/errors.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/errors_windows.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/exec.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/exit.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/health.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/host_port.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/http.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/log.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/nop.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/sql.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/wait.go delete mode 100644 vendor/golang.org/x/exp/LICENSE delete mode 100644 vendor/golang.org/x/exp/PATENTS delete mode 100644 vendor/golang.org/x/exp/constraints/constraints.go delete mode 100644 vendor/golang.org/x/exp/slices/slices.go delete mode 100644 vendor/golang.org/x/exp/slices/sort.go delete mode 100644 vendor/golang.org/x/exp/slices/zsortfunc.go delete mode 100644 vendor/golang.org/x/exp/slices/zsortordered.go delete mode 100644 vendor/golang.org/x/net/LICENSE delete mode 100644 vendor/golang.org/x/net/PATENTS delete mode 100644 vendor/golang.org/x/net/internal/socks/client.go delete mode 100644 vendor/golang.org/x/net/internal/socks/socks.go delete mode 100644 vendor/golang.org/x/net/proxy/dial.go delete mode 100644 vendor/golang.org/x/net/proxy/direct.go delete mode 100644 vendor/golang.org/x/net/proxy/per_host.go delete mode 100644 vendor/golang.org/x/net/proxy/proxy.go delete mode 100644 vendor/golang.org/x/net/proxy/socks5.go delete mode 100644 vendor/golang.org/x/sys/execabs/execabs.go delete mode 100644 vendor/golang.org/x/sys/execabs/execabs_go118.go delete mode 100644 vendor/golang.org/x/sys/execabs/execabs_go119.go delete mode 100644 vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go delete mode 100644 vendor/golang.org/x/sys/windows/aliases.go delete mode 100644 vendor/golang.org/x/sys/windows/dll_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/empty.s delete mode 100644 vendor/golang.org/x/sys/windows/env_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/eventlog.go delete mode 100644 vendor/golang.org/x/sys/windows/exec_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/memory_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/mkerrors.bash delete mode 100644 vendor/golang.org/x/sys/windows/mkknownfolderids.bash delete mode 100644 vendor/golang.org/x/sys/windows/mksyscall.go delete mode 
100644 vendor/golang.org/x/sys/windows/race.go delete mode 100644 vendor/golang.org/x/sys/windows/race0.go delete mode 100644 vendor/golang.org/x/sys/windows/security_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/service.go delete mode 100644 vendor/golang.org/x/sys/windows/setupapi_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/str.go delete mode 100644 vendor/golang.org/x/sys/windows/syscall.go delete mode 100644 vendor/golang.org/x/sys/windows/syscall_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/types_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/types_windows_386.go delete mode 100644 vendor/golang.org/x/sys/windows/types_windows_amd64.go delete mode 100644 vendor/golang.org/x/sys/windows/types_windows_arm.go delete mode 100644 vendor/golang.org/x/sys/windows/types_windows_arm64.go delete mode 100644 vendor/golang.org/x/sys/windows/zerrors_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/zknownfolderids_windows.go delete mode 100644 vendor/golang.org/x/sys/windows/zsyscall_windows.go delete mode 100644 vendor/google.golang.org/genproto/LICENSE delete mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go delete mode 100644 vendor/google.golang.org/grpc/AUTHORS delete mode 100644 vendor/google.golang.org/grpc/LICENSE delete mode 100644 vendor/google.golang.org/grpc/NOTICE.txt delete mode 100644 vendor/google.golang.org/grpc/codes/code_string.go delete mode 100644 vendor/google.golang.org/grpc/codes/codes.go delete mode 100644 vendor/google.golang.org/grpc/internal/status/status.go delete mode 100644 vendor/google.golang.org/grpc/status/status.go delete mode 100644 vendor/google.golang.org/protobuf/AUTHORS delete mode 100644 vendor/google.golang.org/protobuf/CONTRIBUTORS delete mode 100644 vendor/google.golang.org/protobuf/LICENSE delete mode 100644 vendor/google.golang.org/protobuf/PATENTS delete mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/decode.go delete mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/doc.go delete mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/encode.go delete mode 100644 vendor/google.golang.org/protobuf/encoding/protowire/wire.go delete mode 100644 vendor/google.golang.org/protobuf/internal/descfmt/stringer.go delete mode 100644 vendor/google.golang.org/protobuf/internal/descopts/options.go delete mode 100644 vendor/google.golang.org/protobuf/internal/detrand/rand.go delete mode 100644 vendor/google.golang.org/protobuf/internal/encoding/defval/default.go delete mode 100644 vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go delete mode 100644 vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go delete mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode.go delete mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go delete mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go delete mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go delete mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/doc.go delete mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/encode.go delete mode 100644 vendor/google.golang.org/protobuf/internal/errors/errors.go delete mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go112.go delete mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go113.go delete mode 100644 
vendor/google.golang.org/protobuf/internal/filedesc/build.go delete mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc.go delete mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go delete mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go delete mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go delete mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go delete mode 100644 vendor/google.golang.org/protobuf/internal/filetype/build.go delete mode 100644 vendor/google.golang.org/protobuf/internal/flags/flags.go delete mode 100644 vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go delete mode 100644 vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genid/any_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genid/api_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genid/doc.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genid/duration_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genid/empty_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genid/goname.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genid/map_entry.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genid/struct_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genid/type_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genid/wrappers.go delete mode 100644 vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/api_export.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/checkinit.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_extension.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_field.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_message.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_tables.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert_list.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert_map.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/decode.go delete mode 
100644 vendor/google.golang.org/protobuf/internal/impl/encode.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/enum.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/extension.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_export.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_file.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_message.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/merge.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/merge_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/message.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/validate.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/weak.go delete mode 100644 vendor/google.golang.org/protobuf/internal/order/order.go delete mode 100644 vendor/google.golang.org/protobuf/internal/order/range.go delete mode 100644 vendor/google.golang.org/protobuf/internal/pragma/pragma.go delete mode 100644 vendor/google.golang.org/protobuf/internal/set/ints.go delete mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings.go delete mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_pure.go delete mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go delete mode 100644 vendor/google.golang.org/protobuf/internal/version/version.go delete mode 100644 vendor/google.golang.org/protobuf/proto/checkinit.go delete mode 100644 vendor/google.golang.org/protobuf/proto/decode.go delete mode 100644 vendor/google.golang.org/protobuf/proto/decode_gen.go delete mode 100644 vendor/google.golang.org/protobuf/proto/doc.go delete mode 100644 vendor/google.golang.org/protobuf/proto/encode.go delete mode 100644 vendor/google.golang.org/protobuf/proto/encode_gen.go delete mode 100644 vendor/google.golang.org/protobuf/proto/equal.go delete mode 100644 vendor/google.golang.org/protobuf/proto/extension.go delete mode 100644 vendor/google.golang.org/protobuf/proto/merge.go delete mode 100644 vendor/google.golang.org/protobuf/proto/messageset.go delete mode 100644 vendor/google.golang.org/protobuf/proto/proto.go delete mode 100644 vendor/google.golang.org/protobuf/proto/proto_methods.go delete mode 100644 vendor/google.golang.org/protobuf/proto/proto_reflect.go delete mode 100644 vendor/google.golang.org/protobuf/proto/reset.go delete mode 100644 vendor/google.golang.org/protobuf/proto/size.go delete mode 100644 vendor/google.golang.org/protobuf/proto/size_gen.go delete mode 100644 vendor/google.golang.org/protobuf/proto/wrappers.go delete mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc.go delete mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go delete mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go 
delete mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go delete mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/proto.go delete mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go delete mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go delete mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/source.go delete mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go delete mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/type.go delete mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value.go delete mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go delete mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go delete mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go delete mode 100644 vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go delete mode 100644 vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go delete mode 100644 vendor/google.golang.org/protobuf/runtime/protoiface/methods.go delete mode 100644 vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go delete mode 100644 vendor/google.golang.org/protobuf/runtime/protoimpl/version.go delete mode 100644 vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go delete mode 100644 vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go delete mode 100644 vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go delete mode 100644 vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go diff --git a/e2e/select_test.go b/e2e/select_test.go index ba0722af7..60374081d 100644 --- a/e2e/select_test.go +++ b/e2e/select_test.go @@ -2,14 +2,10 @@ package e2e import ( - "context" "fmt" "strings" "testing" - "github.com/docker/go-connections/nat" - "github.com/testcontainers/testcontainers-go" - "github.com/testcontainers/testcontainers-go/wait" "gotest.tools/v3/icmd" ) @@ -127,41 +123,6 @@ func getSequentialFile(n int, inputForm, outputForm, structure string) (string, return sb.String(), expectedLines } -func startMinio(t *testing.T, ctx context.Context) (testcontainers.Container, string, error) { - t.Helper() - port, err := nat.NewPort("", "9000") - if err != nil { - t.Errorf("error while building port\n") - return nil, "", err - } - - req := testcontainers.ContainerRequest{ - Image: "minio/minio", - ExposedPorts: []string{string(port)}, - Cmd: []string{"server", "/data"}, - WaitingFor: wait.ForListeningPort(port), - } - - minioC, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{ - ContainerRequest: req, - Started: true, - }) - if err != nil { - return nil, "", err - } - host, err := minioC.Host(ctx) - if err != nil { - return nil, "", err - } - - mappedPort, err := minioC.MappedPort(ctx, port) - if err != nil { - return nil, "", err - } - address := fmt.Sprintf("http://%s:%d", host, mappedPort.Int()) - return minioC, address, nil -} - /* test cases: json: @@ -186,19 +147,6 @@ parquet: */ func TestSelectCommand(t *testing.T) { t.Parallel() - /* - ctx := context.Background() - container, address, err := startMinio(t, ctx) - if err != nil { - t.Fatalf("could not start the container. Error:%v\n", err) - } - - t.Cleanup(func() { - if err := container.Terminate(ctx); err != nil { - t.Errorf("could not terminate the container. 
Error:%v\n", err) - } - }) - */ // credentials are same for all test cases region := "us-east-1" accessKeyId := "minioadmin" diff --git a/go.mod b/go.mod index 5c165b4a5..bceb23347 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,6 @@ go 1.17 require ( github.com/aws/aws-sdk-go v1.40.44 - github.com/docker/go-connections v0.4.0 github.com/golang/mock v1.6.0 github.com/google/go-cmp v0.5.9 github.com/hashicorp/go-multierror v1.1.1 @@ -14,51 +13,28 @@ require ( github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 github.com/lanrat/extsort v1.0.0 github.com/termie/go-shutil v0.0.0-20140729215957-bcacb06fecae - github.com/testcontainers/testcontainers-go v0.21.0 github.com/urfave/cli/v2 v2.11.2 gotest.tools/v3 v3.0.3 ) require ( - github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect - github.com/Microsoft/go-winio v0.5.2 // indirect - github.com/cenkalti/backoff/v4 v4.2.0 // indirect - github.com/containerd/containerd v1.6.19 // indirect - github.com/cpuguy83/dockercfg v0.3.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect - github.com/docker/distribution v2.8.2+incompatible // indirect - github.com/docker/docker v23.0.5+incompatible // indirect - github.com/docker/go-units v0.5.0 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/imdario/mergo v0.3.15 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/klauspost/compress v1.11.13 // indirect - github.com/kr/text v0.2.0 // indirect - github.com/magiconair/properties v1.8.7 // indirect - github.com/moby/patternmatcher v0.5.0 // indirect - github.com/moby/sys/sequential v0.5.0 // indirect - github.com/moby/term v0.5.0 // indirect - github.com/morikuni/aec v1.0.0 // indirect - github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0-rc2 // indirect - github.com/opencontainers/runc v1.1.5 // indirect + github.com/kr/pretty v0.3.0 // indirect github.com/pkg/errors v0.9.1 // indirect + github.com/rogpeppe/go-internal v1.8.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect github.com/shabbyrobe/gocovmerge v0.0.0-20180507124511-f6ea450bfb63 // indirect - github.com/sirupsen/logrus v1.9.0 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect go.etcd.io/bbolt v1.3.6 // indirect - golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea // indirect golang.org/x/net v0.7.0 // indirect golang.org/x/sync v0.1.0 // indirect golang.org/x/sys v0.8.0 // indirect golang.org/x/tools v0.2.0 // indirect - google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad // indirect - google.golang.org/grpc v1.47.0 // indirect - google.golang.org/protobuf v1.28.0 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/go.sum b/go.sum index 525bf27f5..be0ff4843 100644 --- a/go.sum +++ b/go.sum @@ -1,97 +1,18 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= 
-github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= -github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= -github.com/Microsoft/hcsshim v0.9.7 h1:mKNHW/Xvv1aFH87Jb6ERDzXTJTLPlmzfZ28VBFD/bfg= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/aws/aws-sdk-go v1.17.4/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.40.44 h1:kECaYybTWYZY5IKHvQMxbE6Wi5Qrb+7hbkV7zQV3Sg8= github.com/aws/aws-sdk-go v1.40.44/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= -github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4= -github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= -github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= -github.com/containerd/containerd v1.6.19 h1:F0qgQPrG0P2JPgwpxWxYavrVeXAG0ezUIB9Z/4FTUAU= -github.com/containerd/containerd v1.6.19/go.mod h1:HZCDMn4v/Xl2579/MvtOC2M206i+JJ6VxFWU/NetrGY= -github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoYe9E= -github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= -github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= -github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v23.0.5+incompatible h1:DaxtlTJjFSnLOXVNUBU1+6kXGz2lpDoEAH6QoxaSg8k= -github.com/docker/docker v23.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= -github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod 
h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -101,8 +22,6 @@ github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334 h1:VHgatEHNcBFE github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= github.com/igungor/gofakes3 v0.0.13 h1:rMi9KRo3jrqMyNnR9rc+d+cWTFR3mIQXnO1y+5gL98A= github.com/igungor/gofakes3 v0.0.13/go.mod h1:id+2y7yvTVoN1HW6VOtnfSqFt4EAfH5piVL4p9JsmQc= -github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= -github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= @@ -112,213 +31,118 @@ github.com/karrick/godirwalk v1.15.3 h1:0a2pXOgtB16CqIqXTiT7+K9L73f74n/aNQUnH6Or github.com/karrick/godirwalk v1.15.3/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.11.13 h1:eSvu8Tmq6j2psUJqJrLcWH6K3w5Dwc+qipbaA6eVEN4= 
-github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lanrat/extsort v1.0.0 h1:JjvkCUbD55+gs5s64FHmCU93kWjegEAM5n10XN6GB3c= github.com/lanrat/extsort v1.0.0/go.mod h1:bkDEvem4UnD1h87yKICydXs63mKrIGW3W9OGPMg93Ww= -github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= -github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo= -github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= -github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= -github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= -github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= -github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= -github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= -github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= -github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= -github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= -github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034= -github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= -github.com/opencontainers/runc v1.1.5 h1:L44KXEpKmfWDcS02aeGm8QNTFXTo2D+8MYGDIJ/GDEs= -github.com/opencontainers/runc v1.1.5/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= -github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal 
v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8= github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8= -github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/shabbyrobe/gocovmerge v0.0.0-20180507124511-f6ea450bfb63 h1:J6qvD6rbmOil46orKqJaRPG+zTpoGlBTUdyv8ki63L0= github.com/shabbyrobe/gocovmerge v0.0.0-20180507124511-f6ea450bfb63/go.mod h1:n+VKSARF5y/tS9XFSP7vWDfS+GUC5vs/YT7M5XDTUEM= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/termie/go-shutil v0.0.0-20140729215957-bcacb06fecae h1:vgGSvdW5Lqg+I1aZOlG32uyE6xHpLdKhZzcTEktz5wM= github.com/termie/go-shutil v0.0.0-20140729215957-bcacb06fecae/go.mod h1:quDq6Se6jlGwiIKia/itDZxqC5rj6/8OdFyMMAwTxCs= -github.com/testcontainers/testcontainers-go v0.21.0 h1:syePAxdeTzfkap+RrJaQZpJQ/s/fsUgn11xIvHrOE9U= -github.com/testcontainers/testcontainers-go v0.21.0/go.mod h1:c1ez3WVRHq7T/Aj+X3TIipFBwkBaNT5iNCY8+1b83Ng= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.11.2 h1:FVfNg4m3vbjbBpLYxW//WjxUoHvJ9TlppXcqY9Q9ZfA= github.com/urfave/cli/v2 v2.11.2/go.mod h1:f8iq5LtQ/bLxafbdBSLPPNsgaW0l/2fYYEHhAyPlwvo= -github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= -github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= 
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea h1:vLCWI/yYrdEHyN2JzIzPO3aaQJHQdp89IZBA/+azVC4= -golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190310074541-c10a0554eabf/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220405210540-1e041c57c461/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190308174544-00c44ba9c14f/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod 
h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE= golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad h1:kqrS+lhvaMHCxul6sKQvKJ8nAAhlVItmZV822hYFH/U= -google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8= -google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= 
-google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/Azure/go-ansiterm/LICENSE b/vendor/github.com/Azure/go-ansiterm/LICENSE deleted file mode 100644 index e3d9a64d1..000000000 --- a/vendor/github.com/Azure/go-ansiterm/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Microsoft Corporation - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/Azure/go-ansiterm/README.md b/vendor/github.com/Azure/go-ansiterm/README.md deleted file mode 100644 index 261c041e7..000000000 --- a/vendor/github.com/Azure/go-ansiterm/README.md +++ /dev/null @@ -1,12 +0,0 @@ -# go-ansiterm - -This is a cross platform Ansi Terminal Emulation library. 
It reads a stream of Ansi characters and produces the appropriate function calls. The results of the function calls are platform dependent. - -For example the parser might receive "ESC, [, A" as a stream of three characters. This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU). The parser then calls the cursor up function (CUU()) on an event handler. The event handler determines what platform specific work must be done to cause the cursor to move up one position. - -The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations, one for tests (test_event_handler.go) to validate that the expected events are being produced and called, the other is a Windows implementation (winterm/win_event_handler.go). - -See parser_test.go for examples exercising the state machine and generating appropriate function calls. - ------ -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. diff --git a/vendor/github.com/Azure/go-ansiterm/constants.go b/vendor/github.com/Azure/go-ansiterm/constants.go deleted file mode 100644 index 96504a33b..000000000 --- a/vendor/github.com/Azure/go-ansiterm/constants.go +++ /dev/null @@ -1,188 +0,0 @@ -package ansiterm - -const LogEnv = "DEBUG_TERMINAL" - -// ANSI constants -// References: -// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm -// -- http://man7.org/linux/man-pages/man4/console_codes.4.html -// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html -// -- http://en.wikipedia.org/wiki/ANSI_escape_code -// -- http://vt100.net/emu/dec_ansi_parser -// -- http://vt100.net/emu/vt500_parser.svg -// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html -// -- http://www.inwap.com/pdp10/ansicode.txt -const ( - // ECMA-48 Set Graphics Rendition - // Note: - // -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved - // -- Fonts could possibly be supported via SetCurrentConsoleFontEx - // -- Windows does not expose the per-window cursor (i.e., caret) blink times - ANSI_SGR_RESET = 0 - ANSI_SGR_BOLD = 1 - ANSI_SGR_DIM = 2 - _ANSI_SGR_ITALIC = 3 - ANSI_SGR_UNDERLINE = 4 - _ANSI_SGR_BLINKSLOW = 5 - _ANSI_SGR_BLINKFAST = 6 - ANSI_SGR_REVERSE = 7 - _ANSI_SGR_INVISIBLE = 8 - _ANSI_SGR_LINETHROUGH = 9 - _ANSI_SGR_FONT_00 = 10 - _ANSI_SGR_FONT_01 = 11 - _ANSI_SGR_FONT_02 = 12 - _ANSI_SGR_FONT_03 = 13 - _ANSI_SGR_FONT_04 = 14 - _ANSI_SGR_FONT_05 = 15 - _ANSI_SGR_FONT_06 = 16 - _ANSI_SGR_FONT_07 = 17 - _ANSI_SGR_FONT_08 = 18 - _ANSI_SGR_FONT_09 = 19 - _ANSI_SGR_FONT_10 = 20 - _ANSI_SGR_DOUBLEUNDERLINE = 21 - ANSI_SGR_BOLD_DIM_OFF = 22 - _ANSI_SGR_ITALIC_OFF = 23 - ANSI_SGR_UNDERLINE_OFF = 24 - _ANSI_SGR_BLINK_OFF = 25 - _ANSI_SGR_RESERVED_00 = 26 - ANSI_SGR_REVERSE_OFF = 27 - _ANSI_SGR_INVISIBLE_OFF = 28 - _ANSI_SGR_LINETHROUGH_OFF = 29 - ANSI_SGR_FOREGROUND_BLACK = 30 - ANSI_SGR_FOREGROUND_RED = 31 - ANSI_SGR_FOREGROUND_GREEN = 32 - ANSI_SGR_FOREGROUND_YELLOW = 33 - ANSI_SGR_FOREGROUND_BLUE = 34 - ANSI_SGR_FOREGROUND_MAGENTA = 35 - ANSI_SGR_FOREGROUND_CYAN = 36 - ANSI_SGR_FOREGROUND_WHITE = 37 - _ANSI_SGR_RESERVED_01 = 38 - ANSI_SGR_FOREGROUND_DEFAULT = 39 - ANSI_SGR_BACKGROUND_BLACK = 40 - 
ANSI_SGR_BACKGROUND_RED = 41 - ANSI_SGR_BACKGROUND_GREEN = 42 - ANSI_SGR_BACKGROUND_YELLOW = 43 - ANSI_SGR_BACKGROUND_BLUE = 44 - ANSI_SGR_BACKGROUND_MAGENTA = 45 - ANSI_SGR_BACKGROUND_CYAN = 46 - ANSI_SGR_BACKGROUND_WHITE = 47 - _ANSI_SGR_RESERVED_02 = 48 - ANSI_SGR_BACKGROUND_DEFAULT = 49 - // 50 - 65: Unsupported - - ANSI_MAX_CMD_LENGTH = 4096 - - MAX_INPUT_EVENTS = 128 - DEFAULT_WIDTH = 80 - DEFAULT_HEIGHT = 24 - - ANSI_BEL = 0x07 - ANSI_BACKSPACE = 0x08 - ANSI_TAB = 0x09 - ANSI_LINE_FEED = 0x0A - ANSI_VERTICAL_TAB = 0x0B - ANSI_FORM_FEED = 0x0C - ANSI_CARRIAGE_RETURN = 0x0D - ANSI_ESCAPE_PRIMARY = 0x1B - ANSI_ESCAPE_SECONDARY = 0x5B - ANSI_OSC_STRING_ENTRY = 0x5D - ANSI_COMMAND_FIRST = 0x40 - ANSI_COMMAND_LAST = 0x7E - DCS_ENTRY = 0x90 - CSI_ENTRY = 0x9B - OSC_STRING = 0x9D - ANSI_PARAMETER_SEP = ";" - ANSI_CMD_G0 = '(' - ANSI_CMD_G1 = ')' - ANSI_CMD_G2 = '*' - ANSI_CMD_G3 = '+' - ANSI_CMD_DECPNM = '>' - ANSI_CMD_DECPAM = '=' - ANSI_CMD_OSC = ']' - ANSI_CMD_STR_TERM = '\\' - - KEY_CONTROL_PARAM_2 = ";2" - KEY_CONTROL_PARAM_3 = ";3" - KEY_CONTROL_PARAM_4 = ";4" - KEY_CONTROL_PARAM_5 = ";5" - KEY_CONTROL_PARAM_6 = ";6" - KEY_CONTROL_PARAM_7 = ";7" - KEY_CONTROL_PARAM_8 = ";8" - KEY_ESC_CSI = "\x1B[" - KEY_ESC_N = "\x1BN" - KEY_ESC_O = "\x1BO" - - FILL_CHARACTER = ' ' -) - -func getByteRange(start byte, end byte) []byte { - bytes := make([]byte, 0, 32) - for i := start; i <= end; i++ { - bytes = append(bytes, byte(i)) - } - - return bytes -} - -var toGroundBytes = getToGroundBytes() -var executors = getExecuteBytes() - -// SPACE 20+A0 hex Always and everywhere a blank space -// Intermediate 20-2F hex !"#$%&'()*+,-./ -var intermeds = getByteRange(0x20, 0x2F) - -// Parameters 30-3F hex 0123456789:;<=>? -// CSI Parameters 30-39, 3B hex 0123456789; -var csiParams = getByteRange(0x30, 0x3F) - -var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...) - -// Uppercase 40-5F hex @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_ -var upperCase = getByteRange(0x40, 0x5F) - -// Lowercase 60-7E hex `abcdefghijlkmnopqrstuvwxyz{|}~ -var lowerCase = getByteRange(0x60, 0x7E) - -// Alphabetics 40-7E hex (all of upper and lower case) -var alphabetics = append(upperCase, lowerCase...) - -var printables = getByteRange(0x20, 0x7F) - -var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E) -var escapeToGroundBytes = getEscapeToGroundBytes() - -// See http://www.vt100.net/emu/vt500_parser.png for description of the complex -// byte ranges below - -func getEscapeToGroundBytes() []byte { - escapeToGroundBytes := getByteRange(0x30, 0x4F) - escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...) - escapeToGroundBytes = append(escapeToGroundBytes, 0x59) - escapeToGroundBytes = append(escapeToGroundBytes, 0x5A) - escapeToGroundBytes = append(escapeToGroundBytes, 0x5C) - escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...) - return escapeToGroundBytes -} - -func getExecuteBytes() []byte { - executeBytes := getByteRange(0x00, 0x17) - executeBytes = append(executeBytes, 0x19) - executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...) - return executeBytes -} - -func getToGroundBytes() []byte { - groundBytes := []byte{0x18} - groundBytes = append(groundBytes, 0x1A) - groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...) - groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...) 
- groundBytes = append(groundBytes, 0x99) - groundBytes = append(groundBytes, 0x9A) - groundBytes = append(groundBytes, 0x9C) - return groundBytes -} - -// Delete 7F hex Always and everywhere ignored -// C1 Control 80-9F hex 32 additional control characters -// G1 Displayable A1-FE hex 94 additional displayable characters -// Special A0+FF hex Same as SPACE and DELETE diff --git a/vendor/github.com/Azure/go-ansiterm/context.go b/vendor/github.com/Azure/go-ansiterm/context.go deleted file mode 100644 index 8d66e777c..000000000 --- a/vendor/github.com/Azure/go-ansiterm/context.go +++ /dev/null @@ -1,7 +0,0 @@ -package ansiterm - -type ansiContext struct { - currentChar byte - paramBuffer []byte - interBuffer []byte -} diff --git a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go deleted file mode 100644 index bcbe00d0c..000000000 --- a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go +++ /dev/null @@ -1,49 +0,0 @@ -package ansiterm - -type csiEntryState struct { - baseState -} - -func (csiState csiEntryState) Handle(b byte) (s state, e error) { - csiState.parser.logf("CsiEntry::Handle %#x", b) - - nextState, err := csiState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(alphabetics, b): - return csiState.parser.ground, nil - case sliceContains(csiCollectables, b): - return csiState.parser.csiParam, nil - case sliceContains(executors, b): - return csiState, csiState.parser.execute() - } - - return csiState, nil -} - -func (csiState csiEntryState) Transition(s state) error { - csiState.parser.logf("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name()) - csiState.baseState.Transition(s) - - switch s { - case csiState.parser.ground: - return csiState.parser.csiDispatch() - case csiState.parser.csiParam: - switch { - case sliceContains(csiParams, csiState.parser.context.currentChar): - csiState.parser.collectParam() - case sliceContains(intermeds, csiState.parser.context.currentChar): - csiState.parser.collectInter() - } - } - - return nil -} - -func (csiState csiEntryState) Enter() error { - csiState.parser.clear() - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go deleted file mode 100644 index 7ed5e01c3..000000000 --- a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go +++ /dev/null @@ -1,38 +0,0 @@ -package ansiterm - -type csiParamState struct { - baseState -} - -func (csiState csiParamState) Handle(b byte) (s state, e error) { - csiState.parser.logf("CsiParam::Handle %#x", b) - - nextState, err := csiState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(alphabetics, b): - return csiState.parser.ground, nil - case sliceContains(csiCollectables, b): - csiState.parser.collectParam() - return csiState, nil - case sliceContains(executors, b): - return csiState, csiState.parser.execute() - } - - return csiState, nil -} - -func (csiState csiParamState) Transition(s state) error { - csiState.parser.logf("CsiParam::Transition %s --> %s", csiState.Name(), s.Name()) - csiState.baseState.Transition(s) - - switch s { - case csiState.parser.ground: - return csiState.parser.csiDispatch() - } - - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go deleted file mode 100644 index 
1c719db9e..000000000 --- a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go +++ /dev/null @@ -1,36 +0,0 @@ -package ansiterm - -type escapeIntermediateState struct { - baseState -} - -func (escState escapeIntermediateState) Handle(b byte) (s state, e error) { - escState.parser.logf("escapeIntermediateState::Handle %#x", b) - nextState, err := escState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(intermeds, b): - return escState, escState.parser.collectInter() - case sliceContains(executors, b): - return escState, escState.parser.execute() - case sliceContains(escapeIntermediateToGroundBytes, b): - return escState.parser.ground, nil - } - - return escState, nil -} - -func (escState escapeIntermediateState) Transition(s state) error { - escState.parser.logf("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name()) - escState.baseState.Transition(s) - - switch s { - case escState.parser.ground: - return escState.parser.escDispatch() - } - - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/escape_state.go b/vendor/github.com/Azure/go-ansiterm/escape_state.go deleted file mode 100644 index 6390abd23..000000000 --- a/vendor/github.com/Azure/go-ansiterm/escape_state.go +++ /dev/null @@ -1,47 +0,0 @@ -package ansiterm - -type escapeState struct { - baseState -} - -func (escState escapeState) Handle(b byte) (s state, e error) { - escState.parser.logf("escapeState::Handle %#x", b) - nextState, err := escState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case b == ANSI_ESCAPE_SECONDARY: - return escState.parser.csiEntry, nil - case b == ANSI_OSC_STRING_ENTRY: - return escState.parser.oscString, nil - case sliceContains(executors, b): - return escState, escState.parser.execute() - case sliceContains(escapeToGroundBytes, b): - return escState.parser.ground, nil - case sliceContains(intermeds, b): - return escState.parser.escapeIntermediate, nil - } - - return escState, nil -} - -func (escState escapeState) Transition(s state) error { - escState.parser.logf("Escape::Transition %s --> %s", escState.Name(), s.Name()) - escState.baseState.Transition(s) - - switch s { - case escState.parser.ground: - return escState.parser.escDispatch() - case escState.parser.escapeIntermediate: - return escState.parser.collectInter() - } - - return nil -} - -func (escState escapeState) Enter() error { - escState.parser.clear() - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/event_handler.go b/vendor/github.com/Azure/go-ansiterm/event_handler.go deleted file mode 100644 index 98087b38c..000000000 --- a/vendor/github.com/Azure/go-ansiterm/event_handler.go +++ /dev/null @@ -1,90 +0,0 @@ -package ansiterm - -type AnsiEventHandler interface { - // Print - Print(b byte) error - - // Execute C0 commands - Execute(b byte) error - - // CUrsor Up - CUU(int) error - - // CUrsor Down - CUD(int) error - - // CUrsor Forward - CUF(int) error - - // CUrsor Backward - CUB(int) error - - // Cursor to Next Line - CNL(int) error - - // Cursor to Previous Line - CPL(int) error - - // Cursor Horizontal position Absolute - CHA(int) error - - // Vertical line Position Absolute - VPA(int) error - - // CUrsor Position - CUP(int, int) error - - // Horizontal and Vertical Position (depends on PUM) - HVP(int, int) error - - // Text Cursor Enable Mode - DECTCEM(bool) error - - // Origin Mode - DECOM(bool) error - - // 132 Column Mode - DECCOLM(bool) error - - 
// Erase in Display - ED(int) error - - // Erase in Line - EL(int) error - - // Insert Line - IL(int) error - - // Delete Line - DL(int) error - - // Insert Character - ICH(int) error - - // Delete Character - DCH(int) error - - // Set Graphics Rendition - SGR([]int) error - - // Pan Down - SU(int) error - - // Pan Up - SD(int) error - - // Device Attributes - DA([]string) error - - // Set Top and Bottom Margins - DECSTBM(int, int) error - - // Index - IND() error - - // Reverse Index - RI() error - - // Flush updates from previous commands - Flush() error -} diff --git a/vendor/github.com/Azure/go-ansiterm/ground_state.go b/vendor/github.com/Azure/go-ansiterm/ground_state.go deleted file mode 100644 index 52451e946..000000000 --- a/vendor/github.com/Azure/go-ansiterm/ground_state.go +++ /dev/null @@ -1,24 +0,0 @@ -package ansiterm - -type groundState struct { - baseState -} - -func (gs groundState) Handle(b byte) (s state, e error) { - gs.parser.context.currentChar = b - - nextState, err := gs.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(printables, b): - return gs, gs.parser.print() - - case sliceContains(executors, b): - return gs, gs.parser.execute() - } - - return gs, nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go deleted file mode 100644 index 593b10ab6..000000000 --- a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go +++ /dev/null @@ -1,31 +0,0 @@ -package ansiterm - -type oscStringState struct { - baseState -} - -func (oscState oscStringState) Handle(b byte) (s state, e error) { - oscState.parser.logf("OscString::Handle %#x", b) - nextState, err := oscState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case isOscStringTerminator(b): - return oscState.parser.ground, nil - } - - return oscState, nil -} - -// See below for OSC string terminators for linux -// http://man7.org/linux/man-pages/man4/console_codes.4.html -func isOscStringTerminator(b byte) bool { - - if b == ANSI_BEL || b == 0x5C { - return true - } - - return false -} diff --git a/vendor/github.com/Azure/go-ansiterm/parser.go b/vendor/github.com/Azure/go-ansiterm/parser.go deleted file mode 100644 index 03cec7ada..000000000 --- a/vendor/github.com/Azure/go-ansiterm/parser.go +++ /dev/null @@ -1,151 +0,0 @@ -package ansiterm - -import ( - "errors" - "log" - "os" -) - -type AnsiParser struct { - currState state - eventHandler AnsiEventHandler - context *ansiContext - csiEntry state - csiParam state - dcsEntry state - escape state - escapeIntermediate state - error state - ground state - oscString state - stateMap []state - - logf func(string, ...interface{}) -} - -type Option func(*AnsiParser) - -func WithLogf(f func(string, ...interface{})) Option { - return func(ap *AnsiParser) { - ap.logf = f - } -} - -func CreateParser(initialState string, evtHandler AnsiEventHandler, opts ...Option) *AnsiParser { - ap := &AnsiParser{ - eventHandler: evtHandler, - context: &ansiContext{}, - } - for _, o := range opts { - o(ap) - } - - if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" { - logFile, _ := os.Create("ansiParser.log") - logger := log.New(logFile, "", log.LstdFlags) - if ap.logf != nil { - l := ap.logf - ap.logf = func(s string, v ...interface{}) { - l(s, v...) - logger.Printf(s, v...) 
- } - } else { - ap.logf = logger.Printf - } - } - - if ap.logf == nil { - ap.logf = func(string, ...interface{}) {} - } - - ap.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: ap}} - ap.csiParam = csiParamState{baseState{name: "CsiParam", parser: ap}} - ap.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: ap}} - ap.escape = escapeState{baseState{name: "Escape", parser: ap}} - ap.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: ap}} - ap.error = errorState{baseState{name: "Error", parser: ap}} - ap.ground = groundState{baseState{name: "Ground", parser: ap}} - ap.oscString = oscStringState{baseState{name: "OscString", parser: ap}} - - ap.stateMap = []state{ - ap.csiEntry, - ap.csiParam, - ap.dcsEntry, - ap.escape, - ap.escapeIntermediate, - ap.error, - ap.ground, - ap.oscString, - } - - ap.currState = getState(initialState, ap.stateMap) - - ap.logf("CreateParser: parser %p", ap) - return ap -} - -func getState(name string, states []state) state { - for _, el := range states { - if el.Name() == name { - return el - } - } - - return nil -} - -func (ap *AnsiParser) Parse(bytes []byte) (int, error) { - for i, b := range bytes { - if err := ap.handle(b); err != nil { - return i, err - } - } - - return len(bytes), ap.eventHandler.Flush() -} - -func (ap *AnsiParser) handle(b byte) error { - ap.context.currentChar = b - newState, err := ap.currState.Handle(b) - if err != nil { - return err - } - - if newState == nil { - ap.logf("WARNING: newState is nil") - return errors.New("New state of 'nil' is invalid.") - } - - if newState != ap.currState { - if err := ap.changeState(newState); err != nil { - return err - } - } - - return nil -} - -func (ap *AnsiParser) changeState(newState state) error { - ap.logf("ChangeState %s --> %s", ap.currState.Name(), newState.Name()) - - // Exit old state - if err := ap.currState.Exit(); err != nil { - ap.logf("Exit state '%s' failed with : '%v'", ap.currState.Name(), err) - return err - } - - // Perform transition action - if err := ap.currState.Transition(newState); err != nil { - ap.logf("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name, err) - return err - } - - // Enter new state - if err := newState.Enter(); err != nil { - ap.logf("Enter state '%s' failed with: '%v'", newState.Name(), err) - return err - } - - ap.currState = newState - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go deleted file mode 100644 index de0a1f9cd..000000000 --- a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go +++ /dev/null @@ -1,99 +0,0 @@ -package ansiterm - -import ( - "strconv" -) - -func parseParams(bytes []byte) ([]string, error) { - paramBuff := make([]byte, 0, 0) - params := []string{} - - for _, v := range bytes { - if v == ';' { - if len(paramBuff) > 0 { - // Completed parameter, append it to the list - s := string(paramBuff) - params = append(params, s) - paramBuff = make([]byte, 0, 0) - } - } else { - paramBuff = append(paramBuff, v) - } - } - - // Last parameter may not be terminated with ';' - if len(paramBuff) > 0 { - s := string(paramBuff) - params = append(params, s) - } - - return params, nil -} - -func parseCmd(context ansiContext) (string, error) { - return string(context.currentChar), nil -} - -func getInt(params []string, dflt int) int { - i := getInts(params, 1, dflt)[0] - return i -} - -func getInts(params []string, minCount int, dflt int) []int { - 
ints := []int{} - - for _, v := range params { - i, _ := strconv.Atoi(v) - // Zero is mapped to the default value in VT100. - if i == 0 { - i = dflt - } - ints = append(ints, i) - } - - if len(ints) < minCount { - remaining := minCount - len(ints) - for i := 0; i < remaining; i++ { - ints = append(ints, dflt) - } - } - - return ints -} - -func (ap *AnsiParser) modeDispatch(param string, set bool) error { - switch param { - case "?3": - return ap.eventHandler.DECCOLM(set) - case "?6": - return ap.eventHandler.DECOM(set) - case "?25": - return ap.eventHandler.DECTCEM(set) - } - return nil -} - -func (ap *AnsiParser) hDispatch(params []string) error { - if len(params) == 1 { - return ap.modeDispatch(params[0], true) - } - - return nil -} - -func (ap *AnsiParser) lDispatch(params []string) error { - if len(params) == 1 { - return ap.modeDispatch(params[0], false) - } - - return nil -} - -func getEraseParam(params []string) int { - param := getInt(params, 0) - if param < 0 || 3 < param { - param = 0 - } - - return param -} diff --git a/vendor/github.com/Azure/go-ansiterm/parser_actions.go b/vendor/github.com/Azure/go-ansiterm/parser_actions.go deleted file mode 100644 index 0bb5e51e9..000000000 --- a/vendor/github.com/Azure/go-ansiterm/parser_actions.go +++ /dev/null @@ -1,119 +0,0 @@ -package ansiterm - -func (ap *AnsiParser) collectParam() error { - currChar := ap.context.currentChar - ap.logf("collectParam %#x", currChar) - ap.context.paramBuffer = append(ap.context.paramBuffer, currChar) - return nil -} - -func (ap *AnsiParser) collectInter() error { - currChar := ap.context.currentChar - ap.logf("collectInter %#x", currChar) - ap.context.paramBuffer = append(ap.context.interBuffer, currChar) - return nil -} - -func (ap *AnsiParser) escDispatch() error { - cmd, _ := parseCmd(*ap.context) - intermeds := ap.context.interBuffer - ap.logf("escDispatch currentChar: %#x", ap.context.currentChar) - ap.logf("escDispatch: %v(%v)", cmd, intermeds) - - switch cmd { - case "D": // IND - return ap.eventHandler.IND() - case "E": // NEL, equivalent to CRLF - err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN) - if err == nil { - err = ap.eventHandler.Execute(ANSI_LINE_FEED) - } - return err - case "M": // RI - return ap.eventHandler.RI() - } - - return nil -} - -func (ap *AnsiParser) csiDispatch() error { - cmd, _ := parseCmd(*ap.context) - params, _ := parseParams(ap.context.paramBuffer) - ap.logf("Parsed params: %v with length: %d", params, len(params)) - - ap.logf("csiDispatch: %v(%v)", cmd, params) - - switch cmd { - case "@": - return ap.eventHandler.ICH(getInt(params, 1)) - case "A": - return ap.eventHandler.CUU(getInt(params, 1)) - case "B": - return ap.eventHandler.CUD(getInt(params, 1)) - case "C": - return ap.eventHandler.CUF(getInt(params, 1)) - case "D": - return ap.eventHandler.CUB(getInt(params, 1)) - case "E": - return ap.eventHandler.CNL(getInt(params, 1)) - case "F": - return ap.eventHandler.CPL(getInt(params, 1)) - case "G": - return ap.eventHandler.CHA(getInt(params, 1)) - case "H": - ints := getInts(params, 2, 1) - x, y := ints[0], ints[1] - return ap.eventHandler.CUP(x, y) - case "J": - param := getEraseParam(params) - return ap.eventHandler.ED(param) - case "K": - param := getEraseParam(params) - return ap.eventHandler.EL(param) - case "L": - return ap.eventHandler.IL(getInt(params, 1)) - case "M": - return ap.eventHandler.DL(getInt(params, 1)) - case "P": - return ap.eventHandler.DCH(getInt(params, 1)) - case "S": - return ap.eventHandler.SU(getInt(params, 1)) - case "T": - return 
ap.eventHandler.SD(getInt(params, 1)) - case "c": - return ap.eventHandler.DA(params) - case "d": - return ap.eventHandler.VPA(getInt(params, 1)) - case "f": - ints := getInts(params, 2, 1) - x, y := ints[0], ints[1] - return ap.eventHandler.HVP(x, y) - case "h": - return ap.hDispatch(params) - case "l": - return ap.lDispatch(params) - case "m": - return ap.eventHandler.SGR(getInts(params, 1, 0)) - case "r": - ints := getInts(params, 2, 1) - top, bottom := ints[0], ints[1] - return ap.eventHandler.DECSTBM(top, bottom) - default: - ap.logf("ERROR: Unsupported CSI command: '%s', with full context: %v", cmd, ap.context) - return nil - } - -} - -func (ap *AnsiParser) print() error { - return ap.eventHandler.Print(ap.context.currentChar) -} - -func (ap *AnsiParser) clear() error { - ap.context = &ansiContext{} - return nil -} - -func (ap *AnsiParser) execute() error { - return ap.eventHandler.Execute(ap.context.currentChar) -} diff --git a/vendor/github.com/Azure/go-ansiterm/states.go b/vendor/github.com/Azure/go-ansiterm/states.go deleted file mode 100644 index f2ea1fcd1..000000000 --- a/vendor/github.com/Azure/go-ansiterm/states.go +++ /dev/null @@ -1,71 +0,0 @@ -package ansiterm - -type stateID int - -type state interface { - Enter() error - Exit() error - Handle(byte) (state, error) - Name() string - Transition(state) error -} - -type baseState struct { - name string - parser *AnsiParser -} - -func (base baseState) Enter() error { - return nil -} - -func (base baseState) Exit() error { - return nil -} - -func (base baseState) Handle(b byte) (s state, e error) { - - switch { - case b == CSI_ENTRY: - return base.parser.csiEntry, nil - case b == DCS_ENTRY: - return base.parser.dcsEntry, nil - case b == ANSI_ESCAPE_PRIMARY: - return base.parser.escape, nil - case b == OSC_STRING: - return base.parser.oscString, nil - case sliceContains(toGroundBytes, b): - return base.parser.ground, nil - } - - return nil, nil -} - -func (base baseState) Name() string { - return base.name -} - -func (base baseState) Transition(s state) error { - if s == base.parser.ground { - execBytes := []byte{0x18} - execBytes = append(execBytes, 0x1A) - execBytes = append(execBytes, getByteRange(0x80, 0x8F)...) - execBytes = append(execBytes, getByteRange(0x91, 0x97)...) 
- execBytes = append(execBytes, 0x99) - execBytes = append(execBytes, 0x9A) - - if sliceContains(execBytes, base.parser.context.currentChar) { - return base.parser.execute() - } - } - - return nil -} - -type dcsEntryState struct { - baseState -} - -type errorState struct { - baseState -} diff --git a/vendor/github.com/Azure/go-ansiterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/utilities.go deleted file mode 100644 index 392114493..000000000 --- a/vendor/github.com/Azure/go-ansiterm/utilities.go +++ /dev/null @@ -1,21 +0,0 @@ -package ansiterm - -import ( - "strconv" -) - -func sliceContains(bytes []byte, b byte) bool { - for _, v := range bytes { - if v == b { - return true - } - } - - return false -} - -func convertBytesToInteger(bytes []byte) int { - s := string(bytes) - i, _ := strconv.Atoi(s) - return i -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go deleted file mode 100644 index 5599082ae..000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go +++ /dev/null @@ -1,196 +0,0 @@ -// +build windows - -package winterm - -import ( - "fmt" - "os" - "strconv" - "strings" - "syscall" - - "github.com/Azure/go-ansiterm" - windows "golang.org/x/sys/windows" -) - -// Windows keyboard constants -// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx. -const ( - VK_PRIOR = 0x21 // PAGE UP key - VK_NEXT = 0x22 // PAGE DOWN key - VK_END = 0x23 // END key - VK_HOME = 0x24 // HOME key - VK_LEFT = 0x25 // LEFT ARROW key - VK_UP = 0x26 // UP ARROW key - VK_RIGHT = 0x27 // RIGHT ARROW key - VK_DOWN = 0x28 // DOWN ARROW key - VK_SELECT = 0x29 // SELECT key - VK_PRINT = 0x2A // PRINT key - VK_EXECUTE = 0x2B // EXECUTE key - VK_SNAPSHOT = 0x2C // PRINT SCREEN key - VK_INSERT = 0x2D // INS key - VK_DELETE = 0x2E // DEL key - VK_HELP = 0x2F // HELP key - VK_F1 = 0x70 // F1 key - VK_F2 = 0x71 // F2 key - VK_F3 = 0x72 // F3 key - VK_F4 = 0x73 // F4 key - VK_F5 = 0x74 // F5 key - VK_F6 = 0x75 // F6 key - VK_F7 = 0x76 // F7 key - VK_F8 = 0x77 // F8 key - VK_F9 = 0x78 // F9 key - VK_F10 = 0x79 // F10 key - VK_F11 = 0x7A // F11 key - VK_F12 = 0x7B // F12 key - - RIGHT_ALT_PRESSED = 0x0001 - LEFT_ALT_PRESSED = 0x0002 - RIGHT_CTRL_PRESSED = 0x0004 - LEFT_CTRL_PRESSED = 0x0008 - SHIFT_PRESSED = 0x0010 - NUMLOCK_ON = 0x0020 - SCROLLLOCK_ON = 0x0040 - CAPSLOCK_ON = 0x0080 - ENHANCED_KEY = 0x0100 -) - -type ansiCommand struct { - CommandBytes []byte - Command string - Parameters []string - IsSpecial bool -} - -func newAnsiCommand(command []byte) *ansiCommand { - - if isCharacterSelectionCmdChar(command[1]) { - // Is Character Set Selection commands - return &ansiCommand{ - CommandBytes: command, - Command: string(command), - IsSpecial: true, - } - } - - // last char is command character - lastCharIndex := len(command) - 1 - - ac := &ansiCommand{ - CommandBytes: command, - Command: string(command[lastCharIndex]), - IsSpecial: false, - } - - // more than a single escape - if lastCharIndex != 0 { - start := 1 - // skip if double char escape sequence - if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY { - start++ - } - // convert this to GetNextParam method - ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP) - } - - return ac -} - -func (ac *ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 { - if index < 0 || index >= len(ac.Parameters) { - return defaultValue - } - - param, err := 
strconv.ParseInt(ac.Parameters[index], 10, 16) - if err != nil { - return defaultValue - } - - return int16(param) -} - -func (ac *ansiCommand) String() string { - return fmt.Sprintf("0x%v \"%v\" (\"%v\")", - bytesToHex(ac.CommandBytes), - ac.Command, - strings.Join(ac.Parameters, "\",\"")) -} - -// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands. -// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html. -func isAnsiCommandChar(b byte) bool { - switch { - case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY: - return true - case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM: - // non-CSI escape sequence terminator - return true - case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL: - // String escape sequence terminator - return true - } - return false -} - -func isXtermOscSequence(command []byte, current byte) bool { - return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL) -} - -func isCharacterSelectionCmdChar(b byte) bool { - return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3) -} - -// bytesToHex converts a slice of bytes to a human-readable string. -func bytesToHex(b []byte) string { - hex := make([]string, len(b)) - for i, ch := range b { - hex[i] = fmt.Sprintf("%X", ch) - } - return strings.Join(hex, "") -} - -// ensureInRange adjusts the passed value, if necessary, to ensure it is within -// the passed min / max range. -func ensureInRange(n int16, min int16, max int16) int16 { - if n < min { - return min - } else if n > max { - return max - } else { - return n - } -} - -func GetStdFile(nFile int) (*os.File, uintptr) { - var file *os.File - - // syscall uses negative numbers - // windows package uses very big uint32 - // Keep these switches split so we don't have to convert ints too much. - switch uint32(nFile) { - case windows.STD_INPUT_HANDLE: - file = os.Stdin - case windows.STD_OUTPUT_HANDLE: - file = os.Stdout - case windows.STD_ERROR_HANDLE: - file = os.Stderr - default: - switch nFile { - case syscall.STD_INPUT_HANDLE: - file = os.Stdin - case syscall.STD_OUTPUT_HANDLE: - file = os.Stdout - case syscall.STD_ERROR_HANDLE: - file = os.Stderr - default: - panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile)) - } - } - - fd, err := syscall.GetStdHandle(nFile) - if err != nil { - panic(fmt.Errorf("Invalid standard handle identifier: %v -- %v", nFile, err)) - } - - return file, uintptr(fd) -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/api.go b/vendor/github.com/Azure/go-ansiterm/winterm/api.go deleted file mode 100644 index 6055e33b9..000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/api.go +++ /dev/null @@ -1,327 +0,0 @@ -// +build windows - -package winterm - -import ( - "fmt" - "syscall" - "unsafe" -) - -//=========================================================================================================== -// IMPORTANT NOTE: -// -// The methods below make extensive use of the "unsafe" package to obtain the required pointers. -// Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack -// variables) the pointers reference *before* the API completes. 
-// -// As a result, in those cases, the code must hint that the variables remain in active by invoking the -// dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer -// require unsafe pointers. -// -// If you add or modify methods, ENSURE protection of local variables through the "use" builtin to inform -// the garbage collector the variables remain in use if: -// -// -- The value is not a pointer (e.g., int32, struct) -// -- The value is not referenced by the method after passing the pointer to Windows -// -// See http://golang.org/doc/go1.3. -//=========================================================================================================== - -var ( - kernel32DLL = syscall.NewLazyDLL("kernel32.dll") - - getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo") - setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo") - setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition") - setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode") - getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo") - setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize") - scrollConsoleScreenBufferProc = kernel32DLL.NewProc("ScrollConsoleScreenBufferA") - setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute") - setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo") - writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW") - readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW") - waitForSingleObjectProc = kernel32DLL.NewProc("WaitForSingleObject") -) - -// Windows Console constants -const ( - // Console modes - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. - ENABLE_PROCESSED_INPUT = 0x0001 - ENABLE_LINE_INPUT = 0x0002 - ENABLE_ECHO_INPUT = 0x0004 - ENABLE_WINDOW_INPUT = 0x0008 - ENABLE_MOUSE_INPUT = 0x0010 - ENABLE_INSERT_MODE = 0x0020 - ENABLE_QUICK_EDIT_MODE = 0x0040 - ENABLE_EXTENDED_FLAGS = 0x0080 - ENABLE_AUTO_POSITION = 0x0100 - ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200 - - ENABLE_PROCESSED_OUTPUT = 0x0001 - ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 - ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004 - DISABLE_NEWLINE_AUTO_RETURN = 0x0008 - ENABLE_LVB_GRID_WORLDWIDE = 0x0010 - - // Character attributes - // Note: - // -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan). - // Clearing all foreground or background colors results in black; setting all creates white. - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes. - FOREGROUND_BLUE uint16 = 0x0001 - FOREGROUND_GREEN uint16 = 0x0002 - FOREGROUND_RED uint16 = 0x0004 - FOREGROUND_INTENSITY uint16 = 0x0008 - FOREGROUND_MASK uint16 = 0x000F - - BACKGROUND_BLUE uint16 = 0x0010 - BACKGROUND_GREEN uint16 = 0x0020 - BACKGROUND_RED uint16 = 0x0040 - BACKGROUND_INTENSITY uint16 = 0x0080 - BACKGROUND_MASK uint16 = 0x00F0 - - COMMON_LVB_MASK uint16 = 0xFF00 - COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000 - COMMON_LVB_UNDERSCORE uint16 = 0x8000 - - // Input event types - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. 
- KEY_EVENT = 0x0001 - MOUSE_EVENT = 0x0002 - WINDOW_BUFFER_SIZE_EVENT = 0x0004 - MENU_EVENT = 0x0008 - FOCUS_EVENT = 0x0010 - - // WaitForSingleObject return codes - WAIT_ABANDONED = 0x00000080 - WAIT_FAILED = 0xFFFFFFFF - WAIT_SIGNALED = 0x0000000 - WAIT_TIMEOUT = 0x00000102 - - // WaitForSingleObject wait duration - WAIT_INFINITE = 0xFFFFFFFF - WAIT_ONE_SECOND = 1000 - WAIT_HALF_SECOND = 500 - WAIT_QUARTER_SECOND = 250 -) - -// Windows API Console types -// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD) -// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment -type ( - CHAR_INFO struct { - UnicodeChar uint16 - Attributes uint16 - } - - CONSOLE_CURSOR_INFO struct { - Size uint32 - Visible int32 - } - - CONSOLE_SCREEN_BUFFER_INFO struct { - Size COORD - CursorPosition COORD - Attributes uint16 - Window SMALL_RECT - MaximumWindowSize COORD - } - - COORD struct { - X int16 - Y int16 - } - - SMALL_RECT struct { - Left int16 - Top int16 - Right int16 - Bottom int16 - } - - // INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case, it is also the largest - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. - INPUT_RECORD struct { - EventType uint16 - KeyEvent KEY_EVENT_RECORD - } - - KEY_EVENT_RECORD struct { - KeyDown int32 - RepeatCount uint16 - VirtualKeyCode uint16 - VirtualScanCode uint16 - UnicodeChar uint16 - ControlKeyState uint32 - } - - WINDOW_BUFFER_SIZE struct { - Size COORD - } -) - -// boolToBOOL converts a Go bool into a Windows int32. -func boolToBOOL(f bool) int32 { - if f { - return int32(1) - } else { - return int32(0) - } -} - -// GetConsoleCursorInfo retrieves information about the size and visiblity of the console cursor. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx. -func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { - r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) - return checkError(r1, r2, err) -} - -// SetConsoleCursorInfo sets the size and visiblity of the console cursor. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx. -func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { - r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) - return checkError(r1, r2, err) -} - -// SetConsoleCursorPosition location of the console cursor. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx. -func SetConsoleCursorPosition(handle uintptr, coord COORD) error { - r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord)) - use(coord) - return checkError(r1, r2, err) -} - -// GetConsoleMode gets the console mode for given file descriptor -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx. -func GetConsoleMode(handle uintptr) (mode uint32, err error) { - err = syscall.GetConsoleMode(syscall.Handle(handle), &mode) - return mode, err -} - -// SetConsoleMode sets the console mode for given file descriptor -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. 
-func SetConsoleMode(handle uintptr, mode uint32) error { - r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0) - use(mode) - return checkError(r1, r2, err) -} - -// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer. -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx. -func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) { - info := CONSOLE_SCREEN_BUFFER_INFO{} - err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0)) - if err != nil { - return nil, err - } - return &info, nil -} - -func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error { - r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char))) - use(scrollRect) - use(clipRect) - use(destOrigin) - use(char) - return checkError(r1, r2, err) -} - -// SetConsoleScreenBufferSize sets the size of the console screen buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx. -func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error { - r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord)) - use(coord) - return checkError(r1, r2, err) -} - -// SetConsoleTextAttribute sets the attributes of characters written to the -// console screen buffer by the WriteFile or WriteConsole function. -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx. -func SetConsoleTextAttribute(handle uintptr, attribute uint16) error { - r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0) - use(attribute) - return checkError(r1, r2, err) -} - -// SetConsoleWindowInfo sets the size and position of the console screen buffer's window. -// Note that the size and location must be within and no larger than the backing console screen buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx. -func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error { - r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect))) - use(isAbsolute) - use(rect) - return checkError(r1, r2, err) -} - -// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx. -func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error { - r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion))) - use(buffer) - use(bufferSize) - use(bufferCoord) - return checkError(r1, r2, err) -} - -// ReadConsoleInput reads (and removes) data from the console input buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx. 
-func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error { - r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count))) - use(buffer) - return checkError(r1, r2, err) -} - -// WaitForSingleObject waits for the passed handle to be signaled. -// It returns true if the handle was signaled; false otherwise. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx. -func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) { - r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait))) - switch r1 { - case WAIT_ABANDONED, WAIT_TIMEOUT: - return false, nil - case WAIT_SIGNALED: - return true, nil - } - use(msWait) - return false, err -} - -// String helpers -func (info CONSOLE_SCREEN_BUFFER_INFO) String() string { - return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize) -} - -func (coord COORD) String() string { - return fmt.Sprintf("%v,%v", coord.X, coord.Y) -} - -func (rect SMALL_RECT) String() string { - return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom) -} - -// checkError evaluates the results of a Windows API call and returns the error if it failed. -func checkError(r1, r2 uintptr, err error) error { - // Windows APIs return non-zero to indicate success - if r1 != 0 { - return nil - } - - // Return the error if provided, otherwise default to EINVAL - if err != nil { - return err - } - return syscall.EINVAL -} - -// coordToPointer converts a COORD into a uintptr (by fooling the type system). -func coordToPointer(c COORD) uintptr { - // Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass. - return uintptr(*((*uint32)(unsafe.Pointer(&c)))) -} - -// use is a no-op, but the compiler cannot see that it is. -// Calling use(p) ensures that p is kept live until that point. -func use(p interface{}) {} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go deleted file mode 100644 index cbec8f728..000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go +++ /dev/null @@ -1,100 +0,0 @@ -// +build windows - -package winterm - -import "github.com/Azure/go-ansiterm" - -const ( - FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE - BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE -) - -// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the -// request represented by the passed ANSI mode. 
-func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) { - switch ansiMode { - - // Mode styles - case ansiterm.ANSI_SGR_BOLD: - windowsMode = windowsMode | FOREGROUND_INTENSITY - - case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF: - windowsMode &^= FOREGROUND_INTENSITY - - case ansiterm.ANSI_SGR_UNDERLINE: - windowsMode = windowsMode | COMMON_LVB_UNDERSCORE - - case ansiterm.ANSI_SGR_REVERSE: - inverted = true - - case ansiterm.ANSI_SGR_REVERSE_OFF: - inverted = false - - case ansiterm.ANSI_SGR_UNDERLINE_OFF: - windowsMode &^= COMMON_LVB_UNDERSCORE - - // Foreground colors - case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT: - windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK) - - case ansiterm.ANSI_SGR_FOREGROUND_BLACK: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) - - case ansiterm.ANSI_SGR_FOREGROUND_RED: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED - - case ansiterm.ANSI_SGR_FOREGROUND_GREEN: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN - - case ansiterm.ANSI_SGR_FOREGROUND_YELLOW: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN - - case ansiterm.ANSI_SGR_FOREGROUND_BLUE: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE - - case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE - - case ansiterm.ANSI_SGR_FOREGROUND_CYAN: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE - - case ansiterm.ANSI_SGR_FOREGROUND_WHITE: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE - - // Background colors - case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT: - // Black with no intensity - windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK) - - case ansiterm.ANSI_SGR_BACKGROUND_BLACK: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) - - case ansiterm.ANSI_SGR_BACKGROUND_RED: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED - - case ansiterm.ANSI_SGR_BACKGROUND_GREEN: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN - - case ansiterm.ANSI_SGR_BACKGROUND_YELLOW: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN - - case ansiterm.ANSI_SGR_BACKGROUND_BLUE: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE - - case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE - - case ansiterm.ANSI_SGR_BACKGROUND_CYAN: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE - - case ansiterm.ANSI_SGR_BACKGROUND_WHITE: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE - } - - return windowsMode, inverted -} - -// invertAttributes inverts the foreground and background colors of a Windows attributes value -func invertAttributes(windowsMode uint16) uint16 { - return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4) -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go deleted file mode 100644 index 3ee06ea72..000000000 --- 
a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go +++ /dev/null @@ -1,101 +0,0 @@ -// +build windows - -package winterm - -const ( - horizontal = iota - vertical -) - -func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT { - if h.originMode { - sr := h.effectiveSr(info.Window) - return SMALL_RECT{ - Top: sr.top, - Bottom: sr.bottom, - Left: 0, - Right: info.Size.X - 1, - } - } else { - return SMALL_RECT{ - Top: info.Window.Top, - Bottom: info.Window.Bottom, - Left: 0, - Right: info.Size.X - 1, - } - } -} - -// setCursorPosition sets the cursor to the specified position, bounded to the screen size -func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error { - position.X = ensureInRange(position.X, window.Left, window.Right) - position.Y = ensureInRange(position.Y, window.Top, window.Bottom) - err := SetConsoleCursorPosition(h.fd, position) - if err != nil { - return err - } - h.logf("Cursor position set: (%d, %d)", position.X, position.Y) - return err -} - -func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error { - return h.moveCursor(vertical, param) -} - -func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error { - return h.moveCursor(horizontal, param) -} - -func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - position := info.CursorPosition - switch moveMode { - case horizontal: - position.X += int16(param) - case vertical: - position.Y += int16(param) - } - - if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) moveCursorLine(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - position := info.CursorPosition - position.X = 0 - position.Y += int16(param) - - if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - position := info.CursorPosition - position.X = int16(param) - 1 - - if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go deleted file mode 100644 index 244b5fa25..000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go +++ /dev/null @@ -1,84 +0,0 @@ -// +build windows - -package winterm - -import "github.com/Azure/go-ansiterm" - -func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error { - // Ignore an invalid (negative area) request - if toCoord.Y < fromCoord.Y { - return nil - } - - var err error - - var coordStart = COORD{} - var coordEnd = COORD{} - - xCurrent, yCurrent := fromCoord.X, fromCoord.Y - xEnd, yEnd := toCoord.X, toCoord.Y - - // Clear any partial initial line - if xCurrent > 0 { - coordStart.X, coordStart.Y = xCurrent, yCurrent - coordEnd.X, coordEnd.Y = xEnd, yCurrent - - err = h.clearRect(attributes, coordStart, coordEnd) - if err != nil { - return err - } - - xCurrent = 0 - yCurrent += 1 - } - - // Clear intervening rectangular section - if yCurrent < yEnd { - coordStart.X, coordStart.Y = xCurrent, yCurrent - 
coordEnd.X, coordEnd.Y = xEnd, yEnd-1 - - err = h.clearRect(attributes, coordStart, coordEnd) - if err != nil { - return err - } - - xCurrent = 0 - yCurrent = yEnd - } - - // Clear remaining partial ending line - coordStart.X, coordStart.Y = xCurrent, yCurrent - coordEnd.X, coordEnd.Y = xEnd, yEnd - - err = h.clearRect(attributes, coordStart, coordEnd) - if err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error { - region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X} - width := toCoord.X - fromCoord.X + 1 - height := toCoord.Y - fromCoord.Y + 1 - size := uint32(width) * uint32(height) - - if size <= 0 { - return nil - } - - buffer := make([]CHAR_INFO, size) - - char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes} - for i := 0; i < int(size); i++ { - buffer[i] = char - } - - err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, ®ion) - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go deleted file mode 100644 index 2d27fa1d0..000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go +++ /dev/null @@ -1,118 +0,0 @@ -// +build windows - -package winterm - -// effectiveSr gets the current effective scroll region in buffer coordinates -func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion { - top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom) - bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom) - if top >= bottom { - top = window.Top - bottom = window.Bottom - } - return scrollRegion{top: top, bottom: bottom} -} - -func (h *windowsAnsiEventHandler) scrollUp(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - sr := h.effectiveSr(info.Window) - return h.scroll(param, sr, info) -} - -func (h *windowsAnsiEventHandler) scrollDown(param int) error { - return h.scrollUp(-param) -} - -func (h *windowsAnsiEventHandler) deleteLines(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - start := info.CursorPosition.Y - sr := h.effectiveSr(info.Window) - // Lines cannot be inserted or deleted outside the scrolling region. - if start >= sr.top && start <= sr.bottom { - sr.top = start - return h.scroll(param, sr, info) - } else { - return nil - } -} - -func (h *windowsAnsiEventHandler) insertLines(param int) error { - return h.deleteLines(-param) -} - -// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates. 
-func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error { - h.logf("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom) - h.logf("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom) - - // Copy from and clip to the scroll region (full buffer width) - scrollRect := SMALL_RECT{ - Top: sr.top, - Bottom: sr.bottom, - Left: 0, - Right: info.Size.X - 1, - } - - // Origin to which area should be copied - destOrigin := COORD{ - X: 0, - Y: sr.top - int16(param), - } - - char := CHAR_INFO{ - UnicodeChar: ' ', - Attributes: h.attributes, - } - - if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { - return err - } - return nil -} - -func (h *windowsAnsiEventHandler) deleteCharacters(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - return h.scrollLine(param, info.CursorPosition, info) -} - -func (h *windowsAnsiEventHandler) insertCharacters(param int) error { - return h.deleteCharacters(-param) -} - -// scrollLine scrolls a line horizontally starting at the provided position by a number of columns. -func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error { - // Copy from and clip to the scroll region (full buffer width) - scrollRect := SMALL_RECT{ - Top: position.Y, - Bottom: position.Y, - Left: position.X, - Right: info.Size.X - 1, - } - - // Origin to which area should be copied - destOrigin := COORD{ - X: position.X - int16(columns), - Y: position.Y, - } - - char := CHAR_INFO{ - UnicodeChar: ' ', - Attributes: h.attributes, - } - - if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go deleted file mode 100644 index afa7635d7..000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build windows - -package winterm - -// AddInRange increments a value by the passed quantity while ensuring the values -// always remain within the supplied min / max range. 
-func addInRange(n int16, increment int16, min int16, max int16) int16 { - return ensureInRange(n+increment, min, max) -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go deleted file mode 100644 index 2d40fb75a..000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go +++ /dev/null @@ -1,743 +0,0 @@ -// +build windows - -package winterm - -import ( - "bytes" - "log" - "os" - "strconv" - - "github.com/Azure/go-ansiterm" -) - -type windowsAnsiEventHandler struct { - fd uintptr - file *os.File - infoReset *CONSOLE_SCREEN_BUFFER_INFO - sr scrollRegion - buffer bytes.Buffer - attributes uint16 - inverted bool - wrapNext bool - drewMarginByte bool - originMode bool - marginByte byte - curInfo *CONSOLE_SCREEN_BUFFER_INFO - curPos COORD - logf func(string, ...interface{}) -} - -type Option func(*windowsAnsiEventHandler) - -func WithLogf(f func(string, ...interface{})) Option { - return func(w *windowsAnsiEventHandler) { - w.logf = f - } -} - -func CreateWinEventHandler(fd uintptr, file *os.File, opts ...Option) ansiterm.AnsiEventHandler { - infoReset, err := GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil - } - - h := &windowsAnsiEventHandler{ - fd: fd, - file: file, - infoReset: infoReset, - attributes: infoReset.Attributes, - } - for _, o := range opts { - o(h) - } - - if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { - logFile, _ := os.Create("winEventHandler.log") - logger := log.New(logFile, "", log.LstdFlags) - if h.logf != nil { - l := h.logf - h.logf = func(s string, v ...interface{}) { - l(s, v...) - logger.Printf(s, v...) - } - } else { - h.logf = logger.Printf - } - } - - if h.logf == nil { - h.logf = func(string, ...interface{}) {} - } - - return h -} - -type scrollRegion struct { - top int16 - bottom int16 -} - -// simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the -// current cursor position and scroll region settings, in which case it returns -// true. If no special handling is necessary, then it does nothing and returns -// false. -// -// In the false case, the caller should ensure that a carriage return -// and line feed are inserted or that the text is otherwise wrapped. -func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) { - if h.wrapNext { - if err := h.Flush(); err != nil { - return false, err - } - h.clearWrap() - } - pos, info, err := h.getCurrentInfo() - if err != nil { - return false, err - } - sr := h.effectiveSr(info.Window) - if pos.Y == sr.bottom { - // Scrolling is necessary. Let Windows automatically scroll if the scrolling region - // is the full window. - if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom { - if includeCR { - pos.X = 0 - h.updatePos(pos) - } - return false, nil - } - - // A custom scroll region is active. Scroll the window manually to simulate - // the LF. - if err := h.Flush(); err != nil { - return false, err - } - h.logf("Simulating LF inside scroll region") - if err := h.scrollUp(1); err != nil { - return false, err - } - if includeCR { - pos.X = 0 - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return false, err - } - } - return true, nil - - } else if pos.Y < info.Window.Bottom { - // Let Windows handle the LF. - pos.Y++ - if includeCR { - pos.X = 0 - } - h.updatePos(pos) - return false, nil - } else { - // The cursor is at the bottom of the screen but outside the scroll - // region. Skip the LF. 
- h.logf("Simulating LF outside scroll region") - if includeCR { - if err := h.Flush(); err != nil { - return false, err - } - pos.X = 0 - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return false, err - } - } - return true, nil - } -} - -// executeLF executes a LF without a CR. -func (h *windowsAnsiEventHandler) executeLF() error { - handled, err := h.simulateLF(false) - if err != nil { - return err - } - if !handled { - // Windows LF will reset the cursor column position. Write the LF - // and restore the cursor position. - pos, _, err := h.getCurrentInfo() - if err != nil { - return err - } - h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) - if pos.X != 0 { - if err := h.Flush(); err != nil { - return err - } - h.logf("Resetting cursor position for LF without CR") - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return err - } - } - } - return nil -} - -func (h *windowsAnsiEventHandler) Print(b byte) error { - if h.wrapNext { - h.buffer.WriteByte(h.marginByte) - h.clearWrap() - if _, err := h.simulateLF(true); err != nil { - return err - } - } - pos, info, err := h.getCurrentInfo() - if err != nil { - return err - } - if pos.X == info.Size.X-1 { - h.wrapNext = true - h.marginByte = b - } else { - pos.X++ - h.updatePos(pos) - h.buffer.WriteByte(b) - } - return nil -} - -func (h *windowsAnsiEventHandler) Execute(b byte) error { - switch b { - case ansiterm.ANSI_TAB: - h.logf("Execute(TAB)") - // Move to the next tab stop, but preserve auto-wrap if already set. - if !h.wrapNext { - pos, info, err := h.getCurrentInfo() - if err != nil { - return err - } - pos.X = (pos.X + 8) - pos.X%8 - if pos.X >= info.Size.X { - pos.X = info.Size.X - 1 - } - if err := h.Flush(); err != nil { - return err - } - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return err - } - } - return nil - - case ansiterm.ANSI_BEL: - h.buffer.WriteByte(ansiterm.ANSI_BEL) - return nil - - case ansiterm.ANSI_BACKSPACE: - if h.wrapNext { - if err := h.Flush(); err != nil { - return err - } - h.clearWrap() - } - pos, _, err := h.getCurrentInfo() - if err != nil { - return err - } - if pos.X > 0 { - pos.X-- - h.updatePos(pos) - h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE) - } - return nil - - case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED: - // Treat as true LF. - return h.executeLF() - - case ansiterm.ANSI_LINE_FEED: - // Simulate a CR and LF for now since there is no way in go-ansiterm - // to tell if the LF should include CR (and more things break when it's - // missing than when it's incorrectly added). 
- handled, err := h.simulateLF(true) - if handled || err != nil { - return err - } - return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) - - case ansiterm.ANSI_CARRIAGE_RETURN: - if h.wrapNext { - if err := h.Flush(); err != nil { - return err - } - h.clearWrap() - } - pos, _, err := h.getCurrentInfo() - if err != nil { - return err - } - if pos.X != 0 { - pos.X = 0 - h.updatePos(pos) - h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN) - } - return nil - - default: - return nil - } -} - -func (h *windowsAnsiEventHandler) CUU(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CUU: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorVertical(-param) -} - -func (h *windowsAnsiEventHandler) CUD(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CUD: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorVertical(param) -} - -func (h *windowsAnsiEventHandler) CUF(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CUF: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorHorizontal(param) -} - -func (h *windowsAnsiEventHandler) CUB(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CUB: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorHorizontal(-param) -} - -func (h *windowsAnsiEventHandler) CNL(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CNL: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorLine(param) -} - -func (h *windowsAnsiEventHandler) CPL(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CPL: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorLine(-param) -} - -func (h *windowsAnsiEventHandler) CHA(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CHA: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorColumn(param) -} - -func (h *windowsAnsiEventHandler) VPA(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("VPA: [[%d]]", param) - h.clearWrap() - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - window := h.getCursorWindow(info) - position := info.CursorPosition - position.Y = window.Top + int16(param) - 1 - return h.setCursorPosition(position, window) -} - -func (h *windowsAnsiEventHandler) CUP(row int, col int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CUP: [[%d %d]]", row, col) - h.clearWrap() - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - window := h.getCursorWindow(info) - position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1} - return h.setCursorPosition(position, window) -} - -func (h *windowsAnsiEventHandler) HVP(row int, col int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("HVP: [[%d %d]]", row, col) - h.clearWrap() - return h.CUP(row, col) -} - -func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DECTCEM: [%v]", []string{strconv.FormatBool(visible)}) - h.clearWrap() - return nil -} - -func (h *windowsAnsiEventHandler) DECOM(enable bool) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DECOM: [%v]", []string{strconv.FormatBool(enable)}) - h.clearWrap() - h.originMode = enable - return h.CUP(1, 1) -} - -func (h 
*windowsAnsiEventHandler) DECCOLM(use132 bool) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DECCOLM: [%v]", []string{strconv.FormatBool(use132)}) - h.clearWrap() - if err := h.ED(2); err != nil { - return err - } - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - targetWidth := int16(80) - if use132 { - targetWidth = 132 - } - if info.Size.X < targetWidth { - if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { - h.logf("set buffer failed: %v", err) - return err - } - } - window := info.Window - window.Left = 0 - window.Right = targetWidth - 1 - if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { - h.logf("set window failed: %v", err) - return err - } - if info.Size.X > targetWidth { - if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { - h.logf("set buffer failed: %v", err) - return err - } - } - return SetConsoleCursorPosition(h.fd, COORD{0, 0}) -} - -func (h *windowsAnsiEventHandler) ED(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("ED: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - - // [J -- Erases from the cursor to the end of the screen, including the cursor position. - // [1J -- Erases from the beginning of the screen to the cursor, including the cursor position. - // [2J -- Erases the complete display. The cursor does not move. - // Notes: - // -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles - - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - var start COORD - var end COORD - - switch param { - case 0: - start = info.CursorPosition - end = COORD{info.Size.X - 1, info.Size.Y - 1} - - case 1: - start = COORD{0, 0} - end = info.CursorPosition - - case 2: - start = COORD{0, 0} - end = COORD{info.Size.X - 1, info.Size.Y - 1} - } - - err = h.clearRange(h.attributes, start, end) - if err != nil { - return err - } - - // If the whole buffer was cleared, move the window to the top while preserving - // the window-relative cursor position. - if param == 2 { - pos := info.CursorPosition - window := info.Window - pos.Y -= window.Top - window.Bottom -= window.Top - window.Top = 0 - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return err - } - if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { - return err - } - } - - return nil -} - -func (h *windowsAnsiEventHandler) EL(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("EL: [%v]", strconv.Itoa(param)) - h.clearWrap() - - // [K -- Erases from the cursor to the end of the line, including the cursor position. - // [1K -- Erases from the beginning of the line to the cursor, including the cursor position. - // [2K -- Erases the complete line. 
- - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - var start COORD - var end COORD - - switch param { - case 0: - start = info.CursorPosition - end = COORD{info.Size.X, info.CursorPosition.Y} - - case 1: - start = COORD{0, info.CursorPosition.Y} - end = info.CursorPosition - - case 2: - start = COORD{0, info.CursorPosition.Y} - end = COORD{info.Size.X, info.CursorPosition.Y} - } - - err = h.clearRange(h.attributes, start, end) - if err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) IL(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("IL: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.insertLines(param) -} - -func (h *windowsAnsiEventHandler) DL(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DL: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.deleteLines(param) -} - -func (h *windowsAnsiEventHandler) ICH(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("ICH: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.insertCharacters(param) -} - -func (h *windowsAnsiEventHandler) DCH(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DCH: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.deleteCharacters(param) -} - -func (h *windowsAnsiEventHandler) SGR(params []int) error { - if err := h.Flush(); err != nil { - return err - } - strings := []string{} - for _, v := range params { - strings = append(strings, strconv.Itoa(v)) - } - - h.logf("SGR: [%v]", strings) - - if len(params) <= 0 { - h.attributes = h.infoReset.Attributes - h.inverted = false - } else { - for _, attr := range params { - - if attr == ansiterm.ANSI_SGR_RESET { - h.attributes = h.infoReset.Attributes - h.inverted = false - continue - } - - h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, int16(attr)) - } - } - - attributes := h.attributes - if h.inverted { - attributes = invertAttributes(attributes) - } - err := SetConsoleTextAttribute(h.fd, attributes) - if err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) SU(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("SU: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.scrollUp(param) -} - -func (h *windowsAnsiEventHandler) SD(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("SD: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.scrollDown(param) -} - -func (h *windowsAnsiEventHandler) DA(params []string) error { - h.logf("DA: [%v]", params) - // DA cannot be implemented because it must send data on the VT100 input stream, - // which is not available to go-ansiterm. - return nil -} - -func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DECSTBM: [%d, %d]", top, bottom) - - // Windows is 0 indexed, Linux is 1 indexed - h.sr.top = int16(top - 1) - h.sr.bottom = int16(bottom - 1) - - // This command also moves the cursor to the origin. 
- h.clearWrap() - return h.CUP(1, 1) -} - -func (h *windowsAnsiEventHandler) RI() error { - if err := h.Flush(); err != nil { - return err - } - h.logf("RI: []") - h.clearWrap() - - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - sr := h.effectiveSr(info.Window) - if info.CursorPosition.Y == sr.top { - return h.scrollDown(1) - } - - return h.moveCursorVertical(-1) -} - -func (h *windowsAnsiEventHandler) IND() error { - h.logf("IND: []") - return h.executeLF() -} - -func (h *windowsAnsiEventHandler) Flush() error { - h.curInfo = nil - if h.buffer.Len() > 0 { - h.logf("Flush: [%s]", h.buffer.Bytes()) - if _, err := h.buffer.WriteTo(h.file); err != nil { - return err - } - } - - if h.wrapNext && !h.drewMarginByte { - h.logf("Flush: drawing margin byte '%c'", h.marginByte) - - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}} - size := COORD{1, 1} - position := COORD{0, 0} - region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y} - if err := WriteConsoleOutput(h.fd, charInfo, size, position, ®ion); err != nil { - return err - } - h.drewMarginByte = true - } - return nil -} - -// cacheConsoleInfo ensures that the current console screen information has been queried -// since the last call to Flush(). It must be called before accessing h.curInfo or h.curPos. -func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) { - if h.curInfo == nil { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return COORD{}, nil, err - } - h.curInfo = info - h.curPos = info.CursorPosition - } - return h.curPos, h.curInfo, nil -} - -func (h *windowsAnsiEventHandler) updatePos(pos COORD) { - if h.curInfo == nil { - panic("failed to call getCurrentInfo before calling updatePos") - } - h.curPos = pos -} - -// clearWrap clears the state where the cursor is in the margin -// waiting for the next character before wrapping the line. This must -// be done before most operations that act on the cursor. 
-func (h *windowsAnsiEventHandler) clearWrap() { - h.wrapNext = false - h.drewMarginByte = false -} diff --git a/vendor/github.com/Microsoft/go-winio/.gitignore b/vendor/github.com/Microsoft/go-winio/.gitignore deleted file mode 100644 index b883f1fdc..000000000 --- a/vendor/github.com/Microsoft/go-winio/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.exe diff --git a/vendor/github.com/Microsoft/go-winio/CODEOWNERS b/vendor/github.com/Microsoft/go-winio/CODEOWNERS deleted file mode 100644 index ae1b4942b..000000000 --- a/vendor/github.com/Microsoft/go-winio/CODEOWNERS +++ /dev/null @@ -1 +0,0 @@ - * @microsoft/containerplat diff --git a/vendor/github.com/Microsoft/go-winio/LICENSE b/vendor/github.com/Microsoft/go-winio/LICENSE deleted file mode 100644 index b8b569d77..000000000 --- a/vendor/github.com/Microsoft/go-winio/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Microsoft - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/github.com/Microsoft/go-winio/README.md b/vendor/github.com/Microsoft/go-winio/README.md deleted file mode 100644 index 683be1dcf..000000000 --- a/vendor/github.com/Microsoft/go-winio/README.md +++ /dev/null @@ -1,37 +0,0 @@ -# go-winio [![Build Status](https://github.com/microsoft/go-winio/actions/workflows/ci.yml/badge.svg)](https://github.com/microsoft/go-winio/actions/workflows/ci.yml) - -This repository contains utilities for efficiently performing Win32 IO operations in -Go. Currently, this is focused on accessing named pipes and other file handles, and -for using named pipes as a net transport. - -This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go -to reuse the thread to schedule another goroutine. This limits support to Windows Vista and -newer operating systems. This is similar to the implementation of network sockets in Go's net -package. - -Please see the LICENSE file for licensing information. - -## Contributing - -This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) -declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com. - -When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR -appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. 
- -We also require that contributors sign their commits using git commit -s or git commit --signoff to certify they either authored the work themselves -or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for more info, as well as to make sure that you can -attest to the rules listed. Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off. - - -## Code of Conduct - -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). -For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or -contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. - - - -## Special Thanks -Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe -for another named pipe implementation. diff --git a/vendor/github.com/Microsoft/go-winio/backup.go b/vendor/github.com/Microsoft/go-winio/backup.go deleted file mode 100644 index 2be34af43..000000000 --- a/vendor/github.com/Microsoft/go-winio/backup.go +++ /dev/null @@ -1,280 +0,0 @@ -// +build windows - -package winio - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "runtime" - "syscall" - "unicode/utf16" -) - -//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead -//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite - -const ( - BackupData = uint32(iota + 1) - BackupEaData - BackupSecurity - BackupAlternateData - BackupLink - BackupPropertyData - BackupObjectId - BackupReparseData - BackupSparseBlock - BackupTxfsData -) - -const ( - StreamSparseAttributes = uint32(8) -) - -const ( - WRITE_DAC = 0x40000 - WRITE_OWNER = 0x80000 - ACCESS_SYSTEM_SECURITY = 0x1000000 -) - -// BackupHeader represents a backup stream of a file. -type BackupHeader struct { - Id uint32 // The backup stream ID - Attributes uint32 // Stream attributes - Size int64 // The size of the stream in bytes - Name string // The name of the stream (for BackupAlternateData only). - Offset int64 // The offset of the stream in the file (for BackupSparseBlock only). -} - -type win32StreamId struct { - StreamId uint32 - Attributes uint32 - Size uint64 - NameSize uint32 -} - -// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series -// of BackupHeader values. -type BackupStreamReader struct { - r io.Reader - bytesLeft int64 -} - -// NewBackupStreamReader produces a BackupStreamReader from any io.Reader. -func NewBackupStreamReader(r io.Reader) *BackupStreamReader { - return &BackupStreamReader{r, 0} -} - -// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if -// it was not completely read. -func (r *BackupStreamReader) Next() (*BackupHeader, error) { - if r.bytesLeft > 0 { - if s, ok := r.r.(io.Seeker); ok { - // Make sure Seek on io.SeekCurrent sometimes succeeds - // before trying the actual seek. 
- if _, err := s.Seek(0, io.SeekCurrent); err == nil { - if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil { - return nil, err - } - r.bytesLeft = 0 - } - } - if _, err := io.Copy(ioutil.Discard, r); err != nil { - return nil, err - } - } - var wsi win32StreamId - if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil { - return nil, err - } - hdr := &BackupHeader{ - Id: wsi.StreamId, - Attributes: wsi.Attributes, - Size: int64(wsi.Size), - } - if wsi.NameSize != 0 { - name := make([]uint16, int(wsi.NameSize/2)) - if err := binary.Read(r.r, binary.LittleEndian, name); err != nil { - return nil, err - } - hdr.Name = syscall.UTF16ToString(name) - } - if wsi.StreamId == BackupSparseBlock { - if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil { - return nil, err - } - hdr.Size -= 8 - } - r.bytesLeft = hdr.Size - return hdr, nil -} - -// Read reads from the current backup stream. -func (r *BackupStreamReader) Read(b []byte) (int, error) { - if r.bytesLeft == 0 { - return 0, io.EOF - } - if int64(len(b)) > r.bytesLeft { - b = b[:r.bytesLeft] - } - n, err := r.r.Read(b) - r.bytesLeft -= int64(n) - if err == io.EOF { - err = io.ErrUnexpectedEOF - } else if r.bytesLeft == 0 && err == nil { - err = io.EOF - } - return n, err -} - -// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API. -type BackupStreamWriter struct { - w io.Writer - bytesLeft int64 -} - -// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer. -func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter { - return &BackupStreamWriter{w, 0} -} - -// WriteHeader writes the next backup stream header and prepares for calls to Write(). -func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error { - if w.bytesLeft != 0 { - return fmt.Errorf("missing %d bytes", w.bytesLeft) - } - name := utf16.Encode([]rune(hdr.Name)) - wsi := win32StreamId{ - StreamId: hdr.Id, - Attributes: hdr.Attributes, - Size: uint64(hdr.Size), - NameSize: uint32(len(name) * 2), - } - if hdr.Id == BackupSparseBlock { - // Include space for the int64 block offset - wsi.Size += 8 - } - if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil { - return err - } - if len(name) != 0 { - if err := binary.Write(w.w, binary.LittleEndian, name); err != nil { - return err - } - } - if hdr.Id == BackupSparseBlock { - if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil { - return err - } - } - w.bytesLeft = hdr.Size - return nil -} - -// Write writes to the current backup stream. -func (w *BackupStreamWriter) Write(b []byte) (int, error) { - if w.bytesLeft < int64(len(b)) { - return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft) - } - n, err := w.w.Write(b) - w.bytesLeft -= int64(n) - return n, err -} - -// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API. -type BackupFileReader struct { - f *os.File - includeSecurity bool - ctx uintptr -} - -// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true, -// Read will attempt to read the security descriptor of the file. -func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader { - r := &BackupFileReader{f, includeSecurity, 0} - return r -} - -// Read reads a backup stream from the file by calling the Win32 API BackupRead(). 
-func (r *BackupFileReader) Read(b []byte) (int, error) { - var bytesRead uint32 - err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx) - if err != nil { - return 0, &os.PathError{"BackupRead", r.f.Name(), err} - } - runtime.KeepAlive(r.f) - if bytesRead == 0 { - return 0, io.EOF - } - return int(bytesRead), nil -} - -// Close frees Win32 resources associated with the BackupFileReader. It does not close -// the underlying file. -func (r *BackupFileReader) Close() error { - if r.ctx != 0 { - backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) - runtime.KeepAlive(r.f) - r.ctx = 0 - } - return nil -} - -// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API. -type BackupFileWriter struct { - f *os.File - includeSecurity bool - ctx uintptr -} - -// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true, -// Write() will attempt to restore the security descriptor from the stream. -func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter { - w := &BackupFileWriter{f, includeSecurity, 0} - return w -} - -// Write restores a portion of the file using the provided backup stream. -func (w *BackupFileWriter) Write(b []byte) (int, error) { - var bytesWritten uint32 - err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx) - if err != nil { - return 0, &os.PathError{"BackupWrite", w.f.Name(), err} - } - runtime.KeepAlive(w.f) - if int(bytesWritten) != len(b) { - return int(bytesWritten), errors.New("not all bytes could be written") - } - return len(b), nil -} - -// Close frees Win32 resources associated with the BackupFileWriter. It does not -// close the underlying file. -func (w *BackupFileWriter) Close() error { - if w.ctx != 0 { - backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) - runtime.KeepAlive(w.f) - w.ctx = 0 - } - return nil -} - -// OpenForBackup opens a file or directory, potentially skipping access checks if the backup -// or restore privileges have been acquired. -// -// If the file opened was a directory, it cannot be used with Readdir(). -func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) { - winPath, err := syscall.UTF16FromString(path) - if err != nil { - return nil, err - } - h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0) - if err != nil { - err = &os.PathError{Op: "open", Path: path, Err: err} - return nil, err - } - return os.NewFile(uintptr(h), path), nil -} diff --git a/vendor/github.com/Microsoft/go-winio/ea.go b/vendor/github.com/Microsoft/go-winio/ea.go deleted file mode 100644 index 4051c1b33..000000000 --- a/vendor/github.com/Microsoft/go-winio/ea.go +++ /dev/null @@ -1,137 +0,0 @@ -package winio - -import ( - "bytes" - "encoding/binary" - "errors" -) - -type fileFullEaInformation struct { - NextEntryOffset uint32 - Flags uint8 - NameLength uint8 - ValueLength uint16 -} - -var ( - fileFullEaInformationSize = binary.Size(&fileFullEaInformation{}) - - errInvalidEaBuffer = errors.New("invalid extended attribute buffer") - errEaNameTooLarge = errors.New("extended attribute name too large") - errEaValueTooLarge = errors.New("extended attribute value too large") -) - -// ExtendedAttribute represents a single Windows EA. 
-type ExtendedAttribute struct { - Name string - Value []byte - Flags uint8 -} - -func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { - var info fileFullEaInformation - err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info) - if err != nil { - err = errInvalidEaBuffer - return - } - - nameOffset := fileFullEaInformationSize - nameLen := int(info.NameLength) - valueOffset := nameOffset + int(info.NameLength) + 1 - valueLen := int(info.ValueLength) - nextOffset := int(info.NextEntryOffset) - if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) { - err = errInvalidEaBuffer - return - } - - ea.Name = string(b[nameOffset : nameOffset+nameLen]) - ea.Value = b[valueOffset : valueOffset+valueLen] - ea.Flags = info.Flags - if info.NextEntryOffset != 0 { - nb = b[info.NextEntryOffset:] - } - return -} - -// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION -// buffer retrieved from BackupRead, ZwQueryEaFile, etc. -func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) { - for len(b) != 0 { - ea, nb, err := parseEa(b) - if err != nil { - return nil, err - } - - eas = append(eas, ea) - b = nb - } - return -} - -func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error { - if int(uint8(len(ea.Name))) != len(ea.Name) { - return errEaNameTooLarge - } - if int(uint16(len(ea.Value))) != len(ea.Value) { - return errEaValueTooLarge - } - entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value)) - withPadding := (entrySize + 3) &^ 3 - nextOffset := uint32(0) - if !last { - nextOffset = withPadding - } - info := fileFullEaInformation{ - NextEntryOffset: nextOffset, - Flags: ea.Flags, - NameLength: uint8(len(ea.Name)), - ValueLength: uint16(len(ea.Value)), - } - - err := binary.Write(buf, binary.LittleEndian, &info) - if err != nil { - return err - } - - _, err = buf.Write([]byte(ea.Name)) - if err != nil { - return err - } - - err = buf.WriteByte(0) - if err != nil { - return err - } - - _, err = buf.Write(ea.Value) - if err != nil { - return err - } - - _, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize]) - if err != nil { - return err - } - - return nil -} - -// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION -// buffer for use with BackupWrite, ZwSetEaFile, etc. 
-func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) { - var buf bytes.Buffer - for i := range eas { - last := false - if i == len(eas)-1 { - last = true - } - - err := writeEa(&buf, &eas[i], last) - if err != nil { - return nil, err - } - } - return buf.Bytes(), nil -} diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go deleted file mode 100644 index 293ab54c8..000000000 --- a/vendor/github.com/Microsoft/go-winio/file.go +++ /dev/null @@ -1,329 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "errors" - "io" - "runtime" - "sync" - "sync/atomic" - "syscall" - "time" -) - -//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx -//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort -//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus -//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes -//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult - -type atomicBool int32 - -func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } -func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) } -func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } -func (b *atomicBool) swap(new bool) bool { - var newInt int32 - if new { - newInt = 1 - } - return atomic.SwapInt32((*int32)(b), newInt) == 1 -} - -const ( - cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1 - cFILE_SKIP_SET_EVENT_ON_HANDLE = 2 -) - -var ( - ErrFileClosed = errors.New("file has already been closed") - ErrTimeout = &timeoutError{} -) - -type timeoutError struct{} - -func (e *timeoutError) Error() string { return "i/o timeout" } -func (e *timeoutError) Timeout() bool { return true } -func (e *timeoutError) Temporary() bool { return true } - -type timeoutChan chan struct{} - -var ioInitOnce sync.Once -var ioCompletionPort syscall.Handle - -// ioResult contains the result of an asynchronous IO operation -type ioResult struct { - bytes uint32 - err error -} - -// ioOperation represents an outstanding asynchronous Win32 IO -type ioOperation struct { - o syscall.Overlapped - ch chan ioResult -} - -func initIo() { - h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff) - if err != nil { - panic(err) - } - ioCompletionPort = h - go ioCompletionProcessor(h) -} - -// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall. -// It takes ownership of this handle and will close it if it is garbage collected. 
-type win32File struct { - handle syscall.Handle - wg sync.WaitGroup - wgLock sync.RWMutex - closing atomicBool - socket bool - readDeadline deadlineHandler - writeDeadline deadlineHandler -} - -type deadlineHandler struct { - setLock sync.Mutex - channel timeoutChan - channelLock sync.RWMutex - timer *time.Timer - timedout atomicBool -} - -// makeWin32File makes a new win32File from an existing file handle -func makeWin32File(h syscall.Handle) (*win32File, error) { - f := &win32File{handle: h} - ioInitOnce.Do(initIo) - _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff) - if err != nil { - return nil, err - } - err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE) - if err != nil { - return nil, err - } - f.readDeadline.channel = make(timeoutChan) - f.writeDeadline.channel = make(timeoutChan) - return f, nil -} - -func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { - // If we return the result of makeWin32File directly, it can result in an - // interface-wrapped nil, rather than a nil interface value. - f, err := makeWin32File(h) - if err != nil { - return nil, err - } - return f, nil -} - -// closeHandle closes the resources associated with a Win32 handle -func (f *win32File) closeHandle() { - f.wgLock.Lock() - // Atomically set that we are closing, releasing the resources only once. - if !f.closing.swap(true) { - f.wgLock.Unlock() - // cancel all IO and wait for it to complete - cancelIoEx(f.handle, nil) - f.wg.Wait() - // at this point, no new IO can start - syscall.Close(f.handle) - f.handle = 0 - } else { - f.wgLock.Unlock() - } -} - -// Close closes a win32File. -func (f *win32File) Close() error { - f.closeHandle() - return nil -} - -// IsClosed checks if the file has been closed -func (f *win32File) IsClosed() bool { - return f.closing.isSet() -} - -// prepareIo prepares for a new IO operation. -// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. -func (f *win32File) prepareIo() (*ioOperation, error) { - f.wgLock.RLock() - if f.closing.isSet() { - f.wgLock.RUnlock() - return nil, ErrFileClosed - } - f.wg.Add(1) - f.wgLock.RUnlock() - c := &ioOperation{} - c.ch = make(chan ioResult) - return c, nil -} - -// ioCompletionProcessor processes completed async IOs forever -func ioCompletionProcessor(h syscall.Handle) { - for { - var bytes uint32 - var key uintptr - var op *ioOperation - err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE) - if op == nil { - panic(err) - } - op.ch <- ioResult{bytes, err} - } -} - -// asyncIo processes the return value from ReadFile or WriteFile, blocking until -// the operation has actually completed. -func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { - if err != syscall.ERROR_IO_PENDING { - return int(bytes), err - } - - if f.closing.isSet() { - cancelIoEx(f.handle, &c.o) - } - - var timeout timeoutChan - if d != nil { - d.channelLock.Lock() - timeout = d.channel - d.channelLock.Unlock() - } - - var r ioResult - select { - case r = <-c.ch: - err = r.err - if err == syscall.ERROR_OPERATION_ABORTED { - if f.closing.isSet() { - err = ErrFileClosed - } - } else if err != nil && f.socket { - // err is from Win32. Query the overlapped structure to get the winsock error. 
- var bytes, flags uint32 - err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags) - } - case <-timeout: - cancelIoEx(f.handle, &c.o) - r = <-c.ch - err = r.err - if err == syscall.ERROR_OPERATION_ABORTED { - err = ErrTimeout - } - } - - // runtime.KeepAlive is needed, as c is passed via native - // code to ioCompletionProcessor, c must remain alive - // until the channel read is complete. - runtime.KeepAlive(c) - return int(r.bytes), err -} - -// Read reads from a file handle. -func (f *win32File) Read(b []byte) (int, error) { - c, err := f.prepareIo() - if err != nil { - return 0, err - } - defer f.wg.Done() - - if f.readDeadline.timedout.isSet() { - return 0, ErrTimeout - } - - var bytes uint32 - err = syscall.ReadFile(f.handle, b, &bytes, &c.o) - n, err := f.asyncIo(c, &f.readDeadline, bytes, err) - runtime.KeepAlive(b) - - // Handle EOF conditions. - if err == nil && n == 0 && len(b) != 0 { - return 0, io.EOF - } else if err == syscall.ERROR_BROKEN_PIPE { - return 0, io.EOF - } else { - return n, err - } -} - -// Write writes to a file handle. -func (f *win32File) Write(b []byte) (int, error) { - c, err := f.prepareIo() - if err != nil { - return 0, err - } - defer f.wg.Done() - - if f.writeDeadline.timedout.isSet() { - return 0, ErrTimeout - } - - var bytes uint32 - err = syscall.WriteFile(f.handle, b, &bytes, &c.o) - n, err := f.asyncIo(c, &f.writeDeadline, bytes, err) - runtime.KeepAlive(b) - return n, err -} - -func (f *win32File) SetReadDeadline(deadline time.Time) error { - return f.readDeadline.set(deadline) -} - -func (f *win32File) SetWriteDeadline(deadline time.Time) error { - return f.writeDeadline.set(deadline) -} - -func (f *win32File) Flush() error { - return syscall.FlushFileBuffers(f.handle) -} - -func (f *win32File) Fd() uintptr { - return uintptr(f.handle) -} - -func (d *deadlineHandler) set(deadline time.Time) error { - d.setLock.Lock() - defer d.setLock.Unlock() - - if d.timer != nil { - if !d.timer.Stop() { - <-d.channel - } - d.timer = nil - } - d.timedout.setFalse() - - select { - case <-d.channel: - d.channelLock.Lock() - d.channel = make(chan struct{}) - d.channelLock.Unlock() - default: - } - - if deadline.IsZero() { - return nil - } - - timeoutIO := func() { - d.timedout.setTrue() - close(d.channel) - } - - now := time.Now() - duration := deadline.Sub(now) - if deadline.After(now) { - // Deadline is in the future, set a timer to wait - d.timer = time.AfterFunc(duration, timeoutIO) - } else { - // Deadline is in the past. Cancel all pending IO now. - timeoutIO() - } - return nil -} diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go deleted file mode 100644 index 3ab6bff69..000000000 --- a/vendor/github.com/Microsoft/go-winio/fileinfo.go +++ /dev/null @@ -1,73 +0,0 @@ -// +build windows - -package winio - -import ( - "os" - "runtime" - "unsafe" - - "golang.org/x/sys/windows" -) - -// FileBasicInfo contains file access time and file attributes information. -type FileBasicInfo struct { - CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime - FileAttributes uint32 - pad uint32 // padding -} - -// GetFileBasicInfo retrieves times and attributes for a file. 
-func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { - bi := &FileBasicInfo{} - if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { - return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} - } - runtime.KeepAlive(f) - return bi, nil -} - -// SetFileBasicInfo sets times and attributes for a file. -func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error { - if err := windows.SetFileInformationByHandle(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { - return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err} - } - runtime.KeepAlive(f) - return nil -} - -// FileStandardInfo contains extended information for the file. -// FILE_STANDARD_INFO in WinBase.h -// https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_standard_info -type FileStandardInfo struct { - AllocationSize, EndOfFile int64 - NumberOfLinks uint32 - DeletePending, Directory bool -} - -// GetFileStandardInfo retrieves ended information for the file. -func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) { - si := &FileStandardInfo{} - if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileStandardInfo, (*byte)(unsafe.Pointer(si)), uint32(unsafe.Sizeof(*si))); err != nil { - return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} - } - runtime.KeepAlive(f) - return si, nil -} - -// FileIDInfo contains the volume serial number and file ID for a file. This pair should be -// unique on a system. -type FileIDInfo struct { - VolumeSerialNumber uint64 - FileID [16]byte -} - -// GetFileID retrieves the unique (volume, file ID) pair for a file. -func GetFileID(f *os.File) (*FileIDInfo, error) { - fileID := &FileIDInfo{} - if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileIdInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil { - return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} - } - runtime.KeepAlive(f) - return fileID, nil -} diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go deleted file mode 100644 index b2b644d00..000000000 --- a/vendor/github.com/Microsoft/go-winio/hvsock.go +++ /dev/null @@ -1,316 +0,0 @@ -//go:build windows -// +build windows - -package winio - -import ( - "fmt" - "io" - "net" - "os" - "syscall" - "time" - "unsafe" - - "github.com/Microsoft/go-winio/pkg/guid" -) - -//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind - -const ( - afHvSock = 34 // AF_HYPERV - - socketError = ^uintptr(0) -) - -// An HvsockAddr is an address for a AF_HYPERV socket. -type HvsockAddr struct { - VMID guid.GUID - ServiceID guid.GUID -} - -type rawHvsockAddr struct { - Family uint16 - _ uint16 - VMID guid.GUID - ServiceID guid.GUID -} - -// Network returns the address's network name, "hvsock". -func (addr *HvsockAddr) Network() string { - return "hvsock" -} - -func (addr *HvsockAddr) String() string { - return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID) -} - -// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port. 
-func VsockServiceID(port uint32) guid.GUID { - g, _ := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3") - g.Data1 = port - return g -} - -func (addr *HvsockAddr) raw() rawHvsockAddr { - return rawHvsockAddr{ - Family: afHvSock, - VMID: addr.VMID, - ServiceID: addr.ServiceID, - } -} - -func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) { - addr.VMID = raw.VMID - addr.ServiceID = raw.ServiceID -} - -// HvsockListener is a socket listener for the AF_HYPERV address family. -type HvsockListener struct { - sock *win32File - addr HvsockAddr -} - -// HvsockConn is a connected socket of the AF_HYPERV address family. -type HvsockConn struct { - sock *win32File - local, remote HvsockAddr -} - -func newHvSocket() (*win32File, error) { - fd, err := syscall.Socket(afHvSock, syscall.SOCK_STREAM, 1) - if err != nil { - return nil, os.NewSyscallError("socket", err) - } - f, err := makeWin32File(fd) - if err != nil { - syscall.Close(fd) - return nil, err - } - f.socket = true - return f, nil -} - -// ListenHvsock listens for connections on the specified hvsock address. -func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) { - l := &HvsockListener{addr: *addr} - sock, err := newHvSocket() - if err != nil { - return nil, l.opErr("listen", err) - } - sa := addr.raw() - err = bind(sock.handle, unsafe.Pointer(&sa), int32(unsafe.Sizeof(sa))) - if err != nil { - return nil, l.opErr("listen", os.NewSyscallError("socket", err)) - } - err = syscall.Listen(sock.handle, 16) - if err != nil { - return nil, l.opErr("listen", os.NewSyscallError("listen", err)) - } - return &HvsockListener{sock: sock, addr: *addr}, nil -} - -func (l *HvsockListener) opErr(op string, err error) error { - return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err} -} - -// Addr returns the listener's network address. -func (l *HvsockListener) Addr() net.Addr { - return &l.addr -} - -// Accept waits for the next connection and returns it. -func (l *HvsockListener) Accept() (_ net.Conn, err error) { - sock, err := newHvSocket() - if err != nil { - return nil, l.opErr("accept", err) - } - defer func() { - if sock != nil { - sock.Close() - } - }() - c, err := l.sock.prepareIo() - if err != nil { - return nil, l.opErr("accept", err) - } - defer l.sock.wg.Done() - - // AcceptEx, per documentation, requires an extra 16 bytes per address. - const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{})) - var addrbuf [addrlen * 2]byte - - var bytes uint32 - err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0, addrlen, addrlen, &bytes, &c.o) - _, err = l.sock.asyncIo(c, nil, bytes, err) - if err != nil { - return nil, l.opErr("accept", os.NewSyscallError("acceptex", err)) - } - conn := &HvsockConn{ - sock: sock, - } - conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0]))) - conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen]))) - sock = nil - return conn, nil -} - -// Close closes the listener, causing any pending Accept calls to fail. 
-func (l *HvsockListener) Close() error { - return l.sock.Close() -} - -/* Need to finish ConnectEx handling -func DialHvsock(ctx context.Context, addr *HvsockAddr) (*HvsockConn, error) { - sock, err := newHvSocket() - if err != nil { - return nil, err - } - defer func() { - if sock != nil { - sock.Close() - } - }() - c, err := sock.prepareIo() - if err != nil { - return nil, err - } - defer sock.wg.Done() - var bytes uint32 - err = windows.ConnectEx(windows.Handle(sock.handle), sa, nil, 0, &bytes, &c.o) - _, err = sock.asyncIo(ctx, c, nil, bytes, err) - if err != nil { - return nil, err - } - conn := &HvsockConn{ - sock: sock, - remote: *addr, - } - sock = nil - return conn, nil -} -*/ - -func (conn *HvsockConn) opErr(op string, err error) error { - return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err} -} - -func (conn *HvsockConn) Read(b []byte) (int, error) { - c, err := conn.sock.prepareIo() - if err != nil { - return 0, conn.opErr("read", err) - } - defer conn.sock.wg.Done() - buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} - var flags, bytes uint32 - err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) - n, err := conn.sock.asyncIo(c, &conn.sock.readDeadline, bytes, err) - if err != nil { - if _, ok := err.(syscall.Errno); ok { - err = os.NewSyscallError("wsarecv", err) - } - return 0, conn.opErr("read", err) - } else if n == 0 { - err = io.EOF - } - return n, err -} - -func (conn *HvsockConn) Write(b []byte) (int, error) { - t := 0 - for len(b) != 0 { - n, err := conn.write(b) - if err != nil { - return t + n, err - } - t += n - b = b[n:] - } - return t, nil -} - -func (conn *HvsockConn) write(b []byte) (int, error) { - c, err := conn.sock.prepareIo() - if err != nil { - return 0, conn.opErr("write", err) - } - defer conn.sock.wg.Done() - buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} - var bytes uint32 - err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil) - n, err := conn.sock.asyncIo(c, &conn.sock.writeDeadline, bytes, err) - if err != nil { - if _, ok := err.(syscall.Errno); ok { - err = os.NewSyscallError("wsasend", err) - } - return 0, conn.opErr("write", err) - } - return n, err -} - -// Close closes the socket connection, failing any pending read or write calls. -func (conn *HvsockConn) Close() error { - return conn.sock.Close() -} - -func (conn *HvsockConn) IsClosed() bool { - return conn.sock.IsClosed() -} - -func (conn *HvsockConn) shutdown(how int) error { - if conn.IsClosed() { - return ErrFileClosed - } - - err := syscall.Shutdown(conn.sock.handle, how) - if err != nil { - return os.NewSyscallError("shutdown", err) - } - return nil -} - -// CloseRead shuts down the read end of the socket, preventing future read operations. -func (conn *HvsockConn) CloseRead() error { - err := conn.shutdown(syscall.SHUT_RD) - if err != nil { - return conn.opErr("close", err) - } - return nil -} - -// CloseWrite shuts down the write end of the socket, preventing future write operations and -// notifying the other endpoint that no more data will be written. -func (conn *HvsockConn) CloseWrite() error { - err := conn.shutdown(syscall.SHUT_WR) - if err != nil { - return conn.opErr("close", err) - } - return nil -} - -// LocalAddr returns the local address of the connection. -func (conn *HvsockConn) LocalAddr() net.Addr { - return &conn.local -} - -// RemoteAddr returns the remote address of the connection. 
-func (conn *HvsockConn) RemoteAddr() net.Addr { - return &conn.remote -} - -// SetDeadline implements the net.Conn SetDeadline method. -func (conn *HvsockConn) SetDeadline(t time.Time) error { - conn.SetReadDeadline(t) - conn.SetWriteDeadline(t) - return nil -} - -// SetReadDeadline implements the net.Conn SetReadDeadline method. -func (conn *HvsockConn) SetReadDeadline(t time.Time) error { - return conn.sock.SetReadDeadline(t) -} - -// SetWriteDeadline implements the net.Conn SetWriteDeadline method. -func (conn *HvsockConn) SetWriteDeadline(t time.Time) error { - return conn.sock.SetWriteDeadline(t) -} diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go deleted file mode 100644 index 96700a73d..000000000 --- a/vendor/github.com/Microsoft/go-winio/pipe.go +++ /dev/null @@ -1,517 +0,0 @@ -// +build windows - -package winio - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "os" - "runtime" - "syscall" - "time" - "unsafe" -) - -//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe -//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW -//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW -//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo -//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW -//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc -//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) = ntdll.NtCreateNamedPipeFile -//sys rtlNtStatusToDosError(status ntstatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb -//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) = ntdll.RtlDosPathNameToNtPathName_U -//sys rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) = ntdll.RtlDefaultNpAcl - -type ioStatusBlock struct { - Status, Information uintptr -} - -type objectAttributes struct { - Length uintptr - RootDirectory uintptr - ObjectName *unicodeString - Attributes uintptr - SecurityDescriptor *securityDescriptor - SecurityQoS uintptr -} - -type unicodeString struct { - Length uint16 - MaximumLength uint16 - Buffer uintptr -} - -type securityDescriptor struct { - Revision byte - Sbz1 byte - Control uint16 - Owner uintptr - Group uintptr - Sacl uintptr - Dacl uintptr -} - -type ntstatus int32 - -func (status ntstatus) Err() error { - if status >= 0 { - return nil - } - return rtlNtStatusToDosError(status) -} - -const ( - cERROR_PIPE_BUSY = syscall.Errno(231) - cERROR_NO_DATA = syscall.Errno(232) - cERROR_PIPE_CONNECTED = syscall.Errno(535) - cERROR_SEM_TIMEOUT = syscall.Errno(121) - - 
cSECURITY_SQOS_PRESENT = 0x100000 - cSECURITY_ANONYMOUS = 0 - - cPIPE_TYPE_MESSAGE = 4 - - cPIPE_READMODE_MESSAGE = 2 - - cFILE_OPEN = 1 - cFILE_CREATE = 2 - - cFILE_PIPE_MESSAGE_TYPE = 1 - cFILE_PIPE_REJECT_REMOTE_CLIENTS = 2 - - cSE_DACL_PRESENT = 4 -) - -var ( - // ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed. - // This error should match net.errClosing since docker takes a dependency on its text. - ErrPipeListenerClosed = errors.New("use of closed network connection") - - errPipeWriteClosed = errors.New("pipe has been closed for write") -) - -type win32Pipe struct { - *win32File - path string -} - -type win32MessageBytePipe struct { - win32Pipe - writeClosed bool - readEOF bool -} - -type pipeAddress string - -func (f *win32Pipe) LocalAddr() net.Addr { - return pipeAddress(f.path) -} - -func (f *win32Pipe) RemoteAddr() net.Addr { - return pipeAddress(f.path) -} - -func (f *win32Pipe) SetDeadline(t time.Time) error { - f.SetReadDeadline(t) - f.SetWriteDeadline(t) - return nil -} - -// CloseWrite closes the write side of a message pipe in byte mode. -func (f *win32MessageBytePipe) CloseWrite() error { - if f.writeClosed { - return errPipeWriteClosed - } - err := f.win32File.Flush() - if err != nil { - return err - } - _, err = f.win32File.Write(nil) - if err != nil { - return err - } - f.writeClosed = true - return nil -} - -// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since -// they are used to implement CloseWrite(). -func (f *win32MessageBytePipe) Write(b []byte) (int, error) { - if f.writeClosed { - return 0, errPipeWriteClosed - } - if len(b) == 0 { - return 0, nil - } - return f.win32File.Write(b) -} - -// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message -// mode pipe will return io.EOF, as will all subsequent reads. -func (f *win32MessageBytePipe) Read(b []byte) (int, error) { - if f.readEOF { - return 0, io.EOF - } - n, err := f.win32File.Read(b) - if err == io.EOF { - // If this was the result of a zero-byte read, then - // it is possible that the read was due to a zero-size - // message. Since we are simulating CloseWrite with a - // zero-byte message, ensure that all future Read() calls - // also return EOF. - f.readEOF = true - } else if err == syscall.ERROR_MORE_DATA { - // ERROR_MORE_DATA indicates that the pipe's read mode is message mode - // and the message still has more bytes. Treat this as a success, since - // this package presents all named pipes as byte streams. - err = nil - } - return n, err -} - -func (s pipeAddress) Network() string { - return "pipe" -} - -func (s pipeAddress) String() string { - return string(s) -} - -// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout. -func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Handle, error) { - for { - - select { - case <-ctx.Done(): - return syscall.Handle(0), ctx.Err() - default: - h, err := createFile(*path, access, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) - if err == nil { - return h, nil - } - if err != cERROR_PIPE_BUSY { - return h, &os.PathError{Err: err, Op: "open", Path: *path} - } - // Wait 10 msec and try again. This is a rather simplistic - // view, as we always try each 10 milliseconds. 
- time.Sleep(10 * time.Millisecond) - } - } -} - -// DialPipe connects to a named pipe by path, timing out if the connection -// takes longer than the specified duration. If timeout is nil, then we use -// a default timeout of 2 seconds. (We do not use WaitNamedPipe.) -func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { - var absTimeout time.Time - if timeout != nil { - absTimeout = time.Now().Add(*timeout) - } else { - absTimeout = time.Now().Add(2 * time.Second) - } - ctx, _ := context.WithDeadline(context.Background(), absTimeout) - conn, err := DialPipeContext(ctx, path) - if err == context.DeadlineExceeded { - return nil, ErrTimeout - } - return conn, err -} - -// DialPipeContext attempts to connect to a named pipe by `path` until `ctx` -// cancellation or timeout. -func DialPipeContext(ctx context.Context, path string) (net.Conn, error) { - return DialPipeAccess(ctx, path, syscall.GENERIC_READ|syscall.GENERIC_WRITE) -} - -// DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx` -// cancellation or timeout. -func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) { - var err error - var h syscall.Handle - h, err = tryDialPipe(ctx, &path, access) - if err != nil { - return nil, err - } - - var flags uint32 - err = getNamedPipeInfo(h, &flags, nil, nil, nil) - if err != nil { - return nil, err - } - - f, err := makeWin32File(h) - if err != nil { - syscall.Close(h) - return nil, err - } - - // If the pipe is in message mode, return a message byte pipe, which - // supports CloseWrite(). - if flags&cPIPE_TYPE_MESSAGE != 0 { - return &win32MessageBytePipe{ - win32Pipe: win32Pipe{win32File: f, path: path}, - }, nil - } - return &win32Pipe{win32File: f, path: path}, nil -} - -type acceptResponse struct { - f *win32File - err error -} - -type win32PipeListener struct { - firstHandle syscall.Handle - path string - config PipeConfig - acceptCh chan (chan acceptResponse) - closeCh chan int - doneCh chan int -} - -func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (syscall.Handle, error) { - path16, err := syscall.UTF16FromString(path) - if err != nil { - return 0, &os.PathError{Op: "open", Path: path, Err: err} - } - - var oa objectAttributes - oa.Length = unsafe.Sizeof(oa) - - var ntPath unicodeString - if err := rtlDosPathNameToNtPathName(&path16[0], &ntPath, 0, 0).Err(); err != nil { - return 0, &os.PathError{Op: "open", Path: path, Err: err} - } - defer localFree(ntPath.Buffer) - oa.ObjectName = &ntPath - - // The security descriptor is only needed for the first pipe. - if first { - if sd != nil { - len := uint32(len(sd)) - sdb := localAlloc(0, len) - defer localFree(sdb) - copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd) - oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb)) - } else { - // Construct the default named pipe security descriptor. 
- var dacl uintptr - if err := rtlDefaultNpAcl(&dacl).Err(); err != nil { - return 0, fmt.Errorf("getting default named pipe ACL: %s", err) - } - defer localFree(dacl) - - sdb := &securityDescriptor{ - Revision: 1, - Control: cSE_DACL_PRESENT, - Dacl: dacl, - } - oa.SecurityDescriptor = sdb - } - } - - typ := uint32(cFILE_PIPE_REJECT_REMOTE_CLIENTS) - if c.MessageMode { - typ |= cFILE_PIPE_MESSAGE_TYPE - } - - disposition := uint32(cFILE_OPEN) - access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE) - if first { - disposition = cFILE_CREATE - // By not asking for read or write access, the named pipe file system - // will put this pipe into an initially disconnected state, blocking - // client connections until the next call with first == false. - access = syscall.SYNCHRONIZE - } - - timeout := int64(-50 * 10000) // 50ms - - var ( - h syscall.Handle - iosb ioStatusBlock - ) - err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, disposition, 0, typ, 0, 0, 0xffffffff, uint32(c.InputBufferSize), uint32(c.OutputBufferSize), &timeout).Err() - if err != nil { - return 0, &os.PathError{Op: "open", Path: path, Err: err} - } - - runtime.KeepAlive(ntPath) - return h, nil -} - -func (l *win32PipeListener) makeServerPipe() (*win32File, error) { - h, err := makeServerPipeHandle(l.path, nil, &l.config, false) - if err != nil { - return nil, err - } - f, err := makeWin32File(h) - if err != nil { - syscall.Close(h) - return nil, err - } - return f, nil -} - -func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) { - p, err := l.makeServerPipe() - if err != nil { - return nil, err - } - - // Wait for the client to connect. - ch := make(chan error) - go func(p *win32File) { - ch <- connectPipe(p) - }(p) - - select { - case err = <-ch: - if err != nil { - p.Close() - p = nil - } - case <-l.closeCh: - // Abort the connect request by closing the handle. - p.Close() - p = nil - err = <-ch - if err == nil || err == ErrFileClosed { - err = ErrPipeListenerClosed - } - } - return p, err -} - -func (l *win32PipeListener) listenerRoutine() { - closed := false - for !closed { - select { - case <-l.closeCh: - closed = true - case responseCh := <-l.acceptCh: - var ( - p *win32File - err error - ) - for { - p, err = l.makeConnectedServerPipe() - // If the connection was immediately closed by the client, try - // again. - if err != cERROR_NO_DATA { - break - } - } - responseCh <- acceptResponse{p, err} - closed = err == ErrPipeListenerClosed - } - } - syscall.Close(l.firstHandle) - l.firstHandle = 0 - // Notify Close() and Accept() callers that the handle has been closed. - close(l.doneCh) -} - -// PipeConfig contain configuration for the pipe listener. -type PipeConfig struct { - // SecurityDescriptor contains a Windows security descriptor in SDDL format. - SecurityDescriptor string - - // MessageMode determines whether the pipe is in byte or message mode. In either - // case the pipe is read in byte mode by default. The only practical difference in - // this implementation is that CloseWrite() is only supported for message mode pipes; - // CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only - // transferred to the reader (and returned as io.EOF in this implementation) - // when the pipe is in message mode. - MessageMode bool - - // InputBufferSize specifies the size of the input buffer, in bytes. - InputBufferSize int32 - - // OutputBufferSize specifies the size of the output buffer, in bytes. 
- OutputBufferSize int32 -} - -// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe. -// The pipe must not already exist. -func ListenPipe(path string, c *PipeConfig) (net.Listener, error) { - var ( - sd []byte - err error - ) - if c == nil { - c = &PipeConfig{} - } - if c.SecurityDescriptor != "" { - sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor) - if err != nil { - return nil, err - } - } - h, err := makeServerPipeHandle(path, sd, c, true) - if err != nil { - return nil, err - } - l := &win32PipeListener{ - firstHandle: h, - path: path, - config: *c, - acceptCh: make(chan (chan acceptResponse)), - closeCh: make(chan int), - doneCh: make(chan int), - } - go l.listenerRoutine() - return l, nil -} - -func connectPipe(p *win32File) error { - c, err := p.prepareIo() - if err != nil { - return err - } - defer p.wg.Done() - - err = connectNamedPipe(p.handle, &c.o) - _, err = p.asyncIo(c, nil, 0, err) - if err != nil && err != cERROR_PIPE_CONNECTED { - return err - } - return nil -} - -func (l *win32PipeListener) Accept() (net.Conn, error) { - ch := make(chan acceptResponse) - select { - case l.acceptCh <- ch: - response := <-ch - err := response.err - if err != nil { - return nil, err - } - if l.config.MessageMode { - return &win32MessageBytePipe{ - win32Pipe: win32Pipe{win32File: response.f, path: l.path}, - }, nil - } - return &win32Pipe{win32File: response.f, path: l.path}, nil - case <-l.doneCh: - return nil, ErrPipeListenerClosed - } -} - -func (l *win32PipeListener) Close() error { - select { - case l.closeCh <- 1: - <-l.doneCh - case <-l.doneCh: - } - return nil -} - -func (l *win32PipeListener) Addr() net.Addr { - return pipeAddress(l.path) -} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go deleted file mode 100644 index 2d9161e2d..000000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go +++ /dev/null @@ -1,228 +0,0 @@ -// +build windows - -// Package guid provides a GUID type. The backing structure for a GUID is -// identical to that used by the golang.org/x/sys/windows GUID type. -// There are two main binary encodings used for a GUID, the big-endian encoding, -// and the Windows (mixed-endian) encoding. See here for details: -// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding -package guid - -import ( - "crypto/rand" - "crypto/sha1" - "encoding" - "encoding/binary" - "fmt" - "strconv" -) - -// Variant specifies which GUID variant (or "type") of the GUID. It determines -// how the entirety of the rest of the GUID is interpreted. -type Variant uint8 - -// The variants specified by RFC 4122. -const ( - // VariantUnknown specifies a GUID variant which does not conform to one of - // the variant encodings specified in RFC 4122. - VariantUnknown Variant = iota - VariantNCS - VariantRFC4122 - VariantMicrosoft - VariantFuture -) - -// Version specifies how the bits in the GUID were generated. For instance, a -// version 4 GUID is randomly generated, and a version 5 is generated from the -// hash of an input string. -type Version uint8 - -var _ = (encoding.TextMarshaler)(GUID{}) -var _ = (encoding.TextUnmarshaler)(&GUID{}) - -// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122. -func NewV4() (GUID, error) { - var b [16]byte - if _, err := rand.Read(b[:]); err != nil { - return GUID{}, err - } - - g := FromArray(b) - g.setVersion(4) // Version 4 means randomly generated. 
- g.setVariant(VariantRFC4122) - - return g, nil -} - -// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing) -// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name, -// and the sample code treats it as a series of bytes, so we do the same here. -// -// Some implementations, such as those found on Windows, treat the name as a -// big-endian UTF16 stream of bytes. If that is desired, the string can be -// encoded as such before being passed to this function. -func NewV5(namespace GUID, name []byte) (GUID, error) { - b := sha1.New() - namespaceBytes := namespace.ToArray() - b.Write(namespaceBytes[:]) - b.Write(name) - - a := [16]byte{} - copy(a[:], b.Sum(nil)) - - g := FromArray(a) - g.setVersion(5) // Version 5 means generated from a string. - g.setVariant(VariantRFC4122) - - return g, nil -} - -func fromArray(b [16]byte, order binary.ByteOrder) GUID { - var g GUID - g.Data1 = order.Uint32(b[0:4]) - g.Data2 = order.Uint16(b[4:6]) - g.Data3 = order.Uint16(b[6:8]) - copy(g.Data4[:], b[8:16]) - return g -} - -func (g GUID) toArray(order binary.ByteOrder) [16]byte { - b := [16]byte{} - order.PutUint32(b[0:4], g.Data1) - order.PutUint16(b[4:6], g.Data2) - order.PutUint16(b[6:8], g.Data3) - copy(b[8:16], g.Data4[:]) - return b -} - -// FromArray constructs a GUID from a big-endian encoding array of 16 bytes. -func FromArray(b [16]byte) GUID { - return fromArray(b, binary.BigEndian) -} - -// ToArray returns an array of 16 bytes representing the GUID in big-endian -// encoding. -func (g GUID) ToArray() [16]byte { - return g.toArray(binary.BigEndian) -} - -// FromWindowsArray constructs a GUID from a Windows encoding array of bytes. -func FromWindowsArray(b [16]byte) GUID { - return fromArray(b, binary.LittleEndian) -} - -// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows -// encoding. -func (g GUID) ToWindowsArray() [16]byte { - return g.toArray(binary.LittleEndian) -} - -func (g GUID) String() string { - return fmt.Sprintf( - "%08x-%04x-%04x-%04x-%012x", - g.Data1, - g.Data2, - g.Data3, - g.Data4[:2], - g.Data4[2:]) -} - -// FromString parses a string containing a GUID and returns the GUID. The only -// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` -// format. 
-func FromString(s string) (GUID, error) { - if len(s) != 36 { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - - var g GUID - - data1, err := strconv.ParseUint(s[0:8], 16, 32) - if err != nil { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - g.Data1 = uint32(data1) - - data2, err := strconv.ParseUint(s[9:13], 16, 16) - if err != nil { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - g.Data2 = uint16(data2) - - data3, err := strconv.ParseUint(s[14:18], 16, 16) - if err != nil { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - g.Data3 = uint16(data3) - - for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} { - v, err := strconv.ParseUint(s[x:x+2], 16, 8) - if err != nil { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - g.Data4[i] = uint8(v) - } - - return g, nil -} - -func (g *GUID) setVariant(v Variant) { - d := g.Data4[0] - switch v { - case VariantNCS: - d = (d & 0x7f) - case VariantRFC4122: - d = (d & 0x3f) | 0x80 - case VariantMicrosoft: - d = (d & 0x1f) | 0xc0 - case VariantFuture: - d = (d & 0x0f) | 0xe0 - case VariantUnknown: - fallthrough - default: - panic(fmt.Sprintf("invalid variant: %d", v)) - } - g.Data4[0] = d -} - -// Variant returns the GUID variant, as defined in RFC 4122. -func (g GUID) Variant() Variant { - b := g.Data4[0] - if b&0x80 == 0 { - return VariantNCS - } else if b&0xc0 == 0x80 { - return VariantRFC4122 - } else if b&0xe0 == 0xc0 { - return VariantMicrosoft - } else if b&0xe0 == 0xe0 { - return VariantFuture - } - return VariantUnknown -} - -func (g *GUID) setVersion(v Version) { - g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12) -} - -// Version returns the GUID version, as defined in RFC 4122. -func (g GUID) Version() Version { - return Version((g.Data3 & 0xF000) >> 12) -} - -// MarshalText returns the textual representation of the GUID. -func (g GUID) MarshalText() ([]byte, error) { - return []byte(g.String()), nil -} - -// UnmarshalText takes the textual representation of a GUID, and unmarhals it -// into this GUID. -func (g *GUID) UnmarshalText(text []byte) error { - g2, err := FromString(string(text)) - if err != nil { - return err - } - *g = g2 - return nil -} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go deleted file mode 100644 index f64d828c0..000000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !windows - -package guid - -// GUID represents a GUID/UUID. It has the same structure as -// golang.org/x/sys/windows.GUID so that it can be used with functions expecting -// that type. It is defined as its own type as that is only available to builds -// targeted at `windows`. The representation matches that used by native Windows -// code. -type GUID struct { - Data1 uint32 - Data2 uint16 - Data3 uint16 - Data4 [8]byte -} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go deleted file mode 100644 index 83617f4ee..000000000 --- a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go +++ /dev/null @@ -1,10 +0,0 @@ -package guid - -import "golang.org/x/sys/windows" - -// GUID represents a GUID/UUID. It has the same structure as -// golang.org/x/sys/windows.GUID so that it can be used with functions expecting -// that type. 
It is defined as its own type so that stringification and -// marshaling can be supported. The representation matches that used by native -// Windows code. -type GUID windows.GUID diff --git a/vendor/github.com/Microsoft/go-winio/privilege.go b/vendor/github.com/Microsoft/go-winio/privilege.go deleted file mode 100644 index c3dd7c217..000000000 --- a/vendor/github.com/Microsoft/go-winio/privilege.go +++ /dev/null @@ -1,203 +0,0 @@ -// +build windows - -package winio - -import ( - "bytes" - "encoding/binary" - "fmt" - "runtime" - "sync" - "syscall" - "unicode/utf16" - - "golang.org/x/sys/windows" -) - -//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges -//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf -//sys revertToSelf() (err error) = advapi32.RevertToSelf -//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken -//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread -//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW -//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW -//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW - -const ( - SE_PRIVILEGE_ENABLED = 2 - - ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300 - - SeBackupPrivilege = "SeBackupPrivilege" - SeRestorePrivilege = "SeRestorePrivilege" - SeSecurityPrivilege = "SeSecurityPrivilege" -) - -const ( - securityAnonymous = iota - securityIdentification - securityImpersonation - securityDelegation -) - -var ( - privNames = make(map[string]uint64) - privNameMutex sync.Mutex -) - -// PrivilegeError represents an error enabling privileges. -type PrivilegeError struct { - privileges []uint64 -} - -func (e *PrivilegeError) Error() string { - s := "" - if len(e.privileges) > 1 { - s = "Could not enable privileges " - } else { - s = "Could not enable privilege " - } - for i, p := range e.privileges { - if i != 0 { - s += ", " - } - s += `"` - s += getPrivilegeName(p) - s += `"` - } - return s -} - -// RunWithPrivilege enables a single privilege for a function call. -func RunWithPrivilege(name string, fn func() error) error { - return RunWithPrivileges([]string{name}, fn) -} - -// RunWithPrivileges enables privileges for a function call. -func RunWithPrivileges(names []string, fn func() error) error { - privileges, err := mapPrivileges(names) - if err != nil { - return err - } - runtime.LockOSThread() - defer runtime.UnlockOSThread() - token, err := newThreadToken() - if err != nil { - return err - } - defer releaseThreadToken(token) - err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED) - if err != nil { - return err - } - return fn() -} - -func mapPrivileges(names []string) ([]uint64, error) { - var privileges []uint64 - privNameMutex.Lock() - defer privNameMutex.Unlock() - for _, name := range names { - p, ok := privNames[name] - if !ok { - err := lookupPrivilegeValue("", name, &p) - if err != nil { - return nil, err - } - privNames[name] = p - } - privileges = append(privileges, p) - } - return privileges, nil -} - -// EnableProcessPrivileges enables privileges globally for the process. 
-func EnableProcessPrivileges(names []string) error { - return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED) -} - -// DisableProcessPrivileges disables privileges globally for the process. -func DisableProcessPrivileges(names []string) error { - return enableDisableProcessPrivilege(names, 0) -} - -func enableDisableProcessPrivilege(names []string, action uint32) error { - privileges, err := mapPrivileges(names) - if err != nil { - return err - } - - p, _ := windows.GetCurrentProcess() - var token windows.Token - err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token) - if err != nil { - return err - } - - defer token.Close() - return adjustPrivileges(token, privileges, action) -} - -func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error { - var b bytes.Buffer - binary.Write(&b, binary.LittleEndian, uint32(len(privileges))) - for _, p := range privileges { - binary.Write(&b, binary.LittleEndian, p) - binary.Write(&b, binary.LittleEndian, action) - } - prevState := make([]byte, b.Len()) - reqSize := uint32(0) - success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize) - if !success { - return err - } - if err == ERROR_NOT_ALL_ASSIGNED { - return &PrivilegeError{privileges} - } - return nil -} - -func getPrivilegeName(luid uint64) string { - var nameBuffer [256]uint16 - bufSize := uint32(len(nameBuffer)) - err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize) - if err != nil { - return fmt.Sprintf("", luid) - } - - var displayNameBuffer [256]uint16 - displayBufSize := uint32(len(displayNameBuffer)) - var langID uint32 - err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID) - if err != nil { - return fmt.Sprintf("", string(utf16.Decode(nameBuffer[:bufSize]))) - } - - return string(utf16.Decode(displayNameBuffer[:displayBufSize])) -} - -func newThreadToken() (windows.Token, error) { - err := impersonateSelf(securityImpersonation) - if err != nil { - return 0, err - } - - var token windows.Token - err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token) - if err != nil { - rerr := revertToSelf() - if rerr != nil { - panic(rerr) - } - return 0, err - } - return token, nil -} - -func releaseThreadToken(h windows.Token) { - err := revertToSelf() - if err != nil { - panic(err) - } - h.Close() -} diff --git a/vendor/github.com/Microsoft/go-winio/reparse.go b/vendor/github.com/Microsoft/go-winio/reparse.go deleted file mode 100644 index fc1ee4d3a..000000000 --- a/vendor/github.com/Microsoft/go-winio/reparse.go +++ /dev/null @@ -1,128 +0,0 @@ -package winio - -import ( - "bytes" - "encoding/binary" - "fmt" - "strings" - "unicode/utf16" - "unsafe" -) - -const ( - reparseTagMountPoint = 0xA0000003 - reparseTagSymlink = 0xA000000C -) - -type reparseDataBuffer struct { - ReparseTag uint32 - ReparseDataLength uint16 - Reserved uint16 - SubstituteNameOffset uint16 - SubstituteNameLength uint16 - PrintNameOffset uint16 - PrintNameLength uint16 -} - -// ReparsePoint describes a Win32 symlink or mount point. -type ReparsePoint struct { - Target string - IsMountPoint bool -} - -// UnsupportedReparsePointError is returned when trying to decode a non-symlink or -// mount point reparse point. 
-type UnsupportedReparsePointError struct { - Tag uint32 -} - -func (e *UnsupportedReparsePointError) Error() string { - return fmt.Sprintf("unsupported reparse point %x", e.Tag) -} - -// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink -// or a mount point. -func DecodeReparsePoint(b []byte) (*ReparsePoint, error) { - tag := binary.LittleEndian.Uint32(b[0:4]) - return DecodeReparsePointData(tag, b[8:]) -} - -func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) { - isMountPoint := false - switch tag { - case reparseTagMountPoint: - isMountPoint = true - case reparseTagSymlink: - default: - return nil, &UnsupportedReparsePointError{tag} - } - nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6]) - if !isMountPoint { - nameOffset += 4 - } - nameLength := binary.LittleEndian.Uint16(b[6:8]) - name := make([]uint16, nameLength/2) - err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name) - if err != nil { - return nil, err - } - return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil -} - -func isDriveLetter(c byte) bool { - return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') -} - -// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or -// mount point. -func EncodeReparsePoint(rp *ReparsePoint) []byte { - // Generate an NT path and determine if this is a relative path. - var ntTarget string - relative := false - if strings.HasPrefix(rp.Target, `\\?\`) { - ntTarget = `\??\` + rp.Target[4:] - } else if strings.HasPrefix(rp.Target, `\\`) { - ntTarget = `\??\UNC\` + rp.Target[2:] - } else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' { - ntTarget = `\??\` + rp.Target - } else { - ntTarget = rp.Target - relative = true - } - - // The paths must be NUL-terminated even though they are counted strings. 
- target16 := utf16.Encode([]rune(rp.Target + "\x00")) - ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00")) - - size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8 - size += len(ntTarget16)*2 + len(target16)*2 - - tag := uint32(reparseTagMountPoint) - if !rp.IsMountPoint { - tag = reparseTagSymlink - size += 4 // Add room for symlink flags - } - - data := reparseDataBuffer{ - ReparseTag: tag, - ReparseDataLength: uint16(size), - SubstituteNameOffset: 0, - SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2), - PrintNameOffset: uint16(len(ntTarget16) * 2), - PrintNameLength: uint16((len(target16) - 1) * 2), - } - - var b bytes.Buffer - binary.Write(&b, binary.LittleEndian, &data) - if !rp.IsMountPoint { - flags := uint32(0) - if relative { - flags |= 1 - } - binary.Write(&b, binary.LittleEndian, flags) - } - - binary.Write(&b, binary.LittleEndian, ntTarget16) - binary.Write(&b, binary.LittleEndian, target16) - return b.Bytes() -} diff --git a/vendor/github.com/Microsoft/go-winio/sd.go b/vendor/github.com/Microsoft/go-winio/sd.go deleted file mode 100644 index db1b370a1..000000000 --- a/vendor/github.com/Microsoft/go-winio/sd.go +++ /dev/null @@ -1,98 +0,0 @@ -// +build windows - -package winio - -import ( - "syscall" - "unsafe" -) - -//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW -//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW -//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW -//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW -//sys localFree(mem uintptr) = LocalFree -//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength - -const ( - cERROR_NONE_MAPPED = syscall.Errno(1332) -) - -type AccountLookupError struct { - Name string - Err error -} - -func (e *AccountLookupError) Error() string { - if e.Name == "" { - return "lookup account: empty account name specified" - } - var s string - switch e.Err { - case cERROR_NONE_MAPPED: - s = "not found" - default: - s = e.Err.Error() - } - return "lookup account " + e.Name + ": " + s -} - -type SddlConversionError struct { - Sddl string - Err error -} - -func (e *SddlConversionError) Error() string { - return "convert " + e.Sddl + ": " + e.Err.Error() -} - -// LookupSidByName looks up the SID of an account by name -func LookupSidByName(name string) (sid string, err error) { - if name == "" { - return "", &AccountLookupError{name, cERROR_NONE_MAPPED} - } - - var sidSize, sidNameUse, refDomainSize uint32 - err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse) - if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { - return "", &AccountLookupError{name, err} - } - sidBuffer := make([]byte, sidSize) - refDomainBuffer := make([]uint16, refDomainSize) - err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse) - if err != nil { - return "", &AccountLookupError{name, err} - } - var strBuffer *uint16 - err = convertSidToStringSid(&sidBuffer[0], &strBuffer) - if err != nil { - return "", &AccountLookupError{name, err} - } - sid = 
syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:]) - localFree(uintptr(unsafe.Pointer(strBuffer))) - return sid, nil -} - -func SddlToSecurityDescriptor(sddl string) ([]byte, error) { - var sdBuffer uintptr - err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil) - if err != nil { - return nil, &SddlConversionError{sddl, err} - } - defer localFree(sdBuffer) - sd := make([]byte, getSecurityDescriptorLength(sdBuffer)) - copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)]) - return sd, nil -} - -func SecurityDescriptorToSddl(sd []byte) (string, error) { - var sddl *uint16 - // The returned string length seems to including an aribtrary number of terminating NULs. - // Don't use it. - err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil) - if err != nil { - return "", err - } - defer localFree(uintptr(unsafe.Pointer(sddl))) - return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil -} diff --git a/vendor/github.com/Microsoft/go-winio/syscall.go b/vendor/github.com/Microsoft/go-winio/syscall.go deleted file mode 100644 index 5955c99fd..000000000 --- a/vendor/github.com/Microsoft/go-winio/syscall.go +++ /dev/null @@ -1,3 +0,0 @@ -package winio - -//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go deleted file mode 100644 index 176ff75e3..000000000 --- a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go +++ /dev/null @@ -1,427 +0,0 @@ -// Code generated by 'go generate'; DO NOT EDIT. - -package winio - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) - errERROR_EINVAL error = syscall.EINVAL -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return errERROR_EINVAL - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
- return e -} - -var ( - modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - modntdll = windows.NewLazySystemDLL("ntdll.dll") - modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") - - procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") - procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") - procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") - procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") - procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") - procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") - procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") - procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") - procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") - procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") - procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") - procRevertToSelf = modadvapi32.NewProc("RevertToSelf") - procBackupRead = modkernel32.NewProc("BackupRead") - procBackupWrite = modkernel32.NewProc("BackupWrite") - procCancelIoEx = modkernel32.NewProc("CancelIoEx") - procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") - procCreateFileW = modkernel32.NewProc("CreateFileW") - procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") - procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") - procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") - procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") - procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") - procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") - procLocalAlloc = modkernel32.NewProc("LocalAlloc") - procLocalFree = modkernel32.NewProc("LocalFree") - procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") - procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") - procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") - procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") - procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") - procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") - procbind = modws2_32.NewProc("bind") -) - -func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { - var _p0 uint32 - if releaseAll { - _p0 = 1 - } - r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) - success = r0 != 0 - if true { - err = errnoErr(e1) - } - return -} - -func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func 
convertSidToStringSid(sid *byte, str **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(str) - if err != nil { - return - } - return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) -} - -func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func getSecurityDescriptorLength(sd uintptr) (len uint32) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) - len = uint32(r0) - return -} - -func impersonateSelf(level uint32) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(accountName) - if err != nil { - return - } - return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse) -} - -func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId) -} - -func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - return _lookupPrivilegeName(_p0, luid, buffer, size) -} - -func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), 
uintptr(unsafe.Pointer(size)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - var _p1 *uint16 - _p1, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _lookupPrivilegeValue(_p0, _p1, luid) -} - -func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { - r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) { - var _p0 uint32 - if openAsSelf { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func revertToSelf() (err error) { - r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { - var _p0 *byte - if len(b) > 0 { - _p0 = &b[0] - } - var _p1 uint32 - if abort { - _p1 = 1 - } - var _p2 uint32 - if processSecurity { - _p2 = 1 - } - r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { - var _p0 *byte - if len(b) > 0 { - _p0 = &b[0] - } - var _p1 uint32 - if abort { - _p1 = 1 - } - var _p2 uint32 - if processSecurity { - _p2 = 1 - } - r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile) -} - -func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), 
uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - err = errnoErr(e1) - } - return -} - -func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) - newport = syscall.Handle(r0) - if newport == 0 { - err = errnoErr(e1) - } - return -} - -func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) -} - -func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - err = errnoErr(e1) - } - return -} - -func getCurrentThread() (h syscall.Handle) { - r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) - h = syscall.Handle(r0) - return -} - -func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func localAlloc(uFlags uint32, length uint32) (ptr uintptr) { - r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0) - ptr = uintptr(r0) - return -} - -func localFree(mem uintptr) { - syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0) - return -} - -func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 
0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) { - r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) - status = ntstatus(r0) - return -} - -func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) { - r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0) - status = ntstatus(r0) - return -} - -func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) { - r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0) - status = ntstatus(r0) - return -} - -func rtlNtStatusToDosError(status ntstatus) (winerr error) { - r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0) - if r0 != 0 { - winerr = syscall.Errno(r0) - } - return -} - -func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { - var _p0 uint32 - if wait { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) - if r1 == socketError { - err = errnoErr(e1) - } - return -} diff --git a/vendor/github.com/cenkalti/backoff/v4/.gitignore b/vendor/github.com/cenkalti/backoff/v4/.gitignore deleted file mode 100644 index 50d95c548..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/.gitignore +++ /dev/null @@ -1,25 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -# IDEs -.idea/ diff --git a/vendor/github.com/cenkalti/backoff/v4/LICENSE b/vendor/github.com/cenkalti/backoff/v4/LICENSE deleted file mode 100644 index 89b817996..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Cenk Altı - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall 
be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cenkalti/backoff/v4/README.md b/vendor/github.com/cenkalti/backoff/v4/README.md deleted file mode 100644 index 16abdfc08..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/README.md +++ /dev/null @@ -1,32 +0,0 @@ -# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls] - -This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. - -[Exponential backoff][exponential backoff wiki] -is an algorithm that uses feedback to multiplicatively decrease the rate of some process, -in order to gradually find an acceptable rate. -The retries exponentially increase and stop increasing when a certain threshold is met. - -## Usage - -Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end. - -Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation. - -## Contributing - -* I would like to keep this library as small as possible. -* Please don't send a PR without opening an issue and discussing it first. -* If proposed change is not a common use case, I will probably not accept it. - -[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4 -[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png -[travis]: https://travis-ci.org/cenkalti/backoff -[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master -[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master -[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master - -[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java -[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff - -[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples diff --git a/vendor/github.com/cenkalti/backoff/v4/backoff.go b/vendor/github.com/cenkalti/backoff/v4/backoff.go deleted file mode 100644 index 3676ee405..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/backoff.go +++ /dev/null @@ -1,66 +0,0 @@ -// Package backoff implements backoff algorithms for retrying operations. -// -// Use Retry function for retrying operations that may fail. -// If Retry does not meet your needs, -// copy/paste the function into your project and modify as you wish. -// -// There is also Ticker type similar to time.Ticker. -// You can use it if you need to work with channels. -// -// See Examples section below for usage examples. -package backoff - -import "time" - -// BackOff is a backoff policy for retrying an operation. -type BackOff interface { - // NextBackOff returns the duration to wait before retrying the operation, - // or backoff. Stop to indicate that no more retries should be made. 
- // - // Example usage: - // - // duration := backoff.NextBackOff(); - // if (duration == backoff.Stop) { - // // Do not retry operation. - // } else { - // // Sleep for duration and retry operation. - // } - // - NextBackOff() time.Duration - - // Reset to initial state. - Reset() -} - -// Stop indicates that no more retries should be made for use in NextBackOff(). -const Stop time.Duration = -1 - -// ZeroBackOff is a fixed backoff policy whose backoff time is always zero, -// meaning that the operation is retried immediately without waiting, indefinitely. -type ZeroBackOff struct{} - -func (b *ZeroBackOff) Reset() {} - -func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } - -// StopBackOff is a fixed backoff policy that always returns backoff.Stop for -// NextBackOff(), meaning that the operation should never be retried. -type StopBackOff struct{} - -func (b *StopBackOff) Reset() {} - -func (b *StopBackOff) NextBackOff() time.Duration { return Stop } - -// ConstantBackOff is a backoff policy that always returns the same backoff delay. -// This is in contrast to an exponential backoff policy, -// which returns a delay that grows longer as you call NextBackOff() over and over again. -type ConstantBackOff struct { - Interval time.Duration -} - -func (b *ConstantBackOff) Reset() {} -func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } - -func NewConstantBackOff(d time.Duration) *ConstantBackOff { - return &ConstantBackOff{Interval: d} -} diff --git a/vendor/github.com/cenkalti/backoff/v4/context.go b/vendor/github.com/cenkalti/backoff/v4/context.go deleted file mode 100644 index 48482330e..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/context.go +++ /dev/null @@ -1,62 +0,0 @@ -package backoff - -import ( - "context" - "time" -) - -// BackOffContext is a backoff policy that stops retrying after the context -// is canceled. -type BackOffContext interface { // nolint: golint - BackOff - Context() context.Context -} - -type backOffContext struct { - BackOff - ctx context.Context -} - -// WithContext returns a BackOffContext with context ctx -// -// ctx must not be nil -func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint - if ctx == nil { - panic("nil context") - } - - if b, ok := b.(*backOffContext); ok { - return &backOffContext{ - BackOff: b.BackOff, - ctx: ctx, - } - } - - return &backOffContext{ - BackOff: b, - ctx: ctx, - } -} - -func getContext(b BackOff) context.Context { - if cb, ok := b.(BackOffContext); ok { - return cb.Context() - } - if tb, ok := b.(*backOffTries); ok { - return getContext(tb.delegate) - } - return context.Background() -} - -func (b *backOffContext) Context() context.Context { - return b.ctx -} - -func (b *backOffContext) NextBackOff() time.Duration { - select { - case <-b.ctx.Done(): - return Stop - default: - return b.BackOff.NextBackOff() - } -} diff --git a/vendor/github.com/cenkalti/backoff/v4/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go deleted file mode 100644 index 2c56c1e71..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/exponential.go +++ /dev/null @@ -1,161 +0,0 @@ -package backoff - -import ( - "math/rand" - "time" -) - -/* -ExponentialBackOff is a backoff implementation that increases the backoff -period for each retry attempt using a randomization function that grows exponentially. 
- -NextBackOff() is calculated using the following formula: - - randomized interval = - RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) - -In other words NextBackOff() will range between the randomization factor -percentage below and above the retry interval. - -For example, given the following parameters: - - RetryInterval = 2 - RandomizationFactor = 0.5 - Multiplier = 2 - -the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, -multiplied by the exponential, that is, between 2 and 6 seconds. - -Note: MaxInterval caps the RetryInterval and not the randomized interval. - -If the time elapsed since an ExponentialBackOff instance is created goes past the -MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. - -The elapsed time can be reset by calling Reset(). - -Example: Given the following default arguments, for 10 tries the sequence will be, -and assuming we go over the MaxElapsedTime on the 10th try: - - Request # RetryInterval (seconds) Randomized Interval (seconds) - - 1 0.5 [0.25, 0.75] - 2 0.75 [0.375, 1.125] - 3 1.125 [0.562, 1.687] - 4 1.687 [0.8435, 2.53] - 5 2.53 [1.265, 3.795] - 6 3.795 [1.897, 5.692] - 7 5.692 [2.846, 8.538] - 8 8.538 [4.269, 12.807] - 9 12.807 [6.403, 19.210] - 10 19.210 backoff.Stop - -Note: Implementation is not thread-safe. -*/ -type ExponentialBackOff struct { - InitialInterval time.Duration - RandomizationFactor float64 - Multiplier float64 - MaxInterval time.Duration - // After MaxElapsedTime the ExponentialBackOff returns Stop. - // It never stops if MaxElapsedTime == 0. - MaxElapsedTime time.Duration - Stop time.Duration - Clock Clock - - currentInterval time.Duration - startTime time.Time -} - -// Clock is an interface that returns current time for BackOff. -type Clock interface { - Now() time.Time -} - -// Default values for ExponentialBackOff. -const ( - DefaultInitialInterval = 500 * time.Millisecond - DefaultRandomizationFactor = 0.5 - DefaultMultiplier = 1.5 - DefaultMaxInterval = 60 * time.Second - DefaultMaxElapsedTime = 15 * time.Minute -) - -// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. -func NewExponentialBackOff() *ExponentialBackOff { - b := &ExponentialBackOff{ - InitialInterval: DefaultInitialInterval, - RandomizationFactor: DefaultRandomizationFactor, - Multiplier: DefaultMultiplier, - MaxInterval: DefaultMaxInterval, - MaxElapsedTime: DefaultMaxElapsedTime, - Stop: Stop, - Clock: SystemClock, - } - b.Reset() - return b -} - -type systemClock struct{} - -func (t systemClock) Now() time.Time { - return time.Now() -} - -// SystemClock implements Clock interface that uses time.Now(). -var SystemClock = systemClock{} - -// Reset the interval back to the initial retry interval and restarts the timer. -// Reset must be called before using b. -func (b *ExponentialBackOff) Reset() { - b.currentInterval = b.InitialInterval - b.startTime = b.Clock.Now() -} - -// NextBackOff calculates the next backoff interval using the formula: -// Randomized interval = RetryInterval * (1 ± RandomizationFactor) -func (b *ExponentialBackOff) NextBackOff() time.Duration { - // Make sure we have not gone over the maximum elapsed time. 
- elapsed := b.GetElapsedTime() - next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) - b.incrementCurrentInterval() - if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime { - return b.Stop - } - return next -} - -// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance -// is created and is reset when Reset() is called. -// -// The elapsed time is computed using time.Now().UnixNano(). It is -// safe to call even while the backoff policy is used by a running -// ticker. -func (b *ExponentialBackOff) GetElapsedTime() time.Duration { - return b.Clock.Now().Sub(b.startTime) -} - -// Increments the current interval by multiplying it with the multiplier. -func (b *ExponentialBackOff) incrementCurrentInterval() { - // Check for overflow, if overflow is detected set the current interval to the max interval. - if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { - b.currentInterval = b.MaxInterval - } else { - b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) - } -} - -// Returns a random value from the following interval: -// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval]. -func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { - if randomizationFactor == 0 { - return currentInterval // make sure no randomness is used when randomizationFactor is 0. - } - var delta = randomizationFactor * float64(currentInterval) - var minInterval = float64(currentInterval) - delta - var maxInterval = float64(currentInterval) + delta - - // Get a random value from the range [minInterval, maxInterval]. - // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then - // we want a 33% chance for selecting either 1, 2 or 3. - return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) -} diff --git a/vendor/github.com/cenkalti/backoff/v4/retry.go b/vendor/github.com/cenkalti/backoff/v4/retry.go deleted file mode 100644 index b9c0c51cd..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/retry.go +++ /dev/null @@ -1,146 +0,0 @@ -package backoff - -import ( - "errors" - "time" -) - -// An OperationWithData is executing by RetryWithData() or RetryNotifyWithData(). -// The operation will be retried using a backoff policy if it returns an error. -type OperationWithData[T any] func() (T, error) - -// An Operation is executing by Retry() or RetryNotify(). -// The operation will be retried using a backoff policy if it returns an error. -type Operation func() error - -func (o Operation) withEmptyData() OperationWithData[struct{}] { - return func() (struct{}, error) { - return struct{}{}, o() - } -} - -// Notify is a notify-on-error function. It receives an operation error and -// backoff delay if the operation failed (with an error). -// -// NOTE that if the backoff policy stated to stop retrying, -// the notify function isn't called. -type Notify func(error, time.Duration) - -// Retry the operation o until it does not return error or BackOff stops. -// o is guaranteed to be run at least once. -// -// If o returns a *PermanentError, the operation is not retried, and the -// wrapped error is returned. -// -// Retry sleeps the goroutine for the duration returned by BackOff after a -// failed operation returns. 
-func Retry(o Operation, b BackOff) error { - return RetryNotify(o, b, nil) -} - -// RetryWithData is like Retry but returns data in the response too. -func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) { - return RetryNotifyWithData(o, b, nil) -} - -// RetryNotify calls notify function with the error and wait duration -// for each failed attempt before sleep. -func RetryNotify(operation Operation, b BackOff, notify Notify) error { - return RetryNotifyWithTimer(operation, b, notify, nil) -} - -// RetryNotifyWithData is like RetryNotify but returns data in the response too. -func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) { - return doRetryNotify(operation, b, notify, nil) -} - -// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer -// for each failed attempt before sleep. -// A default timer that uses system timer is used when nil is passed. -func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error { - _, err := doRetryNotify(operation.withEmptyData(), b, notify, t) - return err -} - -// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too. -func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { - return doRetryNotify(operation, b, notify, t) -} - -func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { - var ( - err error - next time.Duration - res T - ) - if t == nil { - t = &defaultTimer{} - } - - defer func() { - t.Stop() - }() - - ctx := getContext(b) - - b.Reset() - for { - res, err = operation() - if err == nil { - return res, nil - } - - var permanent *PermanentError - if errors.As(err, &permanent) { - return res, permanent.Err - } - - if next = b.NextBackOff(); next == Stop { - if cerr := ctx.Err(); cerr != nil { - return res, cerr - } - - return res, err - } - - if notify != nil { - notify(err, next) - } - - t.Start(next) - - select { - case <-ctx.Done(): - return res, ctx.Err() - case <-t.C(): - } - } -} - -// PermanentError signals that the operation should not be retried. -type PermanentError struct { - Err error -} - -func (e *PermanentError) Error() string { - return e.Err.Error() -} - -func (e *PermanentError) Unwrap() error { - return e.Err -} - -func (e *PermanentError) Is(target error) bool { - _, ok := target.(*PermanentError) - return ok -} - -// Permanent wraps the given err in a *PermanentError. -func Permanent(err error) error { - if err == nil { - return nil - } - return &PermanentError{ - Err: err, - } -} diff --git a/vendor/github.com/cenkalti/backoff/v4/ticker.go b/vendor/github.com/cenkalti/backoff/v4/ticker.go deleted file mode 100644 index df9d68bce..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/ticker.go +++ /dev/null @@ -1,97 +0,0 @@ -package backoff - -import ( - "context" - "sync" - "time" -) - -// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. -// -// Ticks will continue to arrive when the previous operation is still running, -// so operations that take a while to fail could run in quick succession. -type Ticker struct { - C <-chan time.Time - c chan time.Time - b BackOff - ctx context.Context - timer Timer - stop chan struct{} - stopOnce sync.Once -} - -// NewTicker returns a new Ticker containing a channel that will send -// the time at times specified by the BackOff argument. 
Ticker is -// guaranteed to tick at least once. The channel is closed when Stop -// method is called or BackOff stops. It is not safe to manipulate the -// provided backoff policy (notably calling NextBackOff or Reset) -// while the ticker is running. -func NewTicker(b BackOff) *Ticker { - return NewTickerWithTimer(b, &defaultTimer{}) -} - -// NewTickerWithTimer returns a new Ticker with a custom timer. -// A default timer that uses system timer is used when nil is passed. -func NewTickerWithTimer(b BackOff, timer Timer) *Ticker { - if timer == nil { - timer = &defaultTimer{} - } - c := make(chan time.Time) - t := &Ticker{ - C: c, - c: c, - b: b, - ctx: getContext(b), - timer: timer, - stop: make(chan struct{}), - } - t.b.Reset() - go t.run() - return t -} - -// Stop turns off a ticker. After Stop, no more ticks will be sent. -func (t *Ticker) Stop() { - t.stopOnce.Do(func() { close(t.stop) }) -} - -func (t *Ticker) run() { - c := t.c - defer close(c) - - // Ticker is guaranteed to tick at least once. - afterC := t.send(time.Now()) - - for { - if afterC == nil { - return - } - - select { - case tick := <-afterC: - afterC = t.send(tick) - case <-t.stop: - t.c = nil // Prevent future ticks from being sent to the channel. - return - case <-t.ctx.Done(): - return - } - } -} - -func (t *Ticker) send(tick time.Time) <-chan time.Time { - select { - case t.c <- tick: - case <-t.stop: - return nil - } - - next := t.b.NextBackOff() - if next == Stop { - t.Stop() - return nil - } - - t.timer.Start(next) - return t.timer.C() -} diff --git a/vendor/github.com/cenkalti/backoff/v4/timer.go b/vendor/github.com/cenkalti/backoff/v4/timer.go deleted file mode 100644 index 8120d0213..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/timer.go +++ /dev/null @@ -1,35 +0,0 @@ -package backoff - -import "time" - -type Timer interface { - Start(duration time.Duration) - Stop() - C() <-chan time.Time -} - -// defaultTimer implements Timer interface using time.Timer -type defaultTimer struct { - timer *time.Timer -} - -// C returns the timers channel which receives the current time when the timer fires. -func (t *defaultTimer) C() <-chan time.Time { - return t.timer.C -} - -// Start starts the timer to fire after the given duration -func (t *defaultTimer) Start(duration time.Duration) { - if t.timer == nil { - t.timer = time.NewTimer(duration) - } else { - t.timer.Reset(duration) - } -} - -// Stop is called when the timer is not used anymore and resources may be freed. -func (t *defaultTimer) Stop() { - if t.timer != nil { - t.timer.Stop() - } -} diff --git a/vendor/github.com/cenkalti/backoff/v4/tries.go b/vendor/github.com/cenkalti/backoff/v4/tries.go deleted file mode 100644 index 28d58ca37..000000000 --- a/vendor/github.com/cenkalti/backoff/v4/tries.go +++ /dev/null @@ -1,38 +0,0 @@ -package backoff - -import "time" - -/* -WithMaxRetries creates a wrapper around another BackOff, which will -return Stop if NextBackOff() has been called too many times since -the last time Reset() was called - -Note: Implementation is not thread-safe. 
-*/ -func WithMaxRetries(b BackOff, max uint64) BackOff { - return &backOffTries{delegate: b, maxTries: max} -} - -type backOffTries struct { - delegate BackOff - maxTries uint64 - numTries uint64 -} - -func (b *backOffTries) NextBackOff() time.Duration { - if b.maxTries == 0 { - return Stop - } - if b.maxTries > 0 { - if b.maxTries <= b.numTries { - return Stop - } - b.numTries++ - } - return b.delegate.NextBackOff() -} - -func (b *backOffTries) Reset() { - b.numTries = 0 - b.delegate.Reset() -} diff --git a/vendor/github.com/containerd/containerd/LICENSE b/vendor/github.com/containerd/containerd/LICENSE deleted file mode 100644 index 584149b6e..000000000 --- a/vendor/github.com/containerd/containerd/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - Copyright The containerd Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/containerd/containerd/NOTICE b/vendor/github.com/containerd/containerd/NOTICE deleted file mode 100644 index 8915f0277..000000000 --- a/vendor/github.com/containerd/containerd/NOTICE +++ /dev/null @@ -1,16 +0,0 @@ -Docker -Copyright 2012-2015 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/containerd/containerd/errdefs/errors.go b/vendor/github.com/containerd/containerd/errdefs/errors.go deleted file mode 100644 index 876225597..000000000 --- a/vendor/github.com/containerd/containerd/errdefs/errors.go +++ /dev/null @@ -1,92 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// Package errdefs defines the common errors used throughout containerd -// packages. -// -// Use with fmt.Errorf to add context to an error. -// -// To detect an error class, use the IsXXX functions to tell whether an error -// is of a certain type. -// -// The functions ToGRPC and FromGRPC can be used to map server-side and -// client-side errors to the correct types. -package errdefs - -import ( - "context" - "errors" -) - -// Definitions of common error types used throughout containerd. All containerd -// errors returned by most packages will map into one of these errors classes. -// Packages should return errors of these types when they want to instruct a -// client to take a particular action. -// -// For the most part, we just try to provide local grpc errors. Most conditions -// map very well to those defined by grpc. -var ( - ErrUnknown = errors.New("unknown") // used internally to represent a missed mapping. 
- ErrInvalidArgument = errors.New("invalid argument") - ErrNotFound = errors.New("not found") - ErrAlreadyExists = errors.New("already exists") - ErrFailedPrecondition = errors.New("failed precondition") - ErrUnavailable = errors.New("unavailable") - ErrNotImplemented = errors.New("not implemented") // represents not supported and unimplemented -) - -// IsInvalidArgument returns true if the error is due to an invalid argument -func IsInvalidArgument(err error) bool { - return errors.Is(err, ErrInvalidArgument) -} - -// IsNotFound returns true if the error is due to a missing object -func IsNotFound(err error) bool { - return errors.Is(err, ErrNotFound) -} - -// IsAlreadyExists returns true if the error is due to an already existing -// metadata item -func IsAlreadyExists(err error) bool { - return errors.Is(err, ErrAlreadyExists) -} - -// IsFailedPrecondition returns true if an operation could not proceed to the -// lack of a particular condition -func IsFailedPrecondition(err error) bool { - return errors.Is(err, ErrFailedPrecondition) -} - -// IsUnavailable returns true if the error is due to a resource being unavailable -func IsUnavailable(err error) bool { - return errors.Is(err, ErrUnavailable) -} - -// IsNotImplemented returns true if the error is due to not being implemented -func IsNotImplemented(err error) bool { - return errors.Is(err, ErrNotImplemented) -} - -// IsCanceled returns true if the error is due to `context.Canceled`. -func IsCanceled(err error) bool { - return errors.Is(err, context.Canceled) -} - -// IsDeadlineExceeded returns true if the error is due to -// `context.DeadlineExceeded`. -func IsDeadlineExceeded(err error) bool { - return errors.Is(err, context.DeadlineExceeded) -} diff --git a/vendor/github.com/containerd/containerd/errdefs/grpc.go b/vendor/github.com/containerd/containerd/errdefs/grpc.go deleted file mode 100644 index 7a9b33e05..000000000 --- a/vendor/github.com/containerd/containerd/errdefs/grpc.go +++ /dev/null @@ -1,147 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package errdefs - -import ( - "context" - "fmt" - "strings" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// ToGRPC will attempt to map the backend containerd error into a grpc error, -// using the original error message as a description. -// -// Further information may be extracted from certain errors depending on their -// type. -// -// If the error is unmapped, the original error will be returned to be handled -// by the regular grpc error handling stack. 
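For context on the errdefs package being dropped from vendor here: callers wrap its sentinel errors with fmt.Errorf and %w, then branch on the error class with the Is* predicates shown above. A minimal, illustrative sketch (findImage is a hypothetical caller, not something defined in this tree):

    package main

    import (
        "errors"
        "fmt"

        "github.com/containerd/containerd/errdefs"
    )

    // findImage is a hypothetical lookup that wraps ErrNotFound on a miss,
    // so callers can classify the failure without string matching.
    func findImage(name string) error {
        return fmt.Errorf("image %q: %w", name, errdefs.ErrNotFound)
    }

    func main() {
        err := findImage("example:latest")
        if errdefs.IsNotFound(err) {
            fmt.Println("not found:", err)
        }
        // errors.Is also works directly against the sentinel.
        fmt.Println(errors.Is(err, errdefs.ErrNotFound)) // true
    }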
-func ToGRPC(err error) error { - if err == nil { - return nil - } - - if isGRPCError(err) { - // error has already been mapped to grpc - return err - } - - switch { - case IsInvalidArgument(err): - return status.Errorf(codes.InvalidArgument, err.Error()) - case IsNotFound(err): - return status.Errorf(codes.NotFound, err.Error()) - case IsAlreadyExists(err): - return status.Errorf(codes.AlreadyExists, err.Error()) - case IsFailedPrecondition(err): - return status.Errorf(codes.FailedPrecondition, err.Error()) - case IsUnavailable(err): - return status.Errorf(codes.Unavailable, err.Error()) - case IsNotImplemented(err): - return status.Errorf(codes.Unimplemented, err.Error()) - case IsCanceled(err): - return status.Errorf(codes.Canceled, err.Error()) - case IsDeadlineExceeded(err): - return status.Errorf(codes.DeadlineExceeded, err.Error()) - } - - return err -} - -// ToGRPCf maps the error to grpc error codes, assembling the formatting string -// and combining it with the target error string. -// -// This is equivalent to errdefs.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) -func ToGRPCf(err error, format string, args ...interface{}) error { - return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) -} - -// FromGRPC returns the underlying error from a grpc service based on the grpc error code -func FromGRPC(err error) error { - if err == nil { - return nil - } - - var cls error // divide these into error classes, becomes the cause - - switch code(err) { - case codes.InvalidArgument: - cls = ErrInvalidArgument - case codes.AlreadyExists: - cls = ErrAlreadyExists - case codes.NotFound: - cls = ErrNotFound - case codes.Unavailable: - cls = ErrUnavailable - case codes.FailedPrecondition: - cls = ErrFailedPrecondition - case codes.Unimplemented: - cls = ErrNotImplemented - case codes.Canceled: - cls = context.Canceled - case codes.DeadlineExceeded: - cls = context.DeadlineExceeded - default: - cls = ErrUnknown - } - - msg := rebaseMessage(cls, err) - if msg != "" { - err = fmt.Errorf("%s: %w", msg, cls) - } else { - err = cls - } - - return err -} - -// rebaseMessage removes the repeats for an error at the end of an error -// string. This will happen when taking an error over grpc then remapping it. -// -// Effectively, we just remove the string of cls from the end of err if it -// appears there. -func rebaseMessage(cls error, err error) string { - desc := errDesc(err) - clss := cls.Error() - if desc == clss { - return "" - } - - return strings.TrimSuffix(desc, ": "+clss) -} - -func isGRPCError(err error) bool { - _, ok := status.FromError(err) - return ok -} - -func code(err error) codes.Code { - if s, ok := status.FromError(err); ok { - return s.Code() - } - return codes.Unknown -} - -func errDesc(err error) string { - if s, ok := status.FromError(err); ok { - return s.Message() - } - return err.Error() -} diff --git a/vendor/github.com/containerd/containerd/log/context.go b/vendor/github.com/containerd/containerd/log/context.go deleted file mode 100644 index 0db9562b8..000000000 --- a/vendor/github.com/containerd/containerd/log/context.go +++ /dev/null @@ -1,69 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
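The ToGRPC/FromGRPC pair removed above round-trips these error classes across a gRPC boundary: the server maps a classified error to a status code, and the client maps the code back to the matching sentinel. Roughly, as an illustrative sketch:

    package main

    import (
        "fmt"

        "github.com/containerd/containerd/errdefs"
    )

    func main() {
        // Server side: a classified error becomes a grpc status error (codes.NotFound here).
        orig := fmt.Errorf("image %q: %w", "example:latest", errdefs.ErrNotFound)
        grpcErr := errdefs.ToGRPC(orig)

        // Client side: the status code is mapped back onto the errdefs sentinel.
        back := errdefs.FromGRPC(grpcErr)
        fmt.Println(errdefs.IsNotFound(back)) // true
    }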
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package log - -import ( - "context" - - "github.com/sirupsen/logrus" -) - -var ( - // G is an alias for GetLogger. - // - // We may want to define this locally to a package to get package tagged log - // messages. - G = GetLogger - - // L is an alias for the standard logger. - L = logrus.NewEntry(logrus.StandardLogger()) -) - -type ( - loggerKey struct{} -) - -const ( - // RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to - // ensure the formatted time is always the same number of characters. - RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" - - // TextFormat represents the text logging format - TextFormat = "text" - - // JSONFormat represents the JSON logging format - JSONFormat = "json" -) - -// WithLogger returns a new context with the provided logger. Use in -// combination with logger.WithField(s) for great effect. -func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context { - e := logger.WithContext(ctx) - return context.WithValue(ctx, loggerKey{}, e) -} - -// GetLogger retrieves the current logger from the context. If no logger is -// available, the default logger is returned. -func GetLogger(ctx context.Context) *logrus.Entry { - logger := ctx.Value(loggerKey{}) - - if logger == nil { - return L.WithContext(ctx) - } - - return logger.(*logrus.Entry) -} diff --git a/vendor/github.com/containerd/containerd/pkg/userns/userns_linux.go b/vendor/github.com/containerd/containerd/pkg/userns/userns_linux.go deleted file mode 100644 index 6656465ef..000000000 --- a/vendor/github.com/containerd/containerd/pkg/userns/userns_linux.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package userns - -import ( - "bufio" - "fmt" - "os" - "sync" -) - -var ( - inUserNS bool - nsOnce sync.Once -) - -// RunningInUserNS detects whether we are currently running in a user namespace. -// Originally copied from github.com/lxc/lxd/shared/util.go -func RunningInUserNS() bool { - nsOnce.Do(func() { - file, err := os.Open("/proc/self/uid_map") - if err != nil { - // This kernel-provided file only exists if user namespaces are supported - return - } - defer file.Close() - - buf := bufio.NewReader(file) - l, _, err := buf.ReadLine() - if err != nil { - return - } - - line := string(l) - var a, b, c int64 - fmt.Sscanf(line, "%d %d %d", &a, &b, &c) - - /* - * We assume we are in the initial user namespace if we have a full - * range - 4294967295 uids starting at uid 0. 
- */ - if a == 0 && b == 0 && c == 4294967295 { - return - } - inUserNS = true - }) - return inUserNS -} diff --git a/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go b/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go deleted file mode 100644 index 4f8d7dd2d..000000000 --- a/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go +++ /dev/null @@ -1,26 +0,0 @@ -//go:build !linux -// +build !linux - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package userns - -// RunningInUserNS is a stub for non-Linux systems -// Always returns false -func RunningInUserNS() bool { - return false -} diff --git a/vendor/github.com/containerd/containerd/platforms/compare.go b/vendor/github.com/containerd/containerd/platforms/compare.go deleted file mode 100644 index 3913ef663..000000000 --- a/vendor/github.com/containerd/containerd/platforms/compare.go +++ /dev/null @@ -1,203 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "strconv" - "strings" - - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -// MatchComparer is able to match and compare platforms to -// filter and sort platforms. -type MatchComparer interface { - Matcher - - Less(specs.Platform, specs.Platform) bool -} - -// platformVector returns an (ordered) vector of appropriate specs.Platform -// objects to try matching for the given platform object (see platforms.Only). 
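The userns helper deleted above is normally used as a simple capability guard; a rough usage sketch:

    package main

    import (
        "fmt"

        "github.com/containerd/containerd/pkg/userns"
    )

    func main() {
        // On Linux this inspects /proc/self/uid_map once; on other OSes the stub always returns false.
        if userns.RunningInUserNS() {
            fmt.Println("inside a user namespace; uid 0 may be an unprivileged mapping")
        } else {
            fmt.Println("running in the initial user namespace")
        }
    }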
-func platformVector(platform specs.Platform) []specs.Platform { - vector := []specs.Platform{platform} - - switch platform.Architecture { - case "amd64": - if amd64Version, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && amd64Version > 1 { - for amd64Version--; amd64Version >= 1; amd64Version-- { - vector = append(vector, specs.Platform{ - Architecture: platform.Architecture, - OS: platform.OS, - OSVersion: platform.OSVersion, - OSFeatures: platform.OSFeatures, - Variant: "v" + strconv.Itoa(amd64Version), - }) - } - } - vector = append(vector, specs.Platform{ - Architecture: "386", - OS: platform.OS, - OSVersion: platform.OSVersion, - OSFeatures: platform.OSFeatures, - }) - case "arm": - if armVersion, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && armVersion > 5 { - for armVersion--; armVersion >= 5; armVersion-- { - vector = append(vector, specs.Platform{ - Architecture: platform.Architecture, - OS: platform.OS, - OSVersion: platform.OSVersion, - OSFeatures: platform.OSFeatures, - Variant: "v" + strconv.Itoa(armVersion), - }) - } - } - case "arm64": - variant := platform.Variant - if variant == "" { - variant = "v8" - } - vector = append(vector, platformVector(specs.Platform{ - Architecture: "arm", - OS: platform.OS, - OSVersion: platform.OSVersion, - OSFeatures: platform.OSFeatures, - Variant: variant, - })...) - } - - return vector -} - -// Only returns a match comparer for a single platform -// using default resolution logic for the platform. -// -// For arm/v8, will also match arm/v7, arm/v6 and arm/v5 -// For arm/v7, will also match arm/v6 and arm/v5 -// For arm/v6, will also match arm/v5 -// For amd64, will also match 386 -func Only(platform specs.Platform) MatchComparer { - return Ordered(platformVector(Normalize(platform))...) -} - -// OnlyStrict returns a match comparer for a single platform. -// -// Unlike Only, OnlyStrict does not match sub platforms. -// So, "arm/vN" will not match "arm/vM" where M < N, -// and "amd64" will not also match "386". -// -// OnlyStrict matches non-canonical forms. -// So, "arm64" matches "arm/64/v8". -func OnlyStrict(platform specs.Platform) MatchComparer { - return Ordered(Normalize(platform)) -} - -// Ordered returns a platform MatchComparer which matches any of the platforms -// but orders them in order they are provided. -func Ordered(platforms ...specs.Platform) MatchComparer { - matchers := make([]Matcher, len(platforms)) - for i := range platforms { - matchers[i] = NewMatcher(platforms[i]) - } - return orderedPlatformComparer{ - matchers: matchers, - } -} - -// Any returns a platform MatchComparer which matches any of the platforms -// with no preference for ordering. -func Any(platforms ...specs.Platform) MatchComparer { - matchers := make([]Matcher, len(platforms)) - for i := range platforms { - matchers[i] = NewMatcher(platforms[i]) - } - return anyPlatformComparer{ - matchers: matchers, - } -} - -// All is a platform MatchComparer which matches all platforms -// with preference for ordering. 
-var All MatchComparer = allPlatformComparer{} - -type orderedPlatformComparer struct { - matchers []Matcher -} - -func (c orderedPlatformComparer) Match(platform specs.Platform) bool { - for _, m := range c.matchers { - if m.Match(platform) { - return true - } - } - return false -} - -func (c orderedPlatformComparer) Less(p1 specs.Platform, p2 specs.Platform) bool { - for _, m := range c.matchers { - p1m := m.Match(p1) - p2m := m.Match(p2) - if p1m && !p2m { - return true - } - if p1m || p2m { - return false - } - } - return false -} - -type anyPlatformComparer struct { - matchers []Matcher -} - -func (c anyPlatformComparer) Match(platform specs.Platform) bool { - for _, m := range c.matchers { - if m.Match(platform) { - return true - } - } - return false -} - -func (c anyPlatformComparer) Less(p1, p2 specs.Platform) bool { - var p1m, p2m bool - for _, m := range c.matchers { - if !p1m && m.Match(p1) { - p1m = true - } - if !p2m && m.Match(p2) { - p2m = true - } - if p1m && p2m { - return false - } - } - // If one matches, and the other does, sort match first - return p1m && !p2m -} - -type allPlatformComparer struct{} - -func (allPlatformComparer) Match(specs.Platform) bool { - return true -} - -func (allPlatformComparer) Less(specs.Platform, specs.Platform) bool { - return false -} diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go deleted file mode 100644 index 046e0356d..000000000 --- a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go +++ /dev/null @@ -1,131 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "bufio" - "fmt" - "os" - "runtime" - "strings" - "sync" - - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/log" -) - -// Present the ARM instruction set architecture, eg: v7, v8 -// Don't use this value directly; call cpuVariant() instead. -var cpuVariantValue string - -var cpuVariantOnce sync.Once - -func cpuVariant() string { - cpuVariantOnce.Do(func() { - if isArmArch(runtime.GOARCH) { - cpuVariantValue = getCPUVariant() - } - }) - return cpuVariantValue -} - -// For Linux, the kernel has already detected the ABI, ISA and Features. -// So we don't need to access the ARM registers to detect platform information -// by ourselves. We can just parse these information from /proc/cpuinfo -func getCPUInfo(pattern string) (info string, err error) { - if !isLinuxOS(runtime.GOOS) { - return "", fmt.Errorf("getCPUInfo for OS %s: %w", runtime.GOOS, errdefs.ErrNotImplemented) - } - - cpuinfo, err := os.Open("/proc/cpuinfo") - if err != nil { - return "", err - } - defer cpuinfo.Close() - - // Start to Parse the Cpuinfo line by line. For SMP SoC, we parse - // the first core is enough. 
- scanner := bufio.NewScanner(cpuinfo) - for scanner.Scan() { - newline := scanner.Text() - list := strings.Split(newline, ":") - - if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) { - return strings.TrimSpace(list[1]), nil - } - } - - // Check whether the scanner encountered errors - err = scanner.Err() - if err != nil { - return "", err - } - - return "", fmt.Errorf("getCPUInfo for pattern: %s: %w", pattern, errdefs.ErrNotFound) -} - -func getCPUVariant() string { - if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { - // Windows/Darwin only supports v7 for ARM32 and v8 for ARM64 and so we can use - // runtime.GOARCH to determine the variants - var variant string - switch runtime.GOARCH { - case "arm64": - variant = "v8" - case "arm": - variant = "v7" - default: - variant = "unknown" - } - - return variant - } - - variant, err := getCPUInfo("Cpu architecture") - if err != nil { - log.L.WithError(err).Error("failure getting variant") - return "" - } - - // handle edge case for Raspberry Pi ARMv6 devices (which due to a kernel quirk, report "CPU architecture: 7") - // https://www.raspberrypi.org/forums/viewtopic.php?t=12614 - if runtime.GOARCH == "arm" && variant == "7" { - model, err := getCPUInfo("model name") - if err == nil && strings.HasPrefix(strings.ToLower(model), "armv6-compatible") { - variant = "6" - } - } - - switch strings.ToLower(variant) { - case "8", "aarch64": - variant = "v8" - case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": - variant = "v7" - case "6", "6tej": - variant = "v6" - case "5", "5t", "5te", "5tej": - variant = "v5" - case "4", "4t": - variant = "v4" - case "3": - variant = "v3" - default: - variant = "unknown" - } - - return variant -} diff --git a/vendor/github.com/containerd/containerd/platforms/database.go b/vendor/github.com/containerd/containerd/platforms/database.go deleted file mode 100644 index dbe9957ca..000000000 --- a/vendor/github.com/containerd/containerd/platforms/database.go +++ /dev/null @@ -1,116 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "runtime" - "strings" -) - -// isLinuxOS returns true if the operating system is Linux. -// -// The OS value should be normalized before calling this function. -func isLinuxOS(os string) bool { - return os == "linux" -} - -// These function are generated from https://golang.org/src/go/build/syslist.go. -// -// We use switch statements because they are slightly faster than map lookups -// and use a little less memory. - -// isKnownOS returns true if we know about the operating system. -// -// The OS value should be normalized before calling this function. -func isKnownOS(os string) bool { - switch os { - case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "ios", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos": - return true - } - return false -} - -// isArmArch returns true if the architecture is ARM. 
-// -// The arch value should be normalized before being passed to this function. -func isArmArch(arch string) bool { - switch arch { - case "arm", "arm64": - return true - } - return false -} - -// isKnownArch returns true if we know about the architecture. -// -// The arch value should be normalized before being passed to this function. -func isKnownArch(arch string) bool { - switch arch { - case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "loong64", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm": - return true - } - return false -} - -func normalizeOS(os string) string { - if os == "" { - return runtime.GOOS - } - os = strings.ToLower(os) - - switch os { - case "macos": - os = "darwin" - } - return os -} - -// normalizeArch normalizes the architecture. -func normalizeArch(arch, variant string) (string, string) { - arch, variant = strings.ToLower(arch), strings.ToLower(variant) - switch arch { - case "i386": - arch = "386" - variant = "" - case "x86_64", "x86-64", "amd64": - arch = "amd64" - if variant == "v1" { - variant = "" - } - case "aarch64", "arm64": - arch = "arm64" - switch variant { - case "8", "v8": - variant = "" - } - case "armhf": - arch = "arm" - variant = "v7" - case "armel": - arch = "arm" - variant = "v6" - case "arm": - switch variant { - case "", "7": - variant = "v7" - case "5", "6", "8": - variant = "v" + variant - } - } - - return arch, variant -} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults.go b/vendor/github.com/containerd/containerd/platforms/defaults.go deleted file mode 100644 index cfa3ff34a..000000000 --- a/vendor/github.com/containerd/containerd/platforms/defaults.go +++ /dev/null @@ -1,27 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -// DefaultString returns the default string specifier for the platform. -func DefaultString() string { - return Format(DefaultSpec()) -} - -// DefaultStrict returns strict form of Default. -func DefaultStrict() MatchComparer { - return OnlyStrict(DefaultSpec()) -} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go b/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go deleted file mode 100644 index e249fe48d..000000000 --- a/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go +++ /dev/null @@ -1,45 +0,0 @@ -//go:build darwin -// +build darwin - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "runtime" - - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -// DefaultSpec returns the current platform's default platform specification. -func DefaultSpec() specs.Platform { - return specs.Platform{ - OS: runtime.GOOS, - Architecture: runtime.GOARCH, - // The Variant field will be empty if arch != ARM. - Variant: cpuVariant(), - } -} - -// Default returns the default matcher for the platform. -func Default() MatchComparer { - return Ordered(DefaultSpec(), specs.Platform{ - // darwin runtime also supports Linux binary via runu/LKL - OS: "linux", - Architecture: runtime.GOARCH, - }) -} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_unix.go b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go deleted file mode 100644 index 49690f1b3..000000000 --- a/vendor/github.com/containerd/containerd/platforms/defaults_unix.go +++ /dev/null @@ -1,41 +0,0 @@ -//go:build !windows && !darwin -// +build !windows,!darwin - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "runtime" - - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -// DefaultSpec returns the current platform's default platform specification. -func DefaultSpec() specs.Platform { - return specs.Platform{ - OS: runtime.GOOS, - Architecture: runtime.GOARCH, - // The Variant field will be empty if arch != ARM. - Variant: cpuVariant(), - } -} - -// Default returns the default matcher for the platform. -func Default() MatchComparer { - return Only(DefaultSpec()) -} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go deleted file mode 100644 index ff9771a60..000000000 --- a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go +++ /dev/null @@ -1,95 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "fmt" - "runtime" - "strconv" - "strings" - - imagespec "github.com/opencontainers/image-spec/specs-go/v1" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/sys/windows" -) - -// DefaultSpec returns the current platform's default platform specification. 
-func DefaultSpec() specs.Platform { - major, minor, build := windows.RtlGetNtVersionNumbers() - return specs.Platform{ - OS: runtime.GOOS, - Architecture: runtime.GOARCH, - OSVersion: fmt.Sprintf("%d.%d.%d", major, minor, build), - // The Variant field will be empty if arch != ARM. - Variant: cpuVariant(), - } -} - -type matchComparer struct { - defaults Matcher - osVersionPrefix string -} - -// Match matches platform with the same windows major, minor -// and build version. -func (m matchComparer) Match(p specs.Platform) bool { - match := m.defaults.Match(p) - - if match && p.OS == "windows" { - if strings.HasPrefix(p.OSVersion, m.osVersionPrefix) { - return true - } - return p.OSVersion == "" - } - return false -} - -// Less sorts matched platforms in front of other platforms. -// For matched platforms, it puts platforms with larger revision -// number in front. -func (m matchComparer) Less(p1, p2 imagespec.Platform) bool { - m1, m2 := m.Match(p1), m.Match(p2) - if m1 && m2 { - r1, r2 := revision(p1.OSVersion), revision(p2.OSVersion) - return r1 > r2 - } - return m1 && !m2 -} - -func revision(v string) int { - parts := strings.Split(v, ".") - if len(parts) < 4 { - return 0 - } - r, err := strconv.Atoi(parts[3]) - if err != nil { - return 0 - } - return r -} - -// Default returns the current platform's default platform specification. -func Default() MatchComparer { - major, minor, build := windows.RtlGetNtVersionNumbers() - return matchComparer{ - defaults: Ordered(DefaultSpec(), specs.Platform{ - OS: "linux", - Architecture: runtime.GOARCH, - }), - osVersionPrefix: fmt.Sprintf("%d.%d.%d", major, minor, build), - } -} diff --git a/vendor/github.com/containerd/containerd/platforms/platforms.go b/vendor/github.com/containerd/containerd/platforms/platforms.go deleted file mode 100644 index 234309941..000000000 --- a/vendor/github.com/containerd/containerd/platforms/platforms.go +++ /dev/null @@ -1,261 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// Package platforms provides a toolkit for normalizing, matching and -// specifying container platforms. -// -// Centered around OCI platform specifications, we define a string-based -// specifier syntax that can be used for user input. With a specifier, users -// only need to specify the parts of the platform that are relevant to their -// context, providing an operating system or architecture or both. -// -// How do I use this package? -// -// The vast majority of use cases should simply use the match function with -// user input. The first step is to parse a specifier into a matcher: -// -// m, err := Parse("linux") -// if err != nil { ... } -// -// Once you have a matcher, use it to match against the platform declared by a -// component, typically from an image or runtime. 
Since extracting an images -// platform is a little more involved, we'll use an example against the -// platform default: -// -// if ok := m.Match(Default()); !ok { /* doesn't match */ } -// -// This can be composed in loops for resolving runtimes or used as a filter for -// fetch and select images. -// -// More details of the specifier syntax and platform spec follow. -// -// # Declaring Platform Support -// -// Components that have strict platform requirements should use the OCI -// platform specification to declare their support. Typically, this will be -// images and runtimes that should make these declaring which platform they -// support specifically. This looks roughly as follows: -// -// type Platform struct { -// Architecture string -// OS string -// Variant string -// } -// -// Most images and runtimes should at least set Architecture and OS, according -// to their GOARCH and GOOS values, respectively (follow the OCI image -// specification when in doubt). ARM should set variant under certain -// discussions, which are outlined below. -// -// # Platform Specifiers -// -// While the OCI platform specifications provide a tool for components to -// specify structured information, user input typically doesn't need the full -// context and much can be inferred. To solve this problem, we introduced -// "specifiers". A specifier has the format -// `||/[/]`. The user can provide either the -// operating system or the architecture or both. -// -// An example of a common specifier is `linux/amd64`. If the host has a default -// of runtime that matches this, the user can simply provide the component that -// matters. For example, if a image provides amd64 and arm64 support, the -// operating system, `linux` can be inferred, so they only have to provide -// `arm64` or `amd64`. Similar behavior is implemented for operating systems, -// where the architecture may be known but a runtime may support images from -// different operating systems. -// -// # Normalization -// -// Because not all users are familiar with the way the Go runtime represents -// platforms, several normalizations have been provided to make this package -// easier to user. -// -// The following are performed for architectures: -// -// Value Normalized -// aarch64 arm64 -// armhf arm -// armel arm/v6 -// i386 386 -// x86_64 amd64 -// x86-64 amd64 -// -// We also normalize the operating system `macos` to `darwin`. -// -// # ARM Support -// -// To qualify ARM architecture, the Variant field is used to qualify the arm -// version. The most common arm version, v7, is represented without the variant -// unless it is explicitly provided. This is treated as equivalent to armhf. A -// previous architecture, armel, will be normalized to arm/v6. -// -// While these normalizations are provided, their support on arm platforms has -// not yet been fully implemented and tested. -package platforms - -import ( - "fmt" - "path" - "regexp" - "runtime" - "strconv" - "strings" - - "github.com/containerd/containerd/errdefs" - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -var ( - specifierRe = regexp.MustCompile(`^[A-Za-z0-9_-]+$`) -) - -// Matcher matches platforms specifications, provided by an image or runtime. -type Matcher interface { - Match(platform specs.Platform) bool -} - -// NewMatcher returns a simple matcher based on the provided platform -// specification. The returned matcher only looks for equality based on os, -// architecture and variant. 
-// -// One may implement their own matcher if this doesn't provide the required -// functionality. -// -// Applications should opt to use `Match` over directly parsing specifiers. -func NewMatcher(platform specs.Platform) Matcher { - return &matcher{ - Platform: Normalize(platform), - } -} - -type matcher struct { - specs.Platform -} - -func (m *matcher) Match(platform specs.Platform) bool { - normalized := Normalize(platform) - return m.OS == normalized.OS && - m.Architecture == normalized.Architecture && - m.Variant == normalized.Variant -} - -func (m *matcher) String() string { - return Format(m.Platform) -} - -// Parse parses the platform specifier syntax into a platform declaration. -// -// Platform specifiers are in the format `||/[/]`. -// The minimum required information for a platform specifier is the operating -// system or architecture. If there is only a single string (no slashes), the -// value will be matched against the known set of operating systems, then fall -// back to the known set of architectures. The missing component will be -// inferred based on the local environment. -func Parse(specifier string) (specs.Platform, error) { - if strings.Contains(specifier, "*") { - // TODO(stevvooe): need to work out exact wildcard handling - return specs.Platform{}, fmt.Errorf("%q: wildcards not yet supported: %w", specifier, errdefs.ErrInvalidArgument) - } - - parts := strings.Split(specifier, "/") - - for _, part := range parts { - if !specifierRe.MatchString(part) { - return specs.Platform{}, fmt.Errorf("%q is an invalid component of %q: platform specifier component must match %q: %w", part, specifier, specifierRe.String(), errdefs.ErrInvalidArgument) - } - } - - var p specs.Platform - switch len(parts) { - case 1: - // in this case, we will test that the value might be an OS, then look - // it up. If it is not known, we'll treat it as an architecture. Since - // we have very little information about the platform here, we are - // going to be a little more strict if we don't know about the argument - // value. - p.OS = normalizeOS(parts[0]) - if isKnownOS(p.OS) { - // picks a default architecture - p.Architecture = runtime.GOARCH - if p.Architecture == "arm" && cpuVariant() != "v7" { - p.Variant = cpuVariant() - } - - return p, nil - } - - p.Architecture, p.Variant = normalizeArch(parts[0], "") - if p.Architecture == "arm" && p.Variant == "v7" { - p.Variant = "" - } - if isKnownArch(p.Architecture) { - p.OS = runtime.GOOS - return p, nil - } - - return specs.Platform{}, fmt.Errorf("%q: unknown operating system or architecture: %w", specifier, errdefs.ErrInvalidArgument) - case 2: - // In this case, we treat as a regular os/arch pair. We don't care - // about whether or not we know of the platform. - p.OS = normalizeOS(parts[0]) - p.Architecture, p.Variant = normalizeArch(parts[1], "") - if p.Architecture == "arm" && p.Variant == "v7" { - p.Variant = "" - } - - return p, nil - case 3: - // we have a fully specified variant, this is rare - p.OS = normalizeOS(parts[0]) - p.Architecture, p.Variant = normalizeArch(parts[1], parts[2]) - if p.Architecture == "arm64" && p.Variant == "" { - p.Variant = "v8" - } - - return p, nil - } - - return specs.Platform{}, fmt.Errorf("%q: cannot parse platform specifier: %w", specifier, errdefs.ErrInvalidArgument) -} - -// MustParse is like Parses but panics if the specifier cannot be parsed. -// Simplifies initialization of global variables. 
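Taken together, the platforms helpers removed in this hunk parse user-supplied specifiers, normalize them, and build matchers that also accept compatible sub-platforms. A small, illustrative sketch of typical use (the output comments assume an arm64 Linux host):

    package main

    import (
        "fmt"

        "github.com/containerd/containerd/platforms"
    )

    func main() {
        // Parse normalizes a user-supplied specifier into an OCI platform.
        p, err := platforms.Parse("linux/arm64")
        if err != nil {
            panic(err)
        }
        fmt.Println(platforms.Format(p)) // linux/arm64

        // Only also matches compatible sub-platforms (e.g. linux/arm/v7 for arm64),
        // whereas OnlyStrict would match only the exact platform.
        m := platforms.Only(p)
        fmt.Println(m.Match(platforms.DefaultSpec())) // true on an arm64 Linux host
    }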
-func MustParse(specifier string) specs.Platform { - p, err := Parse(specifier) - if err != nil { - panic("platform: Parse(" + strconv.Quote(specifier) + "): " + err.Error()) - } - return p -} - -// Format returns a string specifier from the provided platform specification. -func Format(platform specs.Platform) string { - if platform.OS == "" { - return "unknown" - } - - return path.Join(platform.OS, platform.Architecture, platform.Variant) -} - -// Normalize validates and translate the platform to the canonical value. -// -// For example, if "Aarch64" is encountered, we change it to "arm64" or if -// "x86_64" is encountered, it becomes "amd64". -func Normalize(platform specs.Platform) specs.Platform { - platform.OS = normalizeOS(platform.OS) - platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant) - return platform -} diff --git a/vendor/github.com/cpuguy83/dockercfg/LICENSE b/vendor/github.com/cpuguy83/dockercfg/LICENSE deleted file mode 100644 index 8ed681804..000000000 --- a/vendor/github.com/cpuguy83/dockercfg/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2020 Brian Goff - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/cpuguy83/dockercfg/README.md b/vendor/github.com/cpuguy83/dockercfg/README.md deleted file mode 100644 index 880ab8010..000000000 --- a/vendor/github.com/cpuguy83/dockercfg/README.md +++ /dev/null @@ -1,8 +0,0 @@ -### github.com/cpuguy83/dockercfg -Go library to load docker CLI configs, auths, etc. with minimal deps. -So far the only deps are on the stdlib. - -### Usage -See the [godoc](https://godoc.org/github.com/cpuguy83/dockercfg) for API details. - -I'm currently using this in [zapp](https://github.com/cpuguy83/zapp/blob/d25c43d4cd7ccf29fba184aafbc720a753e1a15d/main.go#L58-L83) to handle registry auth instead of always asking the user to enter it. \ No newline at end of file diff --git a/vendor/github.com/cpuguy83/dockercfg/auth.go b/vendor/github.com/cpuguy83/dockercfg/auth.go deleted file mode 100644 index 5a9891b87..000000000 --- a/vendor/github.com/cpuguy83/dockercfg/auth.go +++ /dev/null @@ -1,184 +0,0 @@ -package dockercfg - -import ( - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "os" - "os/exec" - "runtime" - "strings" -) - -// This is used by the docker CLI in casses where an oauth identity token is used. 
-// In that case the username is stored litterally as `` -// When fetching the credentials we check for this value to determine if -const tokenUsername = "" - -// GetRegistryCredentials gets registry credentials for the passed in registry host. -// -// This will use `LoadDefaultConfig` to read registry auth details from the config. -// If the config doesn't exist, it will attempt to load registry credentials using the default credential helper for the platform. -func GetRegistryCredentials(hostname string) (string, string, error) { - cfg, err := LoadDefaultConfig() - if err != nil { - if !os.IsNotExist(err) { - return "", "", err - } - return GetCredentialsFromHelper("", hostname) - } - return cfg.GetRegistryCredentials(hostname) -} - -// ResolveRegistryHost can be used to transform a docker registry host name into what is used for the docker config/cred helpers -// -// This is useful for using with containerd authorizers. -// Natrually this only transforms docker hub URLs. -func ResolveRegistryHost(host string) string { - switch host { - case "index.docker.io", "docker.io", "https://index.docker.io/v1/", "registry-1.docker.io": - return "https://index.docker.io/v1/" - } - return host -} - -// GetRegistryCredentials gets credentials, if any, for the provided hostname -// -// Hostnames should already be resolved using `ResolveRegistryAuth` -// -// If the returned username string is empty, the password is an identity token. -func (c *Config) GetRegistryCredentials(hostname string) (string, string, error) { - h, ok := c.CredentialHelpers[hostname] - if ok { - return GetCredentialsFromHelper(h, hostname) - } - - if c.CredentialsStore != "" { - return GetCredentialsFromHelper(c.CredentialsStore, hostname) - } - - auth, ok := c.AuthConfigs[hostname] - if !ok { - return GetCredentialsFromHelper("", hostname) - } - - if auth.IdentityToken != "" { - return "", auth.IdentityToken, nil - } - - if auth.Username != "" && auth.Password != "" { - return auth.Username, auth.Password, nil - } - - return DecodeBase64Auth(auth) -} - -// DecodeBase64Auth decodes the legacy file-based auth storage from the docker CLI. -// It takes the "Auth" filed from AuthConfig and decodes that into a username and password. -// -// If "Auth" is empty, an empty user/pass will be returned, but not an error. -func DecodeBase64Auth(auth AuthConfig) (string, string, error) { - if auth.Auth == "" { - return "", "", nil - } - - decLen := base64.StdEncoding.DecodedLen(len(auth.Auth)) - decoded := make([]byte, decLen) - n, err := base64.StdEncoding.Decode(decoded, []byte(auth.Auth)) - if err != nil { - return "", "", fmt.Errorf("error decoding auth from file: %w", err) - } - - if n > decLen { - return "", "", fmt.Errorf("decoded value is longer than expected length, expected: %d, actual: %d", decLen, n) - } - - split := strings.SplitN(string(decoded), ":", 2) - if len(split) != 2 { - return "", "", errors.New("invalid auth string") - } - - return split[0], strings.Trim(split[1], "\x00"), nil -} - -// Errors from credential helpers -var ( - ErrCredentialsNotFound = errors.New("credentials not found in native keychain") - ErrCredentialsMissingServerURL = errors.New("no credentials server URL") -) - -// GetCredentialsFromHelper attempts to lookup credentials from the passed in docker credential helper. -// -// The credential helpoer should just be the suffix name (no "docker-credential-"). -// If the passed in helper program is empty this will look up the default helper for the platform. 
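The dockercfg helpers deleted above resolve a registry host to its canonical config key and then look up credentials from the CLI config file or a platform credential helper. An illustrative sketch of how they fit together:

    package main

    import (
        "fmt"

        "github.com/cpuguy83/dockercfg"
    )

    func main() {
        // Docker Hub has several aliases; resolve to the canonical key used in config.json first.
        host := dockercfg.ResolveRegistryHost("docker.io") // https://index.docker.io/v1/

        user, pass, err := dockercfg.GetRegistryCredentials(host)
        if err != nil {
            panic(err)
        }
        // Per the package docs, an empty username means the password is an identity token.
        if user == "" && pass != "" {
            fmt.Println("got an identity token")
        } else {
            fmt.Println("got user/password credentials for", user)
        }
    }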
-// -// If the credentials are not found, no error is returned, only empty credentials. -// -// Hostnames should already be resolved using `ResolveRegistryAuth` -// -// If the username string is empty, the password string is an identity token. -func GetCredentialsFromHelper(helper, hostname string) (string, string, error) { - if helper == "" { - helper = getCredentialHelper() - } - if helper == "" { - return "", "", nil - } - - p, err := exec.LookPath("docker-credential-" + helper) - if err != nil { - return "", "", nil - } - - cmd := exec.Command(p, "get") - cmd.Stdin = strings.NewReader(hostname) - - b, err := cmd.Output() - if err != nil { - s := strings.TrimSpace(string(b)) - - switch s { - case ErrCredentialsNotFound.Error(): - return "", "", nil - case ErrCredentialsMissingServerURL.Error(): - return "", "", errors.New(s) - default: - } - - return "", "", err - } - - var creds struct { - Username string - Secret string - } - - if err := json.Unmarshal(b, &creds); err != nil { - return "", "", err - } - - // When tokenUsername is used, the output is an identity token and the username is garbage - if creds.Username == tokenUsername { - creds.Username = "" - } - - return creds.Username, creds.Secret, nil -} - -// getCredentialHelper gets the default credential helper name for the current platform. -func getCredentialHelper() string { - switch runtime.GOOS { - case "linux": - if _, err := exec.LookPath("pass"); err == nil { - return "pass" - } - return "secretservice" - case "darwin": - return "osxkeychain" - case "windows": - return "wincred" - default: - return "" - } -} diff --git a/vendor/github.com/cpuguy83/dockercfg/config.go b/vendor/github.com/cpuguy83/dockercfg/config.go deleted file mode 100644 index 88736cabc..000000000 --- a/vendor/github.com/cpuguy83/dockercfg/config.go +++ /dev/null @@ -1,65 +0,0 @@ -package dockercfg - -// Config represents the on disk format of the docker CLI's config file. 
-type Config struct { - AuthConfigs map[string]AuthConfig `json:"auths"` - HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"` - PsFormat string `json:"psFormat,omitempty"` - ImagesFormat string `json:"imagesFormat,omitempty"` - NetworksFormat string `json:"networksFormat,omitempty"` - PluginsFormat string `json:"pluginsFormat,omitempty"` - VolumesFormat string `json:"volumesFormat,omitempty"` - StatsFormat string `json:"statsFormat,omitempty"` - DetachKeys string `json:"detachKeys,omitempty"` - CredentialsStore string `json:"credsStore,omitempty"` - CredentialHelpers map[string]string `json:"credHelpers,omitempty"` - Filename string `json:"-"` // Note: for internal use only - ServiceInspectFormat string `json:"serviceInspectFormat,omitempty"` - ServicesFormat string `json:"servicesFormat,omitempty"` - TasksFormat string `json:"tasksFormat,omitempty"` - SecretFormat string `json:"secretFormat,omitempty"` - ConfigFormat string `json:"configFormat,omitempty"` - NodesFormat string `json:"nodesFormat,omitempty"` - PruneFilters []string `json:"pruneFilters,omitempty"` - Proxies map[string]ProxyConfig `json:"proxies,omitempty"` - Experimental string `json:"experimental,omitempty"` - StackOrchestrator string `json:"stackOrchestrator,omitempty"` - Kubernetes *KubernetesConfig `json:"kubernetes,omitempty"` - CurrentContext string `json:"currentContext,omitempty"` - CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"` - Aliases map[string]string `json:"aliases,omitempty"` -} - -// ProxyConfig contains proxy configuration settings -type ProxyConfig struct { - HTTPProxy string `json:"httpProxy,omitempty"` - HTTPSProxy string `json:"httpsProxy,omitempty"` - NoProxy string `json:"noProxy,omitempty"` - FTPProxy string `json:"ftpProxy,omitempty"` -} - -// AuthConfig contains authorization information for connecting to a Registry -type AuthConfig struct { - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - Auth string `json:"auth,omitempty"` - - // Email is an optional value associated with the username. - // This field is deprecated and will be removed in a later - // version of docker. - Email string `json:"email,omitempty"` - - ServerAddress string `json:"serveraddress,omitempty"` - - // IdentityToken is used to authenticate the user and get - // an access token for the registry. - IdentityToken string `json:"identitytoken,omitempty"` - - // RegistryToken is a bearer token to be sent to a registry - RegistryToken string `json:"registrytoken,omitempty"` -} - -// KubernetesConfig contains Kubernetes orchestrator settings -type KubernetesConfig struct { - AllNamespaces string `json:"allNamespaces,omitempty"` -} diff --git a/vendor/github.com/cpuguy83/dockercfg/load.go b/vendor/github.com/cpuguy83/dockercfg/load.go deleted file mode 100644 index 262545b9d..000000000 --- a/vendor/github.com/cpuguy83/dockercfg/load.go +++ /dev/null @@ -1,51 +0,0 @@ -package dockercfg - -import ( - "encoding/json" - "fmt" - "os" - "path/filepath" -) - -// UserHomeConfigPath returns the path to the docker config in the current user's home dir. -func UserHomeConfigPath() (string, error) { - home, err := os.UserHomeDir() - if err != nil { - return "", fmt.Errorf("error looking up user home dir: %w", err) - } - - return filepath.Join(home, ".docker", "config.json"), nil -} - -// ConfigPath returns the path to the docker cli config. 
-// -// It will either use the DOCKER_CONFIG env var if set, or the value from `UserHomeConfigPath` -// DOCKER_CONFIG would be the dir path where `config.json` is stored, this returns the path to config.json. -func ConfigPath() (string, error) { - if p := os.Getenv("DOCKER_CONFIG"); p != "" { - return filepath.Join(p, "config.json"), nil - } - return UserHomeConfigPath() -} - -// LoadDefaultConfig loads the docker cli config from the path returned from `ConfigPath` -func LoadDefaultConfig() (Config, error) { - var cfg Config - p, err := ConfigPath() - if err != nil { - return cfg, err - } - return cfg, FromFile(p, &cfg) -} - -// FromFile loads config from the specified path into cfg -func FromFile(configPath string, cfg *Config) error { - f, err := os.Open(configPath) - if err != nil { - return err - } - - err = json.NewDecoder(f).Decode(&cfg) - f.Close() - return err -} diff --git a/vendor/github.com/docker/distribution/LICENSE b/vendor/github.com/docker/distribution/LICENSE deleted file mode 100644 index e06d20818..000000000 --- a/vendor/github.com/docker/distribution/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/vendor/github.com/docker/distribution/digestset/set.go b/vendor/github.com/docker/distribution/digestset/set.go deleted file mode 100644 index 71327dca7..000000000 --- a/vendor/github.com/docker/distribution/digestset/set.go +++ /dev/null @@ -1,247 +0,0 @@ -package digestset - -import ( - "errors" - "sort" - "strings" - "sync" - - digest "github.com/opencontainers/go-digest" -) - -var ( - // ErrDigestNotFound is used when a matching digest - // could not be found in a set. - ErrDigestNotFound = errors.New("digest not found") - - // ErrDigestAmbiguous is used when multiple digests - // are found in a set. None of the matching digests - // should be considered valid matches. - ErrDigestAmbiguous = errors.New("ambiguous digest string") -) - -// Set is used to hold a unique set of digests which -// may be easily referenced by easily referenced by a string -// representation of the digest as well as short representation. 
-// The uniqueness of the short representation is based on other -// digests in the set. If digests are omitted from this set, -// collisions in a larger set may not be detected, therefore it -// is important to always do short representation lookups on -// the complete set of digests. To mitigate collisions, an -// appropriately long short code should be used. -type Set struct { - mutex sync.RWMutex - entries digestEntries -} - -// NewSet creates an empty set of digests -// which may have digests added. -func NewSet() *Set { - return &Set{ - entries: digestEntries{}, - } -} - -// checkShortMatch checks whether two digests match as either whole -// values or short values. This function does not test equality, -// rather whether the second value could match against the first -// value. -func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool { - if len(hex) == len(shortHex) { - if hex != shortHex { - return false - } - if len(shortAlg) > 0 && string(alg) != shortAlg { - return false - } - } else if !strings.HasPrefix(hex, shortHex) { - return false - } else if len(shortAlg) > 0 && string(alg) != shortAlg { - return false - } - return true -} - -// Lookup looks for a digest matching the given string representation. -// If no digests could be found ErrDigestNotFound will be returned -// with an empty digest value. If multiple matches are found -// ErrDigestAmbiguous will be returned with an empty digest value. -func (dst *Set) Lookup(d string) (digest.Digest, error) { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - if len(dst.entries) == 0 { - return "", ErrDigestNotFound - } - var ( - searchFunc func(int) bool - alg digest.Algorithm - hex string - ) - dgst, err := digest.Parse(d) - if err == digest.ErrDigestInvalidFormat { - hex = d - searchFunc = func(i int) bool { - return dst.entries[i].val >= d - } - } else { - hex = dgst.Hex() - alg = dgst.Algorithm() - searchFunc = func(i int) bool { - if dst.entries[i].val == hex { - return dst.entries[i].alg >= alg - } - return dst.entries[i].val >= hex - } - } - idx := sort.Search(len(dst.entries), searchFunc) - if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) { - return "", ErrDigestNotFound - } - if dst.entries[idx].alg == alg && dst.entries[idx].val == hex { - return dst.entries[idx].digest, nil - } - if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) { - return "", ErrDigestAmbiguous - } - - return dst.entries[idx].digest, nil -} - -// Add adds the given digest to the set. An error will be returned -// if the given digest is invalid. If the digest already exists in the -// set, this operation will be a no-op. -func (dst *Set) Add(d digest.Digest) error { - if err := d.Validate(); err != nil { - return err - } - dst.mutex.Lock() - defer dst.mutex.Unlock() - entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} - searchFunc := func(i int) bool { - if dst.entries[i].val == entry.val { - return dst.entries[i].alg >= entry.alg - } - return dst.entries[i].val >= entry.val - } - idx := sort.Search(len(dst.entries), searchFunc) - if idx == len(dst.entries) { - dst.entries = append(dst.entries, entry) - return nil - } else if dst.entries[idx].digest == d { - return nil - } - - entries := append(dst.entries, nil) - copy(entries[idx+1:], entries[idx:len(entries)-1]) - entries[idx] = entry - dst.entries = entries - return nil -} - -// Remove removes the given digest from the set. 
An err will be -// returned if the given digest is invalid. If the digest does -// not exist in the set, this operation will be a no-op. -func (dst *Set) Remove(d digest.Digest) error { - if err := d.Validate(); err != nil { - return err - } - dst.mutex.Lock() - defer dst.mutex.Unlock() - entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} - searchFunc := func(i int) bool { - if dst.entries[i].val == entry.val { - return dst.entries[i].alg >= entry.alg - } - return dst.entries[i].val >= entry.val - } - idx := sort.Search(len(dst.entries), searchFunc) - // Not found if idx is after or value at idx is not digest - if idx == len(dst.entries) || dst.entries[idx].digest != d { - return nil - } - - entries := dst.entries - copy(entries[idx:], entries[idx+1:]) - entries = entries[:len(entries)-1] - dst.entries = entries - - return nil -} - -// All returns all the digests in the set -func (dst *Set) All() []digest.Digest { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - retValues := make([]digest.Digest, len(dst.entries)) - for i := range dst.entries { - retValues[i] = dst.entries[i].digest - } - - return retValues -} - -// ShortCodeTable returns a map of Digest to unique short codes. The -// length represents the minimum value, the maximum length may be the -// entire value of digest if uniqueness cannot be achieved without the -// full value. This function will attempt to make short codes as short -// as possible to be unique. -func ShortCodeTable(dst *Set, length int) map[digest.Digest]string { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - m := make(map[digest.Digest]string, len(dst.entries)) - l := length - resetIdx := 0 - for i := 0; i < len(dst.entries); i++ { - var short string - extended := true - for extended { - extended = false - if len(dst.entries[i].val) <= l { - short = dst.entries[i].digest.String() - } else { - short = dst.entries[i].val[:l] - for j := i + 1; j < len(dst.entries); j++ { - if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { - if j > resetIdx { - resetIdx = j - } - extended = true - } else { - break - } - } - if extended { - l++ - } - } - } - m[dst.entries[i].digest] = short - if i >= resetIdx { - l = length - } - } - return m -} - -type digestEntry struct { - alg digest.Algorithm - val string - digest digest.Digest -} - -type digestEntries []*digestEntry - -func (d digestEntries) Len() int { - return len(d) -} - -func (d digestEntries) Less(i, j int) bool { - if d[i].val != d[j].val { - return d[i].val < d[j].val - } - return d[i].alg < d[j].alg -} - -func (d digestEntries) Swap(i, j int) { - d[i], d[j] = d[j], d[i] -} diff --git a/vendor/github.com/docker/distribution/reference/helpers.go b/vendor/github.com/docker/distribution/reference/helpers.go deleted file mode 100644 index 978df7eab..000000000 --- a/vendor/github.com/docker/distribution/reference/helpers.go +++ /dev/null @@ -1,42 +0,0 @@ -package reference - -import "path" - -// IsNameOnly returns true if reference only contains a repo name. -func IsNameOnly(ref Named) bool { - if _, ok := ref.(NamedTagged); ok { - return false - } - if _, ok := ref.(Canonical); ok { - return false - } - return true -} - -// FamiliarName returns the familiar name string -// for the given named, familiarizing if needed. -func FamiliarName(ref Named) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().Name() - } - return ref.Name() -} - -// FamiliarString returns the familiar string representation -// for the given reference, familiarizing if needed. 
-func FamiliarString(ref Reference) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().String() - } - return ref.String() -} - -// FamiliarMatch reports whether ref matches the specified pattern. -// See https://godoc.org/path#Match for supported patterns. -func FamiliarMatch(pattern string, ref Reference) (bool, error) { - matched, err := path.Match(pattern, FamiliarString(ref)) - if namedRef, isNamed := ref.(Named); isNamed && !matched { - matched, _ = path.Match(pattern, FamiliarName(namedRef)) - } - return matched, err -} diff --git a/vendor/github.com/docker/distribution/reference/normalize.go b/vendor/github.com/docker/distribution/reference/normalize.go deleted file mode 100644 index b3dfb7a6d..000000000 --- a/vendor/github.com/docker/distribution/reference/normalize.go +++ /dev/null @@ -1,199 +0,0 @@ -package reference - -import ( - "errors" - "fmt" - "strings" - - "github.com/docker/distribution/digestset" - "github.com/opencontainers/go-digest" -) - -var ( - legacyDefaultDomain = "index.docker.io" - defaultDomain = "docker.io" - officialRepoName = "library" - defaultTag = "latest" -) - -// normalizedNamed represents a name which has been -// normalized and has a familiar form. A familiar name -// is what is used in Docker UI. An example normalized -// name is "docker.io/library/ubuntu" and corresponding -// familiar name of "ubuntu". -type normalizedNamed interface { - Named - Familiar() Named -} - -// ParseNormalizedNamed parses a string into a named reference -// transforming a familiar name from Docker UI to a fully -// qualified reference. If the value may be an identifier -// use ParseAnyReference. -func ParseNormalizedNamed(s string) (Named, error) { - if ok := anchoredIdentifierRegexp.MatchString(s); ok { - return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) - } - domain, remainder := splitDockerDomain(s) - var remoteName string - if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { - remoteName = remainder[:tagSep] - } else { - remoteName = remainder - } - if strings.ToLower(remoteName) != remoteName { - return nil, errors.New("invalid reference format: repository name must be lowercase") - } - - ref, err := Parse(domain + "/" + remainder) - if err != nil { - return nil, err - } - named, isNamed := ref.(Named) - if !isNamed { - return nil, fmt.Errorf("reference %s has no name", ref.String()) - } - return named, nil -} - -// ParseDockerRef normalizes the image reference following the docker convention. This is added -// mainly for backward compatibility. -// The reference returned can only be either tagged or digested. For reference contains both tag -// and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@ -// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as -// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa. -func ParseDockerRef(ref string) (Named, error) { - named, err := ParseNormalizedNamed(ref) - if err != nil { - return nil, err - } - if _, ok := named.(NamedTagged); ok { - if canonical, ok := named.(Canonical); ok { - // The reference is both tagged and digested, only - // return digested. 
- newNamed, err := WithName(canonical.Name()) - if err != nil { - return nil, err - } - newCanonical, err := WithDigest(newNamed, canonical.Digest()) - if err != nil { - return nil, err - } - return newCanonical, nil - } - } - return TagNameOnly(named), nil -} - -// splitDockerDomain splits a repository name to domain and remotename string. -// If no valid domain is found, the default domain is used. Repository name -// needs to be already validated before. -func splitDockerDomain(name string) (domain, remainder string) { - i := strings.IndexRune(name, '/') - if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { - domain, remainder = defaultDomain, name - } else { - domain, remainder = name[:i], name[i+1:] - } - if domain == legacyDefaultDomain { - domain = defaultDomain - } - if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { - remainder = officialRepoName + "/" + remainder - } - return -} - -// familiarizeName returns a shortened version of the name familiar -// to to the Docker UI. Familiar names have the default domain -// "docker.io" and "library/" repository prefix removed. -// For example, "docker.io/library/redis" will have the familiar -// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". -// Returns a familiarized named only reference. -func familiarizeName(named namedRepository) repository { - repo := repository{ - domain: named.Domain(), - path: named.Path(), - } - - if repo.domain == defaultDomain { - repo.domain = "" - // Handle official repositories which have the pattern "library/" - if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { - repo.path = split[1] - } - } - return repo -} - -func (r reference) Familiar() Named { - return reference{ - namedRepository: familiarizeName(r.namedRepository), - tag: r.tag, - digest: r.digest, - } -} - -func (r repository) Familiar() Named { - return familiarizeName(r) -} - -func (t taggedReference) Familiar() Named { - return taggedReference{ - namedRepository: familiarizeName(t.namedRepository), - tag: t.tag, - } -} - -func (c canonicalReference) Familiar() Named { - return canonicalReference{ - namedRepository: familiarizeName(c.namedRepository), - digest: c.digest, - } -} - -// TagNameOnly adds the default tag "latest" to a reference if it only has -// a repo name. -func TagNameOnly(ref Named) Named { - if IsNameOnly(ref) { - namedTagged, err := WithTag(ref, defaultTag) - if err != nil { - // Default tag must be valid, to create a NamedTagged - // type with non-validated input the WithTag function - // should be used instead - panic(err) - } - return namedTagged - } - return ref -} - -// ParseAnyReference parses a reference string as a possible identifier, -// full digest, or familiar name. -func ParseAnyReference(ref string) (Reference, error) { - if ok := anchoredIdentifierRegexp.MatchString(ref); ok { - return digestReference("sha256:" + ref), nil - } - if dgst, err := digest.Parse(ref); err == nil { - return digestReference(dgst), nil - } - - return ParseNormalizedNamed(ref) -} - -// ParseAnyReferenceWithSet parses a reference string as a possible short -// identifier to be matched in a digest set, a full digest, or familiar name. 
-func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) { - if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok { - dgst, err := ds.Lookup(ref) - if err == nil { - return digestReference(dgst), nil - } - } else { - if dgst, err := digest.Parse(ref); err == nil { - return digestReference(dgst), nil - } - } - - return ParseNormalizedNamed(ref) -} diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go deleted file mode 100644 index b7cd00b0d..000000000 --- a/vendor/github.com/docker/distribution/reference/reference.go +++ /dev/null @@ -1,433 +0,0 @@ -// Package reference provides a general type to represent any way of referencing images within the registry. -// Its main purpose is to abstract tags and digests (content-addressable hash). -// -// Grammar -// -// reference := name [ ":" tag ] [ "@" digest ] -// name := [domain '/'] path-component ['/' path-component]* -// domain := domain-component ['.' domain-component]* [':' port-number] -// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ -// port-number := /[0-9]+/ -// path-component := alpha-numeric [separator alpha-numeric]* -// alpha-numeric := /[a-z0-9]+/ -// separator := /[_.]|__|[-]*/ -// -// tag := /[\w][\w.-]{0,127}/ -// -// digest := digest-algorithm ":" digest-hex -// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* -// digest-algorithm-separator := /[+.-_]/ -// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ -// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value -// -// identifier := /[a-f0-9]{64}/ -// short-identifier := /[a-f0-9]{6,64}/ -package reference - -import ( - "errors" - "fmt" - "strings" - - "github.com/opencontainers/go-digest" -) - -const ( - // NameTotalLengthMax is the maximum total number of characters in a repository name. - NameTotalLengthMax = 255 -) - -var ( - // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. - ErrReferenceInvalidFormat = errors.New("invalid reference format") - - // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. - ErrTagInvalidFormat = errors.New("invalid tag format") - - // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. - ErrDigestInvalidFormat = errors.New("invalid digest format") - - // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. - ErrNameContainsUppercase = errors.New("repository name must be lowercase") - - // ErrNameEmpty is returned for empty, invalid repository names. - ErrNameEmpty = errors.New("repository name must have at least one component") - - // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. - ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) - - // ErrNameNotCanonical is returned when a name is not canonical. - ErrNameNotCanonical = errors.New("repository name must be canonical") -) - -// Reference is an opaque object reference identifier that may include -// modifiers such as a hostname, name, tag, and digest. -type Reference interface { - // String returns the full reference - String() string -} - -// Field provides a wrapper type for resolving correct reference types when -// working with encoding. -type Field struct { - reference Reference -} - -// AsField wraps a reference in a Field for encoding. 
-func AsField(reference Reference) Field { - return Field{reference} -} - -// Reference unwraps the reference type from the field to -// return the Reference object. This object should be -// of the appropriate type to further check for different -// reference types. -func (f Field) Reference() Reference { - return f.reference -} - -// MarshalText serializes the field to byte text which -// is the string of the reference. -func (f Field) MarshalText() (p []byte, err error) { - return []byte(f.reference.String()), nil -} - -// UnmarshalText parses text bytes by invoking the -// reference parser to ensure the appropriately -// typed reference object is wrapped by field. -func (f *Field) UnmarshalText(p []byte) error { - r, err := Parse(string(p)) - if err != nil { - return err - } - - f.reference = r - return nil -} - -// Named is an object with a full name -type Named interface { - Reference - Name() string -} - -// Tagged is an object which has a tag -type Tagged interface { - Reference - Tag() string -} - -// NamedTagged is an object including a name and tag. -type NamedTagged interface { - Named - Tag() string -} - -// Digested is an object which has a digest -// in which it can be referenced by -type Digested interface { - Reference - Digest() digest.Digest -} - -// Canonical reference is an object with a fully unique -// name including a name with domain and digest -type Canonical interface { - Named - Digest() digest.Digest -} - -// namedRepository is a reference to a repository with a name. -// A namedRepository has both domain and path components. -type namedRepository interface { - Named - Domain() string - Path() string -} - -// Domain returns the domain part of the Named reference -func Domain(named Named) string { - if r, ok := named.(namedRepository); ok { - return r.Domain() - } - domain, _ := splitDomain(named.Name()) - return domain -} - -// Path returns the name without the domain part of the Named reference -func Path(named Named) (name string) { - if r, ok := named.(namedRepository); ok { - return r.Path() - } - _, path := splitDomain(named.Name()) - return path -} - -func splitDomain(name string) (string, string) { - match := anchoredNameRegexp.FindStringSubmatch(name) - if len(match) != 3 { - return "", name - } - return match[1], match[2] -} - -// SplitHostname splits a named reference into a -// hostname and name string. If no valid hostname is -// found, the hostname is empty and the full value -// is returned as name -// DEPRECATED: Use Domain or Path -func SplitHostname(named Named) (string, string) { - if r, ok := named.(namedRepository); ok { - return r.Domain(), r.Path() - } - return splitDomain(named.Name()) -} - -// Parse parses s and returns a syntactically valid Reference. -// If an error was encountered it is returned, along with a nil Reference. -// NOTE: Parse will not handle short digests. 
-func Parse(s string) (Reference, error) { - matches := ReferenceRegexp.FindStringSubmatch(s) - if matches == nil { - if s == "" { - return nil, ErrNameEmpty - } - if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { - return nil, ErrNameContainsUppercase - } - return nil, ErrReferenceInvalidFormat - } - - if len(matches[1]) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - - var repo repository - - nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) - if len(nameMatch) == 3 { - repo.domain = nameMatch[1] - repo.path = nameMatch[2] - } else { - repo.domain = "" - repo.path = matches[1] - } - - ref := reference{ - namedRepository: repo, - tag: matches[2], - } - if matches[3] != "" { - var err error - ref.digest, err = digest.Parse(matches[3]) - if err != nil { - return nil, err - } - } - - r := getBestReferenceType(ref) - if r == nil { - return nil, ErrNameEmpty - } - - return r, nil -} - -// ParseNamed parses s and returns a syntactically valid reference implementing -// the Named interface. The reference must have a name and be in the canonical -// form, otherwise an error is returned. -// If an error was encountered it is returned, along with a nil Reference. -// NOTE: ParseNamed will not handle short digests. -func ParseNamed(s string) (Named, error) { - named, err := ParseNormalizedNamed(s) - if err != nil { - return nil, err - } - if named.String() != s { - return nil, ErrNameNotCanonical - } - return named, nil -} - -// WithName returns a named object representing the given string. If the input -// is invalid ErrReferenceInvalidFormat will be returned. -func WithName(name string) (Named, error) { - if len(name) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - - match := anchoredNameRegexp.FindStringSubmatch(name) - if match == nil || len(match) != 3 { - return nil, ErrReferenceInvalidFormat - } - return repository{ - domain: match[1], - path: match[2], - }, nil -} - -// WithTag combines the name from "name" and the tag from "tag" to form a -// reference incorporating both the name and the tag. -func WithTag(name Named, tag string) (NamedTagged, error) { - if !anchoredTagRegexp.MatchString(tag) { - return nil, ErrTagInvalidFormat - } - var repo repository - if r, ok := name.(namedRepository); ok { - repo.domain = r.Domain() - repo.path = r.Path() - } else { - repo.path = name.Name() - } - if canonical, ok := name.(Canonical); ok { - return reference{ - namedRepository: repo, - tag: tag, - digest: canonical.Digest(), - }, nil - } - return taggedReference{ - namedRepository: repo, - tag: tag, - }, nil -} - -// WithDigest combines the name from "name" and the digest from "digest" to form -// a reference incorporating both the name and the digest. -func WithDigest(name Named, digest digest.Digest) (Canonical, error) { - if !anchoredDigestRegexp.MatchString(digest.String()) { - return nil, ErrDigestInvalidFormat - } - var repo repository - if r, ok := name.(namedRepository); ok { - repo.domain = r.Domain() - repo.path = r.Path() - } else { - repo.path = name.Name() - } - if tagged, ok := name.(Tagged); ok { - return reference{ - namedRepository: repo, - tag: tagged.Tag(), - digest: digest, - }, nil - } - return canonicalReference{ - namedRepository: repo, - digest: digest, - }, nil -} - -// TrimNamed removes any tag or digest from the named reference. 
-func TrimNamed(ref Named) Named { - domain, path := SplitHostname(ref) - return repository{ - domain: domain, - path: path, - } -} - -func getBestReferenceType(ref reference) Reference { - if ref.Name() == "" { - // Allow digest only references - if ref.digest != "" { - return digestReference(ref.digest) - } - return nil - } - if ref.tag == "" { - if ref.digest != "" { - return canonicalReference{ - namedRepository: ref.namedRepository, - digest: ref.digest, - } - } - return ref.namedRepository - } - if ref.digest == "" { - return taggedReference{ - namedRepository: ref.namedRepository, - tag: ref.tag, - } - } - - return ref -} - -type reference struct { - namedRepository - tag string - digest digest.Digest -} - -func (r reference) String() string { - return r.Name() + ":" + r.tag + "@" + r.digest.String() -} - -func (r reference) Tag() string { - return r.tag -} - -func (r reference) Digest() digest.Digest { - return r.digest -} - -type repository struct { - domain string - path string -} - -func (r repository) String() string { - return r.Name() -} - -func (r repository) Name() string { - if r.domain == "" { - return r.path - } - return r.domain + "/" + r.path -} - -func (r repository) Domain() string { - return r.domain -} - -func (r repository) Path() string { - return r.path -} - -type digestReference digest.Digest - -func (d digestReference) String() string { - return digest.Digest(d).String() -} - -func (d digestReference) Digest() digest.Digest { - return digest.Digest(d) -} - -type taggedReference struct { - namedRepository - tag string -} - -func (t taggedReference) String() string { - return t.Name() + ":" + t.tag -} - -func (t taggedReference) Tag() string { - return t.tag -} - -type canonicalReference struct { - namedRepository - digest digest.Digest -} - -func (c canonicalReference) String() string { - return c.Name() + "@" + c.digest.String() -} - -func (c canonicalReference) Digest() digest.Digest { - return c.digest -} diff --git a/vendor/github.com/docker/distribution/reference/regexp.go b/vendor/github.com/docker/distribution/reference/regexp.go deleted file mode 100644 index 786034932..000000000 --- a/vendor/github.com/docker/distribution/reference/regexp.go +++ /dev/null @@ -1,143 +0,0 @@ -package reference - -import "regexp" - -var ( - // alphaNumericRegexp defines the alpha numeric atom, typically a - // component of names. This only allows lower case characters and digits. - alphaNumericRegexp = match(`[a-z0-9]+`) - - // separatorRegexp defines the separators allowed to be embedded in name - // components. This allow one period, one or two underscore and multiple - // dashes. - separatorRegexp = match(`(?:[._]|__|[-]*)`) - - // nameComponentRegexp restricts registry path component names to start - // with at least one letter or number, with following parts able to be - // separated by one period, one or two underscore and multiple dashes. - nameComponentRegexp = expression( - alphaNumericRegexp, - optional(repeated(separatorRegexp, alphaNumericRegexp))) - - // domainComponentRegexp restricts the registry domain component of a - // repository name to start with a component as defined by DomainRegexp - // and followed by an optional port. - domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) - - // DomainRegexp defines the structure of potential domain components - // that may be part of image names. This is purposely a subset of what is - // allowed by DNS to ensure backwards compatibility with Docker image - // names. 
- DomainRegexp = expression( - domainComponentRegexp, - optional(repeated(literal(`.`), domainComponentRegexp)), - optional(literal(`:`), match(`[0-9]+`))) - - // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. - TagRegexp = match(`[\w][\w.-]{0,127}`) - - // anchoredTagRegexp matches valid tag names, anchored at the start and - // end of the matched string. - anchoredTagRegexp = anchored(TagRegexp) - - // DigestRegexp matches valid digests. - DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) - - // anchoredDigestRegexp matches valid digests, anchored at the start and - // end of the matched string. - anchoredDigestRegexp = anchored(DigestRegexp) - - // NameRegexp is the format for the name component of references. The - // regexp has capturing groups for the domain and name part omitting - // the separating forward slash from either. - NameRegexp = expression( - optional(DomainRegexp, literal(`/`)), - nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp))) - - // anchoredNameRegexp is used to parse a name value, capturing the - // domain and trailing components. - anchoredNameRegexp = anchored( - optional(capture(DomainRegexp), literal(`/`)), - capture(nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp)))) - - // ReferenceRegexp is the full supported format of a reference. The regexp - // is anchored and has capturing groups for name, tag, and digest - // components. - ReferenceRegexp = anchored(capture(NameRegexp), - optional(literal(":"), capture(TagRegexp)), - optional(literal("@"), capture(DigestRegexp))) - - // IdentifierRegexp is the format for string identifier used as a - // content addressable identifier using sha256. These identifiers - // are like digests without the algorithm, since sha256 is used. - IdentifierRegexp = match(`([a-f0-9]{64})`) - - // ShortIdentifierRegexp is the format used to represent a prefix - // of an identifier. A prefix may be used to match a sha256 identifier - // within a list of trusted identifiers. - ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) - - // anchoredIdentifierRegexp is used to check or match an - // identifier value, anchored at start and end of string. - anchoredIdentifierRegexp = anchored(IdentifierRegexp) - - // anchoredShortIdentifierRegexp is used to check if a value - // is a possible identifier prefix, anchored at start and end - // of string. - anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) -) - -// match compiles the string to a regular expression. -var match = regexp.MustCompile - -// literal compiles s into a literal regular expression, escaping any regexp -// reserved characters. -func literal(s string) *regexp.Regexp { - re := match(regexp.QuoteMeta(s)) - - if _, complete := re.LiteralPrefix(); !complete { - panic("must be a literal") - } - - return re -} - -// expression defines a full expression, where each regular expression must -// follow the previous. -func expression(res ...*regexp.Regexp) *regexp.Regexp { - var s string - for _, re := range res { - s += re.String() - } - - return match(s) -} - -// optional wraps the expression in a non-capturing group and makes the -// production optional. -func optional(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `?`) -} - -// repeated wraps the regexp in a non-capturing group to get one or more -// matches. 
-func repeated(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `+`) -} - -// group wraps the regexp in a non-capturing group. -func group(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(?:` + expression(res...).String() + `)`) -} - -// capture wraps the expression in a capturing group. -func capture(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(` + expression(res...).String() + `)`) -} - -// anchored anchors the regular expression by adding start and end delimiters. -func anchored(res ...*regexp.Regexp) *regexp.Regexp { - return match(`^` + expression(res...).String() + `$`) -} diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS deleted file mode 100644 index 0728bfe18..000000000 --- a/vendor/github.com/docker/docker/AUTHORS +++ /dev/null @@ -1,2372 +0,0 @@ -# File @generated by hack/generate-authors.sh. DO NOT EDIT. -# This file lists all contributors to the repository. -# See hack/generate-authors.sh to make modifications. - -Aanand Prasad -Aaron Davidson -Aaron Feng -Aaron Hnatiw -Aaron Huslage -Aaron L. Xu -Aaron Lehmann -Aaron Welch -Abel Muiño -Abhijeet Kasurde -Abhinandan Prativadi -Abhinav Ajgaonkar -Abhishek Chanda -Abhishek Sharma -Abin Shahab -Abirdcfly -Ada Mancini -Adam Avilla -Adam Dobrawy -Adam Eijdenberg -Adam Kunk -Adam Miller -Adam Mills -Adam Pointer -Adam Singer -Adam Walz -Adam Williams -Addam Hardy -Aditi Rajagopal -Aditya -Adnan Khan -Adolfo Ochagavía -Adria Casas -Adrian Moisey -Adrian Mouat -Adrian Oprea -Adrien Folie -Adrien Gallouët -Ahmed Kamal -Ahmet Alp Balkan -Aidan Feldman -Aidan Hobson Sayers -AJ Bowen -Ajey Charantimath -ajneu -Akash Gupta -Akhil Mohan -Akihiro Matsushima -Akihiro Suda -Akim Demaille -Akira Koyasu -Akshay Karle -Akshay Moghe -Al Tobey -alambike -Alan Hoyle -Alan Scherger -Alan Thompson -Albert Callarisa -Albert Zhang -Albin Kerouanton -Alec Benson -Alejandro González Hevia -Aleksa Sarai -Aleksandr Chebotov -Aleksandrs Fadins -Alena Prokharchyk -Alessandro Boch -Alessio Biancalana -Alex Chan -Alex Chen -Alex Coventry -Alex Crawford -Alex Ellis -Alex Gaynor -Alex Goodman -Alex Nordlund -Alex Olshansky -Alex Samorukov -Alex Warhawk -Alexander Artemenko -Alexander Boyd -Alexander Larsson -Alexander Midlash -Alexander Morozov -Alexander Polakov -Alexander Shopov -Alexandre Beslic -Alexandre Garnier -Alexandre González -Alexandre Jomin -Alexandru Sfirlogea -Alexei Margasov -Alexey Guskov -Alexey Kotlyarov -Alexey Shamrin -Alexis Ries -Alexis Thomas -Alfred Landrum -Ali Dehghani -Alicia Lauerman -Alihan Demir -Allen Madsen -Allen Sun -almoehi -Alvaro Saurin -Alvin Deng -Alvin Richards -amangoel -Amen Belayneh -Ameya Gawde -Amir Goldstein -Amit Bakshi -Amit Krishnan -Amit Shukla -Amr Gawish -Amy Lindburg -Anand Patil -AnandkumarPatel -Anatoly Borodin -Anca Iordache -Anchal Agrawal -Anda Xu -Anders Janmyr -Andre Dublin <81dublin@gmail.com> -Andre Granovsky -Andrea Denisse Gómez -Andrea Luzzardi -Andrea Turli -Andreas Elvers -Andreas Köhler -Andreas Savvides -Andreas Tiefenthaler -Andrei Gherzan -Andrei Ushakov -Andrei Vagin -Andrew C. 
Bodine -Andrew Clay Shafer -Andrew Duckworth -Andrew France -Andrew Gerrand -Andrew Guenther -Andrew He -Andrew Hsu -Andrew Kim -Andrew Kuklewicz -Andrew Macgregor -Andrew Macpherson -Andrew Martin -Andrew McDonnell -Andrew Munsell -Andrew Pennebaker -Andrew Po -Andrew Weiss -Andrew Williams -Andrews Medina -Andrey Kolomentsev -Andrey Petrov -Andrey Stolbovsky -André Martins -Andy Chambers -andy diller -Andy Goldstein -Andy Kipp -Andy Lindeman -Andy Rothfusz -Andy Smith -Andy Wilson -Andy Zhang -Anes Hasicic -Angel Velazquez -Anil Belur -Anil Madhavapeddy -Ankit Jain -Ankush Agarwal -Anonmily -Anran Qiao -Anshul Pundir -Anthon van der Neut -Anthony Baire -Anthony Bishopric -Anthony Dahanne -Anthony Sottile -Anton Löfgren -Anton Nikitin -Anton Polonskiy -Anton Tiurin -Antonio Murdaca -Antonis Kalipetis -Antony Messerli -Anuj Bahuguna -Anuj Varma -Anusha Ragunathan -Anyu Wang -apocas -Arash Deshmeh -ArikaChen -Arko Dasgupta -Arnaud Lefebvre -Arnaud Porterie -Arnaud Rebillout -Artem Khramov -Arthur Barr -Arthur Gautier -Artur Meyster -Arun Gupta -Asad Saeeduddin -Asbjørn Enge -Austin Vazquez -averagehuman -Avi Das -Avi Kivity -Avi Miller -Avi Vaid -ayoshitake -Azat Khuyiyakhmetov -Bao Yonglei -Bardia Keyoumarsi -Barnaby Gray -Barry Allard -Bartłomiej Piotrowski -Bastiaan Bakker -Bastien Pascard -bdevloed -Bearice Ren -Ben Bonnefoy -Ben Firshman -Ben Golub -Ben Gould -Ben Hall -Ben Langfeld -Ben Sargent -Ben Severson -Ben Toews -Ben Wiklund -Benjamin Atkin -Benjamin Baker -Benjamin Boudreau -Benjamin Böhmke -Benjamin Yolken -Benny Ng -Benoit Chesneau -Bernerd Schaefer -Bernhard M. Wiedemann -Bert Goethals -Bertrand Roussel -Bevisy Zhang -Bharath Thiruveedula -Bhiraj Butala -Bhumika Bayani -Bilal Amarni -Bill Wang -Billy Ridgway -Bily Zhang -Bin Liu -Bingshen Wang -Bjorn Neergaard -Blake Geno -Boaz Shuster -bobby abbott -Bojun Zhu -Boqin Qin -Boris Pruessmann -Boshi Lian -Bouke Haarsma -Boyd Hemphill -boynux -Bradley Cicenas -Bradley Wright -Brandon Liu -Brandon Philips -Brandon Rhodes -Brendan Dixon -Brent Salisbury -Brett Higgins -Brett Kochendorfer -Brett Milford -Brett Randall -Brian (bex) Exelbierd -Brian Bland -Brian DeHamer -Brian Dorsey -Brian Flad -Brian Goff -Brian McCallister -Brian Olsen -Brian Schwind -Brian Shumate -Brian Torres-Gil -Brian Trump -Brice Jaglin -Briehan Lombaard -Brielle Broder -Bruno Bigras -Bruno Binet -Bruno Gazzera -Bruno Renié -Bruno Tavares -Bryan Bess -Bryan Boreham -Bryan Matsuo -Bryan Murphy -Burke Libbey -Byung Kang -Caleb Spare -Calen Pennington -Cameron Boehmer -Cameron Sparr -Cameron Spear -Campbell Allen -Candid Dauth -Cao Weiwei -Carl Henrik Lunde -Carl Loa Odin -Carl X. Su -Carlo Mion -Carlos Alexandro Becker -Carlos de Paula -Carlos Sanchez -Carol Fager-Higgins -Cary -Casey Bisson -Catalin Pirvu -Ce Gao -Cedric Davies -Cezar Sa Espinola -Chad Swenson -Chance Zibolski -Chander Govindarajan -Chanhun Jeong -Chao Wang -Charles Chan -Charles Hooper -Charles Law -Charles Lindsay -Charles Merriam -Charles Sarrazin -Charles Smith -Charlie Drage -Charlie Lewis -Chase Bolt -ChaYoung You -Chee Hau Lim -Chen Chao -Chen Chuanliang -Chen Hanxiao -Chen Min -Chen Mingjie -Chen Qiu -Cheng-mean Liu -Chengfei Shang -Chengguang Xu -Chenyang Yan -chenyuzhu -Chetan Birajdar -Chewey -Chia-liang Kao -chli -Cholerae Hu -Chris Alfonso -Chris Armstrong -Chris Dias -Chris Dituri -Chris Fordham -Chris Gavin -Chris Gibson -Chris Khoo -Chris Kreussling (Flatbush Gardener) -Chris McKinnel -Chris McKinnel -Chris Price -Chris Seto -Chris Snow -Chris St. 
Pierre -Chris Stivers -Chris Swan -Chris Telfer -Chris Wahl -Chris Weyl -Chris White -Christian Becker -Christian Berendt -Christian Brauner -Christian Böhme -Christian Muehlhaeuser -Christian Persson -Christian Rotzoll -Christian Simon -Christian Stefanescu -Christoph Ziebuhr -Christophe Mehay -Christophe Troestler -Christophe Vidal -Christopher Biscardi -Christopher Crone -Christopher Currie -Christopher Jones -Christopher Latham -Christopher Rigor -Christy Norman -Chun Chen -Ciro S. Costa -Clayton Coleman -Clint Armstrong -Clinton Kitson -clubby789 -Cody Roseborough -Coenraad Loubser -Colin Dunklau -Colin Hebert -Colin Panisset -Colin Rice -Colin Walters -Collin Guarino -Colm Hally -companycy -Conor Evans -Corbin Coleman -Corey Farrell -Cory Forsyth -Cory Snider -cressie176 -Cristian Ariza -Cristian Staretu -cristiano balducci -Cristina Yenyxe Gonzalez Garcia -Cruceru Calin-Cristian -CUI Wei -cuishuang -Cuong Manh Le -Cyprian Gracz -Cyril F -Da McGrady -Daan van Berkel -Daehyeok Mun -Dafydd Crosby -dalanlan -Damian Smyth -Damien Nadé -Damien Nozay -Damjan Georgievski -Dan Anolik -Dan Buch -Dan Cotora -Dan Feldman -Dan Griffin -Dan Hirsch -Dan Keder -Dan Levy -Dan McPherson -Dan Plamadeala -Dan Stine -Dan Williams -Dani Hodovic -Dani Louca -Daniel Antlinger -Daniel Black -Daniel Dao -Daniel Exner -Daniel Farrell -Daniel Garcia -Daniel Gasienica -Daniel Grunwell -Daniel Helfand -Daniel Hiltgen -Daniel J Walsh -Daniel Menet -Daniel Mizyrycki -Daniel Nephin -Daniel Norberg -Daniel Nordberg -Daniel P. Berrangé -Daniel Robinson -Daniel S -Daniel Sweet -Daniel Von Fange -Daniel Watkins -Daniel X Moore -Daniel YC Lin -Daniel Zhang -Daniele Rondina -Danny Berger -Danny Milosavljevic -Danny Yates -Danyal Khaliq -Darren Coxall -Darren Shepherd -Darren Stahl -Dattatraya Kumbhar -Davanum Srinivas -Dave Barboza -Dave Goodchild -Dave Henderson -Dave MacDonald -Dave Tucker -David Anderson -David Bellotti -David Calavera -David Chung -David Corking -David Cramer -David Currie -David Davis -David Dooling -David Gageot -David Gebler -David Glasser -David Lawrence -David Lechner -David M. Karr -David Mackey -David Manouchehri -David Mat -David Mcanulty -David McKay -David O'Rourke -David P Hilton -David Pelaez -David R. Jenni -David Röthlisberger -David Sheets -David Sissitka -David Trott -David Wang <00107082@163.com> -David Williamson -David Xia -David Young -Davide Ceretti -Dawn Chen -dbdd -dcylabs -Debayan De -Deborah Gertrude Digges -deed02392 -Deep Debroy -Deng Guangxing -Deni Bertovic -Denis Defreyne -Denis Gladkikh -Denis Ollier -Dennis Chen -Dennis Chen -Dennis Docter -Derek -Derek -Derek Ch -Derek McGowan -Deric Crago -Deshi Xiao -Devon Estes -Devvyn Murphy -Dharmit Shah -Dhawal Yogesh Bhanushali -Dhilip Kumars -Diego Romero -Diego Siqueira -Dieter Reuter -Dillon Dixon -Dima Stopel -Dimitri John Ledkov -Dimitris Mandalidis -Dimitris Rozakis -Dimitry Andric -Dinesh Subhraveti -Ding Fei -dingwei -Diogo Monica -DiuDiugirl -Djibril Koné -Djordje Lukic -dkumor -Dmitri Logvinenko -Dmitri Shuralyov -Dmitry Demeshchuk -Dmitry Gusev -Dmitry Kononenko -Dmitry Sharshakov -Dmitry Shyshkin -Dmitry Smirnov -Dmitry V. 
Krivenok -Dmitry Vorobev -Dmytro Iakovliev -docker-unir[bot] -Dolph Mathews -Dominic Tubach -Dominic Yin -Dominik Dingel -Dominik Finkbeiner -Dominik Honnef -Don Kirkby -Don Kjer -Don Spaulding -Donald Huang -Dong Chen -Donghwa Kim -Donovan Jones -Doron Podoleanu -Doug Davis -Doug MacEachern -Doug Tangren -Douglas Curtis -Dr Nic Williams -dragon788 -Dražen Lučanin -Drew Erny -Drew Hubl -Dustin Sallings -Ed Costello -Edmund Wagner -Eiichi Tsukata -Eike Herzbach -Eivin Giske Skaaren -Eivind Uggedal -Elan Ruusamäe -Elango Sivanandam -Elena Morozova -Eli Uriegas -Elias Faxö -Elias Koromilas -Elias Probst -Elijah Zupancic -eluck -Elvir Kuric -Emil Davtyan -Emil Hernvall -Emily Maier -Emily Rose -Emir Ozer -Eng Zer Jun -Enguerran -Eohyung Lee -epeterso -Eric Barch -Eric Curtin -Eric G. Noriega -Eric Hanchrow -Eric Lee -Eric Mountain -Eric Myhre -Eric Paris -Eric Rafaloff -Eric Rosenberg -Eric Sage -Eric Soderstrom -Eric Yang -Eric-Olivier Lamey -Erica Windisch -Erich Cordoba -Erik Bray -Erik Dubbelboer -Erik Hollensbe -Erik Inge Bolsø -Erik Kristensen -Erik Sipsma -Erik St. Martin -Erik Weathers -Erno Hopearuoho -Erwin van der Koogh -Espen Suenson -Ethan Bell -Ethan Mosbaugh -Euan Harris -Euan Kemp -Eugen Krizo -Eugene Yakubovich -Evan Allrich -Evan Carmi -Evan Hazlett -Evan Krall -Evan Phoenix -Evan Wies -Evelyn Xu -Everett Toews -Evgeniy Makhrov -Evgeny Shmarnev -Evgeny Vereshchagin -Ewa Czechowska -Eystein Måløy Stenberg -ezbercih -Ezra Silvera -Fabian Kramm -Fabian Lauer -Fabian Raetz -Fabiano Rosas -Fabio Falci -Fabio Kung -Fabio Rapposelli -Fabio Rehm -Fabrizio Regini -Fabrizio Soppelsa -Faiz Khan -falmp -Fangming Fang -Fangyuan Gao <21551127@zju.edu.cn> -fanjiyun -Fareed Dudhia -Fathi Boudra -Federico Gimenez -Felipe Oliveira -Felipe Ruhland -Felix Abecassis -Felix Geisendörfer -Felix Hupfeld -Felix Rabe -Felix Ruess -Felix Schindler -Feng Yan -Fengtu Wang -Ferenc Szabo -Fernando -Fero Volar -Feroz Salam -Ferran Rodenas -Filipe Brandenburger -Filipe Oliveira -Flavio Castelli -Flavio Crisciani -Florian -Florian Klein -Florian Maier -Florian Noeding -Florian Schmaus -Florian Weingarten -Florin Asavoaie -Florin Patan -fonglh -Foysal Iqbal -Francesc Campoy -Francesco Degrassi -Francesco Mari -Francis Chuang -Francisco Carriedo -Francisco Souza -Frank Groeneveld -Frank Herrmann -Frank Macreery -Frank Rosquin -Frank Yang -Fred Lifton -Frederick F. Kautz IV -Frederico F. de Oliveira -Frederik Loeffert -Frederik Nordahl Jul Sabroe -Freek Kalter -Frieder Bluemle -frobnicaty <92033765+frobnicaty@users.noreply.github.com> -Frédéric Dalleau -Fu JinLin -Félix Baylac-Jacqué -Félix Cantournet -Gabe Rosenhouse -Gabor Nagy -Gabriel Goller -Gabriel L. 
Somlo -Gabriel Linder -Gabriel Monroy -Gabriel Nicolas Avellaneda -Gaetan de Villele -Galen Sampson -Gang Qiao -Gareth Rushgrove -Garrett Barboza -Gary Schaetz -Gaurav -Gaurav Singh -Gaël PORTAY -Genki Takiuchi -GennadySpb -Geoff Levand -Geoffrey Bachelet -Geon Kim -George Kontridze -George MacRorie -George Xie -Georgi Hristozov -Georgy Yakovlev -Gereon Frey -German DZ -Gert van Valkenhoef -Gerwim Feiken -Ghislain Bourgeois -Giampaolo Mancini -Gianluca Borello -Gildas Cuisinier -Giovan Isa Musthofa -gissehel -Giuseppe Mazzotta -Giuseppe Scrivano -Gleb Fotengauer-Malinovskiy -Gleb M Borisov -Glyn Normington -GoBella -Goffert van Gool -Goldwyn Rodrigues -Gopikannan Venugopalsamy -Gosuke Miyashita -Gou Rao -Govinda Fichtner -Grant Millar -Grant Reaber -Graydon Hoare -Greg Fausak -Greg Pflaum -Greg Stephens -Greg Thornton -Grzegorz Jaśkiewicz -Guilhem Lettron -Guilherme Salgado -Guillaume Dufour -Guillaume J. Charmes -Gunadhya S. <6939749+gunadhya@users.noreply.github.com> -Guoqiang QI -guoxiuyan -Guri -Gurjeet Singh -Guruprasad -Gustav Sinder -gwx296173 -Günter Zöchbauer -Haichao Yang -haikuoliu -haining.cao -Hakan Özler -Hamish Hutchings -Hannes Ljungberg -Hans Kristian Flaatten -Hans Rødtang -Hao Shu Wei -Hao Zhang <21521210@zju.edu.cn> -Harald Albers -Harald Niesche -Harley Laue -Harold Cooper -Harrison Turton -Harry Zhang -Harshal Patil -Harshal Patil -He Simei -He Xiaoxi -He Xin -heartlock <21521209@zju.edu.cn> -Hector Castro -Helen Xie -Henning Sprang -Hiroshi Hatake -Hiroyuki Sasagawa -Hobofan -Hollie Teal -Hong Xu -Hongbin Lu -Hongxu Jia -Honza Pokorny -Hsing-Hui Hsu -hsinko <21551195@zju.edu.cn> -Hu Keping -Hu Tao -HuanHuan Ye -Huanzhong Zhang -Huayi Zhang -Hugo Barrera -Hugo Duncan -Hugo Marisco <0x6875676f@gmail.com> -Hui Kang -Hunter Blanks -huqun -Huu Nguyen -Hyeongkyu Lee -Hyzhou Zhy -Iago López Galeiras -Ian Bishop -Ian Bull -Ian Calvert -Ian Campbell -Ian Chen -Ian Lee -Ian Main -Ian Philpot -Ian Truslove -Iavael -Icaro Seara -Ignacio Capurro -Igor Dolzhikov -Igor Karpovich -Iliana Weller -Ilkka Laukkanen -Illo Abdulrahim -Ilya Dmitrichenko -Ilya Gusev -Ilya Khlopotov -imre Fitos -inglesp -Ingo Gottwald -Innovimax -Isaac Dupree -Isabel Jimenez -Isaiah Grace -Isao Jonas -Iskander Sharipov -Ivan Babrou -Ivan Fraixedes -Ivan Grcic -Ivan Markin -J Bruni -J. Nunn -Jack Danger Canty -Jack Laxson -Jacob Atzen -Jacob Edelman -Jacob Tomlinson -Jacob Vallejo -Jacob Wen -Jaime Cepeda -Jaivish Kothari -Jake Champlin -Jake Moshenko -Jake Sanders -Jakub Drahos -Jakub Guzik -James Allen -James Carey -James Carr -James DeFelice -James Harrison Fisher -James Kyburz -James Kyle -James Lal -James Mills -James Nesbitt -James Nugent -James Sanders -James Turnbull -James Watkins-Harvey -Jamie Hannaford -Jamshid Afshar -Jan Breig -Jan Chren -Jan Götte -Jan Keromnes -Jan Koprowski -Jan Pazdziora -Jan Toebes -Jan-Gerd Tenberge -Jan-Jaap Driessen -Jana Radhakrishnan -Jannick Fahlbusch -Januar Wayong -Jared Biel -Jared Hocutt -Jaroslaw Zabiello -Jasmine Hegman -Jason A. Donenfeld -Jason Divock -Jason Giedymin -Jason Green -Jason Hall -Jason Heiss -Jason Livesay -Jason McVetta -Jason Plum -Jason Shepherd -Jason Smith -Jason Sommer -Jason Stangroome -Javier Bassi -jaxgeller -Jay -Jay Kamat -Jay Lim -Jean Rouge -Jean-Baptiste Barth -Jean-Baptiste Dalido -Jean-Christophe Berthon -Jean-Paul Calderone -Jean-Pierre Huynh -Jean-Tiare Le Bigot -Jeeva S. 
Chelladhurai -Jeff Anderson -Jeff Hajewski -Jeff Johnston -Jeff Lindsay -Jeff Mickey -Jeff Minard -Jeff Nickoloff -Jeff Silberman -Jeff Welch -Jeff Zvier -Jeffrey Bolle -Jeffrey Morgan -Jeffrey van Gogh -Jenny Gebske -Jeremy Chambers -Jeremy Grosser -Jeremy Huntwork -Jeremy Price -Jeremy Qian -Jeremy Unruh -Jeremy Yallop -Jeroen Franse -Jeroen Jacobs -Jesse Dearing -Jesse Dubay -Jessica Frazelle -Jezeniel Zapanta -Jhon Honce -Ji.Zhilong -Jian Liao -Jian Zhang -Jiang Jinyang -Jianyong Wu -Jie Luo -Jie Ma -Jihyun Hwang -Jilles Oldenbeuving -Jim Alateras -Jim Carroll -Jim Ehrismann -Jim Galasyn -Jim Lin -Jim Minter -Jim Perrin -Jimmy Cuadra -Jimmy Puckett -Jimmy Song -Jinsoo Park -Jintao Zhang -Jiri Appl -Jiri Popelka -Jiuyue Ma -Jiří Župka -Joakim Roubert -Joao Fernandes -Joao Trindade -Joe Beda -Joe Doliner -Joe Ferguson -Joe Gordon -Joe Shaw -Joe Van Dyk -Joel Friedly -Joel Handwell -Joel Hansson -Joel Wurtz -Joey Geiger -Joey Geiger -Joey Gibson -Joffrey F -Johan Euphrosine -Johan Rydberg -Johanan Lieberman -Johannes 'fish' Ziemke -John Costa -John Feminella -John Gardiner Myers -John Gossman -John Harris -John Howard -John Laswell -John Maguire -John Mulhausen -John OBrien III -John Starks -John Stephens -John Tims -John V. Martinez -John Warwick -John Willis -Jon Johnson -Jon Surrell -Jon Wedaman -Jonas Dohse -Jonas Heinrich -Jonas Pfenniger -Jonathan A. Schweder -Jonathan A. Sternberg -Jonathan Boulle -Jonathan Camp -Jonathan Choy -Jonathan Dowland -Jonathan Lebon -Jonathan Lomas -Jonathan McCrohan -Jonathan Mueller -Jonathan Pares -Jonathan Rudenberg -Jonathan Stoppani -Jonh Wendell -Joni Sar -Joost Cassee -Jordan Arentsen -Jordan Jennings -Jordan Sissel -Jordi Massaguer Pla -Jorge Marin -Jorit Kleine-Möllhoff -Jose Diaz-Gonzalez -Joseph Anthony Pasquale Holsten -Joseph Hager -Joseph Kern -Joseph Rothrock -Josh -Josh Bodah -Josh Bonczkowski -Josh Chorlton -Josh Eveleth -Josh Hawn -Josh Horwitz -Josh Poimboeuf -Josh Soref -Josh Wilson -Josiah Kiehl -José Tomás Albornoz -Joyce Jang -JP -Julian Taylor -Julien Barbier -Julien Bisconti -Julien Bordellier -Julien Dubois -Julien Kassar -Julien Maitrehenry -Julien Pervillé -Julien Pivotto -Julio Guerra -Julio Montes -Jun Du -Jun-Ru Chang -junxu -Jussi Nummelin -Justas Brazauskas -Justen Martin -Justin Cormack -Justin Force -Justin Keller <85903732+jk-vb@users.noreply.github.com> -Justin Menga -Justin Plock -Justin Simonelis -Justin Terry -Justyn Temme -Jyrki Puttonen -Jérémy Leherpeur -Jérôme Petazzoni -Jörg Thalheim -K. Heller -Kai Blin -Kai Qiang Wu (Kennan) -Kaijie Chen -Kamil Domański -Kamjar Gerami -Kanstantsin Shautsou -Kara Alexandra -Karan Lyons -Kareem Khazem -kargakis -Karl Grzeszczak -Karol Duleba -Karthik Karanth -Karthik Nayak -Kasper Fabæch Brandt -Kate Heddleston -Katie McLaughlin -Kato Kazuyoshi -Katrina Owen -Kawsar Saiyeed -Kay Yan -kayrus -Kazuhiro Sera -Kazuyoshi Kato -Ke Li -Ke Xu -Kei Ohmura -Keith Hudgins -Keli Hu -Ken Cochrane -Ken Herner -Ken ICHIKAWA -Ken Reese -Kenfe-Mickaël Laventure -Kenjiro Nakayama -Kent Johnson -Kenta Tada -Kevin "qwazerty" Houdebert -Kevin Alvarez -Kevin Burke -Kevin Clark -Kevin Feyrer -Kevin J. Lynagh -Kevin Jing Qiu -Kevin Kern -Kevin Menard -Kevin Meredith -Kevin P. 
Kucharczyk -Kevin Parsons -Kevin Richardson -Kevin Shi -Kevin Wallace -Kevin Yap -Keyvan Fatehi -kies -Kim BKC Carlbacker -Kim Eik -Kimbro Staken -Kir Kolyshkin -Kiran Gangadharan -Kirill SIbirev -knappe -Kohei Tsuruta -Koichi Shiraishi -Konrad Kleine -Konrad Ponichtera -Konstantin Gribov -Konstantin L -Konstantin Pelykh -Kostadin Plachkov -Krasi Georgiev -Krasimir Georgiev -Kris-Mikael Krister -Kristian Haugene -Kristina Zabunova -Krystian Wojcicki -Kunal Kushwaha -Kunal Tyagi -Kyle Conroy -Kyle Linden -Kyle Squizzato -Kyle Wuolle -kyu -Lachlan Coote -Lai Jiangshan -Lajos Papp -Lakshan Perera -Lalatendu Mohanty -Lance Chen -Lance Kinley -Lars Butler -Lars Kellogg-Stedman -Lars R. Damerow -Lars-Magnus Skog -Laszlo Meszaros -Laura Frank -Laurent Bernaille -Laurent Erignoux -Laurie Voss -Leandro Siqueira -Lee Calcote -Lee Chao <932819864@qq.com> -Lee, Meng-Han -Lei Gong -Lei Jitang -Leiiwang -Len Weincier -Lennie -Leo Gallucci -Leonardo Nodari -Leonardo Taccari -Leszek Kowalski -Levi Blackstone -Levi Gross -Levi Harrison -Lewis Daly -Lewis Marshall -Lewis Peckover -Li Yi -Liam Macgillavry -Liana Lo -Liang Mingqiang -Liang-Chi Hsieh -liangwei -Liao Qingwei -Lifubang -Lihua Tang -Lily Guo -limeidan -Lin Lu -LingFaKe -Linus Heckemann -Liran Tal -Liron Levin -Liu Bo -Liu Hua -liwenqi -lixiaobing10051267 -Liz Zhang -LIZAO LI -Lizzie Dixon <_@lizzie.io> -Lloyd Dewolf -Lokesh Mandvekar -longliqiang88 <394564827@qq.com> -Lorenz Leutgeb -Lorenzo Fontana -Lotus Fenn -Louis Delossantos -Louis Opter -Luca Favatella -Luca Marturana -Luca Orlandi -Luca-Bogdan Grigorescu -Lucas Chan -Lucas Chi -Lucas Molas -Lucas Silvestre -Luciano Mores -Luis Henrique Mulinari -Luis Martínez de Bartolomé Izquierdo -Luiz Svoboda -Lukas Heeren -Lukas Waslowski -lukaspustina -Lukasz Zajaczkowski -Luke Marsden -Lyn -Lynda O'Leary -Lénaïc Huard -Ma Müller -Ma Shimiao -Mabin -Madhan Raj Mookkandy -Madhav Puri -Madhu Venugopal -Mageee -Mahesh Tiyyagura -malnick -Malte Janduda -Manfred Touron -Manfred Zabarauskas -Manjunath A Kumatagi -Mansi Nahar -Manuel Meurer -Manuel Rüger -Manuel Woelker -mapk0y -Marc Abramowitz -Marc Kuo -Marc Tamsky -Marcel Edmund Franke -Marcelo Horacio Fortino -Marcelo Salazar -Marco Hennings -Marcus Cobden -Marcus Farkas -Marcus Linke -Marcus Martins -Marcus Ramberg -Marek Goldmann -Marian Marinov -Marianna Tessel -Mario Loriedo -Marius Gundersen -Marius Sturm -Marius Voila -Mark Allen -Mark Feit -Mark Jeromin -Mark McGranaghan -Mark McKinstry -Mark Milstein -Mark Oates -Mark Parker -Mark Vainomaa -Mark West -Markan Patel -Marko Mikulicic -Marko Tibold -Markus Fix -Markus Kortlang -Martijn Dwars -Martijn van Oosterhout -Martin Braun -Martin Dojcak -Martin Honermeyer -Martin Kelly -Martin Mosegaard Amdisen -Martin Muzatko -Martin Redmond -Maru Newby -Mary Anthony -Masahito Zembutsu -Masato Ohba -Masayuki Morita -Mason Malone -Mateusz Sulima -Mathias Monnerville -Mathieu Champlon -Mathieu Le Marec - Pasquet -Mathieu Parent -Mathieu Paturel -Matt Apperson -Matt Bachmann -Matt Bajor -Matt Bentley -Matt Haggard -Matt Hoyle -Matt McCormick -Matt Moore -Matt Morrison <3maven@gmail.com> -Matt Richardson -Matt Rickard -Matt Robenolt -Matt Schurenko -Matt Williams -Matthew Heon -Matthew Lapworth -Matthew Mayer -Matthew Mosesohn -Matthew Mueller -Matthew Riley -Matthias Klumpp -Matthias Kühnle -Matthias Rampke -Matthieu Fronton -Matthieu Hauglustaine -Mattias Jernberg -Mauricio Garavaglia -mauriyouth -Max Harmathy -Max Shytikov -Max Timchenko -Maxim Fedchyshyn -Maxim Ivanov -Maxim Kulkin -Maxim Treskin -Maxime 
Petazzoni -Maximiliano Maccanti -Maxwell -Meaglith Ma -meejah -Megan Kostick -Mehul Kar -Mei ChunTao -Mengdi Gao -Menghui Chen -Mert Yazıcıoğlu -mgniu -Micah Zoltu -Michael A. Smith -Michael Beskin -Michael Bridgen -Michael Brown -Michael Chiang -Michael Crosby -Michael Currie -Michael Friis -Michael Gorsuch -Michael Grauer -Michael Holzheu -Michael Hudson-Doyle -Michael Huettermann -Michael Irwin -Michael Kuehn -Michael Käufl -Michael Neale -Michael Nussbaum -Michael Prokop -Michael Scharf -Michael Spetsiotis -Michael Stapelberg -Michael Steinert -Michael Thies -Michael Weidmann -Michael West -Michael Zhao -Michal Fojtik -Michal Gebauer -Michal Jemala -Michal Kostrzewa -Michal Minář -Michal Rostecki -Michal Wieczorek -Michaël Pailloncy -Michał Czeraszkiewicz -Michał Gryko -Michał Kosek -Michiel de Jong -Mickaël Fortunato -Mickaël Remars -Miguel Angel Fernández -Miguel Morales -Miguel Perez -Mihai Borobocea -Mihuleacc Sergiu -Mikael Davranche -Mike Brown -Mike Bush -Mike Casas -Mike Chelen -Mike Danese -Mike Dillon -Mike Dougherty -Mike Estes -Mike Gaffney -Mike Goelzer -Mike Leone -Mike Lundy -Mike MacCana -Mike Naberezny -Mike Snitzer -mikelinjie <294893458@qq.com> -Mikhail Sobolev -Miklos Szegedi -Milas Bowman -Milind Chawre -Miloslav Trmač -mingqing -Mingzhen Feng -Misty Stanley-Jones -Mitch Capper -Mizuki Urushida -mlarcher -Mohammad Banikazemi -Mohammad Nasirifar -Mohammed Aaqib Ansari -Mohit Soni -Moorthy RS -Morgan Bauer -Morgante Pell -Morgy93 -Morten Siebuhr -Morton Fox -Moysés Borges -mrfly -Mrunal Patel -Muayyad Alsadi -Muhammad Zohaib Aslam -Mustafa Akın -Muthukumar R -Máximo Cuadros -Médi-Rémi Hashim -Nace Oroz -Nahum Shalman -Nakul Pathak -Nalin Dahyabhai -Nan Monnand Deng -Naoki Orii -Natalie Parker -Natanael Copa -Natasha Jarus -Nate Brennand -Nate Eagleson -Nate Jones -Nathan Carlson -Nathan Herald -Nathan Hsieh -Nathan Kleyn -Nathan LeClaire -Nathan McCauley -Nathan Williams -Naveed Jamil -Neal McBurnett -Neil Horman -Neil Peterson -Nelson Chen -Neyazul Haque -Nghia Tran -Niall O'Higgins -Nicholas E. Rabenau -Nick Adcock -Nick DeCoursin -Nick Irvine -Nick Neisen -Nick Parker -Nick Payne -Nick Russo -Nick Stenning -Nick Stinemates -Nick Wood -NickrenREN -Nicola Kabar -Nicolas Borboën -Nicolas De Loof -Nicolas Dudebout -Nicolas Goy -Nicolas Kaiser -Nicolas Sterchele -Nicolas V Castet -Nicolás Hock Isaza -Niel Drummond -Nigel Poulton -Nik Nyby -Nikhil Chawla -NikolaMandic -Nikolas Garofil -Nikolay Edigaryev -Nikolay Milovanov -Nirmal Mehta -Nishant Totla -NIWA Hideyuki -Noah Meyerhans -Noah Treuhaft -NobodyOnSE -noducks -Nolan Darilek -Noriki Nakamura -nponeccop -Nurahmadie -Nuutti Kotivuori -nzwsch -O.S. Tezer -objectified -Odin Ugedal -Oguz Bilgic -Oh Jinkyun -Ohad Schneider -ohmystack -Ole Reifschneider -Oliver Neal -Oliver Reason -Olivier Gambier -Olle Jonsson -Olli Janatuinen -Olly Pomeroy -Omri Shiv -Onur Filiz -Oriol Francès -Oscar Bonilla <6f6231@gmail.com> -Oskar Niburski -Otto Kekäläinen -Ouyang Liduo -Ovidio Mallo -Panagiotis Moustafellos -Paolo G. 
Giarrusso -Pascal -Pascal Bach -Pascal Borreli -Pascal Hartig -Patrick Böänziger -Patrick Devine -Patrick Haas -Patrick Hemmer -Patrick Stapleton -Patrik Cyvoct -pattichen -Paul "TBBle" Hampson -Paul -paul -Paul Annesley -Paul Bellamy -Paul Bowsher -Paul Furtado -Paul Hammond -Paul Jimenez -Paul Kehrer -Paul Lietar -Paul Liljenberg -Paul Morie -Paul Nasrat -Paul Weaver -Paulo Gomes -Paulo Ribeiro -Pavel Lobashov -Pavel Matěja -Pavel Pletenev -Pavel Pospisil -Pavel Sutyrin -Pavel Tikhomirov -Pavlos Ratis -Pavol Vargovcik -Pawel Konczalski -Paweł Gronowski -Peeyush Gupta -Peggy Li -Pei Su -Peng Tao -Penghan Wang -Per Weijnitz -perhapszzy@sina.com -Pete Woods -Peter Bourgon -Peter Braden -Peter Bücker -Peter Choi -Peter Dave Hello -Peter Edge -Peter Ericson -Peter Esbensen -Peter Jaffe -Peter Kang -Peter Malmgren -Peter Salvatore -Peter Volpe -Peter Waller -Petr Švihlík -Petros Angelatos -Phil -Phil Estes -Phil Sphicas -Phil Spitler -Philip Alexander Etling -Philip Monroe -Philipp Gillé -Philipp Wahala -Philipp Weissensteiner -Phillip Alexander -phineas -pidster -Piergiuliano Bossi -Pierre -Pierre Carrier -Pierre Dal-Pra -Pierre Wacrenier -Pierre-Alain RIVIERE -Piotr Bogdan -Piotr Karbowski -Porjo -Poul Kjeldager Sørensen -Pradeep Chhetri -Pradip Dhara -Pradipta Kr. Banerjee -Prasanna Gautam -Pratik Karki -Prayag Verma -Priya Wadhwa -Projjol Banerji -Przemek Hejman -Puneet Pruthi -Pure White -pysqz -Qiang Huang -Qin TianHuan -Qinglan Peng -Quan Tian -qudongfang -Quentin Brossard -Quentin Perez -Quentin Tayssier -r0n22 -Radostin Stoyanov -Rafal Jeczalik -Rafe Colton -Raghavendra K T -Raghuram Devarakonda -Raja Sami -Rajat Pandit -Rajdeep Dua -Ralf Sippl -Ralle -Ralph Bean -Ramkumar Ramachandra -Ramon Brooker -Ramon van Alteren -RaviTeja Pothana -Ray Tsang -ReadmeCritic -realityone -Recursive Madman -Reficul -Regan McCooey -Remi Rampin -Remy Suen -Renato Riccieri Santos Zannon -Renaud Gaubert -Rhys Hiltner -Ri Xu -Ricardo N Feliciano -Rich Horwood -Rich Moyse -Rich Seymour -Richard Burnison -Richard Harvey -Richard Mathie -Richard Metzler -Richard Scothern -Richo Healey -Rick Bradley -Rick van de Loo -Rick Wieman -Rik Nijessen -Riku Voipio -Riley Guerin -Ritesh H Shukla -Riyaz Faizullabhoy -Rob Cowsill <42620235+rcowsill@users.noreply.github.com> -Rob Gulewich -Rob Vesse -Robert Bachmann -Robert Bittle -Robert Obryk -Robert Schneider -Robert Shade -Robert Stern -Robert Terhaar -Robert Wallis -Robert Wang -Roberto G. Hashioka -Roberto Muñoz Fernández -Robin Naundorf -Robin Schneider -Robin Speekenbrink -Robin Thoni -robpc -Rodolfo Carvalho -Rodrigo Campos -Rodrigo Vaz -Roel Van Nyen -Roger Peppe -Rohit Jnagal -Rohit Kadam -Rohit Kapur -Rojin George -Roland Huß -Roland Kammerer -Roland Moriz -Roma Sokolov -Roman Dudin -Roman Mazur -Roman Strashkin -Roman Volosatovs -Roman Zabaluev -Ron Smits -Ron Williams -Rong Gao -Rong Zhang -Rongxiang Song -Rony Weng -root -root -root -root -Rory Hunter -Rory McCune -Ross Boucher -Rovanion Luckey -Royce Remer -Rozhnov Alexandr -Rudolph Gottesheim -Rui Cao -Rui Lopes -Ruilin Li -Runshen Zhu -Russ Magee -Ryan Abrams -Ryan Anderson -Ryan Aslett -Ryan Barry -Ryan Belgrave -Ryan Campbell -Ryan Detzel -Ryan Fowler -Ryan Liu -Ryan McLaughlin -Ryan O'Donnell -Ryan Seto -Ryan Shea -Ryan Simmen -Ryan Stelly -Ryan Thomas -Ryan Trauntvein -Ryan Wallner -Ryan Zhang -ryancooper7 -RyanDeng -Ryo Nakao -Ryoga Saito -Rémy Greinhofer -s. 
rannou -Sabin Basyal -Sachin Joshi -Sagar Hani -Sainath Grandhi -Sakeven Jiang -Salahuddin Khan -Sally O'Malley -Sam Abed -Sam Alba -Sam Bailey -Sam J Sharpe -Sam Neirinck -Sam Reis -Sam Rijs -Sam Whited -Sambuddha Basu -Sami Wagiaalla -Samuel Andaya -Samuel Dion-Girardeau -Samuel Karp -Samuel PHAN -sanchayanghosh -Sandeep Bansal -Sankar சங்கர் -Sanket Saurav -Santhosh Manohar -sapphiredev -Sargun Dhillon -Sascha Andres -Sascha Grunert -SataQiu -Satnam Singh -Satoshi Amemiya -Satoshi Tagomori -Scott Bessler -Scott Collier -Scott Johnston -Scott Percival -Scott Stamp -Scott Walls -sdreyesg -Sean Christopherson -Sean Cronin -Sean Lee -Sean McIntyre -Sean OMeara -Sean P. Kane -Sean Rodman -Sebastiaan van Steenis -Sebastiaan van Stijn -Sebastian Höffner -Sebastian Radloff -Sebastien Goasguen -Senthil Kumar Selvaraj -Senthil Kumaran -SeongJae Park -Seongyeol Lim -Serge Hallyn -Sergey Alekseev -Sergey Evstifeev -Sergii Kabashniuk -Sergio Lopez -Serhat Gülçiçek -SeungUkLee -Sevki Hasirci -Shane Canon -Shane da Silva -Shaun Kaasten -shaunol -Shawn Landden -Shawn Siefkas -shawnhe -Shayan Pooya -Shayne Wang -Shekhar Gulati -Sheng Yang -Shengbo Song -Shengjing Zhu -Shev Yan -Shih-Yuan Lee -Shihao Xia -Shijiang Wei -Shijun Qin -Shishir Mahajan -Shoubhik Bose -Shourya Sarcar -Shu-Wai Chow -shuai-z -Shukui Yang -Sian Lerk Lau -Siarhei Rasiukevich -Sidhartha Mani -sidharthamani -Silas Sewell -Silvan Jegen -Simão Reis -Simon Barendse -Simon Eskildsen -Simon Ferquel -Simon Leinen -Simon Menke -Simon Taranto -Simon Vikstrom -Sindhu S -Sjoerd Langkemper -skanehira -Smark Meng -Solganik Alexander -Solomon Hykes -Song Gao -Soshi Katsuta -Sotiris Salloumis -Soulou -Spencer Brown -Spencer Smith -Spike Curtis -Sridatta Thatipamala -Sridhar Ratnakumar -Srini Brahmaroutu -Srinivasan Srivatsan -Staf Wagemakers -Stanislav Bondarenko -Stanislav Levin -Steeve Morin -Stefan Berger -Stefan J. Wernli -Stefan Praszalowicz -Stefan S. -Stefan Scherer -Stefan Staudenmeyer -Stefan Weil -Steffen Butzer -Stephan Spindler -Stephen Benjamin -Stephen Crosby -Stephen Day -Stephen Drake -Stephen Rust -Steve Desmond -Steve Dougherty -Steve Durrheimer -Steve Francia -Steve Koch -Steven Burgess -Steven Erenst -Steven Hartland -Steven Iveson -Steven Merrill -Steven Richards -Steven Taylor -Stéphane Este-Gracias -Stig Larsson -Su Wang -Subhajit Ghosh -Sujith Haridasan -Sun Gengze <690388648@qq.com> -Sun Jianbo -Sune Keller -Sunny Gogoi -Suryakumar Sudar -Sven Dowideit -Swapnil Daingade -Sylvain Baubeau -Sylvain Bellemare -Sébastien -Sébastien HOUZÉ -Sébastien Luttringer -Sébastien Stormacq -Sören Tempel -Tabakhase -Tadej Janež -Takuto Sato -tang0th -Tangi Colin -Tatsuki Sugiura -Tatsushi Inagaki -Taylan Isikdemir -Taylor Jones -Ted M. 
Young -Tehmasp Chaudhri -Tejaswini Duggaraju -Tejesh Mehta -Terry Chu -terryding77 <550147740@qq.com> -Thatcher Peskens -theadactyl -Thell 'Bo' Fowler -Thermionix -Thiago Alves Silva -Thijs Terlouw -Thomas Bikeev -Thomas Frössman -Thomas Gazagnaire -Thomas Graf -Thomas Grainger -Thomas Hansen -Thomas Ledos -Thomas Leonard -Thomas Léveil -Thomas Orozco -Thomas Riccardi -Thomas Schroeter -Thomas Sjögren -Thomas Swift -Thomas Tanaka -Thomas Texier -Ti Zhou -Tiago Seabra -Tianon Gravi -Tianyi Wang -Tibor Vass -Tiffany Jernigan -Tiffany Low -Till Claassen -Till Wegmüller -Tim -Tim Bart -Tim Bosse -Tim Dettrick -Tim Düsterhus -Tim Hockin -Tim Potter -Tim Ruffles -Tim Smith -Tim Terhorst -Tim Wagner -Tim Wang -Tim Waugh -Tim Wraight -Tim Zju <21651152@zju.edu.cn> -timchenxiaoyu <837829664@qq.com> -timfeirg -Timo Rothenpieler -Timothy Hobbs -tjwebb123 -tobe -Tobias Bieniek -Tobias Bradtke -Tobias Gesellchen -Tobias Klauser -Tobias Munk -Tobias Pfandzelter -Tobias Schmidt -Tobias Schwab -Todd Crane -Todd Lunter -Todd Whiteman -Toli Kuznets -Tom Barlow -Tom Booth -Tom Denham -Tom Fotherby -Tom Howe -Tom Hulihan -Tom Maaswinkel -Tom Parker -Tom Sweeney -Tom Wilkie -Tom X. Tobin -Tom Zhao -Tomas Janousek -Tomas Kral -Tomas Tomecek -Tomasz Kopczynski -Tomasz Lipinski -Tomasz Nurkiewicz -Tomek Mańko -Tommaso Visconti -Tomoya Tabuchi -Tomáš Hrčka -tonic -Tonny Xu -Tony Abboud -Tony Daws -Tony Miller -toogley -Torstein Husebø -Toshiaki Makita -Tõnis Tiigi -Trace Andreason -tracylihui <793912329@qq.com> -Trapier Marshall -Travis Cline -Travis Thieman -Trent Ogren -Trevor -Trevor Pounds -Trevor Sullivan -Trishna Guha -Tristan Carel -Troy Denton -Tudor Brindus -Ty Alexander -Tycho Andersen -Tyler Brock -Tyler Brown -Tzu-Jung Lee -uhayate -Ulysse Carion -Umesh Yadav -Utz Bacher -vagrant -Vaidas Jablonskis -Valentin Kulesh -vanderliang -Velko Ivanov -Veres Lajos -Victor Algaze -Victor Coisne -Victor Costan -Victor I. 
Wood -Victor Lyuboslavsky -Victor Marmol -Victor Palma -Victor Vieux -Victoria Bialas -Vijaya Kumar K -Vikas Choudhary -Vikram bir Singh -Viktor Stanchev -Viktor Vojnovski -VinayRaghavanKS -Vincent Batts -Vincent Bernat -Vincent Boulineau -Vincent Demeester -Vincent Giersch -Vincent Mayers -Vincent Woo -Vinod Kulkarni -Vishal Doshi -Vishnu Kannan -Vitaly Ostrosablin -Vitor Monteiro -Vivek Agarwal -Vivek Dasgupta -Vivek Goyal -Vladimir Bulyga -Vladimir Kirillov -Vladimir Pouzanov -Vladimir Rutsky -Vladimir Varankin -VladimirAus -Vladislav Kolesnikov -Vlastimil Zeman -Vojtech Vitek (V-Teq) -Walter Leibbrandt -Walter Stanish -Wang Chao -Wang Guoliang -Wang Jie -Wang Long -Wang Ping -Wang Xing -Wang Yuexiao -Wang Yumu <37442693@qq.com> -wanghuaiqing -Ward Vandewege -WarheadsSE -Wassim Dhif -Wataru Ishida -Wayne Chang -Wayne Song -Weerasak Chongnguluam -Wei Fu -Wei Wu -Wei-Ting Kuo -weipeng -weiyan -Weiyang Zhu -Wen Cheng Ma -Wendel Fleming -Wenjun Tang -Wenkai Yin -wenlxie -Wenxuan Zhao -Wenyu You <21551128@zju.edu.cn> -Wenzhi Liang -Wes Morgan -Wewang Xiaorenfine -Wiktor Kwapisiewicz -Will Dietz -Will Rouesnel -Will Weaver -willhf -William Delanoue -William Henry -William Hubbs -William Martin -William Riancho -William Thurston -Wilson Júnior -Wing-Kam Wong -WiseTrem -Wolfgang Nagele -Wolfgang Powisch -Wonjun Kim -WuLonghui -xamyzhao -Xia Wu -Xian Chaobo -Xianglin Gao -Xianjie -Xianlu Bird -Xiao YongBiao -Xiao Zhang -XiaoBing Jiang -Xiaodong Liu -Xiaodong Zhang -Xiaohua Ding -Xiaoxi He -Xiaoxu Chen -Xiaoyu Zhang -xichengliudui <1693291525@qq.com> -xiekeyang -Ximo Guanter Gonzálbez -Xinbo Weng -Xinfeng Liu -Xinzi Zhou -Xiuming Chen -Xuecong Liao -xuzhaokui -Yadnyawalkya Tale -Yahya -yalpul -YAMADA Tsuyoshi -Yamasaki Masahide -Yan Feng -Yan Zhu -Yang Bai -Yang Li -Yang Pengfei -yangchenliang -Yann Autissier -Yanqiang Miao -Yao Zaiyong -Yash Murty -Yassine Tijani -Yasunori Mahata -Yazhong Liu -Yestin Sun -Yi EungJun -Yibai Zhang -Yihang Ho -Ying Li -Yohei Ueda -Yong Tang -Yongxin Li -Yongzhi Pan -Yosef Fertel -You-Sheng Yang (楊有勝) -youcai -Youcef YEKHLEF -Youfu Zhang -Yu Changchun -Yu Chengxia -Yu Peng -Yu-Ju Hong -Yuan Sun -Yuanhong Peng -Yue Zhang -Yufei Xiong -Yuhao Fang -Yuichiro Kaneko -YujiOshima -Yunxiang Huang -Yurii Rashkovskii -Yusuf Tarık Günaydın -Yves Blusseau <90z7oey02@sneakemail.com> -Yves Junqueira -Zac Dover -Zach Borboa -Zach Gershman -Zachary Jaffee -Zain Memon -Zaiste! -Zane DeGraffenried -Zefan Li -Zen Lin(Zhinan Lin) -Zhang Kun -Zhang Wei -Zhang Wentao -ZhangHang -zhangxianwei -Zhenan Ye <21551168@zju.edu.cn> -zhenghenghuo -Zhenhai Gao -Zhenkun Bi -ZhiPeng Lu -zhipengzuo -Zhou Hao -Zhoulin Xie -Zhu Guihua -Zhu Kunjia -Zhuoyun Wei -Ziheng Liu -Zilin Du -zimbatm -Ziming Dong -ZJUshuaizhou <21551191@zju.edu.cn> -zmarouf -Zoltan Tombol -Zou Yu -zqh -Zuhayr Elahi -Zunayed Ali -Álvaro Lázaro -Átila Camurça Alves -尹吉峰 -屈骏 -徐俊杰 -慕陶 -搏通 -黄艳红00139573 -정재영 diff --git a/vendor/github.com/docker/docker/LICENSE b/vendor/github.com/docker/docker/LICENSE deleted file mode 100644 index 6d8d58fb6..000000000 --- a/vendor/github.com/docker/docker/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2013-2018 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE deleted file mode 100644 index 58b19b6d1..000000000 --- a/vendor/github.com/docker/docker/NOTICE +++ /dev/null @@ -1,19 +0,0 @@ -Docker -Copyright 2012-2017 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). - -This product contains software (https://github.com/creack/pty) developed -by Keith Rarick, licensed under the MIT License. - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. 
-It is your responsibility to ensure that your use and/or transfer does not
-violate applicable laws.
-
-For more information, please see https://www.bis.doc.gov
-
-See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/vendor/github.com/docker/docker/api/README.md b/vendor/github.com/docker/docker/api/README.md
deleted file mode 100644
index f136c3433..000000000
--- a/vendor/github.com/docker/docker/api/README.md
+++ /dev/null
@@ -1,42 +0,0 @@
-# Working on the Engine API
-
-The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon.
-
-It consists of various components in this repository:
-
-- `api/swagger.yaml` A Swagger definition of the API.
-- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this.
-- `cli/` The command-line client.
-- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs.
-- `daemon/` The daemon, which serves the API.
-
-## Swagger definition
-
-The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to:
-
-1. Automatically generate documentation.
-2. Automatically generate the Go server and client. (A work-in-progress.)
-3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc.
-
-## Updating the API documentation
-
-The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, edit this file to represent the change in the documentation.
-
-The file is split into two main sections:
-
-- `definitions`, which defines re-usable objects used in requests and responses
-- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable)
-
-To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section.
-
-There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](https://github.com/docker/docker/issues/27919).
-
-`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful when making edits to ensure you are doing the right thing.
-
-## Viewing the API documentation
-
-When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly.
-
-Run `make swagger-docs` and a preview will be running at `http://localhost`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation.
-
-The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io).
diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go
deleted file mode 100644
index bee9b875a..000000000
--- a/vendor/github.com/docker/docker/api/common.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package api // import "github.com/docker/docker/api"
-
-// Common constants for daemon and client.
-const (
-	// DefaultVersion of Current REST API
-	DefaultVersion = "1.42"
-
-	// NoBaseImageSpecifier is the symbol used by the FROM
-	// command to specify that no base image is to be used.
-	NoBaseImageSpecifier = "scratch"
-)
diff --git a/vendor/github.com/docker/docker/api/common_unix.go b/vendor/github.com/docker/docker/api/common_unix.go
deleted file mode 100644
index 19fc63d65..000000000
--- a/vendor/github.com/docker/docker/api/common_unix.go
+++ /dev/null
@@ -1,7 +0,0 @@
-//go:build !windows
-// +build !windows
-
-package api // import "github.com/docker/docker/api"
-
-// MinVersion represents Minimum REST API version supported
-const MinVersion = "1.12"
diff --git a/vendor/github.com/docker/docker/api/common_windows.go b/vendor/github.com/docker/docker/api/common_windows.go
deleted file mode 100644
index 590ba5479..000000000
--- a/vendor/github.com/docker/docker/api/common_windows.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package api // import "github.com/docker/docker/api"
-
-// MinVersion represents Minimum REST API version supported
-// Technically the first daemon API version released on Windows is v1.25 in
-// engine version 1.13. However, some clients are explicitly using downlevel
-// APIs (e.g. docker-compose v2.1 file format) and that is just too restrictive.
-// Hence also allowing 1.24 on Windows.
-const MinVersion string = "1.24"
diff --git a/vendor/github.com/docker/docker/api/swagger-gen.yaml b/vendor/github.com/docker/docker/api/swagger-gen.yaml
deleted file mode 100644
index f07a02737..000000000
--- a/vendor/github.com/docker/docker/api/swagger-gen.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-
-layout:
-  models:
-    - name: definition
-      source: asset:model
-      target: "{{ joinFilePath .Target .ModelPackage }}"
-      file_name: "{{ (snakize (pascalize .Name)) }}.go"
-  operations:
-    - name: handler
-      source: asset:serverOperation
-      target: "{{ joinFilePath .Target .APIPackage .Package }}"
-      file_name: "{{ (snakize (pascalize .Name)) }}.go"
diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml
deleted file mode 100644
index afe7a8c37..000000000
--- a/vendor/github.com/docker/docker/api/swagger.yaml
+++ /dev/null
@@ -1,12152 +0,0 @@
-# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API.
-#
-# This is used for generating API documentation and the types used by the
-# client/server. See api/README.md for more information.
-#
-# Some style notes:
-# - This file is used by ReDoc, which allows GitHub Flavored Markdown in
-#   descriptions.
-# - There is no maximum line length, for ease of editing and pretty diffs.
-# - operationIds are in the format "NounVerb", with a singular noun.
-
-swagger: "2.0"
-schemes:
-  - "http"
-  - "https"
-produces:
-  - "application/json"
-  - "text/plain"
-consumes:
-  - "application/json"
-  - "text/plain"
-basePath: "/v1.42"
-info:
-  title: "Docker Engine API"
-  version: "1.42"
-  x-logo:
-    url: "https://docs.docker.com/assets/images/logo-docker-main.png"
-  description: |
-    The Engine API is an HTTP API served by Docker Engine. It is the API the
-    Docker client uses to communicate with the Engine, so everything the Docker
-    client can do can be done with the API.
- - Most of the client's commands map directly to API endpoints (e.g. `docker ps` - is `GET /containers/json`). The notable exception is running containers, - which consists of several API calls. - - # Errors - - The API uses standard HTTP status codes to indicate the success or failure - of the API call. The body of the response will be JSON in the following - format: - - ``` - { - "message": "page not found" - } - ``` - - # Versioning - - The API is usually changed in each release, so API calls are versioned to - ensure that clients don't break. To lock to a specific version of the API, - you prefix the URL with its version, for example, call `/v1.30/info` to use - the v1.30 version of the `/info` endpoint. If the API version specified in - the URL is not supported by the daemon, a HTTP `400 Bad Request` error message - is returned. - - If you omit the version-prefix, the current version of the API (v1.42) is used. - For example, calling `/info` is the same as calling `/v1.42/info`. Using the - API without a version-prefix is deprecated and will be removed in a future release. - - Engine releases in the near future should support this version of the API, - so your client will continue to work even if it is talking to a newer Engine. - - The API uses an open schema model, which means server may add extra properties - to responses. Likewise, the server will ignore any extra query parameters and - request body properties. When you write clients, you need to ignore additional - properties in responses to ensure they do not break when talking to newer - daemons. - - - # Authentication - - Authentication for registries is handled client side. The client has to send - authentication details to various endpoints that need to communicate with - registries, such as `POST /images/(name)/push`. These are sent as - `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5) - (JSON) string with the following structure: - - ``` - { - "username": "string", - "password": "string", - "email": "string", - "serveraddress": "string" - } - ``` - - The `serveraddress` is a domain/IP without a protocol. Throughout this - structure, double quotes are required. - - If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth), - you can just pass this instead of credentials: - - ``` - { - "identitytoken": "9cbaf023786cd7..." - } - ``` - -# The tags on paths define the menu sections in the ReDoc documentation, so -# the usage of tags must make sense for that: -# - They should be singular, not plural. -# - There should not be too many tags, or the menu becomes unwieldy. For -# example, it is preferable to add a path to the "System" tag instead of -# creating a tag with a single path in it. -# - The order of tags in this list defines the order in the menu. -tags: - # Primary objects - - name: "Container" - x-displayName: "Containers" - description: | - Create and manage containers. - - name: "Image" - x-displayName: "Images" - - name: "Network" - x-displayName: "Networks" - description: | - Networks are user-defined networks that containers can be attached to. - See the [networking documentation](https://docs.docker.com/network/) - for more information. - - name: "Volume" - x-displayName: "Volumes" - description: | - Create and manage persistent storage that can be attached to containers. - - name: "Exec" - x-displayName: "Exec" - description: | - Run new commands inside running containers. 
Refer to the - [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) - for more information. - - To exec a command in a container, you first need to create an exec instance, - then start it. These two API endpoints are wrapped up in a single command-line - command, `docker exec`. - - # Swarm things - - name: "Swarm" - x-displayName: "Swarm" - description: | - Engines can be clustered together in a swarm. Refer to the - [swarm mode documentation](https://docs.docker.com/engine/swarm/) - for more information. - - name: "Node" - x-displayName: "Nodes" - description: | - Nodes are instances of the Engine participating in a swarm. Swarm mode - must be enabled for these endpoints to work. - - name: "Service" - x-displayName: "Services" - description: | - Services are the definitions of tasks to run on a swarm. Swarm mode must - be enabled for these endpoints to work. - - name: "Task" - x-displayName: "Tasks" - description: | - A task is a container running on a swarm. It is the atomic scheduling unit - of swarm. Swarm mode must be enabled for these endpoints to work. - - name: "Secret" - x-displayName: "Secrets" - description: | - Secrets are sensitive data that can be used by services. Swarm mode must - be enabled for these endpoints to work. - - name: "Config" - x-displayName: "Configs" - description: | - Configs are application configurations that can be used by services. Swarm - mode must be enabled for these endpoints to work. - # System things - - name: "Plugin" - x-displayName: "Plugins" - - name: "System" - x-displayName: "System" - -definitions: - Port: - type: "object" - description: "An open port on a container" - required: [PrivatePort, Type] - properties: - IP: - type: "string" - format: "ip-address" - description: "Host IP address that the container's port is mapped to" - PrivatePort: - type: "integer" - format: "uint16" - x-nullable: false - description: "Port on the container" - PublicPort: - type: "integer" - format: "uint16" - description: "Port exposed on the host" - Type: - type: "string" - x-nullable: false - enum: ["tcp", "udp", "sctp"] - example: - PrivatePort: 8080 - PublicPort: 80 - Type: "tcp" - - MountPoint: - type: "object" - description: | - MountPoint represents a mount point configuration inside the container. - This is used for reporting the mountpoints in use by a container. - properties: - Type: - description: | - The mount type: - - - `bind` a mount of a file or directory from the host into the container. - - `volume` a docker volume with the given `Name`. - - `tmpfs` a `tmpfs`. - - `npipe` a named pipe from the host into the container. - - `cluster` a Swarm cluster volume - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - - "npipe" - - "cluster" - example: "volume" - Name: - description: | - Name is the name reference to the underlying data defined by `Source` - e.g., the volume name. - type: "string" - example: "myvolume" - Source: - description: | - Source location of the mount. - - For volumes, this contains the storage location of the volume (within - `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains - the source (host) part of the bind-mount. For `tmpfs` mount points, this - field is empty. - type: "string" - example: "/var/lib/docker/volumes/myvolume/_data" - Destination: - description: | - Destination is the path relative to the container root (`/`) where - the `Source` is mounted inside the container. 
- type: "string" - example: "/usr/share/nginx/html/" - Driver: - description: | - Driver is the volume driver used to create the volume (if it is a volume). - type: "string" - example: "local" - Mode: - description: | - Mode is a comma separated list of options supplied by the user when - creating the bind/volume mount. - - The default is platform-specific (`"z"` on Linux, empty on Windows). - type: "string" - example: "z" - RW: - description: | - Whether the mount is mounted writable (read-write). - type: "boolean" - example: true - Propagation: - description: | - Propagation describes how mounts are propagated from the host into the - mount point, and vice-versa. Refer to the [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt) - for details. This field is not used on Windows. - type: "string" - example: "" - - DeviceMapping: - type: "object" - description: "A device mapping between the host and container" - properties: - PathOnHost: - type: "string" - PathInContainer: - type: "string" - CgroupPermissions: - type: "string" - example: - PathOnHost: "/dev/deviceName" - PathInContainer: "/dev/deviceName" - CgroupPermissions: "mrw" - - DeviceRequest: - type: "object" - description: "A request for devices to be sent to device drivers" - properties: - Driver: - type: "string" - example: "nvidia" - Count: - type: "integer" - example: -1 - DeviceIDs: - type: "array" - items: - type: "string" - example: - - "0" - - "1" - - "GPU-fef8089b-4820-abfc-e83e-94318197576e" - Capabilities: - description: | - A list of capabilities; an OR list of AND lists of capabilities. - type: "array" - items: - type: "array" - items: - type: "string" - example: - # gpu AND nvidia AND compute - - ["gpu", "nvidia", "compute"] - Options: - description: | - Driver-specific options, specified as a key/value pairs. These options - are passed directly to the driver. - type: "object" - additionalProperties: - type: "string" - - ThrottleDevice: - type: "object" - properties: - Path: - description: "Device path" - type: "string" - Rate: - description: "Rate" - type: "integer" - format: "int64" - minimum: 0 - - Mount: - type: "object" - properties: - Target: - description: "Container path." - type: "string" - Source: - description: "Mount source (e.g. a volume name, a host path)." - type: "string" - Type: - description: | - The mount type. Available types: - - - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. - - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. - - `cluster` a Swarm cluster volume - type: "string" - enum: - - "bind" - - "volume" - - "tmpfs" - - "npipe" - - "cluster" - ReadOnly: - description: "Whether the mount should be read-only." - type: "boolean" - Consistency: - description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." - type: "string" - BindOptions: - description: "Optional configuration for the `bind` type." - type: "object" - properties: - Propagation: - description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." 
- type: "string" - enum: - - "private" - - "rprivate" - - "shared" - - "rshared" - - "slave" - - "rslave" - NonRecursive: - description: "Disable recursive bind mount." - type: "boolean" - default: false - CreateMountpoint: - description: "Create mount point on host if missing" - type: "boolean" - default: false - VolumeOptions: - description: "Optional configuration for the `volume` type." - type: "object" - properties: - NoCopy: - description: "Populate volume with data from the target." - type: "boolean" - default: false - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - DriverConfig: - description: "Map of driver specific options" - type: "object" - properties: - Name: - description: "Name of the driver to use to create the volume." - type: "string" - Options: - description: "key/value map of driver specific options." - type: "object" - additionalProperties: - type: "string" - TmpfsOptions: - description: "Optional configuration for the `tmpfs` type." - type: "object" - properties: - SizeBytes: - description: "The size for the tmpfs mount in bytes." - type: "integer" - format: "int64" - Mode: - description: "The permission mode for the tmpfs mount in an integer." - type: "integer" - - RestartPolicy: - description: | - The behavior to apply when the container exits. The default is not to - restart. - - An ever increasing delay (double the previous delay, starting at 100ms) is - added before each restart to prevent flooding the server. - type: "object" - properties: - Name: - type: "string" - description: | - - Empty string means not to restart - - `no` Do not automatically restart - - `always` Always restart - - `unless-stopped` Restart always except when the user has manually stopped the container - - `on-failure` Restart only when the container exit code is non-zero - enum: - - "" - - "no" - - "always" - - "unless-stopped" - - "on-failure" - MaximumRetryCount: - type: "integer" - description: | - If `on-failure` is used, the number of times to retry before giving up. - - Resources: - description: "A container's resources (cgroups config, ulimits, etc)" - type: "object" - properties: - # Applicable to all platforms - CpuShares: - description: | - An integer value representing this container's relative CPU weight - versus other containers. - type: "integer" - Memory: - description: "Memory limit in bytes." - type: "integer" - format: "int64" - default: 0 - # Applicable to UNIX platforms - CgroupParent: - description: | - Path to `cgroups` under which the container's `cgroup` is created. If - the path is not absolute, the path is considered to be relative to the - `cgroups` path of the init process. Cgroups are created if they do not - already exist. - type: "string" - BlkioWeight: - description: "Block IO weight (relative weight)." 
- type: "integer" - minimum: 0 - maximum: 1000 - BlkioWeightDevice: - description: | - Block IO weight (relative device weight) in the form: - - ``` - [{"Path": "device_path", "Weight": weight}] - ``` - type: "array" - items: - type: "object" - properties: - Path: - type: "string" - Weight: - type: "integer" - minimum: 0 - BlkioDeviceReadBps: - description: | - Limit read rate (bytes per second) from a device, in the form: - - ``` - [{"Path": "device_path", "Rate": rate}] - ``` - type: "array" - items: - $ref: "#/definitions/ThrottleDevice" - BlkioDeviceWriteBps: - description: | - Limit write rate (bytes per second) to a device, in the form: - - ``` - [{"Path": "device_path", "Rate": rate}] - ``` - type: "array" - items: - $ref: "#/definitions/ThrottleDevice" - BlkioDeviceReadIOps: - description: | - Limit read rate (IO per second) from a device, in the form: - - ``` - [{"Path": "device_path", "Rate": rate}] - ``` - type: "array" - items: - $ref: "#/definitions/ThrottleDevice" - BlkioDeviceWriteIOps: - description: | - Limit write rate (IO per second) to a device, in the form: - - ``` - [{"Path": "device_path", "Rate": rate}] - ``` - type: "array" - items: - $ref: "#/definitions/ThrottleDevice" - CpuPeriod: - description: "The length of a CPU period in microseconds." - type: "integer" - format: "int64" - CpuQuota: - description: | - Microseconds of CPU time that the container can get in a CPU period. - type: "integer" - format: "int64" - CpuRealtimePeriod: - description: | - The length of a CPU real-time period in microseconds. Set to 0 to - allocate no time allocated to real-time tasks. - type: "integer" - format: "int64" - CpuRealtimeRuntime: - description: | - The length of a CPU real-time runtime in microseconds. Set to 0 to - allocate no time allocated to real-time tasks. - type: "integer" - format: "int64" - CpusetCpus: - description: | - CPUs in which to allow execution (e.g., `0-3`, `0,1`). - type: "string" - example: "0-3" - CpusetMems: - description: | - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only - effective on NUMA systems. - type: "string" - Devices: - description: "A list of devices to add to the container." - type: "array" - items: - $ref: "#/definitions/DeviceMapping" - DeviceCgroupRules: - description: "a list of cgroup rules to apply to the container" - type: "array" - items: - type: "string" - example: "c 13:* rwm" - DeviceRequests: - description: | - A list of requests for devices to be sent to device drivers. - type: "array" - items: - $ref: "#/definitions/DeviceRequest" - KernelMemoryTCP: - description: | - Hard limit for kernel TCP buffer memory (in bytes). Depending on the - OCI runtime in use, this option may be ignored. It is no longer supported - by the default (runc) runtime. - - This field is omitted when empty. - type: "integer" - format: "int64" - MemoryReservation: - description: "Memory soft limit in bytes." - type: "integer" - format: "int64" - MemorySwap: - description: | - Total memory limit (memory + swap). Set as `-1` to enable unlimited - swap. - type: "integer" - format: "int64" - MemorySwappiness: - description: | - Tune a container's memory swappiness behavior. Accepts an integer - between 0 and 100. - type: "integer" - format: "int64" - minimum: 0 - maximum: 100 - NanoCpus: - description: "CPU quota in units of 10-9 CPUs." - type: "integer" - format: "int64" - OomKillDisable: - description: "Disable OOM Killer for the container." 
- type: "boolean" - Init: - description: | - Run an init inside the container that forwards signals and reaps - processes. This field is omitted if empty, and the default (as - configured on the daemon) is used. - type: "boolean" - x-nullable: true - PidsLimit: - description: | - Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` - to not change. - type: "integer" - format: "int64" - x-nullable: true - Ulimits: - description: | - A list of resource limits to set in the container. For example: - - ``` - {"Name": "nofile", "Soft": 1024, "Hard": 2048} - ``` - type: "array" - items: - type: "object" - properties: - Name: - description: "Name of ulimit" - type: "string" - Soft: - description: "Soft limit" - type: "integer" - Hard: - description: "Hard limit" - type: "integer" - # Applicable to Windows - CpuCount: - description: | - The number of usable CPUs (Windows only). - - On Windows Server containers, the processor resource controls are - mutually exclusive. The order of precedence is `CPUCount` first, then - `CPUShares`, and `CPUPercent` last. - type: "integer" - format: "int64" - CpuPercent: - description: | - The usable percentage of the available CPUs (Windows only). - - On Windows Server containers, the processor resource controls are - mutually exclusive. The order of precedence is `CPUCount` first, then - `CPUShares`, and `CPUPercent` last. - type: "integer" - format: "int64" - IOMaximumIOps: - description: "Maximum IOps for the container system drive (Windows only)" - type: "integer" - format: "int64" - IOMaximumBandwidth: - description: | - Maximum IO in bytes per second for the container system drive - (Windows only). - type: "integer" - format: "int64" - - Limit: - description: | - An object describing a limit on resources which can be requested by a task. - type: "object" - properties: - NanoCPUs: - type: "integer" - format: "int64" - example: 4000000000 - MemoryBytes: - type: "integer" - format: "int64" - example: 8272408576 - Pids: - description: | - Limits the maximum number of PIDs in the container. Set `0` for unlimited. - type: "integer" - format: "int64" - default: 0 - example: 100 - - ResourceObject: - description: | - An object describing the resources which can be advertised by a node and - requested by a task. - type: "object" - properties: - NanoCPUs: - type: "integer" - format: "int64" - example: 4000000000 - MemoryBytes: - type: "integer" - format: "int64" - example: 8272408576 - GenericResources: - $ref: "#/definitions/GenericResources" - - GenericResources: - description: | - User-defined resources can be either Integer resources (e.g, `SSD=3`) or - String resources (e.g, `GPU=UUID1`). - type: "array" - items: - type: "object" - properties: - NamedResourceSpec: - type: "object" - properties: - Kind: - type: "string" - Value: - type: "string" - DiscreteResourceSpec: - type: "object" - properties: - Kind: - type: "string" - Value: - type: "integer" - format: "int64" - example: - - DiscreteResourceSpec: - Kind: "SSD" - Value: 3 - - NamedResourceSpec: - Kind: "GPU" - Value: "UUID1" - - NamedResourceSpec: - Kind: "GPU" - Value: "UUID2" - - HealthConfig: - description: "A test to perform to check that the container is healthy." - type: "object" - properties: - Test: - description: | - The test to perform. 
Possible values are: - - - `[]` inherit healthcheck from image or parent image - - `["NONE"]` disable healthcheck - - `["CMD", args...]` exec arguments directly - - `["CMD-SHELL", command]` run command with system's default shell - type: "array" - items: - type: "string" - Interval: - description: | - The time to wait between checks in nanoseconds. It should be 0 or at - least 1000000 (1 ms). 0 means inherit. - type: "integer" - format: "int64" - Timeout: - description: | - The time to wait before considering the check to have hung. It should - be 0 or at least 1000000 (1 ms). 0 means inherit. - type: "integer" - format: "int64" - Retries: - description: | - The number of consecutive failures needed to consider a container as - unhealthy. 0 means inherit. - type: "integer" - StartPeriod: - description: | - Start period for the container to initialize before starting - health-retries countdown in nanoseconds. It should be 0 or at least - 1000000 (1 ms). 0 means inherit. - type: "integer" - format: "int64" - - Health: - description: | - Health stores information about the container's healthcheck results. - type: "object" - x-nullable: true - properties: - Status: - description: | - Status is one of `none`, `starting`, `healthy` or `unhealthy` - - - "none" Indicates there is no healthcheck - - "starting" Starting indicates that the container is not yet ready - - "healthy" Healthy indicates that the container is running correctly - - "unhealthy" Unhealthy indicates that the container has a problem - type: "string" - enum: - - "none" - - "starting" - - "healthy" - - "unhealthy" - example: "healthy" - FailingStreak: - description: "FailingStreak is the number of consecutive failures" - type: "integer" - example: 0 - Log: - type: "array" - description: | - Log contains the last few results (oldest first) - items: - $ref: "#/definitions/HealthcheckResult" - - HealthcheckResult: - description: | - HealthcheckResult stores information about a single run of a healthcheck probe - type: "object" - x-nullable: true - properties: - Start: - description: | - Date and time at which this check started in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "date-time" - example: "2020-01-04T10:44:24.496525531Z" - End: - description: | - Date and time at which this check ended in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2020-01-04T10:45:21.364524523Z" - ExitCode: - description: | - ExitCode meanings: - - - `0` healthy - - `1` unhealthy - - `2` reserved (considered unhealthy) - - other values: error running probe - type: "integer" - example: 0 - Output: - description: "Output from last check" - type: "string" - - HostConfig: - description: "Container configuration that depends on the host we are running on" - allOf: - - $ref: "#/definitions/Resources" - - type: "object" - properties: - # Applicable to all platforms - Binds: - type: "array" - description: | - A list of volume bindings for this container. Each volume binding - is a string in one of these forms: - - - `host-src:container-dest[:options]` to bind-mount a host path - into the container. Both `host-src`, and `container-dest` must - be an _absolute_ path. - - `volume-name:container-dest[:options]` to bind-mount a volume - managed by a volume driver into the container. `container-dest` - must be an _absolute_ path. 
- - `options` is an optional, comma-delimited list of: - - - `nocopy` disables automatic copying of data from the container - path to the volume. The `nocopy` flag only applies to named volumes. - - `[ro|rw]` mounts a volume read-only or read-write, respectively. - If omitted or set to `rw`, volumes are mounted read-write. - - `[z|Z]` applies SELinux labels to allow or deny multiple containers - to read and write to the same volume. - - `z`: a _shared_ content label is applied to the content. This - label indicates that multiple containers can share the volume - content, for both reading and writing. - - `Z`: a _private unshared_ label is applied to the content. - This label indicates that only the current container can use - a private volume. Labeling systems such as SELinux require - proper labels to be placed on volume content that is mounted - into a container. Without a label, the security system can - prevent a container's processes from using the content. By - default, the labels set by the host operating system are not - modified. - - `[[r]shared|[r]slave|[r]private]` specifies mount - [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). - This only applies to bind-mounted volumes, not internal volumes - or named volumes. Mount propagation requires the source mount - point (the location where the source directory is mounted in the - host operating system) to have the correct propagation properties. - For shared volumes, the source mount point must be set to `shared`. - For slave volumes, the mount must be set to either `shared` or - `slave`. - items: - type: "string" - ContainerIDFile: - type: "string" - description: "Path to a file where the container ID is written" - LogConfig: - type: "object" - description: "The logging configuration for this container" - properties: - Type: - type: "string" - enum: - - "json-file" - - "syslog" - - "journald" - - "gelf" - - "fluentd" - - "awslogs" - - "splunk" - - "etwlogs" - - "none" - Config: - type: "object" - additionalProperties: - type: "string" - NetworkMode: - type: "string" - description: | - Network mode to use for this container. Supported standard values - are: `bridge`, `host`, `none`, and `container:`. Any - other value is taken as a custom network's name to which this - container should connect to. - PortBindings: - $ref: "#/definitions/PortMap" - RestartPolicy: - $ref: "#/definitions/RestartPolicy" - AutoRemove: - type: "boolean" - description: | - Automatically remove the container when the container's process - exits. This has no effect if `RestartPolicy` is set. - VolumeDriver: - type: "string" - description: "Driver that this container uses to mount volumes." - VolumesFrom: - type: "array" - description: | - A list of volumes to inherit from another container, specified in - the form `[:]`. - items: - type: "string" - Mounts: - description: | - Specification for mounts to be added to the container. - type: "array" - items: - $ref: "#/definitions/Mount" - ConsoleSize: - type: "array" - description: | - Initial console size, as an `[height, width]` array. - x-nullable: true - minItems: 2 - maxItems: 2 - items: - type: "integer" - minimum: 0 - - # Applicable to UNIX platforms - CapAdd: - type: "array" - description: | - A list of kernel capabilities to add to the container. Conflicts - with option 'Capabilities'. - items: - type: "string" - CapDrop: - type: "array" - description: | - A list of kernel capabilities to drop from the container. Conflicts - with option 'Capabilities'. 
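The `Binds` entries described above are plain `src:dest[:options]` strings, so it is easy to drop the required absolute path or misplace an option. As an illustrative sketch only (not part of this spec or patch; the host paths and volume names are invented), the following Go snippet assembles a few bind strings in the two documented forms and prints the JSON fragment a client would place in `HostConfig`:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// bind joins a source, a destination and optional comma-separated options
// into the "src:dest[:options]" form used by HostConfig.Binds.
func bind(src, dest string, opts ...string) string {
	parts := []string{src, dest}
	if len(opts) > 0 {
		parts = append(parts, strings.Join(opts, ","))
	}
	return strings.Join(parts, ":")
}

func main() {
	// Hypothetical host paths and volume names, for illustration only.
	hostConfig := map[string]interface{}{
		"Binds": []string{
			bind("/srv/app/config", "/etc/app", "ro"),         // host path, read-only
			bind("app-data", "/var/lib/app", "rw", "nocopy"),  // named volume, skip initial copy
			bind("/srv/app/cache", "/cache", "rw", "rshared"), // bind mount with shared propagation
		},
	}
	out, _ := json.MarshalIndent(hostConfig, "", "  ")
	fmt.Println(string(out)) // fragment of a HostConfig body
}
```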
- items: - type: "string" - CgroupnsMode: - type: "string" - enum: - - "private" - - "host" - description: | - cgroup namespace mode for the container. Possible values are: - - - `"private"`: the container runs in its own private cgroup namespace - - `"host"`: use the host system's cgroup namespace - - If not specified, the daemon default is used, which can either be `"private"` - or `"host"`, depending on daemon version, kernel support and configuration. - Dns: - type: "array" - description: "A list of DNS servers for the container to use." - items: - type: "string" - DnsOptions: - type: "array" - description: "A list of DNS options." - items: - type: "string" - DnsSearch: - type: "array" - description: "A list of DNS search domains." - items: - type: "string" - ExtraHosts: - type: "array" - description: | - A list of hostnames/IP mappings to add to the container's `/etc/hosts` - file. Specified in the form `["hostname:IP"]`. - items: - type: "string" - GroupAdd: - type: "array" - description: | - A list of additional groups that the container process will run as. - items: - type: "string" - IpcMode: - type: "string" - description: | - IPC sharing mode for the container. Possible values are: - - - `"none"`: own private IPC namespace, with /dev/shm not mounted - - `"private"`: own private IPC namespace - - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers - - `"container:"`: join another (shareable) container's IPC namespace - - `"host"`: use the host system's IPC namespace - - If not specified, daemon default is used, which can either be `"private"` - or `"shareable"`, depending on daemon version and configuration. - Cgroup: - type: "string" - description: "Cgroup to use for the container." - Links: - type: "array" - description: | - A list of links for the container in the form `container_name:alias`. - items: - type: "string" - OomScoreAdj: - type: "integer" - description: | - An integer value containing the score given to the container in - order to tune OOM killer preferences. - example: 500 - PidMode: - type: "string" - description: | - Set the PID (Process) Namespace mode for the container. It can be - either: - - - `"container:"`: joins another container's PID namespace - - `"host"`: use the host's PID namespace inside the container - Privileged: - type: "boolean" - description: "Gives the container full access to the host." - PublishAllPorts: - type: "boolean" - description: | - Allocates an ephemeral host port for all of a container's - exposed ports. - - Ports are de-allocated when the container stops and allocated when - the container starts. The allocated port might be changed when - restarting the container. - - The port is selected from the ephemeral port range that depends on - the kernel. For example, on Linux the range is defined by - `/proc/sys/net/ipv4/ip_local_port_range`. - ReadonlyRootfs: - type: "boolean" - description: "Mount the container's root filesystem as read only." - SecurityOpt: - type: "array" - description: | - A list of string values to customize labels for MLS systems, such - as SELinux. - items: - type: "string" - StorageOpt: - type: "object" - description: | - Storage driver options for this container, in the form `{"size": "120G"}`. - additionalProperties: - type: "string" - Tmpfs: - type: "object" - description: | - A map of container directories which should be replaced by tmpfs - mounts, and their corresponding mount options. 
For example: - - ``` - { "/run": "rw,noexec,nosuid,size=65536k" } - ``` - additionalProperties: - type: "string" - UTSMode: - type: "string" - description: "UTS namespace to use for the container." - UsernsMode: - type: "string" - description: | - Sets the usernamespace mode for the container when usernamespace - remapping option is enabled. - ShmSize: - type: "integer" - description: | - Size of `/dev/shm` in bytes. If omitted, the system uses 64MB. - minimum: 0 - Sysctls: - type: "object" - description: | - A list of kernel parameters (sysctls) to set in the container. - For example: - - ``` - {"net.ipv4.ip_forward": "1"} - ``` - additionalProperties: - type: "string" - Runtime: - type: "string" - description: "Runtime to use with this container." - # Applicable to Windows - Isolation: - type: "string" - description: | - Isolation technology of the container. (Windows only) - enum: - - "default" - - "process" - - "hyperv" - MaskedPaths: - type: "array" - description: | - The list of paths to be masked inside the container (this overrides - the default set of paths). - items: - type: "string" - ReadonlyPaths: - type: "array" - description: | - The list of paths to be set as read-only inside the container - (this overrides the default set of paths). - items: - type: "string" - - ContainerConfig: - description: | - Configuration for a container that is portable between hosts. - - When used as `ContainerConfig` field in an image, `ContainerConfig` is an - optional field containing the configuration of the container that was last - committed when creating the image. - - Previous versions of Docker builder used this field to store build cache, - and it is not in active use anymore. - type: "object" - properties: - Hostname: - description: | - The hostname to use for the container, as a valid RFC 1123 hostname. - type: "string" - example: "439f4e91bd1d" - Domainname: - description: | - The domain name to use for the container. - type: "string" - User: - description: "The user that commands are run as inside the container." - type: "string" - AttachStdin: - description: "Whether to attach to `stdin`." - type: "boolean" - default: false - AttachStdout: - description: "Whether to attach to `stdout`." - type: "boolean" - default: true - AttachStderr: - description: "Whether to attach to `stderr`." - type: "boolean" - default: true - ExposedPorts: - description: | - An object mapping ports to an empty object in the form: - - `{"/": {}}` - type: "object" - x-nullable: true - additionalProperties: - type: "object" - enum: - - {} - default: {} - example: { - "80/tcp": {}, - "443/tcp": {} - } - Tty: - description: | - Attach standard streams to a TTY, including `stdin` if it is not closed. - type: "boolean" - default: false - OpenStdin: - description: "Open `stdin`" - type: "boolean" - default: false - StdinOnce: - description: "Close `stdin` after one attached client disconnects" - type: "boolean" - default: false - Env: - description: | - A list of environment variables to set inside the container in the - form `["VAR=value", ...]`. A variable without `=` is removed from the - environment, rather than to have an empty value. - type: "array" - items: - type: "string" - example: - - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - Cmd: - description: | - Command to run specified as a string or an array of strings. 
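As a hedged illustration of the `ContainerConfig` fields documented above (the image name reuses the spec's own example; the health command and environment value are invented), this Go sketch marshals a minimal body with `Env`, `ExposedPorts` keyed in the `80/tcp` style shown above, and a `Healthcheck` in the `HealthConfig` shape defined earlier, with durations expressed in nanoseconds:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

func main() {
	// Illustrative only: a fragment of a container-create body using the
	// schema fields described above. Durations are nanoseconds, so
	// time.Duration values can be converted directly.
	config := map[string]interface{}{
		"Image":        "example-image:1.0",
		"Env":          []string{"APP_MODE=production"},
		"ExposedPorts": map[string]struct{}{"80/tcp": {}, "443/tcp": {}},
		"Healthcheck": map[string]interface{}{
			"Test":     []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
			"Interval": int64(30 * time.Second), // 30s in nanoseconds
			"Timeout":  int64(3 * time.Second),  // 3s in nanoseconds
			"Retries":  3,
		},
	}
	out, _ := json.MarshalIndent(config, "", "  ")
	fmt.Println(string(out))
}
```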
- type: "array" - items: - type: "string" - example: ["/bin/sh"] - Healthcheck: - $ref: "#/definitions/HealthConfig" - ArgsEscaped: - description: "Command is already escaped (Windows only)" - type: "boolean" - default: false - example: false - x-nullable: true - Image: - description: | - The name (or reference) of the image to use when creating the container, - or which was used when the container was created. - type: "string" - example: "example-image:1.0" - Volumes: - description: | - An object mapping mount point paths inside the container to empty - objects. - type: "object" - additionalProperties: - type: "object" - enum: - - {} - default: {} - WorkingDir: - description: "The working directory for commands to run in." - type: "string" - example: "/public/" - Entrypoint: - description: | - The entry point for the container as a string or an array of strings. - - If the array consists of exactly one empty string (`[""]`) then the - entry point is reset to system default (i.e., the entry point used by - docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). - type: "array" - items: - type: "string" - example: [] - NetworkDisabled: - description: "Disable networking for the container." - type: "boolean" - x-nullable: true - MacAddress: - description: "MAC address of the container." - type: "string" - x-nullable: true - OnBuild: - description: | - `ONBUILD` metadata that were defined in the image's `Dockerfile`. - type: "array" - x-nullable: true - items: - type: "string" - example: [] - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - StopSignal: - description: | - Signal to stop a container as a string or unsigned integer. - type: "string" - example: "SIGTERM" - x-nullable: true - StopTimeout: - description: "Timeout to stop a container in seconds." - type: "integer" - default: 10 - x-nullable: true - Shell: - description: | - Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. - type: "array" - x-nullable: true - items: - type: "string" - example: ["/bin/sh", "-c"] - - NetworkingConfig: - description: | - NetworkingConfig represents the container's networking configuration for - each of its interfaces. - It is used for the networking configs specified in the `docker create` - and `docker network connect` commands. - type: "object" - properties: - EndpointsConfig: - description: | - A mapping of network name to endpoint configuration for that network. - type: "object" - additionalProperties: - $ref: "#/definitions/EndpointSettings" - example: - # putting an example here, instead of using the example values from - # /definitions/EndpointSettings, because containers/create currently - # does not support attaching to multiple networks, so the example request - # would be confusing if it showed that multiple networks can be contained - # in the EndpointsConfig. 
- # TODO remove once we support multiple networks on container create (see https://github.com/moby/moby/blob/07e6b843594e061f82baa5fa23c2ff7d536c2a05/daemon/create.go#L323) - EndpointsConfig: - isolated_nw: - IPAMConfig: - IPv4Address: "172.20.30.33" - IPv6Address: "2001:db8:abcd::3033" - LinkLocalIPs: - - "169.254.34.68" - - "fe80::3468" - Links: - - "container_1" - - "container_2" - Aliases: - - "server_x" - - "server_y" - - NetworkSettings: - description: "NetworkSettings exposes the network settings in the API" - type: "object" - properties: - Bridge: - description: Name of the network's bridge (for example, `docker0`). - type: "string" - example: "docker0" - SandboxID: - description: SandboxID uniquely represents a container's network stack. - type: "string" - example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3" - HairpinMode: - description: | - Indicates if hairpin NAT should be enabled on the virtual interface. - type: "boolean" - example: false - LinkLocalIPv6Address: - description: IPv6 unicast address using the link-local prefix. - type: "string" - example: "fe80::42:acff:fe11:1" - LinkLocalIPv6PrefixLen: - description: Prefix length of the IPv6 unicast address. - type: "integer" - example: "64" - Ports: - $ref: "#/definitions/PortMap" - SandboxKey: - description: SandboxKey identifies the sandbox - type: "string" - example: "/var/run/docker/netns/8ab54b426c38" - - # TODO is SecondaryIPAddresses actually used? - SecondaryIPAddresses: - description: "" - type: "array" - items: - $ref: "#/definitions/Address" - x-nullable: true - - # TODO is SecondaryIPv6Addresses actually used? - SecondaryIPv6Addresses: - description: "" - type: "array" - items: - $ref: "#/definitions/Address" - x-nullable: true - - # TODO properties below are part of DefaultNetworkSettings, which is - # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12 - EndpointID: - description: | - EndpointID uniquely represents a service endpoint in a Sandbox. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" - Gateway: - description: | - Gateway address for the default "bridge" network. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "172.17.0.1" - GlobalIPv6Address: - description: | - Global IPv6 address for the default "bridge" network. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "2001:db8::5689" - GlobalIPv6PrefixLen: - description: | - Mask length of the global IPv6 address. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "integer" - example: 64 - IPAddress: - description: | - IPv4 address for the default "bridge" network. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "172.17.0.4" - IPPrefixLen: - description: | - Mask length of the IPv4 address. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "integer" - example: 16 - IPv6Gateway: - description: | - IPv6 gateway address for this network. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "2001:db8:2::100" - MacAddress: - description: | - MAC address for the container on the default "bridge" network. - -
- - > **Deprecated**: This field is only propagated when attached to the - > default "bridge" network. Use the information from the "bridge" - > network inside the `Networks` map instead, which contains the same - > information. This field was deprecated in Docker 1.9 and is scheduled - > to be removed in Docker 17.12.0 - type: "string" - example: "02:42:ac:11:00:04" - Networks: - description: | - Information about all networks that the container is connected to. - type: "object" - additionalProperties: - $ref: "#/definitions/EndpointSettings" - - Address: - description: Address represents an IPv4 or IPv6 IP address. - type: "object" - properties: - Addr: - description: IP address. - type: "string" - PrefixLen: - description: Mask length of the IP address. - type: "integer" - - PortMap: - description: | - PortMap describes the mapping of container ports to host ports, using the - container's port-number and protocol as key in the format `/`, - for example, `80/udp`. - - If a container's port is mapped for multiple protocols, separate entries - are added to the mapping table. - type: "object" - additionalProperties: - type: "array" - x-nullable: true - items: - $ref: "#/definitions/PortBinding" - example: - "443/tcp": - - HostIp: "127.0.0.1" - HostPort: "4443" - "80/tcp": - - HostIp: "0.0.0.0" - HostPort: "80" - - HostIp: "0.0.0.0" - HostPort: "8080" - "80/udp": - - HostIp: "0.0.0.0" - HostPort: "80" - "53/udp": - - HostIp: "0.0.0.0" - HostPort: "53" - "2377/tcp": null - - PortBinding: - description: | - PortBinding represents a binding between a host IP address and a host - port. - type: "object" - properties: - HostIp: - description: "Host IP address that the container's port is mapped to." - type: "string" - example: "127.0.0.1" - HostPort: - description: "Host port number that the container's port is mapped to." - type: "string" - example: "4443" - - GraphDriverData: - description: | - Information about the storage driver used to store the container's and - image's filesystem. - type: "object" - required: [Name, Data] - properties: - Name: - description: "Name of the storage driver." - type: "string" - x-nullable: false - example: "overlay2" - Data: - description: | - Low-level storage metadata, provided as key/value pairs. - - This information is driver-specific, and depends on the storage-driver - in use, and should be used for informational purposes only. - type: "object" - x-nullable: false - additionalProperties: - type: "string" - example: { - "MergedDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/merged", - "UpperDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/diff", - "WorkDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work" - } - - ImageInspect: - description: | - Information about an image in the local image cache. - type: "object" - properties: - Id: - description: | - ID is the content-addressable ID of an image. - - This identifier is a content-addressable digest calculated from the - image's configuration (which includes the digests of layers used by - the image). - - Note that this digest differs from the `RepoDigests` below, which - holds digests of image manifests that reference the image. - type: "string" - x-nullable: false - example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" - RepoTags: - description: | - List of image names/tags in the local image cache that reference this - image. 
- - Multiple image tags can refer to the same image, and this list may be - empty if no tags reference the image, in which case the image is - "untagged", in which case it can still be referenced by its ID. - type: "array" - items: - type: "string" - example: - - "example:1.0" - - "example:latest" - - "example:stable" - - "internal.registry.example.com:5000/example:1.0" - RepoDigests: - description: | - List of content-addressable digests of locally available image manifests - that the image is referenced from. Multiple manifests can refer to the - same image. - - These digests are usually only available if the image was either pulled - from a registry, or if the image was pushed to a registry, which is when - the manifest is generated and its digest calculated. - type: "array" - items: - type: "string" - example: - - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" - - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" - Parent: - description: | - ID of the parent image. - - Depending on how the image was created, this field may be empty and - is only set for images that were built/created locally. This field - is empty if the image was pulled from an image registry. - type: "string" - x-nullable: false - example: "" - Comment: - description: | - Optional message that was set when committing or importing the image. - type: "string" - x-nullable: false - example: "" - Created: - description: | - Date and time at which the image was created, formatted in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - x-nullable: false - example: "2022-02-04T21:20:12.497794809Z" - Container: - description: | - The ID of the container that was used to create the image. - - Depending on how the image was created, this field may be empty. - type: "string" - x-nullable: false - example: "65974bc86f1770ae4bff79f651ebdbce166ae9aada632ee3fa9af3a264911735" - ContainerConfig: - $ref: "#/definitions/ContainerConfig" - DockerVersion: - description: | - The version of Docker that was used to build the image. - - Depending on how the image was created, this field may be empty. - type: "string" - x-nullable: false - example: "20.10.7" - Author: - description: | - Name of the author that was specified when committing the image, or as - specified through MAINTAINER (deprecated) in the Dockerfile. - type: "string" - x-nullable: false - example: "" - Config: - $ref: "#/definitions/ContainerConfig" - Architecture: - description: | - Hardware CPU architecture that the image runs on. - type: "string" - x-nullable: false - example: "arm" - Variant: - description: | - CPU architecture variant (presently ARM-only). - type: "string" - x-nullable: true - example: "v7" - Os: - description: | - Operating System the image is built to run on. - type: "string" - x-nullable: false - example: "linux" - OsVersion: - description: | - Operating System version the image is built to run on (especially - for Windows). - type: "string" - example: "" - x-nullable: true - Size: - description: | - Total size of the image including all layers it is composed of. - type: "integer" - format: "int64" - x-nullable: false - example: 1239828 - VirtualSize: - description: | - Total size of the image including all layers it is composed of. - - In versions of Docker before v1.10, this field was calculated from - the image itself and all of its parent images. 
Docker v1.10 and up - store images self-contained, and no longer use a parent-chain, making - this field an equivalent of the Size field. - - This field is kept for backward compatibility, but may be removed in - a future version of the API. - type: "integer" - format: "int64" - x-nullable: false - example: 1239828 - GraphDriver: - $ref: "#/definitions/GraphDriverData" - RootFS: - description: | - Information about the image's RootFS, including the layer IDs. - type: "object" - required: [Type] - properties: - Type: - type: "string" - x-nullable: false - example: "layers" - Layers: - type: "array" - items: - type: "string" - example: - - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" - - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" - Metadata: - description: | - Additional metadata of the image in the local cache. This information - is local to the daemon, and not part of the image itself. - type: "object" - properties: - LastTagTime: - description: | - Date and time at which the image was last tagged in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - - This information is only available if the image was tagged locally, - and omitted otherwise. - type: "string" - format: "dateTime" - example: "2022-02-28T14:40:02.623929178Z" - x-nullable: true - ImageSummary: - type: "object" - required: - - Id - - ParentId - - RepoTags - - RepoDigests - - Created - - Size - - SharedSize - - VirtualSize - - Labels - - Containers - properties: - Id: - description: | - ID is the content-addressable ID of an image. - - This identifier is a content-addressable digest calculated from the - image's configuration (which includes the digests of layers used by - the image). - - Note that this digest differs from the `RepoDigests` below, which - holds digests of image manifests that reference the image. - type: "string" - x-nullable: false - example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" - ParentId: - description: | - ID of the parent image. - - Depending on how the image was created, this field may be empty and - is only set for images that were built/created locally. This field - is empty if the image was pulled from an image registry. - type: "string" - x-nullable: false - example: "" - RepoTags: - description: | - List of image names/tags in the local image cache that reference this - image. - - Multiple image tags can refer to the same image, and this list may be - empty if no tags reference the image, in which case the image is - "untagged", in which case it can still be referenced by its ID. - type: "array" - x-nullable: false - items: - type: "string" - example: - - "example:1.0" - - "example:latest" - - "example:stable" - - "internal.registry.example.com:5000/example:1.0" - RepoDigests: - description: | - List of content-addressable digests of locally available image manifests - that the image is referenced from. Multiple manifests can refer to the - same image. - - These digests are usually only available if the image was either pulled - from a registry, or if the image was pushed to a registry, which is when - the manifest is generated and its digest calculated. 
- type: "array" - x-nullable: false - items: - type: "string" - example: - - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" - - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" - Created: - description: | - Date and time at which the image was created as a Unix timestamp - (number of seconds sinds EPOCH). - type: "integer" - x-nullable: false - example: "1644009612" - Size: - description: | - Total size of the image including all layers it is composed of. - type: "integer" - format: "int64" - x-nullable: false - example: 172064416 - SharedSize: - description: | - Total size of image layers that are shared between this image and other - images. - - This size is not calculated by default. `-1` indicates that the value - has not been set / calculated. - type: "integer" - format: "int64" - x-nullable: false - example: 1239828 - VirtualSize: - description: | - Total size of the image including all layers it is composed of. - - In versions of Docker before v1.10, this field was calculated from - the image itself and all of its parent images. Docker v1.10 and up - store images self-contained, and no longer use a parent-chain, making - this field an equivalent of the Size field. - - This field is kept for backward compatibility, but may be removed in - a future version of the API. - type: "integer" - format: "int64" - x-nullable: false - example: 172064416 - Labels: - description: "User-defined key/value metadata." - type: "object" - x-nullable: false - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Containers: - description: | - Number of containers using this image. Includes both stopped and running - containers. - - This size is not calculated by default, and depends on which API endpoint - is used. `-1` indicates that the value has not been set / calculated. - x-nullable: false - type: "integer" - example: 2 - - AuthConfig: - type: "object" - properties: - username: - type: "string" - password: - type: "string" - email: - type: "string" - serveraddress: - type: "string" - example: - username: "hannibal" - password: "xxxx" - serveraddress: "https://index.docker.io/v1/" - - ProcessConfig: - type: "object" - properties: - privileged: - type: "boolean" - user: - type: "string" - tty: - type: "boolean" - entrypoint: - type: "string" - arguments: - type: "array" - items: - type: "string" - - Volume: - type: "object" - required: [Name, Driver, Mountpoint, Labels, Scope, Options] - properties: - Name: - type: "string" - description: "Name of the volume." - x-nullable: false - example: "tardis" - Driver: - type: "string" - description: "Name of the volume driver used by the volume." - x-nullable: false - example: "custom" - Mountpoint: - type: "string" - description: "Mount path of the volume on the host." - x-nullable: false - example: "/var/lib/docker/volumes/tardis" - CreatedAt: - type: "string" - format: "dateTime" - description: "Date/Time the volume was created." - example: "2016-06-07T20:31:11.853781916Z" - Status: - type: "object" - description: | - Low-level details about the volume, provided by the volume driver. - Details are returned as a map with key/value pairs: - `{"key":"value","key2":"value2"}`. - - The `Status` field is optional, and is omitted if the volume driver - does not support this feature. 
- additionalProperties: - type: "object" - example: - hello: "world" - Labels: - type: "object" - description: "User-defined key/value metadata." - x-nullable: false - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Scope: - type: "string" - description: | - The level at which the volume exists. Either `global` for cluster-wide, - or `local` for machine level. - default: "local" - x-nullable: false - enum: ["local", "global"] - example: "local" - ClusterVolume: - $ref: "#/definitions/ClusterVolume" - Options: - type: "object" - description: | - The driver specific options used when creating the volume. - additionalProperties: - type: "string" - example: - device: "tmpfs" - o: "size=100m,uid=1000" - type: "tmpfs" - UsageData: - type: "object" - x-nullable: true - x-go-name: "UsageData" - required: [Size, RefCount] - description: | - Usage details about the volume. This information is used by the - `GET /system/df` endpoint, and omitted in other endpoints. - properties: - Size: - type: "integer" - format: "int64" - default: -1 - description: | - Amount of disk space used by the volume (in bytes). This information - is only available for volumes created with the `"local"` volume - driver. For volumes created with other volume drivers, this field - is set to `-1` ("not available") - x-nullable: false - RefCount: - type: "integer" - format: "int64" - default: -1 - description: | - The number of containers referencing this volume. This field - is set to `-1` if the reference-count is not available. - x-nullable: false - - VolumeCreateOptions: - description: "Volume configuration" - type: "object" - title: "VolumeConfig" - x-go-name: "CreateOptions" - properties: - Name: - description: | - The new volume's name. If not specified, Docker generates a name. - type: "string" - x-nullable: false - example: "tardis" - Driver: - description: "Name of the volume driver to use." - type: "string" - default: "local" - x-nullable: false - example: "custom" - DriverOpts: - description: | - A mapping of driver options and values. These options are - passed directly to the driver and are driver specific. - type: "object" - additionalProperties: - type: "string" - example: - device: "tmpfs" - o: "size=100m,uid=1000" - type: "tmpfs" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - ClusterVolumeSpec: - $ref: "#/definitions/ClusterVolumeSpec" - - VolumeListResponse: - type: "object" - title: "VolumeListResponse" - x-go-name: "ListResponse" - description: "Volume list response" - properties: - Volumes: - type: "array" - description: "List of volumes" - items: - $ref: "#/definitions/Volume" - Warnings: - type: "array" - description: | - Warnings that occurred when fetching the list of volumes. 
- items: - type: "string" - example: [] - - Network: - type: "object" - properties: - Name: - type: "string" - Id: - type: "string" - Created: - type: "string" - format: "dateTime" - Scope: - type: "string" - Driver: - type: "string" - EnableIPv6: - type: "boolean" - IPAM: - $ref: "#/definitions/IPAM" - Internal: - type: "boolean" - Attachable: - type: "boolean" - Ingress: - type: "boolean" - Containers: - type: "object" - additionalProperties: - $ref: "#/definitions/NetworkContainer" - Options: - type: "object" - additionalProperties: - type: "string" - Labels: - type: "object" - additionalProperties: - type: "string" - example: - Name: "net01" - Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" - Created: "2016-10-19T04:33:30.360899459Z" - Scope: "local" - Driver: "bridge" - EnableIPv6: false - IPAM: - Driver: "default" - Config: - - Subnet: "172.19.0.0/16" - Gateway: "172.19.0.1" - Options: - foo: "bar" - Internal: false - Attachable: false - Ingress: false - Containers: - 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: - Name: "test" - EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" - MacAddress: "02:42:ac:13:00:02" - IPv4Address: "172.19.0.2/16" - IPv6Address: "" - Options: - com.docker.network.bridge.default_bridge: "true" - com.docker.network.bridge.enable_icc: "true" - com.docker.network.bridge.enable_ip_masquerade: "true" - com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" - com.docker.network.bridge.name: "docker0" - com.docker.network.driver.mtu: "1500" - Labels: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - IPAM: - type: "object" - properties: - Driver: - description: "Name of the IPAM driver to use." - type: "string" - default: "default" - Config: - description: | - List of IPAM configuration options, specified as a map: - - ``` - {"Subnet": , "IPRange": , "Gateway": , "AuxAddress": } - ``` - type: "array" - items: - $ref: "#/definitions/IPAMConfig" - Options: - description: "Driver-specific options, specified as a map." - type: "object" - additionalProperties: - type: "string" - - IPAMConfig: - type: "object" - properties: - Subnet: - type: "string" - IPRange: - type: "string" - Gateway: - type: "string" - AuxiliaryAddresses: - type: "object" - additionalProperties: - type: "string" - - NetworkContainer: - type: "object" - properties: - Name: - type: "string" - EndpointID: - type: "string" - MacAddress: - type: "string" - IPv4Address: - type: "string" - IPv6Address: - type: "string" - - BuildInfo: - type: "object" - properties: - id: - type: "string" - stream: - type: "string" - error: - type: "string" - errorDetail: - $ref: "#/definitions/ErrorDetail" - status: - type: "string" - progress: - type: "string" - progressDetail: - $ref: "#/definitions/ProgressDetail" - aux: - $ref: "#/definitions/ImageID" - - BuildCache: - type: "object" - description: | - BuildCache contains information about a build cache record. - properties: - ID: - type: "string" - description: | - Unique ID of the build cache record. - example: "ndlpt0hhvkqcdfkputsk4cq9c" - Parent: - description: | - ID of the parent build cache record. - - > **Deprecated**: This field is deprecated, and omitted if empty. - type: "string" - x-nullable: true - example: "" - Parents: - description: | - List of parent build cache record IDs. - type: "array" - items: - type: "string" - x-nullable: true - example: ["hw53o5aio51xtltp5xjp8v7fx"] - Type: - type: "string" - description: | - Cache record type. 
- example: "regular" - # see https://github.com/moby/buildkit/blob/fce4a32258dc9d9664f71a4831d5de10f0670677/client/diskusage.go#L75-L84 - enum: - - "internal" - - "frontend" - - "source.local" - - "source.git.checkout" - - "exec.cachemount" - - "regular" - Description: - type: "string" - description: | - Description of the build-step that produced the build cache. - example: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" - InUse: - type: "boolean" - description: | - Indicates if the build cache is in use. - example: false - Shared: - type: "boolean" - description: | - Indicates if the build cache is shared. - example: true - Size: - description: | - Amount of disk space used by the build cache (in bytes). - type: "integer" - example: 51 - CreatedAt: - description: | - Date and time at which the build cache was created in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2016-08-18T10:44:24.496525531Z" - LastUsedAt: - description: | - Date and time at which the build cache was last used in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - x-nullable: true - example: "2017-08-09T07:09:37.632105588Z" - UsageCount: - type: "integer" - example: 26 - - ImageID: - type: "object" - description: "Image ID or Digest" - properties: - ID: - type: "string" - example: - ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" - - CreateImageInfo: - type: "object" - properties: - id: - type: "string" - error: - type: "string" - errorDetail: - $ref: "#/definitions/ErrorDetail" - status: - type: "string" - progress: - type: "string" - progressDetail: - $ref: "#/definitions/ProgressDetail" - - PushImageInfo: - type: "object" - properties: - error: - type: "string" - status: - type: "string" - progress: - type: "string" - progressDetail: - $ref: "#/definitions/ProgressDetail" - - ErrorDetail: - type: "object" - properties: - code: - type: "integer" - message: - type: "string" - - ProgressDetail: - type: "object" - properties: - current: - type: "integer" - total: - type: "integer" - - ErrorResponse: - description: "Represents an error." - type: "object" - required: ["message"] - properties: - message: - description: "The error message." - type: "string" - x-nullable: false - example: - message: "Something went wrong." - - IdResponse: - description: "Response to an API call that returns just an Id" - type: "object" - required: ["Id"] - properties: - Id: - description: "The id of the newly created object." - type: "string" - x-nullable: false - - EndpointSettings: - description: "Configuration for a network endpoint." - type: "object" - properties: - # Configurations - IPAMConfig: - $ref: "#/definitions/EndpointIPAMConfig" - Links: - type: "array" - items: - type: "string" - example: - - "container_1" - - "container_2" - Aliases: - type: "array" - items: - type: "string" - example: - - "server_x" - - "server_y" - - # Operational data - NetworkID: - description: | - Unique ID of the network. - type: "string" - example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a" - EndpointID: - description: | - Unique ID for the service endpoint in a Sandbox. - type: "string" - example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" - Gateway: - description: | - Gateway address for this network. 
- type: "string" - example: "172.17.0.1" - IPAddress: - description: | - IPv4 address. - type: "string" - example: "172.17.0.4" - IPPrefixLen: - description: | - Mask length of the IPv4 address. - type: "integer" - example: 16 - IPv6Gateway: - description: | - IPv6 gateway address. - type: "string" - example: "2001:db8:2::100" - GlobalIPv6Address: - description: | - Global IPv6 address. - type: "string" - example: "2001:db8::5689" - GlobalIPv6PrefixLen: - description: | - Mask length of the global IPv6 address. - type: "integer" - format: "int64" - example: 64 - MacAddress: - description: | - MAC address for the endpoint on this network. - type: "string" - example: "02:42:ac:11:00:04" - DriverOpts: - description: | - DriverOpts is a mapping of driver options and values. These options - are passed directly to the driver and are driver specific. - type: "object" - x-nullable: true - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - - EndpointIPAMConfig: - description: | - EndpointIPAMConfig represents an endpoint's IPAM configuration. - type: "object" - x-nullable: true - properties: - IPv4Address: - type: "string" - example: "172.20.30.33" - IPv6Address: - type: "string" - example: "2001:db8:abcd::3033" - LinkLocalIPs: - type: "array" - items: - type: "string" - example: - - "169.254.34.68" - - "fe80::3468" - - PluginMount: - type: "object" - x-nullable: false - required: [Name, Description, Settable, Source, Destination, Type, Options] - properties: - Name: - type: "string" - x-nullable: false - example: "some-mount" - Description: - type: "string" - x-nullable: false - example: "This is a mount that's used by the plugin." - Settable: - type: "array" - items: - type: "string" - Source: - type: "string" - example: "/var/lib/docker/plugins/" - Destination: - type: "string" - x-nullable: false - example: "/mnt/state" - Type: - type: "string" - x-nullable: false - example: "bind" - Options: - type: "array" - items: - type: "string" - example: - - "rbind" - - "rw" - - PluginDevice: - type: "object" - required: [Name, Description, Settable, Path] - x-nullable: false - properties: - Name: - type: "string" - x-nullable: false - Description: - type: "string" - x-nullable: false - Settable: - type: "array" - items: - type: "string" - Path: - type: "string" - example: "/dev/fuse" - - PluginEnv: - type: "object" - x-nullable: false - required: [Name, Description, Settable, Value] - properties: - Name: - x-nullable: false - type: "string" - Description: - x-nullable: false - type: "string" - Settable: - type: "array" - items: - type: "string" - Value: - type: "string" - - PluginInterfaceType: - type: "object" - x-nullable: false - required: [Prefix, Capability, Version] - properties: - Prefix: - type: "string" - x-nullable: false - Capability: - type: "string" - x-nullable: false - Version: - type: "string" - x-nullable: false - - PluginPrivilege: - description: | - Describes a permission the user has to accept upon installing - the plugin. 
- type: "object" - x-go-name: "PluginPrivilege" - properties: - Name: - type: "string" - example: "network" - Description: - type: "string" - Value: - type: "array" - items: - type: "string" - example: - - "host" - - Plugin: - description: "A plugin for the Engine API" - type: "object" - required: [Settings, Enabled, Config, Name] - properties: - Id: - type: "string" - example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" - Name: - type: "string" - x-nullable: false - example: "tiborvass/sample-volume-plugin" - Enabled: - description: - True if the plugin is running. False if the plugin is not running, - only installed. - type: "boolean" - x-nullable: false - example: true - Settings: - description: "Settings that can be modified by users." - type: "object" - x-nullable: false - required: [Args, Devices, Env, Mounts] - properties: - Mounts: - type: "array" - items: - $ref: "#/definitions/PluginMount" - Env: - type: "array" - items: - type: "string" - example: - - "DEBUG=0" - Args: - type: "array" - items: - type: "string" - Devices: - type: "array" - items: - $ref: "#/definitions/PluginDevice" - PluginReference: - description: "plugin remote reference used to push/pull the plugin" - type: "string" - x-nullable: false - example: "localhost:5000/tiborvass/sample-volume-plugin:latest" - Config: - description: "The config of a plugin." - type: "object" - x-nullable: false - required: - - Description - - Documentation - - Interface - - Entrypoint - - WorkDir - - Network - - Linux - - PidHost - - PropagatedMount - - IpcHost - - Mounts - - Env - - Args - properties: - DockerVersion: - description: "Docker Version used to create the plugin" - type: "string" - x-nullable: false - example: "17.06.0-ce" - Description: - type: "string" - x-nullable: false - example: "A sample volume plugin for Docker" - Documentation: - type: "string" - x-nullable: false - example: "https://docs.docker.com/engine/extend/plugins/" - Interface: - description: "The interface between Docker and the plugin" - x-nullable: false - type: "object" - required: [Types, Socket] - properties: - Types: - type: "array" - items: - $ref: "#/definitions/PluginInterfaceType" - example: - - "docker.volumedriver/1.0" - Socket: - type: "string" - x-nullable: false - example: "plugins.sock" - ProtocolScheme: - type: "string" - example: "some.protocol/v1.0" - description: "Protocol to use for clients connecting to the plugin." 
- enum: - - "" - - "moby.plugins.http/v1" - Entrypoint: - type: "array" - items: - type: "string" - example: - - "/usr/bin/sample-volume-plugin" - - "/data" - WorkDir: - type: "string" - x-nullable: false - example: "/bin/" - User: - type: "object" - x-nullable: false - properties: - UID: - type: "integer" - format: "uint32" - example: 1000 - GID: - type: "integer" - format: "uint32" - example: 1000 - Network: - type: "object" - x-nullable: false - required: [Type] - properties: - Type: - x-nullable: false - type: "string" - example: "host" - Linux: - type: "object" - x-nullable: false - required: [Capabilities, AllowAllDevices, Devices] - properties: - Capabilities: - type: "array" - items: - type: "string" - example: - - "CAP_SYS_ADMIN" - - "CAP_SYSLOG" - AllowAllDevices: - type: "boolean" - x-nullable: false - example: false - Devices: - type: "array" - items: - $ref: "#/definitions/PluginDevice" - PropagatedMount: - type: "string" - x-nullable: false - example: "/mnt/volumes" - IpcHost: - type: "boolean" - x-nullable: false - example: false - PidHost: - type: "boolean" - x-nullable: false - example: false - Mounts: - type: "array" - items: - $ref: "#/definitions/PluginMount" - Env: - type: "array" - items: - $ref: "#/definitions/PluginEnv" - example: - - Name: "DEBUG" - Description: "If set, prints debug messages" - Settable: null - Value: "0" - Args: - type: "object" - x-nullable: false - required: [Name, Description, Settable, Value] - properties: - Name: - x-nullable: false - type: "string" - example: "args" - Description: - x-nullable: false - type: "string" - example: "command line arguments" - Settable: - type: "array" - items: - type: "string" - Value: - type: "array" - items: - type: "string" - rootfs: - type: "object" - properties: - type: - type: "string" - example: "layers" - diff_ids: - type: "array" - items: - type: "string" - example: - - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887" - - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" - - ObjectVersion: - description: | - The version number of the object such as node, service, etc. This is needed - to avoid conflicting writes. The client must send the version number along - with the modified specification when updating these objects. - - This approach ensures safe concurrency and determinism in that the change - on the object may not be applied if the version number has changed from the - last read. In other words, if two update requests specify the same base - version, only one of the requests can succeed. As a result, two separate - update requests that happen at the same time will not unintentionally - overwrite each other. - type: "object" - properties: - Index: - type: "integer" - format: "uint64" - example: 373531 - - NodeSpec: - type: "object" - properties: - Name: - description: "Name for the node." - type: "string" - example: "my-node" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - Role: - description: "Role of the node." - type: "string" - enum: - - "worker" - - "manager" - example: "manager" - Availability: - description: "Availability of the node." 
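The `ObjectVersion` description above spells out the optimistic-concurrency rule: read an object, keep its `Version.Index`, and send that index back with the update so that two concurrent writers cannot silently overwrite each other. A minimal, self-contained sketch of that read-modify-write pattern (the in-memory `get`/`update` closures are stand-ins for the real endpoints, not code from this patch; the ID, index, and availability values reuse the examples above):

```go
package main

import (
	"errors"
	"fmt"
)

// Minimal shapes mirroring the ObjectVersion and NodeSpec definitions above.
type ObjectVersion struct{ Index uint64 }

type Node struct {
	ID      string
	Version ObjectVersion
	Spec    map[string]string
}

var errConflict = errors.New("update out of sequence") // stands in for a version-conflict error

func main() {
	// A fake in-memory "API": the stored node and its current version.
	stored := Node{ID: "24ifsmvkjbyhk", Version: ObjectVersion{Index: 373531},
		Spec: map[string]string{"Availability": "active"}}

	get := func(id string) Node { return stored }
	update := func(id string, base uint64, spec map[string]string) error {
		if base != stored.Version.Index {
			return errConflict // someone else wrote since our read
		}
		stored.Spec = spec
		stored.Version.Index++ // server bumps the version on success
		return nil
	}

	// Read-modify-write: resubmit with the version we read so a
	// concurrent writer cannot be silently overwritten.
	node := get("24ifsmvkjbyhk")
	node.Spec["Availability"] = "drain"
	if err := update(node.ID, node.Version.Index, node.Spec); err != nil {
		fmt.Println("update rejected:", err)
		return
	}
	fmt.Println("updated to version", stored.Version.Index)
}
```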
- type: "string" - enum: - - "active" - - "pause" - - "drain" - example: "active" - example: - Availability: "active" - Name: "node-name" - Role: "manager" - Labels: - foo: "bar" - - Node: - type: "object" - properties: - ID: - type: "string" - example: "24ifsmvkjbyhk" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - description: | - Date and time at which the node was added to the swarm in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2016-08-18T10:44:24.496525531Z" - UpdatedAt: - description: | - Date and time at which the node was last updated in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2017-08-09T07:09:37.632105588Z" - Spec: - $ref: "#/definitions/NodeSpec" - Description: - $ref: "#/definitions/NodeDescription" - Status: - $ref: "#/definitions/NodeStatus" - ManagerStatus: - $ref: "#/definitions/ManagerStatus" - - NodeDescription: - description: | - NodeDescription encapsulates the properties of the Node as reported by the - agent. - type: "object" - properties: - Hostname: - type: "string" - example: "bf3067039e47" - Platform: - $ref: "#/definitions/Platform" - Resources: - $ref: "#/definitions/ResourceObject" - Engine: - $ref: "#/definitions/EngineDescription" - TLSInfo: - $ref: "#/definitions/TLSInfo" - - Platform: - description: | - Platform represents the platform (Arch/OS). - type: "object" - properties: - Architecture: - description: | - Architecture represents the hardware architecture (for example, - `x86_64`). - type: "string" - example: "x86_64" - OS: - description: | - OS represents the Operating System (for example, `linux` or `windows`). - type: "string" - example: "linux" - - EngineDescription: - description: "EngineDescription provides information about an engine." - type: "object" - properties: - EngineVersion: - type: "string" - example: "17.06.0" - Labels: - type: "object" - additionalProperties: - type: "string" - example: - foo: "bar" - Plugins: - type: "array" - items: - type: "object" - properties: - Type: - type: "string" - Name: - type: "string" - example: - - Type: "Log" - Name: "awslogs" - - Type: "Log" - Name: "fluentd" - - Type: "Log" - Name: "gcplogs" - - Type: "Log" - Name: "gelf" - - Type: "Log" - Name: "journald" - - Type: "Log" - Name: "json-file" - - Type: "Log" - Name: "logentries" - - Type: "Log" - Name: "splunk" - - Type: "Log" - Name: "syslog" - - Type: "Network" - Name: "bridge" - - Type: "Network" - Name: "host" - - Type: "Network" - Name: "ipvlan" - - Type: "Network" - Name: "macvlan" - - Type: "Network" - Name: "null" - - Type: "Network" - Name: "overlay" - - Type: "Volume" - Name: "local" - - Type: "Volume" - Name: "localhost:5000/vieux/sshfs:latest" - - Type: "Volume" - Name: "vieux/sshfs:latest" - - TLSInfo: - description: | - Information about the issuer of leaf TLS certificates and the trusted root - CA certificate. - type: "object" - properties: - TrustRoot: - description: | - The root CA certificate(s) that are used to validate leaf TLS - certificates. - type: "string" - CertIssuerSubject: - description: - The base64-url-safe-encoded raw subject bytes of the issuer. - type: "string" - CertIssuerPublicKey: - description: | - The base64-url-safe-encoded raw public key bytes of the issuer. 
- type: "string" - example: - TrustRoot: | - -----BEGIN CERTIFICATE----- - MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw - EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 - MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH - A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf - 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB - Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO - PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz - pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H - -----END CERTIFICATE----- - CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" - CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" - - NodeStatus: - description: | - NodeStatus represents the status of a node. - - It provides the current status of the node, as seen by the manager. - type: "object" - properties: - State: - $ref: "#/definitions/NodeState" - Message: - type: "string" - example: "" - Addr: - description: "IP address of the node." - type: "string" - example: "172.17.0.2" - - NodeState: - description: "NodeState represents the state of a node." - type: "string" - enum: - - "unknown" - - "down" - - "ready" - - "disconnected" - example: "ready" - - ManagerStatus: - description: | - ManagerStatus represents the status of a manager. - - It provides the current status of a node's manager component, if the node - is a manager. - x-nullable: true - type: "object" - properties: - Leader: - type: "boolean" - default: false - example: true - Reachability: - $ref: "#/definitions/Reachability" - Addr: - description: | - The IP address and port at which the manager is reachable. - type: "string" - example: "10.0.0.46:2377" - - Reachability: - description: "Reachability represents the reachability of a node." - type: "string" - enum: - - "unknown" - - "unreachable" - - "reachable" - example: "reachable" - - SwarmSpec: - description: "User modifiable swarm configuration." - type: "object" - properties: - Name: - description: "Name of the swarm." - type: "string" - example: "default" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - com.example.corp.type: "production" - com.example.corp.department: "engineering" - Orchestration: - description: "Orchestration configuration." - type: "object" - x-nullable: true - properties: - TaskHistoryRetentionLimit: - description: | - The number of historic tasks to keep per instance or node. If - negative, never remove completed or failed tasks. - type: "integer" - format: "int64" - example: 10 - Raft: - description: "Raft configuration." - type: "object" - properties: - SnapshotInterval: - description: "The number of log entries between snapshots." - type: "integer" - format: "uint64" - example: 10000 - KeepOldSnapshots: - description: | - The number of snapshots to keep beyond the current snapshot. - type: "integer" - format: "uint64" - LogEntriesForSlowFollowers: - description: | - The number of log entries to keep around to sync up slow followers - after a snapshot is created. - type: "integer" - format: "uint64" - example: 500 - ElectionTick: - description: | - The number of ticks that a follower will wait for a message from - the leader before becoming a candidate and starting an election. - `ElectionTick` must be greater than `HeartbeatTick`. 
- - A tick currently defaults to one second, so these translate - directly to seconds currently, but this is NOT guaranteed. - type: "integer" - example: 3 - HeartbeatTick: - description: | - The number of ticks between heartbeats. Every HeartbeatTick ticks, - the leader will send a heartbeat to the followers. - - A tick currently defaults to one second, so these translate - directly to seconds currently, but this is NOT guaranteed. - type: "integer" - example: 1 - Dispatcher: - description: "Dispatcher configuration." - type: "object" - x-nullable: true - properties: - HeartbeatPeriod: - description: | - The delay for an agent to send a heartbeat to the dispatcher. - type: "integer" - format: "int64" - example: 5000000000 - CAConfig: - description: "CA configuration." - type: "object" - x-nullable: true - properties: - NodeCertExpiry: - description: "The duration node certificates are issued for." - type: "integer" - format: "int64" - example: 7776000000000000 - ExternalCAs: - description: | - Configuration for forwarding signing requests to an external - certificate authority. - type: "array" - items: - type: "object" - properties: - Protocol: - description: | - Protocol for communication with the external CA (currently - only `cfssl` is supported). - type: "string" - enum: - - "cfssl" - default: "cfssl" - URL: - description: | - URL where certificate signing requests should be sent. - type: "string" - Options: - description: | - An object with key/value pairs that are interpreted as - protocol-specific options for the external CA driver. - type: "object" - additionalProperties: - type: "string" - CACert: - description: | - The root CA certificate (in PEM format) this external CA uses - to issue TLS certificates (assumed to be to the current swarm - root CA certificate if not provided). - type: "string" - SigningCACert: - description: | - The desired signing CA certificate for all swarm node TLS leaf - certificates, in PEM format. - type: "string" - SigningCAKey: - description: | - The desired signing CA key for all swarm node TLS leaf certificates, - in PEM format. - type: "string" - ForceRotate: - description: | - An integer whose purpose is to force swarm to generate a new - signing CA certificate and key, if none have been specified in - `SigningCACert` and `SigningCAKey` - format: "uint64" - type: "integer" - EncryptionConfig: - description: "Parameters related to encryption-at-rest." - type: "object" - properties: - AutoLockManagers: - description: | - If set, generate a key and use it to lock data stored on the - managers. - type: "boolean" - example: false - TaskDefaults: - description: "Defaults for creating tasks in this cluster." - type: "object" - properties: - LogDriver: - description: | - The log driver to use for tasks created in the orchestrator if - unspecified by a service. - - Updating this value only affects new tasks. Existing tasks continue - to use their previously configured log driver until recreated. - type: "object" - properties: - Name: - description: | - The log driver to use as a default for new tasks. - type: "string" - example: "json-file" - Options: - description: | - Driver-specific options for the selectd log driver, specified - as key/value pairs. - type: "object" - additionalProperties: - type: "string" - example: - "max-file": "10" - "max-size": "100m" - - # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but - # without `JoinTokens`. 
- ClusterInfo: - description: | - ClusterInfo represents information about the swarm as is returned by the - "/info" endpoint. Join-tokens are not included. - x-nullable: true - type: "object" - properties: - ID: - description: "The ID of the swarm." - type: "string" - example: "abajmipo7b4xz5ip2nrla6b11" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - description: | - Date and time at which the swarm was initialised in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2016-08-18T10:44:24.496525531Z" - UpdatedAt: - description: | - Date and time at which the swarm was last updated in - [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. - type: "string" - format: "dateTime" - example: "2017-08-09T07:09:37.632105588Z" - Spec: - $ref: "#/definitions/SwarmSpec" - TLSInfo: - $ref: "#/definitions/TLSInfo" - RootRotationInProgress: - description: | - Whether there is currently a root CA rotation in progress for the swarm - type: "boolean" - example: false - DataPathPort: - description: | - DataPathPort specifies the data path port number for data traffic. - Acceptable port range is 1024 to 49151. - If no port is set or is set to 0, the default port (4789) is used. - type: "integer" - format: "uint32" - default: 4789 - example: 4789 - DefaultAddrPool: - description: | - Default Address Pool specifies default subnet pools for global scope - networks. - type: "array" - items: - type: "string" - format: "CIDR" - example: ["10.10.0.0/16", "20.20.0.0/16"] - SubnetSize: - description: | - SubnetSize specifies the subnet size of the networks created from the - default subnet pool. - type: "integer" - format: "uint32" - maximum: 29 - default: 24 - example: 24 - - JoinTokens: - description: | - JoinTokens contains the tokens workers and managers need to join the swarm. - type: "object" - properties: - Worker: - description: | - The token workers can use to join the swarm. - type: "string" - example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" - Manager: - description: | - The token managers can use to join the swarm. - type: "string" - example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" - - Swarm: - type: "object" - allOf: - - $ref: "#/definitions/ClusterInfo" - - type: "object" - properties: - JoinTokens: - $ref: "#/definitions/JoinTokens" - - TaskSpec: - description: "User modifiable task configuration." - type: "object" - properties: - PluginSpec: - type: "object" - description: | - Plugin spec for the service. *(Experimental release only.)* - -


- - > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are - > mutually exclusive. PluginSpec is only used when the Runtime field - > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime - > field is set to `attachment`. - properties: - Name: - description: "The name or 'alias' to use for the plugin." - type: "string" - Remote: - description: "The plugin image reference to use." - type: "string" - Disabled: - description: "Disable the plugin once scheduled." - type: "boolean" - PluginPrivilege: - type: "array" - items: - $ref: "#/definitions/PluginPrivilege" - ContainerSpec: - type: "object" - description: | - Container spec for the service. - -


- - > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are - > mutually exclusive. PluginSpec is only used when the Runtime field - > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime - > field is set to `attachment`. - properties: - Image: - description: "The image name to use for the container" - type: "string" - Labels: - description: "User-defined key/value data." - type: "object" - additionalProperties: - type: "string" - Command: - description: "The command to be run in the image." - type: "array" - items: - type: "string" - Args: - description: "Arguments to the command." - type: "array" - items: - type: "string" - Hostname: - description: | - The hostname to use for the container, as a valid - [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname. - type: "string" - Env: - description: | - A list of environment variables in the form `VAR=value`. - type: "array" - items: - type: "string" - Dir: - description: "The working directory for commands to run in." - type: "string" - User: - description: "The user inside the container." - type: "string" - Groups: - type: "array" - description: | - A list of additional groups that the container process will run as. - items: - type: "string" - Privileges: - type: "object" - description: "Security options for the container" - properties: - CredentialSpec: - type: "object" - description: "CredentialSpec for managed service account (Windows only)" - properties: - Config: - type: "string" - example: "0bt9dmxjvjiqermk6xrop3ekq" - description: | - Load credential spec from a Swarm Config with the given ID. - The specified config must also be present in the Configs - field with the Runtime property set. - -


- - - > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, - > and `CredentialSpec.Config` are mutually exclusive. - File: - type: "string" - example: "spec.json" - description: | - Load credential spec from this file. The file is read by - the daemon, and must be present in the `CredentialSpecs` - subdirectory in the docker data directory, which defaults - to `C:\ProgramData\Docker\` on Windows. - - For example, specifying `spec.json` loads - `C:\ProgramData\Docker\CredentialSpecs\spec.json`. - -


- - > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, - > and `CredentialSpec.Config` are mutually exclusive. - Registry: - type: "string" - description: | - Load credential spec from this value in the Windows - registry. The specified registry value must be located in: - - `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` - -


- - - > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, - > and `CredentialSpec.Config` are mutually exclusive. - SELinuxContext: - type: "object" - description: "SELinux labels of the container" - properties: - Disable: - type: "boolean" - description: "Disable SELinux" - User: - type: "string" - description: "SELinux user label" - Role: - type: "string" - description: "SELinux role label" - Type: - type: "string" - description: "SELinux type label" - Level: - type: "string" - description: "SELinux level label" - TTY: - description: "Whether a pseudo-TTY should be allocated." - type: "boolean" - OpenStdin: - description: "Open `stdin`" - type: "boolean" - ReadOnly: - description: "Mount the container's root filesystem as read only." - type: "boolean" - Mounts: - description: | - Specification for mounts to be added to containers created as part - of the service. - type: "array" - items: - $ref: "#/definitions/Mount" - StopSignal: - description: "Signal to stop the container." - type: "string" - StopGracePeriod: - description: | - Amount of time to wait for the container to terminate before - forcefully killing it. - type: "integer" - format: "int64" - HealthCheck: - $ref: "#/definitions/HealthConfig" - Hosts: - type: "array" - description: | - A list of hostname/IP mappings to add to the container's `hosts` - file. The format of extra hosts is specified in the - [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) - man page: - - IP_address canonical_hostname [aliases...] - items: - type: "string" - DNSConfig: - description: | - Specification for DNS related configurations in resolver configuration - file (`resolv.conf`). - type: "object" - properties: - Nameservers: - description: "The IP addresses of the name servers." - type: "array" - items: - type: "string" - Search: - description: "A search list for host-name lookup." - type: "array" - items: - type: "string" - Options: - description: | - A list of internal resolver variables to be modified (e.g., - `debug`, `ndots:3`, etc.). - type: "array" - items: - type: "string" - Secrets: - description: | - Secrets contains references to zero or more secrets that will be - exposed to the service. - type: "array" - items: - type: "object" - properties: - File: - description: | - File represents a specific target that is backed by a file. - type: "object" - properties: - Name: - description: | - Name represents the final filename in the filesystem. - type: "string" - UID: - description: "UID represents the file UID." - type: "string" - GID: - description: "GID represents the file GID." - type: "string" - Mode: - description: "Mode represents the FileMode of the file." - type: "integer" - format: "uint32" - SecretID: - description: | - SecretID represents the ID of the specific secret that we're - referencing. - type: "string" - SecretName: - description: | - SecretName is the name of the secret that this references, - but this is just provided for lookup/display purposes. The - secret in the reference will be identified by its ID. - type: "string" - Configs: - description: | - Configs contains references to zero or more configs that will be - exposed to the service. - type: "array" - items: - type: "object" - properties: - File: - description: | - File represents a specific target that is backed by a file. - -


- - > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive - type: "object" - properties: - Name: - description: | - Name represents the final filename in the filesystem. - type: "string" - UID: - description: "UID represents the file UID." - type: "string" - GID: - description: "GID represents the file GID." - type: "string" - Mode: - description: "Mode represents the FileMode of the file." - type: "integer" - format: "uint32" - Runtime: - description: | - Runtime represents a target that is not mounted into the - container but is used by the task - -


- - > **Note**: `Configs.File` and `Configs.Runtime` are mutually - > exclusive - type: "object" - ConfigID: - description: | - ConfigID represents the ID of the specific config that we're - referencing. - type: "string" - ConfigName: - description: | - ConfigName is the name of the config that this references, - but this is just provided for lookup/display purposes. The - config in the reference will be identified by its ID. - type: "string" - Isolation: - type: "string" - description: | - Isolation technology of the containers running the service. - (Windows only) - enum: - - "default" - - "process" - - "hyperv" - Init: - description: | - Run an init inside the container that forwards signals and reaps - processes. This field is omitted if empty, and the default (as - configured on the daemon) is used. - type: "boolean" - x-nullable: true - Sysctls: - description: | - Set kernel namedspaced parameters (sysctls) in the container. - The Sysctls option on services accepts the same sysctls as the - are supported on containers. Note that while the same sysctls are - supported, no guarantees or checks are made about their - suitability for a clustered environment, and it's up to the user - to determine whether a given sysctl will work properly in a - Service. - type: "object" - additionalProperties: - type: "string" - # This option is not used by Windows containers - CapabilityAdd: - type: "array" - description: | - A list of kernel capabilities to add to the default set - for the container. - items: - type: "string" - example: - - "CAP_NET_RAW" - - "CAP_SYS_ADMIN" - - "CAP_SYS_CHROOT" - - "CAP_SYSLOG" - CapabilityDrop: - type: "array" - description: | - A list of kernel capabilities to drop from the default set - for the container. - items: - type: "string" - example: - - "CAP_NET_RAW" - Ulimits: - description: | - A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`" - type: "array" - items: - type: "object" - properties: - Name: - description: "Name of ulimit" - type: "string" - Soft: - description: "Soft limit" - type: "integer" - Hard: - description: "Hard limit" - type: "integer" - NetworkAttachmentSpec: - description: | - Read-only spec type for non-swarm containers attached to swarm overlay - networks. - -


- - > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are - > mutually exclusive. PluginSpec is only used when the Runtime field - > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime - > field is set to `attachment`. - type: "object" - properties: - ContainerID: - description: "ID of the container represented by this task" - type: "string" - Resources: - description: | - Resource requirements which apply to each individual container created - as part of the service. - type: "object" - properties: - Limits: - description: "Define resources limits." - $ref: "#/definitions/Limit" - Reservations: - description: "Define resources reservation." - $ref: "#/definitions/ResourceObject" - RestartPolicy: - description: | - Specification for the restart policy which applies to containers - created as part of this service. - type: "object" - properties: - Condition: - description: "Condition for restart." - type: "string" - enum: - - "none" - - "on-failure" - - "any" - Delay: - description: "Delay between restart attempts." - type: "integer" - format: "int64" - MaxAttempts: - description: | - Maximum attempts to restart a given container before giving up - (default value is 0, which is ignored). - type: "integer" - format: "int64" - default: 0 - Window: - description: | - Windows is the time window used to evaluate the restart policy - (default value is 0, which is unbounded). - type: "integer" - format: "int64" - default: 0 - Placement: - type: "object" - properties: - Constraints: - description: | - An array of constraint expressions to limit the set of nodes where - a task can be scheduled. Constraint expressions can either use a - _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find - nodes that satisfy every expression (AND match). Constraints can - match node or Docker Engine labels as follows: - - node attribute | matches | example - ---------------------|--------------------------------|----------------------------------------------- - `node.id` | Node ID | `node.id==2ivku8v2gvtg4` - `node.hostname` | Node hostname | `node.hostname!=node-2` - `node.role` | Node role (`manager`/`worker`) | `node.role==manager` - `node.platform.os` | Node operating system | `node.platform.os==windows` - `node.platform.arch` | Node architecture | `node.platform.arch==x86_64` - `node.labels` | User-defined node labels | `node.labels.security==high` - `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-14.04` - - `engine.labels` apply to Docker Engine labels like operating system, - drivers, etc. Swarm administrators add `node.labels` for operational - purposes by using the [`node update endpoint`](#operation/NodeUpdate). - - type: "array" - items: - type: "string" - example: - - "node.hostname!=node3.corp.example.com" - - "node.role!=manager" - - "node.labels.type==production" - - "node.platform.os==linux" - - "node.platform.arch==x86_64" - Preferences: - description: | - Preferences provide a way to make the scheduler aware of factors - such as topology. They are provided in order from highest to - lowest precedence. - type: "array" - items: - type: "object" - properties: - Spread: - type: "object" - properties: - SpreadDescriptor: - description: | - label descriptor, such as `engine.labels.az`. 
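On the client side, the constraint and preference expressions above are plain strings in the service spec. A rough sketch using the Go SDK follows; the service name, image and node labels are placeholders, and error handling is kept minimal:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}

	replicas := uint64(3)
	spec := swarm.ServiceSpec{
		Annotations: swarm.Annotations{Name: "web"},
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: &swarm.ContainerSpec{Image: "redis:alpine"},
			Placement: &swarm.Placement{
				// Constraint expressions use the ==/!= syntax described above.
				Constraints: []string{
					"node.role!=manager",
					"node.labels.type==production",
				},
				// Preferences spread tasks over the given label,
				// listed from highest to lowest precedence.
				Preferences: []swarm.PlacementPreference{
					{Spread: &swarm.SpreadOver{SpreadDescriptor: "node.labels.datacenter"}},
				},
			},
		},
		Mode: swarm.ServiceMode{Replicated: &swarm.ReplicatedService{Replicas: &replicas}},
	}

	resp, err := cli.ServiceCreate(ctx, spec, types.ServiceCreateOptions{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created service", resp.ID)
}
```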
- type: "string" - example: - - Spread: - SpreadDescriptor: "node.labels.datacenter" - - Spread: - SpreadDescriptor: "node.labels.rack" - MaxReplicas: - description: | - Maximum number of replicas for per node (default value is 0, which - is unlimited) - type: "integer" - format: "int64" - default: 0 - Platforms: - description: | - Platforms stores all the platforms that the service's image can - run on. This field is used in the platform filter for scheduling. - If empty, then the platform filter is off, meaning there are no - scheduling restrictions. - type: "array" - items: - $ref: "#/definitions/Platform" - ForceUpdate: - description: | - A counter that triggers an update even if no relevant parameters have - been changed. - type: "integer" - Runtime: - description: | - Runtime is the type of runtime specified for the task executor. - type: "string" - Networks: - description: "Specifies which networks the service should attach to." - type: "array" - items: - $ref: "#/definitions/NetworkAttachmentConfig" - LogDriver: - description: | - Specifies the log driver to use for tasks created from this spec. If - not present, the default one for the swarm will be used, finally - falling back to the engine default if not specified. - type: "object" - properties: - Name: - type: "string" - Options: - type: "object" - additionalProperties: - type: "string" - - TaskState: - type: "string" - enum: - - "new" - - "allocated" - - "pending" - - "assigned" - - "accepted" - - "preparing" - - "ready" - - "starting" - - "running" - - "complete" - - "shutdown" - - "failed" - - "rejected" - - "remove" - - "orphaned" - - Task: - type: "object" - properties: - ID: - description: "The ID of the task." - type: "string" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - UpdatedAt: - type: "string" - format: "dateTime" - Name: - description: "Name of the task." - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - Spec: - $ref: "#/definitions/TaskSpec" - ServiceID: - description: "The ID of the service this task is part of." - type: "string" - Slot: - type: "integer" - NodeID: - description: "The ID of the node that this task is on." - type: "string" - AssignedGenericResources: - $ref: "#/definitions/GenericResources" - Status: - type: "object" - properties: - Timestamp: - type: "string" - format: "dateTime" - State: - $ref: "#/definitions/TaskState" - Message: - type: "string" - Err: - type: "string" - ContainerStatus: - type: "object" - properties: - ContainerID: - type: "string" - PID: - type: "integer" - ExitCode: - type: "integer" - DesiredState: - $ref: "#/definitions/TaskState" - JobIteration: - description: | - If the Service this Task belongs to is a job-mode service, contains - the JobIteration of the Service this Task was created for. Absent if - the Task was created for a Replicated or Global Service. 
- $ref: "#/definitions/ObjectVersion" - example: - ID: "0kzzo1i0y4jz6027t0k7aezc7" - Version: - Index: 71 - CreatedAt: "2016-06-07T21:07:31.171892745Z" - UpdatedAt: "2016-06-07T21:07:31.376370513Z" - Spec: - ContainerSpec: - Image: "redis" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" - Slot: 1 - NodeID: "60gvrl6tm78dmak4yl7srz94v" - Status: - Timestamp: "2016-06-07T21:07:31.290032978Z" - State: "running" - Message: "started" - ContainerStatus: - ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" - PID: 677 - DesiredState: "running" - NetworksAttachments: - - Network: - ID: "4qvuz4ko70xaltuqbt8956gd1" - Version: - Index: 18 - CreatedAt: "2016-06-07T20:31:11.912919752Z" - UpdatedAt: "2016-06-07T21:07:29.955277358Z" - Spec: - Name: "ingress" - Labels: - com.docker.swarm.internal: "true" - DriverConfiguration: {} - IPAMOptions: - Driver: {} - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - DriverState: - Name: "overlay" - Options: - com.docker.network.driver.overlay.vxlanid_list: "256" - IPAMOptions: - Driver: - Name: "default" - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - Addresses: - - "10.255.0.10/16" - AssignedGenericResources: - - DiscreteResourceSpec: - Kind: "SSD" - Value: 3 - - NamedResourceSpec: - Kind: "GPU" - Value: "UUID1" - - NamedResourceSpec: - Kind: "GPU" - Value: "UUID2" - - ServiceSpec: - description: "User modifiable configuration for a service." - type: object - properties: - Name: - description: "Name of the service." - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - TaskTemplate: - $ref: "#/definitions/TaskSpec" - Mode: - description: "Scheduling mode for the service." - type: "object" - properties: - Replicated: - type: "object" - properties: - Replicas: - type: "integer" - format: "int64" - Global: - type: "object" - ReplicatedJob: - description: | - The mode used for services with a finite number of tasks that run - to a completed state. - type: "object" - properties: - MaxConcurrent: - description: | - The maximum number of replicas to run simultaneously. - type: "integer" - format: "int64" - default: 1 - TotalCompletions: - description: | - The total number of replicas desired to reach the Completed - state. If unset, will default to the value of `MaxConcurrent` - type: "integer" - format: "int64" - GlobalJob: - description: | - The mode used for services which run a task to the completed state - on each valid node. - type: "object" - UpdateConfig: - description: "Specification for the update strategy of the service." - type: "object" - properties: - Parallelism: - description: | - Maximum number of tasks to be updated in one iteration (0 means - unlimited parallelism). - type: "integer" - format: "int64" - Delay: - description: "Amount of time between updates, in nanoseconds." - type: "integer" - format: "int64" - FailureAction: - description: | - Action to take if an updated task fails to run, or stops running - during the update. - type: "string" - enum: - - "continue" - - "pause" - - "rollback" - Monitor: - description: | - Amount of time to monitor each updated task for failures, in - nanoseconds. 
- type: "integer" - format: "int64" - MaxFailureRatio: - description: | - The fraction of tasks that may fail during an update before the - failure action is invoked, specified as a floating point number - between 0 and 1. - type: "number" - default: 0 - Order: - description: | - The order of operations when rolling out an updated task. Either - the old task is shut down before the new task is started, or the - new task is started before the old task is shut down. - type: "string" - enum: - - "stop-first" - - "start-first" - RollbackConfig: - description: "Specification for the rollback strategy of the service." - type: "object" - properties: - Parallelism: - description: | - Maximum number of tasks to be rolled back in one iteration (0 means - unlimited parallelism). - type: "integer" - format: "int64" - Delay: - description: | - Amount of time between rollback iterations, in nanoseconds. - type: "integer" - format: "int64" - FailureAction: - description: | - Action to take if an rolled back task fails to run, or stops - running during the rollback. - type: "string" - enum: - - "continue" - - "pause" - Monitor: - description: | - Amount of time to monitor each rolled back task for failures, in - nanoseconds. - type: "integer" - format: "int64" - MaxFailureRatio: - description: | - The fraction of tasks that may fail during a rollback before the - failure action is invoked, specified as a floating point number - between 0 and 1. - type: "number" - default: 0 - Order: - description: | - The order of operations when rolling back a task. Either the old - task is shut down before the new task is started, or the new task - is started before the old task is shut down. - type: "string" - enum: - - "stop-first" - - "start-first" - Networks: - description: "Specifies which networks the service should attach to." - type: "array" - items: - $ref: "#/definitions/NetworkAttachmentConfig" - - EndpointSpec: - $ref: "#/definitions/EndpointSpec" - - EndpointPortConfig: - type: "object" - properties: - Name: - type: "string" - Protocol: - type: "string" - enum: - - "tcp" - - "udp" - - "sctp" - TargetPort: - description: "The port inside the container." - type: "integer" - PublishedPort: - description: "The port on the swarm hosts." - type: "integer" - PublishMode: - description: | - The mode in which port is published. - -


- - - "ingress" makes the target port accessible on every node, - regardless of whether there is a task for the service running on - that node or not. - - "host" bypasses the routing mesh and publish the port directly on - the swarm node where that service is running. - - type: "string" - enum: - - "ingress" - - "host" - default: "ingress" - example: "ingress" - - EndpointSpec: - description: "Properties that can be configured to access and load balance a service." - type: "object" - properties: - Mode: - description: | - The mode of resolution to use for internal load balancing between tasks. - type: "string" - enum: - - "vip" - - "dnsrr" - default: "vip" - Ports: - description: | - List of exposed ports that this service is accessible on from the - outside. Ports can only be provided if `vip` resolution mode is used. - type: "array" - items: - $ref: "#/definitions/EndpointPortConfig" - - Service: - type: "object" - properties: - ID: - type: "string" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - UpdatedAt: - type: "string" - format: "dateTime" - Spec: - $ref: "#/definitions/ServiceSpec" - Endpoint: - type: "object" - properties: - Spec: - $ref: "#/definitions/EndpointSpec" - Ports: - type: "array" - items: - $ref: "#/definitions/EndpointPortConfig" - VirtualIPs: - type: "array" - items: - type: "object" - properties: - NetworkID: - type: "string" - Addr: - type: "string" - UpdateStatus: - description: "The status of a service update." - type: "object" - properties: - State: - type: "string" - enum: - - "updating" - - "paused" - - "completed" - StartedAt: - type: "string" - format: "dateTime" - CompletedAt: - type: "string" - format: "dateTime" - Message: - type: "string" - ServiceStatus: - description: | - The status of the service's tasks. Provided only when requested as - part of a ServiceList operation. - type: "object" - properties: - RunningTasks: - description: | - The number of tasks for the service currently in the Running state. - type: "integer" - format: "uint64" - example: 7 - DesiredTasks: - description: | - The number of tasks for the service desired to be running. - For replicated services, this is the replica count from the - service spec. For global services, this is computed by taking - count of all tasks for the service with a Desired State other - than Shutdown. - type: "integer" - format: "uint64" - example: 10 - CompletedTasks: - description: | - The number of tasks for a job that are in the Completed state. - This field must be cross-referenced with the service type, as the - value of 0 may mean the service is not in a job mode, or it may - mean the job-mode service has no tasks yet Completed. - type: "integer" - format: "uint64" - JobStatus: - description: | - The status of the service when it is in one of ReplicatedJob or - GlobalJob modes. Absent on Replicated and Global mode services. The - JobIteration is an ObjectVersion, but unlike the Service's version, - does not need to be sent with an update request. - type: "object" - properties: - JobIteration: - description: | - JobIteration is a value increased each time a Job is executed, - successfully or otherwise. "Executed", in this case, means the - job as a whole has been started, not that an individual Task has - been launched. A job is "Executed" when its ServiceSpec is - updated. JobIteration can be used to disambiguate Tasks belonging - to different executions of a job. 
Though JobIteration will - increase with each subsequent execution, it may not necessarily - increase by 1, and so JobIteration should not be used to - $ref: "#/definitions/ObjectVersion" - LastExecution: - description: | - The last time, as observed by the server, that this job was - started. - type: "string" - format: "dateTime" - example: - ID: "9mnpnzenvg8p8tdbtq4wvbkcz" - Version: - Index: 19 - CreatedAt: "2016-06-07T21:05:51.880065305Z" - UpdatedAt: "2016-06-07T21:07:29.962229872Z" - Spec: - Name: "hopeful_cori" - TaskTemplate: - ContainerSpec: - Image: "redis" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ForceUpdate: 0 - Mode: - Replicated: - Replicas: 1 - UpdateConfig: - Parallelism: 1 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - RollbackConfig: - Parallelism: 1 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - EndpointSpec: - Mode: "vip" - Ports: - - - Protocol: "tcp" - TargetPort: 6379 - PublishedPort: 30001 - Endpoint: - Spec: - Mode: "vip" - Ports: - - - Protocol: "tcp" - TargetPort: 6379 - PublishedPort: 30001 - Ports: - - - Protocol: "tcp" - TargetPort: 6379 - PublishedPort: 30001 - VirtualIPs: - - - NetworkID: "4qvuz4ko70xaltuqbt8956gd1" - Addr: "10.255.0.2/16" - - - NetworkID: "4qvuz4ko70xaltuqbt8956gd1" - Addr: "10.255.0.3/16" - - ImageDeleteResponseItem: - type: "object" - properties: - Untagged: - description: "The image ID of an image that was untagged" - type: "string" - Deleted: - description: "The image ID of an image that was deleted" - type: "string" - - ServiceUpdateResponse: - type: "object" - properties: - Warnings: - description: "Optional warning messages" - type: "array" - items: - type: "string" - example: - Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" - - ContainerSummary: - type: "object" - properties: - Id: - description: "The ID of this container" - type: "string" - x-go-name: "ID" - Names: - description: "The names that this container has been given" - type: "array" - items: - type: "string" - Image: - description: "The name of the image used when creating this container" - type: "string" - ImageID: - description: "The ID of the image that this container was created from" - type: "string" - Command: - description: "Command to run when starting the container" - type: "string" - Created: - description: "When the container was created" - type: "integer" - format: "int64" - Ports: - description: "The ports exposed by this container" - type: "array" - items: - $ref: "#/definitions/Port" - SizeRw: - description: "The size of files that have been created or changed by this container" - type: "integer" - format: "int64" - SizeRootFs: - description: "The total size of all the files in this container" - type: "integer" - format: "int64" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - State: - description: "The state of this container (e.g. `Exited`)" - type: "string" - Status: - description: "Additional human-readable status of this container (e.g. 
`Exit 0`)" - type: "string" - HostConfig: - type: "object" - properties: - NetworkMode: - type: "string" - NetworkSettings: - description: "A summary of the container's network settings" - type: "object" - properties: - Networks: - type: "object" - additionalProperties: - $ref: "#/definitions/EndpointSettings" - Mounts: - type: "array" - items: - $ref: "#/definitions/MountPoint" - - Driver: - description: "Driver represents a driver (network, logging, secrets)." - type: "object" - required: [Name] - properties: - Name: - description: "Name of the driver." - type: "string" - x-nullable: false - example: "some-driver" - Options: - description: "Key/value map of driver-specific options." - type: "object" - x-nullable: false - additionalProperties: - type: "string" - example: - OptionA: "value for driver-specific option A" - OptionB: "value for driver-specific option B" - - SecretSpec: - type: "object" - properties: - Name: - description: "User-defined name of the secret." - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Data: - description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) - data to store as secret. - - This field is only used to _create_ a secret, and is not returned by - other endpoints. - type: "string" - example: "" - Driver: - description: | - Name of the secrets driver used to fetch the secret's value from an - external secret store. - $ref: "#/definitions/Driver" - Templating: - description: | - Templating driver, if applicable - - Templating controls whether and how to evaluate the config payload as - a template. If no driver is set, no templating is used. - $ref: "#/definitions/Driver" - - Secret: - type: "object" - properties: - ID: - type: "string" - example: "blt1owaxmitz71s9v5zh81zun" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - example: "2017-07-20T13:55:28.678958722Z" - UpdatedAt: - type: "string" - format: "dateTime" - example: "2017-07-20T13:55:28.678958722Z" - Spec: - $ref: "#/definitions/SecretSpec" - - ConfigSpec: - type: "object" - properties: - Name: - description: "User-defined name of the config." - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - Data: - description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) - config data. - type: "string" - Templating: - description: | - Templating driver, if applicable - - Templating controls whether and how to evaluate the config payload as - a template. If no driver is set, no templating is used. - $ref: "#/definitions/Driver" - - Config: - type: "object" - properties: - ID: - type: "string" - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - UpdatedAt: - type: "string" - format: "dateTime" - Spec: - $ref: "#/definitions/ConfigSpec" - - ContainerState: - description: | - ContainerState stores container's running state. It's part of ContainerJSONBase - and will be returned by the "inspect" command. - type: "object" - x-nullable: true - properties: - Status: - description: | - String representation of the container state. Can be one of "created", - "running", "paused", "restarting", "removing", "exited", or "dead". 
- type: "string" - enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] - example: "running" - Running: - description: | - Whether this container is running. - - Note that a running container can be _paused_. The `Running` and `Paused` - booleans are not mutually exclusive: - - When pausing a container (on Linux), the freezer cgroup is used to suspend - all processes in the container. Freezing the process requires the process to - be running. As a result, paused containers are both `Running` _and_ `Paused`. - - Use the `Status` field instead to determine if a container's state is "running". - type: "boolean" - example: true - Paused: - description: "Whether this container is paused." - type: "boolean" - example: false - Restarting: - description: "Whether this container is restarting." - type: "boolean" - example: false - OOMKilled: - description: | - Whether this container has been killed because it ran out of memory. - type: "boolean" - example: false - Dead: - type: "boolean" - example: false - Pid: - description: "The process ID of this container" - type: "integer" - example: 1234 - ExitCode: - description: "The last exit code of this container" - type: "integer" - example: 0 - Error: - type: "string" - StartedAt: - description: "The time when this container was last started." - type: "string" - example: "2020-01-06T09:06:59.461876391Z" - FinishedAt: - description: "The time when this container last exited." - type: "string" - example: "2020-01-06T09:07:59.461876391Z" - Health: - $ref: "#/definitions/Health" - - ContainerCreateResponse: - description: "OK response to ContainerCreate operation" - type: "object" - title: "ContainerCreateResponse" - x-go-name: "CreateResponse" - required: [Id, Warnings] - properties: - Id: - description: "The ID of the created container" - type: "string" - x-nullable: false - example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" - Warnings: - description: "Warnings encountered when creating the container" - type: "array" - x-nullable: false - items: - type: "string" - example: [] - - ContainerWaitResponse: - description: "OK response to ContainerWait operation" - type: "object" - x-go-name: "WaitResponse" - title: "ContainerWaitResponse" - required: [StatusCode] - properties: - StatusCode: - description: "Exit code of the container" - type: "integer" - format: "int64" - x-nullable: false - Error: - $ref: "#/definitions/ContainerWaitExitError" - - ContainerWaitExitError: - description: "container waiting error, if any" - type: "object" - x-go-name: "WaitExitError" - properties: - Message: - description: "Details of an error" - type: "string" - - SystemVersion: - type: "object" - description: | - Response of Engine API: GET "/version" - properties: - Platform: - type: "object" - required: [Name] - properties: - Name: - type: "string" - Components: - type: "array" - description: | - Information about system components - items: - type: "object" - x-go-name: ComponentVersion - required: [Name, Version] - properties: - Name: - description: | - Name of the component - type: "string" - example: "Engine" - Version: - description: | - Version of the component - type: "string" - x-nullable: false - example: "19.03.12" - Details: - description: | - Key/value pairs of strings with additional information about the - component. These values are intended for informational purposes - only, and their content is not defined, and not part of the API - specification. 
- - These messages can be printed by the client as information to the user. - type: "object" - x-nullable: true - Version: - description: "The version of the daemon" - type: "string" - example: "19.03.12" - ApiVersion: - description: | - The default (and highest) API version that is supported by the daemon - type: "string" - example: "1.40" - MinAPIVersion: - description: | - The minimum API version that is supported by the daemon - type: "string" - example: "1.12" - GitCommit: - description: | - The Git commit of the source code that was used to build the daemon - type: "string" - example: "48a66213fe" - GoVersion: - description: | - The version of Go used to compile the daemon, and the version of the Go - runtime in use. - type: "string" - example: "go1.13.14" - Os: - description: | - The operating system that the daemon is running on ("linux" or "windows") - type: "string" - example: "linux" - Arch: - description: | - The architecture that the daemon is running on - type: "string" - example: "amd64" - KernelVersion: - description: | - The kernel version (`uname -r`) that the daemon is running on. - - This field is omitted when empty. - type: "string" - example: "4.19.76-linuxkit" - Experimental: - description: | - Indicates if the daemon is started with experimental features enabled. - - This field is omitted when empty / false. - type: "boolean" - example: true - BuildTime: - description: | - The date and time that the daemon was compiled. - type: "string" - example: "2020-06-22T15:49:27.000000000+00:00" - - SystemInfo: - type: "object" - properties: - ID: - description: | - Unique identifier of the daemon. - -


- - > **Note**: The format of the ID itself is not part of the API, and - > should not be considered stable. - type: "string" - example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" - Containers: - description: "Total number of containers on the host." - type: "integer" - example: 14 - ContainersRunning: - description: | - Number of containers with status `"running"`. - type: "integer" - example: 3 - ContainersPaused: - description: | - Number of containers with status `"paused"`. - type: "integer" - example: 1 - ContainersStopped: - description: | - Number of containers with status `"stopped"`. - type: "integer" - example: 10 - Images: - description: | - Total number of images on the host. - - Both _tagged_ and _untagged_ (dangling) images are counted. - type: "integer" - example: 508 - Driver: - description: "Name of the storage driver in use." - type: "string" - example: "overlay2" - DriverStatus: - description: | - Information specific to the storage driver, provided as - "label" / "value" pairs. - - This information is provided by the storage driver, and formatted - in a way consistent with the output of `docker info` on the command - line. - -


- - > **Note**: The information returned in this field, including the - > formatting of values and labels, should not be considered stable, - > and may change without notice. - type: "array" - items: - type: "array" - items: - type: "string" - example: - - ["Backing Filesystem", "extfs"] - - ["Supports d_type", "true"] - - ["Native Overlay Diff", "true"] - DockerRootDir: - description: | - Root directory of persistent Docker state. - - Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker` - on Windows. - type: "string" - example: "/var/lib/docker" - Plugins: - $ref: "#/definitions/PluginsInfo" - MemoryLimit: - description: "Indicates if the host has memory limit support enabled." - type: "boolean" - example: true - SwapLimit: - description: "Indicates if the host has memory swap limit support enabled." - type: "boolean" - example: true - KernelMemoryTCP: - description: | - Indicates if the host has kernel memory TCP limit support enabled. This - field is omitted if not supported. - - Kernel memory TCP limits are not supported when using cgroups v2, which - does not support the corresponding `memory.kmem.tcp.limit_in_bytes` cgroup. - type: "boolean" - example: true - CpuCfsPeriod: - description: | - Indicates if CPU CFS(Completely Fair Scheduler) period is supported by - the host. - type: "boolean" - example: true - CpuCfsQuota: - description: | - Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by - the host. - type: "boolean" - example: true - CPUShares: - description: | - Indicates if CPU Shares limiting is supported by the host. - type: "boolean" - example: true - CPUSet: - description: | - Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host. - - See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt) - type: "boolean" - example: true - PidsLimit: - description: "Indicates if the host kernel has PID limit support enabled." - type: "boolean" - example: true - OomKillDisable: - description: "Indicates if OOM killer disable is supported on the host." - type: "boolean" - IPv4Forwarding: - description: "Indicates IPv4 forwarding is enabled." - type: "boolean" - example: true - BridgeNfIptables: - description: "Indicates if `bridge-nf-call-iptables` is available on the host." - type: "boolean" - example: true - BridgeNfIp6tables: - description: "Indicates if `bridge-nf-call-ip6tables` is available on the host." - type: "boolean" - example: true - Debug: - description: | - Indicates if the daemon is running in debug-mode / with debug-level - logging enabled. - type: "boolean" - example: true - NFd: - description: | - The total number of file Descriptors in use by the daemon process. - - This information is only returned if debug-mode is enabled. - type: "integer" - example: 64 - NGoroutines: - description: | - The number of goroutines that currently exist. - - This information is only returned if debug-mode is enabled. - type: "integer" - example: 174 - SystemTime: - description: | - Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) - format with nano-seconds. - type: "string" - example: "2017-08-08T20:28:29.06202363Z" - LoggingDriver: - description: | - The logging driver to use as a default for new containers. - type: "string" - CgroupDriver: - description: | - The driver to use for managing cgroups. - type: "string" - enum: ["cgroupfs", "systemd", "none"] - default: "cgroupfs" - example: "cgroupfs" - CgroupVersion: - description: | - The version of the cgroup. 
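Most of the SystemInfo fields in this definition surface directly on the struct returned by the SDK's `Info` call. A small sketch printing a few of them; the field names follow the published SDK, and the output is purely illustrative:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}

	// GET /info returns the SystemInfo document described here.
	info, err := cli.Info(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("server version:", info.ServerVersion)
	fmt.Println("storage driver:", info.Driver)
	fmt.Println("cgroup driver: ", info.CgroupDriver)
	fmt.Printf("containers: %d (%d running)\n", info.Containers, info.ContainersRunning)
	fmt.Printf("cpus: %d, memory: %d bytes\n", info.NCPU, info.MemTotal)
}
```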
- type: "string" - enum: ["1", "2"] - default: "1" - example: "1" - NEventsListener: - description: "Number of event listeners subscribed." - type: "integer" - example: 30 - KernelVersion: - description: | - Kernel version of the host. - - On Linux, this information obtained from `uname`. On Windows this - information is queried from the HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\ - registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. - type: "string" - example: "4.9.38-moby" - OperatingSystem: - description: | - Name of the host's operating system, for example: "Ubuntu 16.04.2 LTS" - or "Windows Server 2016 Datacenter" - type: "string" - example: "Alpine Linux v3.5" - OSVersion: - description: | - Version of the host's operating system - -


- - > **Note**: The information returned in this field, including its - > very existence, and the formatting of values, should not be considered - > stable, and may change without notice. - type: "string" - example: "16.04" - OSType: - description: | - Generic type of the operating system of the host, as returned by the - Go runtime (`GOOS`). - - Currently returned values are "linux" and "windows". A full list of - possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). - type: "string" - example: "linux" - Architecture: - description: | - Hardware architecture of the host, as returned by the Go runtime - (`GOARCH`). - - A full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). - type: "string" - example: "x86_64" - NCPU: - description: | - The number of logical CPUs usable by the daemon. - - The number of available CPUs is checked by querying the operating - system when the daemon starts. Changes to operating system CPU - allocation after the daemon is started are not reflected. - type: "integer" - example: 4 - MemTotal: - description: | - Total amount of physical memory available on the host, in bytes. - type: "integer" - format: "int64" - example: 2095882240 - - IndexServerAddress: - description: | - Address / URL of the index server that is used for image search, - and as a default for user authentication for Docker Hub and Docker Cloud. - default: "https://index.docker.io/v1/" - type: "string" - example: "https://index.docker.io/v1/" - RegistryConfig: - $ref: "#/definitions/RegistryServiceConfig" - GenericResources: - $ref: "#/definitions/GenericResources" - HttpProxy: - description: | - HTTP-proxy configured for the daemon. This value is obtained from the - [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. - Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL - are masked in the API response. - - Containers do not automatically inherit this configuration. - type: "string" - example: "http://xxxxx:xxxxx@proxy.corp.example.com:8080" - HttpsProxy: - description: | - HTTPS-proxy configured for the daemon. This value is obtained from the - [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. - Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL - are masked in the API response. - - Containers do not automatically inherit this configuration. - type: "string" - example: "https://xxxxx:xxxxx@proxy.corp.example.com:4443" - NoProxy: - description: | - Comma-separated list of domain extensions for which no proxy should be - used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) - environment variable. - - Containers do not automatically inherit this configuration. - type: "string" - example: "*.local, 169.254/16" - Name: - description: "Hostname of the host." - type: "string" - example: "node5.corp.example.com" - Labels: - description: | - User-defined labels (key/value metadata) as set on the daemon. - -


- - > **Note**: When part of a Swarm, nodes can both have _daemon_ labels, - > set through the daemon configuration, and _node_ labels, set from a - > manager node in the Swarm. Node labels are not included in this - > field. Node labels can be retrieved using the `/nodes/(id)` endpoint - > on a manager node in the Swarm. - type: "array" - items: - type: "string" - example: ["storage=ssd", "production"] - ExperimentalBuild: - description: | - Indicates if experimental features are enabled on the daemon. - type: "boolean" - example: true - ServerVersion: - description: | - Version string of the daemon. - - > **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/) - > returns the Swarm version instead of the daemon version, for example - > `swarm/1.2.8`. - type: "string" - example: "17.06.0-ce" - ClusterStore: - description: | - URL of the distributed storage backend. - - - The storage backend is used for multihost networking (to store - network and endpoint information) and by the node discovery mechanism. - -


- - > **Deprecated**: This field is only propagated when using standalone Swarm - > mode, and overlay networking using an external k/v store. Overlay - > networks with Swarm mode enabled use the built-in raft store, and - > this field will be empty. - type: "string" - example: "consul://consul.corp.example.com:8600/some/path" - ClusterAdvertise: - description: | - The network endpoint that the Engine advertises for the purpose of - node discovery. ClusterAdvertise is a `host:port` combination on which - the daemon is reachable by other hosts. - -


- - > **Deprecated**: This field is only propagated when using standalone Swarm - > mode, and overlay networking using an external k/v store. Overlay - > networks with Swarm mode enabled use the built-in raft store, and - > this field will be empty. - type: "string" - example: "node5.corp.example.com:8000" - Runtimes: - description: | - List of [OCI compliant](https://github.com/opencontainers/runtime-spec) - runtimes configured on the daemon. Keys hold the "name" used to - reference the runtime. - - The Docker daemon relies on an OCI compliant runtime (invoked via the - `containerd` daemon) as its interface to the Linux kernel namespaces, - cgroups, and SELinux. - - The default runtime is `runc`, and automatically configured. Additional - runtimes can be configured by the user and will be listed here. - type: "object" - additionalProperties: - $ref: "#/definitions/Runtime" - default: - runc: - path: "runc" - example: - runc: - path: "runc" - runc-master: - path: "/go/bin/runc" - custom: - path: "/usr/local/bin/my-oci-runtime" - runtimeArgs: ["--debug", "--systemd-cgroup=false"] - DefaultRuntime: - description: | - Name of the default OCI runtime that is used when starting containers. - - The default can be overridden per-container at create time. - type: "string" - default: "runc" - example: "runc" - Swarm: - $ref: "#/definitions/SwarmInfo" - LiveRestoreEnabled: - description: | - Indicates if live restore is enabled. - - If enabled, containers are kept running when the daemon is shutdown - or upon daemon start if running containers are detected. - type: "boolean" - default: false - example: false - Isolation: - description: | - Represents the isolation technology to use as a default for containers. - The supported values are platform-specific. - - If no isolation value is specified on daemon start, on Windows client, - the default is `hyperv`, and on Windows server, the default is `process`. - - This option is currently not used on other platforms. - default: "default" - type: "string" - enum: - - "default" - - "hyperv" - - "process" - InitBinary: - description: | - Name and, optional, path of the `docker-init` binary. - - If the path is omitted, the daemon searches the host's `$PATH` for the - binary and uses the first result. - type: "string" - example: "docker-init" - ContainerdCommit: - $ref: "#/definitions/Commit" - RuncCommit: - $ref: "#/definitions/Commit" - InitCommit: - $ref: "#/definitions/Commit" - SecurityOptions: - description: | - List of security features that are enabled on the daemon, such as - apparmor, seccomp, SELinux, user-namespaces (userns), and rootless. - - Additional configuration options for each security feature may - be present, and are included as a comma-separated list of key/value - pairs. - type: "array" - items: - type: "string" - example: - - "name=apparmor" - - "name=seccomp,profile=default" - - "name=selinux" - - "name=userns" - - "name=rootless" - ProductLicense: - description: | - Reports a summary of the product license on the daemon. - - If a commercial license has been applied to the daemon, information - such as number of nodes, and expiration are included. - type: "string" - example: "Community Engine" - DefaultAddressPools: - description: | - List of custom default address pools for local networks, which can be - specified in the daemon.json file or dockerd option. - - Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256 - 10.10.[0-255].0/24 address pools. 
- type: "array" - items: - type: "object" - properties: - Base: - description: "The network address in CIDR format" - type: "string" - example: "10.10.0.0/16" - Size: - description: "The network pool size" - type: "integer" - example: "24" - Warnings: - description: | - List of warnings / informational messages about missing features, or - issues related to the daemon configuration. - - These messages can be printed by the client as information to the user. - type: "array" - items: - type: "string" - example: - - "WARNING: No memory limit support" - - "WARNING: bridge-nf-call-iptables is disabled" - - "WARNING: bridge-nf-call-ip6tables is disabled" - - - # PluginsInfo is a temp struct holding Plugins name - # registered with docker daemon. It is used by Info struct - PluginsInfo: - description: | - Available plugins per type. - -


- - > **Note**: Only unmanaged (V1) plugins are included in this list. - > V1 plugins are "lazily" loaded, and are not returned in this list - > if there is no resource using the plugin. - type: "object" - properties: - Volume: - description: "Names of available volume-drivers, and network-driver plugins." - type: "array" - items: - type: "string" - example: ["local"] - Network: - description: "Names of available network-drivers, and network-driver plugins." - type: "array" - items: - type: "string" - example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"] - Authorization: - description: "Names of available authorization plugins." - type: "array" - items: - type: "string" - example: ["img-authz-plugin", "hbm"] - Log: - description: "Names of available logging-drivers, and logging-driver plugins." - type: "array" - items: - type: "string" - example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "logentries", "splunk", "syslog"] - - - RegistryServiceConfig: - description: | - RegistryServiceConfig stores daemon registry services configuration. - type: "object" - x-nullable: true - properties: - AllowNondistributableArtifactsCIDRs: - description: | - List of IP ranges to which nondistributable artifacts can be pushed, - using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632). - - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior, and enables the daemon to - push nondistributable artifacts to all registries whose resolved IP - address is within the subnet described by the CIDR syntax. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. - - > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. - - type: "array" - items: - type: "string" - example: ["::1/128", "127.0.0.0/8"] - AllowNondistributableArtifactsHostnames: - description: | - List of registry hostnames to which nondistributable artifacts can be - pushed, using the format `[:]` or `[:]`. - - Some images (for example, Windows base images) contain artifacts - whose distribution is restricted by license. When these images are - pushed to a registry, restricted artifacts are not included. - - This configuration override this behavior for the specified - registries. - - This option is useful when pushing images containing - nondistributable artifacts to a registry on an air-gapped network so - hosts on that network can pull the images without connecting to - another server. - - > **Warning**: Nondistributable artifacts typically have restrictions - > on how and where they can be distributed and shared. Only use this - > feature to push artifacts to private registries and ensure that you - > are in compliance with any terms that cover redistributing - > nondistributable artifacts. 
- type: "array" - items: - type: "string" - example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] - InsecureRegistryCIDRs: - description: | - List of IP ranges of insecure registries, using the CIDR syntax - ([RFC 4632](https://tools.ietf.org/html/4632)). Insecure registries - accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates - from unknown CAs) communication. - - By default, local registries (`127.0.0.0/8`) are configured as - insecure. All other registries are secure. Communicating with an - insecure registry is not possible if the daemon assumes that registry - is secure. - - This configuration override this behavior, insecure communication with - registries whose resolved IP address is within the subnet described by - the CIDR syntax. - - Registries can also be marked insecure by hostname. Those registries - are listed under `IndexConfigs` and have their `Secure` field set to - `false`. - - > **Warning**: Using this option can be useful when running a local - > registry, but introduces security vulnerabilities. This option - > should therefore ONLY be used for testing purposes. For increased - > security, users should add their CA to their system's list of trusted - > CAs instead of enabling this option. - type: "array" - items: - type: "string" - example: ["::1/128", "127.0.0.0/8"] - IndexConfigs: - type: "object" - additionalProperties: - $ref: "#/definitions/IndexInfo" - example: - "127.0.0.1:5000": - "Name": "127.0.0.1:5000" - "Mirrors": [] - "Secure": false - "Official": false - "[2001:db8:a0b:12f0::1]:80": - "Name": "[2001:db8:a0b:12f0::1]:80" - "Mirrors": [] - "Secure": false - "Official": false - "docker.io": - Name: "docker.io" - Mirrors: ["https://hub-mirror.corp.example.com:5000/"] - Secure: true - Official: true - "registry.internal.corp.example.com:3000": - Name: "registry.internal.corp.example.com:3000" - Mirrors: [] - Secure: false - Official: false - Mirrors: - description: | - List of registry URLs that act as a mirror for the official - (`docker.io`) registry. - - type: "array" - items: - type: "string" - example: - - "https://hub-mirror.corp.example.com:5000/" - - "https://[2001:db8:a0b:12f0::1]/" - - IndexInfo: - description: - IndexInfo contains information about a registry. - type: "object" - x-nullable: true - properties: - Name: - description: | - Name of the registry, such as "docker.io". - type: "string" - example: "docker.io" - Mirrors: - description: | - List of mirrors, expressed as URIs. - type: "array" - items: - type: "string" - example: - - "https://hub-mirror.corp.example.com:5000/" - - "https://registry-2.docker.io/" - - "https://registry-3.docker.io/" - Secure: - description: | - Indicates if the registry is part of the list of insecure - registries. - - If `false`, the registry is insecure. Insecure registries accept - un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from - unknown CAs) communication. - - > **Warning**: Insecure registries can be useful when running a local - > registry. However, because its use creates security vulnerabilities - > it should ONLY be enabled for testing purposes. For increased - > security, users should add their CA to their system's list of - > trusted CAs instead of enabling this option. 
- type: "boolean" - example: true - Official: - description: | - Indicates whether this is an official registry (i.e., Docker Hub / docker.io) - type: "boolean" - example: true - - Runtime: - description: | - Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec) - runtime. - - The runtime is invoked by the daemon via the `containerd` daemon. OCI - runtimes act as an interface to the Linux kernel namespaces, cgroups, - and SELinux. - type: "object" - properties: - path: - description: | - Name and, optional, path, of the OCI executable binary. - - If the path is omitted, the daemon searches the host's `$PATH` for the - binary and uses the first result. - type: "string" - example: "/usr/local/bin/my-oci-runtime" - runtimeArgs: - description: | - List of command-line arguments to pass to the runtime when invoked. - type: "array" - x-nullable: true - items: - type: "string" - example: ["--debug", "--systemd-cgroup=false"] - - Commit: - description: | - Commit holds the Git-commit (SHA1) that a binary was built from, as - reported in the version-string of external tools, such as `containerd`, - or `runC`. - type: "object" - properties: - ID: - description: "Actual commit ID of external tool." - type: "string" - example: "cfb82a876ecc11b5ca0977d1733adbe58599088a" - Expected: - description: | - Commit ID of external tool expected by dockerd as set at build time. - type: "string" - example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4" - - SwarmInfo: - description: | - Represents generic information about swarm. - type: "object" - properties: - NodeID: - description: "Unique identifier of for this node in the swarm." - type: "string" - default: "" - example: "k67qz4598weg5unwwffg6z1m1" - NodeAddr: - description: | - IP address at which this node can be reached by other nodes in the - swarm. - type: "string" - default: "" - example: "10.0.0.46" - LocalNodeState: - $ref: "#/definitions/LocalNodeState" - ControlAvailable: - type: "boolean" - default: false - example: true - Error: - type: "string" - default: "" - RemoteManagers: - description: | - List of ID's and addresses of other managers in the swarm. - type: "array" - default: null - x-nullable: true - items: - $ref: "#/definitions/PeerNode" - example: - - NodeID: "71izy0goik036k48jg985xnds" - Addr: "10.0.0.158:2377" - - NodeID: "79y6h1o4gv8n120drcprv5nmc" - Addr: "10.0.0.159:2377" - - NodeID: "k67qz4598weg5unwwffg6z1m1" - Addr: "10.0.0.46:2377" - Nodes: - description: "Total number of nodes in the swarm." - type: "integer" - x-nullable: true - example: 4 - Managers: - description: "Total number of managers in the swarm." - type: "integer" - x-nullable: true - example: 3 - Cluster: - $ref: "#/definitions/ClusterInfo" - - LocalNodeState: - description: "Current local status of this node." - type: "string" - default: "" - enum: - - "" - - "inactive" - - "pending" - - "active" - - "error" - - "locked" - example: "active" - - PeerNode: - description: "Represents a peer-node in the swarm" - type: "object" - properties: - NodeID: - description: "Unique identifier of for this node in the swarm." - type: "string" - Addr: - description: | - IP address and ports at which this node can be reached. - type: "string" - - NetworkAttachmentConfig: - description: | - Specifies how a service should be attached to a particular network. - type: "object" - properties: - Target: - description: | - The target network for attachment. Must be a network name or ID. 
- type: "string" - Aliases: - description: | - Discoverable alternate names for the service on this network. - type: "array" - items: - type: "string" - DriverOpts: - description: | - Driver attachment options for the network target. - type: "object" - additionalProperties: - type: "string" - - EventActor: - description: | - Actor describes something that generates events, like a container, network, - or a volume. - type: "object" - properties: - ID: - description: "The ID of the object emitting the event" - type: "string" - example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" - Attributes: - description: | - Various key/value attributes of the object, depending on its type. - type: "object" - additionalProperties: - type: "string" - example: - com.example.some-label: "some-label-value" - image: "alpine:latest" - name: "my-container" - - EventMessage: - description: | - EventMessage represents the information an event contains. - type: "object" - title: "SystemEventsResponse" - properties: - Type: - description: "The type of object emitting the event" - type: "string" - enum: ["builder", "config", "container", "daemon", "image", "network", "node", "plugin", "secret", "service", "volume"] - example: "container" - Action: - description: "The type of event" - type: "string" - example: "create" - Actor: - $ref: "#/definitions/EventActor" - scope: - description: | - Scope of the event. Engine events are `local` scope. Cluster (Swarm) - events are `swarm` scope. - type: "string" - enum: ["local", "swarm"] - time: - description: "Timestamp of event" - type: "integer" - format: "int64" - example: 1629574695 - timeNano: - description: "Timestamp of event, with nanosecond accuracy" - type: "integer" - format: "int64" - example: 1629574695515050031 - - OCIDescriptor: - type: "object" - x-go-name: Descriptor - description: | - A descriptor struct containing digest, media type, and size, as defined in - the [OCI Content Descriptors Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md). - properties: - mediaType: - description: | - The media type of the object this schema refers to. - type: "string" - example: "application/vnd.docker.distribution.manifest.v2+json" - digest: - description: | - The digest of the targeted content. - type: "string" - example: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" - size: - description: | - The size in bytes of the blob. - type: "integer" - format: "int64" - example: 3987495 - # TODO Not yet including these fields for now, as they are nil / omitted in our response. - # urls: - # description: | - # List of URLs from which this object MAY be downloaded. - # type: "array" - # items: - # type: "string" - # format: "uri" - # annotations: - # description: | - # Arbitrary metadata relating to the targeted content. - # type: "object" - # additionalProperties: - # type: "string" - # platform: - # $ref: "#/definitions/OCIPlatform" - - OCIPlatform: - type: "object" - x-go-name: Platform - description: | - Describes the platform which the image in the manifest runs on, as defined - in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md). - properties: - architecture: - description: | - The CPU architecture, for example `amd64` or `ppc64`. - type: "string" - example: "arm" - os: - description: | - The operating system, for example `linux` or `windows`. 
- type: "string" - example: "windows" - os.version: - description: | - Optional field specifying the operating system version, for example on - Windows `10.0.19041.1165`. - type: "string" - example: "10.0.19041.1165" - os.features: - description: | - Optional field specifying an array of strings, each listing a required - OS feature (for example on Windows `win32k`). - type: "array" - items: - type: "string" - example: - - "win32k" - variant: - description: | - Optional field specifying a variant of the CPU, for example `v7` to - specify ARMv7 when architecture is `arm`. - type: "string" - example: "v7" - - DistributionInspect: - type: "object" - x-go-name: DistributionInspect - title: "DistributionInspectResponse" - required: [Descriptor, Platforms] - description: | - Describes the result obtained from contacting the registry to retrieve - image metadata. - properties: - Descriptor: - $ref: "#/definitions/OCIDescriptor" - Platforms: - type: "array" - description: | - An array containing all platforms supported by the image. - items: - $ref: "#/definitions/OCIPlatform" - - ClusterVolume: - type: "object" - description: | - Options and information specific to, and only present on, Swarm CSI - cluster volumes. - properties: - ID: - type: "string" - description: | - The Swarm ID of this volume. Because cluster volumes are Swarm - objects, they have an ID, unlike non-cluster volumes. This ID can - be used to refer to the Volume instead of the name. - Version: - $ref: "#/definitions/ObjectVersion" - CreatedAt: - type: "string" - format: "dateTime" - UpdatedAt: - type: "string" - format: "dateTime" - Spec: - $ref: "#/definitions/ClusterVolumeSpec" - Info: - type: "object" - description: | - Information about the global status of the volume. - properties: - CapacityBytes: - type: "integer" - format: "int64" - description: | - The capacity of the volume in bytes. A value of 0 indicates that - the capacity is unknown. - VolumeContext: - type: "object" - description: | - A map of strings to strings returned from the storage plugin when - the volume is created. - additionalProperties: - type: "string" - VolumeID: - type: "string" - description: | - The ID of the volume as returned by the CSI storage plugin. This - is distinct from the volume's ID as provided by Docker. This ID - is never used by the user when communicating with Docker to refer - to this volume. If the ID is blank, then the Volume has not been - successfully created in the plugin yet. - AccessibleTopology: - type: "array" - description: | - The topology this volume is actually accessible from. - items: - $ref: "#/definitions/Topology" - PublishStatus: - type: "array" - description: | - The status of the volume as it pertains to its publishing and use on - specific nodes - items: - type: "object" - properties: - NodeID: - type: "string" - description: | - The ID of the Swarm node the volume is published on. - State: - type: "string" - description: | - The published state of the volume. - * `pending-publish` The volume should be published to this node, but the call to the controller plugin to do so has not yet been successfully completed. - * `published` The volume is published successfully to the node. - * `pending-node-unpublish` The volume should be unpublished from the node, and the manager is awaiting confirmation from the worker that it has done so. - * `pending-controller-unpublish` The volume is successfully unpublished from the node, but has not yet been successfully unpublished on the controller. 
- enum: - - "pending-publish" - - "published" - - "pending-node-unpublish" - - "pending-controller-unpublish" - PublishContext: - type: "object" - description: | - A map of strings to strings returned by the CSI controller - plugin when a volume is published. - additionalProperties: - type: "string" - - ClusterVolumeSpec: - type: "object" - description: | - Cluster-specific options used to create the volume. - properties: - Group: - type: "string" - description: | - Group defines the volume group of this volume. Volumes belonging to - the same group can be referred to by group name when creating - Services. Referring to a volume by group instructs Swarm to treat - volumes in that group interchangeably for the purpose of scheduling. - Volumes with an empty string for a group technically all belong to - the same, emptystring group. - AccessMode: - type: "object" - description: | - Defines how the volume is used by tasks. - properties: - Scope: - type: "string" - description: | - The set of nodes this volume can be used on at one time. - - `single` The volume may only be scheduled to one node at a time. - - `multi` the volume may be scheduled to any supported number of nodes at a time. - default: "single" - enum: ["single", "multi"] - x-nullable: false - Sharing: - type: "string" - description: | - The number and way that different tasks can use this volume - at one time. - - `none` The volume may only be used by one task at a time. - - `readonly` The volume may be used by any number of tasks, but they all must mount the volume as readonly - - `onewriter` The volume may be used by any number of tasks, but only one may mount it as read/write. - - `all` The volume may have any number of readers and writers. - default: "none" - enum: ["none", "readonly", "onewriter", "all"] - x-nullable: false - MountVolume: - type: "object" - description: | - Options for using this volume as a Mount-type volume. - - Either MountVolume or BlockVolume, but not both, must be - present. - properties: - FsType: - type: "string" - description: | - Specifies the filesystem type for the mount volume. - Optional. - MountFlags: - type: "array" - description: | - Flags to pass when mounting the volume. Optional. - items: - type: "string" - BlockVolume: - type: "object" - description: | - Options for using this volume as a Block-type volume. - Intentionally empty. - Secrets: - type: "array" - description: | - Swarm Secrets that are passed to the CSI storage plugin when - operating on this volume. - items: - type: "object" - description: | - One cluster volume secret entry. Defines a key-value pair that - is passed to the plugin. - properties: - Key: - type: "string" - description: | - Key is the name of the key of the key-value pair passed to - the plugin. - Secret: - type: "string" - description: | - Secret is the swarm Secret object from which to read data. - This can be a Secret name or ID. The Secret data is - retrieved by swarm and used as the value of the key-value - pair passed to the plugin. - AccessibilityRequirements: - type: "object" - description: | - Requirements for the accessible topology of the volume. These - fields are optional. For an in-depth description of what these - fields mean, see the CSI specification. - properties: - Requisite: - type: "array" - description: | - A list of required topologies, at least one of which the - volume must be accessible from. 
- items: - $ref: "#/definitions/Topology" - Preferred: - type: "array" - description: | - A list of topologies that the volume should attempt to be - provisioned in. - items: - $ref: "#/definitions/Topology" - CapacityRange: - type: "object" - description: | - The desired capacity that the volume should be created with. If - empty, the plugin will decide the capacity. - properties: - RequiredBytes: - type: "integer" - format: "int64" - description: | - The volume must be at least this big. The value of 0 - indicates an unspecified minimum - LimitBytes: - type: "integer" - format: "int64" - description: | - The volume must not be bigger than this. The value of 0 - indicates an unspecified maximum. - Availability: - type: "string" - description: | - The availability of the volume for use in tasks. - - `active` The volume is fully available for scheduling on the cluster - - `pause` No new workloads should use the volume, but existing workloads are not stopped. - - `drain` All workloads using this volume should be stopped and rescheduled, and no new ones should be started. - default: "active" - x-nullable: false - enum: - - "active" - - "pause" - - "drain" - - Topology: - description: | - A map of topological domains to topological segments. For in depth - details, see documentation for the Topology object in the CSI - specification. - type: "object" - additionalProperties: - type: "string" - -paths: - /containers/json: - get: - summary: "List containers" - description: | - Returns a list of containers. For details on the format, see the - [inspect endpoint](#operation/ContainerInspect). - - Note that it uses a different, smaller representation of a container - than inspecting a single container. For example, the list of linked - containers is not propagated . - operationId: "ContainerList" - produces: - - "application/json" - parameters: - - name: "all" - in: "query" - description: | - Return all containers. By default, only running containers are shown. - type: "boolean" - default: false - - name: "limit" - in: "query" - description: | - Return this number of most recently created containers, including - non-running ones. - type: "integer" - - name: "size" - in: "query" - description: | - Return the size of container as fields `SizeRw` and `SizeRootFs`. - type: "boolean" - default: false - - name: "filters" - in: "query" - description: | - Filters to process on the container list, encoded as JSON (a - `map[string][]string`). For example, `{"status": ["paused"]}` will - only return paused containers. 
- - Available filters: - - - `ancestor`=(`[:]`, ``, or ``) - - `before`=(`` or ``) - - `expose`=(`[/]`|`/[]`) - - `exited=` containers with exit code of `` - - `health`=(`starting`|`healthy`|`unhealthy`|`none`) - - `id=` a container's ID - - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) - - `is-task=`(`true`|`false`) - - `label=key` or `label="key=value"` of a container label - - `name=` a container's name - - `network`=(`` or ``) - - `publish`=(`[/]`|`/[]`) - - `since`=(`` or ``) - - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) - - `volume`=(`` or ``) - type: "string" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/ContainerSummary" - examples: - application/json: - - Id: "8dfafdbc3a40" - Names: - - "/boring_feynman" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 1" - Created: 1367854155 - State: "Exited" - Status: "Exit 0" - Ports: - - PrivatePort: 2222 - PublicPort: 3333 - Type: "tcp" - Labels: - com.example.vendor: "Acme" - com.example.license: "GPL" - com.example.version: "1.0" - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.2" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:02" - Mounts: - - Name: "fac362...80535" - Source: "/data" - Destination: "/data" - Driver: "local" - Mode: "ro,Z" - RW: false - Propagation: "" - - Id: "9cd87474be90" - Names: - - "/coolName" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 222222" - Created: 1367854155 - State: "Exited" - Status: "Exit 0" - Ports: [] - Labels: {} - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.8" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:08" - Mounts: [] - - Id: "3176a2479c92" - Names: - - "/sleepy_dog" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 3333333333333333" - Created: 1367854154 - State: "Exited" - Status: "Exit 0" - Ports: [] - Labels: {} - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.6" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:06" - Mounts: [] - - Id: "4cb07b47f9fb" - Names: - - "/running_cat" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 444444444444444444444444444444444" - Created: 1367854152 - State: "Exited" - Status: "Exit 0" - Ports: [] - Labels: {} - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - 
NetworkMode: "default" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.5" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:05" - Mounts: [] - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Container"] - /containers/create: - post: - summary: "Create a container" - operationId: "ContainerCreate" - consumes: - - "application/json" - - "application/octet-stream" - produces: - - "application/json" - parameters: - - name: "name" - in: "query" - description: | - Assign the specified name to the container. Must match - `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`. - type: "string" - pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" - - name: "platform" - in: "query" - description: | - Platform in the format `os[/arch[/variant]]` used for image lookup. - - When specified, the daemon checks if the requested image is present - in the local image cache with the given OS and Architecture, and - otherwise returns a `404` status. - - If the option is not set, the host's native OS and Architecture are - used to look up the image in the image cache. However, if no platform - is passed and the given image does exist in the local image cache, - but its OS or architecture does not match, the container is created - with the available image, and a warning is added to the `Warnings` - field in the response, for example; - - WARNING: The requested image's platform (linux/arm64/v8) does not - match the detected host platform (linux/amd64) and no - specific platform was requested - - type: "string" - default: "" - - name: "body" - in: "body" - description: "Container to create" - schema: - allOf: - - $ref: "#/definitions/ContainerConfig" - - type: "object" - properties: - HostConfig: - $ref: "#/definitions/HostConfig" - NetworkingConfig: - $ref: "#/definitions/NetworkingConfig" - example: - Hostname: "" - Domainname: "" - User: "" - AttachStdin: false - AttachStdout: true - AttachStderr: true - Tty: false - OpenStdin: false - StdinOnce: false - Env: - - "FOO=bar" - - "BAZ=quux" - Cmd: - - "date" - Entrypoint: "" - Image: "ubuntu" - Labels: - com.example.vendor: "Acme" - com.example.license: "GPL" - com.example.version: "1.0" - Volumes: - /volumes/data: {} - WorkingDir: "" - NetworkDisabled: false - MacAddress: "12:34:56:78:9a:bc" - ExposedPorts: - 22/tcp: {} - StopSignal: "SIGTERM" - StopTimeout: 10 - HostConfig: - Binds: - - "/tmp:/tmp" - Links: - - "redis3:redis" - Memory: 0 - MemorySwap: 0 - MemoryReservation: 0 - NanoCpus: 500000 - CpuPercent: 80 - CpuShares: 512 - CpuPeriod: 100000 - CpuRealtimePeriod: 1000000 - CpuRealtimeRuntime: 10000 - CpuQuota: 50000 - CpusetCpus: "0,1" - CpusetMems: "0,1" - MaximumIOps: 0 - MaximumIOBps: 0 - BlkioWeight: 300 - BlkioWeightDevice: - - {} - BlkioDeviceReadBps: - - {} - BlkioDeviceReadIOps: - - {} - BlkioDeviceWriteBps: - - {} - BlkioDeviceWriteIOps: - - {} - DeviceRequests: - - Driver: "nvidia" - Count: -1 - DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] - Capabilities: [["gpu", "nvidia", "compute"]] - Options: - property1: "string" - property2: "string" - MemorySwappiness: 60 - OomKillDisable: false - OomScoreAdj: 500 - PidMode: "" - PidsLimit: 0 - PortBindings: - 22/tcp: - - 
HostPort: "11022" - PublishAllPorts: false - Privileged: false - ReadonlyRootfs: false - Dns: - - "8.8.8.8" - DnsOptions: - - "" - DnsSearch: - - "" - VolumesFrom: - - "parent" - - "other:ro" - CapAdd: - - "NET_ADMIN" - CapDrop: - - "MKNOD" - GroupAdd: - - "newgroup" - RestartPolicy: - Name: "" - MaximumRetryCount: 0 - AutoRemove: true - NetworkMode: "bridge" - Devices: [] - Ulimits: - - {} - LogConfig: - Type: "json-file" - Config: {} - SecurityOpt: [] - StorageOpt: {} - CgroupParent: "" - VolumeDriver: "" - ShmSize: 67108864 - NetworkingConfig: - EndpointsConfig: - isolated_nw: - IPAMConfig: - IPv4Address: "172.20.30.33" - IPv6Address: "2001:db8:abcd::3033" - LinkLocalIPs: - - "169.254.34.68" - - "fe80::3468" - Links: - - "container_1" - - "container_2" - Aliases: - - "server_x" - - "server_y" - - required: true - responses: - 201: - description: "Container created successfully" - schema: - $ref: "#/definitions/ContainerCreateResponse" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such image" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such image: c2ada9df5af8" - 409: - description: "conflict" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Container"] - /containers/{id}/json: - get: - summary: "Inspect a container" - description: "Return low-level information about a container." - operationId: "ContainerInspect" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "object" - title: "ContainerInspectResponse" - properties: - Id: - description: "The ID of the container" - type: "string" - Created: - description: "The time the container was created" - type: "string" - Path: - description: "The path to the command being run" - type: "string" - Args: - description: "The arguments to the command being run" - type: "array" - items: - type: "string" - State: - $ref: "#/definitions/ContainerState" - Image: - description: "The container's image ID" - type: "string" - ResolvConfPath: - type: "string" - HostnamePath: - type: "string" - HostsPath: - type: "string" - LogPath: - type: "string" - Name: - type: "string" - RestartCount: - type: "integer" - Driver: - type: "string" - Platform: - type: "string" - MountLabel: - type: "string" - ProcessLabel: - type: "string" - AppArmorProfile: - type: "string" - ExecIDs: - description: "IDs of exec instances that are running in the container." - type: "array" - items: - type: "string" - x-nullable: true - HostConfig: - $ref: "#/definitions/HostConfig" - GraphDriver: - $ref: "#/definitions/GraphDriverData" - SizeRw: - description: | - The size of files that have been created or changed by this - container. - type: "integer" - format: "int64" - SizeRootFs: - description: "The total size of all the files in this container." 
- type: "integer" - format: "int64" - Mounts: - type: "array" - items: - $ref: "#/definitions/MountPoint" - Config: - $ref: "#/definitions/ContainerConfig" - NetworkSettings: - $ref: "#/definitions/NetworkSettings" - examples: - application/json: - AppArmorProfile: "" - Args: - - "-c" - - "exit 9" - Config: - AttachStderr: true - AttachStdin: false - AttachStdout: true - Cmd: - - "/bin/sh" - - "-c" - - "exit 9" - Domainname: "" - Env: - - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - Healthcheck: - Test: ["CMD-SHELL", "exit 0"] - Hostname: "ba033ac44011" - Image: "ubuntu" - Labels: - com.example.vendor: "Acme" - com.example.license: "GPL" - com.example.version: "1.0" - MacAddress: "" - NetworkDisabled: false - OpenStdin: false - StdinOnce: false - Tty: false - User: "" - Volumes: - /volumes/data: {} - WorkingDir: "" - StopSignal: "SIGTERM" - StopTimeout: 10 - Created: "2015-01-06T15:47:31.485331387Z" - Driver: "devicemapper" - ExecIDs: - - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" - - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" - HostConfig: - MaximumIOps: 0 - MaximumIOBps: 0 - BlkioWeight: 0 - BlkioWeightDevice: - - {} - BlkioDeviceReadBps: - - {} - BlkioDeviceWriteBps: - - {} - BlkioDeviceReadIOps: - - {} - BlkioDeviceWriteIOps: - - {} - ContainerIDFile: "" - CpusetCpus: "" - CpusetMems: "" - CpuPercent: 80 - CpuShares: 0 - CpuPeriod: 100000 - CpuRealtimePeriod: 1000000 - CpuRealtimeRuntime: 10000 - Devices: [] - DeviceRequests: - - Driver: "nvidia" - Count: -1 - DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] - Capabilities: [["gpu", "nvidia", "compute"]] - Options: - property1: "string" - property2: "string" - IpcMode: "" - Memory: 0 - MemorySwap: 0 - MemoryReservation: 0 - OomKillDisable: false - OomScoreAdj: 500 - NetworkMode: "bridge" - PidMode: "" - PortBindings: {} - Privileged: false - ReadonlyRootfs: false - PublishAllPorts: false - RestartPolicy: - MaximumRetryCount: 2 - Name: "on-failure" - LogConfig: - Type: "json-file" - Sysctls: - net.ipv4.ip_forward: "1" - Ulimits: - - {} - VolumeDriver: "" - ShmSize: 67108864 - HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname" - HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts" - LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log" - Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39" - Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2" - MountLabel: "" - Name: "/boring_euclid" - NetworkSettings: - Bridge: "" - SandboxID: "" - HairpinMode: false - LinkLocalIPv6Address: "" - LinkLocalIPv6PrefixLen: 0 - SandboxKey: "" - EndpointID: "" - Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - IPAddress: "" - IPPrefixLen: 0 - IPv6Gateway: "" - MacAddress: "" - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.2" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:12:00:02" - Path: "/bin/sh" - ProcessLabel: "" - ResolvConfPath: 
"/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf" - RestartCount: 1 - State: - Error: "" - ExitCode: 9 - FinishedAt: "2015-01-06T15:47:32.080254511Z" - Health: - Status: "healthy" - FailingStreak: 0 - Log: - - Start: "2019-12-22T10:59:05.6385933Z" - End: "2019-12-22T10:59:05.8078452Z" - ExitCode: 0 - Output: "" - OOMKilled: false - Dead: false - Paused: false - Pid: 0 - Restarting: false - Running: true - StartedAt: "2015-01-06T15:47:32.072697474Z" - Status: "running" - Mounts: - - Name: "fac362...80535" - Source: "/data" - Destination: "/data" - Driver: "local" - Mode: "ro,Z" - RW: false - Propagation: "" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "size" - in: "query" - type: "boolean" - default: false - description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" - tags: ["Container"] - /containers/{id}/top: - get: - summary: "List processes running inside a container" - description: | - On Unix systems, this is done by running the `ps` command. This endpoint - is not supported on Windows. - operationId: "ContainerTop" - responses: - 200: - description: "no error" - schema: - type: "object" - title: "ContainerTopResponse" - description: "OK response to ContainerTop operation" - properties: - Titles: - description: "The ps column titles" - type: "array" - items: - type: "string" - Processes: - description: | - Each process running in the container, where each is process - is an array of values corresponding to the titles. - type: "array" - items: - type: "array" - items: - type: "string" - examples: - application/json: - Titles: - - "UID" - - "PID" - - "PPID" - - "C" - - "STIME" - - "TTY" - - "TIME" - - "CMD" - Processes: - - - - "root" - - "13642" - - "882" - - "0" - - "17:03" - - "pts/0" - - "00:00:00" - - "/bin/bash" - - - - "root" - - "13735" - - "13642" - - "0" - - "17:06" - - "pts/0" - - "00:00:00" - - "sleep 10" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "ps_args" - in: "query" - description: "The arguments to pass to `ps`. For example, `aux`" - type: "string" - default: "-ef" - tags: ["Container"] - /containers/{id}/logs: - get: - summary: "Get container logs" - description: | - Get `stdout` and `stderr` logs from a container. - - Note: This endpoint works only for containers with the `json-file` or - `journald` logging driver. - produces: - - "application/vnd.docker.raw-stream" - - "application/vnd.docker.multiplexed-stream" - operationId: "ContainerLogs" - responses: - 200: - description: | - logs returned as a stream in response body. - For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). - Note that unlike the attach endpoint, the logs endpoint does not - upgrade the connection and does not set Content-Type. 
- schema: - type: "string" - format: "binary" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "follow" - in: "query" - description: "Keep connection after returning logs." - type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Return logs from `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Return logs from `stderr`" - type: "boolean" - default: false - - name: "since" - in: "query" - description: "Only return logs since this time, as a UNIX timestamp" - type: "integer" - default: 0 - - name: "until" - in: "query" - description: "Only return logs before this time, as a UNIX timestamp" - type: "integer" - default: 0 - - name: "timestamps" - in: "query" - description: "Add timestamps to every log line" - type: "boolean" - default: false - - name: "tail" - in: "query" - description: | - Only return this number of log lines from the end of the logs. - Specify as an integer or `all` to output all log lines. - type: "string" - default: "all" - tags: ["Container"] - /containers/{id}/changes: - get: - summary: "Get changes on a container’s filesystem" - description: | - Returns which files in a container's filesystem have been added, deleted, - or modified. The `Kind` of modification can be one of: - - - `0`: Modified - - `1`: Added - - `2`: Deleted - operationId: "ContainerChanges" - produces: ["application/json"] - responses: - 200: - description: "The list of changes" - schema: - type: "array" - items: - type: "object" - x-go-name: "ContainerChangeResponseItem" - title: "ContainerChangeResponseItem" - description: "change item in response to ContainerChanges operation" - required: [Path, Kind] - properties: - Path: - description: "Path to file that has changed" - type: "string" - x-nullable: false - Kind: - description: "Kind of change" - type: "integer" - format: "uint8" - enum: [0, 1, 2] - x-nullable: false - examples: - application/json: - - Path: "/dev" - Kind: 0 - - Path: "/dev/kmsg" - Kind: 1 - - Path: "/test" - Kind: 1 - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - tags: ["Container"] - /containers/{id}/export: - get: - summary: "Export a container" - description: "Export the contents of a container as a tarball." 
- operationId: "ContainerExport" - produces: - - "application/octet-stream" - responses: - 200: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - tags: ["Container"] - /containers/{id}/stats: - get: - summary: "Get container stats based on resource usage" - description: | - This endpoint returns a live stream of a container’s resource usage - statistics. - - The `precpu_stats` is the CPU statistic of the *previous* read, and is - used to calculate the CPU usage percentage. It is not an exact copy - of the `cpu_stats` field. - - If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is - nil then for compatibility with older daemons the length of the - corresponding `cpu_usage.percpu_usage` array should be used. - - On a cgroup v2 host, the following fields are not set - * `blkio_stats`: all fields other than `io_service_bytes_recursive` - * `cpu_stats`: `cpu_usage.percpu_usage` - * `memory_stats`: `max_usage` and `failcnt` - Also, `memory_stats.stats` fields are incompatible with cgroup v1. - - To calculate the values shown by the `stats` command of the docker cli tool - the following formulas can be used: - * used_memory = `memory_stats.usage - memory_stats.stats.cache` - * available_memory = `memory_stats.limit` - * Memory usage % = `(used_memory / available_memory) * 100.0` - * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` - * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` - * number_cpus = `lenght(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` - * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` - operationId: "ContainerStats" - produces: ["application/json"] - responses: - 200: - description: "no error" - schema: - type: "object" - examples: - application/json: - read: "2015-01-08T22:57:31.547920715Z" - pids_stats: - current: 3 - networks: - eth0: - rx_bytes: 5338 - rx_dropped: 0 - rx_errors: 0 - rx_packets: 36 - tx_bytes: 648 - tx_dropped: 0 - tx_errors: 0 - tx_packets: 8 - eth5: - rx_bytes: 4641 - rx_dropped: 0 - rx_errors: 0 - rx_packets: 26 - tx_bytes: 690 - tx_dropped: 0 - tx_errors: 0 - tx_packets: 9 - memory_stats: - stats: - total_pgmajfault: 0 - cache: 0 - mapped_file: 0 - total_inactive_file: 0 - pgpgout: 414 - rss: 6537216 - total_mapped_file: 0 - writeback: 0 - unevictable: 0 - pgpgin: 477 - total_unevictable: 0 - pgmajfault: 0 - total_rss: 6537216 - total_rss_huge: 6291456 - total_writeback: 0 - total_inactive_anon: 0 - rss_huge: 6291456 - hierarchical_memory_limit: 67108864 - total_pgfault: 964 - total_active_file: 0 - active_anon: 6537216 - total_active_anon: 6537216 - total_pgpgout: 414 - total_cache: 0 - inactive_anon: 0 - active_file: 0 - pgfault: 964 - inactive_file: 0 - total_pgpgin: 477 - max_usage: 6651904 - usage: 6537216 - failcnt: 0 - limit: 67108864 - blkio_stats: {} - cpu_stats: - cpu_usage: - percpu_usage: - - 8646879 - - 24472255 - - 36438778 - - 30657443 - usage_in_usermode: 50000000 - total_usage: 100215355 - usage_in_kernelmode: 30000000 - system_cpu_usage: 739306590000000 - online_cpus: 4 - throttling_data: - periods: 0 - throttled_periods: 0 - throttled_time: 0 - precpu_stats: - cpu_usage: - 
percpu_usage: - - 8646879 - - 24350896 - - 36438778 - - 30657443 - usage_in_usermode: 50000000 - total_usage: 100093996 - usage_in_kernelmode: 30000000 - system_cpu_usage: 9492140000000 - online_cpus: 4 - throttling_data: - periods: 0 - throttled_periods: 0 - throttled_time: 0 - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "stream" - in: "query" - description: | - Stream the output. If false, the stats will be output once and then - it will disconnect. - type: "boolean" - default: true - - name: "one-shot" - in: "query" - description: | - Only get a single stat instead of waiting for 2 cycles. Must be used - with `stream=false`. - type: "boolean" - default: false - tags: ["Container"] - /containers/{id}/resize: - post: - summary: "Resize a container TTY" - description: "Resize the TTY for a container." - operationId: "ContainerResize" - consumes: - - "application/octet-stream" - produces: - - "text/plain" - responses: - 200: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "cannot resize container" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "h" - in: "query" - description: "Height of the TTY session in characters" - type: "integer" - - name: "w" - in: "query" - description: "Width of the TTY session in characters" - type: "integer" - tags: ["Container"] - /containers/{id}/start: - post: - summary: "Start a container" - operationId: "ContainerStart" - responses: - 204: - description: "no error" - 304: - description: "container already started" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "detachKeys" - in: "query" - description: | - Override the key sequence for detaching a container. Format is a - single character `[a-Z]` or `ctrl-` where `` is one - of: `a-z`, `@`, `^`, `[`, `,` or `_`. - type: "string" - tags: ["Container"] - /containers/{id}/stop: - post: - summary: "Stop a container" - operationId: "ContainerStop" - responses: - 204: - description: "no error" - 304: - description: "container already stopped" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "signal" - in: "query" - description: | - Signal to send to the container as an integer or string (e.g. `SIGINT`). 
- type: "string" - - name: "t" - in: "query" - description: "Number of seconds to wait before killing the container" - type: "integer" - tags: ["Container"] - /containers/{id}/restart: - post: - summary: "Restart a container" - operationId: "ContainerRestart" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "signal" - in: "query" - description: | - Signal to send to the container as an integer or string (e.g. `SIGINT`). - type: "string" - - name: "t" - in: "query" - description: "Number of seconds to wait before killing the container" - type: "integer" - tags: ["Container"] - /containers/{id}/kill: - post: - summary: "Kill a container" - description: | - Send a POSIX signal to a container, defaulting to killing to the - container. - operationId: "ContainerKill" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 409: - description: "container is not running" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "signal" - in: "query" - description: | - Signal to send to the container as an integer or string (e.g. `SIGINT`). - type: "string" - default: "SIGKILL" - tags: ["Container"] - /containers/{id}/update: - post: - summary: "Update a container" - description: | - Change various configuration options of a container without having to - recreate it. - operationId: "ContainerUpdate" - consumes: ["application/json"] - produces: ["application/json"] - responses: - 200: - description: "The container has been updated." 
- schema: - type: "object" - title: "ContainerUpdateResponse" - description: "OK response to ContainerUpdate operation" - properties: - Warnings: - type: "array" - items: - type: "string" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "update" - in: "body" - required: true - schema: - allOf: - - $ref: "#/definitions/Resources" - - type: "object" - properties: - RestartPolicy: - $ref: "#/definitions/RestartPolicy" - example: - BlkioWeight: 300 - CpuShares: 512 - CpuPeriod: 100000 - CpuQuota: 50000 - CpuRealtimePeriod: 1000000 - CpuRealtimeRuntime: 10000 - CpusetCpus: "0,1" - CpusetMems: "0" - Memory: 314572800 - MemorySwap: 514288000 - MemoryReservation: 209715200 - RestartPolicy: - MaximumRetryCount: 4 - Name: "on-failure" - tags: ["Container"] - /containers/{id}/rename: - post: - summary: "Rename a container" - operationId: "ContainerRename" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 409: - description: "name already in use" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "name" - in: "query" - required: true - description: "New name for the container" - type: "string" - tags: ["Container"] - /containers/{id}/pause: - post: - summary: "Pause a container" - description: | - Use the freezer cgroup to suspend all processes in a container. - - Traditionally, when suspending a process the `SIGSTOP` signal is used, - which is observable by the process being suspended. With the freezer - cgroup the process is unaware, and unable to capture, that it is being - suspended, and subsequently resumed. - operationId: "ContainerPause" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - tags: ["Container"] - /containers/{id}/unpause: - post: - summary: "Unpause a container" - description: "Resume a container which has been paused." - operationId: "ContainerUnpause" - responses: - 204: - description: "no error" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - tags: ["Container"] - /containers/{id}/attach: - post: - summary: "Attach to a container" - description: | - Attach to a container to read its output or send it input. 
You can attach - to the same container multiple times and you can reattach to containers - that have been detached. - - Either the `stream` or `logs` parameter must be `true` for this endpoint - to do anything. - - See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) - for more details. - - ### Hijacking - - This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, - and `stderr` on the same socket. - - This is the response from the daemon for an attach request: - - ``` - HTTP/1.1 200 OK - Content-Type: application/vnd.docker.raw-stream - - [STREAM] - ``` - - After the headers and two new lines, the TCP connection can now be used - for raw, bidirectional communication between the client and server. - - To hint potential proxies about connection hijacking, the Docker client - can also optionally send connection upgrade headers. - - For example, the client sends this request to upgrade the connection: - - ``` - POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 - Upgrade: tcp - Connection: Upgrade - ``` - - The Docker daemon will respond with a `101 UPGRADED` response, and will - similarly follow with the raw stream: - - ``` - HTTP/1.1 101 UPGRADED - Content-Type: application/vnd.docker.raw-stream - Connection: Upgrade - Upgrade: tcp - - [STREAM] - ``` - - ### Stream format - - When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), - the HTTP Content-Type header is set to application/vnd.docker.multiplexed-stream - and the stream over the hijacked connected is multiplexed to separate out - `stdout` and `stderr`. The stream consists of a series of frames, each - containing a header and a payload. - - The header contains the information which the stream writes (`stdout` or - `stderr`). It also contains the size of the associated frame encoded in - the last four bytes (`uint32`). - - It is encoded on the first eight bytes like this: - - ```go - header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} - ``` - - `STREAM_TYPE` can be: - - - 0: `stdin` (is written on `stdout`) - - 1: `stdout` - - 2: `stderr` - - `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size - encoded as big endian. - - Following the header is the payload, which is the specified number of - bytes of `STREAM_TYPE`. - - The simplest way to implement this protocol is the following: - - 1. Read 8 bytes. - 2. Choose `stdout` or `stderr` depending on the first byte. - 3. Extract the frame size from the last four bytes. - 4. Read the extracted size and output it on the correct output. - 5. Goto 1. - - ### Stream format when using a TTY - - When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), - the stream is not multiplexed. The data exchanged over the hijacked - connection is simply the raw data from the process PTY and client's - `stdin`. 
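The demultiplexing loop described above (read the 8-byte header, pick the output stream from the first byte, take the big-endian `uint32` payload size from the last four bytes, then copy that many payload bytes) can be sketched in a few lines of Go. The snippet below is only an illustration of the documented frame format, built on the standard library; the function name `demux` and its writer arguments are invented for the example (the official Go SDK ships an equivalent helper, `stdcopy.StdCopy`). The same framing applies to the logs endpoint when the container was created without a TTY.

```go
// Minimal sketch of the multiplexed stream format described above.
// It reads frames from r and writes each payload to stdout or stderr
// according to the STREAM_TYPE byte in the frame header.
package main

import (
	"encoding/binary"
	"io"
)

func demux(r io.Reader, stdout, stderr io.Writer) error {
	header := make([]byte, 8)
	for {
		// 1. Read the 8-byte frame header.
		if _, err := io.ReadFull(r, header); err != nil {
			if err == io.EOF {
				return nil // clean end of stream
			}
			return err
		}
		// 2. Byte 0 selects the stream: 0 (stdin written on stdout), 1 (stdout), 2 (stderr).
		var dst io.Writer
		switch header[0] {
		case 0, 1:
			dst = stdout
		case 2:
			dst = stderr
		default:
			dst = io.Discard
		}
		// 3. Bytes 4-7 carry the payload size as a big-endian uint32.
		size := binary.BigEndian.Uint32(header[4:8])
		// 4. Copy exactly `size` payload bytes to the selected writer, then loop (step 5).
		if _, err := io.CopyN(dst, r, int64(size)); err != nil {
			return err
		}
	}
}
```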
- - operationId: "ContainerAttach" - produces: - - "application/vnd.docker.raw-stream" - - "application/vnd.docker.multiplexed-stream" - responses: - 101: - description: "no error, hints proxy about hijacking" - 200: - description: "no error, no upgrade header found" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "detachKeys" - in: "query" - description: | - Override the key sequence for detaching a container.Format is a single - character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, - `@`, `^`, `[`, `,` or `_`. - type: "string" - - name: "logs" - in: "query" - description: | - Replay previous logs from the container. - - This is useful for attaching to a container that has started and you - want to output everything since the container started. - - If `stream` is also enabled, once all the previous output has been - returned, it will seamlessly transition into streaming current - output. - type: "boolean" - default: false - - name: "stream" - in: "query" - description: | - Stream attached streams from the time the request was made onwards. - type: "boolean" - default: false - - name: "stdin" - in: "query" - description: "Attach to `stdin`" - type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Attach to `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Attach to `stderr`" - type: "boolean" - default: false - tags: ["Container"] - /containers/{id}/attach/ws: - get: - summary: "Attach to a container via a websocket" - operationId: "ContainerAttachWebsocket" - responses: - 101: - description: "no error, hints proxy about hijacking" - 200: - description: "no error, no upgrade header found" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "detachKeys" - in: "query" - description: | - Override the key sequence for detaching a container.Format is a single - character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, - `@`, `^`, `[`, `,`, or `_`. - type: "string" - - name: "logs" - in: "query" - description: "Return logs" - type: "boolean" - default: false - - name: "stream" - in: "query" - description: "Return stream" - type: "boolean" - default: false - - name: "stdin" - in: "query" - description: "Attach to `stdin`" - type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Attach to `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Attach to `stderr`" - type: "boolean" - default: false - tags: ["Container"] - /containers/{id}/wait: - post: - summary: "Wait for a container" - description: "Block until a container stops, then returns the exit code." 
- operationId: "ContainerWait" - produces: ["application/json"] - responses: - 200: - description: "The container has exit." - schema: - $ref: "#/definitions/ContainerWaitResponse" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "condition" - in: "query" - description: | - Wait until a container state reaches the given condition. - - Defaults to `not-running` if omitted or empty. - type: "string" - enum: - - "not-running" - - "next-exit" - - "removed" - default: "not-running" - tags: ["Container"] - /containers/{id}: - delete: - summary: "Remove a container" - operationId: "ContainerDelete" - responses: - 204: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 409: - description: "conflict" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: | - You cannot remove a running container: c2ada9df5af8. Stop the - container before attempting removal or force remove - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "v" - in: "query" - description: "Remove anonymous volumes associated with the container." - type: "boolean" - default: false - - name: "force" - in: "query" - description: "If the container is running, kill it before removing it." - type: "boolean" - default: false - - name: "link" - in: "query" - description: "Remove the specified link associated with the container." - type: "boolean" - default: false - tags: ["Container"] - /containers/{id}/archive: - head: - summary: "Get information about files in a container" - description: | - A response header `X-Docker-Container-Path-Stat` is returned, containing - a base64 - encoded JSON object with some filesystem header information - about the path. - operationId: "ContainerArchiveInfo" - responses: - 200: - description: "no error" - headers: - X-Docker-Container-Path-Stat: - type: "string" - description: | - A base64 - encoded JSON object with some filesystem header - information about the path - 400: - description: "Bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "Container or path does not exist" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "path" - in: "query" - required: true - description: "Resource in the container’s filesystem to archive." - type: "string" - tags: ["Container"] - get: - summary: "Get an archive of a filesystem resource in a container" - description: "Get a tar archive of a resource in the filesystem of container id." 
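The `X-Docker-Container-Path-Stat` header described above is just base64-wrapped JSON, so reading it is a two-step decode. A minimal Go sketch, assuming standard padded base64; the sample payload and its field names are fabricated for illustration, since the spec only promises "some filesystem header information":

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// decodePathStat unwraps the X-Docker-Container-Path-Stat response header:
// base64 on the outside, a JSON object on the inside.
func decodePathStat(header string) (map[string]any, error) {
	raw, err := base64.StdEncoding.DecodeString(header)
	if err != nil {
		return nil, fmt.Errorf("decode base64: %w", err)
	}
	var stat map[string]any
	if err := json.Unmarshal(raw, &stat); err != nil {
		return nil, fmt.Errorf("decode json: %w", err)
	}
	return stat, nil
}

func main() {
	// Fabricated example value; a real one would come from
	// resp.Header.Get("X-Docker-Container-Path-Stat") after
	// HEAD /containers/{id}/archive?path=...
	sample := base64.StdEncoding.EncodeToString([]byte(`{"name":"etc","size":4096}`))

	stat, err := decodePathStat(sample)
	if err != nil {
		panic(err)
	}
	for k, v := range stat {
		fmt.Printf("%s = %v\n", k, v)
	}
}
```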
- operationId: "ContainerArchive" - produces: ["application/x-tar"] - responses: - 200: - description: "no error" - 400: - description: "Bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "Container or path does not exist" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "path" - in: "query" - required: true - description: "Resource in the container’s filesystem to archive." - type: "string" - tags: ["Container"] - put: - summary: "Extract an archive of files or folders to a directory in a container" - description: | - Upload a tar archive to be extracted to a path in the filesystem of container id. - `path` parameter is asserted to be a directory. If it exists as a file, 400 error - will be returned with message "not a directory". - operationId: "PutContainerArchive" - consumes: ["application/x-tar", "application/octet-stream"] - responses: - 200: - description: "The content was extracted successfully" - 400: - description: "Bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "not a directory" - 403: - description: "Permission denied, the volume or container rootfs is marked as read-only." - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "No such container or path does not exist inside the container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the container" - type: "string" - - name: "path" - in: "query" - required: true - description: "Path to a directory in the container to extract the archive’s contents into. " - type: "string" - - name: "noOverwriteDirNonDir" - in: "query" - description: | - If `1`, `true`, or `True` then it will be an error if unpacking the - given content would cause an existing directory to be replaced with - a non-directory and vice versa. - type: "string" - - name: "copyUIDGID" - in: "query" - description: | - If `1`, `true`, then it will copy UID/GID maps to the dest file or - dir - type: "string" - - name: "inputStream" - in: "body" - required: true - description: | - The input stream must be a tar archive compressed with one of the - following algorithms: `identity` (no compression), `gzip`, `bzip2`, - or `xz`. - schema: - type: "string" - format: "binary" - tags: ["Container"] - /containers/prune: - post: - summary: "Delete stopped containers" - produces: - - "application/json" - operationId: "ContainerPrune" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the prune list, encoded as JSON (a `map[string][]string`). - - Available filters: - - `until=` Prune containers created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune containers with (or without, in case `label!=...` is used) the specified labels. 
- type: "string" - responses: - 200: - description: "No error" - schema: - type: "object" - title: "ContainerPruneResponse" - properties: - ContainersDeleted: - description: "Container IDs that were deleted" - type: "array" - items: - type: "string" - SpaceReclaimed: - description: "Disk space reclaimed in bytes" - type: "integer" - format: "int64" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Container"] - /images/json: - get: - summary: "List Images" - description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." - operationId: "ImageList" - produces: - - "application/json" - responses: - 200: - description: "Summary image data for the images matching the query" - schema: - type: "array" - items: - $ref: "#/definitions/ImageSummary" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "all" - in: "query" - description: "Show all images. Only images from a final layer (no children) are shown by default." - type: "boolean" - default: false - - name: "filters" - in: "query" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the images list. - - Available filters: - - - `before`=(`[:]`, `` or ``) - - `dangling=true` - - `label=key` or `label="key=value"` of an image label - - `reference`=(`[:]`) - - `since`=(`[:]`, `` or ``) - type: "string" - - name: "shared-size" - in: "query" - description: "Compute and show shared size as a `SharedSize` field on each image." - type: "boolean" - default: false - - name: "digests" - in: "query" - description: "Show digest information as a `RepoDigests` field on each image." - type: "boolean" - default: false - tags: ["Image"] - /build: - post: - summary: "Build an image" - description: | - Build an image from a tar archive with a `Dockerfile` in it. - - The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). - - The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. - - The build is canceled if the client drops the connection by quitting or being killed. - operationId: "ImageBuild" - consumes: - - "application/octet-stream" - produces: - - "application/json" - parameters: - - name: "inputStream" - in: "body" - description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." - schema: - type: "string" - format: "binary" - - name: "dockerfile" - in: "query" - description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." - type: "string" - default: "Dockerfile" - - name: "t" - in: "query" - description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." 
- type: "string" - - name: "extrahosts" - in: "query" - description: "Extra hosts to add to /etc/hosts" - type: "string" - - name: "remote" - in: "query" - description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." - type: "string" - - name: "q" - in: "query" - description: "Suppress verbose build output." - type: "boolean" - default: false - - name: "nocache" - in: "query" - description: "Do not use the cache when building the image." - type: "boolean" - default: false - - name: "cachefrom" - in: "query" - description: "JSON array of images used for build cache resolution." - type: "string" - - name: "pull" - in: "query" - description: "Attempt to pull the image even if an older image exists locally." - type: "string" - - name: "rm" - in: "query" - description: "Remove intermediate containers after a successful build." - type: "boolean" - default: true - - name: "forcerm" - in: "query" - description: "Always remove intermediate containers, even upon failure." - type: "boolean" - default: false - - name: "memory" - in: "query" - description: "Set memory limit for build." - type: "integer" - - name: "memswap" - in: "query" - description: "Total memory (memory + swap). Set as `-1` to disable swap." - type: "integer" - - name: "cpushares" - in: "query" - description: "CPU shares (relative weight)." - type: "integer" - - name: "cpusetcpus" - in: "query" - description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." - type: "string" - - name: "cpuperiod" - in: "query" - description: "The length of a CPU period in microseconds." - type: "integer" - - name: "cpuquota" - in: "query" - description: "Microseconds of CPU time that the container can get in a CPU period." - type: "integer" - - name: "buildargs" - in: "query" - description: > - JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker - uses the buildargs as the environment context for commands run via the `Dockerfile` RUN - instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for - passing secret values. - - - For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the - query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded. - - - [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg) - type: "string" - - name: "shmsize" - in: "query" - description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB." - type: "integer" - - name: "squash" - in: "query" - description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" - type: "boolean" - - name: "labels" - in: "query" - description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." - type: "string" - - name: "networkmode" - in: "query" - description: | - Sets the networking mode for the run commands during build. Supported - standard values are: `bridge`, `host`, `none`, and `container:`. 
- Any other value is taken as a custom network's name or ID to which this - container should connect to. - type: "string" - - name: "Content-type" - in: "header" - type: "string" - enum: - - "application/x-tar" - default: "application/x-tar" - - name: "X-Registry-Config" - in: "header" - description: | - This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. - - The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: - - ``` - { - "docker.example.com": { - "username": "janedoe", - "password": "hunter2" - }, - "https://index.docker.io/v1/": { - "username": "mobydock", - "password": "conta1n3rize14" - } - } - ``` - - Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. - type: "string" - - name: "platform" - in: "query" - description: "Platform in the format os[/arch[/variant]]" - type: "string" - default: "" - - name: "target" - in: "query" - description: "Target build stage" - type: "string" - default: "" - - name: "outputs" - in: "query" - description: "BuildKit output configuration" - type: "string" - default: "" - responses: - 200: - description: "no error" - 400: - description: "Bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Image"] - /build/prune: - post: - summary: "Delete builder cache" - produces: - - "application/json" - operationId: "BuildPrune" - parameters: - - name: "keep-storage" - in: "query" - description: "Amount of disk space in bytes to keep for cache" - type: "integer" - format: "int64" - - name: "all" - in: "query" - type: "boolean" - description: "Remove all types of build cache" - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the list of build cache objects. - - Available filters: - - - `until=`: duration relative to daemon's time, during which build cache was not used, in Go's duration format (e.g., '24h') - - `id=` - - `parent=` - - `type=` - - `description=` - - `inuse` - - `shared` - - `private` - responses: - 200: - description: "No error" - schema: - type: "object" - title: "BuildPruneResponse" - properties: - CachesDeleted: - type: "array" - items: - description: "ID of build cache object" - type: "string" - SpaceReclaimed: - description: "Disk space reclaimed in bytes" - type: "integer" - format: "int64" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Image"] - /images/create: - post: - summary: "Create an image" - description: "Create an image by either pulling it from a registry or importing it." - operationId: "ImageCreate" - consumes: - - "text/plain" - - "application/octet-stream" - produces: - - "application/json" - responses: - 200: - description: "no error" - 404: - description: "repository does not exist or no read access" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "fromImage" - in: "query" - description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. 
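The `/build` endpoint above expects the whole build context to arrive as a tar stream and takes its options as query parameters. A minimal Go sketch of driving it, assuming the daemon listens on the default unix socket at `/var/run/docker.sock`; the `docker` host in the URL is a placeholder required by `net/http`, and the image tag and Dockerfile contents are made up:

```go
package main

import (
	"archive/tar"
	"bytes"
	"context"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"os"
)

func main() {
	// Build context: an in-memory tar archive containing a single Dockerfile,
	// since the /build endpoint reads the context as a tar stream.
	dockerfile := []byte("FROM busybox\nCMD [\"echo\", \"hello\"]\n")
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	if err := tw.WriteHeader(&tar.Header{Name: "Dockerfile", Mode: 0o644, Size: int64(len(dockerfile))}); err != nil {
		panic(err)
	}
	if _, err := tw.Write(dockerfile); err != nil {
		panic(err)
	}
	if err := tw.Close(); err != nil {
		panic(err)
	}

	// Query parameters documented above: `t` names the image, `dockerfile`
	// points at the Dockerfile inside the context.
	q := url.Values{}
	q.Set("t", "example/hello:latest") // assumed tag, pick your own
	q.Set("dockerfile", "Dockerfile")

	// Client that speaks HTTP over the daemon's unix socket (path assumed).
	client := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
			},
		},
	}

	resp, err := client.Post("http://docker/build?"+q.Encode(), "application/x-tar", &buf)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	fmt.Println("status:", resp.Status)
	// The body is a stream of JSON progress messages; just mirror it here.
	io.Copy(os.Stdout, resp.Body)
}
```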
The pull is cancelled if the HTTP connection is closed." - type: "string" - - name: "fromSrc" - in: "query" - description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." - type: "string" - - name: "repo" - in: "query" - description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." - type: "string" - - name: "tag" - in: "query" - description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." - type: "string" - - name: "message" - in: "query" - description: "Set commit message for imported image." - type: "string" - - name: "inputImage" - in: "body" - description: "Image content if the value `-` has been specified in fromSrc query parameter" - schema: - type: "string" - required: false - - name: "X-Registry-Auth" - in: "header" - description: | - A base64url-encoded auth configuration. - - Refer to the [authentication section](#section/Authentication) for - details. - type: "string" - - name: "changes" - in: "query" - description: | - Apply `Dockerfile` instructions to the image that is created, - for example: `changes=ENV DEBUG=true`. - Note that `ENV DEBUG=true` should be URI component encoded. - - Supported `Dockerfile` instructions: - `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` - type: "array" - items: - type: "string" - - name: "platform" - in: "query" - description: | - Platform in the format os[/arch[/variant]]. - - When used in combination with the `fromImage` option, the daemon checks - if the given image is present in the local image cache with the given - OS and Architecture, and otherwise attempts to pull the image. If the - option is not set, the host's native OS and Architecture are used. - If the given image does not exist in the local image cache, the daemon - attempts to pull the image with the host's native OS and Architecture. - If the given image does exists in the local image cache, but its OS or - architecture does not match, a warning is produced. - - When used with the `fromSrc` option to import an image from an archive, - this option sets the platform information for the imported image. If - the option is not set, the host's native OS and Architecture are used - for the imported image. - type: "string" - default: "" - tags: ["Image"] - /images/{name}/json: - get: - summary: "Inspect an image" - description: "Return low-level information about an image." - operationId: "ImageInspect" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - $ref: "#/definitions/ImageInspect" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such image: someimage (tag: latest)" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or id" - type: "string" - required: true - tags: ["Image"] - /images/{name}/history: - get: - summary: "Get the history of an image" - description: "Return parent layers of an image." 
- operationId: "ImageHistory" - produces: ["application/json"] - responses: - 200: - description: "List of image layers" - schema: - type: "array" - items: - type: "object" - x-go-name: HistoryResponseItem - title: "HistoryResponseItem" - description: "individual image layer information in response to ImageHistory operation" - required: [Id, Created, CreatedBy, Tags, Size, Comment] - properties: - Id: - type: "string" - x-nullable: false - Created: - type: "integer" - format: "int64" - x-nullable: false - CreatedBy: - type: "string" - x-nullable: false - Tags: - type: "array" - items: - type: "string" - Size: - type: "integer" - format: "int64" - x-nullable: false - Comment: - type: "string" - x-nullable: false - examples: - application/json: - - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" - Created: 1398108230 - CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" - Tags: - - "ubuntu:lucid" - - "ubuntu:10.04" - Size: 182964289 - Comment: "" - - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" - Created: 1398108222 - CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" - Tags: [] - Size: 0 - Comment: "" - - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" - Created: 1371157430 - CreatedBy: "" - Tags: - - "scratch12:latest" - - "scratch:latest" - Size: 0 - Comment: "Imported from -" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID" - type: "string" - required: true - tags: ["Image"] - /images/{name}/push: - post: - summary: "Push an image" - description: | - Push an image to a registry. - - If you wish to push an image on to a private registry, that image must - already have a tag which references the registry. For example, - `registry.example.com/myimage:latest`. - - The push is cancelled if the HTTP connection is closed. - operationId: "ImagePush" - consumes: - - "application/octet-stream" - responses: - 200: - description: "No error" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID." - type: "string" - required: true - - name: "tag" - in: "query" - description: "The tag to associate with the image on the registry." - type: "string" - - name: "X-Registry-Auth" - in: "header" - description: | - A base64url-encoded auth configuration. - - Refer to the [authentication section](#section/Authentication) for - details. - type: "string" - required: true - tags: ["Image"] - /images/{name}/tag: - post: - summary: "Tag an image" - description: "Tag an image so that it becomes part of a repository." 
- operationId: "ImageTag" - responses: - 201: - description: "No error" - 400: - description: "Bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "Conflict" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID to tag." - type: "string" - required: true - - name: "repo" - in: "query" - description: "The repository to tag in. For example, `someuser/someimage`." - type: "string" - - name: "tag" - in: "query" - description: "The name of the new tag." - type: "string" - tags: ["Image"] - /images/{name}: - delete: - summary: "Remove an image" - description: | - Remove an image, along with any untagged parent images that were - referenced by that image. - - Images can't be removed if they have descendant images, are being - used by a running container or are being used by a build. - operationId: "ImageDelete" - produces: ["application/json"] - responses: - 200: - description: "The image was deleted successfully" - schema: - type: "array" - items: - $ref: "#/definitions/ImageDeleteResponseItem" - examples: - application/json: - - Untagged: "3e2f21a89f" - - Deleted: "3e2f21a89f" - - Deleted: "53b4f83ac9" - 404: - description: "No such image" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "Conflict" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID" - type: "string" - required: true - - name: "force" - in: "query" - description: "Remove the image even if it is being used by stopped containers or has other tags" - type: "boolean" - default: false - - name: "noprune" - in: "query" - description: "Do not delete untagged parent images" - type: "boolean" - default: false - tags: ["Image"] - /images/search: - get: - summary: "Search images" - description: "Search for an image on Docker Hub." - operationId: "ImageSearch" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - type: "array" - items: - type: "object" - title: "ImageSearchResponseItem" - properties: - description: - type: "string" - is_official: - type: "boolean" - is_automated: - type: "boolean" - name: - type: "string" - star_count: - type: "integer" - examples: - application/json: - - description: "" - is_official: false - is_automated: false - name: "wma55/u1210sshd" - star_count: 0 - - description: "" - is_official: false - is_automated: false - name: "jdswinbank/sshd" - star_count: 0 - - description: "" - is_official: false - is_automated: false - name: "vgauthier/sshd" - star_count: 0 - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "term" - in: "query" - description: "Term to search" - type: "string" - required: true - - name: "limit" - in: "query" - description: "Maximum number of results to return" - type: "integer" - - name: "filters" - in: "query" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: - - - `is-automated=(true|false)` - - `is-official=(true|false)` - - `stars=` Matches images that has at least 'number' stars. 
- type: "string" - tags: ["Image"] - /images/prune: - post: - summary: "Delete unused images" - produces: - - "application/json" - operationId: "ImagePrune" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: - - - `dangling=` When set to `true` (or `1`), prune only - unused *and* untagged images. When set to `false` - (or `0`), all unused images are pruned. - - `until=` Prune images created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. - - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune images with (or without, in case `label!=...` is used) the specified labels. - type: "string" - responses: - 200: - description: "No error" - schema: - type: "object" - title: "ImagePruneResponse" - properties: - ImagesDeleted: - description: "Images that were deleted" - type: "array" - items: - $ref: "#/definitions/ImageDeleteResponseItem" - SpaceReclaimed: - description: "Disk space reclaimed in bytes" - type: "integer" - format: "int64" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Image"] - /auth: - post: - summary: "Check auth configuration" - description: | - Validate credentials for a registry and, if available, get an identity - token for accessing the registry without password. - operationId: "SystemAuth" - consumes: ["application/json"] - produces: ["application/json"] - responses: - 200: - description: "An identity token was generated successfully." - schema: - type: "object" - title: "SystemAuthResponse" - required: [Status] - properties: - Status: - description: "The status of the authentication" - type: "string" - x-nullable: false - IdentityToken: - description: "An opaque token used to authenticate a user after a successful login" - type: "string" - x-nullable: false - examples: - application/json: - Status: "Login Succeeded" - IdentityToken: "9cbaf023786cd7..." - 204: - description: "No error" - 401: - description: "Auth error" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "authConfig" - in: "body" - description: "Authentication to check" - schema: - $ref: "#/definitions/AuthConfig" - tags: ["System"] - /info: - get: - summary: "Get system information" - operationId: "SystemInfo" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - $ref: "#/definitions/SystemInfo" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["System"] - /version: - get: - summary: "Get version" - description: "Returns the version of Docker that is running and various information about the system that Docker is running on." - operationId: "SystemVersion" - produces: ["application/json"] - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/SystemVersion" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["System"] - /_ping: - get: - summary: "Ping" - description: "This is a dummy endpoint you can use to test if the server is accessible." 
- operationId: "SystemPing" - produces: ["text/plain"] - responses: - 200: - description: "no error" - schema: - type: "string" - example: "OK" - headers: - API-Version: - type: "string" - description: "Max API Version the server supports" - Builder-Version: - type: "string" - description: | - Default version of docker image builder - - The default on Linux is version "2" (BuildKit), but the daemon - can be configured to recommend version "1" (classic Builder). - Windows does not yet support BuildKit for native Windows images, - and uses "1" (classic builder) as a default. - - This value is a recommendation as advertised by the daemon, and - it is up to the client to choose which builder to use. - default: "2" - Docker-Experimental: - type: "boolean" - description: "If the server is running with experimental mode enabled" - Swarm: - type: "string" - enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] - description: | - Contains information about Swarm status of the daemon, - and if the daemon is acting as a manager or worker node. - default: "inactive" - Cache-Control: - type: "string" - default: "no-cache, no-store, must-revalidate" - Pragma: - type: "string" - default: "no-cache" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - headers: - Cache-Control: - type: "string" - default: "no-cache, no-store, must-revalidate" - Pragma: - type: "string" - default: "no-cache" - tags: ["System"] - head: - summary: "Ping" - description: "This is a dummy endpoint you can use to test if the server is accessible." - operationId: "SystemPingHead" - produces: ["text/plain"] - responses: - 200: - description: "no error" - schema: - type: "string" - example: "(empty)" - headers: - API-Version: - type: "string" - description: "Max API Version the server supports" - Builder-Version: - type: "string" - description: "Default version of docker image builder" - Docker-Experimental: - type: "boolean" - description: "If the server is running with experimental mode enabled" - Swarm: - type: "string" - enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] - description: | - Contains information about Swarm status of the daemon, - and if the daemon is acting as a manager or worker node. 
- default: "inactive" - Cache-Control: - type: "string" - default: "no-cache, no-store, must-revalidate" - Pragma: - type: "string" - default: "no-cache" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["System"] - /commit: - post: - summary: "Create a new image from a container" - operationId: "ImageCommit" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - $ref: "#/definitions/IdResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "containerConfig" - in: "body" - description: "The container configuration" - schema: - $ref: "#/definitions/ContainerConfig" - - name: "container" - in: "query" - description: "The ID or name of the container to commit" - type: "string" - - name: "repo" - in: "query" - description: "Repository name for the created image" - type: "string" - - name: "tag" - in: "query" - description: "Tag name for the create image" - type: "string" - - name: "comment" - in: "query" - description: "Commit message" - type: "string" - - name: "author" - in: "query" - description: "Author of the image (e.g., `John Hannibal Smith `)" - type: "string" - - name: "pause" - in: "query" - description: "Whether to pause the container before committing" - type: "boolean" - default: true - - name: "changes" - in: "query" - description: "`Dockerfile` instructions to apply while committing" - type: "string" - tags: ["Image"] - /events: - get: - summary: "Monitor events" - description: | - Stream real-time events from the server. - - Various objects within Docker report events when something happens to them. - - Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` - - Images report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` - - Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` - - Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune` - - The Docker daemon reports these events: `reload` - - Services report these events: `create`, `update`, and `remove` - - Nodes report these events: `create`, `update`, and `remove` - - Secrets report these events: `create`, `update`, and `remove` - - Configs report these events: `create`, `update`, and `remove` - - The Builder reports `prune` events - - operationId: "SystemEvents" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/EventMessage" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "since" - in: "query" - description: "Show events created since this timestamp then stream new events." - type: "string" - - name: "until" - in: "query" - description: "Show events created until this timestamp then stop streaming." 
- type: "string" - - name: "filters" - in: "query" - description: | - A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters: - - - `config=` config name or ID - - `container=` container name or ID - - `daemon=` daemon name or ID - - `event=` event type - - `image=` image name or ID - - `label=` image or container label - - `network=` network name or ID - - `node=` node ID - - `plugin`= plugin name or ID - - `scope`= local or swarm - - `secret=` secret name or ID - - `service=` service name or ID - - `type=` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config` - - `volume=` volume name - type: "string" - tags: ["System"] - /system/df: - get: - summary: "Get data usage information" - operationId: "SystemDataUsage" - responses: - 200: - description: "no error" - schema: - type: "object" - title: "SystemDataUsageResponse" - properties: - LayersSize: - type: "integer" - format: "int64" - Images: - type: "array" - items: - $ref: "#/definitions/ImageSummary" - Containers: - type: "array" - items: - $ref: "#/definitions/ContainerSummary" - Volumes: - type: "array" - items: - $ref: "#/definitions/Volume" - BuildCache: - type: "array" - items: - $ref: "#/definitions/BuildCache" - example: - LayersSize: 1092588 - Images: - - - Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" - ParentId: "" - RepoTags: - - "busybox:latest" - RepoDigests: - - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" - Created: 1466724217 - Size: 1092588 - SharedSize: 0 - VirtualSize: 1092588 - Labels: {} - Containers: 1 - Containers: - - - Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" - Names: - - "/top" - Image: "busybox" - ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" - Command: "top" - Created: 1472592424 - Ports: [] - SizeRootFs: 1092588 - Labels: {} - State: "exited" - Status: "Exited (0) 56 minutes ago" - HostConfig: - NetworkMode: "default" - NetworkSettings: - Networks: - bridge: - IPAMConfig: null - Links: null - Aliases: null - NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" - EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" - Gateway: "172.18.0.1" - IPAddress: "172.18.0.2" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:12:00:02" - Mounts: [] - Volumes: - - - Name: "my-volume" - Driver: "local" - Mountpoint: "/var/lib/docker/volumes/my-volume/_data" - Labels: null - Scope: "local" - Options: null - UsageData: - Size: 10920104 - RefCount: 2 - BuildCache: - - - ID: "hw53o5aio51xtltp5xjp8v7fx" - Parents: [] - Type: "regular" - Description: "pulled from docker.io/library/debian@sha256:234cb88d3020898631af0ccbbcca9a66ae7306ecd30c9720690858c1b007d2a0" - InUse: false - Shared: true - Size: 0 - CreatedAt: "2021-06-28T13:31:01.474619385Z" - LastUsedAt: "2021-07-07T22:02:32.738075951Z" - UsageCount: 26 - - - ID: "ndlpt0hhvkqcdfkputsk4cq9c" - Parents: ["ndlpt0hhvkqcdfkputsk4cq9c"] - Type: "regular" - Description: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" - InUse: false - Shared: true - Size: 51 - CreatedAt: "2021-06-28T13:31:03.002625487Z" - LastUsedAt: "2021-07-07T22:02:32.773909517Z" - UsageCount: 26 - 500: - description: "server error" - schema: - $ref: 
"#/definitions/ErrorResponse" - parameters: - - name: "type" - in: "query" - description: | - Object types, for which to compute and return data. - type: "array" - collectionFormat: multi - items: - type: "string" - enum: ["container", "image", "volume", "build-cache"] - tags: ["System"] - /images/{name}/get: - get: - summary: "Export an image" - description: | - Get a tarball containing all images and metadata for a repository. - - If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. - - ### Image tarball format - - An image tarball contains one directory per image layer (named using its long ID), each containing these files: - - - `VERSION`: currently `1.0` - the file format version - - `json`: detailed layer information, similar to `docker inspect layer_id` - - `layer.tar`: A tarfile containing the filesystem changes in this layer - - The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. - - If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. - - ```json - { - "hello-world": { - "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" - } - } - ``` - operationId: "ImageGet" - produces: - - "application/x-tar" - responses: - 200: - description: "no error" - schema: - type: "string" - format: "binary" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or ID" - type: "string" - required: true - tags: ["Image"] - /images/get: - get: - summary: "Export several images" - description: | - Get a tarball containing all images and metadata for several image - repositories. - - For each value of the `names` parameter: if it is a specific name and - tag (e.g. `ubuntu:latest`), then only that image (and its parents) are - returned; if it is an image ID, similarly only that image (and its parents) - are returned and there would be no names referenced in the 'repositories' - file for this image ID. - - For details on the format, see the [export image endpoint](#operation/ImageGet). - operationId: "ImageGetAll" - produces: - - "application/x-tar" - responses: - 200: - description: "no error" - schema: - type: "string" - format: "binary" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "names" - in: "query" - description: "Image names to filter by" - type: "array" - items: - type: "string" - tags: ["Image"] - /images/load: - post: - summary: "Import images" - description: | - Load a set of images and tags into a repository. - - For details on the format, see the [export image endpoint](#operation/ImageGet). - operationId: "ImageLoad" - consumes: - - "application/x-tar" - produces: - - "application/json" - responses: - 200: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "imagesTarball" - in: "body" - description: "Tar archive containing images" - schema: - type: "string" - format: "binary" - - name: "quiet" - in: "query" - description: "Suppress progress details during load." 
- type: "boolean" - default: false - tags: ["Image"] - /containers/{id}/exec: - post: - summary: "Create an exec instance" - description: "Run a command inside a running container." - operationId: "ContainerExec" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - $ref: "#/definitions/IdResponse" - 404: - description: "no such container" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such container: c2ada9df5af8" - 409: - description: "container is paused" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "execConfig" - in: "body" - description: "Exec configuration" - schema: - type: "object" - title: "ExecConfig" - properties: - AttachStdin: - type: "boolean" - description: "Attach to `stdin` of the exec command." - AttachStdout: - type: "boolean" - description: "Attach to `stdout` of the exec command." - AttachStderr: - type: "boolean" - description: "Attach to `stderr` of the exec command." - ConsoleSize: - type: "array" - description: "Initial console size, as an `[height, width]` array." - x-nullable: true - minItems: 2 - maxItems: 2 - items: - type: "integer" - minimum: 0 - DetachKeys: - type: "string" - description: | - Override the key sequence for detaching a container. Format is - a single character `[a-Z]` or `ctrl-` where `` - is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. - Tty: - type: "boolean" - description: "Allocate a pseudo-TTY." - Env: - description: | - A list of environment variables in the form `["VAR=value", ...]`. - type: "array" - items: - type: "string" - Cmd: - type: "array" - description: "Command to run, as a string or array of strings." - items: - type: "string" - Privileged: - type: "boolean" - description: "Runs the exec process with extended privileges." - default: false - User: - type: "string" - description: | - The user, and optionally, group to run the exec process inside - the container. Format is one of: `user`, `user:group`, `uid`, - or `uid:gid`. - WorkingDir: - type: "string" - description: | - The working directory for the exec process inside the container. - example: - AttachStdin: false - AttachStdout: true - AttachStderr: true - DetachKeys: "ctrl-p,ctrl-q" - Tty: false - Cmd: - - "date" - Env: - - "FOO=bar" - - "BAZ=quux" - required: true - - name: "id" - in: "path" - description: "ID or name of container" - type: "string" - required: true - tags: ["Exec"] - /exec/{id}/start: - post: - summary: "Start an exec instance" - description: | - Starts a previously set up exec instance. If detach is true, this endpoint - returns immediately after starting the command. Otherwise, it sets up an - interactive session with the command. - operationId: "ExecStart" - consumes: - - "application/json" - produces: - - "application/vnd.docker.raw-stream" - - "application/vnd.docker.multiplexed-stream" - responses: - 200: - description: "No error" - 404: - description: "No such exec instance" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "Container is stopped or paused" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "execStartConfig" - in: "body" - schema: - type: "object" - title: "ExecStartConfig" - properties: - Detach: - type: "boolean" - description: "Detach from the command." - Tty: - type: "boolean" - description: "Allocate a pseudo-TTY." 
- ConsoleSize: - type: "array" - description: "Initial console size, as an `[height, width]` array." - x-nullable: true - minItems: 2 - maxItems: 2 - items: - type: "integer" - minimum: 0 - example: - Detach: false - Tty: true - ConsoleSize: [80, 64] - - name: "id" - in: "path" - description: "Exec instance ID" - required: true - type: "string" - tags: ["Exec"] - /exec/{id}/resize: - post: - summary: "Resize an exec instance" - description: | - Resize the TTY session used by an exec instance. This endpoint only works - if `tty` was specified as part of creating and starting the exec instance. - operationId: "ExecResize" - responses: - 200: - description: "No error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "No such exec instance" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Exec instance ID" - required: true - type: "string" - - name: "h" - in: "query" - description: "Height of the TTY session in characters" - type: "integer" - - name: "w" - in: "query" - description: "Width of the TTY session in characters" - type: "integer" - tags: ["Exec"] - /exec/{id}/json: - get: - summary: "Inspect an exec instance" - description: "Return low-level information about an exec instance." - operationId: "ExecInspect" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - type: "object" - title: "ExecInspectResponse" - properties: - CanRemove: - type: "boolean" - DetachKeys: - type: "string" - ID: - type: "string" - Running: - type: "boolean" - ExitCode: - type: "integer" - ProcessConfig: - $ref: "#/definitions/ProcessConfig" - OpenStdin: - type: "boolean" - OpenStderr: - type: "boolean" - OpenStdout: - type: "boolean" - ContainerID: - type: "string" - Pid: - type: "integer" - description: "The system process ID for the exec process." - examples: - application/json: - CanRemove: false - ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" - DetachKeys: "" - ExitCode: 2 - ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" - OpenStderr: true - OpenStdin: true - OpenStdout: true - ProcessConfig: - arguments: - - "-c" - - "exit 2" - entrypoint: "sh" - privileged: false - tty: true - user: "1000" - Running: false - Pid: 42000 - 404: - description: "No such exec instance" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Exec instance ID" - required: true - type: "string" - tags: ["Exec"] - - /volumes: - get: - summary: "List volumes" - operationId: "VolumeList" - produces: ["application/json"] - responses: - 200: - description: "Summary volume data that matches the query" - schema: - $ref: "#/definitions/VolumeListResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - description: | - JSON encoded value of the filters (a `map[string][]string`) to - process on the volumes list. Available filters: - - - `dangling=` When set to `true` (or `1`), returns all - volumes that are not in use by a container. When set to `false` - (or `0`), only volumes that are in use by one or more - containers are returned. - - `driver=` Matches volumes based on their driver. 
- - `label=` or `label=:` Matches volumes based on - the presence of a `label` alone or a `label` and a value. - - `name=` Matches all or part of a volume name. - type: "string" - format: "json" - tags: ["Volume"] - - /volumes/create: - post: - summary: "Create a volume" - operationId: "VolumeCreate" - consumes: ["application/json"] - produces: ["application/json"] - responses: - 201: - description: "The volume was created successfully" - schema: - $ref: "#/definitions/Volume" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "volumeConfig" - in: "body" - required: true - description: "Volume configuration" - schema: - $ref: "#/definitions/VolumeCreateOptions" - tags: ["Volume"] - - /volumes/{name}: - get: - summary: "Inspect a volume" - operationId: "VolumeInspect" - produces: ["application/json"] - responses: - 200: - description: "No error" - schema: - $ref: "#/definitions/Volume" - 404: - description: "No such volume" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - required: true - description: "Volume name or ID" - type: "string" - tags: ["Volume"] - - put: - summary: | - "Update a volume. Valid only for Swarm cluster volumes" - operationId: "VolumeUpdate" - consumes: ["application/json"] - produces: ["application/json"] - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such volume" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "The name or ID of the volume" - type: "string" - required: true - - name: "body" - in: "body" - schema: - # though the schema for is an object that contains only a - # ClusterVolumeSpec, wrapping the ClusterVolumeSpec in this object - # means that if, later on, we support things like changing the - # labels, we can do so without duplicating that information to the - # ClusterVolumeSpec. - type: "object" - description: "Volume configuration" - properties: - Spec: - $ref: "#/definitions/ClusterVolumeSpec" - description: | - The spec of the volume to update. Currently, only Availability may - change. All other fields must remain unchanged. - - name: "version" - in: "query" - description: | - The version number of the volume being updated. This is required to - avoid conflicting writes. Found in the volume's `ClusterVolume` - field. - type: "integer" - format: "int64" - required: true - tags: ["Volume"] - - delete: - summary: "Remove a volume" - description: "Instruct the driver to remove the volume." 
- operationId: "VolumeDelete" - responses: - 204: - description: "The volume was removed" - 404: - description: "No such volume or volume driver" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "Volume is in use and cannot be removed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - required: true - description: "Volume name or ID" - type: "string" - - name: "force" - in: "query" - description: "Force the removal of the volume" - type: "boolean" - default: false - tags: ["Volume"] - - /volumes/prune: - post: - summary: "Delete unused volumes" - produces: - - "application/json" - operationId: "VolumePrune" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the prune list, encoded as JSON (a `map[string][]string`). - - Available filters: - - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. - - `all` (`all=true`) - Consider all (local) volumes for pruning and not just anonymous volumes. - type: "string" - responses: - 200: - description: "No error" - schema: - type: "object" - title: "VolumePruneResponse" - properties: - VolumesDeleted: - description: "Volumes that were deleted" - type: "array" - items: - type: "string" - SpaceReclaimed: - description: "Disk space reclaimed in bytes" - type: "integer" - format: "int64" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Volume"] - /networks: - get: - summary: "List networks" - description: | - Returns a list of networks. For details on the format, see the - [network inspect endpoint](#operation/NetworkInspect). - - Note that it uses a different, smaller representation of a network than - inspecting a single network. For example, the list of containers attached - to the network is not propagated in API versions 1.28 and up. 
- operationId: "NetworkList" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - type: "array" - items: - $ref: "#/definitions/Network" - examples: - application/json: - - Name: "bridge" - Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" - Created: "2016-10-19T06:21:00.416543526Z" - Scope: "local" - Driver: "bridge" - EnableIPv6: false - Internal: false - Attachable: false - Ingress: false - IPAM: - Driver: "default" - Config: - - - Subnet: "172.17.0.0/16" - Options: - com.docker.network.bridge.default_bridge: "true" - com.docker.network.bridge.enable_icc: "true" - com.docker.network.bridge.enable_ip_masquerade: "true" - com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" - com.docker.network.bridge.name: "docker0" - com.docker.network.driver.mtu: "1500" - - Name: "none" - Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" - Created: "0001-01-01T00:00:00Z" - Scope: "local" - Driver: "null" - EnableIPv6: false - Internal: false - Attachable: false - Ingress: false - IPAM: - Driver: "default" - Config: [] - Containers: {} - Options: {} - - Name: "host" - Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" - Created: "0001-01-01T00:00:00Z" - Scope: "local" - Driver: "host" - EnableIPv6: false - Internal: false - Attachable: false - Ingress: false - IPAM: - Driver: "default" - Config: [] - Containers: {} - Options: {} - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - description: | - JSON encoded value of the filters (a `map[string][]string`) to process - on the networks list. - - Available filters: - - - `dangling=` When set to `true` (or `1`), returns all - networks that are not in use by a container. When set to `false` - (or `0`), only networks that are in use by one or more - containers are returned. - - `driver=` Matches a network's driver. - - `id=` Matches all or part of a network ID. - - `label=` or `label==` of a network label. - - `name=` Matches all or part of a network name. - - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). - - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. 
- type: "string" - tags: ["Network"] - - /networks/{id}: - get: - summary: "Inspect a network" - operationId: "NetworkInspect" - produces: - - "application/json" - responses: - 200: - description: "No error" - schema: - $ref: "#/definitions/Network" - 404: - description: "Network not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Network ID or name" - required: true - type: "string" - - name: "verbose" - in: "query" - description: "Detailed inspect output for troubleshooting" - type: "boolean" - default: false - - name: "scope" - in: "query" - description: "Filter the network by scope (swarm, global, or local)" - type: "string" - tags: ["Network"] - - delete: - summary: "Remove a network" - operationId: "NetworkDelete" - responses: - 204: - description: "No error" - 403: - description: "operation not supported for pre-defined networks" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such network" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Network ID or name" - required: true - type: "string" - tags: ["Network"] - - /networks/create: - post: - summary: "Create a network" - operationId: "NetworkCreate" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "No error" - schema: - type: "object" - title: "NetworkCreateResponse" - properties: - Id: - description: "The ID of the created network." - type: "string" - Warning: - type: "string" - example: - Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" - Warning: "" - 403: - description: "operation not supported for pre-defined networks" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "plugin not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "networkConfig" - in: "body" - description: "Network configuration" - required: true - schema: - type: "object" - title: "NetworkCreateRequest" - required: ["Name"] - properties: - Name: - description: "The network's name." - type: "string" - CheckDuplicate: - description: | - Check for networks with duplicate names. Since Network is - primarily keyed based on a random ID and not on the name, and - network name is strictly a user-friendly alias to the network - which is uniquely identified using ID, there is no guaranteed - way to check for duplicates. CheckDuplicate is there to provide - a best effort checking of any networks which has the same name - but it is not guaranteed to catch all name collisions. - type: "boolean" - Driver: - description: "Name of the network driver plugin to use." - type: "string" - default: "bridge" - Internal: - description: "Restrict external access to the network." - type: "boolean" - Attachable: - description: | - Globally scoped network is manually attachable by regular - containers from workers in swarm mode. - type: "boolean" - Ingress: - description: | - Ingress network is the network which provides the routing-mesh - in swarm mode. - type: "boolean" - IPAM: - description: "Optional custom IP scheme for the network." - $ref: "#/definitions/IPAM" - EnableIPv6: - description: "Enable IPv6 on the network." 
- type: "boolean" - Options: - description: "Network specific options to be used by the drivers." - type: "object" - additionalProperties: - type: "string" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: - type: "string" - example: - Name: "isolated_nw" - CheckDuplicate: false - Driver: "bridge" - EnableIPv6: true - IPAM: - Driver: "default" - Config: - - Subnet: "172.20.0.0/16" - IPRange: "172.20.10.0/24" - Gateway: "172.20.10.11" - - Subnet: "2001:db8:abcd::/64" - Gateway: "2001:db8:abcd::1011" - Options: - foo: "bar" - Internal: true - Attachable: false - Ingress: false - Options: - com.docker.network.bridge.default_bridge: "true" - com.docker.network.bridge.enable_icc: "true" - com.docker.network.bridge.enable_ip_masquerade: "true" - com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" - com.docker.network.bridge.name: "docker0" - com.docker.network.driver.mtu: "1500" - Labels: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - tags: ["Network"] - - /networks/{id}/connect: - post: - summary: "Connect a container to a network" - operationId: "NetworkConnect" - consumes: - - "application/json" - responses: - 200: - description: "No error" - 403: - description: "Operation not supported for swarm scoped networks" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "Network or container not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Network ID or name" - required: true - type: "string" - - name: "container" - in: "body" - required: true - schema: - type: "object" - title: "NetworkConnectRequest" - properties: - Container: - type: "string" - description: "The ID or name of the container to connect to the network." - EndpointConfig: - $ref: "#/definitions/EndpointSettings" - example: - Container: "3613f73ba0e4" - EndpointConfig: - IPAMConfig: - IPv4Address: "172.24.56.89" - IPv6Address: "2001:db8::5689" - tags: ["Network"] - - /networks/{id}/disconnect: - post: - summary: "Disconnect a container from a network" - operationId: "NetworkDisconnect" - consumes: - - "application/json" - responses: - 200: - description: "No error" - 403: - description: "Operation not supported for swarm scoped networks" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "Network or container not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "Network ID or name" - required: true - type: "string" - - name: "container" - in: "body" - required: true - schema: - type: "object" - title: "NetworkDisconnectRequest" - properties: - Container: - type: "string" - description: | - The ID or name of the container to disconnect from the network. - Force: - type: "boolean" - description: | - Force the container to disconnect from the network. - tags: ["Network"] - /networks/prune: - post: - summary: "Delete unused networks" - produces: - - "application/json" - operationId: "NetworkPrune" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the prune list, encoded as JSON (a `map[string][]string`). - - Available filters: - - `until=` Prune networks created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. 
`10m`, `1h30m`) computed relative to the daemon machine’s time. - - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune networks with (or without, in case `label!=...` is used) the specified labels. - type: "string" - responses: - 200: - description: "No error" - schema: - type: "object" - title: "NetworkPruneResponse" - properties: - NetworksDeleted: - description: "Networks that were deleted" - type: "array" - items: - type: "string" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Network"] - /plugins: - get: - summary: "List plugins" - operationId: "PluginList" - description: "Returns information about installed plugins." - produces: ["application/json"] - responses: - 200: - description: "No error" - schema: - type: "array" - items: - $ref: "#/definitions/Plugin" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the plugin list. - - Available filters: - - - `capability=` - - `enable=|` - tags: ["Plugin"] - - /plugins/privileges: - get: - summary: "Get plugin privileges" - operationId: "GetPluginPrivileges" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/PluginPrivilege" - example: - - Name: "network" - Description: "" - Value: - - "host" - - Name: "mount" - Description: "" - Value: - - "/data" - - Name: "device" - Description: "" - Value: - - "/dev/cpu_dma_latency" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "remote" - in: "query" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - tags: - - "Plugin" - - /plugins/pull: - post: - summary: "Install a plugin" - operationId: "PluginPull" - description: | - Pulls and installs a plugin. After the plugin is installed, it can be - enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). - produces: - - "application/json" - responses: - 204: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "remote" - in: "query" - description: | - Remote reference for plugin to install. - - The `:latest` tag is optional, and is used as the default if omitted. - required: true - type: "string" - - name: "name" - in: "query" - description: | - Local name for the pulled plugin. - - The `:latest` tag is optional, and is used as the default if omitted. - required: false - type: "string" - - name: "X-Registry-Auth" - in: "header" - description: | - A base64url-encoded auth configuration to use when pulling a plugin - from a registry. - - Refer to the [authentication section](#section/Authentication) for - details. 
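To tie the plugin endpoints together, a hedged sketch of the documented install flow: fetch the privileges a plugin requests, then grant them when pulling. The plugin reference is a placeholder and error handling is minimal; after the pull succeeds, the plugin can be enabled via `POST /plugins/{name}/enable` as noted above.

```go
package main

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
)

func engineClient() *http.Client {
	return &http.Client{Transport: &http.Transport{
		// Assumed default socket path for a local daemon.
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}
}

func main() {
	c := engineClient()
	ref := "vieux/sshfs:latest" // placeholder plugin reference

	// GET /plugins/privileges?remote=<ref> returns the privileges the plugin
	// will ask for (network, mount, device, ...).
	resp, err := c.Get("http://docker/plugins/privileges?remote=" + url.QueryEscape(ref))
	if err != nil {
		panic(err)
	}
	privileges, _ := io.ReadAll(resp.Body)
	resp.Body.Close()

	// POST /plugins/pull?remote=<ref> with the privileges array as the body
	// grants them and installs the plugin (204 on success).
	resp, err = c.Post("http://docker/plugins/pull?remote="+url.QueryEscape(ref),
		"application/json", bytes.NewReader(privileges))
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println("pull:", resp.Status)
}
```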
- type: "string" - - name: "body" - in: "body" - schema: - type: "array" - items: - $ref: "#/definitions/PluginPrivilege" - example: - - Name: "network" - Description: "" - Value: - - "host" - - Name: "mount" - Description: "" - Value: - - "/data" - - Name: "device" - Description: "" - Value: - - "/dev/cpu_dma_latency" - tags: ["Plugin"] - /plugins/{name}/json: - get: - summary: "Inspect a plugin" - operationId: "PluginInspect" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Plugin" - 404: - description: "plugin is not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - tags: ["Plugin"] - /plugins/{name}: - delete: - summary: "Remove a plugin" - operationId: "PluginDelete" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Plugin" - 404: - description: "plugin is not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - - name: "force" - in: "query" - description: | - Disable the plugin before removing. This may result in issues if the - plugin is in use by a container. - type: "boolean" - default: false - tags: ["Plugin"] - /plugins/{name}/enable: - post: - summary: "Enable a plugin" - operationId: "PluginEnable" - responses: - 200: - description: "no error" - 404: - description: "plugin is not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - - name: "timeout" - in: "query" - description: "Set the HTTP client timeout (in seconds)" - type: "integer" - default: 0 - tags: ["Plugin"] - /plugins/{name}/disable: - post: - summary: "Disable a plugin" - operationId: "PluginDisable" - responses: - 200: - description: "no error" - 404: - description: "plugin is not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - tags: ["Plugin"] - /plugins/{name}/upgrade: - post: - summary: "Upgrade a plugin" - operationId: "PluginUpgrade" - responses: - 204: - description: "no error" - 404: - description: "plugin not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - - name: "remote" - in: "query" - description: | - Remote reference to upgrade to. - - The `:latest` tag is optional, and is used as the default if omitted. 
- required: true - type: "string" - - name: "X-Registry-Auth" - in: "header" - description: | - A base64url-encoded auth configuration to use when pulling a plugin - from a registry. - - Refer to the [authentication section](#section/Authentication) for - details. - type: "string" - - name: "body" - in: "body" - schema: - type: "array" - items: - $ref: "#/definitions/PluginPrivilege" - example: - - Name: "network" - Description: "" - Value: - - "host" - - Name: "mount" - Description: "" - Value: - - "/data" - - Name: "device" - Description: "" - Value: - - "/dev/cpu_dma_latency" - tags: ["Plugin"] - /plugins/create: - post: - summary: "Create a plugin" - operationId: "PluginCreate" - consumes: - - "application/x-tar" - responses: - 204: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "query" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - - name: "tarContext" - in: "body" - description: "Path to tar containing plugin rootfs and manifest" - schema: - type: "string" - format: "binary" - tags: ["Plugin"] - /plugins/{name}/push: - post: - summary: "Push a plugin" - operationId: "PluginPush" - description: | - Push a plugin to the registry. - parameters: - - name: "name" - in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - responses: - 200: - description: "no error" - 404: - description: "plugin not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Plugin"] - /plugins/{name}/set: - post: - summary: "Configure a plugin" - operationId: "PluginSet" - consumes: - - "application/json" - parameters: - - name: "name" - in: "path" - description: | - The name of the plugin. The `:latest` tag is optional, and is the - default if omitted. - required: true - type: "string" - - name: "body" - in: "body" - schema: - type: "array" - items: - type: "string" - example: ["DEBUG=1"] - responses: - 204: - description: "No error" - 404: - description: "Plugin not installed" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Plugin"] - /nodes: - get: - summary: "List nodes" - operationId: "NodeList" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Node" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - description: | - Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). 
- - Available filters: - - `id=` - - `label=` - - `membership=`(`accepted`|`pending`)` - - `name=` - - `node.label=` - - `role=`(`manager`|`worker`)` - type: "string" - tags: ["Node"] - /nodes/{id}: - get: - summary: "Inspect a node" - operationId: "NodeInspect" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Node" - 404: - description: "no such node" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID or name of the node" - type: "string" - required: true - tags: ["Node"] - delete: - summary: "Delete a node" - operationId: "NodeDelete" - responses: - 200: - description: "no error" - 404: - description: "no such node" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID or name of the node" - type: "string" - required: true - - name: "force" - in: "query" - description: "Force remove a node from the swarm" - default: false - type: "boolean" - tags: ["Node"] - /nodes/{id}/update: - post: - summary: "Update a node" - operationId: "NodeUpdate" - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such node" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID of the node" - type: "string" - required: true - - name: "body" - in: "body" - schema: - $ref: "#/definitions/NodeSpec" - - name: "version" - in: "query" - description: | - The version number of the node object being updated. This is required - to avoid conflicting writes. 
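Several update endpoints in this spec (nodes, swarm, services, secrets, configs) take a `version` query parameter to guard against conflicting writes. Below is a sketch of the read-modify-write pattern for a node; the `Version.Index` and `Spec` field names, and the `Availability: drain` value, are assumptions made for illustration based on the object examples elsewhere in this file.

```go
package main

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
)

func engineClient() *http.Client {
	return &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock") // assumed socket path
		},
	}}
}

func main() {
	c := engineClient()
	nodeID := "24ifsmvkjbyhk" // placeholder node ID

	// Read the current object; its version index is what the update must echo back.
	resp, err := c.Get("http://docker/nodes/" + nodeID)
	if err != nil {
		panic(err)
	}
	var node struct {
		Version struct{ Index uint64 }
		Spec    map[string]any
	}
	json.NewDecoder(resp.Body).Decode(&node)
	resp.Body.Close()
	if node.Spec == nil {
		node.Spec = map[string]any{}
	}

	// Modify the spec locally, e.g. drain the node (assumed field/value).
	node.Spec["Availability"] = "drain"
	body, _ := json.Marshal(node.Spec)

	// POST /nodes/{id}/update?version=<index>; a stale version is rejected.
	target := fmt.Sprintf("http://docker/nodes/%s/update?version=%d", nodeID, node.Version.Index)
	resp, err = c.Post(target, "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println("update:", resp.Status)
}
```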
- type: "integer" - format: "int64" - required: true - tags: ["Node"] - /swarm: - get: - summary: "Inspect swarm" - operationId: "SwarmInspect" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Swarm" - 404: - description: "no such swarm" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Swarm"] - /swarm/init: - post: - summary: "Initialize a new swarm" - operationId: "SwarmInit" - produces: - - "application/json" - - "text/plain" - responses: - 200: - description: "no error" - schema: - description: "The node ID" - type: "string" - example: "7v2t30z9blmxuhnyo6s4cpenp" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is already part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - required: true - schema: - type: "object" - title: "SwarmInitRequest" - properties: - ListenAddr: - description: | - Listen address used for inter-manager communication, as well - as determining the networking interface used for the VXLAN - Tunnel Endpoint (VTEP). This can either be an address/port - combination in the form `192.168.1.1:4567`, or an interface - followed by a port number, like `eth0:4567`. If the port number - is omitted, the default swarm listening port is used. - type: "string" - AdvertiseAddr: - description: | - Externally reachable address advertised to other nodes. This - can either be an address/port combination in the form - `192.168.1.1:4567`, or an interface followed by a port number, - like `eth0:4567`. If the port number is omitted, the port - number from the listen address is used. If `AdvertiseAddr` is - not specified, it will be automatically detected when possible. - type: "string" - DataPathAddr: - description: | - Address or interface to use for data path traffic (format: - ``), for example, `192.168.1.1`, or an interface, - like `eth0`. If `DataPathAddr` is unspecified, the same address - as `AdvertiseAddr` is used. - - The `DataPathAddr` specifies the address that global scope - network drivers will publish towards other nodes in order to - reach the containers running on this node. Using this parameter - it is possible to separate the container data traffic from the - management traffic of the cluster. - type: "string" - DataPathPort: - description: | - DataPathPort specifies the data path port number for data traffic. - Acceptable port range is 1024 to 49151. - if no port is set or is set to 0, default port 4789 will be used. - type: "integer" - format: "uint32" - DefaultAddrPool: - description: | - Default Address Pool specifies default subnet pools for global - scope networks. - type: "array" - items: - type: "string" - example: ["10.10.0.0/16", "20.20.0.0/16"] - ForceNewCluster: - description: "Force creation of a new swarm." - type: "boolean" - SubnetSize: - description: | - SubnetSize specifies the subnet size of the networks created - from the default subnet pool. 
- type: "integer" - format: "uint32" - Spec: - $ref: "#/definitions/SwarmSpec" - example: - ListenAddr: "0.0.0.0:2377" - AdvertiseAddr: "192.168.1.1:2377" - DataPathPort: 4789 - DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"] - SubnetSize: 24 - ForceNewCluster: false - Spec: - Orchestration: {} - Raft: {} - Dispatcher: {} - CAConfig: {} - EncryptionConfig: - AutoLockManagers: false - tags: ["Swarm"] - /swarm/join: - post: - summary: "Join an existing swarm" - operationId: "SwarmJoin" - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is already part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - required: true - schema: - type: "object" - title: "SwarmJoinRequest" - properties: - ListenAddr: - description: | - Listen address used for inter-manager communication if the node - gets promoted to manager, as well as determining the networking - interface used for the VXLAN Tunnel Endpoint (VTEP). - type: "string" - AdvertiseAddr: - description: | - Externally reachable address advertised to other nodes. This - can either be an address/port combination in the form - `192.168.1.1:4567`, or an interface followed by a port number, - like `eth0:4567`. If the port number is omitted, the port - number from the listen address is used. If `AdvertiseAddr` is - not specified, it will be automatically detected when possible. - type: "string" - DataPathAddr: - description: | - Address or interface to use for data path traffic (format: - ``), for example, `192.168.1.1`, or an interface, - like `eth0`. If `DataPathAddr` is unspecified, the same address - as `AdvertiseAddr` is used. - - The `DataPathAddr` specifies the address that global scope - network drivers will publish towards other nodes in order to - reach the containers running on this node. Using this parameter - it is possible to separate the container data traffic from the - management traffic of the cluster. - - type: "string" - RemoteAddrs: - description: | - Addresses of manager nodes already participating in the swarm. - type: "array" - items: - type: "string" - JoinToken: - description: "Secret token for joining this swarm." - type: "string" - example: - ListenAddr: "0.0.0.0:2377" - AdvertiseAddr: "192.168.1.1:2377" - RemoteAddrs: - - "node1:2377" - JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" - tags: ["Swarm"] - /swarm/leave: - post: - summary: "Leave a swarm" - operationId: "SwarmLeave" - responses: - 200: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "force" - description: | - Force leave swarm, even if this is the last manager or that it will - break the cluster. 
- in: "query" - type: "boolean" - default: false - tags: ["Swarm"] - /swarm/update: - post: - summary: "Update a swarm" - operationId: "SwarmUpdate" - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - required: true - schema: - $ref: "#/definitions/SwarmSpec" - - name: "version" - in: "query" - description: | - The version number of the swarm object being updated. This is - required to avoid conflicting writes. - type: "integer" - format: "int64" - required: true - - name: "rotateWorkerToken" - in: "query" - description: "Rotate the worker join token." - type: "boolean" - default: false - - name: "rotateManagerToken" - in: "query" - description: "Rotate the manager join token." - type: "boolean" - default: false - - name: "rotateManagerUnlockKey" - in: "query" - description: "Rotate the manager unlock key." - type: "boolean" - default: false - tags: ["Swarm"] - /swarm/unlockkey: - get: - summary: "Get the unlock key" - operationId: "SwarmUnlockkey" - consumes: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "object" - title: "UnlockKeyResponse" - properties: - UnlockKey: - description: "The swarm's unlock key." - type: "string" - example: - UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Swarm"] - /swarm/unlock: - post: - summary: "Unlock a locked manager" - operationId: "SwarmUnlock" - consumes: - - "application/json" - produces: - - "application/json" - parameters: - - name: "body" - in: "body" - required: true - schema: - type: "object" - title: "SwarmUnlockRequest" - properties: - UnlockKey: - description: "The swarm's unlock key." - type: "string" - example: - UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" - responses: - 200: - description: "no error" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Swarm"] - /services: - get: - summary: "List services" - operationId: "ServiceList" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Service" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the services list. - - Available filters: - - - `id=` - - `label=` - - `mode=["replicated"|"global"]` - - `name=` - - name: "status" - in: "query" - type: "boolean" - description: | - Include service status, with count of running and desired tasks. 
- tags: ["Service"] - /services/create: - post: - summary: "Create a service" - operationId: "ServiceCreate" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - type: "object" - title: "ServiceCreateResponse" - properties: - ID: - description: "The ID of the created service." - type: "string" - Warning: - description: "Optional warning message" - type: "string" - example: - ID: "ak7w3gjqoa3kuz8xcpnyy0pvl" - Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 403: - description: "network is not eligible for services" - schema: - $ref: "#/definitions/ErrorResponse" - 409: - description: "name conflicts with an existing service" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - required: true - schema: - allOf: - - $ref: "#/definitions/ServiceSpec" - - type: "object" - example: - Name: "web" - TaskTemplate: - ContainerSpec: - Image: "nginx:alpine" - Mounts: - - - ReadOnly: true - Source: "web-data" - Target: "/usr/share/nginx/html" - Type: "volume" - VolumeOptions: - DriverConfig: {} - Labels: - com.example.something: "something-value" - Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] - User: "33" - DNSConfig: - Nameservers: ["8.8.8.8"] - Search: ["example.org"] - Options: ["timeout:3"] - Secrets: - - - File: - Name: "www.example.org.key" - UID: "33" - GID: "33" - Mode: 384 - SecretID: "fpjqlhnwb19zds35k8wn80lq9" - SecretName: "example_org_domain_key" - LogDriver: - Name: "json-file" - Options: - max-file: "3" - max-size: "10M" - Placement: {} - Resources: - Limits: - MemoryBytes: 104857600 - Reservations: {} - RestartPolicy: - Condition: "on-failure" - Delay: 10000000000 - MaxAttempts: 10 - Mode: - Replicated: - Replicas: 4 - UpdateConfig: - Parallelism: 2 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - RollbackConfig: - Parallelism: 1 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - EndpointSpec: - Ports: - - - Protocol: "tcp" - PublishedPort: 8080 - TargetPort: 80 - Labels: - foo: "bar" - - name: "X-Registry-Auth" - in: "header" - description: | - A base64url-encoded auth configuration for pulling from private - registries. - - Refer to the [authentication section](#section/Authentication) for - details. - type: "string" - tags: ["Service"] - /services/{id}: - get: - summary: "Inspect a service" - operationId: "ServiceInspect" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Service" - 404: - description: "no such service" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "ID or name of service." - required: true - type: "string" - - name: "insertDefaults" - in: "query" - description: "Fill empty fields with default values." 
- type: "boolean" - default: false - tags: ["Service"] - delete: - summary: "Delete a service" - operationId: "ServiceDelete" - responses: - 200: - description: "no error" - 404: - description: "no such service" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "ID or name of service." - required: true - type: "string" - tags: ["Service"] - /services/{id}/update: - post: - summary: "Update a service" - operationId: "ServiceUpdate" - consumes: ["application/json"] - produces: ["application/json"] - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/ServiceUpdateResponse" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such service" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "ID or name of service." - required: true - type: "string" - - name: "body" - in: "body" - required: true - schema: - allOf: - - $ref: "#/definitions/ServiceSpec" - - type: "object" - example: - Name: "top" - TaskTemplate: - ContainerSpec: - Image: "busybox" - Args: - - "top" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ForceUpdate: 0 - Mode: - Replicated: - Replicas: 1 - UpdateConfig: - Parallelism: 2 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - RollbackConfig: - Parallelism: 1 - Delay: 1000000000 - FailureAction: "pause" - Monitor: 15000000000 - MaxFailureRatio: 0.15 - EndpointSpec: - Mode: "vip" - - - name: "version" - in: "query" - description: | - The version number of the service object being updated. This is - required to avoid conflicting writes. - This version number should be the value as currently set on the - service *before* the update. You can find the current version by - calling `GET /services/{id}` - required: true - type: "integer" - - name: "registryAuthFrom" - in: "query" - description: | - If the `X-Registry-Auth` header is not specified, this parameter - indicates where to find registry authorization credentials. - type: "string" - enum: ["spec", "previous-spec"] - default: "spec" - - name: "rollback" - in: "query" - description: | - Set to this parameter to `previous` to cause a server-side rollback - to the previous service spec. The supplied spec will be ignored in - this case. - type: "string" - - name: "X-Registry-Auth" - in: "header" - description: | - A base64url-encoded auth configuration for pulling from private - registries. - - Refer to the [authentication section](#section/Authentication) for - details. - type: "string" - - tags: ["Service"] - /services/{id}/logs: - get: - summary: "Get service logs" - description: | - Get `stdout` and `stderr` logs from a service. See also - [`/containers/{id}/logs`](#operation/ContainerLogs). - - **Note**: This endpoint works only for services with the `local`, - `json-file` or `journald` logging drivers. 
- produces: - - "application/vnd.docker.raw-stream" - - "application/vnd.docker.multiplexed-stream" - operationId: "ServiceLogs" - responses: - 200: - description: "logs returned as a stream in response body" - schema: - type: "string" - format: "binary" - 404: - description: "no such service" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such service: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID or name of the service" - type: "string" - - name: "details" - in: "query" - description: "Show service context and extra details provided to logs." - type: "boolean" - default: false - - name: "follow" - in: "query" - description: "Keep connection after returning logs." - type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Return logs from `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Return logs from `stderr`" - type: "boolean" - default: false - - name: "since" - in: "query" - description: "Only return logs since this time, as a UNIX timestamp" - type: "integer" - default: 0 - - name: "timestamps" - in: "query" - description: "Add timestamps to every log line" - type: "boolean" - default: false - - name: "tail" - in: "query" - description: | - Only return this number of log lines from the end of the logs. - Specify as an integer or `all` to output all log lines. - type: "string" - default: "all" - tags: ["Service"] - /tasks: - get: - summary: "List tasks" - operationId: "TaskList" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Task" - example: - - ID: "0kzzo1i0y4jz6027t0k7aezc7" - Version: - Index: 71 - CreatedAt: "2016-06-07T21:07:31.171892745Z" - UpdatedAt: "2016-06-07T21:07:31.376370513Z" - Spec: - ContainerSpec: - Image: "redis" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" - Slot: 1 - NodeID: "60gvrl6tm78dmak4yl7srz94v" - Status: - Timestamp: "2016-06-07T21:07:31.290032978Z" - State: "running" - Message: "started" - ContainerStatus: - ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" - PID: 677 - DesiredState: "running" - NetworksAttachments: - - Network: - ID: "4qvuz4ko70xaltuqbt8956gd1" - Version: - Index: 18 - CreatedAt: "2016-06-07T20:31:11.912919752Z" - UpdatedAt: "2016-06-07T21:07:29.955277358Z" - Spec: - Name: "ingress" - Labels: - com.docker.swarm.internal: "true" - DriverConfiguration: {} - IPAMOptions: - Driver: {} - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - DriverState: - Name: "overlay" - Options: - com.docker.network.driver.overlay.vxlanid_list: "256" - IPAMOptions: - Driver: - Name: "default" - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - Addresses: - - "10.255.0.10/16" - - ID: "1yljwbmlr8er2waf8orvqpwms" - Version: - Index: 30 - CreatedAt: "2016-06-07T21:07:30.019104782Z" - UpdatedAt: "2016-06-07T21:07:30.231958098Z" - Name: "hopeful_cori" - Spec: - ContainerSpec: - Image: "redis" - Resources: - Limits: {} - Reservations: {} - RestartPolicy: - Condition: "any" - MaxAttempts: 0 - Placement: {} - ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" - Slot: 1 - NodeID: 
"60gvrl6tm78dmak4yl7srz94v" - Status: - Timestamp: "2016-06-07T21:07:30.202183143Z" - State: "shutdown" - Message: "shutdown" - ContainerStatus: - ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" - DesiredState: "shutdown" - NetworksAttachments: - - Network: - ID: "4qvuz4ko70xaltuqbt8956gd1" - Version: - Index: 18 - CreatedAt: "2016-06-07T20:31:11.912919752Z" - UpdatedAt: "2016-06-07T21:07:29.955277358Z" - Spec: - Name: "ingress" - Labels: - com.docker.swarm.internal: "true" - DriverConfiguration: {} - IPAMOptions: - Driver: {} - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - DriverState: - Name: "overlay" - Options: - com.docker.network.driver.overlay.vxlanid_list: "256" - IPAMOptions: - Driver: - Name: "default" - Configs: - - Subnet: "10.255.0.0/16" - Gateway: "10.255.0.1" - Addresses: - - "10.255.0.5/16" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the tasks list. - - Available filters: - - - `desired-state=(running | shutdown | accepted)` - - `id=` - - `label=key` or `label="key=value"` - - `name=` - - `node=` - - `service=` - tags: ["Task"] - /tasks/{id}: - get: - summary: "Inspect a task" - operationId: "TaskInspect" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Task" - 404: - description: "no such task" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "ID of the task" - required: true - type: "string" - tags: ["Task"] - /tasks/{id}/logs: - get: - summary: "Get task logs" - description: | - Get `stdout` and `stderr` logs from a task. - See also [`/containers/{id}/logs`](#operation/ContainerLogs). - - **Note**: This endpoint works only for services with the `local`, - `json-file` or `journald` logging drivers. - operationId: "TaskLogs" - produces: - - "application/vnd.docker.raw-stream" - - "application/vnd.docker.multiplexed-stream" - responses: - 200: - description: "logs returned as a stream in response body" - schema: - type: "string" - format: "binary" - 404: - description: "no such task" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such task: c2ada9df5af8" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - description: "ID of the task" - type: "string" - - name: "details" - in: "query" - description: "Show task context and extra details provided to logs." - type: "boolean" - default: false - - name: "follow" - in: "query" - description: "Keep connection after returning logs." 
- type: "boolean" - default: false - - name: "stdout" - in: "query" - description: "Return logs from `stdout`" - type: "boolean" - default: false - - name: "stderr" - in: "query" - description: "Return logs from `stderr`" - type: "boolean" - default: false - - name: "since" - in: "query" - description: "Only return logs since this time, as a UNIX timestamp" - type: "integer" - default: 0 - - name: "timestamps" - in: "query" - description: "Add timestamps to every log line" - type: "boolean" - default: false - - name: "tail" - in: "query" - description: | - Only return this number of log lines from the end of the logs. - Specify as an integer or `all` to output all log lines. - type: "string" - default: "all" - tags: ["Task"] - /secrets: - get: - summary: "List secrets" - operationId: "SecretList" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Secret" - example: - - ID: "blt1owaxmitz71s9v5zh81zun" - Version: - Index: 85 - CreatedAt: "2017-07-20T13:55:28.678958722Z" - UpdatedAt: "2017-07-20T13:55:28.678958722Z" - Spec: - Name: "mysql-passwd" - Labels: - some.label: "some.value" - Driver: - Name: "secret-bucket" - Options: - OptionA: "value for driver option A" - OptionB: "value for driver option B" - - ID: "ktnbjxoalbkvbvedmg1urrz8h" - Version: - Index: 11 - CreatedAt: "2016-11-05T01:20:17.327670065Z" - UpdatedAt: "2016-11-05T01:20:17.327670065Z" - Spec: - Name: "app-dev.crt" - Labels: - foo: "bar" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the secrets list. 
- - Available filters: - - - `id=` - - `label= or label==value` - - `name=` - - `names=` - tags: ["Secret"] - /secrets/create: - post: - summary: "Create a secret" - operationId: "SecretCreate" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - $ref: "#/definitions/IdResponse" - 409: - description: "name conflicts with an existing object" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - schema: - allOf: - - $ref: "#/definitions/SecretSpec" - - type: "object" - example: - Name: "app-key.crt" - Labels: - foo: "bar" - Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" - Driver: - Name: "secret-bucket" - Options: - OptionA: "value for driver option A" - OptionB: "value for driver option B" - tags: ["Secret"] - /secrets/{id}: - get: - summary: "Inspect a secret" - operationId: "SecretInspect" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Secret" - examples: - application/json: - ID: "ktnbjxoalbkvbvedmg1urrz8h" - Version: - Index: 11 - CreatedAt: "2016-11-05T01:20:17.327670065Z" - UpdatedAt: "2016-11-05T01:20:17.327670065Z" - Spec: - Name: "app-dev.crt" - Labels: - foo: "bar" - Driver: - Name: "secret-bucket" - Options: - OptionA: "value for driver option A" - OptionB: "value for driver option B" - - 404: - description: "secret not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - type: "string" - description: "ID of the secret" - tags: ["Secret"] - delete: - summary: "Delete a secret" - operationId: "SecretDelete" - produces: - - "application/json" - responses: - 204: - description: "no error" - 404: - description: "secret not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - type: "string" - description: "ID of the secret" - tags: ["Secret"] - /secrets/{id}/update: - post: - summary: "Update a Secret" - operationId: "SecretUpdate" - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such secret" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID or name of the secret" - type: "string" - required: true - - name: "body" - in: "body" - schema: - $ref: "#/definitions/SecretSpec" - description: | - The spec of the secret to update. Currently, only the Labels field - can be updated. All other fields must remain unchanged from the - [SecretInspect endpoint](#operation/SecretInspect) response values. 
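Echoing the request example above, a sketch of creating a secret follows; the `Data` field carries the base64-encoded secret bytes, and the name, label, and payload are placeholders.

```go
package main

import (
	"bytes"
	"context"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"net"
	"net/http"
)

func engineClient() *http.Client {
	return &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock") // assumed socket path
		},
	}}
}

func main() {
	// SecretSpec-style body: a name, optional labels, and the secret value
	// base64-encoded in Data.
	body, _ := json.Marshal(map[string]any{
		"Name":   "app-key.crt",
		"Labels": map[string]string{"foo": "bar"},
		"Data":   base64.StdEncoding.EncodeToString([]byte("not a real certificate")),
	})
	resp, err := engineClient().Post("http://docker/secrets/create", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// On success the endpoint answers 201 with an IdResponse body.
	raw, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(raw))
}
```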
- - name: "version" - in: "query" - description: | - The version number of the secret object being updated. This is - required to avoid conflicting writes. - type: "integer" - format: "int64" - required: true - tags: ["Secret"] - /configs: - get: - summary: "List configs" - operationId: "ConfigList" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - type: "array" - items: - $ref: "#/definitions/Config" - example: - - ID: "ktnbjxoalbkvbvedmg1urrz8h" - Version: - Index: 11 - CreatedAt: "2016-11-05T01:20:17.327670065Z" - UpdatedAt: "2016-11-05T01:20:17.327670065Z" - Spec: - Name: "server.conf" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "filters" - in: "query" - type: "string" - description: | - A JSON encoded value of the filters (a `map[string][]string`) to - process on the configs list. - - Available filters: - - - `id=` - - `label= or label==value` - - `name=` - - `names=` - tags: ["Config"] - /configs/create: - post: - summary: "Create a config" - operationId: "ConfigCreate" - consumes: - - "application/json" - produces: - - "application/json" - responses: - 201: - description: "no error" - schema: - $ref: "#/definitions/IdResponse" - 409: - description: "name conflicts with an existing object" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "body" - in: "body" - schema: - allOf: - - $ref: "#/definitions/ConfigSpec" - - type: "object" - example: - Name: "server.conf" - Labels: - foo: "bar" - Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" - tags: ["Config"] - /configs/{id}: - get: - summary: "Inspect a config" - operationId: "ConfigInspect" - produces: - - "application/json" - responses: - 200: - description: "no error" - schema: - $ref: "#/definitions/Config" - examples: - application/json: - ID: "ktnbjxoalbkvbvedmg1urrz8h" - Version: - Index: 11 - CreatedAt: "2016-11-05T01:20:17.327670065Z" - UpdatedAt: "2016-11-05T01:20:17.327670065Z" - Spec: - Name: "app-dev.crt" - 404: - description: "config not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - type: "string" - description: "ID of the config" - tags: ["Config"] - delete: - summary: "Delete a config" - operationId: "ConfigDelete" - produces: - - "application/json" - responses: - 204: - description: "no error" - 404: - description: "config not found" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - required: true - type: "string" - description: "ID of the config" - tags: ["Config"] - /configs/{id}/update: - post: - summary: "Update a Config" - operationId: "ConfigUpdate" - responses: - 200: - description: "no error" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 404: - description: "no such config" - schema: - 
$ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - 503: - description: "node is not part of a swarm" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "id" - in: "path" - description: "The ID or name of the config" - type: "string" - required: true - - name: "body" - in: "body" - schema: - $ref: "#/definitions/ConfigSpec" - description: | - The spec of the config to update. Currently, only the Labels field - can be updated. All other fields must remain unchanged from the - [ConfigInspect endpoint](#operation/ConfigInspect) response values. - - name: "version" - in: "query" - description: | - The version number of the config object being updated. This is - required to avoid conflicting writes. - type: "integer" - format: "int64" - required: true - tags: ["Config"] - /distribution/{name}/json: - get: - summary: "Get image information from the registry" - description: | - Return image digest and platform information by contacting the registry. - operationId: "DistributionInspect" - produces: - - "application/json" - responses: - 200: - description: "descriptor and platform information" - schema: - $ref: "#/definitions/DistributionInspect" - 401: - description: "Failed authentication or no image found" - schema: - $ref: "#/definitions/ErrorResponse" - examples: - application/json: - message: "No such image: someimage (tag: latest)" - 500: - description: "Server error" - schema: - $ref: "#/definitions/ErrorResponse" - parameters: - - name: "name" - in: "path" - description: "Image name or id" - type: "string" - required: true - tags: ["Distribution"] - /session: - post: - summary: "Initialize interactive session" - description: | - Start a new interactive session with a server. Session allows server to - call back to the client for advanced capabilities. - - ### Hijacking - - This endpoint hijacks the HTTP connection to HTTP2 transport that allows - the client to expose gPRC services on that connection. - - For example, the client sends this request to upgrade the connection: - - ``` - POST /session HTTP/1.1 - Upgrade: h2c - Connection: Upgrade - ``` - - The Docker daemon responds with a `101 UPGRADED` response follow with - the raw stream: - - ``` - HTTP/1.1 101 UPGRADED - Connection: Upgrade - Upgrade: h2c - ``` - operationId: "Session" - produces: - - "application/vnd.docker.raw-stream" - responses: - 101: - description: "no error, hijacking successful" - 400: - description: "bad parameter" - schema: - $ref: "#/definitions/ErrorResponse" - 500: - description: "server error" - schema: - $ref: "#/definitions/ErrorResponse" - tags: ["Session"] diff --git a/vendor/github.com/docker/docker/api/types/auth.go b/vendor/github.com/docker/docker/api/types/auth.go deleted file mode 100644 index ddf15bb18..000000000 --- a/vendor/github.com/docker/docker/api/types/auth.go +++ /dev/null @@ -1,22 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -// AuthConfig contains authorization information for connecting to a Registry -type AuthConfig struct { - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - Auth string `json:"auth,omitempty"` - - // Email is an optional value associated with the username. - // This field is deprecated and will be removed in a later - // version of docker. 
- Email string `json:"email,omitempty"` - - ServerAddress string `json:"serveraddress,omitempty"` - - // IdentityToken is used to authenticate the user and get - // an access token for the registry. - IdentityToken string `json:"identitytoken,omitempty"` - - // RegistryToken is a bearer token to be sent to a registry - RegistryToken string `json:"registrytoken,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go deleted file mode 100644 index bf3463b90..000000000 --- a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go +++ /dev/null @@ -1,23 +0,0 @@ -package blkiodev // import "github.com/docker/docker/api/types/blkiodev" - -import "fmt" - -// WeightDevice is a structure that holds device:weight pair -type WeightDevice struct { - Path string - Weight uint16 -} - -func (w *WeightDevice) String() string { - return fmt.Sprintf("%s:%d", w.Path, w.Weight) -} - -// ThrottleDevice is a structure that holds device:rate_per_second pair -type ThrottleDevice struct { - Path string - Rate uint64 -} - -func (t *ThrottleDevice) String() string { - return fmt.Sprintf("%s:%d", t.Path, t.Rate) -} diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go deleted file mode 100644 index 97aca0230..000000000 --- a/vendor/github.com/docker/docker/api/types/client.go +++ /dev/null @@ -1,443 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -import ( - "bufio" - "io" - "net" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" - units "github.com/docker/go-units" -) - -// CheckpointCreateOptions holds parameters to create a checkpoint from a container -type CheckpointCreateOptions struct { - CheckpointID string - CheckpointDir string - Exit bool -} - -// CheckpointListOptions holds parameters to list checkpoints for a container -type CheckpointListOptions struct { - CheckpointDir string -} - -// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container -type CheckpointDeleteOptions struct { - CheckpointID string - CheckpointDir string -} - -// ContainerAttachOptions holds parameters to attach to a container. -type ContainerAttachOptions struct { - Stream bool - Stdin bool - Stdout bool - Stderr bool - DetachKeys string - Logs bool -} - -// ContainerCommitOptions holds parameters to commit changes into a container. -type ContainerCommitOptions struct { - Reference string - Comment string - Author string - Changes []string - Pause bool - Config *container.Config -} - -// ContainerExecInspect holds information returned by exec inspect. -type ContainerExecInspect struct { - ExecID string `json:"ID"` - ContainerID string - Running bool - ExitCode int - Pid int -} - -// ContainerListOptions holds parameters to list containers with. -type ContainerListOptions struct { - Size bool - All bool - Latest bool - Since string - Before string - Limit int - Filters filters.Args -} - -// ContainerLogsOptions holds parameters to filter logs with. -type ContainerLogsOptions struct { - ShowStdout bool - ShowStderr bool - Since string - Until string - Timestamps bool - Follow bool - Tail string - Details bool -} - -// ContainerRemoveOptions holds parameters to remove containers. -type ContainerRemoveOptions struct { - RemoveVolumes bool - RemoveLinks bool - Force bool -} - -// ContainerStartOptions holds parameters to start containers. 
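The `X-Registry-Auth` header referenced throughout the spec carries a base64url-encoded JSON serialization of an auth configuration such as the `AuthConfig` type above. A sketch of producing that header value; the credentials and registry address are placeholders, and only the fields needed for a basic login are set.

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// authConfig mirrors the JSON shape of the AuthConfig type shown above;
// only the fields needed for a basic registry login are included here.
type authConfig struct {
	Username      string `json:"username,omitempty"`
	Password      string `json:"password,omitempty"`
	ServerAddress string `json:"serveraddress,omitempty"`
}

func main() {
	payload, err := json.Marshal(authConfig{
		Username:      "someuser", // placeholder credentials
		Password:      "somepassword",
		ServerAddress: "registry.example.com",
	})
	if err != nil {
		panic(err)
	}

	// The header value is the base64url encoding of the JSON payload.
	header := base64.URLEncoding.EncodeToString(payload)
	fmt.Println("X-Registry-Auth:", header)
}
```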
-type ContainerStartOptions struct {
-	CheckpointID  string
-	CheckpointDir string
-}
-
-// CopyToContainerOptions holds information
-// about files to copy into a container
-type CopyToContainerOptions struct {
-	AllowOverwriteDirWithFile bool
-	CopyUIDGID                bool
-}
-
-// EventsOptions holds parameters to filter events with.
-type EventsOptions struct {
-	Since   string
-	Until   string
-	Filters filters.Args
-}
-
-// NetworkListOptions holds parameters to filter the list of networks with.
-type NetworkListOptions struct {
-	Filters filters.Args
-}
-
-// NewHijackedResponse initializes a HijackedResponse type
-func NewHijackedResponse(conn net.Conn, mediaType string) HijackedResponse {
-	return HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn), mediaType: mediaType}
-}
-
-// HijackedResponse holds connection information for a hijacked request.
-type HijackedResponse struct {
-	mediaType string
-	Conn      net.Conn
-	Reader    *bufio.Reader
-}
-
-// Close closes the hijacked connection and reader.
-func (h *HijackedResponse) Close() {
-	h.Conn.Close()
-}
-
-// MediaType lets the client know whether the HijackedResponse holds a raw or multiplexed stream.
-// It returns false if the HTTP Content-Type is not relevant, and the container must be inspected.
-func (h *HijackedResponse) MediaType() (string, bool) {
-	if h.mediaType == "" {
-		return "", false
-	}
-	return h.mediaType, true
-}
-
-// CloseWriter is an interface implemented by structs
-// that close their input streams to prevent further writing.
-type CloseWriter interface {
-	CloseWrite() error
-}
-
-// CloseWrite closes a readWriter for writing.
-func (h *HijackedResponse) CloseWrite() error {
-	if conn, ok := h.Conn.(CloseWriter); ok {
-		return conn.CloseWrite()
-	}
-	return nil
-}
-
-// ImageBuildOptions holds the information
-// necessary to build images.
-type ImageBuildOptions struct {
-	Tags           []string
-	SuppressOutput bool
-	RemoteContext  string
-	NoCache        bool
-	Remove         bool
-	ForceRemove    bool
-	PullParent     bool
-	Isolation      container.Isolation
-	CPUSetCPUs     string
-	CPUSetMems     string
-	CPUShares      int64
-	CPUQuota       int64
-	CPUPeriod      int64
-	Memory         int64
-	MemorySwap     int64
-	CgroupParent   string
-	NetworkMode    string
-	ShmSize        int64
-	Dockerfile     string
-	Ulimits        []*units.Ulimit
-	// BuildArgs needs to be a *string instead of just a string so that
-	// we can tell the difference between "" (empty string) and no value
-	// at all (nil). See the parsing of buildArgs in
-	// api/server/router/build/build_routes.go for even more info.
-	BuildArgs   map[string]*string
-	AuthConfigs map[string]AuthConfig
-	Context     io.Reader
-	Labels      map[string]string
-	// squash the resulting image's layers to the parent
-	// preserves the original image and creates a new one from the parent with all
-	// the changes applied to a single layer
-	Squash bool
-	// CacheFrom specifies images that are used for matching cache. Images
-	// specified here do not need to have a valid parent chain to match cache.
-	CacheFrom   []string
-	SecurityOpt []string
-	ExtraHosts  []string // List of extra hosts
-	Target      string
-	SessionID   string
-	Platform    string
-	// Version specifies the version of the underlying builder to use
-	Version BuilderVersion
-	// BuildID is an optional identifier that can be passed together with the
-	// build request. The same identifier can be used to gracefully cancel the
-	// build with the cancel request.
-	BuildID string
-	// Outputs defines configurations for exporting build results.
Only supported - // in BuildKit mode - Outputs []ImageBuildOutput -} - -// ImageBuildOutput defines configuration for exporting a build result -type ImageBuildOutput struct { - Type string - Attrs map[string]string -} - -// BuilderVersion sets the version of underlying builder to use -type BuilderVersion string - -const ( - // BuilderV1 is the first generation builder in docker daemon - BuilderV1 BuilderVersion = "1" - // BuilderBuildKit is builder based on moby/buildkit project - BuilderBuildKit BuilderVersion = "2" -) - -// ImageBuildResponse holds information -// returned by a server after building -// an image. -type ImageBuildResponse struct { - Body io.ReadCloser - OSType string -} - -// ImageCreateOptions holds information to create images. -type ImageCreateOptions struct { - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry. - Platform string // Platform is the target platform of the image if it needs to be pulled from the registry. -} - -// ImageImportSource holds source information for ImageImport -type ImageImportSource struct { - Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this. - SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute. -} - -// ImageImportOptions holds information to import images from the client host. -type ImageImportOptions struct { - Tag string // Tag is the name to tag this image with. This attribute is deprecated. - Message string // Message is the message to tag the image with - Changes []string // Changes are the raw changes to apply to this image - Platform string // Platform is the target platform of the image -} - -// ImageListOptions holds parameters to list images with. -type ImageListOptions struct { - // All controls whether all images in the graph are filtered, or just - // the heads. - All bool - - // Filters is a JSON-encoded set of filter arguments. - Filters filters.Args - - // SharedSize indicates whether the shared size of images should be computed. - SharedSize bool - - // ContainerCount indicates whether container count should be computed. - ContainerCount bool -} - -// ImageLoadResponse returns information to the client about a load process. -type ImageLoadResponse struct { - // Body must be closed to avoid a resource leak - Body io.ReadCloser - JSON bool -} - -// ImagePullOptions holds information to pull images. -type ImagePullOptions struct { - All bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - PrivilegeFunc RequestPrivilegeFunc - Platform string -} - -// RequestPrivilegeFunc is a function interface that -// clients can supply to retry operations after -// getting an authorization error. -// This function returns the registry authentication -// header value in base 64 format, or an error -// if the privilege request fails. -type RequestPrivilegeFunc func() (string, error) - -// ImagePushOptions holds information to push images. -type ImagePushOptions ImagePullOptions - -// ImageRemoveOptions holds parameters to remove images. -type ImageRemoveOptions struct { - Force bool - PruneChildren bool -} - -// ImageSearchOptions holds parameters to search images with. -type ImageSearchOptions struct { - RegistryAuth string - PrivilegeFunc RequestPrivilegeFunc - Filters filters.Args - Limit int -} - -// ResizeOptions holds parameters to resize a tty. 
-// It can be used to resize container ttys and -// exec process ttys too. -type ResizeOptions struct { - Height uint - Width uint -} - -// NodeListOptions holds parameters to list nodes with. -type NodeListOptions struct { - Filters filters.Args -} - -// NodeRemoveOptions holds parameters to remove nodes with. -type NodeRemoveOptions struct { - Force bool -} - -// ServiceCreateOptions contains the options to use when creating a service. -type ServiceCreateOptions struct { - // EncodedRegistryAuth is the encoded registry authorization credentials to - // use when updating the service. - // - // This field follows the format of the X-Registry-Auth header. - EncodedRegistryAuth string - - // QueryRegistry indicates whether the service update requires - // contacting a registry. A registry may be contacted to retrieve - // the image digest and manifest, which in turn can be used to update - // platform or other information about the service. - QueryRegistry bool -} - -// ServiceCreateResponse contains the information returned to a client -// on the creation of a new service. -type ServiceCreateResponse struct { - // ID is the ID of the created service. - ID string - // Warnings is a set of non-fatal warning messages to pass on to the user. - Warnings []string `json:",omitempty"` -} - -// Values for RegistryAuthFrom in ServiceUpdateOptions -const ( - RegistryAuthFromSpec = "spec" - RegistryAuthFromPreviousSpec = "previous-spec" -) - -// ServiceUpdateOptions contains the options to be used for updating services. -type ServiceUpdateOptions struct { - // EncodedRegistryAuth is the encoded registry authorization credentials to - // use when updating the service. - // - // This field follows the format of the X-Registry-Auth header. - EncodedRegistryAuth string - - // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate - // into this field. While it does open API users up to racy writes, most - // users may not need that level of consistency in practice. - - // RegistryAuthFrom specifies where to find the registry authorization - // credentials if they are not given in EncodedRegistryAuth. Valid - // values are "spec" and "previous-spec". - RegistryAuthFrom string - - // Rollback indicates whether a server-side rollback should be - // performed. When this is set, the provided spec will be ignored. - // The valid values are "previous" and "none". An empty value is the - // same as "none". - Rollback string - - // QueryRegistry indicates whether the service update requires - // contacting a registry. A registry may be contacted to retrieve - // the image digest and manifest, which in turn can be used to update - // platform or other information about the service. - QueryRegistry bool -} - -// ServiceListOptions holds parameters to list services with. -type ServiceListOptions struct { - Filters filters.Args - - // Status indicates whether the server should include the service task - // count of running and desired tasks. - Status bool -} - -// ServiceInspectOptions holds parameters related to the "service inspect" -// operation. -type ServiceInspectOptions struct { - InsertDefaults bool -} - -// TaskListOptions holds parameters to list tasks with. -type TaskListOptions struct { - Filters filters.Args -} - -// PluginRemoveOptions holds parameters to remove plugins. -type PluginRemoveOptions struct { - Force bool -} - -// PluginEnableOptions holds parameters to enable plugins. 
-type PluginEnableOptions struct { - Timeout int -} - -// PluginDisableOptions holds parameters to disable plugins. -type PluginDisableOptions struct { - Force bool -} - -// PluginInstallOptions holds parameters to install a plugin. -type PluginInstallOptions struct { - Disabled bool - AcceptAllPermissions bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - RemoteRef string // RemoteRef is the plugin name on the registry - PrivilegeFunc RequestPrivilegeFunc - AcceptPermissionsFunc func(PluginPrivileges) (bool, error) - Args []string -} - -// SwarmUnlockKeyResponse contains the response for Engine API: -// GET /swarm/unlockkey -type SwarmUnlockKeyResponse struct { - // UnlockKey is the unlock key in ASCII-armored format. - UnlockKey string -} - -// PluginCreateOptions hold all options to plugin create. -type PluginCreateOptions struct { - RepoName string -} diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go deleted file mode 100644 index 7689f38b3..000000000 --- a/vendor/github.com/docker/docker/api/types/configs.go +++ /dev/null @@ -1,67 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -import ( - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/network" - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -// configs holds structs used for internal communication between the -// frontend (such as an http server) and the backend (such as the -// docker daemon). - -// ContainerCreateConfig is the parameter set to ContainerCreate() -type ContainerCreateConfig struct { - Name string - Config *container.Config - HostConfig *container.HostConfig - NetworkingConfig *network.NetworkingConfig - Platform *specs.Platform - AdjustCPUShares bool -} - -// ContainerRmConfig holds arguments for the container remove -// operation. This struct is used to tell the backend what operations -// to perform. -type ContainerRmConfig struct { - ForceRemove, RemoveVolume, RemoveLink bool -} - -// ExecConfig is a small subset of the Config struct that holds the configuration -// for the exec feature of docker. -type ExecConfig struct { - User string // User that will run the command - Privileged bool // Is the container in privileged mode - Tty bool // Attach standard streams to a tty. - ConsoleSize *[2]uint `json:",omitempty"` // Initial console size [height, width] - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStderr bool // Attach the standard error - AttachStdout bool // Attach the standard output - Detach bool // Execute in detach mode - DetachKeys string // Escape keys for detach - Env []string // Environment variables - WorkingDir string // Working directory - Cmd []string // Execution commands and args -} - -// PluginRmConfig holds arguments for plugin remove. -type PluginRmConfig struct { - ForceRemove bool -} - -// PluginEnableConfig holds arguments for plugin enable -type PluginEnableConfig struct { - Timeout int -} - -// PluginDisableConfig holds arguments for plugin disable. 
-type PluginDisableConfig struct { - ForceDisable bool -} - -// NetworkListConfig stores the options available for listing networks -type NetworkListConfig struct { - // TODO(@cpuguy83): naming is hard, this is pulled from what was being used in the router before moving here - Detailed bool - Verbose bool -} diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go deleted file mode 100644 index 077583e66..000000000 --- a/vendor/github.com/docker/docker/api/types/container/config.go +++ /dev/null @@ -1,96 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -import ( - "io" - "time" - - "github.com/docker/docker/api/types/strslice" - "github.com/docker/go-connections/nat" -) - -// MinimumDuration puts a minimum on user configured duration. -// This is to prevent API error on time unit. For example, API may -// set 3 as healthcheck interval with intention of 3 seconds, but -// Docker interprets it as 3 nanoseconds. -const MinimumDuration = 1 * time.Millisecond - -// StopOptions holds the options to stop or restart a container. -type StopOptions struct { - // Signal (optional) is the signal to send to the container to (gracefully) - // stop it before forcibly terminating the container with SIGKILL after the - // timeout expires. If not value is set, the default (SIGTERM) is used. - Signal string `json:",omitempty"` - - // Timeout (optional) is the timeout (in seconds) to wait for the container - // to stop gracefully before forcibly terminating it with SIGKILL. - // - // - Use nil to use the default timeout (10 seconds). - // - Use '-1' to wait indefinitely. - // - Use '0' to not wait for the container to exit gracefully, and - // immediately proceeds to forcibly terminating the container. - // - Other positive values are used as timeout (in seconds). - Timeout *int `json:",omitempty"` -} - -// HealthConfig holds configuration settings for the HEALTHCHECK feature. -type HealthConfig struct { - // Test is the test to perform to check that the container is healthy. - // An empty slice means to inherit the default. - // The options are: - // {} : inherit healthcheck - // {"NONE"} : disable healthcheck - // {"CMD", args...} : exec arguments directly - // {"CMD-SHELL", command} : run command with system's default shell - Test []string `json:",omitempty"` - - // Zero means to inherit. Durations are expressed as integer nanoseconds. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. - - // Retries is the number of consecutive failures needed to consider a container as unhealthy. - // Zero means inherit. - Retries int `json:",omitempty"` -} - -// ExecStartOptions holds the options to start container's exec. -type ExecStartOptions struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer - ConsoleSize *[2]uint `json:",omitempty"` -} - -// Config contains the configuration data about a container. -// It should hold only portable information about the container. -// Here, "portable" means "independent from the host we are running on". -// Non-portable information *should* appear in HostConfig. 
-// All fields added to this struct must be marked `omitempty` to keep getting -// predictable hashes from the old `v1Compatibility` configuration. -type Config struct { - Hostname string // Hostname - Domainname string // Domainname - User string // User that will run the command(s) inside the container, also support user:group - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStdout bool // Attach the standard output - AttachStderr bool // Attach the standard error - ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports - Tty bool // Attach standard streams to a tty, including stdin if it is not closed. - OpenStdin bool // Open stdin - StdinOnce bool // If true, close stdin after the 1 attached client disconnects. - Env []string // List of environment variable to set in the container - Cmd strslice.StrSlice // Command to run when starting the container - Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy - ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (meaning treat as a command line) (Windows specific). - Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) - Volumes map[string]struct{} // List of volumes (mounts) used for the container - WorkingDir string // Current directory (PWD) in the command will be launched - Entrypoint strslice.StrSlice // Entrypoint to run when starting the container - NetworkDisabled bool `json:",omitempty"` // Is network disabled - MacAddress string `json:",omitempty"` // Mac Address of the container - OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile - Labels map[string]string // List of labels set to this container - StopSignal string `json:",omitempty"` // Signal to stop a container - StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container - Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT -} diff --git a/vendor/github.com/docker/docker/api/types/container/container_changes.go b/vendor/github.com/docker/docker/api/types/container/container_changes.go deleted file mode 100644 index 16dd5019e..000000000 --- a/vendor/github.com/docker/docker/api/types/container/container_changes.go +++ /dev/null @@ -1,20 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. 
-//
-// See hack/generate-swagger-api.sh
-// ----------------------------------------------------------------------------
-
-// ContainerChangeResponseItem change item in response to ContainerChanges operation
-// swagger:model ContainerChangeResponseItem
-type ContainerChangeResponseItem struct {
-
-	// Kind of change
-	// Required: true
-	Kind uint8 `json:"Kind"`
-
-	// Path to file that has changed
-	// Required: true
-	Path string `json:"Path"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/container/container_top.go b/vendor/github.com/docker/docker/api/types/container/container_top.go
deleted file mode 100644
index 63381da36..000000000
--- a/vendor/github.com/docker/docker/api/types/container/container_top.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package container // import "github.com/docker/docker/api/types/container"
-
-// ----------------------------------------------------------------------------
-// Code generated by `swagger generate operation`. DO NOT EDIT.
-//
-// See hack/generate-swagger-api.sh
-// ----------------------------------------------------------------------------
-
-// ContainerTopOKBody OK response to ContainerTop operation
-// swagger:model ContainerTopOKBody
-type ContainerTopOKBody struct {
-
-	// Each process running in the container, where each process
-	// is an array of values corresponding to the titles.
-	//
-	// Required: true
-	Processes [][]string `json:"Processes"`
-
-	// The ps column titles
-	// Required: true
-	Titles []string `json:"Titles"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/container/container_update.go b/vendor/github.com/docker/docker/api/types/container/container_update.go
deleted file mode 100644
index c10f175ea..000000000
--- a/vendor/github.com/docker/docker/api/types/container/container_update.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package container // import "github.com/docker/docker/api/types/container"
-
-// ----------------------------------------------------------------------------
-// Code generated by `swagger generate operation`. DO NOT EDIT.
-//
-// See hack/generate-swagger-api.sh
-// ----------------------------------------------------------------------------
-
-// ContainerUpdateOKBody OK response to ContainerUpdate operation
-// swagger:model ContainerUpdateOKBody
-type ContainerUpdateOKBody struct {
-
-	// warnings
-	// Required: true
-	Warnings []string `json:"Warnings"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/container/create_response.go b/vendor/github.com/docker/docker/api/types/container/create_response.go
deleted file mode 100644
index aa0e7f7d0..000000000
--- a/vendor/github.com/docker/docker/api/types/container/create_response.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package container
-
-// This file was generated by the swagger tool.
-// Editing this file might prove futile when you re-run the swagger generate command - -// CreateResponse ContainerCreateResponse -// -// OK response to ContainerCreate operation -// swagger:model CreateResponse -type CreateResponse struct { - - // The ID of the created container - // Required: true - ID string `json:"Id"` - - // Warnings encountered when creating the container - // Required: true - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/deprecated.go b/vendor/github.com/docker/docker/api/types/container/deprecated.go deleted file mode 100644 index 0cb70e363..000000000 --- a/vendor/github.com/docker/docker/api/types/container/deprecated.go +++ /dev/null @@ -1,16 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// ContainerCreateCreatedBody OK response to ContainerCreate operation -// -// Deprecated: use CreateResponse -type ContainerCreateCreatedBody = CreateResponse - -// ContainerWaitOKBody OK response to ContainerWait operation -// -// Deprecated: use WaitResponse -type ContainerWaitOKBody = WaitResponse - -// ContainerWaitOKBodyError container waiting error, if any -// -// Deprecated: use WaitExitError -type ContainerWaitOKBodyError = WaitExitError diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go deleted file mode 100644 index 100f434ce..000000000 --- a/vendor/github.com/docker/docker/api/types/container/host_config.go +++ /dev/null @@ -1,465 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -import ( - "strings" - - "github.com/docker/docker/api/types/blkiodev" - "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/strslice" - "github.com/docker/go-connections/nat" - units "github.com/docker/go-units" -) - -// CgroupnsMode represents the cgroup namespace mode of the container -type CgroupnsMode string - -// cgroup namespace modes for containers -const ( - CgroupnsModeEmpty CgroupnsMode = "" - CgroupnsModePrivate CgroupnsMode = "private" - CgroupnsModeHost CgroupnsMode = "host" -) - -// IsPrivate indicates whether the container uses its own private cgroup namespace -func (c CgroupnsMode) IsPrivate() bool { - return c == CgroupnsModePrivate -} - -// IsHost indicates whether the container shares the host's cgroup namespace -func (c CgroupnsMode) IsHost() bool { - return c == CgroupnsModeHost -} - -// IsEmpty indicates whether the container cgroup namespace mode is unset -func (c CgroupnsMode) IsEmpty() bool { - return c == CgroupnsModeEmpty -} - -// Valid indicates whether the cgroup namespace mode is valid -func (c CgroupnsMode) Valid() bool { - return c.IsEmpty() || c.IsPrivate() || c.IsHost() -} - -// Isolation represents the isolation technology of a container. The supported -// values are platform specific -type Isolation string - -// Isolation modes for containers -const ( - IsolationEmpty Isolation = "" // IsolationEmpty is unspecified (same behavior as default) - IsolationDefault Isolation = "default" // IsolationDefault is the default isolation mode on current daemon - IsolationProcess Isolation = "process" // IsolationProcess is process isolation mode - IsolationHyperV Isolation = "hyperv" // IsolationHyperV is HyperV isolation mode -) - -// IsDefault indicates the default isolation technology of a container. On Linux this -// is the native driver. On Windows, this is a Windows Server Container. 
-func (i Isolation) IsDefault() bool { - // TODO consider making isolation-mode strict (case-sensitive) - v := Isolation(strings.ToLower(string(i))) - return v == IsolationDefault || v == IsolationEmpty -} - -// IsHyperV indicates the use of a Hyper-V partition for isolation -func (i Isolation) IsHyperV() bool { - // TODO consider making isolation-mode strict (case-sensitive) - return Isolation(strings.ToLower(string(i))) == IsolationHyperV -} - -// IsProcess indicates the use of process isolation -func (i Isolation) IsProcess() bool { - // TODO consider making isolation-mode strict (case-sensitive) - return Isolation(strings.ToLower(string(i))) == IsolationProcess -} - -// IpcMode represents the container ipc stack. -type IpcMode string - -// IpcMode constants -const ( - IPCModeNone IpcMode = "none" - IPCModeHost IpcMode = "host" - IPCModeContainer IpcMode = "container" - IPCModePrivate IpcMode = "private" - IPCModeShareable IpcMode = "shareable" -) - -// IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared. -func (n IpcMode) IsPrivate() bool { - return n == IPCModePrivate -} - -// IsHost indicates whether the container shares the host's ipc namespace. -func (n IpcMode) IsHost() bool { - return n == IPCModeHost -} - -// IsShareable indicates whether the container's ipc namespace can be shared with another container. -func (n IpcMode) IsShareable() bool { - return n == IPCModeShareable -} - -// IsContainer indicates whether the container uses another container's ipc namespace. -func (n IpcMode) IsContainer() bool { - return strings.HasPrefix(string(n), string(IPCModeContainer)+":") -} - -// IsNone indicates whether container IpcMode is set to "none". -func (n IpcMode) IsNone() bool { - return n == IPCModeNone -} - -// IsEmpty indicates whether container IpcMode is empty -func (n IpcMode) IsEmpty() bool { - return n == "" -} - -// Valid indicates whether the ipc mode is valid. -func (n IpcMode) Valid() bool { - return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer() -} - -// Container returns the name of the container ipc stack is going to be used. -func (n IpcMode) Container() string { - if n.IsContainer() { - return strings.TrimPrefix(string(n), string(IPCModeContainer)+":") - } - return "" -} - -// NetworkMode represents the container network stack. -type NetworkMode string - -// IsNone indicates whether container isn't using a network stack. -func (n NetworkMode) IsNone() bool { - return n == "none" -} - -// IsDefault indicates whether container uses the default network stack. -func (n NetworkMode) IsDefault() bool { - return n == "default" -} - -// IsPrivate indicates whether container uses its private network stack. -func (n NetworkMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsContainer indicates whether container uses a container network stack. -func (n NetworkMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// ConnectedContainer is the id of the container which network this container is connected to. -func (n NetworkMode) ConnectedContainer() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// UserDefined indicates user-created network -func (n NetworkMode) UserDefined() string { - if n.IsUserDefined() { - return string(n) - } - return "" -} - -// UsernsMode represents userns mode in the container. 
-type UsernsMode string - -// IsHost indicates whether the container uses the host's userns. -func (n UsernsMode) IsHost() bool { - return n == "host" -} - -// IsPrivate indicates whether the container uses the a private userns. -func (n UsernsMode) IsPrivate() bool { - return !(n.IsHost()) -} - -// Valid indicates whether the userns is valid. -func (n UsernsMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - default: - return false - } - return true -} - -// CgroupSpec represents the cgroup to use for the container. -type CgroupSpec string - -// IsContainer indicates whether the container is using another container cgroup -func (c CgroupSpec) IsContainer() bool { - parts := strings.SplitN(string(c), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// Valid indicates whether the cgroup spec is valid. -func (c CgroupSpec) Valid() bool { - return c.IsContainer() || c == "" -} - -// Container returns the name of the container whose cgroup will be used. -func (c CgroupSpec) Container() string { - parts := strings.SplitN(string(c), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// UTSMode represents the UTS namespace of the container. -type UTSMode string - -// IsPrivate indicates whether the container uses its private UTS namespace. -func (n UTSMode) IsPrivate() bool { - return !(n.IsHost()) -} - -// IsHost indicates whether the container uses the host's UTS namespace. -func (n UTSMode) IsHost() bool { - return n == "host" -} - -// Valid indicates whether the UTS namespace is valid. -func (n UTSMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - default: - return false - } - return true -} - -// PidMode represents the pid namespace of the container. -type PidMode string - -// IsPrivate indicates whether the container uses its own new pid namespace. -func (n PidMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsHost indicates whether the container uses the host's pid namespace. -func (n PidMode) IsHost() bool { - return n == "host" -} - -// IsContainer indicates whether the container uses a container's pid namespace. -func (n PidMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// Valid indicates whether the pid namespace is valid. -func (n PidMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - case "container": - if len(parts) != 2 || parts[1] == "" { - return false - } - default: - return false - } - return true -} - -// Container returns the name of the container whose pid namespace is going to be used. -func (n PidMode) Container() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// DeviceRequest represents a request for devices from a device driver. -// Used by GPU device drivers. -type DeviceRequest struct { - Driver string // Name of device driver - Count int // Number of devices to request (-1 = All) - DeviceIDs []string // List of device IDs as recognizable by the device driver - Capabilities [][]string // An OR list of AND lists of device capabilities (e.g. "gpu") - Options map[string]string // Options to pass onto the device driver -} - -// DeviceMapping represents the device mapping between the host and the container. 
-type DeviceMapping struct {
-	PathOnHost        string
-	PathInContainer   string
-	CgroupPermissions string
-}
-
-// RestartPolicy represents the restart policies of the container.
-type RestartPolicy struct {
-	Name              string
-	MaximumRetryCount int
-}
-
-// IsNone indicates whether the container has the "no" restart policy.
-// This means the container will not automatically restart when exiting.
-func (rp *RestartPolicy) IsNone() bool {
-	return rp.Name == "no" || rp.Name == ""
-}
-
-// IsAlways indicates whether the container has the "always" restart policy.
-// This means the container will automatically restart regardless of the exit status.
-func (rp *RestartPolicy) IsAlways() bool {
-	return rp.Name == "always"
-}
-
-// IsOnFailure indicates whether the container has the "on-failure" restart policy.
-// This means the container will automatically restart if it exits with a non-zero exit status.
-func (rp *RestartPolicy) IsOnFailure() bool {
-	return rp.Name == "on-failure"
-}
-
-// IsUnlessStopped indicates whether the container has the
-// "unless-stopped" restart policy. This means the container will
-// automatically restart unless the user has put it into a stopped state.
-func (rp *RestartPolicy) IsUnlessStopped() bool {
-	return rp.Name == "unless-stopped"
-}
-
-// IsSame compares two RestartPolicy to see if they are the same
-func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool {
-	return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount
-}
-
-// LogMode is a type to define the available modes for logging
-// These modes affect how logs are handled when log messages start piling up.
-type LogMode string
-
-// Available logging modes
-const (
-	LogModeUnset    LogMode = ""
-	LogModeBlocking LogMode = "blocking"
-	LogModeNonBlock LogMode = "non-blocking"
-)
-
-// LogConfig represents the logging configuration of the container.
-type LogConfig struct {
-	Type   string
-	Config map[string]string
-}
-
-// Resources contains container's resources (cgroups config, ulimits...)
-type Resources struct {
-	// Applicable to all platforms
-	CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers)
-	Memory    int64 // Memory limit (in bytes)
-	NanoCPUs  int64 `json:"NanoCpus"` // CPU quota in units of 10^-9 CPUs.
-
-	// Applicable to UNIX platforms
-	CgroupParent         string // Parent cgroup.
-	BlkioWeight          uint16 // Block IO weight (relative weight vs. other containers)
-	BlkioWeightDevice    []*blkiodev.WeightDevice
-	BlkioDeviceReadBps   []*blkiodev.ThrottleDevice
-	BlkioDeviceWriteBps  []*blkiodev.ThrottleDevice
-	BlkioDeviceReadIOps  []*blkiodev.ThrottleDevice
-	BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice
-	CPUPeriod            int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period
-	CPUQuota             int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota
-	CPURealtimePeriod    int64 `json:"CpuRealtimePeriod"` // CPU real-time period
-	CPURealtimeRuntime   int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime
-	CpusetCpus           string // CpusetCpus 0-2, 0,1
-	CpusetMems           string // CpusetMems 0-2, 0,1
-	Devices              []DeviceMapping // List of devices to map inside the container
-	DeviceCgroupRules    []string // List of rules to be added to the device cgroup
-	DeviceRequests       []DeviceRequest // List of device requests for device drivers
-
-	// KernelMemory specifies the kernel memory limit (in bytes) for the container.
-	// Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes.
- KernelMemory int64 `json:",omitempty"` - KernelMemoryTCP int64 `json:",omitempty"` // Hard limit for kernel TCP buffer memory (in bytes) - MemoryReservation int64 // Memory soft limit (in bytes) - MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap - MemorySwappiness *int64 // Tuning container memory swappiness behaviour - OomKillDisable *bool // Whether to disable OOM Killer or not - PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change. - Ulimits []*units.Ulimit // List of ulimits to be set in the container - - // Applicable to Windows - CPUCount int64 `json:"CpuCount"` // CPU count - CPUPercent int64 `json:"CpuPercent"` // CPU percent - IOMaximumIOps uint64 // Maximum IOps for the container system drive - IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive -} - -// UpdateConfig holds the mutable attributes of a Container. -// Those attributes can be updated at runtime. -type UpdateConfig struct { - // Contains container's resources (cgroups, ulimits) - Resources - RestartPolicy RestartPolicy -} - -// HostConfig the non-portable Config structure of a container. -// Here, "non-portable" means "dependent of the host we are running on". -// Portable information *should* appear in Config. -type HostConfig struct { - // Applicable to all platforms - Binds []string // List of volume bindings for this container - ContainerIDFile string // File (path) where the containerId is written - LogConfig LogConfig // Configuration of the logs for this container - NetworkMode NetworkMode // Network mode to use for the container - PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host - RestartPolicy RestartPolicy // Restart policy to be used for the container - AutoRemove bool // Automatically remove container when it exits - VolumeDriver string // Name of the volume driver used to mount volumes - VolumesFrom []string // List of volumes to take from other container - ConsoleSize [2]uint // Initial console size (height,width) - - // Applicable to UNIX platforms - CapAdd strslice.StrSlice // List of kernel capabilities to add to the container - CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container - CgroupnsMode CgroupnsMode // Cgroup namespace mode to use for the container - DNS []string `json:"Dns"` // List of DNS server to lookup - DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for - DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for - ExtraHosts []string // List of extra hosts - GroupAdd []string // List of additional groups that the container process will run as - IpcMode IpcMode // IPC namespace to use for the container - Cgroup CgroupSpec // Cgroup to use for the container - Links []string // List of links (in the name:alias form) - OomScoreAdj int // Container preference for OOM-killing - PidMode PidMode // PID namespace to use for the container - Privileged bool // Is the container in privileged mode - PublishAllPorts bool // Should docker publish all exposed port for the container - ReadonlyRootfs bool // Is the container root filesystem in read-only - SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. - StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container. 
- Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container - UTSMode UTSMode // UTS namespace to use for the container - UsernsMode UsernsMode // The user namespace to use for the container - ShmSize int64 // Total shm memory usage - Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container - Runtime string `json:",omitempty"` // Runtime to use with this container - - // Applicable to Windows - Isolation Isolation // Isolation technology of the container (e.g. default, hyperv) - - // Contains container's resources (cgroups, ulimits) - Resources - - // Mounts specs used by the container - Mounts []mount.Mount `json:",omitempty"` - - // MaskedPaths is the list of paths to be masked inside the container (this overrides the default set of paths) - MaskedPaths []string - - // ReadonlyPaths is the list of paths to be set as read-only inside the container (this overrides the default set of paths) - ReadonlyPaths []string - - // Run a custom init inside the container, if null, use the daemon's configured settings - Init *bool `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go deleted file mode 100644 index 24c4fa8d9..000000000 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go +++ /dev/null @@ -1,42 +0,0 @@ -//go:build !windows -// +build !windows - -package container // import "github.com/docker/docker/api/types/container" - -// IsValid indicates if an isolation technology is valid -func (i Isolation) IsValid() bool { - return i.IsDefault() -} - -// NetworkName returns the name of the network stack. -func (n NetworkMode) NetworkName() string { - if n.IsBridge() { - return "bridge" - } else if n.IsHost() { - return "host" - } else if n.IsContainer() { - return "container" - } else if n.IsNone() { - return "none" - } else if n.IsDefault() { - return "default" - } else if n.IsUserDefined() { - return n.UserDefined() - } - return "" -} - -// IsBridge indicates whether container uses the bridge network stack -func (n NetworkMode) IsBridge() bool { - return n == "bridge" -} - -// IsHost indicates whether container uses the host network stack. -func (n NetworkMode) IsHost() bool { - return n == "host" -} - -// IsUserDefined indicates user-created network -func (n NetworkMode) IsUserDefined() bool { - return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() -} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go deleted file mode 100644 index 99f803a5b..000000000 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go +++ /dev/null @@ -1,40 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// IsBridge indicates whether container uses the bridge network stack -// in windows it is given the name NAT -func (n NetworkMode) IsBridge() bool { - return n == "nat" -} - -// IsHost indicates whether container uses the host network stack. 
-// returns false as this is not supported by windows -func (n NetworkMode) IsHost() bool { - return false -} - -// IsUserDefined indicates user-created network -func (n NetworkMode) IsUserDefined() bool { - return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer() -} - -// IsValid indicates if an isolation technology is valid -func (i Isolation) IsValid() bool { - return i.IsDefault() || i.IsHyperV() || i.IsProcess() -} - -// NetworkName returns the name of the network stack. -func (n NetworkMode) NetworkName() string { - if n.IsDefault() { - return "default" - } else if n.IsBridge() { - return "nat" - } else if n.IsNone() { - return "none" - } else if n.IsContainer() { - return "container" - } else if n.IsUserDefined() { - return n.UserDefined() - } - - return "" -} diff --git a/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go b/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go deleted file mode 100644 index ab56d4eed..000000000 --- a/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go +++ /dev/null @@ -1,12 +0,0 @@ -package container - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// WaitExitError container waiting error, if any -// swagger:model WaitExitError -type WaitExitError struct { - - // Details of an error - Message string `json:"Message,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/wait_response.go b/vendor/github.com/docker/docker/api/types/container/wait_response.go deleted file mode 100644 index 84fc6afdd..000000000 --- a/vendor/github.com/docker/docker/api/types/container/wait_response.go +++ /dev/null @@ -1,18 +0,0 @@ -package container - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// WaitResponse ContainerWaitResponse -// -// OK response to ContainerWait operation -// swagger:model WaitResponse -type WaitResponse struct { - - // error - Error *WaitExitError `json:"Error,omitempty"` - - // Exit code of the container - // Required: true - StatusCode int64 `json:"StatusCode"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/waitcondition.go b/vendor/github.com/docker/docker/api/types/container/waitcondition.go deleted file mode 100644 index cd8311f99..000000000 --- a/vendor/github.com/docker/docker/api/types/container/waitcondition.go +++ /dev/null @@ -1,22 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// WaitCondition is a type used to specify a container state for which -// to wait. -type WaitCondition string - -// Possible WaitCondition Values. -// -// WaitConditionNotRunning (default) is used to wait for any of the non-running -// states: "created", "exited", "dead", "removing", or "removed". -// -// WaitConditionNextExit is used to wait for the next time the state changes -// to a non-running state. If the state is currently "created" or "exited", -// this would cause Wait() to block until either the container runs and exits -// or is removed. -// -// WaitConditionRemoved is used to wait for the container to be removed. 
-const ( - WaitConditionNotRunning WaitCondition = "not-running" - WaitConditionNextExit WaitCondition = "next-exit" - WaitConditionRemoved WaitCondition = "removed" -) diff --git a/vendor/github.com/docker/docker/api/types/deprecated.go b/vendor/github.com/docker/docker/api/types/deprecated.go deleted file mode 100644 index 216d1df0f..000000000 --- a/vendor/github.com/docker/docker/api/types/deprecated.go +++ /dev/null @@ -1,14 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -import "github.com/docker/docker/api/types/volume" - -// Volume volume -// -// Deprecated: use github.com/docker/docker/api/types/volume.Volume -type Volume = volume.Volume - -// VolumeUsageData Usage details about the volume. This information is used by the -// `GET /system/df` endpoint, and omitted in other endpoints. -// -// Deprecated: use github.com/docker/docker/api/types/volume.UsageData -type VolumeUsageData = volume.UsageData diff --git a/vendor/github.com/docker/docker/api/types/error_response.go b/vendor/github.com/docker/docker/api/types/error_response.go deleted file mode 100644 index dc942d9d9..000000000 --- a/vendor/github.com/docker/docker/api/types/error_response.go +++ /dev/null @@ -1,13 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ErrorResponse Represents an error. -// swagger:model ErrorResponse -type ErrorResponse struct { - - // The error message. - // Required: true - Message string `json:"message"` -} diff --git a/vendor/github.com/docker/docker/api/types/error_response_ext.go b/vendor/github.com/docker/docker/api/types/error_response_ext.go deleted file mode 100644 index f84f034cd..000000000 --- a/vendor/github.com/docker/docker/api/types/error_response_ext.go +++ /dev/null @@ -1,6 +0,0 @@ -package types - -// Error returns the error message -func (e ErrorResponse) Error() string { - return e.Message -} diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/docker/docker/api/types/events/events.go deleted file mode 100644 index 9fe07e26f..000000000 --- a/vendor/github.com/docker/docker/api/types/events/events.go +++ /dev/null @@ -1,47 +0,0 @@ -package events // import "github.com/docker/docker/api/types/events" - -// Type is used for event-types. -type Type = string - -// List of known event types. -const ( - BuilderEventType Type = "builder" // BuilderEventType is the event type that the builder generates. - ConfigEventType Type = "config" // ConfigEventType is the event type that configs generate. - ContainerEventType Type = "container" // ContainerEventType is the event type that containers generate. - DaemonEventType Type = "daemon" // DaemonEventType is the event type that daemon generate. - ImageEventType Type = "image" // ImageEventType is the event type that images generate. - NetworkEventType Type = "network" // NetworkEventType is the event type that networks generate. - NodeEventType Type = "node" // NodeEventType is the event type that nodes generate. - PluginEventType Type = "plugin" // PluginEventType is the event type that plugins generate. - SecretEventType Type = "secret" // SecretEventType is the event type that secrets generate. - ServiceEventType Type = "service" // ServiceEventType is the event type that services generate. - VolumeEventType Type = "volume" // VolumeEventType is the event type that volumes generate. 
-) - -// Actor describes something that generates events, -// like a container, or a network, or a volume. -// It has a defined name and a set of attributes. -// The container attributes are its labels, other actors -// can generate these attributes from other properties. -type Actor struct { - ID string - Attributes map[string]string -} - -// Message represents the information an event contains -type Message struct { - // Deprecated information from JSONMessage. - // With data only in container events. - Status string `json:"status,omitempty"` // Deprecated: use Action instead. - ID string `json:"id,omitempty"` // Deprecated: use Actor.ID instead. - From string `json:"from,omitempty"` // Deprecated: use Actor.Attributes["image"] instead. - - Type Type - Action string - Actor Actor - // Engine events are local scope. Cluster events are swarm scope. - Scope string `json:"scope,omitempty"` - - Time int64 `json:"time,omitempty"` - TimeNano int64 `json:"timeNano,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go deleted file mode 100644 index f8fe79407..000000000 --- a/vendor/github.com/docker/docker/api/types/filters/parse.go +++ /dev/null @@ -1,323 +0,0 @@ -/* -Package filters provides tools for encoding a mapping of keys to a set of -multiple values. -*/ -package filters // import "github.com/docker/docker/api/types/filters" - -import ( - "encoding/json" - "regexp" - "strings" - - "github.com/docker/docker/api/types/versions" - "github.com/pkg/errors" -) - -// Args stores a mapping of keys to a set of multiple values. -type Args struct { - fields map[string]map[string]bool -} - -// KeyValuePair are used to initialize a new Args -type KeyValuePair struct { - Key string - Value string -} - -// Arg creates a new KeyValuePair for initializing Args -func Arg(key, value string) KeyValuePair { - return KeyValuePair{Key: key, Value: value} -} - -// NewArgs returns a new Args populated with the initial args -func NewArgs(initialArgs ...KeyValuePair) Args { - args := Args{fields: map[string]map[string]bool{}} - for _, arg := range initialArgs { - args.Add(arg.Key, arg.Value) - } - return args -} - -// Keys returns all the keys in list of Args -func (args Args) Keys() []string { - keys := make([]string, 0, len(args.fields)) - for k := range args.fields { - keys = append(keys, k) - } - return keys -} - -// MarshalJSON returns a JSON byte representation of the Args -func (args Args) MarshalJSON() ([]byte, error) { - if len(args.fields) == 0 { - return []byte("{}"), nil - } - return json.Marshal(args.fields) -} - -// ToJSON returns the Args as a JSON encoded string -func ToJSON(a Args) (string, error) { - if a.Len() == 0 { - return "", nil - } - buf, err := json.Marshal(a) - return string(buf), err -} - -// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22 -// then the encoded format will use an older legacy format where the values are a -// list of strings, instead of a set. 
-// -// Deprecated: do not use in any new code; use ToJSON instead -func ToParamWithVersion(version string, a Args) (string, error) { - if a.Len() == 0 { - return "", nil - } - - if version != "" && versions.LessThan(version, "1.22") { - buf, err := json.Marshal(convertArgsToSlice(a.fields)) - return string(buf), err - } - - return ToJSON(a) -} - -// FromJSON decodes a JSON encoded string into Args -func FromJSON(p string) (Args, error) { - args := NewArgs() - - if p == "" { - return args, nil - } - - raw := []byte(p) - err := json.Unmarshal(raw, &args) - if err == nil { - return args, nil - } - - // Fallback to parsing arguments in the legacy slice format - deprecated := map[string][]string{} - if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil { - return args, invalidFilter{errors.Wrap(err, "invalid filter")} - } - - args.fields = deprecatedArgs(deprecated) - return args, nil -} - -// UnmarshalJSON populates the Args from JSON encode bytes -func (args Args) UnmarshalJSON(raw []byte) error { - return json.Unmarshal(raw, &args.fields) -} - -// Get returns the list of values associated with the key -func (args Args) Get(key string) []string { - values := args.fields[key] - if values == nil { - return make([]string, 0) - } - slice := make([]string, 0, len(values)) - for key := range values { - slice = append(slice, key) - } - return slice -} - -// Add a new value to the set of values -func (args Args) Add(key, value string) { - if _, ok := args.fields[key]; ok { - args.fields[key][value] = true - } else { - args.fields[key] = map[string]bool{value: true} - } -} - -// Del removes a value from the set -func (args Args) Del(key, value string) { - if _, ok := args.fields[key]; ok { - delete(args.fields[key], value) - if len(args.fields[key]) == 0 { - delete(args.fields, key) - } - } -} - -// Len returns the number of keys in the mapping -func (args Args) Len() int { - return len(args.fields) -} - -// MatchKVList returns true if all the pairs in sources exist as key=value -// pairs in the mapping at key, or if there are no values at key. -func (args Args) MatchKVList(key string, sources map[string]string) bool { - fieldValues := args.fields[key] - - // do not filter if there is no filter set or cannot determine filter - if len(fieldValues) == 0 { - return true - } - - if len(sources) == 0 { - return false - } - - for value := range fieldValues { - testKV := strings.SplitN(value, "=", 2) - - v, ok := sources[testKV[0]] - if !ok { - return false - } - if len(testKV) == 2 && testKV[1] != v { - return false - } - } - - return true -} - -// Match returns true if any of the values at key match the source string -func (args Args) Match(field, source string) bool { - if args.ExactMatch(field, source) { - return true - } - - fieldValues := args.fields[field] - for name2match := range fieldValues { - match, err := regexp.MatchString(name2match, source) - if err != nil { - continue - } - if match { - return true - } - } - return false -} - -// ExactMatch returns true if the source matches exactly one of the values. -func (args Args) ExactMatch(key, source string) bool { - fieldValues, ok := args.fields[key] - // do not filter if there is no filter set or cannot determine filter - if !ok || len(fieldValues) == 0 { - return true - } - - // try to match full name value to avoid O(N) regular expression matching - return fieldValues[source] -} - -// UniqueExactMatch returns true if there is only one value and the source -// matches exactly the value. 
-func (args Args) UniqueExactMatch(key, source string) bool { - fieldValues := args.fields[key] - // do not filter if there is no filter set or cannot determine filter - if len(fieldValues) == 0 { - return true - } - if len(args.fields[key]) != 1 { - return false - } - - // try to match full name value to avoid O(N) regular expression matching - return fieldValues[source] -} - -// FuzzyMatch returns true if the source matches exactly one value, or the -// source has one of the values as a prefix. -func (args Args) FuzzyMatch(key, source string) bool { - if args.ExactMatch(key, source) { - return true - } - - fieldValues := args.fields[key] - for prefix := range fieldValues { - if strings.HasPrefix(source, prefix) { - return true - } - } - return false -} - -// Contains returns true if the key exists in the mapping -func (args Args) Contains(field string) bool { - _, ok := args.fields[field] - return ok -} - -type invalidFilter struct{ error } - -func (e invalidFilter) Error() string { - return e.error.Error() -} - -func (invalidFilter) InvalidParameter() {} - -// Validate compared the set of accepted keys against the keys in the mapping. -// An error is returned if any mapping keys are not in the accepted set. -func (args Args) Validate(accepted map[string]bool) error { - for name := range args.fields { - if !accepted[name] { - return invalidFilter{errors.New("invalid filter '" + name + "'")} - } - } - return nil -} - -// WalkValues iterates over the list of values for a key in the mapping and calls -// op() for each value. If op returns an error the iteration stops and the -// error is returned. -func (args Args) WalkValues(field string, op func(value string) error) error { - if _, ok := args.fields[field]; !ok { - return nil - } - for v := range args.fields[field] { - if err := op(v); err != nil { - return err - } - } - return nil -} - -// Clone returns a copy of args. -func (args Args) Clone() (newArgs Args) { - newArgs.fields = make(map[string]map[string]bool, len(args.fields)) - for k, m := range args.fields { - var mm map[string]bool - if m != nil { - mm = make(map[string]bool, len(m)) - for kk, v := range m { - mm[kk] = v - } - } - newArgs.fields[k] = mm - } - return newArgs -} - -func deprecatedArgs(d map[string][]string) map[string]map[string]bool { - m := map[string]map[string]bool{} - for k, v := range d { - values := map[string]bool{} - for _, vv := range v { - values[vv] = true - } - m[k] = values - } - return m -} - -func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { - m := map[string][]string{} - for k, v := range f { - values := []string{} - for kk := range v { - if v[kk] { - values = append(values, kk) - } - } - m[k] = values - } - return m -} diff --git a/vendor/github.com/docker/docker/api/types/graph_driver_data.go b/vendor/github.com/docker/docker/api/types/graph_driver_data.go deleted file mode 100644 index ce3deb331..000000000 --- a/vendor/github.com/docker/docker/api/types/graph_driver_data.go +++ /dev/null @@ -1,23 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// GraphDriverData Information about the storage driver used to store the container's and -// image's filesystem. -// -// swagger:model GraphDriverData -type GraphDriverData struct { - - // Low-level storage metadata, provided as key/value pairs. 
- // - // This information is driver-specific, and depends on the storage-driver - // in use, and should be used for informational purposes only. - // - // Required: true - Data map[string]string `json:"Data"` - - // Name of the storage driver. - // Required: true - Name string `json:"Name"` -} diff --git a/vendor/github.com/docker/docker/api/types/id_response.go b/vendor/github.com/docker/docker/api/types/id_response.go deleted file mode 100644 index 7592d2f8b..000000000 --- a/vendor/github.com/docker/docker/api/types/id_response.go +++ /dev/null @@ -1,13 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// IDResponse Response to an API call that returns just an Id -// swagger:model IdResponse -type IDResponse struct { - - // The id of the newly created object. - // Required: true - ID string `json:"Id"` -} diff --git a/vendor/github.com/docker/docker/api/types/image/image_history.go b/vendor/github.com/docker/docker/api/types/image/image_history.go deleted file mode 100644 index e302bb0ae..000000000 --- a/vendor/github.com/docker/docker/api/types/image/image_history.go +++ /dev/null @@ -1,36 +0,0 @@ -package image // import "github.com/docker/docker/api/types/image" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// HistoryResponseItem individual image layer information in response to ImageHistory operation -// swagger:model HistoryResponseItem -type HistoryResponseItem struct { - - // comment - // Required: true - Comment string `json:"Comment"` - - // created - // Required: true - Created int64 `json:"Created"` - - // created by - // Required: true - CreatedBy string `json:"CreatedBy"` - - // Id - // Required: true - ID string `json:"Id"` - - // size - // Required: true - Size int64 `json:"Size"` - - // tags - // Required: true - Tags []string `json:"Tags"` -} diff --git a/vendor/github.com/docker/docker/api/types/image_delete_response_item.go b/vendor/github.com/docker/docker/api/types/image_delete_response_item.go deleted file mode 100644 index b9a65a0d8..000000000 --- a/vendor/github.com/docker/docker/api/types/image_delete_response_item.go +++ /dev/null @@ -1,15 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ImageDeleteResponseItem image delete response item -// swagger:model ImageDeleteResponseItem -type ImageDeleteResponseItem struct { - - // The image ID of an image that was deleted - Deleted string `json:"Deleted,omitempty"` - - // The image ID of an image that was untagged - Untagged string `json:"Untagged,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/image_summary.go b/vendor/github.com/docker/docker/api/types/image_summary.go deleted file mode 100644 index 90b983a25..000000000 --- a/vendor/github.com/docker/docker/api/types/image_summary.go +++ /dev/null @@ -1,97 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ImageSummary image summary -// swagger:model ImageSummary -type ImageSummary struct { - - // Number of containers using this image. 
Includes both stopped and running - // containers. - // - // This size is not calculated by default, and depends on which API endpoint - // is used. `-1` indicates that the value has not been set / calculated. - // - // Required: true - Containers int64 `json:"Containers"` - - // Date and time at which the image was created as a Unix timestamp - // (number of seconds sinds EPOCH). - // - // Required: true - Created int64 `json:"Created"` - - // ID is the content-addressable ID of an image. - // - // This identifier is a content-addressable digest calculated from the - // image's configuration (which includes the digests of layers used by - // the image). - // - // Note that this digest differs from the `RepoDigests` below, which - // holds digests of image manifests that reference the image. - // - // Required: true - ID string `json:"Id"` - - // User-defined key/value metadata. - // Required: true - Labels map[string]string `json:"Labels"` - - // ID of the parent image. - // - // Depending on how the image was created, this field may be empty and - // is only set for images that were built/created locally. This field - // is empty if the image was pulled from an image registry. - // - // Required: true - ParentID string `json:"ParentId"` - - // List of content-addressable digests of locally available image manifests - // that the image is referenced from. Multiple manifests can refer to the - // same image. - // - // These digests are usually only available if the image was either pulled - // from a registry, or if the image was pushed to a registry, which is when - // the manifest is generated and its digest calculated. - // - // Required: true - RepoDigests []string `json:"RepoDigests"` - - // List of image names/tags in the local image cache that reference this - // image. - // - // Multiple image tags can refer to the same image, and this list may be - // empty if no tags reference the image, in which case the image is - // "untagged", in which case it can still be referenced by its ID. - // - // Required: true - RepoTags []string `json:"RepoTags"` - - // Total size of image layers that are shared between this image and other - // images. - // - // This size is not calculated by default. `-1` indicates that the value - // has not been set / calculated. - // - // Required: true - SharedSize int64 `json:"SharedSize"` - - // Total size of the image including all layers it is composed of. - // - // Required: true - Size int64 `json:"Size"` - - // Total size of the image including all layers it is composed of. - // - // In versions of Docker before v1.10, this field was calculated from - // the image itself and all of its parent images. Docker v1.10 and up - // store images self-contained, and no longer use a parent-chain, making - // this field an equivalent of the Size field. - // - // This field is kept for backward compatibility, but may be removed in - // a future version of the API. - // - // Required: true - VirtualSize int64 `json:"VirtualSize"` -} diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go deleted file mode 100644 index ac4ce6223..000000000 --- a/vendor/github.com/docker/docker/api/types/mount/mount.go +++ /dev/null @@ -1,140 +0,0 @@ -package mount // import "github.com/docker/docker/api/types/mount" - -import ( - "os" -) - -// Type represents the type of a mount. 
-type Type string - -// Type constants -const ( - // TypeBind is the type for mounting host dir - TypeBind Type = "bind" - // TypeVolume is the type for remote storage volumes - TypeVolume Type = "volume" - // TypeTmpfs is the type for mounting tmpfs - TypeTmpfs Type = "tmpfs" - // TypeNamedPipe is the type for mounting Windows named pipes - TypeNamedPipe Type = "npipe" - // TypeCluster is the type for Swarm Cluster Volumes. - TypeCluster Type = "cluster" -) - -// Mount represents a mount (volume). -type Mount struct { - Type Type `json:",omitempty"` - // Source specifies the name of the mount. Depending on mount type, this - // may be a volume name or a host path, or even ignored. - // Source is not supported for tmpfs (must be an empty value) - Source string `json:",omitempty"` - Target string `json:",omitempty"` - ReadOnly bool `json:",omitempty"` - Consistency Consistency `json:",omitempty"` - - BindOptions *BindOptions `json:",omitempty"` - VolumeOptions *VolumeOptions `json:",omitempty"` - TmpfsOptions *TmpfsOptions `json:",omitempty"` - ClusterOptions *ClusterOptions `json:",omitempty"` -} - -// Propagation represents the propagation of a mount. -type Propagation string - -const ( - // PropagationRPrivate RPRIVATE - PropagationRPrivate Propagation = "rprivate" - // PropagationPrivate PRIVATE - PropagationPrivate Propagation = "private" - // PropagationRShared RSHARED - PropagationRShared Propagation = "rshared" - // PropagationShared SHARED - PropagationShared Propagation = "shared" - // PropagationRSlave RSLAVE - PropagationRSlave Propagation = "rslave" - // PropagationSlave SLAVE - PropagationSlave Propagation = "slave" -) - -// Propagations is the list of all valid mount propagations -var Propagations = []Propagation{ - PropagationRPrivate, - PropagationPrivate, - PropagationRShared, - PropagationShared, - PropagationRSlave, - PropagationSlave, -} - -// Consistency represents the consistency requirements of a mount. -type Consistency string - -const ( - // ConsistencyFull guarantees bind mount-like consistency - ConsistencyFull Consistency = "consistent" - // ConsistencyCached mounts can cache read data and FS structure - ConsistencyCached Consistency = "cached" - // ConsistencyDelegated mounts can cache read and written data and structure - ConsistencyDelegated Consistency = "delegated" - // ConsistencyDefault provides "consistent" behavior unless overridden - ConsistencyDefault Consistency = "default" -) - -// BindOptions defines options specific to mounts of type "bind". -type BindOptions struct { - Propagation Propagation `json:",omitempty"` - NonRecursive bool `json:",omitempty"` - CreateMountpoint bool `json:",omitempty"` -} - -// VolumeOptions represents the options for a mount of type volume. -type VolumeOptions struct { - NoCopy bool `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - DriverConfig *Driver `json:",omitempty"` -} - -// Driver represents a volume driver. -type Driver struct { - Name string `json:",omitempty"` - Options map[string]string `json:",omitempty"` -} - -// TmpfsOptions defines options specific to mounts of type "tmpfs". -type TmpfsOptions struct { - // Size sets the size of the tmpfs, in bytes. - // - // This will be converted to an operating system specific value - // depending on the host. For example, on linux, it will be converted to - // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with - // docker, uses a straight byte value. - // - // Percentages are not supported. 
- SizeBytes int64 `json:",omitempty"` - // Mode of the tmpfs upon creation - Mode os.FileMode `json:",omitempty"` - - // TODO(stevvooe): There are several more tmpfs flags, specified in the - // daemon, that are accepted. Only the most basic are added for now. - // - // From https://github.com/moby/sys/blob/mount/v0.1.1/mount/flags.go#L47-L56 - // - // var validFlags = map[string]bool{ - // "": true, - // "size": true, X - // "mode": true, X - // "uid": true, - // "gid": true, - // "nr_inodes": true, - // "nr_blocks": true, - // "mpol": true, - // } - // - // Some of these may be straightforward to add, but others, such as - // uid/gid have implications in a clustered system. -} - -// ClusterOptions specifies options for a Cluster volume. -type ClusterOptions struct { - // intentionally empty -} diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go deleted file mode 100644 index 437b184c6..000000000 --- a/vendor/github.com/docker/docker/api/types/network/network.go +++ /dev/null @@ -1,126 +0,0 @@ -package network // import "github.com/docker/docker/api/types/network" -import ( - "github.com/docker/docker/api/types/filters" -) - -// Address represents an IP address -type Address struct { - Addr string - PrefixLen int -} - -// IPAM represents IP Address Management -type IPAM struct { - Driver string - Options map[string]string // Per network IPAM driver options - Config []IPAMConfig -} - -// IPAMConfig represents IPAM configurations -type IPAMConfig struct { - Subnet string `json:",omitempty"` - IPRange string `json:",omitempty"` - Gateway string `json:",omitempty"` - AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` -} - -// EndpointIPAMConfig represents IPAM configurations for the endpoint -type EndpointIPAMConfig struct { - IPv4Address string `json:",omitempty"` - IPv6Address string `json:",omitempty"` - LinkLocalIPs []string `json:",omitempty"` -} - -// Copy makes a copy of the endpoint ipam config -func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig { - cfgCopy := *cfg - cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs)) - cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...) - return &cfgCopy -} - -// PeerInfo represents one peer of an overlay network -type PeerInfo struct { - Name string - IP string -} - -// EndpointSettings stores the network endpoint details -type EndpointSettings struct { - // Configurations - IPAMConfig *EndpointIPAMConfig - Links []string - Aliases []string - // Operational data - NetworkID string - EndpointID string - Gateway string - IPAddress string - IPPrefixLen int - IPv6Gateway string - GlobalIPv6Address string - GlobalIPv6PrefixLen int - MacAddress string - DriverOpts map[string]string -} - -// Task carries the information about one backend task -type Task struct { - Name string - EndpointID string - EndpointIP string - Info map[string]string -} - -// ServiceInfo represents service parameters with the list of service's tasks -type ServiceInfo struct { - VIP string - Ports []string - LocalLBIndex int - Tasks []Task -} - -// Copy makes a deep copy of `EndpointSettings` -func (es *EndpointSettings) Copy() *EndpointSettings { - epCopy := *es - if es.IPAMConfig != nil { - epCopy.IPAMConfig = es.IPAMConfig.Copy() - } - - if es.Links != nil { - links := make([]string, 0, len(es.Links)) - epCopy.Links = append(links, es.Links...) 
- } - - if es.Aliases != nil { - aliases := make([]string, 0, len(es.Aliases)) - epCopy.Aliases = append(aliases, es.Aliases...) - } - return &epCopy -} - -// NetworkingConfig represents the container's networking configuration for each of its interfaces -// Carries the networking configs specified in the `docker run` and `docker network connect` commands -type NetworkingConfig struct { - EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network -} - -// ConfigReference specifies the source which provides a network's configuration -type ConfigReference struct { - Network string -} - -var acceptedFilters = map[string]bool{ - "dangling": true, - "driver": true, - "id": true, - "label": true, - "name": true, - "scope": true, - "type": true, -} - -// ValidateFilters validates the list of filter args with the available filters. -func ValidateFilters(filter filters.Args) error { - return filter.Validate(acceptedFilters) -} diff --git a/vendor/github.com/docker/docker/api/types/plugin.go b/vendor/github.com/docker/docker/api/types/plugin.go deleted file mode 100644 index abae48b9a..000000000 --- a/vendor/github.com/docker/docker/api/types/plugin.go +++ /dev/null @@ -1,203 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// Plugin A plugin for the Engine API -// swagger:model Plugin -type Plugin struct { - - // config - // Required: true - Config PluginConfig `json:"Config"` - - // True if the plugin is running. False if the plugin is not running, only installed. - // Required: true - Enabled bool `json:"Enabled"` - - // Id - ID string `json:"Id,omitempty"` - - // name - // Required: true - Name string `json:"Name"` - - // plugin remote reference used to push/pull the plugin - PluginReference string `json:"PluginReference,omitempty"` - - // settings - // Required: true - Settings PluginSettings `json:"Settings"` -} - -// PluginConfig The config of a plugin. 
-// swagger:model PluginConfig -type PluginConfig struct { - - // args - // Required: true - Args PluginConfigArgs `json:"Args"` - - // description - // Required: true - Description string `json:"Description"` - - // Docker Version used to create the plugin - DockerVersion string `json:"DockerVersion,omitempty"` - - // documentation - // Required: true - Documentation string `json:"Documentation"` - - // entrypoint - // Required: true - Entrypoint []string `json:"Entrypoint"` - - // env - // Required: true - Env []PluginEnv `json:"Env"` - - // interface - // Required: true - Interface PluginConfigInterface `json:"Interface"` - - // ipc host - // Required: true - IpcHost bool `json:"IpcHost"` - - // linux - // Required: true - Linux PluginConfigLinux `json:"Linux"` - - // mounts - // Required: true - Mounts []PluginMount `json:"Mounts"` - - // network - // Required: true - Network PluginConfigNetwork `json:"Network"` - - // pid host - // Required: true - PidHost bool `json:"PidHost"` - - // propagated mount - // Required: true - PropagatedMount string `json:"PropagatedMount"` - - // user - User PluginConfigUser `json:"User,omitempty"` - - // work dir - // Required: true - WorkDir string `json:"WorkDir"` - - // rootfs - Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"` -} - -// PluginConfigArgs plugin config args -// swagger:model PluginConfigArgs -type PluginConfigArgs struct { - - // description - // Required: true - Description string `json:"Description"` - - // name - // Required: true - Name string `json:"Name"` - - // settable - // Required: true - Settable []string `json:"Settable"` - - // value - // Required: true - Value []string `json:"Value"` -} - -// PluginConfigInterface The interface between Docker and the plugin -// swagger:model PluginConfigInterface -type PluginConfigInterface struct { - - // Protocol to use for clients connecting to the plugin. - ProtocolScheme string `json:"ProtocolScheme,omitempty"` - - // socket - // Required: true - Socket string `json:"Socket"` - - // types - // Required: true - Types []PluginInterfaceType `json:"Types"` -} - -// PluginConfigLinux plugin config linux -// swagger:model PluginConfigLinux -type PluginConfigLinux struct { - - // allow all devices - // Required: true - AllowAllDevices bool `json:"AllowAllDevices"` - - // capabilities - // Required: true - Capabilities []string `json:"Capabilities"` - - // devices - // Required: true - Devices []PluginDevice `json:"Devices"` -} - -// PluginConfigNetwork plugin config network -// swagger:model PluginConfigNetwork -type PluginConfigNetwork struct { - - // type - // Required: true - Type string `json:"Type"` -} - -// PluginConfigRootfs plugin config rootfs -// swagger:model PluginConfigRootfs -type PluginConfigRootfs struct { - - // diff ids - DiffIds []string `json:"diff_ids"` - - // type - Type string `json:"type,omitempty"` -} - -// PluginConfigUser plugin config user -// swagger:model PluginConfigUser -type PluginConfigUser struct { - - // g ID - GID uint32 `json:"GID,omitempty"` - - // UID - UID uint32 `json:"UID,omitempty"` -} - -// PluginSettings Settings that can be modified by users. 
-// swagger:model PluginSettings -type PluginSettings struct { - - // args - // Required: true - Args []string `json:"Args"` - - // devices - // Required: true - Devices []PluginDevice `json:"Devices"` - - // env - // Required: true - Env []string `json:"Env"` - - // mounts - // Required: true - Mounts []PluginMount `json:"Mounts"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_device.go b/vendor/github.com/docker/docker/api/types/plugin_device.go deleted file mode 100644 index 569901067..000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_device.go +++ /dev/null @@ -1,25 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// PluginDevice plugin device -// swagger:model PluginDevice -type PluginDevice struct { - - // description - // Required: true - Description string `json:"Description"` - - // name - // Required: true - Name string `json:"Name"` - - // path - // Required: true - Path *string `json:"Path"` - - // settable - // Required: true - Settable []string `json:"Settable"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_env.go b/vendor/github.com/docker/docker/api/types/plugin_env.go deleted file mode 100644 index 32962dc2e..000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_env.go +++ /dev/null @@ -1,25 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// PluginEnv plugin env -// swagger:model PluginEnv -type PluginEnv struct { - - // description - // Required: true - Description string `json:"Description"` - - // name - // Required: true - Name string `json:"Name"` - - // settable - // Required: true - Settable []string `json:"Settable"` - - // value - // Required: true - Value *string `json:"Value"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_interface_type.go b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go deleted file mode 100644 index c82f204e8..000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_interface_type.go +++ /dev/null @@ -1,21 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// PluginInterfaceType plugin interface type -// swagger:model PluginInterfaceType -type PluginInterfaceType struct { - - // capability - // Required: true - Capability string `json:"Capability"` - - // prefix - // Required: true - Prefix string `json:"Prefix"` - - // version - // Required: true - Version string `json:"Version"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_mount.go b/vendor/github.com/docker/docker/api/types/plugin_mount.go deleted file mode 100644 index 5c031cf8b..000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_mount.go +++ /dev/null @@ -1,37 +0,0 @@ -package types - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -// PluginMount plugin mount -// swagger:model PluginMount -type PluginMount struct { - - // description - // Required: true - Description string `json:"Description"` - - // destination - // Required: true - Destination string `json:"Destination"` - - // name - // Required: true - Name string `json:"Name"` - - // options - // Required: true - Options []string `json:"Options"` - - // settable - // Required: true - Settable []string `json:"Settable"` - - // source - // Required: true - Source *string `json:"Source"` - - // type - // Required: true - Type string `json:"Type"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_responses.go b/vendor/github.com/docker/docker/api/types/plugin_responses.go deleted file mode 100644 index 60d1fb5ad..000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_responses.go +++ /dev/null @@ -1,71 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -import ( - "encoding/json" - "fmt" - "sort" -) - -// PluginsListResponse contains the response for the Engine API -type PluginsListResponse []*Plugin - -// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType -func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { - versionIndex := len(p) - prefixIndex := 0 - if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { - return fmt.Errorf("%q is not a plugin interface type", p) - } - p = p[1 : len(p)-1] -loop: - for i, b := range p { - switch b { - case '.': - prefixIndex = i - case '/': - versionIndex = i - break loop - } - } - t.Prefix = string(p[:prefixIndex]) - t.Capability = string(p[prefixIndex+1 : versionIndex]) - if versionIndex < len(p) { - t.Version = string(p[versionIndex+1:]) - } - return nil -} - -// MarshalJSON implements json.Marshaler for PluginInterfaceType -func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) { - return json.Marshal(t.String()) -} - -// String implements fmt.Stringer for PluginInterfaceType -func (t PluginInterfaceType) String() string { - return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. -type PluginPrivilege struct { - Name string - Description string - Value []string -} - -// PluginPrivileges is a list of PluginPrivilege -type PluginPrivileges []PluginPrivilege - -func (s PluginPrivileges) Len() int { - return len(s) -} - -func (s PluginPrivileges) Less(i, j int) bool { - return s[i].Name < s[j].Name -} - -func (s PluginPrivileges) Swap(i, j int) { - sort.Strings(s[i].Value) - sort.Strings(s[j].Value) - s[i], s[j] = s[j], s[i] -} diff --git a/vendor/github.com/docker/docker/api/types/port.go b/vendor/github.com/docker/docker/api/types/port.go deleted file mode 100644 index d91234744..000000000 --- a/vendor/github.com/docker/docker/api/types/port.go +++ /dev/null @@ -1,23 +0,0 @@ -package types - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -// Port An open port on a container -// swagger:model Port -type Port struct { - - // Host IP address that the container's port is mapped to - IP string `json:"IP,omitempty"` - - // Port on the container - // Required: true - PrivatePort uint16 `json:"PrivatePort"` - - // Port exposed on the host - PublicPort uint16 `json:"PublicPort,omitempty"` - - // type - // Required: true - Type string `json:"Type"` -} diff --git a/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/vendor/github.com/docker/docker/api/types/registry/authenticate.go deleted file mode 100644 index f0a2113e4..000000000 --- a/vendor/github.com/docker/docker/api/types/registry/authenticate.go +++ /dev/null @@ -1,21 +0,0 @@ -package registry // import "github.com/docker/docker/api/types/registry" - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// AuthenticateOKBody authenticate o k body -// swagger:model AuthenticateOKBody -type AuthenticateOKBody struct { - - // An opaque token used to authenticate a user after a successful login - // Required: true - IdentityToken string `json:"IdentityToken"` - - // The status of the authentication - // Required: true - Status string `json:"Status"` -} diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go deleted file mode 100644 index 62a88f5be..000000000 --- a/vendor/github.com/docker/docker/api/types/registry/registry.go +++ /dev/null @@ -1,120 +0,0 @@ -package registry // import "github.com/docker/docker/api/types/registry" - -import ( - "encoding/json" - "net" - - v1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -// ServiceConfig stores daemon registry services configuration. 
-type ServiceConfig struct { - AllowNondistributableArtifactsCIDRs []*NetIPNet - AllowNondistributableArtifactsHostnames []string - InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` - IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` - Mirrors []string -} - -// NetIPNet is the net.IPNet type, which can be marshalled and -// unmarshalled to JSON -type NetIPNet net.IPNet - -// String returns the CIDR notation of ipnet -func (ipnet *NetIPNet) String() string { - return (*net.IPNet)(ipnet).String() -} - -// MarshalJSON returns the JSON representation of the IPNet -func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { - return json.Marshal((*net.IPNet)(ipnet).String()) -} - -// UnmarshalJSON sets the IPNet from a byte array of JSON -func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { - var ipnetStr string - if err = json.Unmarshal(b, &ipnetStr); err == nil { - var cidr *net.IPNet - if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { - *ipnet = NetIPNet(*cidr) - } - } - return -} - -// IndexInfo contains information about a registry -// -// RepositoryInfo Examples: -// -// { -// "Index" : { -// "Name" : "docker.io", -// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], -// "Secure" : true, -// "Official" : true, -// }, -// "RemoteName" : "library/debian", -// "LocalName" : "debian", -// "CanonicalName" : "docker.io/debian" -// "Official" : true, -// } -// -// { -// "Index" : { -// "Name" : "127.0.0.1:5000", -// "Mirrors" : [], -// "Secure" : false, -// "Official" : false, -// }, -// "RemoteName" : "user/repo", -// "LocalName" : "127.0.0.1:5000/user/repo", -// "CanonicalName" : "127.0.0.1:5000/user/repo", -// "Official" : false, -// } -type IndexInfo struct { - // Name is the name of the registry, such as "docker.io" - Name string - // Mirrors is a list of mirrors, expressed as URIs - Mirrors []string - // Secure is set to false if the registry is part of the list of - // insecure registries. Insecure registries accept HTTP and/or accept - // HTTPS with certificates from unknown CAs. - Secure bool - // Official indicates whether this is an official registry - Official bool -} - -// SearchResult describes a search result returned from a registry -type SearchResult struct { - // StarCount indicates the number of stars this repository has - StarCount int `json:"star_count"` - // IsOfficial is true if the result is from an official repository. 
- IsOfficial bool `json:"is_official"` - // Name is the name of the repository - Name string `json:"name"` - // IsAutomated indicates whether the result is automated - IsAutomated bool `json:"is_automated"` - // Description is a textual description of the repository - Description string `json:"description"` -} - -// SearchResults lists a collection search results returned from a registry -type SearchResults struct { - // Query contains the query string that generated the search results - Query string `json:"query"` - // NumResults indicates the number of results the query returned - NumResults int `json:"num_results"` - // Results is a slice containing the actual results for the search - Results []SearchResult `json:"results"` -} - -// DistributionInspect describes the result obtained from contacting the -// registry to retrieve image metadata -type DistributionInspect struct { - // Descriptor contains information about the manifest, including - // the content addressable digest - Descriptor v1.Descriptor - // Platforms contains the list of platforms supported by the image, - // obtained by parsing the manifest - Platforms []v1.Platform -} diff --git a/vendor/github.com/docker/docker/api/types/service_update_response.go b/vendor/github.com/docker/docker/api/types/service_update_response.go deleted file mode 100644 index 74ea64b1b..000000000 --- a/vendor/github.com/docker/docker/api/types/service_update_response.go +++ /dev/null @@ -1,12 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ServiceUpdateResponse service update response -// swagger:model ServiceUpdateResponse -type ServiceUpdateResponse struct { - - // Optional warning messages - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/stats.go b/vendor/github.com/docker/docker/api/types/stats.go deleted file mode 100644 index 20daebed1..000000000 --- a/vendor/github.com/docker/docker/api/types/stats.go +++ /dev/null @@ -1,181 +0,0 @@ -// Package types is used for API stability in the types and response to the -// consumers of the API stats endpoint. -package types // import "github.com/docker/docker/api/types" - -import "time" - -// ThrottlingData stores CPU throttling stats of one running container. -// Not used on Windows. -type ThrottlingData struct { - // Number of periods with throttling active - Periods uint64 `json:"periods"` - // Number of periods when the container hits its throttling limit. - ThrottledPeriods uint64 `json:"throttled_periods"` - // Aggregate time the container was throttled for in nanoseconds. - ThrottledTime uint64 `json:"throttled_time"` -} - -// CPUUsage stores All CPU stats aggregated since container inception. -type CPUUsage struct { - // Total CPU time consumed. - // Units: nanoseconds (Linux) - // Units: 100's of nanoseconds (Windows) - TotalUsage uint64 `json:"total_usage"` - - // Total CPU time consumed per core (Linux). Not used on Windows. - // Units: nanoseconds. - PercpuUsage []uint64 `json:"percpu_usage,omitempty"` - - // Time spent by tasks of the cgroup in kernel mode (Linux). - // Time spent by all container processes in kernel mode (Windows). - // Units: nanoseconds (Linux). - // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers. - UsageInKernelmode uint64 `json:"usage_in_kernelmode"` - - // Time spent by tasks of the cgroup in user mode (Linux). 
- // Time spent by all container processes in user mode (Windows). - // Units: nanoseconds (Linux). - // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers - UsageInUsermode uint64 `json:"usage_in_usermode"` -} - -// CPUStats aggregates and wraps all CPU related info of container -type CPUStats struct { - // CPU Usage. Linux and Windows. - CPUUsage CPUUsage `json:"cpu_usage"` - - // System Usage. Linux only. - SystemUsage uint64 `json:"system_cpu_usage,omitempty"` - - // Online CPUs. Linux only. - OnlineCPUs uint32 `json:"online_cpus,omitempty"` - - // Throttling Data. Linux only. - ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` -} - -// MemoryStats aggregates all memory stats since container inception on Linux. -// Windows returns stats for commit and private working set only. -type MemoryStats struct { - // Linux Memory Stats - - // current res_counter usage for memory - Usage uint64 `json:"usage,omitempty"` - // maximum usage ever recorded. - MaxUsage uint64 `json:"max_usage,omitempty"` - // TODO(vishh): Export these as stronger types. - // all the stats exported via memory.stat. - Stats map[string]uint64 `json:"stats,omitempty"` - // number of times memory usage hits limits. - Failcnt uint64 `json:"failcnt,omitempty"` - Limit uint64 `json:"limit,omitempty"` - - // Windows Memory Stats - // See https://technet.microsoft.com/en-us/magazine/ff382715.aspx - - // committed bytes - Commit uint64 `json:"commitbytes,omitempty"` - // peak committed bytes - CommitPeak uint64 `json:"commitpeakbytes,omitempty"` - // private working set - PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"` -} - -// BlkioStatEntry is one small entity to store a piece of Blkio stats -// Not used on Windows. -type BlkioStatEntry struct { - Major uint64 `json:"major"` - Minor uint64 `json:"minor"` - Op string `json:"op"` - Value uint64 `json:"value"` -} - -// BlkioStats stores All IO service stats for data read and write. -// This is a Linux specific structure as the differences between expressing -// block I/O on Windows and Linux are sufficiently significant to make -// little sense attempting to morph into a combined structure. -type BlkioStats struct { - // number of bytes transferred to and from the block device - IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` - IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` - IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` - IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` - IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` - IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` - IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` - SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` -} - -// StorageStats is the disk I/O stats for read/write on Windows. -type StorageStats struct { - ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"` - ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"` - WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"` - WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"` -} - -// NetworkStats aggregates the network stats of one container -type NetworkStats struct { - // Bytes received. Windows and Linux. - RxBytes uint64 `json:"rx_bytes"` - // Packets received. Windows and Linux. - RxPackets uint64 `json:"rx_packets"` - // Received errors. Not used on Windows. 
Note that we don't `omitempty` this - // field as it is expected in the >=v1.21 API stats structure. - RxErrors uint64 `json:"rx_errors"` - // Incoming packets dropped. Windows and Linux. - RxDropped uint64 `json:"rx_dropped"` - // Bytes sent. Windows and Linux. - TxBytes uint64 `json:"tx_bytes"` - // Packets sent. Windows and Linux. - TxPackets uint64 `json:"tx_packets"` - // Sent errors. Not used on Windows. Note that we don't `omitempty` this - // field as it is expected in the >=v1.21 API stats structure. - TxErrors uint64 `json:"tx_errors"` - // Outgoing packets dropped. Windows and Linux. - TxDropped uint64 `json:"tx_dropped"` - // Endpoint ID. Not used on Linux. - EndpointID string `json:"endpoint_id,omitempty"` - // Instance ID. Not used on Linux. - InstanceID string `json:"instance_id,omitempty"` -} - -// PidsStats contains the stats of a container's pids -type PidsStats struct { - // Current is the number of pids in the cgroup - Current uint64 `json:"current,omitempty"` - // Limit is the hard limit on the number of pids in the cgroup. - // A "Limit" of 0 means that there is no limit. - Limit uint64 `json:"limit,omitempty"` -} - -// Stats is Ultimate struct aggregating all types of stats of one container -type Stats struct { - // Common stats - Read time.Time `json:"read"` - PreRead time.Time `json:"preread"` - - // Linux specific stats, not populated on Windows. - PidsStats PidsStats `json:"pids_stats,omitempty"` - BlkioStats BlkioStats `json:"blkio_stats,omitempty"` - - // Windows specific stats, not populated on Linux. - NumProcs uint32 `json:"num_procs"` - StorageStats StorageStats `json:"storage_stats,omitempty"` - - // Shared stats - CPUStats CPUStats `json:"cpu_stats,omitempty"` - PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous" - MemoryStats MemoryStats `json:"memory_stats,omitempty"` -} - -// StatsJSON is newly used Networks -type StatsJSON struct { - Stats - - Name string `json:"name,omitempty"` - ID string `json:"id,omitempty"` - - // Networks request version >=1.21 - Networks map[string]NetworkStats `json:"networks,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/strslice/strslice.go b/vendor/github.com/docker/docker/api/types/strslice/strslice.go deleted file mode 100644 index 82921cebc..000000000 --- a/vendor/github.com/docker/docker/api/types/strslice/strslice.go +++ /dev/null @@ -1,30 +0,0 @@ -package strslice // import "github.com/docker/docker/api/types/strslice" - -import "encoding/json" - -// StrSlice represents a string or an array of strings. -// We need to override the json decoder to accept both options. -type StrSlice []string - -// UnmarshalJSON decodes the byte slice whether it's a string or an array of -// strings. This method is needed to implement json.Unmarshaler. -func (e *StrSlice) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - // With no input, we preserve the existing value by returning nil and - // leaving the target alone. This allows defining default values for - // the type. 
- return nil - } - - p := make([]string, 0, 1) - if err := json.Unmarshal(b, &p); err != nil { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - p = append(p, s) - } - - *e = p - return nil -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/common.go b/vendor/github.com/docker/docker/api/types/swarm/common.go deleted file mode 100644 index 5ded7dba8..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/common.go +++ /dev/null @@ -1,48 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "strconv" - "time" -) - -// Version represents the internal object version. -type Version struct { - Index uint64 `json:",omitempty"` -} - -// String implements fmt.Stringer interface. -func (v Version) String() string { - return strconv.FormatUint(v.Index, 10) -} - -// Meta is a base object inherited by most of the other once. -type Meta struct { - Version Version `json:",omitempty"` - CreatedAt time.Time `json:",omitempty"` - UpdatedAt time.Time `json:",omitempty"` -} - -// Annotations represents how to describe an object. -type Annotations struct { - Name string `json:",omitempty"` - Labels map[string]string `json:"Labels"` -} - -// Driver represents a driver (network, logging, secrets backend). -type Driver struct { - Name string `json:",omitempty"` - Options map[string]string `json:",omitempty"` -} - -// TLSInfo represents the TLS information about what CA certificate is trusted, -// and who the issuer for a TLS certificate is -type TLSInfo struct { - // TrustRoot is the trusted CA root certificate in PEM format - TrustRoot string `json:",omitempty"` - - // CertIssuer is the raw subject bytes of the issuer - CertIssuerSubject []byte `json:",omitempty"` - - // CertIssuerPublicKey is the raw public key bytes of the issuer - CertIssuerPublicKey []byte `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/config.go b/vendor/github.com/docker/docker/api/types/swarm/config.go deleted file mode 100644 index 16202ccce..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/config.go +++ /dev/null @@ -1,40 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "os" - -// Config represents a config. -type Config struct { - ID string - Meta - Spec ConfigSpec -} - -// ConfigSpec represents a config specification from a config in swarm -type ConfigSpec struct { - Annotations - Data []byte `json:",omitempty"` - - // Templating controls whether and how to evaluate the config payload as - // a template. If it is not set, no templating is used. - Templating *Driver `json:",omitempty"` -} - -// ConfigReferenceFileTarget is a file target in a config reference -type ConfigReferenceFileTarget struct { - Name string - UID string - GID string - Mode os.FileMode -} - -// ConfigReferenceRuntimeTarget is a target for a config specifying that it -// isn't mounted into the container but instead has some other purpose. 
-type ConfigReferenceRuntimeTarget struct{} - -// ConfigReference is a reference to a config in swarm -type ConfigReference struct { - File *ConfigReferenceFileTarget `json:",omitempty"` - Runtime *ConfigReferenceRuntimeTarget `json:",omitempty"` - ConfigID string - ConfigName string -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go deleted file mode 100644 index af5e1c0bc..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/container.go +++ /dev/null @@ -1,80 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "time" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/mount" - "github.com/docker/go-units" -) - -// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf) -// Detailed documentation is available in: -// http://man7.org/linux/man-pages/man5/resolv.conf.5.html -// `nameserver`, `search`, `options` have been supported. -// TODO: `domain` is not supported yet. -type DNSConfig struct { - // Nameservers specifies the IP addresses of the name servers - Nameservers []string `json:",omitempty"` - // Search specifies the search list for host-name lookup - Search []string `json:",omitempty"` - // Options allows certain internal resolver variables to be modified - Options []string `json:",omitempty"` -} - -// SELinuxContext contains the SELinux labels of the container. -type SELinuxContext struct { - Disable bool - - User string - Role string - Type string - Level string -} - -// CredentialSpec for managed service account (Windows only) -type CredentialSpec struct { - Config string - File string - Registry string -} - -// Privileges defines the security options for the container. -type Privileges struct { - CredentialSpec *CredentialSpec - SELinuxContext *SELinuxContext -} - -// ContainerSpec represents the spec of a container. -type ContainerSpec struct { - Image string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - Command []string `json:",omitempty"` - Args []string `json:",omitempty"` - Hostname string `json:",omitempty"` - Env []string `json:",omitempty"` - Dir string `json:",omitempty"` - User string `json:",omitempty"` - Groups []string `json:",omitempty"` - Privileges *Privileges `json:",omitempty"` - Init *bool `json:",omitempty"` - StopSignal string `json:",omitempty"` - TTY bool `json:",omitempty"` - OpenStdin bool `json:",omitempty"` - ReadOnly bool `json:",omitempty"` - Mounts []mount.Mount `json:",omitempty"` - StopGracePeriod *time.Duration `json:",omitempty"` - Healthcheck *container.HealthConfig `json:",omitempty"` - // The format of extra hosts on swarmkit is specified in: - // http://man7.org/linux/man-pages/man5/hosts.5.html - // IP_address canonical_hostname [aliases...] 
- Hosts []string `json:",omitempty"` - DNSConfig *DNSConfig `json:",omitempty"` - Secrets []*SecretReference `json:",omitempty"` - Configs []*ConfigReference `json:",omitempty"` - Isolation container.Isolation `json:",omitempty"` - Sysctls map[string]string `json:",omitempty"` - CapabilityAdd []string `json:",omitempty"` - CapabilityDrop []string `json:",omitempty"` - Ulimits []*units.Ulimit `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/network.go b/vendor/github.com/docker/docker/api/types/swarm/network.go deleted file mode 100644 index 98ef3284d..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/network.go +++ /dev/null @@ -1,121 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "github.com/docker/docker/api/types/network" -) - -// Endpoint represents an endpoint. -type Endpoint struct { - Spec EndpointSpec `json:",omitempty"` - Ports []PortConfig `json:",omitempty"` - VirtualIPs []EndpointVirtualIP `json:",omitempty"` -} - -// EndpointSpec represents the spec of an endpoint. -type EndpointSpec struct { - Mode ResolutionMode `json:",omitempty"` - Ports []PortConfig `json:",omitempty"` -} - -// ResolutionMode represents a resolution mode. -type ResolutionMode string - -const ( - // ResolutionModeVIP VIP - ResolutionModeVIP ResolutionMode = "vip" - // ResolutionModeDNSRR DNSRR - ResolutionModeDNSRR ResolutionMode = "dnsrr" -) - -// PortConfig represents the config of a port. -type PortConfig struct { - Name string `json:",omitempty"` - Protocol PortConfigProtocol `json:",omitempty"` - // TargetPort is the port inside the container - TargetPort uint32 `json:",omitempty"` - // PublishedPort is the port on the swarm hosts - PublishedPort uint32 `json:",omitempty"` - // PublishMode is the mode in which port is published - PublishMode PortConfigPublishMode `json:",omitempty"` -} - -// PortConfigPublishMode represents the mode in which the port is to -// be published. -type PortConfigPublishMode string - -const ( - // PortConfigPublishModeIngress is used for ports published - // for ingress load balancing using routing mesh. - PortConfigPublishModeIngress PortConfigPublishMode = "ingress" - // PortConfigPublishModeHost is used for ports published - // for direct host level access on the host where the task is running. - PortConfigPublishModeHost PortConfigPublishMode = "host" -) - -// PortConfigProtocol represents the protocol of a port. -type PortConfigProtocol string - -const ( - // TODO(stevvooe): These should be used generally, not just for PortConfig. - - // PortConfigProtocolTCP TCP - PortConfigProtocolTCP PortConfigProtocol = "tcp" - // PortConfigProtocolUDP UDP - PortConfigProtocolUDP PortConfigProtocol = "udp" - // PortConfigProtocolSCTP SCTP - PortConfigProtocolSCTP PortConfigProtocol = "sctp" -) - -// EndpointVirtualIP represents the virtual ip of a port. -type EndpointVirtualIP struct { - NetworkID string `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// Network represents a network. -type Network struct { - ID string - Meta - Spec NetworkSpec `json:",omitempty"` - DriverState Driver `json:",omitempty"` - IPAMOptions *IPAMOptions `json:",omitempty"` -} - -// NetworkSpec represents the spec of a network. 
-type NetworkSpec struct { - Annotations - DriverConfiguration *Driver `json:",omitempty"` - IPv6Enabled bool `json:",omitempty"` - Internal bool `json:",omitempty"` - Attachable bool `json:",omitempty"` - Ingress bool `json:",omitempty"` - IPAMOptions *IPAMOptions `json:",omitempty"` - ConfigFrom *network.ConfigReference `json:",omitempty"` - Scope string `json:",omitempty"` -} - -// NetworkAttachmentConfig represents the configuration of a network attachment. -type NetworkAttachmentConfig struct { - Target string `json:",omitempty"` - Aliases []string `json:",omitempty"` - DriverOpts map[string]string `json:",omitempty"` -} - -// NetworkAttachment represents a network attachment. -type NetworkAttachment struct { - Network Network `json:",omitempty"` - Addresses []string `json:",omitempty"` -} - -// IPAMOptions represents ipam options. -type IPAMOptions struct { - Driver Driver `json:",omitempty"` - Configs []IPAMConfig `json:",omitempty"` -} - -// IPAMConfig represents ipam configuration. -type IPAMConfig struct { - Subnet string `json:",omitempty"` - Range string `json:",omitempty"` - Gateway string `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/docker/docker/api/types/swarm/node.go deleted file mode 100644 index bb98d5eed..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/node.go +++ /dev/null @@ -1,139 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -// Node represents a node. -type Node struct { - ID string - Meta - // Spec defines the desired state of the node as specified by the user. - // The system will honor this and will *never* modify it. - Spec NodeSpec `json:",omitempty"` - // Description encapsulates the properties of the Node as reported by the - // agent. - Description NodeDescription `json:",omitempty"` - // Status provides the current status of the node, as seen by the manager. - Status NodeStatus `json:",omitempty"` - // ManagerStatus provides the current status of the node's manager - // component, if the node is a manager. - ManagerStatus *ManagerStatus `json:",omitempty"` -} - -// NodeSpec represents the spec of a node. -type NodeSpec struct { - Annotations - Role NodeRole `json:",omitempty"` - Availability NodeAvailability `json:",omitempty"` -} - -// NodeRole represents the role of a node. -type NodeRole string - -const ( - // NodeRoleWorker WORKER - NodeRoleWorker NodeRole = "worker" - // NodeRoleManager MANAGER - NodeRoleManager NodeRole = "manager" -) - -// NodeAvailability represents the availability of a node. -type NodeAvailability string - -const ( - // NodeAvailabilityActive ACTIVE - NodeAvailabilityActive NodeAvailability = "active" - // NodeAvailabilityPause PAUSE - NodeAvailabilityPause NodeAvailability = "pause" - // NodeAvailabilityDrain DRAIN - NodeAvailabilityDrain NodeAvailability = "drain" -) - -// NodeDescription represents the description of a node. -type NodeDescription struct { - Hostname string `json:",omitempty"` - Platform Platform `json:",omitempty"` - Resources Resources `json:",omitempty"` - Engine EngineDescription `json:",omitempty"` - TLSInfo TLSInfo `json:",omitempty"` - CSIInfo []NodeCSIInfo `json:",omitempty"` -} - -// Platform represents the platform (Arch/OS). -type Platform struct { - Architecture string `json:",omitempty"` - OS string `json:",omitempty"` -} - -// EngineDescription represents the description of an engine. 
-type EngineDescription struct { - EngineVersion string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - Plugins []PluginDescription `json:",omitempty"` -} - -// NodeCSIInfo represents information about a CSI plugin available on the node -type NodeCSIInfo struct { - // PluginName is the name of the CSI plugin. - PluginName string `json:",omitempty"` - // NodeID is the ID of the node as reported by the CSI plugin. This is - // different from the swarm node ID. - NodeID string `json:",omitempty"` - // MaxVolumesPerNode is the maximum number of volumes that may be published - // to this node - MaxVolumesPerNode int64 `json:",omitempty"` - // AccessibleTopology indicates the location of this node in the CSI - // plugin's topology - AccessibleTopology *Topology `json:",omitempty"` -} - -// PluginDescription represents the description of an engine plugin. -type PluginDescription struct { - Type string `json:",omitempty"` - Name string `json:",omitempty"` -} - -// NodeStatus represents the status of a node. -type NodeStatus struct { - State NodeState `json:",omitempty"` - Message string `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// Reachability represents the reachability of a node. -type Reachability string - -const ( - // ReachabilityUnknown UNKNOWN - ReachabilityUnknown Reachability = "unknown" - // ReachabilityUnreachable UNREACHABLE - ReachabilityUnreachable Reachability = "unreachable" - // ReachabilityReachable REACHABLE - ReachabilityReachable Reachability = "reachable" -) - -// ManagerStatus represents the status of a manager. -type ManagerStatus struct { - Leader bool `json:",omitempty"` - Reachability Reachability `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// NodeState represents the state of a node. -type NodeState string - -const ( - // NodeStateUnknown UNKNOWN - NodeStateUnknown NodeState = "unknown" - // NodeStateDown DOWN - NodeStateDown NodeState = "down" - // NodeStateReady READY - NodeStateReady NodeState = "ready" - // NodeStateDisconnected DISCONNECTED - NodeStateDisconnected NodeState = "disconnected" -) - -// Topology defines the CSI topology of this node. This type is a duplicate of -// github.com/docker/docker/api/types.Topology. Because the type definition -// is so simple and to avoid complicated structure or circular imports, we just -// duplicate it here. 
See that type for full documentation -type Topology struct { - Segments map[string]string `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime.go b/vendor/github.com/docker/docker/api/types/swarm/runtime.go deleted file mode 100644 index 0c77403cc..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime.go +++ /dev/null @@ -1,27 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -// RuntimeType is the type of runtime used for the TaskSpec -type RuntimeType string - -// RuntimeURL is the proto type url -type RuntimeURL string - -const ( - // RuntimeContainer is the container based runtime - RuntimeContainer RuntimeType = "container" - // RuntimePlugin is the plugin based runtime - RuntimePlugin RuntimeType = "plugin" - // RuntimeNetworkAttachment is the network attachment runtime - RuntimeNetworkAttachment RuntimeType = "attachment" - - // RuntimeURLContainer is the proto url for the container type - RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer" - // RuntimeURLPlugin is the proto url for the plugin type - RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin" -) - -// NetworkAttachmentSpec represents the runtime spec type for network -// attachment tasks -type NetworkAttachmentSpec struct { - ContainerID string -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go deleted file mode 100644 index 98c2806c3..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go +++ /dev/null @@ -1,3 +0,0 @@ -//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto - -package runtime // import "github.com/docker/docker/api/types/swarm/runtime" diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go deleted file mode 100644 index e45045866..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go +++ /dev/null @@ -1,754 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: plugin.proto - -/* - Package runtime is a generated protocol buffer package. - - It is generated from these files: - plugin.proto - - It has these top-level messages: - PluginSpec - PluginPrivilege -*/ -package runtime - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -// PluginSpec defines the base payload which clients can specify for creating -// a service with the plugin runtime. 
-type PluginSpec struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` - Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"` - Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` - Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` -} - -func (m *PluginSpec) Reset() { *m = PluginSpec{} } -func (m *PluginSpec) String() string { return proto.CompactTextString(m) } -func (*PluginSpec) ProtoMessage() {} -func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } - -func (m *PluginSpec) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PluginSpec) GetRemote() string { - if m != nil { - return m.Remote - } - return "" -} - -func (m *PluginSpec) GetPrivileges() []*PluginPrivilege { - if m != nil { - return m.Privileges - } - return nil -} - -func (m *PluginSpec) GetDisabled() bool { - if m != nil { - return m.Disabled - } - return false -} - -func (m *PluginSpec) GetEnv() []string { - if m != nil { - return m.Env - } - return nil -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. -type PluginPrivilege struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"` -} - -func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } -func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } -func (*PluginPrivilege) ProtoMessage() {} -func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } - -func (m *PluginPrivilege) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PluginPrivilege) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *PluginPrivilege) GetValue() []string { - if m != nil { - return m.Value - } - return nil -} - -func init() { - proto.RegisterType((*PluginSpec)(nil), "PluginSpec") - proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege") -} -func (m *PluginSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Remote) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) - i += copy(dAtA[i:], m.Remote) - } - if len(m.Privileges) > 0 { - for _, msg := range m.Privileges { - dAtA[i] = 0x1a - i++ - i = encodeVarintPlugin(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.Disabled { - dAtA[i] = 0x20 - i++ - if m.Disabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if len(m.Env) > 0 { - for _, s := range m.Env { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *PluginPrivilege) Marshal() 
(dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Description) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - } - if len(m.Value) > 0 { - for _, s := range m.Value { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *PluginSpec) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - l = len(m.Remote) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - if len(m.Privileges) > 0 { - for _, e := range m.Privileges { - l = e.Size() - n += 1 + l + sovPlugin(uint64(l)) - } - } - if m.Disabled { - n += 2 - } - if len(m.Env) > 0 { - for _, s := range m.Env { - l = len(s) - n += 1 + l + sovPlugin(uint64(l)) - } - } - return n -} - -func (m *PluginPrivilege) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - l = len(m.Description) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - if len(m.Value) > 0 { - for _, s := range m.Value { - l = len(s) - n += 1 + l + sovPlugin(uint64(l)) - } - } - return n -} - -func sovPlugin(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozPlugin(x uint64) (n int) { - return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *PluginSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Remote = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Privileges = append(m.Privileges, &PluginPrivilege{}) - if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Disabled = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPlugin(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthPlugin - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen 
:= int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPlugin(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthPlugin - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipPlugin(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthPlugin - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipPlugin(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) 
- } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) } - -var fileDescriptorPlugin = []byte{ - // 256 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x4d, 0x4b, 0xc3, 0x30, - 0x18, 0xc7, 0x89, 0xdd, 0xc6, 0xfa, 0x4c, 0x70, 0x04, 0x91, 0xe2, 0xa1, 0x94, 0x9d, 0x7a, 0x6a, - 0x45, 0x2f, 0x82, 0x37, 0x0f, 0x9e, 0x47, 0xbc, 0x09, 0x1e, 0xd2, 0xf6, 0xa1, 0x06, 0x9b, 0x17, - 0x92, 0xb4, 0xe2, 0x37, 0xf1, 0x23, 0x79, 0xf4, 0x23, 0x48, 0x3f, 0x89, 0x98, 0x75, 0x32, 0x64, - 0xa7, 0xff, 0x4b, 0xc2, 0x9f, 0x1f, 0x0f, 0x9c, 0x9a, 0xae, 0x6f, 0x85, 0x2a, 0x8c, 0xd5, 0x5e, - 0x6f, 0x3e, 0x08, 0xc0, 0x36, 0x14, 0x8f, 0x06, 0x6b, 0x4a, 0x61, 0xa6, 0xb8, 0xc4, 0x84, 0x64, - 0x24, 0x8f, 0x59, 0xf0, 0xf4, 0x02, 0x16, 0x16, 0xa5, 0xf6, 0x98, 0x9c, 0x84, 0x76, 0x4a, 0xf4, - 0x0a, 0xc0, 0x58, 0x31, 0x88, 0x0e, 0x5b, 0x74, 0x49, 0x94, 0x45, 0xf9, 0xea, 0x7a, 0x5d, 0xec, - 0xc6, 0xb6, 0xfb, 0x07, 0x76, 0xf0, 0x87, 0x5e, 0xc2, 0xb2, 0x11, 0x8e, 0x57, 0x1d, 0x36, 0xc9, - 0x2c, 0x23, 0xf9, 0x92, 0xfd, 0x65, 0xba, 0x86, 0x08, 0xd5, 0x90, 0xcc, 0xb3, 0x28, 0x8f, 0xd9, - 0xaf, 0xdd, 0x3c, 0xc3, 0xd9, 0xbf, 0xb1, 0xa3, 0x78, 0x19, 0xac, 0x1a, 0x74, 0xb5, 0x15, 0xc6, - 0x0b, 0xad, 0x26, 0xc6, 0xc3, 0x8a, 0x9e, 0xc3, 0x7c, 0xe0, 0x5d, 0x8f, 0x81, 0x31, 0x66, 0xbb, - 0x70, 0xff, 0xf0, 0x39, 0xa6, 0xe4, 0x6b, 0x4c, 0xc9, 0xf7, 0x98, 0x92, 0xa7, 0xdb, 0x56, 0xf8, - 0x97, 0xbe, 0x2a, 0x6a, 0x2d, 0xcb, 0x46, 0xd7, 0xaf, 0x68, 0xf7, 0xc2, 0x8d, 0x28, 0xfd, 0xbb, - 0x41, 0x57, 0xba, 0x37, 0x6e, 0x65, 0x69, 0x7b, 0xe5, 0x85, 0xc4, 0xbb, 0x49, 0xab, 0x45, 0x38, - 0xe4, 0xcd, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x99, 0xa8, 0xd9, 0x9b, 0x58, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto deleted file mode 100644 index 9ef169046..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -option go_package = "github.com/docker/docker/api/types/swarm/runtime;runtime"; - -// PluginSpec defines the base payload which clients can specify for creating -// a service with the plugin runtime. -message PluginSpec { - string name = 1; - string remote = 2; - repeated PluginPrivilege privileges = 3; - bool disabled = 4; - repeated string env = 5; -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. -message PluginPrivilege { - string name = 1; - string description = 2; - repeated string value = 3; -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/secret.go b/vendor/github.com/docker/docker/api/types/swarm/secret.go deleted file mode 100644 index d5213ec98..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/secret.go +++ /dev/null @@ -1,36 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "os" - -// Secret represents a secret. 
-type Secret struct { - ID string - Meta - Spec SecretSpec -} - -// SecretSpec represents a secret specification from a secret in swarm -type SecretSpec struct { - Annotations - Data []byte `json:",omitempty"` - Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store - - // Templating controls whether and how to evaluate the secret payload as - // a template. If it is not set, no templating is used. - Templating *Driver `json:",omitempty"` -} - -// SecretReferenceFileTarget is a file target in a secret reference -type SecretReferenceFileTarget struct { - Name string - UID string - GID string - Mode os.FileMode -} - -// SecretReference is a reference to a secret in swarm -type SecretReference struct { - File *SecretReferenceFileTarget - SecretID string - SecretName string -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/docker/docker/api/types/swarm/service.go deleted file mode 100644 index 6eb452d24..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/service.go +++ /dev/null @@ -1,202 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "time" - -// Service represents a service. -type Service struct { - ID string - Meta - Spec ServiceSpec `json:",omitempty"` - PreviousSpec *ServiceSpec `json:",omitempty"` - Endpoint Endpoint `json:",omitempty"` - UpdateStatus *UpdateStatus `json:",omitempty"` - - // ServiceStatus is an optional, extra field indicating the number of - // desired and running tasks. It is provided primarily as a shortcut to - // calculating these values client-side, which otherwise would require - // listing all tasks for a service, an operation that could be - // computation and network expensive. - ServiceStatus *ServiceStatus `json:",omitempty"` - - // JobStatus is the status of a Service which is in one of ReplicatedJob or - // GlobalJob modes. It is absent on Replicated and Global services. - JobStatus *JobStatus `json:",omitempty"` -} - -// ServiceSpec represents the spec of a service. -type ServiceSpec struct { - Annotations - - // TaskTemplate defines how the service should construct new tasks when - // orchestrating this service. - TaskTemplate TaskSpec `json:",omitempty"` - Mode ServiceMode `json:",omitempty"` - UpdateConfig *UpdateConfig `json:",omitempty"` - RollbackConfig *UpdateConfig `json:",omitempty"` - - // Networks field in ServiceSpec is deprecated. The - // same field in TaskSpec should be used instead. - // This field will be removed in a future release. - Networks []NetworkAttachmentConfig `json:",omitempty"` - EndpointSpec *EndpointSpec `json:",omitempty"` -} - -// ServiceMode represents the mode of a service. -type ServiceMode struct { - Replicated *ReplicatedService `json:",omitempty"` - Global *GlobalService `json:",omitempty"` - ReplicatedJob *ReplicatedJob `json:",omitempty"` - GlobalJob *GlobalJob `json:",omitempty"` -} - -// UpdateState is the state of a service update. -type UpdateState string - -const ( - // UpdateStateUpdating is the updating state. - UpdateStateUpdating UpdateState = "updating" - // UpdateStatePaused is the paused state. - UpdateStatePaused UpdateState = "paused" - // UpdateStateCompleted is the completed state. - UpdateStateCompleted UpdateState = "completed" - // UpdateStateRollbackStarted is the state with a rollback in progress. 
- UpdateStateRollbackStarted UpdateState = "rollback_started" - // UpdateStateRollbackPaused is the state with a rollback in progress. - UpdateStateRollbackPaused UpdateState = "rollback_paused" - // UpdateStateRollbackCompleted is the state with a rollback in progress. - UpdateStateRollbackCompleted UpdateState = "rollback_completed" -) - -// UpdateStatus reports the status of a service update. -type UpdateStatus struct { - State UpdateState `json:",omitempty"` - StartedAt *time.Time `json:",omitempty"` - CompletedAt *time.Time `json:",omitempty"` - Message string `json:",omitempty"` -} - -// ReplicatedService is a kind of ServiceMode. -type ReplicatedService struct { - Replicas *uint64 `json:",omitempty"` -} - -// GlobalService is a kind of ServiceMode. -type GlobalService struct{} - -// ReplicatedJob is the a type of Service which executes a defined Tasks -// in parallel until the specified number of Tasks have succeeded. -type ReplicatedJob struct { - // MaxConcurrent indicates the maximum number of Tasks that should be - // executing simultaneously for this job at any given time. There may be - // fewer Tasks that MaxConcurrent executing simultaneously; for example, if - // there are fewer than MaxConcurrent tasks needed to reach - // TotalCompletions. - // - // If this field is empty, it will default to a max concurrency of 1. - MaxConcurrent *uint64 `json:",omitempty"` - - // TotalCompletions is the total number of Tasks desired to run to - // completion. - // - // If this field is empty, the value of MaxConcurrent will be used. - TotalCompletions *uint64 `json:",omitempty"` -} - -// GlobalJob is the type of a Service which executes a Task on every Node -// matching the Service's placement constraints. These tasks run to completion -// and then exit. -// -// This type is deliberately empty. -type GlobalJob struct{} - -const ( - // UpdateFailureActionPause PAUSE - UpdateFailureActionPause = "pause" - // UpdateFailureActionContinue CONTINUE - UpdateFailureActionContinue = "continue" - // UpdateFailureActionRollback ROLLBACK - UpdateFailureActionRollback = "rollback" - - // UpdateOrderStopFirst STOP_FIRST - UpdateOrderStopFirst = "stop-first" - // UpdateOrderStartFirst START_FIRST - UpdateOrderStartFirst = "start-first" -) - -// UpdateConfig represents the update configuration. -type UpdateConfig struct { - // Maximum number of tasks to be updated in one iteration. - // 0 means unlimited parallelism. - Parallelism uint64 - - // Amount of time between updates. - Delay time.Duration `json:",omitempty"` - - // FailureAction is the action to take when an update failures. - FailureAction string `json:",omitempty"` - - // Monitor indicates how long to monitor a task for failure after it is - // created. If the task fails by ending up in one of the states - // REJECTED, COMPLETED, or FAILED, within Monitor from its creation, - // this counts as a failure. If it fails after Monitor, it does not - // count as a failure. If Monitor is unspecified, a default value will - // be used. - Monitor time.Duration `json:",omitempty"` - - // MaxFailureRatio is the fraction of tasks that may fail during - // an update before the failure action is invoked. Any task created by - // the current update which ends up in one of the states REJECTED, - // COMPLETED or FAILED within Monitor from its creation counts as a - // failure. The number of failures is divided by the number of tasks - // being updated, and if this fraction is greater than - // MaxFailureRatio, the failure action is invoked. 
- // - // If the failure action is CONTINUE, there is no effect. - // If the failure action is PAUSE, no more tasks will be updated until - // another update is started. - MaxFailureRatio float32 - - // Order indicates the order of operations when rolling out an updated - // task. Either the old task is shut down before the new task is - // started, or the new task is started before the old task is shut down. - Order string -} - -// ServiceStatus represents the number of running tasks in a service and the -// number of tasks desired to be running. -type ServiceStatus struct { - // RunningTasks is the number of tasks for the service actually in the - // Running state - RunningTasks uint64 - - // DesiredTasks is the number of tasks desired to be running by the - // service. For replicated services, this is the replica count. For global - // services, this is computed by taking the number of tasks with desired - // state of not-Shutdown. - DesiredTasks uint64 - - // CompletedTasks is the number of tasks in the state Completed, if this - // service is in ReplicatedJob or GlobalJob mode. This field must be - // cross-referenced with the service type, because the default value of 0 - // may mean that a service is not in a job mode, or it may mean that the - // job has yet to complete any tasks. - CompletedTasks uint64 -} - -// JobStatus is the status of a job-type service. -type JobStatus struct { - // JobIteration is a value increased each time a Job is executed, - // successfully or otherwise. "Executed", in this case, means the job as a - // whole has been started, not that an individual Task has been launched. A - // job is "Executed" when its ServiceSpec is updated. JobIteration can be - // used to disambiguate Tasks belonging to different executions of a job. - // - // Though JobIteration will increase with each subsequent execution, it may - // not necessarily increase by 1, and so JobIteration should not be used to - // keep track of the number of times a job has been executed. - JobIteration Version - - // LastExecution is the time that the job was last executed, as observed by - // Swarm manager. - LastExecution time.Time `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go deleted file mode 100644 index 3eae4b9b2..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/swarm.go +++ /dev/null @@ -1,237 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "time" -) - -// ClusterInfo represents info about the cluster for outputting in "info" -// it contains the same information as "Swarm", but without the JoinTokens -type ClusterInfo struct { - ID string - Meta - Spec Spec - TLSInfo TLSInfo - RootRotationInProgress bool - DefaultAddrPool []string - SubnetSize uint32 - DataPathPort uint32 -} - -// Swarm represents a swarm. -type Swarm struct { - ClusterInfo - JoinTokens JoinTokens -} - -// JoinTokens contains the tokens workers and managers need to join the swarm. -type JoinTokens struct { - // Worker is the join token workers may use to join the swarm. - Worker string - // Manager is the join token managers may use to join the swarm. - Manager string -} - -// Spec represents the spec of a swarm. 
-type Spec struct { - Annotations - - Orchestration OrchestrationConfig `json:",omitempty"` - Raft RaftConfig `json:",omitempty"` - Dispatcher DispatcherConfig `json:",omitempty"` - CAConfig CAConfig `json:",omitempty"` - TaskDefaults TaskDefaults `json:",omitempty"` - EncryptionConfig EncryptionConfig `json:",omitempty"` -} - -// OrchestrationConfig represents orchestration configuration. -type OrchestrationConfig struct { - // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or - // node. If negative, never remove completed or failed tasks. - TaskHistoryRetentionLimit *int64 `json:",omitempty"` -} - -// TaskDefaults parameterizes cluster-level task creation with default values. -type TaskDefaults struct { - // LogDriver selects the log driver to use for tasks created in the - // orchestrator if unspecified by a service. - // - // Updating this value will only have an affect on new tasks. Old tasks - // will continue use their previously configured log driver until - // recreated. - LogDriver *Driver `json:",omitempty"` -} - -// EncryptionConfig controls at-rest encryption of data and keys. -type EncryptionConfig struct { - // AutoLockManagers specifies whether or not managers TLS keys and raft data - // should be encrypted at rest in such a way that they must be unlocked - // before the manager node starts up again. - AutoLockManagers bool -} - -// RaftConfig represents raft configuration. -type RaftConfig struct { - // SnapshotInterval is the number of log entries between snapshots. - SnapshotInterval uint64 `json:",omitempty"` - - // KeepOldSnapshots is the number of snapshots to keep beyond the - // current snapshot. - KeepOldSnapshots *uint64 `json:",omitempty"` - - // LogEntriesForSlowFollowers is the number of log entries to keep - // around to sync up slow followers after a snapshot is created. - LogEntriesForSlowFollowers uint64 `json:",omitempty"` - - // ElectionTick is the number of ticks that a follower will wait for a message - // from the leader before becoming a candidate and starting an election. - // ElectionTick must be greater than HeartbeatTick. - // - // A tick currently defaults to one second, so these translate directly to - // seconds currently, but this is NOT guaranteed. - ElectionTick int - - // HeartbeatTick is the number of ticks between heartbeats. Every - // HeartbeatTick ticks, the leader will send a heartbeat to the - // followers. - // - // A tick currently defaults to one second, so these translate directly to - // seconds currently, but this is NOT guaranteed. - HeartbeatTick int -} - -// DispatcherConfig represents dispatcher configuration. -type DispatcherConfig struct { - // HeartbeatPeriod defines how often agent should send heartbeats to - // dispatcher. - HeartbeatPeriod time.Duration `json:",omitempty"` -} - -// CAConfig represents CA configuration. -type CAConfig struct { - // NodeCertExpiry is the duration certificates should be issued for - NodeCertExpiry time.Duration `json:",omitempty"` - - // ExternalCAs is a list of CAs to which a manager node will make - // certificate signing requests for node certificates. - ExternalCAs []*ExternalCA `json:",omitempty"` - - // SigningCACert and SigningCAKey specify the desired signing root CA and - // root CA key for the swarm. When inspecting the cluster, the key will - // be redacted. 
- SigningCACert string `json:",omitempty"` - SigningCAKey string `json:",omitempty"` - - // If this value changes, and there is no specified signing cert and key, - // then the swarm is forced to generate a new root certificate ane key. - ForceRotate uint64 `json:",omitempty"` -} - -// ExternalCAProtocol represents type of external CA. -type ExternalCAProtocol string - -// ExternalCAProtocolCFSSL CFSSL -const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl" - -// ExternalCA defines external CA to be used by the cluster. -type ExternalCA struct { - // Protocol is the protocol used by this external CA. - Protocol ExternalCAProtocol - - // URL is the URL where the external CA can be reached. - URL string - - // Options is a set of additional key/value pairs whose interpretation - // depends on the specified CA type. - Options map[string]string `json:",omitempty"` - - // CACert specifies which root CA is used by this external CA. This certificate must - // be in PEM format. - CACert string -} - -// InitRequest is the request used to init a swarm. -type InitRequest struct { - ListenAddr string - AdvertiseAddr string - DataPathAddr string - DataPathPort uint32 - ForceNewCluster bool - Spec Spec - AutoLockManagers bool - Availability NodeAvailability - DefaultAddrPool []string - SubnetSize uint32 -} - -// JoinRequest is the request used to join a swarm. -type JoinRequest struct { - ListenAddr string - AdvertiseAddr string - DataPathAddr string - RemoteAddrs []string - JoinToken string // accept by secret - Availability NodeAvailability -} - -// UnlockRequest is the request used to unlock a swarm. -type UnlockRequest struct { - // UnlockKey is the unlock key in ASCII-armored format. - UnlockKey string -} - -// LocalNodeState represents the state of the local node. -type LocalNodeState string - -const ( - // LocalNodeStateInactive INACTIVE - LocalNodeStateInactive LocalNodeState = "inactive" - // LocalNodeStatePending PENDING - LocalNodeStatePending LocalNodeState = "pending" - // LocalNodeStateActive ACTIVE - LocalNodeStateActive LocalNodeState = "active" - // LocalNodeStateError ERROR - LocalNodeStateError LocalNodeState = "error" - // LocalNodeStateLocked LOCKED - LocalNodeStateLocked LocalNodeState = "locked" -) - -// Info represents generic information about swarm. -type Info struct { - NodeID string - NodeAddr string - - LocalNodeState LocalNodeState - ControlAvailable bool - Error string - - RemoteManagers []Peer - Nodes int `json:",omitempty"` - Managers int `json:",omitempty"` - - Cluster *ClusterInfo `json:",omitempty"` - - Warnings []string `json:",omitempty"` -} - -// Status provides information about the current swarm status and role, -// obtained from the "Swarm" header in the API response. -type Status struct { - // NodeState represents the state of the node. - NodeState LocalNodeState - - // ControlAvailable indicates if the node is a swarm manager. - ControlAvailable bool -} - -// Peer represents a peer. -type Peer struct { - NodeID string - Addr string -} - -// UpdateFlags contains flags for SwarmUpdate. 
-type UpdateFlags struct { - RotateWorkerToken bool - RotateManagerToken bool - RotateManagerUnlockKey bool -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go deleted file mode 100644 index ad3eeca0b..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/task.go +++ /dev/null @@ -1,225 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "time" - - "github.com/docker/docker/api/types/swarm/runtime" -) - -// TaskState represents the state of a task. -type TaskState string - -const ( - // TaskStateNew NEW - TaskStateNew TaskState = "new" - // TaskStateAllocated ALLOCATED - TaskStateAllocated TaskState = "allocated" - // TaskStatePending PENDING - TaskStatePending TaskState = "pending" - // TaskStateAssigned ASSIGNED - TaskStateAssigned TaskState = "assigned" - // TaskStateAccepted ACCEPTED - TaskStateAccepted TaskState = "accepted" - // TaskStatePreparing PREPARING - TaskStatePreparing TaskState = "preparing" - // TaskStateReady READY - TaskStateReady TaskState = "ready" - // TaskStateStarting STARTING - TaskStateStarting TaskState = "starting" - // TaskStateRunning RUNNING - TaskStateRunning TaskState = "running" - // TaskStateComplete COMPLETE - TaskStateComplete TaskState = "complete" - // TaskStateShutdown SHUTDOWN - TaskStateShutdown TaskState = "shutdown" - // TaskStateFailed FAILED - TaskStateFailed TaskState = "failed" - // TaskStateRejected REJECTED - TaskStateRejected TaskState = "rejected" - // TaskStateRemove REMOVE - TaskStateRemove TaskState = "remove" - // TaskStateOrphaned ORPHANED - TaskStateOrphaned TaskState = "orphaned" -) - -// Task represents a task. -type Task struct { - ID string - Meta - Annotations - - Spec TaskSpec `json:",omitempty"` - ServiceID string `json:",omitempty"` - Slot int `json:",omitempty"` - NodeID string `json:",omitempty"` - Status TaskStatus `json:",omitempty"` - DesiredState TaskState `json:",omitempty"` - NetworksAttachments []NetworkAttachment `json:",omitempty"` - GenericResources []GenericResource `json:",omitempty"` - - // JobIteration is the JobIteration of the Service that this Task was - // spawned from, if the Service is a ReplicatedJob or GlobalJob. This is - // used to determine which Tasks belong to which run of the job. This field - // is absent if the Service mode is Replicated or Global. - JobIteration *Version `json:",omitempty"` - - // Volumes is the list of VolumeAttachments for this task. It specifies - // which particular volumes are to be used by this particular task, and - // fulfilling what mounts in the spec. - Volumes []VolumeAttachment -} - -// TaskSpec represents the spec of a task. -type TaskSpec struct { - // ContainerSpec, NetworkAttachmentSpec, and PluginSpec are mutually exclusive. - // PluginSpec is only used when the `Runtime` field is set to `plugin` - // NetworkAttachmentSpec is used if the `Runtime` field is set to - // `attachment`. - ContainerSpec *ContainerSpec `json:",omitempty"` - PluginSpec *runtime.PluginSpec `json:",omitempty"` - NetworkAttachmentSpec *NetworkAttachmentSpec `json:",omitempty"` - - Resources *ResourceRequirements `json:",omitempty"` - RestartPolicy *RestartPolicy `json:",omitempty"` - Placement *Placement `json:",omitempty"` - Networks []NetworkAttachmentConfig `json:",omitempty"` - - // LogDriver specifies the LogDriver to use for tasks created from this - // spec. 
If not present, the one on cluster default on swarm.Spec will be - // used, finally falling back to the engine default if not specified. - LogDriver *Driver `json:",omitempty"` - - // ForceUpdate is a counter that triggers an update even if no relevant - // parameters have been changed. - ForceUpdate uint64 - - Runtime RuntimeType `json:",omitempty"` -} - -// Resources represents resources (CPU/Memory) which can be advertised by a -// node and requested to be reserved for a task. -type Resources struct { - NanoCPUs int64 `json:",omitempty"` - MemoryBytes int64 `json:",omitempty"` - GenericResources []GenericResource `json:",omitempty"` -} - -// Limit describes limits on resources which can be requested by a task. -type Limit struct { - NanoCPUs int64 `json:",omitempty"` - MemoryBytes int64 `json:",omitempty"` - Pids int64 `json:",omitempty"` -} - -// GenericResource represents a "user defined" resource which can -// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1) -type GenericResource struct { - NamedResourceSpec *NamedGenericResource `json:",omitempty"` - DiscreteResourceSpec *DiscreteGenericResource `json:",omitempty"` -} - -// NamedGenericResource represents a "user defined" resource which is defined -// as a string. -// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) -// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...) -type NamedGenericResource struct { - Kind string `json:",omitempty"` - Value string `json:",omitempty"` -} - -// DiscreteGenericResource represents a "user defined" resource which is defined -// as an integer -// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) -// Value is used to count the resource (SSD=5, HDD=3, ...) -type DiscreteGenericResource struct { - Kind string `json:",omitempty"` - Value int64 `json:",omitempty"` -} - -// ResourceRequirements represents resources requirements. -type ResourceRequirements struct { - Limits *Limit `json:",omitempty"` - Reservations *Resources `json:",omitempty"` -} - -// Placement represents orchestration parameters. -type Placement struct { - Constraints []string `json:",omitempty"` - Preferences []PlacementPreference `json:",omitempty"` - MaxReplicas uint64 `json:",omitempty"` - - // Platforms stores all the platforms that the image can run on. - // This field is used in the platform filter for scheduling. If empty, - // then the platform filter is off, meaning there are no scheduling restrictions. - Platforms []Platform `json:",omitempty"` -} - -// PlacementPreference provides a way to make the scheduler aware of factors -// such as topology. -type PlacementPreference struct { - Spread *SpreadOver -} - -// SpreadOver is a scheduling preference that instructs the scheduler to spread -// tasks evenly over groups of nodes identified by labels. -type SpreadOver struct { - // label descriptor, such as engine.labels.az - SpreadDescriptor string -} - -// RestartPolicy represents the restart policy. -type RestartPolicy struct { - Condition RestartPolicyCondition `json:",omitempty"` - Delay *time.Duration `json:",omitempty"` - MaxAttempts *uint64 `json:",omitempty"` - Window *time.Duration `json:",omitempty"` -} - -// RestartPolicyCondition represents when to restart. 
-type RestartPolicyCondition string - -const ( - // RestartPolicyConditionNone NONE - RestartPolicyConditionNone RestartPolicyCondition = "none" - // RestartPolicyConditionOnFailure ON_FAILURE - RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure" - // RestartPolicyConditionAny ANY - RestartPolicyConditionAny RestartPolicyCondition = "any" -) - -// TaskStatus represents the status of a task. -type TaskStatus struct { - Timestamp time.Time `json:",omitempty"` - State TaskState `json:",omitempty"` - Message string `json:",omitempty"` - Err string `json:",omitempty"` - ContainerStatus *ContainerStatus `json:",omitempty"` - PortStatus PortStatus `json:",omitempty"` -} - -// ContainerStatus represents the status of a container. -type ContainerStatus struct { - ContainerID string - PID int - ExitCode int -} - -// PortStatus represents the port status of a task's host ports whose -// service has published host ports -type PortStatus struct { - Ports []PortConfig `json:",omitempty"` -} - -// VolumeAttachment contains the associating a Volume to a Task. -type VolumeAttachment struct { - // ID is the Swarmkit ID of the Volume. This is not the CSI VolumeId. - ID string `json:",omitempty"` - - // Source, together with Target, indicates the Mount, as specified in the - // ContainerSpec, that this volume fulfills. - Source string `json:",omitempty"` - - // Target, together with Source, indicates the Mount, as specified - // in the ContainerSpec, that this volume fulfills. - Target string `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp.go b/vendor/github.com/docker/docker/api/types/time/timestamp.go deleted file mode 100644 index 2a74b7a59..000000000 --- a/vendor/github.com/docker/docker/api/types/time/timestamp.go +++ /dev/null @@ -1,131 +0,0 @@ -package time // import "github.com/docker/docker/api/types/time" - -import ( - "fmt" - "math" - "strconv" - "strings" - "time" -) - -// These are additional predefined layouts for use in Time.Format and Time.Parse -// with --since and --until parameters for `docker logs` and `docker events` -const ( - rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone - rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone - dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00 - dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00 -) - -// GetTimestamp tries to parse given string as golang duration, -// then RFC3339 time and finally as a Unix timestamp. If -// any of these were successful, it returns a Unix timestamp -// as string otherwise returns the given value back. -// In case of duration input, the returned timestamp is computed -// as the given reference time minus the amount of the duration. 
-func GetTimestamp(value string, reference time.Time) (string, error) { - if d, err := time.ParseDuration(value); value != "0" && err == nil { - return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil - } - - var format string - // if the string has a Z or a + or three dashes use parse otherwise use parseinlocation - parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) - - if strings.Contains(value, ".") { - if parseInLocation { - format = rFC3339NanoLocal - } else { - format = time.RFC3339Nano - } - } else if strings.Contains(value, "T") { - // we want the number of colons in the T portion of the timestamp - tcolons := strings.Count(value, ":") - // if parseInLocation is off and we have a +/- zone offset (not Z) then - // there will be an extra colon in the input for the tz offset subtract that - // colon from the tcolons count - if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 { - tcolons-- - } - if parseInLocation { - switch tcolons { - case 0: - format = "2006-01-02T15" - case 1: - format = "2006-01-02T15:04" - default: - format = rFC3339Local - } - } else { - switch tcolons { - case 0: - format = "2006-01-02T15Z07:00" - case 1: - format = "2006-01-02T15:04Z07:00" - default: - format = time.RFC3339 - } - } - } else if parseInLocation { - format = dateLocal - } else { - format = dateWithZone - } - - var t time.Time - var err error - - if parseInLocation { - t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone())) - } else { - t, err = time.Parse(format, value) - } - - if err != nil { - // if there is a `-` then it's an RFC3339 like timestamp - if strings.Contains(value, "-") { - return "", err // was probably an RFC3339 like timestamp but the parser failed with an error - } - if _, _, err := parseTimestamp(value); err != nil { - return "", fmt.Errorf("failed to parse value as time or duration: %q", value) - } - return value, nil // unix timestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server) - } - - return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil -} - -// ParseTimestamps returns seconds and nanoseconds from a timestamp that has the -// format "%d.%09d", time.Unix(), int64(time.Nanosecond())) -// if the incoming nanosecond portion is longer or shorter than 9 digits it is -// converted to nanoseconds. The expectation is that the seconds and -// seconds will be used to create a time variable. 
For example: -// -// seconds, nanoseconds, err := ParseTimestamp("1136073600.000000001",0) -// if err == nil since := time.Unix(seconds, nanoseconds) -// -// returns seconds as def(aultSeconds) if value == "" -func ParseTimestamps(value string, def int64) (int64, int64, error) { - if value == "" { - return def, 0, nil - } - return parseTimestamp(value) -} - -func parseTimestamp(value string) (int64, int64, error) { - sa := strings.SplitN(value, ".", 2) - s, err := strconv.ParseInt(sa[0], 10, 64) - if err != nil { - return s, 0, err - } - if len(sa) != 2 { - return s, 0, nil - } - n, err := strconv.ParseInt(sa[1], 10, 64) - if err != nil { - return s, n, err - } - // should already be in nanoseconds but just in case convert n to nanoseconds - n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1])))) - return s, n, nil -} diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go deleted file mode 100644 index 036405299..000000000 --- a/vendor/github.com/docker/docker/api/types/types.go +++ /dev/null @@ -1,809 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -import ( - "errors" - "fmt" - "io" - "os" - "strings" - "time" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/api/types/volume" - "github.com/docker/go-connections/nat" -) - -const ( - // MediaTypeRawStream is vendor specific MIME-Type set for raw TTY streams - MediaTypeRawStream = "application/vnd.docker.raw-stream" - - // MediaTypeMultiplexedStream is vendor specific MIME-Type set for stdin/stdout/stderr multiplexed streams - MediaTypeMultiplexedStream = "application/vnd.docker.multiplexed-stream" -) - -// RootFS returns Image's RootFS description including the layer IDs. -type RootFS struct { - Type string `json:",omitempty"` - Layers []string `json:",omitempty"` -} - -// ImageInspect contains response of Engine API: -// GET "/images/{name:.*}/json" -type ImageInspect struct { - // ID is the content-addressable ID of an image. - // - // This identifier is a content-addressable digest calculated from the - // image's configuration (which includes the digests of layers used by - // the image). - // - // Note that this digest differs from the `RepoDigests` below, which - // holds digests of image manifests that reference the image. - ID string `json:"Id"` - - // RepoTags is a list of image names/tags in the local image cache that - // reference this image. - // - // Multiple image tags can refer to the same image, and this list may be - // empty if no tags reference the image, in which case the image is - // "untagged", in which case it can still be referenced by its ID. - RepoTags []string - - // RepoDigests is a list of content-addressable digests of locally available - // image manifests that the image is referenced from. Multiple manifests can - // refer to the same image. - // - // These digests are usually only available if the image was either pulled - // from a registry, or if the image was pushed to a registry, which is when - // the manifest is generated and its digest calculated. - RepoDigests []string - - // Parent is the ID of the parent image. 
- // - // Depending on how the image was created, this field may be empty and - // is only set for images that were built/created locally. This field - // is empty if the image was pulled from an image registry. - Parent string - - // Comment is an optional message that can be set when committing or - // importing the image. - Comment string - - // Created is the date and time at which the image was created, formatted in - // RFC 3339 nano-seconds (time.RFC3339Nano). - Created string - - // Container is the ID of the container that was used to create the image. - // - // Depending on how the image was created, this field may be empty. - Container string - - // ContainerConfig is an optional field containing the configuration of the - // container that was last committed when creating the image. - // - // Previous versions of Docker builder used this field to store build cache, - // and it is not in active use anymore. - ContainerConfig *container.Config - - // DockerVersion is the version of Docker that was used to build the image. - // - // Depending on how the image was created, this field may be empty. - DockerVersion string - - // Author is the name of the author that was specified when committing the - // image, or as specified through MAINTAINER (deprecated) in the Dockerfile. - Author string - Config *container.Config - - // Architecture is the hardware CPU architecture that the image runs on. - Architecture string - - // Variant is the CPU architecture variant (presently ARM-only). - Variant string `json:",omitempty"` - - // OS is the Operating System the image is built to run on. - Os string - - // OsVersion is the version of the Operating System the image is built to - // run on (especially for Windows). - OsVersion string `json:",omitempty"` - - // Size is the total size of the image including all layers it is composed of. - Size int64 - - // VirtualSize is the total size of the image including all layers it is - // composed of. - // - // In versions of Docker before v1.10, this field was calculated from - // the image itself and all of its parent images. Docker v1.10 and up - // store images self-contained, and no longer use a parent-chain, making - // this field an equivalent of the Size field. - // - // This field is kept for backward compatibility, but may be removed in - // a future version of the API. - VirtualSize int64 // TODO(thaJeztah): deprecate this field - - // GraphDriver holds information about the storage driver used to store the - // container's and image's filesystem. - GraphDriver GraphDriverData - - // RootFS contains information about the image's RootFS, including the - // layer IDs. - RootFS RootFS - - // Metadata of the image in the local cache. - // - // This information is local to the daemon, and not part of the image itself. - Metadata ImageMetadata -} - -// ImageMetadata contains engine-local data about the image -type ImageMetadata struct { - // LastTagTime is the date and time at which the image was last tagged. 
- LastTagTime time.Time `json:",omitempty"` -} - -// Container contains response of Engine API: -// GET "/containers/json" -type Container struct { - ID string `json:"Id"` - Names []string - Image string - ImageID string - Command string - Created int64 - Ports []Port - SizeRw int64 `json:",omitempty"` - SizeRootFs int64 `json:",omitempty"` - Labels map[string]string - State string - Status string - HostConfig struct { - NetworkMode string `json:",omitempty"` - } - NetworkSettings *SummaryNetworkSettings - Mounts []MountPoint -} - -// CopyConfig contains request body of Engine API: -// POST "/containers/"+containerID+"/copy" -type CopyConfig struct { - Resource string -} - -// ContainerPathStat is used to encode the header from -// GET "/containers/{name:.*}/archive" -// "Name" is the file or directory name. -type ContainerPathStat struct { - Name string `json:"name"` - Size int64 `json:"size"` - Mode os.FileMode `json:"mode"` - Mtime time.Time `json:"mtime"` - LinkTarget string `json:"linkTarget"` -} - -// ContainerStats contains response of Engine API: -// GET "/stats" -type ContainerStats struct { - Body io.ReadCloser `json:"body"` - OSType string `json:"ostype"` -} - -// Ping contains response of Engine API: -// GET "/_ping" -type Ping struct { - APIVersion string - OSType string - Experimental bool - BuilderVersion BuilderVersion - - // SwarmStatus provides information about the current swarm status of the - // engine, obtained from the "Swarm" header in the API response. - // - // It can be a nil struct if the API version does not provide this header - // in the ping response, or if an error occurred, in which case the client - // should use other ways to get the current swarm status, such as the /swarm - // endpoint. - SwarmStatus *swarm.Status -} - -// ComponentVersion describes the version information for a specific component. -type ComponentVersion struct { - Name string - Version string - Details map[string]string `json:",omitempty"` -} - -// Version contains response of Engine API: -// GET "/version" -type Version struct { - Platform struct{ Name string } `json:",omitempty"` - Components []ComponentVersion `json:",omitempty"` - - // The following fields are deprecated, they relate to the Engine component and are kept for backwards compatibility - - Version string - APIVersion string `json:"ApiVersion"` - MinAPIVersion string `json:"MinAPIVersion,omitempty"` - GitCommit string - GoVersion string - Os string - Arch string - KernelVersion string `json:",omitempty"` - Experimental bool `json:",omitempty"` - BuildTime string `json:",omitempty"` -} - -// Commit holds the Git-commit (SHA1) that a binary was built from, as reported -// in the version-string of external tools, such as containerd, or runC. -type Commit struct { - ID string // ID is the actual commit ID of external tool. - Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time. -} - -// Info contains response of Engine API: -// GET "/info" -type Info struct { - ID string - Containers int - ContainersRunning int - ContainersPaused int - ContainersStopped int - Images int - Driver string - DriverStatus [][2]string - SystemStatus [][2]string `json:",omitempty"` // SystemStatus is only propagated by the Swarm standalone API - Plugins PluginsInfo - MemoryLimit bool - SwapLimit bool - KernelMemory bool `json:",omitempty"` // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes - KernelMemoryTCP bool `json:",omitempty"` // KernelMemoryTCP is not supported on cgroups v2. 
- CPUCfsPeriod bool `json:"CpuCfsPeriod"` - CPUCfsQuota bool `json:"CpuCfsQuota"` - CPUShares bool - CPUSet bool - PidsLimit bool - IPv4Forwarding bool - BridgeNfIptables bool - BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` - Debug bool - NFd int - OomKillDisable bool - NGoroutines int - SystemTime string - LoggingDriver string - CgroupDriver string - CgroupVersion string `json:",omitempty"` - NEventsListener int - KernelVersion string - OperatingSystem string - OSVersion string - OSType string - Architecture string - IndexServerAddress string - RegistryConfig *registry.ServiceConfig - NCPU int - MemTotal int64 - GenericResources []swarm.GenericResource - DockerRootDir string - HTTPProxy string `json:"HttpProxy"` - HTTPSProxy string `json:"HttpsProxy"` - NoProxy string - Name string - Labels []string - ExperimentalBuild bool - ServerVersion string - ClusterStore string `json:",omitempty"` // Deprecated: host-discovery and overlay networks with external k/v stores are deprecated - ClusterAdvertise string `json:",omitempty"` // Deprecated: host-discovery and overlay networks with external k/v stores are deprecated - Runtimes map[string]Runtime - DefaultRuntime string - Swarm swarm.Info - // LiveRestoreEnabled determines whether containers should be kept - // running when the daemon is shutdown or upon daemon start if - // running containers are detected - LiveRestoreEnabled bool - Isolation container.Isolation - InitBinary string - ContainerdCommit Commit - RuncCommit Commit - InitCommit Commit - SecurityOptions []string - ProductLicense string `json:",omitempty"` - DefaultAddressPools []NetworkAddressPool `json:",omitempty"` - - // Warnings contains a slice of warnings that occurred while collecting - // system information. These warnings are intended to be informational - // messages for the user, and are not intended to be parsed / used for - // other purposes, as they do not have a fixed format. - Warnings []string -} - -// KeyValue holds a key/value pair -type KeyValue struct { - Key, Value string -} - -// NetworkAddressPool is a temp struct used by Info struct -type NetworkAddressPool struct { - Base string - Size int -} - -// SecurityOpt contains the name and options of a security option -type SecurityOpt struct { - Name string - Options []KeyValue -} - -// DecodeSecurityOptions decodes a security options string slice to a type safe -// SecurityOpt -func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) { - so := []SecurityOpt{} - for _, opt := range opts { - // support output from a < 1.13 docker daemon - if !strings.Contains(opt, "=") { - so = append(so, SecurityOpt{Name: opt}) - continue - } - secopt := SecurityOpt{} - split := strings.Split(opt, ",") - for _, s := range split { - kv := strings.SplitN(s, "=", 2) - if len(kv) != 2 { - return nil, fmt.Errorf("invalid security option %q", s) - } - if kv[0] == "" || kv[1] == "" { - return nil, errors.New("invalid empty security option") - } - if kv[0] == "name" { - secopt.Name = kv[1] - continue - } - secopt.Options = append(secopt.Options, KeyValue{Key: kv[0], Value: kv[1]}) - } - so = append(so, secopt) - } - return so, nil -} - -// PluginsInfo is a temp struct holding Plugins name -// registered with docker daemon. 
It is used by Info struct -type PluginsInfo struct { - // List of Volume plugins registered - Volume []string - // List of Network plugins registered - Network []string - // List of Authorization plugins registered - Authorization []string - // List of Log plugins registered - Log []string -} - -// ExecStartCheck is a temp struct used by execStart -// Config fields is part of ExecConfig in runconfig package -type ExecStartCheck struct { - // ExecStart will first check if it's detached - Detach bool - // Check if there's a tty - Tty bool - // Terminal size [height, width], unused if Tty == false - ConsoleSize *[2]uint `json:",omitempty"` -} - -// HealthcheckResult stores information about a single run of a healthcheck probe -type HealthcheckResult struct { - Start time.Time // Start is the time this check started - End time.Time // End is the time this check ended - ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe - Output string // Output from last check -} - -// Health states -const ( - NoHealthcheck = "none" // Indicates there is no healthcheck - Starting = "starting" // Starting indicates that the container is not yet ready - Healthy = "healthy" // Healthy indicates that the container is running correctly - Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem -) - -// Health stores information about the container's healthcheck results -type Health struct { - Status string // Status is one of Starting, Healthy or Unhealthy - FailingStreak int // FailingStreak is the number of consecutive failures - Log []*HealthcheckResult // Log contains the last few results (oldest first) -} - -// ContainerState stores container's running state -// it's part of ContainerJSONBase and will return by "inspect" command -type ContainerState struct { - Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead" - Running bool - Paused bool - Restarting bool - OOMKilled bool - Dead bool - Pid int - ExitCode int - Error string - StartedAt string - FinishedAt string - Health *Health `json:",omitempty"` -} - -// ContainerNode stores information about the node that a container -// is running on. 
It's only used by the Docker Swarm standalone API -type ContainerNode struct { - ID string - IPAddress string `json:"IP"` - Addr string - Name string - Cpus int - Memory int64 - Labels map[string]string -} - -// ContainerJSONBase contains response of Engine API: -// GET "/containers/{name:.*}/json" -type ContainerJSONBase struct { - ID string `json:"Id"` - Created string - Path string - Args []string - State *ContainerState - Image string - ResolvConfPath string - HostnamePath string - HostsPath string - LogPath string - Node *ContainerNode `json:",omitempty"` // Node is only propagated by Docker Swarm standalone API - Name string - RestartCount int - Driver string - Platform string - MountLabel string - ProcessLabel string - AppArmorProfile string - ExecIDs []string - HostConfig *container.HostConfig - GraphDriver GraphDriverData - SizeRw *int64 `json:",omitempty"` - SizeRootFs *int64 `json:",omitempty"` -} - -// ContainerJSON is newly used struct along with MountPoint -type ContainerJSON struct { - *ContainerJSONBase - Mounts []MountPoint - Config *container.Config - NetworkSettings *NetworkSettings -} - -// NetworkSettings exposes the network settings in the api -type NetworkSettings struct { - NetworkSettingsBase - DefaultNetworkSettings - Networks map[string]*network.EndpointSettings -} - -// SummaryNetworkSettings provides a summary of container's networks -// in /containers/json -type SummaryNetworkSettings struct { - Networks map[string]*network.EndpointSettings -} - -// NetworkSettingsBase holds basic information about networks -type NetworkSettingsBase struct { - Bridge string // Bridge is the Bridge name the network uses(e.g. `docker0`) - SandboxID string // SandboxID uniquely represents a container's network stack - HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface - LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix - LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address - Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port - SandboxKey string // SandboxKey identifies the sandbox - SecondaryIPAddresses []network.Address - SecondaryIPv6Addresses []network.Address -} - -// DefaultNetworkSettings holds network information -// during the 2 release deprecation period. -// It will be removed in Docker 1.11. -type DefaultNetworkSettings struct { - EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox - Gateway string // Gateway holds the gateway address for the network - GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address - GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address - IPAddress string // IPAddress holds the IPv4 address for the network - IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address - IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 - MacAddress string // MacAddress holds the MAC address for the network -} - -// MountPoint represents a mount point configuration inside the container. -// This is used for reporting the mountpoints in use by a container. -type MountPoint struct { - // Type is the type of mount, see `Type` definitions in - // github.com/docker/docker/api/types/mount.Type - Type mount.Type `json:",omitempty"` - - // Name is the name reference to the underlying data defined by `Source` - // e.g., the volume name. 
- Name string `json:",omitempty"` - - // Source is the source location of the mount. - // - // For volumes, this contains the storage location of the volume (within - // `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains - // the source (host) part of the bind-mount. For `tmpfs` mount points, this - // field is empty. - Source string - - // Destination is the path relative to the container root (`/`) where the - // Source is mounted inside the container. - Destination string - - // Driver is the volume driver used to create the volume (if it is a volume). - Driver string `json:",omitempty"` - - // Mode is a comma separated list of options supplied by the user when - // creating the bind/volume mount. - // - // The default is platform-specific (`"z"` on Linux, empty on Windows). - Mode string - - // RW indicates whether the mount is mounted writable (read-write). - RW bool - - // Propagation describes how mounts are propagated from the host into the - // mount point, and vice-versa. Refer to the Linux kernel documentation - // for details: - // https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt - // - // This field is not used on Windows. - Propagation mount.Propagation -} - -// NetworkResource is the body of the "get network" http response message -type NetworkResource struct { - Name string // Name is the requested name of the network - ID string `json:"Id"` // ID uniquely identifies a network on a single machine - Created time.Time // Created is the time the network created - Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level) - Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) - EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 - IPAM network.IPAM // IPAM is the network's IP Address Management - Internal bool // Internal represents if the network is used internal only - Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. - Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster. - ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. - ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services. - Containers map[string]EndpointResource // Containers contains endpoints belonging to the network - Options map[string]string // Options holds the network specific options to use for when creating the network - Labels map[string]string // Labels holds metadata specific to the network being created - Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network - Services map[string]network.ServiceInfo `json:",omitempty"` -} - -// EndpointResource contains network resources allocated and used for a container in a network -type EndpointResource struct { - Name string - EndpointID string - MacAddress string - IPv4Address string - IPv6Address string -} - -// NetworkCreate is the expected body of the "create network" http request message -type NetworkCreate struct { - // Check for networks with duplicate names. - // Network is primarily keyed based on a random ID and not on the name. 
- // Network name is strictly a user-friendly alias to the network - // which is uniquely identified using ID. - // And there is no guaranteed way to check for duplicates. - // Option CheckDuplicate is there to provide a best effort checking of any networks - // which has the same name but it is not guaranteed to catch all name collisions. - CheckDuplicate bool - Driver string - Scope string - EnableIPv6 bool - IPAM *network.IPAM - Internal bool - Attachable bool - Ingress bool - ConfigOnly bool - ConfigFrom *network.ConfigReference - Options map[string]string - Labels map[string]string -} - -// NetworkCreateRequest is the request message sent to the server for network create call. -type NetworkCreateRequest struct { - NetworkCreate - Name string -} - -// NetworkCreateResponse is the response message sent by the server for network create call -type NetworkCreateResponse struct { - ID string `json:"Id"` - Warning string -} - -// NetworkConnect represents the data to be used to connect a container to the network -type NetworkConnect struct { - Container string - EndpointConfig *network.EndpointSettings `json:",omitempty"` -} - -// NetworkDisconnect represents the data to be used to disconnect a container from the network -type NetworkDisconnect struct { - Container string - Force bool -} - -// NetworkInspectOptions holds parameters to inspect network -type NetworkInspectOptions struct { - Scope string - Verbose bool -} - -// Checkpoint represents the details of a checkpoint -type Checkpoint struct { - Name string // Name is the name of the checkpoint -} - -// Runtime describes an OCI runtime -type Runtime struct { - Path string `json:"path"` - Args []string `json:"runtimeArgs,omitempty"` - - // This is exposed here only for internal use - // It is not currently supported to specify custom shim configs - Shim *ShimConfig `json:"-"` -} - -// ShimConfig is used by runtime to configure containerd shims -type ShimConfig struct { - Binary string - Opts interface{} -} - -// DiskUsageObject represents an object type used for disk usage query filtering. -type DiskUsageObject string - -const ( - // ContainerObject represents a container DiskUsageObject. - ContainerObject DiskUsageObject = "container" - // ImageObject represents an image DiskUsageObject. - ImageObject DiskUsageObject = "image" - // VolumeObject represents a volume DiskUsageObject. - VolumeObject DiskUsageObject = "volume" - // BuildCacheObject represents a build-cache DiskUsageObject. - BuildCacheObject DiskUsageObject = "build-cache" -) - -// DiskUsageOptions holds parameters for system disk usage query. -type DiskUsageOptions struct { - // Types specifies what object types to include in the response. If empty, - // all object types are returned. - Types []DiskUsageObject -} - -// DiskUsage contains response of Engine API: -// GET "/system/df" -type DiskUsage struct { - LayersSize int64 - Images []*ImageSummary - Containers []*Container - Volumes []*volume.Volume - BuildCache []*BuildCache - BuilderSize int64 `json:",omitempty"` // Deprecated: deprecated in API 1.38, and no longer used since API 1.40. 
-} - -// ContainersPruneReport contains the response for Engine API: -// POST "/containers/prune" -type ContainersPruneReport struct { - ContainersDeleted []string - SpaceReclaimed uint64 -} - -// VolumesPruneReport contains the response for Engine API: -// POST "/volumes/prune" -type VolumesPruneReport struct { - VolumesDeleted []string - SpaceReclaimed uint64 -} - -// ImagesPruneReport contains the response for Engine API: -// POST "/images/prune" -type ImagesPruneReport struct { - ImagesDeleted []ImageDeleteResponseItem - SpaceReclaimed uint64 -} - -// BuildCachePruneReport contains the response for Engine API: -// POST "/build/prune" -type BuildCachePruneReport struct { - CachesDeleted []string - SpaceReclaimed uint64 -} - -// NetworksPruneReport contains the response for Engine API: -// POST "/networks/prune" -type NetworksPruneReport struct { - NetworksDeleted []string -} - -// SecretCreateResponse contains the information returned to a client -// on the creation of a new secret. -type SecretCreateResponse struct { - // ID is the id of the created secret. - ID string -} - -// SecretListOptions holds parameters to list secrets -type SecretListOptions struct { - Filters filters.Args -} - -// ConfigCreateResponse contains the information returned to a client -// on the creation of a new config. -type ConfigCreateResponse struct { - // ID is the id of the created config. - ID string -} - -// ConfigListOptions holds parameters to list configs -type ConfigListOptions struct { - Filters filters.Args -} - -// PushResult contains the tag, manifest digest, and manifest size from the -// push. It's used to signal this information to the trust code in the client -// so it can sign the manifest if necessary. -type PushResult struct { - Tag string - Digest string - Size int -} - -// BuildResult contains the image id of a successful build -type BuildResult struct { - ID string -} - -// BuildCache contains information about a build cache record. -type BuildCache struct { - // ID is the unique ID of the build cache record. - ID string - // Parent is the ID of the parent build cache record. - // - // Deprecated: deprecated in API v1.42 and up, as it was deprecated in BuildKit; use Parents instead. - Parent string `json:"Parent,omitempty"` - // Parents is the list of parent build cache record IDs. - Parents []string `json:" Parents,omitempty"` - // Type is the cache record type. - Type string - // Description is a description of the build-step that produced the build cache. - Description string - // InUse indicates if the build cache is in use. - InUse bool - // Shared indicates if the build cache is shared. - Shared bool - // Size is the amount of disk space used by the build cache (in bytes). - Size int64 - // CreatedAt is the date and time at which the build cache was created. - CreatedAt time.Time - // LastUsedAt is the date and time at which the build cache was last used. - LastUsedAt *time.Time - UsageCount int -} - -// BuildCachePruneOptions hold parameters to prune the build cache -type BuildCachePruneOptions struct { - All bool - KeepStorage int64 - Filters filters.Args -} diff --git a/vendor/github.com/docker/docker/api/types/versions/README.md b/vendor/github.com/docker/docker/api/types/versions/README.md deleted file mode 100644 index 1ef911edb..000000000 --- a/vendor/github.com/docker/docker/api/types/versions/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# Legacy API type versions - -This package includes types for legacy API versions. 
The stable version of the API types live in `api/types/*.go`. - -Consider moving a type here when you need to keep backwards compatibility in the API. This legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`. - -## Package name conventions - -The package name convention is to use `v` as a prefix for the version number and `p`(patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention: - -1. We cannot use `.` because it's interpreted by the language, think of `v1.20.CallFunction`. -2. We cannot use `_` because golint complains about it. The code is actually valid, but it looks probably more weird: `v1_20.CallFunction`. - -For instance, if you want to modify a type that was available in the version `1.21` of the API but it will have different fields in the version `1.22`, you want to create a new package under `api/types/versions/v1p21`. diff --git a/vendor/github.com/docker/docker/api/types/versions/compare.go b/vendor/github.com/docker/docker/api/types/versions/compare.go deleted file mode 100644 index 489e917ee..000000000 --- a/vendor/github.com/docker/docker/api/types/versions/compare.go +++ /dev/null @@ -1,65 +0,0 @@ -package versions // import "github.com/docker/docker/api/types/versions" - -import ( - "strconv" - "strings" -) - -// compare compares two version strings -// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise. -func compare(v1, v2 string) int { - if v1 == v2 { - return 0 - } - var ( - currTab = strings.Split(v1, ".") - otherTab = strings.Split(v2, ".") - ) - - max := len(currTab) - if len(otherTab) > max { - max = len(otherTab) - } - for i := 0; i < max; i++ { - var currInt, otherInt int - - if len(currTab) > i { - currInt, _ = strconv.Atoi(currTab[i]) - } - if len(otherTab) > i { - otherInt, _ = strconv.Atoi(otherTab[i]) - } - if currInt > otherInt { - return 1 - } - if otherInt > currInt { - return -1 - } - } - return 0 -} - -// LessThan checks if a version is less than another -func LessThan(v, other string) bool { - return compare(v, other) == -1 -} - -// LessThanOrEqualTo checks if a version is less than or equal to another -func LessThanOrEqualTo(v, other string) bool { - return compare(v, other) <= 0 -} - -// GreaterThan checks if a version is greater than another -func GreaterThan(v, other string) bool { - return compare(v, other) == 1 -} - -// GreaterThanOrEqualTo checks if a version is greater than or equal to another -func GreaterThanOrEqualTo(v, other string) bool { - return compare(v, other) >= 0 -} - -// Equal checks if a version is equal to another -func Equal(v, other string) bool { - return compare(v, other) == 0 -} diff --git a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go deleted file mode 100644 index 55fc5d389..000000000 --- a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go +++ /dev/null @@ -1,420 +0,0 @@ -package volume - -import ( - "github.com/docker/docker/api/types/swarm" -) - -// ClusterVolume contains options and information specific to, and only present -// on, Swarm CSI cluster volumes. -type ClusterVolume struct { - // ID is the Swarm ID of the volume. 
Because cluster volumes are Swarm - // objects, they have an ID, unlike non-cluster volumes, which only have a - // Name. This ID can be used to refer to the cluster volume. - ID string - - // Meta is the swarm metadata about this volume. - swarm.Meta - - // Spec is the cluster-specific options from which this volume is derived. - Spec ClusterVolumeSpec - - // PublishStatus contains the status of the volume as it pertains to its - // publishing on Nodes. - PublishStatus []*PublishStatus `json:",omitempty"` - - // Info is information about the global status of the volume. - Info *Info `json:",omitempty"` -} - -// ClusterVolumeSpec contains the spec used to create this volume. -type ClusterVolumeSpec struct { - // Group defines the volume group of this volume. Volumes belonging to the - // same group can be referred to by group name when creating Services. - // Referring to a volume by group instructs swarm to treat volumes in that - // group interchangeably for the purpose of scheduling. Volumes with an - // empty string for a group technically all belong to the same, emptystring - // group. - Group string `json:",omitempty"` - - // AccessMode defines how the volume is used by tasks. - AccessMode *AccessMode `json:",omitempty"` - - // AccessibilityRequirements specifies where in the cluster a volume must - // be accessible from. - // - // This field must be empty if the plugin does not support - // VOLUME_ACCESSIBILITY_CONSTRAINTS capabilities. If it is present but the - // plugin does not support it, volume will not be created. - // - // If AccessibilityRequirements is empty, but the plugin does support - // VOLUME_ACCESSIBILITY_CONSTRAINTS, then Swarmkit will assume the entire - // cluster is a valid target for the volume. - AccessibilityRequirements *TopologyRequirement `json:",omitempty"` - - // CapacityRange defines the desired capacity that the volume should be - // created with. If nil, the plugin will decide the capacity. - CapacityRange *CapacityRange `json:",omitempty"` - - // Secrets defines Swarm Secrets that are passed to the CSI storage plugin - // when operating on this volume. - Secrets []Secret `json:",omitempty"` - - // Availability is the Volume's desired availability. Analogous to Node - // Availability, this allows the user to take volumes offline in order to - // update or delete them. - Availability Availability `json:",omitempty"` -} - -// Availability specifies the availability of the volume. -type Availability string - -const ( - // AvailabilityActive indicates that the volume is active and fully - // schedulable on the cluster. - AvailabilityActive Availability = "active" - - // AvailabilityPause indicates that no new workloads should use the - // volume, but existing workloads can continue to use it. - AvailabilityPause Availability = "pause" - - // AvailabilityDrain indicates that all workloads using this volume - // should be rescheduled, and the volume unpublished from all nodes. - AvailabilityDrain Availability = "drain" -) - -// AccessMode defines the access mode of a volume. -type AccessMode struct { - // Scope defines the set of nodes this volume can be used on at one time. - Scope Scope `json:",omitempty"` - - // Sharing defines the number and way that different tasks can use this - // volume at one time. - Sharing SharingMode `json:",omitempty"` - - // MountVolume defines options for using this volume as a Mount-type - // volume. - // - // Either BlockVolume or MountVolume, but not both, must be present. 
- MountVolume *TypeMount `json:",omitempty"` - - // BlockVolume defines options for using this volume as a Block-type - // volume. - // - // Either BlockVolume or MountVolume, but not both, must be present. - BlockVolume *TypeBlock `json:",omitempty"` -} - -// Scope defines the Scope of a Cluster Volume. This is how many nodes a -// Volume can be accessed simultaneously on. -type Scope string - -const ( - // ScopeSingleNode indicates the volume can be used on one node at a - // time. - ScopeSingleNode Scope = "single" - - // ScopeMultiNode indicates the volume can be used on many nodes at - // the same time. - ScopeMultiNode Scope = "multi" -) - -// SharingMode defines the Sharing of a Cluster Volume. This is how Tasks using a -// Volume at the same time can use it. -type SharingMode string - -const ( - // SharingNone indicates that only one Task may use the Volume at a - // time. - SharingNone SharingMode = "none" - - // SharingReadOnly indicates that the Volume may be shared by any - // number of Tasks, but they must be read-only. - SharingReadOnly SharingMode = "readonly" - - // SharingOneWriter indicates that the Volume may be shared by any - // number of Tasks, but all after the first must be read-only. - SharingOneWriter SharingMode = "onewriter" - - // SharingAll means that the Volume may be shared by any number of - // Tasks, as readers or writers. - SharingAll SharingMode = "all" -) - -// TypeBlock defines options for using a volume as a block-type volume. -// -// Intentionally empty. -type TypeBlock struct{} - -// TypeMount contains options for using a volume as a Mount-type -// volume. -type TypeMount struct { - // FsType specifies the filesystem type for the mount volume. Optional. - FsType string `json:",omitempty"` - - // MountFlags defines flags to pass when mounting the volume. Optional. - MountFlags []string `json:",omitempty"` -} - -// TopologyRequirement expresses the user's requirements for a volume's -// accessible topology. -type TopologyRequirement struct { - // Requisite specifies a list of Topologies, at least one of which the - // volume must be accessible from. - // - // Taken verbatim from the CSI Spec: - // - // Specifies the list of topologies the provisioned volume MUST be - // accessible from. - // This field is OPTIONAL. If TopologyRequirement is specified either - // requisite or preferred or both MUST be specified. - // - // If requisite is specified, the provisioned volume MUST be - // accessible from at least one of the requisite topologies. - // - // Given - // x = number of topologies provisioned volume is accessible from - // n = number of requisite topologies - // The CO MUST ensure n >= 1. The SP MUST ensure x >= 1 - // If x==n, then the SP MUST make the provisioned volume available to - // all topologies from the list of requisite topologies. If it is - // unable to do so, the SP MUST fail the CreateVolume call. - // For example, if a volume should be accessible from a single zone, - // and requisite = - // {"region": "R1", "zone": "Z2"} - // then the provisioned volume MUST be accessible from the "region" - // "R1" and the "zone" "Z2". - // Similarly, if a volume should be accessible from two zones, and - // requisite = - // {"region": "R1", "zone": "Z2"}, - // {"region": "R1", "zone": "Z3"} - // then the provisioned volume MUST be accessible from the "region" - // "R1" and both "zone" "Z2" and "zone" "Z3". 
- //
- // If x>n, then the SP MUST make the provisioned volume available from
- // all topologies from the list of requisite topologies and MAY choose
- // the remaining x-n unique topologies from the list of all possible
- // topologies. If it is unable to do so, the SP MUST fail the
- // CreateVolume call.
- // For example, if a volume should be accessible from two zones, and
- // requisite =
- //   {"region": "R1", "zone": "Z2"}
- // then the provisioned volume MUST be accessible from the "region"
- // "R1" and the "zone" "Z2" and the SP may select the second zone
- // independently, e.g. "R1/Z4".
- Requisite []Topology `json:",omitempty"`
-
- // Preferred is a list of Topologies that the volume should attempt to be
- // provisioned in.
- //
- // Taken from the CSI spec:
- //
- // Specifies the list of topologies the CO would prefer the volume to
- // be provisioned in.
- //
- // This field is OPTIONAL. If TopologyRequirement is specified either
- // requisite or preferred or both MUST be specified.
- //
- // An SP MUST attempt to make the provisioned volume available using
- // the preferred topologies in order from first to last.
- //
- // If requisite is specified, all topologies in preferred list MUST
- // also be present in the list of requisite topologies.
- //
- // If the SP is unable to to make the provisioned volume available
- // from any of the preferred topologies, the SP MAY choose a topology
- // from the list of requisite topologies.
- // If the list of requisite topologies is not specified, then the SP
- // MAY choose from the list of all possible topologies.
- // If the list of requisite topologies is specified and the SP is
- // unable to to make the provisioned volume available from any of the
- // requisite topologies it MUST fail the CreateVolume call.
- //
- // Example 1:
- // Given a volume should be accessible from a single zone, and
- // requisite =
- //   {"region": "R1", "zone": "Z2"},
- //   {"region": "R1", "zone": "Z3"}
- // preferred =
- //   {"region": "R1", "zone": "Z3"}
- // then the the SP SHOULD first attempt to make the provisioned volume
- // available from "zone" "Z3" in the "region" "R1" and fall back to
- // "zone" "Z2" in the "region" "R1" if that is not possible.
- //
- // Example 2:
- // Given a volume should be accessible from a single zone, and
- // requisite =
- //   {"region": "R1", "zone": "Z2"},
- //   {"region": "R1", "zone": "Z3"},
- //   {"region": "R1", "zone": "Z4"},
- //   {"region": "R1", "zone": "Z5"}
- // preferred =
- //   {"region": "R1", "zone": "Z4"},
- //   {"region": "R1", "zone": "Z2"}
- // then the the SP SHOULD first attempt to make the provisioned volume
- // accessible from "zone" "Z4" in the "region" "R1" and fall back to
- // "zone" "Z2" in the "region" "R1" if that is not possible. If that
- // is not possible, the SP may choose between either the "zone"
- // "Z3" or "Z5" in the "region" "R1".
- // - // Example 3: - // Given a volume should be accessible from TWO zones (because an - // opaque parameter in CreateVolumeRequest, for example, specifies - // the volume is accessible from two zones, aka synchronously - // replicated), and - // requisite = - // {"region": "R1", "zone": "Z2"}, - // {"region": "R1", "zone": "Z3"}, - // {"region": "R1", "zone": "Z4"}, - // {"region": "R1", "zone": "Z5"} - // preferred = - // {"region": "R1", "zone": "Z5"}, - // {"region": "R1", "zone": "Z3"} - // then the the SP SHOULD first attempt to make the provisioned volume - // accessible from the combination of the two "zones" "Z5" and "Z3" in - // the "region" "R1". If that's not possible, it should fall back to - // a combination of "Z5" and other possibilities from the list of - // requisite. If that's not possible, it should fall back to a - // combination of "Z3" and other possibilities from the list of - // requisite. If that's not possible, it should fall back to a - // combination of other possibilities from the list of requisite. - Preferred []Topology `json:",omitempty"` -} - -// Topology is a map of topological domains to topological segments. -// -// This description is taken verbatim from the CSI Spec: -// -// A topological domain is a sub-division of a cluster, like "region", -// "zone", "rack", etc. -// A topological segment is a specific instance of a topological domain, -// like "zone3", "rack3", etc. -// For example {"com.company/zone": "Z1", "com.company/rack": "R3"} -// Valid keys have two segments: an OPTIONAL prefix and name, separated -// by a slash (/), for example: "com.company.example/zone". -// The key name segment is REQUIRED. The prefix is OPTIONAL. -// The key name MUST be 63 characters or less, begin and end with an -// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), -// underscores (_), dots (.), or alphanumerics in between, for example -// "zone". -// The key prefix MUST be 63 characters or less, begin and end with a -// lower-case alphanumeric character ([a-z0-9]), contain only -// dashes (-), dots (.), or lower-case alphanumerics in between, and -// follow domain name notation format -// (https://tools.ietf.org/html/rfc1035#section-2.3.1). -// The key prefix SHOULD include the plugin's host company name and/or -// the plugin name, to minimize the possibility of collisions with keys -// from other plugins. -// If a key prefix is specified, it MUST be identical across all -// topology keys returned by the SP (across all RPCs). -// Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone" -// MUST not both exist. -// Each value (topological segment) MUST contain 1 or more strings. -// Each string MUST be 63 characters or less and begin and end with an -// alphanumeric character with '-', '_', '.', or alphanumerics in -// between. -type Topology struct { - Segments map[string]string `json:",omitempty"` -} - -// CapacityRange describes the minimum and maximum capacity a volume should be -// created with -type CapacityRange struct { - // RequiredBytes specifies that a volume must be at least this big. The - // value of 0 indicates an unspecified minimum. - RequiredBytes int64 - - // LimitBytes specifies that a volume must not be bigger than this. The - // value of 0 indicates an unspecified maximum - LimitBytes int64 -} - -// Secret represents a Swarm Secret value that must be passed to the CSI -// storage plugin when operating on this Volume. It represents one key-value -// pair of possibly many. 
-type Secret struct { - // Key is the name of the key of the key-value pair passed to the plugin. - Key string - - // Secret is the swarm Secret object from which to read data. This can be a - // Secret name or ID. The Secret data is retrieved by Swarm and used as the - // value of the key-value pair passed to the plugin. - Secret string -} - -// PublishState represents the state of a Volume as it pertains to its -// use on a particular Node. -type PublishState string - -const ( - // StatePending indicates that the volume should be published on - // this node, but the call to ControllerPublishVolume has not been - // successfully completed yet and the result recorded by swarmkit. - StatePending PublishState = "pending-publish" - - // StatePublished means the volume is published successfully to the node. - StatePublished PublishState = "published" - - // StatePendingNodeUnpublish indicates that the Volume should be - // unpublished on the Node, and we're waiting for confirmation that it has - // done so. After the Node has confirmed that the Volume has been - // unpublished, the state will move to StatePendingUnpublish. - StatePendingNodeUnpublish PublishState = "pending-node-unpublish" - - // StatePendingUnpublish means the volume is still published to the node - // by the controller, awaiting the operation to unpublish it. - StatePendingUnpublish PublishState = "pending-controller-unpublish" -) - -// PublishStatus represents the status of the volume as published to an -// individual node -type PublishStatus struct { - // NodeID is the ID of the swarm node this Volume is published to. - NodeID string `json:",omitempty"` - - // State is the publish state of the volume. - State PublishState `json:",omitempty"` - - // PublishContext is the PublishContext returned by the CSI plugin when - // a volume is published. - PublishContext map[string]string `json:",omitempty"` -} - -// Info contains information about the Volume as a whole as provided by -// the CSI storage plugin. -type Info struct { - // CapacityBytes is the capacity of the volume in bytes. A value of 0 - // indicates that the capacity is unknown. - CapacityBytes int64 `json:",omitempty"` - - // VolumeContext is the context originating from the CSI storage plugin - // when the Volume is created. - VolumeContext map[string]string `json:",omitempty"` - - // VolumeID is the ID of the Volume as seen by the CSI storage plugin. This - // is distinct from the Volume's Swarm ID, which is the ID used by all of - // the Docker Engine to refer to the Volume. If this field is blank, then - // the Volume has not been successfully created yet. - VolumeID string `json:",omitempty"` - - // AccessibleTopolgoy is the topology this volume is actually accessible - // from. - AccessibleTopology []Topology `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/volume/create_options.go b/vendor/github.com/docker/docker/api/types/volume/create_options.go deleted file mode 100644 index 37c41a609..000000000 --- a/vendor/github.com/docker/docker/api/types/volume/create_options.go +++ /dev/null @@ -1,29 +0,0 @@ -package volume - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// CreateOptions VolumeConfig -// -// Volume configuration -// swagger:model CreateOptions -type CreateOptions struct { - - // cluster volume spec - ClusterVolumeSpec *ClusterVolumeSpec `json:"ClusterVolumeSpec,omitempty"` - - // Name of the volume driver to use. 
- Driver string `json:"Driver,omitempty"` - - // A mapping of driver options and values. These options are - // passed directly to the driver and are driver specific. - // - DriverOpts map[string]string `json:"DriverOpts,omitempty"` - - // User-defined key/value metadata. - Labels map[string]string `json:"Labels,omitempty"` - - // The new volume's name. If not specified, Docker generates a name. - // - Name string `json:"Name,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/volume/deprecated.go b/vendor/github.com/docker/docker/api/types/volume/deprecated.go deleted file mode 100644 index ab622d8cc..000000000 --- a/vendor/github.com/docker/docker/api/types/volume/deprecated.go +++ /dev/null @@ -1,11 +0,0 @@ -package volume // import "github.com/docker/docker/api/types/volume" - -// VolumeCreateBody Volume configuration -// -// Deprecated: use CreateOptions -type VolumeCreateBody = CreateOptions - -// VolumeListOKBody Volume list response -// -// Deprecated: use ListResponse -type VolumeListOKBody = ListResponse diff --git a/vendor/github.com/docker/docker/api/types/volume/list_response.go b/vendor/github.com/docker/docker/api/types/volume/list_response.go deleted file mode 100644 index ca5192a2a..000000000 --- a/vendor/github.com/docker/docker/api/types/volume/list_response.go +++ /dev/null @@ -1,18 +0,0 @@ -package volume - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ListResponse VolumeListResponse -// -// Volume list response -// swagger:model ListResponse -type ListResponse struct { - - // List of volumes - Volumes []*Volume `json:"Volumes"` - - // Warnings that occurred when fetching the list of volumes. - // - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/volume/options.go b/vendor/github.com/docker/docker/api/types/volume/options.go deleted file mode 100644 index 8b0dd1389..000000000 --- a/vendor/github.com/docker/docker/api/types/volume/options.go +++ /dev/null @@ -1,8 +0,0 @@ -package volume // import "github.com/docker/docker/api/types/volume" - -import "github.com/docker/docker/api/types/filters" - -// ListOptions holds parameters to list volumes. -type ListOptions struct { - Filters filters.Args -} diff --git a/vendor/github.com/docker/docker/api/types/volume/volume.go b/vendor/github.com/docker/docker/api/types/volume/volume.go deleted file mode 100644 index ea7d555e5..000000000 --- a/vendor/github.com/docker/docker/api/types/volume/volume.go +++ /dev/null @@ -1,75 +0,0 @@ -package volume - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// Volume volume -// swagger:model Volume -type Volume struct { - - // cluster volume - ClusterVolume *ClusterVolume `json:"ClusterVolume,omitempty"` - - // Date/Time the volume was created. - CreatedAt string `json:"CreatedAt,omitempty"` - - // Name of the volume driver used by the volume. - // Required: true - Driver string `json:"Driver"` - - // User-defined key/value metadata. - // Required: true - Labels map[string]string `json:"Labels"` - - // Mount path of the volume on the host. - // Required: true - Mountpoint string `json:"Mountpoint"` - - // Name of the volume. - // Required: true - Name string `json:"Name"` - - // The driver specific options used when creating the volume. 
- // - // Required: true - Options map[string]string `json:"Options"` - - // The level at which the volume exists. Either `global` for cluster-wide, - // or `local` for machine level. - // - // Required: true - Scope string `json:"Scope"` - - // Low-level details about the volume, provided by the volume driver. - // Details are returned as a map with key/value pairs: - // `{"key":"value","key2":"value2"}`. - // - // The `Status` field is optional, and is omitted if the volume driver - // does not support this feature. - // - Status map[string]interface{} `json:"Status,omitempty"` - - // usage data - UsageData *UsageData `json:"UsageData,omitempty"` -} - -// UsageData Usage details about the volume. This information is used by the -// `GET /system/df` endpoint, and omitted in other endpoints. -// -// swagger:model UsageData -type UsageData struct { - - // The number of containers referencing this volume. This field - // is set to `-1` if the reference-count is not available. - // - // Required: true - RefCount int64 `json:"RefCount"` - - // Amount of disk space used by the volume (in bytes). This information - // is only available for volumes created with the `"local"` volume - // driver. For volumes created with other volume drivers, this field - // is set to `-1` ("not available") - // - // Required: true - Size int64 `json:"Size"` -} diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_update.go b/vendor/github.com/docker/docker/api/types/volume/volume_update.go deleted file mode 100644 index f958f80a6..000000000 --- a/vendor/github.com/docker/docker/api/types/volume/volume_update.go +++ /dev/null @@ -1,7 +0,0 @@ -package volume // import "github.com/docker/docker/api/types/volume" - -// UpdateOptions is configuration to update a Volume with. -type UpdateOptions struct { - // Spec is the ClusterVolumeSpec to update the volume to. - Spec *ClusterVolumeSpec `json:"Spec,omitempty"` -} diff --git a/vendor/github.com/docker/docker/client/README.md b/vendor/github.com/docker/docker/client/README.md deleted file mode 100644 index 992f18117..000000000 --- a/vendor/github.com/docker/docker/client/README.md +++ /dev/null @@ -1,35 +0,0 @@ -# Go client for the Docker Engine API - -The `docker` command uses this package to communicate with the daemon. It can also be used by your own Go applications to do anything the command-line interface does – running containers, pulling images, managing swarms, etc. - -For example, to list running containers (the equivalent of `docker ps`): - -```go -package main - -import ( - "context" - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/client" -) - -func main() { - cli, err := client.NewClientWithOpts(client.FromEnv) - if err != nil { - panic(err) - } - - containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) - if err != nil { - panic(err) - } - - for _, container := range containers { - fmt.Printf("%s %s\n", container.ID[:10], container.Image) - } -} -``` - -[Full documentation is available on GoDoc.](https://godoc.org/github.com/docker/docker/client) diff --git a/vendor/github.com/docker/docker/client/build_cancel.go b/vendor/github.com/docker/docker/client/build_cancel.go deleted file mode 100644 index b76bf366b..000000000 --- a/vendor/github.com/docker/docker/client/build_cancel.go +++ /dev/null @@ -1,16 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" -) - -// BuildCancel requests the daemon to cancel the ongoing build request. 
-func (cli *Client) BuildCancel(ctx context.Context, id string) error { - query := url.Values{} - query.Set("id", id) - - serverResp, err := cli.post(ctx, "/build/cancel", query, nil, nil) - ensureReaderClosed(serverResp) - return err -} diff --git a/vendor/github.com/docker/docker/client/build_prune.go b/vendor/github.com/docker/docker/client/build_prune.go deleted file mode 100644 index 397d67cdc..000000000 --- a/vendor/github.com/docker/docker/client/build_prune.go +++ /dev/null @@ -1,45 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/pkg/errors" -) - -// BuildCachePrune requests the daemon to delete unused cache data -func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) { - if err := cli.NewVersionError("1.31", "build prune"); err != nil { - return nil, err - } - - report := types.BuildCachePruneReport{} - - query := url.Values{} - if opts.All { - query.Set("all", "1") - } - query.Set("keep-storage", fmt.Sprintf("%d", opts.KeepStorage)) - filters, err := filters.ToJSON(opts.Filters) - if err != nil { - return nil, errors.Wrap(err, "prune could not marshal filters option") - } - query.Set("filters", filters) - - serverResp, err := cli.post(ctx, "/build/prune", query, nil, nil) - defer ensureReaderClosed(serverResp) - - if err != nil { - return nil, err - } - - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return nil, fmt.Errorf("Error retrieving disk usage: %v", err) - } - - return &report, nil -} diff --git a/vendor/github.com/docker/docker/client/checkpoint_create.go b/vendor/github.com/docker/docker/client/checkpoint_create.go deleted file mode 100644 index 921024fe4..000000000 --- a/vendor/github.com/docker/docker/client/checkpoint_create.go +++ /dev/null @@ -1,14 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - - "github.com/docker/docker/api/types" -) - -// CheckpointCreate creates a checkpoint from the given container with the given name -func (cli *Client) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error { - resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete.go b/vendor/github.com/docker/docker/client/checkpoint_delete.go deleted file mode 100644 index 54f55fa76..000000000 --- a/vendor/github.com/docker/docker/client/checkpoint_delete.go +++ /dev/null @@ -1,20 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types" -) - -// CheckpointDelete deletes the checkpoint with the given name from the given container -func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options types.CheckpointDeleteOptions) error { - query := url.Values{} - if options.CheckpointDir != "" { - query.Set("dir", options.CheckpointDir) - } - - resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go deleted file mode 100644 index 39cfb959f..000000000 --- 
a/vendor/github.com/docker/docker/client/checkpoint_list.go +++ /dev/null @@ -1,28 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" -) - -// CheckpointList returns the checkpoints of the given container in the docker host -func (cli *Client) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) { - var checkpoints []types.Checkpoint - - query := url.Values{} - if options.CheckpointDir != "" { - query.Set("dir", options.CheckpointDir) - } - - resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return checkpoints, err - } - - err = json.NewDecoder(resp.body).Decode(&checkpoints) - return checkpoints, err -} diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go deleted file mode 100644 index 09ea4851f..000000000 --- a/vendor/github.com/docker/docker/client/client.go +++ /dev/null @@ -1,321 +0,0 @@ -/* -Package client is a Go client for the Docker Engine API. - -For more information about the Engine API, see the documentation: -https://docs.docker.com/engine/api/ - -# Usage - -You use the library by constructing a client object using [NewClientWithOpts] -and calling methods on it. The client can be configured from environment -variables by passing the [FromEnv] option, or configured manually by passing any -of the other available [Opts]. - -For example, to list running containers (the equivalent of "docker ps"): - - package main - - import ( - "context" - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/client" - ) - - func main() { - cli, err := client.NewClientWithOpts(client.FromEnv) - if err != nil { - panic(err) - } - - containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) - if err != nil { - panic(err) - } - - for _, container := range containers { - fmt.Printf("%s %s\n", container.ID[:10], container.Image) - } - } -*/ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net" - "net/http" - "net/url" - "path" - "strings" - - "github.com/docker/docker/api" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/versions" - "github.com/docker/go-connections/sockets" - "github.com/pkg/errors" -) - -// ErrRedirect is the error returned by checkRedirect when the request is non-GET. -var ErrRedirect = errors.New("unexpected redirect in response") - -// Client is the API client that performs all operations -// against a docker server. -type Client struct { - // scheme sets the scheme for the client - scheme string - // host holds the server address to connect to - host string - // proto holds the client protocol i.e. unix. - proto string - // addr holds the client address. - addr string - // basePath holds the path to prepend to the requests. - basePath string - // client used to send and receive http requests. - client *http.Client - // version of the server to talk to. - version string - // custom http headers configured by users. - customHTTPHeaders map[string]string - // manualOverride is set to true when the version was set by users. - manualOverride bool - - // negotiateVersion indicates if the client should automatically negotiate - // the API version to use when making requests. 
API version negotiation is - // performed on the first request, after which negotiated is set to "true" - // so that subsequent requests do not re-negotiate. - negotiateVersion bool - - // negotiated indicates that API version negotiation took place - negotiated bool -} - -// CheckRedirect specifies the policy for dealing with redirect responses: -// If the request is non-GET return ErrRedirect, otherwise use the last response. -// -// Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308) -// in the client. The Docker client (and by extension docker API client) can be -// made to send a request like POST /containers//start where what would normally -// be in the name section of the URL is empty. This triggers an HTTP 301 from -// the daemon. -// -// In go 1.8 this 301 will be converted to a GET request, and ends up getting -// a 404 from the daemon. This behavior change manifests in the client in that -// before, the 301 was not followed and the client did not generate an error, -// but now results in a message like Error response from daemon: page not found. -func CheckRedirect(req *http.Request, via []*http.Request) error { - if via[0].Method == http.MethodGet { - return http.ErrUseLastResponse - } - return ErrRedirect -} - -// NewClientWithOpts initializes a new API client with a default HTTPClient, and -// default API host and version. It also initializes the custom HTTP headers to -// add to each request. -// -// It takes an optional list of Opt functional arguments, which are applied in -// the order they're provided, which allows modifying the defaults when creating -// the client. For example, the following initializes a client that configures -// itself with values from environment variables (client.FromEnv), and has -// automatic API version negotiation enabled (client.WithAPIVersionNegotiation()). -// -// cli, err := client.NewClientWithOpts( -// client.FromEnv, -// client.WithAPIVersionNegotiation(), -// ) -func NewClientWithOpts(ops ...Opt) (*Client, error) { - client, err := defaultHTTPClient(DefaultDockerHost) - if err != nil { - return nil, err - } - c := &Client{ - host: DefaultDockerHost, - version: api.DefaultVersion, - client: client, - proto: defaultProto, - addr: defaultAddr, - } - - for _, op := range ops { - if err := op(c); err != nil { - return nil, err - } - } - - if c.scheme == "" { - c.scheme = "http" - - tlsConfig := resolveTLSConfig(c.client.Transport) - if tlsConfig != nil { - // TODO(stevvooe): This isn't really the right way to write clients in Go. - // `NewClient` should probably only take an `*http.Client` and work from there. - // Unfortunately, the model of having a host-ish/url-thingy as the connection - // string has us confusing protocol and transport layers. We continue doing - // this to avoid breaking existing clients but this should be addressed. - c.scheme = "https" - } - } - - return c, nil -} - -func defaultHTTPClient(host string) (*http.Client, error) { - hostURL, err := ParseHostURL(host) - if err != nil { - return nil, err - } - transport := &http.Transport{} - _ = sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host) - return &http.Client{ - Transport: transport, - CheckRedirect: CheckRedirect, - }, nil -} - -// Close the transport used by the client -func (cli *Client) Close() error { - if t, ok := cli.client.Transport.(*http.Transport); ok { - t.CloseIdleConnections() - } - return nil -} - -// getAPIPath returns the versioned request path to call the api. 
-// It appends the query parameters to the path if they are not empty. -func (cli *Client) getAPIPath(ctx context.Context, p string, query url.Values) string { - var apiPath string - if cli.negotiateVersion && !cli.negotiated { - cli.NegotiateAPIVersion(ctx) - } - if cli.version != "" { - v := strings.TrimPrefix(cli.version, "v") - apiPath = path.Join(cli.basePath, "/v"+v, p) - } else { - apiPath = path.Join(cli.basePath, p) - } - return (&url.URL{Path: apiPath, RawQuery: query.Encode()}).String() -} - -// ClientVersion returns the API version used by this client. -func (cli *Client) ClientVersion() string { - return cli.version -} - -// NegotiateAPIVersion queries the API and updates the version to match the API -// version. NegotiateAPIVersion downgrades the client's API version to match the -// APIVersion if the ping version is lower than the default version. If the API -// version reported by the server is higher than the maximum version supported -// by the client, it uses the client's maximum version. -// -// If a manual override is in place, either through the "DOCKER_API_VERSION" -// (EnvOverrideAPIVersion) environment variable, or if the client is initialized -// with a fixed version (WithVersion(xx)), no negotiation is performed. -// -// If the API server's ping response does not contain an API version, or if the -// client did not get a successful ping response, it assumes it is connected with -// an old daemon that does not support API version negotiation, in which case it -// downgrades to the latest version of the API before version negotiation was -// added (1.24). -func (cli *Client) NegotiateAPIVersion(ctx context.Context) { - if !cli.manualOverride { - ping, _ := cli.Ping(ctx) - cli.negotiateAPIVersionPing(ping) - } -} - -// NegotiateAPIVersionPing downgrades the client's API version to match the -// APIVersion in the ping response. If the API version in pingResponse is higher -// than the maximum version supported by the client, it uses the client's maximum -// version. -// -// If a manual override is in place, either through the "DOCKER_API_VERSION" -// (EnvOverrideAPIVersion) environment variable, or if the client is initialized -// with a fixed version (WithVersion(xx)), no negotiation is performed. -// -// If the API server's ping response does not contain an API version, we assume -// we are connected with an old daemon without API version negotiation support, -// and downgrade to the latest version of the API before version negotiation was -// added (1.24). -func (cli *Client) NegotiateAPIVersionPing(pingResponse types.Ping) { - if !cli.manualOverride { - cli.negotiateAPIVersionPing(pingResponse) - } -} - -// negotiateAPIVersionPing queries the API and updates the version to match the -// API version from the ping response. -func (cli *Client) negotiateAPIVersionPing(pingResponse types.Ping) { - // default to the latest version before versioning headers existed - if pingResponse.APIVersion == "" { - pingResponse.APIVersion = "1.24" - } - - // if the client is not initialized with a version, start with the latest supported version - if cli.version == "" { - cli.version = api.DefaultVersion - } - - // if server version is lower than the client version, downgrade - if versions.LessThan(pingResponse.APIVersion, cli.version) { - cli.version = pingResponse.APIVersion - } - - // Store the results, so that automatic API version negotiation (if enabled) - // won't be performed on the next request. 
- if cli.negotiateVersion { - cli.negotiated = true - } -} - -// DaemonHost returns the host address used by the client -func (cli *Client) DaemonHost() string { - return cli.host -} - -// HTTPClient returns a copy of the HTTP client bound to the server -func (cli *Client) HTTPClient() *http.Client { - c := *cli.client - return &c -} - -// ParseHostURL parses a url string, validates the string is a host url, and -// returns the parsed URL -func ParseHostURL(host string) (*url.URL, error) { - protoAddrParts := strings.SplitN(host, "://", 2) - if len(protoAddrParts) == 1 { - return nil, errors.Errorf("unable to parse docker host `%s`", host) - } - - var basePath string - proto, addr := protoAddrParts[0], protoAddrParts[1] - if proto == "tcp" { - parsed, err := url.Parse("tcp://" + addr) - if err != nil { - return nil, err - } - addr = parsed.Host - basePath = parsed.Path - } - return &url.URL{ - Scheme: proto, - Host: addr, - Path: basePath, - }, nil -} - -// Dialer returns a dialer for a raw stream connection, with an HTTP/1.1 header, -// that can be used for proxying the daemon connection. -// -// Used by `docker dial-stdio` (docker/cli#889). -func (cli *Client) Dialer() func(context.Context) (net.Conn, error) { - return func(ctx context.Context) (net.Conn, error) { - if transport, ok := cli.client.Transport.(*http.Transport); ok { - if transport.DialContext != nil && transport.TLSClientConfig == nil { - return transport.DialContext(ctx, cli.proto, cli.addr) - } - } - return fallbackDial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport)) - } -} diff --git a/vendor/github.com/docker/docker/client/client_deprecated.go b/vendor/github.com/docker/docker/client/client_deprecated.go deleted file mode 100644 index 9e366ce20..000000000 --- a/vendor/github.com/docker/docker/client/client_deprecated.go +++ /dev/null @@ -1,27 +0,0 @@ -package client - -import "net/http" - -// NewClient initializes a new API client for the given host and API version. -// It uses the given http client as transport. -// It also initializes the custom http headers to add to each request. -// -// It won't send any version information if the version number is empty. It is -// highly recommended that you set a version or your client may break if the -// server is upgraded. -// -// Deprecated: use [NewClientWithOpts] passing the [WithHost], [WithVersion], -// [WithHTTPClient] and [WithHTTPHeaders] options. We recommend enabling API -// version negotiation by passing the [WithAPIVersionNegotiation] option instead -// of WithVersion. -func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) { - return NewClientWithOpts(WithHost(host), WithVersion(version), WithHTTPClient(client), WithHTTPHeaders(httpHeaders)) -} - -// NewEnvClient initializes a new API client based on environment variables. -// See FromEnv for a list of support environment variables. -// -// Deprecated: use [NewClientWithOpts] passing the [FromEnv] option. 
-func NewEnvClient() (*Client, error) { - return NewClientWithOpts(FromEnv) -} diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go deleted file mode 100644 index f0783f708..000000000 --- a/vendor/github.com/docker/docker/client/client_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build linux || freebsd || openbsd || netbsd || darwin || solaris || illumos || dragonfly -// +build linux freebsd openbsd netbsd darwin solaris illumos dragonfly - -package client // import "github.com/docker/docker/client" - -// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST -// (EnvOverrideHost) environment variable is unset or empty. -const DefaultDockerHost = "unix:///var/run/docker.sock" - -const defaultProto = "unix" -const defaultAddr = "/var/run/docker.sock" diff --git a/vendor/github.com/docker/docker/client/client_windows.go b/vendor/github.com/docker/docker/client/client_windows.go deleted file mode 100644 index 5abe60457..000000000 --- a/vendor/github.com/docker/docker/client/client_windows.go +++ /dev/null @@ -1,8 +0,0 @@ -package client // import "github.com/docker/docker/client" - -// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST -// (EnvOverrideHost) environment variable is unset or empty. -const DefaultDockerHost = "npipe:////./pipe/docker_engine" - -const defaultProto = "npipe" -const defaultAddr = "//./pipe/docker_engine" diff --git a/vendor/github.com/docker/docker/client/config_create.go b/vendor/github.com/docker/docker/client/config_create.go deleted file mode 100644 index f6b1881fc..000000000 --- a/vendor/github.com/docker/docker/client/config_create.go +++ /dev/null @@ -1,25 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" -) - -// ConfigCreate creates a new config. 
-func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) { - var response types.ConfigCreateResponse - if err := cli.NewVersionError("1.30", "config create"); err != nil { - return response, err - } - resp, err := cli.post(ctx, "/configs/create", nil, config, nil) - defer ensureReaderClosed(resp) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go deleted file mode 100644 index 9be7882c3..000000000 --- a/vendor/github.com/docker/docker/client/config_inspect.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - - "github.com/docker/docker/api/types/swarm" -) - -// ConfigInspectWithRaw returns the config information with raw data -func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) { - if id == "" { - return swarm.Config{}, nil, objectNotFoundError{object: "config", id: id} - } - if err := cli.NewVersionError("1.30", "config inspect"); err != nil { - return swarm.Config{}, nil, err - } - resp, err := cli.get(ctx, "/configs/"+id, nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return swarm.Config{}, nil, err - } - - body, err := io.ReadAll(resp.body) - if err != nil { - return swarm.Config{}, nil, err - } - - var config swarm.Config - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&config) - - return config, body, err -} diff --git a/vendor/github.com/docker/docker/client/config_list.go b/vendor/github.com/docker/docker/client/config_list.go deleted file mode 100644 index 565acc6e2..000000000 --- a/vendor/github.com/docker/docker/client/config_list.go +++ /dev/null @@ -1,38 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" -) - -// ConfigList returns the list of configs. -func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) { - if err := cli.NewVersionError("1.30", "config list"); err != nil { - return nil, err - } - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/configs", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return nil, err - } - - var configs []swarm.Config - err = json.NewDecoder(resp.body).Decode(&configs) - return configs, err -} diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go deleted file mode 100644 index 24b94e9c1..000000000 --- a/vendor/github.com/docker/docker/client/config_remove.go +++ /dev/null @@ -1,13 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// ConfigRemove removes a config. 
-func (cli *Client) ConfigRemove(ctx context.Context, id string) error { - if err := cli.NewVersionError("1.30", "config remove"); err != nil { - return err - } - resp, err := cli.delete(ctx, "/configs/"+id, nil, nil) - defer ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/config_update.go b/vendor/github.com/docker/docker/client/config_update.go deleted file mode 100644 index 1ac298543..000000000 --- a/vendor/github.com/docker/docker/client/config_update.go +++ /dev/null @@ -1,20 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types/swarm" -) - -// ConfigUpdate attempts to update a config -func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error { - if err := cli.NewVersionError("1.30", "config update"); err != nil { - return err - } - query := url.Values{} - query.Set("version", version.String()) - resp, err := cli.post(ctx, "/configs/"+id+"/update", query, config, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go deleted file mode 100644 index ba92117d3..000000000 --- a/vendor/github.com/docker/docker/client/container_attach.go +++ /dev/null @@ -1,59 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types" -) - -// ContainerAttach attaches a connection to a container in the server. -// It returns a types.HijackedConnection with the hijacked connection -// and the a reader to get output. It's up to the called to close -// the hijacked connection by calling types.HijackedResponse.Close. -// -// The stream format on the response will be in one of two formats: -// -// If the container is using a TTY, there is only a single stream (stdout), and -// data is copied directly from the container output stream, no extra -// multiplexing or headers. -// -// If the container is *not* using a TTY, streams for stdout and stderr are -// multiplexed. -// The format of the multiplexed stream is as follows: -// -// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} -// -// STREAM_TYPE can be 1 for stdout and 2 for stderr -// -// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian. -// This is the size of OUTPUT. -// -// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this -// stream. 
-func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) { - query := url.Values{} - if options.Stream { - query.Set("stream", "1") - } - if options.Stdin { - query.Set("stdin", "1") - } - if options.Stdout { - query.Set("stdout", "1") - } - if options.Stderr { - query.Set("stderr", "1") - } - if options.DetachKeys != "" { - query.Set("detachKeys", options.DetachKeys) - } - if options.Logs { - query.Set("logs", "1") - } - - headers := map[string][]string{ - "Content-Type": {"text/plain"}, - } - return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers) -} diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go deleted file mode 100644 index cd7f76346..000000000 --- a/vendor/github.com/docker/docker/client/container_commit.go +++ /dev/null @@ -1,55 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "errors" - "net/url" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" -) - -// ContainerCommit applies changes to a container and creates a new tagged image. -func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) { - var repository, tag string - if options.Reference != "" { - ref, err := reference.ParseNormalizedNamed(options.Reference) - if err != nil { - return types.IDResponse{}, err - } - - if _, isCanonical := ref.(reference.Canonical); isCanonical { - return types.IDResponse{}, errors.New("refusing to create a tag with a digest reference") - } - ref = reference.TagNameOnly(ref) - - if tagged, ok := ref.(reference.Tagged); ok { - tag = tagged.Tag() - } - repository = reference.FamiliarName(ref) - } - - query := url.Values{} - query.Set("container", container) - query.Set("repo", repository) - query.Set("tag", tag) - query.Set("comment", options.Comment) - query.Set("author", options.Author) - for _, change := range options.Changes { - query.Add("changes", change) - } - if !options.Pause { - query.Set("pause", "0") - } - - var response types.IDResponse - resp, err := cli.post(ctx, "/commit", query, options.Config, nil) - defer ensureReaderClosed(resp) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/docker/docker/client/container_copy.go deleted file mode 100644 index 883be7fa3..000000000 --- a/vendor/github.com/docker/docker/client/container_copy.go +++ /dev/null @@ -1,93 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "path/filepath" - "strings" - - "github.com/docker/docker/api/types" -) - -// ContainerStatPath returns stat information about a path inside the container filesystem. -func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) { - query := url.Values{} - query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. 
- - urlStr := "/containers/" + containerID + "/archive" - response, err := cli.head(ctx, urlStr, query, nil) - defer ensureReaderClosed(response) - if err != nil { - return types.ContainerPathStat{}, err - } - return getContainerPathStatFromHeader(response.header) -} - -// CopyToContainer copies content into the container filesystem. -// Note that `content` must be a Reader for a TAR archive -func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath string, content io.Reader, options types.CopyToContainerOptions) error { - query := url.Values{} - query.Set("path", filepath.ToSlash(dstPath)) // Normalize the paths used in the API. - // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. - if !options.AllowOverwriteDirWithFile { - query.Set("noOverwriteDirNonDir", "true") - } - - if options.CopyUIDGID { - query.Set("copyUIDGID", "true") - } - - apiPath := "/containers/" + containerID + "/archive" - - response, err := cli.putRaw(ctx, apiPath, query, content, nil) - defer ensureReaderClosed(response) - if err != nil { - return err - } - - return nil -} - -// CopyFromContainer gets the content from the container and returns it as a Reader -// for a TAR archive to manipulate it in the host. It's up to the caller to close the reader. -func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { - query := make(url.Values, 1) - query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. - - apiPath := "/containers/" + containerID + "/archive" - response, err := cli.get(ctx, apiPath, query, nil) - if err != nil { - return nil, types.ContainerPathStat{}, err - } - - // In order to get the copy behavior right, we need to know information - // about both the source and the destination. The response headers include - // stat info about the source that we can use in deciding exactly how to - // copy it locally. Along with the stat info about the local destination, - // we have everything we need to handle the multiple possibilities there - // can be when copying a file/dir from one location to another file/dir. 
- stat, err := getContainerPathStatFromHeader(response.header) - if err != nil { - return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err) - } - return response.body, stat, err -} - -func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) { - var stat types.ContainerPathStat - - encodedStat := header.Get("X-Docker-Container-Path-Stat") - statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat)) - - err := json.NewDecoder(statDecoder).Decode(&stat) - if err != nil { - err = fmt.Errorf("unable to decode container path stat header: %s", err) - } - - return stat, err -} diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go deleted file mode 100644 index f82420b67..000000000 --- a/vendor/github.com/docker/docker/client/container_create.go +++ /dev/null @@ -1,83 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - "path" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/versions" - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -type configWrapper struct { - *container.Config - HostConfig *container.HostConfig - NetworkingConfig *network.NetworkingConfig -} - -// ContainerCreate creates a new container based on the given configuration. -// It can be associated with a name, but it's not mandatory. -func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string) (container.CreateResponse, error) { - var response container.CreateResponse - - if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil { - return response, err - } - if err := cli.NewVersionError("1.41", "specify container image platform"); platform != nil && err != nil { - return response, err - } - - if hostConfig != nil { - if versions.LessThan(cli.ClientVersion(), "1.25") { - // When using API 1.24 and under, the client is responsible for removing the container - hostConfig.AutoRemove = false - } - if versions.GreaterThanOrEqualTo(cli.ClientVersion(), "1.42") || versions.LessThan(cli.ClientVersion(), "1.40") { - // KernelMemory was added in API 1.40, and deprecated in API 1.42 - hostConfig.KernelMemory = 0 - } - if platform != nil && platform.OS == "linux" && versions.LessThan(cli.ClientVersion(), "1.42") { - // When using API under 1.42, the Linux daemon doesn't respect the ConsoleSize - hostConfig.ConsoleSize = [2]uint{0, 0} - } - } - - query := url.Values{} - if p := formatPlatform(platform); p != "" { - query.Set("platform", p) - } - - if containerName != "" { - query.Set("name", containerName) - } - - body := configWrapper{ - Config: config, - HostConfig: hostConfig, - NetworkingConfig: networkingConfig, - } - - serverResp, err := cli.post(ctx, "/containers/create", query, body, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return response, err - } - - err = json.NewDecoder(serverResp.body).Decode(&response) - return response, err -} - -// formatPlatform returns a formatted string representing platform (e.g. linux/arm/v7). -// -// Similar to containerd's platforms.Format(), but does allow components to be -// omitted (e.g. 
pass "architecture" only, without "os": -// https://github.com/containerd/containerd/blob/v1.5.2/platforms/platforms.go#L243-L263 -func formatPlatform(platform *specs.Platform) string { - if platform == nil { - return "" - } - return path.Join(platform.OS, platform.Architecture, platform.Variant) -} diff --git a/vendor/github.com/docker/docker/client/container_diff.go b/vendor/github.com/docker/docker/client/container_diff.go deleted file mode 100644 index 29dac8491..000000000 --- a/vendor/github.com/docker/docker/client/container_diff.go +++ /dev/null @@ -1,23 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/container" -) - -// ContainerDiff shows differences in a container filesystem since it was started. -func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]container.ContainerChangeResponseItem, error) { - var changes []container.ContainerChangeResponseItem - - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return changes, err - } - - err = json.NewDecoder(serverResp.body).Decode(&changes) - return changes, err -} diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go deleted file mode 100644 index 6a2cb006f..000000000 --- a/vendor/github.com/docker/docker/client/container_exec.go +++ /dev/null @@ -1,66 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/versions" -) - -// ContainerExecCreate creates a new exec configuration to run an exec process. -func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) { - var response types.IDResponse - - if err := cli.NewVersionError("1.25", "env"); len(config.Env) != 0 && err != nil { - return response, err - } - if versions.LessThan(cli.ClientVersion(), "1.42") { - config.ConsoleSize = nil - } - - resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil) - defer ensureReaderClosed(resp) - if err != nil { - return response, err - } - err = json.NewDecoder(resp.body).Decode(&response) - return response, err -} - -// ContainerExecStart starts an exec process already created in the docker host. -func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error { - if versions.LessThan(cli.ClientVersion(), "1.42") { - config.ConsoleSize = nil - } - resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil) - ensureReaderClosed(resp) - return err -} - -// ContainerExecAttach attaches a connection to an exec process in the server. -// It returns a types.HijackedConnection with the hijacked connection -// and the a reader to get output. It's up to the called to close -// the hijacked connection by calling types.HijackedResponse.Close. 
-func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) { - if versions.LessThan(cli.ClientVersion(), "1.42") { - config.ConsoleSize = nil - } - headers := map[string][]string{ - "Content-Type": {"application/json"}, - } - return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers) -} - -// ContainerExecInspect returns information about a specific exec process on the docker host. -func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) { - var response types.ContainerExecInspect - resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/container_export.go b/vendor/github.com/docker/docker/client/container_export.go deleted file mode 100644 index d0c0a5cba..000000000 --- a/vendor/github.com/docker/docker/client/container_export.go +++ /dev/null @@ -1,19 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" -) - -// ContainerExport retrieves the raw contents of a container -// and returns them as an io.ReadCloser. It's up to the caller -// to close the stream. -func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) { - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) - if err != nil { - return nil, err - } - - return serverResp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/container_inspect.go b/vendor/github.com/docker/docker/client/container_inspect.go deleted file mode 100644 index d48f0d3a6..000000000 --- a/vendor/github.com/docker/docker/client/container_inspect.go +++ /dev/null @@ -1,53 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - "net/url" - - "github.com/docker/docker/api/types" -) - -// ContainerInspect returns the container information. -func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { - if containerID == "" { - return types.ContainerJSON{}, objectNotFoundError{object: "container", id: containerID} - } - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return types.ContainerJSON{}, err - } - - var response types.ContainerJSON - err = json.NewDecoder(serverResp.body).Decode(&response) - return response, err -} - -// ContainerInspectWithRaw returns the container information and its raw representation. 
-func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) { - if containerID == "" { - return types.ContainerJSON{}, nil, objectNotFoundError{object: "container", id: containerID} - } - query := url.Values{} - if getSize { - query.Set("size", "1") - } - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return types.ContainerJSON{}, nil, err - } - - body, err := io.ReadAll(serverResp.body) - if err != nil { - return types.ContainerJSON{}, nil, err - } - - var response types.ContainerJSON - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/docker/client/container_kill.go b/vendor/github.com/docker/docker/client/container_kill.go deleted file mode 100644 index 7c9529f1e..000000000 --- a/vendor/github.com/docker/docker/client/container_kill.go +++ /dev/null @@ -1,18 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" -) - -// ContainerKill terminates the container process but does not remove the container from the docker host. -func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { - query := url.Values{} - if signal != "" { - query.Set("signal", signal) - } - - resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go deleted file mode 100644 index bd491b3db..000000000 --- a/vendor/github.com/docker/docker/client/container_list.go +++ /dev/null @@ -1,57 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - "strconv" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" -) - -// ContainerList returns the list of containers in the docker host. 
-func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { - query := url.Values{} - - if options.All { - query.Set("all", "1") - } - - if options.Limit > 0 { - query.Set("limit", strconv.Itoa(options.Limit)) - } - - if options.Since != "" { - query.Set("since", options.Since) - } - - if options.Before != "" { - query.Set("before", options.Before) - } - - if options.Size { - query.Set("size", "1") - } - - if options.Filters.Len() > 0 { - //nolint:staticcheck // ignore SA1019 for old code - filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) - - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/containers/json", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return nil, err - } - - var containers []types.Container - err = json.NewDecoder(resp.body).Decode(&containers) - return containers, err -} diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go deleted file mode 100644 index 9bdf2b0fa..000000000 --- a/vendor/github.com/docker/docker/client/container_logs.go +++ /dev/null @@ -1,80 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - "time" - - "github.com/docker/docker/api/types" - timetypes "github.com/docker/docker/api/types/time" - "github.com/pkg/errors" -) - -// ContainerLogs returns the logs generated by a container in an io.ReadCloser. -// It's up to the caller to close the stream. -// -// The stream format on the response will be in one of two formats: -// -// If the container is using a TTY, there is only a single stream (stdout), and -// data is copied directly from the container output stream, no extra -// multiplexing or headers. -// -// If the container is *not* using a TTY, streams for stdout and stderr are -// multiplexed. -// The format of the multiplexed stream is as follows: -// -// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} -// -// STREAM_TYPE can be 1 for stdout and 2 for stderr -// -// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian. -// This is the size of OUTPUT. -// -// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this -// stream. 
-func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) { - query := url.Values{} - if options.ShowStdout { - query.Set("stdout", "1") - } - - if options.ShowStderr { - query.Set("stderr", "1") - } - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, time.Now()) - if err != nil { - return nil, errors.Wrap(err, `invalid value for "since"`) - } - query.Set("since", ts) - } - - if options.Until != "" { - ts, err := timetypes.GetTimestamp(options.Until, time.Now()) - if err != nil { - return nil, errors.Wrap(err, `invalid value for "until"`) - } - query.Set("until", ts) - } - - if options.Timestamps { - query.Set("timestamps", "1") - } - - if options.Details { - query.Set("details", "1") - } - - if options.Follow { - query.Set("follow", "1") - } - query.Set("tail", options.Tail) - - resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/container_pause.go b/vendor/github.com/docker/docker/client/container_pause.go deleted file mode 100644 index 5e7271a37..000000000 --- a/vendor/github.com/docker/docker/client/container_pause.go +++ /dev/null @@ -1,10 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// ContainerPause pauses the main process of a given container without terminating it. -func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { - resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_prune.go b/vendor/github.com/docker/docker/client/container_prune.go deleted file mode 100644 index 04383deaa..000000000 --- a/vendor/github.com/docker/docker/client/container_prune.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" -) - -// ContainersPrune requests the daemon to delete unused data -func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) { - var report types.ContainersPruneReport - - if err := cli.NewVersionError("1.25", "container prune"); err != nil { - return report, err - } - - query, err := getFiltersQuery(pruneFilters) - if err != nil { - return report, err - } - - serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return report, err - } - - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return report, fmt.Errorf("Error retrieving disk usage: %v", err) - } - - return report, nil -} diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go deleted file mode 100644 index c21de609b..000000000 --- a/vendor/github.com/docker/docker/client/container_remove.go +++ /dev/null @@ -1,27 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types" -) - -// ContainerRemove kills and removes a container from the docker host. 
-func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error { - query := url.Values{} - if options.RemoveVolumes { - query.Set("v", "1") - } - if options.RemoveLinks { - query.Set("link", "1") - } - - if options.Force { - query.Set("force", "1") - } - - resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) - defer ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_rename.go b/vendor/github.com/docker/docker/client/container_rename.go deleted file mode 100644 index 240fdf552..000000000 --- a/vendor/github.com/docker/docker/client/container_rename.go +++ /dev/null @@ -1,15 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" -) - -// ContainerRename changes the name of a given container. -func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error { - query := url.Values{} - query.Set("name", newContainerName) - resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_resize.go b/vendor/github.com/docker/docker/client/container_resize.go deleted file mode 100644 index a9d4c0c79..000000000 --- a/vendor/github.com/docker/docker/client/container_resize.go +++ /dev/null @@ -1,29 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - "strconv" - - "github.com/docker/docker/api/types" -) - -// ContainerResize changes the size of the tty for a container. -func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error { - return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width) -} - -// ContainerExecResize changes the size of the tty for an exec process running inside a container. -func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error { - return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width) -} - -func (cli *Client) resize(ctx context.Context, basePath string, height, width uint) error { - query := url.Values{} - query.Set("h", strconv.Itoa(int(height))) - query.Set("w", strconv.Itoa(int(width))) - - resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go deleted file mode 100644 index 1e0ad9998..000000000 --- a/vendor/github.com/docker/docker/client/container_restart.go +++ /dev/null @@ -1,26 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - "strconv" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/versions" -) - -// ContainerRestart stops and starts a container again. -// It makes the daemon wait for the container to be up again for -// a specific amount of time, given the timeout. 
-func (cli *Client) ContainerRestart(ctx context.Context, containerID string, options container.StopOptions) error { - query := url.Values{} - if options.Timeout != nil { - query.Set("t", strconv.Itoa(*options.Timeout)) - } - if options.Signal != "" && versions.GreaterThanOrEqualTo(cli.version, "1.42") { - query.Set("signal", options.Signal) - } - resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_start.go b/vendor/github.com/docker/docker/client/container_start.go deleted file mode 100644 index c2e0b15dc..000000000 --- a/vendor/github.com/docker/docker/client/container_start.go +++ /dev/null @@ -1,23 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types" -) - -// ContainerStart sends a request to the docker daemon to start a container. -func (cli *Client) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error { - query := url.Values{} - if len(options.CheckpointID) != 0 { - query.Set("checkpoint", options.CheckpointID) - } - if len(options.CheckpointDir) != 0 { - query.Set("checkpoint-dir", options.CheckpointDir) - } - - resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go deleted file mode 100644 index 0a6488dde..000000000 --- a/vendor/github.com/docker/docker/client/container_stats.go +++ /dev/null @@ -1,42 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types" -) - -// ContainerStats returns near realtime stats for a given container. -// It's up to the caller to close the io.ReadCloser returned. -func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { - query := url.Values{} - query.Set("stream", "0") - if stream { - query.Set("stream", "1") - } - - resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) - if err != nil { - return types.ContainerStats{}, err - } - - osType := getDockerOS(resp.header.Get("Server")) - return types.ContainerStats{Body: resp.body, OSType: osType}, err -} - -// ContainerStatsOneShot gets a single stat entry from a container. 
-// It differs from `ContainerStats` in that the API should not wait to prime the stats -func (cli *Client) ContainerStatsOneShot(ctx context.Context, containerID string) (types.ContainerStats, error) { - query := url.Values{} - query.Set("stream", "0") - query.Set("one-shot", "1") - - resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) - if err != nil { - return types.ContainerStats{}, err - } - - osType := getDockerOS(resp.header.Get("Server")) - return types.ContainerStats{Body: resp.body, OSType: osType}, err -} diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go deleted file mode 100644 index 2a43ce227..000000000 --- a/vendor/github.com/docker/docker/client/container_stop.go +++ /dev/null @@ -1,30 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - "strconv" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/versions" -) - -// ContainerStop stops a container. In case the container fails to stop -// gracefully within a time frame specified by the timeout argument, -// it is forcefully terminated (killed). -// -// If the timeout is nil, the container's StopTimeout value is used, if set, -// otherwise the engine default. A negative timeout value can be specified, -// meaning no timeout, i.e. no forceful termination is performed. -func (cli *Client) ContainerStop(ctx context.Context, containerID string, options container.StopOptions) error { - query := url.Values{} - if options.Timeout != nil { - query.Set("t", strconv.Itoa(*options.Timeout)) - } - if options.Signal != "" && versions.GreaterThanOrEqualTo(cli.version, "1.42") { - query.Set("signal", options.Signal) - } - resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_top.go b/vendor/github.com/docker/docker/client/container_top.go deleted file mode 100644 index a5b78999b..000000000 --- a/vendor/github.com/docker/docker/client/container_top.go +++ /dev/null @@ -1,28 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - "strings" - - "github.com/docker/docker/api/types/container" -) - -// ContainerTop shows process information from within a container. 
-func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (container.ContainerTopOKBody, error) { - var response container.ContainerTopOKBody - query := url.Values{} - if len(arguments) > 0 { - query.Set("ps_args", strings.Join(arguments, " ")) - } - - resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/container_unpause.go b/vendor/github.com/docker/docker/client/container_unpause.go deleted file mode 100644 index 1d8f87316..000000000 --- a/vendor/github.com/docker/docker/client/container_unpause.go +++ /dev/null @@ -1,10 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// ContainerUnpause resumes the process execution within a container -func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error { - resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_update.go b/vendor/github.com/docker/docker/client/container_update.go deleted file mode 100644 index bf68a5300..000000000 --- a/vendor/github.com/docker/docker/client/container_update.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types/container" -) - -// ContainerUpdate updates the resources of a container. -func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) { - var response container.ContainerUpdateOKBody - serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return response, err - } - - err = json.NewDecoder(serverResp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go deleted file mode 100644 index 2375eb1e8..000000000 --- a/vendor/github.com/docker/docker/client/container_wait.go +++ /dev/null @@ -1,104 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "io" - "net/url" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/versions" -) - -const containerWaitErrorMsgLimit = 2 * 1024 /* Max: 2KiB */ - -// ContainerWait waits until the specified container is in a certain state -// indicated by the given condition, either "not-running" (default), -// "next-exit", or "removed". -// -// If this client's API version is before 1.30, condition is ignored and -// ContainerWait will return immediately with the two channels, as the server -// will wait as if the condition were "not-running". -// -// If this client's API version is at least 1.30, ContainerWait blocks until -// the request has been acknowledged by the server (with a response header), -// then returns two channels on which the caller can wait for the exit status -// of the container or an error if there was a problem either beginning the -// wait request or in getting the response. 
This allows the caller to -// synchronize ContainerWait with other calls, such as specifying a -// "next-exit" condition before issuing a ContainerStart request. -func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) { - if versions.LessThan(cli.ClientVersion(), "1.30") { - return cli.legacyContainerWait(ctx, containerID) - } - - resultC := make(chan container.WaitResponse) - errC := make(chan error, 1) - - query := url.Values{} - if condition != "" { - query.Set("condition", string(condition)) - } - - resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil) - if err != nil { - defer ensureReaderClosed(resp) - errC <- err - return resultC, errC - } - - go func() { - defer ensureReaderClosed(resp) - - body := resp.body - responseText := bytes.NewBuffer(nil) - stream := io.TeeReader(body, responseText) - - var res container.WaitResponse - if err := json.NewDecoder(stream).Decode(&res); err != nil { - // NOTE(nicks): The /wait API does not work well with HTTP proxies. - // At any time, the proxy could cut off the response stream. - // - // But because the HTTP status has already been written, the proxy's - // only option is to write a plaintext error message. - // - // If there's a JSON parsing error, read the real error message - // off the body and send it to the client. - _, _ = io.ReadAll(io.LimitReader(stream, containerWaitErrorMsgLimit)) - errC <- errors.New(responseText.String()) - return - } - - resultC <- res - }() - - return resultC, errC -} - -// legacyContainerWait returns immediately and doesn't have an option to wait -// until the container is removed. -func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.WaitResponse, <-chan error) { - resultC := make(chan container.WaitResponse) - errC := make(chan error) - - go func() { - resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) - if err != nil { - errC <- err - return - } - defer ensureReaderClosed(resp) - - var res container.WaitResponse - if err := json.NewDecoder(resp.body).Decode(&res); err != nil { - errC <- err - return - } - - resultC <- res - }() - - return resultC, errC -} diff --git a/vendor/github.com/docker/docker/client/disk_usage.go b/vendor/github.com/docker/docker/client/disk_usage.go deleted file mode 100644 index ba0d92e9e..000000000 --- a/vendor/github.com/docker/docker/client/disk_usage.go +++ /dev/null @@ -1,33 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - "net/url" - - "github.com/docker/docker/api/types" -) - -// DiskUsage requests the current data usage from the daemon -func (cli *Client) DiskUsage(ctx context.Context, options types.DiskUsageOptions) (types.DiskUsage, error) { - var query url.Values - if len(options.Types) > 0 { - query = url.Values{} - for _, t := range options.Types { - query.Add("type", string(t)) - } - } - - serverResp, err := cli.get(ctx, "/system/df", query, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return types.DiskUsage{}, err - } - - var du types.DiskUsage - if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil { - return types.DiskUsage{}, fmt.Errorf("Error retrieving disk usage: %v", err) - } - return du, nil -} diff --git a/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/docker/docker/client/distribution_inspect.go deleted file mode 100644 index 
7f36c99a0..000000000 --- a/vendor/github.com/docker/docker/client/distribution_inspect.go +++ /dev/null @@ -1,38 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - registrytypes "github.com/docker/docker/api/types/registry" -) - -// DistributionInspect returns the image digest with the full manifest. -func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registrytypes.DistributionInspect, error) { - // Contact the registry to retrieve digest and platform information - var distributionInspect registrytypes.DistributionInspect - if image == "" { - return distributionInspect, objectNotFoundError{object: "distribution", id: image} - } - - if err := cli.NewVersionError("1.30", "distribution inspect"); err != nil { - return distributionInspect, err - } - var headers map[string][]string - - if encodedRegistryAuth != "" { - headers = map[string][]string{ - "X-Registry-Auth": {encodedRegistryAuth}, - } - } - - resp, err := cli.get(ctx, "/distribution/"+image+"/json", url.Values{}, headers) - defer ensureReaderClosed(resp) - if err != nil { - return distributionInspect, err - } - - err = json.NewDecoder(resp.body).Decode(&distributionInspect) - return distributionInspect, err -} diff --git a/vendor/github.com/docker/docker/client/envvars.go b/vendor/github.com/docker/docker/client/envvars.go deleted file mode 100644 index 61dd45c1d..000000000 --- a/vendor/github.com/docker/docker/client/envvars.go +++ /dev/null @@ -1,90 +0,0 @@ -package client // import "github.com/docker/docker/client" - -const ( - // EnvOverrideHost is the name of the environment variable that can be used - // to override the default host to connect to (DefaultDockerHost). - // - // This env-var is read by FromEnv and WithHostFromEnv and when set to a - // non-empty value, takes precedence over the default host (which is platform - // specific), or any host already set. - EnvOverrideHost = "DOCKER_HOST" - - // EnvOverrideAPIVersion is the name of the environment variable that can - // be used to override the API version to use. Value should be - // formatted as MAJOR.MINOR, for example, "1.19". - // - // This env-var is read by FromEnv and WithVersionFromEnv and when set to a - // non-empty value, takes precedence over API version negotiation. - // - // This environment variable should be used for debugging purposes only, as - // it can set the client to use an incompatible (or invalid) API version. - EnvOverrideAPIVersion = "DOCKER_API_VERSION" - - // EnvOverrideCertPath is the name of the environment variable that can be - // used to specify the directory from which to load the TLS certificates - // (ca.pem, cert.pem, key.pem) from. These certificates are used to configure - // the Client for a TCP connection protected by TLS client authentication. - // - // TLS certificate verification is enabled by default if the Client is configured - // to use a TLS connection. Refer to EnvTLSVerify below to learn how to - // disable verification for testing purposes. - // - // WARNING: Access to the remote API is equivalent to root access to the - // host where the daemon runs. Do not expose the API without protection, - // and only if needed. Make sure you are familiar with the "daemon attack - // surface" (https://docs.docker.com/go/attack-surface/). 
- // - // For local access to the API, it is recommended to connect with the daemon - // using the default local socket connection (on Linux), or the named pipe - // (on Windows). - // - // If you need to access the API of a remote daemon, consider using an SSH - // (ssh://) connection, which is easier to set up, and requires no additional - // configuration if the host is accessible using ssh. - // - // If you cannot use the alternatives above, and you must expose the API over - // a TCP connection, refer to https://docs.docker.com/engine/security/protect-access/ - // to learn how to configure the daemon and client to use a TCP connection - // with TLS client authentication. Make sure you know the differences between - // a regular TLS connection and a TLS connection protected by TLS client - // authentication, and verify that the API cannot be accessed by other clients. - EnvOverrideCertPath = "DOCKER_CERT_PATH" - - // EnvTLSVerify is the name of the environment variable that can be used to - // enable or disable TLS certificate verification. When set to a non-empty - // value, TLS certificate verification is enabled, and the client is configured - // to use a TLS connection, using certificates from the default directories - // (within `~/.docker`); refer to EnvOverrideCertPath above for additional - // details. - // - // WARNING: Access to the remote API is equivalent to root access to the - // host where the daemon runs. Do not expose the API without protection, - // and only if needed. Make sure you are familiar with the "daemon attack - // surface" (https://docs.docker.com/go/attack-surface/). - // - // Before setting up your client and daemon to use a TCP connection with TLS - // client authentication, consider using one of the alternatives mentioned - // in EnvOverrideCertPath above. - // - // Disabling TLS certificate verification (for testing purposes) - // - // TLS certificate verification is enabled by default if the Client is configured - // to use a TLS connection, and it is highly recommended to keep verification - // enabled to prevent machine-in-the-middle attacks. Refer to the documentation - // at https://docs.docker.com/engine/security/protect-access/ and pages linked - // from that page to learn how to configure the daemon and client to use a - // TCP connection with TLS client authentication enabled. - // - // Set the "DOCKER_TLS_VERIFY" environment to an empty string ("") to - // disable TLS certificate verification. Disabling verification is insecure, - // so should only be done for testing purposes. From the Go documentation - // (https://pkg.go.dev/crypto/tls#Config): - // - // InsecureSkipVerify controls whether a client verifies the server's - // certificate chain and host name. If InsecureSkipVerify is true, crypto/tls - // accepts any certificate presented by the server and any host name in that - // certificate. In this mode, TLS is susceptible to machine-in-the-middle - // attacks unless custom verification is used. This should be used only for - // testing or in combination with VerifyConnection or VerifyPeerCertificate. 
- EnvTLSVerify = "DOCKER_TLS_VERIFY" -) diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go deleted file mode 100644 index e5a8a865f..000000000 --- a/vendor/github.com/docker/docker/client/errors.go +++ /dev/null @@ -1,93 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "fmt" - - "github.com/docker/docker/api/types/versions" - "github.com/docker/docker/errdefs" - "github.com/pkg/errors" -) - -// errConnectionFailed implements an error returned when connection failed. -type errConnectionFailed struct { - host string -} - -// Error returns a string representation of an errConnectionFailed -func (err errConnectionFailed) Error() string { - if err.host == "" { - return "Cannot connect to the Docker daemon. Is the docker daemon running on this host?" - } - return fmt.Sprintf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", err.host) -} - -// IsErrConnectionFailed returns true if the error is caused by connection failed. -func IsErrConnectionFailed(err error) bool { - return errors.As(err, &errConnectionFailed{}) -} - -// ErrorConnectionFailed returns an error with host in the error message when connection to docker daemon failed. -func ErrorConnectionFailed(host string) error { - return errConnectionFailed{host: host} -} - -// Deprecated: use the errdefs.NotFound() interface instead. Kept for backward compatibility -type notFound interface { - error - NotFound() bool -} - -// IsErrNotFound returns true if the error is a NotFound error, which is returned -// by the API when some object is not found. -func IsErrNotFound(err error) bool { - if errdefs.IsNotFound(err) { - return true - } - var e notFound - return errors.As(err, &e) -} - -type objectNotFoundError struct { - object string - id string -} - -func (e objectNotFoundError) NotFound() {} - -func (e objectNotFoundError) Error() string { - return fmt.Sprintf("Error: No such %s: %s", e.object, e.id) -} - -// IsErrUnauthorized returns true if the error is caused -// when a remote registry authentication fails -// -// Deprecated: use errdefs.IsUnauthorized -func IsErrUnauthorized(err error) bool { - return errdefs.IsUnauthorized(err) -} - -type pluginPermissionDenied struct { - name string -} - -func (e pluginPermissionDenied) Error() string { - return "Permission denied while installing plugin " + e.name -} - -// IsErrNotImplemented returns true if the error is a NotImplemented error. -// This is returned by the API when a requested feature has not been -// implemented. 
-// -// Deprecated: use errdefs.IsNotImplemented -func IsErrNotImplemented(err error) bool { - return errdefs.IsNotImplemented(err) -} - -// NewVersionError returns an error if the APIVersion required -// if less than the current supported version -func (cli *Client) NewVersionError(APIrequired, feature string) error { - if cli.version != "" && versions.LessThan(cli.version, APIrequired) { - return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version) - } - return nil -} diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go deleted file mode 100644 index a9c48a928..000000000 --- a/vendor/github.com/docker/docker/client/events.go +++ /dev/null @@ -1,101 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/events" - "github.com/docker/docker/api/types/filters" - timetypes "github.com/docker/docker/api/types/time" -) - -// Events returns a stream of events in the daemon. It's up to the caller to close the stream -// by cancelling the context. Once the stream has been completely read an io.EOF error will -// be sent over the error channel. If an error is sent all processing will be stopped. It's up -// to the caller to reopen the stream in the event of an error by reinvoking this method. -func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) { - messages := make(chan events.Message) - errs := make(chan error, 1) - - started := make(chan struct{}) - go func() { - defer close(errs) - - query, err := buildEventsQueryParams(cli.version, options) - if err != nil { - close(started) - errs <- err - return - } - - resp, err := cli.get(ctx, "/events", query, nil) - if err != nil { - close(started) - errs <- err - return - } - defer resp.body.Close() - - decoder := json.NewDecoder(resp.body) - - close(started) - for { - select { - case <-ctx.Done(): - errs <- ctx.Err() - return - default: - var event events.Message - if err := decoder.Decode(&event); err != nil { - errs <- err - return - } - - select { - case messages <- event: - case <-ctx.Done(): - errs <- ctx.Err() - return - } - } - } - }() - <-started - - return messages, errs -} - -func buildEventsQueryParams(cliVersion string, options types.EventsOptions) (url.Values, error) { - query := url.Values{} - ref := time.Now() - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, ref) - if err != nil { - return nil, err - } - query.Set("since", ts) - } - - if options.Until != "" { - ts, err := timetypes.GetTimestamp(options.Until, ref) - if err != nil { - return nil, err - } - query.Set("until", ts) - } - - if options.Filters.Len() > 0 { - //nolint:staticcheck // ignore SA1019 for old code - filterJSON, err := filters.ToParamWithVersion(cliVersion, options.Filters) - if err != nil { - return nil, err - } - query.Set("filters", filterJSON) - } - - return query, nil -} diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go deleted file mode 100644 index 6bdacab10..000000000 --- a/vendor/github.com/docker/docker/client/hijack.go +++ /dev/null @@ -1,153 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bufio" - "context" - "crypto/tls" - "fmt" - "net" - "net/http" - "net/http/httputil" - "net/url" - "time" - - 
"github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/versions" - "github.com/docker/go-connections/sockets" - "github.com/pkg/errors" -) - -// postHijacked sends a POST request and hijacks the connection. -func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) { - bodyEncoded, err := encodeData(body) - if err != nil { - return types.HijackedResponse{}, err - } - - apiPath := cli.getAPIPath(ctx, path, query) - req, err := http.NewRequest(http.MethodPost, apiPath, bodyEncoded) - if err != nil { - return types.HijackedResponse{}, err - } - req = cli.addHeaders(req, headers) - - conn, mediaType, err := cli.setupHijackConn(ctx, req, "tcp") - if err != nil { - return types.HijackedResponse{}, err - } - - return types.NewHijackedResponse(conn, mediaType), err -} - -// DialHijack returns a hijacked connection with negotiated protocol proto. -func (cli *Client) DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) { - req, err := http.NewRequest(http.MethodPost, url, nil) - if err != nil { - return nil, err - } - req = cli.addHeaders(req, meta) - - conn, _, err := cli.setupHijackConn(ctx, req, proto) - return conn, err -} - -// fallbackDial is used when WithDialer() was not called. -// See cli.Dialer(). -func fallbackDial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { - if tlsConfig != nil && proto != "unix" && proto != "npipe" { - return tls.Dial(proto, addr, tlsConfig) - } - if proto == "npipe" { - return sockets.DialPipe(addr, 32*time.Second) - } - return net.Dial(proto, addr) -} - -func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto string) (net.Conn, string, error) { - req.Host = cli.addr - req.Header.Set("Connection", "Upgrade") - req.Header.Set("Upgrade", proto) - - dialer := cli.Dialer() - conn, err := dialer(ctx) - if err != nil { - return nil, "", errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") - } - - // When we set up a TCP connection for hijack, there could be long periods - // of inactivity (a long running command with no output) that in certain - // network setups may cause ECONNTIMEOUT, leaving the client in an unknown - // state. Setting TCP KeepAlive on the socket connection will prohibit - // ECONNTIMEOUT unless the socket connection truly is broken - if tcpConn, ok := conn.(*net.TCPConn); ok { - tcpConn.SetKeepAlive(true) - tcpConn.SetKeepAlivePeriod(30 * time.Second) - } - - clientconn := httputil.NewClientConn(conn, nil) - defer clientconn.Close() - - // Server hijacks the connection, error 'connection closed' expected - resp, err := clientconn.Do(req) - - //nolint:staticcheck // ignore SA1019 for connecting to old (pre go1.8) daemons - if err != httputil.ErrPersistEOF { - if err != nil { - return nil, "", err - } - if resp.StatusCode != http.StatusSwitchingProtocols { - resp.Body.Close() - return nil, "", fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode) - } - } - - c, br := clientconn.Hijack() - if br.Buffered() > 0 { - // If there is buffered content, wrap the connection. We return an - // object that implements CloseWrite if the underlying connection - // implements it. 
- if _, ok := c.(types.CloseWriter); ok { - c = &hijackedConnCloseWriter{&hijackedConn{c, br}} - } else { - c = &hijackedConn{c, br} - } - } else { - br.Reset(nil) - } - - var mediaType string - if versions.GreaterThanOrEqualTo(cli.ClientVersion(), "1.42") { - // Prior to 1.42, Content-Type is always set to raw-stream and not relevant - mediaType = resp.Header.Get("Content-Type") - } - - return c, mediaType, nil -} - -// hijackedConn wraps a net.Conn and is returned by setupHijackConn in the case -// that a) there was already buffered data in the http layer when Hijack() was -// called, and b) the underlying net.Conn does *not* implement CloseWrite(). -// hijackedConn does not implement CloseWrite() either. -type hijackedConn struct { - net.Conn - r *bufio.Reader -} - -func (c *hijackedConn) Read(b []byte) (int, error) { - return c.r.Read(b) -} - -// hijackedConnCloseWriter is a hijackedConn which additionally implements -// CloseWrite(). It is returned by setupHijackConn in the case that a) there -// was already buffered data in the http layer when Hijack() was called, and b) -// the underlying net.Conn *does* implement CloseWrite(). -type hijackedConnCloseWriter struct { - *hijackedConn -} - -var _ types.CloseWriter = &hijackedConnCloseWriter{} - -func (c *hijackedConnCloseWriter) CloseWrite() error { - conn := c.Conn.(types.CloseWriter) - return conn.CloseWrite() -} diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go deleted file mode 100644 index d16e1d8ea..000000000 --- a/vendor/github.com/docker/docker/client/image_build.go +++ /dev/null @@ -1,146 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/base64" - "encoding/json" - "io" - "net/http" - "net/url" - "strconv" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" -) - -// ImageBuild sends a request to the daemon to build images. -// The Body in the response implements an io.ReadCloser and it's up to the caller to -// close it. 
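// A usage sketch (not part of this patch): calling the ImageBuild method defined just
// below. The build context must be a tar stream; "context.tar", the tag and the
// Dockerfile name are assumptions for demonstration. The response Body must be read
// and closed by the caller.
package example

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func buildImage(ctx context.Context, cli client.ImageAPIClient) error {
	// Pre-built tar archive of the build context (assumed to exist).
	buildContext, err := os.Open("context.tar")
	if err != nil {
		return err
	}
	defer buildContext.Close()

	resp, err := cli.ImageBuild(ctx, buildContext, types.ImageBuildOptions{
		Tags:       []string{"example/app:dev"},
		Dockerfile: "Dockerfile",
		Remove:     true,
	})
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// The daemon streams progress as JSON messages; draining the body is what
	// drives the build to completion.
	_, err = io.Copy(io.Discard, resp.Body)
	return err
}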
-func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { - query, err := cli.imageBuildOptionsToQuery(options) - if err != nil { - return types.ImageBuildResponse{}, err - } - - headers := http.Header(make(map[string][]string)) - buf, err := json.Marshal(options.AuthConfigs) - if err != nil { - return types.ImageBuildResponse{}, err - } - headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) - - headers.Set("Content-Type", "application/x-tar") - - serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) - if err != nil { - return types.ImageBuildResponse{}, err - } - - osType := getDockerOS(serverResp.header.Get("Server")) - - return types.ImageBuildResponse{ - Body: serverResp.body, - OSType: osType, - }, nil -} - -func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) { - query := url.Values{ - "t": options.Tags, - "securityopt": options.SecurityOpt, - "extrahosts": options.ExtraHosts, - } - if options.SuppressOutput { - query.Set("q", "1") - } - if options.RemoteContext != "" { - query.Set("remote", options.RemoteContext) - } - if options.NoCache { - query.Set("nocache", "1") - } - if options.Remove { - query.Set("rm", "1") - } else { - query.Set("rm", "0") - } - - if options.ForceRemove { - query.Set("forcerm", "1") - } - - if options.PullParent { - query.Set("pull", "1") - } - - if options.Squash { - if err := cli.NewVersionError("1.25", "squash"); err != nil { - return query, err - } - query.Set("squash", "1") - } - - if !container.Isolation.IsDefault(options.Isolation) { - query.Set("isolation", string(options.Isolation)) - } - - query.Set("cpusetcpus", options.CPUSetCPUs) - query.Set("networkmode", options.NetworkMode) - query.Set("cpusetmems", options.CPUSetMems) - query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10)) - query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10)) - query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10)) - query.Set("memory", strconv.FormatInt(options.Memory, 10)) - query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10)) - query.Set("cgroupparent", options.CgroupParent) - query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) - query.Set("dockerfile", options.Dockerfile) - query.Set("target", options.Target) - - ulimitsJSON, err := json.Marshal(options.Ulimits) - if err != nil { - return query, err - } - query.Set("ulimits", string(ulimitsJSON)) - - buildArgsJSON, err := json.Marshal(options.BuildArgs) - if err != nil { - return query, err - } - query.Set("buildargs", string(buildArgsJSON)) - - labelsJSON, err := json.Marshal(options.Labels) - if err != nil { - return query, err - } - query.Set("labels", string(labelsJSON)) - - cacheFromJSON, err := json.Marshal(options.CacheFrom) - if err != nil { - return query, err - } - query.Set("cachefrom", string(cacheFromJSON)) - if options.SessionID != "" { - query.Set("session", options.SessionID) - } - if options.Platform != "" { - if err := cli.NewVersionError("1.32", "platform"); err != nil { - return query, err - } - query.Set("platform", strings.ToLower(options.Platform)) - } - if options.BuildID != "" { - query.Set("buildid", options.BuildID) - } - query.Set("version", string(options.Version)) - - if options.Outputs != nil { - outputsJSON, err := json.Marshal(options.Outputs) - if err != nil { - return query, err - } - query.Set("outputs", string(outputsJSON)) - } - return query, nil -} diff --git 
a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go deleted file mode 100644 index b1c022777..000000000 --- a/vendor/github.com/docker/docker/client/image_create.go +++ /dev/null @@ -1,37 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - "strings" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" -) - -// ImageCreate creates a new image based on the parent options. -// It returns the JSON content in the response body. -func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { - ref, err := reference.ParseNormalizedNamed(parentReference) - if err != nil { - return nil, err - } - - query := url.Values{} - query.Set("fromImage", reference.FamiliarName(ref)) - query.Set("tag", getAPITagFromNamedRef(ref)) - if options.Platform != "" { - query.Set("platform", strings.ToLower(options.Platform)) - } - resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) - if err != nil { - return nil, err - } - return resp.body, nil -} - -func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.post(ctx, "/images/create", query, nil, headers) -} diff --git a/vendor/github.com/docker/docker/client/image_history.go b/vendor/github.com/docker/docker/client/image_history.go deleted file mode 100644 index b5bea10d8..000000000 --- a/vendor/github.com/docker/docker/client/image_history.go +++ /dev/null @@ -1,22 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/image" -) - -// ImageHistory returns the changes in an image in history format. -func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]image.HistoryResponseItem, error) { - var history []image.HistoryResponseItem - serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return history, err - } - - err = json.NewDecoder(serverResp.body).Decode(&history) - return history, err -} diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go deleted file mode 100644 index c5de42cb7..000000000 --- a/vendor/github.com/docker/docker/client/image_import.go +++ /dev/null @@ -1,40 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - "strings" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" -) - -// ImageImport creates a new image based on the source options. -// It returns the JSON content in the response body. 
-func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { - if ref != "" { - // Check if the given image name can be resolved - if _, err := reference.ParseNormalizedNamed(ref); err != nil { - return nil, err - } - } - - query := url.Values{} - query.Set("fromSrc", source.SourceName) - query.Set("repo", ref) - query.Set("tag", options.Tag) - query.Set("message", options.Message) - if options.Platform != "" { - query.Set("platform", strings.ToLower(options.Platform)) - } - for _, change := range options.Changes { - query.Add("changes", change) - } - - resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go deleted file mode 100644 index 1de10e5a0..000000000 --- a/vendor/github.com/docker/docker/client/image_inspect.go +++ /dev/null @@ -1,32 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - - "github.com/docker/docker/api/types" -) - -// ImageInspectWithRaw returns the image information and its raw representation. -func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) { - if imageID == "" { - return types.ImageInspect{}, nil, objectNotFoundError{object: "image", id: imageID} - } - serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return types.ImageInspect{}, nil, err - } - - body, err := io.ReadAll(serverResp.body) - if err != nil { - return types.ImageInspect{}, nil, err - } - - var response types.ImageInspect - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go deleted file mode 100644 index 950d51333..000000000 --- a/vendor/github.com/docker/docker/client/image_list.go +++ /dev/null @@ -1,49 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/versions" -) - -// ImageList returns a list of images in the docker host. 
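// A usage sketch (not part of this patch): the ImageList call documented just below,
// using a reference filter. The reference pattern is an assumption for demonstration.
package example

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func listAlpineImages(ctx context.Context, cli client.ImageAPIClient) error {
	images, err := cli.ImageList(ctx, types.ImageListOptions{
		Filters: filters.NewArgs(filters.Arg("reference", "alpine")),
	})
	if err != nil {
		return err
	}
	for _, img := range images {
		fmt.Println(img.ID, img.RepoTags)
	}
	return nil
}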
-func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) { - var images []types.ImageSummary - query := url.Values{} - - optionFilters := options.Filters - referenceFilters := optionFilters.Get("reference") - if versions.LessThan(cli.version, "1.25") && len(referenceFilters) > 0 { - query.Set("filter", referenceFilters[0]) - for _, filterValue := range referenceFilters { - optionFilters.Del("reference", filterValue) - } - } - if optionFilters.Len() > 0 { - //nolint:staticcheck // ignore SA1019 for old code - filterJSON, err := filters.ToParamWithVersion(cli.version, optionFilters) - if err != nil { - return images, err - } - query.Set("filters", filterJSON) - } - if options.All { - query.Set("all", "1") - } - if options.SharedSize && versions.GreaterThanOrEqualTo(cli.version, "1.42") { - query.Set("shared-size", "1") - } - - serverResp, err := cli.get(ctx, "/images/json", query, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return images, err - } - - err = json.NewDecoder(serverResp.body).Decode(&images) - return images, err -} diff --git a/vendor/github.com/docker/docker/client/image_load.go b/vendor/github.com/docker/docker/client/image_load.go deleted file mode 100644 index 91016e493..000000000 --- a/vendor/github.com/docker/docker/client/image_load.go +++ /dev/null @@ -1,29 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - - "github.com/docker/docker/api/types" -) - -// ImageLoad loads an image in the docker host from the client host. -// It's up to the caller to close the io.ReadCloser in the -// ImageLoadResponse returned by this function. -func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) { - v := url.Values{} - v.Set("quiet", "0") - if quiet { - v.Set("quiet", "1") - } - headers := map[string][]string{"Content-Type": {"application/x-tar"}} - resp, err := cli.postRaw(ctx, "/images/load", v, input, headers) - if err != nil { - return types.ImageLoadResponse{}, err - } - return types.ImageLoadResponse{ - Body: resp.body, - JSON: resp.header.Get("Content-Type") == "application/json", - }, nil -} diff --git a/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/docker/docker/client/image_prune.go deleted file mode 100644 index 56af6d7f9..000000000 --- a/vendor/github.com/docker/docker/client/image_prune.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" -) - -// ImagesPrune requests the daemon to delete unused data -func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (types.ImagesPruneReport, error) { - var report types.ImagesPruneReport - - if err := cli.NewVersionError("1.25", "image prune"); err != nil { - return report, err - } - - query, err := getFiltersQuery(pruneFilters) - if err != nil { - return report, err - } - - serverResp, err := cli.post(ctx, "/images/prune", query, nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return report, err - } - - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return report, fmt.Errorf("Error retrieving disk usage: %v", err) - } - - return report, nil -} diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go 
deleted file mode 100644 index a23975591..000000000 --- a/vendor/github.com/docker/docker/client/image_pull.go +++ /dev/null @@ -1,64 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - "strings" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/errdefs" -) - -// ImagePull requests the docker host to pull an image from a remote registry. -// It executes the privileged function if the operation is unauthorized -// and it tries one more time. -// It's up to the caller to handle the io.ReadCloser and close it properly. -// -// FIXME(vdemeester): there is currently used in a few way in docker/docker -// - if not in trusted content, ref is used to pass the whole reference, and tag is empty -// - if in trusted content, ref is used to pass the reference name, and tag for the digest -func (cli *Client) ImagePull(ctx context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) { - ref, err := reference.ParseNormalizedNamed(refStr) - if err != nil { - return nil, err - } - - query := url.Values{} - query.Set("fromImage", reference.FamiliarName(ref)) - if !options.All { - query.Set("tag", getAPITagFromNamedRef(ref)) - } - if options.Platform != "" { - query.Set("platform", strings.ToLower(options.Platform)) - } - - resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) - if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { - newAuthHeader, privilegeErr := options.PrivilegeFunc() - if privilegeErr != nil { - return nil, privilegeErr - } - resp, err = cli.tryImageCreate(ctx, query, newAuthHeader) - } - if err != nil { - return nil, err - } - return resp.body, nil -} - -// getAPITagFromNamedRef returns a tag from the specified reference. -// This function is necessary as long as the docker "server" api expects -// digests to be sent as tags and makes a distinction between the name -// and tag/digest part of a reference. -func getAPITagFromNamedRef(ref reference.Named) string { - if digested, ok := ref.(reference.Digested); ok { - return digested.Digest().String() - } - ref = reference.TagNameOnly(ref) - if tagged, ok := ref.(reference.Tagged); ok { - return tagged.Tag() - } - return "" -} diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go deleted file mode 100644 index 845580d4a..000000000 --- a/vendor/github.com/docker/docker/client/image_push.go +++ /dev/null @@ -1,54 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "errors" - "io" - "net/url" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/errdefs" -) - -// ImagePush requests the docker host to push an image to a remote registry. -// It executes the privileged function if the operation is unauthorized -// and it tries one more time. -// It's up to the caller to handle the io.ReadCloser and close it properly. 
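// A usage sketch (not part of this patch): the ImagePull call shown above. The
// returned reader streams pull progress; the pull only completes once the stream is
// drained, and the caller must close it. The image reference is an assumption for
// demonstration.
package example

import (
	"context"
	"io"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func pullAlpine(ctx context.Context, cli client.ImageAPIClient) error {
	rc, err := cli.ImagePull(ctx, "docker.io/library/alpine:3.18", types.ImagePullOptions{})
	if err != nil {
		return err
	}
	defer rc.Close()

	// Drain the progress stream; discard it here, or decode it to render
	// progress output instead.
	_, err = io.Copy(io.Discard, rc)
	return err
}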
-func (cli *Client) ImagePush(ctx context.Context, image string, options types.ImagePushOptions) (io.ReadCloser, error) { - ref, err := reference.ParseNormalizedNamed(image) - if err != nil { - return nil, err - } - - if _, isCanonical := ref.(reference.Canonical); isCanonical { - return nil, errors.New("cannot push a digest reference") - } - - name := reference.FamiliarName(ref) - query := url.Values{} - if !options.All { - ref = reference.TagNameOnly(ref) - if tagged, ok := ref.(reference.Tagged); ok { - query.Set("tag", tagged.Tag()) - } - } - - resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth) - if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { - newAuthHeader, privilegeErr := options.PrivilegeFunc() - if privilegeErr != nil { - return nil, privilegeErr - } - resp, err = cli.tryImagePush(ctx, name, query, newAuthHeader) - } - if err != nil { - return nil, err - } - return resp.body, nil -} - -func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (serverResponse, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers) -} diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go deleted file mode 100644 index 6a9fb3f41..000000000 --- a/vendor/github.com/docker/docker/client/image_remove.go +++ /dev/null @@ -1,31 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" -) - -// ImageRemove removes an image from the docker host. -func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) { - query := url.Values{} - - if options.Force { - query.Set("force", "1") - } - if !options.PruneChildren { - query.Set("noprune", "1") - } - - var dels []types.ImageDeleteResponseItem - resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return dels, err - } - - err = json.NewDecoder(resp.body).Decode(&dels) - return dels, err -} diff --git a/vendor/github.com/docker/docker/client/image_save.go b/vendor/github.com/docker/docker/client/image_save.go deleted file mode 100644 index d1314e4b2..000000000 --- a/vendor/github.com/docker/docker/client/image_save.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" -) - -// ImageSave retrieves one or more images from the docker host as an io.ReadCloser. -// It's up to the caller to store the images and close the stream. 
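// A usage sketch (not part of this patch): the ImageSave call documented just below,
// writing the exported image to a local tar file. The image reference and output path
// are assumptions for demonstration.
package example

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/client"
)

func saveImage(ctx context.Context, cli client.ImageAPIClient) error {
	rc, err := cli.ImageSave(ctx, []string{"alpine:3.18"})
	if err != nil {
		return err
	}
	defer rc.Close()

	out, err := os.Create("alpine.tar")
	if err != nil {
		return err
	}
	defer out.Close()

	_, err = io.Copy(out, rc)
	return err
}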
-func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) { - query := url.Values{ - "names": imageIDs, - } - - resp, err := cli.get(ctx, "/images/get", query, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go deleted file mode 100644 index e69fa3722..000000000 --- a/vendor/github.com/docker/docker/client/image_search.go +++ /dev/null @@ -1,53 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - "strconv" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/errdefs" -) - -// ImageSearch makes the docker host search by a term in a remote registry. -// The list of results is not sorted in any fashion. -func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) { - var results []registry.SearchResult - query := url.Values{} - query.Set("term", term) - if options.Limit > 0 { - query.Set("limit", strconv.Itoa(options.Limit)) - } - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return results, err - } - query.Set("filters", filterJSON) - } - - resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) - defer ensureReaderClosed(resp) - if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { - newAuthHeader, privilegeErr := options.PrivilegeFunc() - if privilegeErr != nil { - return results, privilegeErr - } - resp, err = cli.tryImageSearch(ctx, query, newAuthHeader) - } - if err != nil { - return results, err - } - - err = json.NewDecoder(resp.body).Decode(&results) - return results, err -} - -func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.get(ctx, "/images/search", query, headers) -} diff --git a/vendor/github.com/docker/docker/client/image_tag.go b/vendor/github.com/docker/docker/client/image_tag.go deleted file mode 100644 index 5652bfc25..000000000 --- a/vendor/github.com/docker/docker/client/image_tag.go +++ /dev/null @@ -1,37 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/distribution/reference" - "github.com/pkg/errors" -) - -// ImageTag tags an image in the docker host -func (cli *Client) ImageTag(ctx context.Context, source, target string) error { - if _, err := reference.ParseAnyReference(source); err != nil { - return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", source) - } - - ref, err := reference.ParseNormalizedNamed(target) - if err != nil { - return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", target) - } - - if _, isCanonical := ref.(reference.Canonical); isCanonical { - return errors.New("refusing to create a tag with a digest reference") - } - - ref = reference.TagNameOnly(ref) - - query := url.Values{} - query.Set("repo", reference.FamiliarName(ref)) - if tagged, ok := ref.(reference.Tagged); ok { - query.Set("tag", tagged.Tag()) - } - - resp, err := cli.post(ctx, "/images/"+source+"/tag", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git 
a/vendor/github.com/docker/docker/client/info.go b/vendor/github.com/docker/docker/client/info.go deleted file mode 100644 index c856704e2..000000000 --- a/vendor/github.com/docker/docker/client/info.go +++ /dev/null @@ -1,26 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - "net/url" - - "github.com/docker/docker/api/types" -) - -// Info returns information about the docker server. -func (cli *Client) Info(ctx context.Context) (types.Info, error) { - var info types.Info - serverResp, err := cli.get(ctx, "/info", url.Values{}, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return info, err - } - - if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil { - return info, fmt.Errorf("Error reading remote info: %v", err) - } - - return info, nil -} diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go deleted file mode 100644 index e9c1ed722..000000000 --- a/vendor/github.com/docker/docker/client/interface.go +++ /dev/null @@ -1,201 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net" - "net/http" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/events" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/image" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/api/types/volume" - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -// CommonAPIClient is the common methods between stable and experimental versions of APIClient. 
-type CommonAPIClient interface { - ConfigAPIClient - ContainerAPIClient - DistributionAPIClient - ImageAPIClient - NodeAPIClient - NetworkAPIClient - PluginAPIClient - ServiceAPIClient - SwarmAPIClient - SecretAPIClient - SystemAPIClient - VolumeAPIClient - ClientVersion() string - DaemonHost() string - HTTPClient() *http.Client - ServerVersion(ctx context.Context) (types.Version, error) - NegotiateAPIVersion(ctx context.Context) - NegotiateAPIVersionPing(types.Ping) - DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) - Dialer() func(context.Context) (net.Conn, error) - Close() error -} - -// ContainerAPIClient defines API client methods for the containers -type ContainerAPIClient interface { - ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) - ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) - ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string) (container.CreateResponse, error) - ContainerDiff(ctx context.Context, container string) ([]container.ContainerChangeResponseItem, error) - ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) - ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) - ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) - ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error - ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error - ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) - ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error) - ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error) - ContainerKill(ctx context.Context, container, signal string) error - ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) - ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) - ContainerPause(ctx context.Context, container string) error - ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error - ContainerRename(ctx context.Context, container, newContainerName string) error - ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error - ContainerRestart(ctx context.Context, container string, options container.StopOptions) error - ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) - ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error) - ContainerStatsOneShot(ctx context.Context, container string) (types.ContainerStats, error) - ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error - ContainerStop(ctx context.Context, container string, options container.StopOptions) error - ContainerTop(ctx context.Context, container string, arguments []string) (container.ContainerTopOKBody, error) - ContainerUnpause(ctx context.Context, container string) error - ContainerUpdate(ctx context.Context, container string, 
updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) - ContainerWait(ctx context.Context, container string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) - CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) - CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error - ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) -} - -// DistributionAPIClient defines API client methods for the registry -type DistributionAPIClient interface { - DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registry.DistributionInspect, error) -} - -// ImageAPIClient defines API client methods for the images -type ImageAPIClient interface { - ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) - BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) - BuildCancel(ctx context.Context, id string) error - ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) - ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error) - ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) - ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error) - ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) - ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) - ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) - ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) - ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) - ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) - ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) - ImageTag(ctx context.Context, image, ref string) error - ImagesPrune(ctx context.Context, pruneFilter filters.Args) (types.ImagesPruneReport, error) -} - -// NetworkAPIClient defines API client methods for the networks -type NetworkAPIClient interface { - NetworkConnect(ctx context.Context, network, container string, config *network.EndpointSettings) error - NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) - NetworkDisconnect(ctx context.Context, network, container string, force bool) error - NetworkInspect(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, error) - NetworkInspectWithRaw(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) - NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) - NetworkRemove(ctx context.Context, network string) error - NetworksPrune(ctx context.Context, pruneFilter filters.Args) (types.NetworksPruneReport, error) -} - -// NodeAPIClient defines API client methods for the nodes -type NodeAPIClient interface { - NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, 
[]byte, error) - NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) - NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error - NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error -} - -// PluginAPIClient defines API client methods for the plugins -type PluginAPIClient interface { - PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) - PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error - PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error - PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error - PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) - PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) - PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) - PluginSet(ctx context.Context, name string, args []string) error - PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) - PluginCreate(ctx context.Context, createContext io.Reader, options types.PluginCreateOptions) error -} - -// ServiceAPIClient defines API client methods for the services -type ServiceAPIClient interface { - ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) - ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) - ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) - ServiceRemove(ctx context.Context, serviceID string) error - ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) - ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) - TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) - TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) - TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) -} - -// SwarmAPIClient defines API client methods for the swarm -type SwarmAPIClient interface { - SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) - SwarmJoin(ctx context.Context, req swarm.JoinRequest) error - SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) - SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error - SwarmLeave(ctx context.Context, force bool) error - SwarmInspect(ctx context.Context) (swarm.Swarm, error) - SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error -} - -// SystemAPIClient defines API client methods for the system -type SystemAPIClient interface { - Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) - Info(ctx context.Context) (types.Info, error) - RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) - DiskUsage(ctx context.Context, options types.DiskUsageOptions) (types.DiskUsage, error) - Ping(ctx context.Context) (types.Ping, error) -} - -// VolumeAPIClient defines API client methods for the volumes 
-type VolumeAPIClient interface { - VolumeCreate(ctx context.Context, options volume.CreateOptions) (volume.Volume, error) - VolumeInspect(ctx context.Context, volumeID string) (volume.Volume, error) - VolumeInspectWithRaw(ctx context.Context, volumeID string) (volume.Volume, []byte, error) - VolumeList(ctx context.Context, filter filters.Args) (volume.ListResponse, error) - VolumeRemove(ctx context.Context, volumeID string, force bool) error - VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error) - VolumeUpdate(ctx context.Context, volumeID string, version swarm.Version, options volume.UpdateOptions) error -} - -// SecretAPIClient defines API client methods for secrets -type SecretAPIClient interface { - SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) - SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) - SecretRemove(ctx context.Context, id string) error - SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error) - SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error -} - -// ConfigAPIClient defines API client methods for configs -type ConfigAPIClient interface { - ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) - ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) - ConfigRemove(ctx context.Context, id string) error - ConfigInspectWithRaw(ctx context.Context, name string) (swarm.Config, []byte, error) - ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error -} diff --git a/vendor/github.com/docker/docker/client/interface_experimental.go b/vendor/github.com/docker/docker/client/interface_experimental.go deleted file mode 100644 index 402ffb512..000000000 --- a/vendor/github.com/docker/docker/client/interface_experimental.go +++ /dev/null @@ -1,18 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - - "github.com/docker/docker/api/types" -) - -type apiClientExperimental interface { - CheckpointAPIClient -} - -// CheckpointAPIClient defines API client methods for the checkpoints -type CheckpointAPIClient interface { - CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error - CheckpointDelete(ctx context.Context, container string, options types.CheckpointDeleteOptions) error - CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) -} diff --git a/vendor/github.com/docker/docker/client/interface_stable.go b/vendor/github.com/docker/docker/client/interface_stable.go deleted file mode 100644 index 5502cd742..000000000 --- a/vendor/github.com/docker/docker/client/interface_stable.go +++ /dev/null @@ -1,10 +0,0 @@ -package client // import "github.com/docker/docker/client" - -// APIClient is an interface that clients that talk with a docker server must implement. -type APIClient interface { - CommonAPIClient - apiClientExperimental -} - -// Ensure that Client always implements APIClient. 
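// A usage sketch (not part of this patch): the narrow interfaces above
// (ImageAPIClient, ContainerAPIClient, ...) let callers depend only on the methods
// they use, which keeps them easy to fake in tests. fakeImages is an assumed test
// double for demonstration.
package example

import (
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// countImages depends on ImageAPIClient rather than *client.Client, so a test can
// pass any value that implements ImageList.
func countImages(ctx context.Context, api client.ImageAPIClient) (int, error) {
	images, err := api.ImageList(ctx, types.ImageListOptions{All: true})
	if err != nil {
		return 0, err
	}
	return len(images), nil
}

// fakeImages satisfies the full interface by embedding it; only ImageList is
// implemented, any other method would panic if called.
type fakeImages struct {
	client.ImageAPIClient
	images []types.ImageSummary
}

func (f fakeImages) ImageList(_ context.Context, _ types.ImageListOptions) ([]types.ImageSummary, error) {
	return f.images, nil
}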
-var _ APIClient = &Client{} diff --git a/vendor/github.com/docker/docker/client/login.go b/vendor/github.com/docker/docker/client/login.go deleted file mode 100644 index f05852063..000000000 --- a/vendor/github.com/docker/docker/client/login.go +++ /dev/null @@ -1,25 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/registry" -) - -// RegistryLogin authenticates the docker server with a given docker registry. -// It returns unauthorizedError when the authentication fails. -func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) { - resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) - defer ensureReaderClosed(resp) - - if err != nil { - return registry.AuthenticateOKBody{}, err - } - - var response registry.AuthenticateOKBody - err = json.NewDecoder(resp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/network_connect.go b/vendor/github.com/docker/docker/client/network_connect.go deleted file mode 100644 index 571894613..000000000 --- a/vendor/github.com/docker/docker/client/network_connect.go +++ /dev/null @@ -1,19 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/network" -) - -// NetworkConnect connects a container to an existent network in the docker host. -func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error { - nc := types.NetworkConnect{ - Container: containerID, - EndpointConfig: config, - } - resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/docker/docker/client/network_create.go deleted file mode 100644 index 278d9383a..000000000 --- a/vendor/github.com/docker/docker/client/network_create.go +++ /dev/null @@ -1,25 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types" -) - -// NetworkCreate creates a new network in the docker host. -func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) { - networkCreateRequest := types.NetworkCreateRequest{ - NetworkCreate: options, - Name: name, - } - var response types.NetworkCreateResponse - serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return response, err - } - - err = json.NewDecoder(serverResp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/network_disconnect.go b/vendor/github.com/docker/docker/client/network_disconnect.go deleted file mode 100644 index dd1567665..000000000 --- a/vendor/github.com/docker/docker/client/network_disconnect.go +++ /dev/null @@ -1,15 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - - "github.com/docker/docker/api/types" -) - -// NetworkDisconnect disconnects a container from an existent network in the docker host. 
-func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error { - nd := types.NetworkDisconnect{Container: containerID, Force: force} - resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go deleted file mode 100644 index 0f90e2bb9..000000000 --- a/vendor/github.com/docker/docker/client/network_inspect.go +++ /dev/null @@ -1,49 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - "net/url" - - "github.com/docker/docker/api/types" -) - -// NetworkInspect returns the information for a specific network configured in the docker host. -func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) { - networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID, options) - return networkResource, err -} - -// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation. -func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) { - if networkID == "" { - return types.NetworkResource{}, nil, objectNotFoundError{object: "network", id: networkID} - } - var ( - networkResource types.NetworkResource - resp serverResponse - err error - ) - query := url.Values{} - if options.Verbose { - query.Set("verbose", "true") - } - if options.Scope != "" { - query.Set("scope", options.Scope) - } - resp, err = cli.get(ctx, "/networks/"+networkID, query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return networkResource, nil, err - } - - body, err := io.ReadAll(resp.body) - if err != nil { - return networkResource, nil, err - } - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&networkResource) - return networkResource, body, err -} diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go deleted file mode 100644 index ed2acb557..000000000 --- a/vendor/github.com/docker/docker/client/network_list.go +++ /dev/null @@ -1,32 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" -) - -// NetworkList returns the list of networks configured in the docker host. 
-func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { - query := url.Values{} - if options.Filters.Len() > 0 { - //nolint:staticcheck // ignore SA1019 for old code - filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - var networkResources []types.NetworkResource - resp, err := cli.get(ctx, "/networks", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return networkResources, err - } - err = json.NewDecoder(resp.body).Decode(&networkResources) - return networkResources, err -} diff --git a/vendor/github.com/docker/docker/client/network_prune.go b/vendor/github.com/docker/docker/client/network_prune.go deleted file mode 100644 index cebb18821..000000000 --- a/vendor/github.com/docker/docker/client/network_prune.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" -) - -// NetworksPrune requests the daemon to delete unused networks -func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (types.NetworksPruneReport, error) { - var report types.NetworksPruneReport - - if err := cli.NewVersionError("1.25", "network prune"); err != nil { - return report, err - } - - query, err := getFiltersQuery(pruneFilters) - if err != nil { - return report, err - } - - serverResp, err := cli.post(ctx, "/networks/prune", query, nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return report, err - } - - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return report, fmt.Errorf("Error retrieving network prune report: %v", err) - } - - return report, nil -} diff --git a/vendor/github.com/docker/docker/client/network_remove.go b/vendor/github.com/docker/docker/client/network_remove.go deleted file mode 100644 index 9d6c6cef0..000000000 --- a/vendor/github.com/docker/docker/client/network_remove.go +++ /dev/null @@ -1,10 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// NetworkRemove removes an existent network from the docker host. -func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { - resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) - defer ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/docker/docker/client/node_inspect.go deleted file mode 100644 index 95ab9b1be..000000000 --- a/vendor/github.com/docker/docker/client/node_inspect.go +++ /dev/null @@ -1,32 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - - "github.com/docker/docker/api/types/swarm" -) - -// NodeInspectWithRaw returns the node information. 
-func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) { - if nodeID == "" { - return swarm.Node{}, nil, objectNotFoundError{object: "node", id: nodeID} - } - serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return swarm.Node{}, nil, err - } - - body, err := io.ReadAll(serverResp.body) - if err != nil { - return swarm.Node{}, nil, err - } - - var response swarm.Node - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go deleted file mode 100644 index c212906bc..000000000 --- a/vendor/github.com/docker/docker/client/node_list.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" -) - -// NodeList returns the list of nodes. -func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/nodes", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return nil, err - } - - var nodes []swarm.Node - err = json.NewDecoder(resp.body).Decode(&nodes) - return nodes, err -} diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/docker/docker/client/node_remove.go deleted file mode 100644 index e44436deb..000000000 --- a/vendor/github.com/docker/docker/client/node_remove.go +++ /dev/null @@ -1,20 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types" -) - -// NodeRemove removes a Node. -func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error { - query := url.Values{} - if options.Force { - query.Set("force", "1") - } - - resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil) - defer ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/node_update.go b/vendor/github.com/docker/docker/client/node_update.go deleted file mode 100644 index 0d0fc3b78..000000000 --- a/vendor/github.com/docker/docker/client/node_update.go +++ /dev/null @@ -1,17 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types/swarm" -) - -// NodeUpdate updates a Node. 
-func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error { - query := url.Values{} - query.Set("version", version.String()) - resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/options.go b/vendor/github.com/docker/docker/client/options.go deleted file mode 100644 index 099ad4184..000000000 --- a/vendor/github.com/docker/docker/client/options.go +++ /dev/null @@ -1,210 +0,0 @@ -package client - -import ( - "context" - "net" - "net/http" - "os" - "path/filepath" - "time" - - "github.com/docker/go-connections/sockets" - "github.com/docker/go-connections/tlsconfig" - "github.com/pkg/errors" -) - -// Opt is a configuration option to initialize a client -type Opt func(*Client) error - -// FromEnv configures the client with values from environment variables. -// -// FromEnv uses the following environment variables: -// -// DOCKER_HOST (EnvOverrideHost) to set the URL to the docker server. -// -// DOCKER_API_VERSION (EnvOverrideAPIVersion) to set the version of the API to -// use, leave empty for latest. -// -// DOCKER_CERT_PATH (EnvOverrideCertPath) to specify the directory from which to -// load the TLS certificates (ca.pem, cert.pem, key.pem). -// -// DOCKER_TLS_VERIFY (EnvTLSVerify) to enable or disable TLS verification (off by -// default). -func FromEnv(c *Client) error { - ops := []Opt{ - WithTLSClientConfigFromEnv(), - WithHostFromEnv(), - WithVersionFromEnv(), - } - for _, op := range ops { - if err := op(c); err != nil { - return err - } - } - return nil -} - -// WithDialContext applies the dialer to the client transport. This can be -// used to set the Timeout and KeepAlive settings of the client. -func WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) Opt { - return func(c *Client) error { - if transport, ok := c.client.Transport.(*http.Transport); ok { - transport.DialContext = dialContext - return nil - } - return errors.Errorf("cannot apply dialer to transport: %T", c.client.Transport) - } -} - -// WithHost overrides the client host with the specified one. -func WithHost(host string) Opt { - return func(c *Client) error { - hostURL, err := ParseHostURL(host) - if err != nil { - return err - } - c.host = host - c.proto = hostURL.Scheme - c.addr = hostURL.Host - c.basePath = hostURL.Path - if transport, ok := c.client.Transport.(*http.Transport); ok { - return sockets.ConfigureTransport(transport, c.proto, c.addr) - } - return errors.Errorf("cannot apply host to transport: %T", c.client.Transport) - } -} - -// WithHostFromEnv overrides the client host with the host specified in the -// DOCKER_HOST (EnvOverrideHost) environment variable. If DOCKER_HOST is not set, -// or set to an empty value, the host is not modified. 
-func WithHostFromEnv() Opt { - return func(c *Client) error { - if host := os.Getenv(EnvOverrideHost); host != "" { - return WithHost(host)(c) - } - return nil - } -} - -// WithHTTPClient overrides the client http client with the specified one -func WithHTTPClient(client *http.Client) Opt { - return func(c *Client) error { - if client != nil { - c.client = client - } - return nil - } -} - -// WithTimeout configures the time limit for requests made by the HTTP client -func WithTimeout(timeout time.Duration) Opt { - return func(c *Client) error { - c.client.Timeout = timeout - return nil - } -} - -// WithHTTPHeaders overrides the client default http headers -func WithHTTPHeaders(headers map[string]string) Opt { - return func(c *Client) error { - c.customHTTPHeaders = headers - return nil - } -} - -// WithScheme overrides the client scheme with the specified one -func WithScheme(scheme string) Opt { - return func(c *Client) error { - c.scheme = scheme - return nil - } -} - -// WithTLSClientConfig applies a tls config to the client transport. -func WithTLSClientConfig(cacertPath, certPath, keyPath string) Opt { - return func(c *Client) error { - opts := tlsconfig.Options{ - CAFile: cacertPath, - CertFile: certPath, - KeyFile: keyPath, - ExclusiveRootPools: true, - } - config, err := tlsconfig.Client(opts) - if err != nil { - return errors.Wrap(err, "failed to create tls config") - } - if transport, ok := c.client.Transport.(*http.Transport); ok { - transport.TLSClientConfig = config - return nil - } - return errors.Errorf("cannot apply tls config to transport: %T", c.client.Transport) - } -} - -// WithTLSClientConfigFromEnv configures the client's TLS settings with the -// settings in the DOCKER_CERT_PATH and DOCKER_TLS_VERIFY environment variables. -// If DOCKER_CERT_PATH is not set or empty, TLS configuration is not modified. -// -// WithTLSClientConfigFromEnv uses the following environment variables: -// -// DOCKER_CERT_PATH (EnvOverrideCertPath) to specify the directory from which to -// load the TLS certificates (ca.pem, cert.pem, key.pem). -// -// DOCKER_TLS_VERIFY (EnvTLSVerify) to enable or disable TLS verification (off by -// default). -func WithTLSClientConfigFromEnv() Opt { - return func(c *Client) error { - dockerCertPath := os.Getenv(EnvOverrideCertPath) - if dockerCertPath == "" { - return nil - } - options := tlsconfig.Options{ - CAFile: filepath.Join(dockerCertPath, "ca.pem"), - CertFile: filepath.Join(dockerCertPath, "cert.pem"), - KeyFile: filepath.Join(dockerCertPath, "key.pem"), - InsecureSkipVerify: os.Getenv(EnvTLSVerify) == "", - } - tlsc, err := tlsconfig.Client(options) - if err != nil { - return err - } - - c.client = &http.Client{ - Transport: &http.Transport{TLSClientConfig: tlsc}, - CheckRedirect: CheckRedirect, - } - return nil - } -} - -// WithVersion overrides the client version with the specified one. If an empty -// version is specified, the value will be ignored to allow version negotiation. -func WithVersion(version string) Opt { - return func(c *Client) error { - if version != "" { - c.version = version - c.manualOverride = true - } - return nil - } -} - -// WithVersionFromEnv overrides the client version with the version specified in -// the DOCKER_API_VERSION environment variable. If DOCKER_API_VERSION is not set, -// the version is not modified. 
-func WithVersionFromEnv() Opt { - return func(c *Client) error { - return WithVersion(os.Getenv(EnvOverrideAPIVersion))(c) - } -} - -// WithAPIVersionNegotiation enables automatic API version negotiation for the client. -// With this option enabled, the client automatically negotiates the API version -// to use when making requests. API version negotiation is performed on the first -// request; subsequent requests will not re-negotiate. -func WithAPIVersionNegotiation() Opt { - return func(c *Client) error { - c.negotiateVersion = true - return nil - } -} diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go deleted file mode 100644 index 27e8695cb..000000000 --- a/vendor/github.com/docker/docker/client/ping.go +++ /dev/null @@ -1,75 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/http" - "path" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/errdefs" -) - -// Ping pings the server and returns the value of the "Docker-Experimental", -// "Builder-Version", "OS-Type" & "API-Version" headers. It attempts to use -// a HEAD request on the endpoint, but falls back to GET if HEAD is not supported -// by the daemon. -func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { - var ping types.Ping - - // Using cli.buildRequest() + cli.doRequest() instead of cli.sendRequest() - // because ping requests are used during API version negotiation, so we want - // to hit the non-versioned /_ping endpoint, not /v1.xx/_ping - req, err := cli.buildRequest(http.MethodHead, path.Join(cli.basePath, "/_ping"), nil, nil) - if err != nil { - return ping, err - } - serverResp, err := cli.doRequest(ctx, req) - if err == nil { - defer ensureReaderClosed(serverResp) - switch serverResp.statusCode { - case http.StatusOK, http.StatusInternalServerError: - // Server handled the request, so parse the response - return parsePingResponse(cli, serverResp) - } - } else if IsErrConnectionFailed(err) { - return ping, err - } - - req, err = cli.buildRequest(http.MethodGet, path.Join(cli.basePath, "/_ping"), nil, nil) - if err != nil { - return ping, err - } - serverResp, err = cli.doRequest(ctx, req) - defer ensureReaderClosed(serverResp) - if err != nil { - return ping, err - } - return parsePingResponse(cli, serverResp) -} - -func parsePingResponse(cli *Client, resp serverResponse) (types.Ping, error) { - var ping types.Ping - if resp.header == nil { - err := cli.checkResponseErr(resp) - return ping, errdefs.FromStatusCode(err, resp.statusCode) - } - ping.APIVersion = resp.header.Get("API-Version") - ping.OSType = resp.header.Get("OSType") - if resp.header.Get("Docker-Experimental") == "true" { - ping.Experimental = true - } - if bv := resp.header.Get("Builder-Version"); bv != "" { - ping.BuilderVersion = types.BuilderVersion(bv) - } - if si := resp.header.Get("Swarm"); si != "" { - parts := strings.SplitN(si, "/", 2) - ping.SwarmStatus = &swarm.Status{ - NodeState: swarm.LocalNodeState(parts[0]), - ControlAvailable: len(parts) == 2 && parts[1] == "manager", - } - } - err := cli.checkResponseErr(resp) - return ping, errdefs.FromStatusCode(err, resp.statusCode) -} diff --git a/vendor/github.com/docker/docker/client/plugin_create.go b/vendor/github.com/docker/docker/client/plugin_create.go deleted file mode 100644 index b95dbaf68..000000000 --- a/vendor/github.com/docker/docker/client/plugin_create.go +++ /dev/null @@ -1,23 +0,0 @@ 
-package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/http" - "net/url" - - "github.com/docker/docker/api/types" -) - -// PluginCreate creates a plugin -func (cli *Client) PluginCreate(ctx context.Context, createContext io.Reader, createOptions types.PluginCreateOptions) error { - headers := http.Header(make(map[string][]string)) - headers.Set("Content-Type", "application/x-tar") - - query := url.Values{} - query.Set("name", createOptions.RepoName) - - resp, err := cli.postRaw(ctx, "/plugins/create", query, createContext, headers) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/plugin_disable.go b/vendor/github.com/docker/docker/client/plugin_disable.go deleted file mode 100644 index 01f6574f9..000000000 --- a/vendor/github.com/docker/docker/client/plugin_disable.go +++ /dev/null @@ -1,19 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types" -) - -// PluginDisable disables a plugin -func (cli *Client) PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error { - query := url.Values{} - if options.Force { - query.Set("force", "1") - } - resp, err := cli.post(ctx, "/plugins/"+name+"/disable", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/plugin_enable.go b/vendor/github.com/docker/docker/client/plugin_enable.go deleted file mode 100644 index 736da48bd..000000000 --- a/vendor/github.com/docker/docker/client/plugin_enable.go +++ /dev/null @@ -1,19 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - "strconv" - - "github.com/docker/docker/api/types" -) - -// PluginEnable enables a plugin -func (cli *Client) PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error { - query := url.Values{} - query.Set("timeout", strconv.Itoa(options.Timeout)) - - resp, err := cli.post(ctx, "/plugins/"+name+"/enable", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go b/vendor/github.com/docker/docker/client/plugin_inspect.go deleted file mode 100644 index f09e46066..000000000 --- a/vendor/github.com/docker/docker/client/plugin_inspect.go +++ /dev/null @@ -1,31 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - - "github.com/docker/docker/api/types" -) - -// PluginInspectWithRaw inspects an existing plugin -func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) { - if name == "" { - return nil, nil, objectNotFoundError{object: "plugin", id: name} - } - resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return nil, nil, err - } - - body, err := io.ReadAll(resp.body) - if err != nil { - return nil, nil, err - } - var p types.Plugin - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&p) - return &p, body, err -} diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go deleted file mode 100644 index 012afe61c..000000000 --- a/vendor/github.com/docker/docker/client/plugin_install.go +++ /dev/null @@ -1,113 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - 
"encoding/json" - "io" - "net/url" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/errdefs" - "github.com/pkg/errors" -) - -// PluginInstall installs a plugin -func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { - query := url.Values{} - if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { - return nil, errors.Wrap(err, "invalid remote reference") - } - query.Set("remote", options.RemoteRef) - - privileges, err := cli.checkPluginPermissions(ctx, query, options) - if err != nil { - return nil, err - } - - // set name for plugin pull, if empty should default to remote reference - query.Set("name", name) - - resp, err := cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth) - if err != nil { - return nil, err - } - - name = resp.header.Get("Docker-Plugin-Name") - - pr, pw := io.Pipe() - go func() { // todo: the client should probably be designed more around the actual api - _, err := io.Copy(pw, resp.body) - if err != nil { - pw.CloseWithError(err) - return - } - defer func() { - if err != nil { - delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil) - ensureReaderClosed(delResp) - } - }() - if len(options.Args) > 0 { - if err := cli.PluginSet(ctx, name, options.Args); err != nil { - pw.CloseWithError(err) - return - } - } - - if options.Disabled { - pw.Close() - return - } - - enableErr := cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0}) - pw.CloseWithError(enableErr) - }() - return pr, nil -} - -func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.get(ctx, "/plugins/privileges", query, headers) -} - -func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (serverResponse, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.post(ctx, "/plugins/pull", query, privileges, headers) -} - -func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) { - resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) - if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { - // todo: do inspect before to check existing name before checking privileges - newAuthHeader, privilegeErr := options.PrivilegeFunc() - if privilegeErr != nil { - ensureReaderClosed(resp) - return nil, privilegeErr - } - options.RegistryAuth = newAuthHeader - resp, err = cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) - } - if err != nil { - ensureReaderClosed(resp) - return nil, err - } - - var privileges types.PluginPrivileges - if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil { - ensureReaderClosed(resp) - return nil, err - } - ensureReaderClosed(resp) - - if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 { - accept, err := options.AcceptPermissionsFunc(privileges) - if err != nil { - return nil, err - } - if !accept { - return nil, pluginPermissionDenied{options.RemoteRef} - } - } - return privileges, nil -} diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go deleted file mode 100644 index 2091a054d..000000000 --- 
a/vendor/github.com/docker/docker/client/plugin_list.go +++ /dev/null @@ -1,33 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" -) - -// PluginList returns the installed plugins -func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) { - var plugins types.PluginsListResponse - query := url.Values{} - - if filter.Len() > 0 { - //nolint:staticcheck // ignore SA1019 for old code - filterJSON, err := filters.ToParamWithVersion(cli.version, filter) - if err != nil { - return plugins, err - } - query.Set("filters", filterJSON) - } - resp, err := cli.get(ctx, "/plugins", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return plugins, err - } - - err = json.NewDecoder(resp.body).Decode(&plugins) - return plugins, err -} diff --git a/vendor/github.com/docker/docker/client/plugin_push.go b/vendor/github.com/docker/docker/client/plugin_push.go deleted file mode 100644 index d20bfe844..000000000 --- a/vendor/github.com/docker/docker/client/plugin_push.go +++ /dev/null @@ -1,16 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" -) - -// PluginPush pushes a plugin to a registry -func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, headers) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/docker/docker/client/plugin_remove.go deleted file mode 100644 index 4cd66958c..000000000 --- a/vendor/github.com/docker/docker/client/plugin_remove.go +++ /dev/null @@ -1,20 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types" -) - -// PluginRemove removes a plugin -func (cli *Client) PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error { - query := url.Values{} - if options.Force { - query.Set("force", "1") - } - - resp, err := cli.delete(ctx, "/plugins/"+name, query, nil) - defer ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/plugin_set.go b/vendor/github.com/docker/docker/client/plugin_set.go deleted file mode 100644 index dcf5752ca..000000000 --- a/vendor/github.com/docker/docker/client/plugin_set.go +++ /dev/null @@ -1,12 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" -) - -// PluginSet modifies settings for an existing plugin -func (cli *Client) PluginSet(ctx context.Context, name string, args []string) error { - resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, args, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/plugin_upgrade.go b/vendor/github.com/docker/docker/client/plugin_upgrade.go deleted file mode 100644 index 115cea945..000000000 --- a/vendor/github.com/docker/docker/client/plugin_upgrade.go +++ /dev/null @@ -1,39 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/pkg/errors" -) - -// PluginUpgrade upgrades a 
plugin -func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { - if err := cli.NewVersionError("1.26", "plugin upgrade"); err != nil { - return nil, err - } - query := url.Values{} - if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { - return nil, errors.Wrap(err, "invalid remote reference") - } - query.Set("remote", options.RemoteRef) - - privileges, err := cli.checkPluginPermissions(ctx, query, options) - if err != nil { - return nil, err - } - - resp, err := cli.tryPluginUpgrade(ctx, query, privileges, name, options.RegistryAuth) - if err != nil { - return nil, err - } - return resp.body, nil -} - -func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, headers) -} diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go deleted file mode 100644 index c799095c1..000000000 --- a/vendor/github.com/docker/docker/client/request.go +++ /dev/null @@ -1,277 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net" - "net/http" - "net/url" - "os" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/versions" - "github.com/docker/docker/errdefs" - "github.com/pkg/errors" -) - -// serverResponse is a wrapper for http API responses. -type serverResponse struct { - body io.ReadCloser - header http.Header - statusCode int - reqURL *url.URL -} - -// head sends an http request to the docker API using the method HEAD. -func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { - return cli.sendRequest(ctx, http.MethodHead, path, query, nil, headers) -} - -// get sends an http request to the docker API using the method GET with a specific Go context. -func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { - return cli.sendRequest(ctx, http.MethodGet, path, query, nil, headers) -} - -// post sends an http request to the docker API using the method POST with a specific Go context. -func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { - body, headers, err := encodeBody(obj, headers) - if err != nil { - return serverResponse{}, err - } - return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) -} - -func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { - return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) -} - -func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { - body, headers, err := encodeBody(obj, headers) - if err != nil { - return serverResponse{}, err - } - return cli.sendRequest(ctx, http.MethodPut, path, query, body, headers) -} - -// putRaw sends an http request to the docker API using the method PUT. 
-func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { - return cli.sendRequest(ctx, http.MethodPut, path, query, body, headers) -} - -// delete sends an http request to the docker API using the method DELETE. -func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { - return cli.sendRequest(ctx, http.MethodDelete, path, query, nil, headers) -} - -type headers map[string][]string - -func encodeBody(obj interface{}, headers headers) (io.Reader, headers, error) { - if obj == nil { - return nil, headers, nil - } - - body, err := encodeData(obj) - if err != nil { - return nil, headers, err - } - if headers == nil { - headers = make(map[string][]string) - } - headers["Content-Type"] = []string{"application/json"} - return body, headers, nil -} - -func (cli *Client) buildRequest(method, path string, body io.Reader, headers headers) (*http.Request, error) { - expectedPayload := (method == http.MethodPost || method == http.MethodPut) - if expectedPayload && body == nil { - body = bytes.NewReader([]byte{}) - } - - req, err := http.NewRequest(method, path, body) - if err != nil { - return nil, err - } - req = cli.addHeaders(req, headers) - - if cli.proto == "unix" || cli.proto == "npipe" { - // For local communications, it doesn't matter what the host is. We just - // need a valid and meaningful host name. (See #189) - req.Host = "docker" - } - - req.URL.Host = cli.addr - req.URL.Scheme = cli.scheme - - if expectedPayload && req.Header.Get("Content-Type") == "" { - req.Header.Set("Content-Type", "text/plain") - } - return req, nil -} - -func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers headers) (serverResponse, error) { - req, err := cli.buildRequest(method, cli.getAPIPath(ctx, path, query), body, headers) - if err != nil { - return serverResponse{}, err - } - - resp, err := cli.doRequest(ctx, req) - switch { - case errors.Is(err, context.Canceled): - return serverResponse{}, errdefs.Cancelled(err) - case errors.Is(err, context.DeadlineExceeded): - return serverResponse{}, errdefs.Deadline(err) - case err == nil: - err = cli.checkResponseErr(resp) - } - return resp, errdefs.FromStatusCode(err, resp.statusCode) -} - -func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) { - serverResp := serverResponse{statusCode: -1, reqURL: req.URL} - - req = req.WithContext(ctx) - resp, err := cli.client.Do(req) - if err != nil { - if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") { - return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err) - } - - if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") { - return serverResp, errors.Wrap(err, "the server probably has client authentication (--tlsverify) enabled; check your TLS client certification settings") - } - - // Don't decorate context sentinel errors; users may be comparing to - // them directly. 
- if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - return serverResp, err - } - - if nErr, ok := err.(*url.Error); ok { - if nErr, ok := nErr.Err.(*net.OpError); ok { - if os.IsPermission(nErr.Err) { - return serverResp, errors.Wrapf(err, "permission denied while trying to connect to the Docker daemon socket at %v", cli.host) - } - } - } - - if err, ok := err.(net.Error); ok { - if err.Timeout() { - return serverResp, ErrorConnectionFailed(cli.host) - } - if strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") { - return serverResp, ErrorConnectionFailed(cli.host) - } - } - - // Although there's not a strongly typed error for this in go-winio, - // lots of people are using the default configuration for the docker - // daemon on Windows where the daemon is listening on a named pipe - // `//./pipe/docker_engine, and the client must be running elevated. - // Give users a clue rather than the not-overly useful message - // such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.26/info: - // open //./pipe/docker_engine: The system cannot find the file specified.`. - // Note we can't string compare "The system cannot find the file specified" as - // this is localised - for example in French the error would be - // `open //./pipe/docker_engine: Le fichier spécifié est introuvable.` - if strings.Contains(err.Error(), `open //./pipe/docker_engine`) { - // Checks if client is running with elevated privileges - if f, elevatedErr := os.Open("\\\\.\\PHYSICALDRIVE0"); elevatedErr == nil { - err = errors.Wrap(err, "in the default daemon configuration on Windows, the docker client must be run with elevated privileges to connect") - } else { - f.Close() - err = errors.Wrap(err, "this error may indicate that the docker daemon is not running") - } - } - - return serverResp, errors.Wrap(err, "error during connect") - } - - if resp != nil { - serverResp.statusCode = resp.StatusCode - serverResp.body = resp.Body - serverResp.header = resp.Header - } - return serverResp, nil -} - -func (cli *Client) checkResponseErr(serverResp serverResponse) error { - if serverResp.statusCode >= 200 && serverResp.statusCode < 400 { - return nil - } - - var body []byte - var err error - if serverResp.body != nil { - bodyMax := 1 * 1024 * 1024 // 1 MiB - bodyR := &io.LimitedReader{ - R: serverResp.body, - N: int64(bodyMax), - } - body, err = io.ReadAll(bodyR) - if err != nil { - return err - } - if bodyR.N == 0 { - return fmt.Errorf("request returned %s with a message (> %d bytes) for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), bodyMax, serverResp.reqURL) - } - } - if len(body) == 0 { - return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL) - } - - var ct string - if serverResp.header != nil { - ct = serverResp.header.Get("Content-Type") - } - - var errorMessage string - if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && ct == "application/json" { - var errorResponse types.ErrorResponse - if err := json.Unmarshal(body, &errorResponse); err != nil { - return errors.Wrap(err, "Error reading JSON") - } - errorMessage = strings.TrimSpace(errorResponse.Message) - } else { - errorMessage = strings.TrimSpace(string(body)) - } - - return errors.Wrap(errors.New(errorMessage), "Error response from daemon") -} - 
-func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request { - // Add CLI Config's HTTP Headers BEFORE we set the Docker headers - // then the user can't change OUR headers - for k, v := range cli.customHTTPHeaders { - if versions.LessThan(cli.version, "1.25") && http.CanonicalHeaderKey(k) == "User-Agent" { - continue - } - req.Header.Set(k, v) - } - - for k, v := range headers { - req.Header[http.CanonicalHeaderKey(k)] = v - } - return req -} - -func encodeData(data interface{}) (*bytes.Buffer, error) { - params := bytes.NewBuffer(nil) - if data != nil { - if err := json.NewEncoder(params).Encode(data); err != nil { - return nil, err - } - } - return params, nil -} - -func ensureReaderClosed(response serverResponse) { - if response.body != nil { - // Drain up to 512 bytes and close the body to let the Transport reuse the connection - io.CopyN(io.Discard, response.body, 512) - response.body.Close() - } -} diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go deleted file mode 100644 index c65d38a19..000000000 --- a/vendor/github.com/docker/docker/client/secret_create.go +++ /dev/null @@ -1,25 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" -) - -// SecretCreate creates a new secret. -func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { - var response types.SecretCreateResponse - if err := cli.NewVersionError("1.25", "secret create"); err != nil { - return response, err - } - resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil) - defer ensureReaderClosed(resp) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go deleted file mode 100644 index 5906874b1..000000000 --- a/vendor/github.com/docker/docker/client/secret_inspect.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - - "github.com/docker/docker/api/types/swarm" -) - -// SecretInspectWithRaw returns the secret information with raw data -func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) { - if err := cli.NewVersionError("1.25", "secret inspect"); err != nil { - return swarm.Secret{}, nil, err - } - if id == "" { - return swarm.Secret{}, nil, objectNotFoundError{object: "secret", id: id} - } - resp, err := cli.get(ctx, "/secrets/"+id, nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return swarm.Secret{}, nil, err - } - - body, err := io.ReadAll(resp.body) - if err != nil { - return swarm.Secret{}, nil, err - } - - var secret swarm.Secret - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&secret) - - return secret, body, err -} diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go deleted file mode 100644 index a0289c9f4..000000000 --- a/vendor/github.com/docker/docker/client/secret_list.go +++ /dev/null @@ -1,38 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - 
"github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" -) - -// SecretList returns the list of secrets. -func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { - if err := cli.NewVersionError("1.25", "secret list"); err != nil { - return nil, err - } - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/secrets", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return nil, err - } - - var secrets []swarm.Secret - err = json.NewDecoder(resp.body).Decode(&secrets) - return secrets, err -} diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go deleted file mode 100644 index f47f68b6e..000000000 --- a/vendor/github.com/docker/docker/client/secret_remove.go +++ /dev/null @@ -1,13 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// SecretRemove removes a secret. -func (cli *Client) SecretRemove(ctx context.Context, id string) error { - if err := cli.NewVersionError("1.25", "secret remove"); err != nil { - return err - } - resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) - defer ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/docker/docker/client/secret_update.go deleted file mode 100644 index 2e939e8ce..000000000 --- a/vendor/github.com/docker/docker/client/secret_update.go +++ /dev/null @@ -1,20 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types/swarm" -) - -// SecretUpdate attempts to update a secret. -func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { - if err := cli.NewVersionError("1.25", "secret update"); err != nil { - return err - } - query := url.Values{} - query.Set("version", version.String()) - resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go deleted file mode 100644 index 23024d0f8..000000000 --- a/vendor/github.com/docker/docker/client/service_create.go +++ /dev/null @@ -1,178 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - "strings" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -// ServiceCreate creates a new service. 
-func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) { - var response types.ServiceCreateResponse - headers := map[string][]string{ - "version": {cli.version}, - } - - if options.EncodedRegistryAuth != "" { - headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth} - } - - // Make sure containerSpec is not nil when no runtime is set or the runtime is set to container - if service.TaskTemplate.ContainerSpec == nil && (service.TaskTemplate.Runtime == "" || service.TaskTemplate.Runtime == swarm.RuntimeContainer) { - service.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{} - } - - if err := validateServiceSpec(service); err != nil { - return response, err - } - - // ensure that the image is tagged - var resolveWarning string - switch { - case service.TaskTemplate.ContainerSpec != nil: - if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { - service.TaskTemplate.ContainerSpec.Image = taggedImg - } - if options.QueryRegistry { - resolveWarning = resolveContainerSpecImage(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) - } - case service.TaskTemplate.PluginSpec != nil: - if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" { - service.TaskTemplate.PluginSpec.Remote = taggedImg - } - if options.QueryRegistry { - resolveWarning = resolvePluginSpecRemote(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) - } - } - - resp, err := cli.post(ctx, "/services/create", nil, service, headers) - defer ensureReaderClosed(resp) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - if resolveWarning != "" { - response.Warnings = append(response.Warnings, resolveWarning) - } - - return response, err -} - -func resolveContainerSpecImage(ctx context.Context, cli DistributionAPIClient, taskSpec *swarm.TaskSpec, encodedAuth string) string { - var warning string - if img, imgPlatforms, err := imageDigestAndPlatforms(ctx, cli, taskSpec.ContainerSpec.Image, encodedAuth); err != nil { - warning = digestWarning(taskSpec.ContainerSpec.Image) - } else { - taskSpec.ContainerSpec.Image = img - if len(imgPlatforms) > 0 { - if taskSpec.Placement == nil { - taskSpec.Placement = &swarm.Placement{} - } - taskSpec.Placement.Platforms = imgPlatforms - } - } - return warning -} - -func resolvePluginSpecRemote(ctx context.Context, cli DistributionAPIClient, taskSpec *swarm.TaskSpec, encodedAuth string) string { - var warning string - if img, imgPlatforms, err := imageDigestAndPlatforms(ctx, cli, taskSpec.PluginSpec.Remote, encodedAuth); err != nil { - warning = digestWarning(taskSpec.PluginSpec.Remote) - } else { - taskSpec.PluginSpec.Remote = img - if len(imgPlatforms) > 0 { - if taskSpec.Placement == nil { - taskSpec.Placement = &swarm.Placement{} - } - taskSpec.Placement.Platforms = imgPlatforms - } - } - return warning -} - -func imageDigestAndPlatforms(ctx context.Context, cli DistributionAPIClient, image, encodedAuth string) (string, []swarm.Platform, error) { - distributionInspect, err := cli.DistributionInspect(ctx, image, encodedAuth) - var platforms []swarm.Platform - if err != nil { - return "", nil, err - } - - imageWithDigest := imageWithDigestString(image, distributionInspect.Descriptor.Digest) - - if len(distributionInspect.Platforms) > 0 { - platforms = make([]swarm.Platform, 0, len(distributionInspect.Platforms)) - for _, p := range 
distributionInspect.Platforms { - // clear architecture field for arm. This is a temporary patch to address - // https://github.com/docker/swarmkit/issues/2294. The issue is that while - // image manifests report "arm" as the architecture, the node reports - // something like "armv7l" (includes the variant), which causes arm images - // to stop working with swarm mode. This patch removes the architecture - // constraint for arm images to ensure tasks get scheduled. - arch := p.Architecture - if strings.ToLower(arch) == "arm" { - arch = "" - } - platforms = append(platforms, swarm.Platform{ - Architecture: arch, - OS: p.OS, - }) - } - } - return imageWithDigest, platforms, err -} - -// imageWithDigestString takes an image string and a digest, and updates -// the image string if it didn't originally contain a digest. It returns -// image unmodified in other situations. -func imageWithDigestString(image string, dgst digest.Digest) string { - namedRef, err := reference.ParseNormalizedNamed(image) - if err == nil { - if _, isCanonical := namedRef.(reference.Canonical); !isCanonical { - // ensure that image gets a default tag if none is provided - img, err := reference.WithDigest(namedRef, dgst) - if err == nil { - return reference.FamiliarString(img) - } - } - } - return image -} - -// imageWithTagString takes an image string, and returns a tagged image -// string, adding a 'latest' tag if one was not provided. It returns an -// empty string if a canonical reference was provided -func imageWithTagString(image string) string { - namedRef, err := reference.ParseNormalizedNamed(image) - if err == nil { - return reference.FamiliarString(reference.TagNameOnly(namedRef)) - } - return "" -} - -// digestWarning constructs a formatted warning string using the -// image name that could not be pinned by digest. The formatting -// is hardcoded, but could me made smarter in the future -func digestWarning(image string) string { - return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image) -} - -func validateServiceSpec(s swarm.ServiceSpec) error { - if s.TaskTemplate.ContainerSpec != nil && s.TaskTemplate.PluginSpec != nil { - return errors.New("must not specify both a container spec and a plugin spec in the task template") - } - if s.TaskTemplate.PluginSpec != nil && s.TaskTemplate.Runtime != swarm.RuntimePlugin { - return errors.New("mismatched runtime with plugin spec") - } - if s.TaskTemplate.ContainerSpec != nil && (s.TaskTemplate.Runtime != "" && s.TaskTemplate.Runtime != swarm.RuntimeContainer) { - return errors.New("mismatched runtime with container spec") - } - return nil -} diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go deleted file mode 100644 index cee020c98..000000000 --- a/vendor/github.com/docker/docker/client/service_inspect.go +++ /dev/null @@ -1,37 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" -) - -// ServiceInspectWithRaw returns the service information and the raw data. 
-func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) { - if serviceID == "" { - return swarm.Service{}, nil, objectNotFoundError{object: "service", id: serviceID} - } - query := url.Values{} - query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults)) - serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return swarm.Service{}, nil, err - } - - body, err := io.ReadAll(serverResp.body) - if err != nil { - return swarm.Service{}, nil, err - } - - var response swarm.Service - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/docker/client/service_list.go b/vendor/github.com/docker/docker/client/service_list.go deleted file mode 100644 index f97ec75a5..000000000 --- a/vendor/github.com/docker/docker/client/service_list.go +++ /dev/null @@ -1,39 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" -) - -// ServiceList returns the list of services. -func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - if options.Status { - query.Set("status", "true") - } - - resp, err := cli.get(ctx, "/services", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return nil, err - } - - var services []swarm.Service - err = json.NewDecoder(resp.body).Decode(&services) - return services, err -} diff --git a/vendor/github.com/docker/docker/client/service_logs.go b/vendor/github.com/docker/docker/client/service_logs.go deleted file mode 100644 index 906fd4059..000000000 --- a/vendor/github.com/docker/docker/client/service_logs.go +++ /dev/null @@ -1,52 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - "time" - - "github.com/docker/docker/api/types" - timetypes "github.com/docker/docker/api/types/time" - "github.com/pkg/errors" -) - -// ServiceLogs returns the logs generated by a service in an io.ReadCloser. -// It's up to the caller to close the stream. 
-func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { - query := url.Values{} - if options.ShowStdout { - query.Set("stdout", "1") - } - - if options.ShowStderr { - query.Set("stderr", "1") - } - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, time.Now()) - if err != nil { - return nil, errors.Wrap(err, `invalid value for "since"`) - } - query.Set("since", ts) - } - - if options.Timestamps { - query.Set("timestamps", "1") - } - - if options.Details { - query.Set("details", "1") - } - - if options.Follow { - query.Set("follow", "1") - } - query.Set("tail", options.Tail) - - resp, err := cli.get(ctx, "/services/"+serviceID+"/logs", query, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/service_remove.go b/vendor/github.com/docker/docker/client/service_remove.go deleted file mode 100644 index 2c46326eb..000000000 --- a/vendor/github.com/docker/docker/client/service_remove.go +++ /dev/null @@ -1,10 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// ServiceRemove kills and removes a service. -func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error { - resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) - defer ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go deleted file mode 100644 index 8014b8625..000000000 --- a/vendor/github.com/docker/docker/client/service_update.go +++ /dev/null @@ -1,74 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" -) - -// ServiceUpdate updates a Service. The version number is required to avoid conflicting writes. -// It should be the value as set *before* the update. You can find this value in the Meta field -// of swarm.Service, which can be found using ServiceInspectWithRaw. 
-func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { - var ( - query = url.Values{} - response = types.ServiceUpdateResponse{} - ) - - headers := map[string][]string{ - "version": {cli.version}, - } - - if options.EncodedRegistryAuth != "" { - headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth} - } - - if options.RegistryAuthFrom != "" { - query.Set("registryAuthFrom", options.RegistryAuthFrom) - } - - if options.Rollback != "" { - query.Set("rollback", options.Rollback) - } - - query.Set("version", version.String()) - - if err := validateServiceSpec(service); err != nil { - return response, err - } - - // ensure that the image is tagged - var resolveWarning string - switch { - case service.TaskTemplate.ContainerSpec != nil: - if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { - service.TaskTemplate.ContainerSpec.Image = taggedImg - } - if options.QueryRegistry { - resolveWarning = resolveContainerSpecImage(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) - } - case service.TaskTemplate.PluginSpec != nil: - if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" { - service.TaskTemplate.PluginSpec.Remote = taggedImg - } - if options.QueryRegistry { - resolveWarning = resolvePluginSpecRemote(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) - } - } - - resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers) - defer ensureReaderClosed(resp) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - if resolveWarning != "" { - response.Warnings = append(response.Warnings, resolveWarning) - } - - return response, err -} diff --git a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go deleted file mode 100644 index 19f59dd58..000000000 --- a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types" -) - -// SwarmGetUnlockKey retrieves the swarm's unlock key. -func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) { - serverResp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return types.SwarmUnlockKeyResponse{}, err - } - - var response types.SwarmUnlockKeyResponse - err = json.NewDecoder(serverResp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/swarm_init.go b/vendor/github.com/docker/docker/client/swarm_init.go deleted file mode 100644 index da3c1637e..000000000 --- a/vendor/github.com/docker/docker/client/swarm_init.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types/swarm" -) - -// SwarmInit initializes the swarm. 
-func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) { - serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return "", err - } - - var response string - err = json.NewDecoder(serverResp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/swarm_inspect.go b/vendor/github.com/docker/docker/client/swarm_inspect.go deleted file mode 100644 index b52b67a88..000000000 --- a/vendor/github.com/docker/docker/client/swarm_inspect.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types/swarm" -) - -// SwarmInspect inspects the swarm. -func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) { - serverResp, err := cli.get(ctx, "/swarm", nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return swarm.Swarm{}, err - } - - var response swarm.Swarm - err = json.NewDecoder(serverResp.body).Decode(&response) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/swarm_join.go b/vendor/github.com/docker/docker/client/swarm_join.go deleted file mode 100644 index a1cf0455d..000000000 --- a/vendor/github.com/docker/docker/client/swarm_join.go +++ /dev/null @@ -1,14 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - - "github.com/docker/docker/api/types/swarm" -) - -// SwarmJoin joins the swarm. -func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error { - resp, err := cli.post(ctx, "/swarm/join", nil, req, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/swarm_leave.go b/vendor/github.com/docker/docker/client/swarm_leave.go deleted file mode 100644 index 90ca84b36..000000000 --- a/vendor/github.com/docker/docker/client/swarm_leave.go +++ /dev/null @@ -1,17 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" -) - -// SwarmLeave leaves the swarm. -func (cli *Client) SwarmLeave(ctx context.Context, force bool) error { - query := url.Values{} - if force { - query.Set("force", "1") - } - resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/swarm_unlock.go b/vendor/github.com/docker/docker/client/swarm_unlock.go deleted file mode 100644 index d2412f7d4..000000000 --- a/vendor/github.com/docker/docker/client/swarm_unlock.go +++ /dev/null @@ -1,14 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - - "github.com/docker/docker/api/types/swarm" -) - -// SwarmUnlock unlocks locked swarm. -func (cli *Client) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error { - serverResp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil) - ensureReaderClosed(serverResp) - return err -} diff --git a/vendor/github.com/docker/docker/client/swarm_update.go b/vendor/github.com/docker/docker/client/swarm_update.go deleted file mode 100644 index 9fde7d75e..000000000 --- a/vendor/github.com/docker/docker/client/swarm_update.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - "strconv" - - "github.com/docker/docker/api/types/swarm" -) - -// SwarmUpdate updates the swarm. 
-func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error { - query := url.Values{} - query.Set("version", version.String()) - query.Set("rotateWorkerToken", strconv.FormatBool(flags.RotateWorkerToken)) - query.Set("rotateManagerToken", strconv.FormatBool(flags.RotateManagerToken)) - query.Set("rotateManagerUnlockKey", strconv.FormatBool(flags.RotateManagerUnlockKey)) - resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/docker/docker/client/task_inspect.go deleted file mode 100644 index dde1f6c59..000000000 --- a/vendor/github.com/docker/docker/client/task_inspect.go +++ /dev/null @@ -1,32 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - - "github.com/docker/docker/api/types/swarm" -) - -// TaskInspectWithRaw returns the task information and its raw representation. -func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) { - if taskID == "" { - return swarm.Task{}, nil, objectNotFoundError{object: "task", id: taskID} - } - serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return swarm.Task{}, nil, err - } - - body, err := io.ReadAll(serverResp.body) - if err != nil { - return swarm.Task{}, nil, err - } - - var response swarm.Task - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/docker/client/task_list.go b/vendor/github.com/docker/docker/client/task_list.go deleted file mode 100644 index 4869b4449..000000000 --- a/vendor/github.com/docker/docker/client/task_list.go +++ /dev/null @@ -1,35 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" -) - -// TaskList returns the list of tasks. -func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/tasks", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return nil, err - } - - var tasks []swarm.Task - err = json.NewDecoder(resp.body).Decode(&tasks) - return tasks, err -} diff --git a/vendor/github.com/docker/docker/client/task_logs.go b/vendor/github.com/docker/docker/client/task_logs.go deleted file mode 100644 index 6222fab57..000000000 --- a/vendor/github.com/docker/docker/client/task_logs.go +++ /dev/null @@ -1,51 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - "time" - - "github.com/docker/docker/api/types" - timetypes "github.com/docker/docker/api/types/time" -) - -// TaskLogs returns the logs generated by a task in an io.ReadCloser. -// It's up to the caller to close the stream. 
-func (cli *Client) TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { - query := url.Values{} - if options.ShowStdout { - query.Set("stdout", "1") - } - - if options.ShowStderr { - query.Set("stderr", "1") - } - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, time.Now()) - if err != nil { - return nil, err - } - query.Set("since", ts) - } - - if options.Timestamps { - query.Set("timestamps", "1") - } - - if options.Details { - query.Set("details", "1") - } - - if options.Follow { - query.Set("follow", "1") - } - query.Set("tail", options.Tail) - - resp, err := cli.get(ctx, "/tasks/"+taskID+"/logs", query, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/transport.go b/vendor/github.com/docker/docker/client/transport.go deleted file mode 100644 index 554134436..000000000 --- a/vendor/github.com/docker/docker/client/transport.go +++ /dev/null @@ -1,17 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "crypto/tls" - "net/http" -) - -// resolveTLSConfig attempts to resolve the TLS configuration from the -// RoundTripper. -func resolveTLSConfig(transport http.RoundTripper) *tls.Config { - switch tr := transport.(type) { - case *http.Transport: - return tr.TLSClientConfig - default: - return nil - } -} diff --git a/vendor/github.com/docker/docker/client/utils.go b/vendor/github.com/docker/docker/client/utils.go deleted file mode 100644 index 7f3ff44eb..000000000 --- a/vendor/github.com/docker/docker/client/utils.go +++ /dev/null @@ -1,34 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "net/url" - "regexp" - - "github.com/docker/docker/api/types/filters" -) - -var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`) - -// getDockerOS returns the operating system based on the server header from the daemon. -func getDockerOS(serverHeader string) string { - var osType string - matches := headerRegexp.FindStringSubmatch(serverHeader) - if len(matches) > 0 { - osType = matches[1] - } - return osType -} - -// getFiltersQuery returns a url query with "filters" query term, based on the -// filters provided. -func getFiltersQuery(f filters.Args) (url.Values, error) { - query := url.Values{} - if f.Len() > 0 { - filterJSON, err := filters.ToJSON(f) - if err != nil { - return query, err - } - query.Set("filters", filterJSON) - } - return query, nil -} diff --git a/vendor/github.com/docker/docker/client/version.go b/vendor/github.com/docker/docker/client/version.go deleted file mode 100644 index 8f17ff4e8..000000000 --- a/vendor/github.com/docker/docker/client/version.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types" -) - -// ServerVersion returns information of the docker client and server host. 
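A minimal sketch of the ServerVersion call described above, assuming the usual FromEnv client constructor:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}

	// Decoded from GET /version into types.Version.
	v, err := cli.ServerVersion(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Printf("engine %s (API %s) on %s/%s\n", v.Version, v.APIVersion, v.Os, v.Arch)
}
```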
-func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) { - resp, err := cli.get(ctx, "/version", nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return types.Version{}, err - } - - var server types.Version - err = json.NewDecoder(resp.body).Decode(&server) - return server, err -} diff --git a/vendor/github.com/docker/docker/client/volume_create.go b/vendor/github.com/docker/docker/client/volume_create.go deleted file mode 100644 index b3b182437..000000000 --- a/vendor/github.com/docker/docker/client/volume_create.go +++ /dev/null @@ -1,20 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types/volume" -) - -// VolumeCreate creates a volume in the docker host. -func (cli *Client) VolumeCreate(ctx context.Context, options volume.CreateOptions) (volume.Volume, error) { - var vol volume.Volume - resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) - defer ensureReaderClosed(resp) - if err != nil { - return vol, err - } - err = json.NewDecoder(resp.body).Decode(&vol) - return vol, err -} diff --git a/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/docker/docker/client/volume_inspect.go deleted file mode 100644 index b3ba4e604..000000000 --- a/vendor/github.com/docker/docker/client/volume_inspect.go +++ /dev/null @@ -1,38 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io" - - "github.com/docker/docker/api/types/volume" -) - -// VolumeInspect returns the information about a specific volume in the docker host. -func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (volume.Volume, error) { - vol, _, err := cli.VolumeInspectWithRaw(ctx, volumeID) - return vol, err -} - -// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation -func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (volume.Volume, []byte, error) { - if volumeID == "" { - return volume.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID} - } - - var vol volume.Volume - resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) - defer ensureReaderClosed(resp) - if err != nil { - return vol, nil, err - } - - body, err := io.ReadAll(resp.body) - if err != nil { - return vol, nil, err - } - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&vol) - return vol, body, err -} diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go deleted file mode 100644 index d8204f8db..000000000 --- a/vendor/github.com/docker/docker/client/volume_list.go +++ /dev/null @@ -1,33 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/volume" -) - -// VolumeList returns the volumes configured in the docker host. 
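An illustrative sketch of the VolumeList call described above; note that this vendored revision still takes a filters.Args directly rather than an options struct:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}

	// Only list dangling volumes; the filter is serialized into the "filters" query term.
	resp, err := cli.VolumeList(context.Background(), filters.NewArgs(filters.Arg("dangling", "true")))
	if err != nil {
		panic(err)
	}
	for _, v := range resp.Volumes {
		fmt.Println(v.Name, v.Driver, v.Mountpoint)
	}
	if len(resp.Warnings) > 0 {
		fmt.Println("warnings:", resp.Warnings)
	}
}
```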
-func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volume.ListResponse, error) { - var volumes volume.ListResponse - query := url.Values{} - - if filter.Len() > 0 { - //nolint:staticcheck // ignore SA1019 for old code - filterJSON, err := filters.ToParamWithVersion(cli.version, filter) - if err != nil { - return volumes, err - } - query.Set("filters", filterJSON) - } - resp, err := cli.get(ctx, "/volumes", query, nil) - defer ensureReaderClosed(resp) - if err != nil { - return volumes, err - } - - err = json.NewDecoder(resp.body).Decode(&volumes) - return volumes, err -} diff --git a/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/docker/docker/client/volume_prune.go deleted file mode 100644 index 6e324708f..000000000 --- a/vendor/github.com/docker/docker/client/volume_prune.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" -) - -// VolumesPrune requests the daemon to delete unused data -func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (types.VolumesPruneReport, error) { - var report types.VolumesPruneReport - - if err := cli.NewVersionError("1.25", "volume prune"); err != nil { - return report, err - } - - query, err := getFiltersQuery(pruneFilters) - if err != nil { - return report, err - } - - serverResp, err := cli.post(ctx, "/volumes/prune", query, nil, nil) - defer ensureReaderClosed(serverResp) - if err != nil { - return report, err - } - - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return report, fmt.Errorf("Error retrieving volume prune report: %v", err) - } - - return report, nil -} diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go deleted file mode 100644 index 1f2643836..000000000 --- a/vendor/github.com/docker/docker/client/volume_remove.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types/versions" -) - -// VolumeRemove removes a volume from the docker host. -func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error { - query := url.Values{} - if versions.GreaterThanOrEqualTo(cli.version, "1.25") { - if force { - query.Set("force", "1") - } - } - resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil) - defer ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/volume_update.go b/vendor/github.com/docker/docker/client/volume_update.go deleted file mode 100644 index 33bd31e53..000000000 --- a/vendor/github.com/docker/docker/client/volume_update.go +++ /dev/null @@ -1,24 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/api/types/volume" -) - -// VolumeUpdate updates a volume. This only works for Cluster Volumes, and -// only some fields can be updated. 
-func (cli *Client) VolumeUpdate(ctx context.Context, volumeID string, version swarm.Version, options volume.UpdateOptions) error { - if err := cli.NewVersionError("1.42", "volume update"); err != nil { - return err - } - - query := url.Values{} - query.Set("version", version.String()) - - resp, err := cli.put(ctx, "/volumes/"+volumeID, query, options, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/errdefs/defs.go b/vendor/github.com/docker/docker/errdefs/defs.go deleted file mode 100644 index 61e7456b4..000000000 --- a/vendor/github.com/docker/docker/errdefs/defs.go +++ /dev/null @@ -1,69 +0,0 @@ -package errdefs // import "github.com/docker/docker/errdefs" - -// ErrNotFound signals that the requested object doesn't exist -type ErrNotFound interface { - NotFound() -} - -// ErrInvalidParameter signals that the user input is invalid -type ErrInvalidParameter interface { - InvalidParameter() -} - -// ErrConflict signals that some internal state conflicts with the requested action and can't be performed. -// A change in state should be able to clear this error. -type ErrConflict interface { - Conflict() -} - -// ErrUnauthorized is used to signify that the user is not authorized to perform a specific action -type ErrUnauthorized interface { - Unauthorized() -} - -// ErrUnavailable signals that the requested action/subsystem is not available. -type ErrUnavailable interface { - Unavailable() -} - -// ErrForbidden signals that the requested action cannot be performed under any circumstances. -// When a ErrForbidden is returned, the caller should never retry the action. -type ErrForbidden interface { - Forbidden() -} - -// ErrSystem signals that some internal error occurred. -// An example of this would be a failed mount request. -type ErrSystem interface { - System() -} - -// ErrNotModified signals that an action can't be performed because it's already in the desired state -type ErrNotModified interface { - NotModified() -} - -// ErrNotImplemented signals that the requested action/feature is not implemented on the system as configured. -type ErrNotImplemented interface { - NotImplemented() -} - -// ErrUnknown signals that the kind of error that occurred is not known. -type ErrUnknown interface { - Unknown() -} - -// ErrCancelled signals that the action was cancelled. -type ErrCancelled interface { - Cancelled() -} - -// ErrDeadline signals that the deadline was reached before the action completed. -type ErrDeadline interface { - DeadlineExceeded() -} - -// ErrDataLoss indicates that data was lost or there is data corruption. -type ErrDataLoss interface { - DataLoss() -} diff --git a/vendor/github.com/docker/docker/errdefs/doc.go b/vendor/github.com/docker/docker/errdefs/doc.go deleted file mode 100644 index c211f174f..000000000 --- a/vendor/github.com/docker/docker/errdefs/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package errdefs defines a set of error interfaces that packages should use for communicating classes of errors. -// Errors that cross the package boundary should implement one (and only one) of these interfaces. -// -// Packages should not reference these interfaces directly, only implement them. -// To check if a particular error implements one of these interfaces, there are helper -// functions provided (e.g. `Is`) which can be used rather than asserting the interfaces directly. -// If you must assert on these interfaces, be sure to check the causal chain (`err.Cause()`). 
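A short sketch of the pattern this doc comment describes, using the NotFound and IsNotFound helpers defined later in this package: the producing code wraps its error once, and callers branch with the Is* helpers instead of asserting the interfaces themselves.

```go
package main

import (
	"fmt"

	"github.com/docker/docker/errdefs"
)

// lookup is a stand-in for any function that wants to signal "not found".
func lookup(name string) error {
	return errdefs.NotFound(fmt.Errorf("no such object: %s", name))
}

func main() {
	err := lookup("example")
	// IsNotFound walks the causal chain, so wrapped errors are still recognized.
	if errdefs.IsNotFound(err) {
		fmt.Println("treat as 404:", err)
	}
}
```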
-package errdefs // import "github.com/docker/docker/errdefs" diff --git a/vendor/github.com/docker/docker/errdefs/helpers.go b/vendor/github.com/docker/docker/errdefs/helpers.go deleted file mode 100644 index fe06fb6f7..000000000 --- a/vendor/github.com/docker/docker/errdefs/helpers.go +++ /dev/null @@ -1,279 +0,0 @@ -package errdefs // import "github.com/docker/docker/errdefs" - -import "context" - -type errNotFound struct{ error } - -func (errNotFound) NotFound() {} - -func (e errNotFound) Cause() error { - return e.error -} - -func (e errNotFound) Unwrap() error { - return e.error -} - -// NotFound is a helper to create an error of the class with the same name from any error type -func NotFound(err error) error { - if err == nil || IsNotFound(err) { - return err - } - return errNotFound{err} -} - -type errInvalidParameter struct{ error } - -func (errInvalidParameter) InvalidParameter() {} - -func (e errInvalidParameter) Cause() error { - return e.error -} - -func (e errInvalidParameter) Unwrap() error { - return e.error -} - -// InvalidParameter is a helper to create an error of the class with the same name from any error type -func InvalidParameter(err error) error { - if err == nil || IsInvalidParameter(err) { - return err - } - return errInvalidParameter{err} -} - -type errConflict struct{ error } - -func (errConflict) Conflict() {} - -func (e errConflict) Cause() error { - return e.error -} - -func (e errConflict) Unwrap() error { - return e.error -} - -// Conflict is a helper to create an error of the class with the same name from any error type -func Conflict(err error) error { - if err == nil || IsConflict(err) { - return err - } - return errConflict{err} -} - -type errUnauthorized struct{ error } - -func (errUnauthorized) Unauthorized() {} - -func (e errUnauthorized) Cause() error { - return e.error -} - -func (e errUnauthorized) Unwrap() error { - return e.error -} - -// Unauthorized is a helper to create an error of the class with the same name from any error type -func Unauthorized(err error) error { - if err == nil || IsUnauthorized(err) { - return err - } - return errUnauthorized{err} -} - -type errUnavailable struct{ error } - -func (errUnavailable) Unavailable() {} - -func (e errUnavailable) Cause() error { - return e.error -} - -func (e errUnavailable) Unwrap() error { - return e.error -} - -// Unavailable is a helper to create an error of the class with the same name from any error type -func Unavailable(err error) error { - if err == nil || IsUnavailable(err) { - return err - } - return errUnavailable{err} -} - -type errForbidden struct{ error } - -func (errForbidden) Forbidden() {} - -func (e errForbidden) Cause() error { - return e.error -} - -func (e errForbidden) Unwrap() error { - return e.error -} - -// Forbidden is a helper to create an error of the class with the same name from any error type -func Forbidden(err error) error { - if err == nil || IsForbidden(err) { - return err - } - return errForbidden{err} -} - -type errSystem struct{ error } - -func (errSystem) System() {} - -func (e errSystem) Cause() error { - return e.error -} - -func (e errSystem) Unwrap() error { - return e.error -} - -// System is a helper to create an error of the class with the same name from any error type -func System(err error) error { - if err == nil || IsSystem(err) { - return err - } - return errSystem{err} -} - -type errNotModified struct{ error } - -func (errNotModified) NotModified() {} - -func (e errNotModified) Cause() error { - return e.error -} - -func (e errNotModified) 
Unwrap() error { - return e.error -} - -// NotModified is a helper to create an error of the class with the same name from any error type -func NotModified(err error) error { - if err == nil || IsNotModified(err) { - return err - } - return errNotModified{err} -} - -type errNotImplemented struct{ error } - -func (errNotImplemented) NotImplemented() {} - -func (e errNotImplemented) Cause() error { - return e.error -} - -func (e errNotImplemented) Unwrap() error { - return e.error -} - -// NotImplemented is a helper to create an error of the class with the same name from any error type -func NotImplemented(err error) error { - if err == nil || IsNotImplemented(err) { - return err - } - return errNotImplemented{err} -} - -type errUnknown struct{ error } - -func (errUnknown) Unknown() {} - -func (e errUnknown) Cause() error { - return e.error -} - -func (e errUnknown) Unwrap() error { - return e.error -} - -// Unknown is a helper to create an error of the class with the same name from any error type -func Unknown(err error) error { - if err == nil || IsUnknown(err) { - return err - } - return errUnknown{err} -} - -type errCancelled struct{ error } - -func (errCancelled) Cancelled() {} - -func (e errCancelled) Cause() error { - return e.error -} - -func (e errCancelled) Unwrap() error { - return e.error -} - -// Cancelled is a helper to create an error of the class with the same name from any error type -func Cancelled(err error) error { - if err == nil || IsCancelled(err) { - return err - } - return errCancelled{err} -} - -type errDeadline struct{ error } - -func (errDeadline) DeadlineExceeded() {} - -func (e errDeadline) Cause() error { - return e.error -} - -func (e errDeadline) Unwrap() error { - return e.error -} - -// Deadline is a helper to create an error of the class with the same name from any error type -func Deadline(err error) error { - if err == nil || IsDeadline(err) { - return err - } - return errDeadline{err} -} - -type errDataLoss struct{ error } - -func (errDataLoss) DataLoss() {} - -func (e errDataLoss) Cause() error { - return e.error -} - -func (e errDataLoss) Unwrap() error { - return e.error -} - -// DataLoss is a helper to create an error of the class with the same name from any error type -func DataLoss(err error) error { - if err == nil || IsDataLoss(err) { - return err - } - return errDataLoss{err} -} - -// FromContext returns the error class from the passed in context -func FromContext(ctx context.Context) error { - e := ctx.Err() - if e == nil { - return nil - } - - if e == context.Canceled { - return Cancelled(e) - } - if e == context.DeadlineExceeded { - return Deadline(e) - } - return Unknown(e) -} diff --git a/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/docker/docker/errdefs/http_helpers.go deleted file mode 100644 index 77bda389d..000000000 --- a/vendor/github.com/docker/docker/errdefs/http_helpers.go +++ /dev/null @@ -1,46 +0,0 @@ -package errdefs // import "github.com/docker/docker/errdefs" - -import ( - "net/http" -) - -// FromStatusCode creates an errdef error, based on the provided HTTP status-code -func FromStatusCode(err error, statusCode int) error { - if err == nil { - return nil - } - switch statusCode { - case http.StatusNotFound: - err = NotFound(err) - case http.StatusBadRequest: - err = InvalidParameter(err) - case http.StatusConflict: - err = Conflict(err) - case http.StatusUnauthorized: - err = Unauthorized(err) - case http.StatusServiceUnavailable: - err = Unavailable(err) - case http.StatusForbidden: - err = 
Forbidden(err) - case http.StatusNotModified: - err = NotModified(err) - case http.StatusNotImplemented: - err = NotImplemented(err) - case http.StatusInternalServerError: - if !IsSystem(err) && !IsUnknown(err) && !IsDataLoss(err) && !IsDeadline(err) && !IsCancelled(err) { - err = System(err) - } - default: - switch { - case statusCode >= 200 && statusCode < 400: - // it's a client error - case statusCode >= 400 && statusCode < 500: - err = InvalidParameter(err) - case statusCode >= 500 && statusCode < 600: - err = System(err) - default: - err = Unknown(err) - } - } - return err -} diff --git a/vendor/github.com/docker/docker/errdefs/is.go b/vendor/github.com/docker/docker/errdefs/is.go deleted file mode 100644 index 3abf07d0c..000000000 --- a/vendor/github.com/docker/docker/errdefs/is.go +++ /dev/null @@ -1,107 +0,0 @@ -package errdefs // import "github.com/docker/docker/errdefs" - -type causer interface { - Cause() error -} - -func getImplementer(err error) error { - switch e := err.(type) { - case - ErrNotFound, - ErrInvalidParameter, - ErrConflict, - ErrUnauthorized, - ErrUnavailable, - ErrForbidden, - ErrSystem, - ErrNotModified, - ErrNotImplemented, - ErrCancelled, - ErrDeadline, - ErrDataLoss, - ErrUnknown: - return err - case causer: - return getImplementer(e.Cause()) - default: - return err - } -} - -// IsNotFound returns if the passed in error is an ErrNotFound -func IsNotFound(err error) bool { - _, ok := getImplementer(err).(ErrNotFound) - return ok -} - -// IsInvalidParameter returns if the passed in error is an ErrInvalidParameter -func IsInvalidParameter(err error) bool { - _, ok := getImplementer(err).(ErrInvalidParameter) - return ok -} - -// IsConflict returns if the passed in error is an ErrConflict -func IsConflict(err error) bool { - _, ok := getImplementer(err).(ErrConflict) - return ok -} - -// IsUnauthorized returns if the passed in error is an ErrUnauthorized -func IsUnauthorized(err error) bool { - _, ok := getImplementer(err).(ErrUnauthorized) - return ok -} - -// IsUnavailable returns if the passed in error is an ErrUnavailable -func IsUnavailable(err error) bool { - _, ok := getImplementer(err).(ErrUnavailable) - return ok -} - -// IsForbidden returns if the passed in error is an ErrForbidden -func IsForbidden(err error) bool { - _, ok := getImplementer(err).(ErrForbidden) - return ok -} - -// IsSystem returns if the passed in error is an ErrSystem -func IsSystem(err error) bool { - _, ok := getImplementer(err).(ErrSystem) - return ok -} - -// IsNotModified returns if the passed in error is a NotModified error -func IsNotModified(err error) bool { - _, ok := getImplementer(err).(ErrNotModified) - return ok -} - -// IsNotImplemented returns if the passed in error is an ErrNotImplemented -func IsNotImplemented(err error) bool { - _, ok := getImplementer(err).(ErrNotImplemented) - return ok -} - -// IsUnknown returns if the passed in error is an ErrUnknown -func IsUnknown(err error) bool { - _, ok := getImplementer(err).(ErrUnknown) - return ok -} - -// IsCancelled returns if the passed in error is an ErrCancelled -func IsCancelled(err error) bool { - _, ok := getImplementer(err).(ErrCancelled) - return ok -} - -// IsDeadline returns if the passed in error is an ErrDeadline -func IsDeadline(err error) bool { - _, ok := getImplementer(err).(ErrDeadline) - return ok -} - -// IsDataLoss returns if the passed in error is an ErrDataLoss -func IsDataLoss(err error) bool { - _, ok := getImplementer(err).(ErrDataLoss) - return ok -} diff --git 
a/vendor/github.com/docker/docker/pkg/archive/README.md b/vendor/github.com/docker/docker/pkg/archive/README.md deleted file mode 100644 index 7307d9694..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/README.md +++ /dev/null @@ -1 +0,0 @@ -This code provides helper functions for dealing with archive files. diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go deleted file mode 100644 index 3af7c3a65..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive.go +++ /dev/null @@ -1,1474 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "bufio" - "bytes" - "compress/bzip2" - "compress/gzip" - "context" - "encoding/binary" - "fmt" - "io" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" - "syscall" - "time" - - "github.com/containerd/containerd/pkg/userns" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/system" - "github.com/klauspost/compress/zstd" - "github.com/moby/patternmatcher" - "github.com/moby/sys/sequential" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - exec "golang.org/x/sys/execabs" -) - -// ImpliedDirectoryMode represents the mode (Unix permissions) applied to directories that are implied by files in a -// tar, but that do not have their own header entry. -// -// The permissions mask is stored in a constant instead of locally to ensure that magic numbers do not -// proliferate in the codebase. The default value 0755 has been selected based on the default umask of 0022, and -// a convention of mkdir(1) calling mkdir(2) with permissions of 0777, resulting in a final value of 0755. -// -// This value is currently implementation-defined, and not captured in any cross-runtime specification. Thus, it is -// subject to change in Moby at any time -- image authors who require consistent or known directory permissions -// should explicitly control them by ensuring that header entries exist for any applicable path. -const ImpliedDirectoryMode = 0755 - -type ( - // Compression is the state represents if compressed or not. - Compression int - // WhiteoutFormat is the format of whiteouts unpacked - WhiteoutFormat int - - // TarOptions wraps the tar options. - TarOptions struct { - IncludeFiles []string - ExcludePatterns []string - Compression Compression - NoLchown bool - IDMap idtools.IdentityMapping - ChownOpts *idtools.Identity - IncludeSourceDir bool - // WhiteoutFormat is the expected on disk format for whiteout files. - // This format will be converted to the standard format on pack - // and from the standard format on unpack. - WhiteoutFormat WhiteoutFormat - // When unpacking, specifies whether overwriting a directory with a - // non-directory is allowed and vice versa. - NoOverwriteDirNonDir bool - // For each include when creating an archive, the included name will be - // replaced with the matching name from this map. - RebaseNames map[string]string - InUserNS bool - } -) - -// Archiver implements the Archiver interface and allows the reuse of most utility functions of -// this package with a pluggable Untar function. Also, to facilitate the passing of specific id -// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. 
-type Archiver struct { - Untar func(io.Reader, string, *TarOptions) error - IDMapping idtools.IdentityMapping -} - -// NewDefaultArchiver returns a new Archiver without any IdentityMapping -func NewDefaultArchiver() *Archiver { - return &Archiver{Untar: Untar} -} - -// breakoutError is used to differentiate errors related to breaking out -// When testing archive breakout in the unit tests, this error is expected -// in order for the test to pass. -type breakoutError error - -const ( - // Uncompressed represents the uncompressed. - Uncompressed Compression = iota - // Bzip2 is bzip2 compression algorithm. - Bzip2 - // Gzip is gzip compression algorithm. - Gzip - // Xz is xz compression algorithm. - Xz - // Zstd is zstd compression algorithm. - Zstd -) - -const ( - // AUFSWhiteoutFormat is the default format for whiteouts - AUFSWhiteoutFormat WhiteoutFormat = iota - // OverlayWhiteoutFormat formats whiteout according to the overlay - // standard. - OverlayWhiteoutFormat -) - -const ( - modeISDIR = 040000 // Directory - modeISFIFO = 010000 // FIFO - modeISREG = 0100000 // Regular file - modeISLNK = 0120000 // Symbolic link - modeISBLK = 060000 // Block special file - modeISCHR = 020000 // Character special file - modeISSOCK = 0140000 // Socket -) - -// IsArchivePath checks if the (possibly compressed) file at the given path -// starts with a tar file header. -func IsArchivePath(path string) bool { - file, err := os.Open(path) - if err != nil { - return false - } - defer file.Close() - rdr, err := DecompressStream(file) - if err != nil { - return false - } - defer rdr.Close() - r := tar.NewReader(rdr) - _, err = r.Next() - return err == nil -} - -const ( - zstdMagicSkippableStart = 0x184D2A50 - zstdMagicSkippableMask = 0xFFFFFFF0 -) - -var ( - bzip2Magic = []byte{0x42, 0x5A, 0x68} - gzipMagic = []byte{0x1F, 0x8B, 0x08} - xzMagic = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} - zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} -) - -type matcher = func([]byte) bool - -func magicNumberMatcher(m []byte) matcher { - return func(source []byte) bool { - return bytes.HasPrefix(source, m) - } -} - -// zstdMatcher detects zstd compression algorithm. -// Zstandard compressed data is made of one or more frames. -// There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. -// See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details. -func zstdMatcher() matcher { - return func(source []byte) bool { - if bytes.HasPrefix(source, zstdMagic) { - // Zstandard frame - return true - } - // skippable frame - if len(source) < 8 { - return false - } - // magic number from 0x184D2A50 to 0x184D2A5F. - if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart { - return true - } - return false - } -} - -// DetectCompression detects the compression algorithm of the source. 
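A small sketch of the DetectCompression helper whose declaration follows: sniff the first few bytes of a file and report the matching extension. The file path is a placeholder.

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	f, err := os.Open("layer.tar.gz") // placeholder path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Ten bytes are enough for every magic number checked by DetectCompression.
	header := make([]byte, 10)
	n, _ := f.Read(header)

	c := archive.DetectCompression(header[:n])
	fmt.Println("detected:", c.Extension()) // e.g. "tar.gz" for gzip input
}
```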
-func DetectCompression(source []byte) Compression { - compressionMap := map[Compression]matcher{ - Bzip2: magicNumberMatcher(bzip2Magic), - Gzip: magicNumberMatcher(gzipMagic), - Xz: magicNumberMatcher(xzMagic), - Zstd: zstdMatcher(), - } - for _, compression := range []Compression{Bzip2, Gzip, Xz, Zstd} { - fn := compressionMap[compression] - if fn(source) { - return compression - } - } - return Uncompressed -} - -func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { - args := []string{"xz", "-d", "-c", "-q"} - - return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) -} - -func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { - if noPigzEnv := os.Getenv("MOBY_DISABLE_PIGZ"); noPigzEnv != "" { - noPigz, err := strconv.ParseBool(noPigzEnv) - if err != nil { - logrus.WithError(err).Warn("invalid value in MOBY_DISABLE_PIGZ env var") - } - if noPigz { - logrus.Debugf("Use of pigz is disabled due to MOBY_DISABLE_PIGZ=%s", noPigzEnv) - return gzip.NewReader(buf) - } - } - - unpigzPath, err := exec.LookPath("unpigz") - if err != nil { - logrus.Debugf("unpigz binary not found, falling back to go gzip library") - return gzip.NewReader(buf) - } - - logrus.Debugf("Using %s to decompress", unpigzPath) - - return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) -} - -func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { - return ioutils.NewReadCloserWrapper(readBuf, func() error { - cancel() - return readBuf.Close() - }) -} - -// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. -func DecompressStream(archive io.Reader) (io.ReadCloser, error) { - p := pools.BufioReader32KPool - buf := p.Get(archive) - bs, err := buf.Peek(10) - if err != nil && err != io.EOF { - // Note: we'll ignore any io.EOF error because there are some odd - // cases where the layer.tar file will be empty (zero bytes) and - // that results in an io.EOF from the Peek() call. So, in those - // cases we'll just treat it as a non-compressed stream and - // that means just create an empty layer. - // See Issue 18170 - return nil, err - } - - compression := DetectCompression(bs) - switch compression { - case Uncompressed: - readBufWrapper := p.NewReadCloserWrapper(buf, buf) - return readBufWrapper, nil - case Gzip: - ctx, cancel := context.WithCancel(context.Background()) - - gzReader, err := gzDecompress(ctx, buf) - if err != nil { - cancel() - return nil, err - } - readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) - return wrapReadCloser(readBufWrapper, cancel), nil - case Bzip2: - bz2Reader := bzip2.NewReader(buf) - readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) - return readBufWrapper, nil - case Xz: - ctx, cancel := context.WithCancel(context.Background()) - - xzReader, err := xzDecompress(ctx, buf) - if err != nil { - cancel() - return nil, err - } - readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) - return wrapReadCloser(readBufWrapper, cancel), nil - case Zstd: - zstdReader, err := zstd.NewReader(buf) - if err != nil { - return nil, err - } - readBufWrapper := p.NewReadCloserWrapper(buf, zstdReader) - return readBufWrapper, nil - default: - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - } -} - -// CompressStream compresses the dest with specified compression algorithm. 
-func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { - p := pools.BufioWriter32KPool - buf := p.Get(dest) - switch compression { - case Uncompressed: - writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) - return writeBufWrapper, nil - case Gzip: - gzWriter := gzip.NewWriter(dest) - writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) - return writeBufWrapper, nil - case Bzip2, Xz: - // archive/bzip2 does not support writing, and there is no xz support at all - // However, this is not a problem as docker only currently generates gzipped tars - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - default: - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - } -} - -// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to -// modify the contents or header of an entry in the archive. If the file already -// exists in the archive the TarModifierFunc will be called with the Header and -// a reader which will return the files content. If the file does not exist both -// header and content will be nil. -type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) - -// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the -// tar stream are modified if they match any of the keys in mods. -func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { - pipeReader, pipeWriter := io.Pipe() - - go func() { - tarReader := tar.NewReader(inputTarStream) - tarWriter := tar.NewWriter(pipeWriter) - defer inputTarStream.Close() - defer tarWriter.Close() - - modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { - header, data, err := modifier(name, original, tarReader) - switch { - case err != nil: - return err - case header == nil: - return nil - } - - if header.Name == "" { - header.Name = name - } - header.Size = int64(len(data)) - if err := tarWriter.WriteHeader(header); err != nil { - return err - } - if len(data) != 0 { - if _, err := tarWriter.Write(data); err != nil { - return err - } - } - return nil - } - - var err error - var originalHeader *tar.Header - for { - originalHeader, err = tarReader.Next() - if err == io.EOF { - break - } - if err != nil { - pipeWriter.CloseWithError(err) - return - } - - modifier, ok := mods[originalHeader.Name] - if !ok { - // No modifiers for this file, copy the header and data - if err := tarWriter.WriteHeader(originalHeader); err != nil { - pipeWriter.CloseWithError(err) - return - } - if _, err := pools.Copy(tarWriter, tarReader); err != nil { - pipeWriter.CloseWithError(err) - return - } - continue - } - delete(mods, originalHeader.Name) - - if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { - pipeWriter.CloseWithError(err) - return - } - } - - // Apply the modifiers that haven't matched any files in the archive - for name, modifier := range mods { - if err := modify(name, nil, modifier, nil); err != nil { - pipeWriter.CloseWithError(err) - return - } - } - - pipeWriter.Close() - }() - return pipeReader -} - -// Extension returns the extension of a file that uses the specified compression algorithm. 
-func (compression *Compression) Extension() string { - switch *compression { - case Uncompressed: - return "tar" - case Bzip2: - return "tar.bz2" - case Gzip: - return "tar.gz" - case Xz: - return "tar.xz" - case Zstd: - return "tar.zst" - } - return "" -} - -// nosysFileInfo hides the system-dependent info of the wrapped FileInfo to -// prevent tar.FileInfoHeader from introspecting it and potentially calling into -// glibc. -type nosysFileInfo struct { - os.FileInfo -} - -func (fi nosysFileInfo) Sys() interface{} { - // A Sys value of type *tar.Header is safe as it is system-independent. - // The tar.FileInfoHeader function copies the fields into the returned - // header without performing any OS lookups. - if sys, ok := fi.FileInfo.Sys().(*tar.Header); ok { - return sys - } - return nil -} - -// sysStat, if non-nil, populates hdr from system-dependent fields of fi. -var sysStat func(fi os.FileInfo, hdr *tar.Header) error - -// FileInfoHeaderNoLookups creates a partially-populated tar.Header from fi. -// -// Compared to the archive/tar.FileInfoHeader function, this function is safe to -// call from a chrooted process as it does not populate fields which would -// require operating system lookups. It behaves identically to -// tar.FileInfoHeader when fi is a FileInfo value returned from -// tar.Header.FileInfo(). -// -// When fi is a FileInfo for a native file, such as returned from os.Stat() and -// os.Lstat(), the returned Header value differs from one returned from -// tar.FileInfoHeader in the following ways. The Uname and Gname fields are not -// set as OS lookups would be required to populate them. The AccessTime and -// ChangeTime fields are not currently set (not yet implemented) although that -// is subject to change. Callers which require the AccessTime or ChangeTime -// fields to be zeroed should explicitly zero them out in the returned Header -// value to avoid any compatibility issues in the future. -func FileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) { - hdr, err := tar.FileInfoHeader(nosysFileInfo{fi}, link) - if err != nil { - return nil, err - } - if sysStat != nil { - return hdr, sysStat(fi, hdr) - } - return hdr, nil -} - -// FileInfoHeader creates a populated Header from fi. -// -// Compared to the archive/tar package, this function fills in less information -// but is safe to call from a chrooted process. The AccessTime and ChangeTime -// fields are not set in the returned header, ModTime is truncated to one-second -// precision, and the Uname and Gname fields are only set when fi is a FileInfo -// value returned from tar.Header.FileInfo(). Also, regardless of Go version, -// this function fills file type bits (e.g. hdr.Mode |= modeISDIR), which have -// been deleted since Go 1.9 archive/tar. 
-func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { - hdr, err := FileInfoHeaderNoLookups(fi, link) - if err != nil { - return nil, err - } - hdr.Format = tar.FormatPAX - hdr.ModTime = hdr.ModTime.Truncate(time.Second) - hdr.AccessTime = time.Time{} - hdr.ChangeTime = time.Time{} - hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) - hdr.Name = canonicalTarName(name, fi.IsDir()) - return hdr, nil -} - -// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar -// https://github.com/golang/go/commit/66b5a2f -func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { - fm := fi.Mode() - switch { - case fm.IsRegular(): - mode |= modeISREG - case fi.IsDir(): - mode |= modeISDIR - case fm&os.ModeSymlink != 0: - mode |= modeISLNK - case fm&os.ModeDevice != 0: - if fm&os.ModeCharDevice != 0 { - mode |= modeISCHR - } else { - mode |= modeISBLK - } - case fm&os.ModeNamedPipe != 0: - mode |= modeISFIFO - case fm&os.ModeSocket != 0: - mode |= modeISSOCK - } - return mode -} - -// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem -// to a tar header -func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { - const ( - // Values based on linux/include/uapi/linux/capability.h - xattrCapsSz2 = 20 - versionOffset = 3 - vfsCapRevision2 = 2 - vfsCapRevision3 = 3 - ) - capability, _ := system.Lgetxattr(path, "security.capability") - if capability != nil { - length := len(capability) - if capability[versionOffset] == vfsCapRevision3 { - // Convert VFS_CAP_REVISION_3 to VFS_CAP_REVISION_2 as root UID makes no - // sense outside the user namespace the archive is built in. - capability[versionOffset] = vfsCapRevision2 - length = xattrCapsSz2 - } - hdr.Xattrs = make(map[string]string) - hdr.Xattrs["security.capability"] = string(capability[:length]) - } - return nil -} - -type tarWhiteoutConverter interface { - ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) - ConvertRead(*tar.Header, string) (bool, error) -} - -type tarAppender struct { - TarWriter *tar.Writer - Buffer *bufio.Writer - - // for hardlink mapping - SeenFiles map[uint64]string - IdentityMapping idtools.IdentityMapping - ChownOpts *idtools.Identity - - // For packing and unpacking whiteout files in the - // non standard format. The whiteout files defined - // by the AUFS standard are used as the tar whiteout - // standard. - WhiteoutConverter tarWhiteoutConverter -} - -func newTarAppender(idMapping idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender { - return &tarAppender{ - SeenFiles: make(map[uint64]string), - TarWriter: tar.NewWriter(writer), - Buffer: pools.BufioWriter32KPool.Get(nil), - IdentityMapping: idMapping, - ChownOpts: chownOpts, - } -} - -// canonicalTarName provides a platform-independent and consistent posix-style -// path for files and directories to be archived regardless of the platform. 
-func canonicalTarName(name string, isDir bool) string { - name = CanonicalTarNameForPath(name) - - // suffix with '/' for directories - if isDir && !strings.HasSuffix(name, "/") { - name += "/" - } - return name -} - -// addTarFile adds to the tar archive a file from `path` as `name` -func (ta *tarAppender) addTarFile(path, name string) error { - fi, err := os.Lstat(path) - if err != nil { - return err - } - - var link string - if fi.Mode()&os.ModeSymlink != 0 { - var err error - link, err = os.Readlink(path) - if err != nil { - return err - } - } - - hdr, err := FileInfoHeader(name, fi, link) - if err != nil { - return err - } - if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { - return err - } - - // if it's not a directory and has more than 1 link, - // it's hard linked, so set the type flag accordingly - if !fi.IsDir() && hasHardlinks(fi) { - inode, err := getInodeFromStat(fi.Sys()) - if err != nil { - return err - } - // a link should have a name that it links too - // and that linked name should be first in the tar archive - if oldpath, ok := ta.SeenFiles[inode]; ok { - hdr.Typeflag = tar.TypeLink - hdr.Linkname = oldpath - hdr.Size = 0 // This Must be here for the writer math to add up! - } else { - ta.SeenFiles[inode] = name - } - } - - // check whether the file is overlayfs whiteout - // if yes, skip re-mapping container ID mappings. - isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 - - // handle re-mapping container ID mappings back to host ID mappings before - // writing tar headers/files. We skip whiteout files because they were written - // by the kernel and already have proper ownership relative to the host - if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() { - fileIDPair, err := getFileUIDGID(fi.Sys()) - if err != nil { - return err - } - hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair) - if err != nil { - return err - } - } - - // explicitly override with ChownOpts - if ta.ChownOpts != nil { - hdr.Uid = ta.ChownOpts.UID - hdr.Gid = ta.ChownOpts.GID - } - - if ta.WhiteoutConverter != nil { - wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) - if err != nil { - return err - } - - // If a new whiteout file exists, write original hdr, then - // replace hdr with wo to be written after. Whiteouts should - // always be written after the original. Note the original - // hdr may have been updated to be a whiteout with returning - // a whiteout header - if wo != nil { - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - return err - } - if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { - return fmt.Errorf("tar: cannot use whiteout for non-empty file") - } - hdr = wo - } - } - - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - return err - } - - if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { - // We use sequential file access to avoid depleting the standby list on - // Windows. On Linux, this equates to a regular os.Open. 
- file, err := sequential.Open(path) - if err != nil { - return err - } - - ta.Buffer.Reset(ta.TarWriter) - defer ta.Buffer.Reset(nil) - _, err = io.Copy(ta.Buffer, file) - file.Close() - if err != nil { - return err - } - err = ta.Buffer.Flush() - if err != nil { - return err - } - } - - return nil -} - -func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error { - // hdr.Mode is in linux format, which we can use for sycalls, - // but for os.Foo() calls we need the mode converted to os.FileMode, - // so use hdrInfo.Mode() (they differ for e.g. setuid bits) - hdrInfo := hdr.FileInfo() - - switch hdr.Typeflag { - case tar.TypeDir: - // Create directory unless it exists as a directory already. - // In that case we just want to merge the two - if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { - if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { - return err - } - } - - case tar.TypeReg: - // Source is regular file. We use sequential file access to avoid depleting - // the standby list on Windows. On Linux, this equates to a regular os.OpenFile. - file, err := sequential.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) - if err != nil { - return err - } - if _, err := io.Copy(file, reader); err != nil { - file.Close() - return err - } - file.Close() - - case tar.TypeBlock, tar.TypeChar: - if inUserns { // cannot create devices in a userns - return nil - } - // Handle this is an OS-specific way - if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { - return err - } - - case tar.TypeFifo: - // Handle this is an OS-specific way - if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { - return err - } - - case tar.TypeLink: - // #nosec G305 -- The target path is checked for path traversal. - targetPath := filepath.Join(extractDir, hdr.Linkname) - // check for hardlink breakout - if !strings.HasPrefix(targetPath, extractDir) { - return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) - } - if err := os.Link(targetPath, path); err != nil { - return err - } - - case tar.TypeSymlink: - // path -> hdr.Linkname = targetPath - // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file - targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) // #nosec G305 -- The target path is checked for path traversal. - - // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because - // that symlink would first have to be created, which would be caught earlier, at this very check: - if !strings.HasPrefix(targetPath, extractDir) { - return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) - } - if err := os.Symlink(hdr.Linkname, path); err != nil { - return err - } - - case tar.TypeXGlobalHeader: - logrus.Debug("PAX Global Extended Headers found and ignored") - return nil - - default: - return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) - } - - // Lchown is not supported on Windows. 
- if Lchown && runtime.GOOS != "windows" { - if chownOpts == nil { - chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid} - } - if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { - msg := "failed to Lchown %q for UID %d, GID %d" - if errors.Is(err, syscall.EINVAL) && userns.RunningInUserNS() { - msg += " (try increasing the number of subordinate IDs in /etc/subuid and /etc/subgid)" - } - return errors.Wrapf(err, msg, path, hdr.Uid, hdr.Gid) - } - } - - var errors []string - for key, value := range hdr.Xattrs { - if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { - if err == syscall.ENOTSUP || err == syscall.EPERM { - // We ignore errors here because not all graphdrivers support - // xattrs *cough* old versions of AUFS *cough*. However only - // ENOTSUP should be emitted in that case, otherwise we still - // bail. - // EPERM occurs if modifying xattrs is not allowed. This can - // happen when running in userns with restrictions (ChromeOS). - errors = append(errors, err.Error()) - continue - } - return err - } - } - - if len(errors) > 0 { - logrus.WithFields(logrus.Fields{ - "errors": errors, - }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") - } - - // There is no LChmod, so ignore mode for symlink. Also, this - // must happen after chown, as that can modify the file mode - if err := handleLChmod(hdr, path, hdrInfo); err != nil { - return err - } - - aTime := hdr.AccessTime - if aTime.Before(hdr.ModTime) { - // Last access time should never be before last modified time. - aTime = hdr.ModTime - } - - // system.Chtimes doesn't support a NOFOLLOW flag atm - if hdr.Typeflag == tar.TypeLink { - if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { - if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { - return err - } - } - } else if hdr.Typeflag != tar.TypeSymlink { - if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { - return err - } - } else { - ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} - if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { - return err - } - } - return nil -} - -// Tar creates an archive from the directory at `path`, and returns it as a -// stream of bytes. -func Tar(path string, compression Compression) (io.ReadCloser, error) { - return TarWithOptions(path, &TarOptions{Compression: compression}) -} - -// TarWithOptions creates an archive from the directory at `path`, only including files whose relative -// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. -func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { - // Fix the source path to work with long path names. This is a no-op - // on platforms other than Windows. - srcPath = fixVolumePathPrefix(srcPath) - - pm, err := patternmatcher.New(options.ExcludePatterns) - if err != nil { - return nil, err - } - - pipeReader, pipeWriter := io.Pipe() - - compressWriter, err := CompressStream(pipeWriter, options.Compression) - if err != nil { - return nil, err - } - - whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) - if err != nil { - return nil, err - } - - go func() { - ta := newTarAppender( - options.IDMap, - compressWriter, - options.ChownOpts, - ) - ta.WhiteoutConverter = whiteoutConverter - - defer func() { - // Make sure to check the error on Close. 
- if err := ta.TarWriter.Close(); err != nil { - logrus.Errorf("Can't close tar writer: %s", err) - } - if err := compressWriter.Close(); err != nil { - logrus.Errorf("Can't close compress writer: %s", err) - } - if err := pipeWriter.Close(); err != nil { - logrus.Errorf("Can't close pipe writer: %s", err) - } - }() - - // this buffer is needed for the duration of this piped stream - defer pools.BufioWriter32KPool.Put(ta.Buffer) - - // In general we log errors here but ignore them because - // during e.g. a diff operation the container can continue - // mutating the filesystem and we can see transient errors - // from this - - stat, err := os.Lstat(srcPath) - if err != nil { - return - } - - if !stat.IsDir() { - // We can't later join a non-dir with any includes because the - // 'walk' will error if "file/." is stat-ed and "file" is not a - // directory. So, we must split the source path and use the - // basename as the include. - if len(options.IncludeFiles) > 0 { - logrus.Warn("Tar: Can't archive a file with includes") - } - - dir, base := SplitPathDirEntry(srcPath) - srcPath = dir - options.IncludeFiles = []string{base} - } - - if len(options.IncludeFiles) == 0 { - options.IncludeFiles = []string{"."} - } - - seen := make(map[string]bool) - - for _, include := range options.IncludeFiles { - rebaseName := options.RebaseNames[include] - - var ( - parentMatchInfo []patternmatcher.MatchInfo - parentDirs []string - ) - - walkRoot := getWalkRoot(srcPath, include) - filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { - if err != nil { - logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) - return nil - } - - relFilePath, err := filepath.Rel(srcPath, filePath) - if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { - // Error getting relative path OR we are looking - // at the source directory path. Skip in both situations. - return nil - } - - if options.IncludeSourceDir && include == "." && relFilePath != "." { - relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) - } - - skip := false - - // If "include" is an exact match for the current file - // then even if there's an "excludePatterns" pattern that - // matches it, don't skip it. IOW, assume an explicit 'include' - // is asking for that file no matter what - which is true - // for some files, like .dockerignore and Dockerfile (sometimes) - if include != relFilePath { - for len(parentDirs) != 0 { - lastParentDir := parentDirs[len(parentDirs)-1] - if strings.HasPrefix(relFilePath, lastParentDir+string(os.PathSeparator)) { - break - } - parentDirs = parentDirs[:len(parentDirs)-1] - parentMatchInfo = parentMatchInfo[:len(parentMatchInfo)-1] - } - - var matchInfo patternmatcher.MatchInfo - if len(parentMatchInfo) != 0 { - skip, matchInfo, err = pm.MatchesUsingParentResults(relFilePath, parentMatchInfo[len(parentMatchInfo)-1]) - } else { - skip, matchInfo, err = pm.MatchesUsingParentResults(relFilePath, patternmatcher.MatchInfo{}) - } - if err != nil { - logrus.Errorf("Error matching %s: %v", relFilePath, err) - return err - } - - if f.IsDir() { - parentDirs = append(parentDirs, relFilePath) - parentMatchInfo = append(parentMatchInfo, matchInfo) - } - } - - if skip { - // If we want to skip this file and its a directory - // then we should first check to see if there's an - // excludes pattern (e.g. !dir/file) that starts with this - // dir. If so then we can't skip this dir. - - // Its not a dir then so we can just return/skip. 
- if !f.IsDir() { - return nil - } - - // No exceptions (!...) in patterns so just skip dir - if !pm.Exclusions() { - return filepath.SkipDir - } - - dirSlash := relFilePath + string(filepath.Separator) - - for _, pat := range pm.Patterns() { - if !pat.Exclusion() { - continue - } - if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { - // found a match - so can't skip this dir - return nil - } - } - - // No matching exclusion dir so just skip dir - return filepath.SkipDir - } - - if seen[relFilePath] { - return nil - } - seen[relFilePath] = true - - // Rename the base resource. - if rebaseName != "" { - var replacement string - if rebaseName != string(filepath.Separator) { - // Special case the root directory to replace with an - // empty string instead so that we don't end up with - // double slashes in the paths. - replacement = rebaseName - } - - relFilePath = strings.Replace(relFilePath, include, replacement, 1) - } - - if err := ta.addTarFile(filePath, relFilePath); err != nil { - logrus.Errorf("Can't add file %s to tar: %s", filePath, err) - // if pipe is broken, stop writing tar stream to it - if err == io.ErrClosedPipe { - return err - } - } - return nil - }) - } - }() - - return pipeReader, nil -} - -// Unpack unpacks the decompressedArchive to dest with options. -func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { - tr := tar.NewReader(decompressedArchive) - trBuf := pools.BufioReader32KPool.Get(nil) - defer pools.BufioReader32KPool.Put(trBuf) - - var dirs []*tar.Header - whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS) - if err != nil { - return err - } - - // Iterate through the files in the archive. -loop: - for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - return err - } - - // ignore XGlobalHeader early to avoid creating parent directories for them - if hdr.Typeflag == tar.TypeXGlobalHeader { - logrus.Debugf("PAX Global Extended Headers found for %s and ignored", hdr.Name) - continue - } - - // Normalize name, for safety and for a simple is-root check - // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: - // This keeps "..\" as-is, but normalizes "\..\" to "\". - hdr.Name = filepath.Clean(hdr.Name) - - for _, exclude := range options.ExcludePatterns { - if strings.HasPrefix(hdr.Name, exclude) { - continue loop - } - } - - // Ensure that the parent directory exists. - err = createImpliedDirectories(dest, hdr, options) - if err != nil { - return err - } - - // #nosec G305 -- The joined path is checked for path traversal. - path := filepath.Join(dest, hdr.Name) - rel, err := filepath.Rel(dest, path) - if err != nil { - return err - } - if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { - return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) - } - - // If path exits we almost always just want to remove and replace it - // The only exception is when it is a directory *and* the file from - // the layer is also a directory. Then we want to merge them (i.e. - // just apply the metadata from the layer). - if fi, err := os.Lstat(path); err == nil { - if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { - // If NoOverwriteDirNonDir is true then we cannot replace - // an existing directory with a non-directory from the archive. 
- return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) - } - - if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { - // If NoOverwriteDirNonDir is true then we cannot replace - // an existing non-directory with a directory from the archive. - return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) - } - - if fi.IsDir() && hdr.Name == "." { - continue - } - - if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { - if err := os.RemoveAll(path); err != nil { - return err - } - } - } - trBuf.Reset(tr) - - if err := remapIDs(options.IDMap, hdr); err != nil { - return err - } - - if whiteoutConverter != nil { - writeFile, err := whiteoutConverter.ConvertRead(hdr, path) - if err != nil { - return err - } - if !writeFile { - continue - } - } - - if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { - return err - } - - // Directory mtimes must be handled at the end to avoid further - // file creation in them to modify the directory mtime - if hdr.Typeflag == tar.TypeDir { - dirs = append(dirs, hdr) - } - } - - for _, hdr := range dirs { - // #nosec G305 -- The header was checked for path traversal before it was appended to the dirs slice. - path := filepath.Join(dest, hdr.Name) - - if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { - return err - } - } - return nil -} - -// createImpliedDirectories will create all parent directories of the current path with default permissions, if they do -// not already exist. This is possible as the tar format supports 'implicit' directories, where their existence is -// defined by the paths of files in the tar, but there are no header entries for the directories themselves, and thus -// we most both create them and choose metadata like permissions. -// -// The caller should have performed filepath.Clean(hdr.Name), so hdr.Name will now be in the filepath format for the OS -// on which the daemon is running. This precondition is required because this function assumes a OS-specific path -// separator when checking that a path is not the root. -func createImpliedDirectories(dest string, hdr *tar.Header, options *TarOptions) error { - // Not the root directory, ensure that the parent directory exists - if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { - parent := filepath.Dir(hdr.Name) - parentPath := filepath.Join(dest, parent) - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - // RootPair() is confined inside this loop as most cases will not require a call, so we can spend some - // unneeded function calls in the uncommon case to encapsulate logic -- implied directories are a niche - // usage that reduces the portability of an image. - rootIDs := options.IDMap.RootPair() - - err = idtools.MkdirAllAndChownNew(parentPath, ImpliedDirectoryMode, rootIDs) - if err != nil { - return err - } - } - } - - return nil -} - -// Untar reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive may be compressed with one of the following algorithms: -// identity (uncompressed), gzip, bzip2, xz. -// -// FIXME: specify behavior when target path exists vs. doesn't exist. 
-func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { - return untarHandler(tarArchive, dest, options, true) -} - -// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive must be an uncompressed stream. -func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { - return untarHandler(tarArchive, dest, options, false) -} - -// Handler for teasing out the automatic decompression -func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { - if tarArchive == nil { - return fmt.Errorf("Empty archive") - } - dest = filepath.Clean(dest) - if options == nil { - options = &TarOptions{} - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - - r := tarArchive - if decompress { - decompressedArchive, err := DecompressStream(tarArchive) - if err != nil { - return err - } - defer decompressedArchive.Close() - r = decompressedArchive - } - - return Unpack(r, dest, options) -} - -// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. -// If either Tar or Untar fails, TarUntar aborts and returns the error. -func (archiver *Archiver) TarUntar(src, dst string) error { - archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) - if err != nil { - return err - } - defer archive.Close() - options := &TarOptions{ - IDMap: archiver.IDMapping, - } - return archiver.Untar(archive, dst, options) -} - -// UntarPath untar a file from path to a destination, src is the source tar file path. -func (archiver *Archiver) UntarPath(src, dst string) error { - archive, err := os.Open(src) - if err != nil { - return err - } - defer archive.Close() - options := &TarOptions{ - IDMap: archiver.IDMapping, - } - return archiver.Untar(archive, dst, options) -} - -// CopyWithTar creates a tar archive of filesystem path `src`, and -// unpacks it at filesystem path `dst`. -// The archive is streamed directly with fixed buffering and no -// intermediary disk IO. -func (archiver *Archiver) CopyWithTar(src, dst string) error { - srcSt, err := os.Stat(src) - if err != nil { - return err - } - if !srcSt.IsDir() { - return archiver.CopyFileWithTar(src, dst) - } - - // if this Archiver is set up with ID mapping we need to create - // the new destination directory with the remapped root UID/GID pair - // as owner - rootIDs := archiver.IDMapping.RootPair() - // Create dst, copy src's content into it - if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { - return err - } - return archiver.TarUntar(src, dst) -} - -// CopyFileWithTar emulates the behavior of the 'cp' command-line -// for a single file. It copies a regular file from path `src` to -// path `dst`, and preserves all its metadata. -func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { - srcSt, err := os.Stat(src) - if err != nil { - return err - } - - if srcSt.IsDir() { - return fmt.Errorf("Can't copy a directory") - } - - // Clean up the trailing slash. This must be done in an operating - // system specific manner. 
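// Editorial aside — illustrative only, not part of the vendored diff. The check
// below treats a destination that ends in the path separator as "copy into this
// directory" and appends the source's base name. A minimal sketch with
// hypothetical paths (output shown for Unix-like systems):
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	src, dst := "/tmp/report.txt", "/backups/"
	if dst[len(dst)-1] == filepath.Separator {
		// Destination names a directory: append the source file's base name.
		dst = filepath.Join(dst, filepath.Base(src))
	}
	fmt.Println(dst) // /backups/report.txt
}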
- if dst[len(dst)-1] == os.PathSeparator { - dst = filepath.Join(dst, filepath.Base(src)) - } - // Create the holding directory if necessary - if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { - return err - } - - r, w := io.Pipe() - errC := make(chan error, 1) - - go func() { - defer close(errC) - - errC <- func() error { - defer w.Close() - - srcF, err := os.Open(src) - if err != nil { - return err - } - defer srcF.Close() - - hdr, err := FileInfoHeaderNoLookups(srcSt, "") - if err != nil { - return err - } - hdr.Format = tar.FormatPAX - hdr.ModTime = hdr.ModTime.Truncate(time.Second) - hdr.AccessTime = time.Time{} - hdr.ChangeTime = time.Time{} - hdr.Name = filepath.Base(dst) - hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) - - if err := remapIDs(archiver.IDMapping, hdr); err != nil { - return err - } - - tw := tar.NewWriter(w) - defer tw.Close() - if err := tw.WriteHeader(hdr); err != nil { - return err - } - if _, err := io.Copy(tw, srcF); err != nil { - return err - } - return nil - }() - }() - defer func() { - if er := <-errC; err == nil && er != nil { - err = er - } - }() - - err = archiver.Untar(r, filepath.Dir(dst), nil) - if err != nil { - r.CloseWithError(err) - } - return err -} - -// IdentityMapping returns the IdentityMapping of the archiver. -func (archiver *Archiver) IdentityMapping() idtools.IdentityMapping { - return archiver.IDMapping -} - -func remapIDs(idMapping idtools.IdentityMapping, hdr *tar.Header) error { - ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}) - hdr.Uid, hdr.Gid = ids.UID, ids.GID - return err -} - -// cmdStream executes a command, and returns its stdout as a stream. -// If the command fails to run or doesn't complete successfully, an error -// will be returned, including anything written on stderr. -func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { - cmd.Stdin = input - pipeR, pipeW := io.Pipe() - cmd.Stdout = pipeW - var errBuf bytes.Buffer - cmd.Stderr = &errBuf - - // Run the command and return the pipe - if err := cmd.Start(); err != nil { - return nil, err - } - - // Ensure the command has exited before we clean anything up - done := make(chan struct{}) - - // Copy stdout to the returned pipe - go func() { - if err := cmd.Wait(); err != nil { - pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) - } else { - pipeW.Close() - } - close(done) - }() - - return ioutils.NewReadCloserWrapper(pipeR, func() error { - // Close pipeR, and then wait for the command to complete before returning. We have to close pipeR first, as - // cmd.Wait waits for any non-file stdout/stderr/stdin to close. - err := pipeR.Close() - <-done - return err - }), nil -} - -// NewTempArchive reads the content of src into a temporary file, and returns the contents -// of that file as an archive. The archive can only be read once - as soon as reading completes, -// the file will be deleted. -func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { - f, err := os.CreateTemp(dir, "") - if err != nil { - return nil, err - } - if _, err := io.Copy(f, src); err != nil { - return nil, err - } - if _, err := f.Seek(0, 0); err != nil { - return nil, err - } - st, err := f.Stat() - if err != nil { - return nil, err - } - size := st.Size() - return &TempArchive{File: f, Size: size}, nil -} - -// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, -// the file will be deleted. 
-type TempArchive struct { - *os.File - Size int64 // Pre-computed from Stat().Size() as a convenience - read int64 - closed bool -} - -// Close closes the underlying file if it's still open, or does a no-op -// to allow callers to try to close the TempArchive multiple times safely. -func (archive *TempArchive) Close() error { - if archive.closed { - return nil - } - - archive.closed = true - - return archive.File.Close() -} - -func (archive *TempArchive) Read(data []byte) (int, error) { - n, err := archive.File.Read(data) - archive.read += int64(n) - if err != nil || archive.read == archive.Size { - archive.Close() - os.Remove(archive.File.Name()) - } - return n, err -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go deleted file mode 100644 index 76321a35e..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go +++ /dev/null @@ -1,100 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/system" - "github.com/pkg/errors" - "golang.org/x/sys/unix" -) - -func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) (tarWhiteoutConverter, error) { - if format == OverlayWhiteoutFormat { - if inUserNS { - return nil, errors.New("specifying OverlayWhiteoutFormat is not allowed in userns") - } - return overlayWhiteoutConverter{}, nil - } - return nil, nil -} - -type overlayWhiteoutConverter struct { -} - -func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { - // convert whiteouts to AUFS format - if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { - // we just rename the file and make it normal - dir, filename := filepath.Split(hdr.Name) - hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) - hdr.Mode = 0600 - hdr.Typeflag = tar.TypeReg - hdr.Size = 0 - } - - if fi.Mode()&os.ModeDir != 0 { - // convert opaque dirs to AUFS format by writing an empty file with the prefix - opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") - if err != nil { - return nil, err - } - if len(opaque) == 1 && opaque[0] == 'y' { - if hdr.Xattrs != nil { - delete(hdr.Xattrs, "trusted.overlay.opaque") - } - - // create a header for the whiteout file - // it should inherit some properties from the parent, but be a regular file - wo = &tar.Header{ - Typeflag: tar.TypeReg, - Mode: hdr.Mode & int64(os.ModePerm), - Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), - Size: 0, - Uid: hdr.Uid, - Uname: hdr.Uname, - Gid: hdr.Gid, - Gname: hdr.Gname, - AccessTime: hdr.AccessTime, - ChangeTime: hdr.ChangeTime, - } //#nosec G305 -- An archive is being created, not extracted. 
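// Editorial aside — illustrative only, not part of the vendored diff. The
// conversion above rewrites overlayfs whiteouts into the AUFS naming scheme:
// a deleted entry "dir/name" becomes a marker file "dir/.wh.name", and a
// directory marked opaque gets an empty "dir/.wh..wh..opq" entry. A minimal
// sketch of that naming; the two string literals are assumed to match the
// package's WhiteoutPrefix and WhiteoutOpaqueDir constants.
package main

import (
	"fmt"
	"path/filepath"
)

const (
	whiteoutPrefix    = ".wh."         // assumed value of WhiteoutPrefix
	whiteoutOpaqueDir = ".wh..wh..opq" // assumed value of WhiteoutOpaqueDir
)

func main() {
	// Marker for a deleted entry: "dir/name" -> "dir/.wh.name".
	dir, name := filepath.Split("usr/share/doc/readme")
	fmt.Println(filepath.Join(dir, whiteoutPrefix+name)) // usr/share/doc/.wh.readme

	// Marker written inside a directory that became opaque.
	fmt.Println(filepath.Join("usr/share/doc", whiteoutOpaqueDir)) // usr/share/doc/.wh..wh..opq
}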
- } - } - - return -} - -func (c overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { - base := filepath.Base(path) - dir := filepath.Dir(path) - - // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay - if base == WhiteoutOpaqueDir { - err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0) - if err != nil { - return false, errors.Wrapf(err, "setxattr(%q, trusted.overlay.opaque=y)", dir) - } - // don't write the file itself - return false, err - } - - // if a file was deleted and we are using overlay, we need to create a character device - if strings.HasPrefix(base, WhiteoutPrefix) { - originalBase := base[len(WhiteoutPrefix):] - originalPath := filepath.Join(dir, originalBase) - - if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { - return false, errors.Wrapf(err, "failed to mknod(%q, S_IFCHR, 0)", originalPath) - } - if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { - return false, err - } - - // don't write the file itself - return false, nil - } - - return true, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go deleted file mode 100644 index 28ae2769c..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_other.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build !linux -// +build !linux - -package archive // import "github.com/docker/docker/pkg/archive" - -func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) (tarWhiteoutConverter, error) { - return nil, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go deleted file mode 100644 index 1a2aea2e6..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go +++ /dev/null @@ -1,125 +0,0 @@ -//go:build !windows -// +build !windows - -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "errors" - "os" - "path/filepath" - "strings" - "syscall" - - "github.com/containerd/containerd/pkg/userns" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/system" - "golang.org/x/sys/unix" -) - -func init() { - sysStat = statUnix -} - -// fixVolumePathPrefix does platform specific processing to ensure that if -// the path being passed in is not in a volume path format, convert it to one. -func fixVolumePathPrefix(srcPath string) string { - return srcPath -} - -// getWalkRoot calculates the root path when performing a TarWithOptions. -// We use a separate function as this is platform specific. On Linux, we -// can't use filepath.Join(srcPath,include) because this will clean away -// a trailing "." or "/" which may be important. -func getWalkRoot(srcPath string, include string) string { - return strings.TrimSuffix(srcPath, string(filepath.Separator)) + string(filepath.Separator) + include -} - -// CanonicalTarNameForPath returns platform-specific filepath -// to canonical posix-style path for tar archival. p is relative -// path. -func CanonicalTarNameForPath(p string) string { - return p // already unix-style -} - -// chmodTarEntry is used to adjust the file permissions used in tar header based -// on the platform the archival is done. 
- -func chmodTarEntry(perm os.FileMode) os.FileMode { - return perm // noop for unix as golang APIs provide perm bits correctly -} - -// statUnix populates hdr from system-dependent fields of fi without performing -// any OS lookups. -func statUnix(fi os.FileInfo, hdr *tar.Header) error { - s, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return nil - } - - hdr.Uid = int(s.Uid) - hdr.Gid = int(s.Gid) - - if s.Mode&unix.S_IFBLK != 0 || - s.Mode&unix.S_IFCHR != 0 { - hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert - hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert - } - - return nil -} - -func getInodeFromStat(stat interface{}) (inode uint64, err error) { - s, ok := stat.(*syscall.Stat_t) - - if ok { - inode = s.Ino - } - - return -} - -func getFileUIDGID(stat interface{}) (idtools.Identity, error) { - s, ok := stat.(*syscall.Stat_t) - - if !ok { - return idtools.Identity{}, errors.New("cannot convert stat value to syscall.Stat_t") - } - return idtools.Identity{UID: int(s.Uid), GID: int(s.Gid)}, nil -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - mode := uint32(hdr.Mode & 07777) - switch hdr.Typeflag { - case tar.TypeBlock: - mode |= unix.S_IFBLK - case tar.TypeChar: - mode |= unix.S_IFCHR - case tar.TypeFifo: - mode |= unix.S_IFIFO - } - - err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))) - if errors.Is(err, syscall.EPERM) && userns.RunningInUserNS() { - // In most cases, cannot create a device if running in user namespace - err = nil - } - return err -} - -func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { - if hdr.Typeflag == tar.TypeLink { - if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { - if err := os.Chmod(path, hdrInfo.Mode()); err != nil { - return err - } - } - } else if hdr.Typeflag != tar.TypeSymlink { - if err := os.Chmod(path, hdrInfo.Mode()); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go deleted file mode 100644 index 7260174bf..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go +++ /dev/null @@ -1,67 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "os" - "path/filepath" - - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/longpath" -) - -// fixVolumePathPrefix does platform specific processing to ensure that if -// the path being passed in is not in a volume path format, convert it to one. -func fixVolumePathPrefix(srcPath string) string { - return longpath.AddPrefix(srcPath) -} - -// getWalkRoot calculates the root path when performing a TarWithOptions. -// We use a separate function as this is platform specific. -func getWalkRoot(srcPath string, include string) string { - return filepath.Join(srcPath, include) -} - -// CanonicalTarNameForPath returns platform-specific filepath -// to canonical posix-style path for tar archival. p is relative -// path. -func CanonicalTarNameForPath(p string) string { - return filepath.ToSlash(p) -} - -// chmodTarEntry is used to adjust the file permissions used in tar header based -// on the platform the archival is done. 
-func chmodTarEntry(perm os.FileMode) os.FileMode { - // perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) - permPart := perm & os.ModePerm - noPermPart := perm &^ os.ModePerm - // Add the x bit: make everything +x from windows - permPart |= 0111 - permPart &= 0755 - - return noPermPart | permPart -} - -func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { - // do nothing. no notion of Rdev, Nlink in stat on Windows - return -} - -func getInodeFromStat(stat interface{}) (inode uint64, err error) { - // do nothing. no notion of Inode in stat on Windows - return -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - return nil -} - -func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { - return nil -} - -func getFileUIDGID(stat interface{}) (idtools.Identity, error) { - // no notion of file ownership mapping yet on Windows - return idtools.Identity{UID: 0, GID: 0}, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go deleted file mode 100644 index 7f7242be5..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes.go +++ /dev/null @@ -1,442 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "sort" - "strings" - "syscall" - "time" - - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" -) - -// ChangeType represents the change type. -type ChangeType int - -const ( - // ChangeModify represents the modify operation. - ChangeModify = iota - // ChangeAdd represents the add operation. - ChangeAdd - // ChangeDelete represents the delete operation. - ChangeDelete -) - -func (c ChangeType) String() string { - switch c { - case ChangeModify: - return "C" - case ChangeAdd: - return "A" - case ChangeDelete: - return "D" - } - return "" -} - -// Change represents a change, it wraps the change type and path. -// It describes changes of the files in the path respect to the -// parent layers. The change could be modify, add, delete. -// This is used for layer diff. -type Change struct { - Path string - Kind ChangeType -} - -func (change *Change) String() string { - return fmt.Sprintf("%s %s", change.Kind, change.Path) -} - -// for sort.Sort -type changesByPath []Change - -func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } -func (c changesByPath) Len() int { return len(c) } -func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } - -// Gnu tar doesn't have sub-second mtime precision. The go tar -// writer (1.10+) does when using PAX format, but we round times to seconds -// to ensure archives have the same hashes for backwards compatibility. -// See https://github.com/moby/moby/pull/35739/commits/fb170206ba12752214630b269a40ac7be6115ed4. -// -// Non-sub-second is problematic when we apply changes via tar -// files. 
We handle this by comparing for exact times, *or* same -// second count and either a or b having exactly 0 nanoseconds -func sameFsTime(a, b time.Time) bool { - return a.Equal(b) || - (a.Unix() == b.Unix() && - (a.Nanosecond() == 0 || b.Nanosecond() == 0)) -} - -func sameFsTimeSpec(a, b syscall.Timespec) bool { - return a.Sec == b.Sec && - (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) -} - -// Changes walks the path rw and determines changes for the files in the path, -// with respect to the parent layers -func Changes(layers []string, rw string) ([]Change, error) { - return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip) -} - -func aufsMetadataSkip(path string) (skip bool, err error) { - skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path) - if err != nil { - skip = true - } - return -} - -func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { - f := filepath.Base(path) - - // If there is a whiteout, then the file was removed - if strings.HasPrefix(f, WhiteoutPrefix) { - originalFile := f[len(WhiteoutPrefix):] - return filepath.Join(filepath.Dir(path), originalFile), nil - } - - return "", nil -} - -type skipChange func(string) (bool, error) -type deleteChange func(string, string, os.FileInfo) (string, error) - -func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) { - var ( - changes []Change - changedDirs = make(map[string]struct{}) - ) - - err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - path, err = filepath.Rel(rw, path) - if err != nil { - return err - } - - // As this runs on the daemon side, file paths are OS specific. - path = filepath.Join(string(os.PathSeparator), path) - - // Skip root - if path == string(os.PathSeparator) { - return nil - } - - if sc != nil { - if skip, err := sc(path); skip { - return err - } - } - - change := Change{ - Path: path, - } - - deletedFile, err := dc(rw, path, f) - if err != nil { - return err - } - - // Find out what kind of modification happened - if deletedFile != "" { - change.Path = deletedFile - change.Kind = ChangeDelete - } else { - // Otherwise, the file was added - change.Kind = ChangeAdd - - // ...Unless it already existed in a top layer, in which case, it's a modification - for _, layer := range layers { - stat, err := os.Stat(filepath.Join(layer, path)) - if err != nil && !os.IsNotExist(err) { - return err - } - if err == nil { - // The file existed in the top layer, so that's a modification - - // However, if it's a directory, maybe it wasn't actually modified. - // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar - if stat.IsDir() && f.IsDir() { - if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { - // Both directories are the same, don't record the change - return nil - } - } - change.Kind = ChangeModify - break - } - } - } - - // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. - // This block is here to ensure the change is recorded even if the - // modify time, mode and size of the parent directory in the rw and ro layers are all equal. - // Check https://github.com/docker/docker/pull/13590 for details. 
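// Editorial aside — illustrative only, not part of the vendored diff. A
// minimal, self-contained sketch of the parent-directory rule described in the
// comment above: when an entry is added or deleted, its immediate parent is
// recorded once as a modification so directory metadata can be restored when
// the change set is re-applied. The paths are hypothetical; only the rule
// mirrors the block that follows.
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	added := []string{"/foo/bar/file.txt", "/foo/bar/other.txt"}
	changedDirs := map[string]bool{}
	var changes []string
	for _, p := range added {
		if parent := filepath.Dir(p); parent != "/" && !changedDirs[parent] {
			changes = append(changes, "C "+parent) // parent recorded as modified, once
			changedDirs[parent] = true
		}
		changes = append(changes, "A "+p)
	}
	fmt.Println(changes) // [C /foo/bar A /foo/bar/file.txt A /foo/bar/other.txt]
}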
- if f.IsDir() { - changedDirs[path] = struct{}{} - } - if change.Kind == ChangeAdd || change.Kind == ChangeDelete { - parent := filepath.Dir(path) - if _, ok := changedDirs[parent]; !ok && parent != "/" { - changes = append(changes, Change{Path: parent, Kind: ChangeModify}) - changedDirs[parent] = struct{}{} - } - } - - // Record change - changes = append(changes, change) - return nil - }) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - return changes, nil -} - -// FileInfo describes the information of a file. -type FileInfo struct { - parent *FileInfo - name string - stat *system.StatT - children map[string]*FileInfo - capability []byte - added bool -} - -// LookUp looks up the file information of a file. -func (info *FileInfo) LookUp(path string) *FileInfo { - // As this runs on the daemon side, file paths are OS specific. - parent := info - if path == string(os.PathSeparator) { - return info - } - - pathElements := strings.Split(path, string(os.PathSeparator)) - for _, elem := range pathElements { - if elem != "" { - child := parent.children[elem] - if child == nil { - return nil - } - parent = child - } - } - return parent -} - -func (info *FileInfo) path() string { - if info.parent == nil { - // As this runs on the daemon side, file paths are OS specific. - return string(os.PathSeparator) - } - return filepath.Join(info.parent.path(), info.name) -} - -func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { - sizeAtEntry := len(*changes) - - if oldInfo == nil { - // add - change := Change{ - Path: info.path(), - Kind: ChangeAdd, - } - *changes = append(*changes, change) - info.added = true - } - - // We make a copy so we can modify it to detect additions - // also, we only recurse on the old dir if the new info is a directory - // otherwise any previous delete/change is considered recursive - oldChildren := make(map[string]*FileInfo) - if oldInfo != nil && info.isDir() { - for k, v := range oldInfo.children { - oldChildren[k] = v - } - } - - for name, newChild := range info.children { - oldChild := oldChildren[name] - if oldChild != nil { - // change? - oldStat := oldChild.stat - newStat := newChild.stat - // Note: We can't compare inode or ctime or blocksize here, because these change - // when copying a file into a container. However, that is not generally a problem - // because any content change will change mtime, and any status change should - // be visible when actually comparing the stat fields. The only time this - // breaks down is if some code intentionally hides a change by setting - // back mtime - if statDifferent(oldStat, newStat) || - !bytes.Equal(oldChild.capability, newChild.capability) { - change := Change{ - Path: newChild.path(), - Kind: ChangeModify, - } - *changes = append(*changes, change) - newChild.added = true - } - - // Remove from copy so we can detect deletions - delete(oldChildren, name) - } - - newChild.addChanges(oldChild, changes) - } - for _, oldChild := range oldChildren { - // delete - change := Change{ - Path: oldChild.path(), - Kind: ChangeDelete, - } - *changes = append(*changes, change) - } - - // If there were changes inside this directory, we need to add it, even if the directory - // itself wasn't changed. This is needed to properly save and restore filesystem permissions. - // As this runs on the daemon side, file paths are OS specific. 
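// Editorial aside — illustrative only, not part of the vendored diff. The block
// that follows grows the slice by one and shifts the tail right so the
// directory's own entry lands *before* the child entries recorded after
// sizeAtEntry. A minimal sketch of that insert-at-index idiom, with
// hypothetical change strings:
package main

import "fmt"

func insertAt(s []string, i int, v string) []string {
	s = append(s, v)     // grow by one; this slot is overwritten below
	copy(s[i+1:], s[i:]) // shift the tail one position to the right
	s[i] = v             // place the new element at index i
	return s
}

func main() {
	changes := []string{"C /", "A /etc/app/conf", "A /etc/app/cache"}
	fmt.Println(insertAt(changes, 1, "C /etc/app"))
	// [C / C /etc/app A /etc/app/conf A /etc/app/cache]
}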
- if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { - change := Change{ - Path: info.path(), - Kind: ChangeModify, - } - // Let's insert the directory entry before the recently added entries located inside this dir - *changes = append(*changes, change) // just to resize the slice, will be overwritten - copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) - (*changes)[sizeAtEntry] = change - } -} - -// Changes add changes to file information. -func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { - var changes []Change - - info.addChanges(oldInfo, &changes) - - return changes -} - -func newRootFileInfo() *FileInfo { - // As this runs on the daemon side, file paths are OS specific. - root := &FileInfo{ - name: string(os.PathSeparator), - children: make(map[string]*FileInfo), - } - return root -} - -// ChangesDirs compares two directories and generates an array of Change objects describing the changes. -// If oldDir is "", then all files in newDir will be Add-Changes. -func ChangesDirs(newDir, oldDir string) ([]Change, error) { - var ( - oldRoot, newRoot *FileInfo - ) - if oldDir == "" { - emptyDir, err := os.MkdirTemp("", "empty") - if err != nil { - return nil, err - } - defer os.Remove(emptyDir) - oldDir = emptyDir - } - oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) - if err != nil { - return nil, err - } - - return newRoot.Changes(oldRoot), nil -} - -// ChangesSize calculates the size in bytes of the provided changes, based on newDir. -func ChangesSize(newDir string, changes []Change) int64 { - var ( - size int64 - sf = make(map[uint64]struct{}) - ) - for _, change := range changes { - if change.Kind == ChangeModify || change.Kind == ChangeAdd { - file := filepath.Join(newDir, change.Path) - fileInfo, err := os.Lstat(file) - if err != nil { - logrus.Errorf("Can not stat %q: %s", file, err) - continue - } - - if fileInfo != nil && !fileInfo.IsDir() { - if hasHardlinks(fileInfo) { - inode := getIno(fileInfo) - if _, ok := sf[inode]; !ok { - size += fileInfo.Size() - sf[inode] = struct{}{} - } - } else { - size += fileInfo.Size() - } - } - } - } - return size -} - -// ExportChanges produces an Archive from the provided changes, relative to dir. -func ExportChanges(dir string, changes []Change, idMap idtools.IdentityMapping) (io.ReadCloser, error) { - reader, writer := io.Pipe() - go func() { - ta := newTarAppender(idMap, writer, nil) - - // this buffer is needed for the duration of this piped stream - defer pools.BufioWriter32KPool.Put(ta.Buffer) - - sort.Sort(changesByPath(changes)) - - // In general we log errors here but ignore them because - // during e.g. a diff operation the container can continue - // mutating the filesystem and we can see transient errors - // from this - for _, change := range changes { - if change.Kind == ChangeDelete { - whiteOutDir := filepath.Dir(change.Path) - whiteOutBase := filepath.Base(change.Path) - whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) - timestamp := time.Now() - hdr := &tar.Header{ - Name: whiteOut[1:], - Size: 0, - ModTime: timestamp, - AccessTime: timestamp, - ChangeTime: timestamp, - } - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - logrus.Debugf("Can't write whiteout header: %s", err) - } - } else { - path := filepath.Join(dir, change.Path) - if err := ta.addTarFile(path, change.Path[1:]); err != nil { - logrus.Debugf("Can't add file %s to tar: %s", path, err) - } - } - } - - // Make sure to check the error on Close. 
- if err := ta.TarWriter.Close(); err != nil { - logrus.Debugf("Can't close layer: %s", err) - } - if err := writer.Close(); err != nil { - logrus.Debugf("failed close Changes writer: %s", err) - } - }() - return reader, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go deleted file mode 100644 index f8792b3d4..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go +++ /dev/null @@ -1,286 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "bytes" - "fmt" - "os" - "path/filepath" - "sort" - "syscall" - "unsafe" - - "github.com/docker/docker/pkg/system" - "golang.org/x/sys/unix" -) - -// walker is used to implement collectFileInfoForChanges on linux. Where this -// method in general returns the entire contents of two directory trees, we -// optimize some FS calls out on linux. In particular, we take advantage of the -// fact that getdents(2) returns the inode of each file in the directory being -// walked, which, when walking two trees in parallel to generate a list of -// changes, can be used to prune subtrees without ever having to lstat(2) them -// directly. Eliminating stat calls in this way can save up to seconds on large -// images. -type walker struct { - dir1 string - dir2 string - root1 *FileInfo - root2 *FileInfo -} - -// collectFileInfoForChanges returns a complete representation of the trees -// rooted at dir1 and dir2, with one important exception: any subtree or -// leaf where the inode and device numbers are an exact match between dir1 -// and dir2 will be pruned from the results. This method is *only* to be used -// to generating a list of changes between the two directories, as it does not -// reflect the full contents. -func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) { - w := &walker{ - dir1: dir1, - dir2: dir2, - root1: newRootFileInfo(), - root2: newRootFileInfo(), - } - - i1, err := os.Lstat(w.dir1) - if err != nil { - return nil, nil, err - } - i2, err := os.Lstat(w.dir2) - if err != nil { - return nil, nil, err - } - - if err := w.walk("/", i1, i2); err != nil { - return nil, nil, err - } - - return w.root1, w.root2, nil -} - -// Given a FileInfo, its path info, and a reference to the root of the tree -// being constructed, register this file with the tree. -func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { - if fi == nil { - return nil - } - parent := root.LookUp(filepath.Dir(path)) - if parent == nil { - return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path) - } - info := &FileInfo{ - name: filepath.Base(path), - children: make(map[string]*FileInfo), - parent: parent, - } - cpath := filepath.Join(dir, path) - stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) - if err != nil { - return err - } - info.stat = stat - info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access - parent.children[info.name] = info - return nil -} - -// Walk a subtree rooted at the same path in both trees being iterated. 
For -// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d -func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { - // Register these nodes with the return trees, unless we're still at the - // (already-created) roots: - if path != "/" { - if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { - return err - } - if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { - return err - } - } - - is1Dir := i1 != nil && i1.IsDir() - is2Dir := i2 != nil && i2.IsDir() - - sameDevice := false - if i1 != nil && i2 != nil { - si1 := i1.Sys().(*syscall.Stat_t) - si2 := i2.Sys().(*syscall.Stat_t) - if si1.Dev == si2.Dev { - sameDevice = true - } - } - - // If these files are both non-existent, or leaves (non-dirs), we are done. - if !is1Dir && !is2Dir { - return nil - } - - // Fetch the names of all the files contained in both directories being walked: - var names1, names2 []nameIno - if is1Dir { - names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access - if err != nil { - return err - } - } - if is2Dir { - names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access - if err != nil { - return err - } - } - - // We have lists of the files contained in both parallel directories, sorted - // in the same order. Walk them in parallel, generating a unique merged list - // of all items present in either or both directories. - var names []string - ix1 := 0 - ix2 := 0 - - for { - if ix1 >= len(names1) { - break - } - if ix2 >= len(names2) { - break - } - - ni1 := names1[ix1] - ni2 := names2[ix2] - - switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { - case -1: // ni1 < ni2 -- advance ni1 - // we will not encounter ni1 in names2 - names = append(names, ni1.name) - ix1++ - case 0: // ni1 == ni2 - if ni1.ino != ni2.ino || !sameDevice { - names = append(names, ni1.name) - } - ix1++ - ix2++ - case 1: // ni1 > ni2 -- advance ni2 - // we will not encounter ni2 in names1 - names = append(names, ni2.name) - ix2++ - } - } - for ix1 < len(names1) { - names = append(names, names1[ix1].name) - ix1++ - } - for ix2 < len(names2) { - names = append(names, names2[ix2].name) - ix2++ - } - - // For each of the names present in either or both of the directories being - // iterated, stat the name under each root, and recurse the pair of them: - for _, name := range names { - fname := filepath.Join(path, name) - var cInfo1, cInfo2 os.FileInfo - if is1Dir { - cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access - if err != nil && !os.IsNotExist(err) { - return err - } - } - if is2Dir { - cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access - if err != nil && !os.IsNotExist(err) { - return err - } - } - if err = w.walk(fname, cInfo1, cInfo2); err != nil { - return err - } - } - return nil -} - -// {name,inode} pairs used to support the early-pruning logic of the walker type -type nameIno struct { - name string - ino uint64 -} - -type nameInoSlice []nameIno - -func (s nameInoSlice) Len() int { return len(s) } -func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } - -// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode -// numbers further up the stack when reading directory contents. Unlike -// os.Readdirnames, which returns a list of filenames, this function returns a -// list of {filename,inode} pairs. 
-func readdirnames(dirname string) (names []nameIno, err error) { - var ( - size = 100 - buf = make([]byte, 4096) - nbuf int - bufp int - nb int - ) - - f, err := os.Open(dirname) - if err != nil { - return nil, err - } - defer f.Close() - - names = make([]nameIno, 0, size) // Empty with room to grow. - for { - // Refill the buffer if necessary - if bufp >= nbuf { - bufp = 0 - nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux - if nbuf < 0 { - nbuf = 0 - } - if err != nil { - return nil, os.NewSyscallError("readdirent", err) - } - if nbuf <= 0 { - break // EOF - } - } - - // Drain the buffer - nb, names = parseDirent(buf[bufp:nbuf], names) - bufp += nb - } - - sl := nameInoSlice(names) - sort.Sort(sl) - return sl, nil -} - -// parseDirent is a minor modification of unix.ParseDirent (linux version) -// which returns {name,inode} pairs instead of just names. -func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { - origlen := len(buf) - for len(buf) > 0 { - dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0])) - buf = buf[dirent.Reclen:] - if dirent.Ino == 0 { // File absent in directory. - continue - } - bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) - var name = string(bytes[0:clen(bytes[:])]) - if name == "." || name == ".." { // Useless names - continue - } - names = append(names, nameIno{name, dirent.Ino}) - } - return origlen - len(buf), names -} - -func clen(n []byte) int { - for i := 0; i < len(n); i++ { - if n[i] == 0 { - return i - } - } - return len(n) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go deleted file mode 100644 index 0e4399a43..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_other.go +++ /dev/null @@ -1,98 +0,0 @@ -//go:build !linux -// +build !linux - -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "fmt" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/docker/docker/pkg/system" -) - -func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { - var ( - oldRoot, newRoot *FileInfo - err1, err2 error - errs = make(chan error, 2) - ) - go func() { - oldRoot, err1 = collectFileInfo(oldDir) - errs <- err1 - }() - go func() { - newRoot, err2 = collectFileInfo(newDir) - errs <- err2 - }() - - // block until both routines have returned - for i := 0; i < 2; i++ { - if err := <-errs; err != nil { - return nil, nil, err - } - } - - return oldRoot, newRoot, nil -} - -func collectFileInfo(sourceDir string) (*FileInfo, error) { - root := newRootFileInfo() - - err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - relPath, err := filepath.Rel(sourceDir, path) - if err != nil { - return err - } - - // As this runs on the daemon side, file paths are OS specific. - relPath = filepath.Join(string(os.PathSeparator), relPath) - - // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. - // Temporary workaround. If the returned path starts with two backslashes, - // trim it down to a single backslash. Only relevant on Windows. 
- if runtime.GOOS == "windows" { - if strings.HasPrefix(relPath, `\\`) { - relPath = relPath[1:] - } - } - - if relPath == string(os.PathSeparator) { - return nil - } - - parent := root.LookUp(filepath.Dir(relPath)) - if parent == nil { - return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) - } - - info := &FileInfo{ - name: filepath.Base(relPath), - children: make(map[string]*FileInfo), - parent: parent, - } - - s, err := system.Lstat(path) - if err != nil { - return err - } - info.stat = s - - info.capability, _ = system.Lgetxattr(path, "security.capability") - - parent.children[info.name] = info - - return nil - }) - if err != nil { - return nil, err - } - return root, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go deleted file mode 100644 index 54aace970..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go +++ /dev/null @@ -1,44 +0,0 @@ -//go:build !windows -// +build !windows - -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "os" - "syscall" - - "github.com/docker/docker/pkg/system" - "golang.org/x/sys/unix" -) - -func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { - // Don't look at size for dirs, its not a good measure of change - if oldStat.Mode() != newStat.Mode() || - oldStat.UID() != newStat.UID() || - oldStat.GID() != newStat.GID() || - oldStat.Rdev() != newStat.Rdev() || - // Don't look at size or modification time for dirs, its not a good - // measure of change. See https://github.com/moby/moby/issues/9874 - // for a description of the issue with modification time, and - // https://github.com/moby/moby/pull/11422 for the change. - // (Note that in the Windows implementation of this function, - // modification time IS taken as a change). See - // https://github.com/moby/moby/pull/37982 for more information. - (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR && - (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { - return true - } - return false -} - -func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0 -} - -func getIno(fi os.FileInfo) uint64 { - return fi.Sys().(*syscall.Stat_t).Ino -} - -func hasHardlinks(fi os.FileInfo) bool { - return fi.Sys().(*syscall.Stat_t).Nlink > 1 -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go deleted file mode 100644 index 9906685e4..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go +++ /dev/null @@ -1,34 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "os" - - "github.com/docker/docker/pkg/system" -) - -func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { - // Note there is slight difference between the Linux and Windows - // implementations here. Due to https://github.com/moby/moby/issues/9874, - // and the fix at https://github.com/moby/moby/pull/11422, Linux does not - // consider a change to the directory time as a change. Windows on NTFS - // does. See https://github.com/moby/moby/pull/37982 for more information. 
- - if !sameFsTime(oldStat.Mtim(), newStat.Mtim()) || - oldStat.Mode() != newStat.Mode() || - oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() { - return true - } - return false -} - -func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.Mode().IsDir() -} - -func getIno(fi os.FileInfo) (inode uint64) { - return -} - -func hasHardlinks(fi os.FileInfo) bool { - return false -} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go deleted file mode 100644 index f3111b79b..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/copy.go +++ /dev/null @@ -1,485 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "errors" - "io" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" -) - -// Errors used or returned by this file. -var ( - ErrNotDirectory = errors.New("not a directory") - ErrDirNotExists = errors.New("no such directory") - ErrCannotCopyDir = errors.New("cannot copy directory") - ErrInvalidCopySource = errors.New("invalid copy source content") -) - -// PreserveTrailingDotOrSeparator returns the given cleaned path (after -// processing using any utility functions from the path or filepath stdlib -// packages) and appends a trailing `/.` or `/` if its corresponding original -// path (from before being processed by utility functions from the path or -// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned -// path already ends in a `.` path segment, then another is not added. If the -// clean path already ends in the separator, then another is not added. -func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string { - // Ensure paths are in platform semantics - cleanedPath = strings.ReplaceAll(cleanedPath, "/", string(sep)) - originalPath = strings.ReplaceAll(originalPath, "/", string(sep)) - - if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { - if !hasTrailingPathSeparator(cleanedPath, sep) { - // Add a separator if it doesn't already end with one (a cleaned - // path would only end in a separator if it is the root). - cleanedPath += string(sep) - } - cleanedPath += "." - } - - if !hasTrailingPathSeparator(cleanedPath, sep) && hasTrailingPathSeparator(originalPath, sep) { - cleanedPath += string(sep) - } - - return cleanedPath -} - -// assertsDirectory returns whether the given path is -// asserted to be a directory, i.e., the path ends with -// a trailing '/' or `/.`, assuming a path separator of `/`. -func assertsDirectory(path string, sep byte) bool { - return hasTrailingPathSeparator(path, sep) || specifiesCurrentDir(path) -} - -// hasTrailingPathSeparator returns whether the given -// path ends with the system's path separator character. -func hasTrailingPathSeparator(path string, sep byte) bool { - return len(path) > 0 && path[len(path)-1] == sep -} - -// specifiesCurrentDir returns whether the given path specifies -// a "current directory", i.e., the last path segment is `.`. -func specifiesCurrentDir(path string) bool { - return filepath.Base(path) == "." -} - -// SplitPathDirEntry splits the given path between its directory name and its -// basename by first cleaning the path but preserves a trailing "." if the -// original path specified the current directory. 
-func SplitPathDirEntry(path string) (dir, base string) { - cleanedPath := filepath.Clean(filepath.FromSlash(path)) - - if specifiesCurrentDir(path) { - cleanedPath += string(os.PathSeparator) + "." - } - - return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) -} - -// TarResource archives the resource described by the given CopyInfo to a Tar -// archive. A non-nil error is returned if sourcePath does not exist or is -// asserted to be a directory but exists as another type of file. -// -// This function acts as a convenient wrapper around TarWithOptions, which -// requires a directory as the source path. TarResource accepts either a -// directory or a file path and correctly sets the Tar options. -func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) { - return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) -} - -// TarResourceRebase is like TarResource but renames the first path element of -// items in the resulting tar archive to match the given rebaseName if not "". -func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) { - sourcePath = normalizePath(sourcePath) - if _, err = os.Lstat(sourcePath); err != nil { - // Catches the case where the source does not exist or is not a - // directory if asserted to be a directory, as this also causes an - // error. - return - } - - // Separate the source path between its directory and - // the entry in that directory which we are archiving. - sourceDir, sourceBase := SplitPathDirEntry(sourcePath) - opts := TarResourceRebaseOpts(sourceBase, rebaseName) - - logrus.Debugf("copying %q from %q", sourceBase, sourceDir) - return TarWithOptions(sourceDir, opts) -} - -// TarResourceRebaseOpts does not preform the Tar, but instead just creates the rebase -// parameters to be sent to TarWithOptions (the TarOptions struct) -func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions { - filter := []string{sourceBase} - return &TarOptions{ - Compression: Uncompressed, - IncludeFiles: filter, - IncludeSourceDir: true, - RebaseNames: map[string]string{ - sourceBase: rebaseName, - }, - } -} - -// CopyInfo holds basic info about the source -// or destination path of a copy operation. -type CopyInfo struct { - Path string - Exists bool - IsDir bool - RebaseName string -} - -// CopyInfoSourcePath stats the given path to create a CopyInfo -// struct representing that resource for the source of an archive copy -// operation. The given path should be an absolute local path. A source path -// has all symlinks evaluated that appear before the last path separator ("/" -// on Unix). As it is to be a copy source, the path must exist. -func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) { - // normalize the file path and then evaluate the symbol link - // we will use the target file instead of the symbol link if - // followLink is set - path = normalizePath(path) - - resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink) - if err != nil { - return CopyInfo{}, err - } - - stat, err := os.Lstat(resolvedPath) - if err != nil { - return CopyInfo{}, err - } - - return CopyInfo{ - Path: resolvedPath, - Exists: true, - IsDir: stat.IsDir(), - RebaseName: rebaseName, - }, nil -} - -// CopyInfoDestinationPath stats the given path to create a CopyInfo -// struct representing that resource for the destination of an archive copy -// operation. The given path should be an absolute local path. 
-func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { - maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot. - path = normalizePath(path) - originalPath := path - - stat, err := os.Lstat(path) - - if err == nil && stat.Mode()&os.ModeSymlink == 0 { - // The path exists and is not a symlink. - return CopyInfo{ - Path: path, - Exists: true, - IsDir: stat.IsDir(), - }, nil - } - - // While the path is a symlink. - for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ { - if n > maxSymlinkIter { - // Don't follow symlinks more than this arbitrary number of times. - return CopyInfo{}, errors.New("too many symlinks in " + originalPath) - } - - // The path is a symbolic link. We need to evaluate it so that the - // destination of the copy operation is the link target and not the - // link itself. This is notably different than CopyInfoSourcePath which - // only evaluates symlinks before the last appearing path separator. - // Also note that it is okay if the last path element is a broken - // symlink as the copy operation should create the target. - var linkTarget string - - linkTarget, err = os.Readlink(path) - if err != nil { - return CopyInfo{}, err - } - - if !system.IsAbs(linkTarget) { - // Join with the parent directory. - dstParent, _ := SplitPathDirEntry(path) - linkTarget = filepath.Join(dstParent, linkTarget) - } - - path = linkTarget - stat, err = os.Lstat(path) - } - - if err != nil { - // It's okay if the destination path doesn't exist. We can still - // continue the copy operation if the parent directory exists. - if !os.IsNotExist(err) { - return CopyInfo{}, err - } - - // Ensure destination parent dir exists. - dstParent, _ := SplitPathDirEntry(path) - - parentDirStat, err := os.Stat(dstParent) - if err != nil { - return CopyInfo{}, err - } - if !parentDirStat.IsDir() { - return CopyInfo{}, ErrNotDirectory - } - - return CopyInfo{Path: path}, nil - } - - // The path exists after resolving symlinks. - return CopyInfo{ - Path: path, - Exists: true, - IsDir: stat.IsDir(), - }, nil -} - -// PrepareArchiveCopy prepares the given srcContent archive, which should -// contain the archived resource described by srcInfo, to the destination -// described by dstInfo. Returns the possibly modified content archive along -// with the path to the destination directory which it should be extracted to. -func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) { - // Ensure in platform semantics - srcInfo.Path = normalizePath(srcInfo.Path) - dstInfo.Path = normalizePath(dstInfo.Path) - - // Separate the destination path between its directory and base - // components in case the source archive contents need to be rebased. - dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) - _, srcBase := SplitPathDirEntry(srcInfo.Path) - - switch { - case dstInfo.Exists && dstInfo.IsDir: - // The destination exists as a directory. No alteration - // to srcContent is needed as its contents can be - // simply extracted to the destination directory. - return dstInfo.Path, io.NopCloser(srcContent), nil - case dstInfo.Exists && srcInfo.IsDir: - // The destination exists as some type of file and the source - // content is a directory. This is an error condition since - // you cannot copy a directory to an existing file location. - return "", nil, ErrCannotCopyDir - case dstInfo.Exists: - // The destination exists as some type of file and the source content - // is also a file. 
The source content entry will have to be renamed to - // have a basename which matches the destination path's basename. - if len(srcInfo.RebaseName) != 0 { - srcBase = srcInfo.RebaseName - } - return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil - case srcInfo.IsDir: - // The destination does not exist and the source content is an archive - // of a directory. The archive should be extracted to the parent of - // the destination path instead, and when it is, the directory that is - // created as a result should take the name of the destination path. - // The source content entries will have to be renamed to have a - // basename which matches the destination path's basename. - if len(srcInfo.RebaseName) != 0 { - srcBase = srcInfo.RebaseName - } - return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil - case assertsDirectory(dstInfo.Path, os.PathSeparator): - // The destination does not exist and is asserted to be created as a - // directory, but the source content is not a directory. This is an - // error condition since you cannot create a directory from a file - // source. - return "", nil, ErrDirNotExists - default: - // The last remaining case is when the destination does not exist, is - // not asserted to be a directory, and the source content is not an - // archive of a directory. It this case, the destination file will need - // to be created when the archive is extracted and the source content - // entry will have to be renamed to have a basename which matches the - // destination path's basename. - if len(srcInfo.RebaseName) != 0 { - srcBase = srcInfo.RebaseName - } - return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil - } -} - -// RebaseArchiveEntries rewrites the given srcContent archive replacing -// an occurrence of oldBase with newBase at the beginning of entry names. -func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser { - if oldBase == string(os.PathSeparator) { - // If oldBase specifies the root directory, use an empty string as - // oldBase instead so that newBase doesn't replace the path separator - // that all paths will start with. - oldBase = "" - } - - rebased, w := io.Pipe() - - go func() { - srcTar := tar.NewReader(srcContent) - rebasedTar := tar.NewWriter(w) - - for { - hdr, err := srcTar.Next() - if err == io.EOF { - // Signals end of archive. - rebasedTar.Close() - w.Close() - return - } - if err != nil { - w.CloseWithError(err) - return - } - - // srcContent tar stream, as served by TarWithOptions(), is - // definitely in PAX format, but tar.Next() mistakenly guesses it - // as USTAR, which creates a problem: if the newBase is >100 - // characters long, WriteHeader() returns an error like - // "archive/tar: cannot encode header: Format specifies USTAR; and USTAR cannot encode Name=...". - // - // To fix, set the format to PAX here. See docker/for-linux issue #484. - hdr.Format = tar.FormatPAX - hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) - if hdr.Typeflag == tar.TypeLink { - hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1) - } - - if err = rebasedTar.WriteHeader(hdr); err != nil { - w.CloseWithError(err) - return - } - - // Ignoring GoSec G110. See https://github.com/securego/gosec/pull/433 - // and https://cure53.de/pentest-report_opa.pdf, which recommends to - // replace io.Copy with io.CopyN7. The latter allows to specify the - // maximum number of bytes that should be read. 
By properly defining - // the limit, it can be assured that a GZip compression bomb cannot - // easily cause a Denial-of-Service. - // After reviewing with @tonistiigi and @cpuguy83, this should not - // affect us, because here we do not read into memory, hence should - // not be vulnerable to this code consuming memory. - //nolint:gosec // G110: Potential DoS vulnerability via decompression bomb (gosec) - if _, err = io.Copy(rebasedTar, srcTar); err != nil { - w.CloseWithError(err) - return - } - } - }() - - return rebased -} - -// CopyResource performs an archive copy from the given source path to the -// given destination path. The source path MUST exist and the destination -// path's parent directory must exist. -func CopyResource(srcPath, dstPath string, followLink bool) error { - var ( - srcInfo CopyInfo - err error - ) - - // Ensure in platform semantics - srcPath = normalizePath(srcPath) - dstPath = normalizePath(dstPath) - - // Clean the source and destination paths. - srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath, os.PathSeparator) - dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath, os.PathSeparator) - - if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil { - return err - } - - content, err := TarResource(srcInfo) - if err != nil { - return err - } - defer content.Close() - - return CopyTo(content, srcInfo, dstPath) -} - -// CopyTo handles extracting the given content whose -// entries should be sourced from srcInfo to dstPath. -func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error { - // The destination path need not exist, but CopyInfoDestinationPath will - // ensure that at least the parent directory exists. - dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath)) - if err != nil { - return err - } - - dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo) - if err != nil { - return err - } - defer copyArchive.Close() - - options := &TarOptions{ - NoLchown: true, - NoOverwriteDirNonDir: true, - } - - return Untar(copyArchive, dstDir, options) -} - -// ResolveHostSourcePath decides real path need to be copied with parameters such as -// whether to follow symbol link or not, if followLink is true, resolvedPath will return -// link target of any symbol link file, else it will only resolve symlink of directory -// but return symbol link file itself without resolving. -func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) { - if followLink { - resolvedPath, err = filepath.EvalSymlinks(path) - if err != nil { - return - } - - resolvedPath, rebaseName = GetRebaseName(path, resolvedPath) - } else { - dirPath, basePath := filepath.Split(path) - - // if not follow symbol link, then resolve symbol link of parent dir - var resolvedDirPath string - resolvedDirPath, err = filepath.EvalSymlinks(dirPath) - if err != nil { - return - } - // resolvedDirPath will have been cleaned (no trailing path separators) so - // we can manually join it with the base path element. 
- resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath - if hasTrailingPathSeparator(path, os.PathSeparator) && - filepath.Base(path) != filepath.Base(resolvedPath) { - rebaseName = filepath.Base(path) - } - } - return resolvedPath, rebaseName, nil -} - -// GetRebaseName normalizes and compares path and resolvedPath, -// return completed resolved path and rebased file name -func GetRebaseName(path, resolvedPath string) (string, string) { - // linkTarget will have been cleaned (no trailing path separators and dot) so - // we can manually join it with them - var rebaseName string - if specifiesCurrentDir(path) && - !specifiesCurrentDir(resolvedPath) { - resolvedPath += string(filepath.Separator) + "." - } - - if hasTrailingPathSeparator(path, os.PathSeparator) && - !hasTrailingPathSeparator(resolvedPath, os.PathSeparator) { - resolvedPath += string(filepath.Separator) - } - - if filepath.Base(path) != filepath.Base(resolvedPath) { - // In the case where the path had a trailing separator and a symlink - // evaluation has changed the last path component, we will need to - // rebase the name in the archive that is being copied to match the - // originally requested name. - rebaseName = filepath.Base(path) - } - return resolvedPath, rebaseName -} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go deleted file mode 100644 index 2ac7729f4..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build !windows -// +build !windows - -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "path/filepath" -) - -func normalizePath(path string) string { - return filepath.ToSlash(path) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go deleted file mode 100644 index a878d1bac..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "path/filepath" -) - -func normalizePath(path string) string { - return filepath.FromSlash(path) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go deleted file mode 100644 index 8eeccb608..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/diff.go +++ /dev/null @@ -1,244 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "fmt" - "io" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" -) - -// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be -// compressed or uncompressed. -// Returns the size in bytes of the contents of the layer. -func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { - tr := tar.NewReader(layer) - trBuf := pools.BufioReader32KPool.Get(tr) - defer pools.BufioReader32KPool.Put(trBuf) - - var dirs []*tar.Header - unpackedPaths := make(map[string]struct{}) - - if options == nil { - options = &TarOptions{} - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - - aufsTempdir := "" - aufsHardlinks := make(map[string]*tar.Header) - - // Iterate through the files in the archive. 
- for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - return 0, err - } - - size += hdr.Size - - // Normalize name, for safety and for a simple is-root check - hdr.Name = filepath.Clean(hdr.Name) - - // Windows does not support filenames with colons in them. Ignore - // these files. This is not a problem though (although it might - // appear that it is). Let's suppose a client is running docker pull. - // The daemon it points to is Windows. Would it make sense for the - // client to be doing a docker pull Ubuntu for example (which has files - // with colons in the name under /usr/share/man/man3)? No, absolutely - // not as it would really only make sense that they were pulling a - // Windows image. However, for development, it is necessary to be able - // to pull Linux images which are in the repository. - // - // TODO Windows. Once the registry is aware of what images are Windows- - // specific or Linux-specific, this warning should be changed to an error - // to cater for the situation where someone does manage to upload a Linux - // image but have it tagged as Windows inadvertently. - if runtime.GOOS == "windows" { - if strings.Contains(hdr.Name, ":") { - logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) - continue - } - } - - // Ensure that the parent directory exists. - err = createImpliedDirectories(dest, hdr, options) - if err != nil { - return 0, err - } - - // Skip AUFS metadata dirs - if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) { - // Regular files inside /.wh..wh.plnk can be used as hardlink targets - // We don't want this directory, but we need the files in them so that - // such hardlinks can be resolved. - if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg { - basename := filepath.Base(hdr.Name) - aufsHardlinks[basename] = hdr - if aufsTempdir == "" { - if aufsTempdir, err = os.MkdirTemp("", "dockerplnk"); err != nil { - return 0, err - } - defer os.RemoveAll(aufsTempdir) - } - if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil { - return 0, err - } - } - - if hdr.Name != WhiteoutOpaqueDir { - continue - } - } - //#nosec G305 -- The joined path is guarded against path traversal. - path := filepath.Join(dest, hdr.Name) - rel, err := filepath.Rel(dest, path) - if err != nil { - return 0, err - } - - // Note as these operations are platform specific, so must the slash be. - if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { - return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) - } - base := filepath.Base(path) - - if strings.HasPrefix(base, WhiteoutPrefix) { - dir := filepath.Dir(path) - if base == WhiteoutOpaqueDir { - _, err := os.Lstat(dir) - if err != nil { - return 0, err - } - err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { - if err != nil { - if os.IsNotExist(err) { - err = nil // parent was deleted - } - return err - } - if path == dir { - return nil - } - if _, exists := unpackedPaths[path]; !exists { - err := os.RemoveAll(path) - return err - } - return nil - }) - if err != nil { - return 0, err - } - } else { - originalBase := base[len(WhiteoutPrefix):] - originalPath := filepath.Join(dir, originalBase) - if err := os.RemoveAll(originalPath); err != nil { - return 0, err - } - } - } else { - // If path exits we almost always just want to remove and replace it. 
- // The only exception is when it is a directory *and* the file from - // the layer is also a directory. Then we want to merge them (i.e. - // just apply the metadata from the layer). - if fi, err := os.Lstat(path); err == nil { - if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { - if err := os.RemoveAll(path); err != nil { - return 0, err - } - } - } - - trBuf.Reset(tr) - srcData := io.Reader(trBuf) - srcHdr := hdr - - // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so - // we manually retarget these into the temporary files we extracted them into - if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { - linkBasename := filepath.Base(hdr.Linkname) - srcHdr = aufsHardlinks[linkBasename] - if srcHdr == nil { - return 0, fmt.Errorf("Invalid aufs hardlink") - } - tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) - if err != nil { - return 0, err - } - defer tmpFile.Close() - srcData = tmpFile - } - - if err := remapIDs(options.IDMap, srcHdr); err != nil { - return 0, err - } - - if err := createTarFile(path, dest, srcHdr, srcData, !options.NoLchown, nil, options.InUserNS); err != nil { - return 0, err - } - - // Directory mtimes must be handled at the end to avoid further - // file creation in them to modify the directory mtime - if hdr.Typeflag == tar.TypeDir { - dirs = append(dirs, hdr) - } - unpackedPaths[path] = struct{}{} - } - } - - for _, hdr := range dirs { - //#nosec G305 -- The header was checked for path traversal before it was appended to the dirs slice. - path := filepath.Join(dest, hdr.Name) - if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { - return 0, err - } - } - - return size, nil -} - -// ApplyLayer parses a diff in the standard layer format from `layer`, -// and applies it to the directory `dest`. The stream `layer` can be -// compressed or uncompressed. -// Returns the size in bytes of the contents of the layer. -func ApplyLayer(dest string, layer io.Reader) (int64, error) { - return applyLayerHandler(dest, layer, &TarOptions{}, true) -} - -// ApplyUncompressedLayer parses a diff in the standard layer format from -// `layer`, and applies it to the directory `dest`. The stream `layer` -// can only be uncompressed. -// Returns the size in bytes of the contents of the layer. -func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { - return applyLayerHandler(dest, layer, options, false) -} - -// do the bulk load of ApplyLayer, but allow for not calling DecompressStream -func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) { - dest = filepath.Clean(dest) - - // We need to be able to set any perms - restore := overrideUmask(0) - defer restore() - - if decompress { - decompLayer, err := DecompressStream(layer) - if err != nil { - return 0, err - } - defer decompLayer.Close() - layer = decompLayer - } - return UnpackLayer(dest, layer, options) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff_unix.go b/vendor/github.com/docker/docker/pkg/archive/diff_unix.go deleted file mode 100644 index d7f806445..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/diff_unix.go +++ /dev/null @@ -1,22 +0,0 @@ -//go:build !windows -// +build !windows - -package archive - -import "golang.org/x/sys/unix" - -// overrideUmask sets current process's file mode creation mask to newmask -// and returns a function to restore it. 
-// -// WARNING for readers stumbling upon this code. Changing umask in a multi- -// threaded environment isn't safe. Don't use this without understanding the -// risks, and don't export this function for others to use (we shouldn't even -// be using this ourself). -// -// FIXME(thaJeztah): we should get rid of these hacks if possible. -func overrideUmask(newMask int) func() { - oldMask := unix.Umask(newMask) - return func() { - unix.Umask(oldMask) - } -} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff_windows.go b/vendor/github.com/docker/docker/pkg/archive/diff_windows.go deleted file mode 100644 index d28f5b2df..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/diff_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -package archive - -// overrideUmask is a no-op on windows. -func overrideUmask(newmask int) func() { - return func() {} -} diff --git a/vendor/github.com/docker/docker/pkg/archive/time_linux.go b/vendor/github.com/docker/docker/pkg/archive/time_linux.go deleted file mode 100644 index 797143ee8..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/time_linux.go +++ /dev/null @@ -1,16 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "syscall" - "time" -) - -func timeToTimespec(time time.Time) (ts syscall.Timespec) { - if time.IsZero() { - // Return UTIME_OMIT special value - ts.Sec = 0 - ts.Nsec = (1 << 30) - 2 - return - } - return syscall.NsecToTimespec(time.UnixNano()) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go deleted file mode 100644 index d08779686..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go +++ /dev/null @@ -1,17 +0,0 @@ -//go:build !linux -// +build !linux - -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "syscall" - "time" -) - -func timeToTimespec(time time.Time) (ts syscall.Timespec) { - nsec := int64(0) - if !time.IsZero() { - nsec = time.UnixNano() - } - return syscall.NsecToTimespec(nsec) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go deleted file mode 100644 index 4c072a87e..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go +++ /dev/null @@ -1,23 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -// Whiteouts are files with a special meaning for the layered filesystem. -// Docker uses AUFS whiteout files inside exported archives. In other -// filesystems these files are generated/handled on tar creation/extraction. - -// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a -// filename this means that file has been removed from the base layer. -const WhiteoutPrefix = ".wh." - -// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not -// for removing an actual file. Normally these files are excluded from exported -// archives. -const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix - -// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other -// layers. Normally these should not go into exported archives and all changed -// hardlinks should be copied to the top layer. -const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" - -// WhiteoutOpaqueDir file means directory has been made opaque - meaning -// readdir calls to this directory do not follow to lower layers. 
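
For context, the diff.go removed above exposes ApplyLayer and ApplyUncompressedLayer as the entry points that unpack a (possibly compressed) layer tar into a directory. A minimal usage sketch, assuming the package were still importable at its original vendored path; "layer.tar" and "/tmp/rootfs" are hypothetical names:

    package main

    import (
        "fmt"
        "log"
        "os"

        "github.com/docker/docker/pkg/archive"
    )

    func main() {
        // ApplyLayer also accepts compressed streams, since applyLayerHandler
        // decompresses before calling UnpackLayer.
        layer, err := os.Open("layer.tar")
        if err != nil {
            log.Fatal(err)
        }
        defer layer.Close()

        // The return value is the size in bytes of the layer contents.
        size, err := archive.ApplyLayer("/tmp/rootfs", layer)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("applied %d bytes\n", size)
    }
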
-const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq" diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/docker/docker/pkg/archive/wrap.go deleted file mode 100644 index 032db82ce..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/wrap.go +++ /dev/null @@ -1,59 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "bytes" - "io" -) - -// Generate generates a new archive from the content provided -// as input. -// -// `files` is a sequence of path/content pairs. A new file is -// added to the archive for each pair. -// If the last pair is incomplete, the file is created with an -// empty content. For example: -// -// Generate("foo.txt", "hello world", "emptyfile") -// -// The above call will return an archive with 2 files: -// - ./foo.txt with content "hello world" -// - ./empty with empty content -// -// FIXME: stream content instead of buffering -// FIXME: specify permissions and other archive metadata -func Generate(input ...string) (io.Reader, error) { - files := parseStringPairs(input...) - buf := new(bytes.Buffer) - tw := tar.NewWriter(buf) - for _, file := range files { - name, content := file[0], file[1] - hdr := &tar.Header{ - Name: name, - Size: int64(len(content)), - } - if err := tw.WriteHeader(hdr); err != nil { - return nil, err - } - if _, err := tw.Write([]byte(content)); err != nil { - return nil, err - } - } - if err := tw.Close(); err != nil { - return nil, err - } - return buf, nil -} - -func parseStringPairs(input ...string) (output [][2]string) { - output = make([][2]string, 0, len(input)/2+1) - for i := 0; i < len(input); i += 2 { - var pair [2]string - pair[0] = input[i] - if i+1 < len(input) { - pair[1] = input[i+1] - } - output = append(output, pair) - } - return -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go deleted file mode 100644 index 1e0a89004..000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools.go +++ /dev/null @@ -1,243 +0,0 @@ -package idtools // import "github.com/docker/docker/pkg/idtools" - -import ( - "bufio" - "fmt" - "os" - "strconv" - "strings" -) - -// IDMap contains a single entry for user namespace range remapping. An array -// of IDMap entries represents the structure that will be provided to the Linux -// kernel for creating a user namespace. -type IDMap struct { - ContainerID int `json:"container_id"` - HostID int `json:"host_id"` - Size int `json:"size"` -} - -type subIDRange struct { - Start int - Length int -} - -type ranges []subIDRange - -func (e ranges) Len() int { return len(e) } -func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } -func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } - -const ( - subuidFileName = "/etc/subuid" - subgidFileName = "/etc/subgid" -) - -// MkdirAllAndChown creates a directory (include any along the path) and then modifies -// ownership to the requested uid/gid. If the directory already exists, this -// function will still change ownership and permissions. -func MkdirAllAndChown(path string, mode os.FileMode, owner Identity) error { - return mkdirAs(path, mode, owner, true, true) -} - -// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. -// If the directory already exists, this function still changes ownership and permissions. -// Note that unlike os.Mkdir(), this function does not return IsExist error -// in case path already exists. 
-func MkdirAndChown(path string, mode os.FileMode, owner Identity) error { - return mkdirAs(path, mode, owner, false, true) -} - -// MkdirAllAndChownNew creates a directory (include any along the path) and then modifies -// ownership ONLY of newly created directories to the requested uid/gid. If the -// directories along the path exist, no change of ownership or permissions will be performed -func MkdirAllAndChownNew(path string, mode os.FileMode, owner Identity) error { - return mkdirAs(path, mode, owner, true, false) -} - -// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. -// If the maps are empty, then the root uid/gid will default to "real" 0/0 -func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { - uid, err := toHost(0, uidMap) - if err != nil { - return -1, -1, err - } - gid, err := toHost(0, gidMap) - if err != nil { - return -1, -1, err - } - return uid, gid, nil -} - -// toContainer takes an id mapping, and uses it to translate a -// host ID to the remapped ID. If no map is provided, then the translation -// assumes a 1-to-1 mapping and returns the passed in id -func toContainer(hostID int, idMap []IDMap) (int, error) { - if idMap == nil { - return hostID, nil - } - for _, m := range idMap { - if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { - contID := m.ContainerID + (hostID - m.HostID) - return contID, nil - } - } - return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) -} - -// toHost takes an id mapping and a remapped ID, and translates the -// ID to the mapped host ID. If no map is provided, then the translation -// assumes a 1-to-1 mapping and returns the passed in id # -func toHost(contID int, idMap []IDMap) (int, error) { - if idMap == nil { - return contID, nil - } - for _, m := range idMap { - if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) { - hostID := m.HostID + (contID - m.ContainerID) - return hostID, nil - } - } - return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) -} - -// Identity is either a UID and GID pair or a SID (but not both) -type Identity struct { - UID int - GID int - SID string -} - -// Chown changes the numeric uid and gid of the named file to id.UID and id.GID. -func (id Identity) Chown(name string) error { - return os.Chown(name, id.UID, id.GID) -} - -// IdentityMapping contains a mappings of UIDs and GIDs. -// The zero value represents an empty mapping. -type IdentityMapping struct { - UIDMaps []IDMap `json:"UIDMaps"` - GIDMaps []IDMap `json:"GIDMaps"` -} - -// RootPair returns a uid and gid pair for the root user. The error is ignored -// because a root user always exists, and the defaults are correct when the uid -// and gid maps are empty. -func (i IdentityMapping) RootPair() Identity { - uid, gid, _ := GetRootUIDGID(i.UIDMaps, i.GIDMaps) - return Identity{UID: uid, GID: gid} -} - -// ToHost returns the host UID and GID for the container uid, gid. 
-// Remapping is only performed if the ids aren't already the remapped root ids -func (i IdentityMapping) ToHost(pair Identity) (Identity, error) { - var err error - target := i.RootPair() - - if pair.UID != target.UID { - target.UID, err = toHost(pair.UID, i.UIDMaps) - if err != nil { - return target, err - } - } - - if pair.GID != target.GID { - target.GID, err = toHost(pair.GID, i.GIDMaps) - } - return target, err -} - -// ToContainer returns the container UID and GID for the host uid and gid -func (i IdentityMapping) ToContainer(pair Identity) (int, int, error) { - uid, err := toContainer(pair.UID, i.UIDMaps) - if err != nil { - return -1, -1, err - } - gid, err := toContainer(pair.GID, i.GIDMaps) - return uid, gid, err -} - -// Empty returns true if there are no id mappings -func (i IdentityMapping) Empty() bool { - return len(i.UIDMaps) == 0 && len(i.GIDMaps) == 0 -} - -// UIDs returns the mapping for UID. -// -// Deprecated: reference the UIDMaps field directly. -func (i IdentityMapping) UIDs() []IDMap { - return i.UIDMaps -} - -// GIDs returns the mapping for GID. -// -// Deprecated: reference the GIDMaps field directly. -func (i IdentityMapping) GIDs() []IDMap { - return i.GIDMaps -} - -func createIDMap(subidRanges ranges) []IDMap { - idMap := []IDMap{} - - containerID := 0 - for _, idrange := range subidRanges { - idMap = append(idMap, IDMap{ - ContainerID: containerID, - HostID: idrange.Start, - Size: idrange.Length, - }) - containerID = containerID + idrange.Length - } - return idMap -} - -func parseSubuid(username string) (ranges, error) { - return parseSubidFile(subuidFileName, username) -} - -func parseSubgid(username string) (ranges, error) { - return parseSubidFile(subgidFileName, username) -} - -// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid) -// and return all found ranges for a specified username. 
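
The remapping helpers removed above translate IDs linearly within each range: a container ID c covered by a map entry m resolves to host ID m.HostID + (c - m.ContainerID), and toContainer inverts that. A worked sketch of the arithmetic, using an assumed single subordinate range of 65536 IDs starting at host ID 100000 (the struct here only mirrors the removed IDMap shape, since the helpers are unexported):

    package main

    import "fmt"

    // idMap mirrors the IDMap entry shape from the removed idtools package.
    type idMap struct {
        ContainerID int
        HostID      int
        Size        int
    }

    func main() {
        // Container IDs 0..65535 map to host IDs 100000..165535.
        m := idMap{ContainerID: 0, HostID: 100000, Size: 65536}

        containerUID := 1000
        hostUID := m.HostID + (containerUID - m.ContainerID) // 101000, same arithmetic as toHost
        fmt.Printf("container uid %d -> host uid %d\n", containerUID, hostUID)

        // toContainer inverts the mapping: host uid 101000 -> container uid 1000.
        fmt.Printf("host uid %d -> container uid %d\n", hostUID, hostUID-m.HostID+m.ContainerID)
    }
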
If the special value -// "ALL" is supplied for username, then all ranges in the file will be returned -func parseSubidFile(path, username string) (ranges, error) { - var rangeList ranges - - subidFile, err := os.Open(path) - if err != nil { - return rangeList, err - } - defer subidFile.Close() - - s := bufio.NewScanner(subidFile) - for s.Scan() { - text := strings.TrimSpace(s.Text()) - if text == "" || strings.HasPrefix(text, "#") { - continue - } - parts := strings.Split(text, ":") - if len(parts) != 3 { - return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) - } - if parts[0] == username || username == "ALL" { - startid, err := strconv.Atoi(parts[1]) - if err != nil { - return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) - } - length, err := strconv.Atoi(parts[2]) - if err != nil { - return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) - } - rangeList = append(rangeList, subIDRange{startid, length}) - } - } - - return rangeList, s.Err() -} - -// CurrentIdentity returns the identity of the current process -func CurrentIdentity() Identity { - return Identity{UID: os.Getuid(), GID: os.Getegid()} -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go deleted file mode 100644 index 03846b030..000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go +++ /dev/null @@ -1,325 +0,0 @@ -//go:build !windows -// +build !windows - -package idtools // import "github.com/docker/docker/pkg/idtools" - -import ( - "bytes" - "fmt" - "io" - "os" - "os/exec" - "path/filepath" - "strconv" - "sync" - "syscall" - - "github.com/docker/docker/pkg/system" - "github.com/opencontainers/runc/libcontainer/user" - "github.com/pkg/errors" -) - -var ( - entOnce sync.Once - getentCmd string -) - -func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { - // make an array containing the original path asked for, plus (for mkAll == true) - // all path components leading up to the complete path that don't exist before we MkdirAll - // so that we can chown all of them properly at the end. 
If chownExisting is false, we won't - // chown the full directory path if it exists - - var paths []string - path, err := filepath.Abs(path) - if err != nil { - return err - } - - stat, err := system.Stat(path) - if err == nil { - if !stat.IsDir() { - return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} - } - if !chownExisting { - return nil - } - - // short-circuit--we were called with an existing directory and chown was requested - return setPermissions(path, mode, owner.UID, owner.GID, stat) - } - - if os.IsNotExist(err) { - paths = []string{path} - } - - if mkAll { - // walk back to "/" looking for directories which do not exist - // and add them to the paths array for chown after creation - dirPath := path - for { - dirPath = filepath.Dir(dirPath) - if dirPath == "/" { - break - } - if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { - paths = append(paths, dirPath) - } - } - if err := system.MkdirAll(path, mode); err != nil { - return err - } - } else { - if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { - return err - } - } - // even if it existed, we will chown the requested path + any subpaths that - // didn't exist when we called MkdirAll - for _, pathComponent := range paths { - if err := setPermissions(pathComponent, mode, owner.UID, owner.GID, nil); err != nil { - return err - } - } - return nil -} - -// CanAccess takes a valid (existing) directory and a uid, gid pair and determines -// if that uid, gid pair has access (execute bit) to the directory -func CanAccess(path string, pair Identity) bool { - statInfo, err := system.Stat(path) - if err != nil { - return false - } - fileMode := os.FileMode(statInfo.Mode()) - permBits := fileMode.Perm() - return accessible(statInfo.UID() == uint32(pair.UID), - statInfo.GID() == uint32(pair.GID), permBits) -} - -func accessible(isOwner, isGroup bool, perms os.FileMode) bool { - if isOwner && (perms&0100 == 0100) { - return true - } - if isGroup && (perms&0010 == 0010) { - return true - } - if perms&0001 == 0001 { - return true - } - return false -} - -// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupUser(name string) (user.User, error) { - // first try a local system files lookup using existing capabilities - usr, err := user.LookupUser(name) - if err == nil { - return usr, nil - } - // local files lookup failed; attempt to call `getent` to query configured passwd dbs - usr, err = getentUser(name) - if err != nil { - return user.User{}, err - } - return usr, nil -} - -// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupUID(uid int) (user.User, error) { - // first try a local system files lookup using existing capabilities - usr, err := user.LookupUid(uid) - if err == nil { - return usr, nil - } - // local files lookup failed; attempt to call `getent` to query configured passwd dbs - return getentUser(strconv.Itoa(uid)) -} - -func getentUser(name string) (user.User, error) { - reader, err := callGetent("passwd", name) - if err != nil { - return user.User{}, err - } - users, err := user.ParsePasswd(reader) - if err != nil { - return user.User{}, err - } - if len(users) == 0 { - return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", name) - } - return users[0], nil -} - -// 
LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupGroup(name string) (user.Group, error) { - // first try a local system files lookup using existing capabilities - group, err := user.LookupGroup(name) - if err == nil { - return group, nil - } - // local files lookup failed; attempt to call `getent` to query configured group dbs - return getentGroup(name) -} - -// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupGID(gid int) (user.Group, error) { - // first try a local system files lookup using existing capabilities - group, err := user.LookupGid(gid) - if err == nil { - return group, nil - } - // local files lookup failed; attempt to call `getent` to query configured group dbs - return getentGroup(strconv.Itoa(gid)) -} - -func getentGroup(name string) (user.Group, error) { - reader, err := callGetent("group", name) - if err != nil { - return user.Group{}, err - } - groups, err := user.ParseGroup(reader) - if err != nil { - return user.Group{}, err - } - if len(groups) == 0 { - return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", name) - } - return groups[0], nil -} - -func callGetent(database, key string) (io.Reader, error) { - entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) - // if no `getent` command on host, can't do anything else - if getentCmd == "" { - return nil, fmt.Errorf("unable to find getent command") - } - out, err := execCmd(getentCmd, database, key) - if err != nil { - exitCode, errC := getExitCode(err) - if errC != nil { - return nil, err - } - switch exitCode { - case 1: - return nil, fmt.Errorf("getent reported invalid parameters/database unknown") - case 2: - return nil, fmt.Errorf("getent unable to find entry %q in %s database", key, database) - case 3: - return nil, fmt.Errorf("getent database doesn't support enumeration") - default: - return nil, err - } - } - return bytes.NewReader(out), nil -} - -// getExitCode returns the ExitStatus of the specified error if its type is -// exec.ExitError, returns 0 and an error otherwise. -func getExitCode(err error) (int, error) { - exitCode := 0 - if exiterr, ok := err.(*exec.ExitError); ok { - if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { - return procExit.ExitStatus(), nil - } - } - return exitCode, fmt.Errorf("failed to get exit code") -} - -// setPermissions performs a chown/chmod only if the uid/gid don't match what's requested -// Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the -// dir is on an NFS share, so don't call chown unless we absolutely must. -// Likewise for setting permissions. 
-func setPermissions(p string, mode os.FileMode, uid, gid int, stat *system.StatT) error { - if stat == nil { - var err error - stat, err = system.Stat(p) - if err != nil { - return err - } - } - if os.FileMode(stat.Mode()).Perm() != mode.Perm() { - if err := os.Chmod(p, mode.Perm()); err != nil { - return err - } - } - if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) { - return nil - } - return os.Chown(p, uid, gid) -} - -// NewIdentityMapping takes a requested username and -// using the data from /etc/sub{uid,gid} ranges, creates the -// proper uid and gid remapping ranges for that user/group pair -// -// Deprecated: Use LoadIdentityMapping. -func NewIdentityMapping(name string) (*IdentityMapping, error) { - m, err := LoadIdentityMapping(name) - if err != nil { - return nil, err - } - return &m, err -} - -// LoadIdentityMapping takes a requested username and -// using the data from /etc/sub{uid,gid} ranges, creates the -// proper uid and gid remapping ranges for that user/group pair -func LoadIdentityMapping(name string) (IdentityMapping, error) { - usr, err := LookupUser(name) - if err != nil { - return IdentityMapping{}, fmt.Errorf("Could not get user for username %s: %v", name, err) - } - - subuidRanges, err := lookupSubUIDRanges(usr) - if err != nil { - return IdentityMapping{}, err - } - subgidRanges, err := lookupSubGIDRanges(usr) - if err != nil { - return IdentityMapping{}, err - } - - return IdentityMapping{ - UIDMaps: subuidRanges, - GIDMaps: subgidRanges, - }, nil -} - -func lookupSubUIDRanges(usr user.User) ([]IDMap, error) { - rangeList, err := parseSubuid(strconv.Itoa(usr.Uid)) - if err != nil { - return nil, err - } - if len(rangeList) == 0 { - rangeList, err = parseSubuid(usr.Name) - if err != nil { - return nil, err - } - } - if len(rangeList) == 0 { - return nil, errors.Errorf("no subuid ranges found for user %q", usr.Name) - } - return createIDMap(rangeList), nil -} - -func lookupSubGIDRanges(usr user.User) ([]IDMap, error) { - rangeList, err := parseSubgid(strconv.Itoa(usr.Uid)) - if err != nil { - return nil, err - } - if len(rangeList) == 0 { - rangeList, err = parseSubgid(usr.Name) - if err != nil { - return nil, err - } - } - if len(rangeList) == 0 { - return nil, errors.Errorf("no subgid ranges found for user %q", usr.Name) - } - return createIDMap(rangeList), nil -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go deleted file mode 100644 index 0f5aadd49..000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go +++ /dev/null @@ -1,34 +0,0 @@ -package idtools // import "github.com/docker/docker/pkg/idtools" - -import ( - "os" - - "github.com/docker/docker/pkg/system" -) - -const ( - SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege" -) - -const ( - ContainerAdministratorSidString = "S-1-5-93-2-1" - ContainerUserSidString = "S-1-5-93-2-2" -) - -// This is currently a wrapper around MkdirAll, however, since currently -// permissions aren't set through this path, the identity isn't utilized. -// Ownership is handled elsewhere, but in the future could be support here -// too. 
-func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { - if err := system.MkdirAll(path, mode); err != nil { - return err - } - return nil -} - -// CanAccess takes a valid (existing) directory and a uid, gid pair and determines -// if that uid, gid pair has access (execute bit) to the directory -// Windows does not require/support this function, so always return true -func CanAccess(path string, identity Identity) bool { - return true -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go deleted file mode 100644 index 3ad9255df..000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go +++ /dev/null @@ -1,163 +0,0 @@ -package idtools // import "github.com/docker/docker/pkg/idtools" - -import ( - "fmt" - "regexp" - "sort" - "strconv" - "strings" - "sync" -) - -// add a user and/or group to Linux /etc/passwd, /etc/group using standard -// Linux distribution commands: -// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group -// useradd -r -s /bin/false - -var ( - once sync.Once - userCommand string - idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`) -) - -const ( - // default length for a UID/GID subordinate range - defaultRangeLen = 65536 - defaultRangeStart = 100000 -) - -// AddNamespaceRangesUser takes a username and uses the standard system -// utility to create a system user/group pair used to hold the -// /etc/sub{uid,gid} ranges which will be used for user namespace -// mapping ranges in containers. -func AddNamespaceRangesUser(name string) (int, int, error) { - if err := addUser(name); err != nil { - return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) - } - - // Query the system for the created uid and gid pair - out, err := execCmd("id", name) - if err != nil { - return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err) - } - matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out))) - if len(matches) != 3 { - return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out)) - } - uid, err := strconv.Atoi(matches[1]) - if err != nil { - return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err) - } - gid, err := strconv.Atoi(matches[2]) - if err != nil { - return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err) - } - - // Now we need to create the subuid/subgid ranges for our new user/group (system users - // do not get auto-created ranges in subuid/subgid) - - if err := createSubordinateRanges(name); err != nil { - return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err) - } - return uid, gid, nil -} - -func addUser(name string) error { - once.Do(func() { - // set up which commands are used for adding users/groups dependent on distro - if _, err := resolveBinary("adduser"); err == nil { - userCommand = "adduser" - } else if _, err := resolveBinary("useradd"); err == nil { - userCommand = "useradd" - } - }) - var args []string - switch userCommand { - case "adduser": - args = []string{"--system", "--shell", "/bin/false", "--no-create-home", "--disabled-login", "--disabled-password", "--group", name} - case "useradd": - args = []string{"-r", "-s", "/bin/false", name} - default: - return fmt.Errorf("cannot add user; no useradd/adduser binary found") - } - - if out, err := execCmd(userCommand, args...); err != nil { - 
return fmt.Errorf("failed to add user with error: %v; output: %q", err, string(out)) - } - return nil -} - -func createSubordinateRanges(name string) error { - // first, we should verify that ranges weren't automatically created - // by the distro tooling - ranges, err := parseSubuid(name) - if err != nil { - return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err) - } - if len(ranges) == 0 { - // no UID ranges; let's create one - startID, err := findNextUIDRange() - if err != nil { - return fmt.Errorf("Can't find available subuid range: %v", err) - } - out, err := execCmd("usermod", "-v", fmt.Sprintf("%d-%d", startID, startID+defaultRangeLen-1), name) - if err != nil { - return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err) - } - } - - ranges, err = parseSubgid(name) - if err != nil { - return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err) - } - if len(ranges) == 0 { - // no GID ranges; let's create one - startID, err := findNextGIDRange() - if err != nil { - return fmt.Errorf("Can't find available subgid range: %v", err) - } - out, err := execCmd("usermod", "-w", fmt.Sprintf("%d-%d", startID, startID+defaultRangeLen-1), name) - if err != nil { - return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err) - } - } - return nil -} - -func findNextUIDRange() (int, error) { - ranges, err := parseSubuid("ALL") - if err != nil { - return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err) - } - sort.Sort(ranges) - return findNextRangeStart(ranges) -} - -func findNextGIDRange() (int, error) { - ranges, err := parseSubgid("ALL") - if err != nil { - return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err) - } - sort.Sort(ranges) - return findNextRangeStart(ranges) -} - -func findNextRangeStart(rangeList ranges) (int, error) { - startID := defaultRangeStart - for _, arange := range rangeList { - if wouldOverlap(arange, startID) { - startID = arange.Start + arange.Length - } - } - return startID, nil -} - -func wouldOverlap(arange subIDRange, ID int) bool { - low := ID - high := ID + defaultRangeLen - if (low >= arange.Start && low <= arange.Start+arange.Length) || - (high <= arange.Start+arange.Length && high >= arange.Start) { - return true - } - return false -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go deleted file mode 100644 index 5e24577e2..000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build !linux -// +build !linux - -package idtools // import "github.com/docker/docker/pkg/idtools" - -import "fmt" - -// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair -// and calls the appropriate helper function to add the group and then -// the user to the group in /etc/group and /etc/passwd respectively. 
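
The allocation logic deleted above scans the sorted subordinate ranges and keeps bumping the candidate start past any range it would overlap. A small worked sketch of that arithmetic; the ranges are hypothetical, and the helper mirrors findNextRangeStart/wouldOverlap rather than calling them, since they are unexported:

    package main

    import "fmt"

    type subRange struct{ start, length int }

    // nextRangeStart mirrors the removed findNextRangeStart/wouldOverlap logic
    // for the default range length of 65536, starting the search at 100000.
    func nextRangeStart(ranges []subRange) int {
        const rangeLen = 65536
        start := 100000
        for _, r := range ranges {
            low, high := start, start+rangeLen
            if (low >= r.start && low <= r.start+r.length) ||
                (high <= r.start+r.length && high >= r.start) {
                start = r.start + r.length
            }
        }
        return start
    }

    func main() {
        // Two users already hold 100000-165535 and 165536-231071, so the next
        // free 65536-wide range starts at 231072.
        existing := []subRange{{100000, 65536}, {165536, 65536}}
        fmt.Println(nextRangeStart(existing)) // 231072
    }
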
-func AddNamespaceRangesUser(name string) (int, int, error) { - return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go deleted file mode 100644 index 540672af5..000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go +++ /dev/null @@ -1,32 +0,0 @@ -//go:build !windows -// +build !windows - -package idtools // import "github.com/docker/docker/pkg/idtools" - -import ( - "fmt" - "os/exec" - "path/filepath" -) - -func resolveBinary(binname string) (string, error) { - binaryPath, err := exec.LookPath(binname) - if err != nil { - return "", err - } - resolvedPath, err := filepath.EvalSymlinks(binaryPath) - if err != nil { - return "", err - } - // only return no error if the final resolved binary basename - // matches what was searched for - if filepath.Base(resolvedPath) == binname { - return resolvedPath, nil - } - return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) -} - -func execCmd(cmd string, arg ...string) ([]byte, error) { - execCmd := exec.Command(cmd, arg...) - return execCmd.CombinedOutput() -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/buffer.go b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go deleted file mode 100644 index 466f79294..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/buffer.go +++ /dev/null @@ -1,51 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "errors" - "io" -) - -var errBufferFull = errors.New("buffer is full") - -type fixedBuffer struct { - buf []byte - pos int - lastRead int -} - -func (b *fixedBuffer) Write(p []byte) (int, error) { - n := copy(b.buf[b.pos:cap(b.buf)], p) - b.pos += n - - if n < len(p) { - if b.pos == cap(b.buf) { - return n, errBufferFull - } - return n, io.ErrShortWrite - } - return n, nil -} - -func (b *fixedBuffer) Read(p []byte) (int, error) { - n := copy(p, b.buf[b.lastRead:b.pos]) - b.lastRead += n - return n, nil -} - -func (b *fixedBuffer) Len() int { - return b.pos - b.lastRead -} - -func (b *fixedBuffer) Cap() int { - return cap(b.buf) -} - -func (b *fixedBuffer) Reset() { - b.pos = 0 - b.lastRead = 0 - b.buf = b.buf[:0] -} - -func (b *fixedBuffer) String() string { - return string(b.buf[b.lastRead:b.pos]) -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go deleted file mode 100644 index c1cfa62fd..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go +++ /dev/null @@ -1,187 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "errors" - "io" - "sync" -) - -// maxCap is the highest capacity to use in byte slices that buffer data. -const maxCap = 1e6 - -// minCap is the lowest capacity to use in byte slices that buffer data -const minCap = 64 - -// blockThreshold is the minimum number of bytes in the buffer which will cause -// a write to BytesPipe to block when allocating a new slice. -const blockThreshold = 1e6 - -var ( - // ErrClosed is returned when Write is called on a closed BytesPipe. - ErrClosed = errors.New("write to closed BytesPipe") - - bufPools = make(map[int]*sync.Pool) - bufPoolsLock sync.Mutex -) - -// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). -// All written data may be read at most once. 
Also, BytesPipe allocates -// and releases new byte slices to adjust to current needs, so the buffer -// won't be overgrown after peak loads. -type BytesPipe struct { - mu sync.Mutex - wait *sync.Cond - buf []*fixedBuffer - bufLen int - closeErr error // error to return from next Read. set to nil if not closed. - readBlock bool // check read BytesPipe is Wait() or not -} - -// NewBytesPipe creates new BytesPipe, initialized by specified slice. -// If buf is nil, then it will be initialized with slice which cap is 64. -// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf). -func NewBytesPipe() *BytesPipe { - bp := &BytesPipe{} - bp.buf = append(bp.buf, getBuffer(minCap)) - bp.wait = sync.NewCond(&bp.mu) - return bp -} - -// Write writes p to BytesPipe. -// It can allocate new []byte slices in a process of writing. -func (bp *BytesPipe) Write(p []byte) (int, error) { - bp.mu.Lock() - defer bp.mu.Unlock() - - written := 0 -loop0: - for { - if bp.closeErr != nil { - return written, ErrClosed - } - - if len(bp.buf) == 0 { - bp.buf = append(bp.buf, getBuffer(64)) - } - // get the last buffer - b := bp.buf[len(bp.buf)-1] - - n, err := b.Write(p) - written += n - bp.bufLen += n - - // errBufferFull is an error we expect to get if the buffer is full - if err != nil && err != errBufferFull { - bp.wait.Broadcast() - return written, err - } - - // if there was enough room to write all then break - if len(p) == n { - break - } - - // more data: write to the next slice - p = p[n:] - - // make sure the buffer doesn't grow too big from this write - for bp.bufLen >= blockThreshold { - if bp.readBlock { - bp.wait.Broadcast() - } - bp.wait.Wait() - if bp.closeErr != nil { - continue loop0 - } - } - - // add new byte slice to the buffers slice and continue writing - nextCap := b.Cap() * 2 - if nextCap > maxCap { - nextCap = maxCap - } - bp.buf = append(bp.buf, getBuffer(nextCap)) - } - bp.wait.Broadcast() - return written, nil -} - -// CloseWithError causes further reads from a BytesPipe to return immediately. -func (bp *BytesPipe) CloseWithError(err error) error { - bp.mu.Lock() - if err != nil { - bp.closeErr = err - } else { - bp.closeErr = io.EOF - } - bp.wait.Broadcast() - bp.mu.Unlock() - return nil -} - -// Close causes further reads from a BytesPipe to return immediately. -func (bp *BytesPipe) Close() error { - return bp.CloseWithError(nil) -} - -// Read reads bytes from BytesPipe. -// Data could be read only once. 
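
A short usage sketch of the pipe removed here: writes are buffered in growing fixed-size slices until a reader drains them, and closing makes further reads return io.EOF once the buffer is empty. The import path assumes the package were still resolvable at its original vendored location:

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/ioutils"
    )

    func main() {
        bp := ioutils.NewBytesPipe()

        // Writes are buffered until the reader drains them.
        go func() {
            bp.Write([]byte("hello "))
            bp.Write([]byte("world"))
            bp.CloseWithError(nil) // reads return io.EOF once the buffer is drained
        }()

        buf := make([]byte, 32)
        for {
            n, err := bp.Read(buf)
            fmt.Print(string(buf[:n]))
            if err != nil {
                break
            }
        }
        fmt.Println()
    }
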
-func (bp *BytesPipe) Read(p []byte) (n int, err error) { - bp.mu.Lock() - defer bp.mu.Unlock() - if bp.bufLen == 0 { - if bp.closeErr != nil { - return 0, bp.closeErr - } - bp.readBlock = true - bp.wait.Wait() - bp.readBlock = false - if bp.bufLen == 0 && bp.closeErr != nil { - return 0, bp.closeErr - } - } - - for bp.bufLen > 0 { - b := bp.buf[0] - read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error - n += read - bp.bufLen -= read - - if b.Len() == 0 { - // it's empty so return it to the pool and move to the next one - returnBuffer(b) - bp.buf[0] = nil - bp.buf = bp.buf[1:] - } - - if len(p) == read { - break - } - - p = p[read:] - } - - bp.wait.Broadcast() - return -} - -func returnBuffer(b *fixedBuffer) { - b.Reset() - bufPoolsLock.Lock() - pool := bufPools[b.Cap()] - bufPoolsLock.Unlock() - if pool != nil { - pool.Put(b) - } -} - -func getBuffer(size int) *fixedBuffer { - bufPoolsLock.Lock() - pool, ok := bufPools[size] - if !ok { - pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} - bufPools[size] = pool - } - bufPoolsLock.Unlock() - return pool.Get().(*fixedBuffer) -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go deleted file mode 100644 index 82671d8cd..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go +++ /dev/null @@ -1,161 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "io" - "os" - "path/filepath" -) - -// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a -// temporary file and closing it atomically changes the temporary file to -// destination path. Writing and closing concurrently is not allowed. -func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) { - f, err := os.CreateTemp(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) - if err != nil { - return nil, err - } - - abspath, err := filepath.Abs(filename) - if err != nil { - return nil, err - } - return &atomicFileWriter{ - f: f, - fn: abspath, - perm: perm, - }, nil -} - -// AtomicWriteFile atomically writes data to a file named by filename. -func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { - f, err := NewAtomicFileWriter(filename, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - f.(*atomicFileWriter).writeErr = err - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -type atomicFileWriter struct { - f *os.File - fn string - writeErr error - perm os.FileMode -} - -func (w *atomicFileWriter) Write(dt []byte) (int, error) { - n, err := w.f.Write(dt) - if err != nil { - w.writeErr = err - } - return n, err -} - -func (w *atomicFileWriter) Close() (retErr error) { - defer func() { - if retErr != nil || w.writeErr != nil { - os.Remove(w.f.Name()) - } - }() - if err := w.f.Sync(); err != nil { - w.f.Close() - return err - } - if err := w.f.Close(); err != nil { - return err - } - if err := os.Chmod(w.f.Name(), w.perm); err != nil { - return err - } - if w.writeErr == nil { - return os.Rename(w.f.Name(), w.fn) - } - return nil -} - -// AtomicWriteSet is used to atomically write a set -// of files and ensure they are visible at the same time. -// Must be committed to a new directory. 
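
For reference, the atomic-write helpers deleted above stage data in a temporary file in the target directory and rename it into place only after the writes and the final sync succeed, so readers never observe a partially written file. A minimal sketch, assuming the original import path; "config.json" and its contents are illustrative:

    package main

    import (
        "log"

        "github.com/docker/docker/pkg/ioutils"
    )

    func main() {
        // Concurrent readers see either the old file or the new one,
        // never a truncated intermediate state.
        if err := ioutils.AtomicWriteFile("config.json", []byte(`{"debug":true}`), 0600); err != nil {
            log.Fatal(err)
        }
    }
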
-type AtomicWriteSet struct { - root string -} - -// NewAtomicWriteSet creates a new atomic write set to -// atomically create a set of files. The given directory -// is used as the base directory for storing files before -// commit. If no temporary directory is given the system -// default is used. -func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) { - td, err := os.MkdirTemp(tmpDir, "write-set-") - if err != nil { - return nil, err - } - - return &AtomicWriteSet{ - root: td, - }, nil -} - -// WriteFile writes a file to the set, guaranteeing the file -// has been synced. -func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error { - f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -type syncFileCloser struct { - *os.File -} - -func (w syncFileCloser) Close() error { - err := w.File.Sync() - if err1 := w.File.Close(); err == nil { - err = err1 - } - return err -} - -// FileWriter opens a file writer inside the set. The file -// should be synced and closed before calling commit. -func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) { - f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm) - if err != nil { - return nil, err - } - return syncFileCloser{f}, nil -} - -// Cancel cancels the set and removes all temporary data -// created in the set. -func (ws *AtomicWriteSet) Cancel() error { - return os.RemoveAll(ws.root) -} - -// Commit moves all created files to the target directory. The -// target directory must not exist and the parent of the target -// directory must exist. -func (ws *AtomicWriteSet) Commit(target string) error { - return os.Rename(ws.root, target) -} - -// String returns the location the set is writing to. -func (ws *AtomicWriteSet) String() string { - return ws.root -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/docker/docker/pkg/ioutils/readers.go deleted file mode 100644 index de00b95e3..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/readers.go +++ /dev/null @@ -1,151 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "context" - "io" - - // make sure crypto.SHA256, crypto.sha512 and crypto.SHA384 are registered - // TODO remove once https://github.com/opencontainers/go-digest/pull/64 is merged. - _ "crypto/sha256" - _ "crypto/sha512" -) - -// ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser -// It calls the given callback function when closed. It should be constructed -// with NewReadCloserWrapper -type ReadCloserWrapper struct { - io.Reader - closer func() error -} - -// Close calls back the passed closer function -func (r *ReadCloserWrapper) Close() error { - return r.closer() -} - -// NewReadCloserWrapper returns a new io.ReadCloser. -func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { - return &ReadCloserWrapper{ - Reader: r, - closer: closer, - } -} - -type readerErrWrapper struct { - reader io.Reader - closer func() -} - -func (r *readerErrWrapper) Read(p []byte) (int, error) { - n, err := r.reader.Read(p) - if err != nil { - r.closer() - } - return n, err -} - -// NewReaderErrWrapper returns a new io.Reader. 
-func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { - return &readerErrWrapper{ - reader: r, - closer: closer, - } -} - -// OnEOFReader wraps an io.ReadCloser and a function -// the function will run at the end of file or close the file. -type OnEOFReader struct { - Rc io.ReadCloser - Fn func() -} - -func (r *OnEOFReader) Read(p []byte) (n int, err error) { - n, err = r.Rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -// Close closes the file and run the function. -func (r *OnEOFReader) Close() error { - err := r.Rc.Close() - r.runFunc() - return err -} - -func (r *OnEOFReader) runFunc() { - if fn := r.Fn; fn != nil { - fn() - r.Fn = nil - } -} - -// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read -// operations. -type cancelReadCloser struct { - cancel func() - pR *io.PipeReader // Stream to read from - pW *io.PipeWriter -} - -// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the -// context is cancelled. The returned io.ReadCloser must be closed when it is -// no longer needed. -func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { - pR, pW := io.Pipe() - - // Create a context used to signal when the pipe is closed - doneCtx, cancel := context.WithCancel(context.Background()) - - p := &cancelReadCloser{ - cancel: cancel, - pR: pR, - pW: pW, - } - - go func() { - _, err := io.Copy(pW, in) - select { - case <-ctx.Done(): - // If the context was closed, p.closeWithError - // was already called. Calling it again would - // change the error that Read returns. - default: - p.closeWithError(err) - } - in.Close() - }() - go func() { - for { - select { - case <-ctx.Done(): - p.closeWithError(ctx.Err()) - case <-doneCtx.Done(): - return - } - } - }() - - return p -} - -// Read wraps the Read method of the pipe that provides data from the wrapped -// ReadCloser. -func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { - return p.pR.Read(buf) -} - -// closeWithError closes the wrapper and its underlying reader. It will -// cause future calls to Read to return err. -func (p *cancelReadCloser) closeWithError(err error) { - p.pW.CloseWithError(err) - p.cancel() -} - -// Close closes the wrapper its underlying reader. It will cause -// future calls to Read to return io.EOF. -func (p *cancelReadCloser) Close() error { - p.closeWithError(io.EOF) - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go deleted file mode 100644 index 748912230..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build !windows -// +build !windows - -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import "os" - -// TempDir on Unix systems is equivalent to os.MkdirTemp. -func TempDir(dir, prefix string) (string, error) { - return os.MkdirTemp(dir, prefix) -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go deleted file mode 100644 index a57fd9af6..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go +++ /dev/null @@ -1,16 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "os" - - "github.com/docker/docker/pkg/longpath" -) - -// TempDir is the equivalent of os.MkdirTemp, except that the result is in Windows longpath format. 
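
The cancelReadCloser removed above lets a context unblock an otherwise stuck Read. A hedged usage sketch, assuming the original import path; os.Stdin and the 100 ms timeout are only illustrative:

    package main

    import (
        "context"
        "fmt"
        "io"
        "os"
        "time"

        "github.com/docker/docker/pkg/ioutils"
    )

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
        defer cancel()

        // os.Stdin may never produce data; the wrapper unblocks the read when
        // the context times out and Read returns context.DeadlineExceeded.
        rc := ioutils.NewCancelReadCloser(ctx, os.Stdin)
        defer rc.Close()

        buf := make([]byte, 512)
        n, err := rc.Read(buf)
        if err != nil && err != io.EOF {
            fmt.Println("read aborted:", err)
            return
        }
        fmt.Printf("read %d bytes\n", n)
    }
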
-func TempDir(dir, prefix string) (string, error) { - tempDir, err := os.MkdirTemp(dir, prefix) - if err != nil { - return "", err - } - return longpath.AddPrefix(tempDir), nil -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go deleted file mode 100644 index 91b8d1826..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go +++ /dev/null @@ -1,92 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "io" - "sync" -) - -// WriteFlusher wraps the Write and Flush operation ensuring that every write -// is a flush. In addition, the Close method can be called to intercept -// Read/Write calls if the targets lifecycle has already ended. -type WriteFlusher struct { - w io.Writer - flusher flusher - flushed chan struct{} - flushedOnce sync.Once - closed chan struct{} - closeLock sync.Mutex -} - -type flusher interface { - Flush() -} - -var errWriteFlusherClosed = io.EOF - -func (wf *WriteFlusher) Write(b []byte) (n int, err error) { - select { - case <-wf.closed: - return 0, errWriteFlusherClosed - default: - } - - n, err = wf.w.Write(b) - wf.Flush() // every write is a flush. - return n, err -} - -// Flush the stream immediately. -func (wf *WriteFlusher) Flush() { - select { - case <-wf.closed: - return - default: - } - - wf.flushedOnce.Do(func() { - close(wf.flushed) - }) - wf.flusher.Flush() -} - -// Flushed returns the state of flushed. -// If it's flushed, return true, or else it return false. -func (wf *WriteFlusher) Flushed() bool { - // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to - // be used to detect whether or a response code has been issued or not. - // Another hook should be used instead. - var flushed bool - select { - case <-wf.flushed: - flushed = true - default: - } - return flushed -} - -// Close closes the write flusher, disallowing any further writes to the -// target. After the flusher is closed, all calls to write or flush will -// result in an error. -func (wf *WriteFlusher) Close() error { - wf.closeLock.Lock() - defer wf.closeLock.Unlock() - - select { - case <-wf.closed: - return errWriteFlusherClosed - default: - close(wf.closed) - } - return nil -} - -// NewWriteFlusher returns a new WriteFlusher. -func NewWriteFlusher(w io.Writer) *WriteFlusher { - var fl flusher - if f, ok := w.(flusher); ok { - fl = f - } else { - fl = &NopFlusher{} - } - return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})} -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/docker/docker/pkg/ioutils/writers.go deleted file mode 100644 index 61c679497..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/writers.go +++ /dev/null @@ -1,66 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import "io" - -// NopWriter represents a type which write operation is nop. -type NopWriter struct{} - -func (*NopWriter) Write(buf []byte) (int, error) { - return len(buf), nil -} - -type nopWriteCloser struct { - io.Writer -} - -func (w *nopWriteCloser) Close() error { return nil } - -// NopWriteCloser returns a nopWriteCloser. -func NopWriteCloser(w io.Writer) io.WriteCloser { - return &nopWriteCloser{w} -} - -// NopFlusher represents a type which flush operation is nop. -type NopFlusher struct{} - -// Flush is a nop operation. 
-func (f *NopFlusher) Flush() {} - -type writeCloserWrapper struct { - io.Writer - closer func() error -} - -func (r *writeCloserWrapper) Close() error { - return r.closer() -} - -// NewWriteCloserWrapper returns a new io.WriteCloser. -func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { - return &writeCloserWrapper{ - Writer: r, - closer: closer, - } -} - -// WriteCounter wraps a concrete io.Writer and hold a count of the number -// of bytes written to the writer during a "session". -// This can be convenient when write return is masked -// (e.g., json.Encoder.Encode()) -type WriteCounter struct { - Count int64 - Writer io.Writer -} - -// NewWriteCounter returns a new WriteCounter. -func NewWriteCounter(w io.Writer) *WriteCounter { - return &WriteCounter{ - Writer: w, - } -} - -func (wc *WriteCounter) Write(p []byte) (count int, err error) { - count, err = wc.Writer.Write(p) - wc.Count += int64(count) - return -} diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go deleted file mode 100644 index cf8d04b1b..000000000 --- a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go +++ /dev/null @@ -1,283 +0,0 @@ -package jsonmessage // import "github.com/docker/docker/pkg/jsonmessage" - -import ( - "encoding/json" - "fmt" - "io" - "strings" - "time" - - units "github.com/docker/go-units" - "github.com/moby/term" - "github.com/morikuni/aec" -) - -// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to -// ensure the formatted time isalways the same number of characters. -const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" - -// JSONError wraps a concrete Code and Message, `Code` is -// is an integer error code, `Message` is the error message. -type JSONError struct { - Code int `json:"code,omitempty"` - Message string `json:"message,omitempty"` -} - -func (e *JSONError) Error() string { - return e.Message -} - -// JSONProgress describes a Progress. terminalFd is the fd of the current terminal, -// Start is the initial value for the operation. Current is the current status and -// value of the progress made towards Total. Total is the end value describing when -// we made 100% progress for an operation. 
-type JSONProgress struct { - terminalFd uintptr - Current int64 `json:"current,omitempty"` - Total int64 `json:"total,omitempty"` - Start int64 `json:"start,omitempty"` - // If true, don't show xB/yB - HideCounts bool `json:"hidecounts,omitempty"` - Units string `json:"units,omitempty"` - nowFunc func() time.Time - winSize int -} - -func (p *JSONProgress) String() string { - var ( - width = p.width() - pbBox string - numbersBox string - timeLeftBox string - ) - if p.Current <= 0 && p.Total <= 0 { - return "" - } - if p.Total <= 0 { - switch p.Units { - case "": - current := units.HumanSize(float64(p.Current)) - return fmt.Sprintf("%8v", current) - default: - return fmt.Sprintf("%d %s", p.Current, p.Units) - } - } - - percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 - if percentage > 50 { - percentage = 50 - } - if width > 110 { - // this number can't be negative gh#7136 - numSpaces := 0 - if 50-percentage > 0 { - numSpaces = 50 - percentage - } - pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces)) - } - - switch { - case p.HideCounts: - case p.Units == "": // no units, use bytes - current := units.HumanSize(float64(p.Current)) - total := units.HumanSize(float64(p.Total)) - - numbersBox = fmt.Sprintf("%8v/%v", current, total) - - if p.Current > p.Total { - // remove total display if the reported current is wonky. - numbersBox = fmt.Sprintf("%8v", current) - } - default: - numbersBox = fmt.Sprintf("%d/%d %s", p.Current, p.Total, p.Units) - - if p.Current > p.Total { - // remove total display if the reported current is wonky. - numbersBox = fmt.Sprintf("%d %s", p.Current, p.Units) - } - } - - if p.Current > 0 && p.Start > 0 && percentage < 50 { - fromStart := p.now().Sub(time.Unix(p.Start, 0)) - perEntry := fromStart / time.Duration(p.Current) - left := time.Duration(p.Total-p.Current) * perEntry - left = (left / time.Second) * time.Second - - if width > 50 { - timeLeftBox = " " + left.String() - } - } - return pbBox + numbersBox + timeLeftBox -} - -// shim for testing -func (p *JSONProgress) now() time.Time { - if p.nowFunc == nil { - p.nowFunc = func() time.Time { - return time.Now().UTC() - } - } - return p.nowFunc() -} - -// shim for testing -func (p *JSONProgress) width() int { - if p.winSize != 0 { - return p.winSize - } - ws, err := term.GetWinsize(p.terminalFd) - if err == nil { - return int(ws.Width) - } - return 200 -} - -// JSONMessage defines a message struct. It describes -// the created time, where it from, status, ID of the -// message. It's used for docker events. -type JSONMessage struct { - Stream string `json:"stream,omitempty"` - Status string `json:"status,omitempty"` - Progress *JSONProgress `json:"progressDetail,omitempty"` - ProgressMessage string `json:"progress,omitempty"` // deprecated - ID string `json:"id,omitempty"` - From string `json:"from,omitempty"` - Time int64 `json:"time,omitempty"` - TimeNano int64 `json:"timeNano,omitempty"` - Error *JSONError `json:"errorDetail,omitempty"` - ErrorMessage string `json:"error,omitempty"` // deprecated - // Aux contains out-of-band data, such as digests for push signing and image id after building. 
- Aux *json.RawMessage `json:"aux,omitempty"` -} - -func clearLine(out io.Writer) { - eraseMode := aec.EraseModes.All - cl := aec.EraseLine(eraseMode) - fmt.Fprint(out, cl) -} - -func cursorUp(out io.Writer, l uint) { - fmt.Fprint(out, aec.Up(l)) -} - -func cursorDown(out io.Writer, l uint) { - fmt.Fprint(out, aec.Down(l)) -} - -// Display displays the JSONMessage to `out`. If `isTerminal` is true, it will erase the -// entire current line when displaying the progressbar. -func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { - if jm.Error != nil { - if jm.Error.Code == 401 { - return fmt.Errorf("authentication is required") - } - return jm.Error - } - var endl string - if isTerminal && jm.Stream == "" && jm.Progress != nil { - clearLine(out) - endl = "\r" - fmt.Fprint(out, endl) - } else if jm.Progress != nil && jm.Progress.String() != "" { // disable progressbar in non-terminal - return nil - } - if jm.TimeNano != 0 { - fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed)) - } else if jm.Time != 0 { - fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed)) - } - if jm.ID != "" { - fmt.Fprintf(out, "%s: ", jm.ID) - } - if jm.From != "" { - fmt.Fprintf(out, "(from %s) ", jm.From) - } - if jm.Progress != nil && isTerminal { - fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) - } else if jm.ProgressMessage != "" { // deprecated - fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) - } else if jm.Stream != "" { - fmt.Fprintf(out, "%s%s", jm.Stream, endl) - } else { - fmt.Fprintf(out, "%s%s\n", jm.Status, endl) - } - return nil -} - -// DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal` -// describes if `out` is a terminal. If this is the case, it will print `\n` at the end of -// each line and move the cursor while displaying. -func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(JSONMessage)) error { - var ( - dec = json.NewDecoder(in) - ids = make(map[string]uint) - ) - - for { - var diff uint - var jm JSONMessage - if err := dec.Decode(&jm); err != nil { - if err == io.EOF { - break - } - return err - } - - if jm.Aux != nil { - if auxCallback != nil { - auxCallback(jm) - } - continue - } - - if jm.Progress != nil { - jm.Progress.terminalFd = terminalFd - } - if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { - line, ok := ids[jm.ID] - if !ok { - // NOTE: This approach of using len(id) to - // figure out the number of lines of history - // only works as long as we clear the history - // when we output something that's not - // accounted for in the map, such as a line - // with no ID. - line = uint(len(ids)) - ids[jm.ID] = line - if isTerminal { - fmt.Fprintf(out, "\n") - } - } - diff = uint(len(ids)) - line - if isTerminal { - cursorUp(out, diff) - } - } else { - // When outputting something that isn't progress - // output, clear the history of previous lines. We - // don't want progress entries from some previous - // operation to be updated (for example, pull -a - // with multiple tags). 
- ids = make(map[string]uint) - } - err := jm.Display(out, isTerminal) - if jm.ID != "" && isTerminal { - cursorDown(out, diff) - } - if err != nil { - return err - } - } - return nil -} - -type stream interface { - io.Writer - FD() uintptr - IsTerminal() bool -} - -// DisplayJSONMessagesToStream prints json messages to the output stream -func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(JSONMessage)) error { - return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback) -} diff --git a/vendor/github.com/docker/docker/pkg/longpath/longpath.go b/vendor/github.com/docker/docker/pkg/longpath/longpath.go deleted file mode 100644 index 4177affba..000000000 --- a/vendor/github.com/docker/docker/pkg/longpath/longpath.go +++ /dev/null @@ -1,26 +0,0 @@ -// longpath introduces some constants and helper functions for handling long paths -// in Windows, which are expected to be prepended with `\\?\` and followed by either -// a drive letter, a UNC server\share, or a volume identifier. - -package longpath // import "github.com/docker/docker/pkg/longpath" - -import ( - "strings" -) - -// Prefix is the longpath prefix for Windows file paths. -const Prefix = `\\?\` - -// AddPrefix will add the Windows long path prefix to the path provided if -// it does not already have it. -func AddPrefix(path string) string { - if !strings.HasPrefix(path, Prefix) { - if strings.HasPrefix(path, `\\`) { - // This is a UNC path, so we need to add 'UNC' to the path as well. - path = Prefix + `UNC` + path[1:] - } else { - path = Prefix + path - } - } - return path -} diff --git a/vendor/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go deleted file mode 100644 index 3792c67a9..000000000 --- a/vendor/github.com/docker/docker/pkg/pools/pools.go +++ /dev/null @@ -1,137 +0,0 @@ -// Package pools provides a collection of pools which provide various -// data types with buffers. These can be used to lower the number of -// memory allocations and reuse buffers. -// -// New pools should be added to this package to allow them to be -// shared across packages. -// -// Utility functions which operate on pools should be added to this -// package to allow them to be reused. -package pools // import "github.com/docker/docker/pkg/pools" - -import ( - "bufio" - "io" - "sync" - - "github.com/docker/docker/pkg/ioutils" -) - -const buffer32K = 32 * 1024 - -var ( - // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. - BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) - // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. - BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) - buffer32KPool = newBufferPoolWithSize(buffer32K) -) - -// BufioReaderPool is a bufio reader that uses sync.Pool. -type BufioReaderPool struct { - pool sync.Pool -} - -// newBufioReaderPoolWithSize is unexported because new pools should be -// added here to be shared where required. -func newBufioReaderPoolWithSize(size int) *BufioReaderPool { - return &BufioReaderPool{ - pool: sync.Pool{ - New: func() interface{} { return bufio.NewReaderSize(nil, size) }, - }, - } -} - -// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. -func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { - buf := bufPool.pool.Get().(*bufio.Reader) - buf.Reset(r) - return buf -} - -// Put puts the bufio.Reader back into the pool. 
-func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { - b.Reset(nil) - bufPool.pool.Put(b) -} - -type bufferPool struct { - pool sync.Pool -} - -func newBufferPoolWithSize(size int) *bufferPool { - return &bufferPool{ - pool: sync.Pool{ - New: func() interface{} { s := make([]byte, size); return &s }, - }, - } -} - -func (bp *bufferPool) Get() *[]byte { - return bp.pool.Get().(*[]byte) -} - -func (bp *bufferPool) Put(b *[]byte) { - bp.pool.Put(b) -} - -// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. -func Copy(dst io.Writer, src io.Reader) (written int64, err error) { - buf := buffer32KPool.Get() - written, err = io.CopyBuffer(dst, src, *buf) - buffer32KPool.Put(buf) - return -} - -// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back -// into the pool and closes the reader if it's an io.ReadCloser. -func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { - return ioutils.NewReadCloserWrapper(r, func() error { - if readCloser, ok := r.(io.ReadCloser); ok { - readCloser.Close() - } - bufPool.Put(buf) - return nil - }) -} - -// BufioWriterPool is a bufio writer that uses sync.Pool. -type BufioWriterPool struct { - pool sync.Pool -} - -// newBufioWriterPoolWithSize is unexported because new pools should be -// added here to be shared where required. -func newBufioWriterPoolWithSize(size int) *BufioWriterPool { - return &BufioWriterPool{ - pool: sync.Pool{ - New: func() interface{} { return bufio.NewWriterSize(nil, size) }, - }, - } -} - -// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. -func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { - buf := bufPool.pool.Get().(*bufio.Writer) - buf.Reset(w) - return buf -} - -// Put puts the bufio.Writer back into the pool. -func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { - b.Reset(nil) - bufPool.pool.Put(b) -} - -// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back -// into the pool and closes the writer if it's an io.Writecloser. -func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { - return ioutils.NewWriteCloserWrapper(w, func() error { - buf.Flush() - if writeCloser, ok := w.(io.WriteCloser); ok { - writeCloser.Close() - } - bufPool.Put(buf) - return nil - }) -} diff --git a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go deleted file mode 100644 index 8f6e0a737..000000000 --- a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go +++ /dev/null @@ -1,190 +0,0 @@ -package stdcopy // import "github.com/docker/docker/pkg/stdcopy" - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "sync" -) - -// StdType is the type of standard stream -// a writer can multiplex to. -type StdType byte - -const ( - // Stdin represents standard input stream type. - Stdin StdType = iota - // Stdout represents standard output stream type. - Stdout - // Stderr represents standard error steam type. - Stderr - // Systemerr represents errors originating from the system that make it - // into the multiplexed stream. - Systemerr - - stdWriterPrefixLen = 8 - stdWriterFdIndex = 0 - stdWriterSizeIndex = 4 - - startingBufLen = 32*1024 + stdWriterPrefixLen + 1 -) - -var bufPool = &sync.Pool{New: func() interface{} { return bytes.NewBuffer(nil) }} - -// stdWriter is wrapper of io.Writer with extra customized info. 
-type stdWriter struct { - io.Writer - prefix byte -} - -// Write sends the buffer to the underneath writer. -// It inserts the prefix header before the buffer, -// so stdcopy.StdCopy knows where to multiplex the output. -// It makes stdWriter to implement io.Writer. -func (w *stdWriter) Write(p []byte) (n int, err error) { - if w == nil || w.Writer == nil { - return 0, errors.New("Writer not instantiated") - } - if p == nil { - return 0, nil - } - - header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix} - binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(p))) - buf := bufPool.Get().(*bytes.Buffer) - buf.Write(header[:]) - buf.Write(p) - - n, err = w.Writer.Write(buf.Bytes()) - n -= stdWriterPrefixLen - if n < 0 { - n = 0 - } - - buf.Reset() - bufPool.Put(buf) - return -} - -// NewStdWriter instantiates a new Writer. -// Everything written to it will be encapsulated using a custom format, -// and written to the underlying `w` stream. -// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection. -// `t` indicates the id of the stream to encapsulate. -// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr. -func NewStdWriter(w io.Writer, t StdType) io.Writer { - return &stdWriter{ - Writer: w, - prefix: byte(t), - } -} - -// StdCopy is a modified version of io.Copy. -// -// StdCopy will demultiplex `src`, assuming that it contains two streams, -// previously multiplexed together using a StdWriter instance. -// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`. -// -// StdCopy will read until it hits EOF on `src`. It will then return a nil error. -// In other words: if `err` is non nil, it indicates a real underlying error. -// -// `written` will hold the total number of bytes written to `dstout` and `dsterr`. -func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) { - var ( - buf = make([]byte, startingBufLen) - bufLen = len(buf) - nr, nw int - er, ew error - out io.Writer - frameSize int - ) - - for { - // Make sure we have at least a full header - for nr < stdWriterPrefixLen { - var nr2 int - nr2, er = src.Read(buf[nr:]) - nr += nr2 - if er == io.EOF { - if nr < stdWriterPrefixLen { - return written, nil - } - break - } - if er != nil { - return 0, er - } - } - - stream := StdType(buf[stdWriterFdIndex]) - // Check the first byte to know where to write - switch stream { - case Stdin: - fallthrough - case Stdout: - // Write on stdout - out = dstout - case Stderr: - // Write on stderr - out = dsterr - case Systemerr: - // If we're on Systemerr, we won't write anywhere. - // NB: if this code changes later, make sure you don't try to write - // to outstream if Systemerr is the stream - out = nil - default: - return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex]) - } - - // Retrieve the size of the frame - frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4])) - - // Check if the buffer is big enough to read the frame. - // Extend it if necessary. - if frameSize+stdWriterPrefixLen > bufLen { - buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...) 
- bufLen = len(buf) - } - - // While the amount of bytes read is less than the size of the frame + header, we keep reading - for nr < frameSize+stdWriterPrefixLen { - var nr2 int - nr2, er = src.Read(buf[nr:]) - nr += nr2 - if er == io.EOF { - if nr < frameSize+stdWriterPrefixLen { - return written, nil - } - break - } - if er != nil { - return 0, er - } - } - - // we might have an error from the source mixed up in our multiplexed - // stream. if we do, return it. - if stream == Systemerr { - return written, fmt.Errorf("error from daemon in stream: %s", string(buf[stdWriterPrefixLen:frameSize+stdWriterPrefixLen])) - } - - // Write the retrieved frame (without header) - nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen]) - if ew != nil { - return 0, ew - } - - // If the frame has not been fully written: error - if nw != frameSize { - return 0, io.ErrShortWrite - } - written += int64(nw) - - // Move the rest of the buffer to the beginning - copy(buf, buf[frameSize+stdWriterPrefixLen:]) - // Move the index - nr -= frameSize + stdWriterPrefixLen - } -} diff --git a/vendor/github.com/docker/docker/pkg/system/args_windows.go b/vendor/github.com/docker/docker/pkg/system/args_windows.go deleted file mode 100644 index b7c9487a0..000000000 --- a/vendor/github.com/docker/docker/pkg/system/args_windows.go +++ /dev/null @@ -1,16 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "strings" - - "golang.org/x/sys/windows" -) - -// EscapeArgs makes a Windows-style escaped command line from a set of arguments -func EscapeArgs(args []string) string { - escapedArgs := make([]string, len(args)) - for i, a := range args { - escapedArgs[i] = windows.EscapeArg(a) - } - return strings.Join(escapedArgs, " ") -} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes.go b/vendor/github.com/docker/docker/pkg/system/chtimes.go deleted file mode 100644 index c26a4e24b..000000000 --- a/vendor/github.com/docker/docker/pkg/system/chtimes.go +++ /dev/null @@ -1,31 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "os" - "time" -) - -// Chtimes changes the access time and modified time of a file at the given path -func Chtimes(name string, atime time.Time, mtime time.Time) error { - unixMinTime := time.Unix(0, 0) - unixMaxTime := maxTime - - // If the modified time is prior to the Unix Epoch, or after the - // end of Unix Time, os.Chtimes has undefined behavior - // default to Unix Epoch in this case, just in case - - if atime.Before(unixMinTime) || atime.After(unixMaxTime) { - atime = unixMinTime - } - - if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { - mtime = unixMinTime - } - - if err := os.Chtimes(name, atime, mtime); err != nil { - return err - } - - // Take platform specific action for setting create time. - return setCTime(name, mtime) -} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go b/vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go deleted file mode 100644 index 84ae15705..000000000 --- a/vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go +++ /dev/null @@ -1,15 +0,0 @@ -//go:build !windows -// +build !windows - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "time" -) - -// setCTime will set the create time on a file. On Unix, the create -// time is updated as a side effect of setting the modified time, so -// no action is required. 
-func setCTime(path string, ctime time.Time) error { - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go deleted file mode 100644 index 6664b8bca..000000000 --- a/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go +++ /dev/null @@ -1,26 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "time" - - "golang.org/x/sys/windows" -) - -// setCTime will set the create time on a file. On Windows, this requires -// calling SetFileTime and explicitly including the create time. -func setCTime(path string, ctime time.Time) error { - ctimespec := windows.NsecToTimespec(ctime.UnixNano()) - pathp, e := windows.UTF16PtrFromString(path) - if e != nil { - return e - } - h, e := windows.CreateFile(pathp, - windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil, - windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0) - if e != nil { - return e - } - defer windows.Close(h) - c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec)) - return windows.SetFileTime(h, &c, nil, nil) -} diff --git a/vendor/github.com/docker/docker/pkg/system/errors.go b/vendor/github.com/docker/docker/pkg/system/errors.go deleted file mode 100644 index 2573d7162..000000000 --- a/vendor/github.com/docker/docker/pkg/system/errors.go +++ /dev/null @@ -1,13 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "errors" -) - -var ( - // ErrNotSupportedPlatform means the platform is not supported. - ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") - - // ErrNotSupportedOperatingSystem means the operating system is not supported. - ErrNotSupportedOperatingSystem = errors.New("operating system is not supported") -) diff --git a/vendor/github.com/docker/docker/pkg/system/filesys.go b/vendor/github.com/docker/docker/pkg/system/filesys.go deleted file mode 100644 index ce5990c91..000000000 --- a/vendor/github.com/docker/docker/pkg/system/filesys.go +++ /dev/null @@ -1,19 +0,0 @@ -package system - -import ( - "os" - "path/filepath" - "strings" -) - -// IsAbs is a platform-agnostic wrapper for filepath.IsAbs. -// -// On Windows, golang filepath.IsAbs does not consider a path \windows\system32 -// as absolute as it doesn't start with a drive-letter/colon combination. However, -// in docker we need to verify things such as WORKDIR /windows/system32 in -// a Dockerfile (which gets translated to \windows\system32 when being processed -// by the daemon). This SHOULD be treated as absolute from a docker processing -// perspective. -func IsAbs(path string) bool { - return filepath.IsAbs(path) || strings.HasPrefix(path, string(os.PathSeparator)) -} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_deprecated.go b/vendor/github.com/docker/docker/pkg/system/filesys_deprecated.go deleted file mode 100644 index b2ee00631..000000000 --- a/vendor/github.com/docker/docker/pkg/system/filesys_deprecated.go +++ /dev/null @@ -1,35 +0,0 @@ -package system - -import ( - "os" - - "github.com/moby/sys/sequential" -) - -// CreateSequential is deprecated. -// -// Deprecated: use os.Create or github.com/moby/sys/sequential.Create() -func CreateSequential(name string) (*os.File, error) { - return sequential.Create(name) -} - -// OpenSequential is deprecated. 
-// -// Deprecated: use os.Open or github.com/moby/sys/sequential.Open -func OpenSequential(name string) (*os.File, error) { - return sequential.Open(name) -} - -// OpenFileSequential is deprecated. -// -// Deprecated: use github.com/moby/sys/sequential.OpenFile() -func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) { - return sequential.OpenFile(name, flag, perm) -} - -// TempFileSequential is deprecated. -// -// Deprecated: use os.CreateTemp or github.com/moby/sys/sequential.CreateTemp -func TempFileSequential(dir, prefix string) (f *os.File, err error) { - return sequential.CreateTemp(dir, prefix) -} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_unix.go b/vendor/github.com/docker/docker/pkg/system/filesys_unix.go deleted file mode 100644 index 380112940..000000000 --- a/vendor/github.com/docker/docker/pkg/system/filesys_unix.go +++ /dev/null @@ -1,17 +0,0 @@ -//go:build !windows -// +build !windows - -package system // import "github.com/docker/docker/pkg/system" - -import "os" - -// MkdirAllWithACL is a wrapper for os.MkdirAll on unix systems. -func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { - return os.MkdirAll(path, perm) -} - -// MkdirAll creates a directory named path along with any necessary parents, -// with permission specified by attribute perm for all dir created. -func MkdirAll(path string, perm os.FileMode) error { - return os.MkdirAll(path, perm) -} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go deleted file mode 100644 index e3fa9f731..000000000 --- a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go +++ /dev/null @@ -1,118 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "os" - "regexp" - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -const ( - // SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System - SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" -) - -// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory -// with an appropriate SDDL defined ACL. -func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { - return mkdirall(path, true, sddl) -} - -// MkdirAll implementation that is volume path aware for Windows. It can be used -// as a drop-in replacement for os.MkdirAll() -func MkdirAll(path string, _ os.FileMode) error { - return mkdirall(path, false, "") -} - -// mkdirall is a custom version of os.MkdirAll modified for use on Windows -// so that it is both volume path aware, and can create a directory with -// a DACL. -func mkdirall(path string, applyACL bool, sddl string) error { - if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { - return nil - } - - // The rest of this method is largely copied from os.MkdirAll and should be kept - // as-is to ensure compatibility. - - // Fast path: if we can tell whether path is a directory or file, stop with success or error. - dir, err := os.Stat(path) - if err == nil { - if dir.IsDir() { - return nil - } - return &os.PathError{ - Op: "mkdir", - Path: path, - Err: syscall.ENOTDIR, - } - } - - // Slow path: make sure parent exists and then call Mkdir for path. - i := len(path) - for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. - i-- - } - - j := i - for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. 
- j-- - } - - if j > 1 { - // Create parent - err = mkdirall(path[0:j-1], false, sddl) - if err != nil { - return err - } - } - - // Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result. - if applyACL { - err = mkdirWithACL(path, sddl) - } else { - err = os.Mkdir(path, 0) - } - - if err != nil { - // Handle arguments like "foo/." by - // double-checking that directory doesn't exist. - dir, err1 := os.Lstat(path) - if err1 == nil && dir.IsDir() { - return nil - } - return err - } - return nil -} - -// mkdirWithACL creates a new directory. If there is an error, it will be of -// type *PathError. . -// -// This is a modified and combined version of os.Mkdir and windows.Mkdir -// in golang to cater for creating a directory am ACL permitting full -// access, with inheritance, to any subfolder/file for Built-in Administrators -// and Local System. -func mkdirWithACL(name string, sddl string) error { - sa := windows.SecurityAttributes{Length: 0} - sd, err := windows.SecurityDescriptorFromString(sddl) - if err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - sa.Length = uint32(unsafe.Sizeof(sa)) - sa.InheritHandle = 1 - sa.SecurityDescriptor = sd - - namep, err := windows.UTF16PtrFromString(name) - if err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - - e := windows.CreateDirectory(namep, &sa) - if e != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: e} - } - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/image_os.go b/vendor/github.com/docker/docker/pkg/system/image_os.go deleted file mode 100644 index e3de86be2..000000000 --- a/vendor/github.com/docker/docker/pkg/system/image_os.go +++ /dev/null @@ -1,10 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" -import ( - "runtime" - "strings" -) - -// IsOSSupported determines if an operating system is supported by the host. -func IsOSSupported(os string) bool { - return strings.EqualFold(runtime.GOOS, os) -} diff --git a/vendor/github.com/docker/docker/pkg/system/init.go b/vendor/github.com/docker/docker/pkg/system/init.go deleted file mode 100644 index a17597aab..000000000 --- a/vendor/github.com/docker/docker/pkg/system/init.go +++ /dev/null @@ -1,22 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "syscall" - "time" - "unsafe" -) - -// Used by chtimes -var maxTime time.Time - -func init() { - // chtimes initialization - if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { - // This is a 64 bit timespec - // os.Chtimes limits time to the following - maxTime = time.Unix(0, 1<<63-1) - } else { - // This is a 32 bit timespec - maxTime = time.Unix(1<<31-1, 0) - } -} diff --git a/vendor/github.com/docker/docker/pkg/system/init_windows.go b/vendor/github.com/docker/docker/pkg/system/init_windows.go deleted file mode 100644 index 3c2a43ddb..000000000 --- a/vendor/github.com/docker/docker/pkg/system/init_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -var ( - // containerdRuntimeSupported determines if containerd should be the runtime. - containerdRuntimeSupported = false -) - -// InitContainerdRuntime sets whether to use containerd for runtime on Windows. -func InitContainerdRuntime(cdPath string) { - if len(cdPath) > 0 { - containerdRuntimeSupported = true - } -} - -// ContainerdRuntimeSupported returns true if the use of containerd runtime is supported. 
-func ContainerdRuntimeSupported() bool { - return containerdRuntimeSupported -} diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_unix.go b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go deleted file mode 100644 index 654b9f2c9..000000000 --- a/vendor/github.com/docker/docker/pkg/system/lstat_unix.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build !windows -// +build !windows - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "os" - "syscall" -) - -// Lstat takes a path to a file and returns -// a system.StatT type pertaining to that file. -// -// Throws an error if the file does not exist -func Lstat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Lstat(path, s); err != nil { - return nil, &os.PathError{Op: "Lstat", Path: path, Err: err} - } - return fromStatT(s) -} diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go deleted file mode 100644 index 359c791d9..000000000 --- a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go +++ /dev/null @@ -1,14 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import "os" - -// Lstat calls os.Lstat to get a fileinfo interface back. -// This is then copied into our own locally defined structure. -func Lstat(path string) (*StatT, error) { - fi, err := os.Lstat(path) - if err != nil { - return nil, err - } - - return fromStatT(&fi) -} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo.go b/vendor/github.com/docker/docker/pkg/system/meminfo.go deleted file mode 100644 index 6667eb84d..000000000 --- a/vendor/github.com/docker/docker/pkg/system/meminfo.go +++ /dev/null @@ -1,17 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -// MemInfo contains memory statistics of the host system. -type MemInfo struct { - // Total usable RAM (i.e. physical RAM minus a few reserved bits and the - // kernel binary code). - MemTotal int64 - - // Amount of free memory. - MemFree int64 - - // Total amount of swap space available. - SwapTotal int64 - - // Amount of swap space that is currently unused. - SwapFree int64 -} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go deleted file mode 100644 index 02a7377c1..000000000 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go +++ /dev/null @@ -1,69 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "bufio" - "io" - "os" - "strconv" - "strings" -) - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. -func ReadMemInfo() (*MemInfo, error) { - file, err := os.Open("/proc/meminfo") - if err != nil { - return nil, err - } - defer file.Close() - return parseMemInfo(file) -} - -// parseMemInfo parses the /proc/meminfo file into -// a MemInfo object given an io.Reader to the file. -// Throws error if there are problems reading from the file -func parseMemInfo(reader io.Reader) (*MemInfo, error) { - meminfo := &MemInfo{} - scanner := bufio.NewScanner(reader) - memAvailable := int64(-1) - for scanner.Scan() { - // Expected format: ["MemTotal:", "1234", "kB"] - parts := strings.Fields(scanner.Text()) - - // Sanity checks: Skip malformed entries. - if len(parts) < 3 || parts[2] != "kB" { - continue - } - - // Convert to bytes. 
- size, err := strconv.Atoi(parts[1]) - if err != nil { - continue - } - // Convert to KiB - bytes := int64(size) * 1024 - - switch parts[0] { - case "MemTotal:": - meminfo.MemTotal = bytes - case "MemFree:": - meminfo.MemFree = bytes - case "MemAvailable:": - memAvailable = bytes - case "SwapTotal:": - meminfo.SwapTotal = bytes - case "SwapFree:": - meminfo.SwapFree = bytes - } - } - if memAvailable != -1 { - meminfo.MemFree = memAvailable - } - - // Handle errors that may have occurred during the reading of the file. - if err := scanner.Err(); err != nil { - return nil, err - } - - return meminfo, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go deleted file mode 100644 index 207ee58ee..000000000 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build !linux && !windows -// +build !linux,!windows - -package system // import "github.com/docker/docker/pkg/system" - -// ReadMemInfo is not supported on platforms other than linux and windows. -func ReadMemInfo() (*MemInfo, error) { - return nil, ErrNotSupportedPlatform -} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go deleted file mode 100644 index 124d2c502..000000000 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go +++ /dev/null @@ -1,45 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "unsafe" - - "golang.org/x/sys/windows" -) - -var ( - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - - procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") -) - -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx -type memorystatusex struct { - dwLength uint32 - dwMemoryLoad uint32 - ullTotalPhys uint64 - ullAvailPhys uint64 - ullTotalPageFile uint64 - ullAvailPageFile uint64 - ullTotalVirtual uint64 - ullAvailVirtual uint64 - ullAvailExtendedVirtual uint64 -} - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. -func ReadMemInfo() (*MemInfo, error) { - msi := &memorystatusex{ - dwLength: 64, - } - r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) - if r1 == 0 { - return &MemInfo{}, nil - } - return &MemInfo{ - MemTotal: int64(msi.ullTotalPhys), - MemFree: int64(msi.ullAvailPhys), - SwapTotal: int64(msi.ullTotalPageFile), - SwapFree: int64(msi.ullAvailPageFile), - }, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/docker/docker/pkg/system/mknod.go deleted file mode 100644 index d27152c0f..000000000 --- a/vendor/github.com/docker/docker/pkg/system/mknod.go +++ /dev/null @@ -1,17 +0,0 @@ -//go:build !windows -// +build !windows - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "golang.org/x/sys/unix" -) - -// Mkdev is used to build the value of linux devices (in /dev/) which specifies major -// and minor number of the newly created device special file. -// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. -// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, -// then the top 12 bits of the minor. 
-func Mkdev(major int64, minor int64) uint32 { - return uint32(unix.Mkdev(uint32(major), uint32(minor))) -} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go b/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go deleted file mode 100644 index c890be116..000000000 --- a/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build freebsd -// +build freebsd - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "golang.org/x/sys/unix" -) - -// Mknod creates a filesystem node (file, device special file or named pipe) named path -// with attributes specified by mode and dev. -func Mknod(path string, mode uint32, dev int) error { - return unix.Mknod(path, mode, uint64(dev)) -} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_unix.go b/vendor/github.com/docker/docker/pkg/system/mknod_unix.go deleted file mode 100644 index 4586aad19..000000000 --- a/vendor/github.com/docker/docker/pkg/system/mknod_unix.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build !freebsd && !windows -// +build !freebsd,!windows - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "golang.org/x/sys/unix" -) - -// Mknod creates a filesystem node (file, device special file or named pipe) named path -// with attributes specified by mode and dev. -func Mknod(path string, mode uint32, dev int) error { - return unix.Mknod(path, mode, dev) -} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go deleted file mode 100644 index ec89d7a15..000000000 --- a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go +++ /dev/null @@ -1,11 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -// Mknod is not implemented on Windows. -func Mknod(path string, mode uint32, dev int) error { - return ErrNotSupportedPlatform -} - -// Mkdev is not implemented on Windows. -func Mkdev(major int64, minor int64) uint32 { - panic("Mkdev not implemented on Windows.") -} diff --git a/vendor/github.com/docker/docker/pkg/system/path.go b/vendor/github.com/docker/docker/pkg/system/path.go deleted file mode 100644 index 818b20efe..000000000 --- a/vendor/github.com/docker/docker/pkg/system/path.go +++ /dev/null @@ -1,41 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - -// DefaultPathEnv is unix style list of directories to search for -// executables. Each directory is separated from the next by a colon -// ':' character . -// For Windows containers, an empty string is returned as the default -// path will be set by the container, and Docker has no context of what the -// default path should be. -func DefaultPathEnv(os string) string { - if os == "windows" { - return "" - } - return defaultUnixPathEnv -} - -// PathVerifier defines the subset of a PathDriver that CheckSystemDriveAndRemoveDriveLetter -// actually uses in order to avoid system depending on containerd/continuity. -type PathVerifier interface { - IsAbs(string) bool -} - -// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, -// is the system drive. -// On Linux: this is a no-op. -// On Windows: this does the following> -// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. -// This is used, for example, when validating a user provided path in docker cp. 
-// If a drive letter is supplied, it must be the system drive. The drive letter -// is always removed. Also, it translates it to OS semantics (IOW / to \). We -// need the path in this syntax so that it can ultimately be concatenated with -// a Windows long-path which doesn't support drive-letters. Examples: -// C: --> Fail -// C:\ --> \ -// a --> a -// /a --> \a -// d:\ --> Fail -func CheckSystemDriveAndRemoveDriveLetter(path string, driver PathVerifier) (string, error) { - return checkSystemDriveAndRemoveDriveLetter(path, driver) -} diff --git a/vendor/github.com/docker/docker/pkg/system/path_unix.go b/vendor/github.com/docker/docker/pkg/system/path_unix.go deleted file mode 100644 index 197a37a21..000000000 --- a/vendor/github.com/docker/docker/pkg/system/path_unix.go +++ /dev/null @@ -1,17 +0,0 @@ -//go:build !windows -// +build !windows - -package system // import "github.com/docker/docker/pkg/system" - -// GetLongPathName converts Windows short pathnames to full pathnames. -// For example C:\Users\ADMIN~1 --> C:\Users\Administrator. -// It is a no-op on non-Windows platforms -func GetLongPathName(path string) (string, error) { - return path, nil -} - -// checkSystemDriveAndRemoveDriveLetter is the non-Windows implementation -// of CheckSystemDriveAndRemoveDriveLetter -func checkSystemDriveAndRemoveDriveLetter(path string, driver PathVerifier) (string, error) { - return path, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/path_windows.go b/vendor/github.com/docker/docker/pkg/system/path_windows.go deleted file mode 100644 index 7d375b0dd..000000000 --- a/vendor/github.com/docker/docker/pkg/system/path_windows.go +++ /dev/null @@ -1,48 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "fmt" - "path/filepath" - "strings" - - "golang.org/x/sys/windows" -) - -// GetLongPathName converts Windows short pathnames to full pathnames. -// For example C:\Users\ADMIN~1 --> C:\Users\Administrator. 
-// It is a no-op on non-Windows platforms -func GetLongPathName(path string) (string, error) { - // See https://groups.google.com/forum/#!topic/golang-dev/1tufzkruoTg - p, err := windows.UTF16FromString(path) - if err != nil { - return "", err - } - b := p // GetLongPathName says we can reuse buffer - n, err := windows.GetLongPathName(&p[0], &b[0], uint32(len(b))) - if err != nil { - return "", err - } - if n > uint32(len(b)) { - b = make([]uint16, n) - _, err = windows.GetLongPathName(&p[0], &b[0], uint32(len(b))) - if err != nil { - return "", err - } - } - return windows.UTF16ToString(b), nil -} - -// checkSystemDriveAndRemoveDriveLetter is the Windows implementation -// of CheckSystemDriveAndRemoveDriveLetter -func checkSystemDriveAndRemoveDriveLetter(path string, driver PathVerifier) (string, error) { - if len(path) == 2 && string(path[1]) == ":" { - return "", fmt.Errorf("No relative path specified in %q", path) - } - if !driver.IsAbs(path) || len(path) < 2 { - return filepath.FromSlash(path), nil - } - if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { - return "", fmt.Errorf("The specified path is not on the system drive (C:)") - } - return filepath.FromSlash(path[2:]), nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/process_unix.go b/vendor/github.com/docker/docker/pkg/system/process_unix.go deleted file mode 100644 index 1c2c6a309..000000000 --- a/vendor/github.com/docker/docker/pkg/system/process_unix.go +++ /dev/null @@ -1,46 +0,0 @@ -//go:build linux || freebsd || darwin -// +build linux freebsd darwin - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "fmt" - "os" - "strings" - "syscall" - - "golang.org/x/sys/unix" -) - -// IsProcessAlive returns true if process with a given pid is running. -func IsProcessAlive(pid int) bool { - err := unix.Kill(pid, syscall.Signal(0)) - if err == nil || err == unix.EPERM { - return true - } - - return false -} - -// KillProcess force-stops a process. -func KillProcess(pid int) { - unix.Kill(pid, unix.SIGKILL) -} - -// IsProcessZombie return true if process has a state with "Z" -// http://man7.org/linux/man-pages/man5/proc.5.html -func IsProcessZombie(pid int) (bool, error) { - statPath := fmt.Sprintf("/proc/%d/stat", pid) - dataBytes, err := os.ReadFile(statPath) - if err != nil { - // TODO(thaJeztah) should we ignore os.IsNotExist() here? ("/proc//stat" will be gone if the process exited) - return false, err - } - data := string(dataBytes) - sdata := strings.SplitN(data, " ", 4) - if len(sdata) >= 3 && sdata[2] == "Z" { - return true, nil - } - - return false, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/process_windows.go b/vendor/github.com/docker/docker/pkg/system/process_windows.go deleted file mode 100644 index 09bdfa0ca..000000000 --- a/vendor/github.com/docker/docker/pkg/system/process_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import "os" - -// IsProcessAlive returns true if process with a given pid is running. -func IsProcessAlive(pid int) bool { - _, err := os.FindProcess(pid) - - return err == nil -} - -// KillProcess force-stops a process. 
-func KillProcess(pid int) { - p, err := os.FindProcess(pid) - if err == nil { - _ = p.Kill() - } -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_bsd.go b/vendor/github.com/docker/docker/pkg/system/stat_bsd.go deleted file mode 100644 index 8e61d820f..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_bsd.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build freebsd || netbsd -// +build freebsd netbsd - -package system // import "github.com/docker/docker/pkg/system" - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_darwin.go b/vendor/github.com/docker/docker/pkg/system/stat_darwin.go deleted file mode 100644 index c1c0ee9f3..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_darwin.go +++ /dev/null @@ -1,13 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go deleted file mode 100644 index 3ac02393f..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_linux.go +++ /dev/null @@ -1,20 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: s.Mode, - uid: s.Uid, - gid: s.Gid, - // the type is 32bit on mips - rdev: uint64(s.Rdev), //nolint: unconvert - mtim: s.Mtim}, nil -} - -// FromStatT converts a syscall.Stat_t type to a system.Stat_t type -// This is exposed on Linux as pkg/archive/changes uses it. 
-func FromStatT(s *syscall.Stat_t) (*StatT, error) { - return fromStatT(s) -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go deleted file mode 100644 index 756b92d1e..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go +++ /dev/null @@ -1,13 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtim}, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go deleted file mode 100644 index 6a51ccd64..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go +++ /dev/null @@ -1,13 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: s.Mode, - uid: s.Uid, - gid: s.Gid, - rdev: s.Rdev, - mtim: s.Mtim}, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unix.go b/vendor/github.com/docker/docker/pkg/system/stat_unix.go deleted file mode 100644 index a45ffddf7..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_unix.go +++ /dev/null @@ -1,67 +0,0 @@ -//go:build !windows -// +build !windows - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "os" - "syscall" -) - -// StatT type contains status of a file. It contains metadata -// like permission, owner, group, size, etc about a file. -type StatT struct { - mode uint32 - uid uint32 - gid uint32 - rdev uint64 - size int64 - mtim syscall.Timespec -} - -// Mode returns file's permission mode. -func (s StatT) Mode() uint32 { - return s.mode -} - -// UID returns file's user id of owner. -func (s StatT) UID() uint32 { - return s.uid -} - -// GID returns file's group id of owner. -func (s StatT) GID() uint32 { - return s.gid -} - -// Rdev returns file's device ID (if it's special file). -func (s StatT) Rdev() uint64 { - return s.rdev -} - -// Size returns file's size. -func (s StatT) Size() int64 { - return s.size -} - -// Mtim returns file's last modification time. -func (s StatT) Mtim() syscall.Timespec { - return s.mtim -} - -// IsDir reports whether s describes a directory. -func (s StatT) IsDir() bool { - return s.mode&syscall.S_IFDIR != 0 -} - -// Stat takes a path to a file and returns -// a system.StatT type pertaining to that file. -// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, &os.PathError{Op: "Stat", Path: path, Err: err} - } - return fromStatT(s) -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/docker/docker/pkg/system/stat_windows.go deleted file mode 100644 index 0ff3af2fa..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_windows.go +++ /dev/null @@ -1,49 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "os" - "time" -) - -// StatT type contains status of a file. It contains metadata -// like permission, size, etc about a file. 
-type StatT struct { - mode os.FileMode - size int64 - mtim time.Time -} - -// Size returns file's size. -func (s StatT) Size() int64 { - return s.size -} - -// Mode returns file's permission mode. -func (s StatT) Mode() os.FileMode { - return s.mode -} - -// Mtim returns file's last modification time. -func (s StatT) Mtim() time.Time { - return s.mtim -} - -// Stat takes a path to a file and returns -// a system.StatT type pertaining to that file. -// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - fi, err := os.Stat(path) - if err != nil { - return nil, err - } - return fromStatT(&fi) -} - -// fromStatT converts a os.FileInfo type to a system.StatT type -func fromStatT(fi *os.FileInfo) (*StatT, error) { - return &StatT{ - size: (*fi).Size(), - mode: (*fi).Mode(), - mtim: (*fi).ModTime()}, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unix.go b/vendor/github.com/docker/docker/pkg/system/utimes_unix.go deleted file mode 100644 index 2768750a0..000000000 --- a/vendor/github.com/docker/docker/pkg/system/utimes_unix.go +++ /dev/null @@ -1,25 +0,0 @@ -//go:build linux || freebsd -// +build linux freebsd - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "syscall" - - "golang.org/x/sys/unix" -) - -// LUtimesNano is used to change access and modification time of the specified path. -// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm. -func LUtimesNano(path string, ts []syscall.Timespec) error { - uts := []unix.Timespec{ - unix.NsecToTimespec(syscall.TimespecToNsec(ts[0])), - unix.NsecToTimespec(syscall.TimespecToNsec(ts[1])), - } - err := unix.UtimesNanoAt(unix.AT_FDCWD, path, uts, unix.AT_SYMLINK_NOFOLLOW) - if err != nil && err != unix.ENOSYS { - return err - } - - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go deleted file mode 100644 index bfed4af03..000000000 --- a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build !linux && !freebsd -// +build !linux,!freebsd - -package system // import "github.com/docker/docker/pkg/system" - -import "syscall" - -// LUtimesNano is only supported on linux and freebsd. -func LUtimesNano(path string, ts []syscall.Timespec) error { - return ErrNotSupportedPlatform -} diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go deleted file mode 100644 index 95b609fe7..000000000 --- a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go +++ /dev/null @@ -1,37 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import "golang.org/x/sys/unix" - -// Lgetxattr retrieves the value of the extended attribute identified by attr -// and associated with the given path in the file system. -// It will returns a nil slice and nil error if the xattr is not set. 
-func Lgetxattr(path string, attr string) ([]byte, error) { - // Start with a 128 length byte array - dest := make([]byte, 128) - sz, errno := unix.Lgetxattr(path, attr, dest) - - for errno == unix.ERANGE { - // Buffer too small, use zero-sized buffer to get the actual size - sz, errno = unix.Lgetxattr(path, attr, []byte{}) - if errno != nil { - return nil, errno - } - dest = make([]byte, sz) - sz, errno = unix.Lgetxattr(path, attr, dest) - } - - switch { - case errno == unix.ENODATA: - return nil, nil - case errno != nil: - return nil, errno - } - - return dest[:sz], nil -} - -// Lsetxattr sets the value of the extended attribute identified by attr -// and associated with the given path in the file system. -func Lsetxattr(path string, attr string, data []byte, flags int) error { - return unix.Lsetxattr(path, attr, data, flags) -} diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go deleted file mode 100644 index b165a5dbf..000000000 --- a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build !linux -// +build !linux - -package system // import "github.com/docker/docker/pkg/system" - -// Lgetxattr is not supported on platforms other than linux. -func Lgetxattr(path string, attr string) ([]byte, error) { - return nil, ErrNotSupportedPlatform -} - -// Lsetxattr is not supported on platforms other than linux. -func Lsetxattr(path string, attr string, data []byte, flags int) error { - return ErrNotSupportedPlatform -} diff --git a/vendor/github.com/docker/go-connections/LICENSE b/vendor/github.com/docker/go-connections/LICENSE deleted file mode 100644 index b55b37bc3..000000000 --- a/vendor/github.com/docker/go-connections/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go deleted file mode 100644 index bb7e4e336..000000000 --- a/vendor/github.com/docker/go-connections/nat/nat.go +++ /dev/null @@ -1,242 +0,0 @@ -// Package nat is a convenience package for manipulation of strings describing network ports. -package nat - -import ( - "fmt" - "net" - "strconv" - "strings" -) - -const ( - // portSpecTemplate is the expected format for port specifications - portSpecTemplate = "ip:hostPort:containerPort" -) - -// PortBinding represents a binding between a Host IP address and a Host Port -type PortBinding struct { - // HostIP is the host IP Address - HostIP string `json:"HostIp"` - // HostPort is the host port number - HostPort string -} - -// PortMap is a collection of PortBinding indexed by Port -type PortMap map[Port][]PortBinding - -// PortSet is a collection of structs indexed by Port -type PortSet map[Port]struct{} - -// Port is a string containing port number and protocol in the format "80/tcp" -type Port string - -// NewPort creates a new instance of a Port given a protocol and port number or port range -func NewPort(proto, port string) (Port, error) { - // Check for parsing issues on "port" now so we can avoid having - // to check it later on. 
- - portStartInt, portEndInt, err := ParsePortRangeToInt(port) - if err != nil { - return "", err - } - - if portStartInt == portEndInt { - return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil - } - return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil -} - -// ParsePort parses the port number string and returns an int -func ParsePort(rawPort string) (int, error) { - if len(rawPort) == 0 { - return 0, nil - } - port, err := strconv.ParseUint(rawPort, 10, 16) - if err != nil { - return 0, err - } - return int(port), nil -} - -// ParsePortRangeToInt parses the port range string and returns start/end ints -func ParsePortRangeToInt(rawPort string) (int, int, error) { - if len(rawPort) == 0 { - return 0, 0, nil - } - start, end, err := ParsePortRange(rawPort) - if err != nil { - return 0, 0, err - } - return int(start), int(end), nil -} - -// Proto returns the protocol of a Port -func (p Port) Proto() string { - proto, _ := SplitProtoPort(string(p)) - return proto -} - -// Port returns the port number of a Port -func (p Port) Port() string { - _, port := SplitProtoPort(string(p)) - return port -} - -// Int returns the port number of a Port as an int -func (p Port) Int() int { - portStr := p.Port() - // We don't need to check for an error because we're going to - // assume that any error would have been found, and reported, in NewPort() - port, _ := ParsePort(portStr) - return port -} - -// Range returns the start/end port numbers of a Port range as ints -func (p Port) Range() (int, int, error) { - return ParsePortRangeToInt(p.Port()) -} - -// SplitProtoPort splits a port in the format of proto/port -func SplitProtoPort(rawPort string) (string, string) { - parts := strings.Split(rawPort, "/") - l := len(parts) - if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 { - return "", "" - } - if l == 1 { - return "tcp", rawPort - } - if len(parts[1]) == 0 { - return "tcp", parts[0] - } - return parts[1], parts[0] -} - -func validateProto(proto string) bool { - for _, availableProto := range []string{"tcp", "udp", "sctp"} { - if availableProto == proto { - return true - } - } - return false -} - -// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses -// these in to the internal types -func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { - var ( - exposedPorts = make(map[Port]struct{}, len(ports)) - bindings = make(map[Port][]PortBinding) - ) - for _, rawPort := range ports { - portMappings, err := ParsePortSpec(rawPort) - if err != nil { - return nil, nil, err - } - - for _, portMapping := range portMappings { - port := portMapping.Port - if _, exists := exposedPorts[port]; !exists { - exposedPorts[port] = struct{}{} - } - bslice, exists := bindings[port] - if !exists { - bslice = []PortBinding{} - } - bindings[port] = append(bslice, portMapping.Binding) - } - } - return exposedPorts, bindings, nil -} - -// PortMapping is a data object mapping a Port to a PortBinding -type PortMapping struct { - Port Port - Binding PortBinding -} - -func splitParts(rawport string) (string, string, string) { - parts := strings.Split(rawport, ":") - n := len(parts) - containerport := parts[n-1] - - switch n { - case 1: - return "", "", containerport - case 2: - return "", parts[0], containerport - case 3: - return parts[0], parts[1], containerport - default: - return strings.Join(parts[:n-2], ":"), parts[n-2], containerport - } -} - -// ParsePortSpec parses a port specification string into a slice of PortMappings -func 
ParsePortSpec(rawPort string) ([]PortMapping, error) { - var proto string - rawIP, hostPort, containerPort := splitParts(rawPort) - proto, containerPort = SplitProtoPort(containerPort) - - // Strip [] from IPV6 addresses - ip, _, err := net.SplitHostPort(rawIP + ":") - if err != nil { - return nil, fmt.Errorf("Invalid ip address %v: %s", rawIP, err) - } - if ip != "" && net.ParseIP(ip) == nil { - return nil, fmt.Errorf("Invalid ip address: %s", ip) - } - if containerPort == "" { - return nil, fmt.Errorf("No port specified: %s", rawPort) - } - - startPort, endPort, err := ParsePortRange(containerPort) - if err != nil { - return nil, fmt.Errorf("Invalid containerPort: %s", containerPort) - } - - var startHostPort, endHostPort uint64 = 0, 0 - if len(hostPort) > 0 { - startHostPort, endHostPort, err = ParsePortRange(hostPort) - if err != nil { - return nil, fmt.Errorf("Invalid hostPort: %s", hostPort) - } - } - - if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) { - // Allow host port range iff containerPort is not a range. - // In this case, use the host port range as the dynamic - // host port range to allocate into. - if endPort != startPort { - return nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) - } - } - - if !validateProto(strings.ToLower(proto)) { - return nil, fmt.Errorf("Invalid proto: %s", proto) - } - - ports := []PortMapping{} - for i := uint64(0); i <= (endPort - startPort); i++ { - containerPort = strconv.FormatUint(startPort+i, 10) - if len(hostPort) > 0 { - hostPort = strconv.FormatUint(startHostPort+i, 10) - } - // Set hostPort to a range only if there is a single container port - // and a dynamic host port. - if startPort == endPort && startHostPort != endHostPort { - hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10)) - } - port, err := NewPort(strings.ToLower(proto), containerPort) - if err != nil { - return nil, err - } - - binding := PortBinding{ - HostIP: ip, - HostPort: hostPort, - } - ports = append(ports, PortMapping{Port: port, Binding: binding}) - } - return ports, nil -} diff --git a/vendor/github.com/docker/go-connections/nat/parse.go b/vendor/github.com/docker/go-connections/nat/parse.go deleted file mode 100644 index 892adf8c6..000000000 --- a/vendor/github.com/docker/go-connections/nat/parse.go +++ /dev/null @@ -1,57 +0,0 @@ -package nat - -import ( - "fmt" - "strconv" - "strings" -) - -// PartParser parses and validates the specified string (data) using the specified template -// e.g. ip:public:private -> 192.168.0.1:80:8000 -// DEPRECATED: do not use, this function may be removed in a future version -func PartParser(template, data string) (map[string]string, error) { - // ip:public:private - var ( - templateParts = strings.Split(template, ":") - parts = strings.Split(data, ":") - out = make(map[string]string, len(templateParts)) - ) - if len(parts) != len(templateParts) { - return nil, fmt.Errorf("Invalid format to parse. 
%s should match template %s", data, template) - } - - for i, t := range templateParts { - value := "" - if len(parts) > i { - value = parts[i] - } - out[t] = value - } - return out, nil -} - -// ParsePortRange parses and validates the specified string as a port-range (8000-9000) -func ParsePortRange(ports string) (uint64, uint64, error) { - if ports == "" { - return 0, 0, fmt.Errorf("Empty string specified for ports.") - } - if !strings.Contains(ports, "-") { - start, err := strconv.ParseUint(ports, 10, 16) - end := start - return start, end, err - } - - parts := strings.Split(ports, "-") - start, err := strconv.ParseUint(parts[0], 10, 16) - if err != nil { - return 0, 0, err - } - end, err := strconv.ParseUint(parts[1], 10, 16) - if err != nil { - return 0, 0, err - } - if end < start { - return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) - } - return start, end, nil -} diff --git a/vendor/github.com/docker/go-connections/nat/sort.go b/vendor/github.com/docker/go-connections/nat/sort.go deleted file mode 100644 index ce950171e..000000000 --- a/vendor/github.com/docker/go-connections/nat/sort.go +++ /dev/null @@ -1,96 +0,0 @@ -package nat - -import ( - "sort" - "strings" -) - -type portSorter struct { - ports []Port - by func(i, j Port) bool -} - -func (s *portSorter) Len() int { - return len(s.ports) -} - -func (s *portSorter) Swap(i, j int) { - s.ports[i], s.ports[j] = s.ports[j], s.ports[i] -} - -func (s *portSorter) Less(i, j int) bool { - ip := s.ports[i] - jp := s.ports[j] - - return s.by(ip, jp) -} - -// Sort sorts a list of ports using the provided predicate -// This function should compare `i` and `j`, returning true if `i` is -// considered to be less than `j` -func Sort(ports []Port, predicate func(i, j Port) bool) { - s := &portSorter{ports, predicate} - sort.Sort(s) -} - -type portMapEntry struct { - port Port - binding PortBinding -} - -type portMapSorter []portMapEntry - -func (s portMapSorter) Len() int { return len(s) } -func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// sort the port so that the order is: -// 1. port with larger specified bindings -// 2. larger port -// 3. port with tcp protocol -func (s portMapSorter) Less(i, j int) bool { - pi, pj := s[i].port, s[j].port - hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort) - return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp") -} - -// SortPortMap sorts the list of ports and their respected mapping. The ports -// will explicit HostPort will be placed first. 
-func SortPortMap(ports []Port, bindings PortMap) { - s := portMapSorter{} - for _, p := range ports { - if binding, ok := bindings[p]; ok { - for _, b := range binding { - s = append(s, portMapEntry{port: p, binding: b}) - } - bindings[p] = []PortBinding{} - } else { - s = append(s, portMapEntry{port: p}) - } - } - - sort.Sort(s) - var ( - i int - pm = make(map[Port]struct{}) - ) - // reorder ports - for _, entry := range s { - if _, ok := pm[entry.port]; !ok { - ports[i] = entry.port - pm[entry.port] = struct{}{} - i++ - } - // reorder bindings for this port - if _, ok := bindings[entry.port]; ok { - bindings[entry.port] = append(bindings[entry.port], entry.binding) - } - } -} - -func toInt(s string) uint64 { - i, _, err := ParsePortRange(s) - if err != nil { - i = 0 - } - return i -} diff --git a/vendor/github.com/docker/go-connections/sockets/README.md b/vendor/github.com/docker/go-connections/sockets/README.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/docker/go-connections/sockets/inmem_socket.go b/vendor/github.com/docker/go-connections/sockets/inmem_socket.go deleted file mode 100644 index 99846ffdd..000000000 --- a/vendor/github.com/docker/go-connections/sockets/inmem_socket.go +++ /dev/null @@ -1,81 +0,0 @@ -package sockets - -import ( - "errors" - "net" - "sync" -) - -var errClosed = errors.New("use of closed network connection") - -// InmemSocket implements net.Listener using in-memory only connections. -type InmemSocket struct { - chConn chan net.Conn - chClose chan struct{} - addr string - mu sync.Mutex -} - -// dummyAddr is used to satisfy net.Addr for the in-mem socket -// it is just stored as a string and returns the string for all calls -type dummyAddr string - -// NewInmemSocket creates an in-memory only net.Listener -// The addr argument can be any string, but is used to satisfy the `Addr()` part -// of the net.Listener interface -func NewInmemSocket(addr string, bufSize int) *InmemSocket { - return &InmemSocket{ - chConn: make(chan net.Conn, bufSize), - chClose: make(chan struct{}), - addr: addr, - } -} - -// Addr returns the socket's addr string to satisfy net.Listener -func (s *InmemSocket) Addr() net.Addr { - return dummyAddr(s.addr) -} - -// Accept implements the Accept method in the Listener interface; it waits for the next call and returns a generic Conn. -func (s *InmemSocket) Accept() (net.Conn, error) { - select { - case conn := <-s.chConn: - return conn, nil - case <-s.chClose: - return nil, errClosed - } -} - -// Close closes the listener. It will be unavailable for use once closed. 
-func (s *InmemSocket) Close() error { - s.mu.Lock() - defer s.mu.Unlock() - select { - case <-s.chClose: - default: - close(s.chClose) - } - return nil -} - -// Dial is used to establish a connection with the in-mem server -func (s *InmemSocket) Dial(network, addr string) (net.Conn, error) { - srvConn, clientConn := net.Pipe() - select { - case s.chConn <- srvConn: - case <-s.chClose: - return nil, errClosed - } - - return clientConn, nil -} - -// Network returns the addr string, satisfies net.Addr -func (a dummyAddr) Network() string { - return string(a) -} - -// String returns the string form -func (a dummyAddr) String() string { - return string(a) -} diff --git a/vendor/github.com/docker/go-connections/sockets/proxy.go b/vendor/github.com/docker/go-connections/sockets/proxy.go deleted file mode 100644 index 98e9a1dc6..000000000 --- a/vendor/github.com/docker/go-connections/sockets/proxy.go +++ /dev/null @@ -1,51 +0,0 @@ -package sockets - -import ( - "net" - "net/url" - "os" - "strings" - - "golang.org/x/net/proxy" -) - -// GetProxyEnv allows access to the uppercase and the lowercase forms of -// proxy-related variables. See the Go specification for details on these -// variables. https://golang.org/pkg/net/http/ -func GetProxyEnv(key string) string { - proxyValue := os.Getenv(strings.ToUpper(key)) - if proxyValue == "" { - return os.Getenv(strings.ToLower(key)) - } - return proxyValue -} - -// DialerFromEnvironment takes in a "direct" *net.Dialer and returns a -// proxy.Dialer which will route the connections through the proxy using the -// given dialer. -func DialerFromEnvironment(direct *net.Dialer) (proxy.Dialer, error) { - allProxy := GetProxyEnv("all_proxy") - if len(allProxy) == 0 { - return direct, nil - } - - proxyURL, err := url.Parse(allProxy) - if err != nil { - return direct, err - } - - proxyFromURL, err := proxy.FromURL(proxyURL, direct) - if err != nil { - return direct, err - } - - noProxy := GetProxyEnv("no_proxy") - if len(noProxy) == 0 { - return proxyFromURL, nil - } - - perHost := proxy.NewPerHost(proxyFromURL, direct) - perHost.AddFromString(noProxy) - - return perHost, nil -} diff --git a/vendor/github.com/docker/go-connections/sockets/sockets.go b/vendor/github.com/docker/go-connections/sockets/sockets.go deleted file mode 100644 index a1d7beb4d..000000000 --- a/vendor/github.com/docker/go-connections/sockets/sockets.go +++ /dev/null @@ -1,38 +0,0 @@ -// Package sockets provides helper functions to create and configure Unix or TCP sockets. -package sockets - -import ( - "errors" - "net" - "net/http" - "time" -) - -// Why 32? See https://github.com/docker/docker/pull/8035. -const defaultTimeout = 32 * time.Second - -// ErrProtocolNotAvailable is returned when a given transport protocol is not provided by the operating system. -var ErrProtocolNotAvailable = errors.New("protocol not available") - -// ConfigureTransport configures the specified Transport according to the -// specified proto and addr. -// If the proto is unix (using a unix socket to communicate) or npipe the -// compression is disabled. 
-func ConfigureTransport(tr *http.Transport, proto, addr string) error { - switch proto { - case "unix": - return configureUnixTransport(tr, proto, addr) - case "npipe": - return configureNpipeTransport(tr, proto, addr) - default: - tr.Proxy = http.ProxyFromEnvironment - dialer, err := DialerFromEnvironment(&net.Dialer{ - Timeout: defaultTimeout, - }) - if err != nil { - return err - } - tr.Dial = dialer.Dial - } - return nil -} diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go deleted file mode 100644 index 386cf0dbb..000000000 --- a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build !windows - -package sockets - -import ( - "fmt" - "net" - "net/http" - "syscall" - "time" -) - -const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path) - -func configureUnixTransport(tr *http.Transport, proto, addr string) error { - if len(addr) > maxUnixSocketPathSize { - return fmt.Errorf("Unix socket path %q is too long", addr) - } - // No need for compression in local communications. - tr.DisableCompression = true - tr.Dial = func(_, _ string) (net.Conn, error) { - return net.DialTimeout(proto, addr, defaultTimeout) - } - return nil -} - -func configureNpipeTransport(tr *http.Transport, proto, addr string) error { - return ErrProtocolNotAvailable -} - -// DialPipe connects to a Windows named pipe. -// This is not supported on other OSes. -func DialPipe(_ string, _ time.Duration) (net.Conn, error) { - return nil, syscall.EAFNOSUPPORT -} diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go deleted file mode 100644 index 5c21644e1..000000000 --- a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go +++ /dev/null @@ -1,27 +0,0 @@ -package sockets - -import ( - "net" - "net/http" - "time" - - "github.com/Microsoft/go-winio" -) - -func configureUnixTransport(tr *http.Transport, proto, addr string) error { - return ErrProtocolNotAvailable -} - -func configureNpipeTransport(tr *http.Transport, proto, addr string) error { - // No need for compression in local communications. - tr.DisableCompression = true - tr.Dial = func(_, _ string) (net.Conn, error) { - return DialPipe(addr, defaultTimeout) - } - return nil -} - -// DialPipe connects to a Windows named pipe. -func DialPipe(addr string, timeout time.Duration) (net.Conn, error) { - return winio.DialPipe(addr, &timeout) -} diff --git a/vendor/github.com/docker/go-connections/sockets/tcp_socket.go b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go deleted file mode 100644 index 53cbb6c79..000000000 --- a/vendor/github.com/docker/go-connections/sockets/tcp_socket.go +++ /dev/null @@ -1,22 +0,0 @@ -// Package sockets provides helper functions to create and configure Unix or TCP sockets. -package sockets - -import ( - "crypto/tls" - "net" -) - -// NewTCPSocket creates a TCP socket listener with the specified address and -// the specified tls configuration. If TLSConfig is set, will encapsulate the -// TCP listener inside a TLS one. 
-func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) { - l, err := net.Listen("tcp", addr) - if err != nil { - return nil, err - } - if tlsConfig != nil { - tlsConfig.NextProtos = []string{"http/1.1"} - l = tls.NewListener(l, tlsConfig) - } - return l, nil -} diff --git a/vendor/github.com/docker/go-connections/sockets/unix_socket.go b/vendor/github.com/docker/go-connections/sockets/unix_socket.go deleted file mode 100644 index a8b5dbb6f..000000000 --- a/vendor/github.com/docker/go-connections/sockets/unix_socket.go +++ /dev/null @@ -1,32 +0,0 @@ -// +build !windows - -package sockets - -import ( - "net" - "os" - "syscall" -) - -// NewUnixSocket creates a unix socket with the specified path and group. -func NewUnixSocket(path string, gid int) (net.Listener, error) { - if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) { - return nil, err - } - mask := syscall.Umask(0777) - defer syscall.Umask(mask) - - l, err := net.Listen("unix", path) - if err != nil { - return nil, err - } - if err := os.Chown(path, 0, gid); err != nil { - l.Close() - return nil, err - } - if err := os.Chmod(path, 0660); err != nil { - l.Close() - return nil, err - } - return l, nil -} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go deleted file mode 100644 index 1ca0965e0..000000000 --- a/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build go1.7 - -package tlsconfig - -import ( - "crypto/x509" - "runtime" -) - -// SystemCertPool returns a copy of the system cert pool, -// returns an error if failed to load or empty pool on windows. -func SystemCertPool() (*x509.CertPool, error) { - certpool, err := x509.SystemCertPool() - if err != nil && runtime.GOOS == "windows" { - return x509.NewCertPool(), nil - } - return certpool, err -} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go deleted file mode 100644 index 1ff81c333..000000000 --- a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !go1.7 - -package tlsconfig - -import ( - "crypto/x509" -) - -// SystemCertPool returns an new empty cert pool, -// accessing system cert pool is supported in go 1.7 -func SystemCertPool() (*x509.CertPool, error) { - return x509.NewCertPool(), nil -} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go deleted file mode 100644 index 0ef3fdcb4..000000000 --- a/vendor/github.com/docker/go-connections/tlsconfig/config.go +++ /dev/null @@ -1,254 +0,0 @@ -// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. -// -// As a reminder from https://golang.org/pkg/crypto/tls/#Config: -// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified. -// A Config may be reused; the tls package will also not modify it. -package tlsconfig - -import ( - "crypto/tls" - "crypto/x509" - "encoding/pem" - "fmt" - "io/ioutil" - "os" - - "github.com/pkg/errors" -) - -// Options represents the information needed to create client and server TLS configurations. 
-type Options struct { - CAFile string - - // If either CertFile or KeyFile is empty, Client() will not load them - // preventing the client from authenticating to the server. - // However, Server() requires them and will error out if they are empty. - CertFile string - KeyFile string - - // client-only option - InsecureSkipVerify bool - // server-only option - ClientAuth tls.ClientAuthType - // If ExclusiveRootPools is set, then if a CA file is provided, the root pool used for TLS - // creds will include exclusively the roots in that CA file. If no CA file is provided, - // the system pool will be used. - ExclusiveRootPools bool - MinVersion uint16 - // If Passphrase is set, it will be used to decrypt a TLS private key - // if the key is encrypted - Passphrase string -} - -// Extra (server-side) accepted CBC cipher suites - will phase out in the future -var acceptedCBCCiphers = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, -} - -// DefaultServerAcceptedCiphers should be uses by code which already has a crypto/tls -// options struct but wants to use a commonly accepted set of TLS cipher suites, with -// known weak algorithms removed. -var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...) - -// allTLSVersions lists all the TLS versions and is used by the code that validates -// a uint16 value as a TLS version. -var allTLSVersions = map[uint16]struct{}{ - tls.VersionSSL30: {}, - tls.VersionTLS10: {}, - tls.VersionTLS11: {}, - tls.VersionTLS12: {}, -} - -// ServerDefault returns a secure-enough TLS configuration for the server TLS configuration. -func ServerDefault(ops ...func(*tls.Config)) *tls.Config { - tlsconfig := &tls.Config{ - // Avoid fallback by default to SSL protocols < TLS1.2 - MinVersion: tls.VersionTLS12, - PreferServerCipherSuites: true, - CipherSuites: DefaultServerAcceptedCiphers, - } - - for _, op := range ops { - op(tlsconfig) - } - - return tlsconfig -} - -// ClientDefault returns a secure-enough TLS configuration for the client TLS configuration. -func ClientDefault(ops ...func(*tls.Config)) *tls.Config { - tlsconfig := &tls.Config{ - // Prefer TLS1.2 as the client minimum - MinVersion: tls.VersionTLS12, - CipherSuites: clientCipherSuites, - } - - for _, op := range ops { - op(tlsconfig) - } - - return tlsconfig -} - -// certPool returns an X.509 certificate pool from `caFile`, the certificate file. -func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) { - // If we should verify the server, we need to load a trusted ca - var ( - certPool *x509.CertPool - err error - ) - if exclusivePool { - certPool = x509.NewCertPool() - } else { - certPool, err = SystemCertPool() - if err != nil { - return nil, fmt.Errorf("failed to read system certificates: %v", err) - } - } - pem, err := ioutil.ReadFile(caFile) - if err != nil { - return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err) - } - if !certPool.AppendCertsFromPEM(pem) { - return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile) - } - return certPool, nil -} - -// isValidMinVersion checks that the input value is a valid tls minimum version -func isValidMinVersion(version uint16) bool { - _, ok := allTLSVersions[version] - return ok -} - -// adjustMinVersion sets the MinVersion on `config`, the input configuration. -// It assumes the current MinVersion on the `config` is the lowest allowed. 
-func adjustMinVersion(options Options, config *tls.Config) error { - if options.MinVersion > 0 { - if !isValidMinVersion(options.MinVersion) { - return fmt.Errorf("Invalid minimum TLS version: %x", options.MinVersion) - } - if options.MinVersion < config.MinVersion { - return fmt.Errorf("Requested minimum TLS version is too low. Should be at-least: %x", config.MinVersion) - } - config.MinVersion = options.MinVersion - } - - return nil -} - -// IsErrEncryptedKey returns true if the 'err' is an error of incorrect -// password when tryin to decrypt a TLS private key -func IsErrEncryptedKey(err error) bool { - return errors.Cause(err) == x509.IncorrectPasswordError -} - -// getPrivateKey returns the private key in 'keyBytes', in PEM-encoded format. -// If the private key is encrypted, 'passphrase' is used to decrypted the -// private key. -func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) { - // this section makes some small changes to code from notary/tuf/utils/x509.go - pemBlock, _ := pem.Decode(keyBytes) - if pemBlock == nil { - return nil, fmt.Errorf("no valid private key found") - } - - var err error - if x509.IsEncryptedPEMBlock(pemBlock) { - keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase)) - if err != nil { - return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it") - } - keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes}) - } - - return keyBytes, nil -} - -// getCert returns a Certificate from the CertFile and KeyFile in 'options', -// if the key is encrypted, the Passphrase in 'options' will be used to -// decrypt it. -func getCert(options Options) ([]tls.Certificate, error) { - if options.CertFile == "" && options.KeyFile == "" { - return nil, nil - } - - errMessage := "Could not load X509 key pair" - - cert, err := ioutil.ReadFile(options.CertFile) - if err != nil { - return nil, errors.Wrap(err, errMessage) - } - - prKeyBytes, err := ioutil.ReadFile(options.KeyFile) - if err != nil { - return nil, errors.Wrap(err, errMessage) - } - - prKeyBytes, err = getPrivateKey(prKeyBytes, options.Passphrase) - if err != nil { - return nil, errors.Wrap(err, errMessage) - } - - tlsCert, err := tls.X509KeyPair(cert, prKeyBytes) - if err != nil { - return nil, errors.Wrap(err, errMessage) - } - - return []tls.Certificate{tlsCert}, nil -} - -// Client returns a TLS configuration meant to be used by a client. -func Client(options Options) (*tls.Config, error) { - tlsConfig := ClientDefault() - tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify - if !options.InsecureSkipVerify && options.CAFile != "" { - CAs, err := certPool(options.CAFile, options.ExclusiveRootPools) - if err != nil { - return nil, err - } - tlsConfig.RootCAs = CAs - } - - tlsCerts, err := getCert(options) - if err != nil { - return nil, err - } - tlsConfig.Certificates = tlsCerts - - if err := adjustMinVersion(options, tlsConfig); err != nil { - return nil, err - } - - return tlsConfig, nil -} - -// Server returns a TLS configuration meant to be used by a server. -func Server(options Options) (*tls.Config, error) { - tlsConfig := ServerDefault() - tlsConfig.ClientAuth = options.ClientAuth - tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) - if err != nil { - if os.IsNotExist(err) { - return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) - } - return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. 
Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err) - } - tlsConfig.Certificates = []tls.Certificate{tlsCert} - if options.ClientAuth >= tls.VerifyClientCertIfGiven && options.CAFile != "" { - CAs, err := certPool(options.CAFile, options.ExclusiveRootPools) - if err != nil { - return nil, err - } - tlsConfig.ClientCAs = CAs - } - - if err := adjustMinVersion(options, tlsConfig); err != nil { - return nil, err - } - - return tlsConfig, nil -} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go deleted file mode 100644 index 6b4c6a7c0..000000000 --- a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build go1.5 - -// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. -// -package tlsconfig - -import ( - "crypto/tls" -) - -// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) -var clientCipherSuites = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, -} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go deleted file mode 100644 index ee22df47c..000000000 --- a/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !go1.5 - -// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. -// -package tlsconfig - -import ( - "crypto/tls" -) - -// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) -var clientCipherSuites = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, -} diff --git a/vendor/github.com/docker/go-units/CONTRIBUTING.md b/vendor/github.com/docker/go-units/CONTRIBUTING.md deleted file mode 100644 index 9ea86d784..000000000 --- a/vendor/github.com/docker/go-units/CONTRIBUTING.md +++ /dev/null @@ -1,67 +0,0 @@ -# Contributing to go-units - -Want to hack on go-units? Awesome! Here are instructions to get you started. - -go-units is a part of the [Docker](https://www.docker.com) project, and follows -the same rules and principles. If you're already familiar with the way -Docker does things, you'll feel right at home. - -Otherwise, go read Docker's -[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), -[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), -[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and -[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). - -### Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. The rules are pretty simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 
-660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -Use your real name (sorry, no pseudonyms or anonymous contributions.) - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. diff --git a/vendor/github.com/docker/go-units/LICENSE b/vendor/github.com/docker/go-units/LICENSE deleted file mode 100644 index b55b37bc3..000000000 --- a/vendor/github.com/docker/go-units/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/go-units/MAINTAINERS b/vendor/github.com/docker/go-units/MAINTAINERS deleted file mode 100644 index 4aac7c741..000000000 --- a/vendor/github.com/docker/go-units/MAINTAINERS +++ /dev/null @@ -1,46 +0,0 @@ -# go-units maintainers file -# -# This file describes who runs the docker/go-units project and how. -# This is a living document - if you see something out of date or missing, speak up! -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant parser. -# -# This file is compiled into the MAINTAINERS file in docker/opensource. -# -[Org] - [Org."Core maintainers"] - people = [ - "akihirosuda", - "dnephin", - "thajeztah", - "vdemeester", - ] - -[people] - -# A reference list of all people associated with the project. -# All other sections should refer to people by their canonical key -# in the people section. 
- - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - - [people.akihirosuda] - Name = "Akihiro Suda" - Email = "akihiro.suda.cz@hco.ntt.co.jp" - GitHub = "AkihiroSuda" - - [people.dnephin] - Name = "Daniel Nephin" - Email = "dnephin@gmail.com" - GitHub = "dnephin" - - [people.thajeztah] - Name = "Sebastiaan van Stijn" - Email = "github@gone.nl" - GitHub = "thaJeztah" - - [people.vdemeester] - Name = "Vincent Demeester" - Email = "vincent@sbr.pm" - GitHub = "vdemeester" \ No newline at end of file diff --git a/vendor/github.com/docker/go-units/README.md b/vendor/github.com/docker/go-units/README.md deleted file mode 100644 index 4f70a4e13..000000000 --- a/vendor/github.com/docker/go-units/README.md +++ /dev/null @@ -1,16 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) - -# Introduction - -go-units is a library to transform human friendly measurements into machine friendly values. - -## Usage - -See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. - -## Copyright and license - -Copyright © 2015 Docker, Inc. - -go-units is licensed under the Apache License, Version 2.0. -See [LICENSE](LICENSE) for the full text of the license. diff --git a/vendor/github.com/docker/go-units/circle.yml b/vendor/github.com/docker/go-units/circle.yml deleted file mode 100644 index af9d60552..000000000 --- a/vendor/github.com/docker/go-units/circle.yml +++ /dev/null @@ -1,11 +0,0 @@ -dependencies: - post: - # install golint - - go get golang.org/x/lint/golint - -test: - pre: - # run analysis before tests - - go vet ./... - - test -z "$(golint ./... | tee /dev/stderr)" - - test -z "$(gofmt -s -l . | tee /dev/stderr)" diff --git a/vendor/github.com/docker/go-units/duration.go b/vendor/github.com/docker/go-units/duration.go deleted file mode 100644 index 48dd8744d..000000000 --- a/vendor/github.com/docker/go-units/duration.go +++ /dev/null @@ -1,35 +0,0 @@ -// Package units provides helper function to parse and print size and time units -// in human-readable format. -package units - -import ( - "fmt" - "time" -) - -// HumanDuration returns a human-readable approximation of a duration -// (eg. "About a minute", "4 hours ago", etc.). 
-func HumanDuration(d time.Duration) string { - if seconds := int(d.Seconds()); seconds < 1 { - return "Less than a second" - } else if seconds == 1 { - return "1 second" - } else if seconds < 60 { - return fmt.Sprintf("%d seconds", seconds) - } else if minutes := int(d.Minutes()); minutes == 1 { - return "About a minute" - } else if minutes < 60 { - return fmt.Sprintf("%d minutes", minutes) - } else if hours := int(d.Hours() + 0.5); hours == 1 { - return "About an hour" - } else if hours < 48 { - return fmt.Sprintf("%d hours", hours) - } else if hours < 24*7*2 { - return fmt.Sprintf("%d days", hours/24) - } else if hours < 24*30*2 { - return fmt.Sprintf("%d weeks", hours/24/7) - } else if hours < 24*365*2 { - return fmt.Sprintf("%d months", hours/24/30) - } - return fmt.Sprintf("%d years", int(d.Hours())/24/365) -} diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go deleted file mode 100644 index c245a8951..000000000 --- a/vendor/github.com/docker/go-units/size.go +++ /dev/null @@ -1,154 +0,0 @@ -package units - -import ( - "fmt" - "strconv" - "strings" -) - -// See: http://en.wikipedia.org/wiki/Binary_prefix -const ( - // Decimal - - KB = 1000 - MB = 1000 * KB - GB = 1000 * MB - TB = 1000 * GB - PB = 1000 * TB - - // Binary - - KiB = 1024 - MiB = 1024 * KiB - GiB = 1024 * MiB - TiB = 1024 * GiB - PiB = 1024 * TiB -) - -type unitMap map[byte]int64 - -var ( - decimalMap = unitMap{'k': KB, 'm': MB, 'g': GB, 't': TB, 'p': PB} - binaryMap = unitMap{'k': KiB, 'm': MiB, 'g': GiB, 't': TiB, 'p': PiB} -) - -var ( - decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} - binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} -) - -func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) { - i := 0 - unitsLimit := len(_map) - 1 - for size >= base && i < unitsLimit { - size = size / base - i++ - } - return size, _map[i] -} - -// CustomSize returns a human-readable approximation of a size -// using custom format. -func CustomSize(format string, size float64, base float64, _map []string) string { - size, unit := getSizeAndUnit(size, base, _map) - return fmt.Sprintf(format, size, unit) -} - -// HumanSizeWithPrecision allows the size to be in any precision, -// instead of 4 digit precision used in units.HumanSize. -func HumanSizeWithPrecision(size float64, precision int) string { - size, unit := getSizeAndUnit(size, 1000.0, decimapAbbrs) - return fmt.Sprintf("%.*g%s", precision, size, unit) -} - -// HumanSize returns a human-readable approximation of a size -// capped at 4 valid numbers (eg. "2.746 MB", "796 KB"). -func HumanSize(size float64) string { - return HumanSizeWithPrecision(size, 4) -} - -// BytesSize returns a human-readable size in bytes, kibibytes, -// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). -func BytesSize(size float64) string { - return CustomSize("%.4g%s", size, 1024.0, binaryAbbrs) -} - -// FromHumanSize returns an integer from a human-readable specification of a -// size using SI standard (eg. "44kB", "17MB"). -func FromHumanSize(size string) (int64, error) { - return parseSize(size, decimalMap) -} - -// RAMInBytes parses a human-readable string representing an amount of RAM -// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and -// returns the number of bytes, or -1 if the string is unparseable. -// Units are case-insensitive, and the 'b' suffix is optional. 
-func RAMInBytes(size string) (int64, error) { - return parseSize(size, binaryMap) -} - -// Parses the human-readable size string into the amount it represents. -func parseSize(sizeStr string, uMap unitMap) (int64, error) { - // TODO: rewrite to use strings.Cut if there's a space - // once Go < 1.18 is deprecated. - sep := strings.LastIndexAny(sizeStr, "01234567890. ") - if sep == -1 { - // There should be at least a digit. - return -1, fmt.Errorf("invalid size: '%s'", sizeStr) - } - var num, sfx string - if sizeStr[sep] != ' ' { - num = sizeStr[:sep+1] - sfx = sizeStr[sep+1:] - } else { - // Omit the space separator. - num = sizeStr[:sep] - sfx = sizeStr[sep+1:] - } - - size, err := strconv.ParseFloat(num, 64) - if err != nil { - return -1, err - } - // Backward compatibility: reject negative sizes. - if size < 0 { - return -1, fmt.Errorf("invalid size: '%s'", sizeStr) - } - - if len(sfx) == 0 { - return int64(size), nil - } - - // Process the suffix. - - if len(sfx) > 3 { // Too long. - goto badSuffix - } - sfx = strings.ToLower(sfx) - // Trivial case: b suffix. - if sfx[0] == 'b' { - if len(sfx) > 1 { // no extra characters allowed after b. - goto badSuffix - } - return int64(size), nil - } - // A suffix from the map. - if mul, ok := uMap[sfx[0]]; ok { - size *= float64(mul) - } else { - goto badSuffix - } - - // The suffix may have extra "b" or "ib" (e.g. KiB or MB). - switch { - case len(sfx) == 2 && sfx[1] != 'b': - goto badSuffix - case len(sfx) == 3 && sfx[1:] != "ib": - goto badSuffix - } - - return int64(size), nil - -badSuffix: - return -1, fmt.Errorf("invalid suffix: '%s'", sfx) -} diff --git a/vendor/github.com/docker/go-units/ulimit.go b/vendor/github.com/docker/go-units/ulimit.go deleted file mode 100644 index fca0400cc..000000000 --- a/vendor/github.com/docker/go-units/ulimit.go +++ /dev/null @@ -1,123 +0,0 @@ -package units - -import ( - "fmt" - "strconv" - "strings" -) - -// Ulimit is a human friendly version of Rlimit. -type Ulimit struct { - Name string - Hard int64 - Soft int64 -} - -// Rlimit specifies the resource limits, such as max open files. -type Rlimit struct { - Type int `json:"type,omitempty"` - Hard uint64 `json:"hard,omitempty"` - Soft uint64 `json:"soft,omitempty"` -} - -const ( - // magic numbers for making the syscall - // some of these are defined in the syscall package, but not all. - // Also since Windows client doesn't get access to the syscall package, need to - // define these here - rlimitAs = 9 - rlimitCore = 4 - rlimitCPU = 0 - rlimitData = 2 - rlimitFsize = 1 - rlimitLocks = 10 - rlimitMemlock = 8 - rlimitMsgqueue = 12 - rlimitNice = 13 - rlimitNofile = 7 - rlimitNproc = 6 - rlimitRss = 5 - rlimitRtprio = 14 - rlimitRttime = 15 - rlimitSigpending = 11 - rlimitStack = 3 -) - -var ulimitNameMapping = map[string]int{ - //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. - "core": rlimitCore, - "cpu": rlimitCPU, - "data": rlimitData, - "fsize": rlimitFsize, - "locks": rlimitLocks, - "memlock": rlimitMemlock, - "msgqueue": rlimitMsgqueue, - "nice": rlimitNice, - "nofile": rlimitNofile, - "nproc": rlimitNproc, - "rss": rlimitRss, - "rtprio": rlimitRtprio, - "rttime": rlimitRttime, - "sigpending": rlimitSigpending, - "stack": rlimitStack, -} - -// ParseUlimit parses and returns a Ulimit from the specified string. 
-func ParseUlimit(val string) (*Ulimit, error) { - parts := strings.SplitN(val, "=", 2) - if len(parts) != 2 { - return nil, fmt.Errorf("invalid ulimit argument: %s", val) - } - - if _, exists := ulimitNameMapping[parts[0]]; !exists { - return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) - } - - var ( - soft int64 - hard = &soft // default to soft in case no hard was set - temp int64 - err error - ) - switch limitVals := strings.Split(parts[1], ":"); len(limitVals) { - case 2: - temp, err = strconv.ParseInt(limitVals[1], 10, 64) - if err != nil { - return nil, err - } - hard = &temp - fallthrough - case 1: - soft, err = strconv.ParseInt(limitVals[0], 10, 64) - if err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) - } - - if *hard != -1 { - if soft == -1 { - return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: soft: -1 (unlimited), hard: %d", *hard) - } - if soft > *hard { - return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard) - } - } - - return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil -} - -// GetRlimit returns the RLimit corresponding to Ulimit. -func (u *Ulimit) GetRlimit() (*Rlimit, error) { - t, exists := ulimitNameMapping[u.Name] - if !exists { - return nil, fmt.Errorf("invalid ulimit name %s", u.Name) - } - - return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil -} - -func (u *Ulimit) String() string { - return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) -} diff --git a/vendor/github.com/gogo/protobuf/AUTHORS b/vendor/github.com/gogo/protobuf/AUTHORS deleted file mode 100644 index 3d97fc7a2..000000000 --- a/vendor/github.com/gogo/protobuf/AUTHORS +++ /dev/null @@ -1,15 +0,0 @@ -# This is the official list of GoGo authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS file, which -# lists people. For example, employees are listed in CONTRIBUTORS, -# but not in AUTHORS, because the employer holds the copyright. - -# Names should be added to this file as one of -# Organization's name -# Individual's name -# Individual's name - -# Please keep the list sorted. - -Sendgrid, Inc -Vastech SA (PTY) LTD -Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/vendor/github.com/gogo/protobuf/CONTRIBUTORS deleted file mode 100644 index 1b4f6c208..000000000 --- a/vendor/github.com/gogo/protobuf/CONTRIBUTORS +++ /dev/null @@ -1,23 +0,0 @@ -Anton Povarov -Brian Goff -Clayton Coleman -Denis Smirnov -DongYun Kang -Dwayne Schultz -Georg Apitz -Gustav Paul -Johan Brandhorst -John Shahid -John Tuley -Laurent -Patrick Lee -Peter Edge -Roger Johansson -Sam Nguyen -Sergio Arbeo -Stephen J Day -Tamir Duberstein -Todd Eisenberger -Tormod Erevik Lea -Vyacheslav Kim -Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE deleted file mode 100644 index f57de90da..000000000 --- a/vendor/github.com/gogo/protobuf/LICENSE +++ /dev/null @@ -1,35 +0,0 @@ -Copyright (c) 2013, The GoGo Authors. All rights reserved. - -Protocol Buffers for Go with Gadgets - -Go support for Protocol Buffers - Google's data interchange format - -Copyright 2010 The Go Authors. All rights reserved. 
-https://github.com/golang/protobuf - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile deleted file mode 100644 index 00d65f327..000000000 --- a/vendor/github.com/gogo/protobuf/proto/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -install: - go install - -test: install generate-test-pbs - go test - - -generate-test-pbs: - make install - make -C test_proto - make -C proto3_proto - make diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go deleted file mode 100644 index a26b046d9..000000000 --- a/vendor/github.com/gogo/protobuf/proto/clone.go +++ /dev/null @@ -1,258 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: RawMessage. - -package proto - -import ( - "fmt" - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. -func Clone(src Message) Message { - in := reflect.ValueOf(src) - if in.IsNil() { - return src - } - out := reflect.New(in.Type().Elem()) - dst := out.Interface().(Message) - Merge(dst, src) - return dst -} - -// Merger is the interface representing objects that can merge messages of the same type. -type Merger interface { - // Merge merges src into this message. - // Required and optional fields that are set in src will be set to that value in dst. - // Elements of repeated fields will be appended. - // - // Merge may panic if called with a different argument type than the receiver. - Merge(src Message) -} - -// generatedMerger is the custom merge method that generated protos will have. -// We must add this method since a generate Merge method will conflict with -// many existing protos that have a Merge data field already defined. -type generatedMerger interface { - XXX_Merge(src Message) -} - -// Merge merges src into dst. -// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. 
-func Merge(dst, src Message) { - if m, ok := dst.(Merger); ok { - m.Merge(src) - return - } - - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) - } - if in.IsNil() { - return // Merge from nil src is a noop - } - if m, ok := dst.(generatedMerger); ok { - m.XXX_Merge(src) - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { - emOut := out.Addr().Interface().(extensionsBytes) - bIn := emIn.GetExtensions() - bOut := emOut.GetExtensions() - *bOut = append(*bOut, *bIn...) - } else if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). -func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. - // Otherwise we will merge as normal. - if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. - elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. 
- - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. - out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go deleted file mode 100644 index 24552483c..000000000 --- a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go +++ /dev/null @@ -1,39 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import "reflect" - -type custom interface { - Marshal() ([]byte, error) - Unmarshal(data []byte) error - Size() int -} - -var customType = reflect.TypeOf((*custom)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go deleted file mode 100644 index 63b0f08be..000000000 --- a/vendor/github.com/gogo/protobuf/proto/decode.go +++ /dev/null @@ -1,427 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. - */ - -import ( - "errors" - "fmt" - "io" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. It does not get returned to user code. -var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func DecodeVarint(buf []byte) (x uint64, n int) { - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. 
- return 0, 0 -} - -func (p *Buffer) decodeVarintSlow() (x uint64, err error) { - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) DecodeVarint() (x uint64, err error) { - i := p.index - buf := p.buf - - if i >= len(buf) { - return 0, io.ErrUnexpectedEOF - } else if buf[i] < 0x80 { - p.index++ - return uint64(buf[i]), nil - } else if len(buf)-i < 10 { - return p.decodeVarintSlow() - } - - var b uint64 - // we already checked the first byte - x = uint64(buf[i]) - 0x80 - i++ - - b = uint64(buf[i]) - i++ - x += b << 7 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 7 - - b = uint64(buf[i]) - i++ - x += b << 14 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 14 - - b = uint64(buf[i]) - i++ - x += b << 21 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 21 - - b = uint64(buf[i]) - i++ - x += b << 28 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 28 - - b = uint64(buf[i]) - i++ - x += b << 35 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 35 - - b = uint64(buf[i]) - i++ - x += b << 42 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 42 - - b = uint64(buf[i]) - i++ - x += b << 49 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 49 - - b = uint64(buf[i]) - i++ - x += b << 56 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 56 - - b = uint64(buf[i]) - i++ - x += b << 63 - if b&0x80 == 0 { - goto done - } - - return 0, errOverflow - -done: - p.index = i - return x, nil -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. 
-func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -// Unmarshal implementations should not clear the receiver. -// Any unmarshaled data should be merged into the receiver. -// Callers of Unmarshal that do not want to retain existing data -// should Reset the receiver before calling Unmarshal. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// newUnmarshaler is the interface representing objects that can -// unmarshal themselves. The semantics are identical to Unmarshaler. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newUnmarshaler interface { - XXX_Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. -func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. -func UnmarshalMerge(buf []byte, pb Message) error { - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. 
- // - // See https://github.com/golang/protobuf/issues/424 - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// DecodeMessage reads a count-delimited message from the Buffer. -func (p *Buffer) DecodeMessage(pb Message) error { - enc, err := p.DecodeRawBytes(false) - if err != nil { - return err - } - return NewBuffer(enc).Unmarshal(pb) -} - -// DecodeGroup reads a tag-delimited group from the Buffer. -// StartGroup tag is already consumed. This function consumes -// EndGroup tag. -func (p *Buffer) DecodeGroup(pb Message) error { - b := p.buf[p.index:] - x, y := findEndGroup(b) - if x < 0 { - return io.ErrUnexpectedEOF - } - err := Unmarshal(b[:x], pb) - p.index += y - return err -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -// -// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(newUnmarshaler); ok { - err := u.XXX_Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - // Slow workaround for messages that aren't Unmarshalers. - // This includes some hand-coded .pb.go files and - // bootstrap protos. - // TODO: fix all of those and then add Unmarshal to - // the Message interface. Then: - // The cast above and code below can be deleted. - // The old unmarshaler can be deleted. - // Clients can call Unmarshal directly (can already do that, actually). - var info InternalMessageInfo - err := info.Unmarshal(pb, p.buf[p.index:]) - p.index = len(p.buf) - return err -} diff --git a/vendor/github.com/gogo/protobuf/proto/deprecated.go b/vendor/github.com/gogo/protobuf/proto/deprecated.go deleted file mode 100644 index 35b882c09..000000000 --- a/vendor/github.com/gogo/protobuf/proto/deprecated.go +++ /dev/null @@ -1,63 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import "errors" - -// Deprecated: do not use. -type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } - -// Deprecated: do not use. -func GetStats() Stats { return Stats{} } - -// Deprecated: do not use. -func MarshalMessageSet(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSet([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func MarshalMessageSetJSON(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSetJSON([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/gogo/protobuf/proto/discard.go b/vendor/github.com/gogo/protobuf/proto/discard.go deleted file mode 100644 index fe1bd7d90..000000000 --- a/vendor/github.com/gogo/protobuf/proto/discard.go +++ /dev/null @@ -1,350 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2017 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -type generatedDiscarder interface { - XXX_DiscardUnknown() -} - -// DiscardUnknown recursively discards all unknown fields from this message -// and all embedded messages. -// -// When unmarshaling a message with unrecognized fields, the tags and values -// of such fields are preserved in the Message. This allows a later call to -// marshal to be able to produce a message that continues to have those -// unrecognized fields. To avoid this, DiscardUnknown is used to -// explicitly clear the unknown fields after unmarshaling. -// -// For proto2 messages, the unknown fields of message extensions are only -// discarded from messages that have been accessed via GetExtension. -func DiscardUnknown(m Message) { - if m, ok := m.(generatedDiscarder); ok { - m.XXX_DiscardUnknown() - return - } - // TODO: Dynamically populate a InternalMessageInfo for legacy messages, - // but the master branch has no implementation for InternalMessageInfo, - // so it would be more work to replicate that approach. - discardLegacy(m) -} - -// DiscardUnknown recursively discards all unknown fields. -func (a *InternalMessageInfo) DiscardUnknown(m Message) { - di := atomicLoadDiscardInfo(&a.discard) - if di == nil { - di = getDiscardInfo(reflect.TypeOf(m).Elem()) - atomicStoreDiscardInfo(&a.discard, di) - } - di.discard(toPointer(&m)) -} - -type discardInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []discardFieldInfo - unrecognized field -} - -type discardFieldInfo struct { - field field // Offset of field, guaranteed to be valid - discard func(src pointer) -} - -var ( - discardInfoMap = map[reflect.Type]*discardInfo{} - discardInfoLock sync.Mutex -) - -func getDiscardInfo(t reflect.Type) *discardInfo { - discardInfoLock.Lock() - defer discardInfoLock.Unlock() - di := discardInfoMap[t] - if di == nil { - di = &discardInfo{typ: t} - discardInfoMap[t] = di - } - return di -} - -func (di *discardInfo) discard(src pointer) { - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&di.initialized) == 0 { - di.computeDiscardInfo() - } - - for _, fi := range di.fields { - sfp := src.offset(fi.field) - fi.discard(sfp) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { - // Ignore lock since DiscardUnknown is not concurrency safe. 
- emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - DiscardUnknown(m) - } - } - } - - if di.unrecognized.IsValid() { - *src.offset(di.unrecognized).toBytes() = nil - } -} - -func (di *discardInfo) computeDiscardInfo() { - di.lock.Lock() - defer di.lock.Unlock() - if di.initialized != 0 { - return - } - t := di.typ - n := t.NumField() - - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - dfi := discardFieldInfo{field: toField(&f)} - tf := f.Type - - // Unwrap tf to get its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) - case isSlice: // E.g., []*pb.T - discardInfo := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sps := src.getPointerSlice() - for _, sp := range sps { - if !sp.isNil() { - discardInfo.discard(sp) - } - } - } - default: // E.g., *pb.T - discardInfo := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sp := src.getPointer() - if !sp.isNil() { - discardInfo.discard(sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) - default: // E.g., map[K]V - if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) - dfi.discard = func(src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - DiscardUnknown(val.Interface().(Message)) - } - } - } else { - dfi.discard = func(pointer) {} // Noop - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) - default: // E.g., interface{} - // TODO: Make this faster? - dfi.discard = func(src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - DiscardUnknown(sv.Interface().(Message)) - } - } - } - } - default: - continue - } - di.fields = append(di.fields, dfi) - } - - di.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - di.unrecognized = toField(&f) - } - - atomic.StoreInt32(&di.initialized, 1) -} - -func discardLegacy(m Message) { - v := reflect.ValueOf(m) - if v.Kind() != reflect.Ptr || v.IsNil() { - return - } - v = v.Elem() - if v.Kind() != reflect.Struct { - return - } - t := v.Type() - - for i := 0; i < v.NumField(); i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - vf := v.Field(i) - tf := f.Type - - // Unwrap tf to get its most basic type. 
- var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) - case isSlice: // E.g., []*pb.T - for j := 0; j < vf.Len(); j++ { - discardLegacy(vf.Index(j).Interface().(Message)) - } - default: // E.g., *pb.T - discardLegacy(vf.Interface().(Message)) - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) - default: // E.g., map[K]V - tv := vf.Type().Elem() - if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) - for _, key := range vf.MapKeys() { - val := vf.MapIndex(key) - discardLegacy(val.Interface().(Message)) - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) - default: // E.g., test_proto.isCommunique_Union interface - if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { - vf = vf.Elem() // E.g., *test_proto.Communique_Msg - if !vf.IsNil() { - vf = vf.Elem() // E.g., test_proto.Communique_Msg - vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value - if vf.Kind() == reflect.Ptr { - discardLegacy(vf.Interface().(Message)) - } - } - } - } - } - } - - if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { - if vf.Type() != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - vf.Set(reflect.ValueOf([]byte(nil))) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(m); err == nil { - // Ignore lock since discardLegacy is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - discardLegacy(m) - } - } - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go deleted file mode 100644 index 93464c91c..000000000 --- a/vendor/github.com/gogo/protobuf/proto/duration.go +++ /dev/null @@ -1,100 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// This file implements conversions between google.protobuf.Duration -// and time.Duration. - -import ( - "errors" - "fmt" - "time" -) - -const ( - // Range of a Duration in seconds, as specified in - // google/protobuf/duration.proto. This is about 10,000 years in seconds. - maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) - minSeconds = -maxSeconds -) - -// validateDuration determines whether the Duration is valid according to the -// definition in google/protobuf/duration.proto. A valid Duration -// may still be too large to fit into a time.Duration (the range of Duration -// is about 10,000 years, and the range of time.Duration is about 290). -func validateDuration(d *duration) error { - if d == nil { - return errors.New("duration: nil Duration") - } - if d.Seconds < minSeconds || d.Seconds > maxSeconds { - return fmt.Errorf("duration: %#v: seconds out of range", d) - } - if d.Nanos <= -1e9 || d.Nanos >= 1e9 { - return fmt.Errorf("duration: %#v: nanos out of range", d) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { - return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) - } - return nil -} - -// DurationFromProto converts a Duration to a time.Duration. DurationFromProto -// returns an error if the Duration is invalid or is too large to be -// represented in a time.Duration. -func durationFromProto(p *duration) (time.Duration, error) { - if err := validateDuration(p); err != nil { - return 0, err - } - d := time.Duration(p.Seconds) * time.Second - if int64(d/time.Second) != p.Seconds { - return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) - } - if p.Nanos != 0 { - d += time.Duration(p.Nanos) - if (d < 0) != (p.Nanos < 0) { - return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) - } - } - return d, nil -} - -// DurationProto converts a time.Duration to a Duration. -func durationProto(d time.Duration) *duration { - nanos := d.Nanoseconds() - secs := nanos / 1e9 - nanos -= secs * 1e9 - return &duration{ - Seconds: secs, - Nanos: int32(nanos), - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go deleted file mode 100644 index e748e1730..000000000 --- a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go +++ /dev/null @@ -1,49 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2016, The GoGo Authors. All rights reserved. 
-// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" - "time" -) - -var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() - -type duration struct { - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` -} - -func (m *duration) Reset() { *m = duration{} } -func (*duration) ProtoMessage() {} -func (*duration) String() string { return "duration" } - -func init() { - RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") -} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go deleted file mode 100644 index 9581ccd30..000000000 --- a/vendor/github.com/gogo/protobuf/proto/encode.go +++ /dev/null @@ -1,205 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "reflect" -) - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") - - // ErrTooLarge is the error returned if Marshal is called with a - // message that encodes to >2GB. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. -func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. -func SizeVarint(x uint64) int { - switch { - case x < 1<<7: - return 1 - case x < 1<<14: - return 2 - case x < 1<<21: - return 3 - case x < 1<<28: - return 4 - case x < 1<<35: - return 5 - case x < 1<<42: - return 6 - case x < 1<<49: - return 7 - case x < 1<<56: - return 8 - case x < 1<<63: - return 9 - } - return 10 -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. 
-// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -// Marshaler is the interface representing objects that can marshal themselves. -type Marshaler interface { - Marshal() ([]byte, error) -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. -func (p *Buffer) EncodeMessage(pb Message) error { - siz := Size(pb) - sizVar := SizeVarint(uint64(siz)) - p.grow(siz + sizVar) - p.EncodeVarint(uint64(siz)) - return p.Marshal(pb) -} - -// All protocol buffer fields are nillable, but be careful. -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go deleted file mode 100644 index 0f5fb173e..000000000 --- a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go +++ /dev/null @@ -1,33 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -func NewRequiredNotSetError(field string) *RequiredNotSetError { - return &RequiredNotSetError{field} -} diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go deleted file mode 100644 index d4db5a1c1..000000000 --- a/vendor/github.com/gogo/protobuf/proto/equal.go +++ /dev/null @@ -1,300 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. If the message is defined - in a proto3 .proto file, fields are not "set"; specifically, - zero length proto3 "bytes" fields are equal (nil == {}). - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal. Note a "bytes" field, - although represented by []byte, is not a repeated field and the - rule for the scalar fields described above applies. - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Two map fields are equal iff their lengths are the same, - and they contain the same set of elements. Zero-length map - fields are equal. - - Every other combination of things are not equal. - -The return value is undefined if a and b are not protocol buffers. 
-*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. -func equalStruct(v1, v2 reflect.Value) bool { - sprop := GetProperties(v1.Type()) - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2, sprop.Prop[i]) { - return false - } - } - - if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_InternalExtensions") - if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - return bytes.Equal(u1, u2) -} - -// v1 and v2 are known to have the same type. -// prop may be nil. -func equalAny(v1, v2 reflect.Value, prop *Properties) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Interface: - // Probably a oneof field; compare the inner values. - n1, n2 := v1.IsNil(), v2.IsNil() - if n1 || n2 { - return n1 == n2 - } - e1, e2 := v1.Elem(), v2.Elem() - if e1.Type() != e2.Type() { - return false - } - return equalAny(e1, e2, nil) - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2, nil) { - return false - } - } - return true - case reflect.Ptr: - // Maps may have nil values in them, so check for nil. - if v1.IsNil() && v2.IsNil() { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return equalAny(v1.Elem(), v2.Elem(), prop) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value. 
- if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i), prop) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// x1 and x2 are InternalExtensions. -func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { - em1, _ := x1.extensionsRead() - em2, _ := x2.extensionsRead() - return equalExtMap(base, em1, em2) -} - -func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1, m2 := e1.value, e2.value - - if m1 == nil && m2 == nil { - // Both have only encoded form. - if bytes.Equal(e1.enc, e2.enc) { - continue - } - // The bytes are different, but the extensions might still be - // equal. We need to decode them to compare. - } - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - // If both have only encoded form and the bytes are the same, - // it is handled above. We get here when the bytes are different. - // We don't know how to decode it, so just compare them as byte - // slices. - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - return false - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. - log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - } - - return true -} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go deleted file mode 100644 index 341c6f57f..000000000 --- a/vendor/github.com/gogo/protobuf/proto/extensions.go +++ /dev/null @@ -1,605 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
-// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Types and routines for supporting protocol buffer extensions. - */ - -import ( - "errors" - "fmt" - "io" - "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer generated by the current -// proto compiler that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - extensionsWrite() map[int32]Extension - extensionsRead() (map[int32]Extension, sync.Locker) -} - -// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous -// version of the proto compiler that may be extended. -type extendableProtoV1 interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} - -// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. -type extensionAdapter struct { - extendableProtoV1 -} - -func (e extensionAdapter) extensionsWrite() map[int32]Extension { - return e.ExtensionMap() -} - -func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - return e.ExtensionMap(), notLocker{} -} - -// notLocker is a sync.Locker whose Lock and Unlock methods are nops. -type notLocker struct{} - -func (n notLocker) Lock() {} -func (n notLocker) Unlock() {} - -// extendable returns the extendableProto interface for the given generated proto message. -// If the proto message has the old extension format, it returns a wrapper that implements -// the extendableProto interface. -func extendable(p interface{}) (extendableProto, error) { - switch p := p.(type) { - case extendableProto: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return p, nil - case extendableProtoV1: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return extensionAdapter{p}, nil - case extensionsBytes: - return slowExtensionAdapter{p}, nil - } - // Don't allocate a specific error containing %T: - // this is the hot path for Clone and MarshalText. 
- return nil, errNotExtendable -} - -var errNotExtendable = errors.New("proto: not an extendable proto.Message") - -func isNilPtr(x interface{}) bool { - v := reflect.ValueOf(x) - return v.Kind() == reflect.Ptr && v.IsNil() -} - -// XXX_InternalExtensions is an internal representation of proto extensions. -// -// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, -// thus gaining the unexported 'extensions' method, which can be called only from the proto package. -// -// The methods of XXX_InternalExtensions are not concurrency safe in general, -// but calls to logically read-only methods such as has and get may be executed concurrently. -type XXX_InternalExtensions struct { - // The struct must be indirect so that if a user inadvertently copies a - // generated message and its embedded XXX_InternalExtensions, they - // avoid the mayhem of a copied mutex. - // - // The mutex serializes all logically read-only operations to p.extensionMap. - // It is up to the client to ensure that write operations to p.extensionMap are - // mutually exclusive with other accesses. - p *struct { - mu sync.Mutex - extensionMap map[int32]Extension - } -} - -// extensionsWrite returns the extension map, creating it on first use. -func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { - if e.p == nil { - e.p = new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }) - e.p.extensionMap = make(map[int32]Extension) - } - return e.p.extensionMap -} - -// extensionsRead returns the extensions map for read-only use. It may be nil. -// The caller must hold the returned mutex's lock when accessing Elements within the map. -func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { - if e.p == nil { - return nil, nil - } - return e.p.extensionMap, &e.p.mu -} - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style - Filename string // name of the file in which the extension is defined -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - value interface{} - enc []byte -} - -// SetRawExtension is for testing only. -func SetRawExtension(base Message, id int32, b []byte) { - if ebase, ok := base.(extensionsBytes); ok { - clearExtension(base, id) - ext := ebase.GetExtensions() - *ext = append(*ext, b...) - return - } - epb, err := extendable(base) - if err != nil { - return - } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} -} - -// isExtensionField returns true iff the given field number is in an extension range. 
-func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { - return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb - // Check the extended type. - if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if ea, ok := pbi.(slowExtensionAdapter); ok { - pbi = ea.extensionsBytes - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. -type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb Message, extension *ExtensionDesc) bool { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - buf := *ext - o := 0 - for o < len(buf) { - tag, n := DecodeVarint(buf[o:]) - fieldNum := int32(tag >> 3) - if int32(fieldNum) == extension.Field { - return true - } - wireType := int(tag & 0x7) - o += n - l, err := size(buf[o:], wireType) - if err != nil { - return false - } - o += l - } - return false - } - // TODO: Check types, field numbers, etc.? - epb, err := extendable(pb) - if err != nil { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false - } - mu.Lock() - _, ok := extmap[extension.Field] - mu.Unlock() - return ok -} - -// ClearExtension removes the given extension from pb. -func ClearExtension(pb Message, extension *ExtensionDesc) { - clearExtension(pb, extension.Field) -} - -func clearExtension(pb Message, fieldNum int32) { - if epb, ok := pb.(extensionsBytes); ok { - offset := 0 - for offset != -1 { - offset = deleteExtension(epb, fieldNum, offset) - } - return - } - epb, err := extendable(pb) - if err != nil { - return - } - // TODO: Check types, field numbers, etc.? - extmap := epb.extensionsWrite() - delete(extmap, fieldNum) -} - -// GetExtension retrieves a proto2 extended field from pb. -// -// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), -// then GetExtension parses the encoded field and returns a Go value of the specified type. -// If the field is not present, then the default value is returned (if one is specified), -// otherwise ErrMissingExtension is reported. -// -// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), -// then GetExtension returns the raw encoded bytes of the field extension. 
-func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - return decodeExtensionFromBytes(extension, *ext) - } - - epb, err := extendable(pb) - if err != nil { - return nil, err - } - - if extension.ExtendedType != nil { - // can only check type if this is a complete descriptor - if cerr := checkExtensionTypes(epb, extension); cerr != nil { - return nil, cerr - } - } - - emap, mu := epb.extensionsRead() - if emap == nil { - return defaultExtensionValue(extension) - } - mu.Lock() - defer mu.Unlock() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") - } - return e.value, nil - } - - if extension.ExtensionType == nil { - // incomplete descriptor - return e.enc, nil - } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err - } - - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = v - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return e.value, nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - if extension.ExtensionType == nil { - // incomplete descriptor, so no default - return nil, ErrMissingExtension - } - - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension - } - - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. - return sf.value, nil - } - - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) - } - return value.Interface(), nil -} - -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - unmarshal := typeUnmarshaler(t, extension.Tag) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate space to store the pointer/slice. 
- value := reflect.New(t).Elem() - - var err error - for { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - wire := int(x) & 7 - - b, err = unmarshal(b, valToPointer(value.Addr()), wire) - if err != nil { - return nil, err - } - - if len(b) == 0 { - break - } - } - return value.Interface(), nil -} - -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. -func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } - if err != nil { - return - } - } - return -} - -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. -func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - registeredExtensions := RegisteredExtensions(pb) - - emap, mu := epb.extensionsRead() - if emap == nil { - return nil, nil - } - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} - } - } - - extensions = append(extensions, desc) - } - return extensions, nil -} - -// SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - if epb, ok := pb.(extensionsBytes); ok { - ClearExtension(pb, extension) - newb, err := encodeExtension(extension, value) - if err != nil { - return err - } - bb := epb.GetExtensions() - *bb = append(*bb, newb...) - return nil - } - epb, err := extendable(pb) - if err != nil { - return err - } - if err := checkExtensionTypes(epb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) - } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: value} - return nil -} - -// ClearAllExtensions clears all extensions from pb. -func ClearAllExtensions(pb Message) { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - *ext = []byte{} - return - } - epb, err := extendable(pb) - if err != nil { - return - } - m := epb.extensionsWrite() - for k := range m { - delete(m, k) - } -} - -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. 
- -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) - -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m - } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) - } - m[desc.Field] = desc -} - -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. -func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] -} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go deleted file mode 100644 index 6f1ae120e..000000000 --- a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go +++ /dev/null @@ -1,389 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "bytes" - "errors" - "fmt" - "io" - "reflect" - "sort" - "strings" - "sync" -) - -type extensionsBytes interface { - Message - ExtensionRangeArray() []ExtensionRange - GetExtensions() *[]byte -} - -type slowExtensionAdapter struct { - extensionsBytes -} - -func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension { - panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.") -} - -func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - b := s.GetExtensions() - m, err := BytesToExtensionsMap(*b) - if err != nil { - panic(err) - } - return m, notLocker{} -} - -func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool { - if reflect.ValueOf(pb).IsNil() { - return ifnotset - } - value, err := GetExtension(pb, extension) - if err != nil { - return ifnotset - } - if value == nil { - return ifnotset - } - if value.(*bool) == nil { - return ifnotset - } - return *(value.(*bool)) -} - -func (this *Extension) Equal(that *Extension) bool { - if err := this.Encode(); err != nil { - return false - } - if err := that.Encode(); err != nil { - return false - } - return bytes.Equal(this.enc, that.enc) -} - -func (this *Extension) Compare(that *Extension) int { - if err := this.Encode(); err != nil { - return 1 - } - if err := that.Encode(); err != nil { - return -1 - } - return bytes.Compare(this.enc, that.enc) -} - -func SizeOfInternalExtension(m extendableProto) (n int) { - info := getMarshalInfo(reflect.TypeOf(m)) - return info.sizeV1Extensions(m.extensionsWrite()) -} - -type sortableMapElem struct { - field int32 - ext Extension -} - -func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions { - s := make(sortableExtensions, 0, len(m)) - for k, v := range m { - s = append(s, &sortableMapElem{field: k, ext: v}) - } - return s -} - -type sortableExtensions []*sortableMapElem - -func (this sortableExtensions) Len() int { return len(this) } - -func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] } - -func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field } - -func (this sortableExtensions) String() string { - sort.Sort(this) - ss := make([]string, len(this)) - for i := range this { - ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext) - } - return "map[" + strings.Join(ss, ",") + "]" -} - -func StringFromInternalExtension(m extendableProto) string { - return StringFromExtensionsMap(m.extensionsWrite()) -} - -func StringFromExtensionsMap(m map[int32]Extension) string { - return newSortableExtensionsFromMap(m).String() -} - -func StringFromExtensionsBytes(ext []byte) string { - m, err := BytesToExtensionsMap(ext) - if err != nil { - panic(err) - } - return StringFromExtensionsMap(m) -} - -func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) { - return EncodeExtensionMap(m.extensionsWrite(), data) -} - -func EncodeInternalExtensionBackwards(m extendableProto, data []byte) (n int, err error) { - return EncodeExtensionMapBackwards(m.extensionsWrite(), data) -} - -func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { - o := 0 - for _, e := range m { - if err := e.Encode(); err != nil { - return 0, err - } - n := copy(data[o:], e.enc) - if n != len(e.enc) { - return 0, io.ErrShortBuffer - } - o += n - } - return o, nil -} - -func EncodeExtensionMapBackwards(m 
map[int32]Extension, data []byte) (n int, err error) { - o := 0 - end := len(data) - for _, e := range m { - if err := e.Encode(); err != nil { - return 0, err - } - n := copy(data[end-len(e.enc):], e.enc) - if n != len(e.enc) { - return 0, io.ErrShortBuffer - } - end -= n - o += n - } - return o, nil -} - -func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { - e := m[id] - if err := e.Encode(); err != nil { - return nil, err - } - return e.enc, nil -} - -func size(buf []byte, wire int) (int, error) { - switch wire { - case WireVarint: - _, n := DecodeVarint(buf) - return n, nil - case WireFixed64: - return 8, nil - case WireBytes: - v, n := DecodeVarint(buf) - return int(v) + n, nil - case WireFixed32: - return 4, nil - case WireStartGroup: - offset := 0 - for { - u, n := DecodeVarint(buf[offset:]) - fwire := int(u & 0x7) - offset += n - if fwire == WireEndGroup { - return offset, nil - } - s, err := size(buf[offset:], wire) - if err != nil { - return 0, err - } - offset += s - } - } - return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire) -} - -func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) { - m := make(map[int32]Extension) - i := 0 - for i < len(buf) { - tag, n := DecodeVarint(buf[i:]) - if n <= 0 { - return nil, fmt.Errorf("unable to decode varint") - } - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - l, err := size(buf[i+n:], wireType) - if err != nil { - return nil, err - } - end := i + int(l) + n - m[int32(fieldNum)] = Extension{enc: buf[i:end]} - i = end - } - return m, nil -} - -func NewExtension(e []byte) Extension { - ee := Extension{enc: make([]byte, len(e))} - copy(ee.enc, e) - return ee -} - -func AppendExtension(e Message, tag int32, buf []byte) { - if ee, eok := e.(extensionsBytes); eok { - ext := ee.GetExtensions() - *ext = append(*ext, buf...) - return - } - if ee, eok := e.(extendableProto); eok { - m := ee.extensionsWrite() - ext := m[int32(tag)] // may be missing - ext.enc = append(ext.enc, buf...) 
- m[int32(tag)] = ext - } -} - -func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) { - u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType)) - ei := u.getExtElemInfo(extension) - v := value - p := toAddrPointer(&v, ei.isptr) - siz := ei.sizer(p, SizeVarint(ei.wiretag)) - buf := make([]byte, 0, siz) - return ei.marshaler(buf, p, ei.wiretag, false) -} - -func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) { - o := 0 - for o < len(buf) { - tag, n := DecodeVarint((buf)[o:]) - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - if o+n > len(buf) { - return nil, fmt.Errorf("unable to decode extension") - } - l, err := size((buf)[o+n:], wireType) - if err != nil { - return nil, err - } - if int32(fieldNum) == extension.Field { - if o+n+l > len(buf) { - return nil, fmt.Errorf("unable to decode extension") - } - v, err := decodeExtension((buf)[o:o+n+l], extension) - if err != nil { - return nil, err - } - return v, nil - } - o += n + l - } - return defaultExtensionValue(extension) -} - -func (this *Extension) Encode() error { - if this.enc == nil { - var err error - this.enc, err = encodeExtension(this.desc, this.value) - if err != nil { - return err - } - } - return nil -} - -func (this Extension) GoString() string { - if err := this.Encode(); err != nil { - return fmt.Sprintf("error encoding extension: %v", err) - } - return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) -} - -func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error { - typ := reflect.TypeOf(pb).Elem() - ext, ok := extensionMaps[typ] - if !ok { - return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) - } - desc, ok := ext[fieldNum] - if !ok { - return errors.New("proto: bad extension number; not in declared ranges") - } - return SetExtension(pb, desc, value) -} - -func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) { - typ := reflect.TypeOf(pb).Elem() - ext, ok := extensionMaps[typ] - if !ok { - return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) - } - desc, ok := ext[fieldNum] - if !ok { - return nil, fmt.Errorf("unregistered field number %d", fieldNum) - } - return GetExtension(pb, desc) -} - -func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions { - x := &XXX_InternalExtensions{ - p: new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }), - } - x.p.extensionMap = m - return *x -} - -func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { - pb := extendable.(extendableProto) - return pb.extensionsWrite() -} - -func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { - ext := pb.GetExtensions() - for offset < len(*ext) { - tag, n1 := DecodeVarint((*ext)[offset:]) - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - n2, err := size((*ext)[offset+n1:], wireType) - if err != nil { - panic(err) - } - newOffset := offset + n1 + n2 - if fieldNum == theFieldNum { - *ext = append((*ext)[:offset], (*ext)[newOffset:]...) - return offset - } - offset = newOffset - } - return -1 -} diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go deleted file mode 100644 index 80db1c155..000000000 --- a/vendor/github.com/gogo/protobuf/proto/lib.go +++ /dev/null @@ -1,973 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. 
HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example. -Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/gogo/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m *Test) GetUnion() isTest_Union { - if m != nil { - return m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func (m 
*Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/gogo/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: &pb.Test_Name{"fred"}, - } - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. -// Marshal reports this when a required field is not initialized. -// Unmarshal reports this when a required field is missing from the wire data. -type RequiredNotSetError struct{ field string } - -func (e *RequiredNotSetError) Error() string { - if e.field == "" { - return fmt.Sprintf("proto: required field not set") - } - return fmt.Sprintf("proto: required field %q not set", e.field) -} -func (e *RequiredNotSetError) RequiredNotSet() bool { - return true -} - -type invalidUTF8Error struct{ field string } - -func (e *invalidUTF8Error) Error() string { - if e.field == "" { - return "proto: invalid UTF-8 detected" - } - return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) -} -func (e *invalidUTF8Error) InvalidUTF8() bool { - return true -} - -// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. -// This error should not be exposed to the external API as such errors should -// be recreated with the field information. -var errInvalidUTF8 = &invalidUTF8Error{} - -// isNonFatal reports whether the error is either a RequiredNotSet error -// or a InvalidUTF8 error. -func isNonFatal(err error) bool { - if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { - return true - } - if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { - return true - } - return false -} - -type nonFatal struct{ E error } - -// Merge merges err into nf and reports whether it was successful. -// Otherwise it returns false for any fatal non-nil errors. -func (nf *nonFatal) Merge(err error) (ok bool) { - if err == nil { - return true // not an error - } - if !isNonFatal(err) { - return false // fatal error - } - if nf.E == nil { - nf.E = err // store first instance of non-fatal error - } - return true -} - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. 
It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // read point - - deterministic bool -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. -func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -// SetDeterministic sets whether to use deterministic serialization. -// -// Deterministic serialization guarantees that for a given binary, equal -// messages will always be serialized to the same bytes. This implies: -// -// - Repeated serialization of a message will return the same bytes. -// - Different processes of the same binary (which may be executing on -// different machines) will serialize equal messages to the same bytes. -// -// Note that the deterministic serialization is NOT canonical across -// languages. It is not guaranteed to remain stable over time. It is unstable -// across different builds with schema changes due to unknown fields. -// Users who need canonical serialization (e.g., persistent storage in a -// canonical form, fingerprinting, etc.) should define their own -// canonicalization specification and implement their own serializer rather -// than relying on this API. -// -// If deterministic serialization is requested, map entries will be sorted -// by keys in lexographical order. This is an implementation detail and -// subject to change. -func (p *Buffer) SetDeterministic(deterministic bool) { - p.deterministic = deterministic -} - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. 
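
A minimal sketch of how a caller might combine the Buffer, SetDeterministic, and the scalar helpers above, together with the RequiredNotSetError type defined earlier; it assumes the generated "./example.pb" package from the lib.go package comment, so names like pb.Test are illustrative only:

    package main

    import (
        "fmt"
        "log"

        "github.com/gogo/protobuf/proto"
        pb "./example.pb"
    )

    func main() {
        msg := &pb.Test{
            // Label (a required field) is deliberately left unset.
            Type: proto.Int32(17),
        }

        // Reuse one Buffer across messages and ask for deterministic output,
        // so map fields (if any) are written in sorted key order.
        buf := proto.NewBuffer(nil)
        buf.SetDeterministic(true)
        if err := buf.Marshal(msg); err != nil {
            // A missing required field surfaces as the (non-fatal)
            // RequiredNotSetError defined above.
            if _, ok := err.(*proto.RequiredNotSetError); ok {
                log.Println("non-fatal:", err)
            } else {
                log.Fatal(err)
            }
        }
        fmt.Printf("encoded %d bytes\n", len(buf.Bytes()))

        buf.Reset() // ready for the next message
    }
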
-func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. -func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. -func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - sindex := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = sindex -} - -// SetDefaults 
sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a struct. -func setDefaults(v reflect.Value, recur, zeros bool) { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. - switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or T or []*T or []T - switch f.Kind() { - case reflect.Struct: - setDefaults(f, recur, zeros) - - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.Kind() == reflect.Ptr && e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. 
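
As a short usage sketch (again assuming the generated "./example.pb" package from the package comment, where Test.type declares default=77):

    package main

    import (
        "fmt"

        "github.com/gogo/protobuf/proto"
        pb "./example.pb"
    )

    func main() {
        t := &pb.Test{Label: proto.String("x")}
        proto.SetDefaults(t)
        // The unset Type field is now allocated and holds its declared default;
        // already-set fields such as Label are left untouched.
        fmt.Println(*t.Type, t.GetLabel()) // 77 x
    }
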
-type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. -func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. -func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Struct: - nestedMessage = true // non-nullable - - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr, reflect.Struct: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. 
- sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// mapKeys returns a sort.Interface to be used for sorting the map keys. -// Map fields may have key types of non-float scalars, strings and enums. -func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{vs: vs} - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. - if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - case reflect.Bool: - s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true - case reflect.String: - s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } - default: - panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. 
-func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} - -const ( - // ProtoPackageIsVersion3 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - GoGoProtoPackageIsVersion3 = true - - // ProtoPackageIsVersion2 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - GoGoProtoPackageIsVersion2 = true - - // ProtoPackageIsVersion1 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - GoGoProtoPackageIsVersion1 = true -) - -// InternalMessageInfo is a type used internally by generated .pb.go files. -// This type is not intended to be used by non-generated code. -// This type is not subject to any compatibility guarantee. -type InternalMessageInfo struct { - marshal *marshalInfo - unmarshal *unmarshalInfo - merge *mergeInfo - discard *discardInfo -} diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go deleted file mode 100644 index b3aa39190..000000000 --- a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go +++ /dev/null @@ -1,50 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
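
The enum helpers EnumName and UnmarshalJSONEnum above, together with MarshalJSONEnum from lib_gogo.go below, can be exercised with the FOO maps from the package comment; a rough sketch, with pb again standing in for the illustrative generated package:

    package main

    import (
        "fmt"

        "github.com/gogo/protobuf/proto"
        pb "./example.pb"
    )

    func main() {
        // Symbolic name for the wire value 17.
        fmt.Println(proto.EnumName(pb.FOO_name, 17)) // X

        // JSON round trip: either the string "X" or the number 17 is accepted.
        data, _ := proto.MarshalJSONEnum(pb.FOO_name, int32(pb.FOO_X))
        val, err := proto.UnmarshalJSONEnum(pb.FOO_value, data, "FOO")
        if err != nil {
            panic(err)
        }
        fmt.Println(pb.FOO(val) == pb.FOO_X) // true
    }
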
- -package proto - -import ( - "encoding/json" - "strconv" -) - -type Sizer interface { - Size() int -} - -type ProtoSizer interface { - ProtoSize() int -} - -func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { - s, ok := m[value] - if !ok { - s = strconv.Itoa(int(value)) - } - return json.Marshal(s) -} diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go deleted file mode 100644 index f48a75676..000000000 --- a/vendor/github.com/gogo/protobuf/proto/message_set.go +++ /dev/null @@ -1,181 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. - */ - -import ( - "errors" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. -var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. 
-type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - return ms.find(pb) != nil -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? -} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. - -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func unmarshalMessageSet(buf []byte, exts interface{}) error { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m = exts.extensionsWrite() - case map[int32]Extension: - m = exts - default: - return errors.New("proto: not an extension map") - } - - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go deleted file mode 100644 index b6cad9083..000000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,357 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" - "sync" -) - -const unsafeAllowed = false - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// zeroField is a noop when calling pointer.offset. -var zeroField = field([]int{}) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// The pointer type is for the table-driven decoder. -// The implementation here uses a reflect.Value of pointer type to -// create a generic pointer. In pointer_unsafe.go we use unsafe -// instead of reflect to implement the same (but faster) interface. -type pointer struct { - v reflect.Value -} - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - return pointer{v: reflect.ValueOf(*i)} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { - v := reflect.ValueOf(*i) - u := reflect.New(v.Type()) - u.Elem().Set(v) - return pointer{v: u} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{v: v} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} -} - -func (p pointer) isNil() bool { - return p.v.IsNil() -} - -// grow updates the slice s in place to make it one element longer. -// s must be addressable. -// Returns the (addressable) new element. 
-func grow(s reflect.Value) reflect.Value { - n, m := s.Len(), s.Cap() - if n < m { - s.SetLen(n + 1) - } else { - s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) - } - return s.Index(n) -} - -func (p pointer) toInt64() *int64 { - return p.v.Interface().(*int64) -} -func (p pointer) toInt64Ptr() **int64 { - return p.v.Interface().(**int64) -} -func (p pointer) toInt64Slice() *[]int64 { - return p.v.Interface().(*[]int64) -} - -var int32ptr = reflect.TypeOf((*int32)(nil)) - -func (p pointer) toInt32() *int32 { - return p.v.Convert(int32ptr).Interface().(*int32) -} - -// The toInt32Ptr/Slice methods don't work because of enums. -// Instead, we must use set/get methods for the int32ptr/slice case. -/* - func (p pointer) toInt32Ptr() **int32 { - return p.v.Interface().(**int32) -} - func (p pointer) toInt32Slice() *[]int32 { - return p.v.Interface().(*[]int32) -} -*/ -func (p pointer) getInt32Ptr() *int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().(*int32) - } - // an enum - return p.v.Elem().Convert(int32PtrType).Interface().(*int32) -} -func (p pointer) setInt32Ptr(v int32) { - // Allocate value in a *int32. Possibly convert that to a *enum. - // Then assign it to a **int32 or **enum. - // Note: we can convert *int32 to *enum, but we can't convert - // **int32 to **enum! - p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) -} - -// getInt32Slice copies []int32 from p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getInt32Slice() []int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().([]int32) - } - // an enum - // Allocate a []int32, then assign []enum's values into it. - // Note: we can't convert []enum to []int32. - slice := p.v.Elem() - s := make([]int32, slice.Len()) - for i := 0; i < slice.Len(); i++ { - s[i] = int32(slice.Index(i).Int()) - } - return s -} - -// setInt32Slice copies []int32 into p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setInt32Slice(v []int32) { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - p.v.Elem().Set(reflect.ValueOf(v)) - return - } - // an enum - // Allocate a []enum, then assign []int32's values into it. - // Note: we can't convert []enum to []int32. 
- slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) - for i, x := range v { - slice.Index(i).SetInt(int64(x)) - } - p.v.Elem().Set(slice) -} -func (p pointer) appendInt32Slice(v int32) { - grow(p.v.Elem()).SetInt(int64(v)) -} - -func (p pointer) toUint64() *uint64 { - return p.v.Interface().(*uint64) -} -func (p pointer) toUint64Ptr() **uint64 { - return p.v.Interface().(**uint64) -} -func (p pointer) toUint64Slice() *[]uint64 { - return p.v.Interface().(*[]uint64) -} -func (p pointer) toUint32() *uint32 { - return p.v.Interface().(*uint32) -} -func (p pointer) toUint32Ptr() **uint32 { - return p.v.Interface().(**uint32) -} -func (p pointer) toUint32Slice() *[]uint32 { - return p.v.Interface().(*[]uint32) -} -func (p pointer) toBool() *bool { - return p.v.Interface().(*bool) -} -func (p pointer) toBoolPtr() **bool { - return p.v.Interface().(**bool) -} -func (p pointer) toBoolSlice() *[]bool { - return p.v.Interface().(*[]bool) -} -func (p pointer) toFloat64() *float64 { - return p.v.Interface().(*float64) -} -func (p pointer) toFloat64Ptr() **float64 { - return p.v.Interface().(**float64) -} -func (p pointer) toFloat64Slice() *[]float64 { - return p.v.Interface().(*[]float64) -} -func (p pointer) toFloat32() *float32 { - return p.v.Interface().(*float32) -} -func (p pointer) toFloat32Ptr() **float32 { - return p.v.Interface().(**float32) -} -func (p pointer) toFloat32Slice() *[]float32 { - return p.v.Interface().(*[]float32) -} -func (p pointer) toString() *string { - return p.v.Interface().(*string) -} -func (p pointer) toStringPtr() **string { - return p.v.Interface().(**string) -} -func (p pointer) toStringSlice() *[]string { - return p.v.Interface().(*[]string) -} -func (p pointer) toBytes() *[]byte { - return p.v.Interface().(*[]byte) -} -func (p pointer) toBytesSlice() *[][]byte { - return p.v.Interface().(*[][]byte) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return p.v.Interface().(*XXX_InternalExtensions) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return p.v.Interface().(*map[int32]Extension) -} -func (p pointer) getPointer() pointer { - return pointer{v: p.v.Elem()} -} -func (p pointer) setPointer(q pointer) { - p.v.Elem().Set(q.v) -} -func (p pointer) appendPointer(q pointer) { - grow(p.v.Elem()).Set(q.v) -} - -// getPointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getPointerSlice() []pointer { - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// setPointerSlice copies []pointer into p as a new []*T. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setPointerSlice(v []pointer) { - if v == nil { - p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) - return - } - s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) - for _, p := range v { - s = reflect.Append(s, p.v) - } - p.v.Elem().Set(s) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - if p.v.Elem().IsNil() { - return pointer{v: p.v.Elem()} - } - return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct -} - -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - // TODO: check that p.v.Type().Elem() == t? 
- return p.v -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} - -var atomicLock sync.Mutex diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go deleted file mode 100644 index 7ffd3c29d..000000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go +++ /dev/null @@ -1,59 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" -) - -// TODO: untested, so probably incorrect. 
- -func (p pointer) getRef() pointer { - return pointer{v: p.v.Addr()} -} - -func (p pointer) appendRef(v pointer, typ reflect.Type) { - slice := p.getSlice(typ) - elem := v.asPointerTo(typ).Elem() - newSlice := reflect.Append(slice, elem) - slice.Set(newSlice) -} - -func (p pointer) getSlice(typ reflect.Type) reflect.Value { - sliceTyp := reflect.SliceOf(typ) - slice := p.asPointerTo(sliceTyp) - slice = slice.Elem() - return slice -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index d55a335d9..000000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,308 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "sync/atomic" - "unsafe" -) - -const unsafeAllowed = true - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// zeroField is a noop when calling pointer.offset. -const zeroField = field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != invalidField -} - -// The pointer type below is for the new table-driven encoder/decoder. -// The implementation here uses unsafe.Pointer to create a generic pointer. -// In pointer_reflect.go we use reflect instead of unsafe to implement -// the same (but slower) interface. 
-type pointer struct { - p unsafe.Pointer -} - -// size of pointer -var ptrSize = unsafe.Sizeof(uintptr(0)) - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - // Super-tricky - read pointer out of data word of interface value. - // Saves ~25ns over the equivalent: - // return valToPointer(reflect.ValueOf(*i)) - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { - // Super-tricky - read or get the address of data word of interface value. - if isptr { - // The interface is of pointer type, thus it is a direct interface. - // The data word is the pointer data itself. We take its address. - return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} - } - // The interface is not of pointer type. The data word is the pointer - // to the data. - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{p: unsafe.Pointer(v.Pointer())} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - // For safety, we should panic if !f.IsValid, however calling panic causes - // this to no longer be inlineable, which is a serious performance cost. - /* - if !f.IsValid() { - panic("invalid field") - } - */ - return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} -} - -func (p pointer) isNil() bool { - return p.p == nil -} - -func (p pointer) toInt64() *int64 { - return (*int64)(p.p) -} -func (p pointer) toInt64Ptr() **int64 { - return (**int64)(p.p) -} -func (p pointer) toInt64Slice() *[]int64 { - return (*[]int64)(p.p) -} -func (p pointer) toInt32() *int32 { - return (*int32)(p.p) -} - -// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. -/* - func (p pointer) toInt32Ptr() **int32 { - return (**int32)(p.p) - } - func (p pointer) toInt32Slice() *[]int32 { - return (*[]int32)(p.p) - } -*/ -func (p pointer) getInt32Ptr() *int32 { - return *(**int32)(p.p) -} -func (p pointer) setInt32Ptr(v int32) { - *(**int32)(p.p) = &v -} - -// getInt32Slice loads a []int32 from p. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getInt32Slice() []int32 { - return *(*[]int32)(p.p) -} - -// setInt32Slice stores a []int32 to p. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setInt32Slice(v []int32) { - *(*[]int32)(p.p) = v -} - -// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? 
-func (p pointer) appendInt32Slice(v int32) { - s := (*[]int32)(p.p) - *s = append(*s, v) -} - -func (p pointer) toUint64() *uint64 { - return (*uint64)(p.p) -} -func (p pointer) toUint64Ptr() **uint64 { - return (**uint64)(p.p) -} -func (p pointer) toUint64Slice() *[]uint64 { - return (*[]uint64)(p.p) -} -func (p pointer) toUint32() *uint32 { - return (*uint32)(p.p) -} -func (p pointer) toUint32Ptr() **uint32 { - return (**uint32)(p.p) -} -func (p pointer) toUint32Slice() *[]uint32 { - return (*[]uint32)(p.p) -} -func (p pointer) toBool() *bool { - return (*bool)(p.p) -} -func (p pointer) toBoolPtr() **bool { - return (**bool)(p.p) -} -func (p pointer) toBoolSlice() *[]bool { - return (*[]bool)(p.p) -} -func (p pointer) toFloat64() *float64 { - return (*float64)(p.p) -} -func (p pointer) toFloat64Ptr() **float64 { - return (**float64)(p.p) -} -func (p pointer) toFloat64Slice() *[]float64 { - return (*[]float64)(p.p) -} -func (p pointer) toFloat32() *float32 { - return (*float32)(p.p) -} -func (p pointer) toFloat32Ptr() **float32 { - return (**float32)(p.p) -} -func (p pointer) toFloat32Slice() *[]float32 { - return (*[]float32)(p.p) -} -func (p pointer) toString() *string { - return (*string)(p.p) -} -func (p pointer) toStringPtr() **string { - return (**string)(p.p) -} -func (p pointer) toStringSlice() *[]string { - return (*[]string)(p.p) -} -func (p pointer) toBytes() *[]byte { - return (*[]byte)(p.p) -} -func (p pointer) toBytesSlice() *[][]byte { - return (*[][]byte)(p.p) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(p.p) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return (*map[int32]Extension)(p.p) -} - -// getPointerSlice loads []*T from p as a []pointer. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getPointerSlice() []pointer { - // Super-tricky - p should point to a []*T where T is a - // message type. We load it as []pointer. - return *(*[]pointer)(p.p) -} - -// setPointerSlice stores []pointer into p as a []*T. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setPointerSlice(v []pointer) { - // Super-tricky - p should point to a []*T where T is a - // message type. We store it as []pointer. - *(*[]pointer)(p.p) = v -} - -// getPointer loads the pointer at p and returns it. -func (p pointer) getPointer() pointer { - return pointer{p: *(*unsafe.Pointer)(p.p)} -} - -// setPointer stores the pointer q at p. -func (p pointer) setPointer(q pointer) { - *(*unsafe.Pointer)(p.p) = q.p -} - -// append q to the slice pointed to by p. -func (p pointer) appendPointer(q pointer) { - s := (*[]unsafe.Pointer)(p.p) - *s = append(*s, q.p) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - // Super-tricky - read pointer out of data word of interface value. - return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} -} - -// asPointerTo returns a reflect.Value that is a pointer to an -// object of type t stored at p. 
-func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - return reflect.NewAt(t, p.p) -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go deleted file mode 100644 index aca8eed02..000000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go +++ /dev/null @@ -1,56 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. 
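
Both pointer implementations expose the same package-internal API: the unsafe-based files carry the build constraint !purego,!appengine,!js, while the reflect-based fallbacks carry purego appengine js. Building with, for example,

    go build -tags purego ./...

selects the slower reflect implementation on platforms where package unsafe is unavailable or unwanted.
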
- -package proto - -import ( - "reflect" - "unsafe" -) - -func (p pointer) getRef() pointer { - return pointer{p: (unsafe.Pointer)(&p.p)} -} - -func (p pointer) appendRef(v pointer, typ reflect.Type) { - slice := p.getSlice(typ) - elem := v.asPointerTo(typ).Elem() - newSlice := reflect.Append(slice, elem) - slice.Set(newSlice) -} - -func (p pointer) getSlice(typ reflect.Type) reflect.Value { - sliceTyp := reflect.SliceOf(typ) - slice := p.asPointerTo(sliceTyp) - slice = slice.Elem() - return slice -} diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go deleted file mode 100644 index 28da1475f..000000000 --- a/vendor/github.com/gogo/protobuf/proto/properties.go +++ /dev/null @@ -1,610 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "fmt" - "log" - "reflect" - "sort" - "strconv" - "strings" - "sync" -) - -const debug bool = false - -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 -) - -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. 
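
A quick sketch of the wire constants above in use: each field on the wire is preceded by a varint key holding (tag<<3 | wire_type), which is how unmarshalMessageSet earlier rebuilds extension data.

    package main

    import (
        "fmt"

        "github.com/gogo/protobuf/proto"
    )

    func main() {
        // Key for field number 1 with length-delimited (bytes) encoding.
        key := proto.EncodeVarint(uint64(1)<<3 | proto.WireBytes)
        fmt.Printf("% x\n", key) // 0a  (tag 1, wire type 2)
    }
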
-const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. -type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order - - // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. - OneofTypes map[string]*OneofProperties -} - -// OneofProperties represents information about a specific field in a oneof. -type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. -type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string - WireType int - Tag int - Required bool - Optional bool - Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - CustomType string - CastType string - StdTime bool - StdDuration bool - WktPointer bool - - stype reflect.Type // set for struct types only - ctype reflect.Type // set for custom types only - sprop *StructProperties // set for struct types only - - mtype reflect.Type // set for map types only - MapKeyProp *Properties // set for map types only - MapValProp *Properties // set for map types only -} - -// String formats the properties in the protobuf struct field tag style. 
-func (p *Properties) String() string { - s := p.Wire - s += "," - s += strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { - s += ",json=" + p.JSONName - } - if p.proto3 { - s += ",proto3" - } - if p.oneof { - s += ",oneof" - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. - if len(fields) < 2 { - log.Printf("proto: tag has too few fields: %q", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - case "fixed32": - p.WireType = WireFixed32 - case "fixed64": - p.WireType = WireFixed64 - case "zigzag32": - p.WireType = WireVarint - case "zigzag64": - p.WireType = WireVarint - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - log.Printf("proto: tag has unknown wire type: %q", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - -outer: - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": - p.Optional = true - case f == "rep": - p.Repeated = true - case f == "packed": - p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): - p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break outer - } - case strings.HasPrefix(f, "embedded="): - p.OrigName = strings.Split(f, "=")[1] - case strings.HasPrefix(f, "customtype="): - p.CustomType = strings.Split(f, "=")[1] - case strings.HasPrefix(f, "casttype="): - p.CastType = strings.Split(f, "=")[1] - case f == "stdtime": - p.StdTime = true - case f == "stdduration": - p.StdDuration = true - case f == "wktptr": - p.WktPointer = true - } - } -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// setFieldProps initializes the field properties for submessages and maps. 
-func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - isMap := typ.Kind() == reflect.Map - if len(p.CustomType) > 0 && !isMap { - p.ctype = typ - p.setTag(lockGetProp) - return - } - if p.StdTime && !isMap { - p.setTag(lockGetProp) - return - } - if p.StdDuration && !isMap { - p.setTag(lockGetProp) - return - } - if p.WktPointer && !isMap { - p.setTag(lockGetProp) - return - } - switch t1 := typ; t1.Kind() { - case reflect.Struct: - p.stype = typ - case reflect.Ptr: - if t1.Elem().Kind() == reflect.Struct { - p.stype = t1.Elem() - } - case reflect.Slice: - switch t2 := t1.Elem(); t2.Kind() { - case reflect.Ptr: - switch t3 := t2.Elem(); t3.Kind() { - case reflect.Struct: - p.stype = t3 - } - case reflect.Struct: - p.stype = t2 - } - - case reflect.Map: - - p.mtype = t1 - p.MapKeyProp = &Properties{} - p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.MapValProp = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. - vtype = reflect.PtrTo(vtype) - } - - p.MapValProp.CustomType = p.CustomType - p.MapValProp.StdDuration = p.StdDuration - p.MapValProp.StdTime = p.StdTime - p.MapValProp.WktPointer = p.WktPointer - p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - p.setTag(lockGetProp) -} - -func (p *Properties) setTag(lockGetProp bool) { - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) - } - } -} - -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() -) - -// Init populates the properties from a protocol buffer struct tag. -func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" - p.Name = name - p.OrigName = name - if tag == "" { - return - } - p.Parse(tag) - p.setFieldProps(typ, f, lockGetProp) -} - -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) - -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. -func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - return sprop - } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop -} - -type ( - oneofFuncsIface interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - oneofWrappersIface interface { - XXX_OneofWrappers() []interface{} - } -) - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - return prop - } - - prop := new(StructProperties) - // in case of recursive protos, fill this in now. 
- propertiesMap[t] = prop - - // build properties - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) - - isOneofMessage := false - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - isOneofMessage = true - // Oneof fields don't use the traditional protobuf tag. - p.OrigName = oneof - } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") - } - } - - // Re-order prop.order. - sort.Sort(prop) - - if isOneofMessage { - var oots []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oots = m.XXX_OneofFuncs() - case oneofWrappersIface: - oots = m.XXX_OneofWrappers() - } - if len(oots) > 0 { - // Interpret oneof metadata. - prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T - Prop: new(Properties), - } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue - } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break - } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ - } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i - } - prop.reqCount = reqCount - - return prop -} - -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) -var enumStringMaps = make(map[string]map[int32]string) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. -func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap - if _, ok := enumStringMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumStringMaps[typeName] = unusedNameMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). 
-var ( - protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers - protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypedNils[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { - // Generated code always calls RegisterType with nil x. - // This check is just for extra safety. - protoTypedNils[name] = x - } else { - protoTypedNils[name] = reflect.Zero(t).Interface().(Message) - } - revProtoTypes[t] = name -} - -// RegisterMapType is called from generated code and maps from the fully qualified -// proto name to the native map type of the proto map definition. -func RegisterMapType(x interface{}, name string) { - if reflect.TypeOf(x).Kind() != reflect.Map { - panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) - } - if _, ok := protoMapTypes[name]; ok { - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoMapTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { - type xname interface { - XXX_MessageName() string - } - if m, ok := x.(xname); ok { - return m.XXX_MessageName() - } - return revProtoTypes[reflect.TypeOf(x)] -} - -// MessageType returns the message type (pointer to struct) for a named message. -// The type is not guaranteed to implement proto.Message if the name refers to a -// map entry. -func MessageType(name string) reflect.Type { - if t, ok := protoTypedNils[name]; ok { - return reflect.TypeOf(t) - } - return protoMapTypes[name] -} - -// A registry of all linked proto files. -var ( - protoFiles = make(map[string][]byte) // file name => fileDescriptor -) - -// RegisterFile is called from generated code and maps from the -// full file name of a .proto file to its compressed FileDescriptorProto. -func RegisterFile(filename string, fileDescriptor []byte) { - protoFiles[filename] = fileDescriptor -} - -// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. -func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go deleted file mode 100644 index 40ea3dd93..000000000 --- a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go +++ /dev/null @@ -1,36 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" -) - -var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem() -var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go deleted file mode 100644 index 5a5fd93f7..000000000 --- a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go +++ /dev/null @@ -1,119 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "fmt" - "io" -) - -func Skip(data []byte) (n int, err error) { - l := len(data) - index := 0 - for index < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - index++ - if data[index-1] < 0x80 { - break - } - } - return index, nil - case 1: - index += 8 - return index, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - index += length - return index, nil - case 3: - for { - var innerWire uint64 - var start int = index - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := Skip(data[start:]) - if err != nil { - return 0, err - } - index = start + next - } - return index, nil - case 4: - return index, nil - case 5: - index += 4 - return index, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go deleted file mode 100644 index f8babdefa..000000000 --- a/vendor/github.com/gogo/protobuf/proto/table_marshal.go +++ /dev/null @@ -1,3009 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
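[Editor's note, not part of the diff: the Skip routine deleted above walks the standard protobuf wire format by decoding a varint key, masking off the low three bits to get the wire type, and advancing past the payload accordingly. The following is a minimal standalone sketch of that walk for reference only; the helper names (readVarint, skipField) are illustrative and not part of the vendored package, and group wire types are omitted for brevity.]

package main

import (
	"errors"
	"fmt"
)

// readVarint decodes a base-128 varint starting at data[i] and returns
// its value and the index just past it.
func readVarint(data []byte, i int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if i >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[i]
		i++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, i, nil
		}
	}
}

// skipField returns the index just past the first key/value pair in data,
// mirroring the wire-type dispatch of the deleted Skip function.
func skipField(data []byte) (int, error) {
	key, i, err := readVarint(data, 0)
	if err != nil {
		return 0, err
	}
	switch key & 0x7 {
	case 0: // varint payload
		_, i, err = readVarint(data, i)
		return i, err
	case 1: // fixed64 payload
		return i + 8, nil
	case 2: // length-delimited payload
		n, i, err := readVarint(data, i)
		if err != nil {
			return 0, err
		}
		return i + int(n), nil
	case 5: // fixed32 payload
		return i + 4, nil
	default:
		return 0, fmt.Errorf("unhandled wire type %d", key&0x7)
	}
}

func main() {
	// field 1, varint 150: consumes 3 bytes
	fmt.Println(skipField([]byte{0x08, 0x96, 0x01}))
	// field 2, 3-byte bytes payload: consumes 5 bytes
	fmt.Println(skipField([]byte{0x12, 0x03, 'a', 'b', 'c'}))
}

[End of editor's note.]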
- -package proto - -import ( - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// a sizer takes a pointer to a field and the size of its tag, computes the size of -// the encoded data. -type sizer func(pointer, int) int - -// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), -// marshals the field to the end of the slice, returns the slice and error (if any). -type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) - -// marshalInfo is the information used for marshaling a message. -type marshalInfo struct { - typ reflect.Type - fields []*marshalFieldInfo - unrecognized field // offset of XXX_unrecognized - extensions field // offset of XXX_InternalExtensions - v1extensions field // offset of XXX_extensions - sizecache field // offset of XXX_sizecache - initialized int32 // 0 -- only typ is set, 1 -- fully initialized - messageset bool // uses message set wire format - hasmarshaler bool // has custom marshaler - sync.RWMutex // protect extElems map, also for initialization - extElems map[int32]*marshalElemInfo // info of extension elements - - hassizer bool // has custom sizer - hasprotosizer bool // has custom protosizer - - bytesExtensions field // offset of XXX_extensions where the field type is []byte -} - -// marshalFieldInfo is the information used for marshaling a field of a message. -type marshalFieldInfo struct { - field field - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isPointer bool - required bool // field is required - name string // name of the field, for error reporting - oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements -} - -// marshalElemInfo is the information used for marshaling an extension or oneof element. -type marshalElemInfo struct { - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) -} - -var ( - marshalInfoMap = map[reflect.Type]*marshalInfo{} - marshalInfoLock sync.Mutex - - uint8SliceType = reflect.TypeOf(([]uint8)(nil)).Kind() -) - -// getMarshalInfo returns the information to marshal a given type of message. -// The info it returns may not necessarily initialized. -// t is the type of the message (NOT the pointer to it). -func getMarshalInfo(t reflect.Type) *marshalInfo { - marshalInfoLock.Lock() - u, ok := marshalInfoMap[t] - if !ok { - u = &marshalInfo{typ: t} - marshalInfoMap[t] = u - } - marshalInfoLock.Unlock() - return u -} - -// Size is the entry point from generated code, -// and should be ONLY called by generated code. -// It computes the size of encoded data of msg. -// a is a pointer to a place to store cached marshal info. -func (a *InternalMessageInfo) Size(msg Message) int { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return 0 - } - return u.size(ptr) -} - -// Marshal is the entry point from generated code, -// and should be ONLY called by generated code. -// It marshals msg to the end of b. -// a is a pointer to a place to store cached marshal info. 
-func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return b, ErrNil - } - return u.marshal(b, ptr, deterministic) -} - -func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { - // u := a.marshal, but atomically. - // We use an atomic here to ensure memory consistency. - u := atomicLoadMarshalInfo(&a.marshal) - if u == nil { - // Get marshal information from type of message. - t := reflect.ValueOf(msg).Type() - if t.Kind() != reflect.Ptr { - panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) - } - u = getMarshalInfo(t.Elem()) - // Store it in the cache for later users. - // a.marshal = u, but atomically. - atomicStoreMarshalInfo(&a.marshal, u) - } - return u -} - -// size is the main function to compute the size of the encoded data of a message. -// ptr is the pointer to the message. -func (u *marshalInfo) size(ptr pointer) int { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - // Uses the message's Size method if available - if u.hassizer { - s := ptr.asPointerTo(u.typ).Interface().(Sizer) - return s.Size() - } - // Uses the message's ProtoSize method if available - if u.hasprotosizer { - s := ptr.asPointerTo(u.typ).Interface().(ProtoSizer) - return s.ProtoSize() - } - - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b, _ := m.Marshal() - return len(b) - } - - n := 0 - for _, f := range u.fields { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - n += f.sizer(ptr.offset(f.field), f.tagsize) - } - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - n += u.sizeMessageSet(e) - } else { - n += u.sizeExtensions(e) - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - n += u.sizeV1Extensions(m) - } - if u.bytesExtensions.IsValid() { - s := *ptr.offset(u.bytesExtensions).toBytes() - n += len(s) - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - n += len(s) - } - - // cache the result for use in marshal - if u.sizecache.IsValid() { - atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) - } - return n -} - -// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), -// fall back to compute the size. -func (u *marshalInfo) cachedsize(ptr pointer) int { - if u.sizecache.IsValid() { - return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) - } - return u.size(ptr) -} - -// marshal is the main function to marshal a message. It takes a byte slice and appends -// the encoded data to the end of the slice, returns the slice and error (if any). -// ptr is the pointer to the message. -// If deterministic is true, map is marshaled in deterministic order. -func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. 
- if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b1, err := m.Marshal() - b = append(b, b1...) - return b, err - } - - var err, errLater error - // The old marshaler encodes extensions at beginning. - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - b, err = u.appendMessageSet(b, e, deterministic) - } else { - b, err = u.appendExtensions(b, e, deterministic) - } - if err != nil { - return b, err - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - b, err = u.appendV1Extensions(b, m, deterministic) - if err != nil { - return b, err - } - } - if u.bytesExtensions.IsValid() { - s := *ptr.offset(u.bytesExtensions).toBytes() - b = append(b, s...) - } - for _, f := range u.fields { - if f.required { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // Required field is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name} - } - continue - } - } - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) - if err != nil { - if err1, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name + "." + err1.field} - } - continue - } - if err == errRepeatedHasNil { - err = errors.New("proto: repeated field " + f.name + " has nil element") - } - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return b, err - } - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - b = append(b, s...) - } - return b, errLater -} - -// computeMarshalInfo initializes the marshal info. -func (u *marshalInfo) computeMarshalInfo() { - u.Lock() - defer u.Unlock() - if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock - return - } - - t := u.typ - u.unrecognized = invalidField - u.extensions = invalidField - u.v1extensions = invalidField - u.bytesExtensions = invalidField - u.sizecache = invalidField - isOneofMessage := false - - if reflect.PtrTo(t).Implements(sizerType) { - u.hassizer = true - } - if reflect.PtrTo(t).Implements(protosizerType) { - u.hasprotosizer = true - } - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. 
- if reflect.PtrTo(t).Implements(marshalerType) { - u.hasmarshaler = true - atomic.StoreInt32(&u.initialized, 1) - return - } - - n := t.NumField() - - // deal with XXX fields first - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Tag.Get("protobuf_oneof") != "" { - isOneofMessage = true - } - if !strings.HasPrefix(f.Name, "XXX_") { - continue - } - switch f.Name { - case "XXX_sizecache": - u.sizecache = toField(&f) - case "XXX_unrecognized": - u.unrecognized = toField(&f) - case "XXX_InternalExtensions": - u.extensions = toField(&f) - u.messageset = f.Tag.Get("protobuf_messageset") == "1" - case "XXX_extensions": - if f.Type.Kind() == reflect.Map { - u.v1extensions = toField(&f) - } else { - u.bytesExtensions = toField(&f) - } - case "XXX_NoUnkeyedLiteral": - // nothing to do - default: - panic("unknown XXX field: " + f.Name) - } - n-- - } - - // get oneof implementers - var oneofImplementers []interface{} - // gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler - if isOneofMessage { - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - } - - // normal fields - fields := make([]marshalFieldInfo, n) // batch allocation - u.fields = make([]*marshalFieldInfo, 0, n) - for i, j := 0, 0; i < t.NumField(); i++ { - f := t.Field(i) - - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - field := &fields[j] - j++ - field.name = f.Name - u.fields = append(u.fields, field) - if f.Tag.Get("protobuf_oneof") != "" { - field.computeOneofFieldInfo(&f, oneofImplementers) - continue - } - if f.Tag.Get("protobuf") == "" { - // field has no tag (not in generated message), ignore it - u.fields = u.fields[:len(u.fields)-1] - j-- - continue - } - field.computeMarshalFieldInfo(&f) - } - - // fields are marshaled in tag order on the wire. - sort.Sort(byTag(u.fields)) - - atomic.StoreInt32(&u.initialized, 1) -} - -// helper for sorting fields by tag -type byTag []*marshalFieldInfo - -func (a byTag) Len() int { return len(a) } -func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } - -// getExtElemInfo returns the information to marshal an extension element. -// The info it returns is initialized. -func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { - // get from cache first - u.RLock() - e, ok := u.extElems[desc.Field] - u.RUnlock() - if ok { - return e - } - - t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct - tags := strings.Split(desc.Tag, ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizr, marshalr := typeMarshaler(t, tags, false, false) - e = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizr, - marshaler: marshalr, - isptr: t.Kind() == reflect.Ptr, - } - - // update cache - u.Lock() - if u.extElems == nil { - u.extElems = make(map[int32]*marshalElemInfo) - } - u.extElems[desc.Field] = e - u.Unlock() - return e -} - -// computeMarshalFieldInfo fills up the information to marshal a field. -func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { - // parse protobuf tag of the field. - // tag has format of "bytes,49,opt,name=foo,def=hello!" 
- tags := strings.Split(f.Tag.Get("protobuf"), ",") - if tags[0] == "" { - return - } - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if tags[2] == "req" { - fi.required = true - } - fi.setTag(f, tag, wt) - fi.setMarshaler(f, tags) -} - -func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { - fi.field = toField(f) - fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. - fi.isPointer = true - fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) - fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) - - ityp := f.Type // interface type - for _, o := range oneofImplementers { - t := reflect.TypeOf(o) - if !t.Implements(ityp) { - continue - } - sf := t.Elem().Field(0) // oneof implementer is a struct with a single field - tags := strings.Split(sf.Tag.Get("protobuf"), ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizr, marshalr := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value - fi.oneofElems[t.Elem()] = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizr, - marshaler: marshalr, - } - } -} - -// wiretype returns the wire encoding of the type. -func wiretype(encoding string) uint64 { - switch encoding { - case "fixed32": - return WireFixed32 - case "fixed64": - return WireFixed64 - case "varint", "zigzag32", "zigzag64": - return WireVarint - case "bytes": - return WireBytes - case "group": - return WireStartGroup - } - panic("unknown wire type " + encoding) -} - -// setTag fills up the tag (in wire format) and its size in the info of a field. -func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { - fi.field = toField(f) - fi.wiretag = uint64(tag)<<3 | wt - fi.tagsize = SizeVarint(uint64(tag) << 3) -} - -// setMarshaler fills up the sizer and marshaler in the info of a field. -func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { - switch f.Type.Kind() { - case reflect.Map: - // map field - fi.isPointer = true - fi.sizer, fi.marshaler = makeMapMarshaler(f) - return - case reflect.Ptr, reflect.Slice: - fi.isPointer = true - } - fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) -} - -// typeMarshaler returns the sizer and marshaler of a given field. -// t is the type of the field. -// tags is the generated "protobuf" tag of the field. -// If nozero is true, zero value is not marshaled to the wire. -// If oneof is true, it is a oneof field. 
-func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { - encoding := tags[0] - - pointer := false - slice := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - packed := false - proto3 := false - ctype := false - isTime := false - isDuration := false - isWktPointer := false - validateUTF8 := true - for i := 2; i < len(tags); i++ { - if tags[i] == "packed" { - packed = true - } - if tags[i] == "proto3" { - proto3 = true - } - if strings.HasPrefix(tags[i], "customtype=") { - ctype = true - } - if tags[i] == "stdtime" { - isTime = true - } - if tags[i] == "stdduration" { - isDuration = true - } - if tags[i] == "wktptr" { - isWktPointer = true - } - } - validateUTF8 = validateUTF8 && proto3 - if !proto3 && !pointer && !slice { - nozero = false - } - - if ctype { - if reflect.PtrTo(t).Implements(customType) { - if slice { - return makeMessageRefSliceMarshaler(getMarshalInfo(t)) - } - if pointer { - return makeCustomPtrMarshaler(getMarshalInfo(t)) - } - return makeCustomMarshaler(getMarshalInfo(t)) - } else { - panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) - } - } - - if isTime { - if pointer { - if slice { - return makeTimePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeTimePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeTimeSliceMarshaler(getMarshalInfo(t)) - } - return makeTimeMarshaler(getMarshalInfo(t)) - } - - if isDuration { - if pointer { - if slice { - return makeDurationPtrSliceMarshaler(getMarshalInfo(t)) - } - return makeDurationPtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeDurationSliceMarshaler(getMarshalInfo(t)) - } - return makeDurationMarshaler(getMarshalInfo(t)) - } - - if isWktPointer { - switch t.Kind() { - case reflect.Float64: - if pointer { - if slice { - return makeStdDoubleValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdDoubleValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdDoubleValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdDoubleValueMarshaler(getMarshalInfo(t)) - case reflect.Float32: - if pointer { - if slice { - return makeStdFloatValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdFloatValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdFloatValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdFloatValueMarshaler(getMarshalInfo(t)) - case reflect.Int64: - if pointer { - if slice { - return makeStdInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt64ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdInt64ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt64ValueMarshaler(getMarshalInfo(t)) - case reflect.Uint64: - if pointer { - if slice { - return makeStdUInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt64ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdUInt64ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt64ValueMarshaler(getMarshalInfo(t)) - case reflect.Int32: - if pointer { - if slice { - return makeStdInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt32ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdInt32ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt32ValueMarshaler(getMarshalInfo(t)) - case reflect.Uint32: - if pointer { - if slice { - return 
makeStdUInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt32ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdUInt32ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt32ValueMarshaler(getMarshalInfo(t)) - case reflect.Bool: - if pointer { - if slice { - return makeStdBoolValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBoolValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdBoolValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBoolValueMarshaler(getMarshalInfo(t)) - case reflect.String: - if pointer { - if slice { - return makeStdStringValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdStringValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdStringValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdStringValueMarshaler(getMarshalInfo(t)) - case uint8SliceType: - if pointer { - if slice { - return makeStdBytesValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBytesValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdBytesValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBytesValueMarshaler(getMarshalInfo(t)) - default: - panic(fmt.Sprintf("unknown wktpointer type %#v", t)) - } - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return sizeBoolPtr, appendBoolPtr - } - if slice { - if packed { - return sizeBoolPackedSlice, appendBoolPackedSlice - } - return sizeBoolSlice, appendBoolSlice - } - if nozero { - return sizeBoolValueNoZero, appendBoolValueNoZero - } - return sizeBoolValue, appendBoolValue - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixed32Ptr, appendFixed32Ptr - } - if slice { - if packed { - return sizeFixed32PackedSlice, appendFixed32PackedSlice - } - return sizeFixed32Slice, appendFixed32Slice - } - if nozero { - return sizeFixed32ValueNoZero, appendFixed32ValueNoZero - } - return sizeFixed32Value, appendFixed32Value - case "varint": - if pointer { - return sizeVarint32Ptr, appendVarint32Ptr - } - if slice { - if packed { - return sizeVarint32PackedSlice, appendVarint32PackedSlice - } - return sizeVarint32Slice, appendVarint32Slice - } - if nozero { - return sizeVarint32ValueNoZero, appendVarint32ValueNoZero - } - return sizeVarint32Value, appendVarint32Value - } - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixedS32Ptr, appendFixedS32Ptr - } - if slice { - if packed { - return sizeFixedS32PackedSlice, appendFixedS32PackedSlice - } - return sizeFixedS32Slice, appendFixedS32Slice - } - if nozero { - return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero - } - return sizeFixedS32Value, appendFixedS32Value - case "varint": - if pointer { - return sizeVarintS32Ptr, appendVarintS32Ptr - } - if slice { - if packed { - return sizeVarintS32PackedSlice, appendVarintS32PackedSlice - } - return sizeVarintS32Slice, appendVarintS32Slice - } - if nozero { - return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero - } - return sizeVarintS32Value, appendVarintS32Value - case "zigzag32": - if pointer { - return sizeZigzag32Ptr, appendZigzag32Ptr - } - if slice { - if packed { - return sizeZigzag32PackedSlice, appendZigzag32PackedSlice - } - return sizeZigzag32Slice, appendZigzag32Slice - } - if nozero { - return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero - } - return sizeZigzag32Value, appendZigzag32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixed64Ptr, 
appendFixed64Ptr - } - if slice { - if packed { - return sizeFixed64PackedSlice, appendFixed64PackedSlice - } - return sizeFixed64Slice, appendFixed64Slice - } - if nozero { - return sizeFixed64ValueNoZero, appendFixed64ValueNoZero - } - return sizeFixed64Value, appendFixed64Value - case "varint": - if pointer { - return sizeVarint64Ptr, appendVarint64Ptr - } - if slice { - if packed { - return sizeVarint64PackedSlice, appendVarint64PackedSlice - } - return sizeVarint64Slice, appendVarint64Slice - } - if nozero { - return sizeVarint64ValueNoZero, appendVarint64ValueNoZero - } - return sizeVarint64Value, appendVarint64Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixedS64Ptr, appendFixedS64Ptr - } - if slice { - if packed { - return sizeFixedS64PackedSlice, appendFixedS64PackedSlice - } - return sizeFixedS64Slice, appendFixedS64Slice - } - if nozero { - return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero - } - return sizeFixedS64Value, appendFixedS64Value - case "varint": - if pointer { - return sizeVarintS64Ptr, appendVarintS64Ptr - } - if slice { - if packed { - return sizeVarintS64PackedSlice, appendVarintS64PackedSlice - } - return sizeVarintS64Slice, appendVarintS64Slice - } - if nozero { - return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero - } - return sizeVarintS64Value, appendVarintS64Value - case "zigzag64": - if pointer { - return sizeZigzag64Ptr, appendZigzag64Ptr - } - if slice { - if packed { - return sizeZigzag64PackedSlice, appendZigzag64PackedSlice - } - return sizeZigzag64Slice, appendZigzag64Slice - } - if nozero { - return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero - } - return sizeZigzag64Value, appendZigzag64Value - } - case reflect.Float32: - if pointer { - return sizeFloat32Ptr, appendFloat32Ptr - } - if slice { - if packed { - return sizeFloat32PackedSlice, appendFloat32PackedSlice - } - return sizeFloat32Slice, appendFloat32Slice - } - if nozero { - return sizeFloat32ValueNoZero, appendFloat32ValueNoZero - } - return sizeFloat32Value, appendFloat32Value - case reflect.Float64: - if pointer { - return sizeFloat64Ptr, appendFloat64Ptr - } - if slice { - if packed { - return sizeFloat64PackedSlice, appendFloat64PackedSlice - } - return sizeFloat64Slice, appendFloat64Slice - } - if nozero { - return sizeFloat64ValueNoZero, appendFloat64ValueNoZero - } - return sizeFloat64Value, appendFloat64Value - case reflect.String: - if validateUTF8 { - if pointer { - return sizeStringPtr, appendUTF8StringPtr - } - if slice { - return sizeStringSlice, appendUTF8StringSlice - } - if nozero { - return sizeStringValueNoZero, appendUTF8StringValueNoZero - } - return sizeStringValue, appendUTF8StringValue - } - if pointer { - return sizeStringPtr, appendStringPtr - } - if slice { - return sizeStringSlice, appendStringSlice - } - if nozero { - return sizeStringValueNoZero, appendStringValueNoZero - } - return sizeStringValue, appendStringValue - case reflect.Slice: - if slice { - return sizeBytesSlice, appendBytesSlice - } - if oneof { - // Oneof bytes field may also have "proto3" tag. - // We want to marshal it as a oneof field. Do this - // check before the proto3 check. 
- return sizeBytesOneof, appendBytesOneof - } - if proto3 { - return sizeBytes3, appendBytes3 - } - return sizeBytes, appendBytes - case reflect.Struct: - switch encoding { - case "group": - if slice { - return makeGroupSliceMarshaler(getMarshalInfo(t)) - } - return makeGroupMarshaler(getMarshalInfo(t)) - case "bytes": - if pointer { - if slice { - return makeMessageSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageMarshaler(getMarshalInfo(t)) - } else { - if slice { - return makeMessageRefSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageRefMarshaler(getMarshalInfo(t)) - } - } - } - panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) -} - -// Below are functions to size/marshal a specific type of a field. -// They are stored in the field's info, and called by function pointers. -// They have type sizer or marshaler. - -func sizeFixed32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixedS32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFloat32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - return (4 + tagsize) * len(s) -} -func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixed64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFixedS64Value(_ pointer, tagsize 
int) int { - return 8 + tagsize -} -func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFloat64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - return (8 + tagsize) * len(s) -} -func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeVarint32Value(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarint32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarint64Value(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - return SizeVarint(v) + tagsize -} -func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return SizeVarint(v) + tagsize -} -func sizeVarint64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return SizeVarint(*p) + tagsize -} -func sizeVarint64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(v) + tagsize - } - return 
n -} -func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize - } - return n -} -func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize - } - return n -} -func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeBoolValue(_ pointer, tagsize int) int { - return 1 + tagsize -} -func sizeBoolValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toBool() - if !v { - return 0 - } - return 1 + tagsize -} -func sizeBoolPtr(ptr pointer, tagsize int) int { - p := *ptr.toBoolPtr() - if p == nil { - return 0 - } - return 1 + tagsize -} -func sizeBoolSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - return (1 + tagsize) * len(s) -} 
-func sizeBoolPackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return 0 - } - return len(s) + SizeVarint(uint64(len(s))) + tagsize -} -func sizeStringValue(ptr pointer, tagsize int) int { - v := *ptr.toString() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toString() - if v == "" { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringPtr(ptr pointer, tagsize int) int { - p := *ptr.toStringPtr() - if p == nil { - return 0 - } - v := *p - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringSlice(ptr pointer, tagsize int) int { - s := *ptr.toStringSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} -func sizeBytes(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if v == nil { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytes3(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if len(v) == 0 { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesOneof(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesSlice(ptr pointer, tagsize int) int { - s := *ptr.toBytesSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} - -// appendFixed32 appends an encoded fixed32 to b. -func appendFixed32(b []byte, v uint32) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24)) - return b -} - -// appendFixed64 appends an encoded fixed64 to b. -func appendFixed64(b []byte, v uint64) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24), - byte(v>>32), - byte(v>>40), - byte(v>>48), - byte(v>>56)) - return b -} - -// appendVarint appends an encoded varint to b. -func appendVarint(b []byte, v uint64) []byte { - // TODO: make 1-byte (maybe 2-byte) case inline-able, once we - // have non-leaf inliner. 
- switch { - case v < 1<<7: - b = append(b, byte(v)) - case v < 1<<14: - b = append(b, - byte(v&0x7f|0x80), - byte(v>>7)) - case v < 1<<21: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte(v>>14)) - case v < 1<<28: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte(v>>21)) - case v < 1<<35: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte(v>>28)) - case v < 1<<42: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte(v>>35)) - case v < 1<<49: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte(v>>42)) - case v < 1<<56: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte(v>>49)) - case v < 1<<63: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte(v>>56)) - default: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte((v>>56)&0x7f|0x80), - 1) - } - return b -} - -func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, *p) - return b, nil -} -func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(*p)) - return b, nil -} -func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(*p)) - return b, nil -} -func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, *p) - return b, nil -} -func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, 
nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(*p)) - return b, nil -} -func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(*p)) - return b, nil -} -func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() 
- if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, *p) - return b, nil -} -func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - } - return b, nil -} -func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, v) - } - return b, nil -} -func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - if !v { - return b, nil - } - b = appendVarint(b, wiretag) - b = append(b, 1) - return b, nil -} - -func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toBoolPtr() - if p == nil { - 
return b, nil - } - b = appendVarint(b, wiretag) - if *p { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(len(s))) - for _, v := range s { - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - if v == "" { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toStringSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} -func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if v == "" { - return b, nil - } - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - s := *ptr.toStringSlice() - for _, v := range s { - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if v == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) 
- return b, nil -} -func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if len(v) == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBytesSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} - -// makeGroupMarshaler returns the sizer and marshaler for a group. -// u is the marshal info of the underlying message. -func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - return u.size(p) + 2*tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - var err error - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, p, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - return b, err - } -} - -// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. -// u is the marshal info of the underlying message. -func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - n += u.size(v) + 2*tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, v, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMessageMarshaler returns the sizer and marshaler for a message field. -// u is the marshal info of the message. -func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.size(p) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(p) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, p, deterministic) - } -} - -// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. -// u is the marshal info of the message. 
-func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMapMarshaler returns the sizer and marshaler for a map field. -// f is the pointer to the reflect data structure of the field. -func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { - // figure out key and value type - t := f.Type - keyType := t.Key() - valType := t.Elem() - tags := strings.Split(f.Tag.Get("protobuf"), ",") - keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") - valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") - stdOptions := false - for _, t := range tags { - if strings.HasPrefix(t, "customtype=") { - valTags = append(valTags, t) - } - if t == "stdtime" { - valTags = append(valTags, t) - stdOptions = true - } - if t == "stdduration" { - valTags = append(valTags, t) - stdOptions = true - } - if t == "wktptr" { - valTags = append(valTags, t) - } - } - keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map - valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map - keyWireTag := 1<<3 | wiretype(keyTags[0]) - valWireTag := 2<<3 | wiretype(valTags[0]) - - // We create an interface to get the addresses of the map key and value. - // If value is pointer-typed, the interface is a direct interface, the - // idata itself is the value. Otherwise, the idata is the pointer to the - // value. - // Key cannot be pointer-typed. - valIsPtr := valType.Kind() == reflect.Ptr - - // If value is a message with nested maps, calling - // valSizer in marshal may be quadratic. We should use - // cached version in marshal (but not in size). - // If value is not message type, we don't have size cache, - // but it cannot be nested either. Just use valSizer. - valCachedSizer := valSizer - if valIsPtr && !stdOptions && valType.Elem().Kind() == reflect.Struct { - u := getMarshalInfo(valType.Elem()) - valCachedSizer = func(ptr pointer, tagsize int) int { - // Same as message sizer, but use cache. 
- p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.cachedsize(p) - return siz + SizeVarint(uint64(siz)) + tagsize - } - } - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(t).Elem() // the map - n := 0 - for _, k := range m.MapKeys() { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value - siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(t).Elem() // the map - var err error - keys := m.MapKeys() - if len(keys) > 1 && deterministic { - sort.Sort(mapKeys(keys)) - } - - var nerr nonFatal - for _, k := range keys { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value - b = appendVarint(b, tag) - siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - b = appendVarint(b, uint64(siz)) - b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) - if !nerr.Merge(err) { - return b, err - } - b, err = valMarshaler(b, vaddr, valWireTag, deterministic) - if err != ErrNil && !nerr.Merge(err) { // allow nil value in map - return b, err - } - } - return b, nerr.E - } -} - -// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. -// fi is the marshal info of the field. -// f is the pointer to the reflect data structure of the field. -func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { - // Oneof field is an interface. We need to get the actual data type on the fly. - t := f.Type - return func(ptr pointer, _ int) int { - p := ptr.getInterfacePointer() - if p.isNil() { - return 0 - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - e := fi.oneofElems[telem] - return e.sizer(p, e.tagsize) - }, - func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { - p := ptr.getInterfacePointer() - if p.isNil() { - return b, nil - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { - return b, errOneofHasNil - } - e := fi.oneofElems[telem] - return e.marshaler(b, p, e.wiretag, deterministic) - } -} - -// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. -func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, ei.tagsize) - } - mu.Unlock() - return n -} - -// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. 
-func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - // Not sure this is required, but the old code does it. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// message set format is: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } - -// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field -// in message set format (above). -func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for id, e := range m { - n += 2 // start group, end group. tag = 1 (size=1) - n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - siz := len(msgWithLen) - n += siz + 1 // message, tag = 3 (size=1) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, 1) // message, tag = 3 (size=1) - } - mu.Unlock() - return n -} - -// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) -// to the end of byte slice b. -func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for id, e := range m { - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. 
- msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - if !nerr.Merge(err) { - return b, err - } - b = append(b, 1<<3|WireEndGroup) - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, id := range keys { - e := m[int32(id)] - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - b = append(b, 1<<3|WireEndGroup) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// sizeV1Extensions computes the size of encoded data for a V1-API extension field. -func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { - if m == nil { - return 0 - } - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, ei.tagsize) - } - return n -} - -// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. -func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { - if m == nil { - return b, nil - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - var err error - var nerr nonFatal - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// newMarshaler is the interface representing objects that can marshal themselves. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. 
-type newMarshaler interface { - XXX_Size() int - XXX_Marshal(b []byte, deterministic bool) ([]byte, error) -} - -// Size returns the encoded size of a protocol buffer message. -// This is the main entry point. -func Size(pb Message) int { - if m, ok := pb.(newMarshaler); ok { - return m.XXX_Size() - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, _ := m.Marshal() - return len(b) - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return 0 - } - var info InternalMessageInfo - return info.Size(pb) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, returning the data. -// This is the main entry point. -func Marshal(pb Message) ([]byte, error) { - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - b := make([]byte, 0, siz) - return m.XXX_Marshal(b, false) - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - return m.Marshal() - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return nil, ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - b := make([]byte, 0, siz) - return info.Marshal(b, pb, false) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, writing the result to the -// Buffer. -// This is an alternative entry point. It is not necessary to use -// a Buffer for most applications. -func (p *Buffer) Marshal(pb Message) error { - var err error - if p.deterministic { - if _, ok := pb.(Marshaler); ok { - return fmt.Errorf("proto: deterministic not supported by the Marshal method of %T", pb) - } - } - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - p.grow(siz) // make sure buf has enough capacity - pp := p.buf[len(p.buf) : len(p.buf) : len(p.buf)+siz] - pp, err = m.XXX_Marshal(pp, p.deterministic) - p.buf = append(p.buf, pp...) - return err - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - var b []byte - b, err = m.Marshal() - p.buf = append(p.buf, b...) - return err - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - p.grow(siz) // make sure buf has enough capacity - p.buf, err = info.Marshal(p.buf, pb, p.deterministic) - return err -} - -// grow grows the buffer's capacity, if necessary, to guarantee space for -// another n bytes. After grow(n), at least n bytes can be written to the -// buffer without another allocation. -func (p *Buffer) grow(n int) { - need := len(p.buf) + n - if need <= cap(p.buf) { - return - } - newCap := len(p.buf) * 2 - if newCap < need { - newCap = need - } - p.buf = append(make([]byte, 0, newCap), p.buf...) -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go deleted file mode 100644 index 997f57c1e..000000000 --- a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go +++ /dev/null @@ -1,388 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. 
-// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" - "time" -) - -// makeMessageRefMarshaler differs a bit from makeMessageMarshaler -// It marshal a message T instead of a *T -func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - siz := u.size(ptr) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - b = appendVarint(b, wiretag) - siz := u.cachedsize(ptr) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, ptr, deterministic) - } -} - -// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler -// It marshals a slice of messages []T instead of []*T -func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - e := elem.Interface() - v := toAddrPointer(&e, false) - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - var err, errreq error - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - e := elem.Interface() - v := toAddrPointer(&e, false) - b = appendVarint(b, wiretag) - siz := u.size(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if err != nil { - if _, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. 
- if errreq == nil { - errreq = err - } - continue - } - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - - return b, errreq - } -} - -func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) - siz := m.Size() - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) - siz := m.Size() - buf, err := m.Marshal() - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - return b, nil - } -} - -func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(u.typ).Interface().(custom) - siz := m.Size() - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(u.typ).Interface().(custom) - siz := m.Size() - buf, err := m.Marshal() - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - return b, nil - } -} - -func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return 0 - } - siz := Size(ts) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return nil, err - } - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return 0 - } - siz := Size(ts) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return nil, err - } - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(time.Time) - ts, err := timestampProto(t) - if err != nil { - return 0 - } - siz := Size(ts) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(time.Time) - ts, err := timestampProto(t) - if err != nil { - return nil, err - } - siz := Size(ts) - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return 0 - } - siz := Size(ts) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return nil, err - } - siz := Size(ts) - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) - dur := durationProto(*d) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) - dur := durationProto(*d) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(time.Duration) - dur := durationProto(d) - siz := Size(dur) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(time.Duration) - dur := durationProto(d) - siz := Size(dur) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_merge.go b/vendor/github.com/gogo/protobuf/proto/table_merge.go deleted file mode 100644 index 60dcf70d1..000000000 --- a/vendor/github.com/gogo/protobuf/proto/table_merge.go +++ /dev/null @@ -1,676 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
[... remainder of the deleted table_merge.go: the tail of its BSD license header, package proto, imports, the InternalMessageInfo.Merge entry point, the mergeInfo/mergeFieldInfo tables, getMergeInfo, mergeInfo.merge, and computeMergeInfo, which builds per-field merge functions for int32/int64/uint32/uint64/float32/float64/bool/string, byte slices, structs, maps, and oneof (interface) fields, and also merges extensions and XXX_unrecognized data ...]
diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go
deleted file mode 100644
index 937229386..000000000
--- a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go
+++ /dev/null
@@ -1,2249 +0,0 @@
[... deleted table_unmarshal.go (2,249 lines): BSD license header, package proto, imports, and the InternalMessageInfo.Unmarshal entry point, which loads cached per-type unmarshal info and dispatches each field tag to a table-driven unmarshaler ...]
[... continuation of the deleted table_unmarshal.go: the unmarshalInfo/unmarshalFieldInfo tables, computeUnmarshalInfo (including oneof and extension-range handling), the typeUnmarshaler dispatch on field kind and wire encoding, per-type unmarshalers for varint, zigzag, fixed32/fixed64, float, bool, string (with UTF-8 validation), bytes, message, group, map, and oneof fields, and the skipField, findEndGroup, and encodeVarint helpers ...]
-func decodeVarint(b []byte) (uint64, int) { - var x, y uint64 - if len(b) == 0 { - goto bad - } - x = uint64(b[0]) - if x < 0x80 { - return x, 1 - } - x -= 0x80 - - if len(b) <= 1 { - goto bad - } - y = uint64(b[1]) - x += y << 7 - if y < 0x80 { - return x, 2 - } - x -= 0x80 << 7 - - if len(b) <= 2 { - goto bad - } - y = uint64(b[2]) - x += y << 14 - if y < 0x80 { - return x, 3 - } - x -= 0x80 << 14 - - if len(b) <= 3 { - goto bad - } - y = uint64(b[3]) - x += y << 21 - if y < 0x80 { - return x, 4 - } - x -= 0x80 << 21 - - if len(b) <= 4 { - goto bad - } - y = uint64(b[4]) - x += y << 28 - if y < 0x80 { - return x, 5 - } - x -= 0x80 << 28 - - if len(b) <= 5 { - goto bad - } - y = uint64(b[5]) - x += y << 35 - if y < 0x80 { - return x, 6 - } - x -= 0x80 << 35 - - if len(b) <= 6 { - goto bad - } - y = uint64(b[6]) - x += y << 42 - if y < 0x80 { - return x, 7 - } - x -= 0x80 << 42 - - if len(b) <= 7 { - goto bad - } - y = uint64(b[7]) - x += y << 49 - if y < 0x80 { - return x, 8 - } - x -= 0x80 << 49 - - if len(b) <= 8 { - goto bad - } - y = uint64(b[8]) - x += y << 56 - if y < 0x80 { - return x, 9 - } - x -= 0x80 << 56 - - if len(b) <= 9 { - goto bad - } - y = uint64(b[9]) - x += y << 63 - if y < 2 { - return x, 10 - } - -bad: - return 0, 0 -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go deleted file mode 100644 index 00d6c7ad9..000000000 --- a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go +++ /dev/null @@ -1,385 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "io" - "reflect" -) - -func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // First read the message field to see if something is there. - // The semantics of multiple submessages are weird. 
Instead of - // the last one winning (as it is for all other fields), multiple - // submessages are merged. - v := f // gogo: changed from v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[x:], err - } -} - -func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v) - return b[x:], err - } -} - -func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.New(sub.typ)) - m := s.Interface().(custom) - if err := m.Unmarshal(b[:x]); err != nil { - return nil, err - } - return b[x:], nil - } -} - -func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := reflect.New(sub.typ) - c := m.Interface().(custom) - if err := c.Unmarshal(b[:x]); err != nil { - return nil, err - } - v := valToPointer(m) - f.appendRef(v, sub.typ) - return b[x:], nil - } -} - -func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - - m := f.asPointerTo(sub.typ).Interface().(custom) - if err := m.Unmarshal(b[:x]); err != nil { - return nil, err - } - return b[x:], nil - } -} - -func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &timestamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(t)) - return b[x:], nil - } -} - -func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 {
- return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &timestamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&t)) - return b[x:], nil - } -} - -func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &timestamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&t)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &timestamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(t)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&d)) - return b[x:], nil - } -} - -func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(d)) - return b[x:], nil - } -} - -func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&d)) -
slice.Set(newSlice) - return b[x:], nil - } -} - -func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(d)) - slice.Set(newSlice) - return b[x:], nil - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go deleted file mode 100644 index 87416afe9..000000000 --- a/vendor/github.com/gogo/protobuf/proto/text.go +++ /dev/null @@ -1,930 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. - -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" - "sync" - "time" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. 
-type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. - return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Print("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. 
-func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if name == "XXX_NoUnkeyedLiteral" { - continue - } - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if len(props.Enum) > 0 { - if err := tm.writeEnum(w, v, props); err != nil { - return err - } - } else if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
- keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.MapValProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. - if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - - if len(props.Enum) > 0 { - if err := tm.writeEnum(w, fv, props); err != nil { - return err - } - } else if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv - if pv.CanAddr() { - pv = sv.Addr() - } else { - pv = reflect.New(sv.Type()) - pv.Elem().Set(sv) - } - if _, err := extendable(pv.Interface()); err == nil { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() - -// writeAny writes an arbitrary field. 
-func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - if props != nil { - if len(props.CustomType) > 0 { - custom, ok := v.Interface().(Marshaler) - if ok { - data, err := custom.Marshal() - if err != nil { - return err - } - if err := writeString(w, string(data)); err != nil { - return err - } - return nil - } - } else if len(props.CastType) > 0 { - if _, ok := v.Interface().(interface { - String() string - }); ok { - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - _, err := fmt.Fprintf(w, "%d", v.Interface()) - return err - } - } - } else if props.StdTime { - t, ok := v.Interface().(time.Time) - if !ok { - return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface()) - } - tproto, err := timestampProto(t) - if err != nil { - return err - } - propsCopy := *props // Make a copy so that this is goroutine-safe - propsCopy.StdTime = false - err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy) - return err - } else if props.StdDuration { - d, ok := v.Interface().(time.Duration) - if !ok { - return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface()) - } - dproto := durationProto(d) - propsCopy := *props // Make a copy so that this is goroutine-safe - propsCopy.StdDuration = false - err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy) - return err - } - } - - // Floats have special cases. - if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. - if err := writeString(w, string(v.Bytes())); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if v.CanAddr() { - // Calling v.Interface on a struct causes the reflect package to - // copy the entire struct. This is racy with the new Marshaler - // since we atomically update the XXX_sizecache. - // - // Thus, we retrieve a pointer to the struct if possible to avoid - // a race since v.Interface on the pointer doesn't copy the struct. - // - // If v is not addressable, then we are not worried about a race - // since it implies that the binary Marshaler cannot possibly be - // mutating this value. 
- v = v.Addr() - } - if v.Type().Implements(textMarshalerType) { - text, err := v.Interface().(encoding.TextMarshaler).MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - if err := tm.writeStruct(w, v); err != nil { - return err - } - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. -func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. - switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, ferr := fmt.Fprintf(w, "/* %v */\n", err) - return ferr - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, werr := w.Write(endBraceNewline); werr != nil { - return werr - } - continue - } - if _, ferr := fmt.Fprint(w, tag); ferr != nil { - return ferr - } - if wire != WireStartGroup { - if err = w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err = w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { 
return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - e := pv.Interface().(Message) - - var m map[int32]Extension - var mu sync.Locker - if em, ok := e.(extensionsBytes); ok { - eb := em.GetExtensions() - var err error - m, err = BytesToExtensionsMap(*eb) - if err != nil { - return err - } - mu = notLocker{} - } else if _, ok := e.(extendableProto); ok { - ep, _ := extendable(e) - m, mu = ep.extensionsRead() - if m == nil { - return nil - } - } - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - - mu.Lock() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - mu.Unlock() - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(e, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. - if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. 
- v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go deleted file mode 100644 index 1d6c6aa0e..000000000 --- a/vendor/github.com/gogo/protobuf/proto/text_gogo.go +++ /dev/null @@ -1,57 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "fmt" - "reflect" -) - -func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error { - m, ok := enumStringMaps[props.Enum] - if !ok { - if err := tm.writeAny(w, v, props); err != nil { - return err - } - } - key := int32(0) - if v.Kind() == reflect.Ptr { - key = int32(v.Elem().Int()) - } else { - key = int32(v.Int()) - } - s, ok := m[key] - if !ok { - if err := tm.writeAny(w, v, props); err != nil { - return err - } - } - _, err := fmt.Fprint(w, s) - return err -} diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go deleted file mode 100644 index f85c0cc81..000000000 --- a/vendor/github.com/gogo/protobuf/proto/text_parser.go +++ /dev/null @@ -1,1018 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. 
- -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -// Error string emitted when deserializing Any and fields are already set -const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += 
len(p.cur.value) -} - -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) - s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - ss := string(r) + s[:2] - s = s[2:] - i, err := strconv.ParseUint(ss, 8, 8) - if err != nil { - return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) - } - return string([]byte{byte(i)}), s, nil - case 'x', 'X', 'u', 'U': - var n int - switch r { - case 'x', 'X': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) - } - ss := s[:n] - s = s[n:] - i, err := strconv.ParseUint(ss, 16, 64) - if err != nil { - return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) - } - if r == 'x' || r == 'X' { - return string([]byte{byte(i)}), s, nil - } - if i > utf8.MaxRune { - return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) - } - return string(rune(i)), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. 
- cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. -func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. 
- messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - if fieldSet["type_url"] { - return p.errorf(anyRepeatedlyUnpacked, "type_url") - } - if fieldSet["value"] { - return p.errorf(anyRepeatedlyUnpacked, "value") - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - fieldSet["type_url"] = true - fieldSet["value"] = true - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. - for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(Message) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - field := sv.Field(oop.Field) - if !field.IsNil() { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) - } - field.Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. 
- if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. See b/28924776 for a time - // this went wrong. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.MapKeyProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - case "value": - if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.MapValProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - default: - p.back() - return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. - if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. - fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - if p.done && tok.value != "]" { - return "", p.errorf("unclosed type_url or extension name") - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. 
-func (p *textParser) consumeOptionalSeparator() error {
-	tok := p.next()
-	if tok.err != nil {
-		return tok.err
-	}
-	if tok.value != ";" && tok.value != "," {
-		p.back()
-	}
-	return nil
-}
-
-func (p *textParser) readAny(v reflect.Value, props *Properties) error {
-	tok := p.next()
-	if tok.err != nil {
-		return tok.err
-	}
-	if tok.value == "" {
-		return p.errorf("unexpected EOF")
-	}
-	if len(props.CustomType) > 0 {
-		if props.Repeated {
-			t := reflect.TypeOf(v.Interface())
-			if t.Kind() == reflect.Slice {
-				tc := reflect.TypeOf(new(Marshaler))
-				ok := t.Elem().Implements(tc.Elem())
-				if ok {
-					fv := v
-					flen := fv.Len()
-					if flen == fv.Cap() {
-						nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1)
-						reflect.Copy(nav, fv)
-						fv.Set(nav)
-					}
-					fv.SetLen(flen + 1)
-
-					// Read one.
-					p.back()
-					return p.readAny(fv.Index(flen), props)
-				}
-			}
-		}
-		if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
-			custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler)
-			err := custom.Unmarshal([]byte(tok.unquoted))
-			if err != nil {
-				return p.errorf("%v %v: %v", err, v.Type(), tok.value)
-			}
-			v.Set(reflect.ValueOf(custom))
-		} else {
-			custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler)
-			err := custom.Unmarshal([]byte(tok.unquoted))
-			if err != nil {
-				return p.errorf("%v %v: %v", err, v.Type(), tok.value)
-			}
-			v.Set(reflect.Indirect(reflect.ValueOf(custom)))
-		}
-		return nil
-	}
-	if props.StdTime {
-		fv := v
-		p.back()
-		props.StdTime = false
-		tproto := &timestamp{}
-		err := p.readAny(reflect.ValueOf(tproto).Elem(), props)
-		props.StdTime = true
-		if err != nil {
-			return err
-		}
-		tim, err := timestampFromProto(tproto)
-		if err != nil {
-			return err
-		}
-		if props.Repeated {
-			t := reflect.TypeOf(v.Interface())
-			if t.Kind() == reflect.Slice {
-				if t.Elem().Kind() == reflect.Ptr {
-					ts := fv.Interface().([]*time.Time)
-					ts = append(ts, &tim)
-					fv.Set(reflect.ValueOf(ts))
-					return nil
-				} else {
-					ts := fv.Interface().([]time.Time)
-					ts = append(ts, tim)
-					fv.Set(reflect.ValueOf(ts))
-					return nil
-				}
-			}
-		}
-		if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
-			v.Set(reflect.ValueOf(&tim))
-		} else {
-			v.Set(reflect.Indirect(reflect.ValueOf(&tim)))
-		}
-		return nil
-	}
-	if props.StdDuration {
-		fv := v
-		p.back()
-		props.StdDuration = false
-		dproto := &duration{}
-		err := p.readAny(reflect.ValueOf(dproto).Elem(), props)
-		props.StdDuration = true
-		if err != nil {
-			return err
-		}
-		dur, err := durationFromProto(dproto)
-		if err != nil {
-			return err
-		}
-		if props.Repeated {
-			t := reflect.TypeOf(v.Interface())
-			if t.Kind() == reflect.Slice {
-				if t.Elem().Kind() == reflect.Ptr {
-					ds := fv.Interface().([]*time.Duration)
-					ds = append(ds, &dur)
-					fv.Set(reflect.ValueOf(ds))
-					return nil
-				} else {
-					ds := fv.Interface().([]time.Duration)
-					ds = append(ds, dur)
-					fv.Set(reflect.ValueOf(ds))
-					return nil
-				}
-			}
-		}
-		if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
-			v.Set(reflect.ValueOf(&dur))
-		} else {
-			v.Set(reflect.Indirect(reflect.ValueOf(&dur)))
-		}
-		return nil
-	}
-	switch fv := v; fv.Kind() {
-	case reflect.Slice:
-		at := v.Type()
-		if at.Elem().Kind() == reflect.Uint8 {
-			// Special case for []byte
-			if tok.value[0] != '"' && tok.value[0] != '\'' {
-				// Deliberately written out here, as the error after
-				// this switch statement would write "invalid []byte: ...",
-				// which is not as user-friendly.
- return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - ntok := p.next() - if ntok.err != nil { - return ntok.err - } - if ntok.value == "]" { - break - } - if ntok.value != "," { - return p.errorf("Expected ']' or ',' found %q", ntok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // true/1/t/True or false/f/0/False. - switch tok.value { - case "true", "1", "t", "True": - fv.SetBool(true) - return nil - case "false", "0", "f", "False": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". - if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int8: - if x, err := strconv.ParseInt(tok.value, 0, 8); err == nil { - fv.SetInt(x) - return nil - } - case reflect.Int16: - if x, err := strconv.ParseInt(tok.value, 0, 16); err == nil { - fv.SetInt(x) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint8: - if x, err := strconv.ParseUint(tok.value, 0, 8); err == nil { - fv.SetUint(x) - return nil - } - case reflect.Uint16: - if x, err := strconv.ParseUint(tok.value, 0, 16); err == nil { - fv.SetUint(x) - return nil - } - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. 
-func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - return um.UnmarshalText([]byte(s)) - } - pb.Reset() - v := reflect.ValueOf(pb) - return newTextParser(s).readStruct(v.Elem(), "") -} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go deleted file mode 100644 index 9324f6542..000000000 --- a/vendor/github.com/gogo/protobuf/proto/timestamp.go +++ /dev/null @@ -1,113 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// This file implements operations on google.protobuf.Timestamp. - -import ( - "errors" - "fmt" - "time" -) - -const ( - // Seconds field of the earliest valid Timestamp. - // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - minValidSeconds = -62135596800 - // Seconds field just after the latest valid Timestamp. - // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - maxValidSeconds = 253402300800 -) - -// validateTimestamp determines whether a Timestamp is valid. -// A valid timestamp represents a time in the range -// [0001-01-01, 10000-01-01) and has a Nanos field -// in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes -// the problem. -// -// Every valid Timestamp can be represented by a time.Time, but the converse is not true. 
-func validateTimestamp(ts *timestamp) error {
-	if ts == nil {
-		return errors.New("timestamp: nil Timestamp")
-	}
-	if ts.Seconds < minValidSeconds {
-		return fmt.Errorf("timestamp: %#v before 0001-01-01", ts)
-	}
-	if ts.Seconds >= maxValidSeconds {
-		return fmt.Errorf("timestamp: %#v after 10000-01-01", ts)
-	}
-	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
-		return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts)
-	}
-	return nil
-}
-
-// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time.
-// It returns an error if the argument is invalid.
-//
-// Unlike most Go functions, if Timestamp returns an error, the first return value
-// is not the zero time.Time. Instead, it is the value obtained from the
-// time.Unix function when passed the contents of the Timestamp, in the UTC
-// locale. This may or may not be a meaningful time; many invalid Timestamps
-// do map to valid time.Times.
-//
-// A nil Timestamp returns an error. The first return value in that case is
-// undefined.
-func timestampFromProto(ts *timestamp) (time.Time, error) {
-	// Don't return the zero value on error, because corresponds to a valid
-	// timestamp. Instead return whatever time.Unix gives us.
-	var t time.Time
-	if ts == nil {
-		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
-	} else {
-		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
-	}
-	return t, validateTimestamp(ts)
-}
-
-// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
-// It returns an error if the resulting Timestamp is invalid.
-func timestampProto(t time.Time) (*timestamp, error) {
-	seconds := t.Unix()
-	nanos := int32(t.Sub(time.Unix(seconds, 0)))
-	ts := &timestamp{
-		Seconds: seconds,
-		Nanos:   nanos,
-	}
-	if err := validateTimestamp(ts); err != nil {
-		return nil, err
-	}
-	return ts, nil
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
deleted file mode 100644
index 38439fa99..000000000
--- a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2016, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" - "time" -) - -var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() - -type timestamp struct { - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` -} - -func (m *timestamp) Reset() { *m = timestamp{} } -func (*timestamp) ProtoMessage() {} -func (*timestamp) String() string { return "timestamp" } - -func init() { - RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp") -} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers.go b/vendor/github.com/gogo/protobuf/proto/wrappers.go deleted file mode 100644 index b175d1b64..000000000 --- a/vendor/github.com/gogo/protobuf/proto/wrappers.go +++ /dev/null @@ -1,1888 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "io" - "reflect" -) - -func makeStdDoubleValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*float64) - v := &float64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdDoubleValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) - v := &float64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdDoubleValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float64) - v := &float64Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float64) - v := &float64Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdDoubleValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdDoubleValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdDoubleValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdDoubleValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdDoubleValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdFloatValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*float32) - v := &float32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdFloatValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) - v := &float32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdFloatValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float32) - v := &float32Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float32) - v := &float32Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdFloatValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdFloatValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdFloatValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdFloatValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdFloatValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*int64) - v := &int64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) - v := &int64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int64) - v := &int64Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int64) - v := &int64Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*uint64) - v := &uint64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdUInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) - v := &uint64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdUInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint64) - v := &uint64Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint64) - v := &uint64Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdUInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdUInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdUInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdUInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*int32) - v := &int32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) - v := &int32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int32) - v := &int32Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int32) - v := &int32Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*uint32) - v := &uint32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdUInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) - v := &uint32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdUInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint32) - v := &uint32Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint32) - v := &uint32Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdUInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdUInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdUInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdUInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBoolValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*bool) - v := &boolValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdBoolValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) - v := &boolValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdBoolValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(bool) - v := &boolValue{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(bool) - v := &boolValue{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdBoolValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdBoolValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdBoolValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdBoolValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBoolValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdStringValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*string) - v := &stringValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdStringValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) - v := &stringValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdStringValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(string) - v := &stringValue{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(string) - v := &stringValue{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdStringValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdStringValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdStringValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdStringValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdStringValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBytesValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*[]byte) - v := &bytesValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdBytesValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) - v := &bytesValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdBytesValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().([]byte) - v := &bytesValue{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().([]byte) - v := &bytesValue{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdBytesValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdBytesValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdBytesValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdBytesValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBytesValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go deleted file mode 100644 index c1cf7bf85..000000000 --- a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go +++ /dev/null @@ -1,113 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -type float64Value struct { - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *float64Value) Reset() { *m = float64Value{} } -func (*float64Value) ProtoMessage() {} -func (*float64Value) String() string { return "float64" } - -type float32Value struct { - Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *float32Value) Reset() { *m = float32Value{} } -func (*float32Value) ProtoMessage() {} -func (*float32Value) String() string { return "float32" } - -type int64Value struct { - Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *int64Value) Reset() { *m = int64Value{} } -func (*int64Value) ProtoMessage() {} -func (*int64Value) String() string { return "int64" } - -type uint64Value struct { - Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *uint64Value) Reset() { *m = uint64Value{} } -func (*uint64Value) ProtoMessage() {} -func (*uint64Value) String() string { return "uint64" } - -type int32Value struct { - Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *int32Value) Reset() { *m = int32Value{} } -func (*int32Value) ProtoMessage() {} -func (*int32Value) String() string { return "int32" } - -type uint32Value struct { - Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *uint32Value) Reset() { *m = uint32Value{} } -func (*uint32Value) ProtoMessage() {} -func (*uint32Value) String() string { return "uint32" } - -type boolValue struct { - Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *boolValue) Reset() { *m = boolValue{} } -func (*boolValue) ProtoMessage() {} -func (*boolValue) String() string { return "bool" } - -type stringValue struct { - Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *stringValue) Reset() { *m = stringValue{} } -func (*stringValue) ProtoMessage() {} -func (*stringValue) String() string { return "string" } - -type bytesValue struct { - Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *bytesValue) Reset() { *m = bytesValue{} } -func (*bytesValue) ProtoMessage() {} -func (*bytesValue) String() string { return "[]byte" } - -func init() { - RegisterType((*float64Value)(nil), "gogo.protobuf.proto.DoubleValue") - RegisterType((*float32Value)(nil), "gogo.protobuf.proto.FloatValue") - RegisterType((*int64Value)(nil), "gogo.protobuf.proto.Int64Value") - RegisterType((*uint64Value)(nil), "gogo.protobuf.proto.UInt64Value") - RegisterType((*int32Value)(nil), "gogo.protobuf.proto.Int32Value") - RegisterType((*uint32Value)(nil), "gogo.protobuf.proto.UInt32Value") - RegisterType((*boolValue)(nil), "gogo.protobuf.proto.BoolValue") - RegisterType((*stringValue)(nil), "gogo.protobuf.proto.StringValue") - RegisterType((*bytesValue)(nil), 
"gogo.protobuf.proto.BytesValue") -} diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/vendor/github.com/golang/protobuf/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/vendor/github.com/golang/protobuf/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE deleted file mode 100644 index 0f646931a..000000000 --- a/vendor/github.com/golang/protobuf/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright 2010 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/golang/protobuf/proto/buffer.go b/vendor/github.com/golang/protobuf/proto/buffer.go deleted file mode 100644 index e810e6fea..000000000 --- a/vendor/github.com/golang/protobuf/proto/buffer.go +++ /dev/null @@ -1,324 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "errors" - "fmt" - - "google.golang.org/protobuf/encoding/prototext" - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - WireVarint = 0 - WireFixed32 = 5 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 -) - -// EncodeVarint returns the varint encoded bytes of v. -func EncodeVarint(v uint64) []byte { - return protowire.AppendVarint(nil, v) -} - -// SizeVarint returns the length of the varint encoded bytes of v. 
-// This is equal to len(EncodeVarint(v)). -func SizeVarint(v uint64) int { - return protowire.SizeVarint(v) -} - -// DecodeVarint parses a varint encoded integer from b, -// returning the integer value and the length of the varint. -// It returns (0, 0) if there is a parse error. -func DecodeVarint(b []byte) (uint64, int) { - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return 0, 0 - } - return v, n -} - -// Buffer is a buffer for encoding and decoding the protobuf wire format. -// It may be reused between invocations to reduce memory usage. -type Buffer struct { - buf []byte - idx int - deterministic bool -} - -// NewBuffer allocates a new Buffer initialized with buf, -// where the contents of buf are considered the unread portion of the buffer. -func NewBuffer(buf []byte) *Buffer { - return &Buffer{buf: buf} -} - -// SetDeterministic specifies whether to use deterministic serialization. -// -// Deterministic serialization guarantees that for a given binary, equal -// messages will always be serialized to the same bytes. This implies: -// -// - Repeated serialization of a message will return the same bytes. -// - Different processes of the same binary (which may be executing on -// different machines) will serialize equal messages to the same bytes. -// -// Note that the deterministic serialization is NOT canonical across -// languages. It is not guaranteed to remain stable over time. It is unstable -// across different builds with schema changes due to unknown fields. -// Users who need canonical serialization (e.g., persistent storage in a -// canonical form, fingerprinting, etc.) should define their own -// canonicalization specification and implement their own serializer rather -// than relying on this API. -// -// If deterministic serialization is requested, map entries will be sorted -// by keys in lexographical order. This is an implementation detail and -// subject to change. -func (b *Buffer) SetDeterministic(deterministic bool) { - b.deterministic = deterministic -} - -// SetBuf sets buf as the internal buffer, -// where the contents of buf are considered the unread portion of the buffer. -func (b *Buffer) SetBuf(buf []byte) { - b.buf = buf - b.idx = 0 -} - -// Reset clears the internal buffer of all written and unread data. -func (b *Buffer) Reset() { - b.buf = b.buf[:0] - b.idx = 0 -} - -// Bytes returns the internal buffer. -func (b *Buffer) Bytes() []byte { - return b.buf -} - -// Unread returns the unread portion of the buffer. -func (b *Buffer) Unread() []byte { - return b.buf[b.idx:] -} - -// Marshal appends the wire-format encoding of m to the buffer. -func (b *Buffer) Marshal(m Message) error { - var err error - b.buf, err = marshalAppend(b.buf, m, b.deterministic) - return err -} - -// Unmarshal parses the wire-format message in the buffer and -// places the decoded results in m. -// It does not reset m before unmarshaling. -func (b *Buffer) Unmarshal(m Message) error { - err := UnmarshalMerge(b.Unread(), m) - b.idx = len(b.buf) - return err -} - -type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields } - -func (m *unknownFields) String() string { panic("not implemented") } -func (m *unknownFields) Reset() { panic("not implemented") } -func (m *unknownFields) ProtoMessage() { panic("not implemented") } - -// DebugPrint dumps the encoded bytes of b with a header and footer including s -// to stdout. This is only intended for debugging. 
-func (*Buffer) DebugPrint(s string, b []byte) { - m := MessageReflect(new(unknownFields)) - m.SetUnknown(b) - b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface()) - fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s) -} - -// EncodeVarint appends an unsigned varint encoding to the buffer. -func (b *Buffer) EncodeVarint(v uint64) error { - b.buf = protowire.AppendVarint(b.buf, v) - return nil -} - -// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer. -func (b *Buffer) EncodeZigzag32(v uint64) error { - return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) -} - -// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer. -func (b *Buffer) EncodeZigzag64(v uint64) error { - return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63)))) -} - -// EncodeFixed32 appends a 32-bit little-endian integer to the buffer. -func (b *Buffer) EncodeFixed32(v uint64) error { - b.buf = protowire.AppendFixed32(b.buf, uint32(v)) - return nil -} - -// EncodeFixed64 appends a 64-bit little-endian integer to the buffer. -func (b *Buffer) EncodeFixed64(v uint64) error { - b.buf = protowire.AppendFixed64(b.buf, uint64(v)) - return nil -} - -// EncodeRawBytes appends a length-prefixed raw bytes to the buffer. -func (b *Buffer) EncodeRawBytes(v []byte) error { - b.buf = protowire.AppendBytes(b.buf, v) - return nil -} - -// EncodeStringBytes appends a length-prefixed raw bytes to the buffer. -// It does not validate whether v contains valid UTF-8. -func (b *Buffer) EncodeStringBytes(v string) error { - b.buf = protowire.AppendString(b.buf, v) - return nil -} - -// EncodeMessage appends a length-prefixed encoded message to the buffer. -func (b *Buffer) EncodeMessage(m Message) error { - var err error - b.buf = protowire.AppendVarint(b.buf, uint64(Size(m))) - b.buf, err = marshalAppend(b.buf, m, b.deterministic) - return err -} - -// DecodeVarint consumes an encoded unsigned varint from the buffer. -func (b *Buffer) DecodeVarint() (uint64, error) { - v, n := protowire.ConsumeVarint(b.buf[b.idx:]) - if n < 0 { - return 0, protowire.ParseError(n) - } - b.idx += n - return uint64(v), nil -} - -// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer. -func (b *Buffer) DecodeZigzag32() (uint64, error) { - v, err := b.DecodeVarint() - if err != nil { - return 0, err - } - return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil -} - -// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer. -func (b *Buffer) DecodeZigzag64() (uint64, error) { - v, err := b.DecodeVarint() - if err != nil { - return 0, err - } - return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil -} - -// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer. -func (b *Buffer) DecodeFixed32() (uint64, error) { - v, n := protowire.ConsumeFixed32(b.buf[b.idx:]) - if n < 0 { - return 0, protowire.ParseError(n) - } - b.idx += n - return uint64(v), nil -} - -// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer. -func (b *Buffer) DecodeFixed64() (uint64, error) { - v, n := protowire.ConsumeFixed64(b.buf[b.idx:]) - if n < 0 { - return 0, protowire.ParseError(n) - } - b.idx += n - return uint64(v), nil -} - -// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer. -// If alloc is specified, it returns a copy the raw bytes -// rather than a sub-slice of the buffer. 
-func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) { - v, n := protowire.ConsumeBytes(b.buf[b.idx:]) - if n < 0 { - return nil, protowire.ParseError(n) - } - b.idx += n - if alloc { - v = append([]byte(nil), v...) - } - return v, nil -} - -// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer. -// It does not validate whether the raw bytes contain valid UTF-8. -func (b *Buffer) DecodeStringBytes() (string, error) { - v, n := protowire.ConsumeString(b.buf[b.idx:]) - if n < 0 { - return "", protowire.ParseError(n) - } - b.idx += n - return v, nil -} - -// DecodeMessage consumes a length-prefixed message from the buffer. -// It does not reset m before unmarshaling. -func (b *Buffer) DecodeMessage(m Message) error { - v, err := b.DecodeRawBytes(false) - if err != nil { - return err - } - return UnmarshalMerge(v, m) -} - -// DecodeGroup consumes a message group from the buffer. -// It assumes that the start group marker has already been consumed and -// consumes all bytes until (and including the end group marker). -// It does not reset m before unmarshaling. -func (b *Buffer) DecodeGroup(m Message) error { - v, n, err := consumeGroup(b.buf[b.idx:]) - if err != nil { - return err - } - b.idx += n - return UnmarshalMerge(v, m) -} - -// consumeGroup parses b until it finds an end group marker, returning -// the raw bytes of the message (excluding the end group marker) and the -// the total length of the message (including the end group marker). -func consumeGroup(b []byte) ([]byte, int, error) { - b0 := b - depth := 1 // assume this follows a start group marker - for { - _, wtyp, tagLen := protowire.ConsumeTag(b) - if tagLen < 0 { - return nil, 0, protowire.ParseError(tagLen) - } - b = b[tagLen:] - - var valLen int - switch wtyp { - case protowire.VarintType: - _, valLen = protowire.ConsumeVarint(b) - case protowire.Fixed32Type: - _, valLen = protowire.ConsumeFixed32(b) - case protowire.Fixed64Type: - _, valLen = protowire.ConsumeFixed64(b) - case protowire.BytesType: - _, valLen = protowire.ConsumeBytes(b) - case protowire.StartGroupType: - depth++ - case protowire.EndGroupType: - depth-- - default: - return nil, 0, errors.New("proto: cannot parse reserved wire type") - } - if valLen < 0 { - return nil, 0, protowire.ParseError(valLen) - } - b = b[valLen:] - - if depth == 0 { - return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil - } - } -} diff --git a/vendor/github.com/golang/protobuf/proto/defaults.go b/vendor/github.com/golang/protobuf/proto/defaults.go deleted file mode 100644 index d399bf069..000000000 --- a/vendor/github.com/golang/protobuf/proto/defaults.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "google.golang.org/protobuf/reflect/protoreflect" -) - -// SetDefaults sets unpopulated scalar fields to their default values. -// Fields within a oneof are not set even if they have a default value. -// SetDefaults is recursively called upon any populated message fields. 
-func SetDefaults(m Message) { - if m != nil { - setDefaults(MessageReflect(m)) - } -} - -func setDefaults(m protoreflect.Message) { - fds := m.Descriptor().Fields() - for i := 0; i < fds.Len(); i++ { - fd := fds.Get(i) - if !m.Has(fd) { - if fd.HasDefault() && fd.ContainingOneof() == nil { - v := fd.Default() - if fd.Kind() == protoreflect.BytesKind { - v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes - } - m.Set(fd, v) - } - continue - } - } - - m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - switch { - // Handle singular message. - case fd.Cardinality() != protoreflect.Repeated: - if fd.Message() != nil { - setDefaults(m.Get(fd).Message()) - } - // Handle list of messages. - case fd.IsList(): - if fd.Message() != nil { - ls := m.Get(fd).List() - for i := 0; i < ls.Len(); i++ { - setDefaults(ls.Get(i).Message()) - } - } - // Handle map of messages. - case fd.IsMap(): - if fd.MapValue().Message() != nil { - ms := m.Get(fd).Map() - ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { - setDefaults(v.Message()) - return true - }) - } - } - return true - }) -} diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go deleted file mode 100644 index e8db57e09..000000000 --- a/vendor/github.com/golang/protobuf/proto/deprecated.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "encoding/json" - "errors" - "fmt" - "strconv" - - protoV2 "google.golang.org/protobuf/proto" -) - -var ( - // Deprecated: No longer returned. - ErrNil = errors.New("proto: Marshal called with nil") - - // Deprecated: No longer returned. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") - - // Deprecated: No longer returned. - ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") -) - -// Deprecated: Do not use. -type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } - -// Deprecated: Do not use. -func GetStats() Stats { return Stats{} } - -// Deprecated: Do not use. -func MarshalMessageSet(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: Do not use. -func UnmarshalMessageSet([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: Do not use. -func MarshalMessageSetJSON(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: Do not use. -func UnmarshalMessageSetJSON([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: Do not use. -func RegisterMessageSetType(Message, int32, string) {} - -// Deprecated: Do not use. -func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// Deprecated: Do not use. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. 
- var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// Deprecated: Do not use; this type existed for intenal-use only. -type InternalMessageInfo struct{} - -// Deprecated: Do not use; this method existed for intenal-use only. -func (*InternalMessageInfo) DiscardUnknown(m Message) { - DiscardUnknown(m) -} - -// Deprecated: Do not use; this method existed for intenal-use only. -func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) { - return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m)) -} - -// Deprecated: Do not use; this method existed for intenal-use only. -func (*InternalMessageInfo) Merge(dst, src Message) { - protoV2.Merge(MessageV2(dst), MessageV2(src)) -} - -// Deprecated: Do not use; this method existed for intenal-use only. -func (*InternalMessageInfo) Size(m Message) int { - return protoV2.Size(MessageV2(m)) -} - -// Deprecated: Do not use; this method existed for intenal-use only. -func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error { - return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m)) -} diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go deleted file mode 100644 index 2187e877f..000000000 --- a/vendor/github.com/golang/protobuf/proto/discard.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "google.golang.org/protobuf/reflect/protoreflect" -) - -// DiscardUnknown recursively discards all unknown fields from this message -// and all embedded messages. -// -// When unmarshaling a message with unrecognized fields, the tags and values -// of such fields are preserved in the Message. This allows a later call to -// marshal to be able to produce a message that continues to have those -// unrecognized fields. To avoid this, DiscardUnknown is used to -// explicitly clear the unknown fields after unmarshaling. -func DiscardUnknown(m Message) { - if m != nil { - discardUnknown(MessageReflect(m)) - } -} - -func discardUnknown(m protoreflect.Message) { - m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool { - switch { - // Handle singular message. - case fd.Cardinality() != protoreflect.Repeated: - if fd.Message() != nil { - discardUnknown(m.Get(fd).Message()) - } - // Handle list of messages. - case fd.IsList(): - if fd.Message() != nil { - ls := m.Get(fd).List() - for i := 0; i < ls.Len(); i++ { - discardUnknown(ls.Get(i).Message()) - } - } - // Handle map of messages. - case fd.IsMap(): - if fd.MapValue().Message() != nil { - ms := m.Get(fd).Map() - ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { - discardUnknown(v.Message()) - return true - }) - } - } - return true - }) - - // Discard unknown fields. - if len(m.GetUnknown()) > 0 { - m.SetUnknown(nil) - } -} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go deleted file mode 100644 index 42fc120c9..000000000 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ /dev/null @@ -1,356 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package proto - -import ( - "errors" - "fmt" - "reflect" - - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - "google.golang.org/protobuf/runtime/protoiface" - "google.golang.org/protobuf/runtime/protoimpl" -) - -type ( - // ExtensionDesc represents an extension descriptor and - // is used to interact with an extension field in a message. - // - // Variables of this type are generated in code by protoc-gen-go. - ExtensionDesc = protoimpl.ExtensionInfo - - // ExtensionRange represents a range of message extensions. - // Used in code generated by protoc-gen-go. - ExtensionRange = protoiface.ExtensionRangeV1 - - // Deprecated: Do not use; this is an internal type. - Extension = protoimpl.ExtensionFieldV1 - - // Deprecated: Do not use; this is an internal type. - XXX_InternalExtensions = protoimpl.ExtensionFields -) - -// ErrMissingExtension reports whether the extension was not present. -var ErrMissingExtension = errors.New("proto: missing extension") - -var errNotExtendable = errors.New("proto: not an extendable proto.Message") - -// HasExtension reports whether the extension field is present in m -// either as an explicitly populated field or as an unknown field. -func HasExtension(m Message, xt *ExtensionDesc) (has bool) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return false - } - - // Check whether any populated known field matches the field number. - xtd := xt.TypeDescriptor() - if isValidExtension(mr.Descriptor(), xtd) { - has = mr.Has(xtd) - } else { - mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { - has = int32(fd.Number()) == xt.Field - return !has - }) - } - - // Check whether any unknown field matches the field number. - for b := mr.GetUnknown(); !has && len(b) > 0; { - num, _, n := protowire.ConsumeField(b) - has = int32(num) == xt.Field - b = b[n:] - } - return has -} - -// ClearExtension removes the extension field from m -// either as an explicitly populated field or as an unknown field. -func ClearExtension(m Message, xt *ExtensionDesc) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return - } - - xtd := xt.TypeDescriptor() - if isValidExtension(mr.Descriptor(), xtd) { - mr.Clear(xtd) - } else { - mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { - if int32(fd.Number()) == xt.Field { - mr.Clear(fd) - return false - } - return true - }) - } - clearUnknown(mr, fieldNum(xt.Field)) -} - -// ClearAllExtensions clears all extensions from m. -// This includes populated fields and unknown fields in the extension range. -func ClearAllExtensions(m Message) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return - } - - mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { - if fd.IsExtension() { - mr.Clear(fd) - } - return true - }) - clearUnknown(mr, mr.Descriptor().ExtensionRanges()) -} - -// GetExtension retrieves a proto2 extended field from m. -// -// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), -// then GetExtension parses the encoded field and returns a Go value of the specified type. -// If the field is not present, then the default value is returned (if one is specified), -// otherwise ErrMissingExtension is reported. 
-// -// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil), -// then GetExtension returns the raw encoded bytes for the extension field. -func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { - return nil, errNotExtendable - } - - // Retrieve the unknown fields for this extension field. - var bo protoreflect.RawFields - for bi := mr.GetUnknown(); len(bi) > 0; { - num, _, n := protowire.ConsumeField(bi) - if int32(num) == xt.Field { - bo = append(bo, bi[:n]...) - } - bi = bi[n:] - } - - // For type incomplete descriptors, only retrieve the unknown fields. - if xt.ExtensionType == nil { - return []byte(bo), nil - } - - // If the extension field only exists as unknown fields, unmarshal it. - // This is rarely done since proto.Unmarshal eagerly unmarshals extensions. - xtd := xt.TypeDescriptor() - if !isValidExtension(mr.Descriptor(), xtd) { - return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) - } - if !mr.Has(xtd) && len(bo) > 0 { - m2 := mr.New() - if err := (proto.UnmarshalOptions{ - Resolver: extensionResolver{xt}, - }.Unmarshal(bo, m2.Interface())); err != nil { - return nil, err - } - if m2.Has(xtd) { - mr.Set(xtd, m2.Get(xtd)) - clearUnknown(mr, fieldNum(xt.Field)) - } - } - - // Check whether the message has the extension field set or a default. - var pv protoreflect.Value - switch { - case mr.Has(xtd): - pv = mr.Get(xtd) - case xtd.HasDefault(): - pv = xtd.Default() - default: - return nil, ErrMissingExtension - } - - v := xt.InterfaceOf(pv) - rv := reflect.ValueOf(v) - if isScalarKind(rv.Kind()) { - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() - } - return v, nil -} - -// extensionResolver is a custom extension resolver that stores a single -// extension type that takes precedence over the global registry. -type extensionResolver struct{ xt protoreflect.ExtensionType } - -func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { - if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field { - return r.xt, nil - } - return protoregistry.GlobalTypes.FindExtensionByName(field) -} - -func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { - if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field { - return r.xt, nil - } - return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) -} - -// GetExtensions returns a list of the extensions values present in m, -// corresponding with the provided list of extension descriptors, xts. -// If an extension is missing in m, the corresponding value is nil. -func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return nil, errNotExtendable - } - - vs := make([]interface{}, len(xts)) - for i, xt := range xts { - v, err := GetExtension(m, xt) - if err != nil { - if err == ErrMissingExtension { - continue - } - return vs, err - } - vs[i] = v - } - return vs, nil -} - -// SetExtension sets an extension field in m to the provided value. 
-func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { - return errNotExtendable - } - - rv := reflect.ValueOf(v) - if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) { - return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType) - } - if rv.Kind() == reflect.Ptr { - if rv.IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", v) - } - if isScalarKind(rv.Elem().Kind()) { - v = rv.Elem().Interface() - } - } - - xtd := xt.TypeDescriptor() - if !isValidExtension(mr.Descriptor(), xtd) { - return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) - } - mr.Set(xtd, xt.ValueOf(v)) - clearUnknown(mr, fieldNum(xt.Field)) - return nil -} - -// SetRawExtension inserts b into the unknown fields of m. -// -// Deprecated: Use Message.ProtoReflect.SetUnknown instead. -func SetRawExtension(m Message, fnum int32, b []byte) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return - } - - // Verify that the raw field is valid. - for b0 := b; len(b0) > 0; { - num, _, n := protowire.ConsumeField(b0) - if int32(num) != fnum { - panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum)) - } - b0 = b0[n:] - } - - ClearExtension(m, &ExtensionDesc{Field: fnum}) - mr.SetUnknown(append(mr.GetUnknown(), b...)) -} - -// ExtensionDescs returns a list of extension descriptors found in m, -// containing descriptors for both populated extension fields in m and -// also unknown fields of m that are in the extension range. -// For the later case, an type incomplete descriptor is provided where only -// the ExtensionDesc.Field field is populated. -// The order of the extension descriptors is undefined. -func ExtensionDescs(m Message) ([]*ExtensionDesc, error) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { - return nil, errNotExtendable - } - - // Collect a set of known extension descriptors. - extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc) - mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - if fd.IsExtension() { - xt := fd.(protoreflect.ExtensionTypeDescriptor) - if xd, ok := xt.Type().(*ExtensionDesc); ok { - extDescs[fd.Number()] = xd - } - } - return true - }) - - // Collect a set of unknown extension descriptors. - extRanges := mr.Descriptor().ExtensionRanges() - for b := mr.GetUnknown(); len(b) > 0; { - num, _, n := protowire.ConsumeField(b) - if extRanges.Has(num) && extDescs[num] == nil { - extDescs[num] = nil - } - b = b[n:] - } - - // Transpose the set of descriptors into a list. - var xts []*ExtensionDesc - for num, xt := range extDescs { - if xt == nil { - xt = &ExtensionDesc{Field: int32(num)} - } - xts = append(xts, xt) - } - return xts, nil -} - -// isValidExtension reports whether xtd is a valid extension descriptor for md. -func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool { - return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number()) -} - -// isScalarKind reports whether k is a protobuf scalar kind (except bytes). -// This function exists for historical reasons since the representation of -// scalars differs between v1 and v2, where v1 uses *T and v2 uses T. 
-func isScalarKind(k reflect.Kind) bool { - switch k { - case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - return true - default: - return false - } -} - -// clearUnknown removes unknown fields from m where remover.Has reports true. -func clearUnknown(m protoreflect.Message, remover interface { - Has(protoreflect.FieldNumber) bool -}) { - var bo protoreflect.RawFields - for bi := m.GetUnknown(); len(bi) > 0; { - num, _, n := protowire.ConsumeField(bi) - if !remover.Has(num) { - bo = append(bo, bi[:n]...) - } - bi = bi[n:] - } - if bi := m.GetUnknown(); len(bi) != len(bo) { - m.SetUnknown(bo) - } -} - -type fieldNum protoreflect.FieldNumber - -func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool { - return protoreflect.FieldNumber(n1) == n2 -} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go deleted file mode 100644 index dcdc2202f..000000000 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "fmt" - "reflect" - "strconv" - "strings" - "sync" - - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/runtime/protoimpl" -) - -// StructProperties represents protocol buffer type information for a -// generated protobuf message in the open-struct API. -// -// Deprecated: Do not use. -type StructProperties struct { - // Prop are the properties for each field. - // - // Fields belonging to a oneof are stored in OneofTypes instead, with a - // single Properties representing the parent oneof held here. - // - // The order of Prop matches the order of fields in the Go struct. - // Struct fields that are not related to protobufs have a "XXX_" prefix - // in the Properties.Name and must be ignored by the user. - Prop []*Properties - - // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the protobuf field name. - OneofTypes map[string]*OneofProperties -} - -// Properties represents the type information for a protobuf message field. -// -// Deprecated: Do not use. -type Properties struct { - // Name is a placeholder name with little meaningful semantic value. - // If the name has an "XXX_" prefix, the entire Properties must be ignored. - Name string - // OrigName is the protobuf field name or oneof name. - OrigName string - // JSONName is the JSON name for the protobuf field. - JSONName string - // Enum is a placeholder name for enums. - // For historical reasons, this is neither the Go name for the enum, - // nor the protobuf name for the enum. - Enum string // Deprecated: Do not use. - // Weak contains the full name of the weakly referenced message. - Weak string - // Wire is a string representation of the wire type. - Wire string - // WireType is the protobuf wire type for the field. - WireType int - // Tag is the protobuf field number. - Tag int - // Required reports whether this is a required field. - Required bool - // Optional reports whether this is a optional field. - Optional bool - // Repeated reports whether this is a repeated field. - Repeated bool - // Packed reports whether this is a packed repeated field of scalars. - Packed bool - // Proto3 reports whether this field operates under the proto3 syntax. 
- Proto3 bool - // Oneof reports whether this field belongs within a oneof. - Oneof bool - - // Default is the default value in string form. - Default string - // HasDefault reports whether the field has a default value. - HasDefault bool - - // MapKeyProp is the properties for the key field for a map field. - MapKeyProp *Properties - // MapValProp is the properties for the value field for a map field. - MapValProp *Properties -} - -// OneofProperties represents the type information for a protobuf oneof. -// -// Deprecated: Do not use. -type OneofProperties struct { - // Type is a pointer to the generated wrapper type for the field value. - // This is nil for messages that are not in the open-struct API. - Type reflect.Type - // Field is the index into StructProperties.Prop for the containing oneof. - Field int - // Prop is the properties for the field. - Prop *Properties -} - -// String formats the properties in the protobuf struct field tag style. -func (p *Properties) String() string { - s := p.Wire - s += "," + strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - s += ",name=" + p.OrigName - if p.JSONName != "" { - s += ",json=" + p.JSONName - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if len(p.Weak) > 0 { - s += ",weak=" + p.Weak - } - if p.Proto3 { - s += ",proto3" - } - if p.Oneof { - s += ",oneof" - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(tag string) { - // For example: "bytes,49,opt,name=foo,def=hello!" - for len(tag) > 0 { - i := strings.IndexByte(tag, ',') - if i < 0 { - i = len(tag) - } - switch s := tag[:i]; { - case strings.HasPrefix(s, "name="): - p.OrigName = s[len("name="):] - case strings.HasPrefix(s, "json="): - p.JSONName = s[len("json="):] - case strings.HasPrefix(s, "enum="): - p.Enum = s[len("enum="):] - case strings.HasPrefix(s, "weak="): - p.Weak = s[len("weak="):] - case strings.Trim(s, "0123456789") == "": - n, _ := strconv.ParseUint(s, 10, 32) - p.Tag = int(n) - case s == "opt": - p.Optional = true - case s == "req": - p.Required = true - case s == "rep": - p.Repeated = true - case s == "varint" || s == "zigzag32" || s == "zigzag64": - p.Wire = s - p.WireType = WireVarint - case s == "fixed32": - p.Wire = s - p.WireType = WireFixed32 - case s == "fixed64": - p.Wire = s - p.WireType = WireFixed64 - case s == "bytes": - p.Wire = s - p.WireType = WireBytes - case s == "group": - p.Wire = s - p.WireType = WireStartGroup - case s == "packed": - p.Packed = true - case s == "proto3": - p.Proto3 = true - case s == "oneof": - p.Oneof = true - case strings.HasPrefix(s, "def="): - // The default tag is special in that everything afterwards is the - // default regardless of the presence of commas. - p.HasDefault = true - p.Default, i = tag[len("def="):], len(tag) - } - tag = strings.TrimPrefix(tag[i:], ",") - } -} - -// Init populates the properties from a protocol buffer struct tag. -// -// Deprecated: Do not use. 
-func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.Name = name - p.OrigName = name - if tag == "" { - return - } - p.Parse(tag) - - if typ != nil && typ.Kind() == reflect.Map { - p.MapKeyProp = new(Properties) - p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil) - p.MapValProp = new(Properties) - p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil) - } -} - -var propertiesCache sync.Map // map[reflect.Type]*StructProperties - -// GetProperties returns the list of properties for the type represented by t, -// which must be a generated protocol buffer message in the open-struct API, -// where protobuf message fields are represented by exported Go struct fields. -// -// Deprecated: Use protobuf reflection instead. -func GetProperties(t reflect.Type) *StructProperties { - if p, ok := propertiesCache.Load(t); ok { - return p.(*StructProperties) - } - p, _ := propertiesCache.LoadOrStore(t, newProperties(t)) - return p.(*StructProperties) -} - -func newProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) - } - - var hasOneof bool - prop := new(StructProperties) - - // Construct a list of properties for each field in the struct. - for i := 0; i < t.NumField(); i++ { - p := new(Properties) - f := t.Field(i) - tagField := f.Tag.Get("protobuf") - p.Init(f.Type, f.Name, tagField, &f) - - tagOneof := f.Tag.Get("protobuf_oneof") - if tagOneof != "" { - hasOneof = true - p.OrigName = tagOneof - } - - // Rename unrelated struct fields with the "XXX_" prefix since so much - // user code simply checks for this to exclude special fields. - if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") { - p.Name = "XXX_" + p.Name - p.OrigName = "XXX_" + p.OrigName - } else if p.Weak != "" { - p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field - } - - prop.Prop = append(prop.Prop, p) - } - - // Construct a mapping of oneof field names to properties. - if hasOneof { - var oneofWrappers []interface{} - if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok { - oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{}) - } - if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok { - oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{}) - } - if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok { - if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok { - oneofWrappers = m.ProtoMessageInfo().OneofWrappers - } - } - - prop.OneofTypes = make(map[string]*OneofProperties) - for _, wrapper := range oneofWrappers { - p := &OneofProperties{ - Type: reflect.ValueOf(wrapper).Type(), // *T - Prop: new(Properties), - } - f := p.Type.Elem().Field(0) - p.Prop.Name = f.Name - p.Prop.Parse(f.Tag.Get("protobuf")) - - // Determine the struct field that contains this oneof. - // Each wrapper is assignable to exactly one parent field. 
- var foundOneof bool - for i := 0; i < t.NumField() && !foundOneof; i++ { - if p.Type.AssignableTo(t.Field(i).Type) { - p.Field = i - foundOneof = true - } - } - if !foundOneof { - panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) - } - prop.OneofTypes[p.Prop.OrigName] = p - } - } - - return prop -} - -func (sp *StructProperties) Len() int { return len(sp.Prop) } -func (sp *StructProperties) Less(i, j int) bool { return false } -func (sp *StructProperties) Swap(i, j int) { return } diff --git a/vendor/github.com/golang/protobuf/proto/proto.go b/vendor/github.com/golang/protobuf/proto/proto.go deleted file mode 100644 index 5aee89c32..000000000 --- a/vendor/github.com/golang/protobuf/proto/proto.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package proto provides functionality for handling protocol buffer messages. -// In particular, it provides marshaling and unmarshaling between a protobuf -// message and the binary wire format. -// -// See https://developers.google.com/protocol-buffers/docs/gotutorial for -// more information. -// -// Deprecated: Use the "google.golang.org/protobuf/proto" package instead. -package proto - -import ( - protoV2 "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/runtime/protoiface" - "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - ProtoPackageIsVersion1 = true - ProtoPackageIsVersion2 = true - ProtoPackageIsVersion3 = true - ProtoPackageIsVersion4 = true -) - -// GeneratedEnum is any enum type generated by protoc-gen-go -// which is a named int32 kind. -// This type exists for documentation purposes. -type GeneratedEnum interface{} - -// GeneratedMessage is any message type generated by protoc-gen-go -// which is a pointer to a named struct kind. -// This type exists for documentation purposes. -type GeneratedMessage interface{} - -// Message is a protocol buffer message. -// -// This is the v1 version of the message interface and is marginally better -// than an empty interface as it lacks any method to programatically interact -// with the contents of the message. -// -// A v2 message is declared in "google.golang.org/protobuf/proto".Message and -// exposes protobuf reflection as a first-class feature of the interface. -// -// To convert a v1 message to a v2 message, use the MessageV2 function. -// To convert a v2 message to a v1 message, use the MessageV1 function. -type Message = protoiface.MessageV1 - -// MessageV1 converts either a v1 or v2 message to a v1 message. -// It returns nil if m is nil. -func MessageV1(m GeneratedMessage) protoiface.MessageV1 { - return protoimpl.X.ProtoMessageV1Of(m) -} - -// MessageV2 converts either a v1 or v2 message to a v2 message. -// It returns nil if m is nil. -func MessageV2(m GeneratedMessage) protoV2.Message { - return protoimpl.X.ProtoMessageV2Of(m) -} - -// MessageReflect returns a reflective view for a message. -// It returns nil if m is nil. -func MessageReflect(m Message) protoreflect.Message { - return protoimpl.X.MessageOf(m) -} - -// Marshaler is implemented by messages that can marshal themselves. -// This interface is used by the following functions: Size, Marshal, -// Buffer.Marshal, and Buffer.EncodeMessage. -// -// Deprecated: Do not implement. -type Marshaler interface { - // Marshal formats the encoded bytes of the message. 
- // It should be deterministic and emit valid protobuf wire data. - // The caller takes ownership of the returned buffer. - Marshal() ([]byte, error) -} - -// Unmarshaler is implemented by messages that can unmarshal themselves. -// This interface is used by the following functions: Unmarshal, UnmarshalMerge, -// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup. -// -// Deprecated: Do not implement. -type Unmarshaler interface { - // Unmarshal parses the encoded bytes of the protobuf wire input. - // The provided buffer is only valid for during method call. - // It should not reset the receiver message. - Unmarshal([]byte) error -} - -// Merger is implemented by messages that can merge themselves. -// This interface is used by the following functions: Clone and Merge. -// -// Deprecated: Do not implement. -type Merger interface { - // Merge merges the contents of src into the receiver message. - // It clones all data structures in src such that it aliases no mutable - // memory referenced by src. - Merge(src Message) -} - -// RequiredNotSetError is an error type returned when -// marshaling or unmarshaling a message with missing required fields. -type RequiredNotSetError struct { - err error -} - -func (e *RequiredNotSetError) Error() string { - if e.err != nil { - return e.err.Error() - } - return "proto: required field not set" -} -func (e *RequiredNotSetError) RequiredNotSet() bool { - return true -} - -func checkRequiredNotSet(m protoV2.Message) error { - if err := protoV2.CheckInitialized(m); err != nil { - return &RequiredNotSetError{err: err} - } - return nil -} - -// Clone returns a deep copy of src. -func Clone(src Message) Message { - return MessageV1(protoV2.Clone(MessageV2(src))) -} - -// Merge merges src into dst, which must be messages of the same type. -// -// Populated scalar fields in src are copied to dst, while populated -// singular messages in src are merged into dst by recursively calling Merge. -// The elements of every list field in src is appended to the corresponded -// list fields in dst. The entries of every map field in src is copied into -// the corresponding map field in dst, possibly replacing existing entries. -// The unknown fields of src are appended to the unknown fields of dst. -func Merge(dst, src Message) { - protoV2.Merge(MessageV2(dst), MessageV2(src)) -} - -// Equal reports whether two messages are equal. -// If two messages marshal to the same bytes under deterministic serialization, -// then Equal is guaranteed to report true. -// -// Two messages are equal if they are the same protobuf message type, -// have the same set of populated known and extension field values, -// and the same set of unknown fields values. -// -// Scalar values are compared with the equivalent of the == operator in Go, -// except bytes values which are compared using bytes.Equal and -// floating point values which specially treat NaNs as equal. -// Message values are compared by recursively calling Equal. -// Lists are equal if each element value is also equal. -// Maps are equal if they have the same set of keys, where the pair of values -// for each key is also equal. 
-func Equal(x, y Message) bool { - return protoV2.Equal(MessageV2(x), MessageV2(y)) -} - -func isMessageSet(md protoreflect.MessageDescriptor) bool { - ms, ok := md.(interface{ IsMessageSet() bool }) - return ok && ms.IsMessageSet() -} diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go deleted file mode 100644 index 066b4323b..000000000 --- a/vendor/github.com/golang/protobuf/proto/registry.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "bytes" - "compress/gzip" - "fmt" - "io/ioutil" - "reflect" - "strings" - "sync" - - "google.golang.org/protobuf/reflect/protodesc" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - "google.golang.org/protobuf/runtime/protoimpl" -) - -// filePath is the path to the proto source file. -type filePath = string // e.g., "google/protobuf/descriptor.proto" - -// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto. -type fileDescGZIP = []byte - -var fileCache sync.Map // map[filePath]fileDescGZIP - -// RegisterFile is called from generated code to register the compressed -// FileDescriptorProto with the file path for a proto source file. -// -// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead. -func RegisterFile(s filePath, d fileDescGZIP) { - // Decompress the descriptor. - zr, err := gzip.NewReader(bytes.NewReader(d)) - if err != nil { - panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) - } - b, err := ioutil.ReadAll(zr) - if err != nil { - panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) - } - - // Construct a protoreflect.FileDescriptor from the raw descriptor. - // Note that DescBuilder.Build automatically registers the constructed - // file descriptor with the v2 registry. - protoimpl.DescBuilder{RawDescriptor: b}.Build() - - // Locally cache the raw descriptor form for the file. - fileCache.Store(s, d) -} - -// FileDescriptor returns the compressed FileDescriptorProto given the file path -// for a proto source file. It returns nil if not found. -// -// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead. -func FileDescriptor(s filePath) fileDescGZIP { - if v, ok := fileCache.Load(s); ok { - return v.(fileDescGZIP) - } - - // Find the descriptor in the v2 registry. - var b []byte - if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil { - b, _ = Marshal(protodesc.ToFileDescriptorProto(fd)) - } - - // Locally cache the raw descriptor form for the file. - if len(b) > 0 { - v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b)) - return v.(fileDescGZIP) - } - return nil -} - -// enumName is the name of an enum. For historical reasons, the enum name is -// neither the full Go name nor the full protobuf name of the enum. -// The name is the dot-separated combination of just the proto package that the -// enum is declared within followed by the Go type name of the generated enum. -type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum" - -// enumsByName maps enum values by name to their numeric counterpart. -type enumsByName = map[string]int32 - -// enumsByNumber maps enum values by number to their name counterpart. 
-type enumsByNumber = map[int32]string - -var enumCache sync.Map // map[enumName]enumsByName -var numFilesCache sync.Map // map[protoreflect.FullName]int - -// RegisterEnum is called from the generated code to register the mapping of -// enum value names to enum numbers for the enum identified by s. -// -// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead. -func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) { - if _, ok := enumCache.Load(s); ok { - panic("proto: duplicate enum registered: " + s) - } - enumCache.Store(s, m) - - // This does not forward registration to the v2 registry since this API - // lacks sufficient information to construct a complete v2 enum descriptor. -} - -// EnumValueMap returns the mapping from enum value names to enum numbers for -// the enum of the given name. It returns nil if not found. -// -// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead. -func EnumValueMap(s enumName) enumsByName { - if v, ok := enumCache.Load(s); ok { - return v.(enumsByName) - } - - // Check whether the cache is stale. If the number of files in the current - // package differs, then it means that some enums may have been recently - // registered upstream that we do not know about. - var protoPkg protoreflect.FullName - if i := strings.LastIndexByte(s, '.'); i >= 0 { - protoPkg = protoreflect.FullName(s[:i]) - } - v, _ := numFilesCache.Load(protoPkg) - numFiles, _ := v.(int) - if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles { - return nil // cache is up-to-date; was not found earlier - } - - // Update the enum cache for all enums declared in the given proto package. - numFiles = 0 - protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool { - walkEnums(fd, func(ed protoreflect.EnumDescriptor) { - name := protoimpl.X.LegacyEnumName(ed) - if _, ok := enumCache.Load(name); !ok { - m := make(enumsByName) - evs := ed.Values() - for i := evs.Len() - 1; i >= 0; i-- { - ev := evs.Get(i) - m[string(ev.Name())] = int32(ev.Number()) - } - enumCache.LoadOrStore(name, m) - } - }) - numFiles++ - return true - }) - numFilesCache.Store(protoPkg, numFiles) - - // Check cache again for enum map. - if v, ok := enumCache.Load(s); ok { - return v.(enumsByName) - } - return nil -} - -// walkEnums recursively walks all enums declared in d. -func walkEnums(d interface { - Enums() protoreflect.EnumDescriptors - Messages() protoreflect.MessageDescriptors -}, f func(protoreflect.EnumDescriptor)) { - eds := d.Enums() - for i := eds.Len() - 1; i >= 0; i-- { - f(eds.Get(i)) - } - mds := d.Messages() - for i := mds.Len() - 1; i >= 0; i-- { - walkEnums(mds.Get(i), f) - } -} - -// messageName is the full name of protobuf message. -type messageName = string - -var messageTypeCache sync.Map // map[messageName]reflect.Type - -// RegisterType is called from generated code to register the message Go type -// for a message of the given name. -// -// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead. -func RegisterType(m Message, s messageName) { - mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s)) - if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil { - panic(err) - } - messageTypeCache.Store(s, reflect.TypeOf(m)) -} - -// RegisterMapType is called from generated code to register the Go map type -// for a protobuf message representing a map entry. -// -// Deprecated: Do not use. 
-func RegisterMapType(m interface{}, s messageName) { - t := reflect.TypeOf(m) - if t.Kind() != reflect.Map { - panic(fmt.Sprintf("invalid map kind: %v", t)) - } - if _, ok := messageTypeCache.Load(s); ok { - panic(fmt.Errorf("proto: duplicate proto message registered: %s", s)) - } - messageTypeCache.Store(s, t) -} - -// MessageType returns the message type for a named message. -// It returns nil if not found. -// -// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead. -func MessageType(s messageName) reflect.Type { - if v, ok := messageTypeCache.Load(s); ok { - return v.(reflect.Type) - } - - // Derive the message type from the v2 registry. - var t reflect.Type - if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil { - t = messageGoType(mt) - } - - // If we could not get a concrete type, it is possible that it is a - // pseudo-message for a map entry. - if t == nil { - d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s)) - if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() { - kt := goTypeForField(md.Fields().ByNumber(1)) - vt := goTypeForField(md.Fields().ByNumber(2)) - t = reflect.MapOf(kt, vt) - } - } - - // Locally cache the message type for the given name. - if t != nil { - v, _ := messageTypeCache.LoadOrStore(s, t) - return v.(reflect.Type) - } - return nil -} - -func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type { - switch k := fd.Kind(); k { - case protoreflect.EnumKind: - if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil { - return enumGoType(et) - } - return reflect.TypeOf(protoreflect.EnumNumber(0)) - case protoreflect.MessageKind, protoreflect.GroupKind: - if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil { - return messageGoType(mt) - } - return reflect.TypeOf((*protoreflect.Message)(nil)).Elem() - default: - return reflect.TypeOf(fd.Default().Interface()) - } -} - -func enumGoType(et protoreflect.EnumType) reflect.Type { - return reflect.TypeOf(et.New(0)) -} - -func messageGoType(mt protoreflect.MessageType) reflect.Type { - return reflect.TypeOf(MessageV1(mt.Zero().Interface())) -} - -// MessageName returns the full protobuf name for the given message type. -// -// Deprecated: Use protoreflect.MessageDescriptor.FullName instead. -func MessageName(m Message) messageName { - if m == nil { - return "" - } - if m, ok := m.(interface{ XXX_MessageName() messageName }); ok { - return m.XXX_MessageName() - } - return messageName(protoimpl.X.MessageDescriptorOf(m).FullName()) -} - -// RegisterExtension is called from the generated code to register -// the extension descriptor. -// -// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead. -func RegisterExtension(d *ExtensionDesc) { - if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil { - panic(err) - } -} - -type extensionsByNumber = map[int32]*ExtensionDesc - -var extensionCache sync.Map // map[messageName]extensionsByNumber - -// RegisteredExtensions returns a map of the registered extensions for the -// provided protobuf message, indexed by the extension field number. -// -// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead. -func RegisteredExtensions(m Message) extensionsByNumber { - // Check whether the cache is stale. 
If the number of extensions for - // the given message differs, then it means that some extensions were - // recently registered upstream that we do not know about. - s := MessageName(m) - v, _ := extensionCache.Load(s) - xs, _ := v.(extensionsByNumber) - if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) { - return xs // cache is up-to-date - } - - // Cache is stale, re-compute the extensions map. - xs = make(extensionsByNumber) - protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool { - if xd, ok := xt.(*ExtensionDesc); ok { - xs[int32(xt.TypeDescriptor().Number())] = xd - } else { - // TODO: This implies that the protoreflect.ExtensionType is a - // custom type not generated by protoc-gen-go. We could try and - // convert the type to an ExtensionDesc. - } - return true - }) - extensionCache.Store(s, xs) - return xs -} diff --git a/vendor/github.com/golang/protobuf/proto/text_decode.go b/vendor/github.com/golang/protobuf/proto/text_decode.go deleted file mode 100644 index 47eb3e445..000000000 --- a/vendor/github.com/golang/protobuf/proto/text_decode.go +++ /dev/null @@ -1,801 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "unicode/utf8" - - "google.golang.org/protobuf/encoding/prototext" - protoV2 "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -const wrapTextUnmarshalV2 = false - -// ParseError is returned by UnmarshalText. -type ParseError struct { - Message string - - // Deprecated: Do not use. - Line, Offset int -} - -func (e *ParseError) Error() string { - if wrapTextUnmarshalV2 { - return e.Message - } - if e.Line == 1 { - return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message) - } - return fmt.Sprintf("line %d: %v", e.Line, e.Message) -} - -// UnmarshalText parses a proto text formatted string into m. -func UnmarshalText(s string, m Message) error { - if u, ok := m.(encoding.TextUnmarshaler); ok { - return u.UnmarshalText([]byte(s)) - } - - m.Reset() - mi := MessageV2(m) - - if wrapTextUnmarshalV2 { - err := prototext.UnmarshalOptions{ - AllowPartial: true, - }.Unmarshal([]byte(s), mi) - if err != nil { - return &ParseError{Message: err.Error()} - } - return checkRequiredNotSet(mi) - } else { - if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil { - return err - } - return checkRequiredNotSet(mi) - } -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) { - md := m.Descriptor() - fds := md.Fields() - - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. 
A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - seen := make(map[protoreflect.FieldNumber]bool) - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - if err := p.unmarshalExtensionOrAny(m, seen); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := protoreflect.Name(tok.value) - fd := fds.ByName(name) - switch { - case fd == nil: - gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name)))) - if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name { - fd = gd - } - case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name: - fd = nil - case fd.IsWeak() && fd.Message().IsPlaceholder(): - fd = nil - } - if fd == nil { - typeName := string(md.FullName()) - if m, ok := m.Interface().(Message); ok { - t := reflect.TypeOf(m) - if t.Kind() == reflect.Ptr { - typeName = t.Elem().String() - } - } - return p.errorf("unknown field name %q in %v", name, typeName) - } - if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name()) - } - if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] { - return p.errorf("non-repeated field %q was repeated", fd.Name()) - } - seen[fd.Number()] = true - - // Consume any colon. - if err := p.checkForColon(fd); err != nil { - return err - } - - // Parse into the field. - v := m.Get(fd) - if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { - v = m.Mutable(fd) - } - if v, err = p.unmarshalValue(v, fd); err != nil { - return err - } - m.Set(fd, v) - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - } - return nil -} - -func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error { - name, err := p.consumeExtensionOrAnyName() - if err != nil { - return err - } - - // If it contains a slash, it's an Any type URL. 
- if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 { - tok := p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - - mt, err := protoregistry.GlobalTypes.FindMessageByURL(name) - if err != nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):]) - } - m2 := mt.New() - if err := p.unmarshalMessage(m2, terminator); err != nil { - return err - } - b, err := protoV2.Marshal(m2.Interface()) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err) - } - - urlFD := m.Descriptor().Fields().ByName("type_url") - valFD := m.Descriptor().Fields().ByName("value") - if seen[urlFD.Number()] { - return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name()) - } - if seen[valFD.Number()] { - return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name()) - } - m.Set(urlFD, protoreflect.ValueOfString(name)) - m.Set(valFD, protoreflect.ValueOfBytes(b)) - seen[urlFD.Number()] = true - seen[valFD.Number()] = true - return nil - } - - xname := protoreflect.FullName(name) - xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) - if xt == nil && isMessageSet(m.Descriptor()) { - xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) - } - if xt == nil { - return p.errorf("unrecognized extension %q", name) - } - fd := xt.TypeDescriptor() - if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { - return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName()) - } - - if err := p.checkForColon(fd); err != nil { - return err - } - - v := m.Get(fd) - if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { - v = m.Mutable(fd) - } - v, err = p.unmarshalValue(v, fd) - if err != nil { - return err - } - m.Set(fd, v) - return p.consumeOptionalSeparator() -} - -func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { - tok := p.next() - if tok.err != nil { - return v, tok.err - } - if tok.value == "" { - return v, p.errorf("unexpected EOF") - } - - switch { - case fd.IsList(): - lv := v.List() - var err error - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - vv := lv.NewElement() - vv, err = p.unmarshalSingularValue(vv, fd) - if err != nil { - return v, err - } - lv.Append(vv) - - tok := p.next() - if tok.err != nil { - return v, tok.err - } - if tok.value == "]" { - break - } - if tok.value != "," { - return v, p.errorf("Expected ']' or ',' found %q", tok.value) - } - } - return v, nil - } - - // One value of the repeated field. - p.back() - vv := lv.NewElement() - vv, err = p.unmarshalSingularValue(vv, fd) - if err != nil { - return v, err - } - lv.Append(vv) - return v, nil - case fd.IsMap(): - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. 
- var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return v, p.errorf("expected '{' or '<', found %q", tok.value) - } - - keyFD := fd.MapKey() - valFD := fd.MapValue() - - mv := v.Map() - kv := keyFD.Default() - vv := mv.NewValue() - for { - tok := p.next() - if tok.err != nil { - return v, tok.err - } - if tok.value == terminator { - break - } - var err error - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return v, err - } - if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil { - return v, err - } - if err := p.consumeOptionalSeparator(); err != nil { - return v, err - } - case "value": - if err := p.checkForColon(valFD); err != nil { - return v, err - } - if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil { - return v, err - } - if err := p.consumeOptionalSeparator(); err != nil { - return v, err - } - default: - p.back() - return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - mv.Set(kv.MapKey(), vv) - return v, nil - default: - p.back() - return p.unmarshalSingularValue(v, fd) - } -} - -func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { - tok := p.next() - if tok.err != nil { - return v, tok.err - } - if tok.value == "" { - return v, p.errorf("unexpected EOF") - } - - switch fd.Kind() { - case protoreflect.BoolKind: - switch tok.value { - case "true", "1", "t", "True": - return protoreflect.ValueOfBool(true), nil - case "false", "0", "f", "False": - return protoreflect.ValueOfBool(false), nil - } - case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - return protoreflect.ValueOfInt32(int32(x)), nil - } - - // The C++ parser accepts large positive hex numbers that uses - // two's complement arithmetic to represent negative numbers. - // This feature is here for backwards compatibility with C++. - if strings.HasPrefix(tok.value, "0x") { - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil - } - } - case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - return protoreflect.ValueOfInt64(int64(x)), nil - } - - // The C++ parser accepts large positive hex numbers that uses - // two's complement arithmetic to represent negative numbers. - // This feature is here for backwards compatibility with C++. - if strings.HasPrefix(tok.value, "0x") { - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil - } - } - case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - return protoreflect.ValueOfUint32(uint32(x)), nil - } - case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - return protoreflect.ValueOfUint64(uint64(x)), nil - } - case protoreflect.FloatKind: - // Ignore 'f' for compatibility with output generated by C++, - // but don't remove 'f' when the value is "-inf" or "inf". 
- v := tok.value - if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { - v = v[:len(v)-len("f")] - } - if x, err := strconv.ParseFloat(v, 32); err == nil { - return protoreflect.ValueOfFloat32(float32(x)), nil - } - case protoreflect.DoubleKind: - // Ignore 'f' for compatibility with output generated by C++, - // but don't remove 'f' when the value is "-inf" or "inf". - v := tok.value - if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { - v = v[:len(v)-len("f")] - } - if x, err := strconv.ParseFloat(v, 64); err == nil { - return protoreflect.ValueOfFloat64(float64(x)), nil - } - case protoreflect.StringKind: - if isQuote(tok.value[0]) { - return protoreflect.ValueOfString(tok.unquoted), nil - } - case protoreflect.BytesKind: - if isQuote(tok.value[0]) { - return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil - } - case protoreflect.EnumKind: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil - } - vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value)) - if vd != nil { - return protoreflect.ValueOfEnum(vd.Number()), nil - } - case protoreflect.MessageKind, protoreflect.GroupKind: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return v, p.errorf("expected '{' or '<', found %q", tok.value) - } - err := p.unmarshalMessage(v.Message(), terminator) - return v, err - default: - panic(fmt.Sprintf("invalid kind %v", fd.Kind())) - } - return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value) -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - if fd.Message() == nil { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -// consumeExtensionOrAnyName consumes an extension name or an Any type URL and -// the following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtensionOrAnyName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - if p.done && tok.value != "]" { - return "", p.errorf("unclosed type_url or extension name") - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in unmarshalMessage to provide backward compatibility. 
-func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += len(p.cur.value) -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. - cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -var errBadUTF8 = errors.New("proto: bad UTF-8") - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. 
- simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) - s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - ss := string(r) + s[:2] - s = s[2:] - i, err := strconv.ParseUint(ss, 8, 8) - if err != nil { - return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) - } - return string([]byte{byte(i)}), s, nil - case 'x', 'X', 'u', 'U': - var n int - switch r { - case 'x', 'X': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) - } - ss := s[:n] - s = s[n:] - i, err := strconv.ParseUint(ss, 16, 64) - if err != nil { - return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) - } - if r == 'x' || r == 'X' { - return string([]byte{byte(i)}), s, nil - } - if i > utf8.MaxRune { - return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) - } - return string(rune(i)), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} diff --git a/vendor/github.com/golang/protobuf/proto/text_encode.go b/vendor/github.com/golang/protobuf/proto/text_encode.go deleted file mode 100644 index a31134eeb..000000000 --- a/vendor/github.com/golang/protobuf/proto/text_encode.go +++ /dev/null @@ -1,560 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "bytes" - "encoding" - "fmt" - "io" - "math" - "sort" - "strings" - - "google.golang.org/protobuf/encoding/prototext" - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -const wrapTextMarshalV2 = false - -// TextMarshaler is a configurable text format marshaler. 
-type TextMarshaler struct { - Compact bool // use compact text format (one line) - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes the proto text format of m to w. -func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error { - b, err := tm.marshal(m) - if len(b) > 0 { - if _, err := w.Write(b); err != nil { - return err - } - } - return err -} - -// Text returns a proto text formatted string of m. -func (tm *TextMarshaler) Text(m Message) string { - b, _ := tm.marshal(m) - return string(b) -} - -func (tm *TextMarshaler) marshal(m Message) ([]byte, error) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return []byte(""), nil - } - - if wrapTextMarshalV2 { - if m, ok := m.(encoding.TextMarshaler); ok { - return m.MarshalText() - } - - opts := prototext.MarshalOptions{ - AllowPartial: true, - EmitUnknown: true, - } - if !tm.Compact { - opts.Indent = " " - } - if !tm.ExpandAny { - opts.Resolver = (*protoregistry.Types)(nil) - } - return opts.Marshal(mr.Interface()) - } else { - w := &textWriter{ - compact: tm.Compact, - expandAny: tm.ExpandAny, - complete: true, - } - - if m, ok := m.(encoding.TextMarshaler); ok { - b, err := m.MarshalText() - if err != nil { - return nil, err - } - w.Write(b) - return w.buf, nil - } - - err := w.writeMessage(mr) - return w.buf, err - } -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// MarshalText writes the proto text format of m to w. -func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) } - -// MarshalTextString returns a proto text formatted string of m. -func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) } - -// CompactText writes the compact proto text format of m to w. -func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) } - -// CompactTextString returns a compact proto text formatted string of m. -func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) } - -var ( - newline = []byte("\n") - endBraceNewline = []byte("}\n") - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -// textWriter is an io.Writer that tracks its indentation level. -type textWriter struct { - compact bool // same as TextMarshaler.Compact - expandAny bool // same as TextMarshaler.ExpandAny - complete bool // whether the current position is a complete line - indent int // indentation level; never negative - buf []byte -} - -func (w *textWriter) Write(p []byte) (n int, _ error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - w.buf = append(w.buf, p...) - w.complete = false - return len(p), nil - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - w.buf = append(w.buf, ' ') - n++ - } - w.buf = append(w.buf, frag...) - n += len(frag) - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - w.buf = append(w.buf, frag...) 
- n += len(frag) - if i+1 < len(frags) { - w.buf = append(w.buf, '\n') - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - w.buf = append(w.buf, c) - w.complete = c == '\n' - return nil -} - -func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - - if fd.Kind() != protoreflect.GroupKind { - w.buf = append(w.buf, fd.Name()...) - w.WriteByte(':') - } else { - // Use message type name for group field name. - w.buf = append(w.buf, fd.Message().Name()...) - } - - if !w.compact { - w.WriteByte(' ') - } -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. -func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) { - md := m.Descriptor() - fdURL := md.Fields().ByName("type_url") - fdVal := md.Fields().ByName("value") - - url := m.Get(fdURL).String() - mt, err := protoregistry.GlobalTypes.FindMessageByURL(url) - if err != nil { - return false, nil - } - - b := m.Get(fdVal).Bytes() - m2 := mt.New() - if err := proto.Unmarshal(b, m2.Interface()); err != nil { - return false, nil - } - w.Write([]byte("[")) - if requiresQuotes(url) { - w.writeQuotedString(url) - } else { - w.Write([]byte(url)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.indent++ - } - if err := w.writeMessage(m2); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.indent-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (w *textWriter) writeMessage(m protoreflect.Message) error { - md := m.Descriptor() - if w.expandAny && md.FullName() == "google.protobuf.Any" { - if canExpand, err := w.writeProto3Any(m); canExpand { - return err - } - } - - fds := md.Fields() - for i := 0; i < fds.Len(); { - fd := fds.Get(i) - if od := fd.ContainingOneof(); od != nil { - fd = m.WhichOneof(od) - i += od.Fields().Len() - } else { - i++ - } - if fd == nil || !m.Has(fd) { - continue - } - - switch { - case fd.IsList(): - lv := m.Get(fd).List() - for j := 0; j < lv.Len(); j++ { - w.writeName(fd) - v := lv.Get(j) - if err := w.writeSingularValue(v, fd); err != nil { - return err - } - w.WriteByte('\n') - } - case fd.IsMap(): - kfd := fd.MapKey() - vfd := fd.MapValue() - mv := m.Get(fd).Map() - - type entry struct{ key, val protoreflect.Value } - var entries []entry - mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { - entries = append(entries, entry{k.Value(), v}) - return true - }) - sort.Slice(entries, func(i, j int) bool { - switch kfd.Kind() { - case protoreflect.BoolKind: - return !entries[i].key.Bool() && entries[j].key.Bool() - case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, 
protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: - return entries[i].key.Int() < entries[j].key.Int() - case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: - return entries[i].key.Uint() < entries[j].key.Uint() - case protoreflect.StringKind: - return entries[i].key.String() < entries[j].key.String() - default: - panic("invalid kind") - } - }) - for _, entry := range entries { - w.writeName(fd) - w.WriteByte('<') - if !w.compact { - w.WriteByte('\n') - } - w.indent++ - w.writeName(kfd) - if err := w.writeSingularValue(entry.key, kfd); err != nil { - return err - } - w.WriteByte('\n') - w.writeName(vfd) - if err := w.writeSingularValue(entry.val, vfd); err != nil { - return err - } - w.WriteByte('\n') - w.indent-- - w.WriteByte('>') - w.WriteByte('\n') - } - default: - w.writeName(fd) - if err := w.writeSingularValue(m.Get(fd), fd); err != nil { - return err - } - w.WriteByte('\n') - } - } - - if b := m.GetUnknown(); len(b) > 0 { - w.writeUnknownFields(b) - } - return w.writeExtensions(m) -} - -func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error { - switch fd.Kind() { - case protoreflect.FloatKind, protoreflect.DoubleKind: - switch vf := v.Float(); { - case math.IsInf(vf, +1): - w.Write(posInf) - case math.IsInf(vf, -1): - w.Write(negInf) - case math.IsNaN(vf): - w.Write(nan) - default: - fmt.Fprint(w, v.Interface()) - } - case protoreflect.StringKind: - // NOTE: This does not validate UTF-8 for historical reasons. - w.writeQuotedString(string(v.String())) - case protoreflect.BytesKind: - w.writeQuotedString(string(v.Bytes())) - case protoreflect.MessageKind, protoreflect.GroupKind: - var bra, ket byte = '<', '>' - if fd.Kind() == protoreflect.GroupKind { - bra, ket = '{', '}' - } - w.WriteByte(bra) - if !w.compact { - w.WriteByte('\n') - } - w.indent++ - m := v.Message() - if m2, ok := m.Interface().(encoding.TextMarshaler); ok { - b, err := m2.MarshalText() - if err != nil { - return err - } - w.Write(b) - } else { - w.writeMessage(m) - } - w.indent-- - w.WriteByte(ket) - case protoreflect.EnumKind: - if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil { - fmt.Fprint(w, ev.Name()) - } else { - fmt.Fprint(w, v.Enum()) - } - default: - fmt.Fprint(w, v.Interface()) - } - return nil -} - -// writeQuotedString writes a quoted string in the protocol buffer text format. -func (w *textWriter) writeQuotedString(s string) { - w.WriteByte('"') - for i := 0; i < len(s); i++ { - switch c := s[i]; c { - case '\n': - w.buf = append(w.buf, `\n`...) - case '\r': - w.buf = append(w.buf, `\r`...) - case '\t': - w.buf = append(w.buf, `\t`...) - case '"': - w.buf = append(w.buf, `\"`...) - case '\\': - w.buf = append(w.buf, `\\`...) - default: - if isPrint := c >= 0x20 && c < 0x7f; isPrint { - w.buf = append(w.buf, c) - } else { - w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...) 
- } - } - } - w.WriteByte('"') -} - -func (w *textWriter) writeUnknownFields(b []byte) { - if !w.compact { - fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b)) - } - - for len(b) > 0 { - num, wtyp, n := protowire.ConsumeTag(b) - if n < 0 { - return - } - b = b[n:] - - if wtyp == protowire.EndGroupType { - w.indent-- - w.Write(endBraceNewline) - continue - } - fmt.Fprint(w, num) - if wtyp != protowire.StartGroupType { - w.WriteByte(':') - } - if !w.compact || wtyp == protowire.StartGroupType { - w.WriteByte(' ') - } - switch wtyp { - case protowire.VarintType: - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return - } - b = b[n:] - fmt.Fprint(w, v) - case protowire.Fixed32Type: - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return - } - b = b[n:] - fmt.Fprint(w, v) - case protowire.Fixed64Type: - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return - } - b = b[n:] - fmt.Fprint(w, v) - case protowire.BytesType: - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return - } - b = b[n:] - fmt.Fprintf(w, "%q", v) - case protowire.StartGroupType: - w.WriteByte('{') - w.indent++ - default: - fmt.Fprintf(w, "/* unknown wire type %d */", wtyp) - } - w.WriteByte('\n') - } -} - -// writeExtensions writes all the extensions in m. -func (w *textWriter) writeExtensions(m protoreflect.Message) error { - md := m.Descriptor() - if md.ExtensionRanges().Len() == 0 { - return nil - } - - type ext struct { - desc protoreflect.FieldDescriptor - val protoreflect.Value - } - var exts []ext - m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - if fd.IsExtension() { - exts = append(exts, ext{fd, v}) - } - return true - }) - sort.Slice(exts, func(i, j int) bool { - return exts[i].desc.Number() < exts[j].desc.Number() - }) - - for _, ext := range exts { - // For message set, use the name of the message as the extension name. - name := string(ext.desc.FullName()) - if isMessageSet(ext.desc.ContainingMessage()) { - name = strings.TrimSuffix(name, ".message_set_extension") - } - - if !ext.desc.IsList() { - if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil { - return err - } - } else { - lv := ext.val.List() - for i := 0; i < lv.Len(); i++ { - if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil { - return err - } - } - } - } - return nil -} - -func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error { - fmt.Fprintf(w, "[%s]:", name) - if !w.compact { - w.WriteByte(' ') - } - if err := w.writeSingularValue(v, fd); err != nil { - return err - } - w.WriteByte('\n') - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - for i := 0; i < w.indent*2; i++ { - w.buf = append(w.buf, ' ') - } - w.complete = false -} diff --git a/vendor/github.com/golang/protobuf/proto/wire.go b/vendor/github.com/golang/protobuf/proto/wire.go deleted file mode 100644 index d7c28da5a..000000000 --- a/vendor/github.com/golang/protobuf/proto/wire.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - protoV2 "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/runtime/protoiface" -) - -// Size returns the size in bytes of the wire-format encoding of m. 
-func Size(m Message) int { - if m == nil { - return 0 - } - mi := MessageV2(m) - return protoV2.Size(mi) -} - -// Marshal returns the wire-format encoding of m. -func Marshal(m Message) ([]byte, error) { - b, err := marshalAppend(nil, m, false) - if b == nil { - b = zeroBytes - } - return b, err -} - -var zeroBytes = make([]byte, 0, 0) - -func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) { - if m == nil { - return nil, ErrNil - } - mi := MessageV2(m) - nbuf, err := protoV2.MarshalOptions{ - Deterministic: deterministic, - AllowPartial: true, - }.MarshalAppend(buf, mi) - if err != nil { - return buf, err - } - if len(buf) == len(nbuf) { - if !mi.ProtoReflect().IsValid() { - return buf, ErrNil - } - } - return nbuf, checkRequiredNotSet(mi) -} - -// Unmarshal parses a wire-format message in b and places the decoded results in m. -// -// Unmarshal resets m before starting to unmarshal, so any existing data in m is always -// removed. Use UnmarshalMerge to preserve and append to existing data. -func Unmarshal(b []byte, m Message) error { - m.Reset() - return UnmarshalMerge(b, m) -} - -// UnmarshalMerge parses a wire-format message in b and places the decoded results in m. -func UnmarshalMerge(b []byte, m Message) error { - mi := MessageV2(m) - out, err := protoV2.UnmarshalOptions{ - AllowPartial: true, - Merge: true, - }.UnmarshalState(protoiface.UnmarshalInput{ - Buf: b, - Message: mi.ProtoReflect(), - }) - if err != nil { - return err - } - if out.Flags&protoiface.UnmarshalInitialized > 0 { - return nil - } - return checkRequiredNotSet(mi) -} diff --git a/vendor/github.com/golang/protobuf/proto/wrappers.go b/vendor/github.com/golang/protobuf/proto/wrappers.go deleted file mode 100644 index 398e34859..000000000 --- a/vendor/github.com/golang/protobuf/proto/wrappers.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -// Bool stores v in a new bool value and returns a pointer to it. -func Bool(v bool) *bool { return &v } - -// Int stores v in a new int32 value and returns a pointer to it. -// -// Deprecated: Use Int32 instead. -func Int(v int) *int32 { return Int32(int32(v)) } - -// Int32 stores v in a new int32 value and returns a pointer to it. -func Int32(v int32) *int32 { return &v } - -// Int64 stores v in a new int64 value and returns a pointer to it. -func Int64(v int64) *int64 { return &v } - -// Uint32 stores v in a new uint32 value and returns a pointer to it. -func Uint32(v uint32) *uint32 { return &v } - -// Uint64 stores v in a new uint64 value and returns a pointer to it. -func Uint64(v uint64) *uint64 { return &v } - -// Float32 stores v in a new float32 value and returns a pointer to it. -func Float32(v float32) *float32 { return &v } - -// Float64 stores v in a new float64 value and returns a pointer to it. -func Float64(v float64) *float64 { return &v } - -// String stores v in a new string value and returns a pointer to it. -func String(v string) *string { return &v } diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go deleted file mode 100644 index 85f9f5736..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ptypes - -import ( - "fmt" - "strings" - - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - - anypb "github.com/golang/protobuf/ptypes/any" -) - -const urlPrefix = "type.googleapis.com/" - -// AnyMessageName returns the message name contained in an anypb.Any message. -// Most type assertions should use the Is function instead. -// -// Deprecated: Call the any.MessageName method instead. -func AnyMessageName(any *anypb.Any) (string, error) { - name, err := anyMessageName(any) - return string(name), err -} -func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) { - if any == nil { - return "", fmt.Errorf("message is nil") - } - name := protoreflect.FullName(any.TypeUrl) - if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 { - name = name[i+len("/"):] - } - if !name.IsValid() { - return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) - } - return name, nil -} - -// MarshalAny marshals the given message m into an anypb.Any message. -// -// Deprecated: Call the anypb.New function instead. -func MarshalAny(m proto.Message) (*anypb.Any, error) { - switch dm := m.(type) { - case DynamicAny: - m = dm.Message - case *DynamicAny: - if dm == nil { - return nil, proto.ErrNil - } - m = dm.Message - } - b, err := proto.Marshal(m) - if err != nil { - return nil, err - } - return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil -} - -// Empty returns a new message of the type specified in an anypb.Any message. -// It returns protoregistry.NotFound if the corresponding message type could not -// be resolved in the global registry. -// -// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead -// to resolve the message name and create a new instance of it. -func Empty(any *anypb.Any) (proto.Message, error) { - name, err := anyMessageName(any) - if err != nil { - return nil, err - } - mt, err := protoregistry.GlobalTypes.FindMessageByName(name) - if err != nil { - return nil, err - } - return proto.MessageV1(mt.New().Interface()), nil -} - -// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message -// into the provided message m. It returns an error if the target message -// does not match the type in the Any message or if an unmarshal error occurs. -// -// The target message m may be a *DynamicAny message. If the underlying message -// type could not be resolved, then this returns protoregistry.NotFound. -// -// Deprecated: Call the any.UnmarshalTo method instead. -func UnmarshalAny(any *anypb.Any, m proto.Message) error { - if dm, ok := m.(*DynamicAny); ok { - if dm.Message == nil { - var err error - dm.Message, err = Empty(any) - if err != nil { - return err - } - } - m = dm.Message - } - - anyName, err := AnyMessageName(any) - if err != nil { - return err - } - msgName := proto.MessageName(m) - if anyName != msgName { - return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName) - } - return proto.Unmarshal(any.Value, m) -} - -// Is reports whether the Any message contains a message of the specified type. -// -// Deprecated: Call the any.MessageIs method instead. 
-func Is(any *anypb.Any, m proto.Message) bool { - if any == nil || m == nil { - return false - } - name := proto.MessageName(m) - if !strings.HasSuffix(any.TypeUrl, name) { - return false - } - return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/' -} - -// DynamicAny is a value that can be passed to UnmarshalAny to automatically -// allocate a proto.Message for the type specified in an anypb.Any message. -// The allocated message is stored in the embedded proto.Message. -// -// Example: -// var x ptypes.DynamicAny -// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } -// fmt.Printf("unmarshaled message: %v", x.Message) -// -// Deprecated: Use the any.UnmarshalNew method instead to unmarshal -// the any message contents into a new instance of the underlying message. -type DynamicAny struct{ proto.Message } - -func (m DynamicAny) String() string { - if m.Message == nil { - return "" - } - return m.Message.String() -} -func (m DynamicAny) Reset() { - if m.Message == nil { - return - } - m.Message.Reset() -} -func (m DynamicAny) ProtoMessage() { - return -} -func (m DynamicAny) ProtoReflect() protoreflect.Message { - if m.Message == nil { - return nil - } - return dynamicAny{proto.MessageReflect(m.Message)} -} - -type dynamicAny struct{ protoreflect.Message } - -func (m dynamicAny) Type() protoreflect.MessageType { - return dynamicAnyType{m.Message.Type()} -} -func (m dynamicAny) New() protoreflect.Message { - return dynamicAnyType{m.Message.Type()}.New() -} -func (m dynamicAny) Interface() protoreflect.ProtoMessage { - return DynamicAny{proto.MessageV1(m.Message.Interface())} -} - -type dynamicAnyType struct{ protoreflect.MessageType } - -func (t dynamicAnyType) New() protoreflect.Message { - return dynamicAny{t.MessageType.New()} -} -func (t dynamicAnyType) Zero() protoreflect.Message { - return dynamicAny{t.MessageType.Zero()} -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go deleted file mode 100644 index 0ef27d33d..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ /dev/null @@ -1,62 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/any/any.proto - -package any - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - anypb "google.golang.org/protobuf/types/known/anypb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/any.proto. 
- -type Any = anypb.Any - -var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{ - 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, - 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} - -var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() } -func file_github_com_golang_protobuf_ptypes_any_any_proto_init() { - if File_github_com_golang_protobuf_ptypes_any_any_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_any_any_proto = out.File - file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go deleted file mode 100644 index d3c33259d..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/doc.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ptypes provides functionality for interacting with well-known types. -// -// Deprecated: Well-known types have specialized functionality directly -// injected into the generated packages for each message type. -// See the deprecation notice for each function for the suggested alternative. -package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go deleted file mode 100644 index b2b55dd85..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ptypes - -import ( - "errors" - "fmt" - "time" - - durationpb "github.com/golang/protobuf/ptypes/duration" -) - -// Range of google.protobuf.Duration as specified in duration.proto. -// This is about 10,000 years in seconds. -const ( - maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) - minSeconds = -maxSeconds -) - -// Duration converts a durationpb.Duration to a time.Duration. -// Duration returns an error if dur is invalid or overflows a time.Duration. -// -// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead. -func Duration(dur *durationpb.Duration) (time.Duration, error) { - if err := validateDuration(dur); err != nil { - return 0, err - } - d := time.Duration(dur.Seconds) * time.Second - if int64(d/time.Second) != dur.Seconds { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) - } - if dur.Nanos != 0 { - d += time.Duration(dur.Nanos) * time.Nanosecond - if (d < 0) != (dur.Nanos < 0) { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) - } - } - return d, nil -} - -// DurationProto converts a time.Duration to a durationpb.Duration. -// -// Deprecated: Call the durationpb.New function instead. -func DurationProto(d time.Duration) *durationpb.Duration { - nanos := d.Nanoseconds() - secs := nanos / 1e9 - nanos -= secs * 1e9 - return &durationpb.Duration{ - Seconds: int64(secs), - Nanos: int32(nanos), - } -} - -// validateDuration determines whether the durationpb.Duration is valid -// according to the definition in google/protobuf/duration.proto. -// A valid durpb.Duration may still be too large to fit into a time.Duration -// Note that the range of durationpb.Duration is about 10,000 years, -// while the range of time.Duration is about 290 years. -func validateDuration(dur *durationpb.Duration) error { - if dur == nil { - return errors.New("duration: nil Duration") - } - if dur.Seconds < minSeconds || dur.Seconds > maxSeconds { - return fmt.Errorf("duration: %v: seconds out of range", dur) - } - if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 { - return fmt.Errorf("duration: %v: nanos out of range", dur) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) { - return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur) - } - return nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go deleted file mode 100644 index d0079ee3e..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ /dev/null @@ -1,63 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/duration/duration.proto - -package duration - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - durationpb "google.golang.org/protobuf/types/known/durationpb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/duration.proto. 
- -type Duration = durationpb.Duration - -var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{ - 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, - 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() } -func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() { - if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File - file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go deleted file mode 100644 index 8368a3f70..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ptypes - -import ( - "errors" - "fmt" - "time" - - timestamppb "github.com/golang/protobuf/ptypes/timestamp" -) - -// Range of google.protobuf.Duration as specified in timestamp.proto. -const ( - // Seconds field of the earliest valid Timestamp. - // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - minValidSeconds = -62135596800 - // Seconds field just after the latest valid Timestamp. 
- // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
- maxValidSeconds = 253402300800
-)
-
-// Timestamp converts a timestamppb.Timestamp to a time.Time.
-// It returns an error if the argument is invalid.
-//
-// Unlike most Go functions, if Timestamp returns an error, the first return
-// value is not the zero time.Time. Instead, it is the value obtained from the
-// time.Unix function when passed the contents of the Timestamp, in the UTC
-// locale. This may or may not be a meaningful time; many invalid Timestamps
-// do map to valid time.Times.
-//
-// A nil Timestamp returns an error. The first return value in that case is
-// undefined.
-//
-// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead.
-func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
- // Don't return the zero value on error, because corresponds to a valid
- // timestamp. Instead return whatever time.Unix gives us.
- var t time.Time
- if ts == nil {
- t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
- } else {
- t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
- }
- return t, validateTimestamp(ts)
-}
-
-// TimestampNow returns a google.protobuf.Timestamp for the current time.
-//
-// Deprecated: Call the timestamppb.Now function instead.
-func TimestampNow() *timestamppb.Timestamp {
- ts, err := TimestampProto(time.Now())
- if err != nil {
- panic("ptypes: time.Now() out of Timestamp range")
- }
- return ts
-}
-
-// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
-// It returns an error if the resulting Timestamp is invalid.
-//
-// Deprecated: Call the timestamppb.New function instead.
-func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
- ts := &timestamppb.Timestamp{
- Seconds: t.Unix(),
- Nanos: int32(t.Nanosecond()),
- }
- if err := validateTimestamp(ts); err != nil {
- return nil, err
- }
- return ts, nil
-}
-
-// TimestampString returns the RFC 3339 string for valid Timestamps.
-// For invalid Timestamps, it returns an error message in parentheses.
-//
-// Deprecated: Call the ts.AsTime method instead,
-// followed by a call to the Format method on the time.Time value.
-func TimestampString(ts *timestamppb.Timestamp) string {
- t, err := Timestamp(ts)
- if err != nil {
- return fmt.Sprintf("(%v)", err)
- }
- return t.Format(time.RFC3339Nano)
-}
-
-// validateTimestamp determines whether a Timestamp is valid.
-// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
-// and has a Nanos field in the range [0, 1e9).
-//
-// If the Timestamp is valid, validateTimestamp returns nil.
-// Otherwise, it returns an error that describes the problem.
-//
-// Every valid Timestamp can be represented by a time.Time,
-// but the converse is not true.
-func validateTimestamp(ts *timestamppb.Timestamp) error { - if ts == nil { - return errors.New("timestamp: nil Timestamp") - } - if ts.Seconds < minValidSeconds { - return fmt.Errorf("timestamp: %v before 0001-01-01", ts) - } - if ts.Seconds >= maxValidSeconds { - return fmt.Errorf("timestamp: %v after 10000-01-01", ts) - } - if ts.Nanos < 0 || ts.Nanos >= 1e9 { - return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) - } - return nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go deleted file mode 100644 index a76f80760..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ /dev/null @@ -1,64 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto - -package timestamp - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/timestamp.proto. - -type Timestamp = timestamppb.Timestamp - -var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{ - 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37, - 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} - -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() } -func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() { - if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes, - DependencyIndexes: 
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil -} diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml deleted file mode 100644 index d8156a60b..000000000 --- a/vendor/github.com/google/uuid/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - 1.4.3 - - 1.5.3 - - tip - -script: - - go test -v ./... diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md deleted file mode 100644 index 04fdf09f1..000000000 --- a/vendor/github.com/google/uuid/CONTRIBUTING.md +++ /dev/null @@ -1,10 +0,0 @@ -# How to contribute - -We definitely welcome patches and contribution to this project! - -### Legal requirements - -In order to protect both you and ourselves, you will need to sign the -[Contributor License Agreement](https://cla.developers.google.com/clas). - -You may have already signed it for other Google projects. diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS deleted file mode 100644 index b4bb97f6b..000000000 --- a/vendor/github.com/google/uuid/CONTRIBUTORS +++ /dev/null @@ -1,9 +0,0 @@ -Paul Borman -bmatsuo -shawnps -theory -jboverfelt -dsymonds -cd1 -wallclockbuilder -dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE deleted file mode 100644 index 5dc68268d..000000000 --- a/vendor/github.com/google/uuid/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009,2014 Google Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md deleted file mode 100644 index f765a46f9..000000000 --- a/vendor/github.com/google/uuid/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) -The uuid package generates and inspects UUIDs based on -[RFC 4122](http://tools.ietf.org/html/rfc4122) -and DCE 1.1: Authentication and Security Services. - -This package is based on the github.com/pborman/uuid package (previously named -code.google.com/p/go-uuid). It differs from these earlier packages in that -a UUID is a 16 byte array rather than a byte slice. One loss due to this -change is the ability to represent an invalid UUID (vs a NIL UUID). - -###### Install -`go get github.com/google/uuid` - -###### Documentation -[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) - -Full `go doc` style documentation for the package can be viewed online without -installing this package by using the GoDoc site here: -http://pkg.go.dev/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go deleted file mode 100644 index fa820b9d3..000000000 --- a/vendor/github.com/google/uuid/dce.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "fmt" - "os" -) - -// A Domain represents a Version 2 domain -type Domain byte - -// Domain constants for DCE Security (Version 2) UUIDs. -const ( - Person = Domain(0) - Group = Domain(1) - Org = Domain(2) -) - -// NewDCESecurity returns a DCE Security (Version 2) UUID. -// -// The domain should be one of Person, Group or Org. -// On a POSIX system the id should be the users UID for the Person -// domain and the users GID for the Group. The meaning of id for -// the domain Org or on non-POSIX systems is site defined. -// -// For a given domain/id pair the same token may be returned for up to -// 7 minutes and 10 seconds. -func NewDCESecurity(domain Domain, id uint32) (UUID, error) { - uuid, err := NewUUID() - if err == nil { - uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 - uuid[9] = byte(domain) - binary.BigEndian.PutUint32(uuid[0:], id) - } - return uuid, err -} - -// NewDCEPerson returns a DCE Security (Version 2) UUID in the person -// domain with the id returned by os.Getuid. -// -// NewDCESecurity(Person, uint32(os.Getuid())) -func NewDCEPerson() (UUID, error) { - return NewDCESecurity(Person, uint32(os.Getuid())) -} - -// NewDCEGroup returns a DCE Security (Version 2) UUID in the group -// domain with the id returned by os.Getgid. -// -// NewDCESecurity(Group, uint32(os.Getgid())) -func NewDCEGroup() (UUID, error) { - return NewDCESecurity(Group, uint32(os.Getgid())) -} - -// Domain returns the domain for a Version 2 UUID. Domains are only defined -// for Version 2 UUIDs. -func (uuid UUID) Domain() Domain { - return Domain(uuid[9]) -} - -// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 -// UUIDs. 
-func (uuid UUID) ID() uint32 { - return binary.BigEndian.Uint32(uuid[0:4]) -} - -func (d Domain) String() string { - switch d { - case Person: - return "Person" - case Group: - return "Group" - case Org: - return "Org" - } - return fmt.Sprintf("Domain%d", int(d)) -} diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go deleted file mode 100644 index 5b8a4b9af..000000000 --- a/vendor/github.com/google/uuid/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package uuid generates and inspects UUIDs. -// -// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security -// Services. -// -// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to -// maps or compared directly. -package uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go deleted file mode 100644 index b404f4bec..000000000 --- a/vendor/github.com/google/uuid/hash.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "crypto/md5" - "crypto/sha1" - "hash" -) - -// Well known namespace IDs and UUIDs -var ( - NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) - Nil UUID // empty UUID, all zeros -) - -// NewHash returns a new UUID derived from the hash of space concatenated with -// data generated by h. The hash should be at least 16 byte in length. The -// first 16 bytes of the hash are used to form the UUID. The version of the -// UUID will be the lower 4 bits of version. NewHash is used to implement -// NewMD5 and NewSHA1. -func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { - h.Reset() - h.Write(space[:]) //nolint:errcheck - h.Write(data) //nolint:errcheck - s := h.Sum(nil) - var uuid UUID - copy(uuid[:], s) - uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) - uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant - return uuid -} - -// NewMD5 returns a new MD5 (Version 3) UUID based on the -// supplied name space and data. It is the same as calling: -// -// NewHash(md5.New(), space, data, 3) -func NewMD5(space UUID, data []byte) UUID { - return NewHash(md5.New(), space, data, 3) -} - -// NewSHA1 returns a new SHA1 (Version 5) UUID based on the -// supplied name space and data. It is the same as calling: -// -// NewHash(sha1.New(), space, data, 5) -func NewSHA1(space UUID, data []byte) UUID { - return NewHash(sha1.New(), space, data, 5) -} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go deleted file mode 100644 index 14bd34072..000000000 --- a/vendor/github.com/google/uuid/marshal.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import "fmt" - -// MarshalText implements encoding.TextMarshaler. -func (uuid UUID) MarshalText() ([]byte, error) { - var js [36]byte - encodeHex(js[:], uuid) - return js[:], nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. 
-func (uuid *UUID) UnmarshalText(data []byte) error { - id, err := ParseBytes(data) - if err != nil { - return err - } - *uuid = id - return nil -} - -// MarshalBinary implements encoding.BinaryMarshaler. -func (uuid UUID) MarshalBinary() ([]byte, error) { - return uuid[:], nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. -func (uuid *UUID) UnmarshalBinary(data []byte) error { - if len(data) != 16 { - return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) - } - copy(uuid[:], data) - return nil -} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go deleted file mode 100644 index d651a2b06..000000000 --- a/vendor/github.com/google/uuid/node.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "sync" -) - -var ( - nodeMu sync.Mutex - ifname string // name of interface being used - nodeID [6]byte // hardware for version 1 UUIDs - zeroID [6]byte // nodeID with only 0's -) - -// NodeInterface returns the name of the interface from which the NodeID was -// derived. The interface "user" is returned if the NodeID was set by -// SetNodeID. -func NodeInterface() string { - defer nodeMu.Unlock() - nodeMu.Lock() - return ifname -} - -// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. -// If name is "" then the first usable interface found will be used or a random -// Node ID will be generated. If a named interface cannot be found then false -// is returned. -// -// SetNodeInterface never fails when name is "". -func SetNodeInterface(name string) bool { - defer nodeMu.Unlock() - nodeMu.Lock() - return setNodeInterface(name) -} - -func setNodeInterface(name string) bool { - iname, addr := getHardwareInterface(name) // null implementation for js - if iname != "" && addr != nil { - ifname = iname - copy(nodeID[:], addr) - return true - } - - // We found no interfaces with a valid hardware address. If name - // does not specify a specific interface generate a random Node ID - // (section 4.1.6) - if name == "" { - ifname = "random" - randomBits(nodeID[:]) - return true - } - return false -} - -// NodeID returns a slice of a copy of the current Node ID, setting the Node ID -// if not already set. -func NodeID() []byte { - defer nodeMu.Unlock() - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - nid := nodeID - return nid[:] -} - -// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes -// of id are used. If id is less than 6 bytes then false is returned and the -// Node ID is not set. -func SetNodeID(id []byte) bool { - if len(id) < 6 { - return false - } - defer nodeMu.Unlock() - nodeMu.Lock() - copy(nodeID[:], id) - ifname = "user" - return true -} - -// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is -// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. -func (uuid UUID) NodeID() []byte { - var node [6]byte - copy(node[:], uuid[10:]) - return node[:] -} diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go deleted file mode 100644 index 24b78edc9..000000000 --- a/vendor/github.com/google/uuid/node_js.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build js - -package uuid - -// getHardwareInterface returns nil values for the JS version of the code. -// This remvoves the "net" dependency, because it is not used in the browser. -// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. -func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go deleted file mode 100644 index 0cbbcddbd..000000000 --- a/vendor/github.com/google/uuid/node_net.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !js - -package uuid - -import "net" - -var interfaces []net.Interface // cached list of interfaces - -// getHardwareInterface returns the name and hardware address of interface name. -// If name is "" then the name and hardware address of one of the system's -// interfaces is returned. If no interfaces are found (name does not exist or -// there are no interfaces) then "", nil is returned. -// -// Only addresses of at least 6 bytes are returned. -func getHardwareInterface(name string) (string, []byte) { - if interfaces == nil { - var err error - interfaces, err = net.Interfaces() - if err != nil { - return "", nil - } - } - for _, ifs := range interfaces { - if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { - return ifs.Name, ifs.HardwareAddr - } - } - return "", nil -} diff --git a/vendor/github.com/google/uuid/null.go b/vendor/github.com/google/uuid/null.go deleted file mode 100644 index d7fcbf286..000000000 --- a/vendor/github.com/google/uuid/null.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2021 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "bytes" - "database/sql/driver" - "encoding/json" - "fmt" -) - -var jsonNull = []byte("null") - -// NullUUID represents a UUID that may be null. -// NullUUID implements the SQL driver.Scanner interface so -// it can be used as a scan destination: -// -// var u uuid.NullUUID -// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u) -// ... -// if u.Valid { -// // use u.UUID -// } else { -// // NULL value -// } -// -type NullUUID struct { - UUID UUID - Valid bool // Valid is true if UUID is not NULL -} - -// Scan implements the SQL driver.Scanner interface. -func (nu *NullUUID) Scan(value interface{}) error { - if value == nil { - nu.UUID, nu.Valid = Nil, false - return nil - } - - err := nu.UUID.Scan(value) - if err != nil { - nu.Valid = false - return err - } - - nu.Valid = true - return nil -} - -// Value implements the driver Valuer interface. -func (nu NullUUID) Value() (driver.Value, error) { - if !nu.Valid { - return nil, nil - } - // Delegate to UUID Value function - return nu.UUID.Value() -} - -// MarshalBinary implements encoding.BinaryMarshaler. -func (nu NullUUID) MarshalBinary() ([]byte, error) { - if nu.Valid { - return nu.UUID[:], nil - } - - return []byte(nil), nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. -func (nu *NullUUID) UnmarshalBinary(data []byte) error { - if len(data) != 16 { - return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) - } - copy(nu.UUID[:], data) - nu.Valid = true - return nil -} - -// MarshalText implements encoding.TextMarshaler. 
-func (nu NullUUID) MarshalText() ([]byte, error) { - if nu.Valid { - return nu.UUID.MarshalText() - } - - return jsonNull, nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (nu *NullUUID) UnmarshalText(data []byte) error { - id, err := ParseBytes(data) - if err != nil { - nu.Valid = false - return err - } - nu.UUID = id - nu.Valid = true - return nil -} - -// MarshalJSON implements json.Marshaler. -func (nu NullUUID) MarshalJSON() ([]byte, error) { - if nu.Valid { - return json.Marshal(nu.UUID) - } - - return jsonNull, nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (nu *NullUUID) UnmarshalJSON(data []byte) error { - if bytes.Equal(data, jsonNull) { - *nu = NullUUID{} - return nil // valid null UUID - } - err := json.Unmarshal(data, &nu.UUID) - nu.Valid = err == nil - return err -} diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go deleted file mode 100644 index 2e02ec06c..000000000 --- a/vendor/github.com/google/uuid/sql.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "database/sql/driver" - "fmt" -) - -// Scan implements sql.Scanner so UUIDs can be read from databases transparently. -// Currently, database types that map to string and []byte are supported. Please -// consult database-specific driver documentation for matching types. -func (uuid *UUID) Scan(src interface{}) error { - switch src := src.(type) { - case nil: - return nil - - case string: - // if an empty UUID comes from a table, we return a null UUID - if src == "" { - return nil - } - - // see Parse for required string format - u, err := Parse(src) - if err != nil { - return fmt.Errorf("Scan: %v", err) - } - - *uuid = u - - case []byte: - // if an empty UUID comes from a table, we return a null UUID - if len(src) == 0 { - return nil - } - - // assumes a simple slice of bytes if 16 bytes - // otherwise attempts to parse - if len(src) != 16 { - return uuid.Scan(string(src)) - } - copy((*uuid)[:], src) - - default: - return fmt.Errorf("Scan: unable to scan type %T into UUID", src) - } - - return nil -} - -// Value implements sql.Valuer so that UUIDs can be written to databases -// transparently. Currently, UUIDs map to strings. Please consult -// database-specific driver documentation for matching types. -func (uuid UUID) Value() (driver.Value, error) { - return uuid.String(), nil -} diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go deleted file mode 100644 index e6ef06cdc..000000000 --- a/vendor/github.com/google/uuid/time.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "sync" - "time" -) - -// A Time represents a time as the number of 100's of nanoseconds since 15 Oct -// 1582. 
-type Time int64 - -const ( - lillian = 2299160 // Julian day of 15 Oct 1582 - unix = 2440587 // Julian day of 1 Jan 1970 - epoch = unix - lillian // Days between epochs - g1582 = epoch * 86400 // seconds between epochs - g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs -) - -var ( - timeMu sync.Mutex - lasttime uint64 // last time we returned - clockSeq uint16 // clock sequence for this run - - timeNow = time.Now // for testing -) - -// UnixTime converts t the number of seconds and nanoseconds using the Unix -// epoch of 1 Jan 1970. -func (t Time) UnixTime() (sec, nsec int64) { - sec = int64(t - g1582ns100) - nsec = (sec % 10000000) * 100 - sec /= 10000000 - return sec, nsec -} - -// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and -// clock sequence as well as adjusting the clock sequence as needed. An error -// is returned if the current time cannot be determined. -func GetTime() (Time, uint16, error) { - defer timeMu.Unlock() - timeMu.Lock() - return getTime() -} - -func getTime() (Time, uint16, error) { - t := timeNow() - - // If we don't have a clock sequence already, set one. - if clockSeq == 0 { - setClockSequence(-1) - } - now := uint64(t.UnixNano()/100) + g1582ns100 - - // If time has gone backwards with this clock sequence then we - // increment the clock sequence - if now <= lasttime { - clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 - } - lasttime = now - return Time(now), clockSeq, nil -} - -// ClockSequence returns the current clock sequence, generating one if not -// already set. The clock sequence is only used for Version 1 UUIDs. -// -// The uuid package does not use global static storage for the clock sequence or -// the last time a UUID was generated. Unless SetClockSequence is used, a new -// random clock sequence is generated the first time a clock sequence is -// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) -func ClockSequence() int { - defer timeMu.Unlock() - timeMu.Lock() - return clockSequence() -} - -func clockSequence() int { - if clockSeq == 0 { - setClockSequence(-1) - } - return int(clockSeq & 0x3fff) -} - -// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to -// -1 causes a new sequence to be generated. -func SetClockSequence(seq int) { - defer timeMu.Unlock() - timeMu.Lock() - setClockSequence(seq) -} - -func setClockSequence(seq int) { - if seq == -1 { - var b [2]byte - randomBits(b[:]) // clock sequence - seq = int(b[0])<<8 | int(b[1]) - } - oldSeq := clockSeq - clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant - if oldSeq != clockSeq { - lasttime = 0 - } -} - -// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. The time is only defined for version 1 and 2 UUIDs. -func (uuid UUID) Time() Time { - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - return Time(time) -} - -// ClockSequence returns the clock sequence encoded in uuid. -// The clock sequence is only well defined for version 1 and 2 UUIDs. -func (uuid UUID) ClockSequence() int { - return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff -} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go deleted file mode 100644 index 5ea6c7378..000000000 --- a/vendor/github.com/google/uuid/util.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "io" -) - -// randomBits completely fills slice b with random data. -func randomBits(b []byte) { - if _, err := io.ReadFull(rander, b); err != nil { - panic(err.Error()) // rand should never fail - } -} - -// xvalues returns the value of a byte as a hexadecimal digit or 255. -var xvalues = [256]byte{ - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, -} - -// xtob converts hex characters x1 and x2 into a byte. -func xtob(x1, x2 byte) (byte, bool) { - b1 := xvalues[x1] - b2 := xvalues[x2] - return (b1 << 4) | b2, b1 != 255 && b2 != 255 -} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go deleted file mode 100644 index a57207aeb..000000000 --- a/vendor/github.com/google/uuid/uuid.go +++ /dev/null @@ -1,294 +0,0 @@ -// Copyright 2018 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "bytes" - "crypto/rand" - "encoding/hex" - "errors" - "fmt" - "io" - "strings" - "sync" -) - -// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC -// 4122. -type UUID [16]byte - -// A Version represents a UUID's version. -type Version byte - -// A Variant represents a UUID's variant. -type Variant byte - -// Constants returned by Variant. -const ( - Invalid = Variant(iota) // Invalid UUID - RFC4122 // The variant specified in RFC4122 - Reserved // Reserved, NCS backward compatibility. - Microsoft // Reserved, Microsoft Corporation backward compatibility. - Future // Reserved for future definition. 
-) - -const randPoolSize = 16 * 16 - -var ( - rander = rand.Reader // random function - poolEnabled = false - poolMu sync.Mutex - poolPos = randPoolSize // protected with poolMu - pool [randPoolSize]byte // protected with poolMu -) - -type invalidLengthError struct{ len int } - -func (err invalidLengthError) Error() string { - return fmt.Sprintf("invalid UUID length: %d", err.len) -} - -// IsInvalidLengthError is matcher function for custom error invalidLengthError -func IsInvalidLengthError(err error) bool { - _, ok := err.(invalidLengthError) - return ok -} - -// Parse decodes s into a UUID or returns an error. Both the standard UUID -// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the -// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex -// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. -func Parse(s string) (UUID, error) { - var uuid UUID - switch len(s) { - // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36: - - // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36 + 9: - if strings.ToLower(s[:9]) != "urn:uuid:" { - return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) - } - s = s[9:] - - // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} - case 36 + 2: - s = s[1:] - - // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - case 32: - var ok bool - for i := range uuid { - uuid[i], ok = xtob(s[i*2], s[i*2+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - } - return uuid, nil - default: - return uuid, invalidLengthError{len(s)} - } - // s is now at least 36 bytes long - // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { - return uuid, errors.New("invalid UUID format") - } - for i, x := range [16]int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34} { - v, ok := xtob(s[x], s[x+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - uuid[i] = v - } - return uuid, nil -} - -// ParseBytes is like Parse, except it parses a byte slice instead of a string. -func ParseBytes(b []byte) (UUID, error) { - var uuid UUID - switch len(b) { - case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { - return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) - } - b = b[9:] - case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} - b = b[1:] - case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - var ok bool - for i := 0; i < 32; i += 2 { - uuid[i/2], ok = xtob(b[i], b[i+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - } - return uuid, nil - default: - return uuid, invalidLengthError{len(b)} - } - // s is now at least 36 bytes long - // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { - return uuid, errors.New("invalid UUID format") - } - for i, x := range [16]int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34} { - v, ok := xtob(b[x], b[x+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - uuid[i] = v - } - return uuid, nil -} - -// MustParse is like Parse but panics if the string cannot be parsed. -// It simplifies safe initialization of global variables holding compiled UUIDs. 
-func MustParse(s string) UUID { - uuid, err := Parse(s) - if err != nil { - panic(`uuid: Parse(` + s + `): ` + err.Error()) - } - return uuid -} - -// FromBytes creates a new UUID from a byte slice. Returns an error if the slice -// does not have a length of 16. The bytes are copied from the slice. -func FromBytes(b []byte) (uuid UUID, err error) { - err = uuid.UnmarshalBinary(b) - return uuid, err -} - -// Must returns uuid if err is nil and panics otherwise. -func Must(uuid UUID, err error) UUID { - if err != nil { - panic(err) - } - return uuid -} - -// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx -// , or "" if uuid is invalid. -func (uuid UUID) String() string { - var buf [36]byte - encodeHex(buf[:], uuid) - return string(buf[:]) -} - -// URN returns the RFC 2141 URN form of uuid, -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. -func (uuid UUID) URN() string { - var buf [36 + 9]byte - copy(buf[:], "urn:uuid:") - encodeHex(buf[9:], uuid) - return string(buf[:]) -} - -func encodeHex(dst []byte, uuid UUID) { - hex.Encode(dst, uuid[:4]) - dst[8] = '-' - hex.Encode(dst[9:13], uuid[4:6]) - dst[13] = '-' - hex.Encode(dst[14:18], uuid[6:8]) - dst[18] = '-' - hex.Encode(dst[19:23], uuid[8:10]) - dst[23] = '-' - hex.Encode(dst[24:], uuid[10:]) -} - -// Variant returns the variant encoded in uuid. -func (uuid UUID) Variant() Variant { - switch { - case (uuid[8] & 0xc0) == 0x80: - return RFC4122 - case (uuid[8] & 0xe0) == 0xc0: - return Microsoft - case (uuid[8] & 0xe0) == 0xe0: - return Future - default: - return Reserved - } -} - -// Version returns the version of uuid. -func (uuid UUID) Version() Version { - return Version(uuid[6] >> 4) -} - -func (v Version) String() string { - if v > 15 { - return fmt.Sprintf("BAD_VERSION_%d", v) - } - return fmt.Sprintf("VERSION_%d", v) -} - -func (v Variant) String() string { - switch v { - case RFC4122: - return "RFC4122" - case Reserved: - return "Reserved" - case Microsoft: - return "Microsoft" - case Future: - return "Future" - case Invalid: - return "Invalid" - } - return fmt.Sprintf("BadVariant%d", int(v)) -} - -// SetRand sets the random number generator to r, which implements io.Reader. -// If r.Read returns an error when the package requests random data then -// a panic will be issued. -// -// Calling SetRand with nil sets the random number generator to the default -// generator. -func SetRand(r io.Reader) { - if r == nil { - rander = rand.Reader - return - } - rander = r -} - -// EnableRandPool enables internal randomness pool used for Random -// (Version 4) UUID generation. The pool contains random bytes read from -// the random number generator on demand in batches. Enabling the pool -// may improve the UUID generation throughput significantly. -// -// Since the pool is stored on the Go heap, this feature may be a bad fit -// for security sensitive applications. -// -// Both EnableRandPool and DisableRandPool are not thread-safe and should -// only be called when there is no possibility that New or any other -// UUID Version 4 generation function will be called concurrently. -func EnableRandPool() { - poolEnabled = true -} - -// DisableRandPool disables the randomness pool if it was previously -// enabled with EnableRandPool. -// -// Both EnableRandPool and DisableRandPool are not thread-safe and should -// only be called when there is no possibility that New or any other -// UUID Version 4 generation function will be called concurrently. 
-func DisableRandPool() { - poolEnabled = false - defer poolMu.Unlock() - poolMu.Lock() - poolPos = randPoolSize -} diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go deleted file mode 100644 index 463109629..000000000 --- a/vendor/github.com/google/uuid/version1.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" -) - -// NewUUID returns a Version 1 UUID based on the current NodeID and clock -// sequence, and the current time. If the NodeID has not been set by SetNodeID -// or SetNodeInterface then it will be set automatically. If the NodeID cannot -// be set NewUUID returns nil. If clock sequence has not been set by -// SetClockSequence then it will be set automatically. If GetTime fails to -// return the current NewUUID returns nil and an error. -// -// In most cases, New should be used. -func NewUUID() (UUID, error) { - var uuid UUID - now, seq, err := GetTime() - if err != nil { - return uuid, err - } - - timeLow := uint32(now & 0xffffffff) - timeMid := uint16((now >> 32) & 0xffff) - timeHi := uint16((now >> 48) & 0x0fff) - timeHi |= 0x1000 // Version 1 - - binary.BigEndian.PutUint32(uuid[0:], timeLow) - binary.BigEndian.PutUint16(uuid[4:], timeMid) - binary.BigEndian.PutUint16(uuid[6:], timeHi) - binary.BigEndian.PutUint16(uuid[8:], seq) - - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - copy(uuid[10:], nodeID[:]) - nodeMu.Unlock() - - return uuid, nil -} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go deleted file mode 100644 index 7697802e4..000000000 --- a/vendor/github.com/google/uuid/version4.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import "io" - -// New creates a new random UUID or panics. New is equivalent to -// the expression -// -// uuid.Must(uuid.NewRandom()) -func New() UUID { - return Must(NewRandom()) -} - -// NewString creates a new random UUID and returns it as a string or panics. -// NewString is equivalent to the expression -// -// uuid.New().String() -func NewString() string { - return Must(NewRandom()).String() -} - -// NewRandom returns a Random (Version 4) UUID. -// -// The strength of the UUIDs is based on the strength of the crypto/rand -// package. -// -// Uses the randomness pool if it was enabled with EnableRandPool. -// -// A note about uniqueness derived from the UUID Wikipedia entry: -// -// Randomly generated UUIDs have 122 random bits. One's annual risk of being -// hit by a meteorite is estimated to be one chance in 17 billion, that -// means the probability is about 0.00000000006 (6 × 10−11), -// equivalent to the odds of creating a few tens of trillions of UUIDs in a -// year and having one duplicate. -func NewRandom() (UUID, error) { - if !poolEnabled { - return NewRandomFromReader(rander) - } - return newRandomFromPool() -} - -// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader. 
-func NewRandomFromReader(r io.Reader) (UUID, error) { - var uuid UUID - _, err := io.ReadFull(r, uuid[:]) - if err != nil { - return Nil, err - } - uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 - uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 - return uuid, nil -} - -func newRandomFromPool() (UUID, error) { - var uuid UUID - poolMu.Lock() - if poolPos == randPoolSize { - _, err := io.ReadFull(rander, pool[:]) - if err != nil { - poolMu.Unlock() - return Nil, err - } - poolPos = 0 - } - copy(uuid[:], pool[poolPos:(poolPos+16)]) - poolPos += 16 - poolMu.Unlock() - - uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 - uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 - return uuid, nil -} diff --git a/vendor/github.com/imdario/mergo/.deepsource.toml b/vendor/github.com/imdario/mergo/.deepsource.toml deleted file mode 100644 index 8a0681af8..000000000 --- a/vendor/github.com/imdario/mergo/.deepsource.toml +++ /dev/null @@ -1,12 +0,0 @@ -version = 1 - -test_patterns = [ - "*_test.go" -] - -[[analyzers]] -name = "go" -enabled = true - - [analyzers.meta] - import_path = "github.com/imdario/mergo" \ No newline at end of file diff --git a/vendor/github.com/imdario/mergo/.gitignore b/vendor/github.com/imdario/mergo/.gitignore deleted file mode 100644 index 529c3412b..000000000 --- a/vendor/github.com/imdario/mergo/.gitignore +++ /dev/null @@ -1,33 +0,0 @@ -#### joe made this: http://goel.io/joe - -#### go #### -# Binaries for programs and plugins -*.exe -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 -.glide/ - -#### vim #### -# Swap -[._]*.s[a-v][a-z] -[._]*.sw[a-p] -[._]s[a-v][a-z] -[._]sw[a-p] - -# Session -Session.vim - -# Temporary -.netrwhist -*~ -# Auto-generated tag files -tags diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml deleted file mode 100644 index d324c43ba..000000000 --- a/vendor/github.com/imdario/mergo/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go -arch: - - amd64 - - ppc64le -install: - - go get -t - - go get golang.org/x/tools/cmd/cover - - go get github.com/mattn/goveralls -script: - - go test -race -v ./... -after_script: - - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN diff --git a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md deleted file mode 100644 index 469b44907..000000000 --- a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,46 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
- -## Our Standards - -Examples of behavior that contributes to creating a positive environment include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/imdario/mergo/CONTRIBUTING.md b/vendor/github.com/imdario/mergo/CONTRIBUTING.md deleted file mode 100644 index 0a1ff9f94..000000000 --- a/vendor/github.com/imdario/mergo/CONTRIBUTING.md +++ /dev/null @@ -1,112 +0,0 @@ - -# Contributing to mergo - -First off, thanks for taking the time to contribute! ❤️ - -All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 
🎉 - -> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about: -> - Star the project -> - Tweet about it -> - Refer this project in your project's readme -> - Mention the project at local meetups and tell your friends/colleagues - - -## Table of Contents - -- [Code of Conduct](#code-of-conduct) -- [I Have a Question](#i-have-a-question) -- [I Want To Contribute](#i-want-to-contribute) -- [Reporting Bugs](#reporting-bugs) -- [Suggesting Enhancements](#suggesting-enhancements) - -## Code of Conduct - -This project and everyone participating in it is governed by the -[mergo Code of Conduct](https://github.com/imdario/mergoblob/master/CODE_OF_CONDUCT.md). -By participating, you are expected to uphold this code. Please report unacceptable behavior -to <>. - - -## I Have a Question - -> If you want to ask a question, we assume that you have read the available [Documentation](https://pkg.go.dev/github.com/imdario/mergo). - -Before you ask a question, it is best to search for existing [Issues](https://github.com/imdario/mergo/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first. - -If you then still feel the need to ask a question and need clarification, we recommend the following: - -- Open an [Issue](https://github.com/imdario/mergo/issues/new). -- Provide as much context as you can about what you're running into. -- Provide project and platform versions (nodejs, npm, etc), depending on what seems relevant. - -We will then take care of the issue as soon as possible. - -## I Want To Contribute - -> ### Legal Notice -> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license. - -### Reporting Bugs - - -#### Before Submitting a Bug Report - -A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible. - -- Make sure that you are using the latest version. -- Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](). If you are looking for support, you might want to check [this section](#i-have-a-question)). -- To see if other users have experienced (and potentially already solved) the same issue you are having, check if there is not already a bug report existing for your bug or error in the [bug tracker](https://github.com/imdario/mergoissues?q=label%3Abug). -- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue. -- Collect information about the bug: -- Stack trace (Traceback) -- OS, Platform and Version (Windows, Linux, macOS, x86, ARM) -- Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant. -- Possibly your input and the output -- Can you reliably reproduce the issue? And can you also reproduce it with older versions? 
- - -#### How Do I Submit a Good Bug Report? - -> You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead sensitive bugs must be sent by email to . - - -We use GitHub issues to track bugs and errors. If you run into an issue with the project: - -- Open an [Issue](https://github.com/imdario/mergo/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.) -- Explain the behavior you would expect and the actual behavior. -- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case. -- Provide the information you collected in the previous section. - -Once it's filed: - -- The project team will label the issue accordingly. -- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced. -- If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be implemented by someone. - -### Suggesting Enhancements - -This section guides you through submitting an enhancement suggestion for mergo, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions. - - -#### Before Submitting an Enhancement - -- Make sure that you are using the latest version. -- Read the [documentation]() carefully and find out if the functionality is already covered, maybe by an individual configuration. -- Perform a [search](https://github.com/imdario/mergo/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one. -- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library. - - -#### How Do I Submit a Good Enhancement Suggestion? - -Enhancement suggestions are tracked as [GitHub issues](https://github.com/imdario/mergo/issues). - -- Use a **clear and descriptive title** for the issue to identify the suggestion. -- Provide a **step-by-step description of the suggested enhancement** in as many details as possible. -- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you. -- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. 
-- **Explain why this enhancement would be useful** to most mergo users. You may also want to point out the other projects that solved it better and which could serve as inspiration. - - -## Attribution -This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)! diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/github.com/imdario/mergo/LICENSE deleted file mode 100644 index 686680298..000000000 --- a/vendor/github.com/imdario/mergo/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2013 Dario Castañé. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md deleted file mode 100644 index 4f0287498..000000000 --- a/vendor/github.com/imdario/mergo/README.md +++ /dev/null @@ -1,236 +0,0 @@ -# Mergo - -[![GoDoc][3]][4] -[![GitHub release][5]][6] -[![GoCard][7]][8] -[![Build Status][1]][2] -[![Coverage Status][9]][10] -[![Sourcegraph][11]][12] -[![FOSSA Status][13]][14] -[![Become my sponsor][15]][16] -[![Tidelift][17]][18] - -[1]: https://travis-ci.org/imdario/mergo.png -[2]: https://travis-ci.org/imdario/mergo -[3]: https://godoc.org/github.com/imdario/mergo?status.svg -[4]: https://godoc.org/github.com/imdario/mergo -[5]: https://img.shields.io/github/release/imdario/mergo.svg -[6]: https://github.com/imdario/mergo/releases -[7]: https://goreportcard.com/badge/imdario/mergo -[8]: https://goreportcard.com/report/github.com/imdario/mergo -[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master -[10]: https://coveralls.io/github/imdario/mergo?branch=master -[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg -[12]: https://sourcegraph.com/github.com/imdario/mergo?badge -[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield -[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield -[15]: https://img.shields.io/github/sponsors/imdario -[16]: https://github.com/sponsors/imdario -[17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo -[18]: https://tidelift.com/subscription/pkg/go-github.com-imdario-mergo - -A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - -Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). - -Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. - -## Status - -It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild). - -### Important note - -Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules. - -Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code. - -If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). - -### Donations - -If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes: - -
Buy Me a Coffee at ko-fi.com -Donate using Liberapay -Become my sponsor - -### Mergo in the wild - -- [moby/moby](https://github.com/moby/moby) -- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) -- [vmware/dispatch](https://github.com/vmware/dispatch) -- [Shopify/themekit](https://github.com/Shopify/themekit) -- [imdario/zas](https://github.com/imdario/zas) -- [matcornic/hermes](https://github.com/matcornic/hermes) -- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go) -- [kataras/iris](https://github.com/kataras/iris) -- [michaelsauter/crane](https://github.com/michaelsauter/crane) -- [go-task/task](https://github.com/go-task/task) -- [sensu/uchiwa](https://github.com/sensu/uchiwa) -- [ory/hydra](https://github.com/ory/hydra) -- [sisatech/vcli](https://github.com/sisatech/vcli) -- [dairycart/dairycart](https://github.com/dairycart/dairycart) -- [projectcalico/felix](https://github.com/projectcalico/felix) -- [resin-os/balena](https://github.com/resin-os/balena) -- [go-kivik/kivik](https://github.com/go-kivik/kivik) -- [Telefonica/govice](https://github.com/Telefonica/govice) -- [supergiant/supergiant](supergiant/supergiant) -- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce) -- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy) -- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel) -- [EagerIO/Stout](https://github.com/EagerIO/Stout) -- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api) -- [russross/canvasassignments](https://github.com/russross/canvasassignments) -- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api) -- [casualjim/exeggutor](https://github.com/casualjim/exeggutor) -- [divshot/gitling](https://github.com/divshot/gitling) -- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl) -- [andrerocker/deploy42](https://github.com/andrerocker/deploy42) -- [elwinar/rambler](https://github.com/elwinar/rambler) -- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman) -- [jfbus/impressionist](https://github.com/jfbus/impressionist) -- [Jmeyering/zealot](https://github.com/Jmeyering/zealot) -- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host) -- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go) -- [thoas/picfit](https://github.com/thoas/picfit) -- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) -- [jnuthong/item_search](https://github.com/jnuthong/item_search) -- [bukalapak/snowboard](https://github.com/bukalapak/snowboard) -- [containerssh/containerssh](https://github.com/containerssh/containerssh) -- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser) -- [tjpnz/structbot](https://github.com/tjpnz/structbot) - -## Install - - go get github.com/imdario/mergo - - // use in your .go code - import ( - "github.com/imdario/mergo" - ) - -## Usage - -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). - -```go -if err := mergo.Merge(&dst, src); err != nil { - // ... -} -``` - -Also, you can merge overwriting values using the transformer `WithOverride`. 
- -```go -if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { - // ... -} -``` - -Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field. - -```go -if err := mergo.Map(&dst, srcMap); err != nil { - // ... -} -``` - -Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values. - -Here is a nice example: - -```go -package main - -import ( - "fmt" - "github.com/imdario/mergo" -) - -type Foo struct { - A string - B int64 -} - -func main() { - src := Foo{ - A: "one", - B: 2, - } - dest := Foo{ - A: "two", - } - mergo.Merge(&dest, src) - fmt.Println(dest) - // Will print - // {two 2} -} -``` - -Note: if test are failing due missing package, please execute: - - go get gopkg.in/yaml.v3 - -### Transformers - -Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`? - -```go -package main - -import ( - "fmt" - "github.com/imdario/mergo" - "reflect" - "time" -) - -type timeTransformer struct { -} - -func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { - if typ == reflect.TypeOf(time.Time{}) { - return func(dst, src reflect.Value) error { - if dst.CanSet() { - isZero := dst.MethodByName("IsZero") - result := isZero.Call([]reflect.Value{}) - if result[0].Bool() { - dst.Set(src) - } - } - return nil - } - } - return nil -} - -type Snapshot struct { - Time time.Time - // ... -} - -func main() { - src := Snapshot{time.Now()} - dest := Snapshot{} - mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) - fmt.Println(dest) - // Will print - // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } -} -``` - -## Contact me - -If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) - -## About - -Written by [Dario Castañé](http://dario.im). - -## License - -[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). - - -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large) diff --git a/vendor/github.com/imdario/mergo/SECURITY.md b/vendor/github.com/imdario/mergo/SECURITY.md deleted file mode 100644 index a5de61f77..000000000 --- a/vendor/github.com/imdario/mergo/SECURITY.md +++ /dev/null @@ -1,14 +0,0 @@ -# Security Policy - -## Supported Versions - -| Version | Supported | -| ------- | ------------------ | -| 0.3.x | :white_check_mark: | -| < 0.3 | :x: | - -## Security contact information - -To report a security vulnerability, please use the -[Tidelift security contact](https://tidelift.com/security). -Tidelift will coordinate the fix and disclosure. diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go deleted file mode 100644 index fcd985f99..000000000 --- a/vendor/github.com/imdario/mergo/doc.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2013 Dario Castañé. 
All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - -Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). - -Status - -It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc. - -Important note - -Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. . Also, this version adds suppot for go modules. - -Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code. - -If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). - -Install - -Do your usual installation procedure: - - go get github.com/imdario/mergo - - // use in your .go code - import ( - "github.com/imdario/mergo" - ) - -Usage - -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). - - if err := mergo.Merge(&dst, src); err != nil { - // ... - } - -Also, you can merge overwriting values using the transformer WithOverride. - - if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { - // ... - } - -Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field. - - if err := mergo.Map(&dst, srcMap); err != nil { - // ... - } - -Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values. - -Here is a nice example: - - package main - - import ( - "fmt" - "github.com/imdario/mergo" - ) - - type Foo struct { - A string - B int64 - } - - func main() { - src := Foo{ - A: "one", - B: 2, - } - dest := Foo{ - A: "two", - } - mergo.Merge(&dest, src) - fmt.Println(dest) - // Will print - // {two 2} - } - -Transformers - -Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time? 
- - package main - - import ( - "fmt" - "github.com/imdario/mergo" - "reflect" - "time" - ) - - type timeTransformer struct { - } - - func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { - if typ == reflect.TypeOf(time.Time{}) { - return func(dst, src reflect.Value) error { - if dst.CanSet() { - isZero := dst.MethodByName("IsZero") - result := isZero.Call([]reflect.Value{}) - if result[0].Bool() { - dst.Set(src) - } - } - return nil - } - } - return nil - } - - type Snapshot struct { - Time time.Time - // ... - } - - func main() { - src := Snapshot{time.Now()} - dest := Snapshot{} - mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) - fmt.Println(dest) - // Will print - // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } - } - -Contact me - -If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario - -About - -Written by Dario Castañé: https://da.rio.hn - -License - -BSD 3-Clause license, as Go language. - -*/ -package mergo diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go deleted file mode 100644 index b50d5c2a4..000000000 --- a/vendor/github.com/imdario/mergo/map.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2014 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "fmt" - "reflect" - "unicode" - "unicode/utf8" -) - -func changeInitialCase(s string, mapper func(rune) rune) string { - if s == "" { - return s - } - r, n := utf8.DecodeRuneInString(s) - return string(mapper(r)) + s[n:] -} - -func isExported(field reflect.StructField) bool { - r, _ := utf8.DecodeRuneInString(field.Name) - return r >= 'A' && r <= 'Z' -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { - overwrite := config.Overwrite - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... 
- visited[h] = &visit{typ, seen, addr} - } - zeroValue := reflect.Value{} - switch dst.Kind() { - case reflect.Map: - dstMap := dst.Interface().(map[string]interface{}) - for i, n := 0, src.NumField(); i < n; i++ { - srcType := src.Type() - field := srcType.Field(i) - if !isExported(field) { - continue - } - fieldName := field.Name - fieldName = changeInitialCase(fieldName, unicode.ToLower) - if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) { - dstMap[fieldName] = src.Field(i).Interface() - } - } - case reflect.Ptr: - if dst.IsNil() { - v := reflect.New(dst.Type().Elem()) - dst.Set(v) - } - dst = dst.Elem() - fallthrough - case reflect.Struct: - srcMap := src.Interface().(map[string]interface{}) - for key := range srcMap { - config.overwriteWithEmptyValue = true - srcValue := srcMap[key] - fieldName := changeInitialCase(key, unicode.ToUpper) - dstElement := dst.FieldByName(fieldName) - if dstElement == zeroValue { - // We discard it because the field doesn't exist. - continue - } - srcElement := reflect.ValueOf(srcValue) - dstKind := dstElement.Kind() - srcKind := srcElement.Kind() - if srcKind == reflect.Ptr && dstKind != reflect.Ptr { - srcElement = srcElement.Elem() - srcKind = reflect.TypeOf(srcElement.Interface()).Kind() - } else if dstKind == reflect.Ptr { - // Can this work? I guess it can't. - if srcKind != reflect.Ptr && srcElement.CanAddr() { - srcPtr := srcElement.Addr() - srcElement = reflect.ValueOf(srcPtr) - srcKind = reflect.Ptr - } - } - - if !srcElement.IsValid() { - continue - } - if srcKind == dstKind { - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface { - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else if srcKind == reflect.Map { - if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else { - return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) - } - } - } - return -} - -// Map sets fields' values in dst from src. -// src can be a map with string keys or a struct. dst must be the opposite: -// if src is a map, dst must be a valid pointer to struct. If src is a struct, -// dst must be map[string]interface{}. -// It won't merge unexported (private) fields and will do recursively -// any exported field. -// If dst is a map, keys will be src fields' names in lower camel case. -// Missing key in src that doesn't match a field in dst will be skipped. This -// doesn't apply if dst is a map. -// This is separated method from Merge because it is cleaner and it keeps sane -// semantics: merging equal types, mapping different (restricted) types. -func Map(dst, src interface{}, opts ...func(*Config)) error { - return _map(dst, src, opts...) -} - -// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by -// non-empty src attribute values. -// Deprecated: Use Map(…) with WithOverride -func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { - return _map(dst, src, append(opts, WithOverride)...) 
-} - -func _map(dst, src interface{}, opts ...func(*Config)) error { - if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerArgument - } - var ( - vDst, vSrc reflect.Value - err error - ) - config := &Config{} - - for _, opt := range opts { - opt(config) - } - - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } - // To be friction-less, we redirect equal-type arguments - // to deepMerge. Only because arguments can be anything. - if vSrc.Kind() == vDst.Kind() { - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) - } - switch vSrc.Kind() { - case reflect.Struct: - if vDst.Kind() != reflect.Map { - return ErrExpectedMapAsDestination - } - case reflect.Map: - if vDst.Kind() != reflect.Struct { - return ErrExpectedStructAsDestination - } - default: - return ErrNotSupported - } - return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config) -} diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go deleted file mode 100644 index 0ef9b2138..000000000 --- a/vendor/github.com/imdario/mergo/merge.go +++ /dev/null @@ -1,409 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "fmt" - "reflect" -) - -func hasMergeableFields(dst reflect.Value) (exported bool) { - for i, n := 0, dst.NumField(); i < n; i++ { - field := dst.Type().Field(i) - if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { - exported = exported || hasMergeableFields(dst.Field(i)) - } else if isExportedComponent(&field) { - exported = exported || len(field.PkgPath) == 0 - } - } - return -} - -func isExportedComponent(field *reflect.StructField) bool { - pkgPath := field.PkgPath - if len(pkgPath) > 0 { - return false - } - c := field.Name[0] - if 'a' <= c && c <= 'z' || c == '_' { - return false - } - return true -} - -type Config struct { - Transformers Transformers - Overwrite bool - ShouldNotDereference bool - AppendSlice bool - TypeCheck bool - overwriteWithEmptyValue bool - overwriteSliceWithEmptyValue bool - sliceDeepCopy bool - debug bool -} - -type Transformers interface { - Transformer(reflect.Type) func(dst, src reflect.Value) error -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { - overwrite := config.Overwrite - typeCheck := config.TypeCheck - overwriteWithEmptySrc := config.overwriteWithEmptyValue - overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue - sliceDeepCopy := config.sliceDeepCopy - - if !src.IsValid() { - return - } - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... 
- visited[h] = &visit{typ, seen, addr} - } - - if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() { - if fn := config.Transformers.Transformer(dst.Type()); fn != nil { - err = fn(dst, src) - return - } - } - - switch dst.Kind() { - case reflect.Struct: - if hasMergeableFields(dst) { - for i, n := 0, dst.NumField(); i < n; i++ { - if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { - return - } - } - } else { - if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) { - dst.Set(src) - } - } - case reflect.Map: - if dst.IsNil() && !src.IsNil() { - if dst.CanSet() { - dst.Set(reflect.MakeMap(dst.Type())) - } else { - dst = src - return - } - } - - if src.Kind() != reflect.Map { - if overwrite && dst.CanSet() { - dst.Set(src) - } - return - } - - for _, key := range src.MapKeys() { - srcElement := src.MapIndex(key) - if !srcElement.IsValid() { - continue - } - dstElement := dst.MapIndex(key) - switch srcElement.Kind() { - case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: - if srcElement.IsNil() { - if overwrite { - dst.SetMapIndex(key, srcElement) - } - continue - } - fallthrough - default: - if !srcElement.CanInterface() { - continue - } - switch reflect.TypeOf(srcElement.Interface()).Kind() { - case reflect.Struct: - fallthrough - case reflect.Ptr: - fallthrough - case reflect.Map: - srcMapElm := srcElement - dstMapElm := dstElement - if srcMapElm.CanInterface() { - srcMapElm = reflect.ValueOf(srcMapElm.Interface()) - if dstMapElm.IsValid() { - dstMapElm = reflect.ValueOf(dstMapElm.Interface()) - } - } - if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil { - return - } - case reflect.Slice: - srcSlice := reflect.ValueOf(srcElement.Interface()) - - var dstSlice reflect.Value - if !dstElement.IsValid() || dstElement.IsNil() { - dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len()) - } else { - dstSlice = reflect.ValueOf(dstElement.Interface()) - } - - if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { - if typeCheck && srcSlice.Type() != dstSlice.Type() { - return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) - } - dstSlice = srcSlice - } else if config.AppendSlice { - if srcSlice.Type() != dstSlice.Type() { - return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) - } - dstSlice = reflect.AppendSlice(dstSlice, srcSlice) - } else if sliceDeepCopy { - i := 0 - for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ { - srcElement := srcSlice.Index(i) - dstElement := dstSlice.Index(i) - - if srcElement.CanInterface() { - srcElement = reflect.ValueOf(srcElement.Interface()) - } - if dstElement.CanInterface() { - dstElement = reflect.ValueOf(dstElement.Interface()) - } - - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } - - } - dst.SetMapIndex(key, dstSlice) - } - } - - if dstElement.IsValid() && !isEmptyValue(dstElement, !config.ShouldNotDereference) { - if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice { - continue - } - if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map && reflect.TypeOf(dstElement.Interface()).Kind() == reflect.Map { - continue - } - } 
- - if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement, !config.ShouldNotDereference)) { - if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - dst.SetMapIndex(key, srcElement) - } - } - - // Ensure that all keys in dst are deleted if they are not in src. - if overwriteWithEmptySrc { - for _, key := range dst.MapKeys() { - srcElement := src.MapIndex(key) - if !srcElement.IsValid() { - dst.SetMapIndex(key, reflect.Value{}) - } - } - } - case reflect.Slice: - if !dst.CanSet() { - break - } - if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { - dst.Set(src) - } else if config.AppendSlice { - if src.Type() != dst.Type() { - return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) - } - dst.Set(reflect.AppendSlice(dst, src)) - } else if sliceDeepCopy { - for i := 0; i < src.Len() && i < dst.Len(); i++ { - srcElement := src.Index(i) - dstElement := dst.Index(i) - if srcElement.CanInterface() { - srcElement = reflect.ValueOf(srcElement.Interface()) - } - if dstElement.CanInterface() { - dstElement = reflect.ValueOf(dstElement.Interface()) - } - - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } - } - case reflect.Ptr: - fallthrough - case reflect.Interface: - if isReflectNil(src) { - if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) { - dst.Set(src) - } - break - } - - if src.Kind() != reflect.Interface { - if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { - if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { - dst.Set(src) - } - } else if src.Kind() == reflect.Ptr { - if !config.ShouldNotDereference { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return - } - } else { - if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() { - dst.Set(src) - } - } - } else if dst.Elem().Type() == src.Type() { - if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { - return - } - } else { - return ErrDifferentArgumentsTypes - } - break - } - - if dst.IsNil() || overwrite { - if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { - dst.Set(src) - } - break - } - - if dst.Elem().Kind() == src.Elem().Kind() { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return - } - break - } - default: - mustSet := (isEmptyValue(dst, !config.ShouldNotDereference) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) - if mustSet { - if dst.CanSet() { - dst.Set(src) - } else { - dst = src - } - } - } - - return -} - -// Merge will fill any empty for value type attributes on the dst struct using corresponding -// src attributes if they themselves are not empty. dst and src must be valid same-type structs -// and dst must be a pointer to struct. -// It won't merge unexported (private) fields and will do recursively any exported field. -func Merge(dst, src interface{}, opts ...func(*Config)) error { - return merge(dst, src, opts...) -} - -// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by -// non-empty src attribute values. 
-// Deprecated: use Merge(…) with WithOverride -func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { - return merge(dst, src, append(opts, WithOverride)...) -} - -// WithTransformers adds transformers to merge, allowing to customize the merging of some types. -func WithTransformers(transformers Transformers) func(*Config) { - return func(config *Config) { - config.Transformers = transformers - } -} - -// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values. -func WithOverride(config *Config) { - config.Overwrite = true -} - -// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values. -func WithOverwriteWithEmptyValue(config *Config) { - config.Overwrite = true - config.overwriteWithEmptyValue = true -} - -// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice. -func WithOverrideEmptySlice(config *Config) { - config.overwriteSliceWithEmptyValue = true -} - -// WithoutDereference prevents dereferencing pointers when evaluating whether they are empty -// (i.e. a non-nil pointer is never considered empty). -func WithoutDereference(config *Config) { - config.ShouldNotDereference = true -} - -// WithAppendSlice will make merge append slices instead of overwriting it. -func WithAppendSlice(config *Config) { - config.AppendSlice = true -} - -// WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride). -func WithTypeCheck(config *Config) { - config.TypeCheck = true -} - -// WithSliceDeepCopy will merge slice element one by one with Overwrite flag. -func WithSliceDeepCopy(config *Config) { - config.sliceDeepCopy = true - config.Overwrite = true -} - -func merge(dst, src interface{}, opts ...func(*Config)) error { - if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerArgument - } - var ( - vDst, vSrc reflect.Value - err error - ) - - config := &Config{} - - for _, opt := range opts { - opt(config) - } - - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } - if vDst.Type() != vSrc.Type() { - return ErrDifferentArgumentsTypes - } - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) -} - -// IsReflectNil is the reflect value provided nil -func isReflectNil(v reflect.Value) bool { - k := v.Kind() - switch k { - case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr: - // Both interface and slice are nil if first word is 0. - // Both are always bigger than a word; assume flagIndir. - return v.IsNil() - default: - return false - } -} diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go deleted file mode 100644 index 0a721e2d8..000000000 --- a/vendor/github.com/imdario/mergo/mergo.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "errors" - "reflect" -) - -// Errors reported by Mergo when it finds invalid arguments. 
-var ( - ErrNilArguments = errors.New("src and dst must not be nil") - ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") - ErrNotSupported = errors.New("only structs, maps, and slices are supported") - ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") - ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") - ErrNonPointerArgument = errors.New("dst must be a pointer") -) - -// During deepMerge, must keep track of checks that are -// in progress. The comparison algorithm assumes that all -// checks in progress are true when it reencounters them. -// Visited are stored in a map indexed by 17 * a1 + a2; -type visit struct { - typ reflect.Type - next *visit - ptr uintptr -} - -// From src/pkg/encoding/json/encode.go. -func isEmptyValue(v reflect.Value, shouldDereference bool) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - if v.IsNil() { - return true - } - if shouldDereference { - return isEmptyValue(v.Elem(), shouldDereference) - } - return false - case reflect.Func: - return v.IsNil() - case reflect.Invalid: - return true - } - return false -} - -func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { - if dst == nil || src == nil { - err = ErrNilArguments - return - } - vDst = reflect.ValueOf(dst).Elem() - if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice { - err = ErrNotSupported - return - } - vSrc = reflect.ValueOf(src) - // We check if vSrc is a pointer to dereference it. - if vSrc.Kind() == reflect.Ptr { - vSrc = vSrc.Elem() - } - return -} diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE deleted file mode 100644 index 1eb75ef68..000000000 --- a/vendor/github.com/klauspost/compress/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2019 Klaus Post. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md deleted file mode 100644 index ea7324da6..000000000 --- a/vendor/github.com/klauspost/compress/fse/README.md +++ /dev/null @@ -1,79 +0,0 @@ -# Finite State Entropy - -This package provides Finite State Entropy encoding and decoding. - -Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS)) -encoding provides a fast near-optimal symbol encoding/decoding -for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd). - -This can be used for compressing input with a lot of similar input values to the smallest number of bytes. -This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, -but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. - -* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse) - -## News - - * Feb 2018: First implementation released. Consider this beta software for now. - -# Usage - -This package provides a low level interface that allows to compress single independent blocks. - -Each block is separate, and there is no built in integrity checks. -This means that the caller should keep track of block sizes and also do checksums if needed. - -Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function. -You must provide input and will receive the output and maybe an error. - -These error values can be returned: - -| Error | Description | -|---------------------|-----------------------------------------------------------------------------| -| `` | Everything ok, output is returned | -| `ErrIncompressible` | Returned when input is judged to be too hard to compress | -| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | -| `(error)` | An internal error occurred. | - -As can be seen above there are errors that will be returned even under normal operation so it is important to handle these. - -To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object -that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same -object can be used for both. - -Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this -you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. - -Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function. -You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back -your input was likely corrupted. 
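A minimal round-trip sketch of the API described above, re-using one `Scratch` for both directions and treating `ErrIncompressible`/`ErrUseRLE` as expected outcomes rather than failures (illustrative only; the function, type, and field names are the ones named in this section, the sample input is made up):

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/compress/fse"
)

func main() {
	in := bytes.Repeat([]byte("fse works best on blocks with a skewed symbol distribution "), 100)

	var s fse.Scratch // one scratch, re-used for compression and decompression

	comp, err := fse.Compress(in, &s)
	switch err {
	case nil:
		fmt.Printf("compressed %d -> %d bytes\n", len(in), len(comp))
	case fse.ErrIncompressible, fse.ErrUseRLE:
		// Expected conditions, not failures: store the block uncompressed
		// (or run-length encoded) instead.
		log.Printf("block not entropy coded: %v", err)
		return
	default:
		log.Fatal(err)
	}

	// comp aliases s.Out, and the same buffer is re-used for decompression
	// output, so copy it (and clear s.Out) before calling Decompress.
	comp = append([]byte(nil), comp...)
	s.Out = nil

	dec, err := fse.Decompress(comp, &s)
	if err != nil {
		log.Fatal(err)
	}
	// There is no built-in integrity check; verify against the original here.
	fmt.Println("round trip ok:", bytes.Equal(dec, in))
}
```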
- -It is important to note that a successful decoding does *not* mean your output matches your original input. -There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. - -For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples). - -# Performance - -A lot of factors are affecting speed. Block sizes and compressibility of the material are primary factors. -All compression functions are currently only running on the calling goroutine so only one core will be used per block. - -The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input -is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be -beneficial to transpose all your input values down by 64. - -With moderate block sizes around 64k speed are typically 200MB/s per core for compression and -around 300MB/s decompression speed. - -The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s. - -# Plans - -At one point, more internals will be exposed to facilitate more "expert" usage of the components. - -A streaming interface is also likely to be implemented. Likely compatible with [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261). - -# Contributing - -Contributions are always welcome. Be aware that adding public functions will require good justification and breaking -changes will likely not be accepted. If in doubt open an issue before writing the PR. \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go deleted file mode 100644 index f65eb3909..000000000 --- a/vendor/github.com/klauspost/compress/fse/bitreader.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -import ( - "encoding/binary" - "errors" - "io" -) - -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReader struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReader) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.bitsRead += 8 - uint8(highBits(uint32(v))) - return nil -} - -// getBits will return n bits. n can be 0. -func (b *bitReader) getBits(n uint8) uint16 { - if n == 0 || b.bitsRead >= 64 { - return 0 - } - return b.getBitsFast(n) -} - -// getBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. 
-func (b *bitReader) getBitsFast(n uint8) uint16 { - const regMask = 64 - 1 - v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - b.bitsRead += n - return v -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReader) fillFast() { - if b.bitsRead < 32 { - return - } - // 2 bounds checks. - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReader) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value = (b.value << 8) | uint64(b.in[b.off-1]) - b.bitsRead -= 8 - b.off-- - } -} - -// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. -func (b *bitReader) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReader) finished() bool { - return b.bitsRead >= 64 && b.off == 0 -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReader) close() error { - // Release reference. - b.in = nil - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go deleted file mode 100644 index 43e463611..000000000 --- a/vendor/github.com/klauspost/compress/fse/bitwriter.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -import "fmt" - -// bitWriter will write bits. -// First bit will be LSB of the first byte of output. -type bitWriter struct { - bitContainer uint64 - nBits uint8 - out []byte -} - -// bitMask16 is bitmasks. Has extra to avoid bounds check. -var bitMask16 = [32]uint16{ - 0, 1, 3, 7, 0xF, 0x1F, - 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, - 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF} /* up to 16 bits */ - -// addBits16NC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16NC(value uint16, bits uint8) { - b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - -// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// addBits16ZeroNC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -// This is fastest if bits can be zero. 
-func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { - if bits == 0 { - return - } - value <<= (16 - bits) & 15 - value >>= (16 - bits) & 15 - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// flush will flush all pending full bytes. -// There will be at least 56 bits available for writing when this has been called. -// Using flush32 is faster, but leaves less space for writing. -func (b *bitWriter) flush() { - v := b.nBits >> 3 - switch v { - case 0: - case 1: - b.out = append(b.out, - byte(b.bitContainer), - ) - case 2: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - ) - case 3: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - ) - case 4: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - ) - case 5: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - ) - case 6: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - ) - case 7: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - ) - case 8: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - byte(b.bitContainer>>56), - ) - default: - panic(fmt.Errorf("bits (%d) > 64", b.nBits)) - } - b.bitContainer >>= v << 3 - b.nBits &= 7 -} - -// flush32 will flush out, so there are at least 32 bits available for writing. -func (b *bitWriter) flush32() { - if b.nBits < 32 { - return - } - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24)) - b.nBits -= 32 - b.bitContainer >>= 32 -} - -// flushAlign will flush remaining full bytes and align to next byte boundary. -func (b *bitWriter) flushAlign() { - nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { - b.out = append(b.out, byte(b.bitContainer>>(i*8))) - } - b.nBits = 0 - b.bitContainer = 0 -} - -// close will write the alignment bit and write the final byte(s) -// to the output. -func (b *bitWriter) close() error { - // End mark - b.addBits16Clean(1, 1) - // flush until next byte. - b.flushAlign() - return nil -} - -// reset and continue writing by appending to out. -func (b *bitWriter) reset(out []byte) { - b.bitContainer = 0 - b.nBits = 0 - b.out = out -} diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go deleted file mode 100644 index abade2d60..000000000 --- a/vendor/github.com/klauspost/compress/fse/bytereader.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -// byteReader provides a byte reader that reads -// little endian values from a byte stream. -// The input stream is manually advanced. 
-// The reader performs no bounds checks. -type byteReader struct { - b []byte - off int -} - -// init will initialize the reader and set the input. -func (b *byteReader) init(in []byte) { - b.b = in - b.off = 0 -} - -// advance the stream b n bytes. -func (b *byteReader) advance(n uint) { - b.off += int(n) -} - -// Uint32 returns a little endian uint32 starting at current offset. -func (b byteReader) Uint32() uint32 { - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := uint32(b2[3]) - v2 := uint32(b2[2]) - v1 := uint32(b2[1]) - v0 := uint32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// unread returns the unread portion of the input. -func (b byteReader) unread() []byte { - return b.b[b.off:] -} - -// remain will return the number of bytes remaining. -func (b byteReader) remain() int { - return len(b.b) - b.off -} diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go deleted file mode 100644 index 0d31f5ebc..000000000 --- a/vendor/github.com/klauspost/compress/fse/compress.go +++ /dev/null @@ -1,684 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -import ( - "errors" - "fmt" -) - -// Compress the input bytes. Input must be < 2GB. -// Provide a Scratch buffer to avoid memory allocations. -// Note that the output is also kept in the scratch buffer. -// If input is too hard to compress, ErrIncompressible is returned. -// If input is a single byte value repeated ErrUseRLE is returned. -func Compress(in []byte, s *Scratch) ([]byte, error) { - if len(in) <= 1 { - return nil, ErrIncompressible - } - if len(in) > (2<<30)-1 { - return nil, errors.New("input too big, must be < 2GB") - } - s, err := s.prepare(in) - if err != nil { - return nil, err - } - - // Create histogram, if none was provided. - maxCount := s.maxCount - if maxCount == 0 { - maxCount = s.countSimple(in) - } - // Reset for next run. - s.clearCount = true - s.maxCount = 0 - if maxCount == len(in) { - // One symbol, use RLE - return nil, ErrUseRLE - } - if maxCount == 1 || maxCount < (len(in)>>7) { - // Each symbol present maximum once or too well distributed. - return nil, ErrIncompressible - } - s.optimalTableLog() - err = s.normalizeCount() - if err != nil { - return nil, err - } - err = s.writeCount() - if err != nil { - return nil, err - } - - if false { - err = s.validateNorm() - if err != nil { - return nil, err - } - } - - err = s.buildCTable() - if err != nil { - return nil, err - } - err = s.compress(in) - if err != nil { - return nil, err - } - s.Out = s.bw.out - // Check if we compressed. - if len(s.Out) >= len(in) { - return nil, ErrIncompressible - } - return s.Out, nil -} - -// cState contains the compression state of a stream. -type cState struct { - bw *bitWriter - stateTable []uint16 - state uint16 -} - -// init will initialize the compression state to the first symbol of the stream. -func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { - c.bw = bw - c.stateTable = ct.stateTable - - nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 - im := int32((nbBitsOut << 16) - first.deltaNbBits) - lu := (im >> nbBitsOut) + first.deltaFindState - c.state = c.stateTable[lu] - return -} - -// encode the output symbol provided and write it to the bitstream. 
-func (c *cState) encode(symbolTT symbolTransform) { - nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 - dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState - c.bw.addBits16NC(c.state, uint8(nbBitsOut)) - c.state = c.stateTable[dstState] -} - -// encode the output symbol provided and write it to the bitstream. -func (c *cState) encodeZero(symbolTT symbolTransform) { - nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 - dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState - c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut)) - c.state = c.stateTable[dstState] -} - -// flush will write the tablelog to the output and flush the remaining full bytes. -func (c *cState) flush(tableLog uint8) { - c.bw.flush32() - c.bw.addBits16NC(c.state, tableLog) - c.bw.flush() -} - -// compress is the main compression loop that will encode the input from the last byte to the first. -func (s *Scratch) compress(src []byte) error { - if len(src) <= 2 { - return errors.New("compress: src too small") - } - tt := s.ct.symbolTT[:256] - s.bw.reset(s.Out) - - // Our two states each encodes every second byte. - // Last byte encoded (first byte decoded) will always be encoded by c1. - var c1, c2 cState - - // Encode so remaining size is divisible by 4. - ip := len(src) - if ip&1 == 1 { - c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) - c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) - c1.encodeZero(tt[src[ip-3]]) - ip -= 3 - } else { - c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) - c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) - ip -= 2 - } - if ip&2 != 0 { - c2.encodeZero(tt[src[ip-1]]) - c1.encodeZero(tt[src[ip-2]]) - ip -= 2 - } - - // Main compression loop. - switch { - case !s.zeroBits && s.actualTableLog <= 8: - // We can encode 4 symbols without requiring a flush. - // We do not need to check if any output is 0 bits. - for ip >= 4 { - s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] - c2.encode(tt[v0]) - c1.encode(tt[v1]) - c2.encode(tt[v2]) - c1.encode(tt[v3]) - ip -= 4 - } - case !s.zeroBits: - // We do not need to check if any output is 0 bits. - for ip >= 4 { - s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] - c2.encode(tt[v0]) - c1.encode(tt[v1]) - s.bw.flush32() - c2.encode(tt[v2]) - c1.encode(tt[v3]) - ip -= 4 - } - case s.actualTableLog <= 8: - // We can encode 4 symbols without requiring a flush - for ip >= 4 { - s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] - c2.encodeZero(tt[v0]) - c1.encodeZero(tt[v1]) - c2.encodeZero(tt[v2]) - c1.encodeZero(tt[v3]) - ip -= 4 - } - default: - for ip >= 4 { - s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] - c2.encodeZero(tt[v0]) - c1.encodeZero(tt[v1]) - s.bw.flush32() - c2.encodeZero(tt[v2]) - c1.encodeZero(tt[v3]) - ip -= 4 - } - } - - // Flush final state. - // Used to initialize state when decoding. - c2.flush(s.actualTableLog) - c1.flush(s.actualTableLog) - - return s.bw.close() -} - -// writeCount will write the normalized histogram count to header. -// This is read back by readNCount. 
-func (s *Scratch) writeCount() error { - var ( - tableLog = s.actualTableLog - tableSize = 1 << tableLog - previous0 bool - charnum uint16 - - maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 - - // Write Table Size - bitStream = uint32(tableLog - minTablelog) - bitCount = uint(4) - remaining = int16(tableSize + 1) /* +1 for extra accuracy */ - threshold = int16(tableSize) - nbBits = uint(tableLog + 1) - ) - if cap(s.Out) < maxHeaderSize { - s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize) - } - outP := uint(0) - out := s.Out[:maxHeaderSize] - - // stops at 1 - for remaining > 1 { - if previous0 { - start := charnum - for s.norm[charnum] == 0 { - charnum++ - } - for charnum >= start+24 { - start += 24 - bitStream += uint32(0xFFFF) << bitCount - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - } - for charnum >= start+3 { - start += 3 - bitStream += 3 << bitCount - bitCount += 2 - } - bitStream += uint32(charnum-start) << bitCount - bitCount += 2 - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - count := s.norm[charnum] - charnum++ - max := (2*threshold - 1) - remaining - if count < 0 { - remaining += count - } else { - remaining -= count - } - count++ // +1 for extra accuracy - if count >= threshold { - count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ - } - bitStream += uint32(count) << bitCount - bitCount += nbBits - if count < max { - bitCount-- - } - - previous0 = count == 1 - if remaining < 1 { - return errors.New("internal error: remaining<1") - } - for remaining < threshold { - nbBits-- - threshold >>= 1 - } - - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += (bitCount + 7) / 8 - - if charnum > s.symbolLen { - return errors.New("internal error: charnum > s.symbolLen") - } - s.Out = out[:outP] - return nil -} - -// symbolTransform contains the state transform for a symbol. -type symbolTransform struct { - deltaFindState int32 - deltaNbBits uint32 -} - -// String prints values as a human readable string. -func (s symbolTransform) String() string { - return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState) -} - -// cTable contains tables used for compression. -type cTable struct { - tableSymbol []byte - stateTable []uint16 - symbolTT []symbolTransform -} - -// allocCtable will allocate tables needed for compression. -// If existing tables a re big enough, they are simply re-used. -func (s *Scratch) allocCtable() { - tableSize := 1 << s.actualTableLog - // get tableSymbol that is big enough. - if cap(s.ct.tableSymbol) < tableSize { - s.ct.tableSymbol = make([]byte, tableSize) - } - s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] - - ctSize := tableSize - if cap(s.ct.stateTable) < ctSize { - s.ct.stateTable = make([]uint16, ctSize) - } - s.ct.stateTable = s.ct.stateTable[:ctSize] - - if cap(s.ct.symbolTT) < 256 { - s.ct.symbolTT = make([]symbolTransform, 256) - } - s.ct.symbolTT = s.ct.symbolTT[:256] -} - -// buildCTable will populate the compression table so it is ready to be used. 
-func (s *Scratch) buildCTable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - var cumul [maxSymbolValue + 2]int16 - - s.allocCtable() - tableSymbol := s.ct.tableSymbol[:tableSize] - // symbol start positions - { - cumul[0] = 0 - for ui, v := range s.norm[:s.symbolLen-1] { - u := byte(ui) // one less than reference - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = u - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - } - // Encode last symbol separately to avoid overflowing u - u := int(s.symbolLen - 1) - v := s.norm[s.symbolLen-1] - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = byte(u) - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - if uint32(cumul[s.symbolLen]) != tableSize { - return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) - } - cumul[s.symbolLen] = int16(tableSize) + 1 - } - // Spread symbols - s.zeroBits = false - { - step := tableStep(tableSize) - tableMask := tableSize - 1 - var position uint32 - // if any symbol > largeLimit, we may have 0 bits output. - largeLimit := int16(1 << (s.actualTableLog - 1)) - for ui, v := range s.norm[:s.symbolLen] { - symbol := byte(ui) - if v > largeLimit { - s.zeroBits = true - } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { - tableSymbol[position] = symbol - position = (position + step) & tableMask - for position > highThreshold { - position = (position + step) & tableMask - } /* Low proba area */ - } - } - - // Check if we have gone through all positions - if position != 0 { - return errors.New("position!=0") - } - } - - // Build table - table := s.ct.stateTable - { - tsi := int(tableSize) - for u, v := range tableSymbol { - // TableU16 : sorted by symbol order; gives next state value - table[cumul[v]] = uint16(tsi + u) - cumul[v]++ - } - } - - // Build Symbol Transformation Table - { - total := int16(0) - symbolTT := s.ct.symbolTT[:s.symbolLen] - tableLog := s.actualTableLog - tl := (uint32(tableLog) << 16) - (1 << tableLog) - for i, v := range s.norm[:s.symbolLen] { - switch v { - case 0: - case -1, 1: - symbolTT[i].deltaNbBits = tl - symbolTT[i].deltaFindState = int32(total - 1) - total++ - default: - maxBitsOut := uint32(tableLog) - highBits(uint32(v-1)) - minStatePlus := uint32(v) << maxBitsOut - symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus - symbolTT[i].deltaFindState = int32(total - v) - total += v - } - } - if total != int16(tableSize) { - return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) - } - } - return nil -} - -// countSimple will create a simple histogram in s.count. -// Returns the biggest count. -// Does not update s.clearCount. -func (s *Scratch) countSimple(in []byte) (max int) { - for _, v := range in { - s.count[v]++ - } - m := uint32(0) - for i, v := range s.count[:] { - if v > m { - m = v - } - if v > 0 { - s.symbolLen = uint16(i) + 1 - } - } - return int(m) -} - -// minTableLog provides the minimum logSize to safely represent a distribution. 
-func (s *Scratch) minTableLog() uint8 { - minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 - minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 - if minBitsSrc < minBitsSymbols { - return uint8(minBitsSrc) - } - return uint8(minBitsSymbols) -} - -// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog -func (s *Scratch) optimalTableLog() { - tableLog := s.TableLog - minBits := s.minTableLog() - maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 - if maxBitsSrc < tableLog { - // Accuracy can be reduced - tableLog = maxBitsSrc - } - if minBits > tableLog { - tableLog = minBits - } - // Need a minimum to safely represent all symbol values - if tableLog < minTablelog { - tableLog = minTablelog - } - if tableLog > maxTableLog { - tableLog = maxTableLog - } - s.actualTableLog = tableLog -} - -var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} - -// normalizeCount will normalize the count of the symbols so -// the total is equal to the table size. -func (s *Scratch) normalizeCount() error { - var ( - tableLog = s.actualTableLog - scale = 62 - uint64(tableLog) - step = (1 << 62) / uint64(s.br.remain()) - vStep = uint64(1) << (scale - 20) - stillToDistribute = int16(1 << tableLog) - largest int - largestP int16 - lowThreshold = (uint32)(s.br.remain() >> tableLog) - ) - - for i, cnt := range s.count[:s.symbolLen] { - // already handled - // if (count[s] == s.length) return 0; /* rle special case */ - - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - stillToDistribute-- - } else { - proba := (int16)((uint64(cnt) * step) >> scale) - if proba < 8 { - restToBeat := vStep * uint64(rtbTable[proba]) - v := uint64(cnt)*step - (uint64(proba) << scale) - if v > restToBeat { - proba++ - } - } - if proba > largestP { - largestP = proba - largest = i - } - s.norm[i] = proba - stillToDistribute -= proba - } - } - - if -stillToDistribute >= (s.norm[largest] >> 1) { - // corner case, need another normalization method - return s.normalizeCount2() - } - s.norm[largest] += stillToDistribute - return nil -} - -// Secondary normalization method. -// To be used when primary method fails. 
-func (s *Scratch) normalizeCount2() error { - const notYetAssigned = -2 - var ( - distributed uint32 - total = uint32(s.br.remain()) - tableLog = s.actualTableLog - lowThreshold = total >> tableLog - lowOne = (total * 3) >> (tableLog + 1) - ) - for i, cnt := range s.count[:s.symbolLen] { - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - distributed++ - total -= cnt - continue - } - if cnt <= lowOne { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - s.norm[i] = notYetAssigned - } - toDistribute := (1 << tableLog) - distributed - - if (total / toDistribute) > lowOne { - // risk of rounding to zero - lowOne = (total * 3) / (toDistribute * 2) - for i, cnt := range s.count[:s.symbolLen] { - if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - } - toDistribute = (1 << tableLog) - distributed - } - if distributed == uint32(s.symbolLen)+1 { - // all values are pretty poor; - // probably incompressible data (should have already been detected); - // find max, then give all remaining points to max - var maxV int - var maxC uint32 - for i, cnt := range s.count[:s.symbolLen] { - if cnt > maxC { - maxV = i - maxC = cnt - } - } - s.norm[maxV] += int16(toDistribute) - return nil - } - - if total == 0 { - // all of the symbols were low enough for the lowOne or lowThreshold - for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { - if s.norm[i] > 0 { - toDistribute-- - s.norm[i]++ - } - } - return nil - } - - var ( - vStepLog = 62 - uint64(tableLog) - mid = uint64((1 << (vStepLog - 1)) - 1) - rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining - tmpTotal = mid - ) - for i, cnt := range s.count[:s.symbolLen] { - if s.norm[i] == notYetAssigned { - var ( - end = tmpTotal + uint64(cnt)*rStep - sStart = uint32(tmpTotal >> vStepLog) - sEnd = uint32(end >> vStepLog) - weight = sEnd - sStart - ) - if weight < 1 { - return errors.New("weight < 1") - } - s.norm[i] = int16(weight) - tmpTotal = end - } - } - return nil -} - -// validateNorm validates the normalized histogram table. 
-func (s *Scratch) validateNorm() (err error) { - var total int - for _, v := range s.norm[:s.symbolLen] { - if v >= 0 { - total += int(v) - } else { - total -= int(v) - } - } - defer func() { - if err == nil { - return - } - fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) - for i, v := range s.norm[:s.symbolLen] { - fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) - } - }() - if total != (1 << s.actualTableLog) { - return fmt.Errorf("warning: Total == %d != %d", total, 1< tablelogAbsoluteMax { - return errors.New("tableLog too large") - } - bitStream >>= 4 - bitCount := uint(4) - - s.actualTableLog = uint8(nbBits) - remaining := int32((1 << nbBits) + 1) - threshold := int32(1 << nbBits) - gotTotal := int32(0) - nbBits++ - - for remaining > 1 { - if previous0 { - n0 := charnum - for (bitStream & 0xFFFF) == 0xFFFF { - n0 += 24 - if b.off < iend-5 { - b.advance(2) - bitStream = b.Uint32() >> bitCount - } else { - bitStream >>= 16 - bitCount += 16 - } - } - for (bitStream & 3) == 3 { - n0 += 3 - bitStream >>= 2 - bitCount += 2 - } - n0 += uint16(bitStream & 3) - bitCount += 2 - if n0 > maxSymbolValue { - return errors.New("maxSymbolValue too small") - } - for charnum < n0 { - s.norm[charnum&0xff] = 0 - charnum++ - } - - if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { - b.advance(bitCount >> 3) - bitCount &= 7 - bitStream = b.Uint32() >> bitCount - } else { - bitStream >>= 2 - } - } - - max := (2*(threshold) - 1) - (remaining) - var count int32 - - if (int32(bitStream) & (threshold - 1)) < max { - count = int32(bitStream) & (threshold - 1) - bitCount += nbBits - 1 - } else { - count = int32(bitStream) & (2*threshold - 1) - if count >= threshold { - count -= max - } - bitCount += nbBits - } - - count-- // extra accuracy - if count < 0 { - // -1 means +1 - remaining += count - gotTotal -= count - } else { - remaining -= count - gotTotal += count - } - s.norm[charnum&0xff] = int16(count) - charnum++ - previous0 = count == 0 - for remaining < threshold { - nbBits-- - threshold >>= 1 - } - if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 { - b.advance(bitCount >> 3) - bitCount &= 7 - } else { - bitCount -= (uint)(8 * (len(b.b) - 4 - b.off)) - b.off = len(b.b) - 4 - } - bitStream = b.Uint32() >> (bitCount & 31) - } - s.symbolLen = charnum - - if s.symbolLen <= 1 { - return fmt.Errorf("symbolLen (%d) too small", s.symbolLen) - } - if s.symbolLen > maxSymbolValue+1 { - return fmt.Errorf("symbolLen (%d) too big", s.symbolLen) - } - if remaining != 1 { - return fmt.Errorf("corruption detected (remaining %d != 1)", remaining) - } - if bitCount > 32 { - return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount) - } - if gotTotal != 1<> 3) - return nil -} - -// decSymbol contains information about a state entry, -// Including the state offset base, the output symbol and -// the number of bits to read for the low part of the destination state. -type decSymbol struct { - newState uint16 - symbol uint8 - nbBits uint8 -} - -// allocDtable will allocate decoding tables if they are not big enough. 
-func (s *Scratch) allocDtable() { - tableSize := 1 << s.actualTableLog - if cap(s.decTable) < tableSize { - s.decTable = make([]decSymbol, tableSize) - } - s.decTable = s.decTable[:tableSize] - - if cap(s.ct.tableSymbol) < 256 { - s.ct.tableSymbol = make([]byte, 256) - } - s.ct.tableSymbol = s.ct.tableSymbol[:256] - - if cap(s.ct.stateTable) < 256 { - s.ct.stateTable = make([]uint16, 256) - } - s.ct.stateTable = s.ct.stateTable[:256] -} - -// buildDtable will build the decoding table. -func (s *Scratch) buildDtable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - s.allocDtable() - symbolNext := s.ct.stateTable[:256] - - // Init, lay down lowprob symbols - s.zeroBits = false - { - largeLimit := int16(1 << (s.actualTableLog - 1)) - for i, v := range s.norm[:s.symbolLen] { - if v == -1 { - s.decTable[highThreshold].symbol = uint8(i) - highThreshold-- - symbolNext[i] = 1 - } else { - if v >= largeLimit { - s.zeroBits = true - } - symbolNext[i] = uint16(v) - } - } - } - // Spread symbols - { - tableMask := tableSize - 1 - step := tableStep(tableSize) - position := uint32(0) - for ss, v := range s.norm[:s.symbolLen] { - for i := 0; i < int(v); i++ { - s.decTable[position].symbol = uint8(ss) - position = (position + step) & tableMask - for position > highThreshold { - // lowprob area - position = (position + step) & tableMask - } - } - } - if position != 0 { - // position must reach all cells once, otherwise normalizedCounter is incorrect - return errors.New("corrupted input (position != 0)") - } - } - - // Build Decoding table - { - tableSize := uint16(1 << s.actualTableLog) - for u, v := range s.decTable { - symbol := v.symbol - nextState := symbolNext[symbol] - symbolNext[symbol] = nextState + 1 - nBits := s.actualTableLog - byte(highBits(uint32(nextState))) - s.decTable[u].nbBits = nBits - newState := (nextState << nBits) - tableSize - if newState >= tableSize { - return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) - } - if newState == uint16(u) && nBits == 0 { - // Seems weird that this is possible with nbits > 0. - return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) - } - s.decTable[u].newState = newState - } - } - return nil -} - -// decompress will decompress the bitstream. -// If the buffer is over-read an error is returned. -func (s *Scratch) decompress() error { - br := &s.bits - br.init(s.br.unread()) - - var s1, s2 decoder - // Initialize and decode first state and symbol. - s1.init(br, s.decTable, s.actualTableLog) - s2.init(br, s.decTable, s.actualTableLog) - - // Use temp table to avoid bound checks/append penalty. - var tmp = s.ct.tableSymbol[:256] - var off uint8 - - // Main part - if !s.zeroBits { - for br.off >= 8 { - br.fillFast() - tmp[off+0] = s1.nextFast() - tmp[off+1] = s2.nextFast() - br.fillFast() - tmp[off+2] = s1.nextFast() - tmp[off+3] = s2.nextFast() - off += 4 - // When off is 0, we have overflowed and should write. - if off == 0 { - s.Out = append(s.Out, tmp...) - if len(s.Out) >= s.DecompressLimit { - return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) - } - } - } - } else { - for br.off >= 8 { - br.fillFast() - tmp[off+0] = s1.next() - tmp[off+1] = s2.next() - br.fillFast() - tmp[off+2] = s1.next() - tmp[off+3] = s2.next() - off += 4 - if off == 0 { - s.Out = append(s.Out, tmp...) - // When off is 0, we have overflowed and should write. 
- if len(s.Out) >= s.DecompressLimit { - return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) - } - } - } - } - s.Out = append(s.Out, tmp[:off]...) - - // Final bits, a bit more expensive check - for { - if s1.finished() { - s.Out = append(s.Out, s1.final(), s2.final()) - break - } - br.fill() - s.Out = append(s.Out, s1.next()) - if s2.finished() { - s.Out = append(s.Out, s2.final(), s1.final()) - break - } - s.Out = append(s.Out, s2.next()) - if len(s.Out) >= s.DecompressLimit { - return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) - } - } - return br.close() -} - -// decoder keeps track of the current state and updates it from the bitstream. -type decoder struct { - state uint16 - br *bitReader - dt []decSymbol -} - -// init will initialize the decoder and read the first state from the stream. -func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { - d.dt = dt - d.br = in - d.state = in.getBits(tableLog) -} - -// next returns the next symbol and sets the next state. -// At least tablelog bits must be available in the bit reader. -func (d *decoder) next() uint8 { - n := &d.dt[d.state] - lowBits := d.br.getBits(n.nbBits) - d.state = n.newState + lowBits - return n.symbol -} - -// finished returns true if all bits have been read from the bitstream -// and the next state would require reading bits from the input. -func (d *decoder) finished() bool { - return d.br.finished() && d.dt[d.state].nbBits > 0 -} - -// final returns the current state symbol without decoding the next. -func (d *decoder) final() uint8 { - return d.dt[d.state].symbol -} - -// nextFast returns the next symbol and sets the next state. -// This can only be used if no symbols are 0 bits. -// At least tablelog bits must be available in the bit reader. -func (d *decoder) nextFast() uint8 { - n := d.dt[d.state] - lowBits := d.br.getBitsFast(n.nbBits) - d.state = n.newState + lowBits - return n.symbol -} diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go deleted file mode 100644 index 535cbadfd..000000000 --- a/vendor/github.com/klauspost/compress/fse/fse.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -// Package fse provides Finite State Entropy encoding and decoding. -// -// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding -// for byte blocks as implemented in zstd. -// -// See https://github.com/klauspost/compress/tree/master/fse for more information. -package fse - -import ( - "errors" - "fmt" - "math/bits" -) - -const ( - /*!MEMORY_USAGE : - * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) - * Increasing memory usage improves compression ratio - * Reduced memory usage can improve speed, due to cache effect - * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ - maxMemoryUsage = 14 - defaultMemoryUsage = 13 - - maxTableLog = maxMemoryUsage - 2 - maxTablesize = 1 << maxTableLog - defaultTablelog = defaultMemoryUsage - 2 - minTablelog = 5 - maxSymbolValue = 255 -) - -var ( - // ErrIncompressible is returned when input is judged to be too hard to compress. 
- ErrIncompressible = errors.New("input is not compressible") - - // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. - ErrUseRLE = errors.New("input is single value repeated") -) - -// Scratch provides temporary storage for compression and decompression. -type Scratch struct { - // Private - count [maxSymbolValue + 1]uint32 - norm [maxSymbolValue + 1]int16 - br byteReader - bits bitReader - bw bitWriter - ct cTable // Compression tables. - decTable []decSymbol // Decompression table. - maxCount int // count of the most probable symbol - - // Per block parameters. - // These can be used to override compression parameters of the block. - // Do not touch, unless you know what you are doing. - - // Out is output buffer. - // If the scratch is re-used before the caller is done processing the output, - // set this field to nil. - // Otherwise the output buffer will be re-used for next Compression/Decompression step - // and allocation will be avoided. - Out []byte - - // DecompressLimit limits the maximum decoded size acceptable. - // If > 0 decompression will stop when approximately this many bytes - // has been decoded. - // If 0, maximum size will be 2GB. - DecompressLimit int - - symbolLen uint16 // Length of active part of the symbol table. - actualTableLog uint8 // Selected tablelog. - zeroBits bool // no bits has prob > 50%. - clearCount bool // clear count - - // MaxSymbolValue will override the maximum symbol value of the next block. - MaxSymbolValue uint8 - - // TableLog will attempt to override the tablelog for the next block. - TableLog uint8 -} - -// Histogram allows to populate the histogram and skip that step in the compression, -// It otherwise allows to inspect the histogram when compression is done. -// To indicate that you have populated the histogram call HistogramFinished -// with the value of the highest populated symbol, as well as the number of entries -// in the most populated entry. These are accepted at face value. -// The returned slice will always be length 256. -func (s *Scratch) Histogram() []uint32 { - return s.count[:] -} - -// HistogramFinished can be called to indicate that the histogram has been populated. -// maxSymbol is the index of the highest set symbol of the next data segment. -// maxCount is the number of entries in the most populated entry. -// These are accepted at face value. -func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) { - s.maxCount = maxCount - s.symbolLen = uint16(maxSymbol) + 1 - s.clearCount = maxCount != 0 -} - -// prepare will prepare and allocate scratch tables used for both compression and decompression. -func (s *Scratch) prepare(in []byte) (*Scratch, error) { - if s == nil { - s = &Scratch{} - } - if s.MaxSymbolValue == 0 { - s.MaxSymbolValue = 255 - } - if s.TableLog == 0 { - s.TableLog = defaultTablelog - } - if s.TableLog > maxTableLog { - return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog) - } - if cap(s.Out) == 0 { - s.Out = make([]byte, 0, len(in)) - } - if s.clearCount && s.maxCount == 0 { - for i := range s.count { - s.count[i] = 0 - } - s.clearCount = false - } - s.br.init(in) - if s.DecompressLimit == 0 { - // Max size 2GB. - s.DecompressLimit = (2 << 30) - 1 - } - - return s, nil -} - -// tableStep returns the next table index. 
-func tableStep(tableSize uint32) uint32 { - return (tableSize >> 1) + (tableSize >> 3) + 3 -} - -func highBits(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} diff --git a/vendor/github.com/klauspost/compress/huff0/.gitignore b/vendor/github.com/klauspost/compress/huff0/.gitignore deleted file mode 100644 index b3d262958..000000000 --- a/vendor/github.com/klauspost/compress/huff0/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/huff0-fuzz.zip diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md deleted file mode 100644 index 8b6e5c663..000000000 --- a/vendor/github.com/klauspost/compress/huff0/README.md +++ /dev/null @@ -1,89 +0,0 @@ -# Huff0 entropy compression - -This package provides Huff0 encoding and decoding as used in zstd. - -[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders), -a Huffman codec designed for modern CPU, featuring OoO (Out of Order) operations on multiple ALU -(Arithmetic Logic Unit), achieving extremely fast compression and decompression speeds. - -This can be used for compressing input with a lot of similar input values to the smallest number of bytes. -This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders, -but it can be used as a secondary step to compressors (like Snappy) that does not do entropy encoding. - -* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0) - -## News - -This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package. - -This ensures that most functionality is well tested. - -# Usage - -This package provides a low level interface that allows to compress single independent blocks. - -Each block is separate, and there is no built in integrity checks. -This means that the caller should keep track of block sizes and also do checksums if needed. - -Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and -[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions. -You must provide input and will receive the output and maybe an error. - -These error values can be returned: - -| Error | Description | -|---------------------|-----------------------------------------------------------------------------| -| `` | Everything ok, output is returned | -| `ErrIncompressible` | Returned when input is judged to be too hard to compress | -| `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | -| `ErrTooBig` | Returned if the input block exceeds the maximum allowed size (128 Kib) | -| `(error)` | An internal error occurred. | - - -As can be seen above some of there are errors that will be returned even under normal operation so it is important to handle these. - -To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object -that can be re-used for successive calls. Both compression and decompression accepts a `Scratch` object, and the same -object can be used for both. - -Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this -you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. 
- -The `Scratch` object will retain state that allows to re-use previous tables for encoding and decoding. - -## Tables and re-use - -Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results. - -The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy) -that controls this behaviour. See the documentation for details. This can be altered between each block. - -Do however note that this information is *not* stored in the output block and it is up to the users of the package to -record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called, -based on the boolean reported back from the CompressXX call. - -If you want to store the table separate from the data, you can access them as `OutData` and `OutTable` on the -[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object. - -## Decompressing - -The first part of decoding is to initialize the decoding table through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable). -This will initialize the decoding tables. -You can supply the complete block to `ReadTable` and it will return the data part of the block -which can be given to the decompressor. - -Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X) -or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function. - -For concurrently decompressing content with a fixed table a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested which will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size. - -You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back -your input was likely corrupted. - -It is important to note that a successful decoding does *not* mean your output matches your original input. -There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. - -# Contributing - -Contributions are always welcome. Be aware that adding public functions will require good justification and breaking -changes will likely not be accepted. If in doubt open an issue before writing the PR. diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go deleted file mode 100644 index a4979e886..000000000 --- a/vendor/github.com/klauspost/compress/huff0/bitreader.go +++ /dev/null @@ -1,329 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package huff0 - -import ( - "encoding/binary" - "errors" - "io" -) - -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReader struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. 
-func (b *bitReader) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.bitsRead += 8 - uint8(highBit32(uint32(v))) - return nil -} - -// peekBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReader) peekBitsFast(n uint8) uint16 { - const regMask = 64 - 1 - v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - return v -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReader) fillFast() { - if b.bitsRead < 32 { - return - } - - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 -} - -func (b *bitReader) advance(n uint8) { - b.bitsRead += n -} - -// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. -func (b *bitReader) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReader) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value = (b.value << 8) | uint64(b.in[b.off-1]) - b.bitsRead -= 8 - b.off-- - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReader) finished() bool { - return b.off == 0 && b.bitsRead >= 64 -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReader) close() error { - // Release reference. - b.in = nil - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} - -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReaderBytes struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReaderBytes) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.advance(8 - uint8(highBit32(uint32(v)))) - return nil -} - -// peekBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. 
-func (b *bitReaderBytes) peekByteFast() uint8 { - got := uint8(b.value >> 56) - return got -} - -func (b *bitReaderBytes) advance(n uint8) { - b.bitsRead += n - b.value <<= n & 63 -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReaderBytes) fillFast() { - if b.bitsRead < 32 { - return - } - - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << (b.bitsRead - 32) - b.bitsRead -= 32 - b.off -= 4 -} - -// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. -func (b *bitReaderBytes) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReaderBytes) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << (b.bitsRead - 32) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8) - b.bitsRead -= 8 - b.off-- - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReaderBytes) finished() bool { - return b.off == 0 && b.bitsRead >= 64 -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReaderBytes) close() error { - // Release reference. - b.in = nil - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} - -// bitReaderShifted reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReaderShifted struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReaderShifted) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.advance(8 - uint8(highBit32(uint32(v)))) - return nil -} - -// peekBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { - return uint16(b.value >> ((64 - n) & 63)) -} - -func (b *bitReaderShifted) advance(n uint8) { - b.bitsRead += n - b.value <<= n & 63 -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReaderShifted) fillFast() { - if b.bitsRead < 32 { - return - } - - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << ((b.bitsRead - 32) & 63) - b.bitsRead -= 32 - b.off -= 4 -} - -// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. -func (b *bitReaderShifted) fillFastStart() { - // Do single re-slice to avoid bounds checks. 
- b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReaderShifted) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << ((b.bitsRead - 32) & 63) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63) - b.bitsRead -= 8 - b.off-- - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReaderShifted) finished() bool { - return b.off == 0 && b.bitsRead >= 64 -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReaderShifted) close() error { - // Release reference. - b.in = nil - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go deleted file mode 100644 index 6bce4e87d..000000000 --- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package huff0 - -import "fmt" - -// bitWriter will write bits. -// First bit will be LSB of the first byte of output. -type bitWriter struct { - bitContainer uint64 - nBits uint8 - out []byte -} - -// bitMask16 is bitmasks. Has extra to avoid bounds check. -var bitMask16 = [32]uint16{ - 0, 1, 3, 7, 0xF, 0x1F, - 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, - 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF} /* up to 16 bits */ - -// addBits16NC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16NC(value uint16, bits uint8) { - b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - -// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// encSymbol will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) encSymbol(ct cTable, symbol byte) { - enc := ct[symbol] - b.bitContainer |= uint64(enc.val) << (b.nBits & 63) - if false { - if enc.nBits == 0 { - panic("nbits 0") - } - } - b.nBits += enc.nBits -} - -// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. 
-func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { - encA := ct[av] - encB := ct[bv] - sh := b.nBits & 63 - combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63)) - b.bitContainer |= combined << sh - if false { - if encA.nBits == 0 { - panic("nbitsA 0") - } - if encB.nBits == 0 { - panic("nbitsB 0") - } - } - b.nBits += encA.nBits + encB.nBits -} - -// addBits16ZeroNC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -// This is fastest if bits can be zero. -func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { - if bits == 0 { - return - } - value <<= (16 - bits) & 15 - value >>= (16 - bits) & 15 - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// flush will flush all pending full bytes. -// There will be at least 56 bits available for writing when this has been called. -// Using flush32 is faster, but leaves less space for writing. -func (b *bitWriter) flush() { - v := b.nBits >> 3 - switch v { - case 0: - return - case 1: - b.out = append(b.out, - byte(b.bitContainer), - ) - b.bitContainer >>= 1 << 3 - case 2: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - ) - b.bitContainer >>= 2 << 3 - case 3: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - ) - b.bitContainer >>= 3 << 3 - case 4: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - ) - b.bitContainer >>= 4 << 3 - case 5: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - ) - b.bitContainer >>= 5 << 3 - case 6: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - ) - b.bitContainer >>= 6 << 3 - case 7: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - ) - b.bitContainer >>= 7 << 3 - case 8: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - byte(b.bitContainer>>56), - ) - b.bitContainer = 0 - b.nBits = 0 - return - default: - panic(fmt.Errorf("bits (%d) > 64", b.nBits)) - } - b.nBits &= 7 -} - -// flush32 will flush out, so there are at least 32 bits available for writing. -func (b *bitWriter) flush32() { - if b.nBits < 32 { - return - } - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24)) - b.nBits -= 32 - b.bitContainer >>= 32 -} - -// flushAlign will flush remaining full bytes and align to next byte boundary. -func (b *bitWriter) flushAlign() { - nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { - b.out = append(b.out, byte(b.bitContainer>>(i*8))) - } - b.nBits = 0 - b.bitContainer = 0 -} - -// close will write the alignment bit and write the final byte(s) -// to the output. -func (b *bitWriter) close() error { - // End mark - b.addBits16Clean(1, 1) - // flush until next byte. 
- b.flushAlign() - return nil -} - -// reset and continue writing by appending to out. -func (b *bitWriter) reset(out []byte) { - b.bitContainer = 0 - b.nBits = 0 - b.out = out -} diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go deleted file mode 100644 index 50bcdf6ea..000000000 --- a/vendor/github.com/klauspost/compress/huff0/bytereader.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package huff0 - -// byteReader provides a byte reader that reads -// little endian values from a byte stream. -// The input stream is manually advanced. -// The reader performs no bounds checks. -type byteReader struct { - b []byte - off int -} - -// init will initialize the reader and set the input. -func (b *byteReader) init(in []byte) { - b.b = in - b.off = 0 -} - -// advance the stream b n bytes. -func (b *byteReader) advance(n uint) { - b.off += int(n) -} - -// Int32 returns a little endian int32 starting at current offset. -func (b byteReader) Int32() int32 { - v3 := int32(b.b[b.off+3]) - v2 := int32(b.b[b.off+2]) - v1 := int32(b.b[b.off+1]) - v0 := int32(b.b[b.off]) - return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 -} - -// Uint32 returns a little endian uint32 starting at current offset. -func (b byteReader) Uint32() uint32 { - v3 := uint32(b.b[b.off+3]) - v2 := uint32(b.b[b.off+2]) - v1 := uint32(b.b[b.off+1]) - v0 := uint32(b.b[b.off]) - return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 -} - -// unread returns the unread portion of the input. -func (b byteReader) unread() []byte { - return b.b[b.off:] -} - -// remain will return the number of bytes remaining. -func (b byteReader) remain() int { - return len(b.b) - b.off -} diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go deleted file mode 100644 index dea115b33..000000000 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ /dev/null @@ -1,657 +0,0 @@ -package huff0 - -import ( - "fmt" - "runtime" - "sync" -) - -// Compress1X will compress the input. -// The output can be decoded using Decompress1X. -// Supply a Scratch object. The scratch object contains state about re-use, -// So when sharing across independent encodes, be sure to set the re-use policy. -func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { - s, err = s.prepare(in) - if err != nil { - return nil, false, err - } - return compress(in, s, s.compress1X) -} - -// Compress4X will compress the input. The input is split into 4 independent blocks -// and compressed similar to Compress1X. -// The output can be decoded using Decompress4X. -// Supply a Scratch object. The scratch object contains state about re-use, -// So when sharing across independent encodes, be sure to set the re-use policy. -func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { - s, err = s.prepare(in) - if err != nil { - return nil, false, err - } - if false { - // TODO: compress4Xp only slightly faster. 
- const parallelThreshold = 8 << 10 - if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 { - return compress(in, s, s.compress4X) - } - return compress(in, s, s.compress4Xp) - } - return compress(in, s, s.compress4X) -} - -func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) { - // Nuke previous table if we cannot reuse anyway. - if s.Reuse == ReusePolicyNone { - s.prevTable = s.prevTable[:0] - } - - // Create histogram, if none was provided. - maxCount := s.maxCount - var canReuse = false - if maxCount == 0 { - maxCount, canReuse = s.countSimple(in) - } else { - canReuse = s.canUseTable(s.prevTable) - } - - // We want the output size to be less than this: - wantSize := len(in) - if s.WantLogLess > 0 { - wantSize -= wantSize >> s.WantLogLess - } - - // Reset for next run. - s.clearCount = true - s.maxCount = 0 - if maxCount >= len(in) { - if maxCount > len(in) { - return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) - } - if len(in) == 1 { - return nil, false, ErrIncompressible - } - // One symbol, use RLE - return nil, false, ErrUseRLE - } - if maxCount == 1 || maxCount < (len(in)>>7) { - // Each symbol present maximum once or too well distributed. - return nil, false, ErrIncompressible - } - if s.Reuse == ReusePolicyMust && !canReuse { - // We must reuse, but we can't. - return nil, false, ErrIncompressible - } - if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse { - keepTable := s.cTable - keepTL := s.actualTableLog - s.cTable = s.prevTable - s.actualTableLog = s.prevTableLog - s.Out, err = compressor(in) - s.cTable = keepTable - s.actualTableLog = keepTL - if err == nil && len(s.Out) < wantSize { - s.OutData = s.Out - return s.Out, true, nil - } - if s.Reuse == ReusePolicyMust { - return nil, false, ErrIncompressible - } - // Do not attempt to re-use later. - s.prevTable = s.prevTable[:0] - } - - // Calculate new table. - err = s.buildCTable() - if err != nil { - return nil, false, err - } - - if false && !s.canUseTable(s.cTable) { - panic("invalid table generated") - } - - if s.Reuse == ReusePolicyAllow && canReuse { - hSize := len(s.Out) - oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen]) - newSize := s.cTable.estimateSize(s.count[:s.symbolLen]) - if oldSize <= hSize+newSize || hSize+12 >= wantSize { - // Retain cTable even if we re-use. - keepTable := s.cTable - keepTL := s.actualTableLog - - s.cTable = s.prevTable - s.actualTableLog = s.prevTableLog - s.Out, err = compressor(in) - - // Restore ctable. - s.cTable = keepTable - s.actualTableLog = keepTL - if err != nil { - return nil, false, err - } - if len(s.Out) >= wantSize { - return nil, false, ErrIncompressible - } - s.OutData = s.Out - return s.Out, true, nil - } - } - - // Use new table - err = s.cTable.write(s) - if err != nil { - s.OutTable = nil - return nil, false, err - } - s.OutTable = s.Out - - // Compress using new table - s.Out, err = compressor(in) - if err != nil { - s.OutTable = nil - return nil, false, err - } - if len(s.Out) >= wantSize { - s.OutTable = nil - return nil, false, ErrIncompressible - } - // Move current table into previous. 
- s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0] - s.OutData = s.Out[len(s.OutTable):] - return s.Out, false, nil -} - -func (s *Scratch) compress1X(src []byte) ([]byte, error) { - return s.compress1xDo(s.Out, src) -} - -func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) { - var bw = bitWriter{out: dst} - - // N is length divisible by 4. - n := len(src) - n -= n & 3 - cTable := s.cTable[:256] - - // Encode last bytes. - for i := len(src) & 3; i > 0; i-- { - bw.encSymbol(cTable, src[n+i-1]) - } - n -= 4 - if s.actualTableLog <= 8 { - for ; n >= 0; n -= 4 { - tmp := src[n : n+4] - // tmp should be len 4 - bw.flush32() - bw.encTwoSymbols(cTable, tmp[3], tmp[2]) - bw.encTwoSymbols(cTable, tmp[1], tmp[0]) - } - } else { - for ; n >= 0; n -= 4 { - tmp := src[n : n+4] - // tmp should be len 4 - bw.flush32() - bw.encTwoSymbols(cTable, tmp[3], tmp[2]) - bw.flush32() - bw.encTwoSymbols(cTable, tmp[1], tmp[0]) - } - } - err := bw.close() - return bw.out, err -} - -var sixZeros [6]byte - -func (s *Scratch) compress4X(src []byte) ([]byte, error) { - if len(src) < 12 { - return nil, ErrIncompressible - } - segmentSize := (len(src) + 3) / 4 - - // Add placeholder for output length - offsetIdx := len(s.Out) - s.Out = append(s.Out, sixZeros[:]...) - - for i := 0; i < 4; i++ { - toDo := src - if len(toDo) > segmentSize { - toDo = toDo[:segmentSize] - } - src = src[len(toDo):] - - var err error - idx := len(s.Out) - s.Out, err = s.compress1xDo(s.Out, toDo) - if err != nil { - return nil, err - } - // Write compressed length as little endian before block. - if i < 3 { - // Last length is not written. - length := len(s.Out) - idx - s.Out[i*2+offsetIdx] = byte(length) - s.Out[i*2+offsetIdx+1] = byte(length >> 8) - } - } - - return s.Out, nil -} - -// compress4Xp will compress 4 streams using separate goroutines. -func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { - if len(src) < 12 { - return nil, ErrIncompressible - } - // Add placeholder for output length - s.Out = s.Out[:6] - - segmentSize := (len(src) + 3) / 4 - var wg sync.WaitGroup - var errs [4]error - wg.Add(4) - for i := 0; i < 4; i++ { - toDo := src - if len(toDo) > segmentSize { - toDo = toDo[:segmentSize] - } - src = src[len(toDo):] - - // Separate goroutine for each block. - go func(i int) { - s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) - wg.Done() - }(i) - } - wg.Wait() - for i := 0; i < 4; i++ { - if errs[i] != nil { - return nil, errs[i] - } - o := s.tmpOut[i] - // Write compressed length as little endian before block. - if i < 3 { - // Last length is not written. - s.Out[i*2] = byte(len(o)) - s.Out[i*2+1] = byte(len(o) >> 8) - } - - // Write output. - s.Out = append(s.Out, o...) - } - return s.Out, nil -} - -// countSimple will create a simple histogram in s.count. -// Returns the biggest count. -// Does not update s.clearCount. 
-func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { - reuse = true - for _, v := range in { - s.count[v]++ - } - m := uint32(0) - if len(s.prevTable) > 0 { - for i, v := range s.count[:] { - if v > m { - m = v - } - if v > 0 { - s.symbolLen = uint16(i) + 1 - if i >= len(s.prevTable) { - reuse = false - } else { - if s.prevTable[i].nBits == 0 { - reuse = false - } - } - } - } - return int(m), reuse - } - for i, v := range s.count[:] { - if v > m { - m = v - } - if v > 0 { - s.symbolLen = uint16(i) + 1 - } - } - return int(m), false -} - -func (s *Scratch) canUseTable(c cTable) bool { - if len(c) < int(s.symbolLen) { - return false - } - for i, v := range s.count[:s.symbolLen] { - if v != 0 && c[i].nBits == 0 { - return false - } - } - return true -} - -func (s *Scratch) validateTable(c cTable) bool { - if len(c) < int(s.symbolLen) { - return false - } - for i, v := range s.count[:s.symbolLen] { - if v != 0 { - if c[i].nBits == 0 { - return false - } - if c[i].nBits > s.actualTableLog { - return false - } - } - } - return true -} - -// minTableLog provides the minimum logSize to safely represent a distribution. -func (s *Scratch) minTableLog() uint8 { - minBitsSrc := highBit32(uint32(s.br.remain())) + 1 - minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 - if minBitsSrc < minBitsSymbols { - return uint8(minBitsSrc) - } - return uint8(minBitsSymbols) -} - -// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog -func (s *Scratch) optimalTableLog() { - tableLog := s.TableLog - minBits := s.minTableLog() - maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1 - if maxBitsSrc < tableLog { - // Accuracy can be reduced - tableLog = maxBitsSrc - } - if minBits > tableLog { - tableLog = minBits - } - // Need a minimum to safely represent all symbol values - if tableLog < minTablelog { - tableLog = minTablelog - } - if tableLog > tableLogMax { - tableLog = tableLogMax - } - s.actualTableLog = tableLog -} - -type cTableEntry struct { - val uint16 - nBits uint8 - // We have 8 bits extra -} - -const huffNodesMask = huffNodesLen - 1 - -func (s *Scratch) buildCTable() error { - s.optimalTableLog() - s.huffSort() - if cap(s.cTable) < maxSymbolValue+1 { - s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1) - } else { - s.cTable = s.cTable[:s.symbolLen] - for i := range s.cTable { - s.cTable[i] = cTableEntry{} - } - } - - var startNode = int16(s.symbolLen) - nonNullRank := s.symbolLen - 1 - - nodeNb := startNode - huffNode := s.nodes[1 : huffNodesLen+1] - - // This overlays the slice above, but allows "-1" index lookups. - // Different from reference implementation. 
- huffNode0 := s.nodes[0 : huffNodesLen+1] - - for huffNode[nonNullRank].count == 0 { - nonNullRank-- - } - - lowS := int16(nonNullRank) - nodeRoot := nodeNb + lowS - 1 - lowN := nodeNb - huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count - huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb) - nodeNb++ - lowS -= 2 - for n := nodeNb; n <= nodeRoot; n++ { - huffNode[n].count = 1 << 30 - } - // fake entry, strong barrier - huffNode0[0].count = 1 << 31 - - // create parents - for nodeNb <= nodeRoot { - var n1, n2 int16 - if huffNode0[lowS+1].count < huffNode0[lowN+1].count { - n1 = lowS - lowS-- - } else { - n1 = lowN - lowN++ - } - if huffNode0[lowS+1].count < huffNode0[lowN+1].count { - n2 = lowS - lowS-- - } else { - n2 = lowN - lowN++ - } - - huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count - huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb) - nodeNb++ - } - - // distribute weights (unlimited tree height) - huffNode[nodeRoot].nbBits = 0 - for n := nodeRoot - 1; n >= startNode; n-- { - huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 - } - for n := uint16(0); n <= nonNullRank; n++ { - huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 - } - s.actualTableLog = s.setMaxHeight(int(nonNullRank)) - maxNbBits := s.actualTableLog - - // fill result into tree (val, nbBits) - if maxNbBits > tableLogMax { - return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax) - } - var nbPerRank [tableLogMax + 1]uint16 - var valPerRank [16]uint16 - for _, v := range huffNode[:nonNullRank+1] { - nbPerRank[v.nbBits]++ - } - // determine stating value per rank - { - min := uint16(0) - for n := maxNbBits; n > 0; n-- { - // get starting value within each rank - valPerRank[n] = min - min += nbPerRank[n] - min >>= 1 - } - } - - // push nbBits per symbol, symbol order - for _, v := range huffNode[:nonNullRank+1] { - s.cTable[v.symbol].nBits = v.nbBits - } - - // assign value within rank, symbol order - t := s.cTable[:s.symbolLen] - for n, val := range t { - nbits := val.nBits & 15 - v := valPerRank[nbits] - t[n].val = v - valPerRank[nbits] = v + 1 - } - - return nil -} - -// huffSort will sort symbols, decreasing order. -func (s *Scratch) huffSort() { - type rankPos struct { - base uint32 - current uint32 - } - - // Clear nodes - nodes := s.nodes[:huffNodesLen+1] - s.nodes = nodes - nodes = nodes[1 : huffNodesLen+1] - - // Sort into buckets based on length of symbol count. 
- var rank [32]rankPos - for _, v := range s.count[:s.symbolLen] { - r := highBit32(v+1) & 31 - rank[r].base++ - } - // maxBitLength is log2(BlockSizeMax) + 1 - const maxBitLength = 18 + 1 - for n := maxBitLength; n > 0; n-- { - rank[n-1].base += rank[n].base - } - for n := range rank[:maxBitLength] { - rank[n].current = rank[n].base - } - for n, c := range s.count[:s.symbolLen] { - r := (highBit32(c+1) + 1) & 31 - pos := rank[r].current - rank[r].current++ - prev := nodes[(pos-1)&huffNodesMask] - for pos > rank[r].base && c > prev.count { - nodes[pos&huffNodesMask] = prev - pos-- - prev = nodes[(pos-1)&huffNodesMask] - } - nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)} - } - return -} - -func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { - maxNbBits := s.actualTableLog - huffNode := s.nodes[1 : huffNodesLen+1] - //huffNode = huffNode[: huffNodesLen] - - largestBits := huffNode[lastNonNull].nbBits - - // early exit : no elt > maxNbBits - if largestBits <= maxNbBits { - return largestBits - } - totalCost := int(0) - baseCost := int(1) << (largestBits - maxNbBits) - n := uint32(lastNonNull) - - for huffNode[n].nbBits > maxNbBits { - totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)) - huffNode[n].nbBits = maxNbBits - n-- - } - // n stops at huffNode[n].nbBits <= maxNbBits - - for huffNode[n].nbBits == maxNbBits { - n-- - } - // n end at index of smallest symbol using < maxNbBits - - // renorm totalCost - totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */ - - // repay normalized cost - { - const noSymbol = 0xF0F0F0F0 - var rankLast [tableLogMax + 2]uint32 - - for i := range rankLast[:] { - rankLast[i] = noSymbol - } - - // Get pos of last (smallest) symbol per rank - { - currentNbBits := maxNbBits - for pos := int(n); pos >= 0; pos-- { - if huffNode[pos].nbBits >= currentNbBits { - continue - } - currentNbBits = huffNode[pos].nbBits // < maxNbBits - rankLast[maxNbBits-currentNbBits] = uint32(pos) - } - } - - for totalCost > 0 { - nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1 - - for ; nBitsToDecrease > 1; nBitsToDecrease-- { - highPos := rankLast[nBitsToDecrease] - lowPos := rankLast[nBitsToDecrease-1] - if highPos == noSymbol { - continue - } - if lowPos == noSymbol { - break - } - highTotal := huffNode[highPos].count - lowTotal := 2 * huffNode[lowPos].count - if highTotal <= lowTotal { - break - } - } - // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
- // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary - // FIXME: try to remove - for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) { - nBitsToDecrease++ - } - totalCost -= 1 << (nBitsToDecrease - 1) - if rankLast[nBitsToDecrease-1] == noSymbol { - // this rank is no longer empty - rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] - } - huffNode[rankLast[nBitsToDecrease]].nbBits++ - if rankLast[nBitsToDecrease] == 0 { - /* special case, reached largest symbol */ - rankLast[nBitsToDecrease] = noSymbol - } else { - rankLast[nBitsToDecrease]-- - if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease { - rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ - } - } - } - - for totalCost < 0 { /* Sometimes, cost correction overshoot */ - if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ - for huffNode[n].nbBits == maxNbBits { - n-- - } - huffNode[n+1].nbBits-- - rankLast[1] = n + 1 - totalCost++ - continue - } - huffNode[rankLast[1]+1].nbBits-- - rankLast[1]++ - totalCost++ - } - } - return maxNbBits -} - -type nodeElt struct { - count uint32 - parent uint16 - symbol byte - nbBits uint8 -} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go deleted file mode 100644 index 41703bba4..000000000 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ /dev/null @@ -1,1164 +0,0 @@ -package huff0 - -import ( - "errors" - "fmt" - "io" - - "github.com/klauspost/compress/fse" -) - -type dTable struct { - single []dEntrySingle - double []dEntryDouble -} - -// single-symbols decoding -type dEntrySingle struct { - entry uint16 -} - -// double-symbols decoding -type dEntryDouble struct { - seq uint16 - nBits uint8 - len uint8 -} - -// Uses special code for all tables that are < 8 bits. -const use8BitTables = true - -// ReadTable will read a table from the input. -// The size of the input may be larger than the table definition. -// Any content remaining after the table definition will be returned. -// If no Scratch is provided a new one is allocated. -// The returned Scratch can be used for encoding or decoding input using this table. 
-func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { - s, err = s.prepare(in) - if err != nil { - return s, nil, err - } - if len(in) <= 1 { - return s, nil, errors.New("input too small for table") - } - iSize := in[0] - in = in[1:] - if iSize >= 128 { - // Uncompressed - oSize := iSize - 127 - iSize = (oSize + 1) / 2 - if int(iSize) > len(in) { - return s, nil, errors.New("input too small for table") - } - for n := uint8(0); n < oSize; n += 2 { - v := in[n/2] - s.huffWeight[n] = v >> 4 - s.huffWeight[n+1] = v & 15 - } - s.symbolLen = uint16(oSize) - in = in[iSize:] - } else { - if len(in) < int(iSize) { - return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in)) - } - // FSE compressed weights - s.fse.DecompressLimit = 255 - hw := s.huffWeight[:] - s.fse.Out = hw - b, err := fse.Decompress(in[:iSize], s.fse) - s.fse.Out = nil - if err != nil { - return s, nil, err - } - if len(b) > 255 { - return s, nil, errors.New("corrupt input: output table too large") - } - s.symbolLen = uint16(len(b)) - in = in[iSize:] - } - - // collect weight stats - var rankStats [16]uint32 - weightTotal := uint32(0) - for _, v := range s.huffWeight[:s.symbolLen] { - if v > tableLogMax { - return s, nil, errors.New("corrupt input: weight too large") - } - v2 := v & 15 - rankStats[v2]++ - // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0. - weightTotal += (1 << v2) >> 1 - } - if weightTotal == 0 { - return s, nil, errors.New("corrupt input: weights zero") - } - - // get last non-null symbol weight (implied, total must be 2^n) - { - tableLog := highBit32(weightTotal) + 1 - if tableLog > tableLogMax { - return s, nil, errors.New("corrupt input: tableLog too big") - } - s.actualTableLog = uint8(tableLog) - // determine last weight - { - total := uint32(1) << tableLog - rest := total - weightTotal - verif := uint32(1) << highBit32(rest) - lastWeight := highBit32(rest) + 1 - if verif != rest { - // last value must be a clean power of 2 - return s, nil, errors.New("corrupt input: last value not power of two") - } - s.huffWeight[s.symbolLen] = uint8(lastWeight) - s.symbolLen++ - rankStats[lastWeight]++ - } - } - - if (rankStats[1] < 2) || (rankStats[1]&1 != 0) { - // by construction : at least 2 elts of rank 1, must be even - return s, nil, errors.New("corrupt input: min elt size, even check failed ") - } - - // TODO: Choose between single/double symbol decoding - - // Calculate starting value for each rank - { - var nextRankStart uint32 - for n := uint8(1); n < s.actualTableLog+1; n++ { - current := nextRankStart - nextRankStart += rankStats[n] << (n - 1) - rankStats[n] = current - } - } - - // fill DTable (always full size) - tSize := 1 << tableLogMax - if len(s.dt.single) != tSize { - s.dt.single = make([]dEntrySingle, tSize) - } - cTable := s.prevTable - if cap(cTable) < maxSymbolValue+1 { - cTable = make([]cTableEntry, 0, maxSymbolValue+1) - } - cTable = cTable[:maxSymbolValue+1] - s.prevTable = cTable[:s.symbolLen] - s.prevTableLog = s.actualTableLog - - for n, w := range s.huffWeight[:s.symbolLen] { - if w == 0 { - cTable[n] = cTableEntry{ - val: 0, - nBits: 0, - } - continue - } - length := (uint32(1) << w) >> 1 - d := dEntrySingle{ - entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8), - } - - rank := &rankStats[w] - cTable[n] = cTableEntry{ - val: uint16(*rank >> (w - 1)), - nBits: uint8(d.entry), - } - - single := s.dt.single[*rank : *rank+length] - for i := range single { - single[i] = d - } - *rank += length - } - - 
return s, in, nil -} - -// Decompress1X will decompress a 1X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// Before this is called, the table must be initialized with ReadTable unless -// the encoder re-used the table. -// deprecated: Use the stateless Decoder() to get a concurrent version. -func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { - if cap(s.Out) < s.MaxDecodedSize { - s.Out = make([]byte, s.MaxDecodedSize) - } - s.Out = s.Out[:0:s.MaxDecodedSize] - s.Out, err = s.Decoder().Decompress1X(s.Out, in) - return s.Out, err -} - -// Decompress4X will decompress a 4X encoded stream. -// Before this is called, the table must be initialized with ReadTable unless -// the encoder re-used the table. -// The length of the supplied input must match the end of a block exactly. -// The destination size of the uncompressed data must be known and provided. -// deprecated: Use the stateless Decoder() to get a concurrent version. -func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { - if dstSize > s.MaxDecodedSize { - return nil, ErrMaxDecodedSizeExceeded - } - if cap(s.Out) < dstSize { - s.Out = make([]byte, s.MaxDecodedSize) - } - s.Out = s.Out[:0:dstSize] - s.Out, err = s.Decoder().Decompress4X(s.Out, in) - return s.Out, err -} - -// Decoder will return a stateless decoder that can be used by multiple -// decompressors concurrently. -// Before this is called, the table must be initialized with ReadTable. -// The Decoder is still linked to the scratch buffer so that cannot be reused. -// However, it is safe to discard the scratch. -func (s *Scratch) Decoder() *Decoder { - return &Decoder{ - dt: s.dt, - actualTableLog: s.actualTableLog, - } -} - -// Decoder provides stateless decoding. -type Decoder struct { - dt dTable - actualTableLog uint8 -} - -// Decompress1X will decompress a 1X encoded stream. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress1X8Bit(dst, src) - } - var br bitReaderShifted - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - dt := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - var buf [256]byte - var off uint8 - - for br.off >= 8 { - br.fillFast() - v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - // Refill - br.fillFast() - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - - if len(dst)+int(off) > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) 
- - // br < 8, so uint8 is fine - bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead - for bitsLeft > 0 { - br.fill() - if false && br.bitsRead >= 32 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value = (br.value << 32) | uint64(low) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value = (br.value << 8) | uint64(br.in[br.off-1]) - br.bitsRead -= 8 - br.off-- - } - } - } - if len(dst) >= maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= nBits - dst = append(dst, uint8(v.entry>>8)) - } - return dst, br.close() -} - -// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { - if d.actualTableLog == 8 { - return d.decompress1X8BitExactly(dst, src) - } - var br bitReaderBytes - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - dt := d.dt.single[:256] - - // Use temp table to avoid bound checks/append penalty. - var buf [256]byte - var off uint8 - - shift := (8 - d.actualTableLog) & 7 - - //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) - for br.off >= 4 { - br.fillFast() - v := dt[br.peekByteFast()>>shift] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[br.peekByteFast()>>shift] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[br.peekByteFast()>>shift] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[br.peekByteFast()>>shift] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - - if len(dst)+int(off) > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) - - // br < 4, so uint8 is fine - bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) - for bitsLeft > 0 { - if br.bitsRead >= 64-8 { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - if len(dst) >= maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := dt[br.peekByteFast()>>shift] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= int8(nBits) - dst = append(dst, uint8(v.entry>>8)) - } - return dst, br.close() -} - -// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { - var br bitReaderBytes - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - dt := d.dt.single[:256] - - // Use temp table to avoid bound checks/append penalty. 
- var buf [256]byte - var off uint8 - - const shift = 0 - - //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) - for br.off >= 4 { - br.fillFast() - v := dt[br.peekByteFast()>>shift] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[br.peekByteFast()>>shift] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[br.peekByteFast()>>shift] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[br.peekByteFast()>>shift] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - - if len(dst)+int(off) > maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) - - // br < 4, so uint8 is fine - bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) - for bitsLeft > 0 { - if br.bitsRead >= 64-8 { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - if len(dst) >= maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := dt[br.peekByteFast()>>shift] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= int8(nBits) - dst = append(dst, uint8(v.entry>>8)) - } - return dst, br.close() -} - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. -func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if len(src) < 6+(4*1) { - return nil, errors.New("input too small") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress4X8bit(dst, src) - } - - var br [4]bitReaderShifted - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - var buf [256]byte - var off uint8 - var decoded int - - // Decode 2 values from each decoder/loop. 
- const bufoff = 256 / 4 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - const stream = 0 - const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - br[stream].advance(uint8(v.entry)) - buf[off+bufoff*stream] = uint8(v.entry >> 8) - - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v2 := single[val2&tlMask] - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream2] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - br[stream].advance(uint8(v.entry)) - buf[off+bufoff*stream+1] = uint8(v.entry >> 8) - - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v2 = single[val2&tlMask] - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - br[stream].advance(uint8(v.entry)) - buf[off+bufoff*stream] = uint8(v.entry >> 8) - - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v2 := single[val2&tlMask] - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream2] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - br[stream].advance(uint8(v.entry)) - buf[off+bufoff*stream+1] = uint8(v.entry >> 8) - - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v2 = single[val2&tlMask] - br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) - } - - off += 2 - - if off == bufoff { - if bufoff > dstEvery { - return nil, errors.New("corruption detected: stream overrun 1") - } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 - out = out[bufoff:] - decoded += 256 - // There must at least be 3 buffers left. - if len(out) < dstEvery*3 { - return nil, errors.New("corruption detected: stream overrun 2") - } - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. - for i := range br { - offset := dstEvery * i - br := &br[i] - bitsLeft := br.off*8 + uint(64-br.bitsRead) - for bitsLeft > 0 { - br.fill() - if false && br.bitsRead >= 32 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value = (br.value << 32) | uint64(low) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value = (br.value << 8) | uint64(br.in[br.off-1]) - br.bitsRead -= 8 - br.off-- - } - } - } - // end inline... - if offset >= len(out) { - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. 
- val := br.peekBitsFast(d.actualTableLog) - v := single[val&tlMask].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - return nil, err - } - } - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. -func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { - if d.actualTableLog == 8 { - return d.decompress4X8bitExactly(dst, src) - } - - var br [4]bitReaderBytes - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - shift := (8 - d.actualTableLog) & 7 - - const tlSize = 1 << 8 - const tlMask = tlSize - 1 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - var buf [256]byte - var off uint8 - var decoded int - - // Decode 4 values from each decoder/loop. - const bufoff = 256 / 4 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - // Interleave 2 decodes. - const stream = 0 - const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 := single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+1] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+2] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+3] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - } - - { - const stream = 2 - const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 := single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+1] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = 
single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+2] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+3] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - } - - off += 4 - - if off == bufoff { - if bufoff > dstEvery { - return nil, errors.New("corruption detected: stream overrun 1") - } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 - out = out[bufoff:] - decoded += 256 - // There must at least be 3 buffers left. - if len(out) < dstEvery*3 { - return nil, errors.New("corruption detected: stream overrun 2") - } - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. - for i := range br { - offset := dstEvery * i - br := &br[i] - bitsLeft := int(br.off*8) + int(64-br.bitsRead) - for bitsLeft > 0 { - if br.finished() { - return nil, io.ErrUnexpectedEOF - } - if br.bitsRead >= 56 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value |= uint64(low) << (br.bitsRead - 32) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - } - // end inline... - if offset >= len(out) { - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - v := single[br.peekByteFast()>>shift].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= int(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - return nil, err - } - } - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. 
-func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { - var br [4]bitReaderBytes - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const shift = 0 - const tlSize = 1 << 8 - const tlMask = tlSize - 1 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - var buf [256]byte - var off uint8 - var decoded int - - // Decode 4 values from each decoder/loop. - const bufoff = 256 / 4 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - // Interleave 2 decodes. - const stream = 0 - const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 := single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+1] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+2] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+3] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - } - - { - const stream = 2 - const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - v := single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 := single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+1] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+2] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - - v = single[br[stream].peekByteFast()>>shift].entry - buf[off+bufoff*stream+3] = uint8(v >> 8) - br[stream].advance(uint8(v)) - - v2 = single[br[stream2].peekByteFast()>>shift].entry - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - br[stream2].advance(uint8(v2)) - } - - off += 4 - - if off == bufoff { - if bufoff > dstEvery { - return nil, errors.New("corruption detected: stream overrun 1") - } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - 
copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
-			copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
-			off = 0
-			out = out[bufoff:]
-			decoded += 256
-			// There must at least be 3 buffers left.
-			if len(out) < dstEvery*3 {
-				return nil, errors.New("corruption detected: stream overrun 2")
-			}
-		}
-	}
-	if off > 0 {
-		ioff := int(off)
-		if len(out) < dstEvery*3+ioff {
-			return nil, errors.New("corruption detected: stream overrun 3")
-		}
-		copy(out, buf[:off])
-		copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
-		copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
-		copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
-		decoded += int(off) * 4
-		out = out[off:]
-	}
-
-	// Decode remaining.
-	for i := range br {
-		offset := dstEvery * i
-		br := &br[i]
-		bitsLeft := int(br.off*8) + int(64-br.bitsRead)
-		for bitsLeft > 0 {
-			if br.finished() {
-				return nil, io.ErrUnexpectedEOF
-			}
-			if br.bitsRead >= 56 {
-				if br.off >= 4 {
-					v := br.in[br.off-4:]
-					v = v[:4]
-					low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
-					br.value |= uint64(low) << (br.bitsRead - 32)
-					br.bitsRead -= 32
-					br.off -= 4
-				} else {
-					for br.off > 0 {
-						br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
-						br.bitsRead -= 8
-						br.off--
-					}
-				}
-			}
-			// end inline...
-			if offset >= len(out) {
-				return nil, errors.New("corruption detected: stream overrun 4")
-			}
-
-			// Read value and increment offset.
-			v := single[br.peekByteFast()>>shift].entry
-			nBits := uint8(v)
-			br.advance(nBits)
-			bitsLeft -= int(nBits)
-			out[offset] = uint8(v >> 8)
-			offset++
-		}
-		decoded += offset - dstEvery*i
-		err = br.close()
-		if err != nil {
-			return nil, err
-		}
-	}
-	if dstSize != decoded {
-		return nil, errors.New("corruption detected: short output block")
-	}
-	return dst, nil
-}
-
-// matches will compare a decoding table to a coding table.
-// Errors are written to the writer.
-// Nothing will be written if table is ok.
-func (s *Scratch) matches(ct cTable, w io.Writer) {
-	if s == nil || len(s.dt.single) == 0 {
-		return
-	}
-	dt := s.dt.single[:1<<s.actualTableLog]
-	tablelog := s.actualTableLog
-	ok := 0
-	broken := 0
-	for sym, enc := range ct {
-		errs := 0
-		broken++
-		if enc.nBits == 0 {
-			for _, dec := range dt {
-				if uint8(dec.entry>>8) == byte(sym) {
-					fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym)
-					errs++
-					break
-				}
-			}
-			if errs == 0 {
-				broken--
-			}
-			continue
-		}
-		// Unused bits in input
-		ub := tablelog - enc.nBits
-		top := enc.val << ub
-		// decoder looks at top bits.
-		dec := dt[top]
-		if uint8(dec.entry) != enc.nBits {
-			fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry))
-			errs++
-		}
-		if uint8(dec.entry>>8) != uint8(sym) {
-			fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8))
-			errs++
-		}
-		if errs > 0 {
-			fmt.Fprintf(w, "%d errros in base, stopping\n", errs)
-			continue
-		}
-		// Ensure that all combinations are covered.
- for i := uint16(0); i < (1 << ub); i++ { - vval := top | i - dec := dt[vval] - if uint8(dec.entry) != enc.nBits { - fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry)) - errs++ - } - if uint8(dec.entry>>8) != uint8(sym) { - fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8)) - errs++ - } - if errs > 20 { - fmt.Fprintf(w, "%d errros, stopping\n", errs) - break - } - } - if errs == 0 { - ok++ - broken-- - } - } - if broken > 0 { - fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok) - } -} diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go deleted file mode 100644 index 7ec2022b6..000000000 --- a/vendor/github.com/klauspost/compress/huff0/huff0.go +++ /dev/null @@ -1,273 +0,0 @@ -// Package huff0 provides fast huffman encoding as used in zstd. -// -// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details. -package huff0 - -import ( - "errors" - "fmt" - "math" - "math/bits" - - "github.com/klauspost/compress/fse" -) - -const ( - maxSymbolValue = 255 - - // zstandard limits tablelog to 11, see: - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description - tableLogMax = 11 - tableLogDefault = 11 - minTablelog = 5 - huffNodesLen = 512 - - // BlockSizeMax is maximum input size for a single block uncompressed. - BlockSizeMax = 1<<18 - 1 -) - -var ( - // ErrIncompressible is returned when input is judged to be too hard to compress. - ErrIncompressible = errors.New("input is not compressible") - - // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. - ErrUseRLE = errors.New("input is single value repeated") - - // ErrTooBig is return if input is too large for a single block. - ErrTooBig = errors.New("input too big") - - // ErrMaxDecodedSizeExceeded is return if input is too large for a single block. - ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded") -) - -type ReusePolicy uint8 - -const ( - // ReusePolicyAllow will allow reuse if it produces smaller output. - ReusePolicyAllow ReusePolicy = iota - - // ReusePolicyPrefer will re-use aggressively if possible. - // This will not check if a new table will produce smaller output, - // except if the current table is impossible to use or - // compressed output is bigger than input. - ReusePolicyPrefer - - // ReusePolicyNone will disable re-use of tables. - // This is slightly faster than ReusePolicyAllow but may produce larger output. - ReusePolicyNone - - // ReusePolicyMust must allow reuse and produce smaller output. - ReusePolicyMust -) - -type Scratch struct { - count [maxSymbolValue + 1]uint32 - - // Per block parameters. - // These can be used to override compression parameters of the block. - // Do not touch, unless you know what you are doing. - - // Out is output buffer. - // If the scratch is re-used before the caller is done processing the output, - // set this field to nil. - // Otherwise the output buffer will be re-used for next Compression/Decompression step - // and allocation will be avoided. - Out []byte - - // OutTable will contain the table data only, if a new table has been generated. - // Slice of the returned data. - OutTable []byte - - // OutData will contain the compressed data. - // Slice of the returned data. - OutData []byte - - // MaxDecodedSize will set the maximum allowed output size. 
- // This value will automatically be set to BlockSizeMax if not set. - // Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded. - MaxDecodedSize int - - br byteReader - - // MaxSymbolValue will override the maximum symbol value of the next block. - MaxSymbolValue uint8 - - // TableLog will attempt to override the tablelog for the next block. - // Must be <= 11 and >= 5. - TableLog uint8 - - // Reuse will specify the reuse policy - Reuse ReusePolicy - - // WantLogLess allows to specify a log 2 reduction that should at least be achieved, - // otherwise the block will be returned as incompressible. - // The reduction should then at least be (input size >> WantLogLess) - // If WantLogLess == 0 any improvement will do. - WantLogLess uint8 - - symbolLen uint16 // Length of active part of the symbol table. - maxCount int // count of the most probable symbol - clearCount bool // clear count - actualTableLog uint8 // Selected tablelog. - prevTableLog uint8 // Tablelog for previous table - prevTable cTable // Table used for previous compression. - cTable cTable // compression table - dt dTable // decompression table - nodes []nodeElt - tmpOut [4][]byte - fse *fse.Scratch - huffWeight [maxSymbolValue + 1]byte -} - -// TransferCTable will transfer the previously used compression table. -func (s *Scratch) TransferCTable(src *Scratch) { - if cap(s.prevTable) < len(src.prevTable) { - s.prevTable = make(cTable, 0, maxSymbolValue+1) - } - s.prevTable = s.prevTable[:len(src.prevTable)] - copy(s.prevTable, src.prevTable) - s.prevTableLog = src.prevTableLog -} - -func (s *Scratch) prepare(in []byte) (*Scratch, error) { - if len(in) > BlockSizeMax { - return nil, ErrTooBig - } - if s == nil { - s = &Scratch{} - } - if s.MaxSymbolValue == 0 { - s.MaxSymbolValue = maxSymbolValue - } - if s.TableLog == 0 { - s.TableLog = tableLogDefault - } - if s.TableLog > tableLogMax || s.TableLog < minTablelog { - return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax) - } - if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax { - s.MaxDecodedSize = BlockSizeMax - } - if s.clearCount && s.maxCount == 0 { - for i := range s.count { - s.count[i] = 0 - } - s.clearCount = false - } - if cap(s.Out) == 0 { - s.Out = make([]byte, 0, len(in)) - } - s.Out = s.Out[:0] - - s.OutTable = nil - s.OutData = nil - if cap(s.nodes) < huffNodesLen+1 { - s.nodes = make([]nodeElt, 0, huffNodesLen+1) - } - s.nodes = s.nodes[:0] - if s.fse == nil { - s.fse = &fse.Scratch{} - } - s.br.init(in) - - return s, nil -} - -type cTable []cTableEntry - -func (c cTable) write(s *Scratch) error { - var ( - // precomputed conversion table - bitsToWeight [tableLogMax + 1]byte - huffLog = s.actualTableLog - // last weight is not saved. - maxSymbolValue = uint8(s.symbolLen - 1) - huffWeight = s.huffWeight[:256] - ) - const ( - maxFSETableLog = 6 - ) - // convert to weight - bitsToWeight[0] = 0 - for n := uint8(1); n < huffLog+1; n++ { - bitsToWeight[n] = huffLog + 1 - n - } - - // Acquire histogram for FSE. - hist := s.fse.Histogram() - hist = hist[:256] - for i := range hist[:16] { - hist[i] = 0 - } - for n := uint8(0); n < maxSymbolValue; n++ { - v := bitsToWeight[c[n].nBits] & 15 - huffWeight[n] = v - hist[v]++ - } - - // FSE compress if feasible. 
- if maxSymbolValue >= 2 { - huffMaxCnt := uint32(0) - huffMax := uint8(0) - for i, v := range hist[:16] { - if v == 0 { - continue - } - huffMax = byte(i) - if v > huffMaxCnt { - huffMaxCnt = v - } - } - s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) - s.fse.TableLog = maxFSETableLog - b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) - if err == nil && len(b) < int(s.symbolLen>>1) { - s.Out = append(s.Out, uint8(len(b))) - s.Out = append(s.Out, b...) - return nil - } - // Unable to compress (RLE/uncompressible) - } - // write raw values as 4-bits (max : 15) - if maxSymbolValue > (256 - 128) { - // should not happen : likely means source cannot be compressed - return ErrIncompressible - } - op := s.Out - // special case, pack weights 4 bits/weight. - op = append(op, 128|(maxSymbolValue-1)) - // be sure it doesn't cause msan issue in final combination - huffWeight[maxSymbolValue] = 0 - for n := uint16(0); n < uint16(maxSymbolValue); n += 2 { - op = append(op, (huffWeight[n]<<4)|huffWeight[n+1]) - } - s.Out = op - return nil -} - -// estimateSize returns the estimated size in bytes of the input represented in the -// histogram supplied. -func (c cTable) estimateSize(hist []uint32) int { - nbBits := uint32(7) - for i, v := range c[:len(hist)] { - nbBits += uint32(v.nBits) * hist[i] - } - return int(nbBits >> 3) -} - -// minSize returns the minimum possible size considering the shannon limit. -func (s *Scratch) minSize(total int) int { - nbBits := float64(7) - fTotal := float64(total) - for _, v := range s.count[:s.symbolLen] { - n := float64(v) - if n > 0 { - nbBits += math.Log2(fTotal/n) * n - } - } - return int(nbBits) >> 3 -} - -func highBit32(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} diff --git a/vendor/github.com/klauspost/compress/snappy/.gitignore b/vendor/github.com/klauspost/compress/snappy/.gitignore deleted file mode 100644 index 042091d9b..000000000 --- a/vendor/github.com/klauspost/compress/snappy/.gitignore +++ /dev/null @@ -1,16 +0,0 @@ -cmd/snappytool/snappytool -testdata/bench - -# These explicitly listed benchmark data files are for an obsolete version of -# snappy_test.go. -testdata/alice29.txt -testdata/asyoulik.txt -testdata/fireworks.jpeg -testdata/geo.protodata -testdata/html -testdata/html_x_4 -testdata/kppkn.gtb -testdata/lcet10.txt -testdata/paper-100k.pdf -testdata/plrabn12.txt -testdata/urls.10K diff --git a/vendor/github.com/klauspost/compress/snappy/AUTHORS b/vendor/github.com/klauspost/compress/snappy/AUTHORS deleted file mode 100644 index bcfa19520..000000000 --- a/vendor/github.com/klauspost/compress/snappy/AUTHORS +++ /dev/null @@ -1,15 +0,0 @@ -# This is the official list of Snappy-Go authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# Please keep the list sorted. - -Damian Gryski -Google Inc. -Jan Mercl <0xjnml@gmail.com> -Rodolfo Carvalho -Sebastien Binet diff --git a/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS b/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS deleted file mode 100644 index 931ae3160..000000000 --- a/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS +++ /dev/null @@ -1,37 +0,0 @@ -# This is the official list of people who can contribute -# (and typically have contributed) code to the Snappy-Go repository. -# The AUTHORS file lists the copyright holders; this file -# lists people. 
For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# The submission process automatically checks to make sure -# that people submitting code are listed in this file (by email address). -# -# Names should be added to this file only after verifying that -# the individual or the individual's organization has agreed to -# the appropriate Contributor License Agreement, found here: -# -# http://code.google.com/legal/individual-cla-v1.0.html -# http://code.google.com/legal/corporate-cla-v1.0.html -# -# The agreement for individuals can be filled out on the web. -# -# When adding J Random Contributor's name to this file, -# either J's name or J's organization's name should be -# added to the AUTHORS file, depending on whether the -# individual or corporate CLA was used. - -# Names should be added to this file like so: -# Name - -# Please keep the list sorted. - -Damian Gryski -Jan Mercl <0xjnml@gmail.com> -Kai Backman -Marc-Antoine Ruel -Nigel Tao -Rob Pike -Rodolfo Carvalho -Russ Cox -Sebastien Binet diff --git a/vendor/github.com/klauspost/compress/snappy/LICENSE b/vendor/github.com/klauspost/compress/snappy/LICENSE deleted file mode 100644 index 6050c10f4..000000000 --- a/vendor/github.com/klauspost/compress/snappy/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/compress/snappy/README b/vendor/github.com/klauspost/compress/snappy/README deleted file mode 100644 index cea12879a..000000000 --- a/vendor/github.com/klauspost/compress/snappy/README +++ /dev/null @@ -1,107 +0,0 @@ -The Snappy compression format in the Go programming language. - -To download and install from source: -$ go get github.com/golang/snappy - -Unless otherwise noted, the Snappy-Go source files are distributed -under the BSD-style license found in the LICENSE file. - - - -Benchmarks. 
- -The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten -or so files, the same set used by the C++ Snappy code (github.com/google/snappy -and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ -3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: - -"go test -test.bench=." - -_UFlat0-8 2.19GB/s ± 0% html -_UFlat1-8 1.41GB/s ± 0% urls -_UFlat2-8 23.5GB/s ± 2% jpg -_UFlat3-8 1.91GB/s ± 0% jpg_200 -_UFlat4-8 14.0GB/s ± 1% pdf -_UFlat5-8 1.97GB/s ± 0% html4 -_UFlat6-8 814MB/s ± 0% txt1 -_UFlat7-8 785MB/s ± 0% txt2 -_UFlat8-8 857MB/s ± 0% txt3 -_UFlat9-8 719MB/s ± 1% txt4 -_UFlat10-8 2.84GB/s ± 0% pb -_UFlat11-8 1.05GB/s ± 0% gaviota - -_ZFlat0-8 1.04GB/s ± 0% html -_ZFlat1-8 534MB/s ± 0% urls -_ZFlat2-8 15.7GB/s ± 1% jpg -_ZFlat3-8 740MB/s ± 3% jpg_200 -_ZFlat4-8 9.20GB/s ± 1% pdf -_ZFlat5-8 991MB/s ± 0% html4 -_ZFlat6-8 379MB/s ± 0% txt1 -_ZFlat7-8 352MB/s ± 0% txt2 -_ZFlat8-8 396MB/s ± 1% txt3 -_ZFlat9-8 327MB/s ± 1% txt4 -_ZFlat10-8 1.33GB/s ± 1% pb -_ZFlat11-8 605MB/s ± 1% gaviota - - - -"go test -test.bench=. -tags=noasm" - -_UFlat0-8 621MB/s ± 2% html -_UFlat1-8 494MB/s ± 1% urls -_UFlat2-8 23.2GB/s ± 1% jpg -_UFlat3-8 1.12GB/s ± 1% jpg_200 -_UFlat4-8 4.35GB/s ± 1% pdf -_UFlat5-8 609MB/s ± 0% html4 -_UFlat6-8 296MB/s ± 0% txt1 -_UFlat7-8 288MB/s ± 0% txt2 -_UFlat8-8 309MB/s ± 1% txt3 -_UFlat9-8 280MB/s ± 1% txt4 -_UFlat10-8 753MB/s ± 0% pb -_UFlat11-8 400MB/s ± 0% gaviota - -_ZFlat0-8 409MB/s ± 1% html -_ZFlat1-8 250MB/s ± 1% urls -_ZFlat2-8 12.3GB/s ± 1% jpg -_ZFlat3-8 132MB/s ± 0% jpg_200 -_ZFlat4-8 2.92GB/s ± 0% pdf -_ZFlat5-8 405MB/s ± 1% html4 -_ZFlat6-8 179MB/s ± 1% txt1 -_ZFlat7-8 170MB/s ± 1% txt2 -_ZFlat8-8 189MB/s ± 1% txt3 -_ZFlat9-8 164MB/s ± 1% txt4 -_ZFlat10-8 479MB/s ± 1% pb -_ZFlat11-8 270MB/s ± 1% gaviota - - - -For comparison (Go's encoded output is byte-for-byte identical to C++'s), here -are the numbers from C++ Snappy's - -make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log - -BM_UFlat/0 2.4GB/s html -BM_UFlat/1 1.4GB/s urls -BM_UFlat/2 21.8GB/s jpg -BM_UFlat/3 1.5GB/s jpg_200 -BM_UFlat/4 13.3GB/s pdf -BM_UFlat/5 2.1GB/s html4 -BM_UFlat/6 1.0GB/s txt1 -BM_UFlat/7 959.4MB/s txt2 -BM_UFlat/8 1.0GB/s txt3 -BM_UFlat/9 864.5MB/s txt4 -BM_UFlat/10 2.9GB/s pb -BM_UFlat/11 1.2GB/s gaviota - -BM_ZFlat/0 944.3MB/s html (22.31 %) -BM_ZFlat/1 501.6MB/s urls (47.78 %) -BM_ZFlat/2 14.3GB/s jpg (99.95 %) -BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) -BM_ZFlat/4 8.3GB/s pdf (83.30 %) -BM_ZFlat/5 903.5MB/s html4 (22.52 %) -BM_ZFlat/6 336.0MB/s txt1 (57.88 %) -BM_ZFlat/7 312.3MB/s txt2 (61.91 %) -BM_ZFlat/8 353.1MB/s txt3 (54.99 %) -BM_ZFlat/9 289.9MB/s txt4 (66.26 %) -BM_ZFlat/10 1.2GB/s pb (19.68 %) -BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/klauspost/compress/snappy/decode.go b/vendor/github.com/klauspost/compress/snappy/decode.go deleted file mode 100644 index 72efb0353..000000000 --- a/vendor/github.com/klauspost/compress/snappy/decode.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "errors" - "io" -) - -var ( - // ErrCorrupt reports that the input is invalid. - ErrCorrupt = errors.New("snappy: corrupt input") - // ErrTooLarge reports that the uncompressed length is too large. 
- ErrTooLarge = errors.New("snappy: decoded block is too large") - // ErrUnsupported reports that the input isn't supported. - ErrUnsupported = errors.New("snappy: unsupported input") - - errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") -) - -// DecodedLen returns the length of the decoded block. -func DecodedLen(src []byte) (int, error) { - v, _, err := decodedLen(src) - return v, err -} - -// decodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func decodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrTooLarge - } - return int(v), n, nil -} - -const ( - decodeErrCodeCorrupt = 1 - decodeErrCodeUnsupportedLiteralLength = 2 -) - -// Decode returns the decoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire decoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -func Decode(dst, src []byte) ([]byte, error) { - dLen, s, err := decodedLen(src) - if err != nil { - return nil, err - } - if dLen <= len(dst) { - dst = dst[:dLen] - } else { - dst = make([]byte, dLen) - } - switch decode(dst, src[s:]) { - case 0: - return dst, nil - case decodeErrCodeUnsupportedLiteralLength: - return nil, errUnsupportedLiteralLength - } - return nil, ErrCorrupt -} - -// NewReader returns a new Reader that decompresses from r, using the framing -// format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -func NewReader(r io.Reader) *Reader { - return &Reader{ - r: r, - decoded: make([]byte, maxBlockSize), - buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), - } -} - -// Reader is an io.Reader that can read Snappy-compressed bytes. -type Reader struct { - r io.Reader - err error - decoded []byte - buf []byte - // decoded[i:j] contains decoded bytes that have not yet been passed on. - i, j int - readHeader bool -} - -// Reset discards any buffered data, resets all state, and switches the Snappy -// reader to read from r. This permits reusing a Reader rather than allocating -// a new one. -func (r *Reader) Reset(reader io.Reader) { - r.r = reader - r.err = nil - r.i = 0 - r.j = 0 - r.readHeader = false -} - -func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrCorrupt - } - return false - } - return true -} - -// Read satisfies the io.Reader interface. -func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - for { - if r.i < r.j { - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil - } - if !r.readFull(r.buf[:4], true) { - return 0, r.err - } - chunkType := r.buf[0] - if !r.readHeader { - if chunkType != chunkTypeStreamIdentifier { - r.err = ErrCorrupt - return 0, r.err - } - r.readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - r.err = ErrUnsupported - return 0, r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). 
- if chunkLen < checksumSize { - r.err = ErrCorrupt - return 0, r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return 0, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[checksumSize:] - - n, err := DecodedLen(buf) - if err != nil { - r.err = err - return 0, r.err - } - if n > len(r.decoded) { - r.err = ErrCorrupt - return 0, r.err - } - if _, err := Decode(r.decoded, buf); err != nil { - r.err = err - return 0, r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return 0, r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeUncompressedData: - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return 0, r.err - } - buf := r.buf[:checksumSize] - if !r.readFull(buf, false) { - return 0, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. - n := chunkLen - checksumSize - if n > len(r.decoded) { - r.err = ErrCorrupt - return 0, r.err - } - if !r.readFull(r.decoded[:n], false) { - return 0, r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return 0, r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeStreamIdentifier: - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(magicBody) { - r.err = ErrCorrupt - return 0, r.err - } - if !r.readFull(r.buf[:len(magicBody)], false) { - return 0, r.err - } - for i := 0; i < len(magicBody); i++ { - if r.buf[i] != magicBody[i] { - r.err = ErrCorrupt - return 0, r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). - r.err = ErrUnsupported - return 0, r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen], false) { - return 0, r.err - } - } -} diff --git a/vendor/github.com/klauspost/compress/snappy/decode_amd64.go b/vendor/github.com/klauspost/compress/snappy/decode_amd64.go deleted file mode 100644 index fcd192b84..000000000 --- a/vendor/github.com/klauspost/compress/snappy/decode_amd64.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -package snappy - -// decode has the same semantics as in decode_other.go. -// -//go:noescape -func decode(dst, src []byte) int diff --git a/vendor/github.com/klauspost/compress/snappy/decode_amd64.s b/vendor/github.com/klauspost/compress/snappy/decode_amd64.s deleted file mode 100644 index 1c66e3723..000000000 --- a/vendor/github.com/klauspost/compress/snappy/decode_amd64.s +++ /dev/null @@ -1,482 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The asm code generally follows the pure Go code in decode_other.go, except -// where marked with a "!!!". - -// func decode(dst, src []byte) int -// -// All local variables fit into registers. The non-zero stack size is only to -// spill registers and push args when issuing a CALL. 
The register allocation: -// - AX scratch -// - BX scratch -// - CX length or x -// - DX offset -// - SI &src[s] -// - DI &dst[d] -// + R8 dst_base -// + R9 dst_len -// + R10 dst_base + dst_len -// + R11 src_base -// + R12 src_len -// + R13 src_base + src_len -// - R14 used by doCopy -// - R15 used by doCopy -// -// The registers R8-R13 (marked with a "+") are set at the start of the -// function, and after a CALL returns, and are not otherwise modified. -// -// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. -// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. -TEXT ·decode(SB), NOSPLIT, $48-56 - // Initialize SI, DI and R8-R13. - MOVQ dst_base+0(FP), R8 - MOVQ dst_len+8(FP), R9 - MOVQ R8, DI - MOVQ R8, R10 - ADDQ R9, R10 - MOVQ src_base+24(FP), R11 - MOVQ src_len+32(FP), R12 - MOVQ R11, SI - MOVQ R11, R13 - ADDQ R12, R13 - -loop: - // for s < len(src) - CMPQ SI, R13 - JEQ end - - // CX = uint32(src[s]) - // - // switch src[s] & 0x03 - MOVBLZX (SI), CX - MOVL CX, BX - ANDL $3, BX - CMPL BX, $1 - JAE tagCopy - - // ---------------------------------------- - // The code below handles literal tags. - - // case tagLiteral: - // x := uint32(src[s] >> 2) - // switch - SHRL $2, CX - CMPL CX, $60 - JAE tagLit60Plus - - // case x < 60: - // s++ - INCQ SI - -doLit: - // This is the end of the inner "switch", when we have a literal tag. - // - // We assume that CX == x and x fits in a uint32, where x is the variable - // used in the pure Go decode_other.go code. - - // length = int(x) + 1 - // - // Unlike the pure Go code, we don't need to check if length <= 0 because - // CX can hold 64 bits, so the increment cannot overflow. - INCQ CX - - // Prepare to check if copying length bytes will run past the end of dst or - // src. - // - // AX = len(dst) - d - // BX = len(src) - s - MOVQ R10, AX - SUBQ DI, AX - MOVQ R13, BX - SUBQ SI, BX - - // !!! Try a faster technique for short (16 or fewer bytes) copies. - // - // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { - // goto callMemmove // Fall back on calling runtime·memmove. - // } - // - // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s - // against 21 instead of 16, because it cannot assume that all of its input - // is contiguous in memory and so it needs to leave enough source bytes to - // read the next tag without refilling buffers, but Go's Decode assumes - // contiguousness (the src argument is a []byte). - CMPQ CX, $16 - JGT callMemmove - CMPQ AX, $16 - JLT callMemmove - CMPQ BX, $16 - JLT callMemmove - - // !!! Implement the copy from src to dst as a 16-byte load and store. - // (Decode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only length bytes, but that's - // OK. If the input is a valid Snappy encoding then subsequent iterations - // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a - // non-nil error), so the overrun will be ignored. - // - // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. 
- MOVOU 0(SI), X0 - MOVOU X0, 0(DI) - - // d += length - // s += length - ADDQ CX, DI - ADDQ CX, SI - JMP loop - -callMemmove: - // if length > len(dst)-d || length > len(src)-s { etc } - CMPQ CX, AX - JGT errCorrupt - CMPQ CX, BX - JGT errCorrupt - - // copy(dst[d:], src[s:s+length]) - // - // This means calling runtime·memmove(&dst[d], &src[s], length), so we push - // DI, SI and CX as arguments. Coincidentally, we also need to spill those - // three registers to the stack, to save local variables across the CALL. - MOVQ DI, 0(SP) - MOVQ SI, 8(SP) - MOVQ CX, 16(SP) - MOVQ DI, 24(SP) - MOVQ SI, 32(SP) - MOVQ CX, 40(SP) - CALL runtime·memmove(SB) - - // Restore local variables: unspill registers from the stack and - // re-calculate R8-R13. - MOVQ 24(SP), DI - MOVQ 32(SP), SI - MOVQ 40(SP), CX - MOVQ dst_base+0(FP), R8 - MOVQ dst_len+8(FP), R9 - MOVQ R8, R10 - ADDQ R9, R10 - MOVQ src_base+24(FP), R11 - MOVQ src_len+32(FP), R12 - MOVQ R11, R13 - ADDQ R12, R13 - - // d += length - // s += length - ADDQ CX, DI - ADDQ CX, SI - JMP loop - -tagLit60Plus: - // !!! This fragment does the - // - // s += x - 58; if uint(s) > uint(len(src)) { etc } - // - // checks. In the asm version, we code it once instead of once per switch case. - ADDQ CX, SI - SUBQ $58, SI - CMPQ SI, R13 - JA errCorrupt - - // case x == 60: - CMPL CX, $61 - JEQ tagLit61 - JA tagLit62Plus - - // x = uint32(src[s-1]) - MOVBLZX -1(SI), CX - JMP doLit - -tagLit61: - // case x == 61: - // x = uint32(src[s-2]) | uint32(src[s-1])<<8 - MOVWLZX -2(SI), CX - JMP doLit - -tagLit62Plus: - CMPL CX, $62 - JA tagLit63 - - // case x == 62: - // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - MOVWLZX -3(SI), CX - MOVBLZX -1(SI), BX - SHLL $16, BX - ORL BX, CX - JMP doLit - -tagLit63: - // case x == 63: - // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - MOVL -4(SI), CX - JMP doLit - -// The code above handles literal tags. -// ---------------------------------------- -// The code below handles copy tags. - -tagCopy4: - // case tagCopy4: - // s += 5 - ADDQ $5, SI - - // if uint(s) > uint(len(src)) { etc } - CMPQ SI, R13 - JA errCorrupt - - // length = 1 + int(src[s-5])>>2 - SHRQ $2, CX - INCQ CX - - // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - MOVLQZX -4(SI), DX - JMP doCopy - -tagCopy2: - // case tagCopy2: - // s += 3 - ADDQ $3, SI - - // if uint(s) > uint(len(src)) { etc } - CMPQ SI, R13 - JA errCorrupt - - // length = 1 + int(src[s-3])>>2 - SHRQ $2, CX - INCQ CX - - // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - MOVWQZX -2(SI), DX - JMP doCopy - -tagCopy: - // We have a copy tag. We assume that: - // - BX == src[s] & 0x03 - // - CX == src[s] - CMPQ BX, $2 - JEQ tagCopy2 - JA tagCopy4 - - // case tagCopy1: - // s += 2 - ADDQ $2, SI - - // if uint(s) > uint(len(src)) { etc } - CMPQ SI, R13 - JA errCorrupt - - // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - MOVQ CX, DX - ANDQ $0xe0, DX - SHLQ $3, DX - MOVBQZX -1(SI), BX - ORQ BX, DX - - // length = 4 + int(src[s-2])>>2&0x7 - SHRQ $2, CX - ANDQ $7, CX - ADDQ $4, CX - -doCopy: - // This is the end of the outer "switch", when we have a copy tag. 
- // - // We assume that: - // - CX == length && CX > 0 - // - DX == offset - - // if offset <= 0 { etc } - CMPQ DX, $0 - JLE errCorrupt - - // if d < offset { etc } - MOVQ DI, BX - SUBQ R8, BX - CMPQ BX, DX - JLT errCorrupt - - // if length > len(dst)-d { etc } - MOVQ R10, BX - SUBQ DI, BX - CMPQ CX, BX - JGT errCorrupt - - // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length - // - // Set: - // - R14 = len(dst)-d - // - R15 = &dst[d-offset] - MOVQ R10, R14 - SUBQ DI, R14 - MOVQ DI, R15 - SUBQ DX, R15 - - // !!! Try a faster technique for short (16 or fewer bytes) forward copies. - // - // First, try using two 8-byte load/stores, similar to the doLit technique - // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is - // still OK if offset >= 8. Note that this has to be two 8-byte load/stores - // and not one 16-byte load/store, and the first store has to be before the - // second load, due to the overlap if offset is in the range [8, 16). - // - // if length > 16 || offset < 8 || len(dst)-d < 16 { - // goto slowForwardCopy - // } - // copy 16 bytes - // d += length - CMPQ CX, $16 - JGT slowForwardCopy - CMPQ DX, $8 - JLT slowForwardCopy - CMPQ R14, $16 - JLT slowForwardCopy - MOVQ 0(R15), AX - MOVQ AX, 0(DI) - MOVQ 8(R15), BX - MOVQ BX, 8(DI) - ADDQ CX, DI - JMP loop - -slowForwardCopy: - // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we - // can still try 8-byte load stores, provided we can overrun up to 10 extra - // bytes. As above, the overrun will be fixed up by subsequent iterations - // of the outermost loop. - // - // The C++ snappy code calls this technique IncrementalCopyFastPath. Its - // commentary says: - // - // ---- - // - // The main part of this loop is a simple copy of eight bytes at a time - // until we've copied (at least) the requested amount of bytes. However, - // if d and d-offset are less than eight bytes apart (indicating a - // repeating pattern of length < 8), we first need to expand the pattern in - // order to get the correct results. For instance, if the buffer looks like - // this, with the eight-byte and patterns marked as - // intervals: - // - // abxxxxxxxxxxxx - // [------] d-offset - // [------] d - // - // a single eight-byte copy from to will repeat the pattern - // once, after which we can move two bytes without moving : - // - // ababxxxxxxxxxx - // [------] d-offset - // [------] d - // - // and repeat the exercise until the two no longer overlap. - // - // This allows us to do very well in the special case of one single byte - // repeated many times, without taking a big hit for more general cases. - // - // The worst case of extra writing past the end of the match occurs when - // offset == 1 and length == 1; the last copy will read from byte positions - // [0..7] and write to [4..11], whereas it was only supposed to write to - // position 1. Thus, ten excess bytes. - // - // ---- - // - // That "10 byte overrun" worst case is confirmed by Go's - // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy - // and finishSlowForwardCopy algorithm. - // - // if length > len(dst)-d-10 { - // goto verySlowForwardCopy - // } - SUBQ $10, R14 - CMPQ CX, R14 - JGT verySlowForwardCopy - -makeOffsetAtLeast8: - // !!! As above, expand the pattern so that offset >= 8 and we can use - // 8-byte load/stores. 
- // - // for offset < 8 { - // copy 8 bytes from dst[d-offset:] to dst[d:] - // length -= offset - // d += offset - // offset += offset - // // The two previous lines together means that d-offset, and therefore - // // R15, is unchanged. - // } - CMPQ DX, $8 - JGE fixUpSlowForwardCopy - MOVQ (R15), BX - MOVQ BX, (DI) - SUBQ DX, CX - ADDQ DX, DI - ADDQ DX, DX - JMP makeOffsetAtLeast8 - -fixUpSlowForwardCopy: - // !!! Add length (which might be negative now) to d (implied by DI being - // &dst[d]) so that d ends up at the right place when we jump back to the - // top of the loop. Before we do that, though, we save DI to AX so that, if - // length is positive, copying the remaining length bytes will write to the - // right place. - MOVQ DI, AX - ADDQ CX, DI - -finishSlowForwardCopy: - // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative - // length means that we overrun, but as above, that will be fixed up by - // subsequent iterations of the outermost loop. - CMPQ CX, $0 - JLE loop - MOVQ (R15), BX - MOVQ BX, (AX) - ADDQ $8, R15 - ADDQ $8, AX - SUBQ $8, CX - JMP finishSlowForwardCopy - -verySlowForwardCopy: - // verySlowForwardCopy is a simple implementation of forward copy. In C - // parlance, this is a do/while loop instead of a while loop, since we know - // that length > 0. In Go syntax: - // - // for { - // dst[d] = dst[d - offset] - // d++ - // length-- - // if length == 0 { - // break - // } - // } - MOVB (R15), BX - MOVB BX, (DI) - INCQ R15 - INCQ DI - DECQ CX - JNZ verySlowForwardCopy - JMP loop - -// The code above handles copy tags. -// ---------------------------------------- - -end: - // This is the end of the "for s < len(src)". - // - // if d != len(dst) { etc } - CMPQ DI, R10 - JNE errCorrupt - - // return 0 - MOVQ $0, ret+48(FP) - RET - -errCorrupt: - // return decodeErrCodeCorrupt - MOVQ $1, ret+48(FP) - RET diff --git a/vendor/github.com/klauspost/compress/snappy/decode_other.go b/vendor/github.com/klauspost/compress/snappy/decode_other.go deleted file mode 100644 index 94a96c5d7..000000000 --- a/vendor/github.com/klauspost/compress/snappy/decode_other.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64 appengine !gc noasm - -package snappy - -// decode writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read, and that len(dst) -// equals that length. -// -// It returns 0 on success or a decodeErrCodeXxx error code on failure. -func decode(dst, src []byte) int { - var d, s, offset, length int - for s < len(src) { - switch src[s] & 0x03 { - case tagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
- return decodeErrCodeCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - length = int(x) + 1 - if length <= 0 { - return decodeErrCodeUnsupportedLiteralLength - } - if length > len(dst)-d || length > len(src)-s { - return decodeErrCodeCorrupt - } - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - - case tagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - - case tagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - } - - if offset <= 0 || d < offset || length > len(dst)-d { - return decodeErrCodeCorrupt - } - // Copy from an earlier sub-slice of dst to a later sub-slice. - // If no overlap, use the built-in copy: - if offset > length { - copy(dst[d:d+length], dst[d-offset:]) - d += length - continue - } - - // Unlike the built-in copy function, this byte-by-byte copy always runs - // forwards, even if the slices overlap. Conceptually, this is: - // - // d += forwardCopy(dst[d:d+length], dst[d-offset:]) - // - // We align the slices into a and b and show the compiler they are the same size. - // This allows the loop to run without bounds checks. - a := dst[d : d+length] - b := dst[d-offset:] - b = b[:len(a)] - for i := range a { - a[i] = b[i] - } - d += length - } - if d != len(dst) { - return decodeErrCodeCorrupt - } - return 0 -} diff --git a/vendor/github.com/klauspost/compress/snappy/encode.go b/vendor/github.com/klauspost/compress/snappy/encode.go deleted file mode 100644 index 8d393e904..000000000 --- a/vendor/github.com/klauspost/compress/snappy/encode.go +++ /dev/null @@ -1,285 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "errors" - "io" -) - -// Encode returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -func Encode(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); n < 0 { - panic(ErrTooLarge) - } else if len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. 
- d := binary.PutUvarint(dst, uint64(len(src))) - - for len(src) > 0 { - p := src - src = nil - if len(p) > maxBlockSize { - p, src = p[:maxBlockSize], p[maxBlockSize:] - } - if len(p) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], p) - } else { - d += encodeBlock(dst[d:], p) - } - } - return dst[:d] -} - -// inputMargin is the minimum number of extra input bytes to keep, inside -// encodeBlock's inner loop. On some architectures, this margin lets us -// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) -// literals can be implemented as a single load to and store from a 16-byte -// register. That literal's actual length can be as short as 1 byte, so this -// can copy up to 15 bytes too much, but that's OK as subsequent iterations of -// the encoding loop will fix up the copy overrun, and this inputMargin ensures -// that we don't overrun the dst and src buffers. -const inputMargin = 16 - 1 - -// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that -// could be encoded with a copy tag. This is the minimum with respect to the -// algorithm used by encodeBlock, not a minimum enforced by the file format. -// -// The encoded output must start with at least a 1 byte literal, as there are -// no previous bytes to copy. A minimal (1 byte) copy after that, generated -// from an emitCopy call in encodeBlock's main loop, would require at least -// another inputMargin bytes, for the reason above: we want any emitLiteral -// calls inside encodeBlock's main loop to use the fast path if possible, which -// requires being able to overrun by inputMargin bytes. Thus, -// minNonLiteralBlockSize equals 1 + 1 + inputMargin. -// -// The C++ code doesn't use this exact threshold, but it could, as discussed at -// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion -// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an -// optimization. It should not affect the encoded form. This is tested by -// TestSameEncodingAsCppShortCopies. -const minNonLiteralBlockSize = 1 + 1 + inputMargin - -// MaxEncodedLen returns the maximum length of a snappy block, given its -// uncompressed length. -// -// It will return a negative value if srcLen is too large to encode. -func MaxEncodedLen(srcLen int) int { - n := uint64(srcLen) - if n > 0xffffffff { - return -1 - } - // Compressed data can be defined as: - // compressed := item* literal* - // item := literal* copy - // - // The trailing literal sequence has a space blowup of at most 62/60 - // since a literal of length 60 needs one tag byte + one extra byte - // for length information. - // - // Item blowup is trickier to measure. Suppose the "copy" op copies - // 4 bytes of data. Because of a special check in the encoding code, - // we produce a 4-byte copy only if the offset is < 65536. Therefore - // the copy op takes 3 bytes to encode, and this type of item leads - // to at most the 62/60 blowup for representing literals. - // - // Suppose the "copy" op copies 5 bytes of data. If the offset is big - // enough, it will take 5 bytes to encode the copy op. Therefore the - // worst case here is a one-byte literal followed by a five-byte copy. - // That is, 6 bytes of input turn into 7 bytes of "compressed" data. 
- // - // This last factor dominates the blowup, so the final estimate is: - n = 32 + n + n/6 - if n > 0xffffffff { - return -1 - } - return int(n) -} - -var errClosed = errors.New("snappy: Writer is closed") - -// NewWriter returns a new Writer that compresses to w. -// -// The Writer returned does not buffer writes. There is no need to Flush or -// Close such a Writer. -// -// Deprecated: the Writer returned is not suitable for many small writes, only -// for few large writes. Use NewBufferedWriter instead, which is efficient -// regardless of the frequency and shape of the writes, and remember to Close -// that Writer when done. -func NewWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - obuf: make([]byte, obufLen), - } -} - -// NewBufferedWriter returns a new Writer that compresses to w, using the -// framing format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -// -// The Writer returned buffers writes. Users must call Close to guarantee all -// data has been forwarded to the underlying io.Writer. They may also call -// Flush zero or more times before calling Close. -func NewBufferedWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - ibuf: make([]byte, 0, maxBlockSize), - obuf: make([]byte, obufLen), - } -} - -// Writer is an io.Writer that can write Snappy-compressed bytes. -type Writer struct { - w io.Writer - err error - - // ibuf is a buffer for the incoming (uncompressed) bytes. - // - // Its use is optional. For backwards compatibility, Writers created by the - // NewWriter function have ibuf == nil, do not buffer incoming bytes, and - // therefore do not need to be Flush'ed or Close'd. - ibuf []byte - - // obuf is a buffer for the outgoing (compressed) bytes. - obuf []byte - - // wroteStreamHeader is whether we have written the stream header. - wroteStreamHeader bool -} - -// Reset discards the writer's state and switches the Snappy writer to write to -// w. This permits reusing a Writer rather than allocating a new one. -func (w *Writer) Reset(writer io.Writer) { - w.w = writer - w.err = nil - if w.ibuf != nil { - w.ibuf = w.ibuf[:0] - } - w.wroteStreamHeader = false -} - -// Write satisfies the io.Writer interface. -func (w *Writer) Write(p []byte) (nRet int, errRet error) { - if w.ibuf == nil { - // Do not buffer incoming bytes. This does not perform or compress well - // if the caller of Writer.Write writes many small slices. This - // behavior is therefore deprecated, but still supported for backwards - // compatibility with code that doesn't explicitly Flush or Close. - return w.write(p) - } - - // The remainder of this method is based on bufio.Writer.Write from the - // standard library. - - for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { - var n int - if len(w.ibuf) == 0 { - // Large write, empty buffer. - // Write directly from p to avoid copy. 
- n, _ = w.write(p) - } else { - n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - w.Flush() - } - nRet += n - p = p[n:] - } - if w.err != nil { - return nRet, w.err - } - n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - nRet += n - return nRet, nil -} - -func (w *Writer) write(p []byte) (nRet int, errRet error) { - if w.err != nil { - return 0, w.err - } - for len(p) > 0 { - obufStart := len(magicChunk) - if !w.wroteStreamHeader { - w.wroteStreamHeader = true - copy(w.obuf, magicChunk) - obufStart = 0 - } - - var uncompressed []byte - if len(p) > maxBlockSize { - uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] - } else { - uncompressed, p = p, nil - } - checksum := crc(uncompressed) - - // Compress the buffer, discarding the result if the improvement - // isn't at least 12.5%. - compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) - chunkType := uint8(chunkTypeCompressedData) - chunkLen := 4 + len(compressed) - obufEnd := obufHeaderLen + len(compressed) - if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { - chunkType = chunkTypeUncompressedData - chunkLen = 4 + len(uncompressed) - obufEnd = obufHeaderLen - } - - // Fill in the per-chunk header that comes before the body. - w.obuf[len(magicChunk)+0] = chunkType - w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) - w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) - w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) - w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) - w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) - w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) - w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) - - if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { - w.err = err - return nRet, err - } - if chunkType == chunkTypeUncompressedData { - if _, err := w.w.Write(uncompressed); err != nil { - w.err = err - return nRet, err - } - } - nRet += len(uncompressed) - } - return nRet, nil -} - -// Flush flushes the Writer to its underlying io.Writer. -func (w *Writer) Flush() error { - if w.err != nil { - return w.err - } - if len(w.ibuf) == 0 { - return nil - } - w.write(w.ibuf) - w.ibuf = w.ibuf[:0] - return w.err -} - -// Close calls Flush and then closes the Writer. -func (w *Writer) Close() error { - w.Flush() - ret := w.err - if w.err == nil { - w.err = errClosed - } - return ret -} diff --git a/vendor/github.com/klauspost/compress/snappy/encode_amd64.go b/vendor/github.com/klauspost/compress/snappy/encode_amd64.go deleted file mode 100644 index 150d91bc8..000000000 --- a/vendor/github.com/klauspost/compress/snappy/encode_amd64.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -package snappy - -// emitLiteral has the same semantics as in encode_other.go. -// -//go:noescape -func emitLiteral(dst, lit []byte) int - -// emitCopy has the same semantics as in encode_other.go. -// -//go:noescape -func emitCopy(dst []byte, offset, length int) int - -// extendMatch has the same semantics as in encode_other.go. -// -//go:noescape -func extendMatch(src []byte, i, j int) int - -// encodeBlock has the same semantics as in encode_other.go. 
-// -//go:noescape -func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/klauspost/compress/snappy/encode_amd64.s b/vendor/github.com/klauspost/compress/snappy/encode_amd64.s deleted file mode 100644 index adfd979fe..000000000 --- a/vendor/github.com/klauspost/compress/snappy/encode_amd64.s +++ /dev/null @@ -1,730 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a -// Go toolchain regression. See https://github.com/golang/go/issues/15426 and -// https://github.com/golang/snappy/issues/29 -// -// As a workaround, the package was built with a known good assembler, and -// those instructions were disassembled by "objdump -d" to yield the -// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 -// style comments, in AT&T asm syntax. Note that rsp here is a physical -// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). -// The instructions were then encoded as "BYTE $0x.." sequences, which assemble -// fine on Go 1.6. - -// The asm code generally follows the pure Go code in encode_other.go, except -// where marked with a "!!!". - -// ---------------------------------------------------------------------------- - -// func emitLiteral(dst, lit []byte) int -// -// All local variables fit into registers. The register allocation: -// - AX len(lit) -// - BX n -// - DX return value -// - DI &dst[i] -// - R10 &lit[0] -// -// The 24 bytes of stack space is to call runtime·memmove. -// -// The unusual register allocation of local variables, such as R10 for the -// source pointer, matches the allocation used at the call site in encodeBlock, -// which makes it easier to manually inline this function. -TEXT ·emitLiteral(SB), NOSPLIT, $24-56 - MOVQ dst_base+0(FP), DI - MOVQ lit_base+24(FP), R10 - MOVQ lit_len+32(FP), AX - MOVQ AX, DX - MOVL AX, BX - SUBL $1, BX - - CMPL BX, $60 - JLT oneByte - CMPL BX, $256 - JLT twoBytes - -threeBytes: - MOVB $0xf4, 0(DI) - MOVW BX, 1(DI) - ADDQ $3, DI - ADDQ $3, DX - JMP memmove - -twoBytes: - MOVB $0xf0, 0(DI) - MOVB BX, 1(DI) - ADDQ $2, DI - ADDQ $2, DX - JMP memmove - -oneByte: - SHLB $2, BX - MOVB BX, 0(DI) - ADDQ $1, DI - ADDQ $1, DX - -memmove: - MOVQ DX, ret+48(FP) - - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // DI, R10 and AX as arguments. - MOVQ DI, 0(SP) - MOVQ R10, 8(SP) - MOVQ AX, 16(SP) - CALL runtime·memmove(SB) - RET - -// ---------------------------------------------------------------------------- - -// func emitCopy(dst []byte, offset, length int) int -// -// All local variables fit into registers. The register allocation: -// - AX length -// - SI &dst[0] -// - DI &dst[i] -// - R11 offset -// -// The unusual register allocation of local variables, such as R11 for the -// offset, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·emitCopy(SB), NOSPLIT, $0-48 - MOVQ dst_base+0(FP), DI - MOVQ DI, SI - MOVQ offset+24(FP), R11 - MOVQ length+32(FP), AX - -loop0: - // for length >= 68 { etc } - CMPL AX, $68 - JLT step1 - - // Emit a length 64 copy, encoded as 3 bytes. 
- MOVB $0xfe, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $64, AX - JMP loop0 - -step1: - // if length > 64 { etc } - CMPL AX, $64 - JLE step2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVB $0xee, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $60, AX - -step2: - // if length >= 12 || offset >= 2048 { goto step3 } - CMPL AX, $12 - JGE step3 - CMPL R11, $2048 - JGE step3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(DI) - SHRL $8, R11 - SHLB $5, R11 - SUBB $4, AX - SHLB $2, AX - ORB AX, R11 - ORB $1, R11 - MOVB R11, 0(DI) - ADDQ $2, DI - - // Return the number of bytes written. - SUBQ SI, DI - MOVQ DI, ret+40(FP) - RET - -step3: - // Emit the remaining copy, encoded as 3 bytes. - SUBL $1, AX - SHLB $2, AX - ORB $2, AX - MOVB AX, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - - // Return the number of bytes written. - SUBQ SI, DI - MOVQ DI, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func extendMatch(src []byte, i, j int) int -// -// All local variables fit into registers. The register allocation: -// - DX &src[0] -// - SI &src[j] -// - R13 &src[len(src) - 8] -// - R14 &src[len(src)] -// - R15 &src[i] -// -// The unusual register allocation of local variables, such as R15 for a source -// pointer, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·extendMatch(SB), NOSPLIT, $0-48 - MOVQ src_base+0(FP), DX - MOVQ src_len+8(FP), R14 - MOVQ i+24(FP), R15 - MOVQ j+32(FP), SI - ADDQ DX, R14 - ADDQ DX, R15 - ADDQ DX, SI - MOVQ R14, R13 - SUBQ $8, R13 - -cmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ SI, R13 - JA cmp1 - MOVQ (R15), AX - MOVQ (SI), BX - CMPQ AX, BX - JNE bsf - ADDQ $8, R15 - ADDQ $8, SI - JMP cmp8 - -bsf: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, SI - - // Convert from &src[ret] to ret. - SUBQ DX, SI - MOVQ SI, ret+40(FP) - RET - -cmp1: - // In src's tail, compare 1 byte at a time. - CMPQ SI, R14 - JAE extendMatchEnd - MOVB (R15), AX - MOVB (SI), BX - CMPB AX, BX - JNE extendMatchEnd - ADDQ $1, R15 - ADDQ $1, SI - JMP cmp1 - -extendMatchEnd: - // Convert from &src[ret] to ret. - SUBQ DX, SI - MOVQ SI, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func encodeBlock(dst, src []byte) (d int) -// -// All local variables fit into registers, other than "var table". The register -// allocation: -// - AX . . -// - BX . . -// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). -// - DX 64 &src[0], tableSize -// - SI 72 &src[s] -// - DI 80 &dst[d] -// - R9 88 sLimit -// - R10 . &src[nextEmit] -// - R11 96 prevHash, currHash, nextHash, offset -// - R12 104 &src[base], skip -// - R13 . &src[nextS], &src[len(src) - 8] -// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x -// - R15 112 candidate -// -// The second column (56, 64, etc) is the stack offset to spill the registers -// when calling other functions. We could pack this slightly tighter, but it's -// simpler to have a dedicated spill map independent of the function called. 
-// -// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An -// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill -// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. -TEXT ·encodeBlock(SB), 0, $32888-56 - MOVQ dst_base+0(FP), DI - MOVQ src_base+24(FP), SI - MOVQ src_len+32(FP), R14 - - // shift, tableSize := uint32(32-8), 1<<8 - MOVQ $24, CX - MOVQ $256, DX - -calcShift: - // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - // shift-- - // } - CMPQ DX, $16384 - JGE varTable - CMPQ DX, R14 - JGE varTable - SUBQ $1, CX - SHLQ $1, DX - JMP calcShift - -varTable: - // var table [maxTableSize]uint16 - // - // In the asm code, unlike the Go code, we can zero-initialize only the - // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU - // writes 16 bytes, so we can do only tableSize/8 writes instead of the - // 2048 writes that would zero-initialize all of table's 32768 bytes. - SHRQ $3, DX - LEAQ table-32768(SP), BX - PXOR X0, X0 - -memclr: - MOVOU X0, 0(BX) - ADDQ $16, BX - SUBQ $1, DX - JNZ memclr - - // !!! DX = &src[0] - MOVQ SI, DX - - // sLimit := len(src) - inputMargin - MOVQ R14, R9 - SUBQ $15, R9 - - // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't - // change for the rest of the function. - MOVQ CX, 56(SP) - MOVQ DX, 64(SP) - MOVQ R9, 88(SP) - - // nextEmit := 0 - MOVQ DX, R10 - - // s := 1 - ADDQ $1, SI - - // nextHash := hash(load32(src, s), shift) - MOVL 0(SI), R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - -outer: - // for { etc } - - // skip := 32 - MOVQ $32, R12 - - // nextS := s - MOVQ SI, R13 - - // candidate := 0 - MOVQ $0, R15 - -inner0: - // for { etc } - - // s := nextS - MOVQ R13, SI - - // bytesBetweenHashLookups := skip >> 5 - MOVQ R12, R14 - SHRQ $5, R14 - - // nextS = s + bytesBetweenHashLookups - ADDQ R14, R13 - - // skip += bytesBetweenHashLookups - ADDQ R14, R12 - - // if nextS > sLimit { goto emitRemainder } - MOVQ R13, AX - SUBQ DX, AX - CMPQ AX, R9 - JA emitRemainder - - // candidate = int(table[nextHash]) - // XXX: MOVWQZX table-32768(SP)(R11*2), R15 - // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 - BYTE $0x4e - BYTE $0x0f - BYTE $0xb7 - BYTE $0x7c - BYTE $0x5c - BYTE $0x78 - - // table[nextHash] = uint16(s) - MOVQ SI, AX - SUBQ DX, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // nextHash = hash(load32(src, nextS), shift) - MOVL 0(R13), R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // if load32(src, s) != load32(src, candidate) { continue } break - MOVL 0(SI), AX - MOVL (DX)(R15*1), BX - CMPL AX, BX - JNE inner0 - -fourByteMatch: - // As per the encode_other.go code: - // - // A 4-byte match has been found. We'll later see etc. - - // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment - // on inputMargin in encode.go. - MOVQ SI, AX - SUBQ R10, AX - CMPQ AX, $16 - JLE emitLiteralFastPath - - // ---------------------------------------- - // Begin inline of the emitLiteral call. 
- // - // d += emitLiteral(dst[d:], src[nextEmit:s]) - - MOVL AX, BX - SUBL $1, BX - - CMPL BX, $60 - JLT inlineEmitLiteralOneByte - CMPL BX, $256 - JLT inlineEmitLiteralTwoBytes - -inlineEmitLiteralThreeBytes: - MOVB $0xf4, 0(DI) - MOVW BX, 1(DI) - ADDQ $3, DI - JMP inlineEmitLiteralMemmove - -inlineEmitLiteralTwoBytes: - MOVB $0xf0, 0(DI) - MOVB BX, 1(DI) - ADDQ $2, DI - JMP inlineEmitLiteralMemmove - -inlineEmitLiteralOneByte: - SHLB $2, BX - MOVB BX, 0(DI) - ADDQ $1, DI - -inlineEmitLiteralMemmove: - // Spill local variables (registers) onto the stack; call; unspill. - // - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // DI, R10 and AX as arguments. - MOVQ DI, 0(SP) - MOVQ R10, 8(SP) - MOVQ AX, 16(SP) - ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". - MOVQ SI, 72(SP) - MOVQ DI, 80(SP) - MOVQ R15, 112(SP) - CALL runtime·memmove(SB) - MOVQ 56(SP), CX - MOVQ 64(SP), DX - MOVQ 72(SP), SI - MOVQ 80(SP), DI - MOVQ 88(SP), R9 - MOVQ 112(SP), R15 - JMP inner1 - -inlineEmitLiteralEnd: - // End inline of the emitLiteral call. - // ---------------------------------------- - -emitLiteralFastPath: - // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". - MOVB AX, BX - SUBB $1, BX - SHLB $2, BX - MOVB BX, (DI) - ADDQ $1, DI - - // !!! Implement the copy from lit to dst as a 16-byte load and store. - // (Encode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only len(lit) bytes, but that's - // OK. Subsequent iterations will fix up the overrun. - // - // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - MOVOU 0(R10), X0 - MOVOU X0, 0(DI) - ADDQ AX, DI - -inner1: - // for { etc } - - // base := s - MOVQ SI, R12 - - // !!! offset := base - candidate - MOVQ R12, R11 - SUBQ R15, R11 - SUBQ DX, R11 - - // ---------------------------------------- - // Begin inline of the extendMatch call. - // - // s = extendMatch(src, candidate+4, s+4) - - // !!! R14 = &src[len(src)] - MOVQ src_len+32(FP), R14 - ADDQ DX, R14 - - // !!! R13 = &src[len(src) - 8] - MOVQ R14, R13 - SUBQ $8, R13 - - // !!! R15 = &src[candidate + 4] - ADDQ $4, R15 - ADDQ DX, R15 - - // !!! s += 4 - ADDQ $4, SI - -inlineExtendMatchCmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ SI, R13 - JA inlineExtendMatchCmp1 - MOVQ (R15), AX - MOVQ (SI), BX - CMPQ AX, BX - JNE inlineExtendMatchBSF - ADDQ $8, R15 - ADDQ $8, SI - JMP inlineExtendMatchCmp8 - -inlineExtendMatchBSF: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, SI - JMP inlineExtendMatchEnd - -inlineExtendMatchCmp1: - // In src's tail, compare 1 byte at a time. - CMPQ SI, R14 - JAE inlineExtendMatchEnd - MOVB (R15), AX - MOVB (SI), BX - CMPB AX, BX - JNE inlineExtendMatchEnd - ADDQ $1, R15 - ADDQ $1, SI - JMP inlineExtendMatchCmp1 - -inlineExtendMatchEnd: - // End inline of the extendMatch call. 
- // ---------------------------------------- - - // ---------------------------------------- - // Begin inline of the emitCopy call. - // - // d += emitCopy(dst[d:], base-candidate, s-base) - - // !!! length := s - base - MOVQ SI, AX - SUBQ R12, AX - -inlineEmitCopyLoop0: - // for length >= 68 { etc } - CMPL AX, $68 - JLT inlineEmitCopyStep1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVB $0xfe, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $64, AX - JMP inlineEmitCopyLoop0 - -inlineEmitCopyStep1: - // if length > 64 { etc } - CMPL AX, $64 - JLE inlineEmitCopyStep2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVB $0xee, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $60, AX - -inlineEmitCopyStep2: - // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } - CMPL AX, $12 - JGE inlineEmitCopyStep3 - CMPL R11, $2048 - JGE inlineEmitCopyStep3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(DI) - SHRL $8, R11 - SHLB $5, R11 - SUBB $4, AX - SHLB $2, AX - ORB AX, R11 - ORB $1, R11 - MOVB R11, 0(DI) - ADDQ $2, DI - JMP inlineEmitCopyEnd - -inlineEmitCopyStep3: - // Emit the remaining copy, encoded as 3 bytes. - SUBL $1, AX - SHLB $2, AX - ORB $2, AX - MOVB AX, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - -inlineEmitCopyEnd: - // End inline of the emitCopy call. - // ---------------------------------------- - - // nextEmit = s - MOVQ SI, R10 - - // if s >= sLimit { goto emitRemainder } - MOVQ SI, AX - SUBQ DX, AX - CMPQ AX, R9 - JAE emitRemainder - - // As per the encode_other.go code: - // - // We could immediately etc. - - // x := load64(src, s-1) - MOVQ -1(SI), R14 - - // prevHash := hash(uint32(x>>0), shift) - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // table[prevHash] = uint16(s-1) - MOVQ SI, AX - SUBQ DX, AX - SUBQ $1, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // currHash := hash(uint32(x>>8), shift) - SHRQ $8, R14 - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // candidate = int(table[currHash]) - // XXX: MOVWQZX table-32768(SP)(R11*2), R15 - // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 - BYTE $0x4e - BYTE $0x0f - BYTE $0xb7 - BYTE $0x7c - BYTE $0x5c - BYTE $0x78 - - // table[currHash] = uint16(s) - ADDQ $1, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // if uint32(x>>8) == load32(src, candidate) { continue } - MOVL (DX)(R15*1), BX - CMPL R14, BX - JEQ inner1 - - // nextHash = hash(uint32(x>>16), shift) - SHRQ $8, R14 - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // s++ - ADDQ $1, SI - - // break out of the inner1 for loop, i.e. continue the outer loop. - JMP outer - -emitRemainder: - // if nextEmit < len(src) { etc } - MOVQ src_len+32(FP), AX - ADDQ DX, AX - CMPQ R10, AX - JEQ encodeBlockEnd - - // d += emitLiteral(dst[d:], src[nextEmit:]) - // - // Push args. - MOVQ DI, 0(SP) - MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. - MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. - MOVQ R10, 24(SP) - SUBQ R10, AX - MOVQ AX, 32(SP) - MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. - - // Spill local variables (registers) onto the stack; call; unspill. 
- MOVQ DI, 80(SP) - CALL ·emitLiteral(SB) - MOVQ 80(SP), DI - - // Finish the "d +=" part of "d += emitLiteral(etc)". - ADDQ 48(SP), DI - -encodeBlockEnd: - MOVQ dst_base+0(FP), AX - SUBQ AX, DI - MOVQ DI, d+48(FP) - RET diff --git a/vendor/github.com/klauspost/compress/snappy/encode_other.go b/vendor/github.com/klauspost/compress/snappy/encode_other.go deleted file mode 100644 index dbcae905e..000000000 --- a/vendor/github.com/klauspost/compress/snappy/encode_other.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64 appengine !gc noasm - -package snappy - -func load32(b []byte, i int) uint32 { - b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func load64(b []byte, i int) uint64 { - b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -// emitLiteral writes a literal chunk and returns the number of bytes written. -// -// It assumes that: -// dst is long enough to hold the encoded bytes -// 1 <= len(lit) && len(lit) <= 65536 -func emitLiteral(dst, lit []byte) int { - i, n := 0, uint(len(lit)-1) - switch { - case n < 60: - dst[0] = uint8(n)<<2 | tagLiteral - i = 1 - case n < 1<<8: - dst[0] = 60<<2 | tagLiteral - dst[1] = uint8(n) - i = 2 - default: - dst[0] = 61<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - i = 3 - } - return i + copy(dst[i:], lit) -} - -// emitCopy writes a copy chunk and returns the number of bytes written. -// -// It assumes that: -// dst is long enough to hold the encoded bytes -// 1 <= offset && offset <= 65535 -// 4 <= length && length <= 65535 -func emitCopy(dst []byte, offset, length int) int { - i := 0 - // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The - // threshold for this loop is a little higher (at 68 = 64 + 4), and the - // length emitted down below is is a little lower (at 60 = 64 - 4), because - // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed - // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as - // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as - // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a - // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an - // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. - for length >= 68 { - // Emit a length 64 copy, encoded as 3 bytes. - dst[i+0] = 63<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 64 - } - if length > 64 { - // Emit a length 60 copy, encoded as 3 bytes. - dst[i+0] = 59<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 60 - } - if length >= 12 || offset >= 2048 { - // Emit the remaining copy, encoded as 3 bytes. - dst[i+0] = uint8(length-1)<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - return i + 3 - } - // Emit the remaining copy, encoded as 2 bytes. 
- dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 - dst[i+1] = uint8(offset) - return i + 2 -} - -// extendMatch returns the largest k such that k <= len(src) and that -// src[i:i+k-j] and src[j:k] have the same contents. -// -// It assumes that: -// 0 <= i && i < j && j <= len(src) -func extendMatch(src []byte, i, j int) int { - for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { - } - return j -} - -func hash(u, shift uint32) uint32 { - return (u * 0x1e35a7bd) >> shift -} - -// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. -// -// It also assumes that: -// len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlock(dst, src []byte) (d int) { - // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. - // The table element type is uint16, as s < sLimit and sLimit < len(src) - // and len(src) <= maxBlockSize and maxBlockSize == 65536. - const ( - maxTableSize = 1 << 14 - // tableMask is redundant, but helps the compiler eliminate bounds - // checks. - tableMask = maxTableSize - 1 - ) - shift := uint32(32 - 8) - for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - shift-- - } - // In Go, all array elements are zero-initialized, so there is no advantage - // to a smaller tableSize per se. However, it matches the C++ algorithm, - // and in the asm versions of this code, we can get away with zeroing only - // the first tableSize elements. - var table [maxTableSize]uint16 - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := len(src) - inputMargin - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := 0 - - // The encoded form must start with a literal, as there are no previous - // bytes to copy, so we start looking for hash matches at s == 1. - s := 1 - nextHash := hash(load32(src, s), shift) - - for { - // Copied from the C++ snappy implementation: - // - // Heuristic match skipping: If 32 bytes are scanned with no matches - // found, start looking only at every other byte. If 32 more bytes are - // scanned (or skipped), look at every third byte, etc.. When a match - // is found, immediately go back to looking at every byte. This is a - // small loss (~5% performance, ~0.1% density) for compressible data - // due to more bookkeeping, but for non-compressible data (such as - // JPEG) it's a huge win since the compressor quickly "realizes" the - // data is incompressible and doesn't bother looking for matches - // everywhere. - // - // The "skip" variable keeps track of how many bytes there are since - // the last match; dividing it by 32 (ie. right-shifting by five) gives - // the number of bytes to move ahead for each iteration. - skip := 32 - - nextS := s - candidate := 0 - for { - s = nextS - bytesBetweenHashLookups := skip >> 5 - nextS = s + bytesBetweenHashLookups - skip += bytesBetweenHashLookups - if nextS > sLimit { - goto emitRemainder - } - candidate = int(table[nextHash&tableMask]) - table[nextHash&tableMask] = uint16(s) - nextHash = hash(load32(src, nextS), shift) - if load32(src, s) == load32(src, candidate) { - break - } - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. 
But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - d += emitLiteral(dst[d:], src[nextEmit:s]) - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - base := s - - // Extend the 4-byte match as long as possible. - // - // This is an inlined version of: - // s = extendMatch(src, candidate+4, s+4) - s += 4 - for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { - } - - d += emitCopy(dst[d:], base-candidate, s-base) - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. - x := load64(src, s-1) - prevHash := hash(uint32(x>>0), shift) - table[prevHash&tableMask] = uint16(s - 1) - currHash := hash(uint32(x>>8), shift) - candidate = int(table[currHash&tableMask]) - table[currHash&tableMask] = uint16(s) - if uint32(x>>8) != load32(src, candidate) { - nextHash = hash(uint32(x>>16), shift) - s++ - break - } - } - } - -emitRemainder: - if nextEmit < len(src) { - d += emitLiteral(dst[d:], src[nextEmit:]) - } - return d -} diff --git a/vendor/github.com/klauspost/compress/snappy/runbench.cmd b/vendor/github.com/klauspost/compress/snappy/runbench.cmd deleted file mode 100644 index d24eb4b47..000000000 --- a/vendor/github.com/klauspost/compress/snappy/runbench.cmd +++ /dev/null @@ -1,2 +0,0 @@ -del old.txt -go test -bench=. >>old.txt && go test -bench=. >>old.txt && go test -bench=. >>old.txt && benchstat -delta-test=ttest old.txt new.txt diff --git a/vendor/github.com/klauspost/compress/snappy/snappy.go b/vendor/github.com/klauspost/compress/snappy/snappy.go deleted file mode 100644 index ea58ced88..000000000 --- a/vendor/github.com/klauspost/compress/snappy/snappy.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package snappy implements the Snappy compression format. It aims for very -// high speeds and reasonable compression. -// -// There are actually two Snappy formats: block and stream. They are related, -// but different: trying to decompress block-compressed data as a Snappy stream -// will fail, and vice versa. The block format is the Decode and Encode -// functions and the stream format is the Reader and Writer types. -// -// The block format, the more common case, is used when the complete size (the -// number of bytes) of the original data is known upfront, at the time -// compression starts. The stream format, also known as the framing format, is -// for when that isn't always true. 
-// -// The canonical, C++ implementation is at https://github.com/google/snappy and -// it only implements the block format. -package snappy - -import ( - "hash/crc32" -) - -/* -Each encoded block begins with the varint-encoded length of the decoded data, -followed by a sequence of chunks. Chunks begin and end on byte boundaries. The -first byte of each chunk is broken into its 2 least and 6 most significant bits -called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. -Zero means a literal tag. All other values mean a copy tag. - -For literal tags: - - If m < 60, the next 1 + m bytes are literal bytes. - - Otherwise, let n be the little-endian unsigned integer denoted by the next - m - 59 bytes. The next 1 + n bytes after that are literal bytes. - -For copy tags, length bytes are copied from offset bytes ago, in the style of -Lempel-Ziv compression algorithms. In particular: - - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). - The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 - of the offset. The next byte is bits 0-7 of the offset. - - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). - The length is 1 + m. The offset is the little-endian unsigned integer - denoted by the next 2 bytes. - - For l == 3, this tag is a legacy format that is no longer issued by most - encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in - [1, 65). The length is 1 + m. The offset is the little-endian unsigned - integer denoted by the next 4 bytes. -*/ -const ( - tagLiteral = 0x00 - tagCopy1 = 0x01 - tagCopy2 = 0x02 - tagCopy4 = 0x03 -) - -const ( - checksumSize = 4 - chunkHeaderSize = 4 - magicChunk = "\xff\x06\x00\x00" + magicBody - magicBody = "sNaPpY" - - // maxBlockSize is the maximum size of the input to encodeBlock. It is not - // part of the wire format per se, but some parts of the encoder assume - // that an offset fits into a uint16. - // - // Also, for the framing format (Writer type instead of Encode function), - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 - // bytes". - maxBlockSize = 65536 - - // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is - // hard coded to be a const instead of a variable, so that obufLen can also - // be a const. Their equivalence is confirmed by - // TestMaxEncodedLenOfMaxBlockSize. - maxEncodedLenOfMaxBlockSize = 76490 - - obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize - obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func crc(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return c>>15 | c<<17 + 0xa282ead8 -} diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md deleted file mode 100644 index 7680bfe1d..000000000 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ /dev/null @@ -1,417 +0,0 @@ -# zstd - -[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios. 
-It offers a very wide range of compression / speed trade-off, while being backed by a very fast decoder. -A high performance compression algorithm is implemented. For now focused on speed. - -This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content. - -This package is pure Go and without use of "unsafe". - -The `zstd` package is provided as open source software using a Go standard license. - -Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors. - -## Installation - -Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`. - -Godoc Documentation: https://godoc.org/github.com/klauspost/compress/zstd - - -## Compressor - -### Status: - -STABLE - there may always be subtle bugs, a wide variety of content has been tested and the library is actively -used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates. - -There may still be specific combinations of data types/size/settings that could lead to edge cases, -so as always, testing is recommended. - -For now, a high speed (fastest) and medium-fast (default) compressor has been implemented. - -* The "Fastest" compression ratio is roughly equivalent to zstd level 1. -* The "Default" compression ratio is roughly equivalent to zstd level 3 (default). -* The "Better" compression ratio is roughly equivalent to zstd level 7. -* The "Best" compression ratio is roughly equivalent to zstd level 11. - -In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode. -The compression ratio compared to stdlib is around level 3, but usually 3x as fast. - - -### Usage - -An Encoder can be used for either compressing a stream via the -`io.WriteCloser` interface supported by the Encoder or as multiple independent -tasks via the `EncodeAll` function. -Smaller encodes are encouraged to use the EncodeAll function. -Use `NewWriter` to create a new instance that can be used for both. - -To create a writer with default options, do like this: - -```Go -// Compress input to output. -func Compress(in io.Reader, out io.Writer) error { - enc, err := zstd.NewWriter(out) - if err != nil { - return err - } - _, err = io.Copy(enc, in) - if err != nil { - enc.Close() - return err - } - return enc.Close() -} -``` - -Now you can encode by writing data to `enc`. The output will be finished writing when `Close()` is called. -Even if your encode fails, you should still call `Close()` to release any resources that may be held up. - -The above is fine for big encodes. However, whenever possible try to *reuse* the writer. - -To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output. -This will allow the encoder to reuse all resources and avoid wasteful allocations. - -Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part -of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is likely to change -in the future. So if you want to limit concurrency for future updates, specify the concurrency -you would like. - -You can specify your desired compression level using `WithEncoderLevel()` option. Currently only pre-defined -compression settings can be specified. - -#### Future Compatibility Guarantees - -This will be an evolving project. 
When using this package it is important to note that both the compression efficiency and speed may change. - -The goal will be to keep the default efficiency at the default zstd (level 3). -However the encoding should never be assumed to remain the same, -and you should not use hashes of compressed output for similarity checks. - -The Encoder can be assumed to produce the same output from the exact same code version. -However, the may be modes in the future that break this, -although they will not be enabled without an explicit option. - -This encoder is not designed to (and will probably never) output the exact same bitstream as the reference encoder. - -Also note, that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59), -[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43) -and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames). - -#### Blocks - -For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`. - -`EncodeAll` will encode all input in src and append it to dst. -This function can be called concurrently, but each call will only run on a single goroutine. - -Encoded blocks can be concatenated and the result will be the combined input stream. -Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`. - -Especially when encoding blocks you should take special care to reuse the encoder. -This will effectively make it run without allocations after a warmup period. -To make it run completely without allocations, supply a destination buffer with space for all content. - -```Go -import "github.com/klauspost/compress/zstd" - -// Create a writer that caches compressors. -// For this operation type we supply a nil Reader. -var encoder, _ = zstd.NewWriter(nil) - -// Compress a buffer. -// If you have a destination buffer, the allocation in the call can also be eliminated. -func Compress(src []byte) []byte { - return encoder.EncodeAll(src, make([]byte, 0, len(src))) -} -``` - -You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)` -option when creating the writer. - -Using the Encoder for both a stream and individual blocks concurrently is safe. - -### Performance - -I have collected some speed examples to compare speed and compression against other compressors. - -* `file` is the input file. -* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library. -* `level` is the compression level used. For `zskp` level 1 is "fastest", level 2 is "default"; 3 is "better", 4 is "best". -* `insize`/`outsize` is the input/output size. -* `millis` is the number of milliseconds used for compression. -* `mb/s` is megabytes (2^20 bytes) per second. 
- -``` -Silesia Corpus: -http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip - -This package: -file out level insize outsize millis mb/s -silesia.tar zskp 1 211947520 73101992 643 313.87 -silesia.tar zskp 2 211947520 67504318 969 208.38 -silesia.tar zskp 3 211947520 65177448 1899 106.44 -silesia.tar zskp 4 211947520 61381950 8115 24.91 - -cgo zstd: -silesia.tar zstd 1 211947520 73605392 543 371.56 -silesia.tar zstd 3 211947520 66793289 864 233.68 -silesia.tar zstd 6 211947520 62916450 1913 105.66 -silesia.tar zstd 9 211947520 60212393 5063 39.92 - -gzip, stdlib/this package: -silesia.tar gzstd 1 211947520 80007735 1654 122.21 -silesia.tar gzkp 1 211947520 80369488 1168 173.06 - -GOB stream of binary data. Highly compressible. -https://files.klauspost.com/compress/gob-stream.7z - -file out level insize outsize millis mb/s -gob-stream zskp 1 1911399616 235022249 3088 590.30 -gob-stream zskp 2 1911399616 205669791 3786 481.34 -gob-stream zskp 3 1911399616 185792019 9324 195.48 -gob-stream zskp 4 1911399616 171537212 32113 56.76 -gob-stream zstd 1 1911399616 249810424 2637 691.26 -gob-stream zstd 3 1911399616 208192146 3490 522.31 -gob-stream zstd 6 1911399616 193632038 6687 272.56 -gob-stream zstd 9 1911399616 177620386 16175 112.70 -gob-stream gzstd 1 1911399616 357382641 10251 177.82 -gob-stream gzkp 1 1911399616 362156523 5695 320.08 - -The test data for the Large Text Compression Benchmark is the first -10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. -http://mattmahoney.net/dc/textdata.html - -file out level insize outsize millis mb/s -enwik9 zskp 1 1000000000 343848582 3609 264.18 -enwik9 zskp 2 1000000000 317276632 5746 165.97 -enwik9 zskp 3 1000000000 294540704 11725 81.34 -enwik9 zskp 4 1000000000 276609671 44029 21.66 -enwik9 zstd 1 1000000000 358072021 3110 306.65 -enwik9 zstd 3 1000000000 313734672 4784 199.35 -enwik9 zstd 6 1000000000 295138875 10290 92.68 -enwik9 zstd 9 1000000000 278348700 28549 33.40 -enwik9 gzstd 1 1000000000 382578136 9604 99.30 -enwik9 gzkp 1 1000000000 383825945 6544 145.73 - -Highly compressible JSON file. 
-https://files.klauspost.com/compress/github-june-2days-2019.json.zst - -file out level insize outsize millis mb/s -github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40 -github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96 -github-june-2days-2019.json zskp 3 6273951764 537511906 29252 204.54 -github-june-2days-2019.json zskp 4 6273951764 512796117 97791 61.18 -github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00 -github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57 -github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18 -github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16 -github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79 -github-june-2days-2019.json gzkp 1 6273951764 1128755542 19236 311.03 - -VM Image, Linux mint with a few installed applications: -https://files.klauspost.com/compress/rawstudio-mint14.7z - -file out level insize outsize millis mb/s -rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84 -rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07 -rawstudio-mint14.tar zskp 3 8558382592 3224594213 71751 113.75 -rawstudio-mint14.tar zskp 4 8558382592 3027332295 486243 16.79 -rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27 -rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92 -rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77 -rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91 -rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40 -rawstudio-mint14.tar gzkp 1 8558382592 3970463184 41749 195.49 - -CSV data: -https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst - -file out level insize outsize millis mb/s -nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35 -nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44 -nyc-taxi-data-10M.csv zskp 3 3325605752 538490114 19880 159.53 -nyc-taxi-data-10M.csv zskp 4 3325605752 495986829 89368 35.49 -nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18 -nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07 -nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27 -nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12 -nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83 -nyc-taxi-data-10M.csv gzkp 1 3325605752 924718719 16388 193.53 -``` - -## Decompressor - -Staus: STABLE - there may still be subtle bugs, but a wide variety of content has been tested. - -This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz), -kindly supplied by [fuzzit.dev](https://fuzzit.dev/). -The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder, -or run it past its limits with ANY input provided. - -### Usage - -The package has been designed for two main usages, big streams of data and smaller in-memory buffers. -There are two main usages of the package for these. Both of them are accessed by creating a `Decoder`. - -For streaming use a simple setup could look like this: - -```Go -import "github.com/klauspost/compress/zstd" - -func Decompress(in io.Reader, out io.Writer) error { - d, err := zstd.NewReader(in) - if err != nil { - return err - } - defer d.Close() - - // Copy content... - _, err = io.Copy(out, d) - return err -} -``` - -It is important to use the "Close" function when you no longer need the Reader to stop running goroutines. -See "Allocation-less operation" below. 
- -For decoding buffers, it could look something like this: - -```Go -import "github.com/klauspost/compress/zstd" - -// Create a reader that caches decompressors. -// For this operation type we supply a nil Reader. -var decoder, _ = zstd.NewReader(nil) - -// Decompress a buffer. We don't supply a destination buffer, -// so it will be allocated by the decoder. -func Decompress(src []byte) ([]byte, error) { - return decoder.DecodeAll(src, nil) -} -``` - -Both of these cases should provide the functionality needed. -The decoder can be used for *concurrent* decompression of multiple buffers. -It will only allow a certain number of concurrent operations to run. -To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder. - -### Dictionaries - -Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed. - -Dictionaries are added individually to Decoders. -Dictionaries are generated by the `zstd --train` command and contains an initial state for the decoder. -To add a dictionary use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data. -Several dictionaries can be added at once. - -The dictionary will be used automatically for the data that specifies them. -A re-used Decoder will still contain the dictionaries registered. - -When registering multiple dictionaries with the same ID, the last one will be used. - -It is possible to use dictionaries when compressing data. - -To enable a dictionary use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used -and it will likely be used even if it doesn't improve compression. - -The used dictionary must be used to decompress the content. - -For any real gains, the dictionary should be built with similar data. -If an unsuitable dictionary is used the output may be slightly larger than using no dictionary. -Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data. -For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression). - -For now there is a fixed startup performance penalty for compressing content with dictionaries. -This will likely be improved over time. Just be aware to test performance when implementing. - -### Allocation-less operation - -The decoder has been designed to operate without allocations after a warmup. - -This means that you should *store* the decoder for best performance. -To re-use a stream decoder, use the `Reset(r io.Reader) error` to switch to another stream. -A decoder can safely be re-used even if the previous stream failed. - -To release the resources, you must call the `Close()` function on a decoder. -After this it can *no longer be reused*, but all running goroutines will be stopped. -So you *must* use this if you will no longer need the Reader. - -For decompressing smaller buffers a single decoder can be used. -When decoding buffers, you can supply a destination slice with length 0 and your expected capacity. -In this case no unneeded allocations should be made. - -### Concurrency - -The buffer decoder does everything on the same goroutine and does nothing concurrently. -It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that. - -The stream decoder operates on - -* One goroutine reads input and splits the input to several block decoders. -* A number of decoders will decode blocks. 
-* A goroutine coordinates these blocks and sends history from one to the next. - -So effectively this also means the decoder will "read ahead" and prepare data to always be available for output. - -Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency. - -In practice this means that concurrency is often limited to utilizing about 2 cores effectively. - - -### Benchmarks - -These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd). - -The first two are streaming decodes and the last are smaller inputs. - -``` -BenchmarkDecoderSilesia-8 3 385000067 ns/op 550.51 MB/s 5498 B/op 8 allocs/op -BenchmarkDecoderSilesiaCgo-8 6 197666567 ns/op 1072.25 MB/s 270672 B/op 8 allocs/op - -BenchmarkDecoderEnwik9-8 1 2027001600 ns/op 493.34 MB/s 10496 B/op 18 allocs/op -BenchmarkDecoderEnwik9Cgo-8 2 979499200 ns/op 1020.93 MB/s 270672 B/op 8 allocs/op - -Concurrent performance: - -BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16 28915 42469 ns/op 4340.07 MB/s 114 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16 116505 9965 ns/op 11900.16 MB/s 16 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16 8952 134272 ns/op 3588.70 MB/s 915 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16 11820 102538 ns/op 4161.90 MB/s 594 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16 34782 34184 ns/op 3661.88 MB/s 60 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16 27712 43447 ns/op 3500.58 MB/s 99 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16 62826 18750 ns/op 21845.10 MB/s 104 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16 631545 1794 ns/op 57078.74 MB/s 2 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16 1690140 712 ns/op 172938.13 MB/s 1 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16 10432 113593 ns/op 6180.73 MB/s 1143 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/html.zst-16 113206 10671 ns/op 9596.27 MB/s 15 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16 1530615 779 ns/op 5229.49 MB/s 0 B/op 0 allocs/op - -BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16 65217 16192 ns/op 11383.34 MB/s 46 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16 292671 4039 ns/op 29363.19 MB/s 6 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16 26314 46021 ns/op 10470.43 MB/s 293 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16 33897 34900 ns/op 12227.96 MB/s 205 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16 104348 11433 ns/op 10949.01 MB/s 20 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16 75949 15510 ns/op 9805.60 MB/s 32 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16 173910 6756 ns/op 60624.29 MB/s 37 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16 923076 1339 ns/op 76474.87 MB/s 1 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16 922920 1351 ns/op 91102.57 MB/s 2 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16 27649 43618 ns/op 16096.19 MB/s 407 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16 279073 4160 ns/op 24614.18 MB/s 6 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16 749938 1579 ns/op 
2581.71 MB/s 0 B/op 0 allocs/op -``` - -This reflects the performance around May 2020, but this may be out of date. - -# Contributions - -Contributions are always welcome. -For new features/fixes, remember to add tests and for performance enhancements include benchmarks. - -For sending files for reproducing errors use a service like [goobox](https://goobox.io/#/upload) or similar to share your files. - -For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan). - -This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare. diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go deleted file mode 100644 index 854458537..000000000 --- a/vendor/github.com/klauspost/compress/zstd/bitreader.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "errors" - "io" - "math/bits" -) - -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReader struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 // Maybe use [16]byte, but shifting is awkward. - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReader) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.bitsRead += 8 - uint8(highBits(uint32(v))) - return nil -} - -// getBits will return n bits. n can be 0. -func (b *bitReader) getBits(n uint8) int { - if n == 0 /*|| b.bitsRead >= 64 */ { - return 0 - } - return b.getBitsFast(n) -} - -// getBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReader) getBitsFast(n uint8) int { - const regMask = 64 - 1 - v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - b.bitsRead += n - return int(v) -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReader) fillFast() { - if b.bitsRead < 32 { - return - } - // 2 bounds checks. - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 -} - -// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. -func (b *bitReader) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. 
-func (b *bitReader) fill() { - if b.bitsRead < 32 { - return - } - if b.off >= 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value = (b.value << 8) | uint64(b.in[b.off-1]) - b.bitsRead -= 8 - b.off-- - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReader) finished() bool { - return b.off == 0 && b.bitsRead >= 64 -} - -// overread returns true if more bits have been requested than is on the stream. -func (b *bitReader) overread() bool { - return b.bitsRead > 64 -} - -// remain returns the number of bits remaining. -func (b *bitReader) remain() uint { - return b.off*8 + 64 - uint(b.bitsRead) -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReader) close() error { - // Release reference. - b.in = nil - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} - -func highBits(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go deleted file mode 100644 index 303ae90f9..000000000 --- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package zstd - -import "fmt" - -// bitWriter will write bits. -// First bit will be LSB of the first byte of output. -type bitWriter struct { - bitContainer uint64 - nBits uint8 - out []byte -} - -// bitMask16 is bitmasks. Has extra to avoid bounds check. -var bitMask16 = [32]uint16{ - 0, 1, 3, 7, 0xF, 0x1F, - 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, - 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF} /* up to 16 bits */ - -var bitMask32 = [32]uint32{ - 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, - 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, - 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, - 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, -} // up to 32 bits - -// addBits16NC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16NC(value uint16, bits uint8) { - b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - -// addBits32NC will add up to 32 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits32NC(value uint32, bits uint8) { - b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - -// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// flush will flush all pending full bytes. -// There will be at least 56 bits available for writing when this has been called. 
-// Using flush32 is faster, but leaves less space for writing. -func (b *bitWriter) flush() { - v := b.nBits >> 3 - switch v { - case 0: - case 1: - b.out = append(b.out, - byte(b.bitContainer), - ) - case 2: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - ) - case 3: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - ) - case 4: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - ) - case 5: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - ) - case 6: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - ) - case 7: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - ) - case 8: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - byte(b.bitContainer>>56), - ) - default: - panic(fmt.Errorf("bits (%d) > 64", b.nBits)) - } - b.bitContainer >>= v << 3 - b.nBits &= 7 -} - -// flush32 will flush out, so there are at least 32 bits available for writing. -func (b *bitWriter) flush32() { - if b.nBits < 32 { - return - } - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24)) - b.nBits -= 32 - b.bitContainer >>= 32 -} - -// flushAlign will flush remaining full bytes and align to next byte boundary. -func (b *bitWriter) flushAlign() { - nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { - b.out = append(b.out, byte(b.bitContainer>>(i*8))) - } - b.nBits = 0 - b.bitContainer = 0 -} - -// close will write the alignment bit and write the final byte(s) -// to the output. -func (b *bitWriter) close() error { - // End mark - b.addBits16Clean(1, 1) - // flush until next byte. - b.flushAlign() - return nil -} - -// reset and continue writing by appending to out. -func (b *bitWriter) reset(out []byte) { - b.bitContainer = 0 - b.nBits = 0 - b.out = out -} diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go deleted file mode 100644 index b51d922bd..000000000 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ /dev/null @@ -1,739 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. 
- -package zstd - -import ( - "errors" - "fmt" - "io" - "sync" - - "github.com/klauspost/compress/huff0" - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -type blockType uint8 - -//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex - -const ( - blockTypeRaw blockType = iota - blockTypeRLE - blockTypeCompressed - blockTypeReserved -) - -type literalsBlockType uint8 - -const ( - literalsBlockRaw literalsBlockType = iota - literalsBlockRLE - literalsBlockCompressed - literalsBlockTreeless -) - -const ( - // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) - maxCompressedBlockSize = 128 << 10 - - // Maximum possible block size (all Raw+Uncompressed). - maxBlockSize = (1 << 21) - 1 - - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header - maxCompressedLiteralSize = 1 << 18 - maxRLELiteralSize = 1 << 20 - maxMatchLen = 131074 - maxSequences = 0x7f00 + 0xffff - - // We support slightly less than the reference decoder to be able to - // use ints on 32 bit archs. - maxOffsetBits = 30 -) - -var ( - huffDecoderPool = sync.Pool{New: func() interface{} { - return &huff0.Scratch{} - }} - - fseDecoderPool = sync.Pool{New: func() interface{} { - return &fseDecoder{} - }} -) - -type blockDec struct { - // Raw source data of the block. - data []byte - dataStorage []byte - - // Destination of the decoded data. - dst []byte - - // Buffer for literals data. - literalBuf []byte - - // Window size of the block. - WindowSize uint64 - - history chan *history - input chan struct{} - result chan decodeOutput - sequenceBuf []seq - err error - decWG sync.WaitGroup - - // Frame to use for singlethreaded decoding. - // Should not be used by the decoder itself since parent may be another frame. - localFrame *frameDec - - // Block is RLE, this is the size. - RLESize uint32 - tmp [4]byte - - Type blockType - - // Is this the last block of a frame? - Last bool - - // Use less memory - lowMem bool -} - -func (b *blockDec) String() string { - if b == nil { - return "" - } - return fmt.Sprintf("Steam Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize) -} - -func newBlockDec(lowMem bool) *blockDec { - b := blockDec{ - lowMem: lowMem, - result: make(chan decodeOutput, 1), - input: make(chan struct{}, 1), - history: make(chan *history, 1), - } - b.decWG.Add(1) - go b.startDecoder() - return &b -} - -// reset will reset the block. -// Input must be a start of a block and will be at the end of the block when returned. -func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { - b.WindowSize = windowSize - tmp := br.readSmall(3) - if tmp == nil { - if debug { - println("Reading block header:", io.ErrUnexpectedEOF) - } - return io.ErrUnexpectedEOF - } - bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) - b.Last = bh&1 != 0 - b.Type = blockType((bh >> 1) & 3) - // find size. 
- cSize := int(bh >> 3) - maxSize := maxBlockSize - switch b.Type { - case blockTypeReserved: - return ErrReservedBlockType - case blockTypeRLE: - b.RLESize = uint32(cSize) - if b.lowMem { - maxSize = cSize - } - cSize = 1 - case blockTypeCompressed: - if debug { - println("Data size on stream:", cSize) - } - b.RLESize = 0 - maxSize = maxCompressedBlockSize - if windowSize < maxCompressedBlockSize && b.lowMem { - maxSize = int(windowSize) - } - if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { - if debug { - printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b) - } - return ErrCompressedSizeTooBig - } - case blockTypeRaw: - b.RLESize = 0 - // We do not need a destination for raw blocks. - maxSize = -1 - default: - panic("Invalid block type") - } - - // Read block data. - if cap(b.dataStorage) < cSize { - if b.lowMem { - b.dataStorage = make([]byte, 0, cSize) - } else { - b.dataStorage = make([]byte, 0, maxBlockSize) - } - } - if cap(b.dst) <= maxSize { - b.dst = make([]byte, 0, maxSize+1) - } - var err error - b.data, err = br.readBig(cSize, b.dataStorage) - if err != nil { - if debug { - println("Reading block:", err, "(", cSize, ")", len(b.data)) - printf("%T", br) - } - return err - } - return nil -} - -// sendEOF will make the decoder send EOF on this frame. -func (b *blockDec) sendErr(err error) { - b.Last = true - b.Type = blockTypeReserved - b.err = err - b.input <- struct{}{} -} - -// Close will release resources. -// Closed blockDec cannot be reset. -func (b *blockDec) Close() { - close(b.input) - close(b.history) - close(b.result) - b.decWG.Wait() -} - -// decodeAsync will prepare decoding the block when it receives input. -// This will separate output and history. -func (b *blockDec) startDecoder() { - defer b.decWG.Done() - for range b.input { - //println("blockDec: Got block input") - switch b.Type { - case blockTypeRLE: - if cap(b.dst) < int(b.RLESize) { - if b.lowMem { - b.dst = make([]byte, b.RLESize) - } else { - b.dst = make([]byte, maxBlockSize) - } - } - o := decodeOutput{ - d: b, - b: b.dst[:b.RLESize], - err: nil, - } - v := b.data[0] - for i := range o.b { - o.b[i] = v - } - hist := <-b.history - hist.append(o.b) - b.result <- o - case blockTypeRaw: - o := decodeOutput{ - d: b, - b: b.data, - err: nil, - } - hist := <-b.history - hist.append(o.b) - b.result <- o - case blockTypeCompressed: - b.dst = b.dst[:0] - err := b.decodeCompressed(nil) - o := decodeOutput{ - d: b, - b: b.dst, - err: err, - } - if debug { - println("Decompressed to", len(b.dst), "bytes, error:", err) - } - b.result <- o - case blockTypeReserved: - // Used for returning errors. - <-b.history - b.result <- decodeOutput{ - d: b, - b: nil, - err: b.err, - } - default: - panic("Invalid block type") - } - if debug { - println("blockDec: Finished block") - } - } -} - -// decodeAsync will prepare decoding the block when it receives the history. -// If history is provided, it will not fetch it from the channel. 
-func (b *blockDec) decodeBuf(hist *history) error { - switch b.Type { - case blockTypeRLE: - if cap(b.dst) < int(b.RLESize) { - if b.lowMem { - b.dst = make([]byte, b.RLESize) - } else { - b.dst = make([]byte, maxBlockSize) - } - } - b.dst = b.dst[:b.RLESize] - v := b.data[0] - for i := range b.dst { - b.dst[i] = v - } - hist.appendKeep(b.dst) - return nil - case blockTypeRaw: - hist.appendKeep(b.data) - return nil - case blockTypeCompressed: - saved := b.dst - b.dst = hist.b - hist.b = nil - err := b.decodeCompressed(hist) - if debug { - println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) - } - hist.b = b.dst - b.dst = saved - return err - case blockTypeReserved: - // Used for returning errors. - return b.err - default: - panic("Invalid block type") - } -} - -// decodeCompressed will start decompressing a block. -// If no history is supplied the decoder will decodeAsync as much as possible -// before fetching from blockDec.history -func (b *blockDec) decodeCompressed(hist *history) error { - in := b.data - delayedHistory := hist == nil - - if delayedHistory { - // We must always grab history. - defer func() { - if hist == nil { - <-b.history - } - }() - } - // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header - if len(in) < 2 { - return ErrBlockTooSmall - } - litType := literalsBlockType(in[0] & 3) - var litRegenSize int - var litCompSize int - sizeFormat := (in[0] >> 2) & 3 - var fourStreams bool - switch litType { - case literalsBlockRaw, literalsBlockRLE: - switch sizeFormat { - case 0, 2: - // Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte. - litRegenSize = int(in[0] >> 3) - in = in[1:] - case 1: - // Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes. - litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) - in = in[2:] - case 3: - // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. - if len(in) < 3 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall - } - litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) - in = in[3:] - } - case literalsBlockCompressed, literalsBlockTreeless: - switch sizeFormat { - case 0, 1: - // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
- if len(in) < 3 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall - } - n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) - litRegenSize = int(n & 1023) - litCompSize = int(n >> 10) - fourStreams = sizeFormat == 1 - in = in[3:] - case 2: - fourStreams = true - if len(in) < 4 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall - } - n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) - litRegenSize = int(n & 16383) - litCompSize = int(n >> 14) - in = in[4:] - case 3: - fourStreams = true - if len(in) < 5 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall - } - n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) - litRegenSize = int(n & 262143) - litCompSize = int(n >> 18) - in = in[5:] - } - } - if debug { - println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) - } - var literals []byte - var huff *huff0.Scratch - switch litType { - case literalsBlockRaw: - if len(in) < litRegenSize { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) - return ErrBlockTooSmall - } - literals = in[:litRegenSize] - in = in[litRegenSize:] - //printf("Found %d uncompressed literals\n", litRegenSize) - case literalsBlockRLE: - if len(in) < 1 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) - return ErrBlockTooSmall - } - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, litRegenSize) - } else { - if litRegenSize > maxCompressedLiteralSize { - // Exceptional - b.literalBuf = make([]byte, litRegenSize) - } else { - b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize) - - } - } - } - literals = b.literalBuf[:litRegenSize] - v := in[0] - for i := range literals { - literals[i] = v - } - in = in[1:] - if debug { - printf("Found %d RLE compressed literals\n", litRegenSize) - } - case literalsBlockTreeless: - if len(in) < litCompSize { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return ErrBlockTooSmall - } - // Store compressed literals, so we defer decoding until we get history. - literals = in[:litCompSize] - in = in[litCompSize:] - if debug { - printf("Found %d compressed literals\n", litCompSize) - } - case literalsBlockCompressed: - if len(in) < litCompSize { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return ErrBlockTooSmall - } - literals = in[:litCompSize] - in = in[litCompSize:] - huff = huffDecoderPool.Get().(*huff0.Scratch) - var err error - // Ensure we have space to store it. - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize) - } else { - b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) - } - } - if huff == nil { - huff = &huff0.Scratch{} - } - huff, literals, err = huff0.ReadTable(literals, huff) - if err != nil { - println("reading huffman table:", err) - return err - } - // Use our out buffer. 
- if fourStreams { - literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) - } else { - literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) - } - if err != nil { - println("decoding compressed literals:", err) - return err - } - // Make sure we don't leak our literals buffer - if len(literals) != litRegenSize { - return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) - } - if debug { - printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) - } - } - - // Decode Sequences - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section - if len(in) < 1 { - return ErrBlockTooSmall - } - seqHeader := in[0] - nSeqs := 0 - switch { - case seqHeader == 0: - in = in[1:] - case seqHeader < 128: - nSeqs = int(seqHeader) - in = in[1:] - case seqHeader < 255: - if len(in) < 2 { - return ErrBlockTooSmall - } - nSeqs = int(seqHeader-128)<<8 | int(in[1]) - in = in[2:] - case seqHeader == 255: - if len(in) < 3 { - return ErrBlockTooSmall - } - nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) - in = in[3:] - } - // Allocate sequences - if cap(b.sequenceBuf) < nSeqs { - if b.lowMem { - b.sequenceBuf = make([]seq, nSeqs) - } else { - // Allocate max - b.sequenceBuf = make([]seq, nSeqs, maxSequences) - } - } else { - // Reuse buffer - b.sequenceBuf = b.sequenceBuf[:nSeqs] - } - var seqs = &sequenceDecs{} - if nSeqs > 0 { - if len(in) < 1 { - return ErrBlockTooSmall - } - br := byteReader{b: in, off: 0} - compMode := br.Uint8() - br.advance(1) - if debug { - printf("Compression modes: 0b%b", compMode) - } - for i := uint(0); i < 3; i++ { - mode := seqCompMode((compMode >> (6 - i*2)) & 3) - if debug { - println("Table", tableIndex(i), "is", mode) - } - var seq *sequenceDec - switch tableIndex(i) { - case tableLiteralLengths: - seq = &seqs.litLengths - case tableOffsets: - seq = &seqs.offsets - case tableMatchLengths: - seq = &seqs.matchLengths - default: - panic("unknown table") - } - switch mode { - case compModePredefined: - seq.fse = &fsePredef[i] - case compModeRLE: - if br.remain() < 1 { - return ErrBlockTooSmall - } - v := br.Uint8() - br.advance(1) - dec := fseDecoderPool.Get().(*fseDecoder) - symb, err := decSymbolValue(v, symbolTableX[i]) - if err != nil { - printf("RLE Transform table (%v) error: %v", tableIndex(i), err) - return err - } - dec.setRLE(symb) - seq.fse = dec - if debug { - printf("RLE set to %+v, code: %v", symb, v) - } - case compModeFSE: - println("Reading table for", tableIndex(i)) - dec := fseDecoderPool.Get().(*fseDecoder) - err := dec.readNCount(&br, uint16(maxTableSymbol[i])) - if err != nil { - println("Read table error:", err) - return err - } - err = dec.transform(symbolTableX[i]) - if err != nil { - println("Transform table error:", err) - return err - } - if debug { - println("Read table ok", "symbolLen:", dec.symbolLen) - } - seq.fse = dec - case compModeRepeat: - seq.repeat = true - } - if br.overread() { - return io.ErrUnexpectedEOF - } - } - in = br.unread() - } - - // Wait for history. - // All time spent after this is critical since it is strictly sequential. - if hist == nil { - hist = <-b.history - if hist.error { - return ErrDecoderClosed - } - } - - // Decode treeless literal block. - if litType == literalsBlockTreeless { - // TODO: We could send the history early WITHOUT the stream history. - // This would allow decoding treeless literals before the byte history is available. 
- // Silencia stats: Treeless 4393, with: 32775, total: 37168, 11% treeless. - // So not much obvious gain here. - - if hist.huffTree == nil { - return errors.New("literal block was treeless, but no history was defined") - } - // Ensure we have space to store it. - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize) - } else { - b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) - } - } - var err error - // Use our out buffer. - huff = hist.huffTree - if fourStreams { - literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) - } else { - literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) - } - // Make sure we don't leak our literals buffer - if err != nil { - println("decompressing literals:", err) - return err - } - if len(literals) != litRegenSize { - return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) - } - } else { - if hist.huffTree != nil && huff != nil { - if hist.dict == nil || hist.dict.litEnc != hist.huffTree { - huffDecoderPool.Put(hist.huffTree) - } - hist.huffTree = nil - } - } - if huff != nil { - hist.huffTree = huff - } - if debug { - println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.") - } - - if nSeqs == 0 { - // Decompressed content is defined entirely as Literals Section content. - b.dst = append(b.dst, literals...) - if delayedHistory { - hist.append(literals) - } - return nil - } - - seqs, err := seqs.mergeHistory(&hist.decoders) - if err != nil { - return err - } - if debug { - println("History merged ok") - } - br := &bitReader{} - if err := br.init(in); err != nil { - return err - } - - // TODO: Investigate if sending history without decoders are faster. - // This would allow the sequences to be decoded async and only have to construct stream history. - // If only recent offsets were not transferred, this would be an obvious win. - // Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded. - - hbytes := hist.b - if len(hbytes) > hist.windowSize { - hbytes = hbytes[len(hbytes)-hist.windowSize:] - // We do not need history any more. - if hist.dict != nil { - hist.dict.content = nil - } - } - - if err := seqs.initialize(br, hist, literals, b.dst); err != nil { - println("initializing sequences:", err) - return err - } - - err = seqs.decode(nSeqs, br, hbytes) - if err != nil { - return err - } - if !br.finished() { - return fmt.Errorf("%d extra bits on block, should be 0", br.remain()) - } - - err = br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) - } - if len(b.data) > maxCompressedBlockSize { - return fmt.Errorf("compressed block size too large (%d)", len(b.data)) - } - // Set output and release references. - b.dst = seqs.out - seqs.out, seqs.literals, seqs.hist = nil, nil, nil - - if !delayedHistory { - // If we don't have delayed history, no need to update. - hist.recentOffsets = seqs.prevOffset - return nil - } - if b.Last { - // if last block we don't care about history. 
- println("Last block, no history returned") - hist.b = hist.b[:0] - return nil - } - hist.append(b.dst) - hist.recentOffsets = seqs.prevOffset - if debug { - println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.") - } - - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go deleted file mode 100644 index 9647c64e5..000000000 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ /dev/null @@ -1,871 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "math" - "math/bits" - - "github.com/klauspost/compress/huff0" -) - -type blockEnc struct { - size int - literals []byte - sequences []seq - coders seqCoders - litEnc *huff0.Scratch - dictLitEnc *huff0.Scratch - wr bitWriter - - extraLits int - output []byte - recentOffsets [3]uint32 - prevRecentOffsets [3]uint32 - - last bool - lowMem bool -} - -// init should be used once the block has been created. -// If called more than once, the effect is the same as calling reset. -func (b *blockEnc) init() { - if b.lowMem { - // 1K literals - if cap(b.literals) < 1<<10 { - b.literals = make([]byte, 0, 1<<10) - } - const defSeqs = 20 - if cap(b.sequences) < defSeqs { - b.sequences = make([]seq, 0, defSeqs) - } - // 1K - if cap(b.output) < 1<<10 { - b.output = make([]byte, 0, 1<<10) - } - } else { - if cap(b.literals) < maxCompressedBlockSize { - b.literals = make([]byte, 0, maxCompressedBlockSize) - } - const defSeqs = 200 - if cap(b.sequences) < defSeqs { - b.sequences = make([]seq, 0, defSeqs) - } - if cap(b.output) < maxCompressedBlockSize { - b.output = make([]byte, 0, maxCompressedBlockSize) - } - } - - if b.coders.mlEnc == nil { - b.coders.mlEnc = &fseEncoder{} - b.coders.mlPrev = &fseEncoder{} - b.coders.ofEnc = &fseEncoder{} - b.coders.ofPrev = &fseEncoder{} - b.coders.llEnc = &fseEncoder{} - b.coders.llPrev = &fseEncoder{} - } - b.litEnc = &huff0.Scratch{WantLogLess: 4} - b.reset(nil) -} - -// initNewEncode can be used to reset offsets and encoders to the initial state. -func (b *blockEnc) initNewEncode() { - b.recentOffsets = [3]uint32{1, 4, 8} - b.litEnc.Reuse = huff0.ReusePolicyNone - b.coders.setPrev(nil, nil, nil) -} - -// reset will reset the block for a new encode, but in the same stream, -// meaning that state will be carried over, but the block content is reset. -// If a previous block is provided, the recent offsets are carried over. -func (b *blockEnc) reset(prev *blockEnc) { - b.extraLits = 0 - b.literals = b.literals[:0] - b.size = 0 - b.sequences = b.sequences[:0] - b.output = b.output[:0] - b.last = false - if prev != nil { - b.recentOffsets = prev.prevRecentOffsets - } - b.dictLitEnc = nil -} - -// reset will reset the block for a new encode, but in the same stream, -// meaning that state will be carried over, but the block content is reset. -// If a previous block is provided, the recent offsets are carried over. -func (b *blockEnc) swapEncoders(prev *blockEnc) { - b.coders.swap(&prev.coders) - b.litEnc, prev.litEnc = prev.litEnc, b.litEnc -} - -// blockHeader contains the information for a block header. -type blockHeader uint32 - -// setLast sets the 'last' indicator on a block. 
-func (h *blockHeader) setLast(b bool) { - if b { - *h = *h | 1 - } else { - const mask = (1 << 24) - 2 - *h = *h & mask - } -} - -// setSize will store the compressed size of a block. -func (h *blockHeader) setSize(v uint32) { - const mask = 7 - *h = (*h)&mask | blockHeader(v<<3) -} - -// setType sets the block type. -func (h *blockHeader) setType(t blockType) { - const mask = 1 | (((1 << 24) - 1) ^ 7) - *h = (*h & mask) | blockHeader(t<<1) -} - -// appendTo will append the block header to a slice. -func (h blockHeader) appendTo(b []byte) []byte { - return append(b, uint8(h), uint8(h>>8), uint8(h>>16)) -} - -// String returns a string representation of the block. -func (h blockHeader) String() string { - return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1) -} - -// literalsHeader contains literals header information. -type literalsHeader uint64 - -// setType can be used to set the type of literal block. -func (h *literalsHeader) setType(t literalsBlockType) { - const mask = math.MaxUint64 - 3 - *h = (*h & mask) | literalsHeader(t) -} - -// setSize can be used to set a single size, for uncompressed and RLE content. -func (h *literalsHeader) setSize(regenLen int) { - inBits := bits.Len32(uint32(regenLen)) - // Only retain 2 bits - const mask = 3 - lh := uint64(*h & mask) - switch { - case inBits < 5: - lh |= (uint64(regenLen) << 3) | (1 << 60) - if debug { - got := int(lh>>3) & 0xff - if got != regenLen { - panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) - } - } - case inBits < 12: - lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60) - case inBits < 20: - lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60) - default: - panic(fmt.Errorf("internal error: block too big (%d)", regenLen)) - } - *h = literalsHeader(lh) -} - -// setSizes will set the size of a compressed literals section and the input length. -func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { - compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen)) - // Only retain 2 bits - const mask = 3 - lh := uint64(*h & mask) - switch { - case compBits <= 10 && inBits <= 10: - if !single { - lh |= 1 << 2 - } - lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) - if debug { - const mmask = (1 << 24) - 1 - n := (lh >> 4) & mmask - if int(n&1023) != inLen { - panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits)) - } - if int(n>>10) != compLen { - panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits)) - } - } - case compBits <= 14 && inBits <= 14: - lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60) - if single { - panic("single stream used with more than 10 bits length.") - } - case compBits <= 18 && inBits <= 18: - lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60) - if single { - panic("single stream used with more than 10 bits length.") - } - default: - panic("internal error: block too big") - } - *h = literalsHeader(lh) -} - -// appendTo will append the literals header to a byte slice. 
-func (h literalsHeader) appendTo(b []byte) []byte { - size := uint8(h >> 60) - switch size { - case 1: - b = append(b, uint8(h)) - case 2: - b = append(b, uint8(h), uint8(h>>8)) - case 3: - b = append(b, uint8(h), uint8(h>>8), uint8(h>>16)) - case 4: - b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24)) - case 5: - b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32)) - default: - panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size)) - } - return b -} - -// size returns the output size with currently set values. -func (h literalsHeader) size() int { - return int(h >> 60) -} - -func (h literalsHeader) String() string { - return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60) -} - -// pushOffsets will push the recent offsets to the backup store. -func (b *blockEnc) pushOffsets() { - b.prevRecentOffsets = b.recentOffsets -} - -// pushOffsets will push the recent offsets to the backup store. -func (b *blockEnc) popOffsets() { - b.recentOffsets = b.prevRecentOffsets -} - -// matchOffset will adjust recent offsets and return the adjusted one, -// if it matches a previous offset. -func (b *blockEnc) matchOffset(offset, lits uint32) uint32 { - // Check if offset is one of the recent offsets. - // Adjusts the output offset accordingly. - // Gives a tiny bit of compression, typically around 1%. - if true { - if lits > 0 { - switch offset { - case b.recentOffsets[0]: - offset = 1 - case b.recentOffsets[1]: - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 2 - case b.recentOffsets[2]: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 3 - default: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset += 3 - } - } else { - switch offset { - case b.recentOffsets[1]: - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 1 - case b.recentOffsets[2]: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 2 - case b.recentOffsets[0] - 1: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 3 - default: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset += 3 - } - } - } else { - offset += 3 - } - return offset -} - -// encodeRaw can be used to set the output to a raw representation of supplied bytes. -func (b *blockEnc) encodeRaw(a []byte) { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(uint32(len(a))) - bh.setType(blockTypeRaw) - b.output = bh.appendTo(b.output[:0]) - b.output = append(b.output, a...) - if debug { - println("Adding RAW block, length", len(a), "last:", b.last) - } -} - -// encodeRaw can be used to set the output to a raw representation of supplied bytes. -func (b *blockEnc) encodeRawTo(dst, src []byte) []byte { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(uint32(len(src))) - bh.setType(blockTypeRaw) - dst = bh.appendTo(dst) - dst = append(dst, src...) - if debug { - println("Adding RAW block, length", len(src), "last:", b.last) - } - return dst -} - -// encodeLits can be used if the block is only litLen. 
-func (b *blockEnc) encodeLits(lits []byte, raw bool) error { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(uint32(len(lits))) - - // Don't compress extremely small blocks - if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { - if debug { - println("Adding RAW block, length", len(lits), "last:", b.last) - } - bh.setType(blockTypeRaw) - b.output = bh.appendTo(b.output) - b.output = append(b.output, lits...) - return nil - } - - var ( - out []byte - reUsed, single bool - err error - ) - if b.dictLitEnc != nil { - b.litEnc.TransferCTable(b.dictLitEnc) - b.litEnc.Reuse = huff0.ReusePolicyAllow - b.dictLitEnc = nil - } - if len(lits) >= 1024 { - // Use 4 Streams. - out, reUsed, err = huff0.Compress4X(lits, b.litEnc) - } else if len(lits) > 32 { - // Use 1 stream - single = true - out, reUsed, err = huff0.Compress1X(lits, b.litEnc) - } else { - err = huff0.ErrIncompressible - } - - switch err { - case huff0.ErrIncompressible: - if debug { - println("Adding RAW block, length", len(lits), "last:", b.last) - } - bh.setType(blockTypeRaw) - b.output = bh.appendTo(b.output) - b.output = append(b.output, lits...) - return nil - case huff0.ErrUseRLE: - if debug { - println("Adding RLE block, length", len(lits)) - } - bh.setType(blockTypeRLE) - b.output = bh.appendTo(b.output) - b.output = append(b.output, lits[0]) - return nil - default: - return err - case nil: - } - // Compressed... - // Now, allow reuse - b.litEnc.Reuse = huff0.ReusePolicyAllow - bh.setType(blockTypeCompressed) - var lh literalsHeader - if reUsed { - if debug { - println("Reused tree, compressed to", len(out)) - } - lh.setType(literalsBlockTreeless) - } else { - if debug { - println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) - } - lh.setType(literalsBlockCompressed) - } - // Set sizes - lh.setSizes(len(out), len(lits), single) - bh.setSize(uint32(len(out) + lh.size() + 1)) - - // Write block headers. - b.output = bh.appendTo(b.output) - b.output = lh.appendTo(b.output) - // Add compressed data. - b.output = append(b.output, out...) - // No sequences. - b.output = append(b.output, 0) - return nil -} - -// fuzzFseEncoder can be used to fuzz the FSE encoder. -func fuzzFseEncoder(data []byte) int { - if len(data) > maxSequences || len(data) < 2 { - return 0 - } - enc := fseEncoder{} - hist := enc.Histogram()[:256] - maxSym := uint8(0) - for i, v := range data { - v = v & 63 - data[i] = v - hist[v]++ - if v > maxSym { - maxSym = v - } - } - if maxSym == 0 { - // All 0 - return 0 - } - maxCount := func(a []uint32) int { - var max uint32 - for _, v := range a { - if v > max { - max = v - } - } - return int(max) - } - cnt := maxCount(hist[:maxSym]) - if cnt == len(data) { - // RLE - return 0 - } - enc.HistogramFinished(maxSym, cnt) - err := enc.normalizeCount(len(data)) - if err != nil { - return 0 - } - _, err = enc.writeCount(nil) - if err != nil { - panic(err) - } - return 1 -} - -// encode will encode the block and append the output in b.output. -// Previous offset codes must be pushed if more blocks are expected. -func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { - if len(b.sequences) == 0 { - return b.encodeLits(b.literals, rawAllLits) - } - // We want some difference to at least account for the headers. 
- saved := b.size - len(b.literals) - (b.size >> 5) - if saved < 16 { - if org == nil { - return errIncompressible - } - b.popOffsets() - return b.encodeLits(org, rawAllLits) - } - - var bh blockHeader - var lh literalsHeader - bh.setLast(b.last) - bh.setType(blockTypeCompressed) - // Store offset of the block header. Needed when we know the size. - bhOffset := len(b.output) - b.output = bh.appendTo(b.output) - - var ( - out []byte - reUsed, single bool - err error - ) - if b.dictLitEnc != nil { - b.litEnc.TransferCTable(b.dictLitEnc) - b.litEnc.Reuse = huff0.ReusePolicyAllow - b.dictLitEnc = nil - } - if len(b.literals) >= 1024 && !raw { - // Use 4 Streams. - out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) - } else if len(b.literals) > 32 && !raw { - // Use 1 stream - single = true - out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) - } else { - err = huff0.ErrIncompressible - } - - switch err { - case huff0.ErrIncompressible: - lh.setType(literalsBlockRaw) - lh.setSize(len(b.literals)) - b.output = lh.appendTo(b.output) - b.output = append(b.output, b.literals...) - if debug { - println("Adding literals RAW, length", len(b.literals)) - } - case huff0.ErrUseRLE: - lh.setType(literalsBlockRLE) - lh.setSize(len(b.literals)) - b.output = lh.appendTo(b.output) - b.output = append(b.output, b.literals[0]) - if debug { - println("Adding literals RLE") - } - default: - if debug { - println("Adding literals ERROR:", err) - } - return err - case nil: - // Compressed litLen... - if reUsed { - if debug { - println("reused tree") - } - lh.setType(literalsBlockTreeless) - } else { - if debug { - println("new tree, size:", len(b.litEnc.OutTable)) - } - lh.setType(literalsBlockCompressed) - if debug { - _, _, err := huff0.ReadTable(out, nil) - if err != nil { - panic(err) - } - } - } - lh.setSizes(len(out), len(b.literals), single) - if debug { - printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) - println("Adding literal header:", lh) - } - b.output = lh.appendTo(b.output) - b.output = append(b.output, out...) - b.litEnc.Reuse = huff0.ReusePolicyAllow - if debug { - println("Adding literals compressed") - } - } - // Sequence compression - - // Write the number of sequences - switch { - case len(b.sequences) < 128: - b.output = append(b.output, uint8(len(b.sequences))) - case len(b.sequences) < 0x7f00: // TODO: this could be wrong - n := len(b.sequences) - b.output = append(b.output, 128+uint8(n>>8), uint8(n)) - default: - n := len(b.sequences) - 0x7f00 - b.output = append(b.output, 255, uint8(n), uint8(n>>8)) - } - if debug { - println("Encoding", len(b.sequences), "sequences") - } - b.genCodes() - llEnc := b.coders.llEnc - ofEnc := b.coders.ofEnc - mlEnc := b.coders.mlEnc - err = llEnc.normalizeCount(len(b.sequences)) - if err != nil { - return err - } - err = ofEnc.normalizeCount(len(b.sequences)) - if err != nil { - return err - } - err = mlEnc.normalizeCount(len(b.sequences)) - if err != nil { - return err - } - - // Choose the best compression mode for each type. - // Will evaluate the new vs predefined and previous. - chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) { - // See if predefined/previous is better - hist := cur.count[:cur.symbolLen] - nSize := cur.approxSize(hist) + cur.maxHeaderSize() - predefSize := preDef.approxSize(hist) - prevSize := prev.approxSize(hist) - - // Add a small penalty for new encoders. - // Don't bother with extremely small (<2 byte gains). 
- nSize = nSize + (nSize+2*8*16)>>4 - switch { - case predefSize <= prevSize && predefSize <= nSize || forcePreDef: - if debug { - println("Using predefined", predefSize>>3, "<=", nSize>>3) - } - return preDef, compModePredefined - case prevSize <= nSize: - if debug { - println("Using previous", prevSize>>3, "<=", nSize>>3) - } - return prev, compModeRepeat - default: - if debug { - println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") - println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) - } - return cur, compModeFSE - } - } - - // Write compression mode - var mode uint8 - if llEnc.useRLE { - mode |= uint8(compModeRLE) << 6 - llEnc.setRLE(b.sequences[0].llCode) - if debug { - println("llEnc.useRLE") - } - } else { - var m seqCompMode - llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths]) - mode |= uint8(m) << 6 - } - if ofEnc.useRLE { - mode |= uint8(compModeRLE) << 4 - ofEnc.setRLE(b.sequences[0].ofCode) - if debug { - println("ofEnc.useRLE") - } - } else { - var m seqCompMode - ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets]) - mode |= uint8(m) << 4 - } - - if mlEnc.useRLE { - mode |= uint8(compModeRLE) << 2 - mlEnc.setRLE(b.sequences[0].mlCode) - if debug { - println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) - } - } else { - var m seqCompMode - mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths]) - mode |= uint8(m) << 2 - } - b.output = append(b.output, mode) - if debug { - printf("Compression modes: 0b%b", mode) - } - b.output, err = llEnc.writeCount(b.output) - if err != nil { - return err - } - start := len(b.output) - b.output, err = ofEnc.writeCount(b.output) - if err != nil { - return err - } - if false { - println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount) - fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen) - for i, v := range ofEnc.norm[:ofEnc.symbolLen] { - fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v) - } - } - b.output, err = mlEnc.writeCount(b.output) - if err != nil { - return err - } - - // Maybe in block? - wr := &b.wr - wr.reset(b.output) - - var ll, of, ml cState - - // Current sequence - seq := len(b.sequences) - 1 - s := b.sequences[seq] - llEnc.setBits(llBitsTable[:]) - mlEnc.setBits(mlBitsTable[:]) - ofEnc.setBits(nil) - - llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256] - - // We have 3 bounds checks here (and in the loop). - // Since we are iterating backwards it is kinda hard to avoid. - llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] - ll.init(wr, &llEnc.ct, llB) - of.init(wr, &ofEnc.ct, ofB) - wr.flush32() - ml.init(wr, &mlEnc.ct, mlB) - - // Each of these lookups also generates a bounds check. 
- wr.addBits32NC(s.litLen, llB.outBits) - wr.addBits32NC(s.matchLen, mlB.outBits) - wr.flush32() - wr.addBits32NC(s.offset, ofB.outBits) - if debugSequences { - println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) - } - seq-- - if llEnc.maxBits+mlEnc.maxBits+ofEnc.maxBits <= 32 { - // No need to flush (common) - for seq >= 0 { - s = b.sequences[seq] - wr.flush32() - llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] - // tabelog max is 8 for all. - of.encode(ofB) - ml.encode(mlB) - ll.encode(llB) - wr.flush32() - - // We checked that all can stay within 32 bits - wr.addBits32NC(s.litLen, llB.outBits) - wr.addBits32NC(s.matchLen, mlB.outBits) - wr.addBits32NC(s.offset, ofB.outBits) - - if debugSequences { - println("Encoded seq", seq, s) - } - - seq-- - } - } else { - for seq >= 0 { - s = b.sequences[seq] - wr.flush32() - llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] - // tabelog max is below 8 for each. - of.encode(ofB) - ml.encode(mlB) - ll.encode(llB) - wr.flush32() - - // ml+ll = max 32 bits total - wr.addBits32NC(s.litLen, llB.outBits) - wr.addBits32NC(s.matchLen, mlB.outBits) - wr.flush32() - wr.addBits32NC(s.offset, ofB.outBits) - - if debugSequences { - println("Encoded seq", seq, s) - } - - seq-- - } - } - ml.flush(mlEnc.actualTableLog) - of.flush(ofEnc.actualTableLog) - ll.flush(llEnc.actualTableLog) - err = wr.close() - if err != nil { - return err - } - b.output = wr.out - - if len(b.output)-3-bhOffset >= b.size { - // Maybe even add a bigger margin. - b.litEnc.Reuse = huff0.ReusePolicyNone - return errIncompressible - } - - // Size is output minus block header. - bh.setSize(uint32(len(b.output)-bhOffset) - 3) - if debug { - println("Rewriting block header", bh) - } - _ = bh.appendTo(b.output[bhOffset:bhOffset]) - b.coders.setPrev(llEnc, mlEnc, ofEnc) - return nil -} - -var errIncompressible = errors.New("incompressible") - -func (b *blockEnc) genCodes() { - if len(b.sequences) == 0 { - // nothing to do - return - } - - if len(b.sequences) > math.MaxUint16 { - panic("can only encode up to 64K sequences") - } - // No bounds checks after here: - llH := b.coders.llEnc.Histogram()[:256] - ofH := b.coders.ofEnc.Histogram()[:256] - mlH := b.coders.mlEnc.Histogram()[:256] - for i := range llH { - llH[i] = 0 - } - for i := range ofH { - ofH[i] = 0 - } - for i := range mlH { - mlH[i] = 0 - } - - var llMax, ofMax, mlMax uint8 - for i, seq := range b.sequences { - v := llCode(seq.litLen) - seq.llCode = v - llH[v]++ - if v > llMax { - llMax = v - } - - v = ofCode(seq.offset) - seq.ofCode = v - ofH[v]++ - if v > ofMax { - ofMax = v - } - - v = mlCode(seq.matchLen) - seq.mlCode = v - mlH[v]++ - if v > mlMax { - mlMax = v - if debugAsserts && mlMax > maxMatchLengthSymbol { - panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen)) - } - } - b.sequences[i] = seq - } - maxCount := func(a []uint32) int { - var max uint32 - for _, v := range a { - if v > max { - max = v - } - } - return int(max) - } - if debugAsserts && mlMax > maxMatchLengthSymbol { - panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax)) - } - if debugAsserts && ofMax > maxOffsetBits { - panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax)) - } - if debugAsserts && llMax > maxLiteralLengthSymbol { - panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax)) - } - - b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1])) - b.coders.ofEnc.HistogramFinished(ofMax, 
maxCount(ofH[:ofMax+1])) - b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1])) -} diff --git a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go deleted file mode 100644 index 01a01e486..000000000 --- a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go +++ /dev/null @@ -1,85 +0,0 @@ -// Code generated by "stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT. - -package zstd - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[blockTypeRaw-0] - _ = x[blockTypeRLE-1] - _ = x[blockTypeCompressed-2] - _ = x[blockTypeReserved-3] -} - -const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved" - -var _blockType_index = [...]uint8{0, 12, 24, 43, 60} - -func (i blockType) String() string { - if i >= blockType(len(_blockType_index)-1) { - return "blockType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _blockType_name[_blockType_index[i]:_blockType_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[literalsBlockRaw-0] - _ = x[literalsBlockRLE-1] - _ = x[literalsBlockCompressed-2] - _ = x[literalsBlockTreeless-3] -} - -const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless" - -var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76} - -func (i literalsBlockType) String() string { - if i >= literalsBlockType(len(_literalsBlockType_index)-1) { - return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[compModePredefined-0] - _ = x[compModeRLE-1] - _ = x[compModeFSE-2] - _ = x[compModeRepeat-3] -} - -const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat" - -var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54} - -func (i seqCompMode) String() string { - if i >= seqCompMode(len(_seqCompMode_index)-1) { - return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[tableLiteralLengths-0] - _ = x[tableOffsets-1] - _ = x[tableMatchLengths-2] -} - -const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths" - -var _tableIndex_index = [...]uint8{0, 19, 31, 48} - -func (i tableIndex) String() string { - if i >= tableIndex(len(_tableIndex_index)-1) { - return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]] -} diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go deleted file mode 100644 index 658ef7838..000000000 --- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "fmt" - "io" - "io/ioutil" -) - -type byteBuffer interface { - // Read up to 8 bytes. - // Returns nil if no more input is available. - readSmall(n int) []byte - - // Read >8 bytes. - // MAY use the destination slice. - readBig(n int, dst []byte) ([]byte, error) - - // Read a single byte. - readByte() (byte, error) - - // Skip n bytes. - skipN(n int) error -} - -// in-memory buffer -type byteBuf []byte - -func (b *byteBuf) readSmall(n int) []byte { - if debugAsserts && n > 8 { - panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) - } - bb := *b - if len(bb) < n { - return nil - } - r := bb[:n] - *b = bb[n:] - return r -} - -func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { - bb := *b - if len(bb) < n { - return nil, io.ErrUnexpectedEOF - } - r := bb[:n] - *b = bb[n:] - return r, nil -} - -func (b *byteBuf) remain() []byte { - return *b -} - -func (b *byteBuf) readByte() (byte, error) { - bb := *b - if len(bb) < 1 { - return 0, nil - } - r := bb[0] - *b = bb[1:] - return r, nil -} - -func (b *byteBuf) skipN(n int) error { - bb := *b - if len(bb) < n { - return io.ErrUnexpectedEOF - } - *b = bb[n:] - return nil -} - -// wrapper around a reader. -type readerWrapper struct { - r io.Reader - tmp [8]byte -} - -func (r *readerWrapper) readSmall(n int) []byte { - if debugAsserts && n > 8 { - panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) - } - n2, err := io.ReadFull(r.r, r.tmp[:n]) - // We only really care about the actual bytes read. - if n2 != n { - if debug { - println("readSmall: got", n2, "want", n, "err", err) - } - return nil - } - return r.tmp[:n] -} - -func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { - if cap(dst) < n { - dst = make([]byte, n) - } - n2, err := io.ReadFull(r.r, dst[:n]) - if err == io.EOF && n > 0 { - err = io.ErrUnexpectedEOF - } - return dst[:n2], err -} - -func (r *readerWrapper) readByte() (byte, error) { - n2, err := r.r.Read(r.tmp[:1]) - if err != nil { - return 0, err - } - if n2 != 1 { - return 0, io.ErrUnexpectedEOF - } - return r.tmp[0], nil -} - -func (r *readerWrapper) skipN(n int) error { - n2, err := io.CopyN(ioutil.Discard, r.r, int64(n)) - if n2 != int64(n) { - err = io.ErrUnexpectedEOF - } - return err -} diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go deleted file mode 100644 index 2c4fca17f..000000000 --- a/vendor/github.com/klauspost/compress/zstd/bytereader.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. 
-// Based on work by Yann Collet, released under BSD License. - -package zstd - -// byteReader provides a byte reader that reads -// little endian values from a byte stream. -// The input stream is manually advanced. -// The reader performs no bounds checks. -type byteReader struct { - b []byte - off int -} - -// init will initialize the reader and set the input. -func (b *byteReader) init(in []byte) { - b.b = in - b.off = 0 -} - -// advance the stream b n bytes. -func (b *byteReader) advance(n uint) { - b.off += int(n) -} - -// overread returns whether we have advanced too far. -func (b *byteReader) overread() bool { - return b.off > len(b.b) -} - -// Int32 returns a little endian int32 starting at current offset. -func (b byteReader) Int32() int32 { - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := int32(b2[3]) - v2 := int32(b2[2]) - v1 := int32(b2[1]) - v0 := int32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// Uint8 returns the next byte -func (b *byteReader) Uint8() uint8 { - v := b.b[b.off] - return v -} - -// Uint32 returns a little endian uint32 starting at current offset. -func (b byteReader) Uint32() uint32 { - if r := b.remain(); r < 4 { - // Very rare - v := uint32(0) - for i := 1; i <= r; i++ { - v = (v << 8) | uint32(b.b[len(b.b)-i]) - } - return v - } - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := uint32(b2[3]) - v2 := uint32(b2[2]) - v1 := uint32(b2[1]) - v0 := uint32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// Uint32NC returns a little endian uint32 starting at current offset. -// The caller must be sure if there are at least 4 bytes left. -func (b byteReader) Uint32NC() uint32 { - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := uint32(b2[3]) - v2 := uint32(b2[2]) - v1 := uint32(b2[1]) - v0 := uint32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// unread returns the unread portion of the input. -func (b byteReader) unread() []byte { - return b.b[b.off:] -} - -// remain will return the number of bytes remaining. -func (b byteReader) remain() int { - return len(b.b) - b.off -} diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go deleted file mode 100644 index 87896c5ea..000000000 --- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright 2020+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package zstd - -import ( - "bytes" - "errors" - "io" -) - -// HeaderMaxSize is the maximum size of a Frame and Block Header. -// If less is sent to Header.Decode it *may* still contain enough information. -const HeaderMaxSize = 14 + 3 - -// Header contains information about the first frame and block within that. -type Header struct { - // Window Size the window of data to keep while decoding. - // Will only be set if HasFCS is false. - WindowSize uint64 - - // Frame content size. - // Expected size of the entire frame. - FrameContentSize uint64 - - // Dictionary ID. - // If 0, no dictionary. - DictionaryID uint32 - - // First block information. - FirstBlock struct { - // OK will be set if first block could be decoded. - OK bool - - // Is this the last block of a frame? - Last bool - - // Is the data compressed? - // If true CompressedSize will be populated. - // Unfortunately DecompressedSize cannot be determined - // without decoding the blocks. - Compressed bool - - // DecompressedSize is the expected decompressed size of the block. - // Will be 0 if it cannot be determined. 
- DecompressedSize int - - // CompressedSize of the data in the block. - // Does not include the block header. - // Will be equal to DecompressedSize if not Compressed. - CompressedSize int - } - - // Skippable will be true if the frame is meant to be skipped. - // No other information will be populated. - Skippable bool - - // If set there is a checksum present for the block content. - HasCheckSum bool - - // If this is true FrameContentSize will have a valid value - HasFCS bool - - SingleSegment bool -} - -// Decode the header from the beginning of the stream. -// This will decode the frame header and the first block header if enough bytes are provided. -// It is recommended to provide at least HeaderMaxSize bytes. -// If the frame header cannot be read an error will be returned. -// If there isn't enough input, io.ErrUnexpectedEOF is returned. -// The FirstBlock.OK will indicate if enough information was available to decode the first block header. -func (h *Header) Decode(in []byte) error { - if len(in) < 4 { - return io.ErrUnexpectedEOF - } - b, in := in[:4], in[4:] - if !bytes.Equal(b, frameMagic) { - if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 { - return ErrMagicMismatch - } - *h = Header{Skippable: true} - return nil - } - if len(in) < 1 { - return io.ErrUnexpectedEOF - } - - // Clear output - *h = Header{} - fhd, in := in[0], in[1:] - h.SingleSegment = fhd&(1<<5) != 0 - h.HasCheckSum = fhd&(1<<2) != 0 - - if fhd&(1<<3) != 0 { - return errors.New("Reserved bit set on frame header") - } - - // Read Window_Descriptor - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor - if !h.SingleSegment { - if len(in) < 1 { - return io.ErrUnexpectedEOF - } - var wd byte - wd, in = in[0], in[1:] - windowLog := 10 + (wd >> 3) - windowBase := uint64(1) << windowLog - windowAdd := (windowBase / 8) * uint64(wd&0x7) - h.WindowSize = windowBase + windowAdd - } - - // Read Dictionary_ID - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id - if size := fhd & 3; size != 0 { - if size == 3 { - size = 4 - } - if len(in) < int(size) { - return io.ErrUnexpectedEOF - } - b, in = in[:size], in[size:] - if b == nil { - return io.ErrUnexpectedEOF - } - switch size { - case 1: - h.DictionaryID = uint32(b[0]) - case 2: - h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) - case 4: - h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - } - } - - // Read Frame_Content_Size - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size - var fcsSize int - v := fhd >> 6 - switch v { - case 0: - if h.SingleSegment { - fcsSize = 1 - } - default: - fcsSize = 1 << v - } - - if fcsSize > 0 { - h.HasFCS = true - if len(in) < fcsSize { - return io.ErrUnexpectedEOF - } - b, in = in[:fcsSize], in[fcsSize:] - if b == nil { - return io.ErrUnexpectedEOF - } - switch fcsSize { - case 1: - h.FrameContentSize = uint64(b[0]) - case 2: - // When FCS_Field_Size is 2, the offset of 256 is added. 
- h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 - case 4: - h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) - case 8: - d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) - h.FrameContentSize = uint64(d1) | (uint64(d2) << 32) - } - } - - // Frame Header done, we will not fail from now on. - if len(in) < 3 { - return nil - } - tmp, in := in[:3], in[3:] - bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) - h.FirstBlock.Last = bh&1 != 0 - blockType := blockType((bh >> 1) & 3) - // find size. - cSize := int(bh >> 3) - switch blockType { - case blockTypeReserved: - return nil - case blockTypeRLE: - h.FirstBlock.Compressed = true - h.FirstBlock.DecompressedSize = cSize - h.FirstBlock.CompressedSize = 1 - case blockTypeCompressed: - h.FirstBlock.Compressed = true - h.FirstBlock.CompressedSize = cSize - case blockTypeRaw: - h.FirstBlock.DecompressedSize = cSize - h.FirstBlock.CompressedSize = cSize - default: - panic("Invalid block type") - } - - h.FirstBlock.OK = true - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go deleted file mode 100644 index 1d41c25d2..000000000 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ /dev/null @@ -1,561 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "io" - "sync" -) - -// Decoder provides decoding of zstandard streams. -// The decoder has been designed to operate without allocations after a warmup. -// This means that you should store the decoder for best performance. -// To re-use a stream decoder, use the Reset(r io.Reader) error to switch to another stream. -// A decoder can safely be re-used even if the previous stream failed. -// To release the resources, you must call the Close() function on a decoder. -type Decoder struct { - o decoderOptions - - // Unreferenced decoders, ready for use. - decoders chan *blockDec - - // Streams ready to be decoded. - stream chan decodeStream - - // Current read position used for Reader functionality. - current decoderState - - // Custom dictionaries. - // Always uses copies. - dicts map[uint32]dict - - // streamWg is the waitgroup for all streams - streamWg sync.WaitGroup -} - -// decoderState is used for maintaining state when the decoder -// is used for streaming. -type decoderState struct { - // current block being written to stream. - decodeOutput - - // output in order to be written to stream. - output chan decodeOutput - - // cancel remaining output. - cancel chan struct{} - - flushed bool -} - -var ( - // Check the interfaces we want to support. - _ = io.WriterTo(&Decoder{}) - _ = io.Reader(&Decoder{}) -) - -// NewReader creates a new decoder. -// A nil Reader can be provided in which case Reset can be used to start a decode. -// -// A Decoder can be used in two modes: -// -// 1) As a stream, or -// 2) For stateless decoding using DecodeAll. -// -// Only a single stream can be decoded concurrently, but the same decoder -// can run multiple concurrent stateless decodes. It is even possible to -// use stateless decodes while a stream is being decoded. 
-// -// The Reset function can be used to initiate a new stream, which is will considerably -// reduce the allocations normally caused by NewReader. -func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { - initPredefined() - var d Decoder - d.o.setDefault() - for _, o := range opts { - err := o(&d.o) - if err != nil { - return nil, err - } - } - d.current.output = make(chan decodeOutput, d.o.concurrent) - d.current.flushed = true - - if r == nil { - d.current.err = ErrDecoderNilInput - } - - // Transfer option dicts. - d.dicts = make(map[uint32]dict, len(d.o.dicts)) - for _, dc := range d.o.dicts { - d.dicts[dc.id] = dc - } - d.o.dicts = nil - - // Create decoders - d.decoders = make(chan *blockDec, d.o.concurrent) - for i := 0; i < d.o.concurrent; i++ { - dec := newBlockDec(d.o.lowMem) - dec.localFrame = newFrameDec(d.o) - d.decoders <- dec - } - - if r == nil { - return &d, nil - } - return &d, d.Reset(r) -} - -// Read bytes from the decompressed stream into p. -// Returns the number of bytes written and any error that occurred. -// When the stream is done, io.EOF will be returned. -func (d *Decoder) Read(p []byte) (int, error) { - if d.stream == nil { - return 0, ErrDecoderNilInput - } - var n int - for { - if len(d.current.b) > 0 { - filled := copy(p, d.current.b) - p = p[filled:] - d.current.b = d.current.b[filled:] - n += filled - } - if len(p) == 0 { - break - } - if len(d.current.b) == 0 { - // We have an error and no more data - if d.current.err != nil { - break - } - if !d.nextBlock(n == 0) { - return n, nil - } - } - } - if len(d.current.b) > 0 { - if debug { - println("returning", n, "still bytes left:", len(d.current.b)) - } - // Only return error at end of block - return n, nil - } - if d.current.err != nil { - d.drainOutput() - } - if debug { - println("returning", n, d.current.err, len(d.decoders)) - } - return n, d.current.err -} - -// Reset will reset the decoder the supplied stream after the current has finished processing. -// Note that this functionality cannot be used after Close has been called. -// Reset can be called with a nil reader to release references to the previous reader. -// After being called with a nil reader, no other operations than Reset or DecodeAll or Close -// should be used. -func (d *Decoder) Reset(r io.Reader) error { - if d.current.err == ErrDecoderClosed { - return d.current.err - } - - d.drainOutput() - - if r == nil { - d.current.err = ErrDecoderNilInput - d.current.flushed = true - return nil - } - - if d.stream == nil { - d.stream = make(chan decodeStream, 1) - d.streamWg.Add(1) - go d.startStreamDecoder(d.stream) - } - - // If bytes buffer and < 1MB, do sync decoding anyway. - if bb, ok := r.(byter); ok && bb.Len() < 1<<20 { - var bb2 byter - bb2 = bb - if debug { - println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) - } - b := bb2.Bytes() - var dst []byte - if cap(d.current.b) > 0 { - dst = d.current.b - } - - dst, err := d.DecodeAll(b, dst[:0]) - if err == nil { - err = io.EOF - } - d.current.b = dst - d.current.err = err - d.current.flushed = true - if debug { - println("sync decode to", len(dst), "bytes, err:", err) - } - return nil - } - - // Remove current block. - d.current.decodeOutput = decodeOutput{} - d.current.err = nil - d.current.cancel = make(chan struct{}) - d.current.flushed = false - d.current.d = nil - - d.stream <- decodeStream{ - r: r, - output: d.current.output, - cancel: d.current.cancel, - } - return nil -} - -// drainOutput will drain the output until errEndOfStream is sent. 
-func (d *Decoder) drainOutput() { - if d.current.cancel != nil { - println("cancelling current") - close(d.current.cancel) - d.current.cancel = nil - } - if d.current.d != nil { - if debug { - printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) - } - d.decoders <- d.current.d - d.current.d = nil - d.current.b = nil - } - if d.current.output == nil || d.current.flushed { - println("current already flushed") - return - } - for { - select { - case v := <-d.current.output: - if v.d != nil { - if debug { - printf("re-adding decoder %p", v.d) - } - d.decoders <- v.d - } - if v.err == errEndOfStream { - println("current flushed") - d.current.flushed = true - return - } - } - } -} - -// WriteTo writes data to w until there's no more data to write or when an error occurs. -// The return value n is the number of bytes written. -// Any error encountered during the write is also returned. -func (d *Decoder) WriteTo(w io.Writer) (int64, error) { - if d.stream == nil { - return 0, ErrDecoderNilInput - } - var n int64 - for { - if len(d.current.b) > 0 { - n2, err2 := w.Write(d.current.b) - n += int64(n2) - if err2 != nil && d.current.err == nil { - d.current.err = err2 - break - } - } - if d.current.err != nil { - break - } - d.nextBlock(true) - } - err := d.current.err - if err != nil { - d.drainOutput() - } - if err == io.EOF { - err = nil - } - return n, err -} - -// DecodeAll allows stateless decoding of a blob of bytes. -// Output will be appended to dst, so if the destination size is known -// you can pre-allocate the destination slice to avoid allocations. -// DecodeAll can be used concurrently. -// The Decoder concurrency limits will be respected. -func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { - if d.current.err == ErrDecoderClosed { - return dst, ErrDecoderClosed - } - - // Grab a block decoder and frame decoder. - block := <-d.decoders - frame := block.localFrame - defer func() { - if debug { - printf("re-adding decoder: %p", block) - } - frame.rawInput = nil - frame.bBuf = nil - d.decoders <- block - }() - frame.bBuf = input - - for { - frame.history.reset() - err := frame.reset(&frame.bBuf) - if err == io.EOF { - if debug { - println("frame reset return EOF") - } - return dst, nil - } - if frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - return nil, ErrUnknownDictionary - } - frame.history.setDict(&dict) - } - if err != nil { - return dst, err - } - if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) { - return dst, ErrDecoderSizeExceeded - } - if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 { - // Never preallocate moe than 1 GB up front. - if cap(dst)-len(dst) < int(frame.FrameContentSize) { - dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)) - copy(dst2, dst) - dst = dst2 - } - } - if cap(dst) == 0 { - // Allocate len(input) * 2 by default if nothing is provided - // and we didn't get frame content size. - size := len(input) * 2 - // Cap to 1 MB. - if size > 1<<20 { - size = 1 << 20 - } - if uint64(size) > d.o.maxDecodedSize { - size = int(d.o.maxDecodedSize) - } - dst = make([]byte, 0, size) - } - - dst, err = frame.runDecoder(dst, block) - if err != nil { - return dst, err - } - if len(frame.bBuf) == 0 { - if debug { - println("frame dbuf empty") - } - break - } - } - return dst, nil -} - -// nextBlock returns the next block. -// If an error occurs d.err will be set. -// Optionally the function can block for new output. 
-// If non-blocking mode is used the returned boolean will be false -// if no data was available without blocking. -func (d *Decoder) nextBlock(blocking bool) (ok bool) { - if d.current.d != nil { - if debug { - printf("re-adding current decoder %p", d.current.d) - } - d.decoders <- d.current.d - d.current.d = nil - } - if d.current.err != nil { - // Keep error state. - return blocking - } - - if blocking { - d.current.decodeOutput = <-d.current.output - } else { - select { - case d.current.decodeOutput = <-d.current.output: - default: - return false - } - } - if debug { - println("got", len(d.current.b), "bytes, error:", d.current.err) - } - return true -} - -// Close will release all resources. -// It is NOT possible to reuse the decoder after this. -func (d *Decoder) Close() { - if d.current.err == ErrDecoderClosed { - return - } - d.drainOutput() - if d.stream != nil { - close(d.stream) - d.streamWg.Wait() - d.stream = nil - } - if d.decoders != nil { - close(d.decoders) - for dec := range d.decoders { - dec.Close() - } - d.decoders = nil - } - if d.current.d != nil { - d.current.d.Close() - d.current.d = nil - } - d.current.err = ErrDecoderClosed -} - -// IOReadCloser returns the decoder as an io.ReadCloser for convenience. -// Any changes to the decoder will be reflected, so the returned ReadCloser -// can be reused along with the decoder. -// io.WriterTo is also supported by the returned ReadCloser. -func (d *Decoder) IOReadCloser() io.ReadCloser { - return closeWrapper{d: d} -} - -// closeWrapper wraps a function call as a closer. -type closeWrapper struct { - d *Decoder -} - -// WriteTo forwards WriteTo calls to the decoder. -func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) { - return c.d.WriteTo(w) -} - -// Read forwards read calls to the decoder. -func (c closeWrapper) Read(p []byte) (n int, err error) { - return c.d.Read(p) -} - -// Close closes the decoder. -func (c closeWrapper) Close() error { - c.d.Close() - return nil -} - -type decodeOutput struct { - d *blockDec - b []byte - err error -} - -type decodeStream struct { - r io.Reader - - // Blocks ready to be written to output. - output chan decodeOutput - - // cancel reading from the input - cancel chan struct{} -} - -// errEndOfStream indicates that everything from the stream was read. -var errEndOfStream = errors.New("end-of-stream") - -// Create Decoder: -// Spawn n block decoders. These accept tasks to decode a block. -// Create goroutine that handles stream processing, this will send history to decoders as they are available. -// Decoders update the history as they decode. -// When a block is returned: -// a) history is sent to the next decoder, -// b) content written to CRC. -// c) return data to WRITER. -// d) wait for next block to return data. -// Once WRITTEN, the decoders reused by the writer frame decoder for re-use. 
-func (d *Decoder) startStreamDecoder(inStream chan decodeStream) { - defer d.streamWg.Done() - frame := newFrameDec(d.o) - for stream := range inStream { - if debug { - println("got new stream") - } - br := readerWrapper{r: stream.r} - decodeStream: - for { - frame.history.reset() - err := frame.reset(&br) - if debug && err != nil { - println("Frame decoder returned", err) - } - if err == nil && frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - err = ErrUnknownDictionary - } else { - frame.history.setDict(&dict) - } - } - if err != nil { - stream.output <- decodeOutput{ - err: err, - } - break - } - if debug { - println("starting frame decoder") - } - - // This goroutine will forward history between frames. - frame.frameDone.Add(1) - frame.initAsync() - - go frame.startDecoder(stream.output) - decodeFrame: - // Go through all blocks of the frame. - for { - dec := <-d.decoders - select { - case <-stream.cancel: - if !frame.sendErr(dec, io.EOF) { - // To not let the decoder dangle, send it back. - stream.output <- decodeOutput{d: dec} - } - break decodeStream - default: - } - err := frame.next(dec) - switch err { - case io.EOF: - // End of current frame, no error - println("EOF on next block") - break decodeFrame - case nil: - continue - default: - println("block decoder returned", err) - break decodeStream - } - } - // All blocks have started decoding, check if there are more frames. - println("waiting for done") - frame.frameDone.Wait() - println("done waiting...") - } - frame.frameDone.Wait() - println("Sending EOS") - stream.output <- decodeOutput{err: errEndOfStream} - } -} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go deleted file mode 100644 index 284d38449..000000000 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "runtime" -) - -// DOption is an option for creating a decoder. -type DOption func(*decoderOptions) error - -// options retains accumulated state of multiple options. -type decoderOptions struct { - lowMem bool - concurrent int - maxDecodedSize uint64 - dicts []dict -} - -func (o *decoderOptions) setDefault() { - *o = decoderOptions{ - // use less ram: true for now, but may change. - lowMem: true, - concurrent: runtime.GOMAXPROCS(0), - } - o.maxDecodedSize = 1 << 63 -} - -// WithDecoderLowmem will set whether to use a lower amount of memory, -// but possibly have to allocate more while running. -func WithDecoderLowmem(b bool) DOption { - return func(o *decoderOptions) error { o.lowMem = b; return nil } -} - -// WithDecoderConcurrency will set the concurrency, -// meaning the maximum number of decoders to run concurrently. -// The value supplied must be at least 1. -// By default this will be set to GOMAXPROCS. -func WithDecoderConcurrency(n int) DOption { - return func(o *decoderOptions) error { - if n <= 0 { - return fmt.Errorf("Concurrency must be at least 1") - } - o.concurrent = n - return nil - } -} - -// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory -// non-streaming operations or maximum window size for streaming operations. -// This can be used to control memory usage of potentially hostile content. 
-// For streaming operations, the maximum window size is capped at 1<<30 bytes. -// Maximum and default is 1 << 63 bytes. -func WithDecoderMaxMemory(n uint64) DOption { - return func(o *decoderOptions) error { - if n == 0 { - return errors.New("WithDecoderMaxMemory must be at least 1") - } - if n > 1<<63 { - return fmt.Errorf("WithDecoderMaxmemory must be less than 1 << 63") - } - o.maxDecodedSize = n - return nil - } -} - -// WithDecoderDicts allows to register one or more dictionaries for the decoder. -// If several dictionaries with the same ID is provided the last one will be used. -func WithDecoderDicts(dicts ...[]byte) DOption { - return func(o *decoderOptions) error { - for _, b := range dicts { - d, err := loadDict(b) - if err != nil { - return err - } - o.dicts = append(o.dicts, *d) - } - return nil - } -} diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go deleted file mode 100644 index fa25a18d8..000000000 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ /dev/null @@ -1,122 +0,0 @@ -package zstd - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - - "github.com/klauspost/compress/huff0" -) - -type dict struct { - id uint32 - - litEnc *huff0.Scratch - llDec, ofDec, mlDec sequenceDec - //llEnc, ofEnc, mlEnc []*fseEncoder - offsets [3]int - content []byte -} - -var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec} - -// ID returns the dictionary id or 0 if d is nil. -func (d *dict) ID() uint32 { - if d == nil { - return 0 - } - return d.id -} - -// DictContentSize returns the dictionary content size or 0 if d is nil. -func (d *dict) DictContentSize() int { - if d == nil { - return 0 - } - return len(d.content) -} - -// Load a dictionary as described in -// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format -func loadDict(b []byte) (*dict, error) { - // Check static field size. - if len(b) <= 8+(3*4) { - return nil, io.ErrUnexpectedEOF - } - d := dict{ - llDec: sequenceDec{fse: &fseDecoder{}}, - ofDec: sequenceDec{fse: &fseDecoder{}}, - mlDec: sequenceDec{fse: &fseDecoder{}}, - } - if !bytes.Equal(b[:4], dictMagic[:]) { - return nil, ErrMagicMismatch - } - d.id = binary.LittleEndian.Uint32(b[4:8]) - if d.id == 0 { - return nil, errors.New("dictionaries cannot have ID 0") - } - - // Read literal table - var err error - d.litEnc, b, err = huff0.ReadTable(b[8:], nil) - if err != nil { - return nil, err - } - d.litEnc.Reuse = huff0.ReusePolicyMust - - br := byteReader{ - b: b, - off: 0, - } - readDec := func(i tableIndex, dec *fseDecoder) error { - if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil { - return err - } - if br.overread() { - return io.ErrUnexpectedEOF - } - err = dec.transform(symbolTableX[i]) - if err != nil { - println("Transform table error:", err) - return err - } - if debug { - println("Read table ok", "symbolLen:", dec.symbolLen) - } - // Set decoders as predefined so they aren't reused. 
- dec.preDefined = true - return nil - } - - if err := readDec(tableOffsets, d.ofDec.fse); err != nil { - return nil, err - } - if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil { - return nil, err - } - if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil { - return nil, err - } - if br.remain() < 12 { - return nil, io.ErrUnexpectedEOF - } - - d.offsets[0] = int(br.Uint32()) - br.advance(4) - d.offsets[1] = int(br.Uint32()) - br.advance(4) - d.offsets[2] = int(br.Uint32()) - br.advance(4) - if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 { - return nil, errors.New("invalid offset in dictionary") - } - d.content = make([]byte, br.remain()) - copy(d.content, br.unread()) - if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) { - return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets) - } - - return &d, nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go deleted file mode 100644 index 2d4d893eb..000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ /dev/null @@ -1,177 +0,0 @@ -package zstd - -import ( - "fmt" - "math/bits" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -const ( - dictShardBits = 6 -) - -type fastBase struct { - // cur is the offset at the start of hist - cur int32 - // maximum offset. Should be at least 2x block size. - maxMatchOff int32 - hist []byte - crc *xxhash.Digest - tmp [8]byte - blk *blockEnc - lastDictID uint32 - lowMem bool -} - -// CRC returns the underlying CRC writer. -func (e *fastBase) CRC() *xxhash.Digest { - return e.crc -} - -// AppendCRC will append the CRC to the destination slice and return it. -func (e *fastBase) AppendCRC(dst []byte) []byte { - crc := e.crc.Sum(e.tmp[:0]) - dst = append(dst, crc[7], crc[6], crc[5], crc[4]) - return dst -} - -// WindowSize returns the window size of the encoder, -// or a window size small enough to contain the input size, if > 0. -func (e *fastBase) WindowSize(size int) int32 { - if size > 0 && size < int(e.maxMatchOff) { - b := int32(1) << uint(bits.Len(uint(size))) - // Keep minimum window. - if b < 1024 { - b = 1024 - } - return b - } - return e.maxMatchOff -} - -// Block returns the current block. -func (e *fastBase) Block() *blockEnc { - return e.blk -} - -func (e *fastBase) addBlock(src []byte) int32 { - if debugAsserts && e.cur > bufferReset { - panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset)) - } - // check if we have space already - if len(e.hist)+len(src) > cap(e.hist) { - if cap(e.hist) == 0 { - e.ensureHist(len(src)) - } else { - if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) { - panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff)) - } - // Move down - offset := int32(len(e.hist)) - e.maxMatchOff - copy(e.hist[0:e.maxMatchOff], e.hist[offset:]) - e.cur += offset - e.hist = e.hist[:e.maxMatchOff] - } - } - s := int32(len(e.hist)) - e.hist = append(e.hist, src...) - return s -} - -// ensureHist will ensure that history can keep at least this many bytes. 
-func (e *fastBase) ensureHist(n int) { - if cap(e.hist) >= n { - return - } - l := e.maxMatchOff - if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize { - l += maxCompressedBlockSize - } else { - l += e.maxMatchOff - } - // Make it at least 1MB. - if l < 1<<20 && !e.lowMem { - l = 1 << 20 - } - // Make it at least the requested size. - if l < int32(n) { - l = int32(n) - } - e.hist = make([]byte, 0, l) -} - -// useBlock will replace the block with the provided one, -// but transfer recent offsets from the previous. -func (e *fastBase) UseBlock(enc *blockEnc) { - enc.reset(e.blk) - e.blk = enc -} - -func (e *fastBase) matchlenNoHist(s, t int32, src []byte) int32 { - // Extend the match to be as long as possible. - return int32(matchLen(src[s:], src[t:])) -} - -func (e *fastBase) matchlen(s, t int32, src []byte) int32 { - if debugAsserts { - if s < 0 { - err := fmt.Sprintf("s (%d) < 0", s) - panic(err) - } - if t < 0 { - err := fmt.Sprintf("s (%d) < 0", s) - panic(err) - } - if s-t > e.maxMatchOff { - err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff) - panic(err) - } - if len(src)-int(s) > maxCompressedBlockSize { - panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) - } - } - - // Extend the match to be as long as possible. - return int32(matchLen(src[s:], src[t:])) -} - -// Reset the encoding table. -func (e *fastBase) resetBase(d *dict, singleBlock bool) { - if e.blk == nil { - e.blk = &blockEnc{lowMem: e.lowMem} - e.blk.init() - } else { - e.blk.reset(nil) - } - e.blk.initNewEncode() - if e.crc == nil { - e.crc = xxhash.New() - } else { - e.crc.Reset() - } - if (!singleBlock || d.DictContentSize() > 0) && cap(e.hist) < int(e.maxMatchOff*2)+d.DictContentSize() { - l := e.maxMatchOff*2 + int32(d.DictContentSize()) - // Make it at least 1MB. - if l < 1<<20 { - l = 1 << 20 - } - e.hist = make([]byte, 0, l) - } - // We offset current position so everything will be out of reach. - // If above reset line, history will be purged. - if e.cur < bufferReset { - e.cur += e.maxMatchOff + int32(len(e.hist)) - } - e.hist = e.hist[:0] - if d != nil { - // Set offsets (currently not used) - for i, off := range d.offsets { - e.blk.recentOffsets[i] = uint32(off) - e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i] - } - // Transfer litenc. - e.blk.dictLitEnc = d.litEnc - e.hist = append(e.hist, d.content...) - } -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go deleted file mode 100644 index fe3625c5f..000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ /dev/null @@ -1,487 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "fmt" - "math/bits" -) - -const ( - bestLongTableBits = 20 // Bits used in the long match table - bestLongTableSize = 1 << bestLongTableBits // Size of the table - - // Note: Increasing the short table bits or making the hash shorter - // can actually lead to compression degradation since it will 'steal' more from the - // long match table and match offsets are quite big. - // This greatly depends on the type of input. 
- bestShortTableBits = 16 // Bits used in the short match table - bestShortTableSize = 1 << bestShortTableBits // Size of the table -) - -// bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. -// The long match table contains the previous entry with the same hash, -// effectively making it a "chain" of length 2. -// When we find a long match we choose between the two values and select the longest. -// When we find a short match, after checking the long, we check if we can find a long at n+1 -// and that it is longer (lazy matching). -type bestFastEncoder struct { - fastBase - table [bestShortTableSize]prevEntry - longTable [bestLongTableSize]prevEntry - dictTable []prevEntry - dictLongTable []prevEntry -} - -// Encode improves compression... -func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 4 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = prevEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = prevEntry{} - } - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - v2 := e.table[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.table[i] = prevEntry{ - offset: v, - prev: v2, - } - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - v2 := e.longTable[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.longTable[i] = prevEntry{ - offset: v, - prev: v2, - } - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - const kSearchStrength = 10 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - offset3 := int32(blk.recentOffsets[2]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) 
- s.litLen = uint32(until - nextEmit) - } - _ = addLiterals - - if debug { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - type match struct { - offset int32 - s int32 - length int32 - rep int32 - } - matchAt := func(offset int32, s int32, first uint32, rep int32) match { - if s-offset >= e.maxMatchOff || load3232(src, offset) != first { - return match{offset: offset, s: s} - } - return match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep} - } - - bestOf := func(a, b match) match { - aScore := b.s - a.s + a.length - bScore := a.s - b.s + b.length - if a.rep < 0 { - aScore = aScore - int32(bits.Len32(uint32(a.offset)))/8 - } - if b.rep < 0 { - bScore = bScore - int32(bits.Len32(uint32(b.offset)))/8 - } - if aScore >= bScore { - return a - } - return b - } - const goodEnough = 100 - - nextHashL := hash8(cv, bestLongTableBits) - nextHashS := hash4x64(cv, bestShortTableBits) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)) - if canRepeat && best.length < goodEnough { - best = bestOf(best, matchAt(s-offset1+1, s+1, uint32(cv>>8), 1)) - best = bestOf(best, matchAt(s-offset2+1, s+1, uint32(cv>>8), 2)) - best = bestOf(best, matchAt(s-offset3+1, s+1, uint32(cv>>8), 3)) - if best.length > 0 { - best = bestOf(best, matchAt(s-offset1+3, s+3, uint32(cv>>24), 1)) - best = bestOf(best, matchAt(s-offset2+3, s+3, uint32(cv>>24), 2)) - best = bestOf(best, matchAt(s-offset3+3, s+3, uint32(cv>>24), 3)) - } - } - // Load next and check... - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} - e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} - - // Look far ahead, unless we have a really long match already... - if best.length < goodEnough { - // No match found, move forward on input, no need to check forward... - if best.length < 4 { - s += 1 + (s-nextEmit)>>(kSearchStrength-1) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - continue - } - - s++ - candidateS = e.table[hash4x64(cv>>8, bestShortTableBits)] - cv = load6432(src, s) - cv2 := load6432(src, s+1) - candidateL = e.longTable[hash8(cv, bestLongTableBits)] - candidateL2 := e.longTable[hash8(cv2, bestLongTableBits)] - - best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)) - best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)) - } - - // We have a match, we can store the forward value - if best.rep > 0 { - s = best.s - var seq seq - seq.matchLen = uint32(best.length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := best.s - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - repIndex := best.offset - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = uint32(best.rep) - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Index match start+1 (long) -> s - 1 - index0 := s - s = best.s + best.length - - nextEmit = s - if s >= sLimit { - if debug { - println("repeat ended", s, best.length) - - } - break encodeLoop - } - // Index skipped... - off := index0 + e.cur - for index0 < s-1 { - cv0 := load6432(src, index0) - h0 := hash8(cv0, bestLongTableBits) - h1 := hash4x64(cv0, bestShortTableBits) - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} - off++ - index0++ - } - switch best.rep { - case 2: - offset1, offset2 = offset2, offset1 - case 3: - offset1, offset2, offset3 = offset3, offset1, offset2 - } - cv = load6432(src, s) - continue - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - s = best.s - t := best.offset - offset1, offset2, offset3 = s-t, offset1, offset2 - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the n-byte match as long as possible. - l := best.length - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) -> s - 1 - index0 := s - l + 1 - // every entry - for index0 < s-1 { - cv0 := load6432(src, index0) - h0 := hash8(cv0, bestLongTableBits) - h1 := hash4x64(cv0, bestShortTableBits) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} - index0++ - } - - cv = load6432(src, s) - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hash4x64(cv, bestShortTableBits) - nextHashL := hash8(cv, bestLongTableBits) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} - e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. 
- offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - blk.recentOffsets[2] = uint32(offset3) - if debug { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - e.ensureHist(len(src)) - e.Encode(blk, src) -} - -// ResetDict will reset and set a dictionary if not nil -func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d == nil { - return - } - // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { - if len(e.dictTable) != len(e.table) { - e.dictTable = make([]prevEntry, len(e.table)) - } - end := int32(len(d.content)) - 8 + e.maxMatchOff - for i := e.maxMatchOff; i < end; i += 4 { - const hashLog = bestShortTableBits - - cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hash4x64(cv, hashLog) // 0 -> 4 - nextHash1 := hash4x64(cv>>8, hashLog) // 1 -> 5 - nextHash2 := hash4x64(cv>>16, hashLog) // 2 -> 6 - nextHash3 := hash4x64(cv>>24, hashLog) // 3 -> 7 - e.dictTable[nextHash] = prevEntry{ - prev: e.dictTable[nextHash].offset, - offset: i, - } - e.dictTable[nextHash1] = prevEntry{ - prev: e.dictTable[nextHash1].offset, - offset: i + 1, - } - e.dictTable[nextHash2] = prevEntry{ - prev: e.dictTable[nextHash2].offset, - offset: i + 2, - } - e.dictTable[nextHash3] = prevEntry{ - prev: e.dictTable[nextHash3].offset, - offset: i + 3, - } - } - e.lastDictID = d.id - } - - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { - if len(e.dictLongTable) != len(e.longTable) { - e.dictLongTable = make([]prevEntry, len(e.longTable)) - } - if len(d.content) >= 8 { - cv := load6432(d.content, 0) - h := hash8(cv, bestLongTableBits) - e.dictLongTable[h] = prevEntry{ - offset: e.maxMatchOff, - prev: e.dictLongTable[h].offset, - } - - end := int32(len(d.content)) - 8 + e.maxMatchOff - off := 8 // First to read - for i := e.maxMatchOff + 1; i < end; i++ { - cv = cv>>8 | (uint64(d.content[off]) << 56) - h := hash8(cv, bestLongTableBits) - e.dictLongTable[h] = prevEntry{ - offset: i, - prev: e.dictLongTable[h].offset, - } - off++ - } - } - e.lastDictID = d.id - } - // Reset table to initial state - copy(e.longTable[:], e.dictLongTable) - - e.cur = e.maxMatchOff - // Reset table to initial state - copy(e.table[:], e.dictTable) -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go deleted file mode 100644 index c2ce4a2ba..000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ /dev/null @@ -1,1170 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. 
- -package zstd - -import "fmt" - -const ( - betterLongTableBits = 19 // Bits used in the long match table - betterLongTableSize = 1 << betterLongTableBits // Size of the table - - // Note: Increasing the short table bits or making the hash shorter - // can actually lead to compression degradation since it will 'steal' more from the - // long match table and match offsets are quite big. - // This greatly depends on the type of input. - betterShortTableBits = 13 // Bits used in the short match table - betterShortTableSize = 1 << betterShortTableBits // Size of the table - - betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table - betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard - - betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table - betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard -) - -type prevEntry struct { - offset int32 - prev int32 -} - -// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. -// The long match table contains the previous entry with the same hash, -// effectively making it a "chain" of length 2. -// When we find a long match we choose between the two values and select the longest. -// When we find a short match, after checking the long, we check if we can find a long at n+1 -// and that it is longer (lazy matching). -type betterFastEncoder struct { - fastBase - table [betterShortTableSize]tableEntry - longTable [betterLongTableSize]prevEntry -} - -type betterFastEncoderDict struct { - betterFastEncoder - dictTable []tableEntry - dictLongTable []prevEntry - shortTableShardDirty [betterShortTableShardCnt]bool - longTableShardDirty [betterLongTableShardCnt]bool - allDirty bool -} - -// Encode improves compression... -func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = prevEntry{} - } - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - v2 := e.longTable[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.longTable[i] = prevEntry{ - offset: v, - prev: v2, - } - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 9 - - // nextEmit is where in src the next emitLiteral should start from. 
- nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debug { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - var matched int32 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashS := hash5(cv, betterShortTableBits) - nextHashL := hash8(cv, betterLongTableBits) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - off := s + e.cur - e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} - e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Index match start+1 (long) -> s - 1 - index0 := s + repOff - s += lenght + repOff - - nextEmit = s - if s >= sLimit { - if debug { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - // Index skipped... - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hash8(cv0, betterLongTableBits) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} - index0 += 2 - } - cv = load6432(src, s) - continue - } - const repOff2 = 1 - - // We deviate from the reference encoder and also check offset 2. - // Still slower and not much better, so disabled. - // repIndex = s - offset2 + repOff2 - if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { - // Consider history as well. - var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 2 - seq.offset = 2 - if debugSequences { - println("repeat sequence 2", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - index0 := s + repOff2 - s += lenght + repOff2 - nextEmit = s - if s >= sLimit { - if debug { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - - // Index skipped... - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hash8(cv0, betterLongTableBits) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} - index0 += 2 - } - cv = load6432(src, s) - // Swap offsets - offset1, offset2 = offset2, offset1 - continue - } - } - // Find the offsets of our two matches. - coffsetL := candidateL.offset - e.cur - coffsetLP := candidateL.prev - e.cur - - // Check if we have a long match. - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matched = e.matchlen(s+8, coffsetL+8, src) + 8 - t = coffsetL - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. - prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 - if prevMatch > matched { - matched = prevMatch - t = coffsetLP - } - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - } - break - } - - // Check if we have a long match on prev. - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. - matched = e.matchlen(s+8, coffsetLP+8, src) + 8 - t = coffsetLP - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - coffsetS := candidateS.offset - e.cur - - // Check if we have a short match. - if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - matched = e.matchlen(s+4, coffsetS+4, src) + 4 - - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hash8(cv, betterLongTableBits) - candidateL = e.longTable[nextHashL] - coffsetL = candidateL.offset - e.cur - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("long match (after short)") - } - break - } - } - - // Check prev long... 
- coffsetL = candidateL.prev - e.cur - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("prev long match (after short)") - } - break - } - } - t = coffsetS - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the n-byte match as long as possible. - l := matched - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) -> s - 1 - index0 := s - l + 1 - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hash8(cv0, betterLongTableBits) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hash5(cv1, betterShortTableBits)] = tableEntry{offset: off + 1, val: uint32(cv1)} - index0 += 2 - } - - cv = load6432(src, s) - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hash5(cv, betterShortTableBits) - nextHashL := hash8(cv, betterLongTableBits) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} - e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debug { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - e.ensureHist(len(src)) - e.Encode(blk, src) -} - -// Encode improves compression... -func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = prevEntry{} - } - e.cur = e.maxMatchOff - e.allDirty = true - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - v2 := e.longTable[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.longTable[i] = prevEntry{ - offset: v, - prev: v2, - } - } - e.allDirty = true - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 9 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debug { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - var matched int32 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashS := hash5(cv, betterShortTableBits) - nextHashL := hash8(cv, betterLongTableBits) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - off := s + e.cur - e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} - e.markShortShardDirty(nextHashS) - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. 
- var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Index match start+1 (long) -> s - 1 - index0 := s + repOff - s += lenght + repOff - - nextEmit = s - if s >= sLimit { - if debug { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - // Index skipped... - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hash8(cv0, betterLongTableBits) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.markLongShardDirty(h0) - h1 := hash5(cv1, betterShortTableBits) - e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} - e.markShortShardDirty(h1) - index0 += 2 - } - cv = load6432(src, s) - continue - } - const repOff2 = 1 - - // We deviate from the reference encoder and also check offset 2. - // Still slower and not much better, so disabled. - // repIndex = s - offset2 + repOff2 - if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { - // Consider history as well. - var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 2 - seq.offset = 2 - if debugSequences { - println("repeat sequence 2", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - index0 := s + repOff2 - s += lenght + repOff2 - nextEmit = s - if s >= sLimit { - if debug { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - - // Index skipped... - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hash8(cv0, betterLongTableBits) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.markLongShardDirty(h0) - h1 := hash5(cv1, betterShortTableBits) - e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} - e.markShortShardDirty(h1) - index0 += 2 - } - cv = load6432(src, s) - // Swap offsets - offset1, offset2 = offset2, offset1 - continue - } - } - // Find the offsets of our two matches. - coffsetL := candidateL.offset - e.cur - coffsetLP := candidateL.prev - e.cur - - // Check if we have a long match. - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. 
- matched = e.matchlen(s+8, coffsetL+8, src) + 8 - t = coffsetL - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. - prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 - if prevMatch > matched { - matched = prevMatch - t = coffsetLP - } - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - } - break - } - - // Check if we have a long match on prev. - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. - matched = e.matchlen(s+8, coffsetLP+8, src) + 8 - t = coffsetLP - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - coffsetS := candidateS.offset - e.cur - - // Check if we have a short match. - if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - matched = e.matchlen(s+4, coffsetS+4, src) + 4 - - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hash8(cv, betterLongTableBits) - candidateL = e.longTable[nextHashL] - coffsetL = candidateL.offset - e.cur - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} - e.markLongShardDirty(nextHashL) - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("long match (after short)") - } - break - } - } - - // Check prev long... - coffsetL = candidateL.prev - e.cur - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("prev long match (after short)") - } - break - } - } - t = coffsetS - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the n-byte match as long as possible. 
- l := matched - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) -> s - 1 - index0 := s - l + 1 - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hash8(cv0, betterLongTableBits) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.markLongShardDirty(h0) - h1 := hash5(cv1, betterShortTableBits) - e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} - e.markShortShardDirty(h1) - index0 += 2 - } - - cv = load6432(src, s) - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hash5(cv, betterShortTableBits) - nextHashL := hash8(cv, betterLongTableBits) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.markShortShardDirty(nextHashS) - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debug { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d != nil { - panic("betterFastEncoder: Reset with dict") - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d == nil { - return - } - // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { - if len(e.dictTable) != len(e.table) { - e.dictTable = make([]tableEntry, len(e.table)) - } - end := int32(len(d.content)) - 8 + e.maxMatchOff - for i := e.maxMatchOff; i < end; i += 4 { - const hashLog = betterShortTableBits - - cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hash5(cv, hashLog) // 0 -> 4 - nextHash1 := hash5(cv>>8, hashLog) // 1 -> 5 - nextHash2 := hash5(cv>>16, hashLog) // 2 -> 6 - nextHash3 := hash5(cv>>24, hashLog) // 3 -> 7 - e.dictTable[nextHash] = tableEntry{ - val: uint32(cv), - offset: i, - } - e.dictTable[nextHash1] = tableEntry{ - val: uint32(cv >> 8), - offset: i + 1, - } - e.dictTable[nextHash2] = tableEntry{ - val: uint32(cv >> 16), - offset: i + 2, - } - e.dictTable[nextHash3] = tableEntry{ - val: uint32(cv >> 24), - offset: i + 3, - } - } - e.lastDictID = d.id - e.allDirty = true - } - - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { - if len(e.dictLongTable) != len(e.longTable) { - e.dictLongTable = make([]prevEntry, len(e.longTable)) - } - if len(d.content) >= 8 { - cv := load6432(d.content, 0) - h := hash8(cv, betterLongTableBits) - e.dictLongTable[h] = prevEntry{ - offset: e.maxMatchOff, - prev: e.dictLongTable[h].offset, - } - - end := int32(len(d.content)) - 8 + e.maxMatchOff - off := 8 // First to read - for i := e.maxMatchOff + 1; i < end; i++ { - cv = cv>>8 | (uint64(d.content[off]) << 56) - h := hash8(cv, betterLongTableBits) - e.dictLongTable[h] = prevEntry{ - offset: i, - prev: e.dictLongTable[h].offset, - } - off++ - } - } - e.lastDictID = d.id - e.allDirty = true - } - - // Reset table to initial state - { - dirtyShardCnt := 0 - if !e.allDirty { - for i := range e.shortTableShardDirty { - if e.shortTableShardDirty[i] { - dirtyShardCnt++ - } - } - } - const shardCnt = betterShortTableShardCnt - const shardSize = betterShortTableShardSize - if e.allDirty || dirtyShardCnt > shardCnt*4/6 { - copy(e.table[:], e.dictTable) - for i := range e.shortTableShardDirty { - e.shortTableShardDirty[i] = false - } - } else { - for i := range e.shortTableShardDirty { - if !e.shortTableShardDirty[i] { - continue - } - - copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) - e.shortTableShardDirty[i] = false - } - } - } - { - dirtyShardCnt := 0 - if !e.allDirty { - for i := range e.shortTableShardDirty { - if e.shortTableShardDirty[i] { - dirtyShardCnt++ - } - } - } - const shardCnt = betterLongTableShardCnt - const shardSize = betterLongTableShardSize - if e.allDirty || dirtyShardCnt > shardCnt*4/6 { - copy(e.longTable[:], e.dictLongTable) - for i := range e.longTableShardDirty { - e.longTableShardDirty[i] = false - } - } else { - for i := range e.longTableShardDirty { - if !e.longTableShardDirty[i] { - continue - } - - 
copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize]) - e.longTableShardDirty[i] = false - } - } - } - e.cur = e.maxMatchOff - e.allDirty = false -} - -func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) { - e.longTableShardDirty[entryNum/betterLongTableShardSize] = true -} - -func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) { - e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go deleted file mode 100644 index 8629d43d8..000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ /dev/null @@ -1,1121 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import "fmt" - -const ( - dFastLongTableBits = 17 // Bits used in the long match table - dFastLongTableSize = 1 << dFastLongTableBits // Size of the table - dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. - - dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table - dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard - - dFastShortTableBits = tableBits // Bits used in the short match table - dFastShortTableSize = 1 << dFastShortTableBits // Size of the table - dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. -) - -type doubleFastEncoder struct { - fastEncoder - longTable [dFastLongTableSize]tableEntry -} - -type doubleFastEncoderDict struct { - fastEncoderDict - longTable [dFastLongTableSize]tableEntry - dictLongTable []tableEntry - longTableShardDirty [dLongTableShardCnt]bool -} - -// Encode mimmics functionality in zstd_dfast.c -func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = tableEntry{} - } - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.longTable[i].offset = v - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 8 - - // nextEmit is where in src the next emitLiteral should start from. 
- nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debug { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashS := hash5(cv, dFastShortTableBits) - nextHashL := hash8(cv, dFastLongTableBits) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += lenght + repOff - nextEmit = s - if s >= sLimit { - if debug { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - } - // Find the offsets of our two matches. - coffsetL := s - (candidateL.offset - e.cur) - coffsetS := s - (candidateS.offset - e.cur) - - // Check if we have a long match. - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - // Check if we have a short match. - if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hash8(cv, dFastLongTableBits) - candidateL = e.longTable[nextHashL] - coffsetL = s - (candidateL.offset - e.cur) + checkAt - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. 
- t = candidateL.offset - e.cur - s += checkAt - if debugMatches { - println("long match (after short)") - } - break - } - - t = candidateS.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) and start+2 (short) - index0 := s - l + 1 - // Index match end-2 (long) and end-1 (short) - index1 := s - 2 - - cv0 := load6432(src, index0) - cv1 := load6432(src, index1) - te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} - te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - e.longTable[hash8(cv0, dFastLongTableBits)] = te0 - e.longTable[hash8(cv1, dFastLongTableBits)] = te1 - cv0 >>= 8 - cv1 >>= 8 - te0.offset++ - te1.offset++ - te0.val = uint32(cv0) - te1.val = uint32(cv1) - e.table[hash5(cv0, dFastShortTableBits)] = te0 - e.table[hash5(cv1, dFastShortTableBits)] = te1 - - cv = load6432(src, s) - - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hash5(cv, dFastShortTableBits) - nextHashL := hash8(cv, dFastLongTableBits) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debug { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - if e.cur >= bufferReset { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = tableEntry{} - } - e.cur = e.maxMatchOff - } - - s := int32(0) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 8 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debug { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - for { - - nextHashS := hash5(cv, dFastShortTableBits) - nextHashL := hash8(cv, dFastLongTableBits) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - - if len(blk.sequences) > 2 { - if load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:])) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + repOff - nextEmit = s - if s >= sLimit { - if debug { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - } - // Find the offsets of our two matches. - coffsetL := s - (candidateL.offset - e.cur) - coffsetS := s - (candidateS.offset - e.cur) - - // Check if we have a long match. - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. 
- // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - // Check if we have a short match. - if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hash8(cv, dFastLongTableBits) - candidateL = e.longTable[nextHashL] - coffsetL = s - (candidateL.offset - e.cur) + checkAt - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - s += checkAt - if debugMatches { - println("long match (after short)") - } - break - } - - t = candidateS.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - // Extend the 4-byte match as long as possible. - //l := e.matchlen(s+4, t+4, src) + 4 - l := int32(matchLen(src[s+4:], src[t+4:])) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) 
- } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) and start+2 (short) - index0 := s - l + 1 - // Index match end-2 (long) and end-1 (short) - index1 := s - 2 - - cv0 := load6432(src, index0) - cv1 := load6432(src, index1) - te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} - te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - e.longTable[hash8(cv0, dFastLongTableBits)] = te0 - e.longTable[hash8(cv1, dFastLongTableBits)] = te1 - cv0 >>= 8 - cv1 >>= 8 - te0.offset++ - te1.offset++ - te0.val = uint32(cv0) - te1.val = uint32(cv1) - e.table[hash5(cv0, dFastShortTableBits)] = te0 - e.table[hash5(cv1, dFastShortTableBits)] = te1 - - cv = load6432(src, s) - - if len(blk.sequences) <= 2 { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hash5(cv1>>8, dFastShortTableBits) - nextHashL := hash8(cv, dFastLongTableBits) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - //l := 4 + e.matchlen(s+4, o2+4, src) - l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) - - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - if debug { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } - - // We do not store history, so we must offset e.cur to avoid false matches for next user. - if e.cur < bufferReset { - e.cur += int32(len(src)) - } -} - -// Encode will encode the content, with a dictionary if initialized for it. -func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = tableEntry{} - } - e.markAllShardsDirty() - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. 
- minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.longTable[i].offset = v - } - e.markAllShardsDirty() - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 8 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debug { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashS := hash5(cv, dFastShortTableBits) - nextHashL := hash8(cv, dFastLongTableBits) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = entry - e.markShardDirty(nextHashS) - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(lenght - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += lenght + repOff - nextEmit = s - if s >= sLimit { - if debug { - println("repeat ended", s, lenght) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - } - // Find the offsets of our two matches. - coffsetL := s - (candidateL.offset - e.cur) - coffsetS := s - (candidateS.offset - e.cur) - - // Check if we have a long match. - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. 
- t = candidateL.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - // Check if we have a short match. - if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hash8(cv, dFastLongTableBits) - candidateL = e.longTable[nextHashL] - coffsetL = s - (candidateL.offset - e.cur) + checkAt - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} - e.markLongShardDirty(nextHashL) - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - s += checkAt - if debugMatches { - println("long match (after short)") - } - break - } - - t = candidateS.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) 
- } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) and start+2 (short) - index0 := s - l + 1 - // Index match end-2 (long) and end-1 (short) - index1 := s - 2 - - cv0 := load6432(src, index0) - cv1 := load6432(src, index1) - te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} - te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - longHash1 := hash8(cv0, dFastLongTableBits) - longHash2 := hash8(cv0, dFastLongTableBits) - e.longTable[longHash1] = te0 - e.longTable[longHash2] = te1 - e.markLongShardDirty(longHash1) - e.markLongShardDirty(longHash2) - cv0 >>= 8 - cv1 >>= 8 - te0.offset++ - te1.offset++ - te0.val = uint32(cv0) - te1.val = uint32(cv1) - hashVal1 := hash5(cv0, dFastShortTableBits) - hashVal2 := hash5(cv1, dFastShortTableBits) - e.table[hashVal1] = te0 - e.markShardDirty(hashVal1) - e.table[hashVal2] = te1 - e.markShardDirty(hashVal2) - - cv = load6432(src, s) - - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hash5(cv, dFastShortTableBits) - nextHashL := hash8(cv, dFastLongTableBits) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = entry - e.markShardDirty(nextHashS) - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debug { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } - // If we encoded more than 64K mark all dirty. 
- if len(src) > 64<<10 { - e.markAllShardsDirty() - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { - e.fastEncoder.Reset(d, singleBlock) - if d != nil { - panic("doubleFastEncoder: Reset with dict not supported") - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { - allDirty := e.allDirty - e.fastEncoderDict.Reset(d, singleBlock) - if d == nil { - return - } - - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { - if len(e.dictLongTable) != len(e.longTable) { - e.dictLongTable = make([]tableEntry, len(e.longTable)) - } - if len(d.content) >= 8 { - cv := load6432(d.content, 0) - e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{ - val: uint32(cv), - offset: e.maxMatchOff, - } - end := int32(len(d.content)) - 8 + e.maxMatchOff - for i := e.maxMatchOff + 1; i < end; i++ { - cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56) - e.dictLongTable[hash8(cv, dFastLongTableBits)] = tableEntry{ - val: uint32(cv), - offset: i, - } - } - } - e.lastDictID = d.id - e.allDirty = true - } - // Reset table to initial state - e.cur = e.maxMatchOff - - dirtyShardCnt := 0 - if !allDirty { - for i := range e.longTableShardDirty { - if e.longTableShardDirty[i] { - dirtyShardCnt++ - } - } - } - - if allDirty || dirtyShardCnt > dLongTableShardCnt/2 { - copy(e.longTable[:], e.dictLongTable) - for i := range e.longTableShardDirty { - e.longTableShardDirty[i] = false - } - return - } - for i := range e.longTableShardDirty { - if !e.longTableShardDirty[i] { - continue - } - - copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) - e.longTableShardDirty[i] = false - } -} - -func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) { - e.longTableShardDirty[entryNum/dLongTableShardSize] = true -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go deleted file mode 100644 index ba4a17e10..000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ /dev/null @@ -1,1018 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "fmt" - "math" - "math/bits" -) - -const ( - tableBits = 15 // Bits used in the table - tableSize = 1 << tableBits // Size of the table - tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table - tableShardSize = tableSize / tableShardCnt // Size of an individual shard - tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. - maxMatchLength = 131074 -) - -type tableEntry struct { - val uint32 - offset int32 -} - -type fastEncoder struct { - fastBase - table [tableSize]tableEntry -} - -type fastEncoderDict struct { - fastEncoder - dictTable []tableEntry - tableShardDirty [tableShardCnt]bool - allDirty bool -} - -// Encode mimmics functionality in zstd_fast.c -func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - inputMargin = 8 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - - // Protect against e.cur wraparound. 
- for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 2. - const stepSize = 2 - - // TEMPLATE - const hashLog = tableBits - // seems global, but would be nice to tweak. - const kSearchStrength = 7 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debug { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // t will contain the match offset when we find one. - // When existing the search loop, we have already checked 4 bytes. - var t int32 - - // We will not use repeat offsets across blocks. - // By not using them for the first 3 matches - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHash := hash6(cv, hashLog) - nextHash2 := hash6(cv>>8, hashLog) - candidate := e.table[nextHash] - candidate2 := e.table[nextHash2] - repIndex := s - offset1 + 2 - - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} - - if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { - // Consider history as well. - var seq seq - var length int32 - // length = 4 + e.matchlen(s+6, repIndex+4, src) - { - a := src[s+6:] - b := src[repIndex+4:] - endI := len(a) & (math.MaxInt32 - 7) - length = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - length = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + 2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } - for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + 2 - nextEmit = s - if s >= sLimit { - if debug { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - coffset0 := s - (candidate.offset - e.cur) - coffset1 := s - (candidate2.offset - e.cur) + 1 - if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { - // found a regular match - t = candidate.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - break - } - - if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { - // found a regular match - t = candidate2.offset - e.cur - s++ - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - break - } - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // A 4-byte match has been found. We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - //l := e.matchlen(s+4, t+4, src) + 4 - var l int32 - { - a := src[s+4:] - b := src[t+4:] - endI := len(a) & (math.MaxInt32 - 7) - l = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence. - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - // Don't use repeat offsets - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - - // Check offset 2 - if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - //l := 4 + e.matchlen(s+4, o2+4, src) - var l int32 - { - a := src[s+4:] - b := src[o2+4:] - endI := len(a) & (math.MaxInt32 - 7) - l = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } - - // Store this, since we have it. - nextHash := hash6(cv, hashLog) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - // Since litlen is always 0, this is offset 1. 
- seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - break encodeLoop - } - // Prepare next loop. - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debug { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - const ( - inputMargin = 8 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - if debug { - if len(src) > maxBlockSize { - panic("src too big") - } - } - - // Protect against e.cur wraparound. - if e.cur >= bufferReset { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - e.cur = e.maxMatchOff - } - - s := int32(0) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 2. - const stepSize = 2 - - // TEMPLATE - const hashLog = tableBits - // seems global, but would be nice to tweak. - const kSearchStrength = 8 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debug { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // t will contain the match offset when we find one. - // When existing the search loop, we have already checked 4 bytes. - var t int32 - - // We will not use repeat offsets across blocks. - // By not using them for the first 3 matches - - for { - nextHash := hash6(cv, hashLog) - nextHash2 := hash6(cv>>8, hashLog) - candidate := e.table[nextHash] - candidate2 := e.table[nextHash2] - repIndex := s - offset1 + 2 - - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} - - if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) { - // Consider history as well. - var seq seq - // length := 4 + e.matchlen(s+6, repIndex+4, src) - // length := 4 + int32(matchLen(src[s+6:], src[repIndex+4:])) - var length int32 - { - a := src[s+6:] - b := src[repIndex+4:] - endI := len(a) & (math.MaxInt32 - 7) - length = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - length = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + 2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } - for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + 2 - nextEmit = s - if s >= sLimit { - if debug { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - coffset0 := s - (candidate.offset - e.cur) - coffset1 := s - (candidate2.offset - e.cur) + 1 - if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { - // found a regular match - t = candidate.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff)) - } - break - } - - if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { - // found a regular match - t = candidate2.offset - e.cur - s++ - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - break - } - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // A 4-byte match has been found. We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && t < 0 { - panic(fmt.Sprintf("t (%d) < 0 ", t)) - } - // Extend the 4-byte match as long as possible. - //l := e.matchlenNoHist(s+4, t+4, src) + 4 - // l := int32(matchLen(src[s+4:], src[t+4:])) + 4 - var l int32 - { - a := src[s+4:] - b := src[t+4:] - endI := len(a) & (math.MaxInt32 - 7) - l = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - - // Write our sequence. - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - // Don't use repeat offsets - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - - // Check offset 2 - if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - //l := 4 + e.matchlenNoHist(s+4, o2+4, src) - // l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) - var l int32 - { - a := src[s+4:] - b := src[o2+4:] - endI := len(a) & (math.MaxInt32 - 7) - l = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } - - // Store this, since we have it. 
- nextHash := hash6(cv, hashLog) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - break encodeLoop - } - // Prepare next loop. - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - if debug { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } - // We do not store history, so we must offset e.cur to avoid false matches for next user. - if e.cur < bufferReset { - e.cur += int32(len(src)) - } -} - -// Encode will encode the content, with a dictionary if initialized for it. -func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) { - const ( - inputMargin = 8 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - if e.allDirty || len(src) > 32<<10 { - e.fastEncoder.Encode(blk, src) - e.allDirty = true - return - } - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 2. - const stepSize = 2 - - // TEMPLATE - const hashLog = tableBits - // seems global, but would be nice to tweak. - const kSearchStrength = 7 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debug { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // t will contain the match offset when we find one. - // When existing the search loop, we have already checked 4 bytes. - var t int32 - - // We will not use repeat offsets across blocks. 
- // By not using them for the first 3 matches - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHash := hash6(cv, hashLog) - nextHash2 := hash6(cv>>8, hashLog) - candidate := e.table[nextHash] - candidate2 := e.table[nextHash2] - repIndex := s - offset1 + 2 - - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.markShardDirty(nextHash) - e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} - e.markShardDirty(nextHash2) - - if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { - // Consider history as well. - var seq seq - var length int32 - // length = 4 + e.matchlen(s+6, repIndex+4, src) - { - a := src[s+6:] - b := src[repIndex+4:] - endI := len(a) & (math.MaxInt32 - 7) - length = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - length = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + 2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } - for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + 2 - nextEmit = s - if s >= sLimit { - if debug { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - coffset0 := s - (candidate.offset - e.cur) - coffset1 := s - (candidate2.offset - e.cur) + 1 - if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { - // found a regular match - t = candidate.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - break - } - - if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { - // found a regular match - t = candidate2.offset - e.cur - s++ - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - break - } - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // A 4-byte match has been found. We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. 
- //l := e.matchlen(s+4, t+4, src) + 4 - var l int32 - { - a := src[s+4:] - b := src[t+4:] - endI := len(a) & (math.MaxInt32 - 7) - l = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence. - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - // Don't use repeat offsets - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - - // Check offset 2 - if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - //l := 4 + e.matchlen(s+4, o2+4, src) - var l int32 - { - a := src[s+4:] - b := src[o2+4:] - endI := len(a) & (math.MaxInt32 - 7) - l = int32(endI) + 4 - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - l = int32(i+bits.TrailingZeros64(diff)>>3) + 4 - break - } - } - } - - // Store this, since we have it. - nextHash := hash6(cv, hashLog) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.markShardDirty(nextHash) - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - break encodeLoop - } - // Prepare next loop. - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debug { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *fastEncoder) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d != nil { - panic("fastEncoder: Reset with dict") - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d == nil { - return - } - - // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { - if len(e.dictTable) != len(e.table) { - e.dictTable = make([]tableEntry, len(e.table)) - } - if true { - end := e.maxMatchOff + int32(len(d.content)) - 8 - for i := e.maxMatchOff; i < end; i += 3 { - const hashLog = tableBits - - cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hash6(cv, hashLog) // 0 -> 5 - nextHash1 := hash6(cv>>8, hashLog) // 1 -> 6 - nextHash2 := hash6(cv>>16, hashLog) // 2 -> 7 - e.dictTable[nextHash] = tableEntry{ - val: uint32(cv), - offset: i, - } - e.dictTable[nextHash1] = tableEntry{ - val: uint32(cv >> 8), - offset: i + 1, - } - e.dictTable[nextHash2] = tableEntry{ - val: uint32(cv >> 16), - offset: i + 2, - } - } - } - e.lastDictID = d.id - e.allDirty = true - } - - e.cur = e.maxMatchOff - dirtyShardCnt := 0 - if !e.allDirty { - for i := range e.tableShardDirty { - if e.tableShardDirty[i] { - dirtyShardCnt++ - } - } - } - - const shardCnt = tableShardCnt - const shardSize = tableShardSize - if e.allDirty || dirtyShardCnt > shardCnt*4/6 { - copy(e.table[:], e.dictTable) - for i := range e.tableShardDirty { - e.tableShardDirty[i] = false - } - e.allDirty = false - return - } - for i := range e.tableShardDirty { - if !e.tableShardDirty[i] { - continue - } - - copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) - e.tableShardDirty[i] = false - } - e.allDirty = false -} - -func (e *fastEncoderDict) markAllShardsDirty() { - e.allDirty = true -} - -func (e *fastEncoderDict) markShardDirty(entryNum uint32) { - e.tableShardDirty[entryNum/tableShardSize] = true -} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go deleted file mode 100644 index 6f0265099..000000000 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ /dev/null @@ -1,576 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "crypto/rand" - "fmt" - "io" - rdebug "runtime/debug" - "sync" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -// Encoder provides encoding to Zstandard. -// An Encoder can be used for either compressing a stream via the -// io.WriteCloser interface supported by the Encoder or as multiple independent -// tasks via the EncodeAll function. -// Smaller encodes are encouraged to use the EncodeAll function. -// Use NewWriter to create a new instance. 
-type Encoder struct { - o encoderOptions - encoders chan encoder - state encoderState - init sync.Once -} - -type encoder interface { - Encode(blk *blockEnc, src []byte) - EncodeNoHist(blk *blockEnc, src []byte) - Block() *blockEnc - CRC() *xxhash.Digest - AppendCRC([]byte) []byte - WindowSize(size int) int32 - UseBlock(*blockEnc) - Reset(d *dict, singleBlock bool) -} - -type encoderState struct { - w io.Writer - filling []byte - current []byte - previous []byte - encoder encoder - writing *blockEnc - err error - writeErr error - nWritten int64 - headerWritten bool - eofWritten bool - fullFrameWritten bool - - // This waitgroup indicates an encode is running. - wg sync.WaitGroup - // This waitgroup indicates we have a block encoding/writing. - wWg sync.WaitGroup -} - -// NewWriter will create a new Zstandard encoder. -// If the encoder will be used for encoding blocks a nil writer can be used. -func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) { - initPredefined() - var e Encoder - e.o.setDefault() - for _, o := range opts { - err := o(&e.o) - if err != nil { - return nil, err - } - } - if w != nil { - e.Reset(w) - } - return &e, nil -} - -func (e *Encoder) initialize() { - if e.o.concurrent == 0 { - e.o.setDefault() - } - e.encoders = make(chan encoder, e.o.concurrent) - for i := 0; i < e.o.concurrent; i++ { - enc := e.o.encoder() - e.encoders <- enc - } -} - -// Reset will re-initialize the writer and new writes will encode to the supplied writer -// as a new, independent stream. -func (e *Encoder) Reset(w io.Writer) { - s := &e.state - s.wg.Wait() - s.wWg.Wait() - if cap(s.filling) == 0 { - s.filling = make([]byte, 0, e.o.blockSize) - } - if cap(s.current) == 0 { - s.current = make([]byte, 0, e.o.blockSize) - } - if cap(s.previous) == 0 { - s.previous = make([]byte, 0, e.o.blockSize) - } - if s.encoder == nil { - s.encoder = e.o.encoder() - } - if s.writing == nil { - s.writing = &blockEnc{lowMem: e.o.lowMem} - s.writing.init() - } - s.writing.initNewEncode() - s.filling = s.filling[:0] - s.current = s.current[:0] - s.previous = s.previous[:0] - s.encoder.Reset(e.o.dict, false) - s.headerWritten = false - s.eofWritten = false - s.fullFrameWritten = false - s.w = w - s.err = nil - s.nWritten = 0 - s.writeErr = nil -} - -// Write data to the encoder. -// Input data will be buffered and as the buffer fills up -// content will be compressed and written to the output. -// When done writing, use Close to flush the remaining output -// and write CRC if requested. -func (e *Encoder) Write(p []byte) (n int, err error) { - s := &e.state - for len(p) > 0 { - if len(p)+len(s.filling) < e.o.blockSize { - if e.o.crc { - _, _ = s.encoder.CRC().Write(p) - } - s.filling = append(s.filling, p...) - return n + len(p), nil - } - add := p - if len(p)+len(s.filling) > e.o.blockSize { - add = add[:e.o.blockSize-len(s.filling)] - } - if e.o.crc { - _, _ = s.encoder.CRC().Write(add) - } - s.filling = append(s.filling, add...) - p = p[len(add):] - n += len(add) - if len(s.filling) < e.o.blockSize { - return n, nil - } - err := e.nextBlock(false) - if err != nil { - return n, err - } - if debugAsserts && len(s.filling) > 0 { - panic(len(s.filling)) - } - } - return n, nil -} - -// nextBlock will synchronize and start compressing input in e.state.filling. -// If an error has occurred during encoding it will be returned. -func (e *Encoder) nextBlock(final bool) error { - s := &e.state - // Wait for current block. 
- s.wg.Wait() - if s.err != nil { - return s.err - } - if len(s.filling) > e.o.blockSize { - return fmt.Errorf("block > maxStoreBlockSize") - } - if !s.headerWritten { - // If we have a single block encode, do a sync compression. - if final && len(s.filling) == 0 && !e.o.fullZero { - s.headerWritten = true - s.fullFrameWritten = true - s.eofWritten = true - return nil - } - if final && len(s.filling) > 0 { - s.current = e.EncodeAll(s.filling, s.current[:0]) - var n2 int - n2, s.err = s.w.Write(s.current) - if s.err != nil { - return s.err - } - s.nWritten += int64(n2) - s.current = s.current[:0] - s.filling = s.filling[:0] - s.headerWritten = true - s.fullFrameWritten = true - s.eofWritten = true - return nil - } - - var tmp [maxHeaderSize]byte - fh := frameHeader{ - ContentSize: 0, - WindowSize: uint32(s.encoder.WindowSize(0)), - SingleSegment: false, - Checksum: e.o.crc, - DictID: e.o.dict.ID(), - } - - dst, err := fh.appendTo(tmp[:0]) - if err != nil { - return err - } - s.headerWritten = true - s.wWg.Wait() - var n2 int - n2, s.err = s.w.Write(dst) - if s.err != nil { - return s.err - } - s.nWritten += int64(n2) - } - if s.eofWritten { - // Ensure we only write it once. - final = false - } - - if len(s.filling) == 0 { - // Final block, but no data. - if final { - enc := s.encoder - blk := enc.Block() - blk.reset(nil) - blk.last = true - blk.encodeRaw(nil) - s.wWg.Wait() - _, s.err = s.w.Write(blk.output) - s.nWritten += int64(len(blk.output)) - s.eofWritten = true - } - return s.err - } - - // Move blocks forward. - s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current - s.wg.Add(1) - go func(src []byte) { - if debug { - println("Adding block,", len(src), "bytes, final:", final) - } - defer func() { - if r := recover(); r != nil { - s.err = fmt.Errorf("panic while encoding: %v", r) - rdebug.PrintStack() - } - s.wg.Done() - }() - enc := s.encoder - blk := enc.Block() - enc.Encode(blk, src) - blk.last = final - if final { - s.eofWritten = true - } - // Wait for pending writes. - s.wWg.Wait() - if s.writeErr != nil { - s.err = s.writeErr - return - } - // Transfer encoders from previous write block. - blk.swapEncoders(s.writing) - // Transfer recent offsets to next. - enc.UseBlock(s.writing) - s.writing = blk - s.wWg.Add(1) - go func() { - defer func() { - if r := recover(); r != nil { - s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r) - rdebug.PrintStack() - } - s.wWg.Done() - }() - err := errIncompressible - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. - if len(src) != len(blk.literals) || len(src) != e.o.blockSize { - err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - } - switch err { - case errIncompressible: - if debug { - println("Storing incompressible block as raw") - } - blk.encodeRaw(src) - // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. - case nil: - default: - s.writeErr = err - return - } - _, s.writeErr = s.w.Write(blk.output) - s.nWritten += int64(len(blk.output)) - }() - }(s.current) - return nil -} - -// ReadFrom reads data from r until EOF or error. -// The return value n is the number of bytes read. -// Any error except io.EOF encountered during the read is also returned. -// -// The Copy function uses ReaderFrom if available. -func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { - if debug { - println("Using ReadFrom") - } - - // Flush any current writes. 
- if len(e.state.filling) > 0 { - if err := e.nextBlock(false); err != nil { - return 0, err - } - } - e.state.filling = e.state.filling[:e.o.blockSize] - src := e.state.filling - for { - n2, err := r.Read(src) - if e.o.crc { - _, _ = e.state.encoder.CRC().Write(src[:n2]) - } - // src is now the unfilled part... - src = src[n2:] - n += int64(n2) - switch err { - case io.EOF: - e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] - if debug { - println("ReadFrom: got EOF final block:", len(e.state.filling)) - } - return n, nil - default: - if debug { - println("ReadFrom: got error:", err) - } - e.state.err = err - return n, err - case nil: - } - if len(src) > 0 { - if debug { - println("ReadFrom: got space left in source:", len(src)) - } - continue - } - err = e.nextBlock(false) - if err != nil { - return n, err - } - e.state.filling = e.state.filling[:e.o.blockSize] - src = e.state.filling - } -} - -// Flush will send the currently written data to output -// and block until everything has been written. -// This should only be used on rare occasions where pushing the currently queued data is critical. -func (e *Encoder) Flush() error { - s := &e.state - if len(s.filling) > 0 { - err := e.nextBlock(false) - if err != nil { - return err - } - } - s.wg.Wait() - s.wWg.Wait() - if s.err != nil { - return s.err - } - return s.writeErr -} - -// Close will flush the final output and close the stream. -// The function will block until everything has been written. -// The Encoder can still be re-used after calling this. -func (e *Encoder) Close() error { - s := &e.state - if s.encoder == nil { - return nil - } - err := e.nextBlock(true) - if err != nil { - return err - } - if e.state.fullFrameWritten { - return s.err - } - s.wg.Wait() - s.wWg.Wait() - - if s.err != nil { - return s.err - } - if s.writeErr != nil { - return s.writeErr - } - - // Write CRC - if e.o.crc && s.err == nil { - // heap alloc. - var tmp [4]byte - _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0])) - s.nWritten += 4 - } - - // Add padding with content from crypto/rand.Reader - if s.err == nil && e.o.pad > 0 { - add := calcSkippableFrame(s.nWritten, int64(e.o.pad)) - frame, err := skippableFrame(s.filling[:0], add, rand.Reader) - if err != nil { - return err - } - _, s.err = s.w.Write(frame) - } - return s.err -} - -// EncodeAll will encode all input in src and append it to dst. -// This function can be called concurrently, but each call will only run on a single goroutine. -// If empty input is given, nothing is returned, unless WithZeroFrames is specified. -// Encoded blocks can be concatenated and the result will be the combined input stream. -// Data compressed with EncodeAll can be decoded with the Decoder, -// using either a stream or DecodeAll. -func (e *Encoder) EncodeAll(src, dst []byte) []byte { - if len(src) == 0 { - if e.o.fullZero { - // Add frame header. - fh := frameHeader{ - ContentSize: 0, - WindowSize: MinWindowSize, - SingleSegment: true, - // Adding a checksum would be a waste of space. - Checksum: false, - DictID: 0, - } - dst, _ = fh.appendTo(dst) - - // Write raw block as last one only. - var blk blockHeader - blk.setSize(0) - blk.setType(blockTypeRaw) - blk.setLast(true) - dst = blk.appendTo(dst) - } - return dst - } - e.init.Do(e.initialize) - enc := <-e.encoders - defer func() { - // Release encoder reference to last block. - // If a non-single block is needed the encoder will reset again. - e.encoders <- enc - }() - // Use single segments when above minimum window and below 1MB. 
- single := len(src) < 1<<20 && len(src) > MinWindowSize - if e.o.single != nil { - single = *e.o.single - } - fh := frameHeader{ - ContentSize: uint64(len(src)), - WindowSize: uint32(enc.WindowSize(len(src))), - SingleSegment: single, - Checksum: e.o.crc, - DictID: e.o.dict.ID(), - } - - // If less than 1MB, allocate a buffer up front. - if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem { - dst = make([]byte, 0, len(src)) - } - dst, err := fh.appendTo(dst) - if err != nil { - panic(err) - } - - // If we can do everything in one block, prefer that. - if len(src) <= maxCompressedBlockSize { - enc.Reset(e.o.dict, true) - // Slightly faster with no history and everything in one block. - if e.o.crc { - _, _ = enc.CRC().Write(src) - } - blk := enc.Block() - blk.last = true - if e.o.dict == nil { - enc.EncodeNoHist(blk, src) - } else { - enc.Encode(blk, src) - } - - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. - err := errIncompressible - oldout := blk.output - if len(blk.literals) != len(src) || len(src) != e.o.blockSize { - // Output directly to dst - blk.output = dst - err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - } - - switch err { - case errIncompressible: - if debug { - println("Storing incompressible block as raw") - } - dst = blk.encodeRawTo(dst, src) - case nil: - dst = blk.output - default: - panic(err) - } - blk.output = oldout - } else { - enc.Reset(e.o.dict, false) - blk := enc.Block() - for len(src) > 0 { - todo := src - if len(todo) > e.o.blockSize { - todo = todo[:e.o.blockSize] - } - src = src[len(todo):] - if e.o.crc { - _, _ = enc.CRC().Write(todo) - } - blk.pushOffsets() - enc.Encode(blk, todo) - if len(src) == 0 { - blk.last = true - } - err := errIncompressible - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. - if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize { - err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) - } - - switch err { - case errIncompressible: - if debug { - println("Storing incompressible block as raw") - } - dst = blk.encodeRawTo(dst, todo) - blk.popOffsets() - case nil: - dst = append(dst, blk.output...) - default: - panic(err) - } - blk.reset(nil) - } - } - if e.o.crc { - dst = enc.AppendCRC(dst) - } - // Add padding with content from crypto/rand.Reader - if e.o.pad > 0 { - add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad)) - dst, err = skippableFrame(dst, add, rand.Reader) - if err != nil { - panic(err) - } - } - return dst -} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go deleted file mode 100644 index 18a47eb03..000000000 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ /dev/null @@ -1,312 +0,0 @@ -package zstd - -import ( - "errors" - "fmt" - "runtime" - "strings" -) - -// EOption is an option for creating a encoder. -type EOption func(*encoderOptions) error - -// options retains accumulated state of multiple options. 
-type encoderOptions struct { - concurrent int - level EncoderLevel - single *bool - pad int - blockSize int - windowSize int - crc bool - fullZero bool - noEntropy bool - allLitEntropy bool - customWindow bool - customALEntropy bool - lowMem bool - dict *dict -} - -func (o *encoderOptions) setDefault() { - *o = encoderOptions{ - concurrent: runtime.GOMAXPROCS(0), - crc: true, - single: nil, - blockSize: 1 << 16, - windowSize: 8 << 20, - level: SpeedDefault, - allLitEntropy: true, - lowMem: false, - } -} - -// encoder returns an encoder with the selected options. -func (o encoderOptions) encoder() encoder { - switch o.level { - case SpeedFastest: - if o.dict != nil { - return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} - } - return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} - - case SpeedDefault: - if o.dict != nil { - return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}} - } - return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} - case SpeedBetterCompression: - if o.dict != nil { - return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} - } - return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} - case SpeedBestCompression: - return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} - } - panic("unknown compression level") -} - -// WithEncoderCRC will add CRC value to output. -// Output will be 4 bytes larger. -func WithEncoderCRC(b bool) EOption { - return func(o *encoderOptions) error { o.crc = b; return nil } -} - -// WithEncoderConcurrency will set the concurrency, -// meaning the maximum number of decoders to run concurrently. -// The value supplied must be at least 1. -// By default this will be set to GOMAXPROCS. -func WithEncoderConcurrency(n int) EOption { - return func(o *encoderOptions) error { - if n <= 0 { - return fmt.Errorf("concurrency must be at least 1") - } - o.concurrent = n - return nil - } -} - -// WithWindowSize will set the maximum allowed back-reference distance. -// The value must be a power of two between MinWindowSize and MaxWindowSize. -// A larger value will enable better compression but allocate more memory and, -// for above-default values, take considerably longer. -// The default value is determined by the compression level. -func WithWindowSize(n int) EOption { - return func(o *encoderOptions) error { - switch { - case n < MinWindowSize: - return fmt.Errorf("window size must be at least %d", MinWindowSize) - case n > MaxWindowSize: - return fmt.Errorf("window size must be at most %d", MaxWindowSize) - case (n & (n - 1)) != 0: - return errors.New("window size must be a power of 2") - } - - o.windowSize = n - o.customWindow = true - if o.blockSize > o.windowSize { - o.blockSize = o.windowSize - } - return nil - } -} - -// WithEncoderPadding will add padding to all output so the size will be a multiple of n. -// This can be used to obfuscate the exact output size or make blocks of a certain size. -// The contents will be a skippable frame, so it will be invisible by the decoder. -// n must be > 0 and <= 1GB, 1<<30 bytes. -// The padded area will be filled with data from crypto/rand.Reader. 
-// If `EncodeAll` is used with data already in the destination, the total size will be multiple of this. -func WithEncoderPadding(n int) EOption { - return func(o *encoderOptions) error { - if n <= 0 { - return fmt.Errorf("padding must be at least 1") - } - // No need to waste our time. - if n == 1 { - o.pad = 0 - } - if n > 1<<30 { - return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ") - } - o.pad = n - return nil - } -} - -// EncoderLevel predefines encoder compression levels. -// Only use the constants made available, since the actual mapping -// of these values are very likely to change and your compression could change -// unpredictably when upgrading the library. -type EncoderLevel int - -const ( - speedNotSet EncoderLevel = iota - - // SpeedFastest will choose the fastest reasonable compression. - // This is roughly equivalent to the fastest Zstandard mode. - SpeedFastest - - // SpeedDefault is the default "pretty fast" compression option. - // This is roughly equivalent to the default Zstandard mode (level 3). - SpeedDefault - - // SpeedBetterCompression will yield better compression than the default. - // Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage. - // By using this, notice that CPU usage may go up in the future. - SpeedBetterCompression - - // SpeedBestCompression will choose the best available compression option. - // This will offer the best compression no matter the CPU cost. - SpeedBestCompression - - // speedLast should be kept as the last actual compression option. - // The is not for external usage, but is used to keep track of the valid options. - speedLast -) - -// EncoderLevelFromString will convert a string representation of an encoding level back -// to a compression level. The compare is not case sensitive. -// If the string wasn't recognized, (false, SpeedDefault) will be returned. -func EncoderLevelFromString(s string) (bool, EncoderLevel) { - for l := speedNotSet + 1; l < speedLast; l++ { - if strings.EqualFold(s, l.String()) { - return true, l - } - } - return false, SpeedDefault -} - -// EncoderLevelFromZstd will return an encoder level that closest matches the compression -// ratio of a specific zstd compression level. -// Many input values will provide the same compression level. -func EncoderLevelFromZstd(level int) EncoderLevel { - switch { - case level < 3: - return SpeedFastest - case level >= 3 && level < 6: - return SpeedDefault - case level >= 6 && level < 10: - return SpeedBetterCompression - case level >= 10: - return SpeedBetterCompression - } - return SpeedDefault -} - -// String provides a string representation of the compression level. -func (e EncoderLevel) String() string { - switch e { - case SpeedFastest: - return "fastest" - case SpeedDefault: - return "default" - case SpeedBetterCompression: - return "better" - case SpeedBestCompression: - return "best" - default: - return "invalid" - } -} - -// WithEncoderLevel specifies a predefined compression level. 
-func WithEncoderLevel(l EncoderLevel) EOption { - return func(o *encoderOptions) error { - switch { - case l <= speedNotSet || l >= speedLast: - return fmt.Errorf("unknown encoder level") - } - o.level = l - if !o.customWindow { - switch o.level { - case SpeedFastest: - o.windowSize = 4 << 20 - case SpeedDefault: - o.windowSize = 8 << 20 - case SpeedBetterCompression: - o.windowSize = 16 << 20 - case SpeedBestCompression: - o.windowSize = 32 << 20 - } - } - if !o.customALEntropy { - o.allLitEntropy = l > SpeedFastest - } - - return nil - } -} - -// WithZeroFrames will encode 0 length input as full frames. -// This can be needed for compatibility with zstandard usage, -// but is not needed for this package. -func WithZeroFrames(b bool) EOption { - return func(o *encoderOptions) error { - o.fullZero = b - return nil - } -} - -// WithAllLitEntropyCompression will apply entropy compression if no matches are found. -// Disabling this will skip incompressible data faster, but in cases with no matches but -// skewed character distribution compression is lost. -// Default value depends on the compression level selected. -func WithAllLitEntropyCompression(b bool) EOption { - return func(o *encoderOptions) error { - o.customALEntropy = true - o.allLitEntropy = b - return nil - } -} - -// WithNoEntropyCompression will always skip entropy compression of literals. -// This can be useful if content has matches, but unlikely to benefit from entropy -// compression. Usually the slight speed improvement is not worth enabling this. -func WithNoEntropyCompression(b bool) EOption { - return func(o *encoderOptions) error { - o.noEntropy = b - return nil - } -} - -// WithSingleSegment will set the "single segment" flag when EncodeAll is used. -// If this flag is set, data must be regenerated within a single continuous memory segment. -// In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present. -// As a consequence, the decoder must allocate a memory segment of size equal or larger than size of your content. -// In order to preserve the decoder from unreasonable memory requirements, -// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range. -// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB. -// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations. -// If this is not specified, block encodes will automatically choose this based on the input size. -// This setting has no effect on streamed encodes. -func WithSingleSegment(b bool) EOption { - return func(o *encoderOptions) error { - o.single = &b - return nil - } -} - -// WithLowerEncoderMem will trade in some memory cases trade less memory usage for -// slower encoding speed. -// This will not change the window size which is the primary function for reducing -// memory usage. See WithWindowSize. -func WithLowerEncoderMem(b bool) EOption { - return func(o *encoderOptions) error { - o.lowMem = b - return nil - } -} - -// WithEncoderDict allows to register a dictionary that will be used for the encode. -// The encoder *may* choose to use no dictionary instead for certain payloads. 
-func WithEncoderDict(dict []byte) EOption { - return func(o *encoderOptions) error { - d, err := loadDict(dict) - if err != nil { - return err - } - o.dict = d - return nil - } -} diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go deleted file mode 100644 index fc4a566d3..000000000 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ /dev/null @@ -1,494 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "bytes" - "encoding/hex" - "errors" - "hash" - "io" - "sync" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -type frameDec struct { - o decoderOptions - crc hash.Hash64 - offset int64 - - WindowSize uint64 - - // maxWindowSize is the maximum windows size to support. - // should never be bigger than max-int. - maxWindowSize uint64 - - // In order queue of blocks being decoded. - decoding chan *blockDec - - // Frame history passed between blocks - history history - - rawInput byteBuffer - - // Byte buffer that can be reused for small input blocks. - bBuf byteBuf - - FrameContentSize uint64 - frameDone sync.WaitGroup - - DictionaryID *uint32 - HasCheckSum bool - SingleSegment bool - - // asyncRunning indicates whether the async routine processes input on 'decoding'. - asyncRunningMu sync.Mutex - asyncRunning bool -} - -const ( - // The minimum Window_Size is 1 KB. - MinWindowSize = 1 << 10 - MaxWindowSize = 1 << 29 -) - -var ( - frameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} - skippableFrameMagic = []byte{0x2a, 0x4d, 0x18} -) - -func newFrameDec(o decoderOptions) *frameDec { - d := frameDec{ - o: o, - maxWindowSize: MaxWindowSize, - } - if d.maxWindowSize > o.maxDecodedSize { - d.maxWindowSize = o.maxDecodedSize - } - return &d -} - -// reset will read the frame header and prepare for block decoding. -// If nothing can be read from the input, io.EOF will be returned. -// Any other error indicated that the stream contained data, but -// there was a problem. -func (d *frameDec) reset(br byteBuffer) error { - d.HasCheckSum = false - d.WindowSize = 0 - var b []byte - for { - b = br.readSmall(4) - if b == nil { - return io.EOF - } - if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 { - if debug { - println("Not skippable", hex.EncodeToString(b), hex.EncodeToString(skippableFrameMagic)) - } - // Break if not skippable frame. 
- break - } - // Read size to skip - b = br.readSmall(4) - if b == nil { - println("Reading Frame Size EOF") - return io.ErrUnexpectedEOF - } - n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - println("Skipping frame with", n, "bytes.") - err := br.skipN(int(n)) - if err != nil { - if debug { - println("Reading discarded frame", err) - } - return err - } - } - if !bytes.Equal(b, frameMagic) { - println("Got magic numbers: ", b, "want:", frameMagic) - return ErrMagicMismatch - } - - // Read Frame_Header_Descriptor - fhd, err := br.readByte() - if err != nil { - println("Reading Frame_Header_Descriptor", err) - return err - } - d.SingleSegment = fhd&(1<<5) != 0 - - if fhd&(1<<3) != 0 { - return errors.New("Reserved bit set on frame header") - } - - // Read Window_Descriptor - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor - d.WindowSize = 0 - if !d.SingleSegment { - wd, err := br.readByte() - if err != nil { - println("Reading Window_Descriptor", err) - return err - } - printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) - windowLog := 10 + (wd >> 3) - windowBase := uint64(1) << windowLog - windowAdd := (windowBase / 8) * uint64(wd&0x7) - d.WindowSize = windowBase + windowAdd - } - - // Read Dictionary_ID - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id - d.DictionaryID = nil - if size := fhd & 3; size != 0 { - if size == 3 { - size = 4 - } - b = br.readSmall(int(size)) - if b == nil { - if debug { - println("Reading Dictionary_ID", io.ErrUnexpectedEOF) - } - return io.ErrUnexpectedEOF - } - var id uint32 - switch size { - case 1: - id = uint32(b[0]) - case 2: - id = uint32(b[0]) | (uint32(b[1]) << 8) - case 4: - id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - } - if debug { - println("Dict size", size, "ID:", id) - } - if id > 0 { - // ID 0 means "sorry, no dictionary anyway". - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format - d.DictionaryID = &id - } - } - - // Read Frame_Content_Size - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size - var fcsSize int - v := fhd >> 6 - switch v { - case 0: - if d.SingleSegment { - fcsSize = 1 - } - default: - fcsSize = 1 << v - } - d.FrameContentSize = 0 - if fcsSize > 0 { - b := br.readSmall(fcsSize) - if b == nil { - println("Reading Frame content", io.ErrUnexpectedEOF) - return io.ErrUnexpectedEOF - } - switch fcsSize { - case 1: - d.FrameContentSize = uint64(b[0]) - case 2: - // When FCS_Field_Size is 2, the offset of 256 is added. - d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 - case 4: - d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) - case 8: - d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) - d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) - } - if debug { - println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize) - } - } - // Move this to shared. - d.HasCheckSum = fhd&(1<<2) != 0 - if d.HasCheckSum { - if d.crc == nil { - d.crc = xxhash.New() - } - d.crc.Reset() - } - - if d.WindowSize == 0 && d.SingleSegment { - // We may not need window in this case. 
- d.WindowSize = d.FrameContentSize - if d.WindowSize < MinWindowSize { - d.WindowSize = MinWindowSize - } - } - - if d.WindowSize > d.maxWindowSize { - printf("window size %d > max %d\n", d.WindowSize, d.maxWindowSize) - return ErrWindowSizeExceeded - } - // The minimum Window_Size is 1 KB. - if d.WindowSize < MinWindowSize { - println("got window size: ", d.WindowSize) - return ErrWindowSizeTooSmall - } - d.history.windowSize = int(d.WindowSize) - if d.o.lowMem && d.history.windowSize < maxBlockSize { - d.history.maxSize = d.history.windowSize * 2 - } else { - d.history.maxSize = d.history.windowSize + maxBlockSize - } - // history contains input - maybe we do something - d.rawInput = br - return nil -} - -// next will start decoding the next block from stream. -func (d *frameDec) next(block *blockDec) error { - if debug { - printf("decoding new block %p:%p", block, block.data) - } - err := block.reset(d.rawInput, d.WindowSize) - if err != nil { - println("block error:", err) - // Signal the frame decoder we have a problem. - d.sendErr(block, err) - return err - } - block.input <- struct{}{} - if debug { - println("next block:", block) - } - d.asyncRunningMu.Lock() - defer d.asyncRunningMu.Unlock() - if !d.asyncRunning { - return nil - } - if block.Last { - // We indicate the frame is done by sending io.EOF - d.decoding <- block - return io.EOF - } - d.decoding <- block - return nil -} - -// sendEOF will queue an error block on the frame. -// This will cause the frame decoder to return when it encounters the block. -// Returns true if the decoder was added. -func (d *frameDec) sendErr(block *blockDec, err error) bool { - d.asyncRunningMu.Lock() - defer d.asyncRunningMu.Unlock() - if !d.asyncRunning { - return false - } - - println("sending error", err.Error()) - block.sendErr(err) - d.decoding <- block - return true -} - -// checkCRC will check the checksum if the frame has one. -// Will return ErrCRCMismatch if crc check failed, otherwise nil. -func (d *frameDec) checkCRC() error { - if !d.HasCheckSum { - return nil - } - var tmp [4]byte - got := d.crc.Sum64() - // Flip to match file order. - tmp[0] = byte(got >> 0) - tmp[1] = byte(got >> 8) - tmp[2] = byte(got >> 16) - tmp[3] = byte(got >> 24) - - // We can overwrite upper tmp now - want := d.rawInput.readSmall(4) - if want == nil { - println("CRC missing?") - return io.ErrUnexpectedEOF - } - - if !bytes.Equal(tmp[:], want) { - if debug { - println("CRC Check Failed:", tmp[:], "!=", want) - } - return ErrCRCMismatch - } - if debug { - println("CRC ok", tmp[:]) - } - return nil -} - -func (d *frameDec) initAsync() { - if !d.o.lowMem && !d.SingleSegment { - // set max extra size history to 10MB. - d.history.maxSize = d.history.windowSize + maxBlockSize*5 - } - // re-alloc if more than one extra block size. - if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize { - d.history.b = make([]byte, 0, d.history.maxSize) - } - if cap(d.history.b) < d.history.maxSize { - d.history.b = make([]byte, 0, d.history.maxSize) - } - if cap(d.decoding) < d.o.concurrent { - d.decoding = make(chan *blockDec, d.o.concurrent) - } - if debug { - h := d.history - printf("history init. len: %d, cap: %d", len(h.b), cap(h.b)) - } - d.asyncRunningMu.Lock() - d.asyncRunning = true - d.asyncRunningMu.Unlock() -} - -// startDecoder will start decoding blocks and write them to the writer. -// The decoder will stop as soon as an error occurs or at end of frame. 
-// When the frame has finished decoding the *bufio.Reader -// containing the remaining input will be sent on frameDec.frameDone. -func (d *frameDec) startDecoder(output chan decodeOutput) { - written := int64(0) - - defer func() { - d.asyncRunningMu.Lock() - d.asyncRunning = false - d.asyncRunningMu.Unlock() - - // Drain the currently decoding. - d.history.error = true - flushdone: - for { - select { - case b := <-d.decoding: - b.history <- &d.history - output <- <-b.result - default: - break flushdone - } - } - println("frame decoder done, signalling done") - d.frameDone.Done() - }() - // Get decoder for first block. - block := <-d.decoding - block.history <- &d.history - for { - var next *blockDec - // Get result - r := <-block.result - if r.err != nil { - println("Result contained error", r.err) - output <- r - return - } - if debug { - println("got result, from ", d.offset, "to", d.offset+int64(len(r.b))) - d.offset += int64(len(r.b)) - } - if !block.Last { - // Send history to next block - select { - case next = <-d.decoding: - if debug { - println("Sending ", len(d.history.b), "bytes as history") - } - next.history <- &d.history - default: - // Wait until we have sent the block, so - // other decoders can potentially get the decoder. - next = nil - } - } - - // Add checksum, async to decoding. - if d.HasCheckSum { - n, err := d.crc.Write(r.b) - if err != nil { - r.err = err - if n != len(r.b) { - r.err = io.ErrShortWrite - } - output <- r - return - } - } - written += int64(len(r.b)) - if d.SingleSegment && uint64(written) > d.FrameContentSize { - println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize) - r.err = ErrFrameSizeExceeded - output <- r - return - } - if block.Last { - r.err = d.checkCRC() - output <- r - return - } - output <- r - if next == nil { - // There was no decoder available, we wait for one now that we have sent to the writer. - if debug { - println("Sending ", len(d.history.b), " bytes as history") - } - next = <-d.decoding - next.history <- &d.history - } - block = next - } -} - -// runDecoder will create a sync decoder that will decode a block of data. -func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { - saved := d.history.b - - // We use the history for output to avoid copying it. - d.history.b = dst - // Store input length, so we only check new data. - crcStart := len(dst) - var err error - for { - err = dec.reset(d.rawInput, d.WindowSize) - if err != nil { - break - } - if debug { - println("next block:", dec) - } - err = dec.decodeBuf(&d.history) - if err != nil || dec.Last { - break - } - if uint64(len(d.history.b)) > d.o.maxDecodedSize { - err = ErrDecoderSizeExceeded - break - } - if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize { - println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize) - err = ErrFrameSizeExceeded - break - } - } - dst = d.history.b - if err == nil { - if d.HasCheckSum { - var n int - n, err = d.crc.Write(dst[crcStart:]) - if err == nil { - if n != len(dst)-crcStart { - err = io.ErrShortWrite - } else { - err = d.checkCRC() - } - } - } - } - d.history.b = saved - return dst, err -} diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go deleted file mode 100644 index 4ef7f5a3e..000000000 --- a/vendor/github.com/klauspost/compress/zstd/frameenc.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. 
-// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "fmt" - "io" - "math" - "math/bits" -) - -type frameHeader struct { - ContentSize uint64 - WindowSize uint32 - SingleSegment bool - Checksum bool - DictID uint32 -} - -const maxHeaderSize = 14 - -func (f frameHeader) appendTo(dst []byte) ([]byte, error) { - dst = append(dst, frameMagic...) - var fhd uint8 - if f.Checksum { - fhd |= 1 << 2 - } - if f.SingleSegment { - fhd |= 1 << 5 - } - - var dictIDContent []byte - if f.DictID > 0 { - var tmp [4]byte - if f.DictID < 256 { - fhd |= 1 - tmp[0] = uint8(f.DictID) - dictIDContent = tmp[:1] - } else if f.DictID < 1<<16 { - fhd |= 2 - binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID)) - dictIDContent = tmp[:2] - } else { - fhd |= 3 - binary.LittleEndian.PutUint32(tmp[:4], f.DictID) - dictIDContent = tmp[:4] - } - } - var fcs uint8 - if f.ContentSize >= 256 { - fcs++ - } - if f.ContentSize >= 65536+256 { - fcs++ - } - if f.ContentSize >= 0xffffffff { - fcs++ - } - - fhd |= fcs << 6 - - dst = append(dst, fhd) - if !f.SingleSegment { - const winLogMin = 10 - windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3 - dst = append(dst, uint8(windowLog)) - } - if f.DictID > 0 { - dst = append(dst, dictIDContent...) - } - switch fcs { - case 0: - if f.SingleSegment { - dst = append(dst, uint8(f.ContentSize)) - } - // Unless SingleSegment is set, framessizes < 256 are nto stored. - case 1: - f.ContentSize -= 256 - dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8)) - case 2: - dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24)) - case 3: - dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24), - uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56)) - default: - panic("invalid fcs") - } - return dst, nil -} - -const skippableFrameHeader = 4 + 4 - -// calcSkippableFrame will return a total size to be added for written -// to be divisible by multiple. -// The value will always be > skippableFrameHeader. -// The function will panic if written < 0 or wantMultiple <= 0. -func calcSkippableFrame(written, wantMultiple int64) int { - if wantMultiple <= 0 { - panic("wantMultiple <= 0") - } - if written < 0 { - panic("written < 0") - } - leftOver := written % wantMultiple - if leftOver == 0 { - return 0 - } - toAdd := wantMultiple - leftOver - for toAdd < skippableFrameHeader { - toAdd += wantMultiple - } - return int(toAdd) -} - -// skippableFrame will add a skippable frame with a total size of bytes. -// total should be >= skippableFrameHeader and < math.MaxUint32. -func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) { - if total == 0 { - return dst, nil - } - if total < skippableFrameHeader { - return dst, fmt.Errorf("requested skippable frame (%d) < 8", total) - } - if int64(total) > math.MaxUint32 { - return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total) - } - dst = append(dst, 0x50, 0x2a, 0x4d, 0x18) - f := uint32(total - skippableFrameHeader) - dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)) - start := len(dst) - dst = append(dst, make([]byte, f)...) 
- _, err := io.ReadFull(r, dst[start:]) - return dst, err -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go deleted file mode 100644 index e6d3d49b3..000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go +++ /dev/null @@ -1,385 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" -) - -const ( - tablelogAbsoluteMax = 9 -) - -const ( - /*!MEMORY_USAGE : - * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) - * Increasing memory usage improves compression ratio - * Reduced memory usage can improve speed, due to cache effect - * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ - maxMemoryUsage = tablelogAbsoluteMax + 2 - - maxTableLog = maxMemoryUsage - 2 - maxTablesize = 1 << maxTableLog - maxTableMask = (1 << maxTableLog) - 1 - minTablelog = 5 - maxSymbolValue = 255 -) - -// fseDecoder provides temporary storage for compression and decompression. -type fseDecoder struct { - dt [maxTablesize]decSymbol // Decompression table. - symbolLen uint16 // Length of active part of the symbol table. - actualTableLog uint8 // Selected tablelog. - maxBits uint8 // Maximum number of additional bits - - // used for table creation to avoid allocations. - stateTable [256]uint16 - norm [maxSymbolValue + 1]int16 - preDefined bool -} - -// tableStep returns the next table index. -func tableStep(tableSize uint32) uint32 { - return (tableSize >> 1) + (tableSize >> 3) + 3 -} - -// readNCount will read the symbol distribution so decoding tables can be constructed. 
-func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error {
-	var (
-		charnum   uint16
-		previous0 bool
-	)
-	if b.remain() < 4 {
-		return errors.New("input too small")
-	}
-	bitStream := b.Uint32NC()
-	nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog
-	if nbBits > tablelogAbsoluteMax {
-		println("Invalid tablelog:", nbBits)
-		return errors.New("tableLog too large")
-	}
-	bitStream >>= 4
-	bitCount := uint(4)
-
-	s.actualTableLog = uint8(nbBits)
-	remaining := int32((1 << nbBits) + 1)
-	threshold := int32(1 << nbBits)
-	gotTotal := int32(0)
-	nbBits++
-
-	for remaining > 1 && charnum <= maxSymbol {
-		if previous0 {
-			//println("prev0")
-			n0 := charnum
-			for (bitStream & 0xFFFF) == 0xFFFF {
-				//println("24 x 0")
-				n0 += 24
-				if r := b.remain(); r > 5 {
-					b.advance(2)
-					// The check above should make sure we can read 32 bits
-					bitStream = b.Uint32NC() >> bitCount
-				} else {
-					// end of bit stream
-					bitStream >>= 16
-					bitCount += 16
-				}
-			}
-			//printf("bitstream: %d, 0b%b", bitStream&3, bitStream)
-			for (bitStream & 3) == 3 {
-				n0 += 3
-				bitStream >>= 2
-				bitCount += 2
-			}
-			n0 += uint16(bitStream & 3)
-			bitCount += 2
-
-			if n0 > maxSymbolValue {
-				return errors.New("maxSymbolValue too small")
-			}
-			//println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0)
-			for charnum < n0 {
-				s.norm[uint8(charnum)] = 0
-				charnum++
-			}
-
-			if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
-				b.advance(bitCount >> 3)
-				bitCount &= 7
-				// The check above should make sure we can read 32 bits
-				bitStream = b.Uint32NC() >> bitCount
-			} else {
-				bitStream >>= 2
-			}
-		}
-
-		max := (2*threshold - 1) - remaining
-		var count int32
-
-		if int32(bitStream)&(threshold-1) < max {
-			count = int32(bitStream) & (threshold - 1)
-			if debugAsserts && nbBits < 1 {
-				panic("nbBits underflow")
-			}
-			bitCount += nbBits - 1
-		} else {
-			count = int32(bitStream) & (2*threshold - 1)
-			if count >= threshold {
-				count -= max
-			}
-			bitCount += nbBits
-		}
-
-		// extra accuracy
-		count--
-		if count < 0 {
-			// -1 means +1
-			remaining += count
-			gotTotal -= count
-		} else {
-			remaining -= count
-			gotTotal += count
-		}
-		s.norm[charnum&0xff] = int16(count)
-		charnum++
-		previous0 = count == 0
-		for remaining < threshold {
-			nbBits--
-			threshold >>= 1
-		}
-
-		if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
-			b.advance(bitCount >> 3)
-			bitCount &= 7
-			// The check above should make sure we can read 32 bits
-			bitStream = b.Uint32NC() >> (bitCount & 31)
-		} else {
-			bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
-			b.off = len(b.b) - 4
-			bitStream = b.Uint32() >> (bitCount & 31)
-		}
-	}
-	s.symbolLen = charnum
-	if s.symbolLen <= 1 {
-		return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
-	}
-	if s.symbolLen > maxSymbolValue+1 {
-		return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
-	}
-	if remaining != 1 {
-		return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
-	}
-	if bitCount > 32 {
-		return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
-	}
-	if gotTotal != 1<<s.actualTableLog {
-		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
-	}
-	b.advance((bitCount + 7) >> 3)
-	// println(s.norm[:s.symbolLen], s.symbolLen)
-	return s.buildDtable()
-}
-
-// decSymbol contains information about a state entry,
-// Including the state offset base, the output symbol and
-// the number of bits to read for the low part of the destination state.
-// Using a composite uint64 is faster than a struct with separate members.
-type decSymbol uint64 - -func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol { - return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) -} - -func (d decSymbol) nbBits() uint8 { - return uint8(d) -} - -func (d decSymbol) addBits() uint8 { - return uint8(d >> 8) -} - -func (d decSymbol) newState() uint16 { - return uint16(d >> 16) -} - -func (d decSymbol) baseline() uint32 { - return uint32(d >> 32) -} - -func (d decSymbol) baselineInt() int { - return int(d >> 32) -} - -func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) { - *d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) -} - -func (d *decSymbol) setNBits(nBits uint8) { - const mask = 0xffffffffffffff00 - *d = (*d & mask) | decSymbol(nBits) -} - -func (d *decSymbol) setAddBits(addBits uint8) { - const mask = 0xffffffffffff00ff - *d = (*d & mask) | (decSymbol(addBits) << 8) -} - -func (d *decSymbol) setNewState(state uint16) { - const mask = 0xffffffff0000ffff - *d = (*d & mask) | decSymbol(state)<<16 -} - -func (d *decSymbol) setBaseline(baseline uint32) { - const mask = 0xffffffff - *d = (*d & mask) | decSymbol(baseline)<<32 -} - -func (d *decSymbol) setExt(addBits uint8, baseline uint32) { - const mask = 0xffff00ff - *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32) -} - -// decSymbolValue returns the transformed decSymbol for the given symbol. -func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) { - if int(symb) >= len(t) { - return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t)) - } - lu := t[symb] - return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil -} - -// setRLE will set the decoder til RLE mode. -func (s *fseDecoder) setRLE(symbol decSymbol) { - s.actualTableLog = 0 - s.maxBits = symbol.addBits() - s.dt[0] = symbol -} - -// buildDtable will build the decoding table. -func (s *fseDecoder) buildDtable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - symbolNext := s.stateTable[:256] - - // Init, lay down lowprob symbols - { - for i, v := range s.norm[:s.symbolLen] { - if v == -1 { - s.dt[highThreshold].setAddBits(uint8(i)) - highThreshold-- - symbolNext[i] = 1 - } else { - symbolNext[i] = uint16(v) - } - } - } - // Spread symbols - { - tableMask := tableSize - 1 - step := tableStep(tableSize) - position := uint32(0) - for ss, v := range s.norm[:s.symbolLen] { - for i := 0; i < int(v); i++ { - s.dt[position].setAddBits(uint8(ss)) - position = (position + step) & tableMask - for position > highThreshold { - // lowprob area - position = (position + step) & tableMask - } - } - } - if position != 0 { - // position must reach all cells once, otherwise normalizedCounter is incorrect - return errors.New("corrupted input (position != 0)") - } - } - - // Build Decoding table - { - tableSize := uint16(1 << s.actualTableLog) - for u, v := range s.dt[:tableSize] { - symbol := v.addBits() - nextState := symbolNext[symbol] - symbolNext[symbol] = nextState + 1 - nBits := s.actualTableLog - byte(highBits(uint32(nextState))) - s.dt[u&maxTableMask].setNBits(nBits) - newState := (nextState << nBits) - tableSize - if newState > tableSize { - return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) - } - if newState == uint16(u) && nBits == 0 { - // Seems weird that this is possible with nbits > 0. 
- return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) - } - s.dt[u&maxTableMask].setNewState(newState) - } - } - return nil -} - -// transform will transform the decoder table into a table usable for -// decoding without having to apply the transformation while decoding. -// The state will contain the base value and the number of bits to read. -func (s *fseDecoder) transform(t []baseOffset) error { - tableSize := uint16(1 << s.actualTableLog) - s.maxBits = 0 - for i, v := range s.dt[:tableSize] { - add := v.addBits() - if int(add) >= len(t) { - return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t)) - } - lu := t[add] - if lu.addBits > s.maxBits { - s.maxBits = lu.addBits - } - v.setExt(lu.addBits, lu.baseLine) - s.dt[i] = v - } - return nil -} - -type fseState struct { - dt []decSymbol - state decSymbol -} - -// Initialize and decodeAsync first state and symbol. -func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { - s.dt = dt - br.fill() - s.state = dt[br.getBits(tableLog)] -} - -// next returns the current symbol and sets the next state. -// At least tablelog bits must be available in the bit reader. -func (s *fseState) next(br *bitReader) { - lowBits := uint16(br.getBits(s.state.nbBits())) - s.state = s.dt[s.state.newState()+lowBits] -} - -// finished returns true if all bits have been read from the bitstream -// and the next state would require reading bits from the input. -func (s *fseState) finished(br *bitReader) bool { - return br.finished() && s.state.nbBits() > 0 -} - -// final returns the current state symbol without decoding the next. -func (s *fseState) final() (int, uint8) { - return s.state.baselineInt(), s.state.addBits() -} - -// final returns the current state symbol without decoding the next. -func (s decSymbol) final() (int, uint8) { - return s.baselineInt(), s.addBits() -} - -// nextFast returns the next symbol and sets the next state. -// This can only be used if no symbols are 0 bits. -// At least tablelog bits must be available in the bit reader. -func (s *fseState) nextFast(br *bitReader) (uint32, uint8) { - lowBits := uint16(br.getBitsFast(s.state.nbBits())) - s.state = s.dt[s.state.newState()+lowBits] - return s.state.baseline(), s.state.addBits() -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go deleted file mode 100644 index b80709d5e..000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go +++ /dev/null @@ -1,726 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "math" -) - -const ( - // For encoding we only support up to - maxEncTableLog = 8 - maxEncTablesize = 1 << maxTableLog - maxEncTableMask = (1 << maxTableLog) - 1 - minEncTablelog = 5 - maxEncSymbolValue = maxMatchLengthSymbol -) - -// Scratch provides temporary storage for compression and decompression. -type fseEncoder struct { - symbolLen uint16 // Length of active part of the symbol table. - actualTableLog uint8 // Selected tablelog. - ct cTable // Compression tables. - maxCount int // count of the most probable symbol - zeroBits bool // no bits has prob > 50%. - clearCount bool // clear count - useRLE bool // This encoder is for RLE - preDefined bool // This encoder is predefined. 
- reUsed bool // Set to know when the encoder has been reused. - rleVal uint8 // RLE Symbol - maxBits uint8 // Maximum output bits after transform. - - // TODO: Technically zstd should be fine with 64 bytes. - count [256]uint32 - norm [256]int16 -} - -// cTable contains tables used for compression. -type cTable struct { - tableSymbol []byte - stateTable []uint16 - symbolTT []symbolTransform -} - -// symbolTransform contains the state transform for a symbol. -type symbolTransform struct { - deltaNbBits uint32 - deltaFindState int16 - outBits uint8 -} - -// String prints values as a human readable string. -func (s symbolTransform) String() string { - return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits) -} - -// Histogram allows to populate the histogram and skip that step in the compression, -// It otherwise allows to inspect the histogram when compression is done. -// To indicate that you have populated the histogram call HistogramFinished -// with the value of the highest populated symbol, as well as the number of entries -// in the most populated entry. These are accepted at face value. -// The returned slice will always be length 256. -func (s *fseEncoder) Histogram() []uint32 { - return s.count[:] -} - -// HistogramFinished can be called to indicate that the histogram has been populated. -// maxSymbol is the index of the highest set symbol of the next data segment. -// maxCount is the number of entries in the most populated entry. -// These are accepted at face value. -func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { - s.maxCount = maxCount - s.symbolLen = uint16(maxSymbol) + 1 - s.clearCount = maxCount != 0 -} - -// prepare will prepare and allocate scratch tables used for both compression and decompression. -func (s *fseEncoder) prepare() (*fseEncoder, error) { - if s == nil { - s = &fseEncoder{} - } - s.useRLE = false - if s.clearCount && s.maxCount == 0 { - for i := range s.count { - s.count[i] = 0 - } - s.clearCount = false - } - return s, nil -} - -// allocCtable will allocate tables needed for compression. -// If existing tables a re big enough, they are simply re-used. -func (s *fseEncoder) allocCtable() { - tableSize := 1 << s.actualTableLog - // get tableSymbol that is big enough. - if cap(s.ct.tableSymbol) < tableSize { - s.ct.tableSymbol = make([]byte, tableSize) - } - s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] - - ctSize := tableSize - if cap(s.ct.stateTable) < ctSize { - s.ct.stateTable = make([]uint16, ctSize) - } - s.ct.stateTable = s.ct.stateTable[:ctSize] - - if cap(s.ct.symbolTT) < 256 { - s.ct.symbolTT = make([]symbolTransform, 256) - } - s.ct.symbolTT = s.ct.symbolTT[:256] -} - -// buildCTable will populate the compression table so it is ready to be used. 
-func (s *fseEncoder) buildCTable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - var cumul [256]int16 - - s.allocCtable() - tableSymbol := s.ct.tableSymbol[:tableSize] - // symbol start positions - { - cumul[0] = 0 - for ui, v := range s.norm[:s.symbolLen-1] { - u := byte(ui) // one less than reference - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = u - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - } - // Encode last symbol separately to avoid overflowing u - u := int(s.symbolLen - 1) - v := s.norm[s.symbolLen-1] - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = byte(u) - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - if uint32(cumul[s.symbolLen]) != tableSize { - return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) - } - cumul[s.symbolLen] = int16(tableSize) + 1 - } - // Spread symbols - s.zeroBits = false - { - step := tableStep(tableSize) - tableMask := tableSize - 1 - var position uint32 - // if any symbol > largeLimit, we may have 0 bits output. - largeLimit := int16(1 << (s.actualTableLog - 1)) - for ui, v := range s.norm[:s.symbolLen] { - symbol := byte(ui) - if v > largeLimit { - s.zeroBits = true - } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { - tableSymbol[position] = symbol - position = (position + step) & tableMask - for position > highThreshold { - position = (position + step) & tableMask - } /* Low proba area */ - } - } - - // Check if we have gone through all positions - if position != 0 { - return errors.New("position!=0") - } - } - - // Build table - table := s.ct.stateTable - { - tsi := int(tableSize) - for u, v := range tableSymbol { - // TableU16 : sorted by symbol order; gives next state value - table[cumul[v]] = uint16(tsi + u) - cumul[v]++ - } - } - - // Build Symbol Transformation Table - { - total := int16(0) - symbolTT := s.ct.symbolTT[:s.symbolLen] - tableLog := s.actualTableLog - tl := (uint32(tableLog) << 16) - (1 << tableLog) - for i, v := range s.norm[:s.symbolLen] { - switch v { - case 0: - case -1, 1: - symbolTT[i].deltaNbBits = tl - symbolTT[i].deltaFindState = total - 1 - total++ - default: - maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) - minStatePlus := uint32(v) << maxBitsOut - symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus - symbolTT[i].deltaFindState = total - v - total += v - } - } - if total != int16(tableSize) { - return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) - } - } - return nil -} - -var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} - -func (s *fseEncoder) setRLE(val byte) { - s.allocCtable() - s.actualTableLog = 0 - s.ct.stateTable = s.ct.stateTable[:1] - s.ct.symbolTT[val] = symbolTransform{ - deltaFindState: 0, - deltaNbBits: 0, - } - if debug { - println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) - } - s.rleVal = val - s.useRLE = true -} - -// setBits will set output bits for the transform. -// if nil is provided, the number of bits is equal to the index. 
-func (s *fseEncoder) setBits(transform []byte) { - if s.reUsed || s.preDefined { - return - } - if s.useRLE { - if transform == nil { - s.ct.symbolTT[s.rleVal].outBits = s.rleVal - s.maxBits = s.rleVal - return - } - s.maxBits = transform[s.rleVal] - s.ct.symbolTT[s.rleVal].outBits = s.maxBits - return - } - if transform == nil { - for i := range s.ct.symbolTT[:s.symbolLen] { - s.ct.symbolTT[i].outBits = uint8(i) - } - s.maxBits = uint8(s.symbolLen - 1) - return - } - s.maxBits = 0 - for i, v := range transform[:s.symbolLen] { - s.ct.symbolTT[i].outBits = v - if v > s.maxBits { - // We could assume bits always going up, but we play safe. - s.maxBits = v - } - } -} - -// normalizeCount will normalize the count of the symbols so -// the total is equal to the table size. -// If successful, compression tables will also be made ready. -func (s *fseEncoder) normalizeCount(length int) error { - if s.reUsed { - return nil - } - s.optimalTableLog(length) - var ( - tableLog = s.actualTableLog - scale = 62 - uint64(tableLog) - step = (1 << 62) / uint64(length) - vStep = uint64(1) << (scale - 20) - stillToDistribute = int16(1 << tableLog) - largest int - largestP int16 - lowThreshold = (uint32)(length >> tableLog) - ) - if s.maxCount == length { - s.useRLE = true - return nil - } - s.useRLE = false - for i, cnt := range s.count[:s.symbolLen] { - // already handled - // if (count[s] == s.length) return 0; /* rle special case */ - - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - stillToDistribute-- - } else { - proba := (int16)((uint64(cnt) * step) >> scale) - if proba < 8 { - restToBeat := vStep * uint64(rtbTable[proba]) - v := uint64(cnt)*step - (uint64(proba) << scale) - if v > restToBeat { - proba++ - } - } - if proba > largestP { - largestP = proba - largest = i - } - s.norm[i] = proba - stillToDistribute -= proba - } - } - - if -stillToDistribute >= (s.norm[largest] >> 1) { - // corner case, need another normalization method - err := s.normalizeCount2(length) - if err != nil { - return err - } - if debugAsserts { - err = s.validateNorm() - if err != nil { - return err - } - } - return s.buildCTable() - } - s.norm[largest] += stillToDistribute - if debugAsserts { - err := s.validateNorm() - if err != nil { - return err - } - } - return s.buildCTable() -} - -// Secondary normalization method. -// To be used when primary method fails. 
-func (s *fseEncoder) normalizeCount2(length int) error { - const notYetAssigned = -2 - var ( - distributed uint32 - total = uint32(length) - tableLog = s.actualTableLog - lowThreshold = total >> tableLog - lowOne = (total * 3) >> (tableLog + 1) - ) - for i, cnt := range s.count[:s.symbolLen] { - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - distributed++ - total -= cnt - continue - } - if cnt <= lowOne { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - s.norm[i] = notYetAssigned - } - toDistribute := (1 << tableLog) - distributed - - if (total / toDistribute) > lowOne { - // risk of rounding to zero - lowOne = (total * 3) / (toDistribute * 2) - for i, cnt := range s.count[:s.symbolLen] { - if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - } - toDistribute = (1 << tableLog) - distributed - } - if distributed == uint32(s.symbolLen)+1 { - // all values are pretty poor; - // probably incompressible data (should have already been detected); - // find max, then give all remaining points to max - var maxV int - var maxC uint32 - for i, cnt := range s.count[:s.symbolLen] { - if cnt > maxC { - maxV = i - maxC = cnt - } - } - s.norm[maxV] += int16(toDistribute) - return nil - } - - if total == 0 { - // all of the symbols were low enough for the lowOne or lowThreshold - for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { - if s.norm[i] > 0 { - toDistribute-- - s.norm[i]++ - } - } - return nil - } - - var ( - vStepLog = 62 - uint64(tableLog) - mid = uint64((1 << (vStepLog - 1)) - 1) - rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining - tmpTotal = mid - ) - for i, cnt := range s.count[:s.symbolLen] { - if s.norm[i] == notYetAssigned { - var ( - end = tmpTotal + uint64(cnt)*rStep - sStart = uint32(tmpTotal >> vStepLog) - sEnd = uint32(end >> vStepLog) - weight = sEnd - sStart - ) - if weight < 1 { - return errors.New("weight < 1") - } - s.norm[i] = int16(weight) - tmpTotal = end - } - } - return nil -} - -// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog -func (s *fseEncoder) optimalTableLog(length int) { - tableLog := uint8(maxEncTableLog) - minBitsSrc := highBit(uint32(length)) + 1 - minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2 - minBits := uint8(minBitsSymbols) - if minBitsSrc < minBitsSymbols { - minBits = uint8(minBitsSrc) - } - - maxBitsSrc := uint8(highBit(uint32(length-1))) - 2 - if maxBitsSrc < tableLog { - // Accuracy can be reduced - tableLog = maxBitsSrc - } - if minBits > tableLog { - tableLog = minBits - } - // Need a minimum to safely represent all symbol values - if tableLog < minEncTablelog { - tableLog = minEncTablelog - } - if tableLog > maxEncTableLog { - tableLog = maxEncTableLog - } - s.actualTableLog = tableLog -} - -// validateNorm validates the normalized histogram table. 
-func (s *fseEncoder) validateNorm() (err error) { - var total int - for _, v := range s.norm[:s.symbolLen] { - if v >= 0 { - total += int(v) - } else { - total -= int(v) - } - } - defer func() { - if err == nil { - return - } - fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) - for i, v := range s.norm[:s.symbolLen] { - fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) - } - }() - if total != (1 << s.actualTableLog) { - return fmt.Errorf("warning: Total == %d != %d", total, 1<> 3) + 3 + 2 - - // Write Table Size - bitStream = uint32(tableLog - minEncTablelog) - bitCount = uint(4) - remaining = int16(tableSize + 1) /* +1 for extra accuracy */ - threshold = int16(tableSize) - nbBits = uint(tableLog + 1) - outP = len(out) - ) - if cap(out) < outP+maxHeaderSize { - out = append(out, make([]byte, maxHeaderSize*3)...) - out = out[:len(out)-maxHeaderSize*3] - } - out = out[:outP+maxHeaderSize] - - // stops at 1 - for remaining > 1 { - if previous0 { - start := charnum - for s.norm[charnum] == 0 { - charnum++ - } - for charnum >= start+24 { - start += 24 - bitStream += uint32(0xFFFF) << bitCount - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - } - for charnum >= start+3 { - start += 3 - bitStream += 3 << bitCount - bitCount += 2 - } - bitStream += uint32(charnum-start) << bitCount - bitCount += 2 - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - count := s.norm[charnum] - charnum++ - max := (2*threshold - 1) - remaining - if count < 0 { - remaining += count - } else { - remaining -= count - } - count++ // +1 for extra accuracy - if count >= threshold { - count += max // [0..max[ [max..threshold[ (...) 
[threshold+max 2*threshold[ - } - bitStream += uint32(count) << bitCount - bitCount += nbBits - if count < max { - bitCount-- - } - - previous0 = count == 1 - if remaining < 1 { - return nil, errors.New("internal error: remaining < 1") - } - for remaining < threshold { - nbBits-- - threshold >>= 1 - } - - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - if outP+2 > len(out) { - return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen]) - } - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += int((bitCount + 7) / 8) - - if charnum > s.symbolLen { - return nil, errors.New("internal error: charnum > s.symbolLen") - } - return out[:outP], nil -} - -// Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) -// note 1 : assume symbolValue is valid (<= maxSymbolValue) -// note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits * -func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 { - minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16 - threshold := (minNbBits + 1) << 16 - if debugAsserts { - if !(s.actualTableLog < 16) { - panic("!s.actualTableLog < 16") - } - // ensure enough room for renormalization double shift - if !(uint8(accuracyLog) < 31-s.actualTableLog) { - panic("!uint8(accuracyLog) < 31-s.actualTableLog") - } - } - tableSize := uint32(1) << s.actualTableLog - deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize) - // linear interpolation (very approximate) - normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog - bitMultiplier := uint32(1) << accuracyLog - if debugAsserts { - if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold { - panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold") - } - if normalizedDeltaFromThreshold > bitMultiplier { - panic("normalizedDeltaFromThreshold > bitMultiplier") - } - } - return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold -} - -// Returns the cost in bits of encoding the distribution in count using ctable. -// Histogram should only be up to the last non-zero symbol. -// Returns an -1 if ctable cannot represent all the symbols in count. -func (s *fseEncoder) approxSize(hist []uint32) uint32 { - if int(s.symbolLen) < len(hist) { - // More symbols than we have. - return math.MaxUint32 - } - if s.useRLE { - // We will never reuse RLE encoders. - return math.MaxUint32 - } - const kAccuracyLog = 8 - badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog - var cost uint32 - for i, v := range hist { - if v == 0 { - continue - } - if s.norm[i] == 0 { - return math.MaxUint32 - } - bitCost := s.bitCost(uint8(i), kAccuracyLog) - if bitCost > badCost { - return math.MaxUint32 - } - cost += v * bitCost - } - return cost >> kAccuracyLog -} - -// maxHeaderSize returns the maximum header size in bits. -// This is not exact size, but we want a penalty for new tables anyway. -func (s *fseEncoder) maxHeaderSize() uint32 { - if s.preDefined { - return 0 - } - if s.useRLE { - return 8 - } - return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8 -} - -// cState contains the compression state of a stream. 
-type cState struct { - bw *bitWriter - stateTable []uint16 - state uint16 -} - -// init will initialize the compression state to the first symbol of the stream. -func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { - c.bw = bw - c.stateTable = ct.stateTable - if len(c.stateTable) == 1 { - // RLE - c.stateTable[0] = uint16(0) - c.state = 0 - return - } - nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 - im := int32((nbBitsOut << 16) - first.deltaNbBits) - lu := (im >> nbBitsOut) + int32(first.deltaFindState) - c.state = c.stateTable[lu] - return -} - -// encode the output symbol provided and write it to the bitstream. -func (c *cState) encode(symbolTT symbolTransform) { - nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 - dstState := int32(c.state>>(nbBitsOut&15)) + int32(symbolTT.deltaFindState) - c.bw.addBits16NC(c.state, uint8(nbBitsOut)) - c.state = c.stateTable[dstState] -} - -// flush will write the tablelog to the output and flush the remaining full bytes. -func (c *cState) flush(tableLog uint8) { - c.bw.flush32() - c.bw.addBits16NC(c.state, tableLog) -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go deleted file mode 100644 index 6c17dc17f..000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "fmt" - "math" - "sync" -) - -var ( - // fsePredef are the predefined fse tables as defined here: - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions - // These values are already transformed. - fsePredef [3]fseDecoder - - // fsePredefEnc are the predefined encoder based on fse tables as defined here: - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions - // These values are already transformed. - fsePredefEnc [3]fseEncoder - - // symbolTableX contain the transformations needed for each type as defined in - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets - symbolTableX [3][]baseOffset - - // maxTableSymbol is the biggest supported symbol for each table type - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets - maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol} - - // bitTables is the bits table for each table. - bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]} -) - -type tableIndex uint8 - -const ( - // indexes for fsePredef and symbolTableX - tableLiteralLengths tableIndex = 0 - tableOffsets tableIndex = 1 - tableMatchLengths tableIndex = 2 - - maxLiteralLengthSymbol = 35 - maxOffsetLengthSymbol = 30 - maxMatchLengthSymbol = 52 -) - -// baseOffset is used for calculating transformations. -type baseOffset struct { - baseLine uint32 - addBits uint8 -} - -// fillBase will precalculate base offsets with the given bit distributions. 
-func fillBase(dst []baseOffset, base uint32, bits ...uint8) { - if len(bits) != len(dst) { - panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits))) - } - for i, bit := range bits { - if base > math.MaxInt32 { - panic(fmt.Sprintf("invalid decoding table, base overflows int32")) - } - - dst[i] = baseOffset{ - baseLine: base, - addBits: bit, - } - base += 1 << bit - } -} - -var predef sync.Once - -func initPredefined() { - predef.Do(func() { - // Literals length codes - tmp := make([]baseOffset, 36) - for i := range tmp[:16] { - tmp[i] = baseOffset{ - baseLine: uint32(i), - addBits: 0, - } - } - fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) - symbolTableX[tableLiteralLengths] = tmp - - // Match length codes - tmp = make([]baseOffset, 53) - for i := range tmp[:32] { - tmp[i] = baseOffset{ - // The transformation adds the 3 length. - baseLine: uint32(i) + 3, - addBits: 0, - } - } - fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) - symbolTableX[tableMatchLengths] = tmp - - // Offset codes - tmp = make([]baseOffset, maxOffsetBits+1) - tmp[1] = baseOffset{ - baseLine: 1, - addBits: 1, - } - fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30) - symbolTableX[tableOffsets] = tmp - - // Fill predefined tables and transform them. - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions - for i := range fsePredef[:] { - f := &fsePredef[i] - switch tableIndex(i) { - case tableLiteralLengths: - // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243 - f.actualTableLog = 6 - copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, - -1, -1, -1, -1}) - f.symbolLen = 36 - case tableOffsets: - // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281 - f.actualTableLog = 5 - copy(f.norm[:], []int16{ - 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}) - f.symbolLen = 29 - case tableMatchLengths: - //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304 - f.actualTableLog = 6 - copy(f.norm[:], []int16{ - 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, - -1, -1, -1, -1, -1}) - f.symbolLen = 53 - } - if err := f.buildDtable(); err != nil { - panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) - } - if err := f.transform(symbolTableX[i]); err != nil { - panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) - } - f.preDefined = true - - // Create encoder as well - enc := &fsePredefEnc[i] - copy(enc.norm[:], f.norm[:]) - enc.symbolLen = f.symbolLen - enc.actualTableLog = f.actualTableLog - if err := enc.buildCTable(); err != nil { - panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err)) - } - enc.setBits(bitTables[i]) - enc.preDefined = true - } - }) -} diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go deleted file mode 100644 index 4a752067f..000000000 --- a/vendor/github.com/klauspost/compress/zstd/hash.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2019+ Klaus Post. 
All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -const ( - prime3bytes = 506832829 - prime4bytes = 2654435761 - prime5bytes = 889523592379 - prime6bytes = 227718039650203 - prime7bytes = 58295818150454627 - prime8bytes = 0xcf1bbcdcb7a56463 -) - -// hashLen returns a hash of the lowest l bytes of u for a size size of h bytes. -// l must be >=4 and <=8. Any other value will return hash for 4 bytes. -// h should always be <32. -// Preferably h and l should be a constant. -// FIXME: This does NOT get resolved, if 'mls' is constant, -// so this cannot be used. -func hashLen(u uint64, hashLog, mls uint8) uint32 { - switch mls { - case 5: - return hash5(u, hashLog) - case 6: - return hash6(u, hashLog) - case 7: - return hash7(u, hashLog) - case 8: - return hash8(u, hashLog) - default: - return hash4x64(u, hashLog) - } -} - -// hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <32. -func hash3(u uint32, h uint8) uint32 { - return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31) -} - -// hash4 returns the hash of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <32. -func hash4(u uint32, h uint8) uint32 { - return (u * prime4bytes) >> ((32 - h) & 31) -} - -// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <32. -func hash4x64(u uint64, h uint8) uint32 { - return (uint32(u) * prime4bytes) >> ((32 - h) & 31) -} - -// hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <64. -func hash5(u uint64, h uint8) uint32 { - return uint32(((u << (64 - 40)) * prime5bytes) >> ((64 - h) & 63)) -} - -// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <64. -func hash6(u uint64, h uint8) uint32 { - return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63)) -} - -// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <64. -func hash7(u uint64, h uint8) uint32 { - return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63)) -} - -// hash8 returns the hash of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <64. -func hash8(u uint64, h uint8) uint32 { - return uint32((u * prime8bytes) >> ((64 - h) & 63)) -} diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go deleted file mode 100644 index f783e32d2..000000000 --- a/vendor/github.com/klauspost/compress/zstd/history.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "github.com/klauspost/compress/huff0" -) - -// history contains the information transferred between blocks. -type history struct { - b []byte - huffTree *huff0.Scratch - recentOffsets [3]int - decoders sequenceDecs - windowSize int - maxSize int - error bool - dict *dict -} - -// reset will reset the history to initial state of a frame. 
-// The history must already have been initialized to the desired size. -func (h *history) reset() { - h.b = h.b[:0] - h.error = false - h.recentOffsets = [3]int{1, 4, 8} - if f := h.decoders.litLengths.fse; f != nil && !f.preDefined { - fseDecoderPool.Put(f) - } - if f := h.decoders.offsets.fse; f != nil && !f.preDefined { - fseDecoderPool.Put(f) - } - if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined { - fseDecoderPool.Put(f) - } - h.decoders = sequenceDecs{} - if h.huffTree != nil { - if h.dict == nil || h.dict.litEnc != h.huffTree { - huffDecoderPool.Put(h.huffTree) - } - } - h.huffTree = nil - h.dict = nil - //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) -} - -func (h *history) setDict(dict *dict) { - if dict == nil { - return - } - h.dict = dict - h.decoders.litLengths = dict.llDec - h.decoders.offsets = dict.ofDec - h.decoders.matchLengths = dict.mlDec - h.recentOffsets = dict.offsets - h.huffTree = dict.litEnc -} - -// append bytes to history. -// This function will make sure there is space for it, -// if the buffer has been allocated with enough extra space. -func (h *history) append(b []byte) { - if len(b) >= h.windowSize { - // Discard all history by simply overwriting - h.b = h.b[:h.windowSize] - copy(h.b, b[len(b)-h.windowSize:]) - return - } - - // If there is space, append it. - if len(b) < cap(h.b)-len(h.b) { - h.b = append(h.b, b...) - return - } - - // Move data down so we only have window size left. - // We know we have less than window size in b at this point. - discard := len(b) + len(h.b) - h.windowSize - copy(h.b, h.b[discard:]) - h.b = h.b[:h.windowSize] - copy(h.b[h.windowSize-len(b):], b) -} - -// append bytes to history without ever discarding anything. -func (h *history) appendKeep(b []byte) { - h.b = append(h.b, b...) -} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt deleted file mode 100644 index 24b53065f..000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2016 Caleb Spare - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
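The history.append method above keeps only the most recent windowSize bytes of decoded output, sliding older data out of the buffer in place. A minimal standalone sketch of that idea, with a hypothetical helper name and simplified from the vendored implementation:

```
package main

import "fmt"

// appendWindow keeps at most windowSize trailing bytes of everything passed
// to it, moving data down instead of reallocating. Simplified sketch of the
// same windowing behaviour as history.append; not the vendored code.
func appendWindow(buf, b []byte, windowSize int) []byte {
	if len(b) >= windowSize {
		// The new chunk alone fills the window; keep only its tail.
		return append(buf[:0], b[len(b)-windowSize:]...)
	}
	buf = append(buf, b...)
	if len(buf) > windowSize {
		// Slide the kept suffix to the front so the buffer stays bounded.
		copy(buf, buf[len(buf)-windowSize:])
		buf = buf[:windowSize]
	}
	return buf
}

func main() {
	var window []byte
	for _, chunk := range []string{"abcdef", "ghij", "klmnopqr"} {
		window = appendWindow(window, []byte(chunk), 8)
	}
	fmt.Printf("%s\n", window) // prints "klmnopqr": only the last 8 bytes survive
}
```

Unlike the vendored code, the sketch does not preallocate the buffer or handle the appendKeep path; it only illustrates the bounded-window bookkeeping.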
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md deleted file mode 100644 index 69aa3bb58..000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md +++ /dev/null @@ -1,58 +0,0 @@ -# xxhash - -VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. - - -[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) -[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) - -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a -high-quality hashing algorithm that is much faster than anything in the Go -standard library. - -This package provides a straightforward API: - -``` -func Sum64(b []byte) uint64 -func Sum64String(s string) uint64 -type Digest struct{ ... } - func New() *Digest -``` - -The `Digest` type implements hash.Hash64. Its key methods are: - -``` -func (*Digest) Write([]byte) (int, error) -func (*Digest) WriteString(string) (int, error) -func (*Digest) Sum64() uint64 -``` - -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. - -## Benchmarks - -Here are some quick benchmarks comparing the pure-Go and assembly -implementations of Sum64. - -| input size | purego | asm | -| --- | --- | --- | -| 5 B | 979.66 MB/s | 1291.17 MB/s | -| 100 B | 7475.26 MB/s | 7973.40 MB/s | -| 4 KB | 17573.46 MB/s | 17602.65 MB/s | -| 10 MB | 17131.46 MB/s | 17142.16 MB/s | - -These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using -the following commands under Go 1.11.2: - -``` -$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' -$ go test -benchtime 10s -bench '/xxhash,direct,bytes' -``` - -## Projects using this package - -- [InfluxDB](https://github.com/influxdata/influxdb) -- [Prometheus](https://github.com/prometheus/prometheus) -- [FreeCache](https://github.com/coocood/freecache) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go deleted file mode 100644 index 426b9cac7..000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go +++ /dev/null @@ -1,238 +0,0 @@ -// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described -// at http://cyan4973.github.io/xxHash/. -// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package. - -package xxhash - -import ( - "encoding/binary" - "errors" - "math/bits" -) - -const ( - prime1 uint64 = 11400714785074694791 - prime2 uint64 = 14029467366897019727 - prime3 uint64 = 1609587929392839161 - prime4 uint64 = 9650029242287828579 - prime5 uint64 = 2870177450012600261 -) - -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) - -// Digest implements hash.Hash64. 
-type Digest struct { - v1 uint64 - v2 uint64 - v3 uint64 - v4 uint64 - total uint64 - mem [32]byte - n int // how much of mem is used -} - -// New creates a new Digest that computes the 64-bit xxHash algorithm. -func New() *Digest { - var d Digest - d.Reset() - return &d -} - -// Reset clears the Digest's state so that it can be reused. -func (d *Digest) Reset() { - d.v1 = prime1v + prime2 - d.v2 = prime2 - d.v3 = 0 - d.v4 = -prime1v - d.total = 0 - d.n = 0 -} - -// Size always returns 8 bytes. -func (d *Digest) Size() int { return 8 } - -// BlockSize always returns 32 bytes. -func (d *Digest) BlockSize() int { return 32 } - -// Write adds more data to d. It always returns len(b), nil. -func (d *Digest) Write(b []byte) (n int, err error) { - n = len(b) - d.total += uint64(n) - - if d.n+n < 32 { - // This new data doesn't even fill the current block. - copy(d.mem[d.n:], b) - d.n += n - return - } - - if d.n > 0 { - // Finish off the partial block. - copy(d.mem[d.n:], b) - d.v1 = round(d.v1, u64(d.mem[0:8])) - d.v2 = round(d.v2, u64(d.mem[8:16])) - d.v3 = round(d.v3, u64(d.mem[16:24])) - d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[32-d.n:] - d.n = 0 - } - - if len(b) >= 32 { - // One or more full blocks left. - nw := writeBlocks(d, b) - b = b[nw:] - } - - // Store any remaining partial block. - copy(d.mem[:], b) - d.n = len(b) - - return -} - -// Sum appends the current hash to b and returns the resulting slice. -func (d *Digest) Sum(b []byte) []byte { - s := d.Sum64() - return append( - b, - byte(s>>56), - byte(s>>48), - byte(s>>40), - byte(s>>32), - byte(s>>24), - byte(s>>16), - byte(s>>8), - byte(s), - ) -} - -// Sum64 returns the current hash. -func (d *Digest) Sum64() uint64 { - var h uint64 - - if d.total >= 32 { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = d.v3 + prime5 - } - - h += d.total - - i, end := 0, d.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(d.mem[i:i+8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(d.mem[i:i+4])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for i < end { - h ^= uint64(d.mem[i]) * prime5 - h = rol11(h) * prime1 - i++ - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -const ( - magic = "xxh\x06" - marshaledSize = len(magic) + 8*5 + 32 -) - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (d *Digest) MarshalBinary() ([]byte, error) { - b := make([]byte, 0, marshaledSize) - b = append(b, magic...) - b = appendUint64(b, d.v1) - b = appendUint64(b, d.v2) - b = appendUint64(b, d.v3) - b = appendUint64(b, d.v4) - b = appendUint64(b, d.total) - b = append(b, d.mem[:d.n]...) - b = b[:len(b)+len(d.mem)-d.n] - return b, nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. 
-func (d *Digest) UnmarshalBinary(b []byte) error { - if len(b) < len(magic) || string(b[:len(magic)]) != magic { - return errors.New("xxhash: invalid hash state identifier") - } - if len(b) != marshaledSize { - return errors.New("xxhash: invalid hash state size") - } - b = b[len(magic):] - b, d.v1 = consumeUint64(b) - b, d.v2 = consumeUint64(b) - b, d.v3 = consumeUint64(b) - b, d.v4 = consumeUint64(b) - b, d.total = consumeUint64(b) - copy(d.mem[:], b) - b = b[len(d.mem):] - d.n = int(d.total % uint64(len(d.mem))) - return nil -} - -func appendUint64(b []byte, x uint64) []byte { - var a [8]byte - binary.LittleEndian.PutUint64(a[:], x) - return append(b, a[:]...) -} - -func consumeUint64(b []byte) ([]byte, uint64) { - x := u64(b) - return b[8:], x -} - -func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } -func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } - -func round(acc, input uint64) uint64 { - acc += input * prime2 - acc = rol31(acc) - acc *= prime1 - return acc -} - -func mergeRound(acc, val uint64) uint64 { - val = round(0, val) - acc ^= val - acc = acc*prime1 + prime4 - return acc -} - -func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } -func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } -func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } -func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } -func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } -func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } -func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } -func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go deleted file mode 100644 index 35318d7c4..000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -// -//go:noescape -func Sum64(b []byte) uint64 - -//go:noescape -func writeBlocks(*Digest, []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s deleted file mode 100644 index 2c9c5357a..000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s +++ /dev/null @@ -1,215 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego - -#include "textflag.h" - -// Register allocation: -// AX h -// CX pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// R15 prime4v - -// round reads from and advances the buffer pointer in CX. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (CX), R12 \ - ADDQ $8, CX \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ R15, acc - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 - // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), R15 - - // Load slice. 
- MOVQ b_base+0(FP), CX - MOVQ b_len+8(FP), DX - LEAQ (CX)(DX*1), BX - - // The first loop limit will be len(b)-32. - SUBQ $32, BX - - // Check whether we have at least one block. - CMPQ DX, $32 - JLT noBlocks - - // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until CX > BX. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ CX, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) - - JMP afterBlocks - -noBlocks: - MOVQ ·prime5v(SB), AX - -afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. - ADDQ $24, BX - - CMPQ CX, BX - JG fourByte - -wordLoop: - // Calculate k1. - MOVQ (CX), R8 - ADDQ $8, CX - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ R15, AX - - CMPQ CX, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ CX, BX - JG singles - - MOVL (CX), R8 - ADDQ $4, CX - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ CX, BX - JGE finalize - -singlesLoop: - MOVBQZX (CX), R12 - ADDQ $1, CX - IMULQ ·prime5v(SB), R12 - XORQ R12, AX - - ROLQ $11, AX - IMULQ R13, AX - - CMPQ CX, BX - JL singlesLoop - -finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) - RET - -// writeBlocks uses the same registers as above except that it uses AX to store -// the d pointer. - -// func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT, $0-40 - // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - - // Load slice. - MOVQ arg1_base+8(FP), CX - MOVQ arg1_len+16(FP), DX - LEAQ (CX)(DX*1), BX - SUBQ $32, BX - - // Load vN from d. - MOVQ arg+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 - - // We don't need to check the loop condition here; this function is - // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ CX, BX - JLE blockLoop - - // Copy vN back to d. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // The number of bytes written is CX minus the old base pointer. - SUBQ arg1_base+8(FP), CX - MOVQ CX, ret+32(FP) - - RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go deleted file mode 100644 index 4a5a82160..000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go +++ /dev/null @@ -1,76 +0,0 @@ -// +build !amd64 appengine !gc purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -func Sum64(b []byte) uint64 { - // A simpler version would be - // d := New() - // d.Write(b) - // return d.Sum64() - // but this is faster, particularly for small inputs. 
- - n := len(b) - var h uint64 - - if n >= 32 { - v1 := prime1v + prime2 - v2 := prime2 - v3 := uint64(0) - v4 := -prime1v - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = prime5 - } - - h += uint64(n) - - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -func writeBlocks(d *Digest, b []byte) int { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - n := len(b) - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 - return n - len(b) -} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go deleted file mode 100644 index 6f3b0cb10..000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go +++ /dev/null @@ -1,11 +0,0 @@ -package xxhash - -// Sum64String computes the 64-bit xxHash digest of s. -func Sum64String(s string) uint64 { - return Sum64([]byte(s)) -} - -// WriteString adds more data to d. It always returns len(s), nil. -func (d *Digest) WriteString(s string) (n int, err error) { - return d.Write([]byte(s)) -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go deleted file mode 100644 index 1dd39e63b..000000000 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ /dev/null @@ -1,492 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "io" -) - -type seq struct { - litLen uint32 - matchLen uint32 - offset uint32 - - // Codes are stored here for the encoder - // so they only have to be looked up once. - llCode, mlCode, ofCode uint8 -} - -func (s seq) String() string { - if s.offset <= 3 { - if s.offset == 0 { - return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)") - } - return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)") - } - return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)") -} - -type seqCompMode uint8 - -const ( - compModePredefined seqCompMode = iota - compModeRLE - compModeFSE - compModeRepeat -) - -type sequenceDec struct { - // decoder keeps track of the current state and updates it from the bitstream. - fse *fseDecoder - state fseState - repeat bool -} - -// init the state of the decoder with input from stream. 
-func (s *sequenceDec) init(br *bitReader) error { - if s.fse == nil { - return errors.New("sequence decoder not defined") - } - s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1<= 0; i-- { - if br.overread() { - printf("reading sequence %d, exceeded available data\n", seqs-i) - return io.ErrUnexpectedEOF - } - var ll, mo, ml int - if br.off > 4+((maxOffsetBits+16+16)>>3) { - // inlined function: - // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) - - // Final will not read from stream. - var llB, mlB, moB uint8 - ll, llB = llState.final() - ml, mlB = mlState.final() - mo, moB = ofState.final() - - // extra bits are stored in reverse order. - br.fillFast() - mo += br.getBits(moB) - if s.maxBits > 32 { - br.fillFast() - } - ml += br.getBits(mlB) - ll += br.getBits(llB) - - if moB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = mo - } else { - // mo = s.adjustOffset(mo, ll, moB) - // Inlined for rather big speedup - if ll == 0 { - // There is an exception though, when current sequence's literals_length = 0. - // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. - mo++ - } - - if mo == 0 { - mo = s.prevOffset[0] - } else { - var temp int - if mo == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[mo] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("temp was 0") - temp = 1 - } - - if mo != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - mo = temp - } - } - br.fillFast() - } else { - ll, mo, ml = s.next(br, llState, mlState, ofState) - br.fill() - } - - if debugSequences { - println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) - } - - if ll > len(s.literals) { - return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) - } - size := ll + ml + len(s.out) - if size-startSize > maxBlockSize { - return fmt.Errorf("output (%d) bigger than max block size", size) - } - if size > cap(s.out) { - // Not enough size, which can happen under high volume block streaming conditions - // but could be if destination slice is too small for sync operations. - // over-allocating here can create a large amount of GC pressure so we try to keep - // it as contained as possible - used := len(s.out) - startSize - addBytes := 256 + ll + ml + used>>2 - // Clamp to max block size. - if used+addBytes > maxBlockSize { - addBytes = maxBlockSize - used - } - s.out = append(s.out, make([]byte, addBytes)...) - s.out = s.out[:len(s.out)-addBytes] - } - if ml > maxMatchLen { - return fmt.Errorf("match len (%d) bigger than max allowed length", ml) - } - - // Add literals - s.out = append(s.out, s.literals[:ll]...) - s.literals = s.literals[ll:] - out := s.out - - if mo == 0 && ml > 0 { - return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) - } - - if mo > len(s.out)+len(hist) || mo > s.windowSize { - if len(s.dict) == 0 { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) - } - - // we may be in dictionary. 
- dictO := len(s.dict) - (mo - (len(s.out) + len(hist))) - if dictO < 0 || dictO >= len(s.dict) { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) - } - end := dictO + ml - if end > len(s.dict) { - out = append(out, s.dict[dictO:]...) - mo -= len(s.dict) - dictO - ml -= len(s.dict) - dictO - } else { - out = append(out, s.dict[dictO:end]...) - mo = 0 - ml = 0 - } - } - - // Copy from history. - // TODO: Blocks without history could be made to ignore this completely. - if v := mo - len(s.out); v > 0 { - // v is the start position in history from end. - start := len(s.hist) - v - if ml > v { - // Some goes into current block. - // Copy remainder of history - out = append(out, s.hist[start:]...) - mo -= v - ml -= v - } else { - out = append(out, s.hist[start:start+ml]...) - ml = 0 - } - } - // We must be in current buffer now - if ml > 0 { - start := len(s.out) - mo - if ml <= len(s.out)-start { - // No overlap - out = append(out, s.out[start:start+ml]...) - } else { - // Overlapping copy - // Extend destination slice and copy one byte at the time. - out = out[:len(out)+ml] - src := out[start : start+ml] - // Destination is the space we just added. - dst := out[len(out)-ml:] - dst = dst[:len(src)] - for i := range src { - dst[i] = src[i] - } - } - } - s.out = out - if i == 0 { - // This is the last sequence, so we shouldn't update state. - break - } - - // Manually inlined, ~ 5-20% faster - // Update all 3 states at once. Approx 20% faster. - nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() - if nBits == 0 { - llState = llTable[llState.newState()&maxTableMask] - mlState = mlTable[mlState.newState()&maxTableMask] - ofState = ofTable[ofState.newState()&maxTableMask] - } else { - bits := br.getBitsFast(nBits) - lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) - llState = llTable[(llState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits >> (ofState.nbBits() & 31)) - lowBits &= bitMask[mlState.nbBits()&15] - mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] - ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] - } - } - - // Add final literals - s.out = append(s.out, s.literals...) - return nil -} - -// update states, at least 27 bits must be available. -func (s *sequenceDecs) update(br *bitReader) { - // Max 8 bits - s.litLengths.state.next(br) - // Max 9 bits - s.matchLengths.state.next(br) - // Max 8 bits - s.offsets.state.next(br) -} - -var bitMask [16]uint16 - -func init() { - for i := range bitMask[:] { - bitMask[i] = uint16((1 << uint(i)) - 1) - } -} - -// update states, at least 27 bits must be available. -func (s *sequenceDecs) updateAlt(br *bitReader) { - // Update all 3 states at once. Approx 20% faster. 
- a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state - - nBits := a.nbBits() + b.nbBits() + c.nbBits() - if nBits == 0 { - s.litLengths.state.state = s.litLengths.state.dt[a.newState()] - s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()] - s.offsets.state.state = s.offsets.state.dt[c.newState()] - return - } - bits := br.getBitsFast(nBits) - lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31)) - s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits] - - lowBits = uint16(bits >> (c.nbBits() & 31)) - lowBits &= bitMask[b.nbBits()&15] - s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits] - - lowBits = uint16(bits) & bitMask[c.nbBits()&15] - s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits] -} - -// nextFast will return new states when there are at least 4 unused bytes left on the stream when done. -func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { - // Final will not read from stream. - ll, llB := llState.final() - ml, mlB := mlState.final() - mo, moB := ofState.final() - - // extra bits are stored in reverse order. - br.fillFast() - mo += br.getBits(moB) - if s.maxBits > 32 { - br.fillFast() - } - ml += br.getBits(mlB) - ll += br.getBits(llB) - - if moB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = mo - return - } - // mo = s.adjustOffset(mo, ll, moB) - // Inlined for rather big speedup - if ll == 0 { - // There is an exception though, when current sequence's literals_length = 0. - // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. - mo++ - } - - if mo == 0 { - mo = s.prevOffset[0] - return - } - var temp int - if mo == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[mo] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("temp was 0") - temp = 1 - } - - if mo != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - mo = temp - return -} - -func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { - // Final will not read from stream. - ll, llB := llState.final() - ml, mlB := mlState.final() - mo, moB := ofState.final() - - // extra bits are stored in reverse order. - br.fill() - if s.maxBits <= 32 { - mo += br.getBits(moB) - ml += br.getBits(mlB) - ll += br.getBits(llB) - } else { - mo += br.getBits(moB) - br.fill() - // matchlength+literal length, max 32 bits - ml += br.getBits(mlB) - ll += br.getBits(llB) - - } - mo = s.adjustOffset(mo, ll, moB) - return -} - -func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { - if offsetB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = offset - return offset - } - - if litLen == 0 { - // There is an exception though, when current sequence's literals_length = 0. - // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
- offset++ - } - - if offset == 0 { - return s.prevOffset[0] - } - var temp int - if offset == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[offset] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("temp was 0") - temp = 1 - } - - if offset != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - return temp -} - -// mergeHistory will merge history. -func (s *sequenceDecs) mergeHistory(hist *sequenceDecs) (*sequenceDecs, error) { - for i := uint(0); i < 3; i++ { - var sNew, sHist *sequenceDec - switch i { - default: - // same as "case 0": - sNew = &s.litLengths - sHist = &hist.litLengths - case 1: - sNew = &s.offsets - sHist = &hist.offsets - case 2: - sNew = &s.matchLengths - sHist = &hist.matchLengths - } - if sNew.repeat { - if sHist.fse == nil { - return nil, fmt.Errorf("sequence stream %d, repeat requested, but no history", i) - } - continue - } - if sNew.fse == nil { - return nil, fmt.Errorf("sequence stream %d, no fse found", i) - } - if sHist.fse != nil && !sHist.fse.preDefined { - fseDecoderPool.Put(sHist.fse) - } - sHist.fse = sNew.fse - } - return hist, nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go deleted file mode 100644 index 36bcc3cc0..000000000 --- a/vendor/github.com/klauspost/compress/zstd/seqenc.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import "math/bits" - -type seqCoders struct { - llEnc, ofEnc, mlEnc *fseEncoder - llPrev, ofPrev, mlPrev *fseEncoder -} - -// swap coders with another (block). -func (s *seqCoders) swap(other *seqCoders) { - *s, *other = *other, *s -} - -// setPrev will update the previous encoders to the actually used ones -// and make sure a fresh one is in the main slot. -func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { - compareSwap := func(used *fseEncoder, current, prev **fseEncoder) { - // We used the new one, more current to history and reuse the previous history - if *current == used { - *prev, *current = *current, *prev - c := *current - p := *prev - c.reUsed = false - p.reUsed = true - return - } - if used == *prev { - return - } - // Ensure we cannot reuse by accident - prevEnc := *prev - prevEnc.symbolLen = 0 - return - } - compareSwap(ll, &s.llEnc, &s.llPrev) - compareSwap(ml, &s.mlEnc, &s.mlPrev) - compareSwap(of, &s.ofEnc, &s.ofPrev) -} - -func highBit(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} - -var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7, - 8, 9, 10, 11, 12, 13, 14, 15, - 16, 16, 17, 17, 18, 18, 19, 19, - 20, 20, 20, 20, 21, 21, 21, 21, - 22, 22, 22, 22, 22, 22, 22, 22, - 23, 23, 23, 23, 23, 23, 23, 23, - 24, 24, 24, 24, 24, 24, 24, 24, - 24, 24, 24, 24, 24, 24, 24, 24} - -// Up to 6 bits -const maxLLCode = 35 - -// llBitsTable translates from ll code to number of bits. -var llBitsTable = [maxLLCode + 1]byte{ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 1, 1, 1, 1, 2, 2, 3, 3, - 4, 6, 7, 8, 9, 10, 11, 12, - 13, 14, 15, 16} - -// llCode returns the code that represents the literal length requested. 
-func llCode(litLength uint32) uint8 { - const llDeltaCode = 19 - if litLength <= 63 { - // Compiler insists on bounds check (Go 1.12) - return llCodeTable[litLength&63] - } - return uint8(highBit(litLength)) + llDeltaCode -} - -var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, - 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, - 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, - 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} - -// Up to 6 bits -const maxMLCode = 52 - -// mlBitsTable translates from ml code to number of bits. -var mlBitsTable = [maxMLCode + 1]byte{ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 1, 1, 1, 1, 2, 2, 3, 3, - 4, 4, 5, 7, 8, 9, 10, 11, - 12, 13, 14, 15, 16} - -// note : mlBase = matchLength - MINMATCH; -// because it's the format it's stored in seqStore->sequences -func mlCode(mlBase uint32) uint8 { - const mlDeltaCode = 36 - if mlBase <= 127 { - // Compiler insists on bounds check (Go 1.12) - return mlCodeTable[mlBase&127] - } - return uint8(highBit(mlBase)) + mlDeltaCode -} - -func ofCode(offset uint32) uint8 { - // A valid offset will always be > 0. - return uint8(bits.Len32(offset) - 1) -} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go deleted file mode 100644 index c95fe5111..000000000 --- a/vendor/github.com/klauspost/compress/zstd/snappy.go +++ /dev/null @@ -1,436 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "errors" - "hash/crc32" - "io" - - "github.com/klauspost/compress/huff0" - "github.com/klauspost/compress/snappy" -) - -const ( - snappyTagLiteral = 0x00 - snappyTagCopy1 = 0x01 - snappyTagCopy2 = 0x02 - snappyTagCopy4 = 0x03 -) - -const ( - snappyChecksumSize = 4 - snappyMagicBody = "sNaPpY" - - // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not - // part of the wire format per se, but some parts of the encoder assume - // that an offset fits into a uint16. - // - // Also, for the framing format (Writer type instead of Encode function), - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 - // bytes". - snappyMaxBlockSize = 65536 - - // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is - // hard coded to be a const instead of a variable, so that obufLen can also - // be a const. Their equivalence is confirmed by - // TestMaxEncodedLenOfMaxBlockSize. - snappyMaxEncodedLenOfMaxBlockSize = 76490 -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var ( - // ErrSnappyCorrupt reports that the input is invalid. - ErrSnappyCorrupt = errors.New("snappy: corrupt input") - // ErrSnappyTooLarge reports that the uncompressed length is too large. 
- ErrSnappyTooLarge = errors.New("snappy: decoded block is too large") - // ErrSnappyUnsupported reports that the input isn't supported. - ErrSnappyUnsupported = errors.New("snappy: unsupported input") - - errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") -) - -// SnappyConverter can read SnappyConverter-compressed streams and convert them to zstd. -// Conversion is done by converting the stream directly from Snappy without intermediate -// full decoding. -// Therefore the compression ratio is much less than what can be done by a full decompression -// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without -// any errors being generated. -// No CRC value is being generated and not all CRC values of the Snappy stream are checked. -// However, it provides really fast recompression of Snappy streams. -// The converter can be reused to avoid allocations, even after errors. -type SnappyConverter struct { - r io.Reader - err error - buf []byte - block *blockEnc -} - -// Convert the Snappy stream supplied in 'in' and write the zStandard stream to 'w'. -// If any error is detected on the Snappy stream it is returned. -// The number of bytes written is returned. -func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { - initPredefined() - r.err = nil - r.r = in - if r.block == nil { - r.block = &blockEnc{} - r.block.init() - } - r.block.initNewEncode() - if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize { - r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize) - } - r.block.litEnc.Reuse = huff0.ReusePolicyNone - var written int64 - var readHeader bool - { - var header []byte - var n int - header, r.err = frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) - - n, r.err = w.Write(header) - if r.err != nil { - return written, r.err - } - written += int64(n) - } - - for { - if !r.readFull(r.buf[:4], true) { - // Add empty last block - r.block.reset(nil) - r.block.last = true - err := r.block.encodeLits(r.block.literals, false) - if err != nil { - return written, err - } - n, err := w.Write(r.block.output) - if err != nil { - return written, err - } - written += int64(n) - - return written, r.err - } - chunkType := r.buf[0] - if !readHeader { - if chunkType != chunkTypeStreamIdentifier { - println("chunkType != chunkTypeStreamIdentifier", chunkType) - r.err = ErrSnappyCorrupt - return written, r.err - } - readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - println("chunkLen > len(r.buf)", chunkType) - r.err = ErrSnappyUnsupported - return written, r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). 
- if chunkLen < snappyChecksumSize { - println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return written, r.err - } - //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[snappyChecksumSize:] - - n, hdr, err := snappyDecodedLen(buf) - if err != nil { - r.err = err - return written, r.err - } - buf = buf[hdr:] - if n > snappyMaxBlockSize { - println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - r.block.reset(nil) - r.block.pushOffsets() - if err := decodeSnappy(r.block, buf); err != nil { - r.err = err - return written, r.err - } - if r.block.size+r.block.extraLits != n { - printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits) - r.err = ErrSnappyCorrupt - return written, r.err - } - err = r.block.encode(nil, false, false) - switch err { - case errIncompressible: - r.block.popOffsets() - r.block.reset(nil) - r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) - if err != nil { - println("snappy.Decode:", err) - return written, err - } - err = r.block.encodeLits(r.block.literals, false) - if err != nil { - return written, err - } - case nil: - default: - return written, err - } - - n, r.err = w.Write(r.block.output) - if r.err != nil { - return written, err - } - written += int64(n) - continue - case chunkTypeUncompressedData: - if debug { - println("Uncompressed, chunklen", chunkLen) - } - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < snappyChecksumSize { - println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - r.block.reset(nil) - buf := r.buf[:snappyChecksumSize] - if !r.readFull(buf, false) { - return written, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. - n := chunkLen - snappyChecksumSize - if n > snappyMaxBlockSize { - println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - r.block.literals = r.block.literals[:n] - if !r.readFull(r.block.literals, false) { - return written, r.err - } - if snappyCRC(r.block.literals) != checksum { - println("literals crc mismatch") - r.err = ErrSnappyCorrupt - return written, r.err - } - err := r.block.encodeLits(r.block.literals, false) - if err != nil { - return written, err - } - n, r.err = w.Write(r.block.output) - if r.err != nil { - return written, err - } - written += int64(n) - continue - - case chunkTypeStreamIdentifier: - if debug { - println("stream id", chunkLen, len(snappyMagicBody)) - } - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(snappyMagicBody) { - println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody)) - r.err = ErrSnappyCorrupt - return written, r.err - } - if !r.readFull(r.buf[:len(snappyMagicBody)], false) { - return written, r.err - } - for i := 0; i < len(snappyMagicBody); i++ { - if r.buf[i] != snappyMagicBody[i] { - println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) - r.err = ErrSnappyCorrupt - return written, r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). 
- println("chunkType <= 0x7f") - r.err = ErrSnappyUnsupported - return written, r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen], false) { - return written, r.err - } - } -} - -// decodeSnappy writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read. -func decodeSnappy(blk *blockEnc, src []byte) error { - //decodeRef(make([]byte, snappyMaxBlockSize), src) - var s, length int - lits := blk.extraLits - var offset uint32 - for s < len(src) { - switch src[s] & 0x03 { - case snappyTagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - if x > snappyMaxBlockSize { - println("x > snappyMaxBlockSize", x, snappyMaxBlockSize) - return ErrSnappyCorrupt - } - length = int(x) + 1 - if length <= 0 { - println("length <= 0 ", length) - - return errUnsupportedLiteralLength - } - //if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s { - // return ErrSnappyCorrupt - //} - - blk.literals = append(blk.literals, src[s:s+length]...) - //println(length, "litLen") - lits += length - s += length - continue - - case snappyTagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, len(src)) - return ErrSnappyCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]) - - case snappyTagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, len(src)) - return ErrSnappyCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = uint32(src[s-2]) | uint32(src[s-1])<<8 - - case snappyTagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, len(src)) - return ErrSnappyCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - - if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ { - println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits) - - return ErrSnappyCorrupt - } - - // Check if offset is one of the recent offsets. - // Adjusts the output offset accordingly. - // Gives a tiny bit of compression, typically around 1%. 
- if false { - offset = blk.matchOffset(offset, uint32(lits)) - } else { - offset += 3 - } - - blk.sequences = append(blk.sequences, seq{ - litLen: uint32(lits), - offset: offset, - matchLen: uint32(length) - zstdMinMatch, - }) - blk.size += length + lits - lits = 0 - } - blk.extraLits = lits - return nil -} - -func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrSnappyCorrupt - } - return false - } - return true -} - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func snappyCRC(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return c>>15 | c<<17 + 0xa282ead8 -} - -// snappyDecodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrSnappyCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrSnappyTooLarge - } - return int(v), n, nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go deleted file mode 100644 index 9056beef2..000000000 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ /dev/null @@ -1,156 +0,0 @@ -// Package zstd provides decompression of zstandard files. -// -// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd -package zstd - -import ( - "bytes" - "errors" - "log" - "math" - "math/bits" -) - -// enable debug printing -const debug = false - -// Enable extra assertions. -const debugAsserts = debug || false - -// print sequence details -const debugSequences = false - -// print detailed matching information -const debugMatches = false - -// force encoder to use predefined tables. -const forcePreDef = false - -// zstdMinMatch is the minimum zstd match length. -const zstdMinMatch = 3 - -// Reset the buffer offset when reaching this. -const bufferReset = math.MaxInt32 - MaxWindowSize - -var ( - // ErrReservedBlockType is returned when a reserved block type is found. - // Typically this indicates wrong or corrupted input. - ErrReservedBlockType = errors.New("invalid input: reserved block type encountered") - - // ErrCompressedSizeTooBig is returned when a block is bigger than allowed. - // Typically this indicates wrong or corrupted input. - ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big") - - // ErrBlockTooSmall is returned when a block is too small to be decoded. - // Typically returned on invalid input. - ErrBlockTooSmall = errors.New("block too small") - - // ErrMagicMismatch is returned when a "magic" number isn't what is expected. - // Typically this indicates wrong or corrupted input. - ErrMagicMismatch = errors.New("invalid input: magic number mismatch") - - // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size. - // Typically this indicates wrong or corrupted input. - ErrWindowSizeExceeded = errors.New("window size exceeded") - - // ErrWindowSizeTooSmall is returned when no window size is specified. - // Typically this indicates wrong or corrupted input. 
- ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small") - - // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit. - ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") - - // ErrUnknownDictionary is returned if the dictionary ID is unknown. - // For the time being dictionaries are not supported. - ErrUnknownDictionary = errors.New("unknown dictionary") - - // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. - // This is only returned if SingleSegment is specified on the frame. - ErrFrameSizeExceeded = errors.New("frame size exceeded") - - // ErrCRCMismatch is returned if CRC mismatches. - ErrCRCMismatch = errors.New("CRC check failed") - - // ErrDecoderClosed will be returned if the Decoder was used after - // Close has been called. - ErrDecoderClosed = errors.New("decoder used after Close") - - // ErrDecoderNilInput is returned when a nil Reader was provided - // and an operation other than Reset/DecodeAll/Close was attempted. - ErrDecoderNilInput = errors.New("nil input provided as reader") -) - -func println(a ...interface{}) { - if debug { - log.Println(a...) - } -} - -func printf(format string, a ...interface{}) { - if debug { - log.Printf(format, a...) - } -} - -// matchLenFast does matching, but will not match the last up to 7 bytes. -func matchLenFast(a, b []byte) int { - endI := len(a) & (math.MaxInt32 - 7) - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - return i + bits.TrailingZeros64(diff)>>3 - } - } - return endI -} - -// matchLen returns the maximum length. -// a must be the shortest of the two. -// The function also returns whether all bytes matched. -func matchLen(a, b []byte) int { - b = b[:len(a)] - for i := 0; i < len(a)-7; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - return i + (bits.TrailingZeros64(diff) >> 3) - } - } - - checked := (len(a) >> 3) << 3 - a = a[checked:] - b = b[checked:] - for i := range a { - if a[i] != b[i] { - return i + checked - } - } - return len(a) + checked -} - -func load3232(b []byte, i int32) uint32 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. - b = b[i:] - b = b[:4] - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func load6432(b []byte, i int32) uint64 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. - b = b[i:] - b = b[:8] - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -func load64(b []byte, i int) uint64 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. 
- b = b[i:] - b = b[:8] - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -type byter interface { - Bytes() []byte - Len() int -} - -var _ byter = &bytes.Buffer{} diff --git a/vendor/github.com/magiconair/properties/.gitignore b/vendor/github.com/magiconair/properties/.gitignore deleted file mode 100644 index e7081ff52..000000000 --- a/vendor/github.com/magiconair/properties/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -*.sublime-project -*.sublime-workspace -*.un~ -*.swp -.idea/ -*.iml diff --git a/vendor/github.com/magiconair/properties/CHANGELOG.md b/vendor/github.com/magiconair/properties/CHANGELOG.md deleted file mode 100644 index 842e8e24f..000000000 --- a/vendor/github.com/magiconair/properties/CHANGELOG.md +++ /dev/null @@ -1,205 +0,0 @@ -## Changelog - -### [1.8.7](https://github.com/magiconair/properties/tree/v1.8.7) - 08 Dec 2022 - - * [PR #65](https://github.com/magiconair/properties/pull/65): Speedup Merge - - Thanks to [@AdityaVallabh](https://github.com/AdityaVallabh) for the patch. - - * [PR #66](https://github.com/magiconair/properties/pull/66): use github actions - -### [1.8.6](https://github.com/magiconair/properties/tree/v1.8.6) - 23 Feb 2022 - - * [PR #57](https://github.com/magiconair/properties/pull/57):Fix "unreachable code" lint error - - Thanks to [@ellie](https://github.com/ellie) for the patch. - - * [PR #63](https://github.com/magiconair/properties/pull/63): Make TestMustGetParsedDuration backwards compatible - - This patch ensures that the `TestMustGetParsedDuration` still works with `go1.3` to make the - author happy until it affects real users. - - Thanks to [@maage](https://github.com/maage) for the patch. - -### [1.8.5](https://github.com/magiconair/properties/tree/v1.8.5) - 24 Mar 2021 - - * [PR #55](https://github.com/magiconair/properties/pull/55): Fix: Encoding Bug in Comments - - When reading comments \ are loaded correctly, but when writing they are then - replaced by \\. This leads to wrong comments when writing and reading multiple times. - - Thanks to [@doxsch](https://github.com/doxsch) for the patch. - -### [1.8.4](https://github.com/magiconair/properties/tree/v1.8.4) - 23 Sep 2020 - - * [PR #50](https://github.com/magiconair/properties/pull/50): enhance error message for circular references - - Thanks to [@sriv](https://github.com/sriv) for the patch. - -### [1.8.3](https://github.com/magiconair/properties/tree/v1.8.3) - 14 Sep 2020 - - * [PR #49](https://github.com/magiconair/properties/pull/49): Include the key in error message causing the circular reference - - The change is include the key in the error message which is causing the circular - reference when parsing/loading the properties files. - - Thanks to [@haroon-sheikh](https://github.com/haroon-sheikh) for the patch. - -### [1.8.2](https://github.com/magiconair/properties/tree/v1.8.2) - 25 Aug 2020 - - * [PR #36](https://github.com/magiconair/properties/pull/36): Escape backslash on write - - This patch ensures that backslashes are escaped on write. Existing applications which - rely on the old behavior may need to be updated. - - Thanks to [@apesternikov](https://github.com/apesternikov) for the patch. - - * [PR #42](https://github.com/magiconair/properties/pull/42): Made Content-Type check whitespace agnostic in LoadURL() - - Thanks to [@aliras1](https://github.com/aliras1) for the patch. 
- - * [PR #41](https://github.com/magiconair/properties/pull/41): Make key/value separator configurable on Write() - - Thanks to [@mkjor](https://github.com/mkjor) for the patch. - - * [PR #40](https://github.com/magiconair/properties/pull/40): Add method to return a sorted list of keys - - Thanks to [@mkjor](https://github.com/mkjor) for the patch. - -### [1.8.1](https://github.com/magiconair/properties/tree/v1.8.1) - 10 May 2019 - - * [PR #35](https://github.com/magiconair/properties/pull/35): Close body always after request - - This patch ensures that in `LoadURL` the response body is always closed. - - Thanks to [@liubog2008](https://github.com/liubog2008) for the patch. - -### [1.8](https://github.com/magiconair/properties/tree/v1.8) - 15 May 2018 - - * [PR #26](https://github.com/magiconair/properties/pull/26): Disable expansion during loading - - This adds the option to disable property expansion during loading. - - Thanks to [@kmala](https://github.com/kmala) for the patch. - -### [1.7.6](https://github.com/magiconair/properties/tree/v1.7.6) - 14 Feb 2018 - - * [PR #29](https://github.com/magiconair/properties/pull/29): Reworked expansion logic to handle more complex cases. - - See PR for an example. - - Thanks to [@yobert](https://github.com/yobert) for the fix. - -### [1.7.5](https://github.com/magiconair/properties/tree/v1.7.5) - 13 Feb 2018 - - * [PR #28](https://github.com/magiconair/properties/pull/28): Support duplicate expansions in the same value - - Values which expand the same key multiple times (e.g. `key=${a} ${a}`) will no longer fail - with a `circular reference error`. - - Thanks to [@yobert](https://github.com/yobert) for the fix. - -### [1.7.4](https://github.com/magiconair/properties/tree/v1.7.4) - 31 Oct 2017 - - * [Issue #23](https://github.com/magiconair/properties/issues/23): Ignore blank lines with whitespaces - - * [PR #24](https://github.com/magiconair/properties/pull/24): Update keys when DisableExpansion is enabled - - Thanks to [@mgurov](https://github.com/mgurov) for the fix. 
- -### [1.7.3](https://github.com/magiconair/properties/tree/v1.7.3) - 10 Jul 2017 - - * [Issue #17](https://github.com/magiconair/properties/issues/17): Add [SetValue()](http://godoc.org/github.com/magiconair/properties#Properties.SetValue) method to set values generically - * [Issue #22](https://github.com/magiconair/properties/issues/22): Add [LoadMap()](http://godoc.org/github.com/magiconair/properties#LoadMap) function to load properties from a string map - -### [1.7.2](https://github.com/magiconair/properties/tree/v1.7.2) - 20 Mar 2017 - - * [Issue #15](https://github.com/magiconair/properties/issues/15): Drop gocheck dependency - * [PR #21](https://github.com/magiconair/properties/pull/21): Add [Map()](http://godoc.org/github.com/magiconair/properties#Properties.Map) and [FilterFunc()](http://godoc.org/github.com/magiconair/properties#Properties.FilterFunc) - -### [1.7.1](https://github.com/magiconair/properties/tree/v1.7.1) - 13 Jan 2017 - - * [Issue #14](https://github.com/magiconair/properties/issues/14): Decouple TestLoadExpandedFile from `$USER` - * [PR #12](https://github.com/magiconair/properties/pull/12): Load from files and URLs - * [PR #16](https://github.com/magiconair/properties/pull/16): Keep gofmt happy - * [PR #18](https://github.com/magiconair/properties/pull/18): Fix Delete() function - -### [1.7.0](https://github.com/magiconair/properties/tree/v1.7.0) - 20 Mar 2016 - - * [Issue #10](https://github.com/magiconair/properties/issues/10): Add [LoadURL,LoadURLs,MustLoadURL,MustLoadURLs](http://godoc.org/github.com/magiconair/properties#LoadURL) method to load properties from a URL. - * [Issue #11](https://github.com/magiconair/properties/issues/11): Add [LoadString,MustLoadString](http://godoc.org/github.com/magiconair/properties#LoadString) method to load properties from an UTF8 string. - * [PR #8](https://github.com/magiconair/properties/pull/8): Add [MustFlag](http://godoc.org/github.com/magiconair/properties#Properties.MustFlag) method to provide overrides via command line flags. (@pascaldekloe) - -### [1.6.0](https://github.com/magiconair/properties/tree/v1.6.0) - 11 Dec 2015 - - * Add [Decode](http://godoc.org/github.com/magiconair/properties#Properties.Decode) method to populate struct from properties via tags. - -### [1.5.6](https://github.com/magiconair/properties/tree/v1.5.6) - 18 Oct 2015 - - * Vendored in gopkg.in/check.v1 - -### [1.5.5](https://github.com/magiconair/properties/tree/v1.5.5) - 31 Jul 2015 - - * [PR #6](https://github.com/magiconair/properties/pull/6): Add [Delete](http://godoc.org/github.com/magiconair/properties#Properties.Delete) method to remove keys including comments. (@gerbenjacobs) - -### [1.5.4](https://github.com/magiconair/properties/tree/v1.5.4) - 23 Jun 2015 - - * [Issue #5](https://github.com/magiconair/properties/issues/5): Allow disabling of property expansion [DisableExpansion](http://godoc.org/github.com/magiconair/properties#Properties.DisableExpansion). When property expansion is disabled Properties become a simple key/value store and don't check for circular references. 
- -### [1.5.3](https://github.com/magiconair/properties/tree/v1.5.3) - 02 Jun 2015 - - * [Issue #4](https://github.com/magiconair/properties/issues/4): Maintain key order in [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) and [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp) - -### [1.5.2](https://github.com/magiconair/properties/tree/v1.5.2) - 10 Apr 2015 - - * [Issue #3](https://github.com/magiconair/properties/issues/3): Don't print comments in [WriteComment()](http://godoc.org/github.com/magiconair/properties#Properties.WriteComment) if they are all empty - * Add clickable links to README - -### [1.5.1](https://github.com/magiconair/properties/tree/v1.5.1) - 08 Dec 2014 - - * Added [GetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.GetParsedDuration) and [MustGetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.MustGetParsedDuration) for values specified compatible with - [time.ParseDuration()](http://golang.org/pkg/time/#ParseDuration). - -### [1.5.0](https://github.com/magiconair/properties/tree/v1.5.0) - 18 Nov 2014 - - * Added support for single and multi-line comments (reading, writing and updating) - * The order of keys is now preserved - * Calling [Set()](http://godoc.org/github.com/magiconair/properties#Properties.Set) with an empty key now silently ignores the call and does not create a new entry - * Added a [MustSet()](http://godoc.org/github.com/magiconair/properties#Properties.MustSet) method - * Migrated test library from launchpad.net/gocheck to [gopkg.in/check.v1](http://gopkg.in/check.v1) - -### [1.4.2](https://github.com/magiconair/properties/tree/v1.4.2) - 15 Nov 2014 - - * [Issue #2](https://github.com/magiconair/properties/issues/2): Fixed goroutine leak in parser which created two lexers but cleaned up only one - -### [1.4.1](https://github.com/magiconair/properties/tree/v1.4.1) - 13 Nov 2014 - - * [Issue #1](https://github.com/magiconair/properties/issues/1): Fixed bug in Keys() method which returned an empty string - -### [1.4.0](https://github.com/magiconair/properties/tree/v1.4.0) - 23 Sep 2014 - - * Added [Keys()](http://godoc.org/github.com/magiconair/properties#Properties.Keys) to get the keys - * Added [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp) and [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) to get a subset of the properties - -### [1.3.0](https://github.com/magiconair/properties/tree/v1.3.0) - 18 Mar 2014 - -* Added support for time.Duration -* Made MustXXX() failure beha[ior configurable (log.Fatal, panic](https://github.com/magiconair/properties/tree/vior configurable (log.Fatal, panic) - custom) -* Changed default of MustXXX() failure from panic to log.Fatal - -### [1.2.0](https://github.com/magiconair/properties/tree/v1.2.0) - 05 Mar 2014 - -* Added MustGet... 
functions -* Added support for int and uint with range checks on 32 bit platforms - -### [1.1.0](https://github.com/magiconair/properties/tree/v1.1.0) - 20 Jan 2014 - -* Renamed from goproperties to properties -* Added support for expansion of environment vars in - filenames and value expressions -* Fixed bug where value expressions were not at the - start of the string - -### [1.0.0](https://github.com/magiconair/properties/tree/v1.0.0) - 7 Jan 2014 - -* Initial release diff --git a/vendor/github.com/magiconair/properties/LICENSE.md b/vendor/github.com/magiconair/properties/LICENSE.md deleted file mode 100644 index 79c87e3e6..000000000 --- a/vendor/github.com/magiconair/properties/LICENSE.md +++ /dev/null @@ -1,24 +0,0 @@ -Copyright (c) 2013-2020, Frank Schroeder - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/magiconair/properties/README.md b/vendor/github.com/magiconair/properties/README.md deleted file mode 100644 index e2edda025..000000000 --- a/vendor/github.com/magiconair/properties/README.md +++ /dev/null @@ -1,128 +0,0 @@ -[![](https://img.shields.io/github/tag/magiconair/properties.svg?style=flat-square&label=release)](https://github.com/magiconair/properties/releases) -[![Travis CI Status](https://img.shields.io/travis/magiconair/properties.svg?branch=master&style=flat-square&label=travis)](https://travis-ci.org/magiconair/properties) -[![License](https://img.shields.io/badge/License-BSD%202--Clause-orange.svg?style=flat-square)](https://raw.githubusercontent.com/magiconair/properties/master/LICENSE) -[![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties) - -# Overview - -#### Please run `git pull --tags` to update the tags. See [below](#updated-git-tags) why. - -properties is a Go library for reading and writing properties files. - -It supports reading from multiple files or URLs and Spring style recursive -property expansion of expressions like `${key}` to their corresponding value. -Value expressions can refer to other keys like in `${key}` or to environment -variables like in `${USER}`. Filenames can also contain environment variables -like in `/home/${USER}/myapp.properties`. 
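A short sketch of the expansion behaviour described above, assuming the API shown in this README (MustLoadString, MustGetString); the key names are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/magiconair/properties"
)

func main() {
	// "base", "datadir" and "home" are hypothetical keys used only for illustration.
	// ${base} expands recursively to the value of the "base" key, while ${USER}
	// falls back to the environment variable of the same name.
	p := properties.MustLoadString(
		"base=/opt/myapp\n" +
			"datadir=${base}/data\n" +
			"home=/home/${USER}\n")

	fmt.Println(p.MustGetString("datadir")) // /opt/myapp/data
	fmt.Println(p.MustGetString("home"))    // /home/<current user>
}
```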
- -Properties can be decoded into structs, maps, arrays and values through -struct tags. - -Comments and the order of keys are preserved. Comments can be modified -and can be written to the output. - -The properties library supports both ISO-8859-1 and UTF-8 encoded data. - -Starting from version 1.3.0 the behavior of the MustXXX() functions is -configurable by providing a custom `ErrorHandler` function. The default has -changed from `panic` to `log.Fatal` but this is configurable and custom -error handling functions can be provided. See the package documentation for -details. - -Read the full documentation on [![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties) - -## Getting Started - -```go -import ( - "flag" - "github.com/magiconair/properties" -) - -func main() { - // init from a file - p := properties.MustLoadFile("${HOME}/config.properties", properties.UTF8) - - // or multiple files - p = properties.MustLoadFiles([]string{ - "${HOME}/config.properties", - "${HOME}/config-${USER}.properties", - }, properties.UTF8, true) - - // or from a map - p = properties.LoadMap(map[string]string{"key": "value", "abc": "def"}) - - // or from a string - p = properties.MustLoadString("key=value\nabc=def") - - // or from a URL - p = properties.MustLoadURL("http://host/path") - - // or from multiple URLs - p = properties.MustLoadURL([]string{ - "http://host/config", - "http://host/config-${USER}", - }, true) - - // or from flags - p.MustFlag(flag.CommandLine) - - // get values through getters - host := p.MustGetString("host") - port := p.GetInt("port", 8080) - - // or through Decode - type Config struct { - Host string `properties:"host"` - Port int `properties:"port,default=9000"` - Accept []string `properties:"accept,default=image/png;image;gif"` - Timeout time.Duration `properties:"timeout,default=5s"` - } - var cfg Config - if err := p.Decode(&cfg); err != nil { - log.Fatal(err) - } -} - -``` - -## Installation and Upgrade - -``` -$ go get -u github.com/magiconair/properties -``` - -## License - -2 clause BSD license. See [LICENSE](https://github.com/magiconair/properties/blob/master/LICENSE) file for details. - -## ToDo - -* Dump contents with passwords and secrets obscured - -## Updated Git tags - -#### 13 Feb 2018 - -I realized that all of the git tags I had pushed before v1.7.5 were lightweight tags -and I've only recently learned that this doesn't play well with `git describe` 😞 - -I have replaced all lightweight tags with signed tags using this script which should -retain the commit date, name and email address. Please run `git pull --tags` to update them. - -Worst case you have to reclone the repo. - -```shell -#!/bin/bash -tag=$1 -echo "Updating $tag" -date=$(git show ${tag}^0 --format=%aD | head -1) -email=$(git show ${tag}^0 --format=%aE | head -1) -name=$(git show ${tag}^0 --format=%aN | head -1) -GIT_COMMITTER_DATE="$date" GIT_COMMITTER_NAME="$name" GIT_COMMITTER_EMAIL="$email" git tag -s -f ${tag} ${tag}^0 -m ${tag} -``` - -I apologize for the inconvenience. - -Frank - diff --git a/vendor/github.com/magiconair/properties/decode.go b/vendor/github.com/magiconair/properties/decode.go deleted file mode 100644 index 8e6aa441d..000000000 --- a/vendor/github.com/magiconair/properties/decode.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package properties - -import ( - "fmt" - "reflect" - "strconv" - "strings" - "time" -) - -// Decode assigns property values to exported fields of a struct. -// -// Decode traverses v recursively and returns an error if a value cannot be -// converted to the field type or a required value is missing for a field. -// -// The following type dependent decodings are used: -// -// String, boolean, numeric fields have the value of the property key assigned. -// The property key name is the name of the field. A different key and a default -// value can be set in the field's tag. Fields without default value are -// required. If the value cannot be converted to the field type an error is -// returned. -// -// time.Duration fields have the result of time.ParseDuration() assigned. -// -// time.Time fields have the vaule of time.Parse() assigned. The default layout -// is time.RFC3339 but can be set in the field's tag. -// -// Arrays and slices of string, boolean, numeric, time.Duration and time.Time -// fields have the value interpreted as a comma separated list of values. The -// individual values are trimmed of whitespace and empty values are ignored. A -// default value can be provided as a semicolon separated list in the field's -// tag. -// -// Struct fields are decoded recursively using the field name plus "." as -// prefix. The prefix (without dot) can be overridden in the field's tag. -// Default values are not supported in the field's tag. Specify them on the -// fields of the inner struct instead. -// -// Map fields must have a key of type string and are decoded recursively by -// using the field's name plus ".' as prefix and the next element of the key -// name as map key. The prefix (without dot) can be overridden in the field's -// tag. Default values are not supported. -// -// Examples: -// -// // Field is ignored. -// Field int `properties:"-"` -// -// // Field is assigned value of 'Field'. -// Field int -// -// // Field is assigned value of 'myName'. -// Field int `properties:"myName"` -// -// // Field is assigned value of key 'myName' and has a default -// // value 15 if the key does not exist. -// Field int `properties:"myName,default=15"` -// -// // Field is assigned value of key 'Field' and has a default -// // value 15 if the key does not exist. -// Field int `properties:",default=15"` -// -// // Field is assigned value of key 'date' and the date -// // is in format 2006-01-02 -// Field time.Time `properties:"date,layout=2006-01-02"` -// -// // Field is assigned the non-empty and whitespace trimmed -// // values of key 'Field' split by commas. -// Field []string -// -// // Field is assigned the non-empty and whitespace trimmed -// // values of key 'Field' split by commas and has a default -// // value ["a", "b", "c"] if the key does not exist. -// Field []string `properties:",default=a;b;c"` -// -// // Field is decoded recursively with "Field." as key prefix. -// Field SomeStruct -// -// // Field is decoded recursively with "myName." as key prefix. -// Field SomeStruct `properties:"myName"` -// -// // Field is decoded recursively with "Field." as key prefix -// // and the next dotted element of the key as map key. -// Field map[string]string -// -// // Field is decoded recursively with "myName." as key prefix -// // and the next dotted element of the key as map key. 
-// Field map[string]string `properties:"myName"` -func (p *Properties) Decode(x interface{}) error { - t, v := reflect.TypeOf(x), reflect.ValueOf(x) - if t.Kind() != reflect.Ptr || v.Elem().Type().Kind() != reflect.Struct { - return fmt.Errorf("not a pointer to struct: %s", t) - } - if err := dec(p, "", nil, nil, v); err != nil { - return err - } - return nil -} - -func dec(p *Properties, key string, def *string, opts map[string]string, v reflect.Value) error { - t := v.Type() - - // value returns the property value for key or the default if provided. - value := func() (string, error) { - if val, ok := p.Get(key); ok { - return val, nil - } - if def != nil { - return *def, nil - } - return "", fmt.Errorf("missing required key %s", key) - } - - // conv converts a string to a value of the given type. - conv := func(s string, t reflect.Type) (val reflect.Value, err error) { - var v interface{} - - switch { - case isDuration(t): - v, err = time.ParseDuration(s) - - case isTime(t): - layout := opts["layout"] - if layout == "" { - layout = time.RFC3339 - } - v, err = time.Parse(layout, s) - - case isBool(t): - v, err = boolVal(s), nil - - case isString(t): - v, err = s, nil - - case isFloat(t): - v, err = strconv.ParseFloat(s, 64) - - case isInt(t): - v, err = strconv.ParseInt(s, 10, 64) - - case isUint(t): - v, err = strconv.ParseUint(s, 10, 64) - - default: - return reflect.Zero(t), fmt.Errorf("unsupported type %s", t) - } - if err != nil { - return reflect.Zero(t), err - } - return reflect.ValueOf(v).Convert(t), nil - } - - // keydef returns the property key and the default value based on the - // name of the struct field and the options in the tag. - keydef := func(f reflect.StructField) (string, *string, map[string]string) { - _key, _opts := parseTag(f.Tag.Get("properties")) - - var _def *string - if d, ok := _opts["default"]; ok { - _def = &d - } - if _key != "" { - return _key, _def, _opts - } - return f.Name, _def, _opts - } - - switch { - case isDuration(t) || isTime(t) || isBool(t) || isString(t) || isFloat(t) || isInt(t) || isUint(t): - s, err := value() - if err != nil { - return err - } - val, err := conv(s, t) - if err != nil { - return err - } - v.Set(val) - - case isPtr(t): - return dec(p, key, def, opts, v.Elem()) - - case isStruct(t): - for i := 0; i < v.NumField(); i++ { - fv := v.Field(i) - fk, def, opts := keydef(t.Field(i)) - if !fv.CanSet() { - return fmt.Errorf("cannot set %s", t.Field(i).Name) - } - if fk == "-" { - continue - } - if key != "" { - fk = key + "." 
+ fk - } - if err := dec(p, fk, def, opts, fv); err != nil { - return err - } - } - return nil - - case isArray(t): - val, err := value() - if err != nil { - return err - } - vals := split(val, ";") - a := reflect.MakeSlice(t, 0, len(vals)) - for _, s := range vals { - val, err := conv(s, t.Elem()) - if err != nil { - return err - } - a = reflect.Append(a, val) - } - v.Set(a) - - case isMap(t): - valT := t.Elem() - m := reflect.MakeMap(t) - for postfix := range p.FilterStripPrefix(key + ".").m { - pp := strings.SplitN(postfix, ".", 2) - mk, mv := pp[0], reflect.New(valT) - if err := dec(p, key+"."+mk, nil, nil, mv); err != nil { - return err - } - m.SetMapIndex(reflect.ValueOf(mk), mv.Elem()) - } - v.Set(m) - - default: - return fmt.Errorf("unsupported type %s", t) - } - return nil -} - -// split splits a string on sep, trims whitespace of elements -// and omits empty elements -func split(s string, sep string) []string { - var a []string - for _, v := range strings.Split(s, sep) { - if v = strings.TrimSpace(v); v != "" { - a = append(a, v) - } - } - return a -} - -// parseTag parses a "key,k=v,k=v,..." -func parseTag(tag string) (key string, opts map[string]string) { - opts = map[string]string{} - for i, s := range strings.Split(tag, ",") { - if i == 0 { - key = s - continue - } - - pp := strings.SplitN(s, "=", 2) - if len(pp) == 1 { - opts[pp[0]] = "" - } else { - opts[pp[0]] = pp[1] - } - } - return key, opts -} - -func isArray(t reflect.Type) bool { return t.Kind() == reflect.Array || t.Kind() == reflect.Slice } -func isBool(t reflect.Type) bool { return t.Kind() == reflect.Bool } -func isDuration(t reflect.Type) bool { return t == reflect.TypeOf(time.Second) } -func isMap(t reflect.Type) bool { return t.Kind() == reflect.Map } -func isPtr(t reflect.Type) bool { return t.Kind() == reflect.Ptr } -func isString(t reflect.Type) bool { return t.Kind() == reflect.String } -func isStruct(t reflect.Type) bool { return t.Kind() == reflect.Struct } -func isTime(t reflect.Type) bool { return t == reflect.TypeOf(time.Time{}) } -func isFloat(t reflect.Type) bool { - return t.Kind() == reflect.Float32 || t.Kind() == reflect.Float64 -} -func isInt(t reflect.Type) bool { - return t.Kind() == reflect.Int || t.Kind() == reflect.Int8 || t.Kind() == reflect.Int16 || t.Kind() == reflect.Int32 || t.Kind() == reflect.Int64 -} -func isUint(t reflect.Type) bool { - return t.Kind() == reflect.Uint || t.Kind() == reflect.Uint8 || t.Kind() == reflect.Uint16 || t.Kind() == reflect.Uint32 || t.Kind() == reflect.Uint64 -} diff --git a/vendor/github.com/magiconair/properties/doc.go b/vendor/github.com/magiconair/properties/doc.go deleted file mode 100644 index 7c7979315..000000000 --- a/vendor/github.com/magiconair/properties/doc.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package properties provides functions for reading and writing -// ISO-8859-1 and UTF-8 encoded .properties files and has -// support for recursive property expansion. -// -// Java properties files are ISO-8859-1 encoded and use Unicode -// literals for characters outside the ISO character set. Unicode -// literals can be used in UTF-8 encoded properties files but -// aren't necessary. 
-// -// To load a single properties file use MustLoadFile(): -// -// p := properties.MustLoadFile(filename, properties.UTF8) -// -// To load multiple properties files use MustLoadFiles() -// which loads the files in the given order and merges the -// result. Missing properties files can be ignored if the -// 'ignoreMissing' flag is set to true. -// -// Filenames can contain environment variables which are expanded -// before loading. -// -// f1 := "/etc/myapp/myapp.conf" -// f2 := "/home/${USER}/myapp.conf" -// p := MustLoadFiles([]string{f1, f2}, properties.UTF8, true) -// -// All of the different key/value delimiters ' ', ':' and '=' are -// supported as well as the comment characters '!' and '#' and -// multi-line values. -// -// ! this is a comment -// # and so is this -// -// # the following expressions are equal -// key value -// key=value -// key:value -// key = value -// key : value -// key = val\ -// ue -// -// Properties stores all comments preceding a key and provides -// GetComments() and SetComments() methods to retrieve and -// update them. The convenience functions GetComment() and -// SetComment() allow access to the last comment. The -// WriteComment() method writes properties files including -// the comments and with the keys in the original order. -// This can be used for sanitizing properties files. -// -// Property expansion is recursive and circular references -// and malformed expressions are not allowed and cause an -// error. Expansion of environment variables is supported. -// -// # standard property -// key = value -// -// # property expansion: key2 = value -// key2 = ${key} -// -// # recursive expansion: key3 = value -// key3 = ${key2} -// -// # circular reference (error) -// key = ${key} -// -// # malformed expression (error) -// key = ${ke -// -// # refers to the users' home dir -// home = ${HOME} -// -// # local key takes precedence over env var: u = foo -// USER = foo -// u = ${USER} -// -// The default property expansion format is ${key} but can be -// changed by setting different pre- and postfix values on the -// Properties object. -// -// p := properties.NewProperties() -// p.Prefix = "#[" -// p.Postfix = "]#" -// -// Properties provides convenience functions for getting typed -// values with default values if the key does not exist or the -// type conversion failed. -// -// # Returns true if the value is either "1", "on", "yes" or "true" -// # Returns false for every other value and the default value if -// # the key does not exist. -// v = p.GetBool("key", false) -// -// # Returns the value if the key exists and the format conversion -// # was successful. Otherwise, the default value is returned. -// v = p.GetInt64("key", 999) -// v = p.GetUint64("key", 999) -// v = p.GetFloat64("key", 123.0) -// v = p.GetString("key", "def") -// v = p.GetDuration("key", 999) -// -// As an alternative properties may be applied with the standard -// library's flag implementation at any time. -// -// # Standard configuration -// v = flag.Int("key", 999, "help message") -// flag.Parse() -// -// # Merge p into the flag set -// p.MustFlag(flag.CommandLine) -// -// Properties provides several MustXXX() convenience functions -// which will terminate the app if an error occurs. The behavior -// of the failure is configurable and the default is to call -// log.Fatal(err). To have the MustXXX() functions panic instead -// of logging the error set a different ErrorHandler before -// you use the Properties package. 
-// -// properties.ErrorHandler = properties.PanicHandler -// -// # Will panic instead of logging an error -// p := properties.MustLoadFile("config.properties") -// -// You can also provide your own ErrorHandler function. The only requirement -// is that the error handler function must exit after handling the error. -// -// properties.ErrorHandler = func(err error) { -// fmt.Println(err) -// os.Exit(1) -// } -// -// # Will write to stdout and then exit -// p := properties.MustLoadFile("config.properties") -// -// Properties can also be loaded into a struct via the `Decode` -// method, e.g. -// -// type S struct { -// A string `properties:"a,default=foo"` -// D time.Duration `properties:"timeout,default=5s"` -// E time.Time `properties:"expires,layout=2006-01-02,default=2015-01-01"` -// } -// -// See `Decode()` method for the full documentation. -// -// The following documents provide a description of the properties -// file format. -// -// http://en.wikipedia.org/wiki/.properties -// -// http://docs.oracle.com/javase/7/docs/api/java/util/Properties.html#load%28java.io.Reader%29 -package properties diff --git a/vendor/github.com/magiconair/properties/integrate.go b/vendor/github.com/magiconair/properties/integrate.go deleted file mode 100644 index 35d0ae97b..000000000 --- a/vendor/github.com/magiconair/properties/integrate.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -import "flag" - -// MustFlag sets flags that are skipped by dst.Parse when p contains -// the respective key for flag.Flag.Name. -// -// It's use is recommended with command line arguments as in: -// -// flag.Parse() -// p.MustFlag(flag.CommandLine) -func (p *Properties) MustFlag(dst *flag.FlagSet) { - m := make(map[string]*flag.Flag) - dst.VisitAll(func(f *flag.Flag) { - m[f.Name] = f - }) - dst.Visit(func(f *flag.Flag) { - delete(m, f.Name) // overridden - }) - - for name, f := range m { - v, ok := p.Get(name) - if !ok { - continue - } - - if err := f.Value.Set(v); err != nil { - ErrorHandler(err) - } - } -} diff --git a/vendor/github.com/magiconair/properties/lex.go b/vendor/github.com/magiconair/properties/lex.go deleted file mode 100644 index 3d15a1f6e..000000000 --- a/vendor/github.com/magiconair/properties/lex.go +++ /dev/null @@ -1,395 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// -// Parts of the lexer are from the template/text/parser package -// For these parts the following applies: -// -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file of the go 1.2 -// distribution. - -package properties - -import ( - "fmt" - "strconv" - "strings" - "unicode/utf8" -) - -// item represents a token or text string returned from the scanner. -type item struct { - typ itemType // The type of this item. - pos int // The starting position, in bytes, of this item in the input string. - val string // The value of this item. -} - -func (i item) String() string { - switch { - case i.typ == itemEOF: - return "EOF" - case i.typ == itemError: - return i.val - case len(i.val) > 10: - return fmt.Sprintf("%.10q...", i.val) - } - return fmt.Sprintf("%q", i.val) -} - -// itemType identifies the type of lex items. 
-type itemType int - -const ( - itemError itemType = iota // error occurred; value is text of error - itemEOF - itemKey // a key - itemValue // a value - itemComment // a comment -) - -// defines a constant for EOF -const eof = -1 - -// permitted whitespace characters space, FF and TAB -const whitespace = " \f\t" - -// stateFn represents the state of the scanner as a function that returns the next state. -type stateFn func(*lexer) stateFn - -// lexer holds the state of the scanner. -type lexer struct { - input string // the string being scanned - state stateFn // the next lexing function to enter - pos int // current position in the input - start int // start position of this item - width int // width of last rune read from input - lastPos int // position of most recent item returned by nextItem - runes []rune // scanned runes for this item - items chan item // channel of scanned items -} - -// next returns the next rune in the input. -func (l *lexer) next() rune { - if l.pos >= len(l.input) { - l.width = 0 - return eof - } - r, w := utf8.DecodeRuneInString(l.input[l.pos:]) - l.width = w - l.pos += l.width - return r -} - -// peek returns but does not consume the next rune in the input. -func (l *lexer) peek() rune { - r := l.next() - l.backup() - return r -} - -// backup steps back one rune. Can only be called once per call of next. -func (l *lexer) backup() { - l.pos -= l.width -} - -// emit passes an item back to the client. -func (l *lexer) emit(t itemType) { - i := item{t, l.start, string(l.runes)} - l.items <- i - l.start = l.pos - l.runes = l.runes[:0] -} - -// ignore skips over the pending input before this point. -func (l *lexer) ignore() { - l.start = l.pos -} - -// appends the rune to the current value -func (l *lexer) appendRune(r rune) { - l.runes = append(l.runes, r) -} - -// accept consumes the next rune if it's from the valid set. -func (l *lexer) accept(valid string) bool { - if strings.ContainsRune(valid, l.next()) { - return true - } - l.backup() - return false -} - -// acceptRun consumes a run of runes from the valid set. -func (l *lexer) acceptRun(valid string) { - for strings.ContainsRune(valid, l.next()) { - } - l.backup() -} - -// lineNumber reports which line we're on, based on the position of -// the previous item returned by nextItem. Doing it this way -// means we don't have to worry about peek double counting. -func (l *lexer) lineNumber() int { - return 1 + strings.Count(l.input[:l.lastPos], "\n") -} - -// errorf returns an error token and terminates the scan by passing -// back a nil pointer that will be the next state, terminating l.nextItem. -func (l *lexer) errorf(format string, args ...interface{}) stateFn { - l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)} - return nil -} - -// nextItem returns the next item from the input. -func (l *lexer) nextItem() item { - i := <-l.items - l.lastPos = i.pos - return i -} - -// lex creates a new scanner for the input string. -func lex(input string) *lexer { - l := &lexer{ - input: input, - items: make(chan item), - runes: make([]rune, 0, 32), - } - go l.run() - return l -} - -// run runs the state machine for the lexer. -func (l *lexer) run() { - for l.state = lexBeforeKey(l); l.state != nil; { - l.state = l.state(l) - } -} - -// state functions - -// lexBeforeKey scans until a key begins. 
-func lexBeforeKey(l *lexer) stateFn { - switch r := l.next(); { - case isEOF(r): - l.emit(itemEOF) - return nil - - case isEOL(r): - l.ignore() - return lexBeforeKey - - case isComment(r): - return lexComment - - case isWhitespace(r): - l.ignore() - return lexBeforeKey - - default: - l.backup() - return lexKey - } -} - -// lexComment scans a comment line. The comment character has already been scanned. -func lexComment(l *lexer) stateFn { - l.acceptRun(whitespace) - l.ignore() - for { - switch r := l.next(); { - case isEOF(r): - l.ignore() - l.emit(itemEOF) - return nil - case isEOL(r): - l.emit(itemComment) - return lexBeforeKey - default: - l.appendRune(r) - } - } -} - -// lexKey scans the key up to a delimiter -func lexKey(l *lexer) stateFn { - var r rune - -Loop: - for { - switch r = l.next(); { - - case isEscape(r): - err := l.scanEscapeSequence() - if err != nil { - return l.errorf(err.Error()) - } - - case isEndOfKey(r): - l.backup() - break Loop - - case isEOF(r): - break Loop - - default: - l.appendRune(r) - } - } - - if len(l.runes) > 0 { - l.emit(itemKey) - } - - if isEOF(r) { - l.emit(itemEOF) - return nil - } - - return lexBeforeValue -} - -// lexBeforeValue scans the delimiter between key and value. -// Leading and trailing whitespace is ignored. -// We expect to be just after the key. -func lexBeforeValue(l *lexer) stateFn { - l.acceptRun(whitespace) - l.accept(":=") - l.acceptRun(whitespace) - l.ignore() - return lexValue -} - -// lexValue scans text until the end of the line. We expect to be just after the delimiter. -func lexValue(l *lexer) stateFn { - for { - switch r := l.next(); { - case isEscape(r): - if isEOL(l.peek()) { - l.next() - l.acceptRun(whitespace) - } else { - err := l.scanEscapeSequence() - if err != nil { - return l.errorf(err.Error()) - } - } - - case isEOL(r): - l.emit(itemValue) - l.ignore() - return lexBeforeKey - - case isEOF(r): - l.emit(itemValue) - l.emit(itemEOF) - return nil - - default: - l.appendRune(r) - } - } -} - -// scanEscapeSequence scans either one of the escaped characters -// or a unicode literal. We expect to be after the escape character. -func (l *lexer) scanEscapeSequence() error { - switch r := l.next(); { - - case isEscapedCharacter(r): - l.appendRune(decodeEscapedCharacter(r)) - return nil - - case atUnicodeLiteral(r): - return l.scanUnicodeLiteral() - - case isEOF(r): - return fmt.Errorf("premature EOF") - - // silently drop the escape character and append the rune as is - default: - l.appendRune(r) - return nil - } -} - -// scans a unicode literal in the form \uXXXX. We expect to be after the \u. -func (l *lexer) scanUnicodeLiteral() error { - // scan the digits - d := make([]rune, 4) - for i := 0; i < 4; i++ { - d[i] = l.next() - if d[i] == eof || !strings.ContainsRune("0123456789abcdefABCDEF", d[i]) { - return fmt.Errorf("invalid unicode literal") - } - } - - // decode the digits into a rune - r, err := strconv.ParseInt(string(d), 16, 0) - if err != nil { - return err - } - - l.appendRune(rune(r)) - return nil -} - -// decodeEscapedCharacter returns the unescaped rune. We expect to be after the escape character. -func decodeEscapedCharacter(r rune) rune { - switch r { - case 'f': - return '\f' - case 'n': - return '\n' - case 'r': - return '\r' - case 't': - return '\t' - default: - return r - } -} - -// atUnicodeLiteral reports whether we are at a unicode literal. -// The escape character has already been consumed. 
-func atUnicodeLiteral(r rune) bool { - return r == 'u' -} - -// isComment reports whether we are at the start of a comment. -func isComment(r rune) bool { - return r == '#' || r == '!' -} - -// isEndOfKey reports whether the rune terminates the current key. -func isEndOfKey(r rune) bool { - return strings.ContainsRune(" \f\t\r\n:=", r) -} - -// isEOF reports whether we are at EOF. -func isEOF(r rune) bool { - return r == eof -} - -// isEOL reports whether we are at a new line character. -func isEOL(r rune) bool { - return r == '\n' || r == '\r' -} - -// isEscape reports whether the rune is the escape character which -// prefixes unicode literals and other escaped characters. -func isEscape(r rune) bool { - return r == '\\' -} - -// isEscapedCharacter reports whether we are at one of the characters that need escaping. -// The escape character has already been consumed. -func isEscapedCharacter(r rune) bool { - return strings.ContainsRune(" :=fnrt", r) -} - -// isWhitespace reports whether the rune is a whitespace character. -func isWhitespace(r rune) bool { - return strings.ContainsRune(whitespace, r) -} diff --git a/vendor/github.com/magiconair/properties/load.go b/vendor/github.com/magiconair/properties/load.go deleted file mode 100644 index 635368dc8..000000000 --- a/vendor/github.com/magiconair/properties/load.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -import ( - "fmt" - "io/ioutil" - "net/http" - "os" - "strings" -) - -// Encoding specifies encoding of the input data. -type Encoding uint - -const ( - // utf8Default is a private placeholder for the zero value of Encoding to - // ensure that it has the correct meaning. UTF8 is the default encoding but - // was assigned a non-zero value which cannot be changed without breaking - // existing code. Clients should continue to use the public constants. - utf8Default Encoding = iota - - // UTF8 interprets the input data as UTF-8. - UTF8 - - // ISO_8859_1 interprets the input data as ISO-8859-1. - ISO_8859_1 -) - -type Loader struct { - // Encoding determines how the data from files and byte buffers - // is interpreted. For URLs the Content-Type header is used - // to determine the encoding of the data. - Encoding Encoding - - // DisableExpansion configures the property expansion of the - // returned property object. When set to true, the property values - // will not be expanded and the Property object will not be checked - // for invalid expansion expressions. - DisableExpansion bool - - // IgnoreMissing configures whether missing files or URLs which return - // 404 are reported as errors. When set to true, missing files and 404 - // status codes are not reported as errors. - IgnoreMissing bool -} - -// Load reads a buffer into a Properties struct. -func (l *Loader) LoadBytes(buf []byte) (*Properties, error) { - return l.loadBytes(buf, l.Encoding) -} - -// LoadAll reads the content of multiple URLs or files in the given order into -// a Properties struct. If IgnoreMissing is true then a 404 status code or -// missing file will not be reported as error. Encoding sets the encoding for -// files. For the URLs see LoadURL for the Content-Type header and the -// encoding. 
-func (l *Loader) LoadAll(names []string) (*Properties, error) { - all := NewProperties() - for _, name := range names { - n, err := expandName(name) - if err != nil { - return nil, err - } - - var p *Properties - switch { - case strings.HasPrefix(n, "http://"): - p, err = l.LoadURL(n) - case strings.HasPrefix(n, "https://"): - p, err = l.LoadURL(n) - default: - p, err = l.LoadFile(n) - } - if err != nil { - return nil, err - } - all.Merge(p) - } - - all.DisableExpansion = l.DisableExpansion - if all.DisableExpansion { - return all, nil - } - return all, all.check() -} - -// LoadFile reads a file into a Properties struct. -// If IgnoreMissing is true then a missing file will not be -// reported as error. -func (l *Loader) LoadFile(filename string) (*Properties, error) { - data, err := ioutil.ReadFile(filename) - if err != nil { - if l.IgnoreMissing && os.IsNotExist(err) { - LogPrintf("properties: %s not found. skipping", filename) - return NewProperties(), nil - } - return nil, err - } - return l.loadBytes(data, l.Encoding) -} - -// LoadURL reads the content of the URL into a Properties struct. -// -// The encoding is determined via the Content-Type header which -// should be set to 'text/plain'. If the 'charset' parameter is -// missing, 'iso-8859-1' or 'latin1' the encoding is set to -// ISO-8859-1. If the 'charset' parameter is set to 'utf-8' the -// encoding is set to UTF-8. A missing content type header is -// interpreted as 'text/plain; charset=utf-8'. -func (l *Loader) LoadURL(url string) (*Properties, error) { - resp, err := http.Get(url) - if err != nil { - return nil, fmt.Errorf("properties: error fetching %q. %s", url, err) - } - defer resp.Body.Close() - - if resp.StatusCode == 404 && l.IgnoreMissing { - LogPrintf("properties: %s returned %d. skipping", url, resp.StatusCode) - return NewProperties(), nil - } - - if resp.StatusCode != 200 { - return nil, fmt.Errorf("properties: %s returned %d", url, resp.StatusCode) - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("properties: %s error reading response. %s", url, err) - } - - ct := resp.Header.Get("Content-Type") - ct = strings.Join(strings.Fields(ct), "") - var enc Encoding - switch strings.ToLower(ct) { - case "text/plain", "text/plain;charset=iso-8859-1", "text/plain;charset=latin1": - enc = ISO_8859_1 - case "", "text/plain;charset=utf-8": - enc = UTF8 - default: - return nil, fmt.Errorf("properties: invalid content type %s", ct) - } - - return l.loadBytes(body, enc) -} - -func (l *Loader) loadBytes(buf []byte, enc Encoding) (*Properties, error) { - p, err := parse(convert(buf, enc)) - if err != nil { - return nil, err - } - p.DisableExpansion = l.DisableExpansion - if p.DisableExpansion { - return p, nil - } - return p, p.check() -} - -// Load reads a buffer into a Properties struct. -func Load(buf []byte, enc Encoding) (*Properties, error) { - l := &Loader{Encoding: enc} - return l.LoadBytes(buf) -} - -// LoadString reads an UTF8 string into a properties struct. -func LoadString(s string) (*Properties, error) { - l := &Loader{Encoding: UTF8} - return l.LoadBytes([]byte(s)) -} - -// LoadMap creates a new Properties struct from a string map. -func LoadMap(m map[string]string) *Properties { - p := NewProperties() - for k, v := range m { - p.Set(k, v) - } - return p -} - -// LoadFile reads a file into a Properties struct. 
-func LoadFile(filename string, enc Encoding) (*Properties, error) { - l := &Loader{Encoding: enc} - return l.LoadAll([]string{filename}) -} - -// LoadFiles reads multiple files in the given order into -// a Properties struct. If 'ignoreMissing' is true then -// non-existent files will not be reported as error. -func LoadFiles(filenames []string, enc Encoding, ignoreMissing bool) (*Properties, error) { - l := &Loader{Encoding: enc, IgnoreMissing: ignoreMissing} - return l.LoadAll(filenames) -} - -// LoadURL reads the content of the URL into a Properties struct. -// See Loader#LoadURL for details. -func LoadURL(url string) (*Properties, error) { - l := &Loader{Encoding: UTF8} - return l.LoadAll([]string{url}) -} - -// LoadURLs reads the content of multiple URLs in the given order into a -// Properties struct. If IgnoreMissing is true then a 404 status code will -// not be reported as error. See Loader#LoadURL for the Content-Type header -// and the encoding. -func LoadURLs(urls []string, ignoreMissing bool) (*Properties, error) { - l := &Loader{Encoding: UTF8, IgnoreMissing: ignoreMissing} - return l.LoadAll(urls) -} - -// LoadAll reads the content of multiple URLs or files in the given order into a -// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will -// not be reported as error. Encoding sets the encoding for files. For the URLs please see -// LoadURL for the Content-Type header and the encoding. -func LoadAll(names []string, enc Encoding, ignoreMissing bool) (*Properties, error) { - l := &Loader{Encoding: enc, IgnoreMissing: ignoreMissing} - return l.LoadAll(names) -} - -// MustLoadString reads an UTF8 string into a Properties struct and -// panics on error. -func MustLoadString(s string) *Properties { - return must(LoadString(s)) -} - -// MustLoadFile reads a file into a Properties struct and -// panics on error. -func MustLoadFile(filename string, enc Encoding) *Properties { - return must(LoadFile(filename, enc)) -} - -// MustLoadFiles reads multiple files in the given order into -// a Properties struct and panics on error. If 'ignoreMissing' -// is true then non-existent files will not be reported as error. -func MustLoadFiles(filenames []string, enc Encoding, ignoreMissing bool) *Properties { - return must(LoadFiles(filenames, enc, ignoreMissing)) -} - -// MustLoadURL reads the content of a URL into a Properties struct and -// panics on error. -func MustLoadURL(url string) *Properties { - return must(LoadURL(url)) -} - -// MustLoadURLs reads the content of multiple URLs in the given order into a -// Properties struct and panics on error. If 'ignoreMissing' is true then a 404 -// status code will not be reported as error. -func MustLoadURLs(urls []string, ignoreMissing bool) *Properties { - return must(LoadURLs(urls, ignoreMissing)) -} - -// MustLoadAll reads the content of multiple URLs or files in the given order into a -// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will -// not be reported as error. Encoding sets the encoding for files. For the URLs please see -// LoadURL for the Content-Type header and the encoding. It panics on error. -func MustLoadAll(names []string, enc Encoding, ignoreMissing bool) *Properties { - return must(LoadAll(names, enc, ignoreMissing)) -} - -func must(p *Properties, err error) *Properties { - if err != nil { - ErrorHandler(err) - } - return p -} - -// expandName expands ${ENV_VAR} expressions in a name. 
-// If the environment variable does not exist then it will be replaced -// with an empty string. Malformed expressions like "${ENV_VAR" will -// be reported as error. -func expandName(name string) (string, error) { - return expand(name, []string{}, "${", "}", make(map[string]string)) -} - -// Interprets a byte buffer either as an ISO-8859-1 or UTF-8 encoded string. -// For ISO-8859-1 we can convert each byte straight into a rune since the -// first 256 unicode code points cover ISO-8859-1. -func convert(buf []byte, enc Encoding) string { - switch enc { - case utf8Default, UTF8: - return string(buf) - case ISO_8859_1: - runes := make([]rune, len(buf)) - for i, b := range buf { - runes[i] = rune(b) - } - return string(runes) - default: - ErrorHandler(fmt.Errorf("unsupported encoding %v", enc)) - } - panic("ErrorHandler should exit") -} diff --git a/vendor/github.com/magiconair/properties/parser.go b/vendor/github.com/magiconair/properties/parser.go deleted file mode 100644 index fccfd39f6..000000000 --- a/vendor/github.com/magiconair/properties/parser.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -import ( - "fmt" - "runtime" -) - -type parser struct { - lex *lexer -} - -func parse(input string) (properties *Properties, err error) { - p := &parser{lex: lex(input)} - defer p.recover(&err) - - properties = NewProperties() - key := "" - comments := []string{} - - for { - token := p.expectOneOf(itemComment, itemKey, itemEOF) - switch token.typ { - case itemEOF: - goto done - case itemComment: - comments = append(comments, token.val) - continue - case itemKey: - key = token.val - if _, ok := properties.m[key]; !ok { - properties.k = append(properties.k, key) - } - } - - token = p.expectOneOf(itemValue, itemEOF) - if len(comments) > 0 { - properties.c[key] = comments - comments = []string{} - } - switch token.typ { - case itemEOF: - properties.m[key] = "" - goto done - case itemValue: - properties.m[key] = token.val - } - } - -done: - return properties, nil -} - -func (p *parser) errorf(format string, args ...interface{}) { - format = fmt.Sprintf("properties: Line %d: %s", p.lex.lineNumber(), format) - panic(fmt.Errorf(format, args...)) -} - -func (p *parser) expectOneOf(expected ...itemType) (token item) { - token = p.lex.nextItem() - for _, v := range expected { - if token.typ == v { - return token - } - } - p.unexpected(token) - panic("unexpected token") -} - -func (p *parser) unexpected(token item) { - p.errorf(token.String()) -} - -// recover is the handler that turns panics into returns from the top level of Parse. -func (p *parser) recover(errp *error) { - e := recover() - if e != nil { - if _, ok := e.(runtime.Error); ok { - panic(e) - } - *errp = e.(error) - } -} diff --git a/vendor/github.com/magiconair/properties/properties.go b/vendor/github.com/magiconair/properties/properties.go deleted file mode 100644 index fb2f7b404..000000000 --- a/vendor/github.com/magiconair/properties/properties.go +++ /dev/null @@ -1,848 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -// BUG(frank): Set() does not check for invalid unicode literals since this is currently handled by the lexer. -// BUG(frank): Write() does not allow to configure the newline character. 
Therefore, on Windows LF is used. - -import ( - "bytes" - "fmt" - "io" - "log" - "os" - "regexp" - "sort" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -const maxExpansionDepth = 64 - -// ErrorHandlerFunc defines the type of function which handles failures -// of the MustXXX() functions. An error handler function must exit -// the application after handling the error. -type ErrorHandlerFunc func(error) - -// ErrorHandler is the function which handles failures of the MustXXX() -// functions. The default is LogFatalHandler. -var ErrorHandler ErrorHandlerFunc = LogFatalHandler - -// LogHandlerFunc defines the function prototype for logging errors. -type LogHandlerFunc func(fmt string, args ...interface{}) - -// LogPrintf defines a log handler which uses log.Printf. -var LogPrintf LogHandlerFunc = log.Printf - -// LogFatalHandler handles the error by logging a fatal error and exiting. -func LogFatalHandler(err error) { - log.Fatal(err) -} - -// PanicHandler handles the error by panicking. -func PanicHandler(err error) { - panic(err) -} - -// ----------------------------------------------------------------------------- - -// A Properties contains the key/value pairs from the properties input. -// All values are stored in unexpanded form and are expanded at runtime -type Properties struct { - // Pre-/Postfix for property expansion. - Prefix string - Postfix string - - // DisableExpansion controls the expansion of properties on Get() - // and the check for circular references on Set(). When set to - // true Properties behaves like a simple key/value store and does - // not check for circular references on Get() or on Set(). - DisableExpansion bool - - // Stores the key/value pairs - m map[string]string - - // Stores the comments per key. - c map[string][]string - - // Stores the keys in order of appearance. - k []string - - // WriteSeparator specifies the separator of key and value while writing the properties. - WriteSeparator string -} - -// NewProperties creates a new Properties struct with the default -// configuration for "${key}" expressions. -func NewProperties() *Properties { - return &Properties{ - Prefix: "${", - Postfix: "}", - m: map[string]string{}, - c: map[string][]string{}, - k: []string{}, - } -} - -// Load reads a buffer into the given Properties struct. -func (p *Properties) Load(buf []byte, enc Encoding) error { - l := &Loader{Encoding: enc, DisableExpansion: p.DisableExpansion} - newProperties, err := l.LoadBytes(buf) - if err != nil { - return err - } - p.Merge(newProperties) - return nil -} - -// Get returns the expanded value for the given key if exists. -// Otherwise, ok is false. -func (p *Properties) Get(key string) (value string, ok bool) { - v, ok := p.m[key] - if p.DisableExpansion { - return v, ok - } - if !ok { - return "", false - } - - expanded, err := p.expand(key, v) - - // we guarantee that the expanded value is free of - // circular references and malformed expressions - // so we panic if we still get an error here. - if err != nil { - ErrorHandler(err) - } - - return expanded, true -} - -// MustGet returns the expanded value for the given key if exists. -// Otherwise, it panics. -func (p *Properties) MustGet(key string) string { - if v, ok := p.Get(key); ok { - return v - } - ErrorHandler(invalidKeyError(key)) - panic("ErrorHandler should exit") -} - -// ---------------------------------------------------------------------------- - -// ClearComments removes the comments for all keys. 
-func (p *Properties) ClearComments() { - p.c = map[string][]string{} -} - -// ---------------------------------------------------------------------------- - -// GetComment returns the last comment before the given key or an empty string. -func (p *Properties) GetComment(key string) string { - comments, ok := p.c[key] - if !ok || len(comments) == 0 { - return "" - } - return comments[len(comments)-1] -} - -// ---------------------------------------------------------------------------- - -// GetComments returns all comments that appeared before the given key or nil. -func (p *Properties) GetComments(key string) []string { - if comments, ok := p.c[key]; ok { - return comments - } - return nil -} - -// ---------------------------------------------------------------------------- - -// SetComment sets the comment for the key. -func (p *Properties) SetComment(key, comment string) { - p.c[key] = []string{comment} -} - -// ---------------------------------------------------------------------------- - -// SetComments sets the comments for the key. If the comments are nil then -// all comments for this key are deleted. -func (p *Properties) SetComments(key string, comments []string) { - if comments == nil { - delete(p.c, key) - return - } - p.c[key] = comments -} - -// ---------------------------------------------------------------------------- - -// GetBool checks if the expanded value is one of '1', 'yes', -// 'true' or 'on' if the key exists. The comparison is case-insensitive. -// If the key does not exist the default value is returned. -func (p *Properties) GetBool(key string, def bool) bool { - v, err := p.getBool(key) - if err != nil { - return def - } - return v -} - -// MustGetBool checks if the expanded value is one of '1', 'yes', -// 'true' or 'on' if the key exists. The comparison is case-insensitive. -// If the key does not exist the function panics. -func (p *Properties) MustGetBool(key string) bool { - v, err := p.getBool(key) - if err != nil { - ErrorHandler(err) - } - return v -} - -func (p *Properties) getBool(key string) (value bool, err error) { - if v, ok := p.Get(key); ok { - return boolVal(v), nil - } - return false, invalidKeyError(key) -} - -func boolVal(v string) bool { - v = strings.ToLower(v) - return v == "1" || v == "true" || v == "yes" || v == "on" -} - -// ---------------------------------------------------------------------------- - -// GetDuration parses the expanded value as an time.Duration (in ns) if the -// key exists. If key does not exist or the value cannot be parsed the default -// value is returned. In almost all cases you want to use GetParsedDuration(). -func (p *Properties) GetDuration(key string, def time.Duration) time.Duration { - v, err := p.getInt64(key) - if err != nil { - return def - } - return time.Duration(v) -} - -// MustGetDuration parses the expanded value as an time.Duration (in ns) if -// the key exists. If key does not exist or the value cannot be parsed the -// function panics. In almost all cases you want to use MustGetParsedDuration(). -func (p *Properties) MustGetDuration(key string) time.Duration { - v, err := p.getInt64(key) - if err != nil { - ErrorHandler(err) - } - return time.Duration(v) -} - -// ---------------------------------------------------------------------------- - -// GetParsedDuration parses the expanded value with time.ParseDuration() if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. 
-func (p *Properties) GetParsedDuration(key string, def time.Duration) time.Duration { - s, ok := p.Get(key) - if !ok { - return def - } - v, err := time.ParseDuration(s) - if err != nil { - return def - } - return v -} - -// MustGetParsedDuration parses the expanded value with time.ParseDuration() if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -func (p *Properties) MustGetParsedDuration(key string) time.Duration { - s, ok := p.Get(key) - if !ok { - ErrorHandler(invalidKeyError(key)) - } - v, err := time.ParseDuration(s) - if err != nil { - ErrorHandler(err) - } - return v -} - -// ---------------------------------------------------------------------------- - -// GetFloat64 parses the expanded value as a float64 if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. -func (p *Properties) GetFloat64(key string, def float64) float64 { - v, err := p.getFloat64(key) - if err != nil { - return def - } - return v -} - -// MustGetFloat64 parses the expanded value as a float64 if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -func (p *Properties) MustGetFloat64(key string) float64 { - v, err := p.getFloat64(key) - if err != nil { - ErrorHandler(err) - } - return v -} - -func (p *Properties) getFloat64(key string) (value float64, err error) { - if v, ok := p.Get(key); ok { - value, err = strconv.ParseFloat(v, 64) - if err != nil { - return 0, err - } - return value, nil - } - return 0, invalidKeyError(key) -} - -// ---------------------------------------------------------------------------- - -// GetInt parses the expanded value as an int if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. If the value does not fit into an int the -// function panics with an out of range error. -func (p *Properties) GetInt(key string, def int) int { - v, err := p.getInt64(key) - if err != nil { - return def - } - return intRangeCheck(key, v) -} - -// MustGetInt parses the expanded value as an int if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -// If the value does not fit into an int the function panics with -// an out of range error. -func (p *Properties) MustGetInt(key string) int { - v, err := p.getInt64(key) - if err != nil { - ErrorHandler(err) - } - return intRangeCheck(key, v) -} - -// ---------------------------------------------------------------------------- - -// GetInt64 parses the expanded value as an int64 if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. -func (p *Properties) GetInt64(key string, def int64) int64 { - v, err := p.getInt64(key) - if err != nil { - return def - } - return v -} - -// MustGetInt64 parses the expanded value as an int if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -func (p *Properties) MustGetInt64(key string) int64 { - v, err := p.getInt64(key) - if err != nil { - ErrorHandler(err) - } - return v -} - -func (p *Properties) getInt64(key string) (value int64, err error) { - if v, ok := p.Get(key); ok { - value, err = strconv.ParseInt(v, 10, 64) - if err != nil { - return 0, err - } - return value, nil - } - return 0, invalidKeyError(key) -} - -// ---------------------------------------------------------------------------- - -// GetUint parses the expanded value as an uint if the key exists. 
-// If key does not exist or the value cannot be parsed the default
-// value is returned. If the value does not fit into a uint the
-// function panics with an out of range error.
-func (p *Properties) GetUint(key string, def uint) uint {
-	v, err := p.getUint64(key)
-	if err != nil {
-		return def
-	}
-	return uintRangeCheck(key, v)
-}
-
-// MustGetUint parses the expanded value as a uint if the key exists.
-// If key does not exist or the value cannot be parsed the function panics.
-// If the value does not fit into a uint the function panics with
-// an out of range error.
-func (p *Properties) MustGetUint(key string) uint {
-	v, err := p.getUint64(key)
-	if err != nil {
-		ErrorHandler(err)
-	}
-	return uintRangeCheck(key, v)
-}
-
-// ----------------------------------------------------------------------------
-
-// GetUint64 parses the expanded value as a uint64 if the key exists.
-// If key does not exist or the value cannot be parsed the default
-// value is returned.
-func (p *Properties) GetUint64(key string, def uint64) uint64 {
-	v, err := p.getUint64(key)
-	if err != nil {
-		return def
-	}
-	return v
-}
-
-// MustGetUint64 parses the expanded value as a uint64 if the key exists.
-// If key does not exist or the value cannot be parsed the function panics.
-func (p *Properties) MustGetUint64(key string) uint64 {
-	v, err := p.getUint64(key)
-	if err != nil {
-		ErrorHandler(err)
-	}
-	return v
-}
-
-func (p *Properties) getUint64(key string) (value uint64, err error) {
-	if v, ok := p.Get(key); ok {
-		value, err = strconv.ParseUint(v, 10, 64)
-		if err != nil {
-			return 0, err
-		}
-		return value, nil
-	}
-	return 0, invalidKeyError(key)
-}
-
-// ----------------------------------------------------------------------------
-
-// GetString returns the expanded value for the given key if exists or
-// the default value otherwise.
-func (p *Properties) GetString(key, def string) string {
-	if v, ok := p.Get(key); ok {
-		return v
-	}
-	return def
-}
-
-// MustGetString returns the expanded value for the given key if exists or
-// panics otherwise.
-func (p *Properties) MustGetString(key string) string {
-	if v, ok := p.Get(key); ok {
-		return v
-	}
-	ErrorHandler(invalidKeyError(key))
-	panic("ErrorHandler should exit")
-}
-
-// ----------------------------------------------------------------------------
-
-// Filter returns a new properties object which contains all properties
-// for which the key matches the pattern.
-func (p *Properties) Filter(pattern string) (*Properties, error) {
-	re, err := regexp.Compile(pattern)
-	if err != nil {
-		return nil, err
-	}
-
-	return p.FilterRegexp(re), nil
-}
-
-// FilterRegexp returns a new properties object which contains all properties
-// for which the key matches the regular expression.
-func (p *Properties) FilterRegexp(re *regexp.Regexp) *Properties {
-	pp := NewProperties()
-	for _, k := range p.k {
-		if re.MatchString(k) {
-			// TODO(fs): we are ignoring the error which flags a circular reference.
-			// TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed)
-			pp.Set(k, p.m[k])
-		}
-	}
-	return pp
-}
-
-// FilterPrefix returns a new properties object with a subset of all keys
-// with the given prefix.
-func (p *Properties) FilterPrefix(prefix string) *Properties {
-	pp := NewProperties()
-	for _, k := range p.k {
-		if strings.HasPrefix(k, prefix) {
-			// TODO(fs): we are ignoring the error which flags a circular reference.
-			// TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed)
-			pp.Set(k, p.m[k])
-		}
-	}
-	return pp
-}
-
-// FilterStripPrefix returns a new properties object with a subset of all keys
-// with the given prefix and the prefix removed from the keys.
-func (p *Properties) FilterStripPrefix(prefix string) *Properties {
-	pp := NewProperties()
-	n := len(prefix)
-	for _, k := range p.k {
-		if len(k) > len(prefix) && strings.HasPrefix(k, prefix) {
-			// TODO(fs): we are ignoring the error which flags a circular reference.
-			// TODO(fs): since we are modifying keys I am not entirely sure whether we can create a circular reference
-			// TODO(fs): this function should probably return an error but the signature is fixed
-			pp.Set(k[n:], p.m[k])
-		}
-	}
-	return pp
-}
-
-// Len returns the number of keys.
-func (p *Properties) Len() int {
-	return len(p.m)
-}
-
-// Keys returns all keys in the same order as in the input.
-func (p *Properties) Keys() []string {
-	keys := make([]string, len(p.k))
-	copy(keys, p.k)
-	return keys
-}
-
-// Set sets the property key to the corresponding value.
-// If a value for key existed before then ok is true and prev
-// contains the previous value. If the value contains a
-// circular reference or a malformed expression then
-// an error is returned.
-// An empty key is silently ignored.
-func (p *Properties) Set(key, value string) (prev string, ok bool, err error) {
-	if key == "" {
-		return "", false, nil
-	}
-
-	// if expansion is disabled we allow circular references
-	if p.DisableExpansion {
-		prev, ok = p.Get(key)
-		p.m[key] = value
-		if !ok {
-			p.k = append(p.k, key)
-		}
-		return prev, ok, nil
-	}
-
-	// to check for a circular reference we temporarily need
-	// to set the new value. If there is an error then revert
-	// to the previous state. Only if all tests are successful
-	// then we add the key to the p.k list.
-	prev, ok = p.Get(key)
-	p.m[key] = value
-
-	// now check for a circular reference
-	_, err = p.expand(key, value)
-	if err != nil {
-
-		// revert to the previous state
-		if ok {
-			p.m[key] = prev
-		} else {
-			delete(p.m, key)
-		}
-
-		return "", false, err
-	}
-
-	if !ok {
-		p.k = append(p.k, key)
-	}
-
-	return prev, ok, nil
-}
-
-// SetValue sets property key to the default string value
-// as defined by fmt.Sprintf("%v").
-func (p *Properties) SetValue(key string, value interface{}) error {
-	_, _, err := p.Set(key, fmt.Sprintf("%v", value))
-	return err
-}
-
-// MustSet sets the property key to the corresponding value.
-// If a value for key existed before then ok is true and prev
-// contains the previous value. An empty key is silently ignored.
-func (p *Properties) MustSet(key, value string) (prev string, ok bool) {
-	prev, ok, err := p.Set(key, value)
-	if err != nil {
-		ErrorHandler(err)
-	}
-	return prev, ok
-}
-
-// String returns a string of all expanded 'key = value' pairs.
-func (p *Properties) String() string {
-	var s string
-	for _, key := range p.k {
-		value, _ := p.Get(key)
-		s = fmt.Sprintf("%s%s = %s\n", s, key, value)
-	}
-	return s
-}
-
-// Sort sorts the properties keys in alphabetical order.
-// This is helpful before writing the properties.
-func (p *Properties) Sort() {
-	sort.Strings(p.k)
-}
-
-// Write writes all unexpanded 'key = value' pairs to the given writer.
-// Write returns the number of bytes written and any write error encountered.
-func (p *Properties) Write(w io.Writer, enc Encoding) (n int, err error) {
-	return p.WriteComment(w, "", enc)
-}
-
-// WriteComment writes all unexpanded 'key = value' pairs to the given writer.
-// If prefix is not empty then comments are written with a blank line and the
-// given prefix. The prefix should be either "# " or "! " to be compatible with
-// the properties file format. Otherwise, the properties parser will not be
-// able to read the file back in. It returns the number of bytes written and
-// any write error encountered.
-func (p *Properties) WriteComment(w io.Writer, prefix string, enc Encoding) (n int, err error) {
-	var x int
-
-	for _, key := range p.k {
-		value := p.m[key]
-
-		if prefix != "" {
-			if comments, ok := p.c[key]; ok {
-				// don't print comments if they are all empty
-				allEmpty := true
-				for _, c := range comments {
-					if c != "" {
-						allEmpty = false
-						break
-					}
-				}
-
-				if !allEmpty {
-					// add a blank line between entries but not at the top
-					if len(comments) > 0 && n > 0 {
-						x, err = fmt.Fprintln(w)
-						if err != nil {
-							return
-						}
-						n += x
-					}
-
-					for _, c := range comments {
-						x, err = fmt.Fprintf(w, "%s%s\n", prefix, c)
-						if err != nil {
-							return
-						}
-						n += x
-					}
-				}
-			}
-		}
-		sep := " = "
-		if p.WriteSeparator != "" {
-			sep = p.WriteSeparator
-		}
-		x, err = fmt.Fprintf(w, "%s%s%s\n", encode(key, " :", enc), sep, encode(value, "", enc))
-		if err != nil {
-			return
-		}
-		n += x
-	}
-	return
-}
-
-// Map returns a copy of the properties as a map.
-func (p *Properties) Map() map[string]string {
-	m := make(map[string]string)
-	for k, v := range p.m {
-		m[k] = v
-	}
-	return m
-}
-
-// FilterFunc returns a copy of the properties which includes the values which passed all filters.
-func (p *Properties) FilterFunc(filters ...func(k, v string) bool) *Properties {
-	pp := NewProperties()
-outer:
-	for k, v := range p.m {
-		for _, f := range filters {
-			if !f(k, v) {
-				continue outer
-			}
-			pp.Set(k, v)
-		}
-	}
-	return pp
-}
-
-// ----------------------------------------------------------------------------
-
-// Delete removes the key and its comments.
-func (p *Properties) Delete(key string) {
-	delete(p.m, key)
-	delete(p.c, key)
-	newKeys := []string{}
-	for _, k := range p.k {
-		if k != key {
-			newKeys = append(newKeys, k)
-		}
-	}
-	p.k = newKeys
-}
-
-// Merge merges properties, comments and keys from other *Properties into p
-func (p *Properties) Merge(other *Properties) {
-	for _, k := range other.k {
-		if _, ok := p.m[k]; !ok {
-			p.k = append(p.k, k)
-		}
-	}
-	for k, v := range other.m {
-		p.m[k] = v
-	}
-	for k, v := range other.c {
-		p.c[k] = v
-	}
-}
-
-// ----------------------------------------------------------------------------
-
-// check expands all values and returns an error if a circular reference or
-// a malformed expression was found.
-func (p *Properties) check() error {
-	for key, value := range p.m {
-		if _, err := p.expand(key, value); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (p *Properties) expand(key, input string) (string, error) {
-	// no pre/postfix -> nothing to expand
-	if p.Prefix == "" && p.Postfix == "" {
-		return input, nil
-	}
-
-	return expand(input, []string{key}, p.Prefix, p.Postfix, p.m)
-}
-
-// expand recursively expands expressions of '(prefix)key(postfix)' to their corresponding values.
-// The function keeps track of the keys that were already expanded and stops if it
-// detects a circular reference or a malformed expression of the form '(prefix)key'.
-func expand(s string, keys []string, prefix, postfix string, values map[string]string) (string, error) { - if len(keys) > maxExpansionDepth { - return "", fmt.Errorf("expansion too deep") - } - - for { - start := strings.Index(s, prefix) - if start == -1 { - return s, nil - } - - keyStart := start + len(prefix) - keyLen := strings.Index(s[keyStart:], postfix) - if keyLen == -1 { - return "", fmt.Errorf("malformed expression") - } - - end := keyStart + keyLen + len(postfix) - 1 - key := s[keyStart : keyStart+keyLen] - - // fmt.Printf("s:%q pp:%q start:%d end:%d keyStart:%d keyLen:%d key:%q\n", s, prefix + "..." + postfix, start, end, keyStart, keyLen, key) - - for _, k := range keys { - if key == k { - var b bytes.Buffer - b.WriteString("circular reference in:\n") - for _, k1 := range keys { - fmt.Fprintf(&b, "%s=%s\n", k1, values[k1]) - } - return "", fmt.Errorf(b.String()) - } - } - - val, ok := values[key] - if !ok { - val = os.Getenv(key) - } - new_val, err := expand(val, append(keys, key), prefix, postfix, values) - if err != nil { - return "", err - } - s = s[:start] + new_val + s[end+1:] - } -} - -// encode encodes a UTF-8 string to ISO-8859-1 and escapes some characters. -func encode(s string, special string, enc Encoding) string { - switch enc { - case UTF8: - return encodeUtf8(s, special) - case ISO_8859_1: - return encodeIso(s, special) - default: - panic(fmt.Sprintf("unsupported encoding %v", enc)) - } -} - -func encodeUtf8(s string, special string) string { - v := "" - for pos := 0; pos < len(s); { - r, w := utf8.DecodeRuneInString(s[pos:]) - pos += w - v += escape(r, special) - } - return v -} - -func encodeIso(s string, special string) string { - var r rune - var w int - var v string - for pos := 0; pos < len(s); { - switch r, w = utf8.DecodeRuneInString(s[pos:]); { - case r < 1<<8: // single byte rune -> escape special chars only - v += escape(r, special) - case r < 1<<16: // two byte rune -> unicode literal - v += fmt.Sprintf("\\u%04x", r) - default: // more than two bytes per rune -> can't encode - v += "?" - } - pos += w - } - return v -} - -func escape(r rune, special string) string { - switch r { - case '\f': - return "\\f" - case '\n': - return "\\n" - case '\r': - return "\\r" - case '\t': - return "\\t" - case '\\': - return "\\\\" - default: - if strings.ContainsRune(special, r) { - return "\\" + string(r) - } - return string(r) - } -} - -func invalidKeyError(key string) error { - return fmt.Errorf("unknown property: %s", key) -} diff --git a/vendor/github.com/magiconair/properties/rangecheck.go b/vendor/github.com/magiconair/properties/rangecheck.go deleted file mode 100644 index dbd60b36e..000000000 --- a/vendor/github.com/magiconair/properties/rangecheck.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -import ( - "fmt" - "math" -) - -// make this a var to overwrite it in a test -var is32Bit = ^uint(0) == math.MaxUint32 - -// intRangeCheck checks if the value fits into the int type and -// panics if it does not. -func intRangeCheck(key string, v int64) int { - if is32Bit && (v < math.MinInt32 || v > math.MaxInt32) { - panic(fmt.Sprintf("Value %d for key %s out of range", v, key)) - } - return int(v) -} - -// uintRangeCheck checks if the value fits into the uint type and -// panics if it does not. 
-func uintRangeCheck(key string, v uint64) uint { - if is32Bit && v > math.MaxUint32 { - panic(fmt.Sprintf("Value %d for key %s out of range", v, key)) - } - return uint(v) -} diff --git a/vendor/github.com/moby/patternmatcher/LICENSE b/vendor/github.com/moby/patternmatcher/LICENSE deleted file mode 100644 index 6d8d58fb6..000000000 --- a/vendor/github.com/moby/patternmatcher/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2013-2018 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/moby/patternmatcher/NOTICE b/vendor/github.com/moby/patternmatcher/NOTICE deleted file mode 100644 index e5154640f..000000000 --- a/vendor/github.com/moby/patternmatcher/NOTICE +++ /dev/null @@ -1,16 +0,0 @@ -Docker -Copyright 2012-2017 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/moby/patternmatcher/patternmatcher.go b/vendor/github.com/moby/patternmatcher/patternmatcher.go deleted file mode 100644 index 37a1a59ac..000000000 --- a/vendor/github.com/moby/patternmatcher/patternmatcher.go +++ /dev/null @@ -1,474 +0,0 @@ -package patternmatcher - -import ( - "errors" - "os" - "path/filepath" - "regexp" - "strings" - "text/scanner" - "unicode/utf8" -) - -// escapeBytes is a bitmap used to check whether a character should be escaped when creating the regex. -var escapeBytes [8]byte - -// shouldEscape reports whether a rune should be escaped as part of the regex. -// -// This only includes characters that require escaping in regex but are also NOT valid filepath pattern characters. -// Additionally, '\' is not excluded because there is specific logic to properly handle this, as it's a path separator -// on Windows. -// -// Adapted from regexp::QuoteMeta in go stdlib. -// See https://cs.opensource.google/go/go/+/refs/tags/go1.17.2:src/regexp/regexp.go;l=703-715;drc=refs%2Ftags%2Fgo1.17.2 -func shouldEscape(b rune) bool { - return b < utf8.RuneSelf && escapeBytes[b%8]&(1<<(b/8)) != 0 -} - -func init() { - for _, b := range []byte(`.+()|{}$`) { - escapeBytes[b%8] |= 1 << (b / 8) - } -} - -// PatternMatcher allows checking paths against a list of patterns -type PatternMatcher struct { - patterns []*Pattern - exclusions bool -} - -// New creates a new matcher object for specific patterns that can -// be used later to match against patterns against paths -func New(patterns []string) (*PatternMatcher, error) { - pm := &PatternMatcher{ - patterns: make([]*Pattern, 0, len(patterns)), - } - for _, p := range patterns { - // Eliminate leading and trailing whitespace. - p = strings.TrimSpace(p) - if p == "" { - continue - } - p = filepath.Clean(p) - newp := &Pattern{} - if p[0] == '!' { - if len(p) == 1 { - return nil, errors.New("illegal exclusion pattern: \"!\"") - } - newp.exclusion = true - p = p[1:] - pm.exclusions = true - } - // Do some syntax checking on the pattern. - // filepath's Match() has some really weird rules that are inconsistent - // so instead of trying to dup their logic, just call Match() for its - // error state and if there is an error in the pattern return it. 
-		// If this becomes an issue we can remove this since it's really only
-		// needed in the error (syntax) case - which isn't really critical.
-		if _, err := filepath.Match(p, "."); err != nil {
-			return nil, err
-		}
-		newp.cleanedPattern = p
-		newp.dirs = strings.Split(p, string(os.PathSeparator))
-		pm.patterns = append(pm.patterns, newp)
-	}
-	return pm, nil
-}
-
-// Matches returns true if "file" matches any of the patterns
-// and isn't excluded by any of the subsequent patterns.
-//
-// The "file" argument should be a slash-delimited path.
-//
-// Matches is not safe to call concurrently.
-//
-// Deprecated: This implementation is buggy (it only checks a single parent dir
-// against the pattern) and will be removed soon. Use either
-// MatchesOrParentMatches or MatchesUsingParentResults instead.
-func (pm *PatternMatcher) Matches(file string) (bool, error) {
-	matched := false
-	file = filepath.FromSlash(file)
-	parentPath := filepath.Dir(file)
-	parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
-
-	for _, pattern := range pm.patterns {
-		// Skip evaluation if this is an inclusion and the filename
-		// already matched the pattern, or it's an exclusion and it has
-		// not matched the pattern yet.
-		if pattern.exclusion != matched {
-			continue
-		}
-
-		match, err := pattern.match(file)
-		if err != nil {
-			return false, err
-		}
-
-		if !match && parentPath != "." {
-			// Check to see if the pattern matches one of our parent dirs.
-			if len(pattern.dirs) <= len(parentPathDirs) {
-				match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator)))
-			}
-		}
-
-		if match {
-			matched = !pattern.exclusion
-		}
-	}
-
-	return matched, nil
-}
-
-// MatchesOrParentMatches returns true if "file" matches any of the patterns
-// and isn't excluded by any of the subsequent patterns.
-//
-// The "file" argument should be a slash-delimited path.
-//
-// Matches is not safe to call concurrently.
-func (pm *PatternMatcher) MatchesOrParentMatches(file string) (bool, error) {
-	matched := false
-	file = filepath.FromSlash(file)
-	parentPath := filepath.Dir(file)
-	parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
-
-	for _, pattern := range pm.patterns {
-		// Skip evaluation if this is an inclusion and the filename
-		// already matched the pattern, or it's an exclusion and it has
-		// not matched the pattern yet.
-		if pattern.exclusion != matched {
-			continue
-		}
-
-		match, err := pattern.match(file)
-		if err != nil {
-			return false, err
-		}
-
-		if !match && parentPath != "." {
-			// Check to see if the pattern matches one of our parent dirs.
-			for i := range parentPathDirs {
-				match, _ = pattern.match(strings.Join(parentPathDirs[:i+1], string(os.PathSeparator)))
-				if match {
-					break
-				}
-			}
-		}
-
-		if match {
-			matched = !pattern.exclusion
-		}
-	}
-
-	return matched, nil
-}
-
-// MatchesUsingParentResult returns true if "file" matches any of the patterns
-// and isn't excluded by any of the subsequent patterns. The functionality is
-// the same as Matches, but as an optimization, the caller keeps track of
-// whether the parent directory matched.
-//
-// The "file" argument should be a slash-delimited path.
-//
-// MatchesUsingParentResult is not safe to call concurrently.
-//
-// Deprecated: this function does not behave correctly in some cases (see
-// https://github.com/docker/buildx/issues/850).
-//
-// Use MatchesUsingParentResults instead.
-func (pm *PatternMatcher) MatchesUsingParentResult(file string, parentMatched bool) (bool, error) { - matched := parentMatched - file = filepath.FromSlash(file) - - for _, pattern := range pm.patterns { - // Skip evaluation if this is an inclusion and the filename - // already matched the pattern, or it's an exclusion and it has - // not matched the pattern yet. - if pattern.exclusion != matched { - continue - } - - match, err := pattern.match(file) - if err != nil { - return false, err - } - - if match { - matched = !pattern.exclusion - } - } - return matched, nil -} - -// MatchInfo tracks information about parent dir matches while traversing a -// filesystem. -type MatchInfo struct { - parentMatched []bool -} - -// MatchesUsingParentResults returns true if "file" matches any of the patterns -// and isn't excluded by any of the subsequent patterns. The functionality is -// the same as Matches, but as an optimization, the caller passes in -// intermediate results from matching the parent directory. -// -// The "file" argument should be a slash-delimited path. -// -// MatchesUsingParentResults is not safe to call concurrently. -func (pm *PatternMatcher) MatchesUsingParentResults(file string, parentMatchInfo MatchInfo) (bool, MatchInfo, error) { - parentMatched := parentMatchInfo.parentMatched - if len(parentMatched) != 0 && len(parentMatched) != len(pm.patterns) { - return false, MatchInfo{}, errors.New("wrong number of values in parentMatched") - } - - file = filepath.FromSlash(file) - matched := false - - matchInfo := MatchInfo{ - parentMatched: make([]bool, len(pm.patterns)), - } - for i, pattern := range pm.patterns { - match := false - // If the parent matched this pattern, we don't need to recheck. - if len(parentMatched) != 0 { - match = parentMatched[i] - } - - if !match { - // Skip evaluation if this is an inclusion and the filename - // already matched the pattern, or it's an exclusion and it has - // not matched the pattern yet. - if pattern.exclusion != matched { - continue - } - - var err error - match, err = pattern.match(file) - if err != nil { - return false, matchInfo, err - } - - // If the zero value of MatchInfo was passed in, we don't have - // any information about the parent dir's match results, and we - // apply the same logic as MatchesOrParentMatches. - if !match && len(parentMatched) == 0 { - if parentPath := filepath.Dir(file); parentPath != "." { - parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) - // Check to see if the pattern matches one of our parent dirs. - for i := range parentPathDirs { - match, _ = pattern.match(strings.Join(parentPathDirs[:i+1], string(os.PathSeparator))) - if match { - break - } - } - } - } - } - matchInfo.parentMatched[i] = match - - if match { - matched = !pattern.exclusion - } - } - return matched, matchInfo, nil -} - -// Exclusions returns true if any of the patterns define exclusions -func (pm *PatternMatcher) Exclusions() bool { - return pm.exclusions -} - -// Patterns returns array of active patterns -func (pm *PatternMatcher) Patterns() []*Pattern { - return pm.patterns -} - -// Pattern defines a single regexp used to filter file paths. 
-type Pattern struct { - matchType matchType - cleanedPattern string - dirs []string - regexp *regexp.Regexp - exclusion bool -} - -type matchType int - -const ( - unknownMatch matchType = iota - exactMatch - prefixMatch - suffixMatch - regexpMatch -) - -func (p *Pattern) String() string { - return p.cleanedPattern -} - -// Exclusion returns true if this pattern defines exclusion -func (p *Pattern) Exclusion() bool { - return p.exclusion -} - -func (p *Pattern) match(path string) (bool, error) { - if p.matchType == unknownMatch { - if err := p.compile(string(os.PathSeparator)); err != nil { - return false, filepath.ErrBadPattern - } - } - - switch p.matchType { - case exactMatch: - return path == p.cleanedPattern, nil - case prefixMatch: - // strip trailing ** - return strings.HasPrefix(path, p.cleanedPattern[:len(p.cleanedPattern)-2]), nil - case suffixMatch: - // strip leading ** - suffix := p.cleanedPattern[2:] - if strings.HasSuffix(path, suffix) { - return true, nil - } - // **/foo matches "foo" - return suffix[0] == os.PathSeparator && path == suffix[1:], nil - case regexpMatch: - return p.regexp.MatchString(path), nil - } - - return false, nil -} - -func (p *Pattern) compile(sl string) error { - regStr := "^" - pattern := p.cleanedPattern - // Go through the pattern and convert it to a regexp. - // We use a scanner so we can support utf-8 chars. - var scan scanner.Scanner - scan.Init(strings.NewReader(pattern)) - - escSL := sl - if sl == `\` { - escSL += `\` - } - - p.matchType = exactMatch - for i := 0; scan.Peek() != scanner.EOF; i++ { - ch := scan.Next() - - if ch == '*' { - if scan.Peek() == '*' { - // is some flavor of "**" - scan.Next() - - // Treat **/ as ** so eat the "/" - if string(scan.Peek()) == sl { - scan.Next() - } - - if scan.Peek() == scanner.EOF { - // is "**EOF" - to align with .gitignore just accept all - if p.matchType == exactMatch { - p.matchType = prefixMatch - } else { - regStr += ".*" - p.matchType = regexpMatch - } - } else { - // is "**" - // Note that this allows for any # of /'s (even 0) because - // the .* will eat everything, even /'s - regStr += "(.*" + escSL + ")?" - p.matchType = regexpMatch - } - - if i == 0 { - p.matchType = suffixMatch - } - } else { - // is "*" so map it to anything but "/" - regStr += "[^" + escSL + "]*" - p.matchType = regexpMatch - } - } else if ch == '?' { - // "?" is any char except "/" - regStr += "[^" + escSL + "]" - p.matchType = regexpMatch - } else if shouldEscape(ch) { - // Escape some regexp special chars that have no meaning - // in golang's filepath.Match - regStr += `\` + string(ch) - } else if ch == '\\' { - // escape next char. Note that a trailing \ in the pattern - // will be left alone (but need to escape it) - if sl == `\` { - // On windows map "\" to "\\", meaning an escaped backslash, - // and then just continue because filepath.Match on - // Windows doesn't allow escaping at all - regStr += escSL - continue - } - if scan.Peek() != scanner.EOF { - regStr += `\` + string(scan.Next()) - p.matchType = regexpMatch - } else { - regStr += `\` - } - } else if ch == '[' || ch == ']' { - regStr += string(ch) - p.matchType = regexpMatch - } else { - regStr += string(ch) - } - } - - if p.matchType != regexpMatch { - return nil - } - - regStr += "$" - - re, err := regexp.Compile(regStr) - if err != nil { - return err - } - - p.regexp = re - p.matchType = regexpMatch - return nil -} - -// Matches returns true if file matches any of the patterns -// and isn't excluded by any of the subsequent patterns. 
-// -// This implementation is buggy (it only checks a single parent dir against the -// pattern) and will be removed soon. Use MatchesOrParentMatches instead. -func Matches(file string, patterns []string) (bool, error) { - pm, err := New(patterns) - if err != nil { - return false, err - } - file = filepath.Clean(file) - - if file == "." { - // Don't let them exclude everything, kind of silly. - return false, nil - } - - return pm.Matches(file) -} - -// MatchesOrParentMatches returns true if file matches any of the patterns -// and isn't excluded by any of the subsequent patterns. -func MatchesOrParentMatches(file string, patterns []string) (bool, error) { - pm, err := New(patterns) - if err != nil { - return false, err - } - file = filepath.Clean(file) - - if file == "." { - // Don't let them exclude everything, kind of silly. - return false, nil - } - - return pm.MatchesOrParentMatches(file) -} diff --git a/vendor/github.com/moby/sys/sequential/LICENSE b/vendor/github.com/moby/sys/sequential/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/moby/sys/sequential/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/moby/sys/sequential/doc.go b/vendor/github.com/moby/sys/sequential/doc.go deleted file mode 100644 index af2817504..000000000 --- a/vendor/github.com/moby/sys/sequential/doc.go +++ /dev/null @@ -1,15 +0,0 @@ -// Package sequential provides a set of functions for managing sequential -// files on Windows. -// -// The origin of these functions are the golang OS and windows packages, -// slightly modified to only cope with files, not directories due to the -// specific use case. -// -// The alteration is to allow a file on Windows to be opened with -// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating -// the standby list, particularly when accessing large files such as layer.tar. -// -// For non-Windows platforms, the package provides wrappers for the equivalents -// in the os packages. They are passthrough on Unix platforms, and only relevant -// on Windows. 
-package sequential diff --git a/vendor/github.com/moby/sys/sequential/sequential_unix.go b/vendor/github.com/moby/sys/sequential/sequential_unix.go deleted file mode 100644 index a3c7340e3..000000000 --- a/vendor/github.com/moby/sys/sequential/sequential_unix.go +++ /dev/null @@ -1,45 +0,0 @@ -//go:build !windows -// +build !windows - -package sequential - -import "os" - -// Create creates the named file with mode 0666 (before umask), truncating -// it if it already exists. If successful, methods on the returned -// File can be used for I/O; the associated file descriptor has mode -// O_RDWR. -// If there is an error, it will be of type *PathError. -func Create(name string) (*os.File, error) { - return os.Create(name) -} - -// Open opens the named file for reading. If successful, methods on -// the returned file can be used for reading; the associated file -// descriptor has mode O_RDONLY. -// If there is an error, it will be of type *PathError. -func Open(name string) (*os.File, error) { - return os.Open(name) -} - -// OpenFile is the generalized open call; most users will use Open -// or Create instead. It opens the named file with specified flag -// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, -// methods on the returned File can be used for I/O. -// If there is an error, it will be of type *PathError. -func OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) { - return os.OpenFile(name, flag, perm) -} - -// CreateTemp creates a new temporary file in the directory dir -// with a name beginning with prefix, opens the file for reading -// and writing, and returns the resulting *os.File. -// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. -func CreateTemp(dir, prefix string) (f *os.File, err error) { - return os.CreateTemp(dir, prefix) -} diff --git a/vendor/github.com/moby/sys/sequential/sequential_windows.go b/vendor/github.com/moby/sys/sequential/sequential_windows.go deleted file mode 100644 index 3f7f0d83e..000000000 --- a/vendor/github.com/moby/sys/sequential/sequential_windows.go +++ /dev/null @@ -1,161 +0,0 @@ -package sequential - -import ( - "os" - "path/filepath" - "strconv" - "sync" - "syscall" - "time" - "unsafe" - - "golang.org/x/sys/windows" -) - -// Create creates the named file with mode 0666 (before umask), truncating -// it if it already exists. If successful, methods on the returned -// File can be used for I/O; the associated file descriptor has mode -// O_RDWR. -// If there is an error, it will be of type *PathError. -func Create(name string) (*os.File, error) { - return OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0) -} - -// Open opens the named file for reading. If successful, methods on -// the returned file can be used for reading; the associated file -// descriptor has mode O_RDONLY. -// If there is an error, it will be of type *PathError. -func Open(name string) (*os.File, error) { - return OpenFile(name, os.O_RDONLY, 0) -} - -// OpenFile is the generalized open call; most users will use Open -// or Create instead. -// If there is an error, it will be of type *PathError. 
-func OpenFile(name string, flag int, _ os.FileMode) (*os.File, error) { - if name == "" { - return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} - } - r, err := openFileSequential(name, flag, 0) - if err == nil { - return r, nil - } - return nil, &os.PathError{Op: "open", Path: name, Err: err} -} - -func openFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { - r, e := openSequential(name, flag|windows.O_CLOEXEC, 0) - if e != nil { - return nil, e - } - return os.NewFile(uintptr(r), name), nil -} - -func makeInheritSa() *windows.SecurityAttributes { - var sa windows.SecurityAttributes - sa.Length = uint32(unsafe.Sizeof(sa)) - sa.InheritHandle = 1 - return &sa -} - -func openSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { - if len(path) == 0 { - return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND - } - pathp, err := windows.UTF16PtrFromString(path) - if err != nil { - return windows.InvalidHandle, err - } - var access uint32 - switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) { - case windows.O_RDONLY: - access = windows.GENERIC_READ - case windows.O_WRONLY: - access = windows.GENERIC_WRITE - case windows.O_RDWR: - access = windows.GENERIC_READ | windows.GENERIC_WRITE - } - if mode&windows.O_CREAT != 0 { - access |= windows.GENERIC_WRITE - } - if mode&windows.O_APPEND != 0 { - access &^= windows.GENERIC_WRITE - access |= windows.FILE_APPEND_DATA - } - sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE) - var sa *windows.SecurityAttributes - if mode&windows.O_CLOEXEC == 0 { - sa = makeInheritSa() - } - var createmode uint32 - switch { - case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL): - createmode = windows.CREATE_NEW - case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC): - createmode = windows.CREATE_ALWAYS - case mode&windows.O_CREAT == windows.O_CREAT: - createmode = windows.OPEN_ALWAYS - case mode&windows.O_TRUNC == windows.O_TRUNC: - createmode = windows.TRUNCATE_EXISTING - default: - createmode = windows.OPEN_EXISTING - } - // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. - // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx - const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN - h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) - return h, e -} - -// Helpers for CreateTemp -var rand uint32 -var randmu sync.Mutex - -func reseed() uint32 { - return uint32(time.Now().UnixNano() + int64(os.Getpid())) -} - -func nextSuffix() string { - randmu.Lock() - r := rand - if r == 0 { - r = reseed() - } - r = r*1664525 + 1013904223 // constants from Numerical Recipes - rand = r - randmu.Unlock() - return strconv.Itoa(int(1e9 + r%1e9))[1:] -} - -// CreateTemp is a copy of os.CreateTemp, modified to use sequential -// file access. Below is the original comment from golang: -// TempFile creates a new temporary file in the directory dir -// with a name beginning with prefix, opens the file for reading -// and writing, and returns the resulting *os.File. -// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. 
It is the caller's responsibility -// to remove the file when no longer needed. -func CreateTemp(dir, prefix string) (f *os.File, err error) { - if dir == "" { - dir = os.TempDir() - } - - nconflict := 0 - for i := 0; i < 10000; i++ { - name := filepath.Join(dir, prefix+nextSuffix()) - f, err = OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o600) - if os.IsExist(err) { - if nconflict++; nconflict > 10 { - randmu.Lock() - rand = reseed() - randmu.Unlock() - } - continue - } - break - } - return -} diff --git a/vendor/github.com/moby/term/.gitignore b/vendor/github.com/moby/term/.gitignore deleted file mode 100644 index b0747ff01..000000000 --- a/vendor/github.com/moby/term/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -# if you want to ignore files created by your editor/tools, consider using a -# global .gitignore or .git/info/exclude see https://help.github.com/articles/ignoring-files -.* -!.github -!.gitignore -profile.out -# support running go modules in vendor mode for local development -vendor/ diff --git a/vendor/github.com/moby/term/LICENSE b/vendor/github.com/moby/term/LICENSE deleted file mode 100644 index 6d8d58fb6..000000000 --- a/vendor/github.com/moby/term/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2013-2018 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/moby/term/README.md b/vendor/github.com/moby/term/README.md deleted file mode 100644 index 0ce92cc33..000000000 --- a/vendor/github.com/moby/term/README.md +++ /dev/null @@ -1,36 +0,0 @@ -# term - utilities for dealing with terminals - -![Test](https://github.com/moby/term/workflows/Test/badge.svg) [![GoDoc](https://godoc.org/github.com/moby/term?status.svg)](https://godoc.org/github.com/moby/term) [![Go Report Card](https://goreportcard.com/badge/github.com/moby/term)](https://goreportcard.com/report/github.com/moby/term) - -term provides structures and helper functions to work with terminal (state, sizes). - -#### Using term - -```go -package main - -import ( - "log" - "os" - - "github.com/moby/term" -) - -func main() { - fd := os.Stdin.Fd() - if term.IsTerminal(fd) { - ws, err := term.GetWinsize(fd) - if err != nil { - log.Fatalf("term.GetWinsize: %s", err) - } - log.Printf("%d:%d\n", ws.Height, ws.Width) - } -} -``` - -## Contributing - -Want to hack on term? [Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md) apply. - -## Copyright and license -Code and documentation copyright 2015 Docker, inc. Code released under the Apache 2.0 license. Docs released under Creative commons. 
diff --git a/vendor/github.com/moby/term/ascii.go b/vendor/github.com/moby/term/ascii.go deleted file mode 100644 index 55873c055..000000000 --- a/vendor/github.com/moby/term/ascii.go +++ /dev/null @@ -1,66 +0,0 @@ -package term - -import ( - "fmt" - "strings" -) - -// ASCII list the possible supported ASCII key sequence -var ASCII = []string{ - "ctrl-@", - "ctrl-a", - "ctrl-b", - "ctrl-c", - "ctrl-d", - "ctrl-e", - "ctrl-f", - "ctrl-g", - "ctrl-h", - "ctrl-i", - "ctrl-j", - "ctrl-k", - "ctrl-l", - "ctrl-m", - "ctrl-n", - "ctrl-o", - "ctrl-p", - "ctrl-q", - "ctrl-r", - "ctrl-s", - "ctrl-t", - "ctrl-u", - "ctrl-v", - "ctrl-w", - "ctrl-x", - "ctrl-y", - "ctrl-z", - "ctrl-[", - "ctrl-\\", - "ctrl-]", - "ctrl-^", - "ctrl-_", -} - -// ToBytes converts a string representing a suite of key-sequence to the corresponding ASCII code. -func ToBytes(keys string) ([]byte, error) { - codes := []byte{} -next: - for _, key := range strings.Split(keys, ",") { - if len(key) != 1 { - for code, ctrl := range ASCII { - if ctrl == key { - codes = append(codes, byte(code)) - continue next - } - } - if key == "DEL" { - codes = append(codes, 127) - } else { - return nil, fmt.Errorf("Unknown character: '%s'", key) - } - } else { - codes = append(codes, key[0]) - } - } - return codes, nil -} diff --git a/vendor/github.com/moby/term/doc.go b/vendor/github.com/moby/term/doc.go deleted file mode 100644 index c9bc03244..000000000 --- a/vendor/github.com/moby/term/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package term provides structures and helper functions to work with -// terminal (state, sizes). -package term diff --git a/vendor/github.com/moby/term/proxy.go b/vendor/github.com/moby/term/proxy.go deleted file mode 100644 index c47756b89..000000000 --- a/vendor/github.com/moby/term/proxy.go +++ /dev/null @@ -1,88 +0,0 @@ -package term - -import ( - "io" -) - -// EscapeError is special error which returned by a TTY proxy reader's Read() -// method in case its detach escape sequence is read. -type EscapeError struct{} - -func (EscapeError) Error() string { - return "read escape sequence" -} - -// escapeProxy is used only for attaches with a TTY. It is used to proxy -// stdin keypresses from the underlying reader and look for the passed in -// escape key sequence to signal a detach. -type escapeProxy struct { - escapeKeys []byte - escapeKeyPos int - r io.Reader - buf []byte -} - -// NewEscapeProxy returns a new TTY proxy reader which wraps the given reader -// and detects when the specified escape keys are read, in which case the Read -// method will return an error of type EscapeError. -func NewEscapeProxy(r io.Reader, escapeKeys []byte) io.Reader { - return &escapeProxy{ - escapeKeys: escapeKeys, - r: r, - } -} - -func (r *escapeProxy) Read(buf []byte) (n int, err error) { - if len(r.escapeKeys) > 0 && r.escapeKeyPos == len(r.escapeKeys) { - return 0, EscapeError{} - } - - if len(r.buf) > 0 { - n = copy(buf, r.buf) - r.buf = r.buf[n:] - } - - nr, err := r.r.Read(buf[n:]) - n += nr - if len(r.escapeKeys) == 0 { - return n, err - } - - for i := 0; i < n; i++ { - if buf[i] == r.escapeKeys[r.escapeKeyPos] { - r.escapeKeyPos++ - - // Check if the full escape sequence is matched. - if r.escapeKeyPos == len(r.escapeKeys) { - n = i + 1 - r.escapeKeyPos - if n < 0 { - n = 0 - } - return n, EscapeError{} - } - continue - } - - // If we need to prepend a partial escape sequence from the previous - // read, make sure the new buffer size doesn't exceed len(buf). - // Otherwise, preserve any extra data in a buffer for the next read. 
- if i < r.escapeKeyPos { - preserve := make([]byte, 0, r.escapeKeyPos+n) - preserve = append(preserve, r.escapeKeys[:r.escapeKeyPos]...) - preserve = append(preserve, buf[:n]...) - n = copy(buf, preserve) - i += r.escapeKeyPos - r.buf = append(r.buf, preserve[n:]...) - } - r.escapeKeyPos = 0 - } - - // If we're in the middle of reading an escape sequence, make sure we don't - // let the caller read it. If later on we find that this is not the escape - // sequence, we'll prepend it back to buf. - n -= r.escapeKeyPos - if n < 0 { - n = 0 - } - return n, err -} diff --git a/vendor/github.com/moby/term/term.go b/vendor/github.com/moby/term/term.go deleted file mode 100644 index f9d8988ef..000000000 --- a/vendor/github.com/moby/term/term.go +++ /dev/null @@ -1,85 +0,0 @@ -package term - -import "io" - -// State holds the platform-specific state / console mode for the terminal. -type State terminalState - -// Winsize represents the size of the terminal window. -type Winsize struct { - Height uint16 - Width uint16 - - // Only used on Unix - x uint16 - y uint16 -} - -// StdStreams returns the standard streams (stdin, stdout, stderr). -// -// On Windows, it attempts to turn on VT handling on all std handles if -// supported, or falls back to terminal emulation. On Unix, this returns -// the standard [os.Stdin], [os.Stdout] and [os.Stderr]. -func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - return stdStreams() -} - -// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. -func GetFdInfo(in interface{}) (fd uintptr, isTerminal bool) { - return getFdInfo(in) -} - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - return getWinsize(fd) -} - -// SetWinsize tries to set the specified window size for the specified file -// descriptor. It is only implemented on Unix, and returns an error on Windows. -func SetWinsize(fd uintptr, ws *Winsize) error { - return setWinsize(fd, ws) -} - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd uintptr) bool { - return isTerminal(fd) -} - -// RestoreTerminal restores the terminal connected to the given file descriptor -// to a previous state. -func RestoreTerminal(fd uintptr, state *State) error { - return restoreTerminal(fd, state) -} - -// SaveState saves the state of the terminal connected to the given file descriptor. -func SaveState(fd uintptr) (*State, error) { - return saveState(fd) -} - -// DisableEcho applies the specified state to the terminal connected to the file -// descriptor, with echo disabled. -func DisableEcho(fd uintptr, state *State) error { - return disableEcho(fd, state) -} - -// SetRawTerminal puts the terminal connected to the given file descriptor into -// raw mode and returns the previous state. On UNIX, this is the equivalent of -// [MakeRaw], and puts both the input and output into raw mode. On Windows, it -// only puts the input into raw mode. -func SetRawTerminal(fd uintptr) (previousState *State, err error) { - return setRawTerminal(fd) -} - -// SetRawTerminalOutput puts the output of terminal connected to the given file -// descriptor into raw mode. On UNIX, this does nothing and returns nil for the -// state. On Windows, it disables LF -> CRLF translation. 
-func SetRawTerminalOutput(fd uintptr) (previousState *State, err error) { - return setRawTerminalOutput(fd) -} - -// MakeRaw puts the terminal (Windows Console) connected to the -// given file descriptor into raw mode and returns the previous state of -// the terminal so that it can be restored. -func MakeRaw(fd uintptr) (previousState *State, err error) { - return makeRaw(fd) -} diff --git a/vendor/github.com/moby/term/term_unix.go b/vendor/github.com/moby/term/term_unix.go deleted file mode 100644 index 2ec7706a1..000000000 --- a/vendor/github.com/moby/term/term_unix.go +++ /dev/null @@ -1,98 +0,0 @@ -//go:build !windows -// +build !windows - -package term - -import ( - "errors" - "io" - "os" - - "golang.org/x/sys/unix" -) - -// ErrInvalidState is returned if the state of the terminal is invalid. -// -// Deprecated: ErrInvalidState is no longer used. -var ErrInvalidState = errors.New("Invalid terminal state") - -// terminalState holds the platform-specific state / console mode for the terminal. -type terminalState struct { - termios unix.Termios -} - -func stdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - return os.Stdin, os.Stdout, os.Stderr -} - -func getFdInfo(in interface{}) (uintptr, bool) { - var inFd uintptr - var isTerminalIn bool - if file, ok := in.(*os.File); ok { - inFd = file.Fd() - isTerminalIn = isTerminal(inFd) - } - return inFd, isTerminalIn -} - -func getWinsize(fd uintptr) (*Winsize, error) { - uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ) - ws := &Winsize{Height: uws.Row, Width: uws.Col, x: uws.Xpixel, y: uws.Ypixel} - return ws, err -} - -func setWinsize(fd uintptr, ws *Winsize) error { - return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, &unix.Winsize{ - Row: ws.Height, - Col: ws.Width, - Xpixel: ws.x, - Ypixel: ws.y, - }) -} - -func isTerminal(fd uintptr) bool { - _, err := tcget(fd) - return err == nil -} - -func restoreTerminal(fd uintptr, state *State) error { - if state == nil { - return errors.New("invalid terminal state") - } - return tcset(fd, &state.termios) -} - -func saveState(fd uintptr) (*State, error) { - termios, err := tcget(fd) - if err != nil { - return nil, err - } - return &State{termios: *termios}, nil -} - -func disableEcho(fd uintptr, state *State) error { - newState := state.termios - newState.Lflag &^= unix.ECHO - - return tcset(fd, &newState) -} - -func setRawTerminal(fd uintptr) (*State, error) { - return makeRaw(fd) -} - -func setRawTerminalOutput(fd uintptr) (*State, error) { - return nil, nil -} - -func tcget(fd uintptr) (*unix.Termios, error) { - p, err := unix.IoctlGetTermios(int(fd), getTermios) - if err != nil { - return nil, err - } - return p, nil -} - -func tcset(fd uintptr, p *unix.Termios) error { - return unix.IoctlSetTermios(int(fd), setTermios, p) -} diff --git a/vendor/github.com/moby/term/term_windows.go b/vendor/github.com/moby/term/term_windows.go deleted file mode 100644 index 81ccff042..000000000 --- a/vendor/github.com/moby/term/term_windows.go +++ /dev/null @@ -1,176 +0,0 @@ -package term - -import ( - "fmt" - "io" - "os" - "os/signal" - - windowsconsole "github.com/moby/term/windows" - "golang.org/x/sys/windows" -) - -// terminalState holds the platform-specific state / console mode for the terminal. 
-type terminalState struct { - mode uint32 -} - -// vtInputSupported is true if winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported by the console -var vtInputSupported bool - -func stdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - // Turn on VT handling on all std handles, if possible. This might - // fail, in which case we will fall back to terminal emulation. - var ( - emulateStdin, emulateStdout, emulateStderr bool - - mode uint32 - ) - - fd := windows.Handle(os.Stdin.Fd()) - if err := windows.GetConsoleMode(fd, &mode); err == nil { - // Validate that winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it. - if err = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_INPUT); err != nil { - emulateStdin = true - } else { - vtInputSupported = true - } - // Unconditionally set the console mode back even on failure because SetConsoleMode - // remembers invalid bits on input handles. - _ = windows.SetConsoleMode(fd, mode) - } - - fd = windows.Handle(os.Stdout.Fd()) - if err := windows.GetConsoleMode(fd, &mode); err == nil { - // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it. - if err = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING|windows.DISABLE_NEWLINE_AUTO_RETURN); err != nil { - emulateStdout = true - } else { - _ = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING) - } - } - - fd = windows.Handle(os.Stderr.Fd()) - if err := windows.GetConsoleMode(fd, &mode); err == nil { - // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it. - if err = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING|windows.DISABLE_NEWLINE_AUTO_RETURN); err != nil { - emulateStderr = true - } else { - _ = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING) - } - } - - if emulateStdin { - h := uint32(windows.STD_INPUT_HANDLE) - stdIn = windowsconsole.NewAnsiReader(int(h)) - } else { - stdIn = os.Stdin - } - - if emulateStdout { - h := uint32(windows.STD_OUTPUT_HANDLE) - stdOut = windowsconsole.NewAnsiWriter(int(h)) - } else { - stdOut = os.Stdout - } - - if emulateStderr { - h := uint32(windows.STD_ERROR_HANDLE) - stdErr = windowsconsole.NewAnsiWriter(int(h)) - } else { - stdErr = os.Stderr - } - - return stdIn, stdOut, stdErr -} - -func getFdInfo(in interface{}) (uintptr, bool) { - return windowsconsole.GetHandleInfo(in) -} - -func getWinsize(fd uintptr) (*Winsize, error) { - var info windows.ConsoleScreenBufferInfo - if err := windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info); err != nil { - return nil, err - } - - winsize := &Winsize{ - Width: uint16(info.Window.Right - info.Window.Left + 1), - Height: uint16(info.Window.Bottom - info.Window.Top + 1), - } - - return winsize, nil -} - -func setWinsize(fd uintptr, ws *Winsize) error { - return fmt.Errorf("not implemented on Windows") -} - -func isTerminal(fd uintptr) bool { - var mode uint32 - err := windows.GetConsoleMode(windows.Handle(fd), &mode) - return err == nil -} - -func restoreTerminal(fd uintptr, state *State) error { - return windows.SetConsoleMode(windows.Handle(fd), state.mode) -} - -func saveState(fd uintptr) (*State, error) { - var mode uint32 - - if err := windows.GetConsoleMode(windows.Handle(fd), &mode); err != nil { - return nil, err - } - - return &State{mode: mode}, nil -} - -func disableEcho(fd uintptr, state *State) error { - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx - mode := state.mode 
- mode &^= windows.ENABLE_ECHO_INPUT - mode |= windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT - err := windows.SetConsoleMode(windows.Handle(fd), mode) - if err != nil { - return err - } - - // Register an interrupt handler to catch and restore prior state - restoreAtInterrupt(fd, state) - return nil -} - -func setRawTerminal(fd uintptr) (*State, error) { - oldState, err := MakeRaw(fd) - if err != nil { - return nil, err - } - - // Register an interrupt handler to catch and restore prior state - restoreAtInterrupt(fd, oldState) - return oldState, err -} - -func setRawTerminalOutput(fd uintptr) (*State, error) { - oldState, err := saveState(fd) - if err != nil { - return nil, err - } - - // Ignore failures, since winterm.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this - // version of Windows. - _ = windows.SetConsoleMode(windows.Handle(fd), oldState.mode|windows.DISABLE_NEWLINE_AUTO_RETURN) - return oldState, err -} - -func restoreAtInterrupt(fd uintptr, state *State) { - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, os.Interrupt) - - go func() { - _ = <-sigchan - _ = RestoreTerminal(fd, state) - os.Exit(0) - }() -} diff --git a/vendor/github.com/moby/term/termios_bsd.go b/vendor/github.com/moby/term/termios_bsd.go deleted file mode 100644 index 45f77e03c..000000000 --- a/vendor/github.com/moby/term/termios_bsd.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build darwin || freebsd || openbsd || netbsd -// +build darwin freebsd openbsd netbsd - -package term - -import ( - "golang.org/x/sys/unix" -) - -const ( - getTermios = unix.TIOCGETA - setTermios = unix.TIOCSETA -) diff --git a/vendor/github.com/moby/term/termios_nonbsd.go b/vendor/github.com/moby/term/termios_nonbsd.go deleted file mode 100644 index 88b7b2156..000000000 --- a/vendor/github.com/moby/term/termios_nonbsd.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build !darwin && !freebsd && !netbsd && !openbsd && !windows -// +build !darwin,!freebsd,!netbsd,!openbsd,!windows - -package term - -import ( - "golang.org/x/sys/unix" -) - -const ( - getTermios = unix.TCGETS - setTermios = unix.TCSETS -) diff --git a/vendor/github.com/moby/term/termios_unix.go b/vendor/github.com/moby/term/termios_unix.go deleted file mode 100644 index 60c823783..000000000 --- a/vendor/github.com/moby/term/termios_unix.go +++ /dev/null @@ -1,35 +0,0 @@ -//go:build !windows -// +build !windows - -package term - -import ( - "golang.org/x/sys/unix" -) - -// Termios is the Unix API for terminal I/O. -// -// Deprecated: use [unix.Termios]. 
-type Termios = unix.Termios - -func makeRaw(fd uintptr) (*State, error) { - termios, err := tcget(fd) - if err != nil { - return nil, err - } - - oldState := State{termios: *termios} - - termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON - termios.Oflag &^= unix.OPOST - termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN - termios.Cflag &^= unix.CSIZE | unix.PARENB - termios.Cflag |= unix.CS8 - termios.Cc[unix.VMIN] = 1 - termios.Cc[unix.VTIME] = 0 - - if err := tcset(fd, termios); err != nil { - return nil, err - } - return &oldState, nil -} diff --git a/vendor/github.com/moby/term/termios_windows.go b/vendor/github.com/moby/term/termios_windows.go deleted file mode 100644 index 5be4e7601..000000000 --- a/vendor/github.com/moby/term/termios_windows.go +++ /dev/null @@ -1,37 +0,0 @@ -package term - -import "golang.org/x/sys/windows" - -func makeRaw(fd uintptr) (*State, error) { - state, err := SaveState(fd) - if err != nil { - return nil, err - } - - mode := state.mode - - // See - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx - - // Disable these modes - mode &^= windows.ENABLE_ECHO_INPUT - mode &^= windows.ENABLE_LINE_INPUT - mode &^= windows.ENABLE_MOUSE_INPUT - mode &^= windows.ENABLE_WINDOW_INPUT - mode &^= windows.ENABLE_PROCESSED_INPUT - - // Enable these modes - mode |= windows.ENABLE_EXTENDED_FLAGS - mode |= windows.ENABLE_INSERT_MODE - mode |= windows.ENABLE_QUICK_EDIT_MODE - if vtInputSupported { - mode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT - } - - err = windows.SetConsoleMode(windows.Handle(fd), mode) - if err != nil { - return nil, err - } - return state, nil -} diff --git a/vendor/github.com/moby/term/windows/ansi_reader.go b/vendor/github.com/moby/term/windows/ansi_reader.go deleted file mode 100644 index fb34c547a..000000000 --- a/vendor/github.com/moby/term/windows/ansi_reader.go +++ /dev/null @@ -1,252 +0,0 @@ -//go:build windows -// +build windows - -package windowsconsole - -import ( - "bytes" - "errors" - "fmt" - "io" - "os" - "strings" - "unsafe" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Azure/go-ansiterm/winterm" -) - -const ( - escapeSequence = ansiterm.KEY_ESC_CSI -) - -// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation. -type ansiReader struct { - file *os.File - fd uintptr - buffer []byte - cbBuffer int - command []byte -} - -// NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a -// Windows console input handle. -func NewAnsiReader(nFile int) io.ReadCloser { - file, fd := winterm.GetStdFile(nFile) - return &ansiReader{ - file: file, - fd: fd, - command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), - buffer: make([]byte, 0), - } -} - -// Close closes the wrapped file. -func (ar *ansiReader) Close() (err error) { - return ar.file.Close() -} - -// Fd returns the file descriptor of the wrapped file. -func (ar *ansiReader) Fd() uintptr { - return ar.fd -} - -// Read reads up to len(p) bytes of translated input events into p. 
-func (ar *ansiReader) Read(p []byte) (int, error) { - if len(p) == 0 { - return 0, nil - } - - // Previously read bytes exist, read as much as we can and return - if len(ar.buffer) > 0 { - originalLength := len(ar.buffer) - copiedLength := copy(p, ar.buffer) - - if copiedLength == originalLength { - ar.buffer = make([]byte, 0, len(p)) - } else { - ar.buffer = ar.buffer[copiedLength:] - } - - return copiedLength, nil - } - - // Read and translate key events - events, err := readInputEvents(ar, len(p)) - if err != nil { - return 0, err - } else if len(events) == 0 { - return 0, nil - } - - keyBytes := translateKeyEvents(events, []byte(escapeSequence)) - - // Save excess bytes and right-size keyBytes - if len(keyBytes) > len(p) { - ar.buffer = keyBytes[len(p):] - keyBytes = keyBytes[:len(p)] - } else if len(keyBytes) == 0 { - return 0, nil - } - - copiedLength := copy(p, keyBytes) - if copiedLength != len(keyBytes) { - return 0, errors.New("unexpected copy length encountered") - } - - return copiedLength, nil -} - -// readInputEvents polls until at least one event is available. -func readInputEvents(ar *ansiReader, maxBytes int) ([]winterm.INPUT_RECORD, error) { - // Determine the maximum number of records to retrieve - // -- Cast around the type system to obtain the size of a single INPUT_RECORD. - // unsafe.Sizeof requires an expression vs. a type-reference; the casting - // tricks the type system into believing it has such an expression. - recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes))))) - countRecords := maxBytes / recordSize - if countRecords > ansiterm.MAX_INPUT_EVENTS { - countRecords = ansiterm.MAX_INPUT_EVENTS - } else if countRecords == 0 { - countRecords = 1 - } - - // Wait for and read input events - events := make([]winterm.INPUT_RECORD, countRecords) - nEvents := uint32(0) - eventsExist, err := winterm.WaitForSingleObject(ar.fd, winterm.WAIT_INFINITE) - if err != nil { - return nil, err - } - - if eventsExist { - err = winterm.ReadConsoleInput(ar.fd, events, &nEvents) - if err != nil { - return nil, err - } - } - - // Return a slice restricted to the number of returned records - return events[:nEvents], nil -} - -// KeyEvent Translation Helpers - -var arrowKeyMapPrefix = map[uint16]string{ - winterm.VK_UP: "%s%sA", - winterm.VK_DOWN: "%s%sB", - winterm.VK_RIGHT: "%s%sC", - winterm.VK_LEFT: "%s%sD", -} - -var keyMapPrefix = map[uint16]string{ - winterm.VK_UP: "\x1B[%sA", - winterm.VK_DOWN: "\x1B[%sB", - winterm.VK_RIGHT: "\x1B[%sC", - winterm.VK_LEFT: "\x1B[%sD", - winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 - winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4 - winterm.VK_INSERT: "\x1B[2%s~", - winterm.VK_DELETE: "\x1B[3%s~", - winterm.VK_PRIOR: "\x1B[5%s~", - winterm.VK_NEXT: "\x1B[6%s~", - winterm.VK_F1: "", - winterm.VK_F2: "", - winterm.VK_F3: "\x1B[13%s~", - winterm.VK_F4: "\x1B[14%s~", - winterm.VK_F5: "\x1B[15%s~", - winterm.VK_F6: "\x1B[17%s~", - winterm.VK_F7: "\x1B[18%s~", - winterm.VK_F8: "\x1B[19%s~", - winterm.VK_F9: "\x1B[20%s~", - winterm.VK_F10: "\x1B[21%s~", - winterm.VK_F11: "\x1B[23%s~", - winterm.VK_F12: "\x1B[24%s~", -} - -// translateKeyEvents converts the input events into the appropriate ANSI string. 
-func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte { - var buffer bytes.Buffer - for _, event := range events { - if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 { - buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence)) - } - } - - return buffer.Bytes() -} - -// keyToString maps the given input event record to the corresponding string. -func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string { - if keyEvent.UnicodeChar == 0 { - return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence) - } - - _, alt, control := getControlKeys(keyEvent.ControlKeyState) - if control { - // TODO(azlinux): Implement following control sequences - // -D Signals the end of input from the keyboard; also exits current shell. - // -H Deletes the first character to the left of the cursor. Also called the ERASE key. - // -Q Restarts printing after it has been stopped with -s. - // -S Suspends printing on the screen (does not stop the program). - // -U Deletes all characters on the current line. Also called the KILL key. - // -E Quits current command and creates a core - } - - // +Key generates ESC N Key - if !control && alt { - return ansiterm.KEY_ESC_N + strings.ToLower(string(rune(keyEvent.UnicodeChar))) - } - - return string(rune(keyEvent.UnicodeChar)) -} - -// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string. -func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string { - shift, alt, control := getControlKeys(controlState) - modifier := getControlKeysModifier(shift, alt, control) - - if format, ok := arrowKeyMapPrefix[key]; ok { - return fmt.Sprintf(format, escapeSequence, modifier) - } - - if format, ok := keyMapPrefix[key]; ok { - return fmt.Sprintf(format, modifier) - } - - return "" -} - -// getControlKeys extracts the shift, alt, and ctrl key states. -func getControlKeys(controlState uint32) (shift, alt, control bool) { - shift = 0 != (controlState & winterm.SHIFT_PRESSED) - alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED)) - control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED)) - return shift, alt, control -} - -// getControlKeysModifier returns the ANSI modifier for the given combination of control keys. -func getControlKeysModifier(shift, alt, control bool) string { - if shift && alt && control { - return ansiterm.KEY_CONTROL_PARAM_8 - } - if alt && control { - return ansiterm.KEY_CONTROL_PARAM_7 - } - if shift && control { - return ansiterm.KEY_CONTROL_PARAM_6 - } - if control { - return ansiterm.KEY_CONTROL_PARAM_5 - } - if shift && alt { - return ansiterm.KEY_CONTROL_PARAM_4 - } - if alt { - return ansiterm.KEY_CONTROL_PARAM_3 - } - if shift { - return ansiterm.KEY_CONTROL_PARAM_2 - } - return "" -} diff --git a/vendor/github.com/moby/term/windows/ansi_writer.go b/vendor/github.com/moby/term/windows/ansi_writer.go deleted file mode 100644 index 4243307fd..000000000 --- a/vendor/github.com/moby/term/windows/ansi_writer.go +++ /dev/null @@ -1,57 +0,0 @@ -//go:build windows -// +build windows - -package windowsconsole - -import ( - "io" - "os" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Azure/go-ansiterm/winterm" -) - -// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation. 
-type ansiWriter struct { - file *os.File - fd uintptr - infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO - command []byte - escapeSequence []byte - inAnsiSequence bool - parser *ansiterm.AnsiParser -} - -// NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a -// Windows console output handle. -func NewAnsiWriter(nFile int) io.Writer { - file, fd := winterm.GetStdFile(nFile) - info, err := winterm.GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil - } - - parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file)) - - return &ansiWriter{ - file: file, - fd: fd, - infoReset: info, - command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), - escapeSequence: []byte(ansiterm.KEY_ESC_CSI), - parser: parser, - } -} - -func (aw *ansiWriter) Fd() uintptr { - return aw.fd -} - -// Write writes len(p) bytes from p to the underlying data stream. -func (aw *ansiWriter) Write(p []byte) (total int, err error) { - if len(p) == 0 { - return 0, nil - } - - return aw.parser.Parse(p) -} diff --git a/vendor/github.com/moby/term/windows/console.go b/vendor/github.com/moby/term/windows/console.go deleted file mode 100644 index 21e57bd52..000000000 --- a/vendor/github.com/moby/term/windows/console.go +++ /dev/null @@ -1,43 +0,0 @@ -//go:build windows -// +build windows - -package windowsconsole - -import ( - "os" - - "golang.org/x/sys/windows" -) - -// GetHandleInfo returns file descriptor and bool indicating whether the file is a console. -func GetHandleInfo(in interface{}) (uintptr, bool) { - switch t := in.(type) { - case *ansiReader: - return t.Fd(), true - case *ansiWriter: - return t.Fd(), true - } - - var inFd uintptr - var isTerminal bool - - if file, ok := in.(*os.File); ok { - inFd = file.Fd() - isTerminal = isConsole(inFd) - } - return inFd, isTerminal -} - -// IsConsole returns true if the given file descriptor is a Windows Console. -// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. -// -// Deprecated: use [windows.GetConsoleMode] or [golang.org/x/term.IsTerminal]. -func IsConsole(fd uintptr) bool { - return isConsole(fd) -} - -func isConsole(fd uintptr) bool { - var mode uint32 - err := windows.GetConsoleMode(windows.Handle(fd), &mode) - return err == nil -} diff --git a/vendor/github.com/moby/term/windows/doc.go b/vendor/github.com/moby/term/windows/doc.go deleted file mode 100644 index 54265fffa..000000000 --- a/vendor/github.com/moby/term/windows/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// These files implement ANSI-aware input and output streams for use by the Docker Windows client. -// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create -// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. 
- -package windowsconsole diff --git a/vendor/github.com/morikuni/aec/LICENSE b/vendor/github.com/morikuni/aec/LICENSE deleted file mode 100644 index 1c2640164..000000000 --- a/vendor/github.com/morikuni/aec/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Taihei Morikuni - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/morikuni/aec/README.md b/vendor/github.com/morikuni/aec/README.md deleted file mode 100644 index 3cbc4343e..000000000 --- a/vendor/github.com/morikuni/aec/README.md +++ /dev/null @@ -1,178 +0,0 @@ -# aec - -[![GoDoc](https://godoc.org/github.com/morikuni/aec?status.svg)](https://godoc.org/github.com/morikuni/aec) - -Go wrapper for ANSI escape code. - -## Install - -```bash -go get github.com/morikuni/aec -``` - -## Features - -ANSI escape codes depend on terminal environment. -Some of these features may not work. -Check supported Font-Style/Font-Color features with [checkansi](./checkansi). - -[Wikipedia](https://en.wikipedia.org/wiki/ANSI_escape_code) for more detail. - -### Cursor - -- `Up(n)` -- `Down(n)` -- `Right(n)` -- `Left(n)` -- `NextLine(n)` -- `PreviousLine(n)` -- `Column(col)` -- `Position(row, col)` -- `Save` -- `Restore` -- `Hide` -- `Show` -- `Report` - -### Erase - -- `EraseDisplay(mode)` -- `EraseLine(mode)` - -### Scroll - -- `ScrollUp(n)` -- `ScrollDown(n)` - -### Font Style - -- `Bold` -- `Faint` -- `Italic` -- `Underline` -- `BlinkSlow` -- `BlinkRapid` -- `Inverse` -- `Conceal` -- `CrossOut` -- `Frame` -- `Encircle` -- `Overline` - -### Font Color - -Foreground color. - -- `DefaultF` -- `BlackF` -- `RedF` -- `GreenF` -- `YellowF` -- `BlueF` -- `MagentaF` -- `CyanF` -- `WhiteF` -- `LightBlackF` -- `LightRedF` -- `LightGreenF` -- `LightYellowF` -- `LightBlueF` -- `LightMagentaF` -- `LightCyanF` -- `LightWhiteF` -- `Color3BitF(color)` -- `Color8BitF(color)` -- `FullColorF(r, g, b)` - -Background color. - -- `DefaultB` -- `BlackB` -- `RedB` -- `GreenB` -- `YellowB` -- `BlueB` -- `MagentaB` -- `CyanB` -- `WhiteB` -- `LightBlackB` -- `LightRedB` -- `LightGreenB` -- `LightYellowB` -- `LightBlueB` -- `LightMagentaB` -- `LightCyanB` -- `LightWhiteB` -- `Color3BitB(color)` -- `Color8BitB(color)` -- `FullColorB(r, g, b)` - -### Color Converter - -24bit RGB color to ANSI color. - -- `NewRGB3Bit(r, g, b)` -- `NewRGB8Bit(r, g, b)` - -### Builder - -To mix these features. - -```go -custom := aec.EmptyBuilder.Right(2).RGB8BitF(128, 255, 64).RedB().ANSI -custom.Apply("Hello World") -``` - -## Usage - -1. 
Create ANSI by `aec.XXX().With(aec.YYY())` or `aec.EmptyBuilder.XXX().YYY().ANSI` -2. Print ANSI by `fmt.Print(ansi, "some string", aec.Reset)` or `fmt.Print(ansi.Apply("some string"))` - -`aec.Reset` should be added when using font style or font color features. - -## Example - -Simple progressbar. - -![sample](./sample.gif) - -```go -package main - -import ( - "fmt" - "strings" - "time" - - "github.com/morikuni/aec" -) - -func main() { - const n = 20 - builder := aec.EmptyBuilder - - up2 := aec.Up(2) - col := aec.Column(n + 2) - bar := aec.Color8BitF(aec.NewRGB8Bit(64, 255, 64)) - label := builder.LightRedF().Underline().With(col).Right(1).ANSI - - // for up2 - fmt.Println() - fmt.Println() - - for i := 0; i <= n; i++ { - fmt.Print(up2) - fmt.Println(label.Apply(fmt.Sprint(i, "/", n))) - fmt.Print("[") - fmt.Print(bar.Apply(strings.Repeat("=", i))) - fmt.Println(col.Apply("]")) - time.Sleep(100 * time.Millisecond) - } -} -``` - -## License - -[MIT](./LICENSE) - - diff --git a/vendor/github.com/morikuni/aec/aec.go b/vendor/github.com/morikuni/aec/aec.go deleted file mode 100644 index 566be6eb1..000000000 --- a/vendor/github.com/morikuni/aec/aec.go +++ /dev/null @@ -1,137 +0,0 @@ -package aec - -import "fmt" - -// EraseMode is listed in a variable EraseModes. -type EraseMode uint - -var ( - // EraseModes is a list of EraseMode. - EraseModes struct { - // All erase all. - All EraseMode - - // Head erase to head. - Head EraseMode - - // Tail erase to tail. - Tail EraseMode - } - - // Save saves the cursor position. - Save ANSI - - // Restore restores the cursor position. - Restore ANSI - - // Hide hides the cursor. - Hide ANSI - - // Show shows the cursor. - Show ANSI - - // Report reports the cursor position. - Report ANSI -) - -// Up moves up the cursor. -func Up(n uint) ANSI { - if n == 0 { - return empty - } - return newAnsi(fmt.Sprintf(esc+"%dA", n)) -} - -// Down moves down the cursor. -func Down(n uint) ANSI { - if n == 0 { - return empty - } - return newAnsi(fmt.Sprintf(esc+"%dB", n)) -} - -// Right moves right the cursor. -func Right(n uint) ANSI { - if n == 0 { - return empty - } - return newAnsi(fmt.Sprintf(esc+"%dC", n)) -} - -// Left moves left the cursor. -func Left(n uint) ANSI { - if n == 0 { - return empty - } - return newAnsi(fmt.Sprintf(esc+"%dD", n)) -} - -// NextLine moves down the cursor to head of a line. -func NextLine(n uint) ANSI { - if n == 0 { - return empty - } - return newAnsi(fmt.Sprintf(esc+"%dE", n)) -} - -// PreviousLine moves up the cursor to head of a line. -func PreviousLine(n uint) ANSI { - if n == 0 { - return empty - } - return newAnsi(fmt.Sprintf(esc+"%dF", n)) -} - -// Column set the cursor position to a given column. -func Column(col uint) ANSI { - return newAnsi(fmt.Sprintf(esc+"%dG", col)) -} - -// Position set the cursor position to a given absolute position. -func Position(row, col uint) ANSI { - return newAnsi(fmt.Sprintf(esc+"%d;%dH", row, col)) -} - -// EraseDisplay erases display by given EraseMode. -func EraseDisplay(m EraseMode) ANSI { - return newAnsi(fmt.Sprintf(esc+"%dJ", m)) -} - -// EraseLine erases lines by given EraseMode. -func EraseLine(m EraseMode) ANSI { - return newAnsi(fmt.Sprintf(esc+"%dK", m)) -} - -// ScrollUp scrolls up the page. -func ScrollUp(n int) ANSI { - if n == 0 { - return empty - } - return newAnsi(fmt.Sprintf(esc+"%dS", n)) -} - -// ScrollDown scrolls down the page. 
-func ScrollDown(n int) ANSI { - if n == 0 { - return empty - } - return newAnsi(fmt.Sprintf(esc+"%dT", n)) -} - -func init() { - EraseModes = struct { - All EraseMode - Head EraseMode - Tail EraseMode - }{ - Tail: 0, - Head: 1, - All: 2, - } - - Save = newAnsi(esc + "s") - Restore = newAnsi(esc + "u") - Hide = newAnsi(esc + "?25l") - Show = newAnsi(esc + "?25h") - Report = newAnsi(esc + "6n") -} diff --git a/vendor/github.com/morikuni/aec/ansi.go b/vendor/github.com/morikuni/aec/ansi.go deleted file mode 100644 index e60722e6e..000000000 --- a/vendor/github.com/morikuni/aec/ansi.go +++ /dev/null @@ -1,59 +0,0 @@ -package aec - -import ( - "fmt" - "strings" -) - -const esc = "\x1b[" - -// Reset resets SGR effect. -const Reset string = "\x1b[0m" - -var empty = newAnsi("") - -// ANSI represents ANSI escape code. -type ANSI interface { - fmt.Stringer - - // With adapts given ANSIs. - With(...ANSI) ANSI - - // Apply wraps given string in ANSI. - Apply(string) string -} - -type ansiImpl string - -func newAnsi(s string) *ansiImpl { - r := ansiImpl(s) - return &r -} - -func (a *ansiImpl) With(ansi ...ANSI) ANSI { - return concat(append([]ANSI{a}, ansi...)) -} - -func (a *ansiImpl) Apply(s string) string { - return a.String() + s + Reset -} - -func (a *ansiImpl) String() string { - return string(*a) -} - -// Apply wraps given string in ANSIs. -func Apply(s string, ansi ...ANSI) string { - if len(ansi) == 0 { - return s - } - return concat(ansi).Apply(s) -} - -func concat(ansi []ANSI) ANSI { - strs := make([]string, 0, len(ansi)) - for _, p := range ansi { - strs = append(strs, p.String()) - } - return newAnsi(strings.Join(strs, "")) -} diff --git a/vendor/github.com/morikuni/aec/builder.go b/vendor/github.com/morikuni/aec/builder.go deleted file mode 100644 index 13bd002d4..000000000 --- a/vendor/github.com/morikuni/aec/builder.go +++ /dev/null @@ -1,388 +0,0 @@ -package aec - -// Builder is a lightweight syntax to construct customized ANSI. -type Builder struct { - ANSI ANSI -} - -// EmptyBuilder is an initialized Builder. -var EmptyBuilder *Builder - -// NewBuilder creates a Builder from existing ANSI. -func NewBuilder(a ...ANSI) *Builder { - return &Builder{concat(a)} -} - -// With is a syntax for With. -func (builder *Builder) With(a ...ANSI) *Builder { - return NewBuilder(builder.ANSI.With(a...)) -} - -// Up is a syntax for Up. -func (builder *Builder) Up(n uint) *Builder { - return builder.With(Up(n)) -} - -// Down is a syntax for Down. -func (builder *Builder) Down(n uint) *Builder { - return builder.With(Down(n)) -} - -// Right is a syntax for Right. -func (builder *Builder) Right(n uint) *Builder { - return builder.With(Right(n)) -} - -// Left is a syntax for Left. -func (builder *Builder) Left(n uint) *Builder { - return builder.With(Left(n)) -} - -// NextLine is a syntax for NextLine. -func (builder *Builder) NextLine(n uint) *Builder { - return builder.With(NextLine(n)) -} - -// PreviousLine is a syntax for PreviousLine. -func (builder *Builder) PreviousLine(n uint) *Builder { - return builder.With(PreviousLine(n)) -} - -// Column is a syntax for Column. -func (builder *Builder) Column(col uint) *Builder { - return builder.With(Column(col)) -} - -// Position is a syntax for Position. -func (builder *Builder) Position(row, col uint) *Builder { - return builder.With(Position(row, col)) -} - -// EraseDisplay is a syntax for EraseDisplay. -func (builder *Builder) EraseDisplay(m EraseMode) *Builder { - return builder.With(EraseDisplay(m)) -} - -// EraseLine is a syntax for EraseLine. 
-func (builder *Builder) EraseLine(m EraseMode) *Builder { - return builder.With(EraseLine(m)) -} - -// ScrollUp is a syntax for ScrollUp. -func (builder *Builder) ScrollUp(n int) *Builder { - return builder.With(ScrollUp(n)) -} - -// ScrollDown is a syntax for ScrollDown. -func (builder *Builder) ScrollDown(n int) *Builder { - return builder.With(ScrollDown(n)) -} - -// Save is a syntax for Save. -func (builder *Builder) Save() *Builder { - return builder.With(Save) -} - -// Restore is a syntax for Restore. -func (builder *Builder) Restore() *Builder { - return builder.With(Restore) -} - -// Hide is a syntax for Hide. -func (builder *Builder) Hide() *Builder { - return builder.With(Hide) -} - -// Show is a syntax for Show. -func (builder *Builder) Show() *Builder { - return builder.With(Show) -} - -// Report is a syntax for Report. -func (builder *Builder) Report() *Builder { - return builder.With(Report) -} - -// Bold is a syntax for Bold. -func (builder *Builder) Bold() *Builder { - return builder.With(Bold) -} - -// Faint is a syntax for Faint. -func (builder *Builder) Faint() *Builder { - return builder.With(Faint) -} - -// Italic is a syntax for Italic. -func (builder *Builder) Italic() *Builder { - return builder.With(Italic) -} - -// Underline is a syntax for Underline. -func (builder *Builder) Underline() *Builder { - return builder.With(Underline) -} - -// BlinkSlow is a syntax for BlinkSlow. -func (builder *Builder) BlinkSlow() *Builder { - return builder.With(BlinkSlow) -} - -// BlinkRapid is a syntax for BlinkRapid. -func (builder *Builder) BlinkRapid() *Builder { - return builder.With(BlinkRapid) -} - -// Inverse is a syntax for Inverse. -func (builder *Builder) Inverse() *Builder { - return builder.With(Inverse) -} - -// Conceal is a syntax for Conceal. -func (builder *Builder) Conceal() *Builder { - return builder.With(Conceal) -} - -// CrossOut is a syntax for CrossOut. -func (builder *Builder) CrossOut() *Builder { - return builder.With(CrossOut) -} - -// BlackF is a syntax for BlackF. -func (builder *Builder) BlackF() *Builder { - return builder.With(BlackF) -} - -// RedF is a syntax for RedF. -func (builder *Builder) RedF() *Builder { - return builder.With(RedF) -} - -// GreenF is a syntax for GreenF. -func (builder *Builder) GreenF() *Builder { - return builder.With(GreenF) -} - -// YellowF is a syntax for YellowF. -func (builder *Builder) YellowF() *Builder { - return builder.With(YellowF) -} - -// BlueF is a syntax for BlueF. -func (builder *Builder) BlueF() *Builder { - return builder.With(BlueF) -} - -// MagentaF is a syntax for MagentaF. -func (builder *Builder) MagentaF() *Builder { - return builder.With(MagentaF) -} - -// CyanF is a syntax for CyanF. -func (builder *Builder) CyanF() *Builder { - return builder.With(CyanF) -} - -// WhiteF is a syntax for WhiteF. -func (builder *Builder) WhiteF() *Builder { - return builder.With(WhiteF) -} - -// DefaultF is a syntax for DefaultF. -func (builder *Builder) DefaultF() *Builder { - return builder.With(DefaultF) -} - -// BlackB is a syntax for BlackB. -func (builder *Builder) BlackB() *Builder { - return builder.With(BlackB) -} - -// RedB is a syntax for RedB. -func (builder *Builder) RedB() *Builder { - return builder.With(RedB) -} - -// GreenB is a syntax for GreenB. -func (builder *Builder) GreenB() *Builder { - return builder.With(GreenB) -} - -// YellowB is a syntax for YellowB. -func (builder *Builder) YellowB() *Builder { - return builder.With(YellowB) -} - -// BlueB is a syntax for BlueB. 
-func (builder *Builder) BlueB() *Builder { - return builder.With(BlueB) -} - -// MagentaB is a syntax for MagentaB. -func (builder *Builder) MagentaB() *Builder { - return builder.With(MagentaB) -} - -// CyanB is a syntax for CyanB. -func (builder *Builder) CyanB() *Builder { - return builder.With(CyanB) -} - -// WhiteB is a syntax for WhiteB. -func (builder *Builder) WhiteB() *Builder { - return builder.With(WhiteB) -} - -// DefaultB is a syntax for DefaultB. -func (builder *Builder) DefaultB() *Builder { - return builder.With(DefaultB) -} - -// Frame is a syntax for Frame. -func (builder *Builder) Frame() *Builder { - return builder.With(Frame) -} - -// Encircle is a syntax for Encircle. -func (builder *Builder) Encircle() *Builder { - return builder.With(Encircle) -} - -// Overline is a syntax for Overline. -func (builder *Builder) Overline() *Builder { - return builder.With(Overline) -} - -// LightBlackF is a syntax for LightBlueF. -func (builder *Builder) LightBlackF() *Builder { - return builder.With(LightBlackF) -} - -// LightRedF is a syntax for LightRedF. -func (builder *Builder) LightRedF() *Builder { - return builder.With(LightRedF) -} - -// LightGreenF is a syntax for LightGreenF. -func (builder *Builder) LightGreenF() *Builder { - return builder.With(LightGreenF) -} - -// LightYellowF is a syntax for LightYellowF. -func (builder *Builder) LightYellowF() *Builder { - return builder.With(LightYellowF) -} - -// LightBlueF is a syntax for LightBlueF. -func (builder *Builder) LightBlueF() *Builder { - return builder.With(LightBlueF) -} - -// LightMagentaF is a syntax for LightMagentaF. -func (builder *Builder) LightMagentaF() *Builder { - return builder.With(LightMagentaF) -} - -// LightCyanF is a syntax for LightCyanF. -func (builder *Builder) LightCyanF() *Builder { - return builder.With(LightCyanF) -} - -// LightWhiteF is a syntax for LightWhiteF. -func (builder *Builder) LightWhiteF() *Builder { - return builder.With(LightWhiteF) -} - -// LightBlackB is a syntax for LightBlackB. -func (builder *Builder) LightBlackB() *Builder { - return builder.With(LightBlackB) -} - -// LightRedB is a syntax for LightRedB. -func (builder *Builder) LightRedB() *Builder { - return builder.With(LightRedB) -} - -// LightGreenB is a syntax for LightGreenB. -func (builder *Builder) LightGreenB() *Builder { - return builder.With(LightGreenB) -} - -// LightYellowB is a syntax for LightYellowB. -func (builder *Builder) LightYellowB() *Builder { - return builder.With(LightYellowB) -} - -// LightBlueB is a syntax for LightBlueB. -func (builder *Builder) LightBlueB() *Builder { - return builder.With(LightBlueB) -} - -// LightMagentaB is a syntax for LightMagentaB. -func (builder *Builder) LightMagentaB() *Builder { - return builder.With(LightMagentaB) -} - -// LightCyanB is a syntax for LightCyanB. -func (builder *Builder) LightCyanB() *Builder { - return builder.With(LightCyanB) -} - -// LightWhiteB is a syntax for LightWhiteB. -func (builder *Builder) LightWhiteB() *Builder { - return builder.With(LightWhiteB) -} - -// Color3BitF is a syntax for Color3BitF. -func (builder *Builder) Color3BitF(c RGB3Bit) *Builder { - return builder.With(Color3BitF(c)) -} - -// Color3BitB is a syntax for Color3BitB. -func (builder *Builder) Color3BitB(c RGB3Bit) *Builder { - return builder.With(Color3BitB(c)) -} - -// Color8BitF is a syntax for Color8BitF. -func (builder *Builder) Color8BitF(c RGB8Bit) *Builder { - return builder.With(Color8BitF(c)) -} - -// Color8BitB is a syntax for Color8BitB. 
-func (builder *Builder) Color8BitB(c RGB8Bit) *Builder { - return builder.With(Color8BitB(c)) -} - -// FullColorF is a syntax for FullColorF. -func (builder *Builder) FullColorF(r, g, b uint8) *Builder { - return builder.With(FullColorF(r, g, b)) -} - -// FullColorB is a syntax for FullColorB. -func (builder *Builder) FullColorB(r, g, b uint8) *Builder { - return builder.With(FullColorB(r, g, b)) -} - -// RGB3BitF is a syntax for Color3BitF with NewRGB3Bit. -func (builder *Builder) RGB3BitF(r, g, b uint8) *Builder { - return builder.Color3BitF(NewRGB3Bit(r, g, b)) -} - -// RGB3BitB is a syntax for Color3BitB with NewRGB3Bit. -func (builder *Builder) RGB3BitB(r, g, b uint8) *Builder { - return builder.Color3BitB(NewRGB3Bit(r, g, b)) -} - -// RGB8BitF is a syntax for Color8BitF with NewRGB8Bit. -func (builder *Builder) RGB8BitF(r, g, b uint8) *Builder { - return builder.Color8BitF(NewRGB8Bit(r, g, b)) -} - -// RGB8BitB is a syntax for Color8BitB with NewRGB8Bit. -func (builder *Builder) RGB8BitB(r, g, b uint8) *Builder { - return builder.Color8BitB(NewRGB8Bit(r, g, b)) -} - -func init() { - EmptyBuilder = &Builder{empty} -} diff --git a/vendor/github.com/morikuni/aec/sample.gif b/vendor/github.com/morikuni/aec/sample.gif deleted file mode 100644 index c6c613bb70645efa4e184726060ea1ff981eeceb..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 12548 zcmeHtXHe7ox^?KiNed7{8X;7XCP*=%29P2G0wRPS6cCY(5D}@N_aaD@UX-rVRGLyn ziUk#vP5`li6zlsB?&3b@?0xn-bLW1#I^&ms!?1qqS><^Q4E0qHIxs9l8jiG4D5=G zkc%wJQx+903onrs@|6>>krR&H55KY>eoh`;asZm7fND|@8CDR!sE7_%MAs^cwkwL% zD8UkxCDWAAxhk0ZDx%L+gi8*hnhqkK92Ch^mCRSgl&MNwQ$;^kh1^y{4XcSwtBI7U zOIE2%HL9a~)WsLnMc33J&6<)AG*RQ4$Z0M3oEDOz1$(b0YH(Pw?XYC`VaYpKsV=P4 z7FJXVhd0D24dNsp;UpHd#UJTNZt5VXbS0+rBajp4Phz6_~W&Tz}DWZtdlR_FHdX zbw;p>nRc|ipYKnXb{y$w-B=ziF)lQ{-S%;9vg!1zk=yOtZ)bYq9mUM%UMfA9n94_HLw{y+a(h(q-H)YoF^N>j>XMNzW=%^r(B zo(sGdWe;Xm(&m&J4NpP=hLMdqme=}W3IoesvgnH&z-y5>+3elk1COcaV|B0R4H$9Y zV*2>z@q}O<@LD9v%r8`##56I&r^e>X2z1>hQ07gn4Oe)0x)57)f?6Z+T3DWpO+>(w z>MQt9F%RMD^U($GTVz$+w>n>xyGT1aY;RqzrjI&k+M=7B6Ttxi%Q#QpELdhJQ~WMw zrd9O}%uo?|yP&=kswoRaNCawq{q$jH<8$z(&I>QTF$-N~5~0(e?+vAYzwBrOT3w!r zIIv#MgcGuVLgREB?jH?3m{G>GkIoQ(H43Z^ycVgIdj(kkQGrfjQfKIB7>WB`iZeTV zDFP?^gs~(6!zhWus|6oSv9)GywvA@u-yUUr%Yidbb%azIq|wNzEo9j{_eP}B_#TW- zr16?9#`Q1WA7vD=qshlQS__p!U07Yq&z@YW^3YK)59q?W-}e|55j$k54LvS^H!pPh z>M0R?L0_s0XR*@INk(%=>L+B7Gm$#t_D_0q>8waq&>YTY$&~b)Z5}jE4d2Dfyh!xc zqvg6veBEv^D18mIao_Qpn%-4O0~-fstT}^Is@kX{Nw;&9USun7v>{JYox4d{U~N5X zYUTY3c1q1@)ZJD|mncx`v+Rjm43R#Q!eOs+67*;_)hVXq0E*zp`ncvfcK40?+b&LW zuF##nVk`4s{20GLStcc=Qo3nPM&-L9hw*yXoa~pPnrK@qnC`Jz-I{b}<=Z?^4?m~x z*QbxFck?DmxZQ}6BRz~$@PFN3$guW!E!ZHi<@ zMhcql2!%1dB+rs1iuusFx^h7aQ7BGTc)D4^zJY zH=l4Jr6WOWbx-BYUssFVYg@lLuciL>#^*bq(9Lv*+upVDp6OWY7rgQLL)JmFM;oI| za$n6K9sj=lQQNKV;;Sj9#jo2JqPZUJEG&C}Gow^o+Ww?<<88?FyKU6>FXx^-9Zx!hv)_14rhhu5DCU4NsxrVV%*@?Lx>=@Sb;o*5Cyd;CimDp(W!r0-= z{HeGa4(&)phc3(U{%(P`cTw^QPHYUg9^vQj$l)nhIoZN{pig|GvFxE-DHp-LqMQxU zdXHv#&SvWh$-O6=B+T-9gZ03s@1w2!XZby|^<;e-;uXIjz{gweL6RCG-87KG_bT-n z%Nr7XT#=B^+xOMRk0u59JBn^p8fbiYf8oLxBWTrb%qQ{ji!nMIg+vEJKl+S9s284(QkX=Nod_q1+A_ULu0)}7Y7rh6OE zx-99*MnLOiNdZ)?GrXBd)wr#t6AL46eeUF%4*DhZ9J z9~ShtozIU{i47IouQjo~A^$?v?%}sF^af>qXmi8i&ezMEv#(y6DIEU(9kI(a6DrCA zK8ngm0b0jmp*?evQJarN0?;~mRGJ3SF;X}H!xIf9Pjbf~vSPSn(dDY#acnf>{qgem zcbs8T7JbFq%nFq*;>xDyU9`zZ5DO`~tU>tX@vkxF*grJyFVR8(Xqo;4v=}id*gu?! 
zdMQd#n9+E}6+hMmkK*{jXO!Gz2PQl+!}^qDSr}xs{8GT`il_LMZel;GH0zX9Rdi&W Y(?6j)4M_L*quqaULH)mVw5$IA0FB)@mjD0& diff --git a/vendor/github.com/morikuni/aec/sgr.go b/vendor/github.com/morikuni/aec/sgr.go deleted file mode 100644 index 0ba3464e6..000000000 --- a/vendor/github.com/morikuni/aec/sgr.go +++ /dev/null @@ -1,202 +0,0 @@ -package aec - -import ( - "fmt" -) - -// RGB3Bit is a 3bit RGB color. -type RGB3Bit uint8 - -// RGB8Bit is a 8bit RGB color. -type RGB8Bit uint8 - -func newSGR(n uint) ANSI { - return newAnsi(fmt.Sprintf(esc+"%dm", n)) -} - -// NewRGB3Bit create a RGB3Bit from given RGB. -func NewRGB3Bit(r, g, b uint8) RGB3Bit { - return RGB3Bit((r >> 7) | ((g >> 6) & 0x2) | ((b >> 5) & 0x4)) -} - -// NewRGB8Bit create a RGB8Bit from given RGB. -func NewRGB8Bit(r, g, b uint8) RGB8Bit { - return RGB8Bit(16 + 36*(r/43) + 6*(g/43) + b/43) -} - -// Color3BitF set the foreground color of text. -func Color3BitF(c RGB3Bit) ANSI { - return newAnsi(fmt.Sprintf(esc+"%dm", c+30)) -} - -// Color3BitB set the background color of text. -func Color3BitB(c RGB3Bit) ANSI { - return newAnsi(fmt.Sprintf(esc+"%dm", c+40)) -} - -// Color8BitF set the foreground color of text. -func Color8BitF(c RGB8Bit) ANSI { - return newAnsi(fmt.Sprintf(esc+"38;5;%dm", c)) -} - -// Color8BitB set the background color of text. -func Color8BitB(c RGB8Bit) ANSI { - return newAnsi(fmt.Sprintf(esc+"48;5;%dm", c)) -} - -// FullColorF set the foreground color of text. -func FullColorF(r, g, b uint8) ANSI { - return newAnsi(fmt.Sprintf(esc+"38;2;%d;%d;%dm", r, g, b)) -} - -// FullColorB set the foreground color of text. -func FullColorB(r, g, b uint8) ANSI { - return newAnsi(fmt.Sprintf(esc+"48;2;%d;%d;%dm", r, g, b)) -} - -// Style -var ( - // Bold set the text style to bold or increased intensity. - Bold ANSI - - // Faint set the text style to faint. - Faint ANSI - - // Italic set the text style to italic. - Italic ANSI - - // Underline set the text style to underline. - Underline ANSI - - // BlinkSlow set the text style to slow blink. - BlinkSlow ANSI - - // BlinkRapid set the text style to rapid blink. - BlinkRapid ANSI - - // Inverse swap the foreground color and background color. - Inverse ANSI - - // Conceal set the text style to conceal. - Conceal ANSI - - // CrossOut set the text style to crossed out. - CrossOut ANSI - - // Frame set the text style to framed. - Frame ANSI - - // Encircle set the text style to encircled. - Encircle ANSI - - // Overline set the text style to overlined. - Overline ANSI -) - -// Foreground color of text. -var ( - // DefaultF is the default color of foreground. - DefaultF ANSI - - // Normal color - BlackF ANSI - RedF ANSI - GreenF ANSI - YellowF ANSI - BlueF ANSI - MagentaF ANSI - CyanF ANSI - WhiteF ANSI - - // Light color - LightBlackF ANSI - LightRedF ANSI - LightGreenF ANSI - LightYellowF ANSI - LightBlueF ANSI - LightMagentaF ANSI - LightCyanF ANSI - LightWhiteF ANSI -) - -// Background color of text. -var ( - // DefaultB is the default color of background. 
- DefaultB ANSI - - // Normal color - BlackB ANSI - RedB ANSI - GreenB ANSI - YellowB ANSI - BlueB ANSI - MagentaB ANSI - CyanB ANSI - WhiteB ANSI - - // Light color - LightBlackB ANSI - LightRedB ANSI - LightGreenB ANSI - LightYellowB ANSI - LightBlueB ANSI - LightMagentaB ANSI - LightCyanB ANSI - LightWhiteB ANSI -) - -func init() { - Bold = newSGR(1) - Faint = newSGR(2) - Italic = newSGR(3) - Underline = newSGR(4) - BlinkSlow = newSGR(5) - BlinkRapid = newSGR(6) - Inverse = newSGR(7) - Conceal = newSGR(8) - CrossOut = newSGR(9) - - BlackF = newSGR(30) - RedF = newSGR(31) - GreenF = newSGR(32) - YellowF = newSGR(33) - BlueF = newSGR(34) - MagentaF = newSGR(35) - CyanF = newSGR(36) - WhiteF = newSGR(37) - - DefaultF = newSGR(39) - - BlackB = newSGR(40) - RedB = newSGR(41) - GreenB = newSGR(42) - YellowB = newSGR(43) - BlueB = newSGR(44) - MagentaB = newSGR(45) - CyanB = newSGR(46) - WhiteB = newSGR(47) - - DefaultB = newSGR(49) - - Frame = newSGR(51) - Encircle = newSGR(52) - Overline = newSGR(53) - - LightBlackF = newSGR(90) - LightRedF = newSGR(91) - LightGreenF = newSGR(92) - LightYellowF = newSGR(93) - LightBlueF = newSGR(94) - LightMagentaF = newSGR(95) - LightCyanF = newSGR(96) - LightWhiteF = newSGR(97) - - LightBlackB = newSGR(100) - LightRedB = newSGR(101) - LightGreenB = newSGR(102) - LightYellowB = newSGR(103) - LightBlueB = newSGR(104) - LightMagentaB = newSGR(105) - LightCyanB = newSGR(106) - LightWhiteB = newSGR(107) -} diff --git a/vendor/github.com/opencontainers/go-digest/.mailmap b/vendor/github.com/opencontainers/go-digest/.mailmap deleted file mode 100644 index eaf8b2f9e..000000000 --- a/vendor/github.com/opencontainers/go-digest/.mailmap +++ /dev/null @@ -1,4 +0,0 @@ -Aaron Lehmann -Derek McGowan -Stephen J Day -Haibing Zhou diff --git a/vendor/github.com/opencontainers/go-digest/.pullapprove.yml b/vendor/github.com/opencontainers/go-digest/.pullapprove.yml deleted file mode 100644 index b6165f83c..000000000 --- a/vendor/github.com/opencontainers/go-digest/.pullapprove.yml +++ /dev/null @@ -1,28 +0,0 @@ -version: 2 - -requirements: - signed_off_by: - required: true - -always_pending: - title_regex: '^WIP' - explanation: 'Work in progress...' - -group_defaults: - required: 2 - approve_by_comment: - enabled: true - approve_regex: '^LGTM' - reject_regex: '^Rejected' - reset_on_push: - enabled: true - author_approval: - ignored: true - conditions: - branches: - - master - -groups: - go-digest: - teams: - - go-digest-maintainers diff --git a/vendor/github.com/opencontainers/go-digest/.travis.yml b/vendor/github.com/opencontainers/go-digest/.travis.yml deleted file mode 100644 index 5775f885c..000000000 --- a/vendor/github.com/opencontainers/go-digest/.travis.yml +++ /dev/null @@ -1,5 +0,0 @@ -language: go -go: - - 1.12.x - - 1.13.x - - master diff --git a/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md b/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md deleted file mode 100644 index e4d962ac1..000000000 --- a/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md +++ /dev/null @@ -1,72 +0,0 @@ -# Contributing to Docker open source projects - -Want to hack on this project? Awesome! Here are instructions to get you started. - -This project is a part of the [Docker](https://www.docker.com) project, and follows -the same rules and principles. If you're already familiar with the way -Docker does things, you'll feel right at home. 
- -Otherwise, go read Docker's -[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), -[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), -[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and -[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). - -For an in-depth description of our contribution process, visit the -contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/) - -### Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. The rules are pretty simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -1 Letterman Drive -Suite D4700 -San Francisco, CA, 94129 - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -Use your real name (sorry, no pseudonyms or anonymous contributions.) - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. diff --git a/vendor/github.com/opencontainers/go-digest/LICENSE b/vendor/github.com/opencontainers/go-digest/LICENSE deleted file mode 100644 index 3ac8ab648..000000000 --- a/vendor/github.com/opencontainers/go-digest/LICENSE +++ /dev/null @@ -1,192 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2019, 2020 OCI Contributors - Copyright 2016 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/opencontainers/go-digest/LICENSE.docs b/vendor/github.com/opencontainers/go-digest/LICENSE.docs deleted file mode 100644 index e26cd4fc8..000000000 --- a/vendor/github.com/opencontainers/go-digest/LICENSE.docs +++ /dev/null @@ -1,425 +0,0 @@ -Attribution-ShareAlike 4.0 International - -======================================================================= - -Creative Commons Corporation ("Creative Commons") is not a law firm and -does not provide legal services or legal advice. Distribution of -Creative Commons public licenses does not create a lawyer-client or -other relationship. 
Creative Commons makes its licenses and related -information available on an "as-is" basis. Creative Commons gives no -warranties regarding its licenses, any material licensed under their -terms and conditions, or any related information. Creative Commons -disclaims all liability for damages resulting from their use to the -fullest extent possible. - -Using Creative Commons Public Licenses - -Creative Commons public licenses provide a standard set of terms and -conditions that creators and other rights holders may use to share -original works of authorship and other material subject to copyright -and certain other rights specified in the public license below. The -following considerations are for informational purposes only, are not -exhaustive, and do not form part of our licenses. - - Considerations for licensors: Our public licenses are - intended for use by those authorized to give the public - permission to use material in ways otherwise restricted by - copyright and certain other rights. Our licenses are - irrevocable. Licensors should read and understand the terms - and conditions of the license they choose before applying it. - Licensors should also secure all rights necessary before - applying our licenses so that the public can reuse the - material as expected. Licensors should clearly mark any - material not subject to the license. This includes other CC- - licensed material, or material used under an exception or - limitation to copyright. More considerations for licensors: - wiki.creativecommons.org/Considerations_for_licensors - - Considerations for the public: By using one of our public - licenses, a licensor grants the public permission to use the - licensed material under specified terms and conditions. If - the licensor's permission is not necessary for any reason--for - example, because of any applicable exception or limitation to - copyright--then that use is not regulated by the license. Our - licenses grant only permissions under copyright and certain - other rights that a licensor has authority to grant. Use of - the licensed material may still be restricted for other - reasons, including because others have copyright or other - rights in the material. A licensor may make special requests, - such as asking that all changes be marked or described. - Although not required by our licenses, you are encouraged to - respect those requests where reasonable. More_considerations - for the public: - wiki.creativecommons.org/Considerations_for_licensees - -======================================================================= - -Creative Commons Attribution-ShareAlike 4.0 International Public -License - -By exercising the Licensed Rights (defined below), You accept and agree -to be bound by the terms and conditions of this Creative Commons -Attribution-ShareAlike 4.0 International Public License ("Public -License"). To the extent this Public License may be interpreted as a -contract, You are granted the Licensed Rights in consideration of Your -acceptance of these terms and conditions, and the Licensor grants You -such rights in consideration of benefits the Licensor receives from -making the Licensed Material available under these terms and -conditions. - - -Section 1 -- Definitions. - - a. 
Adapted Material means material subject to Copyright and Similar - Rights that is derived from or based upon the Licensed Material - and in which the Licensed Material is translated, altered, - arranged, transformed, or otherwise modified in a manner requiring - permission under the Copyright and Similar Rights held by the - Licensor. For purposes of this Public License, where the Licensed - Material is a musical work, performance, or sound recording, - Adapted Material is always produced where the Licensed Material is - synched in timed relation with a moving image. - - b. Adapter's License means the license You apply to Your Copyright - and Similar Rights in Your contributions to Adapted Material in - accordance with the terms and conditions of this Public License. - - c. BY-SA Compatible License means a license listed at - creativecommons.org/compatiblelicenses, approved by Creative - Commons as essentially the equivalent of this Public License. - - d. Copyright and Similar Rights means copyright and/or similar rights - closely related to copyright including, without limitation, - performance, broadcast, sound recording, and Sui Generis Database - Rights, without regard to how the rights are labeled or - categorized. For purposes of this Public License, the rights - specified in Section 2(b)(1)-(2) are not Copyright and Similar - Rights. - - e. Effective Technological Measures means those measures that, in the - absence of proper authority, may not be circumvented under laws - fulfilling obligations under Article 11 of the WIPO Copyright - Treaty adopted on December 20, 1996, and/or similar international - agreements. - - f. Exceptions and Limitations means fair use, fair dealing, and/or - any other exception or limitation to Copyright and Similar Rights - that applies to Your use of the Licensed Material. - - g. License Elements means the license attributes listed in the name - of a Creative Commons Public License. The License Elements of this - Public License are Attribution and ShareAlike. - - h. Licensed Material means the artistic or literary work, database, - or other material to which the Licensor applied this Public - License. - - i. Licensed Rights means the rights granted to You subject to the - terms and conditions of this Public License, which are limited to - all Copyright and Similar Rights that apply to Your use of the - Licensed Material and that the Licensor has authority to license. - - j. Licensor means the individual(s) or entity(ies) granting rights - under this Public License. - - k. Share means to provide material to the public by any means or - process that requires permission under the Licensed Rights, such - as reproduction, public display, public performance, distribution, - dissemination, communication, or importation, and to make material - available to the public including in ways that members of the - public may access the material from a place and at a time - individually chosen by them. - - l. Sui Generis Database Rights means rights other than copyright - resulting from Directive 96/9/EC of the European Parliament and of - the Council of 11 March 1996 on the legal protection of databases, - as amended and/or succeeded, as well as other essentially - equivalent rights anywhere in the world. - - m. You means the individual or entity exercising the Licensed Rights - under this Public License. Your has a corresponding meaning. - - -Section 2 -- Scope. - - a. License grant. - - 1. 
Subject to the terms and conditions of this Public License, - the Licensor hereby grants You a worldwide, royalty-free, - non-sublicensable, non-exclusive, irrevocable license to - exercise the Licensed Rights in the Licensed Material to: - - a. reproduce and Share the Licensed Material, in whole or - in part; and - - b. produce, reproduce, and Share Adapted Material. - - 2. Exceptions and Limitations. For the avoidance of doubt, where - Exceptions and Limitations apply to Your use, this Public - License does not apply, and You do not need to comply with - its terms and conditions. - - 3. Term. The term of this Public License is specified in Section - 6(a). - - 4. Media and formats; technical modifications allowed. The - Licensor authorizes You to exercise the Licensed Rights in - all media and formats whether now known or hereafter created, - and to make technical modifications necessary to do so. The - Licensor waives and/or agrees not to assert any right or - authority to forbid You from making technical modifications - necessary to exercise the Licensed Rights, including - technical modifications necessary to circumvent Effective - Technological Measures. For purposes of this Public License, - simply making modifications authorized by this Section 2(a) - (4) never produces Adapted Material. - - 5. Downstream recipients. - - a. Offer from the Licensor -- Licensed Material. Every - recipient of the Licensed Material automatically - receives an offer from the Licensor to exercise the - Licensed Rights under the terms and conditions of this - Public License. - - b. Additional offer from the Licensor -- Adapted Material. - Every recipient of Adapted Material from You - automatically receives an offer from the Licensor to - exercise the Licensed Rights in the Adapted Material - under the conditions of the Adapter's License You apply. - - c. No downstream restrictions. You may not offer or impose - any additional or different terms or conditions on, or - apply any Effective Technological Measures to, the - Licensed Material if doing so restricts exercise of the - Licensed Rights by any recipient of the Licensed - Material. - - 6. No endorsement. Nothing in this Public License constitutes or - may be construed as permission to assert or imply that You - are, or that Your use of the Licensed Material is, connected - with, or sponsored, endorsed, or granted official status by, - the Licensor or others designated to receive attribution as - provided in Section 3(a)(1)(A)(i). - - b. Other rights. - - 1. Moral rights, such as the right of integrity, are not - licensed under this Public License, nor are publicity, - privacy, and/or other similar personality rights; however, to - the extent possible, the Licensor waives and/or agrees not to - assert any such rights held by the Licensor to the limited - extent necessary to allow You to exercise the Licensed - Rights, but not otherwise. - - 2. Patent and trademark rights are not licensed under this - Public License. - - 3. To the extent possible, the Licensor waives any right to - collect royalties from You for the exercise of the Licensed - Rights, whether directly or through a collecting society - under any voluntary or waivable statutory or compulsory - licensing scheme. In all other cases the Licensor expressly - reserves any right to collect such royalties. - - -Section 3 -- License Conditions. - -Your exercise of the Licensed Rights is expressly made subject to the -following conditions. - - a. Attribution. - - 1. 
If You Share the Licensed Material (including in modified - form), You must: - - a. retain the following if it is supplied by the Licensor - with the Licensed Material: - - i. identification of the creator(s) of the Licensed - Material and any others designated to receive - attribution, in any reasonable manner requested by - the Licensor (including by pseudonym if - designated); - - ii. a copyright notice; - - iii. a notice that refers to this Public License; - - iv. a notice that refers to the disclaimer of - warranties; - - v. a URI or hyperlink to the Licensed Material to the - extent reasonably practicable; - - b. indicate if You modified the Licensed Material and - retain an indication of any previous modifications; and - - c. indicate the Licensed Material is licensed under this - Public License, and include the text of, or the URI or - hyperlink to, this Public License. - - 2. You may satisfy the conditions in Section 3(a)(1) in any - reasonable manner based on the medium, means, and context in - which You Share the Licensed Material. For example, it may be - reasonable to satisfy the conditions by providing a URI or - hyperlink to a resource that includes the required - information. - - 3. If requested by the Licensor, You must remove any of the - information required by Section 3(a)(1)(A) to the extent - reasonably practicable. - - b. ShareAlike. - - In addition to the conditions in Section 3(a), if You Share - Adapted Material You produce, the following conditions also apply. - - 1. The Adapter's License You apply must be a Creative Commons - license with the same License Elements, this version or - later, or a BY-SA Compatible License. - - 2. You must include the text of, or the URI or hyperlink to, the - Adapter's License You apply. You may satisfy this condition - in any reasonable manner based on the medium, means, and - context in which You Share Adapted Material. - - 3. You may not offer or impose any additional or different terms - or conditions on, or apply any Effective Technological - Measures to, Adapted Material that restrict exercise of the - rights granted under the Adapter's License You apply. - - -Section 4 -- Sui Generis Database Rights. - -Where the Licensed Rights include Sui Generis Database Rights that -apply to Your use of the Licensed Material: - - a. for the avoidance of doubt, Section 2(a)(1) grants You the right - to extract, reuse, reproduce, and Share all or a substantial - portion of the contents of the database; - - b. if You include all or a substantial portion of the database - contents in a database in which You have Sui Generis Database - Rights, then the database in which You have Sui Generis Database - Rights (but not its individual contents) is Adapted Material, - - including for purposes of Section 3(b); and - c. You must comply with the conditions in Section 3(a) if You Share - all or a substantial portion of the contents of the database. - -For the avoidance of doubt, this Section 4 supplements and does not -replace Your obligations under this Public License where the Licensed -Rights include other Copyright and Similar Rights. - - -Section 5 -- Disclaimer of Warranties and Limitation of Liability. - - a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE - EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS - AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF - ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, - IMPLIED, STATUTORY, OR OTHER. 
THIS INCLUDES, WITHOUT LIMITATION, - WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR - PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, - ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT - KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT - ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. - - b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE - TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, - NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, - INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, - COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR - USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN - ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR - DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR - IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. - - c. The disclaimer of warranties and limitation of liability provided - above shall be interpreted in a manner that, to the extent - possible, most closely approximates an absolute disclaimer and - waiver of all liability. - - -Section 6 -- Term and Termination. - - a. This Public License applies for the term of the Copyright and - Similar Rights licensed here. However, if You fail to comply with - this Public License, then Your rights under this Public License - terminate automatically. - - b. Where Your right to use the Licensed Material has terminated under - Section 6(a), it reinstates: - - 1. automatically as of the date the violation is cured, provided - it is cured within 30 days of Your discovery of the - violation; or - - 2. upon express reinstatement by the Licensor. - - For the avoidance of doubt, this Section 6(b) does not affect any - right the Licensor may have to seek remedies for Your violations - of this Public License. - - c. For the avoidance of doubt, the Licensor may also offer the - Licensed Material under separate terms or conditions or stop - distributing the Licensed Material at any time; however, doing so - will not terminate this Public License. - - d. Sections 1, 5, 6, 7, and 8 survive termination of this Public - License. - - -Section 7 -- Other Terms and Conditions. - - a. The Licensor shall not be bound by any additional or different - terms or conditions communicated by You unless expressly agreed. - - b. Any arrangements, understandings, or agreements regarding the - Licensed Material not stated herein are separate from and - independent of the terms and conditions of this Public License. - - -Section 8 -- Interpretation. - - a. For the avoidance of doubt, this Public License does not, and - shall not be interpreted to, reduce, limit, restrict, or impose - conditions on any use of the Licensed Material that could lawfully - be made without permission under this Public License. - - b. To the extent possible, if any provision of this Public License is - deemed unenforceable, it shall be automatically reformed to the - minimum extent necessary to make it enforceable. If the provision - cannot be reformed, it shall be severed from this Public License - without affecting the enforceability of the remaining terms and - conditions. - - c. No term or condition of this Public License will be waived and no - failure to comply consented to unless expressly agreed to by the - Licensor. - - d. 
Nothing in this Public License constitutes or may be interpreted - as a limitation upon, or waiver of, any privileges and immunities - that apply to the Licensor or You, including from the legal - processes of any jurisdiction or authority. - - -======================================================================= - -Creative Commons is not a party to its public licenses. -Notwithstanding, Creative Commons may elect to apply one of its public -licenses to material it publishes and in those instances will be -considered the "Licensor." Except for the limited purpose of indicating -that material is shared under a Creative Commons public license or as -otherwise permitted by the Creative Commons policies published at -creativecommons.org/policies, Creative Commons does not authorize the -use of the trademark "Creative Commons" or any other trademark or logo -of Creative Commons without its prior written consent including, -without limitation, in connection with any unauthorized modifications -to any of its public licenses or any other arrangements, -understandings, or agreements concerning use of licensed material. For -the avoidance of doubt, this paragraph does not form part of the public -licenses. - -Creative Commons may be contacted at creativecommons.org. diff --git a/vendor/github.com/opencontainers/go-digest/MAINTAINERS b/vendor/github.com/opencontainers/go-digest/MAINTAINERS deleted file mode 100644 index 843b1b206..000000000 --- a/vendor/github.com/opencontainers/go-digest/MAINTAINERS +++ /dev/null @@ -1,5 +0,0 @@ -Derek McGowan (@dmcgowan) -Stephen Day (@stevvooe) -Vincent Batts (@vbatts) -Akihiro Suda (@AkihiroSuda) -Sebastiaan van Stijn (@thaJeztah) diff --git a/vendor/github.com/opencontainers/go-digest/README.md b/vendor/github.com/opencontainers/go-digest/README.md deleted file mode 100644 index a11287207..000000000 --- a/vendor/github.com/opencontainers/go-digest/README.md +++ /dev/null @@ -1,96 +0,0 @@ -# go-digest - -[![GoDoc](https://godoc.org/github.com/opencontainers/go-digest?status.svg)](https://godoc.org/github.com/opencontainers/go-digest) [![Go Report Card](https://goreportcard.com/badge/github.com/opencontainers/go-digest)](https://goreportcard.com/report/github.com/opencontainers/go-digest) [![Build Status](https://travis-ci.org/opencontainers/go-digest.svg?branch=master)](https://travis-ci.org/opencontainers/go-digest) - -Common digest package used across the container ecosystem. - -Please see the [godoc](https://godoc.org/github.com/opencontainers/go-digest) for more information. - -# What is a digest? - -A digest is just a [hash](https://en.wikipedia.org/wiki/Hash_function). - -The most common use case for a digest is to create a content identifier for use in [Content Addressable Storage](https://en.wikipedia.org/wiki/Content-addressable_storage) systems: - -```go -id := digest.FromBytes([]byte("my content")) -``` - -In the example above, the id can be used to uniquely identify the byte slice "my content". -This allows two disparate applications to agree on a verifiable identifier without having to trust one another. 
- -An identifying digest can be verified, as follows: - -```go -if id != digest.FromBytes([]byte("my content")) { - return errors.New("the content has changed!") -} -``` - -A `Verifier` type can be used to handle cases where an `io.Reader` makes more sense: - -```go -rd := getContent() -verifier := id.Verifier() -io.Copy(verifier, rd) - -if !verifier.Verified() { - return errors.New("the content has changed!") -} -``` - -Using [Merkle DAGs](https://en.wikipedia.org/wiki/Merkle_tree), this can power a rich, safe, content distribution system. - -# Usage - -While the [godoc](https://godoc.org/github.com/opencontainers/go-digest) is considered the best resource, a few important items need to be called out when using this package. - -1. Make sure to import the hash implementations into your application or the package will panic. - You should have something like the following in the main (or other entrypoint) of your application: - - ```go - import ( - _ "crypto/sha256" - _ "crypto/sha512" - ) - ``` - This may seem inconvenient but it allows you replace the hash - implementations with others, such as https://github.com/stevvooe/resumable. - -2. Even though `digest.Digest` may be assemblable as a string, _always_ verify your input with `digest.Parse` or use `Digest.Validate` when accepting untrusted input. - While there are measures to avoid common problems, this will ensure you have valid digests in the rest of your application. - -3. While alternative encodings of hash values (digests) are possible (for example, base64), this package deals exclusively with hex-encoded digests. - -# Stability - -The Go API, at this stage, is considered stable, unless otherwise noted. - -As always, before using a package export, read the [godoc](https://godoc.org/github.com/opencontainers/go-digest). - -# Contributing - -This package is considered fairly complete. -It has been in production in thousands (millions?) of deployments and is fairly battle-hardened. -New additions will be met with skepticism. -If you think there is a missing feature, please file a bug clearly describing the problem and the alternatives you tried before submitting a PR. - -## Code of Conduct - -Participation in the OpenContainers community is governed by [OpenContainer's Code of Conduct][code-of-conduct]. - -## Security - -If you find an issue, please follow the [security][security] protocol to report it. - -# Copyright and license - -Copyright © 2019, 2020 OCI Contributors -Copyright © 2016 Docker, Inc. -All rights reserved, except as follows. -Code is released under the [Apache 2.0 license](LICENSE). -This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs). -You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/. - -[security]: https://github.com/opencontainers/org/blob/master/security -[code-of-conduct]: https://github.com/opencontainers/org/blob/master/CODE_OF_CONDUCT.md diff --git a/vendor/github.com/opencontainers/go-digest/algorithm.go b/vendor/github.com/opencontainers/go-digest/algorithm.go deleted file mode 100644 index 490951dc3..000000000 --- a/vendor/github.com/opencontainers/go-digest/algorithm.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2019, 2020 OCI Contributors -// Copyright 2017 Docker, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package digest - -import ( - "crypto" - "fmt" - "hash" - "io" - "regexp" -) - -// Algorithm identifies and implementation of a digester by an identifier. -// Note the that this defines both the hash algorithm used and the string -// encoding. -type Algorithm string - -// supported digest types -const ( - SHA256 Algorithm = "sha256" // sha256 with hex encoding (lower case only) - SHA384 Algorithm = "sha384" // sha384 with hex encoding (lower case only) - SHA512 Algorithm = "sha512" // sha512 with hex encoding (lower case only) - - // Canonical is the primary digest algorithm used with the distribution - // project. Other digests may be used but this one is the primary storage - // digest. - Canonical = SHA256 -) - -var ( - // TODO(stevvooe): Follow the pattern of the standard crypto package for - // registration of digests. Effectively, we are a registerable set and - // common symbol access. - - // algorithms maps values to hash.Hash implementations. Other algorithms - // may be available but they cannot be calculated by the digest package. - algorithms = map[Algorithm]crypto.Hash{ - SHA256: crypto.SHA256, - SHA384: crypto.SHA384, - SHA512: crypto.SHA512, - } - - // anchoredEncodedRegexps contains anchored regular expressions for hex-encoded digests. - // Note that /A-F/ disallowed. - anchoredEncodedRegexps = map[Algorithm]*regexp.Regexp{ - SHA256: regexp.MustCompile(`^[a-f0-9]{64}$`), - SHA384: regexp.MustCompile(`^[a-f0-9]{96}$`), - SHA512: regexp.MustCompile(`^[a-f0-9]{128}$`), - } -) - -// Available returns true if the digest type is available for use. If this -// returns false, Digester and Hash will return nil. -func (a Algorithm) Available() bool { - h, ok := algorithms[a] - if !ok { - return false - } - - // check availability of the hash, as well - return h.Available() -} - -func (a Algorithm) String() string { - return string(a) -} - -// Size returns number of bytes returned by the hash. -func (a Algorithm) Size() int { - h, ok := algorithms[a] - if !ok { - return 0 - } - return h.Size() -} - -// Set implemented to allow use of Algorithm as a command line flag. -func (a *Algorithm) Set(value string) error { - if value == "" { - *a = Canonical - } else { - // just do a type conversion, support is queried with Available. - *a = Algorithm(value) - } - - if !a.Available() { - return ErrDigestUnsupported - } - - return nil -} - -// Digester returns a new digester for the specified algorithm. If the algorithm -// does not have a digester implementation, nil will be returned. This can be -// checked by calling Available before calling Digester. -func (a Algorithm) Digester() Digester { - return &digester{ - alg: a, - hash: a.Hash(), - } -} - -// Hash returns a new hash as used by the algorithm. If not available, the -// method will panic. Check Algorithm.Available() before calling. 
-func (a Algorithm) Hash() hash.Hash { - if !a.Available() { - // Empty algorithm string is invalid - if a == "" { - panic(fmt.Sprintf("empty digest algorithm, validate before calling Algorithm.Hash()")) - } - - // NOTE(stevvooe): A missing hash is usually a programming error that - // must be resolved at compile time. We don't import in the digest - // package to allow users to choose their hash implementation (such as - // when using stevvooe/resumable or a hardware accelerated package). - // - // Applications that may want to resolve the hash at runtime should - // call Algorithm.Available before call Algorithm.Hash(). - panic(fmt.Sprintf("%v not available (make sure it is imported)", a)) - } - - return algorithms[a].New() -} - -// Encode encodes the raw bytes of a digest, typically from a hash.Hash, into -// the encoded portion of the digest. -func (a Algorithm) Encode(d []byte) string { - // TODO(stevvooe): Currently, all algorithms use a hex encoding. When we - // add support for back registration, we can modify this accordingly. - return fmt.Sprintf("%x", d) -} - -// FromReader returns the digest of the reader using the algorithm. -func (a Algorithm) FromReader(rd io.Reader) (Digest, error) { - digester := a.Digester() - - if _, err := io.Copy(digester.Hash(), rd); err != nil { - return "", err - } - - return digester.Digest(), nil -} - -// FromBytes digests the input and returns a Digest. -func (a Algorithm) FromBytes(p []byte) Digest { - digester := a.Digester() - - if _, err := digester.Hash().Write(p); err != nil { - // Writes to a Hash should never fail. None of the existing - // hash implementations in the stdlib or hashes vendored - // here can return errors from Write. Having a panic in this - // condition instead of having FromBytes return an error value - // avoids unnecessary error handling paths in all callers. - panic("write to hash function returned error: " + err.Error()) - } - - return digester.Digest() -} - -// FromString digests the string input and returns a Digest. -func (a Algorithm) FromString(s string) Digest { - return a.FromBytes([]byte(s)) -} - -// Validate validates the encoded portion string -func (a Algorithm) Validate(encoded string) error { - r, ok := anchoredEncodedRegexps[a] - if !ok { - return ErrDigestUnsupported - } - // Digests much always be hex-encoded, ensuring that their hex portion will - // always be size*2 - if a.Size()*2 != len(encoded) { - return ErrDigestInvalidLength - } - if r.MatchString(encoded) { - return nil - } - return ErrDigestInvalidFormat -} diff --git a/vendor/github.com/opencontainers/go-digest/digest.go b/vendor/github.com/opencontainers/go-digest/digest.go deleted file mode 100644 index 518b5e715..000000000 --- a/vendor/github.com/opencontainers/go-digest/digest.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2019, 2020 OCI Contributors -// Copyright 2017 Docker, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package digest - -import ( - "fmt" - "hash" - "io" - "regexp" - "strings" -) - -// Digest allows simple protection of hex formatted digest strings, prefixed -// by their algorithm. Strings of type Digest have some guarantee of being in -// the correct format and it provides quick access to the components of a -// digest string. -// -// The following is an example of the contents of Digest types: -// -// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc -// -// This allows to abstract the digest behind this type and work only in those -// terms. -type Digest string - -// NewDigest returns a Digest from alg and a hash.Hash object. -func NewDigest(alg Algorithm, h hash.Hash) Digest { - return NewDigestFromBytes(alg, h.Sum(nil)) -} - -// NewDigestFromBytes returns a new digest from the byte contents of p. -// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...) -// functions. This is also useful for rebuilding digests from binary -// serializations. -func NewDigestFromBytes(alg Algorithm, p []byte) Digest { - return NewDigestFromEncoded(alg, alg.Encode(p)) -} - -// NewDigestFromHex is deprecated. Please use NewDigestFromEncoded. -func NewDigestFromHex(alg, hex string) Digest { - return NewDigestFromEncoded(Algorithm(alg), hex) -} - -// NewDigestFromEncoded returns a Digest from alg and the encoded digest. -func NewDigestFromEncoded(alg Algorithm, encoded string) Digest { - return Digest(fmt.Sprintf("%s:%s", alg, encoded)) -} - -// DigestRegexp matches valid digest types. -var DigestRegexp = regexp.MustCompile(`[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+`) - -// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match. -var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`) - -var ( - // ErrDigestInvalidFormat returned when digest format invalid. - ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format") - - // ErrDigestInvalidLength returned when digest has invalid length. - ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length") - - // ErrDigestUnsupported returned when the digest algorithm is unsupported. - ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm") -) - -// Parse parses s and returns the validated digest object. An error will -// be returned if the format is invalid. -func Parse(s string) (Digest, error) { - d := Digest(s) - return d, d.Validate() -} - -// FromReader consumes the content of rd until io.EOF, returning canonical digest. -func FromReader(rd io.Reader) (Digest, error) { - return Canonical.FromReader(rd) -} - -// FromBytes digests the input and returns a Digest. -func FromBytes(p []byte) Digest { - return Canonical.FromBytes(p) -} - -// FromString digests the input and returns a Digest. -func FromString(s string) Digest { - return Canonical.FromString(s) -} - -// Validate checks that the contents of d is a valid digest, returning an -// error if not. -func (d Digest) Validate() error { - s := string(d) - i := strings.Index(s, ":") - if i <= 0 || i+1 == len(s) { - return ErrDigestInvalidFormat - } - algorithm, encoded := Algorithm(s[:i]), s[i+1:] - if !algorithm.Available() { - if !DigestRegexpAnchored.MatchString(s) { - return ErrDigestInvalidFormat - } - return ErrDigestUnsupported - } - return algorithm.Validate(encoded) -} - -// Algorithm returns the algorithm portion of the digest. This will panic if -// the underlying digest is not in a valid format. 
-func (d Digest) Algorithm() Algorithm { - return Algorithm(d[:d.sepIndex()]) -} - -// Verifier returns a writer object that can be used to verify a stream of -// content against the digest. If the digest is invalid, the method will panic. -func (d Digest) Verifier() Verifier { - return hashVerifier{ - hash: d.Algorithm().Hash(), - digest: d, - } -} - -// Encoded returns the encoded portion of the digest. This will panic if the -// underlying digest is not in a valid format. -func (d Digest) Encoded() string { - return string(d[d.sepIndex()+1:]) -} - -// Hex is deprecated. Please use Digest.Encoded. -func (d Digest) Hex() string { - return d.Encoded() -} - -func (d Digest) String() string { - return string(d) -} - -func (d Digest) sepIndex() int { - i := strings.Index(string(d), ":") - - if i < 0 { - panic(fmt.Sprintf("no ':' separator in digest %q", d)) - } - - return i -} diff --git a/vendor/github.com/opencontainers/go-digest/digester.go b/vendor/github.com/opencontainers/go-digest/digester.go deleted file mode 100644 index ede907757..000000000 --- a/vendor/github.com/opencontainers/go-digest/digester.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2019, 2020 OCI Contributors -// Copyright 2017 Docker, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package digest - -import "hash" - -// Digester calculates the digest of written data. Writes should go directly -// to the return value of Hash, while calling Digest will return the current -// value of the digest. -type Digester interface { - Hash() hash.Hash // provides direct access to underlying hash instance. - Digest() Digest -} - -// digester provides a simple digester definition that embeds a hasher. -type digester struct { - alg Algorithm - hash hash.Hash -} - -func (d *digester) Hash() hash.Hash { - return d.hash -} - -func (d *digester) Digest() Digest { - return NewDigest(d.alg, d.hash) -} diff --git a/vendor/github.com/opencontainers/go-digest/doc.go b/vendor/github.com/opencontainers/go-digest/doc.go deleted file mode 100644 index 83d3a936c..000000000 --- a/vendor/github.com/opencontainers/go-digest/doc.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2019, 2020 OCI Contributors -// Copyright 2017 Docker, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package digest provides a generalized type to opaquely represent message -// digests and their operations within the registry. The Digest type is -// designed to serve as a flexible identifier in a content-addressable system. 
-// More importantly, it provides tools and wrappers to work with -// hash.Hash-based digests with little effort. -// -// Basics -// -// The format of a digest is simply a string with two parts, dubbed the -// "algorithm" and the "digest", separated by a colon: -// -// : -// -// An example of a sha256 digest representation follows: -// -// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc -// -// The "algorithm" portion defines both the hashing algorithm used to calculate -// the digest and the encoding of the resulting digest, which defaults to "hex" -// if not otherwise specified. Currently, all supported algorithms have their -// digests encoded in hex strings. -// -// In the example above, the string "sha256" is the algorithm and the hex bytes -// are the "digest". -// -// Because the Digest type is simply a string, once a valid Digest is -// obtained, comparisons are cheap, quick and simple to express with the -// standard equality operator. -// -// Verification -// -// The main benefit of using the Digest type is simple verification against a -// given digest. The Verifier interface, modeled after the stdlib hash.Hash -// interface, provides a common write sink for digest verification. After -// writing is complete, calling the Verifier.Verified method will indicate -// whether or not the stream of bytes matches the target digest. -// -// Missing Features -// -// In addition to the above, we intend to add the following features to this -// package: -// -// 1. A Digester type that supports write sink digest calculation. -// -// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry. -// -package digest diff --git a/vendor/github.com/opencontainers/go-digest/verifiers.go b/vendor/github.com/opencontainers/go-digest/verifiers.go deleted file mode 100644 index afef506f4..000000000 --- a/vendor/github.com/opencontainers/go-digest/verifiers.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2019, 2020 OCI Contributors -// Copyright 2017 Docker, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package digest - -import ( - "hash" - "io" -) - -// Verifier presents a general verification interface to be used with message -// digests and other byte stream verifications. Users instantiate a Verifier -// from one of the various methods, write the data under test to it then check -// the result with the Verified method. -type Verifier interface { - io.Writer - - // Verified will return true if the content written to Verifier matches - // the digest. 
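The verification flow described in the package documentation above can be sketched as follows, again assuming a direct import of github.com/opencontainers/go-digest; the payload string is an illustrative placeholder:

package main

import (
	"fmt"
	"io"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	// The expected digest would normally come from a manifest or an index.
	expected := digest.FromString("example payload")

	// Verifier is a write sink: stream the content under test into it and
	// then ask whether the written bytes matched the expected digest.
	verifier := expected.Verifier()
	if _, err := io.Copy(verifier, strings.NewReader("example payload")); err != nil {
		panic(err)
	}
	fmt.Println("verified:", verifier.Verified()) // true
}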
- Verified() bool -} - -type hashVerifier struct { - digest Digest - hash hash.Hash -} - -func (hv hashVerifier) Write(p []byte) (n int, err error) { - return hv.hash.Write(p) -} - -func (hv hashVerifier) Verified() bool { - return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash) -} diff --git a/vendor/github.com/opencontainers/image-spec/LICENSE b/vendor/github.com/opencontainers/image-spec/LICENSE deleted file mode 100644 index 9fdc20fdb..000000000 --- a/vendor/github.com/opencontainers/image-spec/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2016 The Linux Foundation. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go deleted file mode 100644 index 6f9e6fd3a..000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -const ( - // AnnotationCreated is the annotation key for the date and time on which the image was built (date-time string as defined by RFC 3339). - AnnotationCreated = "org.opencontainers.image.created" - - // AnnotationAuthors is the annotation key for the contact details of the people or organization responsible for the image (freeform string). - AnnotationAuthors = "org.opencontainers.image.authors" - - // AnnotationURL is the annotation key for the URL to find more information on the image. - AnnotationURL = "org.opencontainers.image.url" - - // AnnotationDocumentation is the annotation key for the URL to get documentation on the image. - AnnotationDocumentation = "org.opencontainers.image.documentation" - - // AnnotationSource is the annotation key for the URL to get source code for building the image. - AnnotationSource = "org.opencontainers.image.source" - - // AnnotationVersion is the annotation key for the version of the packaged software. - // The version MAY match a label or tag in the source code repository. - // The version MAY be Semantic versioning-compatible. - AnnotationVersion = "org.opencontainers.image.version" - - // AnnotationRevision is the annotation key for the source control revision identifier for the packaged software. - AnnotationRevision = "org.opencontainers.image.revision" - - // AnnotationVendor is the annotation key for the name of the distributing entity, organization or individual. - AnnotationVendor = "org.opencontainers.image.vendor" - - // AnnotationLicenses is the annotation key for the license(s) under which contained software is distributed as an SPDX License Expression. - AnnotationLicenses = "org.opencontainers.image.licenses" - - // AnnotationRefName is the annotation key for the name of the reference for a target. - // SHOULD only be considered valid when on descriptors on `index.json` within image layout. - AnnotationRefName = "org.opencontainers.image.ref.name" - - // AnnotationTitle is the annotation key for the human-readable title of the image. 
- AnnotationTitle = "org.opencontainers.image.title" - - // AnnotationDescription is the annotation key for the human-readable description of the software packaged in the image. - AnnotationDescription = "org.opencontainers.image.description" - - // AnnotationBaseImageDigest is the annotation key for the digest of the image's base image. - AnnotationBaseImageDigest = "org.opencontainers.image.base.digest" - - // AnnotationBaseImageName is the annotation key for the image reference of the image's base image. - AnnotationBaseImageName = "org.opencontainers.image.base.name" - - // AnnotationArtifactCreated is the annotation key for the date and time on which the artifact was built, conforming to RFC 3339. - AnnotationArtifactCreated = "org.opencontainers.artifact.created" - - // AnnotationArtifactDescription is the annotation key for the human readable description for the artifact. - AnnotationArtifactDescription = "org.opencontainers.artifact.description" - - // AnnotationReferrersFiltersApplied is the annotation key for the comma separated list of filters applied by the registry in the referrers listing. - AnnotationReferrersFiltersApplied = "org.opencontainers.referrers.filtersApplied" -) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go deleted file mode 100644 index 03d76ce43..000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2022 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -// Artifact describes an artifact manifest. -// This structure provides `application/vnd.oci.artifact.manifest.v1+json` mediatype when marshalled to JSON. -type Artifact struct { - // MediaType is the media type of the object this schema refers to. - MediaType string `json:"mediaType"` - - // ArtifactType is the IANA media type of the artifact this schema refers to. - ArtifactType string `json:"artifactType"` - - // Blobs is a collection of blobs referenced by this manifest. - Blobs []Descriptor `json:"blobs,omitempty"` - - // Subject (reference) is an optional link from the artifact to another manifest forming an association between the artifact and the other manifest. - Subject *Descriptor `json:"subject,omitempty"` - - // Annotations contains arbitrary metadata for the artifact manifest. - Annotations map[string]string `json:"annotations,omitempty"` -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go deleted file mode 100644 index ffff4b6d1..000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import ( - "time" - - digest "github.com/opencontainers/go-digest" -) - -// ImageConfig defines the execution parameters which should be used as a base when running a container using an image. -type ImageConfig struct { - // User defines the username or UID which the process in the container should run as. - User string `json:"User,omitempty"` - - // ExposedPorts a set of ports to expose from a container running this image. - ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"` - - // Env is a list of environment variables to be used in a container. - Env []string `json:"Env,omitempty"` - - // Entrypoint defines a list of arguments to use as the command to execute when the container starts. - Entrypoint []string `json:"Entrypoint,omitempty"` - - // Cmd defines the default arguments to the entrypoint of the container. - Cmd []string `json:"Cmd,omitempty"` - - // Volumes is a set of directories describing where the process is likely write data specific to a container instance. - Volumes map[string]struct{} `json:"Volumes,omitempty"` - - // WorkingDir sets the current working directory of the entrypoint process in the container. - WorkingDir string `json:"WorkingDir,omitempty"` - - // Labels contains arbitrary metadata for the container. - Labels map[string]string `json:"Labels,omitempty"` - - // StopSignal contains the system call signal that will be sent to the container to exit. - StopSignal string `json:"StopSignal,omitempty"` -} - -// RootFS describes a layer content addresses -type RootFS struct { - // Type is the type of the rootfs. - Type string `json:"type"` - - // DiffIDs is an array of layer content hashes (DiffIDs), in order from bottom-most to top-most. - DiffIDs []digest.Digest `json:"diff_ids"` -} - -// History describes the history of a layer. -type History struct { - // Created is the combined date and time at which the layer was created, formatted as defined by RFC 3339, section 5.6. - Created *time.Time `json:"created,omitempty"` - - // CreatedBy is the command which created the layer. - CreatedBy string `json:"created_by,omitempty"` - - // Author is the author of the build point. - Author string `json:"author,omitempty"` - - // Comment is a custom message set when creating the layer. - Comment string `json:"comment,omitempty"` - - // EmptyLayer is used to mark if the history item created a filesystem diff. - EmptyLayer bool `json:"empty_layer,omitempty"` -} - -// Image is the JSON structure which describes some basic information about the image. -// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON. -type Image struct { - // Created is the combined date and time at which the image was created, formatted as defined by RFC 3339, section 5.6. - Created *time.Time `json:"created,omitempty"` - - // Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image. - Author string `json:"author,omitempty"` - - // Architecture is the CPU architecture which the binaries in this image are built to run on. 
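As a small illustration of how the configuration types above are populated, a sketch assuming direct imports of github.com/opencontainers/image-spec/specs-go/v1 and go-digest; all field values are placeholders invented for the example:

package main

import (
	"encoding/json"
	"fmt"

	digest "github.com/opencontainers/go-digest"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// Execution defaults that end up under "config" in an image config blob.
	cfg := v1.ImageConfig{
		User:       "65534",
		Env:        []string{"PATH=/usr/local/bin:/usr/bin:/bin"},
		Entrypoint: []string{"/app/server"},
		WorkingDir: "/app",
	}

	// RootFS records the uncompressed layer digests, bottom-most first.
	rootfs := v1.RootFS{
		Type:    "layers",
		DiffIDs: []digest.Digest{digest.FromString("placeholder layer content")},
	}

	out, _ := json.Marshal(cfg)
	fmt.Println(string(out), rootfs.DiffIDs[0])
}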
- Architecture string `json:"architecture"` - - // Variant is the variant of the specified CPU architecture which image binaries are intended to run on. - Variant string `json:"variant,omitempty"` - - // OS is the name of the operating system which the image is built to run on. - OS string `json:"os"` - - // OSVersion is an optional field specifying the operating system - // version, for example on Windows `10.0.14393.1066`. - OSVersion string `json:"os.version,omitempty"` - - // OSFeatures is an optional field specifying an array of strings, - // each listing a required OS feature (for example on Windows `win32k`). - OSFeatures []string `json:"os.features,omitempty"` - - // Config defines the execution parameters which should be used as a base when running a container using the image. - Config ImageConfig `json:"config,omitempty"` - - // RootFS references the layer content addresses used by the image. - RootFS RootFS `json:"rootfs"` - - // History describes the history of each layer. - History []History `json:"history,omitempty"` -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go deleted file mode 100644 index 9654aa5af..000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016-2022 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import digest "github.com/opencontainers/go-digest" - -// Descriptor describes the disposition of targeted content. -// This structure provides `application/vnd.oci.descriptor.v1+json` mediatype -// when marshalled to JSON. -type Descriptor struct { - // MediaType is the media type of the object this schema refers to. - MediaType string `json:"mediaType,omitempty"` - - // Digest is the digest of the targeted content. - Digest digest.Digest `json:"digest"` - - // Size specifies the size in bytes of the blob. - Size int64 `json:"size"` - - // URLs specifies a list of URLs from which this object MAY be downloaded - URLs []string `json:"urls,omitempty"` - - // Annotations contains arbitrary metadata relating to the targeted content. - Annotations map[string]string `json:"annotations,omitempty"` - - // Data is an embedding of the targeted content. This is encoded as a base64 - // string when marshalled to JSON (automatically, by encoding/json). If - // present, Data can be used directly to avoid fetching the targeted content. - Data []byte `json:"data,omitempty"` - - // Platform describes the platform which the image in the manifest runs on. - // - // This should only be used when referring to a manifest. - Platform *Platform `json:"platform,omitempty"` - - // ArtifactType is the IANA media type of this artifact. - ArtifactType string `json:"artifactType,omitempty"` -} - -// Platform describes the platform which the image in the manifest runs on. 
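A short sketch of how the Descriptor type above is filled in practice, assuming the same direct imports; the blob content is a placeholder standing in for a real config or layer:

package main

import (
	"encoding/json"
	"fmt"

	digest "github.com/opencontainers/go-digest"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	blob := []byte(`{"example":"config"}`)

	// A descriptor ties together the media type, digest and size of a blob.
	desc := v1.Descriptor{
		MediaType: v1.MediaTypeImageConfig,
		Digest:    digest.FromBytes(blob),
		Size:      int64(len(blob)),
	}

	out, _ := json.MarshalIndent(desc, "", "  ")
	fmt.Println(string(out))
}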
-type Platform struct { - // Architecture field specifies the CPU architecture, for example - // `amd64` or `ppc64`. - Architecture string `json:"architecture"` - - // OS specifies the operating system, for example `linux` or `windows`. - OS string `json:"os"` - - // OSVersion is an optional field specifying the operating system - // version, for example on Windows `10.0.14393.1066`. - OSVersion string `json:"os.version,omitempty"` - - // OSFeatures is an optional field specifying an array of strings, - // each listing a required OS feature (for example on Windows `win32k`). - OSFeatures []string `json:"os.features,omitempty"` - - // Variant is an optional field specifying a variant of the CPU, for - // example `v7` to specify ARMv7 when architecture is `arm`. - Variant string `json:"variant,omitempty"` -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go deleted file mode 100644 index ed4a56e59..000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import "github.com/opencontainers/image-spec/specs-go" - -// Index references manifests for various platforms. -// This structure provides `application/vnd.oci.image.index.v1+json` mediatype when marshalled to JSON. -type Index struct { - specs.Versioned - - // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.index.v1+json` - MediaType string `json:"mediaType,omitempty"` - - // Manifests references platform specific manifests. - Manifests []Descriptor `json:"manifests"` - - // Annotations contains arbitrary metadata for the image index. - Annotations map[string]string `json:"annotations,omitempty"` -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go deleted file mode 100644 index fc79e9e0d..000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v1 - -const ( - // ImageLayoutFile is the file name of oci image layout file - ImageLayoutFile = "oci-layout" - // ImageLayoutVersion is the version of ImageLayout - ImageLayoutVersion = "1.0.0" -) - -// ImageLayout is the structure in the "oci-layout" file, found in the root -// of an OCI Image-layout directory. -type ImageLayout struct { - Version string `json:"imageLayoutVersion"` -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go deleted file mode 100644 index 730a09359..000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2016-2022 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import "github.com/opencontainers/image-spec/specs-go" - -// Manifest provides `application/vnd.oci.image.manifest.v1+json` mediatype structure when marshalled to JSON. -type Manifest struct { - specs.Versioned - - // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json` - MediaType string `json:"mediaType,omitempty"` - - // Config references a configuration object for a container, by digest. - // The referenced configuration object is a JSON blob that the runtime uses to set up the container. - Config Descriptor `json:"config"` - - // Layers is an indexed list of layers referenced by the manifest. - Layers []Descriptor `json:"layers"` - - // Subject is an optional link from the image manifest to another manifest forming an association between the image manifest and the other manifest. - Subject *Descriptor `json:"subject,omitempty"` - - // Annotations contains arbitrary metadata for the image manifest. - Annotations map[string]string `json:"annotations,omitempty"` -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go deleted file mode 100644 index 935b481e3..000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -const ( - // MediaTypeDescriptor specifies the media type for a content descriptor. - MediaTypeDescriptor = "application/vnd.oci.descriptor.v1+json" - - // MediaTypeLayoutHeader specifies the media type for the oci-layout. 
- MediaTypeLayoutHeader = "application/vnd.oci.layout.header.v1+json" - - // MediaTypeImageManifest specifies the media type for an image manifest. - MediaTypeImageManifest = "application/vnd.oci.image.manifest.v1+json" - - // MediaTypeImageIndex specifies the media type for an image index. - MediaTypeImageIndex = "application/vnd.oci.image.index.v1+json" - - // MediaTypeImageLayer is the media type used for layers referenced by the manifest. - MediaTypeImageLayer = "application/vnd.oci.image.layer.v1.tar" - - // MediaTypeImageLayerGzip is the media type used for gzipped layers - // referenced by the manifest. - MediaTypeImageLayerGzip = "application/vnd.oci.image.layer.v1.tar+gzip" - - // MediaTypeImageLayerZstd is the media type used for zstd compressed - // layers referenced by the manifest. - MediaTypeImageLayerZstd = "application/vnd.oci.image.layer.v1.tar+zstd" - - // MediaTypeImageLayerNonDistributable is the media type for layers referenced by - // the manifest but with distribution restrictions. - MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar" - - // MediaTypeImageLayerNonDistributableGzip is the media type for - // gzipped layers referenced by the manifest but with distribution - // restrictions. - MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" - - // MediaTypeImageLayerNonDistributableZstd is the media type for zstd - // compressed layers referenced by the manifest but with distribution - // restrictions. - MediaTypeImageLayerNonDistributableZstd = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd" - - // MediaTypeImageConfig specifies the media type for the image configuration. - MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" - - // MediaTypeArtifactManifest specifies the media type for a content descriptor. - MediaTypeArtifactManifest = "application/vnd.oci.artifact.manifest.v1+json" -) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go deleted file mode 100644 index d27903579..000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package specs - -import "fmt" - -const ( - // VersionMajor is for an API incompatible changes - VersionMajor = 1 - // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 1 - // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 0 - - // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "-rc2" -) - -// Version is the specification version that the package types support. 
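Tying the manifest and media-type definitions above together, a sketch of a minimal manifest marshalled to JSON; the config and layer bytes are placeholders, and the describe helper is introduced only for this example:

package main

import (
	"encoding/json"
	"fmt"

	digest "github.com/opencontainers/go-digest"
	specs "github.com/opencontainers/image-spec/specs-go"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// describe builds a Descriptor for an in-memory placeholder blob.
func describe(mediaType string, blob []byte) v1.Descriptor {
	return v1.Descriptor{
		MediaType: mediaType,
		Digest:    digest.FromBytes(blob),
		Size:      int64(len(blob)),
	}
}

func main() {
	config := []byte(`{"architecture":"amd64","os":"linux"}`)
	layer := []byte("placeholder for a gzipped layer tarball")

	manifest := v1.Manifest{
		Versioned: specs.Versioned{SchemaVersion: 2},
		MediaType: v1.MediaTypeImageManifest,
		Config:    describe(v1.MediaTypeImageConfig, config),
		Layers:    []v1.Descriptor{describe(v1.MediaTypeImageLayerGzip, layer)},
	}

	out, _ := json.MarshalIndent(manifest, "", "  ")
	fmt.Println(string(out))
}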
-var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go b/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go deleted file mode 100644 index 58a1510f3..000000000 --- a/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2016 The Linux Foundation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package specs - -// Versioned provides a struct with the manifest schemaVersion and mediaType. -// Incoming content with unknown schema version can be decoded against this -// struct to check the version. -type Versioned struct { - // SchemaVersion is the image manifest schema that this image follows - SchemaVersion int `json:"schemaVersion"` -} diff --git a/vendor/github.com/opencontainers/runc/LICENSE b/vendor/github.com/opencontainers/runc/LICENSE deleted file mode 100644 index 27448585a..000000000 --- a/vendor/github.com/opencontainers/runc/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2014 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/opencontainers/runc/NOTICE b/vendor/github.com/opencontainers/runc/NOTICE deleted file mode 100644 index 5c97abce4..000000000 --- a/vendor/github.com/opencontainers/runc/NOTICE +++ /dev/null @@ -1,17 +0,0 @@ -runc - -Copyright 2012-2015 Docker, Inc. - -This product includes software developed at Docker, Inc. (http://www.docker.com). - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see http://www.bis.doc.gov - -See also http://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go deleted file mode 100644 index f95c1409f..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go +++ /dev/null @@ -1,157 +0,0 @@ -//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package user - -import ( - "io" - "os" - "strconv" - - "golang.org/x/sys/unix" -) - -// Unix-specific path to the passwd and group formatted files. -const ( - unixPasswdPath = "/etc/passwd" - unixGroupPath = "/etc/group" -) - -// LookupUser looks up a user by their username in /etc/passwd. 
If the user -// cannot be found (or there is no /etc/passwd file on the filesystem), then -// LookupUser returns an error. -func LookupUser(username string) (User, error) { - return lookupUserFunc(func(u User) bool { - return u.Name == username - }) -} - -// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot -// be found (or there is no /etc/passwd file on the filesystem), then LookupId -// returns an error. -func LookupUid(uid int) (User, error) { - return lookupUserFunc(func(u User) bool { - return u.Uid == uid - }) -} - -func lookupUserFunc(filter func(u User) bool) (User, error) { - // Get operating system-specific passwd reader-closer. - passwd, err := GetPasswd() - if err != nil { - return User{}, err - } - defer passwd.Close() - - // Get the users. - users, err := ParsePasswdFilter(passwd, filter) - if err != nil { - return User{}, err - } - - // No user entries found. - if len(users) == 0 { - return User{}, ErrNoPasswdEntries - } - - // Assume the first entry is the "correct" one. - return users[0], nil -} - -// LookupGroup looks up a group by its name in /etc/group. If the group cannot -// be found (or there is no /etc/group file on the filesystem), then LookupGroup -// returns an error. -func LookupGroup(groupname string) (Group, error) { - return lookupGroupFunc(func(g Group) bool { - return g.Name == groupname - }) -} - -// LookupGid looks up a group by its group id in /etc/group. If the group cannot -// be found (or there is no /etc/group file on the filesystem), then LookupGid -// returns an error. -func LookupGid(gid int) (Group, error) { - return lookupGroupFunc(func(g Group) bool { - return g.Gid == gid - }) -} - -func lookupGroupFunc(filter func(g Group) bool) (Group, error) { - // Get operating system-specific group reader-closer. - group, err := GetGroup() - if err != nil { - return Group{}, err - } - defer group.Close() - - // Get the users. - groups, err := ParseGroupFilter(group, filter) - if err != nil { - return Group{}, err - } - - // No user entries found. - if len(groups) == 0 { - return Group{}, ErrNoGroupEntries - } - - // Assume the first entry is the "correct" one. - return groups[0], nil -} - -func GetPasswdPath() (string, error) { - return unixPasswdPath, nil -} - -func GetPasswd() (io.ReadCloser, error) { - return os.Open(unixPasswdPath) -} - -func GetGroupPath() (string, error) { - return unixGroupPath, nil -} - -func GetGroup() (io.ReadCloser, error) { - return os.Open(unixGroupPath) -} - -// CurrentUser looks up the current user by their user id in /etc/passwd. If the -// user cannot be found (or there is no /etc/passwd file on the filesystem), -// then CurrentUser returns an error. -func CurrentUser() (User, error) { - return LookupUid(unix.Getuid()) -} - -// CurrentGroup looks up the current user's group by their primary group id's -// entry in /etc/passwd. If the group cannot be found (or there is no -// /etc/group file on the filesystem), then CurrentGroup returns an error. 
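A small sketch of how the lookup helpers above are generally used, assuming the package is imported as github.com/opencontainers/runc/libcontainer/user and the program runs on a Unix host where /etc/passwd and /etc/group exist:

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/user"
)

func main() {
	// Resolve the root account from /etc/passwd, by name and by uid.
	byName, err := user.LookupUser("root")
	if err != nil {
		panic(err)
	}
	byUID, err := user.LookupUid(0)
	if err != nil {
		panic(err)
	}
	fmt.Println(byName.Home, byUID.Shell)

	// Resolve the current process's primary group from /etc/group.
	group, err := user.CurrentGroup()
	if err != nil {
		panic(err)
	}
	fmt.Println(group.Name, group.Gid)
}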
-func CurrentGroup() (Group, error) { - return LookupGid(unix.Getgid()) -} - -func currentUserSubIDs(fileName string) ([]SubID, error) { - u, err := CurrentUser() - if err != nil { - return nil, err - } - filter := func(entry SubID) bool { - return entry.Name == u.Name || entry.Name == strconv.Itoa(u.Uid) - } - return ParseSubIDFileFilter(fileName, filter) -} - -func CurrentUserSubUIDs() ([]SubID, error) { - return currentUserSubIDs("/etc/subuid") -} - -func CurrentUserSubGIDs() ([]SubID, error) { - return currentUserSubIDs("/etc/subgid") -} - -func CurrentProcessUIDMap() ([]IDMap, error) { - return ParseIDMapFile("/proc/self/uid_map") -} - -func CurrentProcessGIDMap() ([]IDMap, error) { - return ParseIDMapFile("/proc/self/gid_map") -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go deleted file mode 100644 index 2473c5ead..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go +++ /dev/null @@ -1,605 +0,0 @@ -package user - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -const ( - minID = 0 - maxID = 1<<31 - 1 // for 32-bit systems compatibility -) - -var ( - // ErrNoPasswdEntries is returned if no matching entries were found in /etc/group. - ErrNoPasswdEntries = errors.New("no matching entries in passwd file") - // ErrNoGroupEntries is returned if no matching entries were found in /etc/passwd. - ErrNoGroupEntries = errors.New("no matching entries in group file") - // ErrRange is returned if a UID or GID is outside of the valid range. - ErrRange = fmt.Errorf("uids and gids must be in range %d-%d", minID, maxID) -) - -type User struct { - Name string - Pass string - Uid int - Gid int - Gecos string - Home string - Shell string -} - -type Group struct { - Name string - Pass string - Gid int - List []string -} - -// SubID represents an entry in /etc/sub{u,g}id -type SubID struct { - Name string - SubID int64 - Count int64 -} - -// IDMap represents an entry in /proc/PID/{u,g}id_map -type IDMap struct { - ID int64 - ParentID int64 - Count int64 -} - -func parseLine(line []byte, v ...interface{}) { - parseParts(bytes.Split(line, []byte(":")), v...) -} - -func parseParts(parts [][]byte, v ...interface{}) { - if len(parts) == 0 { - return - } - - for i, p := range parts { - // Ignore cases where we don't have enough fields to populate the arguments. - // Some configuration files like to misbehave. - if len(v) <= i { - break - } - - // Use the type of the argument to figure out how to parse it, scanf() style. - // This is legit. - switch e := v[i].(type) { - case *string: - *e = string(p) - case *int: - // "numbers", with conversion errors ignored because of some misbehaving configuration files. - *e, _ = strconv.Atoi(string(p)) - case *int64: - *e, _ = strconv.ParseInt(string(p), 10, 64) - case *[]string: - // Comma-separated lists. - if len(p) != 0 { - *e = strings.Split(string(p), ",") - } else { - *e = []string{} - } - default: - // Someone goof'd when writing code using this function. Scream so they can hear us. - panic(fmt.Sprintf("parseLine only accepts {*string, *int, *int64, *[]string} as arguments! 
%#v is not a pointer!", e)) - } - } -} - -func ParsePasswdFile(path string) ([]User, error) { - passwd, err := os.Open(path) - if err != nil { - return nil, err - } - defer passwd.Close() - return ParsePasswd(passwd) -} - -func ParsePasswd(passwd io.Reader) ([]User, error) { - return ParsePasswdFilter(passwd, nil) -} - -func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) { - passwd, err := os.Open(path) - if err != nil { - return nil, err - } - defer passwd.Close() - return ParsePasswdFilter(passwd, filter) -} - -func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) { - if r == nil { - return nil, errors.New("nil source for passwd-formatted data") - } - - var ( - s = bufio.NewScanner(r) - out = []User{} - ) - - for s.Scan() { - line := bytes.TrimSpace(s.Bytes()) - if len(line) == 0 { - continue - } - - // see: man 5 passwd - // name:password:UID:GID:GECOS:directory:shell - // Name:Pass:Uid:Gid:Gecos:Home:Shell - // root:x:0:0:root:/root:/bin/bash - // adm:x:3:4:adm:/var/adm:/bin/false - p := User{} - parseLine(line, &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell) - - if filter == nil || filter(p) { - out = append(out, p) - } - } - if err := s.Err(); err != nil { - return nil, err - } - - return out, nil -} - -func ParseGroupFile(path string) ([]Group, error) { - group, err := os.Open(path) - if err != nil { - return nil, err - } - - defer group.Close() - return ParseGroup(group) -} - -func ParseGroup(group io.Reader) ([]Group, error) { - return ParseGroupFilter(group, nil) -} - -func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) { - group, err := os.Open(path) - if err != nil { - return nil, err - } - defer group.Close() - return ParseGroupFilter(group, filter) -} - -func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { - if r == nil { - return nil, errors.New("nil source for group-formatted data") - } - rd := bufio.NewReader(r) - out := []Group{} - - // Read the file line-by-line. - for { - var ( - isPrefix bool - wholeLine []byte - err error - ) - - // Read the next line. We do so in chunks (as much as reader's - // buffer is able to keep), check if we read enough columns - // already on each step and store final result in wholeLine. - for { - var line []byte - line, isPrefix, err = rd.ReadLine() - - if err != nil { - // We should return no error if EOF is reached - // without a match. - if err == io.EOF { //nolint:errorlint // comparison with io.EOF is legit, https://github.com/polyfloyd/go-errorlint/pull/12 - err = nil - } - return out, err - } - - // Simple common case: line is short enough to fit in a - // single reader's buffer. - if !isPrefix && len(wholeLine) == 0 { - wholeLine = line - break - } - - wholeLine = append(wholeLine, line...) - - // Check if we read the whole line already. - if !isPrefix { - break - } - } - - // There's no spec for /etc/passwd or /etc/group, but we try to follow - // the same rules as the glibc parser, which allows comments and blank - // space at the beginning of a line. 
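For illustration, a sketch of the filter-based parsing shown above, run against an in-memory passwd-formatted string instead of /etc/passwd; the two records are invented sample data:

package main

import (
	"fmt"
	"strings"

	"github.com/opencontainers/runc/libcontainer/user"
)

func main() {
	// Two records in the name:password:UID:GID:GECOS:directory:shell layout.
	passwd := strings.NewReader(
		"root:x:0:0:root:/root:/bin/bash\n" +
			"daemon:x:1:1:daemon:/usr/sbin:/usr/sbin/nologin\n")

	// Keep only entries whose uid is 0.
	users, err := user.ParsePasswdFilter(passwd, func(u user.User) bool {
		return u.Uid == 0
	})
	if err != nil {
		panic(err)
	}
	for _, u := range users {
		fmt.Printf("%s uid=%d home=%s\n", u.Name, u.Uid, u.Home)
	}
}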
- wholeLine = bytes.TrimSpace(wholeLine) - if len(wholeLine) == 0 || wholeLine[0] == '#' { - continue - } - - // see: man 5 group - // group_name:password:GID:user_list - // Name:Pass:Gid:List - // root:x:0:root - // adm:x:4:root,adm,daemon - p := Group{} - parseLine(wholeLine, &p.Name, &p.Pass, &p.Gid, &p.List) - - if filter == nil || filter(p) { - out = append(out, p) - } - } -} - -type ExecUser struct { - Uid int - Gid int - Sgids []int - Home string -} - -// GetExecUserPath is a wrapper for GetExecUser. It reads data from each of the -// given file paths and uses that data as the arguments to GetExecUser. If the -// files cannot be opened for any reason, the error is ignored and a nil -// io.Reader is passed instead. -func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) { - var passwd, group io.Reader - - if passwdFile, err := os.Open(passwdPath); err == nil { - passwd = passwdFile - defer passwdFile.Close() - } - - if groupFile, err := os.Open(groupPath); err == nil { - group = groupFile - defer groupFile.Close() - } - - return GetExecUser(userSpec, defaults, passwd, group) -} - -// GetExecUser parses a user specification string (using the passwd and group -// readers as sources for /etc/passwd and /etc/group data, respectively). In -// the case of blank fields or missing data from the sources, the values in -// defaults is used. -// -// GetExecUser will return an error if a user or group literal could not be -// found in any entry in passwd and group respectively. -// -// Examples of valid user specifications are: -// * "" -// * "user" -// * "uid" -// * "user:group" -// * "uid:gid -// * "user:gid" -// * "uid:group" -// -// It should be noted that if you specify a numeric user or group id, they will -// not be evaluated as usernames (only the metadata will be filled). So attempting -// to parse a user with user.Name = "1337" will produce the user with a UID of -// 1337. -func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) { - if defaults == nil { - defaults = new(ExecUser) - } - - // Copy over defaults. - user := &ExecUser{ - Uid: defaults.Uid, - Gid: defaults.Gid, - Sgids: defaults.Sgids, - Home: defaults.Home, - } - - // Sgids slice *cannot* be nil. - if user.Sgids == nil { - user.Sgids = []int{} - } - - // Allow for userArg to have either "user" syntax, or optionally "user:group" syntax - var userArg, groupArg string - parseLine([]byte(userSpec), &userArg, &groupArg) - - // Convert userArg and groupArg to be numeric, so we don't have to execute - // Atoi *twice* for each iteration over lines. - uidArg, uidErr := strconv.Atoi(userArg) - gidArg, gidErr := strconv.Atoi(groupArg) - - // Find the matching user. - users, err := ParsePasswdFilter(passwd, func(u User) bool { - if userArg == "" { - // Default to current state of the user. - return u.Uid == user.Uid - } - - if uidErr == nil { - // If the userArg is numeric, always treat it as a UID. - return uidArg == u.Uid - } - - return u.Name == userArg - }) - - // If we can't find the user, we have to bail. - if err != nil && passwd != nil { - if userArg == "" { - userArg = strconv.Itoa(user.Uid) - } - return nil, fmt.Errorf("unable to find user %s: %w", userArg, err) - } - - var matchedUserName string - if len(users) > 0 { - // First match wins, even if there's more than one matching entry. 
- matchedUserName = users[0].Name - user.Uid = users[0].Uid - user.Gid = users[0].Gid - user.Home = users[0].Home - } else if userArg != "" { - // If we can't find a user with the given username, the only other valid - // option is if it's a numeric username with no associated entry in passwd. - - if uidErr != nil { - // Not numeric. - return nil, fmt.Errorf("unable to find user %s: %w", userArg, ErrNoPasswdEntries) - } - user.Uid = uidArg - - // Must be inside valid uid range. - if user.Uid < minID || user.Uid > maxID { - return nil, ErrRange - } - - // Okay, so it's numeric. We can just roll with this. - } - - // On to the groups. If we matched a username, we need to do this because of - // the supplementary group IDs. - if groupArg != "" || matchedUserName != "" { - groups, err := ParseGroupFilter(group, func(g Group) bool { - // If the group argument isn't explicit, we'll just search for it. - if groupArg == "" { - // Check if user is a member of this group. - for _, u := range g.List { - if u == matchedUserName { - return true - } - } - return false - } - - if gidErr == nil { - // If the groupArg is numeric, always treat it as a GID. - return gidArg == g.Gid - } - - return g.Name == groupArg - }) - if err != nil && group != nil { - return nil, fmt.Errorf("unable to find groups for spec %v: %w", matchedUserName, err) - } - - // Only start modifying user.Gid if it is in explicit form. - if groupArg != "" { - if len(groups) > 0 { - // First match wins, even if there's more than one matching entry. - user.Gid = groups[0].Gid - } else { - // If we can't find a group with the given name, the only other valid - // option is if it's a numeric group name with no associated entry in group. - - if gidErr != nil { - // Not numeric. - return nil, fmt.Errorf("unable to find group %s: %w", groupArg, ErrNoGroupEntries) - } - user.Gid = gidArg - - // Must be inside valid gid range. - if user.Gid < minID || user.Gid > maxID { - return nil, ErrRange - } - - // Okay, so it's numeric. We can just roll with this. - } - } else if len(groups) > 0 { - // Supplementary group ids only make sense if in the implicit form. - user.Sgids = make([]int, len(groups)) - for i, group := range groups { - user.Sgids[i] = group.Gid - } - } - } - - return user, nil -} - -// GetAdditionalGroups looks up a list of groups by name or group id -// against the given /etc/group formatted data. If a group name cannot -// be found, an error will be returned. If a group id cannot be found, -// or the given group data is nil, the id will be returned as-is -// provided it is in the legal range. -func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) { - groups := []Group{} - if group != nil { - var err error - groups, err = ParseGroupFilter(group, func(g Group) bool { - for _, ag := range additionalGroups { - if g.Name == ag || strconv.Itoa(g.Gid) == ag { - return true - } - } - return false - }) - if err != nil { - return nil, fmt.Errorf("Unable to find additional groups %v: %w", additionalGroups, err) - } - } - - gidMap := make(map[int]struct{}) - for _, ag := range additionalGroups { - var found bool - for _, g := range groups { - // if we found a matched group either by name or gid, take the - // first matched as correct - if g.Name == ag || strconv.Itoa(g.Gid) == ag { - if _, ok := gidMap[g.Gid]; !ok { - gidMap[g.Gid] = struct{}{} - found = true - break - } - } - } - // we asked for a group but didn't find it. 
let's check to see - // if we wanted a numeric group - if !found { - gid, err := strconv.ParseInt(ag, 10, 64) - if err != nil { - // Not a numeric ID either. - return nil, fmt.Errorf("Unable to find group %s: %w", ag, ErrNoGroupEntries) - } - // Ensure gid is inside gid range. - if gid < minID || gid > maxID { - return nil, ErrRange - } - gidMap[int(gid)] = struct{}{} - } - } - gids := []int{} - for gid := range gidMap { - gids = append(gids, gid) - } - return gids, nil -} - -// GetAdditionalGroupsPath is a wrapper around GetAdditionalGroups -// that opens the groupPath given and gives it as an argument to -// GetAdditionalGroups. -func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) { - var group io.Reader - - if groupFile, err := os.Open(groupPath); err == nil { - group = groupFile - defer groupFile.Close() - } - return GetAdditionalGroups(additionalGroups, group) -} - -func ParseSubIDFile(path string) ([]SubID, error) { - subid, err := os.Open(path) - if err != nil { - return nil, err - } - defer subid.Close() - return ParseSubID(subid) -} - -func ParseSubID(subid io.Reader) ([]SubID, error) { - return ParseSubIDFilter(subid, nil) -} - -func ParseSubIDFileFilter(path string, filter func(SubID) bool) ([]SubID, error) { - subid, err := os.Open(path) - if err != nil { - return nil, err - } - defer subid.Close() - return ParseSubIDFilter(subid, filter) -} - -func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) { - if r == nil { - return nil, errors.New("nil source for subid-formatted data") - } - - var ( - s = bufio.NewScanner(r) - out = []SubID{} - ) - - for s.Scan() { - line := bytes.TrimSpace(s.Bytes()) - if len(line) == 0 { - continue - } - - // see: man 5 subuid - p := SubID{} - parseLine(line, &p.Name, &p.SubID, &p.Count) - - if filter == nil || filter(p) { - out = append(out, p) - } - } - if err := s.Err(); err != nil { - return nil, err - } - - return out, nil -} - -func ParseIDMapFile(path string) ([]IDMap, error) { - r, err := os.Open(path) - if err != nil { - return nil, err - } - defer r.Close() - return ParseIDMap(r) -} - -func ParseIDMap(r io.Reader) ([]IDMap, error) { - return ParseIDMapFilter(r, nil) -} - -func ParseIDMapFileFilter(path string, filter func(IDMap) bool) ([]IDMap, error) { - r, err := os.Open(path) - if err != nil { - return nil, err - } - defer r.Close() - return ParseIDMapFilter(r, filter) -} - -func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) { - if r == nil { - return nil, errors.New("nil source for idmap-formatted data") - } - - var ( - s = bufio.NewScanner(r) - out = []IDMap{} - ) - - for s.Scan() { - line := bytes.TrimSpace(s.Bytes()) - if len(line) == 0 { - continue - } - - // see: man 7 user_namespaces - p := IDMap{} - parseParts(bytes.Fields(line), &p.ID, &p.ParentID, &p.Count) - - if filter == nil || filter(p) { - out = append(out, p) - } - } - if err := s.Err(); err != nil { - return nil, err - } - - return out, nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go deleted file mode 100644 index e018eae61..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go +++ /dev/null @@ -1,43 +0,0 @@ -//go:build gofuzz -// +build gofuzz - -package user - -import ( - "io" - "strings" -) - -func IsDivisbleBy(n int, divisibleby int) bool { - return (n % divisibleby) == 0 -} - -func FuzzUser(data []byte) int { - if len(data) == 0 { - 
return -1 - } - if !IsDivisbleBy(len(data), 5) { - return -1 - } - - var divided [][]byte - - chunkSize := len(data) / 5 - - for i := 0; i < len(data); i += chunkSize { - end := i + chunkSize - - divided = append(divided, data[i:end]) - } - - _, _ = ParsePasswdFilter(strings.NewReader(string(divided[0])), nil) - - var passwd, group io.Reader - - group = strings.NewReader(string(divided[1])) - _, _ = GetAdditionalGroups([]string{string(divided[2])}, group) - - passwd = strings.NewReader(string(divided[3])) - _, _ = GetExecUser(string(divided[4]), nil, passwd, group) - return 1 -} diff --git a/vendor/github.com/sirupsen/logrus/.gitignore b/vendor/github.com/sirupsen/logrus/.gitignore deleted file mode 100644 index 1fb13abeb..000000000 --- a/vendor/github.com/sirupsen/logrus/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -logrus -vendor - -.idea/ diff --git a/vendor/github.com/sirupsen/logrus/.golangci.yml b/vendor/github.com/sirupsen/logrus/.golangci.yml deleted file mode 100644 index 65dc28503..000000000 --- a/vendor/github.com/sirupsen/logrus/.golangci.yml +++ /dev/null @@ -1,40 +0,0 @@ -run: - # do not run on test files yet - tests: false - -# all available settings of specific linters -linters-settings: - errcheck: - # report about not checking of errors in type assetions: `a := b.(MyStruct)`; - # default is false: such cases aren't reported by default. - check-type-assertions: false - - # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; - # default is false: such cases aren't reported by default. - check-blank: false - - lll: - line-length: 100 - tab-width: 4 - - prealloc: - simple: false - range-loops: false - for-loops: false - - whitespace: - multi-if: false # Enforces newlines (or comments) after every multi-line if statement - multi-func: false # Enforces newlines (or comments) after every multi-line function signature - -linters: - enable: - - megacheck - - govet - disable: - - maligned - - prealloc - disable-all: false - presets: - - bugs - - unused - fast: false diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml deleted file mode 100644 index c1dbd5a3a..000000000 --- a/vendor/github.com/sirupsen/logrus/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go -go_import_path: github.com/sirupsen/logrus -git: - depth: 1 -env: - - GO111MODULE=on -go: 1.15.x -os: linux -install: - - ./travis/install.sh -script: - - cd ci - - go run mage.go -v -w ../ crossBuild - - go run mage.go -v -w ../ lint - - go run mage.go -v -w ../ test diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md deleted file mode 100644 index 7567f6128..000000000 --- a/vendor/github.com/sirupsen/logrus/CHANGELOG.md +++ /dev/null @@ -1,259 +0,0 @@ -# 1.8.1 -Code quality: - * move magefile in its own subdir/submodule to remove magefile dependency on logrus consumer - * improve timestamp format documentation - -Fixes: - * fix race condition on logger hooks - - -# 1.8.0 - -Correct versioning number replacing v1.7.1. - -# 1.7.1 - -Beware this release has introduced a new public API and its semver is therefore incorrect. 
- -Code quality: - * use go 1.15 in travis - * use magefile as task runner - -Fixes: - * small fixes about new go 1.13 error formatting system - * Fix for long time race condiction with mutating data hooks - -Features: - * build support for zos - -# 1.7.0 -Fixes: - * the dependency toward a windows terminal library has been removed - -Features: - * a new buffer pool management API has been added - * a set of `Fn()` functions have been added - -# 1.6.0 -Fixes: - * end of line cleanup - * revert the entry concurrency bug fix whic leads to deadlock under some circumstances - * update dependency on go-windows-terminal-sequences to fix a crash with go 1.14 - -Features: - * add an option to the `TextFormatter` to completely disable fields quoting - -# 1.5.0 -Code quality: - * add golangci linter run on travis - -Fixes: - * add mutex for hooks concurrent access on `Entry` data - * caller function field for go1.14 - * fix build issue for gopherjs target - -Feature: - * add an hooks/writer sub-package whose goal is to split output on different stream depending on the trace level - * add a `DisableHTMLEscape` option in the `JSONFormatter` - * add `ForceQuote` and `PadLevelText` options in the `TextFormatter` - -# 1.4.2 - * Fixes build break for plan9, nacl, solaris -# 1.4.1 -This new release introduces: - * Enhance TextFormatter to not print caller information when they are empty (#944) - * Remove dependency on golang.org/x/crypto (#932, #943) - -Fixes: - * Fix Entry.WithContext method to return a copy of the initial entry (#941) - -# 1.4.0 -This new release introduces: - * Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848). - * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter` (#909, #911) - * Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919). - -Fixes: - * Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893). - * Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903) - * Fix infinite recursion on unknown `Level.String()` (#907) - * Fix race condition in `getCaller` (#916). - - -# 1.3.0 -This new release introduces: - * Log, Logf, Logln functions for Logger and Entry that take a Level - -Fixes: - * Building prometheus node_exporter on AIX (#840) - * Race condition in TextFormatter (#468) - * Travis CI import path (#868) - * Remove coloured output on Windows (#862) - * Pointer to func as field in JSONFormatter (#870) - * Properly marshal Levels (#873) - -# 1.2.0 -This new release introduces: - * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued - * A new trace level named `Trace` whose level is below `Debug` - * A configurable exit function to be called upon a Fatal trace - * The `Level` object now implements `encoding.TextUnmarshaler` interface - -# 1.1.1 -This is a bug fix release. 
- * fix the build break on Solaris - * don't drop a whole trace in JSONFormatter when a field param is a function pointer which can not be serialized - -# 1.1.0 -This new release introduces: - * several fixes: - * a fix for a race condition on entry formatting - * proper cleanup of previously used entries before putting them back in the pool - * the extra new line at the end of message in text formatter has been removed - * a new global public API to check if a level is activated: IsLevelEnabled - * the following methods have been added to the Logger object - * IsLevelEnabled - * SetFormatter - * SetOutput - * ReplaceHooks - * introduction of go module - * an indent configuration for the json formatter - * output colour support for windows - * the field sort function is now configurable for text formatter - * the CLICOLOR and CLICOLOR\_FORCE environment variable support in text formater - -# 1.0.6 - -This new release introduces: - * a new api WithTime which allows to easily force the time of the log entry - which is mostly useful for logger wrapper - * a fix reverting the immutability of the entry given as parameter to the hooks - a new configuration field of the json formatter in order to put all the fields - in a nested dictionnary - * a new SetOutput method in the Logger - * a new configuration of the textformatter to configure the name of the default keys - * a new configuration of the text formatter to disable the level truncation - -# 1.0.5 - -* Fix hooks race (#707) -* Fix panic deadlock (#695) - -# 1.0.4 - -* Fix race when adding hooks (#612) -* Fix terminal check in AppEngine (#635) - -# 1.0.3 - -* Replace example files with testable examples - -# 1.0.2 - -* bug: quote non-string values in text formatter (#583) -* Make (*Logger) SetLevel a public method - -# 1.0.1 - -* bug: fix escaping in text formatter (#575) - -# 1.0.0 - -* Officially changed name to lower-case -* bug: colors on Windows 10 (#541) -* bug: fix race in accessing level (#512) - -# 0.11.5 - -* feature: add writer and writerlevel to entry (#372) - -# 0.11.4 - -* bug: fix undefined variable on solaris (#493) - -# 0.11.3 - -* formatter: configure quoting of empty values (#484) -* formatter: configure quoting character (default is `"`) (#484) -* bug: fix not importing io correctly in non-linux environments (#481) - -# 0.11.2 - -* bug: fix windows terminal detection (#476) - -# 0.11.1 - -* bug: fix tty detection with custom out (#471) - -# 0.11.0 - -* performance: Use bufferpool to allocate (#370) -* terminal: terminal detection for app-engine (#343) -* feature: exit handler (#375) - -# 0.10.0 - -* feature: Add a test hook (#180) -* feature: `ParseLevel` is now case-insensitive (#326) -* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308) -* performance: avoid re-allocations on `WithFields` (#335) - -# 0.9.0 - -* logrus/text_formatter: don't emit empty msg -* logrus/hooks/airbrake: move out of main repository -* logrus/hooks/sentry: move out of main repository -* logrus/hooks/papertrail: move out of main repository -* logrus/hooks/bugsnag: move out of main repository -* logrus/core: run tests with `-race` -* logrus/core: detect TTY based on `stderr` -* logrus/core: support `WithError` on logger -* logrus/core: Solaris support - -# 0.8.7 - -* logrus/core: fix possible race (#216) -* logrus/doc: small typo fixes and doc improvements - - -# 0.8.6 - -* hooks/raven: allow passing an initialized client - -# 0.8.5 - -* logrus/core: revert #208 - -# 0.8.4 - -* formatter/text: fix data race (#218) - -# 
0.8.3 - -* logrus/core: fix entry log level (#208) -* logrus/core: improve performance of text formatter by 40% -* logrus/core: expose `LevelHooks` type -* logrus/core: add support for DragonflyBSD and NetBSD -* formatter/text: print structs more verbosely - -# 0.8.2 - -* logrus: fix more Fatal family functions - -# 0.8.1 - -* logrus: fix not exiting on `Fatalf` and `Fatalln` - -# 0.8.0 - -* logrus: defaults to stderr instead of stdout -* hooks/sentry: add special field for `*http.Request` -* formatter/text: ignore Windows for colors - -# 0.7.3 - -* formatter/\*: allow configuration of timestamp layout - -# 0.7.2 - -* formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/sirupsen/logrus/LICENSE b/vendor/github.com/sirupsen/logrus/LICENSE deleted file mode 100644 index f090cb42f..000000000 --- a/vendor/github.com/sirupsen/logrus/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Simon Eskildsen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md deleted file mode 100644 index b042c896f..000000000 --- a/vendor/github.com/sirupsen/logrus/README.md +++ /dev/null @@ -1,513 +0,0 @@ -# Logrus :walrus: [![Build Status](https://github.com/sirupsen/logrus/workflows/CI/badge.svg)](https://github.com/sirupsen/logrus/actions?query=workflow%3ACI) [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![Go Reference](https://pkg.go.dev/badge/github.com/sirupsen/logrus.svg)](https://pkg.go.dev/github.com/sirupsen/logrus) - -Logrus is a structured logger for Go (golang), completely API compatible with -the standard library logger. - -**Logrus is in maintenance-mode.** We will not be introducing new features. It's -simply too hard to do in a way that won't break many people's projects, which is -the last thing you want from your Logging library (again...). - -This does not mean Logrus is dead. Logrus will continue to be maintained for -security, (backwards compatible) bug fixes, and performance (where we are -limited by the interface). - -I believe Logrus' biggest contribution is to have played a part in today's -widespread use of structured logging in Golang. There doesn't seem to be a -reason to do a major, breaking iteration into Logrus V2, since the fantastic Go -community has built those independently. Many fantastic alternatives have sprung -up. 
Logrus would look like those, had it been re-designed with what we know -about structured logging in Go today. Check out, for example, -[Zerolog][zerolog], [Zap][zap], and [Apex][apex]. - -[zerolog]: https://github.com/rs/zerolog -[zap]: https://github.com/uber-go/zap -[apex]: https://github.com/apex/log - -**Seeing weird case-sensitive problems?** It's in the past been possible to -import Logrus as both upper- and lower-case. Due to the Go package environment, -this caused issues in the community and we needed a standard. Some environments -experienced problems with the upper-case variant, so the lower-case was decided. -Everything using `logrus` will need to use the lower-case: -`github.com/sirupsen/logrus`. Any package that isn't, should be changed. - -To fix Glide, see [these -comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437). -For an in-depth explanation of the casing issue, see [this -comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276). - -Nicely color-coded in development (when a TTY is attached, otherwise just -plain text): - -![Colored](http://i.imgur.com/PY7qMwd.png) - -With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash -or Splunk: - -```json -{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the -ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} - -{"level":"warning","msg":"The group's number increased tremendously!", -"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"A giant walrus appears!", -"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", -"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} - -{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, -"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} -``` - -With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not -attached, the output is compatible with the -[logfmt](http://godoc.org/github.com/kr/logfmt) format: - -```text -time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 -time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 -time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true -time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 -time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 -time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" 
err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true -``` -To ensure this behaviour even if a TTY is attached, set your formatter as follows: - -```go - log.SetFormatter(&log.TextFormatter{ - DisableColors: true, - FullTimestamp: true, - }) -``` - -#### Logging Method Name - -If you wish to add the calling method as a field, instruct the logger via: -```go -log.SetReportCaller(true) -``` -This adds the caller as 'method' like so: - -```json -{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by", -"time":"2014-03-10 19:57:38.562543129 -0400 EDT"} -``` - -```text -time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin -``` -Note that this does add measurable overhead - the cost will depend on the version of Go, but is -between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your -environment via benchmarks: -``` -go test -bench=.*CallerTracing -``` - - -#### Case-sensitivity - -The organization's name was changed to lower-case--and this will not be changed -back. If you are getting import conflicts due to case sensitivity, please use -the lower-case import: `github.com/sirupsen/logrus`. - -#### Example - -The simplest way to use Logrus is simply the package-level exported logger: - -```go -package main - -import ( - log "github.com/sirupsen/logrus" -) - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - }).Info("A walrus appears") -} -``` - -Note that it's completely api-compatible with the stdlib logger, so you can -replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"` -and you'll now have the flexibility of Logrus. You can customize it all you -want: - -```go -package main - -import ( - "os" - log "github.com/sirupsen/logrus" -) - -func init() { - // Log as JSON instead of the default ASCII formatter. - log.SetFormatter(&log.JSONFormatter{}) - - // Output to stdout instead of the default stderr - // Can be any io.Writer, see below for File example - log.SetOutput(os.Stdout) - - // Only log the warning severity or above. - log.SetLevel(log.WarnLevel) -} - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(log.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(log.Fields{ - "omg": true, - "number": 100, - }).Fatal("The ice breaks!") - - // A common pattern is to re-use fields between logging statements by re-using - // the logrus.Entry returned from WithFields() - contextLogger := log.WithFields(log.Fields{ - "common": "this is a common field", - "other": "I also should be logged always", - }) - - contextLogger.Info("I'll be logged with common and other field") - contextLogger.Info("Me too") -} -``` - -For more advanced usage such as logging to multiple locations from the same -application, you can also create an instance of the `logrus` Logger: - -```go -package main - -import ( - "os" - "github.com/sirupsen/logrus" -) - -// Create a new instance of the logger. You can have any number of instances. -var log = logrus.New() - -func main() { - // The API for setting attributes is a little different than the package level - // exported logger. See Godoc. 
- log.Out = os.Stdout - - // You could set this to any `io.Writer` such as a file - // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) - // if err == nil { - // log.Out = file - // } else { - // log.Info("Failed to log to file, using default stderr") - // } - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") -} -``` - -#### Fields - -Logrus encourages careful, structured logging through logging fields instead of -long, unparseable error messages. For example, instead of: `log.Fatalf("Failed -to send event %s to topic %s with key %d")`, you should log the much more -discoverable: - -```go -log.WithFields(log.Fields{ - "event": event, - "topic": topic, - "key": key, -}).Fatal("Failed to send event") -``` - -We've found this API forces you to think about logging in a way that produces -much more useful logging messages. We've been in countless situations where just -a single added field to a log statement that was already there would've saved us -hours. The `WithFields` call is optional. - -In general, with Logrus using any of the `printf`-family functions should be -seen as a hint you should add a field, however, you can still use the -`printf`-family functions with Logrus. - -#### Default Fields - -Often it's helpful to have fields _always_ attached to log statements in an -application or parts of one. For example, you may want to always log the -`request_id` and `user_ip` in the context of a request. Instead of writing -`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on -every line, you can create a `logrus.Entry` to pass around instead: - -```go -requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip}) -requestLogger.Info("something happened on that request") # will log request_id and user_ip -requestLogger.Warn("something not great happened") -``` - -#### Hooks - -You can add hooks for logging levels. For example to send errors to an exception -tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to -multiple places simultaneously, e.g. syslog. - -Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in -`init`: - -```go -import ( - log "github.com/sirupsen/logrus" - "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake" - logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" - "log/syslog" -) - -func init() { - - // Use the Airbrake hook to report errors that have Error severity or above to - // an exception tracker. You can create custom hooks, see the Hooks section. - log.AddHook(airbrake.NewHook(123, "xyz", "production")) - - hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") - if err != nil { - log.Error("Unable to connect to local syslog daemon") - } else { - log.AddHook(hook) - } -} -``` -Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). - -A list of currently known service hooks can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks) - - -#### Level logging - -Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic. 
- -```go -log.Trace("Something very low level.") -log.Debug("Useful debugging information.") -log.Info("Something noteworthy happened!") -log.Warn("You should probably take a look at this.") -log.Error("Something failed but I'm not quitting.") -// Calls os.Exit(1) after logging -log.Fatal("Bye.") -// Calls panic() after logging -log.Panic("I'm bailing.") -``` - -You can set the logging level on a `Logger`, then it will only log entries with -that severity or anything above it: - -```go -// Will log anything that is info or above (warn, error, fatal, panic). Default. -log.SetLevel(log.InfoLevel) -``` - -It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose -environment if your application has that. - -#### Entries - -Besides the fields added with `WithField` or `WithFields` some fields are -automatically added to all logging events: - -1. `time`. The timestamp when the entry was created. -2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after - the `AddFields` call. E.g. `Failed to send event.` -3. `level`. The logging level. E.g. `info`. - -#### Environments - -Logrus has no notion of environment. - -If you wish for hooks and formatters to only be used in specific environments, -you should handle that yourself. For example, if your application has a global -variable `Environment`, which is a string representation of the environment you -could do: - -```go -import ( - log "github.com/sirupsen/logrus" -) - -func init() { - // do something here to set environment depending on an environment variable - // or command-line flag - if Environment == "production" { - log.SetFormatter(&log.JSONFormatter{}) - } else { - // The TextFormatter is default, you don't actually have to do this. - log.SetFormatter(&log.TextFormatter{}) - } -} -``` - -This configuration is how `logrus` was intended to be used, but JSON in -production is mostly only useful if you do log aggregation with tools like -Splunk or Logstash. - -#### Formatters - -The built-in logging formatters are: - -* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise - without colors. - * *Note:* to force colored output when there is no TTY, set the `ForceColors` - field to `true`. To force no colored output even if there is a TTY set the - `DisableColors` field to `true`. For Windows, see - [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable). - * When colors are enabled, levels are truncated to 4 characters by default. To disable - truncation set the `DisableLevelTruncation` field to `true`. - * When outputting to a TTY, it's often helpful to visually scan down a column where all the levels are the same width. Setting the `PadLevelText` field to `true` enables this behavior, by adding padding to the level text. - * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). -* `logrus.JSONFormatter`. Logs fields as JSON. - * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). - -Third party logging formatters: - -* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine. -* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html). -* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. 
-* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. -* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the Power of Zalgo. -* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure. -* [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). get fileName, log's line number and the latest function's name when print log; Sava log to files. -* [`caption-json-formatter`](https://github.com/nolleh/caption_json_formatter). logrus's message json formatter with human-readable caption added. - -You can define your formatter by implementing the `Formatter` interface, -requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a -`Fields` type (`map[string]interface{}`) with all your fields as well as the -default ones (see Entries section above): - -```go -type MyJSONFormatter struct { -} - -log.SetFormatter(new(MyJSONFormatter)) - -func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { - // Note this doesn't include Time, Level and Message which are available on - // the Entry. Consult `godoc` on information about those fields or read the - // source of the official loggers. - serialized, err := json.Marshal(entry.Data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %w", err) - } - return append(serialized, '\n'), nil -} -``` - -#### Logger as an `io.Writer` - -Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. - -```go -w := logger.Writer() -defer w.Close() - -srv := http.Server{ - // create a stdlib log.Logger that writes to - // logrus.Logger. - ErrorLog: log.New(w, "", 0), -} -``` - -Each line written to that writer will be printed the usual way, using formatters -and hooks. The level for those entries is `info`. - -This means that we can override the standard library logger easily: - -```go -logger := logrus.New() -logger.Formatter = &logrus.JSONFormatter{} - -// Use logrus for standard log output -// Note that `log` here references stdlib's log -// Not logrus imported under the name `log`. -log.SetOutput(logger.Writer()) -``` - -#### Rotation - -Log rotation is not provided with Logrus. Log rotation should be done by an -external program (like `logrotate(8)`) that can compress and delete old log -entries. It should not be a feature of the application-level logger. - -#### Tools - -| Tool | Description | -| ---- | ----------- | -|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will be generated with different configs in different environments.| -|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | - -#### Testing - -Logrus has a built in facility for asserting the presence of log messages. 
This is implemented through the `test` hook and provides: - -* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just adds the `test` hook -* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): - -```go -import( - "github.com/sirupsen/logrus" - "github.com/sirupsen/logrus/hooks/test" - "github.com/stretchr/testify/assert" - "testing" -) - -func TestSomething(t*testing.T){ - logger, hook := test.NewNullLogger() - logger.Error("Helloerror") - - assert.Equal(t, 1, len(hook.Entries)) - assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level) - assert.Equal(t, "Helloerror", hook.LastEntry().Message) - - hook.Reset() - assert.Nil(t, hook.LastEntry()) -} -``` - -#### Fatal handlers - -Logrus can register one or more functions that will be called when any `fatal` -level message is logged. The registered handlers will be executed before -logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need -to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted. - -``` -... -handler := func() { - // gracefully shutdown something... -} -logrus.RegisterExitHandler(handler) -... -``` - -#### Thread safety - -By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs. -If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking. - -Situation when locking is not needed includes: - -* You have no hooks registered, or hooks calling is already thread-safe. - -* Writing to logger.Out is already thread-safe, for example: - - 1) logger.Out is protected by locks. - - 2) logger.Out is an os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing) - - (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/) diff --git a/vendor/github.com/sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go deleted file mode 100644 index 8fd189e1c..000000000 --- a/vendor/github.com/sirupsen/logrus/alt_exit.go +++ /dev/null @@ -1,76 +0,0 @@ -package logrus - -// The following code was sourced and modified from the -// https://github.com/tebeka/atexit package governed by the following license: -// -// Copyright (c) 2012 Miki Tebeka . -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software is furnished to do so, -// subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- -import ( - "fmt" - "os" -) - -var handlers = []func(){} - -func runHandler(handler func()) { - defer func() { - if err := recover(); err != nil { - fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err) - } - }() - - handler() -} - -func runHandlers() { - for _, handler := range handlers { - runHandler(handler) - } -} - -// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code) -func Exit(code int) { - runHandlers() - os.Exit(code) -} - -// RegisterExitHandler appends a Logrus Exit handler to the list of handlers, -// call logrus.Exit to invoke all handlers. The handlers will also be invoked when -// any Fatal log entry is made. -// -// This method is useful when a caller wishes to use logrus to log a fatal -// message but also needs to gracefully shutdown. An example usecase could be -// closing database connections, or sending a alert that the application is -// closing. -func RegisterExitHandler(handler func()) { - handlers = append(handlers, handler) -} - -// DeferExitHandler prepends a Logrus Exit handler to the list of handlers, -// call logrus.Exit to invoke all handlers. The handlers will also be invoked when -// any Fatal log entry is made. -// -// This method is useful when a caller wishes to use logrus to log a fatal -// message but also needs to gracefully shutdown. An example usecase could be -// closing database connections, or sending a alert that the application is -// closing. -func DeferExitHandler(handler func()) { - handlers = append([]func(){handler}, handlers...) -} diff --git a/vendor/github.com/sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml deleted file mode 100644 index df9d65c3a..000000000 --- a/vendor/github.com/sirupsen/logrus/appveyor.yml +++ /dev/null @@ -1,14 +0,0 @@ -version: "{build}" -platform: x64 -clone_folder: c:\gopath\src\github.com\sirupsen\logrus -environment: - GOPATH: c:\gopath -branches: - only: - - master -install: - - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - - go version -build_script: - - go get -t - - go test diff --git a/vendor/github.com/sirupsen/logrus/buffer_pool.go b/vendor/github.com/sirupsen/logrus/buffer_pool.go deleted file mode 100644 index c7787f77c..000000000 --- a/vendor/github.com/sirupsen/logrus/buffer_pool.go +++ /dev/null @@ -1,43 +0,0 @@ -package logrus - -import ( - "bytes" - "sync" -) - -var ( - bufferPool BufferPool -) - -type BufferPool interface { - Put(*bytes.Buffer) - Get() *bytes.Buffer -} - -type defaultPool struct { - pool *sync.Pool -} - -func (p *defaultPool) Put(buf *bytes.Buffer) { - p.pool.Put(buf) -} - -func (p *defaultPool) Get() *bytes.Buffer { - return p.pool.Get().(*bytes.Buffer) -} - -// SetBufferPool allows to replace the default logrus buffer pool -// to better meets the specific needs of an application. -func SetBufferPool(bp BufferPool) { - bufferPool = bp -} - -func init() { - SetBufferPool(&defaultPool{ - pool: &sync.Pool{ - New: func() interface{} { - return new(bytes.Buffer) - }, - }, - }) -} diff --git a/vendor/github.com/sirupsen/logrus/doc.go b/vendor/github.com/sirupsen/logrus/doc.go deleted file mode 100644 index da67aba06..000000000 --- a/vendor/github.com/sirupsen/logrus/doc.go +++ /dev/null @@ -1,26 +0,0 @@ -/* -Package logrus is a structured logger for Go, completely API compatible with the standard library logger. 
- - -The simplest way to use Logrus is simply the package-level exported logger: - - package main - - import ( - log "github.com/sirupsen/logrus" - ) - - func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "number": 1, - "size": 10, - }).Info("A walrus appears") - } - -Output: - time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 - -For a full guide visit https://github.com/sirupsen/logrus -*/ -package logrus diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go deleted file mode 100644 index 71cdbbc35..000000000 --- a/vendor/github.com/sirupsen/logrus/entry.go +++ /dev/null @@ -1,442 +0,0 @@ -package logrus - -import ( - "bytes" - "context" - "fmt" - "os" - "reflect" - "runtime" - "strings" - "sync" - "time" -) - -var ( - - // qualified package name, cached at first use - logrusPackage string - - // Positions in the call stack when tracing to report the calling method - minimumCallerDepth int - - // Used for caller information initialisation - callerInitOnce sync.Once -) - -const ( - maximumCallerDepth int = 25 - knownLogrusFrames int = 4 -) - -func init() { - // start at the bottom of the stack before the package-name cache is primed - minimumCallerDepth = 1 -} - -// Defines the key when adding errors using WithError. -var ErrorKey = "error" - -// An entry is the final or intermediate Logrus logging entry. It contains all -// the fields passed with WithField{,s}. It's finally logged when Trace, Debug, -// Info, Warn, Error, Fatal or Panic is called on it. These objects can be -// reused and passed around as much as you wish to avoid field duplication. -type Entry struct { - Logger *Logger - - // Contains all the fields set by the user. - Data Fields - - // Time at which the log entry was created - Time time.Time - - // Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic - // This field will be set on entry firing and the value will be equal to the one in Logger struct field. - Level Level - - // Calling method, with package name - Caller *runtime.Frame - - // Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic - Message string - - // When formatter is called in entry.log(), a Buffer may be set to entry - Buffer *bytes.Buffer - - // Contains the context set by the user. Useful for hook processing etc. - Context context.Context - - // err may contain a field formatting error - err string -} - -func NewEntry(logger *Logger) *Entry { - return &Entry{ - Logger: logger, - // Default is three fields, plus one optional. Give a little extra room. - Data: make(Fields, 6), - } -} - -func (entry *Entry) Dup() *Entry { - data := make(Fields, len(entry.Data)) - for k, v := range entry.Data { - data[k] = v - } - return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, Context: entry.Context, err: entry.err} -} - -// Returns the bytes representation of this entry from the formatter. -func (entry *Entry) Bytes() ([]byte, error) { - return entry.Logger.Formatter.Format(entry) -} - -// Returns the string representation from the reader and ultimately the -// formatter. -func (entry *Entry) String() (string, error) { - serialized, err := entry.Bytes() - if err != nil { - return "", err - } - str := string(serialized) - return str, nil -} - -// Add an error as single field (using the key defined in ErrorKey) to the Entry. -func (entry *Entry) WithError(err error) *Entry { - return entry.WithField(ErrorKey, err) -} - -// Add a context to the Entry. 
-func (entry *Entry) WithContext(ctx context.Context) *Entry { - dataCopy := make(Fields, len(entry.Data)) - for k, v := range entry.Data { - dataCopy[k] = v - } - return &Entry{Logger: entry.Logger, Data: dataCopy, Time: entry.Time, err: entry.err, Context: ctx} -} - -// Add a single field to the Entry. -func (entry *Entry) WithField(key string, value interface{}) *Entry { - return entry.WithFields(Fields{key: value}) -} - -// Add a map of fields to the Entry. -func (entry *Entry) WithFields(fields Fields) *Entry { - data := make(Fields, len(entry.Data)+len(fields)) - for k, v := range entry.Data { - data[k] = v - } - fieldErr := entry.err - for k, v := range fields { - isErrField := false - if t := reflect.TypeOf(v); t != nil { - switch { - case t.Kind() == reflect.Func, t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Func: - isErrField = true - } - } - if isErrField { - tmp := fmt.Sprintf("can not add field %q", k) - if fieldErr != "" { - fieldErr = entry.err + ", " + tmp - } else { - fieldErr = tmp - } - } else { - data[k] = v - } - } - return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context} -} - -// Overrides the time of the Entry. -func (entry *Entry) WithTime(t time.Time) *Entry { - dataCopy := make(Fields, len(entry.Data)) - for k, v := range entry.Data { - dataCopy[k] = v - } - return &Entry{Logger: entry.Logger, Data: dataCopy, Time: t, err: entry.err, Context: entry.Context} -} - -// getPackageName reduces a fully qualified function name to the package name -// There really ought to be to be a better way... -func getPackageName(f string) string { - for { - lastPeriod := strings.LastIndex(f, ".") - lastSlash := strings.LastIndex(f, "/") - if lastPeriod > lastSlash { - f = f[:lastPeriod] - } else { - break - } - } - - return f -} - -// getCaller retrieves the name of the first non-logrus calling function -func getCaller() *runtime.Frame { - // cache this package's fully-qualified name - callerInitOnce.Do(func() { - pcs := make([]uintptr, maximumCallerDepth) - _ = runtime.Callers(0, pcs) - - // dynamic get the package name and the minimum caller depth - for i := 0; i < maximumCallerDepth; i++ { - funcName := runtime.FuncForPC(pcs[i]).Name() - if strings.Contains(funcName, "getCaller") { - logrusPackage = getPackageName(funcName) - break - } - } - - minimumCallerDepth = knownLogrusFrames - }) - - // Restrict the lookback frames to avoid runaway lookups - pcs := make([]uintptr, maximumCallerDepth) - depth := runtime.Callers(minimumCallerDepth, pcs) - frames := runtime.CallersFrames(pcs[:depth]) - - for f, again := frames.Next(); again; f, again = frames.Next() { - pkg := getPackageName(f.Function) - - // If the caller isn't part of this package, we're done - if pkg != logrusPackage { - return &f //nolint:scopelint - } - } - - // if we got here, we failed to find the caller's context - return nil -} - -func (entry Entry) HasCaller() (has bool) { - return entry.Logger != nil && - entry.Logger.ReportCaller && - entry.Caller != nil -} - -func (entry *Entry) log(level Level, msg string) { - var buffer *bytes.Buffer - - newEntry := entry.Dup() - - if newEntry.Time.IsZero() { - newEntry.Time = time.Now() - } - - newEntry.Level = level - newEntry.Message = msg - - newEntry.Logger.mu.Lock() - reportCaller := newEntry.Logger.ReportCaller - bufPool := newEntry.getBufferPool() - newEntry.Logger.mu.Unlock() - - if reportCaller { - newEntry.Caller = getCaller() - } - - newEntry.fireHooks() - buffer = bufPool.Get() - defer func() { - 
newEntry.Buffer = nil - buffer.Reset() - bufPool.Put(buffer) - }() - buffer.Reset() - newEntry.Buffer = buffer - - newEntry.write() - - newEntry.Buffer = nil - - // To avoid Entry#log() returning a value that only would make sense for - // panic() to use in Entry#Panic(), we avoid the allocation by checking - // directly here. - if level <= PanicLevel { - panic(newEntry) - } -} - -func (entry *Entry) getBufferPool() (pool BufferPool) { - if entry.Logger.BufferPool != nil { - return entry.Logger.BufferPool - } - return bufferPool -} - -func (entry *Entry) fireHooks() { - var tmpHooks LevelHooks - entry.Logger.mu.Lock() - tmpHooks = make(LevelHooks, len(entry.Logger.Hooks)) - for k, v := range entry.Logger.Hooks { - tmpHooks[k] = v - } - entry.Logger.mu.Unlock() - - err := tmpHooks.Fire(entry.Level, entry) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) - } -} - -func (entry *Entry) write() { - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() - serialized, err := entry.Logger.Formatter.Format(entry) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) - return - } - if _, err := entry.Logger.Out.Write(serialized); err != nil { - fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) - } -} - -// Log will log a message at the level given as parameter. -// Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit. -// For this behaviour Entry.Panic or Entry.Fatal should be used instead. -func (entry *Entry) Log(level Level, args ...interface{}) { - if entry.Logger.IsLevelEnabled(level) { - entry.log(level, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Trace(args ...interface{}) { - entry.Log(TraceLevel, args...) -} - -func (entry *Entry) Debug(args ...interface{}) { - entry.Log(DebugLevel, args...) -} - -func (entry *Entry) Print(args ...interface{}) { - entry.Info(args...) -} - -func (entry *Entry) Info(args ...interface{}) { - entry.Log(InfoLevel, args...) -} - -func (entry *Entry) Warn(args ...interface{}) { - entry.Log(WarnLevel, args...) -} - -func (entry *Entry) Warning(args ...interface{}) { - entry.Warn(args...) -} - -func (entry *Entry) Error(args ...interface{}) { - entry.Log(ErrorLevel, args...) -} - -func (entry *Entry) Fatal(args ...interface{}) { - entry.Log(FatalLevel, args...) - entry.Logger.Exit(1) -} - -func (entry *Entry) Panic(args ...interface{}) { - entry.Log(PanicLevel, args...) -} - -// Entry Printf family functions - -func (entry *Entry) Logf(level Level, format string, args ...interface{}) { - if entry.Logger.IsLevelEnabled(level) { - entry.Log(level, fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Tracef(format string, args ...interface{}) { - entry.Logf(TraceLevel, format, args...) -} - -func (entry *Entry) Debugf(format string, args ...interface{}) { - entry.Logf(DebugLevel, format, args...) -} - -func (entry *Entry) Infof(format string, args ...interface{}) { - entry.Logf(InfoLevel, format, args...) -} - -func (entry *Entry) Printf(format string, args ...interface{}) { - entry.Infof(format, args...) -} - -func (entry *Entry) Warnf(format string, args ...interface{}) { - entry.Logf(WarnLevel, format, args...) -} - -func (entry *Entry) Warningf(format string, args ...interface{}) { - entry.Warnf(format, args...) -} - -func (entry *Entry) Errorf(format string, args ...interface{}) { - entry.Logf(ErrorLevel, format, args...) -} - -func (entry *Entry) Fatalf(format string, args ...interface{}) { - entry.Logf(FatalLevel, format, args...) 
- entry.Logger.Exit(1) -} - -func (entry *Entry) Panicf(format string, args ...interface{}) { - entry.Logf(PanicLevel, format, args...) -} - -// Entry Println family functions - -func (entry *Entry) Logln(level Level, args ...interface{}) { - if entry.Logger.IsLevelEnabled(level) { - entry.Log(level, entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Traceln(args ...interface{}) { - entry.Logln(TraceLevel, args...) -} - -func (entry *Entry) Debugln(args ...interface{}) { - entry.Logln(DebugLevel, args...) -} - -func (entry *Entry) Infoln(args ...interface{}) { - entry.Logln(InfoLevel, args...) -} - -func (entry *Entry) Println(args ...interface{}) { - entry.Infoln(args...) -} - -func (entry *Entry) Warnln(args ...interface{}) { - entry.Logln(WarnLevel, args...) -} - -func (entry *Entry) Warningln(args ...interface{}) { - entry.Warnln(args...) -} - -func (entry *Entry) Errorln(args ...interface{}) { - entry.Logln(ErrorLevel, args...) -} - -func (entry *Entry) Fatalln(args ...interface{}) { - entry.Logln(FatalLevel, args...) - entry.Logger.Exit(1) -} - -func (entry *Entry) Panicln(args ...interface{}) { - entry.Logln(PanicLevel, args...) -} - -// Sprintlnn => Sprint no newline. This is to get the behavior of how -// fmt.Sprintln where spaces are always added between operands, regardless of -// their type. Instead of vendoring the Sprintln implementation to spare a -// string allocation, we do the simplest thing. -func (entry *Entry) sprintlnn(args ...interface{}) string { - msg := fmt.Sprintln(args...) - return msg[:len(msg)-1] -} diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go deleted file mode 100644 index 017c30ce6..000000000 --- a/vendor/github.com/sirupsen/logrus/exported.go +++ /dev/null @@ -1,270 +0,0 @@ -package logrus - -import ( - "context" - "io" - "time" -) - -var ( - // std is the name of the standard logger in stdlib `log` - std = New() -) - -func StandardLogger() *Logger { - return std -} - -// SetOutput sets the standard logger output. -func SetOutput(out io.Writer) { - std.SetOutput(out) -} - -// SetFormatter sets the standard logger formatter. -func SetFormatter(formatter Formatter) { - std.SetFormatter(formatter) -} - -// SetReportCaller sets whether the standard logger will include the calling -// method as a field. -func SetReportCaller(include bool) { - std.SetReportCaller(include) -} - -// SetLevel sets the standard logger level. -func SetLevel(level Level) { - std.SetLevel(level) -} - -// GetLevel returns the standard logger level. -func GetLevel() Level { - return std.GetLevel() -} - -// IsLevelEnabled checks if the log level of the standard logger is greater than the level param -func IsLevelEnabled(level Level) bool { - return std.IsLevelEnabled(level) -} - -// AddHook adds a hook to the standard logger hooks. -func AddHook(hook Hook) { - std.AddHook(hook) -} - -// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. -func WithError(err error) *Entry { - return std.WithField(ErrorKey, err) -} - -// WithContext creates an entry from the standard logger and adds a context to it. -func WithContext(ctx context.Context) *Entry { - return std.WithContext(ctx) -} - -// WithField creates an entry from the standard logger and adds a field to -// it. If you want multiple fields, use `WithFields`. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. 
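The exported helpers being removed here only build an *Entry; nothing is written until a level method (Info, Error, ...) is called on that entry. A minimal usage sketch of that pattern, with field names and messages chosen purely for illustration (not taken from this change):

package main

import (
	"errors"

	log "github.com/sirupsen/logrus"
)

func main() {
	// Configure the shared standard logger once, then use the package-level helpers.
	log.SetFormatter(&log.JSONFormatter{})
	log.SetLevel(log.DebugLevel)

	// WithFields returns an *Entry; nothing is emitted until a level
	// method (Debug, Info, Warn, ...) is called on it.
	log.WithFields(log.Fields{
		"bucket": "example-bucket", // illustrative field names
		"key":    "path/to/object",
	}).Info("object selected")

	// WithError attaches the error under the default "error" key.
	log.WithError(errors.New("permission denied")).Error("select failed")
}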
-func WithField(key string, value interface{}) *Entry { - return std.WithField(key, value) -} - -// WithFields creates an entry from the standard logger and adds multiple -// fields to it. This is simply a helper for `WithField`, invoking it -// once for each field. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithFields(fields Fields) *Entry { - return std.WithFields(fields) -} - -// WithTime creates an entry from the standard logger and overrides the time of -// logs generated with it. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithTime(t time.Time) *Entry { - return std.WithTime(t) -} - -// Trace logs a message at level Trace on the standard logger. -func Trace(args ...interface{}) { - std.Trace(args...) -} - -// Debug logs a message at level Debug on the standard logger. -func Debug(args ...interface{}) { - std.Debug(args...) -} - -// Print logs a message at level Info on the standard logger. -func Print(args ...interface{}) { - std.Print(args...) -} - -// Info logs a message at level Info on the standard logger. -func Info(args ...interface{}) { - std.Info(args...) -} - -// Warn logs a message at level Warn on the standard logger. -func Warn(args ...interface{}) { - std.Warn(args...) -} - -// Warning logs a message at level Warn on the standard logger. -func Warning(args ...interface{}) { - std.Warning(args...) -} - -// Error logs a message at level Error on the standard logger. -func Error(args ...interface{}) { - std.Error(args...) -} - -// Panic logs a message at level Panic on the standard logger. -func Panic(args ...interface{}) { - std.Panic(args...) -} - -// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1. -func Fatal(args ...interface{}) { - std.Fatal(args...) -} - -// TraceFn logs a message from a func at level Trace on the standard logger. -func TraceFn(fn LogFunction) { - std.TraceFn(fn) -} - -// DebugFn logs a message from a func at level Debug on the standard logger. -func DebugFn(fn LogFunction) { - std.DebugFn(fn) -} - -// PrintFn logs a message from a func at level Info on the standard logger. -func PrintFn(fn LogFunction) { - std.PrintFn(fn) -} - -// InfoFn logs a message from a func at level Info on the standard logger. -func InfoFn(fn LogFunction) { - std.InfoFn(fn) -} - -// WarnFn logs a message from a func at level Warn on the standard logger. -func WarnFn(fn LogFunction) { - std.WarnFn(fn) -} - -// WarningFn logs a message from a func at level Warn on the standard logger. -func WarningFn(fn LogFunction) { - std.WarningFn(fn) -} - -// ErrorFn logs a message from a func at level Error on the standard logger. -func ErrorFn(fn LogFunction) { - std.ErrorFn(fn) -} - -// PanicFn logs a message from a func at level Panic on the standard logger. -func PanicFn(fn LogFunction) { - std.PanicFn(fn) -} - -// FatalFn logs a message from a func at level Fatal on the standard logger then the process will exit with status set to 1. -func FatalFn(fn LogFunction) { - std.FatalFn(fn) -} - -// Tracef logs a message at level Trace on the standard logger. -func Tracef(format string, args ...interface{}) { - std.Tracef(format, args...) -} - -// Debugf logs a message at level Debug on the standard logger. -func Debugf(format string, args ...interface{}) { - std.Debugf(format, args...) -} - -// Printf logs a message at level Info on the standard logger. 
-func Printf(format string, args ...interface{}) { - std.Printf(format, args...) -} - -// Infof logs a message at level Info on the standard logger. -func Infof(format string, args ...interface{}) { - std.Infof(format, args...) -} - -// Warnf logs a message at level Warn on the standard logger. -func Warnf(format string, args ...interface{}) { - std.Warnf(format, args...) -} - -// Warningf logs a message at level Warn on the standard logger. -func Warningf(format string, args ...interface{}) { - std.Warningf(format, args...) -} - -// Errorf logs a message at level Error on the standard logger. -func Errorf(format string, args ...interface{}) { - std.Errorf(format, args...) -} - -// Panicf logs a message at level Panic on the standard logger. -func Panicf(format string, args ...interface{}) { - std.Panicf(format, args...) -} - -// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1. -func Fatalf(format string, args ...interface{}) { - std.Fatalf(format, args...) -} - -// Traceln logs a message at level Trace on the standard logger. -func Traceln(args ...interface{}) { - std.Traceln(args...) -} - -// Debugln logs a message at level Debug on the standard logger. -func Debugln(args ...interface{}) { - std.Debugln(args...) -} - -// Println logs a message at level Info on the standard logger. -func Println(args ...interface{}) { - std.Println(args...) -} - -// Infoln logs a message at level Info on the standard logger. -func Infoln(args ...interface{}) { - std.Infoln(args...) -} - -// Warnln logs a message at level Warn on the standard logger. -func Warnln(args ...interface{}) { - std.Warnln(args...) -} - -// Warningln logs a message at level Warn on the standard logger. -func Warningln(args ...interface{}) { - std.Warningln(args...) -} - -// Errorln logs a message at level Error on the standard logger. -func Errorln(args ...interface{}) { - std.Errorln(args...) -} - -// Panicln logs a message at level Panic on the standard logger. -func Panicln(args ...interface{}) { - std.Panicln(args...) -} - -// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1. -func Fatalln(args ...interface{}) { - std.Fatalln(args...) -} diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go deleted file mode 100644 index 408883773..000000000 --- a/vendor/github.com/sirupsen/logrus/formatter.go +++ /dev/null @@ -1,78 +0,0 @@ -package logrus - -import "time" - -// Default key names for the default fields -const ( - defaultTimestampFormat = time.RFC3339 - FieldKeyMsg = "msg" - FieldKeyLevel = "level" - FieldKeyTime = "time" - FieldKeyLogrusError = "logrus_error" - FieldKeyFunc = "func" - FieldKeyFile = "file" -) - -// The Formatter interface is used to implement a custom Formatter. It takes an -// `Entry`. It exposes all the fields, including the default ones: -// -// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. -// * `entry.Data["time"]`. The timestamp. -// * `entry.Data["level"]. The level the entry was logged at. -// -// Any additional fields added with `WithField` or `WithFields` are also in -// `entry.Data`. Format is expected to return an array of bytes which are then -// logged to `logger.Out`. -type Formatter interface { - Format(*Entry) ([]byte, error) -} - -// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when -// dumping it. 
If this code wasn't there doing: -// -// logrus.WithField("level", 1).Info("hello") -// -// Would just silently drop the user provided level. Instead with this code -// it'll logged as: -// -// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} -// -// It's not exported because it's still using Data in an opinionated way. It's to -// avoid code duplication between the two default formatters. -func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) { - timeKey := fieldMap.resolve(FieldKeyTime) - if t, ok := data[timeKey]; ok { - data["fields."+timeKey] = t - delete(data, timeKey) - } - - msgKey := fieldMap.resolve(FieldKeyMsg) - if m, ok := data[msgKey]; ok { - data["fields."+msgKey] = m - delete(data, msgKey) - } - - levelKey := fieldMap.resolve(FieldKeyLevel) - if l, ok := data[levelKey]; ok { - data["fields."+levelKey] = l - delete(data, levelKey) - } - - logrusErrKey := fieldMap.resolve(FieldKeyLogrusError) - if l, ok := data[logrusErrKey]; ok { - data["fields."+logrusErrKey] = l - delete(data, logrusErrKey) - } - - // If reportCaller is not set, 'func' will not conflict. - if reportCaller { - funcKey := fieldMap.resolve(FieldKeyFunc) - if l, ok := data[funcKey]; ok { - data["fields."+funcKey] = l - } - fileKey := fieldMap.resolve(FieldKeyFile) - if l, ok := data[fileKey]; ok { - data["fields."+fileKey] = l - } - } -} diff --git a/vendor/github.com/sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go deleted file mode 100644 index 3f151cdc3..000000000 --- a/vendor/github.com/sirupsen/logrus/hooks.go +++ /dev/null @@ -1,34 +0,0 @@ -package logrus - -// A hook to be fired when logging on the logging levels returned from -// `Levels()` on your implementation of the interface. Note that this is not -// fired in a goroutine or a channel with workers, you should handle such -// functionality yourself if your call is non-blocking and you don't wish for -// the logging calls for levels returned from `Levels()` to block. -type Hook interface { - Levels() []Level - Fire(*Entry) error -} - -// Internal type for storing the hooks on a logger instance. -type LevelHooks map[Level][]Hook - -// Add a hook to an instance of logger. This is called with -// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. -func (hooks LevelHooks) Add(hook Hook) { - for _, level := range hook.Levels() { - hooks[level] = append(hooks[level], hook) - } -} - -// Fire all the hooks for the passed level. Used by `entry.log` to fire -// appropriate hooks for a log entry. -func (hooks LevelHooks) Fire(level Level, entry *Entry) error { - for _, hook := range hooks[level] { - if err := hook.Fire(entry); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go deleted file mode 100644 index c96dc5636..000000000 --- a/vendor/github.com/sirupsen/logrus/json_formatter.go +++ /dev/null @@ -1,128 +0,0 @@ -package logrus - -import ( - "bytes" - "encoding/json" - "fmt" - "runtime" -) - -type fieldKey string - -// FieldMap allows customization of the key names for default fields. -type FieldMap map[fieldKey]string - -func (f FieldMap) resolve(key fieldKey) string { - if k, ok := f[key]; ok { - return k - } - - return string(key) -} - -// JSONFormatter formats logs into parsable json -type JSONFormatter struct { - // TimestampFormat sets the format used for marshaling timestamps. 
- // The format to use is the same than for time.Format or time.Parse from the standard - // library. - // The standard Library already provides a set of predefined format. - TimestampFormat string - - // DisableTimestamp allows disabling automatic timestamps in output - DisableTimestamp bool - - // DisableHTMLEscape allows disabling html escaping in output - DisableHTMLEscape bool - - // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key. - DataKey string - - // FieldMap allows users to customize the names of keys for default fields. - // As an example: - // formatter := &JSONFormatter{ - // FieldMap: FieldMap{ - // FieldKeyTime: "@timestamp", - // FieldKeyLevel: "@level", - // FieldKeyMsg: "@message", - // FieldKeyFunc: "@caller", - // }, - // } - FieldMap FieldMap - - // CallerPrettyfier can be set by the user to modify the content - // of the function and file keys in the json data when ReportCaller is - // activated. If any of the returned value is the empty string the - // corresponding key will be removed from json fields. - CallerPrettyfier func(*runtime.Frame) (function string, file string) - - // PrettyPrint will indent all json logs - PrettyPrint bool -} - -// Format renders a single log entry -func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - data := make(Fields, len(entry.Data)+4) - for k, v := range entry.Data { - switch v := v.(type) { - case error: - // Otherwise errors are ignored by `encoding/json` - // https://github.com/sirupsen/logrus/issues/137 - data[k] = v.Error() - default: - data[k] = v - } - } - - if f.DataKey != "" { - newData := make(Fields, 4) - newData[f.DataKey] = data - data = newData - } - - prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) - - timestampFormat := f.TimestampFormat - if timestampFormat == "" { - timestampFormat = defaultTimestampFormat - } - - if entry.err != "" { - data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err - } - if !f.DisableTimestamp { - data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) - } - data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message - data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() - if entry.HasCaller() { - funcVal := entry.Caller.Function - fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) - if f.CallerPrettyfier != nil { - funcVal, fileVal = f.CallerPrettyfier(entry.Caller) - } - if funcVal != "" { - data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal - } - if fileVal != "" { - data[f.FieldMap.resolve(FieldKeyFile)] = fileVal - } - } - - var b *bytes.Buffer - if entry.Buffer != nil { - b = entry.Buffer - } else { - b = &bytes.Buffer{} - } - - encoder := json.NewEncoder(b) - encoder.SetEscapeHTML(!f.DisableHTMLEscape) - if f.PrettyPrint { - encoder.SetIndent("", " ") - } - if err := encoder.Encode(data); err != nil { - return nil, fmt.Errorf("failed to marshal fields to JSON, %w", err) - } - - return b.Bytes(), nil -} diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go deleted file mode 100644 index 5ff0aef6d..000000000 --- a/vendor/github.com/sirupsen/logrus/logger.go +++ /dev/null @@ -1,417 +0,0 @@ -package logrus - -import ( - "context" - "io" - "os" - "sync" - "sync/atomic" - "time" -) - -// LogFunction For big messages, it can be more efficient to pass a function -// and only call it if the log level is actually enables rather than -// generating the log message and then checking if the level is enabled -type LogFunction func() 
[]interface{} - -type Logger struct { - // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a - // file, or leave it default which is `os.Stderr`. You can also set this to - // something more adventurous, such as logging to Kafka. - Out io.Writer - // Hooks for the logger instance. These allow firing events based on logging - // levels and log entries. For example, to send errors to an error tracking - // service, log to StatsD or dump the core on fatal errors. - Hooks LevelHooks - // All log entries pass through the formatter before logged to Out. The - // included formatters are `TextFormatter` and `JSONFormatter` for which - // TextFormatter is the default. In development (when a TTY is attached) it - // logs with colors, but to a file it wouldn't. You can easily implement your - // own that implements the `Formatter` interface, see the `README` or included - // formatters for examples. - Formatter Formatter - - // Flag for whether to log caller info (off by default) - ReportCaller bool - - // The logging level the logger should log at. This is typically (and defaults - // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be - // logged. - Level Level - // Used to sync writing to the log. Locking is enabled by Default - mu MutexWrap - // Reusable empty entry - entryPool sync.Pool - // Function to exit the application, defaults to `os.Exit()` - ExitFunc exitFunc - // The buffer pool used to format the log. If it is nil, the default global - // buffer pool will be used. - BufferPool BufferPool -} - -type exitFunc func(int) - -type MutexWrap struct { - lock sync.Mutex - disabled bool -} - -func (mw *MutexWrap) Lock() { - if !mw.disabled { - mw.lock.Lock() - } -} - -func (mw *MutexWrap) Unlock() { - if !mw.disabled { - mw.lock.Unlock() - } -} - -func (mw *MutexWrap) Disable() { - mw.disabled = true -} - -// Creates a new logger. Configuration should be set by changing `Formatter`, -// `Out` and `Hooks` directly on the default logger instance. You can also just -// instantiate your own: -// -// var log = &logrus.Logger{ -// Out: os.Stderr, -// Formatter: new(logrus.TextFormatter), -// Hooks: make(logrus.LevelHooks), -// Level: logrus.DebugLevel, -// } -// -// It's recommended to make this a global instance called `log`. -func New() *Logger { - return &Logger{ - Out: os.Stderr, - Formatter: new(TextFormatter), - Hooks: make(LevelHooks), - Level: InfoLevel, - ExitFunc: os.Exit, - ReportCaller: false, - } -} - -func (logger *Logger) newEntry() *Entry { - entry, ok := logger.entryPool.Get().(*Entry) - if ok { - return entry - } - return NewEntry(logger) -} - -func (logger *Logger) releaseEntry(entry *Entry) { - entry.Data = map[string]interface{}{} - logger.entryPool.Put(entry) -} - -// WithField allocates a new entry and adds a field to it. -// Debug, Print, Info, Warn, Error, Fatal or Panic must be then applied to -// this new returned entry. -// If you want multiple fields, use `WithFields`. -func (logger *Logger) WithField(key string, value interface{}) *Entry { - entry := logger.newEntry() - defer logger.releaseEntry(entry) - return entry.WithField(key, value) -} - -// Adds a struct of fields to the log entry. All it does is call `WithField` for -// each `Field`. -func (logger *Logger) WithFields(fields Fields) *Entry { - entry := logger.newEntry() - defer logger.releaseEntry(entry) - return entry.WithFields(fields) -} - -// Add an error as single field to the log entry. All it does is call -// `WithError` for the given `error`. 
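For context on the Logger type deleted above: it is normally obtained via New and then adjusted through its setters rather than by mutating fields directly. A small illustrative sketch; the constructor name and the chosen output, formatter, and level below are arbitrary and not part of this change:

package main

import (
	"os"

	"github.com/sirupsen/logrus"
)

// newAuditLogger is a hypothetical helper used only for this sketch.
func newAuditLogger() *logrus.Logger {
	// New returns a Logger writing to os.Stderr with a TextFormatter at InfoLevel.
	l := logrus.New()
	l.SetOutput(os.Stdout) // redirect output
	l.SetFormatter(&logrus.TextFormatter{
		FullTimestamp: true, // full timestamps instead of elapsed seconds
	})
	l.SetLevel(logrus.WarnLevel) // drop Info/Debug entries
	return l
}

func main() {
	l := newAuditLogger()
	l.WithField("component", "select").Warn("falling back to default input format")
}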
-func (logger *Logger) WithError(err error) *Entry { - entry := logger.newEntry() - defer logger.releaseEntry(entry) - return entry.WithError(err) -} - -// Add a context to the log entry. -func (logger *Logger) WithContext(ctx context.Context) *Entry { - entry := logger.newEntry() - defer logger.releaseEntry(entry) - return entry.WithContext(ctx) -} - -// Overrides the time of the log entry. -func (logger *Logger) WithTime(t time.Time) *Entry { - entry := logger.newEntry() - defer logger.releaseEntry(entry) - return entry.WithTime(t) -} - -func (logger *Logger) Logf(level Level, format string, args ...interface{}) { - if logger.IsLevelEnabled(level) { - entry := logger.newEntry() - entry.Logf(level, format, args...) - logger.releaseEntry(entry) - } -} - -func (logger *Logger) Tracef(format string, args ...interface{}) { - logger.Logf(TraceLevel, format, args...) -} - -func (logger *Logger) Debugf(format string, args ...interface{}) { - logger.Logf(DebugLevel, format, args...) -} - -func (logger *Logger) Infof(format string, args ...interface{}) { - logger.Logf(InfoLevel, format, args...) -} - -func (logger *Logger) Printf(format string, args ...interface{}) { - entry := logger.newEntry() - entry.Printf(format, args...) - logger.releaseEntry(entry) -} - -func (logger *Logger) Warnf(format string, args ...interface{}) { - logger.Logf(WarnLevel, format, args...) -} - -func (logger *Logger) Warningf(format string, args ...interface{}) { - logger.Warnf(format, args...) -} - -func (logger *Logger) Errorf(format string, args ...interface{}) { - logger.Logf(ErrorLevel, format, args...) -} - -func (logger *Logger) Fatalf(format string, args ...interface{}) { - logger.Logf(FatalLevel, format, args...) - logger.Exit(1) -} - -func (logger *Logger) Panicf(format string, args ...interface{}) { - logger.Logf(PanicLevel, format, args...) -} - -// Log will log a message at the level given as parameter. -// Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit. -// For this behaviour Logger.Panic or Logger.Fatal should be used instead. -func (logger *Logger) Log(level Level, args ...interface{}) { - if logger.IsLevelEnabled(level) { - entry := logger.newEntry() - entry.Log(level, args...) - logger.releaseEntry(entry) - } -} - -func (logger *Logger) LogFn(level Level, fn LogFunction) { - if logger.IsLevelEnabled(level) { - entry := logger.newEntry() - entry.Log(level, fn()...) - logger.releaseEntry(entry) - } -} - -func (logger *Logger) Trace(args ...interface{}) { - logger.Log(TraceLevel, args...) -} - -func (logger *Logger) Debug(args ...interface{}) { - logger.Log(DebugLevel, args...) -} - -func (logger *Logger) Info(args ...interface{}) { - logger.Log(InfoLevel, args...) -} - -func (logger *Logger) Print(args ...interface{}) { - entry := logger.newEntry() - entry.Print(args...) - logger.releaseEntry(entry) -} - -func (logger *Logger) Warn(args ...interface{}) { - logger.Log(WarnLevel, args...) -} - -func (logger *Logger) Warning(args ...interface{}) { - logger.Warn(args...) -} - -func (logger *Logger) Error(args ...interface{}) { - logger.Log(ErrorLevel, args...) -} - -func (logger *Logger) Fatal(args ...interface{}) { - logger.Log(FatalLevel, args...) - logger.Exit(1) -} - -func (logger *Logger) Panic(args ...interface{}) { - logger.Log(PanicLevel, args...) 
-} - -func (logger *Logger) TraceFn(fn LogFunction) { - logger.LogFn(TraceLevel, fn) -} - -func (logger *Logger) DebugFn(fn LogFunction) { - logger.LogFn(DebugLevel, fn) -} - -func (logger *Logger) InfoFn(fn LogFunction) { - logger.LogFn(InfoLevel, fn) -} - -func (logger *Logger) PrintFn(fn LogFunction) { - entry := logger.newEntry() - entry.Print(fn()...) - logger.releaseEntry(entry) -} - -func (logger *Logger) WarnFn(fn LogFunction) { - logger.LogFn(WarnLevel, fn) -} - -func (logger *Logger) WarningFn(fn LogFunction) { - logger.WarnFn(fn) -} - -func (logger *Logger) ErrorFn(fn LogFunction) { - logger.LogFn(ErrorLevel, fn) -} - -func (logger *Logger) FatalFn(fn LogFunction) { - logger.LogFn(FatalLevel, fn) - logger.Exit(1) -} - -func (logger *Logger) PanicFn(fn LogFunction) { - logger.LogFn(PanicLevel, fn) -} - -func (logger *Logger) Logln(level Level, args ...interface{}) { - if logger.IsLevelEnabled(level) { - entry := logger.newEntry() - entry.Logln(level, args...) - logger.releaseEntry(entry) - } -} - -func (logger *Logger) Traceln(args ...interface{}) { - logger.Logln(TraceLevel, args...) -} - -func (logger *Logger) Debugln(args ...interface{}) { - logger.Logln(DebugLevel, args...) -} - -func (logger *Logger) Infoln(args ...interface{}) { - logger.Logln(InfoLevel, args...) -} - -func (logger *Logger) Println(args ...interface{}) { - entry := logger.newEntry() - entry.Println(args...) - logger.releaseEntry(entry) -} - -func (logger *Logger) Warnln(args ...interface{}) { - logger.Logln(WarnLevel, args...) -} - -func (logger *Logger) Warningln(args ...interface{}) { - logger.Warnln(args...) -} - -func (logger *Logger) Errorln(args ...interface{}) { - logger.Logln(ErrorLevel, args...) -} - -func (logger *Logger) Fatalln(args ...interface{}) { - logger.Logln(FatalLevel, args...) - logger.Exit(1) -} - -func (logger *Logger) Panicln(args ...interface{}) { - logger.Logln(PanicLevel, args...) -} - -func (logger *Logger) Exit(code int) { - runHandlers() - if logger.ExitFunc == nil { - logger.ExitFunc = os.Exit - } - logger.ExitFunc(code) -} - -//When file is opened with appending mode, it's safe to -//write concurrently to a file (within 4k message on Linux). -//In these cases user can choose to disable the lock. -func (logger *Logger) SetNoLock() { - logger.mu.Disable() -} - -func (logger *Logger) level() Level { - return Level(atomic.LoadUint32((*uint32)(&logger.Level))) -} - -// SetLevel sets the logger level. -func (logger *Logger) SetLevel(level Level) { - atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) -} - -// GetLevel returns the logger level. -func (logger *Logger) GetLevel() Level { - return logger.level() -} - -// AddHook adds a hook to the logger hooks. -func (logger *Logger) AddHook(hook Hook) { - logger.mu.Lock() - defer logger.mu.Unlock() - logger.Hooks.Add(hook) -} - -// IsLevelEnabled checks if the log level of the logger is greater than the level param -func (logger *Logger) IsLevelEnabled(level Level) bool { - return logger.level() >= level -} - -// SetFormatter sets the logger formatter. -func (logger *Logger) SetFormatter(formatter Formatter) { - logger.mu.Lock() - defer logger.mu.Unlock() - logger.Formatter = formatter -} - -// SetOutput sets the logger output. 
-func (logger *Logger) SetOutput(output io.Writer) { - logger.mu.Lock() - defer logger.mu.Unlock() - logger.Out = output -} - -func (logger *Logger) SetReportCaller(reportCaller bool) { - logger.mu.Lock() - defer logger.mu.Unlock() - logger.ReportCaller = reportCaller -} - -// ReplaceHooks replaces the logger hooks and returns the old ones -func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks { - logger.mu.Lock() - oldHooks := logger.Hooks - logger.Hooks = hooks - logger.mu.Unlock() - return oldHooks -} - -// SetBufferPool sets the logger buffer pool. -func (logger *Logger) SetBufferPool(pool BufferPool) { - logger.mu.Lock() - defer logger.mu.Unlock() - logger.BufferPool = pool -} diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go deleted file mode 100644 index 2f16224cb..000000000 --- a/vendor/github.com/sirupsen/logrus/logrus.go +++ /dev/null @@ -1,186 +0,0 @@ -package logrus - -import ( - "fmt" - "log" - "strings" -) - -// Fields type, used to pass to `WithFields`. -type Fields map[string]interface{} - -// Level type -type Level uint32 - -// Convert the Level to a string. E.g. PanicLevel becomes "panic". -func (level Level) String() string { - if b, err := level.MarshalText(); err == nil { - return string(b) - } else { - return "unknown" - } -} - -// ParseLevel takes a string level and returns the Logrus log level constant. -func ParseLevel(lvl string) (Level, error) { - switch strings.ToLower(lvl) { - case "panic": - return PanicLevel, nil - case "fatal": - return FatalLevel, nil - case "error": - return ErrorLevel, nil - case "warn", "warning": - return WarnLevel, nil - case "info": - return InfoLevel, nil - case "debug": - return DebugLevel, nil - case "trace": - return TraceLevel, nil - } - - var l Level - return l, fmt.Errorf("not a valid logrus Level: %q", lvl) -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (level *Level) UnmarshalText(text []byte) error { - l, err := ParseLevel(string(text)) - if err != nil { - return err - } - - *level = l - - return nil -} - -func (level Level) MarshalText() ([]byte, error) { - switch level { - case TraceLevel: - return []byte("trace"), nil - case DebugLevel: - return []byte("debug"), nil - case InfoLevel: - return []byte("info"), nil - case WarnLevel: - return []byte("warning"), nil - case ErrorLevel: - return []byte("error"), nil - case FatalLevel: - return []byte("fatal"), nil - case PanicLevel: - return []byte("panic"), nil - } - - return nil, fmt.Errorf("not a valid logrus level %d", level) -} - -// A constant exposing all logging levels -var AllLevels = []Level{ - PanicLevel, - FatalLevel, - ErrorLevel, - WarnLevel, - InfoLevel, - DebugLevel, - TraceLevel, -} - -// These are the different logging levels. You can set the logging level to log -// on your instance of logger, obtained with `logrus.New()`. -const ( - // PanicLevel level, highest level of severity. Logs and then calls panic with the - // message passed to Debug, Info, ... - PanicLevel Level = iota - // FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the - // logging level is set to Panic. - FatalLevel - // ErrorLevel level. Logs. Used for errors that should definitely be noted. - // Commonly used for hooks to send errors to an error tracking service. - ErrorLevel - // WarnLevel level. Non-critical entries that deserve eyes. - WarnLevel - // InfoLevel level. General operational entries about what's going on inside the - // application. - InfoLevel - // DebugLevel level. 
Usually only enabled when debugging. Very verbose logging. - DebugLevel - // TraceLevel level. Designates finer-grained informational events than the Debug. - TraceLevel -) - -// Won't compile if StdLogger can't be realized by a log.Logger -var ( - _ StdLogger = &log.Logger{} - _ StdLogger = &Entry{} - _ StdLogger = &Logger{} -) - -// StdLogger is what your logrus-enabled library should take, that way -// it'll accept a stdlib logger and a logrus logger. There's no standard -// interface, this is the closest we get, unfortunately. -type StdLogger interface { - Print(...interface{}) - Printf(string, ...interface{}) - Println(...interface{}) - - Fatal(...interface{}) - Fatalf(string, ...interface{}) - Fatalln(...interface{}) - - Panic(...interface{}) - Panicf(string, ...interface{}) - Panicln(...interface{}) -} - -// The FieldLogger interface generalizes the Entry and Logger types -type FieldLogger interface { - WithField(key string, value interface{}) *Entry - WithFields(fields Fields) *Entry - WithError(err error) *Entry - - Debugf(format string, args ...interface{}) - Infof(format string, args ...interface{}) - Printf(format string, args ...interface{}) - Warnf(format string, args ...interface{}) - Warningf(format string, args ...interface{}) - Errorf(format string, args ...interface{}) - Fatalf(format string, args ...interface{}) - Panicf(format string, args ...interface{}) - - Debug(args ...interface{}) - Info(args ...interface{}) - Print(args ...interface{}) - Warn(args ...interface{}) - Warning(args ...interface{}) - Error(args ...interface{}) - Fatal(args ...interface{}) - Panic(args ...interface{}) - - Debugln(args ...interface{}) - Infoln(args ...interface{}) - Println(args ...interface{}) - Warnln(args ...interface{}) - Warningln(args ...interface{}) - Errorln(args ...interface{}) - Fatalln(args ...interface{}) - Panicln(args ...interface{}) - - // IsDebugEnabled() bool - // IsInfoEnabled() bool - // IsWarnEnabled() bool - // IsErrorEnabled() bool - // IsFatalEnabled() bool - // IsPanicEnabled() bool -} - -// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is -// here for consistancy. Do not use. Use Logger or Entry instead. 
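The Level type and ParseLevel shown above round-trip between the lowercase level names and the numeric constants. A brief sketch of wiring a string option into the logger; the flag name is hypothetical and only illustrates the parse-then-set pattern:

package main

import (
	"flag"

	"github.com/sirupsen/logrus"
)

func main() {
	// --log-level is a hypothetical flag name for this sketch.
	levelFlag := flag.String("log-level", "info", "panic|fatal|error|warn|info|debug|trace")
	flag.Parse()

	// ParseLevel maps the name onto the Level constants declared above;
	// an unknown name returns an error rather than a default level.
	lvl, err := logrus.ParseLevel(*levelFlag)
	if err != nil {
		logrus.WithError(err).Fatal("invalid log level")
	}
	logrus.SetLevel(lvl)
	logrus.Debug("only shown when the configured level is debug or trace")
}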
-type Ext1FieldLogger interface { - FieldLogger - Tracef(format string, args ...interface{}) - Trace(args ...interface{}) - Traceln(args ...interface{}) -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go deleted file mode 100644 index 2403de981..000000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build appengine - -package logrus - -import ( - "io" -) - -func checkIfTerminal(w io.Writer) bool { - return true -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go deleted file mode 100644 index 499789984..000000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build darwin dragonfly freebsd netbsd openbsd -// +build !js - -package logrus - -import "golang.org/x/sys/unix" - -const ioctlReadTermios = unix.TIOCGETA - -func isTerminal(fd int) bool { - _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) - return err == nil -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_js.go b/vendor/github.com/sirupsen/logrus/terminal_check_js.go deleted file mode 100644 index ebdae3ec6..000000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_js.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build js - -package logrus - -func isTerminal(fd int) bool { - return false -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go deleted file mode 100644 index 97af92c68..000000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build js nacl plan9 - -package logrus - -import ( - "io" -) - -func checkIfTerminal(w io.Writer) bool { - return false -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go deleted file mode 100644 index 3293fb3ca..000000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build !appengine,!js,!windows,!nacl,!plan9 - -package logrus - -import ( - "io" - "os" -) - -func checkIfTerminal(w io.Writer) bool { - switch v := w.(type) { - case *os.File: - return isTerminal(int(v.Fd())) - default: - return false - } -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go deleted file mode 100644 index f6710b3bd..000000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go +++ /dev/null @@ -1,11 +0,0 @@ -package logrus - -import ( - "golang.org/x/sys/unix" -) - -// IsTerminal returns true if the given file descriptor is a terminal. 
-func isTerminal(fd int) bool { - _, err := unix.IoctlGetTermio(fd, unix.TCGETA) - return err == nil -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go deleted file mode 100644 index 04748b851..000000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build linux aix zos -// +build !js - -package logrus - -import "golang.org/x/sys/unix" - -const ioctlReadTermios = unix.TCGETS - -func isTerminal(fd int) bool { - _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) - return err == nil -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go deleted file mode 100644 index 2879eb50e..000000000 --- a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build !appengine,!js,windows - -package logrus - -import ( - "io" - "os" - - "golang.org/x/sys/windows" -) - -func checkIfTerminal(w io.Writer) bool { - switch v := w.(type) { - case *os.File: - handle := windows.Handle(v.Fd()) - var mode uint32 - if err := windows.GetConsoleMode(handle, &mode); err != nil { - return false - } - mode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING - if err := windows.SetConsoleMode(handle, mode); err != nil { - return false - } - return true - } - return false -} diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go deleted file mode 100644 index be2c6efe5..000000000 --- a/vendor/github.com/sirupsen/logrus/text_formatter.go +++ /dev/null @@ -1,339 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "os" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "time" - "unicode/utf8" -) - -const ( - red = 31 - yellow = 33 - blue = 36 - gray = 37 -) - -var baseTimestamp time.Time - -func init() { - baseTimestamp = time.Now() -} - -// TextFormatter formats logs into text -type TextFormatter struct { - // Set to true to bypass checking for a TTY before outputting colors. - ForceColors bool - - // Force disabling colors. - DisableColors bool - - // Force quoting of all values - ForceQuote bool - - // DisableQuote disables quoting for all values. - // DisableQuote will have a lower priority than ForceQuote. - // If both of them are set to true, quote will be forced on all values. - DisableQuote bool - - // Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/ - EnvironmentOverrideColors bool - - // Disable timestamp logging. useful when output is redirected to logging - // system that already adds timestamps. - DisableTimestamp bool - - // Enable logging the full timestamp when a TTY is attached instead of just - // the time passed since beginning of execution. - FullTimestamp bool - - // TimestampFormat to use for display when a full timestamp is printed. - // The format to use is the same than for time.Format or time.Parse from the standard - // library. - // The standard Library already provides a set of predefined format. - TimestampFormat string - - // The fields are sorted by default for a consistent output. For applications - // that log extremely frequently and don't use the JSON formatter this may not - // be desired. - DisableSorting bool - - // The keys sorting function, when uninitialized it uses sort.Strings. - SortingFunc func([]string) - - // Disables the truncation of the level text to 4 characters. 
- DisableLevelTruncation bool - - // PadLevelText Adds padding the level text so that all the levels output at the same length - // PadLevelText is a superset of the DisableLevelTruncation option - PadLevelText bool - - // QuoteEmptyFields will wrap empty fields in quotes if true - QuoteEmptyFields bool - - // Whether the logger's out is to a terminal - isTerminal bool - - // FieldMap allows users to customize the names of keys for default fields. - // As an example: - // formatter := &TextFormatter{ - // FieldMap: FieldMap{ - // FieldKeyTime: "@timestamp", - // FieldKeyLevel: "@level", - // FieldKeyMsg: "@message"}} - FieldMap FieldMap - - // CallerPrettyfier can be set by the user to modify the content - // of the function and file keys in the data when ReportCaller is - // activated. If any of the returned value is the empty string the - // corresponding key will be removed from fields. - CallerPrettyfier func(*runtime.Frame) (function string, file string) - - terminalInitOnce sync.Once - - // The max length of the level text, generated dynamically on init - levelTextMaxLength int -} - -func (f *TextFormatter) init(entry *Entry) { - if entry.Logger != nil { - f.isTerminal = checkIfTerminal(entry.Logger.Out) - } - // Get the max length of the level text - for _, level := range AllLevels { - levelTextLength := utf8.RuneCount([]byte(level.String())) - if levelTextLength > f.levelTextMaxLength { - f.levelTextMaxLength = levelTextLength - } - } -} - -func (f *TextFormatter) isColored() bool { - isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows")) - - if f.EnvironmentOverrideColors { - switch force, ok := os.LookupEnv("CLICOLOR_FORCE"); { - case ok && force != "0": - isColored = true - case ok && force == "0", os.Getenv("CLICOLOR") == "0": - isColored = false - } - } - - return isColored && !f.DisableColors -} - -// Format renders a single log entry -func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { - data := make(Fields) - for k, v := range entry.Data { - data[k] = v - } - prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) - keys := make([]string, 0, len(data)) - for k := range data { - keys = append(keys, k) - } - - var funcVal, fileVal string - - fixedKeys := make([]string, 0, 4+len(data)) - if !f.DisableTimestamp { - fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime)) - } - fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel)) - if entry.Message != "" { - fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg)) - } - if entry.err != "" { - fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError)) - } - if entry.HasCaller() { - if f.CallerPrettyfier != nil { - funcVal, fileVal = f.CallerPrettyfier(entry.Caller) - } else { - funcVal = entry.Caller.Function - fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) - } - - if funcVal != "" { - fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFunc)) - } - if fileVal != "" { - fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFile)) - } - } - - if !f.DisableSorting { - if f.SortingFunc == nil { - sort.Strings(keys) - fixedKeys = append(fixedKeys, keys...) - } else { - if !f.isColored() { - fixedKeys = append(fixedKeys, keys...) - f.SortingFunc(fixedKeys) - } else { - f.SortingFunc(keys) - } - } - } else { - fixedKeys = append(fixedKeys, keys...) 
- } - - var b *bytes.Buffer - if entry.Buffer != nil { - b = entry.Buffer - } else { - b = &bytes.Buffer{} - } - - f.terminalInitOnce.Do(func() { f.init(entry) }) - - timestampFormat := f.TimestampFormat - if timestampFormat == "" { - timestampFormat = defaultTimestampFormat - } - if f.isColored() { - f.printColored(b, entry, keys, data, timestampFormat) - } else { - - for _, key := range fixedKeys { - var value interface{} - switch { - case key == f.FieldMap.resolve(FieldKeyTime): - value = entry.Time.Format(timestampFormat) - case key == f.FieldMap.resolve(FieldKeyLevel): - value = entry.Level.String() - case key == f.FieldMap.resolve(FieldKeyMsg): - value = entry.Message - case key == f.FieldMap.resolve(FieldKeyLogrusError): - value = entry.err - case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller(): - value = funcVal - case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller(): - value = fileVal - default: - value = data[key] - } - f.appendKeyValue(b, key, value) - } - } - - b.WriteByte('\n') - return b.Bytes(), nil -} - -func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) { - var levelColor int - switch entry.Level { - case DebugLevel, TraceLevel: - levelColor = gray - case WarnLevel: - levelColor = yellow - case ErrorLevel, FatalLevel, PanicLevel: - levelColor = red - case InfoLevel: - levelColor = blue - default: - levelColor = blue - } - - levelText := strings.ToUpper(entry.Level.String()) - if !f.DisableLevelTruncation && !f.PadLevelText { - levelText = levelText[0:4] - } - if f.PadLevelText { - // Generates the format string used in the next line, for example "%-6s" or "%-7s". - // Based on the max level text length. - formatString := "%-" + strconv.Itoa(f.levelTextMaxLength) + "s" - // Formats the level text by appending spaces up to the max length, for example: - // - "INFO " - // - "WARNING" - levelText = fmt.Sprintf(formatString, levelText) - } - - // Remove a single newline if it already exists in the message to keep - // the behavior of logrus text_formatter the same as the stdlib log package - entry.Message = strings.TrimSuffix(entry.Message, "\n") - - caller := "" - if entry.HasCaller() { - funcVal := fmt.Sprintf("%s()", entry.Caller.Function) - fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) - - if f.CallerPrettyfier != nil { - funcVal, fileVal = f.CallerPrettyfier(entry.Caller) - } - - if fileVal == "" { - caller = funcVal - } else if funcVal == "" { - caller = fileVal - } else { - caller = fileVal + " " + funcVal - } - } - - switch { - case f.DisableTimestamp: - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message) - case !f.FullTimestamp: - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message) - default: - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message) - } - for _, k := range keys { - v := data[k] - fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) - f.appendValue(b, v) - } -} - -func (f *TextFormatter) needsQuoting(text string) bool { - if f.ForceQuote { - return true - } - if f.QuoteEmptyFields && len(text) == 0 { - return true - } - if f.DisableQuote { - return false - } - for _, ch := range text { - if !((ch >= 'a' && ch <= 'z') || - (ch >= 'A' && ch <= 'Z') || - (ch >= '0' && ch <= '9') || - ch == '-' || ch == '.' 
|| ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') { - return true - } - } - return false -} - -func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { - if b.Len() > 0 { - b.WriteByte(' ') - } - b.WriteString(key) - b.WriteByte('=') - f.appendValue(b, value) -} - -func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { - stringVal, ok := value.(string) - if !ok { - stringVal = fmt.Sprint(value) - } - - if !f.needsQuoting(stringVal) { - b.WriteString(stringVal) - } else { - b.WriteString(fmt.Sprintf("%q", stringVal)) - } -} diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go deleted file mode 100644 index 72e8e3a1b..000000000 --- a/vendor/github.com/sirupsen/logrus/writer.go +++ /dev/null @@ -1,70 +0,0 @@ -package logrus - -import ( - "bufio" - "io" - "runtime" -) - -// Writer at INFO level. See WriterLevel for details. -func (logger *Logger) Writer() *io.PipeWriter { - return logger.WriterLevel(InfoLevel) -} - -// WriterLevel returns an io.Writer that can be used to write arbitrary text to -// the logger at the given log level. Each line written to the writer will be -// printed in the usual way using formatters and hooks. The writer is part of an -// io.Pipe and it is the callers responsibility to close the writer when done. -// This can be used to override the standard library logger easily. -func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { - return NewEntry(logger).WriterLevel(level) -} - -func (entry *Entry) Writer() *io.PipeWriter { - return entry.WriterLevel(InfoLevel) -} - -func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { - reader, writer := io.Pipe() - - var printFunc func(args ...interface{}) - - switch level { - case TraceLevel: - printFunc = entry.Trace - case DebugLevel: - printFunc = entry.Debug - case InfoLevel: - printFunc = entry.Info - case WarnLevel: - printFunc = entry.Warn - case ErrorLevel: - printFunc = entry.Error - case FatalLevel: - printFunc = entry.Fatal - case PanicLevel: - printFunc = entry.Panic - default: - printFunc = entry.Print - } - - go entry.writerScanner(reader, printFunc) - runtime.SetFinalizer(writer, writerFinalizer) - - return writer -} - -func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - printFunc(scanner.Text()) - } - if err := scanner.Err(); err != nil { - entry.Errorf("Error while reading from Writer: %s", err) - } - reader.Close() -} - -func writerFinalizer(writer *io.PipeWriter) { - writer.Close() -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/.gitignore b/vendor/github.com/testcontainers/testcontainers-go/.gitignore deleted file mode 100644 index 4d27765dd..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/.gitignore +++ /dev/null @@ -1,13 +0,0 @@ -# Generated by golang tooling -debug.test -vendor - -# Generated docs -site/ -.direnv/ -src/mkdocs-codeinclude-plugin -src/pip-delete-this-directory.txt -.idea/ -.DS_Store - -TEST-*.xml diff --git a/vendor/github.com/testcontainers/testcontainers-go/.golangci.yml b/vendor/github.com/testcontainers/testcontainers-go/.golangci.yml deleted file mode 100644 index 89bf14cc4..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/.golangci.yml +++ /dev/null @@ -1,16 +0,0 @@ -linters: - enable: - - gci - - gofmt - - misspell - - -linters-settings: - gci: - sections: - - standard - - default - - 
prefix(github.com/testcontainters) - -run: - timeout: 3m diff --git a/vendor/github.com/testcontainers/testcontainers-go/CONTRIBUTING.md b/vendor/github.com/testcontainers/testcontainers-go/CONTRIBUTING.md deleted file mode 100644 index c8194c275..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/CONTRIBUTING.md +++ /dev/null @@ -1,13 +0,0 @@ -# Contributing - -Please see the [main contributing guidelines](./docs/contributing.md). - -There are additional docs describing [contributing documentation changes](./docs/contributing_docs.md). - -### GitHub Sponsorship - -Testcontainers is [in the GitHub Sponsors program](https://github.com/sponsors/testcontainers)! - -This repository is supported by our sponsors, meaning that issues are eligible to have a 'bounty' attached to them by sponsors. - -Please see [the bounty policy page](https://golang.testcontainers.org/bounty) if you are interested, either as a sponsor or as a contributor. diff --git a/vendor/github.com/testcontainers/testcontainers-go/LICENSE b/vendor/github.com/testcontainers/testcontainers-go/LICENSE deleted file mode 100644 index 607a9c3c4..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2017-2019 Gianluca Arbezzano - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/testcontainers/testcontainers-go/Makefile b/vendor/github.com/testcontainers/testcontainers-go/Makefile deleted file mode 100644 index cb9d2f49a..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/Makefile +++ /dev/null @@ -1,9 +0,0 @@ -include ./commons-test.mk - -.PHONY: test-all -test-all: tools test-unit - -.PHONY: test-examples -test-examples: - @echo "Running example tests..." 
- make -C examples test diff --git a/vendor/github.com/testcontainers/testcontainers-go/Pipfile b/vendor/github.com/testcontainers/testcontainers-go/Pipfile deleted file mode 100644 index 2236de86d..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/Pipfile +++ /dev/null @@ -1,15 +0,0 @@ -[[source]] -name = "pypi" -url = "https://pypi.org/simple" -verify_ssl = true - -[dev-packages] - -[packages] -mkdocs = "*" -mkdocs-codeinclude-plugin = "*" -mkdocs-material = "*" -mkdocs-markdownextradata-plugin = "*" - -[requires] -python_version = "3.7" diff --git a/vendor/github.com/testcontainers/testcontainers-go/Pipfile.lock b/vendor/github.com/testcontainers/testcontainers-go/Pipfile.lock deleted file mode 100644 index c208f40e2..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/Pipfile.lock +++ /dev/null @@ -1,298 +0,0 @@ -{ - "_meta": { - "hash": { - "sha256": "d3830267ea93391a077a2ba5b93aa8218c0a38002694b404b56df11498da3e56" - }, - "pipfile-spec": 6, - "requires": { - "python_version": "3.7" - }, - "sources": [ - { - "name": "pypi", - "url": "https://pypi.org/simple", - "verify_ssl": true - } - ] - }, - "default": { - "click": { - "hashes": [ - "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e", - "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48" - ], - "markers": "python_version >= '3.7'", - "version": "==8.1.3" - }, - "ghp-import": { - "hashes": [ - "sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619", - "sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343" - ], - "version": "==2.1.0" - }, - "importlib-metadata": { - "hashes": [ - "sha256:43dd286a2cd8995d5eaef7fee2066340423b818ed3fd70adf0bad5f1fac53fed", - "sha256:92501cdf9cc66ebd3e612f1b4f0c0765dfa42f0fa38ffb319b6bd84dd675d705" - ], - "markers": "python_version >= '3.7'", - "version": "==6.6.0" - }, - "jinja2": { - "hashes": [ - "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852", - "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61" - ], - "markers": "python_version >= '3.7'", - "version": "==3.1.2" - }, - "markdown": { - "hashes": [ - "sha256:065fd4df22da73a625f14890dd77eb8040edcbd68794bcd35943be14490608b2", - "sha256:8bf101198e004dc93e84a12a7395e31aac6a9c9942848ae1d99b9d72cf9b3520" - ], - "markers": "python_version >= '3.7'", - "version": "==3.4.3" - }, - "markupsafe": { - "hashes": [ - "sha256:0576fe974b40a400449768941d5d0858cc624e3249dfd1e0c33674e5c7ca7aed", - "sha256:085fd3201e7b12809f9e6e9bc1e5c96a368c8523fad5afb02afe3c051ae4afcc", - "sha256:090376d812fb6ac5f171e5938e82e7f2d7adc2b629101cec0db8b267815c85e2", - "sha256:0b462104ba25f1ac006fdab8b6a01ebbfbce9ed37fd37fd4acd70c67c973e460", - "sha256:137678c63c977754abe9086a3ec011e8fd985ab90631145dfb9294ad09c102a7", - "sha256:1bea30e9bf331f3fef67e0a3877b2288593c98a21ccb2cf29b74c581a4eb3af0", - "sha256:22152d00bf4a9c7c83960521fc558f55a1adbc0631fbb00a9471e097b19d72e1", - "sha256:22731d79ed2eb25059ae3df1dfc9cb1546691cc41f4e3130fe6bfbc3ecbbecfa", - "sha256:2298c859cfc5463f1b64bd55cb3e602528db6fa0f3cfd568d3605c50678f8f03", - "sha256:28057e985dace2f478e042eaa15606c7efccb700797660629da387eb289b9323", - "sha256:2e7821bffe00aa6bd07a23913b7f4e01328c3d5cc0b40b36c0bd81d362faeb65", - "sha256:2ec4f2d48ae59bbb9d1f9d7efb9236ab81429a764dedca114f5fdabbc3788013", - "sha256:340bea174e9761308703ae988e982005aedf427de816d1afe98147668cc03036", - "sha256:40627dcf047dadb22cd25ea7ecfe9cbf3bbbad0482ee5920b582f3809c97654f", - 
"sha256:40dfd3fefbef579ee058f139733ac336312663c6706d1163b82b3003fb1925c4", - "sha256:4cf06cdc1dda95223e9d2d3c58d3b178aa5dacb35ee7e3bbac10e4e1faacb419", - "sha256:50c42830a633fa0cf9e7d27664637532791bfc31c731a87b202d2d8ac40c3ea2", - "sha256:55f44b440d491028addb3b88f72207d71eeebfb7b5dbf0643f7c023ae1fba619", - "sha256:608e7073dfa9e38a85d38474c082d4281f4ce276ac0010224eaba11e929dd53a", - "sha256:63ba06c9941e46fa389d389644e2d8225e0e3e5ebcc4ff1ea8506dce646f8c8a", - "sha256:65608c35bfb8a76763f37036547f7adfd09270fbdbf96608be2bead319728fcd", - "sha256:665a36ae6f8f20a4676b53224e33d456a6f5a72657d9c83c2aa00765072f31f7", - "sha256:6d6607f98fcf17e534162f0709aaad3ab7a96032723d8ac8750ffe17ae5a0666", - "sha256:7313ce6a199651c4ed9d7e4cfb4aa56fe923b1adf9af3b420ee14e6d9a73df65", - "sha256:7668b52e102d0ed87cb082380a7e2e1e78737ddecdde129acadb0eccc5423859", - "sha256:7df70907e00c970c60b9ef2938d894a9381f38e6b9db73c5be35e59d92e06625", - "sha256:7e007132af78ea9df29495dbf7b5824cb71648d7133cf7848a2a5dd00d36f9ff", - "sha256:835fb5e38fd89328e9c81067fd642b3593c33e1e17e2fdbf77f5676abb14a156", - "sha256:8bca7e26c1dd751236cfb0c6c72d4ad61d986e9a41bbf76cb445f69488b2a2bd", - "sha256:8db032bf0ce9022a8e41a22598eefc802314e81b879ae093f36ce9ddf39ab1ba", - "sha256:99625a92da8229df6d44335e6fcc558a5037dd0a760e11d84be2260e6f37002f", - "sha256:9cad97ab29dfc3f0249b483412c85c8ef4766d96cdf9dcf5a1e3caa3f3661cf1", - "sha256:a4abaec6ca3ad8660690236d11bfe28dfd707778e2442b45addd2f086d6ef094", - "sha256:a6e40afa7f45939ca356f348c8e23048e02cb109ced1eb8420961b2f40fb373a", - "sha256:a6f2fcca746e8d5910e18782f976489939d54a91f9411c32051b4aab2bd7c513", - "sha256:a806db027852538d2ad7555b203300173dd1b77ba116de92da9afbc3a3be3eed", - "sha256:abcabc8c2b26036d62d4c746381a6f7cf60aafcc653198ad678306986b09450d", - "sha256:b8526c6d437855442cdd3d87eede9c425c4445ea011ca38d937db299382e6fa3", - "sha256:bb06feb762bade6bf3c8b844462274db0c76acc95c52abe8dbed28ae3d44a147", - "sha256:c0a33bc9f02c2b17c3ea382f91b4db0e6cde90b63b296422a939886a7a80de1c", - "sha256:c4a549890a45f57f1ebf99c067a4ad0cb423a05544accaf2b065246827ed9603", - "sha256:ca244fa73f50a800cf8c3ebf7fd93149ec37f5cb9596aa8873ae2c1d23498601", - "sha256:cf877ab4ed6e302ec1d04952ca358b381a882fbd9d1b07cccbfd61783561f98a", - "sha256:d9d971ec1e79906046aa3ca266de79eac42f1dbf3612a05dc9368125952bd1a1", - "sha256:da25303d91526aac3672ee6d49a2f3db2d9502a4a60b55519feb1a4c7714e07d", - "sha256:e55e40ff0cc8cc5c07996915ad367fa47da6b3fc091fdadca7f5403239c5fec3", - "sha256:f03a532d7dee1bed20bc4884194a16160a2de9ffc6354b3878ec9682bb623c54", - "sha256:f1cd098434e83e656abf198f103a8207a8187c0fc110306691a2e94a78d0abb2", - "sha256:f2bfb563d0211ce16b63c7cb9395d2c682a23187f54c3d79bfec33e6705473c6", - "sha256:f8ffb705ffcf5ddd0e80b65ddf7bed7ee4f5a441ea7d3419e861a12eaf41af58" - ], - "markers": "python_version >= '3.7'", - "version": "==2.1.2" - }, - "mergedeep": { - "hashes": [ - "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8", - "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307" - ], - "markers": "python_version >= '3.6'", - "version": "==1.3.4" - }, - "mkdocs": { - "hashes": [ - "sha256:89f5a094764381cda656af4298727c9f53dc3e602983087e1fe96ea1df24f4c1", - "sha256:a1fa8c2d0c1305d7fc2b9d9f607c71778572a8b110fb26642aa00296c9e6d072" - ], - "index": "pypi", - "version": "==1.2.3" - }, - "mkdocs-codeinclude-plugin": { - "hashes": [ - "sha256:172a917c9b257fa62850b669336151f85d3cd40312b2b52520cbcceab557ea6c", - "sha256:305387f67a885f0e36ec1cf977324fe1fe50d31301147194b63631d0864601b1" - ], - "index": 
"pypi", - "version": "==0.2.1" - }, - "mkdocs-markdownextradata-plugin": { - "hashes": [ - "sha256:64d1c966b288d653f51f7531c03204eb988d0d77e56055c9d703d99105259a36" - ], - "index": "pypi", - "version": "==0.0.5" - }, - "mkdocs-material": { - "hashes": [ - "sha256:524debb6ee8ee89cee08886f2a67c3c3875c0ee9579c598d7448cbd2607cd3b7", - "sha256:62ae84082fa9f077c86b7db63e7bedf392005041b451defc850f8d0887a11e91" - ], - "index": "pypi", - "version": "==3.2.0" - }, - "packaging": { - "hashes": [ - "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61", - "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f" - ], - "markers": "python_version >= '3.7'", - "version": "==23.1" - }, - "pygments": { - "hashes": [ - "sha256:8ace4d3c1dd481894b2005f560ead0f9f19ee64fe983366be1a21e171d12775c", - "sha256:db2db3deb4b4179f399a09054b023b6a586b76499d36965813c71aa8ed7b5fd1" - ], - "markers": "python_version >= '3.7'", - "version": "==2.15.1" - }, - "pymdown-extensions": { - "hashes": [ - "sha256:9a77955e63528c2ee98073a1fb3207c1a45607bc74a34ef21acd098f46c3aa8a", - "sha256:e6cbe8ace7d8feda30bc4fd6a21a073893a9a0e90c373e92d69ce5b653051f55" - ], - "index": "pypi", - "version": "==10.0" - }, - "python-dateutil": { - "hashes": [ - "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86", - "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9" - ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==2.8.2" - }, - "pyyaml": { - "hashes": [ - "sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf", - "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293", - "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b", - "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57", - "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b", - "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4", - "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07", - "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba", - "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9", - "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287", - "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513", - "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0", - "sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782", - "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0", - "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92", - "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f", - "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2", - "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc", - "sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1", - "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c", - "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86", - "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4", - "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c", - "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34", - "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b", - 
"sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d", - "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c", - "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb", - "sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7", - "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737", - "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3", - "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d", - "sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358", - "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53", - "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78", - "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803", - "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a", - "sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f", - "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174", - "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5" - ], - "markers": "python_version >= '3.6'", - "version": "==6.0" - }, - "pyyaml-env-tag": { - "hashes": [ - "sha256:70092675bda14fdec33b31ba77e7543de9ddc88f2e5b99160396572d11525bdb", - "sha256:af31106dec8a4d68c60207c1886031cbf839b68aa7abccdb19868200532c2069" - ], - "markers": "python_version >= '3.6'", - "version": "==0.1" - }, - "six": { - "hashes": [ - "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926", - "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254" - ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==1.16.0" - }, - "typing-extensions": { - "hashes": [ - "sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb", - "sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4" - ], - "markers": "python_version < '3.8'", - "version": "==4.5.0" - }, - "watchdog": { - "hashes": [ - "sha256:0e06ab8858a76e1219e68c7573dfeba9dd1c0219476c5a44d5333b01d7e1743a", - "sha256:13bbbb462ee42ec3c5723e1205be8ced776f05b100e4737518c67c8325cf6100", - "sha256:233b5817932685d39a7896b1090353fc8efc1ef99c9c054e46c8002561252fb8", - "sha256:25f70b4aa53bd743729c7475d7ec41093a580528b100e9a8c5b5efe8899592fc", - "sha256:2b57a1e730af3156d13b7fdddfc23dea6487fceca29fc75c5a868beed29177ae", - "sha256:336adfc6f5cc4e037d52db31194f7581ff744b67382eb6021c868322e32eef41", - "sha256:3aa7f6a12e831ddfe78cdd4f8996af9cf334fd6346531b16cec61c3b3c0d8da0", - "sha256:3ed7c71a9dccfe838c2f0b6314ed0d9b22e77d268c67e015450a29036a81f60f", - "sha256:4c9956d27be0bb08fc5f30d9d0179a855436e655f046d288e2bcc11adfae893c", - "sha256:4d98a320595da7a7c5a18fc48cb633c2e73cda78f93cac2ef42d42bf609a33f9", - "sha256:4f94069eb16657d2c6faada4624c39464f65c05606af50bb7902e036e3219be3", - "sha256:5113334cf8cf0ac8cd45e1f8309a603291b614191c9add34d33075727a967709", - "sha256:51f90f73b4697bac9c9a78394c3acbbd331ccd3655c11be1a15ae6fe289a8c83", - "sha256:5d9f3a10e02d7371cd929b5d8f11e87d4bad890212ed3901f9b4d68767bee759", - "sha256:7ade88d0d778b1b222adebcc0927428f883db07017618a5e684fd03b83342bd9", - "sha256:7c5f84b5194c24dd573fa6472685b2a27cc5a17fe5f7b6fd40345378ca6812e3", - "sha256:7e447d172af52ad204d19982739aa2346245cc5ba6f579d16dac4bfec226d2e7", - "sha256:8ae9cda41fa114e28faf86cb137d751a17ffd0316d1c34ccf2235e8a84365c7f", - "sha256:8f3ceecd20d71067c7fd4c9e832d4e22584318983cabc013dbf3f70ea95de346", - 
"sha256:9fac43a7466eb73e64a9940ac9ed6369baa39b3bf221ae23493a9ec4d0022674", - "sha256:a70a8dcde91be523c35b2bf96196edc5730edb347e374c7de7cd20c43ed95397", - "sha256:adfdeab2da79ea2f76f87eb42a3ab1966a5313e5a69a0213a3cc06ef692b0e96", - "sha256:ba07e92756c97e3aca0912b5cbc4e5ad802f4557212788e72a72a47ff376950d", - "sha256:c07253088265c363d1ddf4b3cdb808d59a0468ecd017770ed716991620b8f77a", - "sha256:c9d8c8ec7efb887333cf71e328e39cffbf771d8f8f95d308ea4125bf5f90ba64", - "sha256:d00e6be486affb5781468457b21a6cbe848c33ef43f9ea4a73b4882e5f188a44", - "sha256:d429c2430c93b7903914e4db9a966c7f2b068dd2ebdd2fa9b9ce094c7d459f33" - ], - "markers": "python_version >= '3.7'", - "version": "==3.0.0" - }, - "zipp": { - "hashes": [ - "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b", - "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556" - ], - "markers": "python_version >= '3.7'", - "version": "==3.15.0" - } - }, - "develop": {} -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/README.md b/vendor/github.com/testcontainers/testcontainers-go/README.md deleted file mode 100644 index 01ecc7334..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# Testcontainers - -[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://github.com/codespaces/new?hide_repo_select=true&ref=main&repo=141451032&machine=standardLinux32gb&devcontainer_path=.devcontainer%2Fdevcontainer.json&location=EastUs) - -[![Main pipeline](https://github.com/testcontainers/testcontainers-go/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/testcontainers/testcontainers-go/actions/workflows/ci.yml) -[![Go Report Card](https://goreportcard.com/badge/github.com/testcontainers/testcontainers-go)](https://goreportcard.com/report/github.com/testcontainers/testcontainers-go) -[![GoDoc Reference](https://camo.githubusercontent.com/8609cfcb531fa0f5598a3d4353596fae9336cce3/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f79616e6777656e6d61692f686f772d746f2d6164642d62616467652d696e2d6769746875622d726561646d653f7374617475732e737667)](https://pkg.go.dev/github.com/testcontainers/testcontainers-go) - -_Testcontainers for Go_ is a Go package that makes it simple to create and clean up container-based dependencies for -automated integration/smoke tests. The clean, easy-to-use API enables developers to programmatically define containers -that should be run as part of a test and clean up those resources when the test is done. - -You can find more information about _Testcontainers for Go_ at [golang.testcontainers.org](https://golang.testcontainers.org), which is rendered from the [./docs](./docs) directory. - -## Using _Testcontainers for Go_ - -Please visit [the quickstart guide](https://golang.testcontainers.org/quickstart) to understand how to add the dependency to your Go project. diff --git a/vendor/github.com/testcontainers/testcontainers-go/RELEASING.md b/vendor/github.com/testcontainers/testcontainers-go/RELEASING.md deleted file mode 100644 index 3990216f7..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/RELEASING.md +++ /dev/null @@ -1,203 +0,0 @@ -# Releasing Testcontainers for Go - -In order to create a release, we have added a shell script that performs all the tasks for you, allowing a dry-run mode for checking it before creating the release. We are going to explain how to use it in this document. 
- -## Prerequisites - -First, it's really important that you first check that the [version.go](./internal/version.go) file is up-to-date, containing the right version you want to create. That file will be used by the automation to perform the release. -Once the version file is correct in the repository: - -Second, check that the git remote for the `origin` is pointing to `github.com/testcontainers/testcontainers-go`. You can check it by running: - -```shell -git remote -v -``` - -## Prepare the release - -Once the remote is properly set, please follow these steps: - -- Run the [pre-release.sh](./scripts/pre-release.sh) shell script to run it in dry-run mode. -- You can use the `DRY_RUN` variable to enable or disable the dry-run mode. By default, it's enabled. -- To prepare for a release, updating the _Testcontainers for Go_ dependency for all the modules and examples, without performing any Git operation: - - DRY_RUN="false" ./scripts/pre-release.sh - -- The script will update the [mkdocs.yml](./mkdocks.yml) file, updating the `latest_version` field to the current version. -- The script will update the `go.mod` files for each Go modules and example modules under the examples and modules directories, updating the version of the testcontainers-go dependency to the recently created tag. -- The script will modify the docs for the each Go module **that was not released yet**, updating the version of _Testcontainers for Go_ where it was added to the recently created tag. - -An example execution, with dry-run mode enabled: - -```shell -sed "s/latest_version: .*/latest_version: v0.20.1/g" /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/mkdocs.yml > /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/mkdocs.yml.tmp -mv /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/mkdocs.yml.tmp /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/mkdocs.yml -sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" bigtable/go.mod > bigtable/go.mod.tmp -mv bigtable/go.mod.tmp bigtable/go.mod -sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" cockroachdb/go.mod > cockroachdb/go.mod.tmp -mv cockroachdb/go.mod.tmp cockroachdb/go.mod -sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" consul/go.mod > consul/go.mod.tmp -mv consul/go.mod.tmp consul/go.mod -sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" datastore/go.mod > datastore/go.mod.tmp -mv datastore/go.mod.tmp datastore/go.mod -sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" firestore/go.mod > firestore/go.mod.tmp -mv firestore/go.mod.tmp firestore/go.mod -sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" mongodb/go.mod > mongodb/go.mod.tmp -mv mongodb/go.mod.tmp mongodb/go.mod -sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" nginx/go.mod > nginx/go.mod.tmp -mv nginx/go.mod.tmp nginx/go.mod -sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" pubsub/go.mod > pubsub/go.mod.tmp -mv pubsub/go.mod.tmp pubsub/go.mod -sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" spanner/go.mod > spanner/go.mod.tmp -mv spanner/go.mod.tmp spanner/go.mod -sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" toxiproxy/go.mod > toxiproxy/go.mod.tmp -mv toxiproxy/go.mod.tmp toxiproxy/go.mod -go mod tidy -go mod tidy -go mod tidy -go mod tidy -go mod tidy -go mod tidy -go mod tidy -go mod tidy -go mod tidy -go mod tidy -go mod tidy -sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" compose/go.mod > 
compose/go.mod.tmp -mv compose/go.mod.tmp compose/go.mod -sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" couchbase/go.mod > couchbase/go.mod.tmp -mv couchbase/go.mod.tmp couchbase/go.mod -sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" localstack/go.mod > localstack/go.mod.tmp -mv localstack/go.mod.tmp localstack/go.mod -sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" mysql/go.mod > mysql/go.mod.tmp -mv mysql/go.mod.tmp mysql/go.mod -sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" neo4j/go.mod > neo4j/go.mod.tmp -mv neo4j/go.mod.tmp neo4j/go.mod -sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" postgres/go.mod > postgres/go.mod.tmp -mv postgres/go.mod.tmp postgres/go.mod -sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" pulsar/go.mod > pulsar/go.mod.tmp -mv pulsar/go.mod.tmp pulsar/go.mod -sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" redis/go.mod > redis/go.mod.tmp -mv redis/go.mod.tmp redis/go.mod -sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" redpanda/go.mod > redpanda/go.mod.tmp -mv redpanda/go.mod.tmp redpanda/go.mod -sed "s/testcontainers-go v.*/testcontainers-go v0.20.1/g" vault/go.mod > vault/go.mod.tmp -mv vault/go.mod.tmp vault/go.mod -go mod tidy -go mod tidy -go mod tidy -go mod tidy -go mod tidy -go mod tidy -go mod tidy -go mod tidy -go mod tidy -go mod tidy -sed "s/Not available until the next release of testcontainers-go :material-tag: main<\/span><\/a>/Since testcontainers-go :material-tag: v0.20.1<\/span><\/a>/g" couchbase.md > couchbase.md.tmp -mv couchbase.md.tmp couchbase.md -sed "s/Not available until the next release of testcontainers-go :material-tag: main<\/span><\/a>/Since testcontainers-go :material-tag: v0.20.1<\/span><\/a>/g" localstack.md > localstack.md.tmp -mv localstack.md.tmp localstack.md -sed "s/Not available until the next release of testcontainers-go :material-tag: main<\/span><\/a>/Since testcontainers-go :material-tag: v0.20.1<\/span><\/a>/g" mysql.md > mysql.md.tmp -mv mysql.md.tmp mysql.md -sed "s/Not available until the next release of testcontainers-go :material-tag: main<\/span><\/a>/Since testcontainers-go :material-tag: v0.20.1<\/span><\/a>/g" neo4j.md > neo4j.md.tmp -mv neo4j.md.tmp neo4j.md -sed "s/Not available until the next release of testcontainers-go :material-tag: main<\/span><\/a>/Since testcontainers-go :material-tag: v0.20.1<\/span><\/a>/g" postgres.md > postgres.md.tmp -mv postgres.md.tmp postgres.md -sed "s/Not available until the next release of testcontainers-go :material-tag: main<\/span><\/a>/Since testcontainers-go :material-tag: v0.20.1<\/span><\/a>/g" pulsar.md > pulsar.md.tmp -mv pulsar.md.tmp pulsar.md -sed "s/Not available until the next release of testcontainers-go :material-tag: main<\/span><\/a>/Since testcontainers-go :material-tag: v0.20.1<\/span><\/a>/g" redis.md > redis.md.tmp -mv redis.md.tmp redis.md -sed "s/Not available until the next release of testcontainers-go :material-tag: main<\/span><\/a>/Since testcontainers-go :material-tag: v0.20.1<\/span><\/a>/g" redpanda.md > redpanda.md.tmp -mv redpanda.md.tmp redpanda.md -sed "s/Not available until the next release of testcontainers-go :material-tag: main<\/span><\/a>/Since testcontainers-go :material-tag: v0.20.1<\/span><\/a>/g" vault.md > vault.md.tmp -mv vault.md.tmp vault.md -``` - -## Performing a release - -Once you are satisfied with the modified files in the git state: - -- Run the [release.sh](./scripts/release.sh) shell script to create the release in dry-run mode. 
-- You can use the `DRY_RUN` variable to enable or disable the dry-run mode. By default, it's enabled. - - DRY_RUN="false" ./scripts/release.sh - -- You can define the bump type, using the `BUMP_TYPE` environment variable. The default value is `minor`, but you can also use `major` or `patch` (the script will fail if the value is not one of these three): - - BUMP_TYPE="major" ./scripts/release.sh - -- The script will commit the current state of the git repository, if the `DRY_RUN` variable is set to `false`. The modified files are the ones modified by the `pre-release.sh` script. -- The script will create a git tag with the current value of the [version.go](./internal/version.go) file, starting with `v`: e.g. `v0.18.0`, for the following Go modules: - - the root module, representing the Testcontainers for Go library. - - all the Go modules living in both the `examples` and `modules` directory. The git tag value for these Go modules will be created using this name convention: - - "${directory}/${module_name}/${version}", e.g. "examples/mysql/v0.18.0", "modules/compose/v0.18.0" - -- The script will update the [version.go](./internal/version.go) file, setting the next development version to the value defined in the `BUMP_TYPE` environment variable. For example, if the current version is `v0.18.0`, the script will update the [version.go](./internal/version.go) file with the next development version `v0.19.0`. -- The script will create a commit in the **main** branch if the `DRY_RUN` variable is set to `false`. -- The script will push the main branch including the tags to the upstream repository, https://github.com/testcontainers/testcontainers-go, if the `DRY_RUN` variable is set to `false`. -- Finally, the script will trigger the Golang proxy to update the modules in https://proxy.golang.org/, if the `DRY_RUN` variable is set to `false`. 
- -An example execution, with dry-run mode enabled: - -``` -$ ./scripts/release.sh -Current version: v0.20.1 -git add /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/internal/version.go -git add /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/mkdocs.yml -git add examples/**/go.* -git add modules/**/go.* -git commit -m chore: use new version (v0.20.1) in modules and examples -git tag v0.20.1 -git tag examples/bigtable/v0.20.1 -git tag examples/cockroachdb/v0.20.1 -git tag examples/consul/v0.20.1 -git tag examples/datastore/v0.20.1 -git tag examples/firestore/v0.20.1 -git tag examples/mongodb/v0.20.1 -git tag examples/nginx/v0.20.1 -git tag examples/pubsub/v0.20.1 -git tag examples/spanner/v0.20.1 -git tag examples/toxiproxy/v0.20.1 -git tag modules/compose/v0.20.1 -git tag modules/couchbase/v0.20.1 -git tag modules/localstack/v0.20.1 -git tag modules/mysql/v0.20.1 -git tag modules/neo4j/v0.20.1 -git tag modules/postgres/v0.20.1 -git tag modules/pulsar/v0.20.1 -git tag modules/redis/v0.20.1 -git tag modules/redpanda/v0.20.1 -git tag modules/vault/v0.20.1 -WARNING: The requested image's platform (linux/amd64) does not match the detected host platform (linux/arm64/v8) and no specific platform was requested -Producing a minor bump of the version, from 0.20.1 to 0.21.0 -sed "s/const Version = ".*"/const Version = "0.21.0"/g" /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/internal/version.go > /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/internal/version.go.tmp -mv /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/internal/version.go.tmp /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/internal/version.go -git add /Users/mdelapenya/sourcecode/src/github.com/testcontainers/testcontainers-go/internal/version.go -git commit -m chore: prepare for next minor development cycle (0.21.0) -git push origin main --tags -curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/@v/v0.20.1.info -curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/examples/bigtable/@v/v0.20.1.info -curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/examples/cockroachdb/@v/v0.20.1.info -curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/examples/consul/@v/v0.20.1.info -curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/examples/datastore/@v/v0.20.1.info -curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/examples/firestore/@v/v0.20.1.info -curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/examples/mongodb/@v/v0.20.1.info -curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/examples/nginx/@v/v0.20.1.info -curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/examples/pubsub/@v/v0.20.1.info -curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/examples/spanner/@v/v0.20.1.info -curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/examples/toxiproxy/@v/v0.20.1.info -curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/modules/compose/@v/v0.20.1.info -curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/modules/couchbase/@v/v0.20.1.info -curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/modules/localstack/@v/v0.20.1.info -curl 
https://proxy.golang.org/github.com/testcontainers/testcontainers-go/modules/mysql/@v/v0.20.1.info -curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/modules/neo4j/@v/v0.20.1.info -curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/modules/postgres/@v/v0.20.1.info -curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/modules/pulsar/@v/v0.20.1.info -curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/modules/redis/@v/v0.20.1.info -curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/modules/redpanda/@v/v0.20.1.info -curl https://proxy.golang.org/github.com/testcontainers/testcontainers-go/modules/vault/@v/v0.20.1.info -``` - -Right after that, you have to: -- Verify that the commits are in the upstream repository, otherwise, update it with the current state of the main branch. diff --git a/vendor/github.com/testcontainers/testcontainers-go/commons-test.mk b/vendor/github.com/testcontainers/testcontainers-go/commons-test.mk deleted file mode 100644 index 4a1213d96..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/commons-test.mk +++ /dev/null @@ -1,21 +0,0 @@ -.PHONY: dependencies-scan -dependencies-scan: - @echo ">> Scanning dependencies in $(CURDIR)..." - go list -json -m all | docker run --rm -i sonatypecommunity/nancy:latest sleuth --skip-update-check - -.PHONY: test-% -test-%: - @echo "Running $* tests..." - go run gotest.tools/gotestsum \ - --format short-verbose \ - --rerun-fails=5 \ - --packages="./..." \ - --junitfile TEST-$*.xml - -.PHONY: tools -tools: - go mod download - -.PHONY: tools-tidy -tools-tidy: - go mod tidy diff --git a/vendor/github.com/testcontainers/testcontainers-go/config.go b/vendor/github.com/testcontainers/testcontainers-go/config.go deleted file mode 100644 index 91a333107..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/config.go +++ /dev/null @@ -1,29 +0,0 @@ -package testcontainers - -import ( - "github.com/testcontainers/testcontainers-go/internal/config" -) - -// TestcontainersConfig represents the configuration for Testcontainers -type TestcontainersConfig struct { - Host string `properties:"docker.host,default="` // Deprecated: use Config.Host instead - TLSVerify int `properties:"docker.tls.verify,default=0"` // Deprecated: use Config.TLSVerify instead - CertPath string `properties:"docker.cert.path,default="` // Deprecated: use Config.CertPath instead - RyukDisabled bool `properties:"ryuk.disabled,default=false"` // Deprecated: use Config.RyukDisabled instead - RyukPrivileged bool `properties:"ryuk.container.privileged,default=false"` // Deprecated: use Config.RyukPrivileged instead - Config config.Config -} - -// ReadConfig reads from testcontainers properties file, storing the result in a singleton instance -// of the TestcontainersConfig struct -func ReadConfig() TestcontainersConfig { - cfg := config.Read() - return TestcontainersConfig{ - Host: cfg.Host, - TLSVerify: cfg.TLSVerify, - CertPath: cfg.CertPath, - RyukDisabled: cfg.RyukDisabled, - RyukPrivileged: cfg.RyukPrivileged, - Config: cfg, - } -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/container.go b/vendor/github.com/testcontainers/testcontainers-go/container.go deleted file mode 100644 index 77ce49e32..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/container.go +++ /dev/null @@ -1,263 +0,0 @@ -package testcontainers - -import ( - "context" - "errors" - "fmt" - "io" - "path/filepath" - "time" - - 
"github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/pkg/archive" - "github.com/docker/go-connections/nat" - tcexec "github.com/testcontainers/testcontainers-go/exec" - "github.com/testcontainers/testcontainers-go/internal/testcontainersdocker" - "github.com/testcontainers/testcontainers-go/wait" -) - -// DeprecatedContainer shows methods that were supported before, but are now deprecated -// Deprecated: Use Container -type DeprecatedContainer interface { - GetHostEndpoint(ctx context.Context, port string) (string, string, error) - GetIPAddress(ctx context.Context) (string, error) - LivenessCheckPorts(ctx context.Context) (nat.PortSet, error) - Terminate(ctx context.Context) error -} - -// Container allows getting info about and controlling a single container instance -type Container interface { - GetContainerID() string // get the container id from the provider - Endpoint(context.Context, string) (string, error) // get proto://ip:port string for the first exposed port - PortEndpoint(context.Context, nat.Port, string) (string, error) // get proto://ip:port string for the given exposed port - Host(context.Context) (string, error) // get host where the container port is exposed - MappedPort(context.Context, nat.Port) (nat.Port, error) // get externally mapped port for a container port - Ports(context.Context) (nat.PortMap, error) // get all exposed ports - SessionID() string // get session id - IsRunning() bool - Start(context.Context) error // start the container - Stop(context.Context, *time.Duration) error // stop the container - Terminate(context.Context) error // terminate the container - Logs(context.Context) (io.ReadCloser, error) // Get logs of the container - FollowOutput(LogConsumer) - StartLogProducer(context.Context) error - StopLogProducer() error - Name(context.Context) (string, error) // get container name - State(context.Context) (*types.ContainerState, error) // returns container's running state - Networks(context.Context) ([]string, error) // get container networks - NetworkAliases(context.Context) (map[string][]string, error) // get container network aliases for a network - Exec(ctx context.Context, cmd []string, options ...tcexec.ProcessOption) (int, io.Reader, error) - ContainerIP(context.Context) (string, error) // get container ip - ContainerIPs(context.Context) ([]string, error) // get all container IPs - CopyToContainer(ctx context.Context, fileContent []byte, containerFilePath string, fileMode int64) error - CopyDirToContainer(ctx context.Context, hostDirPath string, containerParentPath string, fileMode int64) error - CopyFileToContainer(ctx context.Context, hostFilePath string, containerFilePath string, fileMode int64) error - CopyFileFromContainer(ctx context.Context, filePath string) (io.ReadCloser, error) -} - -// ImageBuildInfo defines what is needed to build an image -type ImageBuildInfo interface { - GetContext() (io.Reader, error) // the path to the build context - GetDockerfile() string // the relative path to the Dockerfile, including the fileitself - ShouldPrintBuildLog() bool // allow build log to be printed to stdout - ShouldBuildImage() bool // return true if the image needs to be built - GetBuildArgs() map[string]*string // return the environment args used to build the from Dockerfile - GetAuthConfigs() map[string]types.AuthConfig // return the auth configs to be able to pull from an authenticated docker registry -} - -// FromDockerfile 
represents the parameters needed to build an image from a Dockerfile -// rather than using a pre-built one -type FromDockerfile struct { - Context string // the path to the context of of the docker build - ContextArchive io.Reader // the tar archive file to send to docker that contains the build context - Dockerfile string // the path from the context to the Dockerfile for the image, defaults to "Dockerfile" - BuildArgs map[string]*string // enable user to pass build args to docker daemon - PrintBuildLog bool // enable user to print build log - AuthConfigs map[string]types.AuthConfig // Deprecated. Testcontainers will detect registry credentials automatically. Enable auth configs to be able to pull from an authenticated docker registry -} - -type ContainerFile struct { - HostFilePath string - ContainerFilePath string - FileMode int64 -} - -// ContainerRequest represents the parameters used to get a running container -type ContainerRequest struct { - FromDockerfile - Image string - Entrypoint []string - Env map[string]string - ExposedPorts []string // allow specifying protocol info - Cmd []string - Labels map[string]string - Mounts ContainerMounts - Tmpfs map[string]string - RegistryCred string // Deprecated: Testcontainers will detect registry credentials automatically - WaitingFor wait.Strategy - Name string // for specifying container name - Hostname string - ExtraHosts []string // Deprecated: Use HostConfigModifier instead - Privileged bool // For starting privileged container - Networks []string // for specifying network names - NetworkAliases map[string][]string // for specifying network aliases - NetworkMode container.NetworkMode // Deprecated: Use HostConfigModifier instead - Resources container.Resources // Deprecated: Use HostConfigModifier instead - Files []ContainerFile // files which will be copied when container starts - User string // for specifying uid:gid - SkipReaper bool // Deprecated: The reaper is globally controlled by the .testcontainers.properties file or the TESTCONTAINERS_RYUK_DISABLED environment variable - ReaperImage string // Deprecated: use WithImageName ContainerOption instead. Alternative reaper image - ReaperOptions []ContainerOption // options for the reaper - AutoRemove bool // Deprecated: Use HostConfigModifier instead. If set to true, the container will be removed from the host when stopped - AlwaysPullImage bool // Always pull image - ImagePlatform string // ImagePlatform describes the platform which the image runs on. - Binds []string // Deprecated: Use HostConfigModifier instead - ShmSize int64 // Amount of memory shared with the host (in bytes) - CapAdd []string // Deprecated: Use HostConfigModifier instead. Add Linux capabilities - CapDrop []string // Deprecated: Use HostConfigModifier instead. 
Drop Linux capabilities - ConfigModifier func(*container.Config) // Modifier for the config before container creation - HostConfigModifier func(*container.HostConfig) // Modifier for the host config before container creation - EnpointSettingsModifier func(map[string]*network.EndpointSettings) // Modifier for the network settings before container creation - LifecycleHooks []ContainerLifecycleHooks // define hooks to be executed during container lifecycle -} - -// containerOptions functional options for a container -type containerOptions struct { - ImageName string - RegistryCredentials string // Deprecated: Testcontainers will detect registry credentials automatically -} - -// functional option for setting the reaper image -type ContainerOption func(*containerOptions) - -// WithImageName sets the reaper image name -func WithImageName(imageName string) ContainerOption { - return func(o *containerOptions) { - o.ImageName = imageName - } -} - -// Deprecated: Testcontainers will detect registry credentials automatically -// WithRegistryCredentials sets the reaper registry credentials -func WithRegistryCredentials(registryCredentials string) ContainerOption { - return func(o *containerOptions) { - o.RegistryCredentials = registryCredentials - } -} - -// Validate ensures that the ContainerRequest does not have invalid parameters configured to it -// ex. make sure you are not specifying both an image as well as a context -func (c *ContainerRequest) Validate() error { - validationMethods := []func() error{ - c.validateContextAndImage, - c.validateContextOrImageIsSpecified, - c.validateMounts, - } - - var err error - for _, validationMethod := range validationMethods { - err = validationMethod() - if err != nil { - return err - } - } - - return nil -} - -// GetContext retrieve the build context for the request -func (c *ContainerRequest) GetContext() (io.Reader, error) { - if c.ContextArchive != nil { - return c.ContextArchive, nil - } - - // always pass context as absolute path - abs, err := filepath.Abs(c.Context) - if err != nil { - return nil, fmt.Errorf("error getting absolute path: %w", err) - } - c.Context = abs - - buildContext, err := archive.TarWithOptions(c.Context, &archive.TarOptions{}) - if err != nil { - return nil, err - } - - return buildContext, nil -} - -// GetBuildArgs returns the env args to be used when creating from Dockerfile -func (c *ContainerRequest) GetBuildArgs() map[string]*string { - return c.FromDockerfile.BuildArgs -} - -// GetDockerfile returns the Dockerfile from the ContainerRequest, defaults to "Dockerfile" -func (c *ContainerRequest) GetDockerfile() string { - f := c.FromDockerfile.Dockerfile - if f == "" { - return "Dockerfile" - } - - return f -} - -// GetAuthConfigs returns the auth configs to be able to pull from an authenticated docker registry -func (c *ContainerRequest) GetAuthConfigs() map[string]types.AuthConfig { - images, err := testcontainersdocker.ExtractImagesFromDockerfile(filepath.Join(c.Context, c.GetDockerfile()), c.GetBuildArgs()) - if err != nil { - return map[string]types.AuthConfig{} - } - - authConfigs := map[string]types.AuthConfig{} - for _, image := range images { - registry, authConfig, err := DockerImageAuth(context.Background(), image) - if err != nil { - continue - } - - authConfigs[registry] = authConfig - } - - return authConfigs -} - -func (c *ContainerRequest) ShouldBuildImage() bool { - return c.FromDockerfile.Context != "" || c.FromDockerfile.ContextArchive != nil -} - -func (c *ContainerRequest) ShouldPrintBuildLog() bool { - 
return c.FromDockerfile.PrintBuildLog -} - -func (c *ContainerRequest) validateContextAndImage() error { - if c.FromDockerfile.Context != "" && c.Image != "" { - return errors.New("you cannot specify both an Image and Context in a ContainerRequest") - } - - return nil -} - -func (c *ContainerRequest) validateContextOrImageIsSpecified() error { - if c.FromDockerfile.Context == "" && c.FromDockerfile.ContextArchive == nil && c.Image == "" { - return errors.New("you must specify either a build context or an image") - } - - return nil -} - -func (c *ContainerRequest) validateMounts() error { - targets := make(map[string]bool, len(c.Mounts)) - - for idx := range c.Mounts { - m := c.Mounts[idx] - targetPath := m.Target.Target() - if targets[targetPath] { - return fmt.Errorf("%w: %s", ErrDuplicateMountTarget, targetPath) - } else { - targets[targetPath] = true - } - } - return nil -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/docker.go b/vendor/github.com/testcontainers/testcontainers-go/docker.go deleted file mode 100644 index 5e935c678..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/docker.go +++ /dev/null @@ -1,1404 +0,0 @@ -package testcontainers - -import ( - "archive/tar" - "bufio" - "bytes" - "context" - "encoding/base64" - "encoding/binary" - "encoding/json" - "errors" - "fmt" - "io" - "net/url" - "os" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/cenkalti/backoff/v4" - "github.com/containerd/containerd/platforms" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/client" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/go-connections/nat" - "github.com/google/uuid" - "github.com/moby/term" - specs "github.com/opencontainers/image-spec/specs-go/v1" - tcexec "github.com/testcontainers/testcontainers-go/exec" - "github.com/testcontainers/testcontainers-go/internal" - "github.com/testcontainers/testcontainers-go/internal/testcontainersdocker" - "github.com/testcontainers/testcontainers-go/internal/testcontainerssession" - "github.com/testcontainers/testcontainers-go/wait" -) - -var ( - // Implement interfaces - _ Container = (*DockerContainer)(nil) - - logOnce sync.Once - ErrDuplicateMountTarget = errors.New("duplicate mount target detected") -) - -const ( - Bridge = "bridge" // Bridge network name (as well as driver) - Podman = "podman" - ReaperDefault = "reaper_default" // Default network name when bridge is not available - packagePath = "github.com/testcontainers/testcontainers-go" - - logStoppedForOutOfSyncMessage = "Stopping log consumer: Headers out of sync" -) - -// DockerContainer represents a container started using Docker -type DockerContainer struct { - // Container ID from Docker - ID string - WaitingFor wait.Strategy - Image string - - isRunning bool - imageWasBuilt bool - provider *DockerProvider - sessionID uuid.UUID - terminationSignal chan bool - consumers []LogConsumer - raw *types.ContainerJSON - stopProducer chan bool - logger Logging - lifecycleHooks []ContainerLifecycleHooks -} - -// SetLogger sets the logger for the container -func (c *DockerContainer) SetLogger(logger Logging) { - c.logger = logger -} - -// SetProvider sets the provider for the container -func (c *DockerContainer) SetProvider(provider *DockerProvider) { - c.provider = provider -} - -func (c *DockerContainer) GetContainerID() string { 
- return c.ID -} - -func (c *DockerContainer) IsRunning() bool { - return c.isRunning -} - -// Endpoint gets proto://host:port string for the first exposed port -// Will returns just host:port if proto is "" -func (c *DockerContainer) Endpoint(ctx context.Context, proto string) (string, error) { - ports, err := c.Ports(ctx) - if err != nil { - return "", err - } - - // get first port - var firstPort nat.Port - for p := range ports { - firstPort = p - break - } - - return c.PortEndpoint(ctx, firstPort, proto) -} - -// PortEndpoint gets proto://host:port string for the given exposed port -// Will returns just host:port if proto is "" -func (c *DockerContainer) PortEndpoint(ctx context.Context, port nat.Port, proto string) (string, error) { - host, err := c.Host(ctx) - if err != nil { - return "", err - } - - outerPort, err := c.MappedPort(ctx, port) - if err != nil { - return "", err - } - - protoFull := "" - if proto != "" { - protoFull = fmt.Sprintf("%s://", proto) - } - - return fmt.Sprintf("%s%s:%s", protoFull, host, outerPort.Port()), nil -} - -// Host gets host (ip or name) of the docker daemon where the container port is exposed -// Warning: this is based on your Docker host setting. Will fail if using an SSH tunnel -// You can use the "TC_HOST" env variable to set this yourself -func (c *DockerContainer) Host(ctx context.Context) (string, error) { - host, err := c.provider.DaemonHost(ctx) - if err != nil { - return "", err - } - return host, nil -} - -// MappedPort gets externally mapped port for a container port -func (c *DockerContainer) MappedPort(ctx context.Context, port nat.Port) (nat.Port, error) { - inspect, err := c.inspectContainer(ctx) - if err != nil { - return "", err - } - if inspect.ContainerJSONBase.HostConfig.NetworkMode == "host" { - return port, nil - } - ports, err := c.Ports(ctx) - if err != nil { - return "", err - } - - for k, p := range ports { - if k.Port() != port.Port() { - continue - } - if port.Proto() != "" && k.Proto() != port.Proto() { - continue - } - if len(p) == 0 { - continue - } - return nat.NewPort(k.Proto(), p[0].HostPort) - } - - return "", errors.New("port not found") -} - -// Ports gets the exposed ports for the container. -func (c *DockerContainer) Ports(ctx context.Context) (nat.PortMap, error) { - inspect, err := c.inspectContainer(ctx) - if err != nil { - return nil, err - } - return inspect.NetworkSettings.Ports, nil -} - -// SessionID gets the current session id -func (c *DockerContainer) SessionID() string { - return c.sessionID.String() -} - -// Start will start an already created container -func (c *DockerContainer) Start(ctx context.Context) error { - err := c.startingHook(ctx) - if err != nil { - return err - } - - if err := c.provider.client.ContainerStart(ctx, c.ID, types.ContainerStartOptions{}); err != nil { - return err - } - defer c.provider.Close() - - err = c.startedHook(ctx) - if err != nil { - return err - } - - return nil -} - -// Stop will stop an already started container -// -// In case the container fails to stop -// gracefully within a time frame specified by the timeout argument, -// it is forcefully terminated (killed). -// -// If the timeout is nil, the container's StopTimeout value is used, if set, -// otherwise the engine default. A negative timeout value can be specified, -// meaning no timeout, i.e. no forceful termination is performed. 
-func (c *DockerContainer) Stop(ctx context.Context, timeout *time.Duration) error { - err := c.stoppingHook(ctx) - if err != nil { - return err - } - - var options container.StopOptions - - if timeout != nil { - timeoutSeconds := int(timeout.Seconds()) - options.Timeout = &timeoutSeconds - } - - if err := c.provider.client.ContainerStop(ctx, c.ID, options); err != nil { - return err - } - defer c.provider.Close() - - c.isRunning = false - - err = c.stoppedHook(ctx) - if err != nil { - return err - } - - return nil -} - -// Terminate is used to kill the container. It is usually triggered by as defer function. -func (c *DockerContainer) Terminate(ctx context.Context) error { - err := c.terminatingHook(ctx) - if err != nil { - return err - } - - err = c.StopLogProducer() - if err != nil { - return err - } - - select { - // close reaper if it was created - case c.terminationSignal <- true: - default: - } - err = c.provider.client.ContainerRemove(ctx, c.GetContainerID(), types.ContainerRemoveOptions{ - RemoveVolumes: true, - Force: true, - }) - if err != nil { - return err - } - - err = c.terminatedHook(ctx) - if err != nil { - return err - } - - if c.imageWasBuilt { - _, err := c.provider.client.ImageRemove(ctx, c.Image, types.ImageRemoveOptions{ - Force: true, - PruneChildren: true, - }) - if err != nil { - return err - } - } - - if err := c.provider.client.Close(); err != nil { - return err - } - - c.sessionID = uuid.UUID{} - c.isRunning = false - return nil -} - -// update container raw info -func (c *DockerContainer) inspectRawContainer(ctx context.Context) (*types.ContainerJSON, error) { - inspect, err := c.provider.client.ContainerInspect(ctx, c.ID) - if err != nil { - return nil, err - } - defer c.provider.Close() - - c.raw = &inspect - return c.raw, nil -} - -func (c *DockerContainer) inspectContainer(ctx context.Context) (*types.ContainerJSON, error) { - inspect, err := c.provider.client.ContainerInspect(ctx, c.ID) - if err != nil { - return nil, err - } - defer c.provider.Close() - - return &inspect, nil -} - -// Logs will fetch both STDOUT and STDERR from the current container. Returns a -// ReadCloser and leaves it up to the caller to extract what it wants. -func (c *DockerContainer) Logs(ctx context.Context) (io.ReadCloser, error) { - - const streamHeaderSize = 8 - - options := types.ContainerLogsOptions{ - ShowStdout: true, - ShowStderr: true, - } - - rc, err := c.provider.client.ContainerLogs(ctx, c.ID, options) - if err != nil { - return nil, err - } - defer c.provider.Close() - - pr, pw := io.Pipe() - r := bufio.NewReader(rc) - - go func() { - var lineStarted = true - for err == nil { - line, isPrefix, err := r.ReadLine() - - if lineStarted && len(line) >= streamHeaderSize { - line = line[streamHeaderSize:] // trim stream header - lineStarted = false - } - if !isPrefix { - lineStarted = true - } - - _, errW := pw.Write(line) - if errW != nil { - return - } - - if !isPrefix { - _, errW := pw.Write([]byte("\n")) - if errW != nil { - return - } - } - - if err != nil { - _ = pw.CloseWithError(err) - return - } - } - }() - - return pr, nil -} - -// FollowOutput adds a LogConsumer to be sent logs from the container's -// STDOUT and STDERR -func (c *DockerContainer) FollowOutput(consumer LogConsumer) { - c.consumers = append(c.consumers, consumer) -} - -// Name gets the name of the container. 
-func (c *DockerContainer) Name(ctx context.Context) (string, error) { - inspect, err := c.inspectContainer(ctx) - if err != nil { - return "", err - } - return inspect.Name, nil -} - -// State returns container's running state -func (c *DockerContainer) State(ctx context.Context) (*types.ContainerState, error) { - inspect, err := c.inspectRawContainer(ctx) - if err != nil { - if c.raw != nil { - return c.raw.State, err - } - return nil, err - } - return inspect.State, nil -} - -// Networks gets the names of the networks the container is attached to. -func (c *DockerContainer) Networks(ctx context.Context) ([]string, error) { - inspect, err := c.inspectContainer(ctx) - if err != nil { - return []string{}, err - } - - networks := inspect.NetworkSettings.Networks - - n := []string{} - - for k := range networks { - n = append(n, k) - } - - return n, nil -} - -// ContainerIP gets the IP address of the primary network within the container. -func (c *DockerContainer) ContainerIP(ctx context.Context) (string, error) { - inspect, err := c.inspectContainer(ctx) - if err != nil { - return "", err - } - - ip := inspect.NetworkSettings.IPAddress - if ip == "" { - // use IP from "Networks" if only single network defined - networks := inspect.NetworkSettings.Networks - if len(networks) == 1 { - for _, v := range networks { - ip = v.IPAddress - } - } - } - - return ip, nil -} - -// ContainerIPs gets the IP addresses of all the networks within the container. -func (c *DockerContainer) ContainerIPs(ctx context.Context) ([]string, error) { - ips := make([]string, 0) - - inspect, err := c.inspectContainer(ctx) - if err != nil { - return nil, err - } - - networks := inspect.NetworkSettings.Networks - for _, nw := range networks { - ips = append(ips, nw.IPAddress) - } - - return ips, nil -} - -// NetworkAliases gets the aliases of the container for the networks it is attached to. 
-func (c *DockerContainer) NetworkAliases(ctx context.Context) (map[string][]string, error) { - inspect, err := c.inspectContainer(ctx) - if err != nil { - return map[string][]string{}, err - } - - networks := inspect.NetworkSettings.Networks - - a := map[string][]string{} - - for k := range networks { - a[k] = networks[k].Aliases - } - - return a, nil -} - -func (c *DockerContainer) Exec(ctx context.Context, cmd []string, options ...tcexec.ProcessOption) (int, io.Reader, error) { - cli := c.provider.client - response, err := cli.ContainerExecCreate(ctx, c.ID, types.ExecConfig{ - Cmd: cmd, - Detach: false, - AttachStdout: true, - AttachStderr: true, - }) - if err != nil { - return 0, nil, err - } - - hijack, err := cli.ContainerExecAttach(ctx, response.ID, types.ExecStartCheck{}) - if err != nil { - return 0, nil, err - } - - opt := &tcexec.ProcessOptions{ - Reader: hijack.Reader, - } - - for _, o := range options { - o.Apply(opt) - } - - var exitCode int - for { - execResp, err := cli.ContainerExecInspect(ctx, response.ID) - if err != nil { - return 0, nil, err - } - - if !execResp.Running { - exitCode = execResp.ExitCode - break - } - - time.Sleep(100 * time.Millisecond) - } - - return exitCode, opt.Reader, nil -} - -type FileFromContainer struct { - underlying *io.ReadCloser - tarreader *tar.Reader -} - -func (fc *FileFromContainer) Read(b []byte) (int, error) { - return (*fc.tarreader).Read(b) -} - -func (fc *FileFromContainer) Close() error { - return (*fc.underlying).Close() -} - -func (c *DockerContainer) CopyFileFromContainer(ctx context.Context, filePath string) (io.ReadCloser, error) { - r, _, err := c.provider.client.CopyFromContainer(ctx, c.ID, filePath) - if err != nil { - return nil, err - } - defer c.provider.Close() - - tarReader := tar.NewReader(r) - - // if we got here we have exactly one file in the TAR-stream - // so we advance the index by one so the next call to Read will start reading it - _, err = tarReader.Next() - if err != nil { - return nil, err - } - - ret := &FileFromContainer{ - underlying: &r, - tarreader: tarReader, - } - - return ret, nil -} - -// CopyDirToContainer copies the contents of a directory to a parent path in the container. 
This parent path must exist in the container first -// as we cannot create it -func (c *DockerContainer) CopyDirToContainer(ctx context.Context, hostDirPath string, containerParentPath string, fileMode int64) error { - dir, err := isDir(hostDirPath) - if err != nil { - return err - } - - if !dir { - // it's not a dir: let the consumer to handle an error - return fmt.Errorf("path %s is not a directory", hostDirPath) - } - - buff, err := tarDir(hostDirPath, fileMode) - if err != nil { - return err - } - - // create the directory under its parent - parent := filepath.Dir(containerParentPath) - - err = c.provider.client.CopyToContainer(ctx, c.ID, parent, buff, types.CopyToContainerOptions{}) - if err != nil { - return err - } - defer c.provider.Close() - - return nil -} - -func (c *DockerContainer) CopyFileToContainer(ctx context.Context, hostFilePath string, containerFilePath string, fileMode int64) error { - dir, err := isDir(hostFilePath) - if err != nil { - return err - } - - if dir { - return c.CopyDirToContainer(ctx, hostFilePath, containerFilePath, fileMode) - } - - fileContent, err := os.ReadFile(hostFilePath) - if err != nil { - return err - } - return c.CopyToContainer(ctx, fileContent, containerFilePath, fileMode) -} - -// CopyToContainer copies fileContent data to a file in container -func (c *DockerContainer) CopyToContainer(ctx context.Context, fileContent []byte, containerFilePath string, fileMode int64) error { - buffer, err := tarFile(fileContent, containerFilePath, fileMode) - if err != nil { - return err - } - - err = c.provider.client.CopyToContainer(ctx, c.ID, filepath.Dir(containerFilePath), buffer, types.CopyToContainerOptions{}) - if err != nil { - return err - } - defer c.provider.Close() - - return nil -} - -// StartLogProducer will start a concurrent process that will continuously read logs -// from the container and will send them to each added LogConsumer -func (c *DockerContainer) StartLogProducer(ctx context.Context) error { - if c.stopProducer != nil { - return errors.New("log producer already started") - } - - c.stopProducer = make(chan bool) - - go func(stop <-chan bool) { - since := "" - // if the socket is closed we will make additional logs request with updated Since timestamp - BEGIN: - options := types.ContainerLogsOptions{ - ShowStdout: true, - ShowStderr: true, - Follow: true, - Since: since, - } - - ctx, cancel := context.WithTimeout(ctx, time.Second*5) - defer cancel() - - r, err := c.provider.client.ContainerLogs(ctx, c.GetContainerID(), options) - if err != nil { - // if we can't get the logs, panic, we can't return an error to anything - // from within this goroutine - panic(err) - } - defer c.provider.Close() - - for { - select { - case <-stop: - err := r.Close() - if err != nil { - // we can't close the read closer, this should never happen - panic(err) - } - return - default: - h := make([]byte, 8) - _, err := io.ReadFull(r, h) - if err != nil { - // proper type matching requires https://go-review.googlesource.com/c/go/+/250357/ (go 1.16) - if strings.Contains(err.Error(), "use of closed network connection") { - now := time.Now() - since = fmt.Sprintf("%d.%09d", now.Unix(), int64(now.Nanosecond())) - goto BEGIN - } - if errors.Is(err, context.DeadlineExceeded) { - // Probably safe to continue here - continue - } - _, _ = fmt.Fprintf(os.Stderr, "container log error: %+v. %s", err, logStoppedForOutOfSyncMessage) - // if we would continue here, the next header-read will result into random data... 
- return - } - - count := binary.BigEndian.Uint32(h[4:]) - if count == 0 { - continue - } - logType := h[0] - if logType > 2 { - _, _ = fmt.Fprintf(os.Stderr, "received invalid log type: %d", logType) - // sometimes docker returns logType = 3 which is an undocumented log type, so treat it as stdout - logType = 1 - } - - // a map of the log type --> int representation in the header, notice the first is blank, this is stdin, but the go docker client doesn't allow following that in logs - logTypes := []string{"", StdoutLog, StderrLog} - - b := make([]byte, count) - _, err = io.ReadFull(r, b) - if err != nil { - // TODO: add-logger: use logger to log out this error - _, _ = fmt.Fprintf(os.Stderr, "error occurred reading log with known length %s", err.Error()) - if errors.Is(err, context.DeadlineExceeded) { - // Probably safe to continue here - continue - } - // we can not continue here as the next read most likely will not be the next header - _, _ = fmt.Fprintln(os.Stderr, logStoppedForOutOfSyncMessage) - return - } - for _, c := range c.consumers { - c.Accept(Log{ - LogType: logTypes[logType], - Content: b, - }) - } - } - } - }(c.stopProducer) - - return nil -} - -// StopLogProducer will stop the concurrent process that is reading logs -// and sending them to each added LogConsumer -func (c *DockerContainer) StopLogProducer() error { - if c.stopProducer != nil { - c.stopProducer <- true - c.stopProducer = nil - } - return nil -} - -// DockerNetwork represents a network started using Docker -type DockerNetwork struct { - ID string // Network ID from Docker - Driver string - Name string - provider *DockerProvider - terminationSignal chan bool -} - -// Remove is used to remove the network. It is usually triggered by as defer function. -func (n *DockerNetwork) Remove(ctx context.Context) error { - select { - // close reaper if it was created - case n.terminationSignal <- true: - default: - } - - err := n.provider.client.NetworkRemove(ctx, n.ID) - if err != nil { - return err - } - defer n.provider.Close() - - return nil -} - -// DockerProvider implements the ContainerProvider interface -type DockerProvider struct { - *DockerProviderOptions - client client.APIClient - host string - hostCache string - config TestcontainersConfig -} - -// Client gets the docker client used by the provider -func (p *DockerProvider) Client() client.APIClient { - return p.client -} - -// Close closes the docker client used by the provider -func (p *DockerProvider) Close() error { - if p.client == nil { - return nil - } - - return p.client.Close() -} - -// SetClient sets the docker client to be used by the provider -func (p *DockerProvider) SetClient(c client.APIClient) { - p.client = c -} - -var _ ContainerProvider = (*DockerProvider)(nil) - -func NewDockerClient() (cli *client.Client, err error) { - return testcontainersdocker.NewClient(context.Background()) -} - -// BuildImage will build and image from context and Dockerfile, then return the tag -func (p *DockerProvider) BuildImage(ctx context.Context, img ImageBuildInfo) (string, error) { - repo := uuid.New() - tag := uuid.New() - - repoTag := fmt.Sprintf("%s:%s", repo, tag) - - buildContext, err := img.GetContext() - if err != nil { - return "", err - } - - buildOptions := types.ImageBuildOptions{ - BuildArgs: img.GetBuildArgs(), - Dockerfile: img.GetDockerfile(), - AuthConfigs: img.GetAuthConfigs(), - Context: buildContext, - Tags: []string{repoTag}, - Remove: true, - ForceRemove: true, - } - - var resp types.ImageBuildResponse - err = backoff.Retry(func() error 
{ - resp, err = p.client.ImageBuild(ctx, buildContext, buildOptions) - if err != nil { - if _, ok := err.(errdefs.ErrNotFound); ok { - return backoff.Permanent(err) - } - Logger.Printf("Failed to build image: %s, will retry", err) - return err - } - defer p.Close() - - return nil - }, backoff.WithContext(backoff.NewExponentialBackOff(), ctx)) - if err != nil { - return "", err - } - - if img.ShouldPrintBuildLog() { - termFd, isTerm := term.GetFdInfo(os.Stderr) - err = jsonmessage.DisplayJSONMessagesStream(resp.Body, os.Stderr, termFd, isTerm, nil) - if err != nil { - return "", err - } - } - - // need to read the response from Docker, I think otherwise the image - // might not finish building before continuing to execute here - buf := new(bytes.Buffer) - _, err = buf.ReadFrom(resp.Body) - if err != nil { - return "", err - } - - _ = resp.Body.Close() - - return repoTag, nil -} - -// CreateContainer fulfills a request for a container without starting it -func (p *DockerProvider) CreateContainer(ctx context.Context, req ContainerRequest) (Container, error) { - var err error - - // defer the close of the Docker client connection the soonest - defer p.Close() - - // Make sure that bridge network exists - // In case it is disabled we will create reaper_default network - if p.DefaultNetwork == "" { - p.DefaultNetwork, err = p.getDefaultNetwork(ctx, p.client) - if err != nil { - return nil, err - } - } - - // If default network is not bridge make sure it is attached to the request - // as container won't be attached to it automatically - // in case of Podman the bridge network is called 'podman' as 'bridge' would conflict - if p.DefaultNetwork != p.defaultBridgeNetworkName { - isAttached := false - for _, net := range req.Networks { - if net == p.DefaultNetwork { - isAttached = true - break - } - } - - if !isAttached { - req.Networks = append(req.Networks, p.DefaultNetwork) - } - } - - env := []string{} - for envKey, envVar := range req.Env { - env = append(env, envKey+"="+envVar) - } - - if req.Labels == nil { - req.Labels = make(map[string]string) - } - - reaperOpts := containerOptions{ - ImageName: req.ReaperImage, - } - for _, opt := range req.ReaperOptions { - opt(&reaperOpts) - } - - tcConfig := p.Config().Config - - var termSignal chan bool - // the reaper does not need to start a reaper for itself - isReaperContainer := strings.EqualFold(req.Image, reaperImage(reaperOpts.ImageName)) - if !tcConfig.RyukDisabled && !isReaperContainer { - r, err := reuseOrCreateReaper(context.WithValue(ctx, testcontainersdocker.DockerHostContextKey, p.host), testcontainerssession.String(), p, req.ReaperOptions...) 
- if err != nil { - return nil, fmt.Errorf("%w: creating reaper failed", err) - } - termSignal, err = r.Connect() - if err != nil { - return nil, fmt.Errorf("%w: connecting to reaper failed", err) - } - for k, v := range r.Labels() { - if _, ok := req.Labels[k]; !ok { - req.Labels[k] = v - } - } - } - - if err = req.Validate(); err != nil { - return nil, err - } - - var tag string - var platform *specs.Platform - - if req.ShouldBuildImage() { - tag, err = p.BuildImage(ctx, &req) - if err != nil { - return nil, err - } - } else { - tag = req.Image - - if req.ImagePlatform != "" { - p, err := platforms.Parse(req.ImagePlatform) - if err != nil { - return nil, fmt.Errorf("invalid platform %s: %w", req.ImagePlatform, err) - } - platform = &p - } - - var shouldPullImage bool - - if req.AlwaysPullImage { - shouldPullImage = true // If requested always attempt to pull image - } else { - image, _, err := p.client.ImageInspectWithRaw(ctx, tag) - if err != nil { - if client.IsErrNotFound(err) { - shouldPullImage = true - } else { - return nil, err - } - } - if platform != nil && (image.Architecture != platform.Architecture || image.Os != platform.OS) { - shouldPullImage = true - } - } - - if shouldPullImage { - pullOpt := types.ImagePullOptions{ - Platform: req.ImagePlatform, // may be empty - } - - registry, imageAuth, err := DockerImageAuth(ctx, req.Image) - if err != nil { - p.Logger.Printf("Failed to get image auth for %s. Setting empty credentials for the image: %s. Error is:%s", registry, req.Image, err) - } else { - // see https://github.com/docker/docs/blob/e8e1204f914767128814dca0ea008644709c117f/engine/api/sdk/examples.md?plain=1#L649-L657 - encodedJSON, err := json.Marshal(imageAuth) - if err != nil { - p.Logger.Printf("Failed to marshal image auth. Setting empty credentials for the image: %s. Error is:%s", req.Image, err) - } else { - pullOpt.RegistryAuth = base64.URLEncoding.EncodeToString(encodedJSON) - } - } - - if err := p.attemptToPullImage(ctx, tag, pullOpt); err != nil { - return nil, err - } - } - } - - dockerInput := &container.Config{ - Entrypoint: req.Entrypoint, - Image: tag, - Env: env, - Labels: req.Labels, - Cmd: req.Cmd, - Hostname: req.Hostname, - User: req.User, - } - - hostConfig := &container.HostConfig{ - Privileged: req.Privileged, - ShmSize: req.ShmSize, - Tmpfs: req.Tmpfs, - } - - networkingConfig := &network.NetworkingConfig{} - - // default hooks include logger hook and pre-create hook - defaultHooks := []ContainerLifecycleHooks{ - DefaultLoggingHook(p.Logger), - { - PreCreates: []ContainerRequestHook{ - func(ctx context.Context, req ContainerRequest) error { - return p.preCreateContainerHook(ctx, req, dockerInput, hostConfig, networkingConfig) - }, - }, - PostCreates: []ContainerHook{ - // copy files to container after it's created - func(ctx context.Context, c Container) error { - for _, f := range req.Files { - err := c.CopyFileToContainer(ctx, f.HostFilePath, f.ContainerFilePath, f.FileMode) - if err != nil { - return fmt.Errorf("can't copy %s to container: %w", f.HostFilePath, err) - } - } - - return nil - }, - }, - PostStarts: []ContainerHook{ - // first post-start hook is to wait for the container to be ready - func(ctx context.Context, c Container) error { - dockerContainer := c.(*DockerContainer) - - // if a Wait Strategy has been specified, wait before returning - if dockerContainer.WaitingFor != nil { - dockerContainer.logger.Printf( - "🚧 Waiting for container id %s image: %s. 
Waiting for: %+v", - dockerContainer.ID[:12], dockerContainer.Image, dockerContainer.WaitingFor, - ) - if err := dockerContainer.WaitingFor.WaitUntilReady(ctx, c); err != nil { - return err - } - } - - dockerContainer.isRunning = true - - return nil - }, - }, - }, - } - - // always prepend default lifecycle hooks to user-defined hooks - req.LifecycleHooks = append(defaultHooks, req.LifecycleHooks...) - - err = req.creatingHook(ctx) - if err != nil { - return nil, err - } - - resp, err := p.client.ContainerCreate(ctx, dockerInput, hostConfig, networkingConfig, platform, req.Name) - if err != nil { - return nil, err - } - - // #248: If there is more than one network specified in the request attach newly created container to them one by one - if len(req.Networks) > 1 { - for _, n := range req.Networks[1:] { - nw, err := p.GetNetwork(ctx, NetworkRequest{ - Name: n, - }) - if err == nil { - endpointSetting := network.EndpointSettings{ - Aliases: req.NetworkAliases[n], - } - err = p.client.NetworkConnect(ctx, nw.ID, resp.ID, &endpointSetting) - if err != nil { - return nil, err - } - } - } - } - - c := &DockerContainer{ - ID: resp.ID, - WaitingFor: req.WaitingFor, - Image: tag, - imageWasBuilt: req.ShouldBuildImage(), - sessionID: testcontainerssession.ID(), - provider: p, - terminationSignal: termSignal, - stopProducer: nil, - logger: p.Logger, - lifecycleHooks: req.LifecycleHooks, - } - - err = c.createdHook(ctx) - if err != nil { - return nil, err - } - - return c, nil -} - -func (p *DockerProvider) findContainerByName(ctx context.Context, name string) (*types.Container, error) { - if name == "" { - return nil, nil - } - - // Note that, 'name' filter will use regex to find the containers - filter := filters.NewArgs(filters.Arg("name", fmt.Sprintf("^%s$", name))) - containers, err := p.client.ContainerList(ctx, types.ContainerListOptions{Filters: filter}) - if err != nil { - return nil, err - } - defer p.Close() - - if len(containers) > 0 { - return &containers[0], nil - } - return nil, nil -} - -func (p *DockerProvider) ReuseOrCreateContainer(ctx context.Context, req ContainerRequest) (Container, error) { - c, err := p.findContainerByName(ctx, req.Name) - if err != nil { - return nil, err - } - if c == nil { - return p.CreateContainer(ctx, req) - } - - tcConfig := p.Config().Config - - var termSignal chan bool - if !tcConfig.RyukDisabled { - r, err := reuseOrCreateReaper(context.WithValue(ctx, testcontainersdocker.DockerHostContextKey, p.host), testcontainerssession.String(), p, req.ReaperOptions...) - if err != nil { - return nil, fmt.Errorf("%w: creating reaper failed", err) - } - termSignal, err = r.Connect() - if err != nil { - return nil, fmt.Errorf("%w: connecting to reaper failed", err) - } - } - - dc := &DockerContainer{ - ID: c.ID, - WaitingFor: req.WaitingFor, - Image: c.Image, - sessionID: testcontainerssession.ID(), - provider: p, - terminationSignal: termSignal, - stopProducer: nil, - logger: p.Logger, - isRunning: c.State == "running", - } - - return dc, nil -} - -// attemptToPullImage tries to pull the image while respecting the ctx cancellations. -// Besides, if the image cannot be pulled due to ErrorNotFound then no need to retry but terminate immediately. 
-func (p *DockerProvider) attemptToPullImage(ctx context.Context, tag string, pullOpt types.ImagePullOptions) error { - var ( - err error - pull io.ReadCloser - ) - err = backoff.Retry(func() error { - pull, err = p.client.ImagePull(ctx, tag, pullOpt) - if err != nil { - if _, ok := err.(errdefs.ErrNotFound); ok { - return backoff.Permanent(err) - } - Logger.Printf("Failed to pull image: %s, will retry", err) - return err - } - defer p.Close() - - return nil - }, backoff.WithContext(backoff.NewExponentialBackOff(), ctx)) - if err != nil { - return err - } - defer pull.Close() - - // download of docker image finishes at EOF of the pull request - _, err = io.ReadAll(pull) - return err -} - -// Health measure the healthiness of the provider. Right now we leverage the -// docker-client ping endpoint to see if the daemon is reachable. -func (p *DockerProvider) Health(ctx context.Context) (err error) { - _, err = p.client.Ping(ctx) - defer p.Close() - - return err -} - -// RunContainer takes a RequestContainer as input and it runs a container via the docker sdk -func (p *DockerProvider) RunContainer(ctx context.Context, req ContainerRequest) (Container, error) { - c, err := p.CreateContainer(ctx, req) - if err != nil { - return nil, err - } - - if err := c.Start(ctx); err != nil { - return c, fmt.Errorf("%w: could not start container", err) - } - - return c, nil -} - -// Config provides the TestcontainersConfig read from $HOME/.testcontainers.properties or -// the environment variables -func (p *DockerProvider) Config() TestcontainersConfig { - return p.config -} - -// DaemonHost gets the host or ip of the Docker daemon where ports are exposed on -// Warning: this is based on your Docker host setting. Will fail if using an SSH tunnel -// You can use the "TC_HOST" env variable to set this yourself -func (p *DockerProvider) DaemonHost(ctx context.Context) (string, error) { - return daemonHost(ctx, p) -} - -func daemonHost(ctx context.Context, p *DockerProvider) (string, error) { - if p.hostCache != "" { - return p.hostCache, nil - } - - host, exists := os.LookupEnv("TC_HOST") - if exists { - p.hostCache = host - return p.hostCache, nil - } - - // infer from Docker host - url, err := url.Parse(p.client.DaemonHost()) - if err != nil { - return "", err - } - defer p.Close() - - switch url.Scheme { - case "http", "https", "tcp": - p.hostCache = url.Hostname() - case "unix", "npipe": - if testcontainersdocker.InAContainer() { - ip, err := p.GetGatewayIP(ctx) - if err != nil { - ip, err = testcontainersdocker.DefaultGatewayIP() - if err != nil { - ip = "localhost" - } - } - p.hostCache = ip - } else { - p.hostCache = "localhost" - } - default: - return "", errors.New("could not determine host through env or docker host") - } - - return p.hostCache, nil -} - -// CreateNetwork returns the object representing a new network identified by its name -func (p *DockerProvider) CreateNetwork(ctx context.Context, req NetworkRequest) (Network, error) { - var err error - - // defer the close of the Docker client connection the soonest - defer p.Close() - - // Make sure that bridge network exists - // In case it is disabled we will create reaper_default network - if p.DefaultNetwork == "" { - if p.DefaultNetwork, err = p.getDefaultNetwork(ctx, p.client); err != nil { - return nil, err - } - } - - if req.Labels == nil { - req.Labels = make(map[string]string) - } - - tcConfig := p.Config().Config - - nc := types.NetworkCreate{ - Driver: req.Driver, - CheckDuplicate: req.CheckDuplicate, - Internal: req.Internal, - 
EnableIPv6: req.EnableIPv6, - Attachable: req.Attachable, - Labels: req.Labels, - IPAM: req.IPAM, - } - - var termSignal chan bool - if !tcConfig.RyukDisabled { - r, err := reuseOrCreateReaper(context.WithValue(ctx, testcontainersdocker.DockerHostContextKey, p.host), testcontainerssession.String(), p, req.ReaperOptions...) - if err != nil { - return nil, fmt.Errorf("%w: creating network reaper failed", err) - } - termSignal, err = r.Connect() - if err != nil { - return nil, fmt.Errorf("%w: connecting to network reaper failed", err) - } - for k, v := range r.Labels() { - if _, ok := req.Labels[k]; !ok { - req.Labels[k] = v - } - } - } - - response, err := p.client.NetworkCreate(ctx, req.Name, nc) - if err != nil { - return &DockerNetwork{}, err - } - - n := &DockerNetwork{ - ID: response.ID, - Driver: req.Driver, - Name: req.Name, - terminationSignal: termSignal, - provider: p, - } - - return n, nil -} - -// GetNetwork returns the object representing the network identified by its name -func (p *DockerProvider) GetNetwork(ctx context.Context, req NetworkRequest) (types.NetworkResource, error) { - networkResource, err := p.client.NetworkInspect(ctx, req.Name, types.NetworkInspectOptions{ - Verbose: true, - }) - if err != nil { - return types.NetworkResource{}, err - } - - return networkResource, err -} - -func (p *DockerProvider) GetGatewayIP(ctx context.Context) (string, error) { - // Use a default network as defined in the DockerProvider - if p.DefaultNetwork == "" { - var err error - p.DefaultNetwork, err = p.getDefaultNetwork(ctx, p.client) - if err != nil { - return "", err - } - } - nw, err := p.GetNetwork(ctx, NetworkRequest{Name: p.DefaultNetwork}) - if err != nil { - return "", err - } - - var ip string - for _, config := range nw.IPAM.Config { - if config.Gateway != "" { - ip = config.Gateway - break - } - } - if ip == "" { - return "", errors.New("Failed to get gateway IP from network settings") - } - - return ip, nil -} - -func (p *DockerProvider) getDefaultNetwork(ctx context.Context, cli client.APIClient) (string, error) { - // Get list of available networks - networkResources, err := cli.NetworkList(ctx, types.NetworkListOptions{}) - if err != nil { - return "", err - } - - reaperNetwork := ReaperDefault - - reaperNetworkExists := false - - for _, net := range networkResources { - if net.Name == p.defaultBridgeNetworkName { - return p.defaultBridgeNetworkName, nil - } - - if net.Name == reaperNetwork { - reaperNetworkExists = true - } - } - - // Create a bridge network for the container communications - if !reaperNetworkExists { - _, err = cli.NetworkCreate(ctx, reaperNetwork, types.NetworkCreate{ - Driver: Bridge, - Attachable: true, - Labels: map[string]string{ - TestcontainerLabel: "true", - testcontainersdocker.LabelLang: "go", - testcontainersdocker.LabelVersion: internal.Version, - }, - }) - - if err != nil { - return "", err - } - } - - return reaperNetwork, nil -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/docker_auth.go b/vendor/github.com/testcontainers/testcontainers-go/docker_auth.go deleted file mode 100644 index c95f848c0..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/docker_auth.go +++ /dev/null @@ -1,108 +0,0 @@ -package testcontainers - -import ( - "context" - "encoding/base64" - "encoding/json" - "os" - - "github.com/cpuguy83/dockercfg" - "github.com/docker/docker/api/types" - "github.com/testcontainers/testcontainers-go/internal/testcontainersdocker" -) - -// DockerImageAuth returns the auth config for the given 
Docker image, extracting first its Docker registry. -// Finally, it will use the credential helpers to extract the information from the docker config file -// for that registry, if it exists. -func DockerImageAuth(ctx context.Context, image string) (string, types.AuthConfig, error) { - defaultRegistry := defaultRegistry(ctx) - registry := testcontainersdocker.ExtractRegistry(image, defaultRegistry) - - cfgs, err := getDockerAuthConfigs() - if err != nil { - return registry, types.AuthConfig{}, err - } - - if cfg, ok := cfgs[registry]; ok { - return registry, cfg, nil - } - - return registry, types.AuthConfig{}, dockercfg.ErrCredentialsNotFound -} - -// defaultRegistry returns the default registry to use when pulling images -// It will use the docker daemon to get the default registry, returning "https://index.docker.io/v1/" if -// it fails to get the information from the daemon -func defaultRegistry(ctx context.Context) string { - client, err := testcontainersdocker.NewClient(ctx) - if err != nil { - return testcontainersdocker.IndexDockerIO - } - defer client.Close() - - info, err := client.Info(ctx) - if err != nil { - return testcontainersdocker.IndexDockerIO - } - - return info.IndexServerAddress -} - -// getDockerAuthConfigs returns a map with the auth configs from the docker config file -// using the registry as the key -func getDockerAuthConfigs() (map[string]types.AuthConfig, error) { - cfg, err := getDockerConfig() - if err != nil { - return nil, err - } - - cfgs := map[string]types.AuthConfig{} - for k, v := range cfg.AuthConfigs { - ac := types.AuthConfig{ - Auth: v.Auth, - Email: v.Email, - IdentityToken: v.IdentityToken, - Password: v.Password, - RegistryToken: v.RegistryToken, - ServerAddress: v.ServerAddress, - Username: v.Username, - } - - if v.Username == "" && v.Password == "" { - u, p, _ := dockercfg.GetRegistryCredentials(k) - ac.Username = u - ac.Password = p - } - - if v.Auth == "" { - ac.Auth = base64.StdEncoding.EncodeToString([]byte(ac.Username + ":" + ac.Password)) - } - - cfgs[k] = ac - } - - return cfgs, nil -} - -// getDockerConfig returns the docker config file. It will internally check, in this particular order: -// 1. the DOCKER_AUTH_CONFIG environment variable, unmarshalling it into a dockercfg.Config -// 2. the DOCKER_CONFIG environment variable, as the path to the config file -// 3. 
else it will load the default config file, which is ~/.docker/config.json -func getDockerConfig() (dockercfg.Config, error) { - dockerAuthConfig := os.Getenv("DOCKER_AUTH_CONFIG") - if dockerAuthConfig != "" { - cfg := dockercfg.Config{} - err := json.Unmarshal([]byte(dockerAuthConfig), &cfg) - if err == nil { - return cfg, nil - } - - } - - cfg, err := dockercfg.LoadDefaultConfig() - if err != nil { - return cfg, err - } - - return cfg, nil -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/docker_mounts.go b/vendor/github.com/testcontainers/testcontainers-go/docker_mounts.go deleted file mode 100644 index bb63dc047..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/docker_mounts.go +++ /dev/null @@ -1,116 +0,0 @@ -package testcontainers - -import "github.com/docker/docker/api/types/mount" - -var ( - mountTypeMapping = map[MountType]mount.Type{ - MountTypeBind: mount.TypeBind, - MountTypeVolume: mount.TypeVolume, - MountTypeTmpfs: mount.TypeTmpfs, - MountTypePipe: mount.TypeNamedPipe, - } -) - -// BindMounter can optionally be implemented by mount sources -// to support advanced scenarios based on mount.BindOptions -type BindMounter interface { - GetBindOptions() *mount.BindOptions -} - -// VolumeMounter can optionally be implemented by mount sources -// to support advanced scenarios based on mount.VolumeOptions -type VolumeMounter interface { - GetVolumeOptions() *mount.VolumeOptions -} - -// TmpfsMounter can optionally be implemented by mount sources -// to support advanced scenarios based on mount.TmpfsOptions -type TmpfsMounter interface { - GetTmpfsOptions() *mount.TmpfsOptions -} - -type DockerBindMountSource struct { - *mount.BindOptions - - // HostPath is the path mounted into the container - // the same host path might be mounted to multiple locations within a single container - HostPath string -} - -func (s DockerBindMountSource) Source() string { - return s.HostPath -} - -func (DockerBindMountSource) Type() MountType { - return MountTypeBind -} - -func (s DockerBindMountSource) GetBindOptions() *mount.BindOptions { - return s.BindOptions -} - -type DockerVolumeMountSource struct { - *mount.VolumeOptions - - // Name refers to the name of the volume to be mounted - // the same volume might be mounted to multiple locations within a single container - Name string -} - -func (s DockerVolumeMountSource) Source() string { - return s.Name -} - -func (DockerVolumeMountSource) Type() MountType { - return MountTypeVolume -} - -func (s DockerVolumeMountSource) GetVolumeOptions() *mount.VolumeOptions { - return s.VolumeOptions -} - -type DockerTmpfsMountSource struct { - GenericTmpfsMountSource - *mount.TmpfsOptions -} - -func (s DockerTmpfsMountSource) GetTmpfsOptions() *mount.TmpfsOptions { - return s.TmpfsOptions -} - -// mapToDockerMounts maps the given []ContainerMount to the corresponding -// []mount.Mount for further processing -func mapToDockerMounts(containerMounts ContainerMounts) []mount.Mount { - mounts := make([]mount.Mount, 0, len(containerMounts)) - - for idx := range containerMounts { - m := containerMounts[idx] - - var mountType mount.Type - if mt, ok := mountTypeMapping[m.Source.Type()]; ok { - mountType = mt - } else { - continue - } - - containerMount := mount.Mount{ - Type: mountType, - Source: m.Source.Source(), - ReadOnly: m.ReadOnly, - Target: m.Target.Target(), - } - - switch typedMounter := m.Source.(type) { - case BindMounter: - containerMount.BindOptions = typedMounter.GetBindOptions() - case VolumeMounter: - 
containerMount.VolumeOptions = typedMounter.GetVolumeOptions() - case TmpfsMounter: - containerMount.TmpfsOptions = typedMounter.GetTmpfsOptions() - } - - mounts = append(mounts, containerMount) - } - - return mounts -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/exec/processor.go b/vendor/github.com/testcontainers/testcontainers-go/exec/processor.go deleted file mode 100644 index 63987e461..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/exec/processor.go +++ /dev/null @@ -1,44 +0,0 @@ -package exec - -import ( - "bytes" - "io" - - "github.com/docker/docker/pkg/stdcopy" -) - -// ProcessOptions defines options applicable to the reader processor -type ProcessOptions struct { - Reader io.Reader -} - -// ProcessOption defines a common interface to modify the reader processor -// These options can be passed to the Exec function in a variadic way to customize the returned Reader instance -type ProcessOption interface { - Apply(opts *ProcessOptions) -} - -type ProcessOptionFunc func(opts *ProcessOptions) - -func (fn ProcessOptionFunc) Apply(opts *ProcessOptions) { - fn(opts) -} - -func Multiplexed() ProcessOption { - return ProcessOptionFunc(func(opts *ProcessOptions) { - done := make(chan struct{}) - - var outBuff bytes.Buffer - var errBuff bytes.Buffer - go func() { - if _, err := stdcopy.StdCopy(&outBuff, &errBuff, opts.Reader); err != nil { - return - } - close(done) - }() - - <-done - - opts.Reader = &outBuff - }) -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/file.go b/vendor/github.com/testcontainers/testcontainers-go/file.go deleted file mode 100644 index 509da643d..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/file.go +++ /dev/null @@ -1,141 +0,0 @@ -package testcontainers - -import ( - "archive/tar" - "bytes" - "compress/gzip" - "fmt" - "io" - "os" - "path/filepath" - "strings" -) - -func isDir(path string) (bool, error) { - file, err := os.Open(path) - if err != nil { - return false, err - } - defer file.Close() - - fileInfo, err := file.Stat() - if err != nil { - return false, err - } - - if fileInfo.IsDir() { - return true, nil - } - - return false, nil -} - -// tarDir compress a directory using tar + gzip algorithms -func tarDir(src string, fileMode int64) (*bytes.Buffer, error) { - // always pass src as absolute path - abs, err := filepath.Abs(src) - if err != nil { - return &bytes.Buffer{}, fmt.Errorf("error getting absolute path: %w", err) - } - src = abs - - buffer := &bytes.Buffer{} - - fmt.Printf(">> creating TAR file from directory: %s\n", src) - - // tar > gzip > buffer - zr := gzip.NewWriter(buffer) - tw := tar.NewWriter(zr) - - _, baseDir := filepath.Split(src) - // keep the path relative to the parent directory - index := strings.LastIndex(src, baseDir) - - // walk through every file in the folder - err = filepath.Walk(src, func(file string, fi os.FileInfo, errFn error) error { - if errFn != nil { - return fmt.Errorf("error traversing the file system: %w", errFn) - } - - // if a symlink, skip file - if fi.Mode().Type() == os.ModeSymlink { - fmt.Printf(">> skipping symlink: %s\n", file) - return nil - } - - // generate tar header - header, err := tar.FileInfoHeader(fi, file) - if err != nil { - return fmt.Errorf("error getting file info header: %w", err) - } - - // see https://pkg.go.dev/archive/tar#FileInfoHeader: - // Since fs.FileInfo's Name method only returns the base name of the file it describes, - // it may be necessary to modify Header.Name to provide the full path name of the file. 
- header.Name = filepath.ToSlash(file[index:]) - header.Mode = fileMode - - // write header - if err := tw.WriteHeader(header); err != nil { - return fmt.Errorf("error writing header: %w", err) - } - - // if not a dir, write file content - if !fi.IsDir() { - data, err := os.Open(file) - if err != nil { - return fmt.Errorf("error opening file: %w", err) - } - defer data.Close() - if _, err := io.Copy(tw, data); err != nil { - return fmt.Errorf("error compressing file: %w", err) - } - } - return nil - }) - if err != nil { - return buffer, err - } - - // produce tar - if err := tw.Close(); err != nil { - return buffer, fmt.Errorf("error closing tar file: %w", err) - } - // produce gzip - if err := zr.Close(); err != nil { - return buffer, fmt.Errorf("error closing gzip file: %w", err) - } - - return buffer, nil -} - -// tarFile compress a single file using tar + gzip algorithms -func tarFile(fileContent []byte, basePath string, fileMode int64) (*bytes.Buffer, error) { - buffer := &bytes.Buffer{} - - zr := gzip.NewWriter(buffer) - tw := tar.NewWriter(zr) - - hdr := &tar.Header{ - Name: filepath.Base(basePath), - Mode: fileMode, - Size: int64(len(fileContent)), - } - if err := tw.WriteHeader(hdr); err != nil { - return buffer, err - } - if _, err := tw.Write(fileContent); err != nil { - return buffer, err - } - - // produce tar - if err := tw.Close(); err != nil { - return buffer, fmt.Errorf("error closing tar file: %w", err) - } - // produce gzip - if err := zr.Close(); err != nil { - return buffer, fmt.Errorf("error closing gzip file: %w", err) - } - - return buffer, nil -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/generic.go b/vendor/github.com/testcontainers/testcontainers-go/generic.go deleted file mode 100644 index 86a280be3..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/generic.go +++ /dev/null @@ -1,157 +0,0 @@ -package testcontainers - -import ( - "context" - "errors" - "fmt" - "sync" - "time" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/network" - "github.com/imdario/mergo" - "github.com/testcontainers/testcontainers-go/wait" -) - -var ( - reuseContainerMx sync.Mutex - ErrReuseEmptyName = errors.New("with reuse option a container name mustn't be empty") -) - -// GenericContainerRequest represents parameters to a generic container -type GenericContainerRequest struct { - ContainerRequest // embedded request for provider - Started bool // whether to auto-start the container - ProviderType ProviderType // which provider to use, Docker if empty - Logger Logging // provide a container specific Logging - use default global logger if empty - Reuse bool // reuse an existing container if it exists or create a new one. a container name mustn't be empty -} - -// ContainerCustomizer is an interface that can be used to configure the Testcontainers container -// request. The passed request will be merged with the default one. -type ContainerCustomizer interface { - Customize(req *GenericContainerRequest) -} - -// CustomizeRequestOption is a type that can be used to configure the Testcontainers container request. -// The passed request will be merged with the default one. -type CustomizeRequestOption func(req *GenericContainerRequest) - -func (opt CustomizeRequestOption) Customize(req *GenericContainerRequest) { - opt(req) -} - -// CustomizeRequest returns a function that can be used to merge the passed container request with the one that is used by the container. -// Slices and Maps will be appended. 
-func CustomizeRequest(src GenericContainerRequest) CustomizeRequestOption { - return func(req *GenericContainerRequest) { - if err := mergo.Merge(req, &src, mergo.WithOverride, mergo.WithAppendSlice); err != nil { - fmt.Printf("error merging container request, keeping the original one. Error: %v", err) - return - } - } -} - -// WithImage sets the image for a container -func WithImage(image string) CustomizeRequestOption { - return func(req *GenericContainerRequest) { - req.Image = image - } -} - -// WithConfigModifier allows to override the default container config -func WithConfigModifier(modifier func(config *container.Config)) CustomizeRequestOption { - return func(req *GenericContainerRequest) { - req.ConfigModifier = modifier - } -} - -// WithEndpointSettingsModifier allows to override the default endpoint settings -func WithEndpointSettingsModifier(modifier func(settings map[string]*network.EndpointSettings)) CustomizeRequestOption { - return func(req *GenericContainerRequest) { - req.EnpointSettingsModifier = modifier - } -} - -// WithHostConfigModifier allows to override the default host config -func WithHostConfigModifier(modifier func(hostConfig *container.HostConfig)) CustomizeRequestOption { - return func(req *GenericContainerRequest) { - req.HostConfigModifier = modifier - } -} - -// WithWaitStrategy sets the wait strategy for a container, using 60 seconds as deadline -func WithWaitStrategy(strategies ...wait.Strategy) CustomizeRequestOption { - return WithWaitStrategyAndDeadline(60*time.Second, strategies...) -} - -// WithWaitStrategyAndDeadline sets the wait strategy for a container, including deadline -func WithWaitStrategyAndDeadline(deadline time.Duration, strategies ...wait.Strategy) CustomizeRequestOption { - return func(req *GenericContainerRequest) { - req.WaitingFor = wait.ForAll(strategies...).WithDeadline(deadline) - } -} - -// GenericNetworkRequest represents parameters to a generic network -type GenericNetworkRequest struct { - NetworkRequest // embedded request for provider - ProviderType ProviderType // which provider to use, Docker if empty -} - -// GenericNetwork creates a generic network with parameters -func GenericNetwork(ctx context.Context, req GenericNetworkRequest) (Network, error) { - provider, err := req.ProviderType.GetProvider() - if err != nil { - return nil, err - } - network, err := provider.CreateNetwork(ctx, req.NetworkRequest) - if err != nil { - return nil, fmt.Errorf("%w: failed to create network", err) - } - - return network, nil -} - -// GenericContainer creates a generic container with parameters -func GenericContainer(ctx context.Context, req GenericContainerRequest) (Container, error) { - if req.Reuse && req.Name == "" { - return nil, ErrReuseEmptyName - } - - logging := req.Logger - if logging == nil { - logging = Logger - } - provider, err := req.ProviderType.GetProvider(WithLogger(logging)) - if err != nil { - return nil, err - } - - var c Container - if req.Reuse { - // we must protect the reusability of the container in the case it's invoked - // in a parallel execution, via ParallelContainers or t.Parallel() - reuseContainerMx.Lock() - defer reuseContainerMx.Unlock() - - c, err = provider.ReuseOrCreateContainer(ctx, req.ContainerRequest) - } else { - c, err = provider.CreateContainer(ctx, req.ContainerRequest) - } - if err != nil { - return nil, fmt.Errorf("%w: failed to create container", err) - } - - if req.Started && !c.IsRunning() { - if err := c.Start(ctx); err != nil { - return c, fmt.Errorf("%w: failed to start 
container", err) - } - } - return c, nil -} - -// GenericProvider represents an abstraction for container and network providers -type GenericProvider interface { - ContainerProvider - NetworkProvider -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/config/config.go b/vendor/github.com/testcontainers/testcontainers-go/internal/config/config.go deleted file mode 100644 index 5460d6d67..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/internal/config/config.go +++ /dev/null @@ -1,100 +0,0 @@ -package config - -import ( - "fmt" - "os" - "path/filepath" - "strconv" - "sync" - - "github.com/magiconair/properties" -) - -var tcConfig Config -var tcConfigOnce *sync.Once = new(sync.Once) - -// Config represents the configuration for Testcontainers -// testcontainersConfig { -type Config struct { - Host string `properties:"docker.host,default="` - TLSVerify int `properties:"docker.tls.verify,default=0"` - CertPath string `properties:"docker.cert.path,default="` - RyukDisabled bool `properties:"ryuk.disabled,default=false"` - RyukPrivileged bool `properties:"ryuk.container.privileged,default=false"` - TestcontainersHost string `properties:"tc.host,default="` -} - -// } - -// Read reads from testcontainers properties file, if it exists -// it is possible that certain values get overridden when set as environment variables -func Read() Config { - tcConfigOnce.Do(func() { - tcConfig = read() - - if tcConfig.RyukDisabled { - ryukDisabledMessage := ` -********************************************************************************************** -Ryuk has been disabled for the current execution. This can cause unexpected behavior in your environment. -More on this: https://golang.testcontainers.org/features/garbage_collector/ -**********************************************************************************************` - fmt.Println(ryukDisabledMessage) - } - }) - - return tcConfig -} - -// Reset resets the singleton instance of the Config struct, -// allowing to read the configuration again. 
-// Handy for testing, so do not use it in production code -// This function is not thread-safe -func Reset() { - tcConfigOnce = new(sync.Once) -} - -func read() Config { - config := Config{} - - applyEnvironmentConfiguration := func(config Config) Config { - ryukDisabledEnv := os.Getenv("TESTCONTAINERS_RYUK_DISABLED") - if parseBool(ryukDisabledEnv) { - config.RyukDisabled = ryukDisabledEnv == "true" - } - - ryukPrivilegedEnv := os.Getenv("TESTCONTAINERS_RYUK_CONTAINER_PRIVILEGED") - if parseBool(ryukPrivilegedEnv) { - config.RyukPrivileged = ryukPrivilegedEnv == "true" - } - - return config - } - - home, err := os.UserHomeDir() - if err != nil { - return applyEnvironmentConfiguration(config) - } - - tcProp := filepath.Join(home, ".testcontainers.properties") - // init from a file - properties, err := properties.LoadFile(tcProp, properties.UTF8) - if err != nil { - return applyEnvironmentConfiguration(config) - } - - if err := properties.Decode(&config); err != nil { - fmt.Printf("invalid testcontainers properties file, returning an empty Testcontainers configuration: %v\n", err) - return applyEnvironmentConfiguration(config) - } - - fmt.Printf("Testcontainers properties file has been found: %s\n", tcProp) - - return applyEnvironmentConfiguration(config) -} - -func parseBool(input string) bool { - if _, err := strconv.ParseBool(input); err == nil { - return true - } - return false -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/client.go b/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/client.go deleted file mode 100644 index ec10d34d1..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/client.go +++ /dev/null @@ -1,65 +0,0 @@ -package testcontainersdocker - -import ( - "context" - "path/filepath" - - "github.com/docker/docker/client" - "github.com/testcontainers/testcontainers-go/internal/config" - "github.com/testcontainers/testcontainers-go/internal/testcontainerssession" -) - -// NewClient returns a new docker client extracting the docker host from the different alternatives -func NewClient(ctx context.Context, ops ...client.Opt) (*client.Client, error) { - tcConfig := config.Read() - - dockerHost := ExtractDockerHost(ctx) - - opts := []client.Opt{client.FromEnv, client.WithAPIVersionNegotiation()} - if dockerHost != "" { - opts = append(opts, client.WithHost(dockerHost)) - - // for further information, read https://docs.docker.com/engine/security/protect-access/ - if tcConfig.TLSVerify == 1 { - cacertPath := filepath.Join(tcConfig.CertPath, "ca.pem") - certPath := filepath.Join(tcConfig.CertPath, "cert.pem") - keyPath := filepath.Join(tcConfig.CertPath, "key.pem") - - opts = append(opts, client.WithTLSClientConfig(cacertPath, certPath, keyPath)) - } - } - - opts = append(opts, client.WithHTTPHeaders( - map[string]string{ - "x-tc-sid": testcontainerssession.String(), - }), - ) - - // passed options have priority over the default ones - opts = append(opts, ops...) - - cli, err := client.NewClientWithOpts(opts...) - if err != nil { - return nil, err - } - - if _, err = cli.Ping(context.Background()); err != nil { - // Fallback to environment, including the original options - cli, err = defaultClient(context.Background(), ops...) 
- if err != nil { - return nil, err - } - } - defer cli.Close() - - return cli, nil -} - -// defaultClient returns a plain, new docker client with the default options -func defaultClient(ctx context.Context, ops ...client.Opt) (*client.Client, error) { - if len(ops) == 0 { - ops = []client.Opt{client.FromEnv, client.WithAPIVersionNegotiation()} - } - - return client.NewClientWithOpts(ops...) -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/docker_host.go b/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/docker_host.go deleted file mode 100644 index c625c6cd5..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/docker_host.go +++ /dev/null @@ -1,264 +0,0 @@ -package testcontainersdocker - -import ( - "context" - "errors" - "fmt" - "os" - "os/exec" - "strings" - "sync" - - "github.com/docker/docker/client" - "github.com/testcontainers/testcontainers-go/internal/config" -) - -type dockerHostContext string - -var DockerHostContextKey = dockerHostContext("docker_host") - -var ( - ErrDockerHostNotSet = errors.New("DOCKER_HOST is not set") - ErrDockerSocketOverrideNotSet = errors.New("TESTCONTAINERS_DOCKER_SOCKET_OVERRIDE is not set") - ErrDockerSocketNotSetInContext = errors.New("socket not set in context") - ErrDockerSocketNotSetInProperties = errors.New("socket not set in ~/.testcontainers.properties") - ErrNoUnixSchema = errors.New("URL schema is not unix") - ErrSocketNotFound = errors.New("socket not found") - ErrSocketNotFoundInPath = errors.New("docker socket not found in " + DockerSocketPath) - // ErrTestcontainersHostNotSetInProperties this error is specific to Testcontainers - ErrTestcontainersHostNotSetInProperties = errors.New("tc.host not set in ~/.testcontainers.properties") -) - -var dockerHostCache string -var dockerHostOnce sync.Once - -var dockerSocketPathCache string -var dockerSocketPathOnce sync.Once - -// deprecated -// see https://github.com/testcontainers/testcontainers-java/blob/main/core/src/main/java/org/testcontainers/dockerclient/DockerClientConfigUtils.java#L46 -func DefaultGatewayIP() (string, error) { - // see https://github.com/testcontainers/testcontainers-java/blob/3ad8d80e2484864e554744a4800a81f6b7982168/core/src/main/java/org/testcontainers/dockerclient/DockerClientConfigUtils.java#L27 - cmd := exec.Command("sh", "-c", "ip route|awk '/default/ { print $3 }'") - stdout, err := cmd.Output() - if err != nil { - return "", errors.New("failed to detect docker host") - } - ip := strings.TrimSpace(string(stdout)) - if len(ip) == 0 { - return "", errors.New("failed to parse default gateway IP") - } - return ip, nil -} - -// ExtractDockerHost Extracts the docker host from the different alternatives, caching the result to avoid unnecessary -// calculations. Use this function to get the actual Docker host. This function does not consider Windows containers at the moment. -// The possible alternatives are: -// -// 1. Docker host from the "tc.host" property in the ~/.testcontainers.properties file. -// 2. DOCKER_HOST environment variable. -// 3. Docker host from context. -// 4. Docker host from the default docker socket path, without the unix schema. -// 5. Docker host from the "docker.host" property in the ~/.testcontainers.properties file. -// 6. Rootless docker socket path. -// 7. Else, the default Docker socket including schema will be returned. 
-func ExtractDockerHost(ctx context.Context) string { - dockerHostOnce.Do(func() { - dockerHostCache = extractDockerHost(ctx) - }) - - return dockerHostCache -} - -// ExtractDockerSocket Extracts the docker socket from the different alternatives, removing the socket schema and -// caching the result to avoid unnecessary calculations. Use this function to get the docker socket path, -// not the host (e.g. mounting the socket in a container). This function does not consider Windows containers at the moment. -// The possible alternatives are: -// -// 1. Docker host from the "tc.host" property in the ~/.testcontainers.properties file. -// 2. The TESTCONTAINERS_DOCKER_SOCKET_OVERRIDE environment variable. -// 3. Using a Docker client, check if the Info().OperativeSystem is "Docker Desktop" and return the default docker socket path for rootless docker. -// 4. Else, Get the current Docker Host from the existing strategies: see ExtractDockerHost. -// 5. If the socket contains the unix schema, the schema is removed (e.g. unix:///var/run/docker.sock -> /var/run/docker.sock) -// 6. Else, the default location of the docker socket is used (/var/run/docker.sock) -// -// In any case, if the docker socket schema is "tcp://", the default docker socket path will be returned. -func ExtractDockerSocket(ctx context.Context) string { - dockerSocketPathOnce.Do(func() { - dockerSocketPathCache = extractDockerSocket(ctx) - }) - - return dockerSocketPathCache -} - -// extractDockerHost Extracts the docker host from the different alternatives, without caching the result. -// This internal method is handy for testing purposes. -func extractDockerHost(ctx context.Context) string { - dockerHostFns := []func(context.Context) (string, error){ - testcontainersHostFromProperties, - dockerHostFromEnv, - dockerHostFromContext, - dockerSocketPath, - dockerHostFromProperties, - rootlessDockerSocketPath, - } - - outerErr := ErrSocketNotFound - for _, dockerHostFn := range dockerHostFns { - dockerHost, err := dockerHostFn(ctx) - if err != nil { - outerErr = fmt.Errorf("%w: %v", outerErr, err) - continue - } - - return dockerHost - } - - // We are not supporting Windows containers at the moment - return DockerSocketPathWithSchema -} - -// extractDockerHost Extracts the docker socket from the different alternatives, without caching the result. -// It will internally use the default Docker client, calling the internal method extractDockerSocketFromClient with it. -// This internal method is handy for testing purposes. -// If a Docker client cannot be created, the program will panic. -func extractDockerSocket(ctx context.Context) string { - cli, err := NewClient(ctx) - if err != nil { - panic(err) // a Docker client is required to get the Docker info - } - - return extractDockerSocketFromClient(ctx, cli) -} - -// extractDockerSocketFromClient Extracts the docker socket from the different alternatives, without caching the result, -// and receiving an instance of the Docker API client interface. -// This internal method is handy for testing purposes, passing a mock type simulating the desired behaviour. 
-func extractDockerSocketFromClient(ctx context.Context, cli client.APIClient) string { - // check that the socket is not a tcp or unix socket - checkDockerSocketFn := func(socket string) string { - // this use case will cover the case when the docker host is a tcp socket - if strings.HasPrefix(socket, TCPSchema) { - return DockerSocketPath - } - - if strings.HasPrefix(socket, DockerSocketSchema) { - return strings.Replace(socket, DockerSocketSchema, "", 1) - } - - return socket - } - - tcHost, err := testcontainersHostFromProperties(ctx) - if err == nil { - return checkDockerSocketFn(tcHost) - } - - testcontainersDockerSocket, err := dockerSocketOverridePath(ctx) - if err == nil { - return checkDockerSocketFn(testcontainersDockerSocket) - } - - info, err := cli.Info(ctx) - if err != nil { - panic(err) // Docker Info is required to get the Operating System - } - - // Because Docker Desktop runs in a VM, we need to use the default docker path for rootless docker - if info.OperatingSystem == "Docker Desktop" { - return DockerSocketPath - } - - dockerHost := extractDockerHost(ctx) - - return checkDockerSocketFn(dockerHost) -} - -// dockerHostFromEnv returns the docker host from the DOCKER_HOST environment variable, if it's not empty -func dockerHostFromEnv(ctx context.Context) (string, error) { - if dockerHostPath := os.Getenv("DOCKER_HOST"); dockerHostPath != "" { - return dockerHostPath, nil - } - - return "", ErrDockerHostNotSet -} - -// dockerHostFromContext returns the docker host from the Go context, if it's not empty -func dockerHostFromContext(ctx context.Context) (string, error) { - if socketPath, ok := ctx.Value(DockerHostContextKey).(string); ok && socketPath != "" { - parsed, err := parseURL(socketPath) - if err != nil { - return "", err - } - - return parsed, nil - } - - return "", ErrDockerSocketNotSetInContext -} - -// dockerHostFromProperties returns the docker host from the ~/.testcontainers.properties file, if it's not empty -func dockerHostFromProperties(ctx context.Context) (string, error) { - cfg := config.Read() - socketPath := cfg.Host - if socketPath != "" { - parsed, err := parseURL(socketPath) - if err != nil { - return "", err - } - - return parsed, nil - } - - return "", ErrDockerSocketNotSetInProperties -} - -// dockerSocketOverridePath returns the docker socket from the TESTCONTAINERS_DOCKER_SOCKET_OVERRIDE environment variable, -// if it's not empty -func dockerSocketOverridePath(ctx context.Context) (string, error) { - if dockerHostPath, exists := os.LookupEnv("TESTCONTAINERS_DOCKER_SOCKET_OVERRIDE"); exists { - return dockerHostPath, nil - } - - return "", ErrDockerSocketOverrideNotSet -} - -// dockerSocketPath returns the docker socket from the default docker socket path, if it's not empty -// and the socket exists -func dockerSocketPath(ctx context.Context) (string, error) { - if fileExists(DockerSocketPath) { - return DockerSocketPathWithSchema, nil - } - - return "", ErrSocketNotFoundInPath -} - -// testcontainersHostFromProperties returns the testcontainers host from the ~/.testcontainers.properties file, if it's not empty -func testcontainersHostFromProperties(ctx context.Context) (string, error) { - cfg := config.Read() - testcontainersHost := cfg.TestcontainersHost - if testcontainersHost != "" { - parsed, err := parseURL(testcontainersHost) - if err != nil { - return "", err - } - - return parsed, nil - } - - return "", ErrTestcontainersHostNotSetInProperties -} - -// InAContainer returns true if the code is running inside a container -// See 
https://github.com/docker/docker/blob/a9fa38b1edf30b23cae3eade0be48b3d4b1de14b/daemon/initlayer/setup_unix.go#L25 -func InAContainer() bool { - return inAContainer("/.dockerenv") -} - -func inAContainer(path string) bool { - // see https://github.com/testcontainers/testcontainers-java/blob/3ad8d80e2484864e554744a4800a81f6b7982168/core/src/main/java/org/testcontainers/dockerclient/DockerClientConfigUtils.java#L15 - if _, err := os.Stat(path); err == nil { - return true - } - return false -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/docker_rootless.go b/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/docker_rootless.go deleted file mode 100644 index 43ff4637d..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/docker_rootless.go +++ /dev/null @@ -1,141 +0,0 @@ -package testcontainersdocker - -import ( - "context" - "errors" - "fmt" - "net/url" - "os" - "path/filepath" - "runtime" -) - -var ( - ErrRootlessDockerNotFound = errors.New("rootless Docker not found") - ErrRootlessDockerNotFoundHomeDesktopDir = errors.New("checked path: ~/.docker/desktop/docker.sock") - ErrRootlessDockerNotFoundHomeRunDir = errors.New("checked path: ~/.docker/run/docker.sock") - ErrRootlessDockerNotFoundRunDir = errors.New("checked path: /run/user/${uid}/docker.sock") - ErrRootlessDockerNotFoundXDGRuntimeDir = errors.New("checked path: $XDG_RUNTIME_DIR") - ErrRootlessDockerNotSupportedWindows = errors.New("rootless Docker is not supported on Windows") - ErrXDGRuntimeDirNotSet = errors.New("XDG_RUNTIME_DIR is not set") -) - -// baseRunDir is the base directory for the "/run/user/${uid}" directory. -// It is a variable so it can be modified for testing. -var baseRunDir = "/run" - -// rootlessDockerSocketPath returns if the path to the rootless Docker socket exists. -// The rootless socket path is determined by the following order: -// -// 1. XDG_RUNTIME_DIR environment variable. -// 2. ~/.docker/run/docker.sock file. -// 3. ~/.docker/desktop/docker.sock file. -// 4. /run/user/${uid}/docker.sock file. -// 5. Else, return ErrRootlessDockerNotFound, wrapping secific errors for each of the above paths. -// -// It should include the Docker socket schema (unix://) in the returned path. 
-func rootlessDockerSocketPath(_ context.Context) (string, error) { - // adding a manner to test it on non-windows machines, setting the GOOS env var to windows - // This is needed because runtime.GOOS is a constant that returns the OS of the machine running the test - if os.Getenv("GOOS") == "windows" || runtime.GOOS == "windows" { - return "", ErrRootlessDockerNotSupportedWindows - } - - socketPathFns := []func() (string, error){ - rootlessSocketPathFromEnv, - rootlessSocketPathFromHomeRunDir, - rootlessSocketPathFromHomeDesktopDir, - rootlessSocketPathFromRunDir, - } - - outerErr := ErrRootlessDockerNotFound - for _, socketPathFn := range socketPathFns { - s, err := socketPathFn() - if err != nil { - outerErr = fmt.Errorf("%w: %v", outerErr, err) - continue - } - - return DockerSocketSchema + s, nil - } - - return "", outerErr -} - -func fileExists(f string) bool { - _, err := os.Stat(f) - return err == nil -} - -func parseURL(s string) (string, error) { - var hostURL *url.URL - if u, err := url.Parse(s); err != nil { - return "", err - } else { - hostURL = u - } - - switch hostURL.Scheme { - case "unix", "npipe": - return hostURL.Path, nil - case "tcp": - // return the original URL, as it is a valid TCP URL - return s, nil - default: - return "", ErrNoUnixSchema - } -} - -// rootlessSocketPathFromEnv returns the path to the rootless Docker socket from the XDG_RUNTIME_DIR environment variable. -// It should include the Docker socket schema (unix://) in the returned path. -func rootlessSocketPathFromEnv() (string, error) { - xdgRuntimeDir, exists := os.LookupEnv("XDG_RUNTIME_DIR") - if exists { - f := filepath.Join(xdgRuntimeDir, "docker.sock") - if fileExists(f) { - return f, nil - } - - return "", ErrRootlessDockerNotFoundXDGRuntimeDir - } - - return "", ErrXDGRuntimeDirNotSet -} - -// rootlessSocketPathFromHomeRunDir returns the path to the rootless Docker socket from the ~/.docker/run/docker.sock file. -func rootlessSocketPathFromHomeRunDir() (string, error) { - home, err := os.UserHomeDir() - if err != nil { - return "", err - } - - f := filepath.Join(home, ".docker", "run", "docker.sock") - if fileExists(f) { - return f, nil - } - return "", ErrRootlessDockerNotFoundHomeRunDir -} - -// rootlessSocketPathFromHomeDesktopDir returns the path to the rootless Docker socket from the ~/.docker/desktop/docker.sock file. -func rootlessSocketPathFromHomeDesktopDir() (string, error) { - home, err := os.UserHomeDir() - if err != nil { - return "", err - } - - f := filepath.Join(home, ".docker", "desktop", "docker.sock") - if fileExists(f) { - return f, nil - } - return "", ErrRootlessDockerNotFoundHomeDesktopDir -} - -// rootlessSocketPathFromRunDir returns the path to the rootless Docker socket from the /run/user//docker.sock file. 
-func rootlessSocketPathFromRunDir() (string, error) { - uid := os.Getuid() - f := filepath.Join(baseRunDir, "user", fmt.Sprintf("%d", uid), "docker.sock") - if fileExists(f) { - return f, nil - } - return "", ErrRootlessDockerNotFoundRunDir -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/docker_socket.go b/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/docker_socket.go deleted file mode 100644 index 42414e94b..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/docker_socket.go +++ /dev/null @@ -1,46 +0,0 @@ -package testcontainersdocker - -import ( - "net/url" - "strings" - - "github.com/docker/docker/client" -) - -// DockerSocketSchema is the unix schema. -var DockerSocketSchema = "unix://" - -// DockerSocketPath is the path to the docker socket under unix systems. -var DockerSocketPath = "/var/run/docker.sock" - -// DockerSocketPathWithSchema is the path to the docker socket under unix systems with the unix schema. -var DockerSocketPathWithSchema = DockerSocketSchema + DockerSocketPath - -// TCPSchema is the tcp schema. -var TCPSchema = "tcp://" - -func init() { - const DefaultDockerHost = client.DefaultDockerHost - - u, err := url.Parse(DefaultDockerHost) - if err != nil { - // unsupported default host specified by the docker client package, - // so revert to the default unix docker socket path - return - } - - switch u.Scheme { - case "unix", "npipe": - DockerSocketSchema = u.Scheme + "://" - DockerSocketPath = u.Path - if !strings.HasPrefix(DockerSocketPath, "/") { - // seeing as the code in this module depends on DockerSocketPath having - // a slash (`/`) prefix, we add it here if it is missing. - // for the known environments, we do not foresee how the socket-path - // should miss the slash, however this extra if-condition is worth to - // save future pain from innocent users. 
- DockerSocketPath = "/" + DockerSocketPath - } - DockerSocketPathWithSchema = DockerSocketSchema + DockerSocketPath - } -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/images.go b/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/images.go deleted file mode 100644 index 3dc10c2a4..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/images.go +++ /dev/null @@ -1,126 +0,0 @@ -package testcontainersdocker - -import ( - "bufio" - "net/url" - "os" - "regexp" - "strings" - "unicode/utf8" -) - -const ( - IndexDockerIO = "https://index.docker.io/v1/" - maxURLRuneCount = 2083 - minURLRuneCount = 3 - URLSchema = `((ftp|tcp|udp|wss?|https?):\/\/)` - URLUsername = `(\S+(:\S*)?@)` - URLIP = `([1-9]\d?|1\d\d|2[01]\d|22[0-3]|24\d|25[0-5])(\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-5]))` - IP = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))` - URLSubdomain = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))` - URLPath = `((\/|\?|#)[^\s]*)` - URLPort = `(:(\d{1,5}))` - URL = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$` -) - -var rxURL = regexp.MustCompile(URL) - -func ExtractImagesFromDockerfile(dockerfile string, buildArgs map[string]*string) ([]string, error) { - var images []string - - file, err := os.Open(dockerfile) - if err != nil { - return nil, err - } - defer file.Close() - - var lines []string - scanner := bufio.NewScanner(file) - for scanner.Scan() { - lines = append(lines, scanner.Text()) - } - if scanner.Err() != nil { - return nil, scanner.Err() - } - - // extract images from dockerfile - for _, line := range lines { - line = strings.TrimSpace(line) - if !strings.HasPrefix(strings.ToUpper(line), "FROM") { - continue - } - - // remove FROM - line = strings.TrimPrefix(line, "FROM") - parts := strings.Split(strings.TrimSpace(line), " ") - if len(parts) == 0 { - continue - } - - // interpolate build args - for k, v := range buildArgs { - if v != nil { - parts[0] = strings.Replace(parts[0], "${"+k+"}", *v, -1) - } - } - images = append(images, parts[0]) - } - - return images, nil -} - -// ExtractRegistry extracts the registry from the image name, using a regular expression to extract the registry from the image name. 
-// regular expression to extract the registry from the image name
-// the regular expression is based on the grammar defined in
-// - image:tag
-// - image
-// - repository/image:tag
-// - repository/image
-// - registry/image:tag
-// - registry/image
-// - registry/repository/image:tag
-// - registry/repository/image
-// - registry:port/repository/image:tag
-// - registry:port/repository/image
-// - registry:port/image:tag
-// - registry:port/image
-// Once extracted the registry, it is validated to check if it is a valid URL or an IP address.
-func ExtractRegistry(image string, fallback string) string {
-	exp := regexp.MustCompile(`^(?:(?P<registry>(https?://)?[^/]+)(?::(?P<port>\d+))?/)?(?:(?P<repository>[^/]+)/)?(?P<image>[^:]+)(?::(?P<tag>.+))?$`).FindStringSubmatch(image)
-	if len(exp) == 0 {
-		return ""
-	}
-
-	registry := exp[1]
-
-	if IsURL(registry) {
-		return registry
-	}
-
-	return fallback
-}
-
-// IsURL checks if the string is an URL.
-// Extracted from https://github.com/asaskevich/govalidator/blob/f21760c49a8d/validator.go#L104
-func IsURL(str string) bool {
-	if str == "" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, ".") {
-		return false
-	}
-	strTemp := str
-	if strings.Contains(str, ":") && !strings.Contains(str, "://") {
-		// support no indicated urlscheme but with colon for port number
-		// http:// is appended so url.Parse will succeed, strTemp used so it does not impact rxURL.MatchString
-		strTemp = "http://" + str
-	}
-	u, err := url.Parse(strTemp)
-	if err != nil {
-		return false
-	}
-	if strings.HasPrefix(u.Host, ".") {
-		return false
-	}
-	if u.Host == "" && (u.Path != "" && !strings.Contains(u.Path, ".")) {
-		return false
-	}
-	return rxURL.MatchString(str)
-}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/labels.go b/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/labels.go
deleted file mode 100644
index 12742769b..000000000
--- a/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainersdocker/labels.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package testcontainersdocker
-
-const (
-	LabelBase      = "org.testcontainers"
-	LabelLang      = LabelBase + ".lang"
-	LabelReaper    = LabelBase + ".reaper"
-	LabelSessionID = LabelBase + ".sessionId"
-	LabelVersion   = LabelBase + ".version"
-)
diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainerssession/session.go b/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainerssession/session.go
deleted file mode 100644
index 21bd1f980..000000000
--- a/vendor/github.com/testcontainers/testcontainers-go/internal/testcontainerssession/session.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package testcontainerssession
-
-import (
-	"sync"
-
-	"github.com/google/uuid"
-)
-
-var id uuid.UUID
-var idOnce sync.Once
-
-func ID() uuid.UUID {
-	idOnce.Do(func() {
-		id = uuid.New()
-	})
-
-	return id
-}
-
-func String() string {
-	return ID().String()
-}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/version.go b/vendor/github.com/testcontainers/testcontainers-go/internal/version.go
deleted file mode 100644
index 1177c175a..000000000
--- a/vendor/github.com/testcontainers/testcontainers-go/internal/version.go
+++ /dev/null
@@ -1,4 +0,0 @@
-package internal
-
-// Version is the next development version of the application
-const Version = "0.21.0"
diff --git a/vendor/github.com/testcontainers/testcontainers-go/lifecycle.go
b/vendor/github.com/testcontainers/testcontainers-go/lifecycle.go deleted file mode 100644 index 55dd48cf7..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/lifecycle.go +++ /dev/null @@ -1,379 +0,0 @@ -package testcontainers - -import ( - "context" - "io" - "strings" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/network" - "github.com/docker/go-connections/nat" - "golang.org/x/exp/slices" -) - -// ContainerRequestHook is a hook that will be called before a container is created. -// It can be used to modify container configuration before it is created, -// using the different lifecycle hooks that are available: -// - Creating -// For that, it will receive a ContainerRequest, modify it and return an error if needed. -type ContainerRequestHook func(ctx context.Context, req ContainerRequest) error - -// ContainerHook is a hook that will be called after a container is created -// It can be used to modify the state of the container after it is created, -// using the different lifecycle hooks that are available: -// - Created -// - Starting -// - Started -// - Stopping -// - Stopped -// - Terminating -// - Terminated -// For that, it will receive a Container, modify it and return an error if needed. -type ContainerHook func(ctx context.Context, container Container) error - -// ContainerLifecycleHooks is a struct that contains all the hooks that can be used -// to modify the container lifecycle. All the container lifecycle hooks except the PreCreates hooks -// will be passed to the container once it's created -type ContainerLifecycleHooks struct { - PreCreates []ContainerRequestHook - PostCreates []ContainerHook - PreStarts []ContainerHook - PostStarts []ContainerHook - PreStops []ContainerHook - PostStops []ContainerHook - PreTerminates []ContainerHook - PostTerminates []ContainerHook -} - -var DefaultLoggingHook = func(logger Logging) ContainerLifecycleHooks { - shortContainerID := func(c Container) string { - return c.GetContainerID()[:12] - } - - return ContainerLifecycleHooks{ - PreCreates: []ContainerRequestHook{ - func(ctx context.Context, req ContainerRequest) error { - logger.Printf("🐳 Creating container for image %s", req.Image) - return nil - }, - }, - PostCreates: []ContainerHook{ - func(ctx context.Context, c Container) error { - logger.Printf("✅ Container created: %s", shortContainerID(c)) - return nil - }, - }, - PreStarts: []ContainerHook{ - func(ctx context.Context, c Container) error { - logger.Printf("🐳 Starting container: %s", shortContainerID(c)) - return nil - }, - }, - PostStarts: []ContainerHook{ - func(ctx context.Context, c Container) error { - logger.Printf("✅ Container started: %s", shortContainerID(c)) - return nil - }, - }, - PreStops: []ContainerHook{ - func(ctx context.Context, c Container) error { - logger.Printf("🐳 Stopping container: %s", shortContainerID(c)) - return nil - }, - }, - PostStops: []ContainerHook{ - func(ctx context.Context, c Container) error { - logger.Printf("✋ Container stopped: %s", shortContainerID(c)) - return nil - }, - }, - PreTerminates: []ContainerHook{ - func(ctx context.Context, c Container) error { - logger.Printf("🐳 Terminating container: %s", shortContainerID(c)) - return nil - }, - }, - PostTerminates: []ContainerHook{ - func(ctx context.Context, c Container) error { - logger.Printf("🚫 Container terminated: %s", shortContainerID(c)) - return nil - }, - }, - } -} - -// creatingHook is a hook that will be called before a container is created. 
-func (req ContainerRequest) creatingHook(ctx context.Context) error { - for _, lifecycleHooks := range req.LifecycleHooks { - err := lifecycleHooks.Creating(ctx)(req) - if err != nil { - return err - } - } - - return nil -} - -// createdHook is a hook that will be called after a container is created -func (c *DockerContainer) createdHook(ctx context.Context) error { - for _, lifecycleHooks := range c.lifecycleHooks { - err := containerHookFn(ctx, lifecycleHooks.PostCreates)(c) - if err != nil { - return err - } - } - - return nil -} - -// startingHook is a hook that will be called before a container is started -func (c *DockerContainer) startingHook(ctx context.Context) error { - for _, lifecycleHooks := range c.lifecycleHooks { - err := containerHookFn(ctx, lifecycleHooks.PreStarts)(c) - if err != nil { - c.printLogs(ctx) - return err - } - } - - return nil -} - -// startedHook is a hook that will be called after a container is started -func (c *DockerContainer) startedHook(ctx context.Context) error { - for _, lifecycleHooks := range c.lifecycleHooks { - err := containerHookFn(ctx, lifecycleHooks.PostStarts)(c) - if err != nil { - c.printLogs(ctx) - return err - } - } - - return nil -} - -// printLogs is a helper function that will print the logs of a Docker container -// We are going to use this helper function to inform the user of the logs when an error occurs -func (c *DockerContainer) printLogs(ctx context.Context) { - reader, err := c.Logs(ctx) - if err != nil { - c.logger.Printf("failed accessing container logs: %w\n", err) - return - } - - b, err := io.ReadAll(reader) - if err != nil { - c.logger.Printf("failed reading container logs: %w\n", err) - return - } - - c.logger.Printf("container logs:\n%s", b) -} - -// stoppingHook is a hook that will be called before a container is stopped -func (c *DockerContainer) stoppingHook(ctx context.Context) error { - for _, lifecycleHooks := range c.lifecycleHooks { - err := containerHookFn(ctx, lifecycleHooks.PreStops)(c) - if err != nil { - return err - } - } - - return nil -} - -// stoppedHook is a hook that will be called after a container is stopped -func (c *DockerContainer) stoppedHook(ctx context.Context) error { - for _, lifecycleHooks := range c.lifecycleHooks { - err := containerHookFn(ctx, lifecycleHooks.PostStops)(c) - if err != nil { - return err - } - } - - return nil -} - -// terminatingHook is a hook that will be called before a container is terminated -func (c *DockerContainer) terminatingHook(ctx context.Context) error { - for _, lifecycleHooks := range c.lifecycleHooks { - err := containerHookFn(ctx, lifecycleHooks.PreTerminates)(c) - if err != nil { - return err - } - } - - return nil -} - -// terminatedHook is a hook that will be called after a container is terminated -func (c *DockerContainer) terminatedHook(ctx context.Context) error { - for _, lifecycleHooks := range c.lifecycleHooks { - err := containerHookFn(ctx, lifecycleHooks.PostTerminates)(c) - if err != nil { - return err - } - } - - return nil -} - -// Creating is a hook that will be called before a container is created. -func (c ContainerLifecycleHooks) Creating(ctx context.Context) func(req ContainerRequest) error { - return func(req ContainerRequest) error { - for _, hook := range c.PreCreates { - if err := hook(ctx, req); err != nil { - return err - } - } - - return nil - } -} - -// containerHookFn is a helper function that will create a function to be returned by all the different -// container lifecycle hooks. 
The created function will iterate over all the hooks and call them one by one. -func containerHookFn(ctx context.Context, containerHook []ContainerHook) func(container Container) error { - return func(container Container) error { - for _, hook := range containerHook { - if err := hook(ctx, container); err != nil { - return err - } - } - - return nil - } -} - -// Created is a hook that will be called after a container is created -func (c ContainerLifecycleHooks) Created(ctx context.Context) func(container Container) error { - return containerHookFn(ctx, c.PostCreates) -} - -// Starting is a hook that will be called before a container is started -func (c ContainerLifecycleHooks) Starting(ctx context.Context) func(container Container) error { - return containerHookFn(ctx, c.PreStarts) -} - -// Started is a hook that will be called after a container is started -func (c ContainerLifecycleHooks) Started(ctx context.Context) func(container Container) error { - return containerHookFn(ctx, c.PostStarts) -} - -// Stopping is a hook that will be called before a container is stopped -func (c ContainerLifecycleHooks) Stopping(ctx context.Context) func(container Container) error { - return containerHookFn(ctx, c.PreStops) -} - -// Stopped is a hook that will be called after a container is stopped -func (c ContainerLifecycleHooks) Stopped(ctx context.Context) func(container Container) error { - return containerHookFn(ctx, c.PostStops) -} - -// Terminating is a hook that will be called before a container is terminated -func (c ContainerLifecycleHooks) Terminating(ctx context.Context) func(container Container) error { - return containerHookFn(ctx, c.PreTerminates) -} - -// Terminated is a hook that will be called after a container is terminated -func (c ContainerLifecycleHooks) Terminated(ctx context.Context) func(container Container) error { - return containerHookFn(ctx, c.PostTerminates) -} - -func (p *DockerProvider) preCreateContainerHook(ctx context.Context, req ContainerRequest, dockerInput *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig) error { - // prepare mounts - hostConfig.Mounts = mapToDockerMounts(req.Mounts) - - endpointSettings := map[string]*network.EndpointSettings{} - - // #248: Docker allows only one network to be specified during container creation - // If there is more than one network specified in the request container should be attached to them - // once it is created. 
We will take a first network if any specified in the request and use it to create container - if len(req.Networks) > 0 { - attachContainerTo := req.Networks[0] - - nw, err := p.GetNetwork(ctx, NetworkRequest{ - Name: attachContainerTo, - }) - if err == nil { - aliases := []string{} - if _, ok := req.NetworkAliases[attachContainerTo]; ok { - aliases = req.NetworkAliases[attachContainerTo] - } - endpointSetting := network.EndpointSettings{ - Aliases: aliases, - NetworkID: nw.ID, - } - endpointSettings[attachContainerTo] = &endpointSetting - } - } - - if req.ConfigModifier != nil { - req.ConfigModifier(dockerInput) - } - - if req.HostConfigModifier == nil { - req.HostConfigModifier = defaultHostConfigModifier(req) - } - req.HostConfigModifier(hostConfig) - - if req.EnpointSettingsModifier != nil { - req.EnpointSettingsModifier(endpointSettings) - } - - networkingConfig.EndpointsConfig = endpointSettings - - exposedPorts := req.ExposedPorts - // this check must be done after the pre-creation Modifiers are called, so the network mode is already set - if len(exposedPorts) == 0 && !hostConfig.NetworkMode.IsContainer() { - image, _, err := p.client.ImageInspectWithRaw(ctx, dockerInput.Image) - if err != nil { - return err - } - for p := range image.ContainerConfig.ExposedPorts { - exposedPorts = append(exposedPorts, string(p)) - } - } - - exposedPortSet, exposedPortMap, err := nat.ParsePortSpecs(exposedPorts) - if err != nil { - return err - } - - dockerInput.ExposedPorts = exposedPortSet - - // only exposing those ports automatically if the container request exposes zero ports and the container does not run in a container network - if len(exposedPorts) == 0 && !hostConfig.NetworkMode.IsContainer() { - hostConfig.PortBindings = exposedPortMap - } else { - hostConfig.PortBindings = mergePortBindings(hostConfig.PortBindings, exposedPortMap, req.ExposedPorts) - } - - return nil -} - -func mergePortBindings(configPortMap, exposedPortMap nat.PortMap, exposedPorts []string) nat.PortMap { - if exposedPortMap == nil { - exposedPortMap = make(map[nat.Port][]nat.PortBinding) - } - - for k, v := range configPortMap { - if slices.Contains(exposedPorts, strings.Split(string(k), "/")[0]) { - exposedPortMap[k] = v - } - } - return exposedPortMap -} - -// defaultHostConfigModifier provides a default modifier including the deprecated fields -func defaultHostConfigModifier(req ContainerRequest) func(hostConfig *container.HostConfig) { - return func(hostConfig *container.HostConfig) { - hostConfig.AutoRemove = req.AutoRemove - hostConfig.CapAdd = req.CapAdd - hostConfig.CapDrop = req.CapDrop - hostConfig.Binds = req.Binds - hostConfig.ExtraHosts = req.ExtraHosts - hostConfig.NetworkMode = req.NetworkMode - hostConfig.Resources = req.Resources - } -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/logconsumer.go b/vendor/github.com/testcontainers/testcontainers-go/logconsumer.go deleted file mode 100644 index c5a2e29cf..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/logconsumer.go +++ /dev/null @@ -1,22 +0,0 @@ -package testcontainers - -// StdoutLog is the log type for STDOUT -const StdoutLog = "STDOUT" - -// StderrLog is the log type for STDERR -const StderrLog = "STDERR" - -// Log represents a message that was created by a process, -// LogType is either "STDOUT" or "STDERR", -// Content is the byte contents of the message itself -type Log struct { - LogType string - Content []byte -} - -// LogConsumer represents any object that can -// handle a Log, it is up to the LogConsumer 
instance -// what to do with the log -type LogConsumer interface { - Accept(Log) -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/logger.go b/vendor/github.com/testcontainers/testcontainers-go/logger.go deleted file mode 100644 index 002ab19bb..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/logger.go +++ /dev/null @@ -1,75 +0,0 @@ -package testcontainers - -import ( - "context" - "log" - "os" - "testing" - - "github.com/docker/docker/client" -) - -// Logger is the default log instance -var Logger Logging = log.New(os.Stderr, "", log.LstdFlags) - -// Logging defines the Logger interface -type Logging interface { - Printf(format string, v ...interface{}) -} - -// LogDockerServerInfo logs the docker server info using the provided logger and Docker client -func LogDockerServerInfo(ctx context.Context, client client.APIClient, logger Logging) { - infoMessage := `%v - Connected to docker: - Server Version: %v - API Version: %v - Operating System: %v - Total Memory: %v MB -` - - info, err := client.Info(ctx) - if err != nil { - logger.Printf("failed getting information about docker server: %s", err) - return - } - - logger.Printf(infoMessage, packagePath, - info.ServerVersion, client.ClientVersion(), - info.OperatingSystem, info.MemTotal/1024/1024) -} - -// TestLogger returns a Logging implementation for testing.TB -// This way logs from testcontainers are part of the test output of a test suite or test case -func TestLogger(tb testing.TB) Logging { - tb.Helper() - return testLogger{TB: tb} -} - -// WithLogger is a generic option that implements GenericProviderOption, DockerProviderOption -// It replaces the global Logging implementation with a user defined one e.g. to aggregate logs from testcontainers -// with the logs of specific test case -func WithLogger(logger Logging) LoggerOption { - return LoggerOption{ - logger: logger, - } -} - -type LoggerOption struct { - logger Logging -} - -func (o LoggerOption) ApplyGenericTo(opts *GenericProviderOptions) { - opts.Logger = o.logger -} - -func (o LoggerOption) ApplyDockerTo(opts *DockerProviderOptions) { - opts.Logger = o.logger -} - -type testLogger struct { - testing.TB -} - -func (t testLogger) Printf(format string, v ...interface{}) { - t.Helper() - t.Logf(format, v...) 
-} diff --git a/vendor/github.com/testcontainers/testcontainers-go/mkdocs.yml b/vendor/github.com/testcontainers/testcontainers-go/mkdocs.yml deleted file mode 100644 index 91383709d..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/mkdocs.yml +++ /dev/null @@ -1,106 +0,0 @@ -site_name: Testcontainers for Go -site_url: https://golang.testcontainers.org -plugins: - - search - - codeinclude - - markdownextradata -theme: - name: material - custom_dir: docs/theme - palette: - scheme: testcontainers - font: - text: Roboto - code: Roboto Mono - logo: logo.svg - favicon: favicon.ico -extra_css: - - css/extra.css -repo_name: testcontainers-go -repo_url: https://github.com/testcontainers/testcontainers-go -markdown_extensions: - - admonition - - codehilite: - linenums: false - - pymdownx.superfences - - pymdownx.tabbed: - alternate_style: true - - pymdownx.snippets - - toc: - permalink: true - - attr_list - - pymdownx.emoji: - emoji_index: !!python/name:materialx.emoji.twemoji - emoji_generator: !!python/name:materialx.emoji.to_svg -nav: - - Home: index.md - - Quickstart: quickstart.md - - Features: - - features/creating_container.md - - features/files_and_mounts.md - - features/configuration.md - - features/networking.md - - features/garbage_collector.md - - features/build_from_dockerfile.md - - features/docker_auth.md - - features/docker_compose.md - - features/follow_logs.md - - features/override_container_command.md - - features/copy_file.md - - Wait Strategies: - - Introduction: features/wait/introduction.md - - Exec: features/wait/exec.md - - Exit: features/wait/exit.md - - Health: features/wait/health.md - - HostPort: features/wait/host_port.md - - HTTP: features/wait/http.md - - Log: features/wait/log.md - - Multi: features/wait/multi.md - - SQL: features/wait/sql.md - - Modules: - - modules/index.md - - modules/couchbase.md - - modules/k3s.md - - modules/localstack.md - - modules/mysql.md - - modules/neo4j.md - - modules/postgres.md - - modules/pulsar.md - - modules/redis.md - - modules/redpanda.md - - modules/vault.md - - Examples: - - examples/index.md - - examples/bigtable.md - - examples/cockroachdb.md - - examples/consul.md - - examples/datastore.md - - examples/firestore.md - - examples/mongodb.md - - examples/nats.md - - examples/nginx.md - - examples/pubsub.md - - examples/spanner.md - - examples/toxiproxy.md - - System Requirements: - - system_requirements/index.md - - system_requirements/docker.md - - Continuous Integration: - - system_requirements/ci/aws_codebuild.md - - system_requirements/ci/bitbucket_pipelines.md - - system_requirements/ci/circle_ci.md - - system_requirements/ci/concourse_ci.md - - system_requirements/ci/dind_patterns.md - - system_requirements/ci/drone.md - - system_requirements/ci/gitlab_ci.md - - system_requirements/ci/tekton.md - - system_requirements/ci/travis.md - - system_requirements/using_colima.md - - system_requirements/using_podman.md - - Contributing: - - contributing.md - - contributing_docs.md - - Getting help: getting_help.md -edit_uri: edit/main/docs/ -extra: - latest_version: v0.21.0 diff --git a/vendor/github.com/testcontainers/testcontainers-go/mounts.go b/vendor/github.com/testcontainers/testcontainers-go/mounts.go deleted file mode 100644 index c4b6fc916..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/mounts.go +++ /dev/null @@ -1,116 +0,0 @@ -package testcontainers - -const ( - MountTypeBind MountType = iota - MountTypeVolume - MountTypeTmpfs - MountTypePipe -) - -var ( - _ ContainerMountSource = 
(*GenericBindMountSource)(nil) - _ ContainerMountSource = (*GenericVolumeMountSource)(nil) - _ ContainerMountSource = (*GenericTmpfsMountSource)(nil) -) - -type ( - // ContainerMounts represents a collection of mounts for a container - ContainerMounts []ContainerMount - MountType uint -) - -// ContainerMountSource is the base for all mount sources -type ContainerMountSource interface { - // Source will be used as Source field in the final mount - // this might either be a volume name, a host path or might be empty e.g. for Tmpfs - Source() string - - // Type determines the final mount type - // possible options are limited by the Docker API - Type() MountType -} - -// GenericBindMountSource implements ContainerMountSource and represents a bind mount -// Optionally mount.BindOptions might be added for advanced scenarios -type GenericBindMountSource struct { - // HostPath is the path mounted into the container - // the same host path might be mounted to multiple locations within a single container - HostPath string -} - -func (s GenericBindMountSource) Source() string { - return s.HostPath -} - -func (GenericBindMountSource) Type() MountType { - return MountTypeBind -} - -// GenericVolumeMountSource implements ContainerMountSource and represents a volume mount -type GenericVolumeMountSource struct { - // Name refers to the name of the volume to be mounted - // the same volume might be mounted to multiple locations within a single container - Name string -} - -func (s GenericVolumeMountSource) Source() string { - return s.Name -} - -func (GenericVolumeMountSource) Type() MountType { - return MountTypeVolume -} - -// GenericTmpfsMountSource implements ContainerMountSource and represents a TmpFS mount -// Optionally mount.TmpfsOptions might be added for advanced scenarios -type GenericTmpfsMountSource struct { -} - -func (s GenericTmpfsMountSource) Source() string { - return "" -} - -func (GenericTmpfsMountSource) Type() MountType { - return MountTypeTmpfs -} - -// ContainerMountTarget represents the target path within a container where the mount will be available -// Note that mount targets must be unique. It's not supported to mount different sources to the same target. -type ContainerMountTarget string - -func (t ContainerMountTarget) Target() string { - return string(t) -} - -// BindMount returns a new ContainerMount with a GenericBindMountSource as source -// This is a convenience method to cover typical use cases. -func BindMount(hostPath string, mountTarget ContainerMountTarget) ContainerMount { - return ContainerMount{ - Source: GenericBindMountSource{HostPath: hostPath}, - Target: mountTarget, - } -} - -// VolumeMount returns a new ContainerMount with a GenericVolumeMountSource as source -// This is a convenience method to cover typical use cases. 
-func VolumeMount(volumeName string, mountTarget ContainerMountTarget) ContainerMount { - return ContainerMount{ - Source: GenericVolumeMountSource{Name: volumeName}, - Target: mountTarget, - } -} - -// Mounts returns a ContainerMounts to support a more fluent API -func Mounts(mounts ...ContainerMount) ContainerMounts { - return mounts -} - -// ContainerMount models a mount into a container -type ContainerMount struct { - // Source is typically either a GenericBindMountSource or a GenericVolumeMountSource - Source ContainerMountSource - // Target is the path where the mount should be mounted within the container - Target ContainerMountTarget - // ReadOnly determines if the mount should be read-only - ReadOnly bool -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/network.go b/vendor/github.com/testcontainers/testcontainers-go/network.go deleted file mode 100644 index c96fc11c9..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/network.go +++ /dev/null @@ -1,45 +0,0 @@ -package testcontainers - -import ( - "context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/network" -) - -// NetworkProvider allows the creation of networks on an arbitrary system -type NetworkProvider interface { - CreateNetwork(context.Context, NetworkRequest) (Network, error) // create a network - GetNetwork(context.Context, NetworkRequest) (types.NetworkResource, error) // get a network -} - -// Network allows getting info about a single network instance -type Network interface { - Remove(context.Context) error // removes the network -} - -type DefaultNetwork string - -func (n DefaultNetwork) ApplyGenericTo(opts *GenericProviderOptions) { - opts.DefaultNetwork = string(n) -} - -func (n DefaultNetwork) ApplyDockerTo(opts *DockerProviderOptions) { - opts.DefaultNetwork = string(n) -} - -// NetworkRequest represents the parameters used to get a network -type NetworkRequest struct { - Driver string - CheckDuplicate bool - Internal bool - EnableIPv6 bool - Name string - Labels map[string]string - Attachable bool - IPAM *network.IPAM - - SkipReaper bool // Deprecated: The reaper is globally controlled by the .testcontainers.properties file or the TESTCONTAINERS_RYUK_DISABLED environment variable - ReaperImage string // Deprecated: use WithImageName ContainerOption instead. Alternative reaper registry - ReaperOptions []ContainerOption // Reaper options to use for this network -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/parallel.go b/vendor/github.com/testcontainers/testcontainers-go/parallel.go deleted file mode 100644 index 297ab9d6d..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/parallel.go +++ /dev/null @@ -1,122 +0,0 @@ -package testcontainers - -import ( - "context" - "fmt" - "sync" -) - -const ( - defaultWorkersCount = 8 -) - -type ParallelContainerRequest []GenericContainerRequest - -// ParallelContainersOptions represents additional options for parallel running -type ParallelContainersOptions struct { - WorkersCount int // count of parallel workers. 
If field empty(zero), default value will be 'defaultWorkersCount' -} - -// ParallelContainersRequestError represents error from parallel request -type ParallelContainersRequestError struct { - Request GenericContainerRequest - Error error -} - -type ParallelContainersError struct { - Errors []ParallelContainersRequestError -} - -func (gpe ParallelContainersError) Error() string { - return fmt.Sprintf("%v", gpe.Errors) -} - -func parallelContainersRunner( - ctx context.Context, - requests <-chan GenericContainerRequest, - errors chan<- ParallelContainersRequestError, - containers chan<- Container, - wg *sync.WaitGroup) { - - for req := range requests { - c, err := GenericContainer(ctx, req) - if err != nil { - errors <- ParallelContainersRequestError{ - Request: req, - Error: err, - } - continue - } - containers <- c - } - wg.Done() -} - -// ParallelContainers creates a generic containers with parameters and run it in parallel mode -func ParallelContainers(ctx context.Context, reqs ParallelContainerRequest, opt ParallelContainersOptions) ([]Container, error) { - if opt.WorkersCount == 0 { - opt.WorkersCount = defaultWorkersCount - } - - tasksChanSize := opt.WorkersCount - if tasksChanSize > len(reqs) { - tasksChanSize = len(reqs) - } - - tasksChan := make(chan GenericContainerRequest, tasksChanSize) - errsChan := make(chan ParallelContainersRequestError) - resChan := make(chan Container) - waitRes := make(chan struct{}) - - containers := make([]Container, 0) - errors := make([]ParallelContainersRequestError, 0) - - wg := sync.WaitGroup{} - wg.Add(tasksChanSize) - - // run workers - for i := 0; i < tasksChanSize; i++ { - go parallelContainersRunner(ctx, tasksChan, errsChan, resChan, &wg) - } - - go func() { - for { - select { - case c, ok := <-resChan: - if !ok { - resChan = nil - } else { - containers = append(containers, c) - } - case e, ok := <-errsChan: - if !ok { - errsChan = nil - } else { - errors = append(errors, e) - } - } - - if resChan == nil && errsChan == nil { - waitRes <- struct{}{} - break - } - } - - }() - - for _, req := range reqs { - tasksChan <- req - } - close(tasksChan) - wg.Wait() - close(resChan) - close(errsChan) - - <-waitRes - - if len(errors) != 0 { - return containers, ParallelContainersError{Errors: errors} - } - - return containers, nil -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/provider.go b/vendor/github.com/testcontainers/testcontainers-go/provider.go deleted file mode 100644 index d6868f8d1..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/provider.go +++ /dev/null @@ -1,163 +0,0 @@ -package testcontainers - -import ( - "context" - "errors" - "fmt" - "os" - "strings" - - "github.com/testcontainers/testcontainers-go/internal/testcontainersdocker" -) - -// possible provider types -const ( - ProviderDefault ProviderType = iota // default will auto-detect provider from DOCKER_HOST environment variable - ProviderDocker - ProviderPodman -) - -type ( - // ProviderType is an enum for the possible providers - ProviderType int - - // GenericProviderOptions defines options applicable to all providers - GenericProviderOptions struct { - Logger Logging - DefaultNetwork string - } - - // GenericProviderOption defines a common interface to modify GenericProviderOptions - // These options can be passed to GetProvider in a variadic way to customize the returned GenericProvider instance - GenericProviderOption interface { - ApplyGenericTo(opts *GenericProviderOptions) - } - - // GenericProviderOptionFunc is a shorthand to implement 
the GenericProviderOption interface - GenericProviderOptionFunc func(opts *GenericProviderOptions) - - // DockerProviderOptions defines options applicable to DockerProvider - DockerProviderOptions struct { - defaultBridgeNetworkName string - *GenericProviderOptions - } - - // DockerProviderOption defines a common interface to modify DockerProviderOptions - // These can be passed to NewDockerProvider in a variadic way to customize the returned DockerProvider instance - DockerProviderOption interface { - ApplyDockerTo(opts *DockerProviderOptions) - } - - // DockerProviderOptionFunc is a shorthand to implement the DockerProviderOption interface - DockerProviderOptionFunc func(opts *DockerProviderOptions) -) - -func (f DockerProviderOptionFunc) ApplyDockerTo(opts *DockerProviderOptions) { - f(opts) -} - -func Generic2DockerOptions(opts ...GenericProviderOption) []DockerProviderOption { - converted := make([]DockerProviderOption, 0, len(opts)) - for _, o := range opts { - switch c := o.(type) { - case DockerProviderOption: - converted = append(converted, c) - default: - converted = append(converted, DockerProviderOptionFunc(func(opts *DockerProviderOptions) { - o.ApplyGenericTo(opts.GenericProviderOptions) - })) - } - } - - return converted -} - -func WithDefaultBridgeNetwork(bridgeNetworkName string) DockerProviderOption { - return DockerProviderOptionFunc(func(opts *DockerProviderOptions) { - opts.defaultBridgeNetworkName = bridgeNetworkName - }) -} - -func (f GenericProviderOptionFunc) ApplyGenericTo(opts *GenericProviderOptions) { - f(opts) -} - -// ContainerProvider allows the creation of containers on an arbitrary system -type ContainerProvider interface { - Close() error // close the provider - CreateContainer(context.Context, ContainerRequest) (Container, error) // create a container without starting it - ReuseOrCreateContainer(context.Context, ContainerRequest) (Container, error) // reuses a container if it exists or creates a container without starting - RunContainer(context.Context, ContainerRequest) (Container, error) // create a container and start it - Health(context.Context) error - Config() TestcontainersConfig -} - -// GetProvider provides the provider implementation for a certain type -func (t ProviderType) GetProvider(opts ...GenericProviderOption) (GenericProvider, error) { - opt := &GenericProviderOptions{ - Logger: Logger, - } - - for _, o := range opts { - o.ApplyGenericTo(opt) - } - - pt := t - if pt == ProviderDefault && strings.Contains(os.Getenv("DOCKER_HOST"), "podman.sock") { - pt = ProviderPodman - } - - switch pt { - case ProviderDefault, ProviderDocker: - providerOptions := append(Generic2DockerOptions(opts...), WithDefaultBridgeNetwork(Bridge)) - provider, err := NewDockerProvider(providerOptions...) - if err != nil { - return nil, fmt.Errorf("%w, failed to create Docker provider", err) - } - return provider, nil - case ProviderPodman: - providerOptions := append(Generic2DockerOptions(opts...), WithDefaultBridgeNetwork(Podman)) - provider, err := NewDockerProvider(providerOptions...) 
- if err != nil { - return nil, fmt.Errorf("%w, failed to create Docker provider", err) - } - return provider, nil - } - return nil, errors.New("unknown provider") -} - -// NewDockerProvider creates a Docker provider with the EnvClient -func NewDockerProvider(provOpts ...DockerProviderOption) (*DockerProvider, error) { - o := &DockerProviderOptions{ - GenericProviderOptions: &GenericProviderOptions{ - Logger: Logger, - }, - } - - for idx := range provOpts { - provOpts[idx].ApplyDockerTo(o) - } - - c, err := NewDockerClient() - if err != nil { - return nil, err - } - - tcConfig := ReadConfig() - - dockerHost := testcontainersdocker.ExtractDockerHost(context.Background()) - - p := &DockerProvider{ - DockerProviderOptions: o, - host: dockerHost, - client: c, - config: tcConfig, - } - - // log docker server info only once - logOnce.Do(func() { - LogDockerServerInfo(context.Background(), p.client, p.Logger) - }) - - return p, nil -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/reaper.go b/vendor/github.com/testcontainers/testcontainers-go/reaper.go deleted file mode 100644 index b72a9bec5..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/reaper.go +++ /dev/null @@ -1,212 +0,0 @@ -package testcontainers - -import ( - "bufio" - "context" - "fmt" - "net" - "strings" - "sync" - "time" - - "github.com/docker/docker/api/types/container" - "github.com/docker/go-connections/nat" - "github.com/testcontainers/testcontainers-go/internal" - "github.com/testcontainers/testcontainers-go/internal/testcontainersdocker" - "github.com/testcontainers/testcontainers-go/wait" -) - -const ( - // Deprecated: it has been replaced by the internal testcontainersdocker.LabelLang - TestcontainerLabel = "org.testcontainers.golang" - // Deprecated: it has been replaced by the internal testcontainersdocker.LabelSessionID - TestcontainerLabelSessionID = TestcontainerLabel + ".sessionId" - // Deprecated: it has been replaced by the internal testcontainersdocker.LabelReaper - TestcontainerLabelIsReaper = TestcontainerLabel + ".reaper" - - ReaperDefaultImage = "docker.io/testcontainers/ryuk:0.5.1" -) - -var ( - reaperInstance *Reaper // We would like to create reaper only once - mutex sync.Mutex -) - -// ReaperProvider represents a provider for the reaper to run itself with -// The ContainerProvider interface should usually satisfy this as well, so it is pluggable -type ReaperProvider interface { - RunContainer(ctx context.Context, req ContainerRequest) (Container, error) - Config() TestcontainersConfig -} - -// NewReaper creates a Reaper with a sessionID to identify containers and a provider to use -// Deprecated: it's not possible to create a reaper anymore. -func NewReaper(ctx context.Context, sessionID string, provider ReaperProvider, reaperImageName string) (*Reaper, error) { - return reuseOrCreateReaper(ctx, sessionID, provider, WithImageName(reaperImageName)) -} - -// reuseOrCreateReaper returns an existing Reaper instance if it exists and is running. Otherwise, a new Reaper instance -// will be created with a sessionID to identify containers and a provider to use -func reuseOrCreateReaper(ctx context.Context, sessionID string, provider ReaperProvider, opts ...ContainerOption) (*Reaper, error) { - mutex.Lock() - defer mutex.Unlock() - // If reaper already exists and healthy, re-use it - if reaperInstance != nil { - // Verify this instance is still running by checking state. 
- // Can't use Container.IsRunning because the bool is not updated when Reaper is terminated - state, err := reaperInstance.container.State(ctx) - if err == nil && state.Running { - return reaperInstance, nil - } - } - - r, err := newReaper(ctx, sessionID, provider, opts...) - if err != nil { - return nil, err - } - - reaperInstance = r - return reaperInstance, nil -} - -// newReaper creates a Reaper with a sessionID to identify containers and a provider to use -// Should only be used internally and instead use reuseOrCreateReaper to prefer reusing an existing Reaper instance -func newReaper(ctx context.Context, sessionID string, provider ReaperProvider, opts ...ContainerOption) (*Reaper, error) { - dockerHostMount := testcontainersdocker.ExtractDockerSocket(ctx) - - reaper := &Reaper{ - Provider: provider, - SessionID: sessionID, - } - - listeningPort := nat.Port("8080/tcp") - - tcConfig := provider.Config().Config - - reaperOpts := containerOptions{} - - for _, opt := range opts { - opt(&reaperOpts) - } - - req := ContainerRequest{ - Image: reaperImage(reaperOpts.ImageName), - ExposedPorts: []string{string(listeningPort)}, - Labels: map[string]string{ - TestcontainerLabelIsReaper: "true", - testcontainersdocker.LabelReaper: "true", - }, - Mounts: Mounts(BindMount(dockerHostMount, "/var/run/docker.sock")), - Privileged: tcConfig.RyukPrivileged, - WaitingFor: wait.ForListeningPort(listeningPort), - ReaperOptions: opts, - HostConfigModifier: func(hc *container.HostConfig) { - hc.AutoRemove = true - hc.NetworkMode = Bridge - }, - } - - // keep backwards compatibility - req.ReaperImage = req.Image - - // include reaper-specific labels to the reaper container - for k, v := range reaper.Labels() { - if k == TestcontainerLabelSessionID || k == testcontainersdocker.LabelSessionID { - continue - } - req.Labels[k] = v - } - - // Attach reaper container to a requested network if it is specified - if p, ok := provider.(*DockerProvider); ok { - req.Networks = append(req.Networks, p.DefaultNetwork) - } - - c, err := provider.RunContainer(ctx, req) - if err != nil { - return nil, err - } - reaper.container = c - - endpoint, err := c.PortEndpoint(ctx, "8080", "") - if err != nil { - return nil, err - } - reaper.Endpoint = endpoint - - return reaper, nil -} - -// Reaper is used to start a sidecar container that cleans up resources -type Reaper struct { - Provider ReaperProvider - SessionID string - Endpoint string - container Container -} - -// Connect runs a goroutine which can be terminated by sending true into the returned channel -func (r *Reaper) Connect() (chan bool, error) { - conn, err := net.DialTimeout("tcp", r.Endpoint, 10*time.Second) - if err != nil { - return nil, fmt.Errorf("%w: Connecting to Ryuk on %s failed", err, r.Endpoint) - } - - terminationSignal := make(chan bool) - go func(conn net.Conn) { - sock := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)) - defer conn.Close() - - labelFilters := []string{} - for l, v := range r.Labels() { - labelFilters = append(labelFilters, fmt.Sprintf("label=%s=%s", l, v)) - } - - retryLimit := 3 - for retryLimit > 0 { - retryLimit-- - - if _, err := sock.WriteString(strings.Join(labelFilters, "&")); err != nil { - continue - } - - if _, err := sock.WriteString("\n"); err != nil { - continue - } - - if err := sock.Flush(); err != nil { - continue - } - - resp, err := sock.ReadString('\n') - if err != nil { - continue - } - - if resp == "ACK\n" { - break - } - } - - <-terminationSignal - }(conn) - return terminationSignal, nil -} - -// 
Labels returns the container labels to use so that this Reaper cleans them up -func (r *Reaper) Labels() map[string]string { - return map[string]string{ - TestcontainerLabel: "true", - TestcontainerLabelSessionID: r.SessionID, - testcontainersdocker.LabelLang: "go", - testcontainersdocker.LabelVersion: internal.Version, - testcontainersdocker.LabelSessionID: r.SessionID, - } -} - -func reaperImage(reaperImageName string) string { - if reaperImageName == "" { - return ReaperDefaultImage - } - return reaperImageName -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/requirements.txt b/vendor/github.com/testcontainers/testcontainers-go/requirements.txt deleted file mode 100644 index f77d057b5..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -mkdocs==1.3.0 -mkdocs-codeinclude-plugin==0.2.0 -mkdocs-material==8.1.3 -mkdocs-markdownextradata-plugin==0.2.5 diff --git a/vendor/github.com/testcontainers/testcontainers-go/runtime.txt b/vendor/github.com/testcontainers/testcontainers-go/runtime.txt deleted file mode 100644 index cc1923a40..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/runtime.txt +++ /dev/null @@ -1 +0,0 @@ -3.8 diff --git a/vendor/github.com/testcontainers/testcontainers-go/testcontainer.go b/vendor/github.com/testcontainers/testcontainers-go/testcontainer.go deleted file mode 100644 index 6c42b00d4..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/testcontainer.go +++ /dev/null @@ -1 +0,0 @@ -package testcontainers diff --git a/vendor/github.com/testcontainers/testcontainers-go/testing.go b/vendor/github.com/testcontainers/testcontainers-go/testing.go deleted file mode 100644 index cdbd68d72..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/testing.go +++ /dev/null @@ -1,42 +0,0 @@ -package testcontainers - -import ( - "context" - "testing" - - "github.com/testcontainers/testcontainers-go/internal/testcontainersdocker" -) - -// SkipIfProviderIsNotHealthy is a utility function capable of skipping tests -// if the provider is not healthy, or running at all. -// This is a function designed to be used in your test, when Docker is not mandatory for CI/CD. -// In this way tests that depend on Testcontainers won't run if the provider is provisioned correctly. -func SkipIfProviderIsNotHealthy(t *testing.T) { - ctx := context.Background() - provider, err := ProviderDocker.GetProvider() - if err != nil { - t.Skipf("Docker is not running. TestContainers can't perform is work without it: %s", err) - } - err = provider.Health(ctx) - if err != nil { - t.Skipf("Docker is not running. TestContainers can't perform is work without it: %s", err) - } -} - -// SkipIfDockerDesktop is a utility function capable of skipping tests -// if tests are run using Docker Desktop. 
-func SkipIfDockerDesktop(t *testing.T, ctx context.Context) { - cli, err := testcontainersdocker.NewClient(ctx) - if err != nil { - t.Fatalf("failed to create docker client: %s", err) - } - - info, err := cli.Info(ctx) - if err != nil { - t.Fatalf("failed to get docker info: %s", err) - } - - if info.OperatingSystem == "Docker Desktop" { - t.Skip("Skipping test that requires host network access when running in Docker Desktop") - } -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/all.go b/vendor/github.com/testcontainers/testcontainers-go/wait/all.go deleted file mode 100644 index 18531cbf5..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/wait/all.go +++ /dev/null @@ -1,80 +0,0 @@ -package wait - -import ( - "context" - "fmt" - "time" -) - -// Implement interface -var _ Strategy = (*MultiStrategy)(nil) -var _ StrategyTimeout = (*MultiStrategy)(nil) - -type MultiStrategy struct { - // all Strategies should have a startupTimeout to avoid waiting infinitely - timeout *time.Duration - deadline *time.Duration - - // additional properties - Strategies []Strategy -} - -// WithStartupTimeoutDefault sets the default timeout for all inner wait strategies -func (ms *MultiStrategy) WithStartupTimeoutDefault(timeout time.Duration) *MultiStrategy { - ms.timeout = &timeout - return ms -} - -// WithStartupTimeout sets a time.Duration which limits all wait strategies -// -// Deprecated: use WithDeadline -func (ms *MultiStrategy) WithStartupTimeout(timeout time.Duration) Strategy { - return ms.WithDeadline(timeout) -} - -// WithDeadline sets a time.Duration which limits all wait strategies -func (ms *MultiStrategy) WithDeadline(deadline time.Duration) *MultiStrategy { - ms.deadline = &deadline - return ms -} - -func ForAll(strategies ...Strategy) *MultiStrategy { - return &MultiStrategy{ - Strategies: strategies, - } -} - -func (ms *MultiStrategy) Timeout() *time.Duration { - return ms.timeout -} - -func (ms *MultiStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget) error { - var cancel context.CancelFunc - if ms.deadline != nil { - ctx, cancel = context.WithTimeout(ctx, *ms.deadline) - defer cancel() - } - - if len(ms.Strategies) == 0 { - return fmt.Errorf("no wait strategy supplied") - } - - for _, strategy := range ms.Strategies { - strategyCtx := ctx - - // Set default Timeout when strategy implements StrategyTimeout - if st, ok := strategy.(StrategyTimeout); ok { - if ms.Timeout() != nil && st.Timeout() == nil { - strategyCtx, cancel = context.WithTimeout(ctx, *ms.Timeout()) - defer cancel() - } - } - - err := strategy.WaitUntilReady(strategyCtx, target) - if err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/errors.go b/vendor/github.com/testcontainers/testcontainers-go/wait/errors.go deleted file mode 100644 index f8a02a59b..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/wait/errors.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build !windows -// +build !windows - -package wait - -import "syscall" - -func isConnRefusedErr(err error) bool { - return err == syscall.ECONNREFUSED -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/errors_windows.go b/vendor/github.com/testcontainers/testcontainers-go/wait/errors_windows.go deleted file mode 100644 index 3ae346d8a..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/wait/errors_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -package wait - -import ( - "golang.org/x/sys/windows" -) - 
-func isConnRefusedErr(err error) bool { - return err == windows.WSAECONNREFUSED -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/exec.go b/vendor/github.com/testcontainers/testcontainers-go/wait/exec.go deleted file mode 100644 index 82f15c609..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/wait/exec.go +++ /dev/null @@ -1,99 +0,0 @@ -package wait - -import ( - "context" - "io" - "time" - - tcexec "github.com/testcontainers/testcontainers-go/exec" -) - -// Implement interface -var _ Strategy = (*ExecStrategy)(nil) -var _ StrategyTimeout = (*ExecStrategy)(nil) - -type ExecStrategy struct { - // all Strategies should have a startupTimeout to avoid waiting infinitely - timeout *time.Duration - cmd []string - - // additional properties - ExitCodeMatcher func(exitCode int) bool - ResponseMatcher func(body io.Reader) bool - PollInterval time.Duration -} - -// NewExecStrategy constructs an Exec strategy ... -func NewExecStrategy(cmd []string) *ExecStrategy { - return &ExecStrategy{ - cmd: cmd, - ExitCodeMatcher: defaultExitCodeMatcher, - ResponseMatcher: func(body io.Reader) bool { return true }, - PollInterval: defaultPollInterval(), - } -} - -func defaultExitCodeMatcher(exitCode int) bool { - return exitCode == 0 -} - -// WithStartupTimeout can be used to change the default startup timeout -func (ws *ExecStrategy) WithStartupTimeout(startupTimeout time.Duration) *ExecStrategy { - ws.timeout = &startupTimeout - return ws -} - -func (ws *ExecStrategy) WithExitCodeMatcher(exitCodeMatcher func(exitCode int) bool) *ExecStrategy { - ws.ExitCodeMatcher = exitCodeMatcher - return ws -} - -func (ws *ExecStrategy) WithResponseMatcher(matcher func(body io.Reader) bool) *ExecStrategy { - ws.ResponseMatcher = matcher - return ws -} - -// WithPollInterval can be used to override the default polling interval of 100 milliseconds -func (ws *ExecStrategy) WithPollInterval(pollInterval time.Duration) *ExecStrategy { - ws.PollInterval = pollInterval - return ws -} - -// ForExec is a convenience method to assign ExecStrategy -func ForExec(cmd []string) *ExecStrategy { - return NewExecStrategy(cmd) -} - -func (ws *ExecStrategy) Timeout() *time.Duration { - return ws.timeout -} - -func (ws *ExecStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget) error { - timeout := defaultStartupTimeout() - if ws.timeout != nil { - timeout = *ws.timeout - } - - ctx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - - for { - select { - case <-ctx.Done(): - return ctx.Err() - case <-time.After(ws.PollInterval): - exitCode, resp, err := target.Exec(ctx, ws.cmd, tcexec.Multiplexed()) - if err != nil { - return err - } - if !ws.ExitCodeMatcher(exitCode) { - continue - } - if ws.ResponseMatcher != nil && !ws.ResponseMatcher(resp) { - continue - } - - return nil - } - } -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/exit.go b/vendor/github.com/testcontainers/testcontainers-go/wait/exit.go deleted file mode 100644 index 8ff59fe0a..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/wait/exit.go +++ /dev/null @@ -1,89 +0,0 @@ -package wait - -import ( - "context" - "strings" - "time" -) - -// Implement interface -var _ Strategy = (*ExitStrategy)(nil) -var _ StrategyTimeout = (*ExitStrategy)(nil) - -// ExitStrategy will wait until container exit -type ExitStrategy struct { - // all Strategies should have a timeout to avoid waiting infinitely - timeout *time.Duration - - // additional properties - PollInterval time.Duration -} - 
-// NewExitStrategy constructs with polling interval of 100 milliseconds without timeout by default -func NewExitStrategy() *ExitStrategy { - return &ExitStrategy{ - PollInterval: defaultPollInterval(), - } - -} - -// fluent builders for each property -// since go has neither covariance nor generics, the return type must be the type of the concrete implementation -// this is true for all properties, even the "shared" ones - -// WithExitTimeout can be used to change the default exit timeout -func (ws *ExitStrategy) WithExitTimeout(exitTimeout time.Duration) *ExitStrategy { - ws.timeout = &exitTimeout - return ws -} - -// WithPollInterval can be used to override the default polling interval of 100 milliseconds -func (ws *ExitStrategy) WithPollInterval(pollInterval time.Duration) *ExitStrategy { - ws.PollInterval = pollInterval - return ws -} - -// ForExit is the default construction for the fluid interface. -// -// For Example: -// -// wait. -// ForExit(). -// WithPollInterval(1 * time.Second) -func ForExit() *ExitStrategy { - return NewExitStrategy() -} - -func (ws *ExitStrategy) Timeout() *time.Duration { - return ws.timeout -} - -// WaitUntilReady implements Strategy.WaitUntilReady -func (ws *ExitStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget) (err error) { - if ws.timeout != nil { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, *ws.timeout) - defer cancel() - } - - for { - select { - case <-ctx.Done(): - return ctx.Err() - default: - state, err := target.State(ctx) - if err != nil { - if !strings.Contains(err.Error(), "No such container") { - return err - } else { - return nil - } - } - if state.Running { - time.Sleep(ws.PollInterval) - continue - } - return nil - } - } -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/health.go b/vendor/github.com/testcontainers/testcontainers-go/wait/health.go deleted file mode 100644 index b0821c8ae..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/wait/health.go +++ /dev/null @@ -1,91 +0,0 @@ -package wait - -import ( - "context" - "time" - - "github.com/docker/docker/api/types" -) - -// Implement interface -var _ Strategy = (*HealthStrategy)(nil) -var _ StrategyTimeout = (*HealthStrategy)(nil) - -// HealthStrategy will wait until the container becomes healthy -type HealthStrategy struct { - // all Strategies should have a startupTimeout to avoid waiting infinitely - timeout *time.Duration - - // additional properties - PollInterval time.Duration -} - -// NewHealthStrategy constructs with polling interval of 100 milliseconds and startup timeout of 60 seconds by default -func NewHealthStrategy() *HealthStrategy { - return &HealthStrategy{ - PollInterval: defaultPollInterval(), - } - -} - -// fluent builders for each property -// since go has neither covariance nor generics, the return type must be the type of the concrete implementation -// this is true for all properties, even the "shared" ones like startupTimeout - -// WithStartupTimeout can be used to change the default startup timeout -func (ws *HealthStrategy) WithStartupTimeout(startupTimeout time.Duration) *HealthStrategy { - ws.timeout = &startupTimeout - return ws -} - -// WithPollInterval can be used to override the default polling interval of 100 milliseconds -func (ws *HealthStrategy) WithPollInterval(pollInterval time.Duration) *HealthStrategy { - ws.PollInterval = pollInterval - return ws -} - -// ForHealthCheck is the default construction for the fluid interface. -// -// For Example: -// -// wait. 
-// ForHealthCheck(). -// WithPollInterval(1 * time.Second) -func ForHealthCheck() *HealthStrategy { - return NewHealthStrategy() -} - -func (ws *HealthStrategy) Timeout() *time.Duration { - return ws.timeout -} - -// WaitUntilReady implements Strategy.WaitUntilReady -func (ws *HealthStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget) (err error) { - timeout := defaultStartupTimeout() - if ws.timeout != nil { - timeout = *ws.timeout - } - - ctx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - - for { - select { - case <-ctx.Done(): - return ctx.Err() - default: - state, err := target.State(ctx) - if err != nil { - return err - } - if err := checkState(state); err != nil { - return err - } - if state.Health == nil || state.Health.Status != types.Healthy { - time.Sleep(ws.PollInterval) - continue - } - return nil - } - } -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/host_port.go b/vendor/github.com/testcontainers/testcontainers-go/wait/host_port.go deleted file mode 100644 index 59fe7c113..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/wait/host_port.go +++ /dev/null @@ -1,204 +0,0 @@ -package wait - -import ( - "context" - "errors" - "fmt" - "log" - "net" - "os" - "strconv" - "time" - - "github.com/docker/go-connections/nat" -) - -// Implement interface -var _ Strategy = (*HostPortStrategy)(nil) -var _ StrategyTimeout = (*HostPortStrategy)(nil) - -var errShellNotExecutable = errors.New("/bin/sh command not executable") - -type HostPortStrategy struct { - // Port is a string containing port number and protocol in the format "80/tcp" - // which - Port nat.Port - // all WaitStrategies should have a startupTimeout to avoid waiting infinitely - timeout *time.Duration - PollInterval time.Duration -} - -// NewHostPortStrategy constructs a default host port strategy -func NewHostPortStrategy(port nat.Port) *HostPortStrategy { - return &HostPortStrategy{ - Port: port, - PollInterval: defaultPollInterval(), - } -} - -// fluent builders for each property -// since go has neither covariance nor generics, the return type must be the type of the concrete implementation -// this is true for all properties, even the "shared" ones like startupTimeout - -// ForListeningPort is a helper similar to those in Wait.java -// https://github.com/testcontainers/testcontainers-java/blob/1d85a3834bd937f80aad3a4cec249c027f31aeb4/core/src/main/java/org/testcontainers/containers/wait/strategy/Wait.java -func ForListeningPort(port nat.Port) *HostPortStrategy { - return NewHostPortStrategy(port) -} - -// ForExposedPort constructs an exposed port strategy. Alias for `NewHostPortStrategy("")`. -// This strategy waits for the first port exposed in the Docker container. 
-func ForExposedPort() *HostPortStrategy { - return NewHostPortStrategy("") -} - -// WithStartupTimeout can be used to change the default startup timeout -func (hp *HostPortStrategy) WithStartupTimeout(startupTimeout time.Duration) *HostPortStrategy { - hp.timeout = &startupTimeout - return hp -} - -// WithPollInterval can be used to override the default polling interval of 100 milliseconds -func (hp *HostPortStrategy) WithPollInterval(pollInterval time.Duration) *HostPortStrategy { - hp.PollInterval = pollInterval - return hp -} - -func (hp *HostPortStrategy) Timeout() *time.Duration { - return hp.timeout -} - -// WaitUntilReady implements Strategy.WaitUntilReady -func (hp *HostPortStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget) (err error) { - timeout := defaultStartupTimeout() - if hp.timeout != nil { - timeout = *hp.timeout - } - - ctx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - - ipAddress, err := target.Host(ctx) - if err != nil { - return - } - - var waitInterval = hp.PollInterval - - internalPort := hp.Port - if internalPort == "" { - var ports nat.PortMap - ports, err = target.Ports(ctx) - if err != nil { - return - } - if len(ports) > 0 { - for p := range ports { - internalPort = p - break - } - } - } - - if internalPort == "" { - err = fmt.Errorf("no port to wait for") - return - } - - var port nat.Port - port, err = target.MappedPort(ctx, internalPort) - var i = 0 - - for port == "" { - i++ - - select { - case <-ctx.Done(): - return fmt.Errorf("%s:%w", ctx.Err(), err) - case <-time.After(waitInterval): - if err := checkTarget(ctx, target); err != nil { - return err - } - port, err = target.MappedPort(ctx, internalPort) - if err != nil { - fmt.Printf("(%d) [%s] %s\n", i, port, err) - } - } - } - - if err := externalCheck(ctx, ipAddress, port, target, waitInterval); err != nil { - return err - } - - err = internalCheck(ctx, internalPort, target) - if err != nil && errors.Is(errShellNotExecutable, err) { - log.Println("Shell not executable in container, only external port check will be performed") - } else { - return err - } - - return nil -} - -func externalCheck(ctx context.Context, ipAddress string, port nat.Port, target StrategyTarget, waitInterval time.Duration) error { - proto := port.Proto() - portNumber := port.Int() - portString := strconv.Itoa(portNumber) - - dialer := net.Dialer{} - address := net.JoinHostPort(ipAddress, portString) - for { - if err := checkTarget(ctx, target); err != nil { - return err - } - conn, err := dialer.DialContext(ctx, proto, address) - if err != nil { - if v, ok := err.(*net.OpError); ok { - if v2, ok := (v.Err).(*os.SyscallError); ok { - if isConnRefusedErr(v2.Err) { - time.Sleep(waitInterval) - continue - } - } - } - return err - } else { - _ = conn.Close() - break - } - } - return nil -} - -func internalCheck(ctx context.Context, internalPort nat.Port, target StrategyTarget) error { - command := buildInternalCheckCommand(internalPort.Int()) - for { - if ctx.Err() != nil { - return ctx.Err() - } - if err := checkTarget(ctx, target); err != nil { - return err - } - exitCode, _, err := target.Exec(ctx, []string{"/bin/sh", "-c", command}) - if err != nil { - return fmt.Errorf("%w, host port waiting failed", err) - } - - if exitCode == 0 { - break - } else if exitCode == 126 { - return errShellNotExecutable - } - } - return nil -} - -func buildInternalCheckCommand(internalPort int) string { - command := `( - cat /proc/net/tcp* | awk '{print $2}' | grep -i :%04x || - nc -vz -w 1 localhost %d || - /bin/sh 
-c ' 0 { - ws.TLSConfig = tlsconf[0] - } - return ws -} - -func (ws *HTTPStrategy) WithAllowInsecure(allowInsecure bool) *HTTPStrategy { - ws.AllowInsecure = allowInsecure - return ws -} - -func (ws *HTTPStrategy) WithMethod(method string) *HTTPStrategy { - ws.Method = method - return ws -} - -func (ws *HTTPStrategy) WithBody(reqdata io.Reader) *HTTPStrategy { - ws.Body = reqdata - return ws -} - -func (ws *HTTPStrategy) WithBasicAuth(username, password string) *HTTPStrategy { - ws.UserInfo = url.UserPassword(username, password) - return ws -} - -// WithPollInterval can be used to override the default polling interval of 100 milliseconds -func (ws *HTTPStrategy) WithPollInterval(pollInterval time.Duration) *HTTPStrategy { - ws.PollInterval = pollInterval - return ws -} - -// ForHTTP is a convenience method similar to Wait.java -// https://github.com/testcontainers/testcontainers-java/blob/1d85a3834bd937f80aad3a4cec249c027f31aeb4/core/src/main/java/org/testcontainers/containers/wait/strategy/Wait.java -func ForHTTP(path string) *HTTPStrategy { - return NewHTTPStrategy(path) -} - -func (ws *HTTPStrategy) Timeout() *time.Duration { - return ws.timeout -} - -// WaitUntilReady implements Strategy.WaitUntilReady -func (ws *HTTPStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget) (err error) { - timeout := defaultStartupTimeout() - if ws.timeout != nil { - timeout = *ws.timeout - } - - ctx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - - ipAddress, err := target.Host(ctx) - if err != nil { - return - } - - var mappedPort nat.Port - if ws.Port == "" { - ports, err := target.Ports(ctx) - for err != nil { - select { - case <-ctx.Done(): - return fmt.Errorf("%s:%w", ctx.Err(), err) - case <-time.After(ws.PollInterval): - if err := checkTarget(ctx, target); err != nil { - return err - } - - ports, err = target.Ports(ctx) - } - } - - for k, bindings := range ports { - if len(bindings) == 0 || k.Proto() != "tcp" { - continue - } - mappedPort, _ = nat.NewPort(k.Proto(), bindings[0].HostPort) - break - } - - if mappedPort == "" { - return errors.New("No exposed tcp ports or mapped ports - cannot wait for status") - } - } else { - mappedPort, err = target.MappedPort(ctx, ws.Port) - - for mappedPort == "" { - select { - case <-ctx.Done(): - return fmt.Errorf("%s:%w", ctx.Err(), err) - case <-time.After(ws.PollInterval): - if err := checkTarget(ctx, target); err != nil { - return err - } - - mappedPort, err = target.MappedPort(ctx, ws.Port) - } - } - - if mappedPort.Proto() != "tcp" { - return errors.New("Cannot use HTTP client on non-TCP ports") - } - } - - switch ws.Method { - case http.MethodGet, http.MethodHead, http.MethodPost, - http.MethodPut, http.MethodPatch, http.MethodDelete, - http.MethodConnect, http.MethodOptions, http.MethodTrace: - default: - if ws.Method != "" { - return fmt.Errorf("invalid http method %q", ws.Method) - } - ws.Method = http.MethodGet - } - - tripper := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).DialContext, - ForceAttemptHTTP2: true, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - TLSClientConfig: ws.TLSConfig, - } - - var proto string - if ws.UseTLS { - proto = "https" - if ws.AllowInsecure { - if ws.TLSConfig == nil { - tripper.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} - } else { - ws.TLSConfig.InsecureSkipVerify = 
true - } - } - } else { - proto = "http" - } - - client := http.Client{Transport: tripper, Timeout: time.Second} - address := net.JoinHostPort(ipAddress, strconv.Itoa(mappedPort.Int())) - - endpoint := url.URL{ - Scheme: proto, - Host: address, - Path: ws.Path, - } - - if ws.UserInfo != nil { - endpoint.User = ws.UserInfo - } - - // cache the body into a byte-slice so that it can be iterated over multiple times - var body []byte - if ws.Body != nil { - body, err = io.ReadAll(ws.Body) - if err != nil { - return - } - } - - for { - select { - case <-ctx.Done(): - return ctx.Err() - case <-time.After(ws.PollInterval): - if err := checkTarget(ctx, target); err != nil { - return err - } - req, err := http.NewRequestWithContext(ctx, ws.Method, endpoint.String(), bytes.NewReader(body)) - if err != nil { - return err - } - resp, err := client.Do(req) - if err != nil { - continue - } - if ws.StatusCodeMatcher != nil && !ws.StatusCodeMatcher(resp.StatusCode) { - _ = resp.Body.Close() - continue - } - if ws.ResponseMatcher != nil && !ws.ResponseMatcher(resp.Body) { - _ = resp.Body.Close() - continue - } - if err := resp.Body.Close(); err != nil { - continue - } - return nil - } - } -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/log.go b/vendor/github.com/testcontainers/testcontainers-go/wait/log.go deleted file mode 100644 index 4e14e9d31..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/wait/log.go +++ /dev/null @@ -1,117 +0,0 @@ -package wait - -import ( - "context" - "io" - "strings" - "time" -) - -// Implement interface -var _ Strategy = (*LogStrategy)(nil) -var _ StrategyTimeout = (*LogStrategy)(nil) - -// LogStrategy will wait until a given log entry shows up in the docker logs -type LogStrategy struct { - // all Strategies should have a startupTimeout to avoid waiting infinitely - timeout *time.Duration - - // additional properties - Log string - Occurrence int - PollInterval time.Duration -} - -// NewLogStrategy constructs with polling interval of 100 milliseconds and startup timeout of 60 seconds by default -func NewLogStrategy(log string) *LogStrategy { - return &LogStrategy{ - Log: log, - Occurrence: 1, - PollInterval: defaultPollInterval(), - } -} - -// fluent builders for each property -// since go has neither covariance nor generics, the return type must be the type of the concrete implementation -// this is true for all properties, even the "shared" ones like startupTimeout - -// WithStartupTimeout can be used to change the default startup timeout -func (ws *LogStrategy) WithStartupTimeout(timeout time.Duration) *LogStrategy { - ws.timeout = &timeout - return ws -} - -// WithPollInterval can be used to override the default polling interval of 100 milliseconds -func (ws *LogStrategy) WithPollInterval(pollInterval time.Duration) *LogStrategy { - ws.PollInterval = pollInterval - return ws -} - -func (ws *LogStrategy) WithOccurrence(o int) *LogStrategy { - // the number of occurrence needs to be positive - if o <= 0 { - o = 1 - } - ws.Occurrence = o - return ws -} - -// ForLog is the default construction for the fluid interface. -// -// For Example: -// -// wait. -// ForLog("some text"). 
-// WithPollInterval(1 * time.Second) -func ForLog(log string) *LogStrategy { - return NewLogStrategy(log) -} - -func (ws *LogStrategy) Timeout() *time.Duration { - return ws.timeout -} - -// WaitUntilReady implements Strategy.WaitUntilReady -func (ws *LogStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget) (err error) { - timeout := defaultStartupTimeout() - if ws.timeout != nil { - timeout = *ws.timeout - } - - ctx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - -LOOP: - for { - select { - case <-ctx.Done(): - return ctx.Err() - default: - checkErr := checkTarget(ctx, target) - - reader, err := target.Logs(ctx) - if err != nil { - time.Sleep(ws.PollInterval) - continue - } - - b, err := io.ReadAll(reader) - if err != nil { - time.Sleep(ws.PollInterval) - continue - } - - logs := string(b) - if logs == "" && checkErr != nil { - return checkErr - } else if strings.Count(logs, ws.Log) >= ws.Occurrence { - break LOOP - } else { - time.Sleep(ws.PollInterval) - continue - } - } - } - - return nil -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/nop.go b/vendor/github.com/testcontainers/testcontainers-go/wait/nop.go deleted file mode 100644 index a16feb878..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/wait/nop.go +++ /dev/null @@ -1,69 +0,0 @@ -package wait - -import ( - "context" - "io" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/go-connections/nat" - "github.com/testcontainers/testcontainers-go/exec" -) - -var _ Strategy = (*NopStrategy)(nil) -var _ StrategyTimeout = (*NopStrategy)(nil) - -type NopStrategy struct { - timeout *time.Duration - waitUntilReady func(context.Context, StrategyTarget) error -} - -func ForNop( - waitUntilReady func(context.Context, StrategyTarget) error, -) *NopStrategy { - return &NopStrategy{ - waitUntilReady: waitUntilReady, - } -} - -func (ws *NopStrategy) Timeout() *time.Duration { - return ws.timeout -} - -func (ws *NopStrategy) WithStartupTimeout(timeout time.Duration) *NopStrategy { - ws.timeout = &timeout - return ws -} - -func (ws *NopStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget) error { - return ws.waitUntilReady(ctx, target) -} - -type NopStrategyTarget struct { - ReaderCloser io.ReadCloser - ContainerState types.ContainerState -} - -func (st NopStrategyTarget) Host(_ context.Context) (string, error) { - return "", nil -} - -func (st NopStrategyTarget) Ports(_ context.Context) (nat.PortMap, error) { - return nil, nil -} - -func (st NopStrategyTarget) MappedPort(_ context.Context, n nat.Port) (nat.Port, error) { - return n, nil -} - -func (st NopStrategyTarget) Logs(_ context.Context) (io.ReadCloser, error) { - return st.ReaderCloser, nil -} - -func (st NopStrategyTarget) Exec(_ context.Context, _ []string, _ ...exec.ProcessOption) (int, io.Reader, error) { - return 0, nil, nil -} - -func (st NopStrategyTarget) State(_ context.Context) (*types.ContainerState, error) { - return &st.ContainerState, nil -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/sql.go b/vendor/github.com/testcontainers/testcontainers-go/wait/sql.go deleted file mode 100644 index 9e0c05607..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/wait/sql.go +++ /dev/null @@ -1,116 +0,0 @@ -package wait - -import ( - "context" - "database/sql" - "fmt" - "time" - - "github.com/docker/go-connections/nat" -) - -var _ Strategy = (*waitForSql)(nil) -var _ StrategyTimeout = (*waitForSql)(nil) - -const defaultForSqlQuery = "SELECT 1" - 
-// ForSQL constructs a new waitForSql strategy for the given driver -func ForSQL(port nat.Port, driver string, url func(host string, port nat.Port) string) *waitForSql { - return &waitForSql{ - Port: port, - URL: url, - Driver: driver, - startupTimeout: defaultStartupTimeout(), - PollInterval: defaultPollInterval(), - query: defaultForSqlQuery, - } -} - -type waitForSql struct { - timeout *time.Duration - - URL func(host string, port nat.Port) string - Driver string - Port nat.Port - startupTimeout time.Duration - PollInterval time.Duration - query string -} - -// WithStartupTimeout can be used to change the default startup timeout -func (w *waitForSql) WithStartupTimeout(timeout time.Duration) *waitForSql { - w.timeout = &timeout - return w -} - -// WithPollInterval can be used to override the default polling interval of 100 milliseconds -func (w *waitForSql) WithPollInterval(pollInterval time.Duration) *waitForSql { - w.PollInterval = pollInterval - return w -} - -// WithQuery can be used to override the default query used in the strategy. -func (w *waitForSql) WithQuery(query string) *waitForSql { - w.query = query - return w -} - -func (w *waitForSql) Timeout() *time.Duration { - return w.timeout -} - -// WaitUntilReady repeatedly tries to run "SELECT 1" or user defined query on the given port using sql and driver. -// -// If it doesn't succeed until the timeout value which defaults to 60 seconds, it will return an error. -func (w *waitForSql) WaitUntilReady(ctx context.Context, target StrategyTarget) (err error) { - timeout := defaultStartupTimeout() - if w.timeout != nil { - timeout = *w.timeout - } - - ctx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - - host, err := target.Host(ctx) - if err != nil { - return - } - - ticker := time.NewTicker(w.PollInterval) - defer ticker.Stop() - - var port nat.Port - port, err = target.MappedPort(ctx, w.Port) - - for port == "" { - select { - case <-ctx.Done(): - return fmt.Errorf("%s:%w", ctx.Err(), err) - case <-ticker.C: - if err := checkTarget(ctx, target); err != nil { - return err - } - port, err = target.MappedPort(ctx, w.Port) - } - } - - db, err := sql.Open(w.Driver, w.URL(host, port)) - if err != nil { - return fmt.Errorf("sql.Open: %v", err) - } - defer db.Close() - for { - select { - case <-ctx.Done(): - return ctx.Err() - case <-ticker.C: - if err := checkTarget(ctx, target); err != nil { - return err - } - if _, err := db.ExecContext(ctx, w.query); err != nil { - continue - } - return nil - } - } -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/wait.go b/vendor/github.com/testcontainers/testcontainers-go/wait/wait.go deleted file mode 100644 index 559ce3979..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/wait/wait.go +++ /dev/null @@ -1,62 +0,0 @@ -package wait - -import ( - "context" - "errors" - "fmt" - "io" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/go-connections/nat" - "github.com/testcontainers/testcontainers-go/exec" -) - -// Strategy defines the basic interface for a Wait Strategy -type Strategy interface { - WaitUntilReady(context.Context, StrategyTarget) error -} - -// StrategyTimeout allows MultiStrategy to configure a Strategy's Timeout -type StrategyTimeout interface { - Timeout() *time.Duration -} - -type StrategyTarget interface { - Host(context.Context) (string, error) - Ports(ctx context.Context) (nat.PortMap, error) - MappedPort(context.Context, nat.Port) (nat.Port, error) - Logs(context.Context) (io.ReadCloser, error) - 
Exec(context.Context, []string, ...exec.ProcessOption) (int, io.Reader, error) - State(context.Context) (*types.ContainerState, error) -} - -func checkTarget(ctx context.Context, target StrategyTarget) error { - state, err := target.State(ctx) - if err != nil { - return err - } - - return checkState(state) -} - -func checkState(state *types.ContainerState) error { - switch { - case state.Running: - return nil - case state.OOMKilled: - return errors.New("container crashed with out-of-memory (OOMKilled)") - case state.Status == "exited": - return fmt.Errorf("container exited with code %d", state.ExitCode) - default: - return fmt.Errorf("unexpected container status %q", state.Status) - } -} - -func defaultStartupTimeout() time.Duration { - return 60 * time.Second -} - -func defaultPollInterval() time.Duration { - return 100 * time.Millisecond -} diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE deleted file mode 100644 index 6a66aea5e..000000000 --- a/vendor/golang.org/x/exp/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/exp/PATENTS b/vendor/golang.org/x/exp/PATENTS deleted file mode 100644 index 733099041..000000000 --- a/vendor/golang.org/x/exp/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. 
If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go deleted file mode 100644 index 2c033dff4..000000000 --- a/vendor/golang.org/x/exp/constraints/constraints.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package constraints defines a set of useful constraints to be used -// with type parameters. -package constraints - -// Signed is a constraint that permits any signed integer type. -// If future releases of Go add new predeclared signed integer types, -// this constraint will be modified to include them. -type Signed interface { - ~int | ~int8 | ~int16 | ~int32 | ~int64 -} - -// Unsigned is a constraint that permits any unsigned integer type. -// If future releases of Go add new predeclared unsigned integer types, -// this constraint will be modified to include them. -type Unsigned interface { - ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr -} - -// Integer is a constraint that permits any integer type. -// If future releases of Go add new predeclared integer types, -// this constraint will be modified to include them. -type Integer interface { - Signed | Unsigned -} - -// Float is a constraint that permits any floating-point type. -// If future releases of Go add new predeclared floating-point types, -// this constraint will be modified to include them. -type Float interface { - ~float32 | ~float64 -} - -// Complex is a constraint that permits any complex numeric type. -// If future releases of Go add new predeclared complex numeric types, -// this constraint will be modified to include them. -type Complex interface { - ~complex64 | ~complex128 -} - -// Ordered is a constraint that permits any ordered type: any type -// that supports the operators < <= >= >. -// If future releases of Go add new ordered types, -// this constraint will be modified to include them. -type Ordered interface { - Integer | Float | ~string -} diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go deleted file mode 100644 index 2540bd682..000000000 --- a/vendor/golang.org/x/exp/slices/slices.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package slices defines various functions useful with slices of any type. -// Unless otherwise specified, these functions all apply to the elements -// of a slice at index 0 <= i < len(s). -// -// Note that the less function in IsSortedFunc, SortFunc, SortStableFunc requires a -// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings), -// or the sorting may fail to sort correctly. A common case is when sorting slices of -// floating-point numbers containing NaN values. 
-package slices - -import "golang.org/x/exp/constraints" - -// Equal reports whether two slices are equal: the same length and all -// elements equal. If the lengths are different, Equal returns false. -// Otherwise, the elements are compared in increasing index order, and the -// comparison stops at the first unequal pair. -// Floating point NaNs are not considered equal. -func Equal[E comparable](s1, s2 []E) bool { - if len(s1) != len(s2) { - return false - } - for i := range s1 { - if s1[i] != s2[i] { - return false - } - } - return true -} - -// EqualFunc reports whether two slices are equal using a comparison -// function on each pair of elements. If the lengths are different, -// EqualFunc returns false. Otherwise, the elements are compared in -// increasing index order, and the comparison stops at the first index -// for which eq returns false. -func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool { - if len(s1) != len(s2) { - return false - } - for i, v1 := range s1 { - v2 := s2[i] - if !eq(v1, v2) { - return false - } - } - return true -} - -// Compare compares the elements of s1 and s2. -// The elements are compared sequentially, starting at index 0, -// until one element is not equal to the other. -// The result of comparing the first non-matching elements is returned. -// If both slices are equal until one of them ends, the shorter slice is -// considered less than the longer one. -// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. -// Comparisons involving floating point NaNs are ignored. -func Compare[E constraints.Ordered](s1, s2 []E) int { - s2len := len(s2) - for i, v1 := range s1 { - if i >= s2len { - return +1 - } - v2 := s2[i] - switch { - case v1 < v2: - return -1 - case v1 > v2: - return +1 - } - } - if len(s1) < s2len { - return -1 - } - return 0 -} - -// CompareFunc is like Compare but uses a comparison function -// on each pair of elements. The elements are compared in increasing -// index order, and the comparisons stop after the first time cmp -// returns non-zero. -// The result is the first non-zero result of cmp; if cmp always -// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), -// and +1 if len(s1) > len(s2). -func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { - s2len := len(s2) - for i, v1 := range s1 { - if i >= s2len { - return +1 - } - v2 := s2[i] - if c := cmp(v1, v2); c != 0 { - return c - } - } - if len(s1) < s2len { - return -1 - } - return 0 -} - -// Index returns the index of the first occurrence of v in s, -// or -1 if not present. -func Index[E comparable](s []E, v E) int { - for i := range s { - if v == s[i] { - return i - } - } - return -1 -} - -// IndexFunc returns the first index i satisfying f(s[i]), -// or -1 if none do. -func IndexFunc[E any](s []E, f func(E) bool) int { - for i := range s { - if f(s[i]) { - return i - } - } - return -1 -} - -// Contains reports whether v is present in s. -func Contains[E comparable](s []E, v E) bool { - return Index(s, v) >= 0 -} - -// ContainsFunc reports whether at least one -// element e of s satisfies f(e). -func ContainsFunc[E any](s []E, f func(E) bool) bool { - return IndexFunc(s, f) >= 0 -} - -// Insert inserts the values v... into s at index i, -// returning the modified slice. -// In the returned slice r, r[i] == v[0]. -// Insert panics if i is out of range. -// This function is O(len(s) + len(v)). 
-func Insert[S ~[]E, E any](s S, i int, v ...E) S { - tot := len(s) + len(v) - if tot <= cap(s) { - s2 := s[:tot] - copy(s2[i+len(v):], s[i:]) - copy(s2[i:], v) - return s2 - } - s2 := make(S, tot) - copy(s2, s[:i]) - copy(s2[i:], v) - copy(s2[i+len(v):], s[i:]) - return s2 -} - -// Delete removes the elements s[i:j] from s, returning the modified slice. -// Delete panics if s[i:j] is not a valid slice of s. -// Delete modifies the contents of the slice s; it does not create a new slice. -// Delete is O(len(s)-j), so if many items must be deleted, it is better to -// make a single call deleting them all together than to delete one at a time. -// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those -// elements contain pointers you might consider zeroing those elements so that -// objects they reference can be garbage collected. -func Delete[S ~[]E, E any](s S, i, j int) S { - _ = s[i:j] // bounds check - - return append(s[:i], s[j:]...) -} - -// Replace replaces the elements s[i:j] by the given v, and returns the -// modified slice. Replace panics if s[i:j] is not a valid slice of s. -func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { - _ = s[i:j] // verify that i:j is a valid subslice - tot := len(s[:i]) + len(v) + len(s[j:]) - if tot <= cap(s) { - s2 := s[:tot] - copy(s2[i+len(v):], s[j:]) - copy(s2[i:], v) - return s2 - } - s2 := make(S, tot) - copy(s2, s[:i]) - copy(s2[i:], v) - copy(s2[i+len(v):], s[j:]) - return s2 -} - -// Clone returns a copy of the slice. -// The elements are copied using assignment, so this is a shallow clone. -func Clone[S ~[]E, E any](s S) S { - // Preserve nil in case it matters. - if s == nil { - return nil - } - return append(S([]E{}), s...) -} - -// Compact replaces consecutive runs of equal elements with a single copy. -// This is like the uniq command found on Unix. -// Compact modifies the contents of the slice s; it does not create a new slice. -// When Compact discards m elements in total, it might not modify the elements -// s[len(s)-m:len(s)]. If those elements contain pointers you might consider -// zeroing those elements so that objects they reference can be garbage collected. -func Compact[S ~[]E, E comparable](s S) S { - if len(s) < 2 { - return s - } - i := 1 - for k := 1; k < len(s); k++ { - if s[k] != s[k-1] { - if i != k { - s[i] = s[k] - } - i++ - } - } - return s[:i] -} - -// CompactFunc is like Compact but uses a comparison function. -func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { - if len(s) < 2 { - return s - } - i := 1 - for k := 1; k < len(s); k++ { - if !eq(s[k], s[k-1]) { - if i != k { - s[i] = s[k] - } - i++ - } - } - return s[:i] -} - -// Grow increases the slice's capacity, if necessary, to guarantee space for -// another n elements. After Grow(n), at least n elements can be appended -// to the slice without another allocation. If n is negative or too large to -// allocate the memory, Grow panics. -func Grow[S ~[]E, E any](s S, n int) S { - if n < 0 { - panic("cannot be negative") - } - if n -= cap(s) - len(s); n > 0 { - // TODO(https://go.dev/issue/53888): Make using []E instead of S - // to workaround a compiler bug where the runtime.growslice optimization - // does not take effect. Revert when the compiler is fixed. - s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)] - } - return s -} - -// Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. 
-func Clip[S ~[]E, E any](s S) S { - return s[:len(s):len(s)] -} diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go deleted file mode 100644 index 231b6448a..000000000 --- a/vendor/golang.org/x/exp/slices/sort.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slices - -import ( - "math/bits" - - "golang.org/x/exp/constraints" -) - -// Sort sorts a slice of any ordered type in ascending order. -// Sort may fail to sort correctly when sorting slices of floating-point -// numbers containing Not-a-number (NaN) values. -// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))}) -// instead if the input may contain NaNs. -func Sort[E constraints.Ordered](x []E) { - n := len(x) - pdqsortOrdered(x, 0, n, bits.Len(uint(n))) -} - -// SortFunc sorts the slice x in ascending order as determined by the less function. -// This sort is not guaranteed to be stable. -// -// SortFunc requires that less is a strict weak ordering. -// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. -func SortFunc[E any](x []E, less func(a, b E) bool) { - n := len(x) - pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less) -} - -// SortStableFunc sorts the slice x while keeping the original order of equal -// elements, using less to compare elements. -func SortStableFunc[E any](x []E, less func(a, b E) bool) { - stableLessFunc(x, len(x), less) -} - -// IsSorted reports whether x is sorted in ascending order. -func IsSorted[E constraints.Ordered](x []E) bool { - for i := len(x) - 1; i > 0; i-- { - if x[i] < x[i-1] { - return false - } - } - return true -} - -// IsSortedFunc reports whether x is sorted in ascending order, with less as the -// comparison function. -func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool { - for i := len(x) - 1; i > 0; i-- { - if less(x[i], x[i-1]) { - return false - } - } - return true -} - -// BinarySearch searches for target in a sorted slice and returns the position -// where target is found, or the position where target would appear in the -// sort order; it also returns a bool saying whether the target is really found -// in the slice. The slice must be sorted in increasing order. -func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) { - // Inlining is faster than calling BinarySearchFunc with a lambda. - n := len(x) - // Define x[-1] < target and x[n] >= target. - // Invariant: x[i-1] < target, x[j] >= target. - i, j := 0, n - for i < j { - h := int(uint(i+j) >> 1) // avoid overflow when computing h - // i ≤ h < j - if x[h] < target { - i = h + 1 // preserves x[i-1] < target - } else { - j = h // preserves x[j] >= target - } - } - // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i. - return i, i < n && x[i] == target -} - -// BinarySearchFunc works like BinarySearch, but uses a custom comparison -// function. The slice must be sorted in increasing order, where "increasing" -// is defined by cmp. cmp should return 0 if the slice element matches -// the target, a negative number if the slice element precedes the target, -// or a positive number if the slice element follows the target. -// cmp must implement the same ordering as the slice, such that if -// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice. 
-func BinarySearchFunc[E, T any](x []E, target T, cmp func(E, T) int) (int, bool) { - n := len(x) - // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 . - // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0. - i, j := 0, n - for i < j { - h := int(uint(i+j) >> 1) // avoid overflow when computing h - // i ≤ h < j - if cmp(x[h], target) < 0 { - i = h + 1 // preserves cmp(x[i - 1], target) < 0 - } else { - j = h // preserves cmp(x[j], target) >= 0 - } - } - // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i. - return i, i < n && cmp(x[i], target) == 0 -} - -type sortedHint int // hint for pdqsort when choosing the pivot - -const ( - unknownHint sortedHint = iota - increasingHint - decreasingHint -) - -// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf -type xorshift uint64 - -func (r *xorshift) Next() uint64 { - *r ^= *r << 13 - *r ^= *r >> 17 - *r ^= *r << 5 - return uint64(*r) -} - -func nextPowerOfTwo(length int) uint { - return 1 << bits.Len(uint(length)) -} diff --git a/vendor/golang.org/x/exp/slices/zsortfunc.go b/vendor/golang.org/x/exp/slices/zsortfunc.go deleted file mode 100644 index 2a632476c..000000000 --- a/vendor/golang.org/x/exp/slices/zsortfunc.go +++ /dev/null @@ -1,479 +0,0 @@ -// Code generated by gen_sort_variants.go; DO NOT EDIT. - -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slices - -// insertionSortLessFunc sorts data[a:b] using insertion sort. -func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { - for i := a + 1; i < b; i++ { - for j := i; j > a && less(data[j], data[j-1]); j-- { - data[j], data[j-1] = data[j-1], data[j] - } - } -} - -// siftDownLessFunc implements the heap property on data[lo:hi]. -// first is an offset into the array where the root of the heap lies. -func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) { - root := lo - for { - child := 2*root + 1 - if child >= hi { - break - } - if child+1 < hi && less(data[first+child], data[first+child+1]) { - child++ - } - if !less(data[first+root], data[first+child]) { - return - } - data[first+root], data[first+child] = data[first+child], data[first+root] - root = child - } -} - -func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { - first := a - lo := 0 - hi := b - a - - // Build heap with greatest element at top. - for i := (hi - 1) / 2; i >= 0; i-- { - siftDownLessFunc(data, i, hi, first, less) - } - - // Pop elements, largest first, into end of data. - for i := hi - 1; i >= 0; i-- { - data[first], data[first+i] = data[first+i], data[first] - siftDownLessFunc(data, lo, i, first, less) - } -} - -// pdqsortLessFunc sorts data[a:b]. -// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. -// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf -// C++ implementation: https://github.com/orlp/pdqsort -// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ -// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. 
-func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { - const maxInsertion = 12 - - var ( - wasBalanced = true // whether the last partitioning was reasonably balanced - wasPartitioned = true // whether the slice was already partitioned - ) - - for { - length := b - a - - if length <= maxInsertion { - insertionSortLessFunc(data, a, b, less) - return - } - - // Fall back to heapsort if too many bad choices were made. - if limit == 0 { - heapSortLessFunc(data, a, b, less) - return - } - - // If the last partitioning was imbalanced, we need to breaking patterns. - if !wasBalanced { - breakPatternsLessFunc(data, a, b, less) - limit-- - } - - pivot, hint := choosePivotLessFunc(data, a, b, less) - if hint == decreasingHint { - reverseRangeLessFunc(data, a, b, less) - // The chosen pivot was pivot-a elements after the start of the array. - // After reversing it is pivot-a elements before the end of the array. - // The idea came from Rust's implementation. - pivot = (b - 1) - (pivot - a) - hint = increasingHint - } - - // The slice is likely already sorted. - if wasBalanced && wasPartitioned && hint == increasingHint { - if partialInsertionSortLessFunc(data, a, b, less) { - return - } - } - - // Probably the slice contains many duplicate elements, partition the slice into - // elements equal to and elements greater than the pivot. - if a > 0 && !less(data[a-1], data[pivot]) { - mid := partitionEqualLessFunc(data, a, b, pivot, less) - a = mid - continue - } - - mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less) - wasPartitioned = alreadyPartitioned - - leftLen, rightLen := mid-a, b-mid - balanceThreshold := length / 8 - if leftLen < rightLen { - wasBalanced = leftLen >= balanceThreshold - pdqsortLessFunc(data, a, mid, limit, less) - a = mid + 1 - } else { - wasBalanced = rightLen >= balanceThreshold - pdqsortLessFunc(data, mid+1, b, limit, less) - b = mid - } - } -} - -// partitionLessFunc does one quicksort partition. -// Let p = data[pivot] -// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>
=p for inewpivot. -// On return, data[newpivot] = p -func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) { - data[a], data[pivot] = data[pivot], data[a] - i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - - for i <= j && less(data[i], data[a]) { - i++ - } - for i <= j && !less(data[j], data[a]) { - j-- - } - if i > j { - data[j], data[a] = data[a], data[j] - return j, true - } - data[i], data[j] = data[j], data[i] - i++ - j-- - - for { - for i <= j && less(data[i], data[a]) { - i++ - } - for i <= j && !less(data[j], data[a]) { - j-- - } - if i > j { - break - } - data[i], data[j] = data[j], data[i] - i++ - j-- - } - data[j], data[a] = data[a], data[j] - return j, false -} - -// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. -// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. -func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) { - data[a], data[pivot] = data[pivot], data[a] - i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - - for { - for i <= j && !less(data[a], data[i]) { - i++ - } - for i <= j && less(data[a], data[j]) { - j-- - } - if i > j { - break - } - data[i], data[j] = data[j], data[i] - i++ - j-- - } - return i -} - -// partialInsertionSortLessFunc partially sorts a slice, returns true if the slice is sorted at the end. -func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool { - const ( - maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted - shortestShifting = 50 // don't shift any elements on short arrays - ) - i := a + 1 - for j := 0; j < maxSteps; j++ { - for i < b && !less(data[i], data[i-1]) { - i++ - } - - if i == b { - return true - } - - if b-a < shortestShifting { - return false - } - - data[i], data[i-1] = data[i-1], data[i] - - // Shift the smaller one to the left. - if i-a >= 2 { - for j := i - 1; j >= 1; j-- { - if !less(data[j], data[j-1]) { - break - } - data[j], data[j-1] = data[j-1], data[j] - } - } - // Shift the greater one to the right. - if b-i >= 2 { - for j := i + 1; j < b; j++ { - if !less(data[j], data[j-1]) { - break - } - data[j], data[j-1] = data[j-1], data[j] - } - } - } - return false -} - -// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns -// that might cause imbalanced partitions in quicksort. -func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { - length := b - a - if length >= 8 { - random := xorshift(length) - modulus := nextPowerOfTwo(length) - - for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { - other := int(uint(random.Next()) & (modulus - 1)) - if other >= length { - other -= length - } - data[idx], data[a+other] = data[a+other], data[idx] - } - } -} - -// choosePivotLessFunc chooses a pivot in data[a:b]. -// -// [0,8): chooses a static pivot. -// [8,shortestNinther): uses the simple median-of-three method. -// [shortestNinther,∞): uses the Tukey ninther method. 
-func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) { - const ( - shortestNinther = 50 - maxSwaps = 4 * 3 - ) - - l := b - a - - var ( - swaps int - i = a + l/4*1 - j = a + l/4*2 - k = a + l/4*3 - ) - - if l >= 8 { - if l >= shortestNinther { - // Tukey ninther method, the idea came from Rust's implementation. - i = medianAdjacentLessFunc(data, i, &swaps, less) - j = medianAdjacentLessFunc(data, j, &swaps, less) - k = medianAdjacentLessFunc(data, k, &swaps, less) - } - // Find the median among i, j, k and stores it into j. - j = medianLessFunc(data, i, j, k, &swaps, less) - } - - switch swaps { - case 0: - return j, increasingHint - case maxSwaps: - return j, decreasingHint - default: - return j, unknownHint - } -} - -// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. -func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) { - if less(data[b], data[a]) { - *swaps++ - return b, a - } - return a, b -} - -// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. -func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int { - a, b = order2LessFunc(data, a, b, swaps, less) - b, c = order2LessFunc(data, b, c, swaps, less) - a, b = order2LessFunc(data, a, b, swaps, less) - return b -} - -// medianAdjacentLessFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. -func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int { - return medianLessFunc(data, a-1, a, a+1, swaps, less) -} - -func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { - i := a - j := b - 1 - for i < j { - data[i], data[j] = data[j], data[i] - i++ - j-- - } -} - -func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) { - for i := 0; i < n; i++ { - data[a+i], data[b+i] = data[b+i], data[a+i] - } -} - -func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) { - blockSize := 20 // must be > 0 - a, b := 0, blockSize - for b <= n { - insertionSortLessFunc(data, a, b, less) - a = b - b += blockSize - } - insertionSortLessFunc(data, a, n, less) - - for blockSize < n { - a, b = 0, 2*blockSize - for b <= n { - symMergeLessFunc(data, a, a+blockSize, b, less) - a = b - b += 2 * blockSize - } - if m := a + blockSize; m < n { - symMergeLessFunc(data, a, m, n, less) - } - blockSize *= 2 - } -} - -// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using -// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum -// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz -// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in -// Computer Science, pages 714-723. Springer, 2004. -// -// Let M = m-a and N = b-n. Wolog M < N. -// The recursion depth is bound by ceil(log(N+M)). -// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. -// The algorithm needs O((M+N)*log(M)) calls to data.Swap. -// -// The paper gives O((M+N)*log(M)) as the number of assignments assuming a -// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation -// in the paper carries through for Swap operations, especially as the block -// swapping rotate uses only O(M+N) Swaps. -// -// symMerge assumes non-degenerate arguments: a < m && m < b. 
-// Having the caller check this condition eliminates many leaf recursion calls, -// which improves performance. -func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { - // Avoid unnecessary recursions of symMerge - // by direct insertion of data[a] into data[m:b] - // if data[a:m] only contains one element. - if m-a == 1 { - // Use binary search to find the lowest index i - // such that data[i] >= data[a] for m <= i < b. - // Exit the search loop with i == b in case no such index exists. - i := m - j := b - for i < j { - h := int(uint(i+j) >> 1) - if less(data[h], data[a]) { - i = h + 1 - } else { - j = h - } - } - // Swap values until data[a] reaches the position before i. - for k := a; k < i-1; k++ { - data[k], data[k+1] = data[k+1], data[k] - } - return - } - - // Avoid unnecessary recursions of symMerge - // by direct insertion of data[m] into data[a:m] - // if data[m:b] only contains one element. - if b-m == 1 { - // Use binary search to find the lowest index i - // such that data[i] > data[m] for a <= i < m. - // Exit the search loop with i == m in case no such index exists. - i := a - j := m - for i < j { - h := int(uint(i+j) >> 1) - if !less(data[m], data[h]) { - i = h + 1 - } else { - j = h - } - } - // Swap values until data[m] reaches the position i. - for k := m; k > i; k-- { - data[k], data[k-1] = data[k-1], data[k] - } - return - } - - mid := int(uint(a+b) >> 1) - n := mid + m - var start, r int - if m > mid { - start = n - b - r = mid - } else { - start = a - r = m - } - p := n - 1 - - for start < r { - c := int(uint(start+r) >> 1) - if !less(data[p-c], data[c]) { - start = c + 1 - } else { - r = c - } - } - - end := n - start - if start < m && m < end { - rotateLessFunc(data, start, m, end, less) - } - if a < start && start < mid { - symMergeLessFunc(data, a, start, mid, less) - } - if mid < end && end < b { - symMergeLessFunc(data, mid, end, b, less) - } -} - -// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: -// Data of the form 'x u v y' is changed to 'x v u y'. -// rotate performs at most b-a many calls to data.Swap, -// and it assumes non-degenerate arguments: a < m && m < b. -func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { - i := m - a - j := b - m - - for i != j { - if i > j { - swapRangeLessFunc(data, m-i, m, j, less) - i -= j - } else { - swapRangeLessFunc(data, m-i, m+j-i, i, less) - j -= i - } - } - // i == j - swapRangeLessFunc(data, m-i, m, i, less) -} diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go deleted file mode 100644 index efaa1c8b7..000000000 --- a/vendor/golang.org/x/exp/slices/zsortordered.go +++ /dev/null @@ -1,481 +0,0 @@ -// Code generated by gen_sort_variants.go; DO NOT EDIT. - -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slices - -import "golang.org/x/exp/constraints" - -// insertionSortOrdered sorts data[a:b] using insertion sort. -func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) { - for i := a + 1; i < b; i++ { - for j := i; j > a && (data[j] < data[j-1]); j-- { - data[j], data[j-1] = data[j-1], data[j] - } - } -} - -// siftDownOrdered implements the heap property on data[lo:hi]. -// first is an offset into the array where the root of the heap lies. 
-func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) { - root := lo - for { - child := 2*root + 1 - if child >= hi { - break - } - if child+1 < hi && (data[first+child] < data[first+child+1]) { - child++ - } - if !(data[first+root] < data[first+child]) { - return - } - data[first+root], data[first+child] = data[first+child], data[first+root] - root = child - } -} - -func heapSortOrdered[E constraints.Ordered](data []E, a, b int) { - first := a - lo := 0 - hi := b - a - - // Build heap with greatest element at top. - for i := (hi - 1) / 2; i >= 0; i-- { - siftDownOrdered(data, i, hi, first) - } - - // Pop elements, largest first, into end of data. - for i := hi - 1; i >= 0; i-- { - data[first], data[first+i] = data[first+i], data[first] - siftDownOrdered(data, lo, i, first) - } -} - -// pdqsortOrdered sorts data[a:b]. -// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. -// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf -// C++ implementation: https://github.com/orlp/pdqsort -// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ -// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. -func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) { - const maxInsertion = 12 - - var ( - wasBalanced = true // whether the last partitioning was reasonably balanced - wasPartitioned = true // whether the slice was already partitioned - ) - - for { - length := b - a - - if length <= maxInsertion { - insertionSortOrdered(data, a, b) - return - } - - // Fall back to heapsort if too many bad choices were made. - if limit == 0 { - heapSortOrdered(data, a, b) - return - } - - // If the last partitioning was imbalanced, we need to breaking patterns. - if !wasBalanced { - breakPatternsOrdered(data, a, b) - limit-- - } - - pivot, hint := choosePivotOrdered(data, a, b) - if hint == decreasingHint { - reverseRangeOrdered(data, a, b) - // The chosen pivot was pivot-a elements after the start of the array. - // After reversing it is pivot-a elements before the end of the array. - // The idea came from Rust's implementation. - pivot = (b - 1) - (pivot - a) - hint = increasingHint - } - - // The slice is likely already sorted. - if wasBalanced && wasPartitioned && hint == increasingHint { - if partialInsertionSortOrdered(data, a, b) { - return - } - } - - // Probably the slice contains many duplicate elements, partition the slice into - // elements equal to and elements greater than the pivot. - if a > 0 && !(data[a-1] < data[pivot]) { - mid := partitionEqualOrdered(data, a, b, pivot) - a = mid - continue - } - - mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot) - wasPartitioned = alreadyPartitioned - - leftLen, rightLen := mid-a, b-mid - balanceThreshold := length / 8 - if leftLen < rightLen { - wasBalanced = leftLen >= balanceThreshold - pdqsortOrdered(data, a, mid, limit) - a = mid + 1 - } else { - wasBalanced = rightLen >= balanceThreshold - pdqsortOrdered(data, mid+1, b, limit) - b = mid - } - } -} - -// partitionOrdered does one quicksort partition. -// Let p = data[pivot] -// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>
=p for inewpivot. -// On return, data[newpivot] = p -func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) { - data[a], data[pivot] = data[pivot], data[a] - i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - - for i <= j && (data[i] < data[a]) { - i++ - } - for i <= j && !(data[j] < data[a]) { - j-- - } - if i > j { - data[j], data[a] = data[a], data[j] - return j, true - } - data[i], data[j] = data[j], data[i] - i++ - j-- - - for { - for i <= j && (data[i] < data[a]) { - i++ - } - for i <= j && !(data[j] < data[a]) { - j-- - } - if i > j { - break - } - data[i], data[j] = data[j], data[i] - i++ - j-- - } - data[j], data[a] = data[a], data[j] - return j, false -} - -// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. -// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. -func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) { - data[a], data[pivot] = data[pivot], data[a] - i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - - for { - for i <= j && !(data[a] < data[i]) { - i++ - } - for i <= j && (data[a] < data[j]) { - j-- - } - if i > j { - break - } - data[i], data[j] = data[j], data[i] - i++ - j-- - } - return i -} - -// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end. -func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool { - const ( - maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted - shortestShifting = 50 // don't shift any elements on short arrays - ) - i := a + 1 - for j := 0; j < maxSteps; j++ { - for i < b && !(data[i] < data[i-1]) { - i++ - } - - if i == b { - return true - } - - if b-a < shortestShifting { - return false - } - - data[i], data[i-1] = data[i-1], data[i] - - // Shift the smaller one to the left. - if i-a >= 2 { - for j := i - 1; j >= 1; j-- { - if !(data[j] < data[j-1]) { - break - } - data[j], data[j-1] = data[j-1], data[j] - } - } - // Shift the greater one to the right. - if b-i >= 2 { - for j := i + 1; j < b; j++ { - if !(data[j] < data[j-1]) { - break - } - data[j], data[j-1] = data[j-1], data[j] - } - } - } - return false -} - -// breakPatternsOrdered scatters some elements around in an attempt to break some patterns -// that might cause imbalanced partitions in quicksort. -func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) { - length := b - a - if length >= 8 { - random := xorshift(length) - modulus := nextPowerOfTwo(length) - - for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { - other := int(uint(random.Next()) & (modulus - 1)) - if other >= length { - other -= length - } - data[idx], data[a+other] = data[a+other], data[idx] - } - } -} - -// choosePivotOrdered chooses a pivot in data[a:b]. -// -// [0,8): chooses a static pivot. -// [8,shortestNinther): uses the simple median-of-three method. -// [shortestNinther,∞): uses the Tukey ninther method. -func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) { - const ( - shortestNinther = 50 - maxSwaps = 4 * 3 - ) - - l := b - a - - var ( - swaps int - i = a + l/4*1 - j = a + l/4*2 - k = a + l/4*3 - ) - - if l >= 8 { - if l >= shortestNinther { - // Tukey ninther method, the idea came from Rust's implementation. 
- i = medianAdjacentOrdered(data, i, &swaps) - j = medianAdjacentOrdered(data, j, &swaps) - k = medianAdjacentOrdered(data, k, &swaps) - } - // Find the median among i, j, k and stores it into j. - j = medianOrdered(data, i, j, k, &swaps) - } - - switch swaps { - case 0: - return j, increasingHint - case maxSwaps: - return j, decreasingHint - default: - return j, unknownHint - } -} - -// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. -func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) { - if data[b] < data[a] { - *swaps++ - return b, a - } - return a, b -} - -// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. -func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int { - a, b = order2Ordered(data, a, b, swaps) - b, c = order2Ordered(data, b, c, swaps) - a, b = order2Ordered(data, a, b, swaps) - return b -} - -// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. -func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int { - return medianOrdered(data, a-1, a, a+1, swaps) -} - -func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) { - i := a - j := b - 1 - for i < j { - data[i], data[j] = data[j], data[i] - i++ - j-- - } -} - -func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) { - for i := 0; i < n; i++ { - data[a+i], data[b+i] = data[b+i], data[a+i] - } -} - -func stableOrdered[E constraints.Ordered](data []E, n int) { - blockSize := 20 // must be > 0 - a, b := 0, blockSize - for b <= n { - insertionSortOrdered(data, a, b) - a = b - b += blockSize - } - insertionSortOrdered(data, a, n) - - for blockSize < n { - a, b = 0, 2*blockSize - for b <= n { - symMergeOrdered(data, a, a+blockSize, b) - a = b - b += 2 * blockSize - } - if m := a + blockSize; m < n { - symMergeOrdered(data, a, m, n) - } - blockSize *= 2 - } -} - -// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using -// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum -// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz -// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in -// Computer Science, pages 714-723. Springer, 2004. -// -// Let M = m-a and N = b-n. Wolog M < N. -// The recursion depth is bound by ceil(log(N+M)). -// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. -// The algorithm needs O((M+N)*log(M)) calls to data.Swap. -// -// The paper gives O((M+N)*log(M)) as the number of assignments assuming a -// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation -// in the paper carries through for Swap operations, especially as the block -// swapping rotate uses only O(M+N) Swaps. -// -// symMerge assumes non-degenerate arguments: a < m && m < b. -// Having the caller check this condition eliminates many leaf recursion calls, -// which improves performance. -func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { - // Avoid unnecessary recursions of symMerge - // by direct insertion of data[a] into data[m:b] - // if data[a:m] only contains one element. - if m-a == 1 { - // Use binary search to find the lowest index i - // such that data[i] >= data[a] for m <= i < b. - // Exit the search loop with i == b in case no such index exists. 
- i := m - j := b - for i < j { - h := int(uint(i+j) >> 1) - if data[h] < data[a] { - i = h + 1 - } else { - j = h - } - } - // Swap values until data[a] reaches the position before i. - for k := a; k < i-1; k++ { - data[k], data[k+1] = data[k+1], data[k] - } - return - } - - // Avoid unnecessary recursions of symMerge - // by direct insertion of data[m] into data[a:m] - // if data[m:b] only contains one element. - if b-m == 1 { - // Use binary search to find the lowest index i - // such that data[i] > data[m] for a <= i < m. - // Exit the search loop with i == m in case no such index exists. - i := a - j := m - for i < j { - h := int(uint(i+j) >> 1) - if !(data[m] < data[h]) { - i = h + 1 - } else { - j = h - } - } - // Swap values until data[m] reaches the position i. - for k := m; k > i; k-- { - data[k], data[k-1] = data[k-1], data[k] - } - return - } - - mid := int(uint(a+b) >> 1) - n := mid + m - var start, r int - if m > mid { - start = n - b - r = mid - } else { - start = a - r = m - } - p := n - 1 - - for start < r { - c := int(uint(start+r) >> 1) - if !(data[p-c] < data[c]) { - start = c + 1 - } else { - r = c - } - } - - end := n - start - if start < m && m < end { - rotateOrdered(data, start, m, end) - } - if a < start && start < mid { - symMergeOrdered(data, a, start, mid) - } - if mid < end && end < b { - symMergeOrdered(data, mid, end, b) - } -} - -// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: -// Data of the form 'x u v y' is changed to 'x v u y'. -// rotate performs at most b-a many calls to data.Swap, -// and it assumes non-degenerate arguments: a < m && m < b. -func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) { - i := m - a - j := b - m - - for i != j { - if i > j { - swapRangeOrdered(data, m-i, m, j) - i -= j - } else { - swapRangeOrdered(data, m-i, m+j-i, i) - j -= i - } - } - // i == j - swapRangeOrdered(data, m-i, m, i) -} diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE deleted file mode 100644 index 6a66aea5e..000000000 --- a/vendor/golang.org/x/net/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS deleted file mode 100644 index 733099041..000000000 --- a/vendor/golang.org/x/net/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/net/internal/socks/client.go b/vendor/golang.org/x/net/internal/socks/client.go deleted file mode 100644 index 3d6f516a5..000000000 --- a/vendor/golang.org/x/net/internal/socks/client.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package socks - -import ( - "context" - "errors" - "io" - "net" - "strconv" - "time" -) - -var ( - noDeadline = time.Time{} - aLongTimeAgo = time.Unix(1, 0) -) - -func (d *Dialer) connect(ctx context.Context, c net.Conn, address string) (_ net.Addr, ctxErr error) { - host, port, err := splitHostPort(address) - if err != nil { - return nil, err - } - if deadline, ok := ctx.Deadline(); ok && !deadline.IsZero() { - c.SetDeadline(deadline) - defer c.SetDeadline(noDeadline) - } - if ctx != context.Background() { - errCh := make(chan error, 1) - done := make(chan struct{}) - defer func() { - close(done) - if ctxErr == nil { - ctxErr = <-errCh - } - }() - go func() { - select { - case <-ctx.Done(): - c.SetDeadline(aLongTimeAgo) - errCh <- ctx.Err() - case <-done: - errCh <- nil - } - }() - } - - b := make([]byte, 0, 6+len(host)) // the size here is just an estimate - b = append(b, Version5) - if len(d.AuthMethods) == 0 || d.Authenticate == nil { - b = append(b, 1, byte(AuthMethodNotRequired)) - } else { - ams := d.AuthMethods - if len(ams) > 255 { - return nil, errors.New("too many authentication methods") - } - b = append(b, byte(len(ams))) - for _, am := range ams { - b = append(b, byte(am)) - } - } - if _, ctxErr = c.Write(b); ctxErr != nil { - return - } - - if _, ctxErr = io.ReadFull(c, b[:2]); ctxErr != nil { - return - } - if b[0] != Version5 { - return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0]))) - } - am := AuthMethod(b[1]) - if am == AuthMethodNoAcceptableMethods { - return nil, errors.New("no acceptable authentication methods") - } - if d.Authenticate != nil { - if ctxErr = d.Authenticate(ctx, c, am); ctxErr != nil { - return - } - } - - b = b[:0] - b = append(b, Version5, byte(d.cmd), 0) - if ip := net.ParseIP(host); ip != nil { - if ip4 := ip.To4(); ip4 != nil { - b = append(b, AddrTypeIPv4) - b = append(b, ip4...) - } else if ip6 := ip.To16(); ip6 != nil { - b = append(b, AddrTypeIPv6) - b = append(b, ip6...) - } else { - return nil, errors.New("unknown address type") - } - } else { - if len(host) > 255 { - return nil, errors.New("FQDN too long") - } - b = append(b, AddrTypeFQDN) - b = append(b, byte(len(host))) - b = append(b, host...) 
- } - b = append(b, byte(port>>8), byte(port)) - if _, ctxErr = c.Write(b); ctxErr != nil { - return - } - - if _, ctxErr = io.ReadFull(c, b[:4]); ctxErr != nil { - return - } - if b[0] != Version5 { - return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0]))) - } - if cmdErr := Reply(b[1]); cmdErr != StatusSucceeded { - return nil, errors.New("unknown error " + cmdErr.String()) - } - if b[2] != 0 { - return nil, errors.New("non-zero reserved field") - } - l := 2 - var a Addr - switch b[3] { - case AddrTypeIPv4: - l += net.IPv4len - a.IP = make(net.IP, net.IPv4len) - case AddrTypeIPv6: - l += net.IPv6len - a.IP = make(net.IP, net.IPv6len) - case AddrTypeFQDN: - if _, err := io.ReadFull(c, b[:1]); err != nil { - return nil, err - } - l += int(b[0]) - default: - return nil, errors.New("unknown address type " + strconv.Itoa(int(b[3]))) - } - if cap(b) < l { - b = make([]byte, l) - } else { - b = b[:l] - } - if _, ctxErr = io.ReadFull(c, b); ctxErr != nil { - return - } - if a.IP != nil { - copy(a.IP, b) - } else { - a.Name = string(b[:len(b)-2]) - } - a.Port = int(b[len(b)-2])<<8 | int(b[len(b)-1]) - return &a, nil -} - -func splitHostPort(address string) (string, int, error) { - host, port, err := net.SplitHostPort(address) - if err != nil { - return "", 0, err - } - portnum, err := strconv.Atoi(port) - if err != nil { - return "", 0, err - } - if 1 > portnum || portnum > 0xffff { - return "", 0, errors.New("port number out of range " + port) - } - return host, portnum, nil -} diff --git a/vendor/golang.org/x/net/internal/socks/socks.go b/vendor/golang.org/x/net/internal/socks/socks.go deleted file mode 100644 index 97db2340e..000000000 --- a/vendor/golang.org/x/net/internal/socks/socks.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package socks provides a SOCKS version 5 client implementation. -// -// SOCKS protocol version 5 is defined in RFC 1928. -// Username/Password authentication for SOCKS version 5 is defined in -// RFC 1929. -package socks - -import ( - "context" - "errors" - "io" - "net" - "strconv" -) - -// A Command represents a SOCKS command. -type Command int - -func (cmd Command) String() string { - switch cmd { - case CmdConnect: - return "socks connect" - case cmdBind: - return "socks bind" - default: - return "socks " + strconv.Itoa(int(cmd)) - } -} - -// An AuthMethod represents a SOCKS authentication method. -type AuthMethod int - -// A Reply represents a SOCKS command reply code. -type Reply int - -func (code Reply) String() string { - switch code { - case StatusSucceeded: - return "succeeded" - case 0x01: - return "general SOCKS server failure" - case 0x02: - return "connection not allowed by ruleset" - case 0x03: - return "network unreachable" - case 0x04: - return "host unreachable" - case 0x05: - return "connection refused" - case 0x06: - return "TTL expired" - case 0x07: - return "command not supported" - case 0x08: - return "address type not supported" - default: - return "unknown code: " + strconv.Itoa(int(code)) - } -} - -// Wire protocol constants. 
-const ( - Version5 = 0x05 - - AddrTypeIPv4 = 0x01 - AddrTypeFQDN = 0x03 - AddrTypeIPv6 = 0x04 - - CmdConnect Command = 0x01 // establishes an active-open forward proxy connection - cmdBind Command = 0x02 // establishes a passive-open forward proxy connection - - AuthMethodNotRequired AuthMethod = 0x00 // no authentication required - AuthMethodUsernamePassword AuthMethod = 0x02 // use username/password - AuthMethodNoAcceptableMethods AuthMethod = 0xff // no acceptable authentication methods - - StatusSucceeded Reply = 0x00 -) - -// An Addr represents a SOCKS-specific address. -// Either Name or IP is used exclusively. -type Addr struct { - Name string // fully-qualified domain name - IP net.IP - Port int -} - -func (a *Addr) Network() string { return "socks" } - -func (a *Addr) String() string { - if a == nil { - return "" - } - port := strconv.Itoa(a.Port) - if a.IP == nil { - return net.JoinHostPort(a.Name, port) - } - return net.JoinHostPort(a.IP.String(), port) -} - -// A Conn represents a forward proxy connection. -type Conn struct { - net.Conn - - boundAddr net.Addr -} - -// BoundAddr returns the address assigned by the proxy server for -// connecting to the command target address from the proxy server. -func (c *Conn) BoundAddr() net.Addr { - if c == nil { - return nil - } - return c.boundAddr -} - -// A Dialer holds SOCKS-specific options. -type Dialer struct { - cmd Command // either CmdConnect or cmdBind - proxyNetwork string // network between a proxy server and a client - proxyAddress string // proxy server address - - // ProxyDial specifies the optional dial function for - // establishing the transport connection. - ProxyDial func(context.Context, string, string) (net.Conn, error) - - // AuthMethods specifies the list of request authentication - // methods. - // If empty, SOCKS client requests only AuthMethodNotRequired. - AuthMethods []AuthMethod - - // Authenticate specifies the optional authentication - // function. It must be non-nil when AuthMethods is not empty. - // It must return an error when the authentication is failed. - Authenticate func(context.Context, io.ReadWriter, AuthMethod) error -} - -// DialContext connects to the provided address on the provided -// network. -// -// The returned error value may be a net.OpError. When the Op field of -// net.OpError contains "socks", the Source field contains a proxy -// server address and the Addr field contains a command target -// address. -// -// See func Dial of the net package of standard library for a -// description of the network and address parameters. 
-func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { - if err := d.validateTarget(network, address); err != nil { - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} - } - if ctx == nil { - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")} - } - var err error - var c net.Conn - if d.ProxyDial != nil { - c, err = d.ProxyDial(ctx, d.proxyNetwork, d.proxyAddress) - } else { - var dd net.Dialer - c, err = dd.DialContext(ctx, d.proxyNetwork, d.proxyAddress) - } - if err != nil { - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} - } - a, err := d.connect(ctx, c, address) - if err != nil { - c.Close() - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} - } - return &Conn{Conn: c, boundAddr: a}, nil -} - -// DialWithConn initiates a connection from SOCKS server to the target -// network and address using the connection c that is already -// connected to the SOCKS server. -// -// It returns the connection's local address assigned by the SOCKS -// server. -func (d *Dialer) DialWithConn(ctx context.Context, c net.Conn, network, address string) (net.Addr, error) { - if err := d.validateTarget(network, address); err != nil { - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} - } - if ctx == nil { - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")} - } - a, err := d.connect(ctx, c, address) - if err != nil { - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} - } - return a, nil -} - -// Dial connects to the provided address on the provided network. -// -// Unlike DialContext, it returns a raw transport connection instead -// of a forward proxy connection. -// -// Deprecated: Use DialContext or DialWithConn instead. 
-func (d *Dialer) Dial(network, address string) (net.Conn, error) { - if err := d.validateTarget(network, address); err != nil { - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} - } - var err error - var c net.Conn - if d.ProxyDial != nil { - c, err = d.ProxyDial(context.Background(), d.proxyNetwork, d.proxyAddress) - } else { - c, err = net.Dial(d.proxyNetwork, d.proxyAddress) - } - if err != nil { - proxy, dst, _ := d.pathAddrs(address) - return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} - } - if _, err := d.DialWithConn(context.Background(), c, network, address); err != nil { - c.Close() - return nil, err - } - return c, nil -} - -func (d *Dialer) validateTarget(network, address string) error { - switch network { - case "tcp", "tcp6", "tcp4": - default: - return errors.New("network not implemented") - } - switch d.cmd { - case CmdConnect, cmdBind: - default: - return errors.New("command not implemented") - } - return nil -} - -func (d *Dialer) pathAddrs(address string) (proxy, dst net.Addr, err error) { - for i, s := range []string{d.proxyAddress, address} { - host, port, err := splitHostPort(s) - if err != nil { - return nil, nil, err - } - a := &Addr{Port: port} - a.IP = net.ParseIP(host) - if a.IP == nil { - a.Name = host - } - if i == 0 { - proxy = a - } else { - dst = a - } - } - return -} - -// NewDialer returns a new Dialer that dials through the provided -// proxy server's network and address. -func NewDialer(network, address string) *Dialer { - return &Dialer{proxyNetwork: network, proxyAddress: address, cmd: CmdConnect} -} - -const ( - authUsernamePasswordVersion = 0x01 - authStatusSucceeded = 0x00 -) - -// UsernamePassword are the credentials for the username/password -// authentication method. -type UsernamePassword struct { - Username string - Password string -} - -// Authenticate authenticates a pair of username and password with the -// proxy server. -func (up *UsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter, auth AuthMethod) error { - switch auth { - case AuthMethodNotRequired: - return nil - case AuthMethodUsernamePassword: - if len(up.Username) == 0 || len(up.Username) > 255 || len(up.Password) == 0 || len(up.Password) > 255 { - return errors.New("invalid username/password") - } - b := []byte{authUsernamePasswordVersion} - b = append(b, byte(len(up.Username))) - b = append(b, up.Username...) - b = append(b, byte(len(up.Password))) - b = append(b, up.Password...) - // TODO(mikio): handle IO deadlines and cancelation if - // necessary - if _, err := rw.Write(b); err != nil { - return err - } - if _, err := io.ReadFull(rw, b[:2]); err != nil { - return err - } - if b[0] != authUsernamePasswordVersion { - return errors.New("invalid username/password version") - } - if b[1] != authStatusSucceeded { - return errors.New("username/password authentication failed") - } - return nil - } - return errors.New("unsupported authentication method " + strconv.Itoa(int(auth))) -} diff --git a/vendor/golang.org/x/net/proxy/dial.go b/vendor/golang.org/x/net/proxy/dial.go deleted file mode 100644 index 811c2e4e9..000000000 --- a/vendor/golang.org/x/net/proxy/dial.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package proxy - -import ( - "context" - "net" -) - -// A ContextDialer dials using a context. -type ContextDialer interface { - DialContext(ctx context.Context, network, address string) (net.Conn, error) -} - -// Dial works like DialContext on net.Dialer but using a dialer returned by FromEnvironment. -// -// The passed ctx is only used for returning the Conn, not the lifetime of the Conn. -// -// Custom dialers (registered via RegisterDialerType) that do not implement ContextDialer -// can leak a goroutine for as long as it takes the underlying Dialer implementation to timeout. -// -// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed. -func Dial(ctx context.Context, network, address string) (net.Conn, error) { - d := FromEnvironment() - if xd, ok := d.(ContextDialer); ok { - return xd.DialContext(ctx, network, address) - } - return dialContext(ctx, d, network, address) -} - -// WARNING: this can leak a goroutine for as long as the underlying Dialer implementation takes to timeout -// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed. -func dialContext(ctx context.Context, d Dialer, network, address string) (net.Conn, error) { - var ( - conn net.Conn - done = make(chan struct{}, 1) - err error - ) - go func() { - conn, err = d.Dial(network, address) - close(done) - if conn != nil && ctx.Err() != nil { - conn.Close() - } - }() - select { - case <-ctx.Done(): - err = ctx.Err() - case <-done: - } - return conn, err -} diff --git a/vendor/golang.org/x/net/proxy/direct.go b/vendor/golang.org/x/net/proxy/direct.go deleted file mode 100644 index 3d66bdef9..000000000 --- a/vendor/golang.org/x/net/proxy/direct.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proxy - -import ( - "context" - "net" -) - -type direct struct{} - -// Direct implements Dialer by making network connections directly using net.Dial or net.DialContext. -var Direct = direct{} - -var ( - _ Dialer = Direct - _ ContextDialer = Direct -) - -// Dial directly invokes net.Dial with the supplied parameters. -func (direct) Dial(network, addr string) (net.Conn, error) { - return net.Dial(network, addr) -} - -// DialContext instantiates a net.Dialer and invokes its DialContext receiver with the supplied parameters. -func (direct) DialContext(ctx context.Context, network, addr string) (net.Conn, error) { - var d net.Dialer - return d.DialContext(ctx, network, addr) -} diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go deleted file mode 100644 index 573fe79e8..000000000 --- a/vendor/golang.org/x/net/proxy/per_host.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proxy - -import ( - "context" - "net" - "strings" -) - -// A PerHost directs connections to a default Dialer unless the host name -// requested matches one of a number of exceptions. -type PerHost struct { - def, bypass Dialer - - bypassNetworks []*net.IPNet - bypassIPs []net.IP - bypassZones []string - bypassHosts []string -} - -// NewPerHost returns a PerHost Dialer that directs connections to either -// defaultDialer or bypass, depending on whether the connection matches one of -// the configured rules. 
-func NewPerHost(defaultDialer, bypass Dialer) *PerHost { - return &PerHost{ - def: defaultDialer, - bypass: bypass, - } -} - -// Dial connects to the address addr on the given network through either -// defaultDialer or bypass. -func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - - return p.dialerForRequest(host).Dial(network, addr) -} - -// DialContext connects to the address addr on the given network through either -// defaultDialer or bypass. -func (p *PerHost) DialContext(ctx context.Context, network, addr string) (c net.Conn, err error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - d := p.dialerForRequest(host) - if x, ok := d.(ContextDialer); ok { - return x.DialContext(ctx, network, addr) - } - return dialContext(ctx, d, network, addr) -} - -func (p *PerHost) dialerForRequest(host string) Dialer { - if ip := net.ParseIP(host); ip != nil { - for _, net := range p.bypassNetworks { - if net.Contains(ip) { - return p.bypass - } - } - for _, bypassIP := range p.bypassIPs { - if bypassIP.Equal(ip) { - return p.bypass - } - } - return p.def - } - - for _, zone := range p.bypassZones { - if strings.HasSuffix(host, zone) { - return p.bypass - } - if host == zone[1:] { - // For a zone ".example.com", we match "example.com" - // too. - return p.bypass - } - } - for _, bypassHost := range p.bypassHosts { - if bypassHost == host { - return p.bypass - } - } - return p.def -} - -// AddFromString parses a string that contains comma-separated values -// specifying hosts that should use the bypass proxy. Each value is either an -// IP address, a CIDR range, a zone (*.example.com) or a host name -// (localhost). A best effort is made to parse the string and errors are -// ignored. -func (p *PerHost) AddFromString(s string) { - hosts := strings.Split(s, ",") - for _, host := range hosts { - host = strings.TrimSpace(host) - if len(host) == 0 { - continue - } - if strings.Contains(host, "/") { - // We assume that it's a CIDR address like 127.0.0.0/8 - if _, net, err := net.ParseCIDR(host); err == nil { - p.AddNetwork(net) - } - continue - } - if ip := net.ParseIP(host); ip != nil { - p.AddIP(ip) - continue - } - if strings.HasPrefix(host, "*.") { - p.AddZone(host[1:]) - continue - } - p.AddHost(host) - } -} - -// AddIP specifies an IP address that will use the bypass proxy. Note that -// this will only take effect if a literal IP address is dialed. A connection -// to a named host will never match an IP. -func (p *PerHost) AddIP(ip net.IP) { - p.bypassIPs = append(p.bypassIPs, ip) -} - -// AddNetwork specifies an IP range that will use the bypass proxy. Note that -// this will only take effect if a literal IP address is dialed. A connection -// to a named host will never match. -func (p *PerHost) AddNetwork(net *net.IPNet) { - p.bypassNetworks = append(p.bypassNetworks, net) -} - -// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of -// "example.com" matches "example.com" and all of its subdomains. -func (p *PerHost) AddZone(zone string) { - if strings.HasSuffix(zone, ".") { - zone = zone[:len(zone)-1] - } - if !strings.HasPrefix(zone, ".") { - zone = "." + zone - } - p.bypassZones = append(p.bypassZones, zone) -} - -// AddHost specifies a host name that will use the bypass proxy. 
-func (p *PerHost) AddHost(host string) { - if strings.HasSuffix(host, ".") { - host = host[:len(host)-1] - } - p.bypassHosts = append(p.bypassHosts, host) -} diff --git a/vendor/golang.org/x/net/proxy/proxy.go b/vendor/golang.org/x/net/proxy/proxy.go deleted file mode 100644 index 9ff4b9a77..000000000 --- a/vendor/golang.org/x/net/proxy/proxy.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package proxy provides support for a variety of protocols to proxy network -// data. -package proxy // import "golang.org/x/net/proxy" - -import ( - "errors" - "net" - "net/url" - "os" - "sync" -) - -// A Dialer is a means to establish a connection. -// Custom dialers should also implement ContextDialer. -type Dialer interface { - // Dial connects to the given address via the proxy. - Dial(network, addr string) (c net.Conn, err error) -} - -// Auth contains authentication parameters that specific Dialers may require. -type Auth struct { - User, Password string -} - -// FromEnvironment returns the dialer specified by the proxy-related -// variables in the environment and makes underlying connections -// directly. -func FromEnvironment() Dialer { - return FromEnvironmentUsing(Direct) -} - -// FromEnvironmentUsing returns the dialer specify by the proxy-related -// variables in the environment and makes underlying connections -// using the provided forwarding Dialer (for instance, a *net.Dialer -// with desired configuration). -func FromEnvironmentUsing(forward Dialer) Dialer { - allProxy := allProxyEnv.Get() - if len(allProxy) == 0 { - return forward - } - - proxyURL, err := url.Parse(allProxy) - if err != nil { - return forward - } - proxy, err := FromURL(proxyURL, forward) - if err != nil { - return forward - } - - noProxy := noProxyEnv.Get() - if len(noProxy) == 0 { - return proxy - } - - perHost := NewPerHost(proxy, forward) - perHost.AddFromString(noProxy) - return perHost -} - -// proxySchemes is a map from URL schemes to a function that creates a Dialer -// from a URL with such a scheme. -var proxySchemes map[string]func(*url.URL, Dialer) (Dialer, error) - -// RegisterDialerType takes a URL scheme and a function to generate Dialers from -// a URL with that scheme and a forwarding Dialer. Registered schemes are used -// by FromURL. -func RegisterDialerType(scheme string, f func(*url.URL, Dialer) (Dialer, error)) { - if proxySchemes == nil { - proxySchemes = make(map[string]func(*url.URL, Dialer) (Dialer, error)) - } - proxySchemes[scheme] = f -} - -// FromURL returns a Dialer given a URL specification and an underlying -// Dialer for it to make network requests. -func FromURL(u *url.URL, forward Dialer) (Dialer, error) { - var auth *Auth - if u.User != nil { - auth = new(Auth) - auth.User = u.User.Username() - if p, ok := u.User.Password(); ok { - auth.Password = p - } - } - - switch u.Scheme { - case "socks5", "socks5h": - addr := u.Hostname() - port := u.Port() - if port == "" { - port = "1080" - } - return SOCKS5("tcp", net.JoinHostPort(addr, port), auth, forward) - } - - // If the scheme doesn't match any of the built-in schemes, see if it - // was registered by another package. 
- if proxySchemes != nil { - if f, ok := proxySchemes[u.Scheme]; ok { - return f(u, forward) - } - } - - return nil, errors.New("proxy: unknown scheme: " + u.Scheme) -} - -var ( - allProxyEnv = &envOnce{ - names: []string{"ALL_PROXY", "all_proxy"}, - } - noProxyEnv = &envOnce{ - names: []string{"NO_PROXY", "no_proxy"}, - } -) - -// envOnce looks up an environment variable (optionally by multiple -// names) once. It mitigates expensive lookups on some platforms -// (e.g. Windows). -// (Borrowed from net/http/transport.go) -type envOnce struct { - names []string - once sync.Once - val string -} - -func (e *envOnce) Get() string { - e.once.Do(e.init) - return e.val -} - -func (e *envOnce) init() { - for _, n := range e.names { - e.val = os.Getenv(n) - if e.val != "" { - return - } - } -} - -// reset is used by tests -func (e *envOnce) reset() { - e.once = sync.Once{} - e.val = "" -} diff --git a/vendor/golang.org/x/net/proxy/socks5.go b/vendor/golang.org/x/net/proxy/socks5.go deleted file mode 100644 index c91651f96..000000000 --- a/vendor/golang.org/x/net/proxy/socks5.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proxy - -import ( - "context" - "net" - - "golang.org/x/net/internal/socks" -) - -// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given -// address with an optional username and password. -// See RFC 1928 and RFC 1929. -func SOCKS5(network, address string, auth *Auth, forward Dialer) (Dialer, error) { - d := socks.NewDialer(network, address) - if forward != nil { - if f, ok := forward.(ContextDialer); ok { - d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) { - return f.DialContext(ctx, network, address) - } - } else { - d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) { - return dialContext(ctx, forward, network, address) - } - } - } - if auth != nil { - up := socks.UsernamePassword{ - Username: auth.User, - Password: auth.Password, - } - d.AuthMethods = []socks.AuthMethod{ - socks.AuthMethodNotRequired, - socks.AuthMethodUsernamePassword, - } - d.Authenticate = up.Authenticate - } - return d, nil -} diff --git a/vendor/golang.org/x/sys/execabs/execabs.go b/vendor/golang.org/x/sys/execabs/execabs.go deleted file mode 100644 index 3bf40fdfe..000000000 --- a/vendor/golang.org/x/sys/execabs/execabs.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package execabs is a drop-in replacement for os/exec -// that requires PATH lookups to find absolute paths. -// That is, execabs.Command("cmd") runs the same PATH lookup -// as exec.Command("cmd"), but if the result is a path -// which is relative, the Run and Start methods will report -// an error instead of running the executable. -// -// See https://blog.golang.org/path-security for more information -// about when it may be necessary or appropriate to use this package. -package execabs - -import ( - "context" - "fmt" - "os/exec" - "path/filepath" - "reflect" - "unsafe" -) - -// ErrNotFound is the error resulting if a path search failed to find an executable file. -// It is an alias for exec.ErrNotFound. -var ErrNotFound = exec.ErrNotFound - -// Cmd represents an external command being prepared or run. -// It is an alias for exec.Cmd. 
-type Cmd = exec.Cmd - -// Error is returned by LookPath when it fails to classify a file as an executable. -// It is an alias for exec.Error. -type Error = exec.Error - -// An ExitError reports an unsuccessful exit by a command. -// It is an alias for exec.ExitError. -type ExitError = exec.ExitError - -func relError(file, path string) error { - return fmt.Errorf("%s resolves to executable in current directory (.%c%s)", file, filepath.Separator, path) -} - -// LookPath searches for an executable named file in the directories -// named by the PATH environment variable. If file contains a slash, -// it is tried directly and the PATH is not consulted. The result will be -// an absolute path. -// -// LookPath differs from exec.LookPath in its handling of PATH lookups, -// which are used for file names without slashes. If exec.LookPath's -// PATH lookup would have returned an executable from the current directory, -// LookPath instead returns an error. -func LookPath(file string) (string, error) { - path, err := exec.LookPath(file) - if err != nil && !isGo119ErrDot(err) { - return "", err - } - if filepath.Base(file) == file && !filepath.IsAbs(path) { - return "", relError(file, path) - } - return path, nil -} - -func fixCmd(name string, cmd *exec.Cmd) { - if filepath.Base(name) == name && !filepath.IsAbs(cmd.Path) && !isGo119ErrFieldSet(cmd) { - // exec.Command was called with a bare binary name and - // exec.LookPath returned a path which is not absolute. - // Set cmd.lookPathErr and clear cmd.Path so that it - // cannot be run. - lookPathErr := (*error)(unsafe.Pointer(reflect.ValueOf(cmd).Elem().FieldByName("lookPathErr").Addr().Pointer())) - if *lookPathErr == nil { - *lookPathErr = relError(name, cmd.Path) - } - cmd.Path = "" - } -} - -// CommandContext is like Command but includes a context. -// -// The provided context is used to kill the process (by calling os.Process.Kill) -// if the context becomes done before the command completes on its own. -func CommandContext(ctx context.Context, name string, arg ...string) *exec.Cmd { - cmd := exec.CommandContext(ctx, name, arg...) - fixCmd(name, cmd) - return cmd - -} - -// Command returns the Cmd struct to execute the named program with the given arguments. -// See exec.Command for most details. -// -// Command differs from exec.Command in its handling of PATH lookups, -// which are used when the program name contains no slashes. -// If exec.Command would have returned an exec.Cmd configured to run an -// executable from the current directory, Command instead -// returns an exec.Cmd that will return an error from Start or Run. -func Command(name string, arg ...string) *exec.Cmd { - cmd := exec.Command(name, arg...) - fixCmd(name, cmd) - return cmd -} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go118.go b/vendor/golang.org/x/sys/execabs/execabs_go118.go deleted file mode 100644 index 2000064a8..000000000 --- a/vendor/golang.org/x/sys/execabs/execabs_go118.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.19 -// +build !go1.19 - -package execabs - -import "os/exec" - -func isGo119ErrDot(err error) bool { - return false -} - -func isGo119ErrFieldSet(cmd *exec.Cmd) bool { - return false -} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go119.go b/vendor/golang.org/x/sys/execabs/execabs_go119.go deleted file mode 100644 index f364b3418..000000000 --- a/vendor/golang.org/x/sys/execabs/execabs_go119.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.19 -// +build go1.19 - -package execabs - -import ( - "errors" - "os/exec" -) - -func isGo119ErrDot(err error) bool { - return errors.Is(err, exec.ErrDot) -} - -func isGo119ErrFieldSet(cmd *exec.Cmd) bool { - return cmd.Err != nil -} diff --git a/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go b/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go deleted file mode 100644 index e07899b90..000000000 --- a/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package unsafeheader contains header declarations for the Go runtime's -// slice and string implementations. -// -// This package allows x/sys to use types equivalent to -// reflect.SliceHeader and reflect.StringHeader without introducing -// a dependency on the (relatively heavy) "reflect" package. -package unsafeheader - -import ( - "unsafe" -) - -// Slice is the runtime representation of a slice. -// It cannot be used safely or portably and its representation may change in a later release. -type Slice struct { - Data unsafe.Pointer - Len int - Cap int -} - -// String is the runtime representation of a string. -// It cannot be used safely or portably and its representation may change in a later release. -type String struct { - Data unsafe.Pointer - Len int -} diff --git a/vendor/golang.org/x/sys/windows/aliases.go b/vendor/golang.org/x/sys/windows/aliases.go deleted file mode 100644 index a20ebea63..000000000 --- a/vendor/golang.org/x/sys/windows/aliases.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build windows && go1.9 -// +build windows,go1.9 - -package windows - -import "syscall" - -type Errno = syscall.Errno -type SysProcAttr = syscall.SysProcAttr diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go deleted file mode 100644 index 115341fba..000000000 --- a/vendor/golang.org/x/sys/windows/dll_windows.go +++ /dev/null @@ -1,416 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package windows - -import ( - "sync" - "sync/atomic" - "syscall" - "unsafe" -) - -// We need to use LoadLibrary and GetProcAddress from the Go runtime, because -// the these symbols are loaded by the system linker and are required to -// dynamically load additional symbols. Note that in the Go runtime, these -// return syscall.Handle and syscall.Errno, but these are the same, in fact, -// as windows.Handle and windows.Errno, and we intend to keep these the same. 
- -//go:linkname syscall_loadlibrary syscall.loadlibrary -func syscall_loadlibrary(filename *uint16) (handle Handle, err Errno) - -//go:linkname syscall_getprocaddress syscall.getprocaddress -func syscall_getprocaddress(handle Handle, procname *uint8) (proc uintptr, err Errno) - -// DLLError describes reasons for DLL load failures. -type DLLError struct { - Err error - ObjName string - Msg string -} - -func (e *DLLError) Error() string { return e.Msg } - -func (e *DLLError) Unwrap() error { return e.Err } - -// A DLL implements access to a single DLL. -type DLL struct { - Name string - Handle Handle -} - -// LoadDLL loads DLL file into memory. -// -// Warning: using LoadDLL without an absolute path name is subject to -// DLL preloading attacks. To safely load a system DLL, use LazyDLL -// with System set to true, or use LoadLibraryEx directly. -func LoadDLL(name string) (dll *DLL, err error) { - namep, err := UTF16PtrFromString(name) - if err != nil { - return nil, err - } - h, e := syscall_loadlibrary(namep) - if e != 0 { - return nil, &DLLError{ - Err: e, - ObjName: name, - Msg: "Failed to load " + name + ": " + e.Error(), - } - } - d := &DLL{ - Name: name, - Handle: h, - } - return d, nil -} - -// MustLoadDLL is like LoadDLL but panics if load operation failes. -func MustLoadDLL(name string) *DLL { - d, e := LoadDLL(name) - if e != nil { - panic(e) - } - return d -} - -// FindProc searches DLL d for procedure named name and returns *Proc -// if found. It returns an error if search fails. -func (d *DLL) FindProc(name string) (proc *Proc, err error) { - namep, err := BytePtrFromString(name) - if err != nil { - return nil, err - } - a, e := syscall_getprocaddress(d.Handle, namep) - if e != 0 { - return nil, &DLLError{ - Err: e, - ObjName: name, - Msg: "Failed to find " + name + " procedure in " + d.Name + ": " + e.Error(), - } - } - p := &Proc{ - Dll: d, - Name: name, - addr: a, - } - return p, nil -} - -// MustFindProc is like FindProc but panics if search fails. -func (d *DLL) MustFindProc(name string) *Proc { - p, e := d.FindProc(name) - if e != nil { - panic(e) - } - return p -} - -// FindProcByOrdinal searches DLL d for procedure by ordinal and returns *Proc -// if found. It returns an error if search fails. -func (d *DLL) FindProcByOrdinal(ordinal uintptr) (proc *Proc, err error) { - a, e := GetProcAddressByOrdinal(d.Handle, ordinal) - name := "#" + itoa(int(ordinal)) - if e != nil { - return nil, &DLLError{ - Err: e, - ObjName: name, - Msg: "Failed to find " + name + " procedure in " + d.Name + ": " + e.Error(), - } - } - p := &Proc{ - Dll: d, - Name: name, - addr: a, - } - return p, nil -} - -// MustFindProcByOrdinal is like FindProcByOrdinal but panics if search fails. -func (d *DLL) MustFindProcByOrdinal(ordinal uintptr) *Proc { - p, e := d.FindProcByOrdinal(ordinal) - if e != nil { - panic(e) - } - return p -} - -// Release unloads DLL d from memory. -func (d *DLL) Release() (err error) { - return FreeLibrary(d.Handle) -} - -// A Proc implements access to a procedure inside a DLL. -type Proc struct { - Dll *DLL - Name string - addr uintptr -} - -// Addr returns the address of the procedure represented by p. -// The return value can be passed to Syscall to run the procedure. -func (p *Proc) Addr() uintptr { - return p.addr -} - -//go:uintptrescapes - -// Call executes procedure p with arguments a. It will panic, if more than 15 arguments -// are supplied. -// -// The returned error is always non-nil, constructed from the result of GetLastError. 
-// Callers must inspect the primary return value to decide whether an error occurred -// (according to the semantics of the specific function being called) before consulting -// the error. The error will be guaranteed to contain windows.Errno. -func (p *Proc) Call(a ...uintptr) (r1, r2 uintptr, lastErr error) { - switch len(a) { - case 0: - return syscall.Syscall(p.Addr(), uintptr(len(a)), 0, 0, 0) - case 1: - return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], 0, 0) - case 2: - return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], a[1], 0) - case 3: - return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], a[1], a[2]) - case 4: - return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], 0, 0) - case 5: - return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], 0) - case 6: - return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5]) - case 7: - return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], 0, 0) - case 8: - return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], 0) - case 9: - return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8]) - case 10: - return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], 0, 0) - case 11: - return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], 0) - case 12: - return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11]) - case 13: - return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], 0, 0) - case 14: - return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], 0) - case 15: - return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14]) - default: - panic("Call " + p.Name + " with too many arguments " + itoa(len(a)) + ".") - } -} - -// A LazyDLL implements access to a single DLL. -// It will delay the load of the DLL until the first -// call to its Handle method or to one of its -// LazyProc's Addr method. -type LazyDLL struct { - Name string - - // System determines whether the DLL must be loaded from the - // Windows System directory, bypassing the normal DLL search - // path. - System bool - - mu sync.Mutex - dll *DLL // non nil once DLL is loaded -} - -// Load loads DLL file d.Name into memory. It returns an error if fails. -// Load will not try to load DLL, if it is already loaded into memory. -func (d *LazyDLL) Load() error { - // Non-racy version of: - // if d.dll != nil { - if atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&d.dll))) != nil { - return nil - } - d.mu.Lock() - defer d.mu.Unlock() - if d.dll != nil { - return nil - } - - // kernel32.dll is special, since it's where LoadLibraryEx comes from. - // The kernel already special-cases its name, so it's always - // loaded from system32. 
- var dll *DLL - var err error - if d.Name == "kernel32.dll" { - dll, err = LoadDLL(d.Name) - } else { - dll, err = loadLibraryEx(d.Name, d.System) - } - if err != nil { - return err - } - - // Non-racy version of: - // d.dll = dll - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&d.dll)), unsafe.Pointer(dll)) - return nil -} - -// mustLoad is like Load but panics if search fails. -func (d *LazyDLL) mustLoad() { - e := d.Load() - if e != nil { - panic(e) - } -} - -// Handle returns d's module handle. -func (d *LazyDLL) Handle() uintptr { - d.mustLoad() - return uintptr(d.dll.Handle) -} - -// NewProc returns a LazyProc for accessing the named procedure in the DLL d. -func (d *LazyDLL) NewProc(name string) *LazyProc { - return &LazyProc{l: d, Name: name} -} - -// NewLazyDLL creates new LazyDLL associated with DLL file. -func NewLazyDLL(name string) *LazyDLL { - return &LazyDLL{Name: name} -} - -// NewLazySystemDLL is like NewLazyDLL, but will only -// search Windows System directory for the DLL if name is -// a base name (like "advapi32.dll"). -func NewLazySystemDLL(name string) *LazyDLL { - return &LazyDLL{Name: name, System: true} -} - -// A LazyProc implements access to a procedure inside a LazyDLL. -// It delays the lookup until the Addr method is called. -type LazyProc struct { - Name string - - mu sync.Mutex - l *LazyDLL - proc *Proc -} - -// Find searches DLL for procedure named p.Name. It returns -// an error if search fails. Find will not search procedure, -// if it is already found and loaded into memory. -func (p *LazyProc) Find() error { - // Non-racy version of: - // if p.proc == nil { - if atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&p.proc))) == nil { - p.mu.Lock() - defer p.mu.Unlock() - if p.proc == nil { - e := p.l.Load() - if e != nil { - return e - } - proc, e := p.l.dll.FindProc(p.Name) - if e != nil { - return e - } - // Non-racy version of: - // p.proc = proc - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&p.proc)), unsafe.Pointer(proc)) - } - } - return nil -} - -// mustFind is like Find but panics if search fails. -func (p *LazyProc) mustFind() { - e := p.Find() - if e != nil { - panic(e) - } -} - -// Addr returns the address of the procedure represented by p. -// The return value can be passed to Syscall to run the procedure. -// It will panic if the procedure cannot be found. -func (p *LazyProc) Addr() uintptr { - p.mustFind() - return p.proc.Addr() -} - -//go:uintptrescapes - -// Call executes procedure p with arguments a. It will panic, if more than 15 arguments -// are supplied. It will also panic if the procedure cannot be found. -// -// The returned error is always non-nil, constructed from the result of GetLastError. -// Callers must inspect the primary return value to decide whether an error occurred -// (according to the semantics of the specific function being called) before consulting -// the error. The error will be guaranteed to contain windows.Errno. -func (p *LazyProc) Call(a ...uintptr) (r1, r2 uintptr, lastErr error) { - p.mustFind() - return p.proc.Call(a...) -} - -var canDoSearchSystem32Once struct { - sync.Once - v bool -} - -func initCanDoSearchSystem32() { - // https://msdn.microsoft.com/en-us/library/ms684179(v=vs.85).aspx says: - // "Windows 7, Windows Server 2008 R2, Windows Vista, and Windows - // Server 2008: The LOAD_LIBRARY_SEARCH_* flags are available on - // systems that have KB2533623 installed. 
To determine whether the - // flags are available, use GetProcAddress to get the address of the - // AddDllDirectory, RemoveDllDirectory, or SetDefaultDllDirectories - // function. If GetProcAddress succeeds, the LOAD_LIBRARY_SEARCH_* - // flags can be used with LoadLibraryEx." - canDoSearchSystem32Once.v = (modkernel32.NewProc("AddDllDirectory").Find() == nil) -} - -func canDoSearchSystem32() bool { - canDoSearchSystem32Once.Do(initCanDoSearchSystem32) - return canDoSearchSystem32Once.v -} - -func isBaseName(name string) bool { - for _, c := range name { - if c == ':' || c == '/' || c == '\\' { - return false - } - } - return true -} - -// loadLibraryEx wraps the Windows LoadLibraryEx function. -// -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684179(v=vs.85).aspx -// -// If name is not an absolute path, LoadLibraryEx searches for the DLL -// in a variety of automatic locations unless constrained by flags. -// See: https://msdn.microsoft.com/en-us/library/ff919712%28VS.85%29.aspx -func loadLibraryEx(name string, system bool) (*DLL, error) { - loadDLL := name - var flags uintptr - if system { - if canDoSearchSystem32() { - flags = LOAD_LIBRARY_SEARCH_SYSTEM32 - } else if isBaseName(name) { - // WindowsXP or unpatched Windows machine - // trying to load "foo.dll" out of the system - // folder, but LoadLibraryEx doesn't support - // that yet on their system, so emulate it. - systemdir, err := GetSystemDirectory() - if err != nil { - return nil, err - } - loadDLL = systemdir + "\\" + name - } - } - h, err := LoadLibraryEx(loadDLL, 0, flags) - if err != nil { - return nil, err - } - return &DLL{Name: name, Handle: h}, nil -} - -type errString string - -func (s errString) Error() string { return string(s) } diff --git a/vendor/golang.org/x/sys/windows/empty.s b/vendor/golang.org/x/sys/windows/empty.s deleted file mode 100644 index fdbbbcd31..000000000 --- a/vendor/golang.org/x/sys/windows/empty.s +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.12 -// +build !go1.12 - -// This file is here to allow bodyless functions with go:linkname for Go 1.11 -// and earlier (see https://golang.org/issue/23311). diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go deleted file mode 100644 index b8ad19250..000000000 --- a/vendor/golang.org/x/sys/windows/env_windows.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Windows environment variables. - -package windows - -import ( - "syscall" - "unsafe" -) - -func Getenv(key string) (value string, found bool) { - return syscall.Getenv(key) -} - -func Setenv(key, value string) error { - return syscall.Setenv(key, value) -} - -func Clearenv() { - syscall.Clearenv() -} - -func Environ() []string { - return syscall.Environ() -} - -// Returns a default environment associated with the token, rather than the current -// process. If inheritExisting is true, then this environment also inherits the -// environment of the current process. 
-func (token Token) Environ(inheritExisting bool) (env []string, err error) { - var block *uint16 - err = CreateEnvironmentBlock(&block, token, inheritExisting) - if err != nil { - return nil, err - } - defer DestroyEnvironmentBlock(block) - blockp := unsafe.Pointer(block) - for { - entry := UTF16PtrToString((*uint16)(blockp)) - if len(entry) == 0 { - break - } - env = append(env, entry) - blockp = unsafe.Add(blockp, 2*(len(entry)+1)) - } - return env, nil -} - -func Unsetenv(key string) error { - return syscall.Unsetenv(key) -} diff --git a/vendor/golang.org/x/sys/windows/eventlog.go b/vendor/golang.org/x/sys/windows/eventlog.go deleted file mode 100644 index 2cd60645e..000000000 --- a/vendor/golang.org/x/sys/windows/eventlog.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build windows -// +build windows - -package windows - -const ( - EVENTLOG_SUCCESS = 0 - EVENTLOG_ERROR_TYPE = 1 - EVENTLOG_WARNING_TYPE = 2 - EVENTLOG_INFORMATION_TYPE = 4 - EVENTLOG_AUDIT_SUCCESS = 8 - EVENTLOG_AUDIT_FAILURE = 16 -) - -//sys RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) [failretval==0] = advapi32.RegisterEventSourceW -//sys DeregisterEventSource(handle Handle) (err error) = advapi32.DeregisterEventSource -//sys ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) = advapi32.ReportEventW diff --git a/vendor/golang.org/x/sys/windows/exec_windows.go b/vendor/golang.org/x/sys/windows/exec_windows.go deleted file mode 100644 index a52e0331d..000000000 --- a/vendor/golang.org/x/sys/windows/exec_windows.go +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Fork, exec, wait, etc. - -package windows - -import ( - errorspkg "errors" - "unsafe" -) - -// EscapeArg rewrites command line argument s as prescribed -// in http://msdn.microsoft.com/en-us/library/ms880421. -// This function returns "" (2 double quotes) if s is empty. -// Alternatively, these transformations are done: -// - every back slash (\) is doubled, but only if immediately -// followed by double quote ("); -// - every double quote (") is escaped by back slash (\); -// - finally, s is wrapped with double quotes (arg -> "arg"), -// but only if there is space or tab inside s. 
-func EscapeArg(s string) string { - if len(s) == 0 { - return "\"\"" - } - n := len(s) - hasSpace := false - for i := 0; i < len(s); i++ { - switch s[i] { - case '"', '\\': - n++ - case ' ', '\t': - hasSpace = true - } - } - if hasSpace { - n += 2 - } - if n == len(s) { - return s - } - - qs := make([]byte, n) - j := 0 - if hasSpace { - qs[j] = '"' - j++ - } - slashes := 0 - for i := 0; i < len(s); i++ { - switch s[i] { - default: - slashes = 0 - qs[j] = s[i] - case '\\': - slashes++ - qs[j] = s[i] - case '"': - for ; slashes > 0; slashes-- { - qs[j] = '\\' - j++ - } - qs[j] = '\\' - j++ - qs[j] = s[i] - } - j++ - } - if hasSpace { - for ; slashes > 0; slashes-- { - qs[j] = '\\' - j++ - } - qs[j] = '"' - j++ - } - return string(qs[:j]) -} - -// ComposeCommandLine escapes and joins the given arguments suitable for use as a Windows command line, -// in CreateProcess's CommandLine argument, CreateService/ChangeServiceConfig's BinaryPathName argument, -// or any program that uses CommandLineToArgv. -func ComposeCommandLine(args []string) string { - var commandLine string - for i := range args { - if i > 0 { - commandLine += " " - } - commandLine += EscapeArg(args[i]) - } - return commandLine -} - -// DecomposeCommandLine breaks apart its argument command line into unescaped parts using CommandLineToArgv, -// as gathered from GetCommandLine, QUERY_SERVICE_CONFIG's BinaryPathName argument, or elsewhere that -// command lines are passed around. -// DecomposeCommandLine returns error if commandLine contains NUL. -func DecomposeCommandLine(commandLine string) ([]string, error) { - if len(commandLine) == 0 { - return []string{}, nil - } - utf16CommandLine, err := UTF16FromString(commandLine) - if err != nil { - return nil, errorspkg.New("string with NUL passed to DecomposeCommandLine") - } - var argc int32 - argv, err := CommandLineToArgv(&utf16CommandLine[0], &argc) - if err != nil { - return nil, err - } - defer LocalFree(Handle(unsafe.Pointer(argv))) - var args []string - for _, v := range (*argv)[:argc] { - args = append(args, UTF16ToString((*v)[:])) - } - return args, nil -} - -func CloseOnExec(fd Handle) { - SetHandleInformation(Handle(fd), HANDLE_FLAG_INHERIT, 0) -} - -// FullPath retrieves the full path of the specified file. -func FullPath(name string) (path string, err error) { - p, err := UTF16PtrFromString(name) - if err != nil { - return "", err - } - n := uint32(100) - for { - buf := make([]uint16, n) - n, err = GetFullPathName(p, uint32(len(buf)), &buf[0], nil) - if err != nil { - return "", err - } - if n <= uint32(len(buf)) { - return UTF16ToString(buf[:n]), nil - } - } -} - -// NewProcThreadAttributeList allocates a new ProcThreadAttributeListContainer, with the requested maximum number of attributes. -func NewProcThreadAttributeList(maxAttrCount uint32) (*ProcThreadAttributeListContainer, error) { - var size uintptr - err := initializeProcThreadAttributeList(nil, maxAttrCount, 0, &size) - if err != ERROR_INSUFFICIENT_BUFFER { - if err == nil { - return nil, errorspkg.New("unable to query buffer size from InitializeProcThreadAttributeList") - } - return nil, err - } - alloc, err := LocalAlloc(LMEM_FIXED, uint32(size)) - if err != nil { - return nil, err - } - // size is guaranteed to be ≥1 by InitializeProcThreadAttributeList. 
- al := &ProcThreadAttributeListContainer{data: (*ProcThreadAttributeList)(unsafe.Pointer(alloc))} - err = initializeProcThreadAttributeList(al.data, maxAttrCount, 0, &size) - if err != nil { - return nil, err - } - return al, err -} - -// Update modifies the ProcThreadAttributeList using UpdateProcThreadAttribute. -func (al *ProcThreadAttributeListContainer) Update(attribute uintptr, value unsafe.Pointer, size uintptr) error { - al.pointers = append(al.pointers, value) - return updateProcThreadAttribute(al.data, 0, attribute, value, size, nil, nil) -} - -// Delete frees ProcThreadAttributeList's resources. -func (al *ProcThreadAttributeListContainer) Delete() { - deleteProcThreadAttributeList(al.data) - LocalFree(Handle(unsafe.Pointer(al.data))) - al.data = nil - al.pointers = nil -} - -// List returns the actual ProcThreadAttributeList to be passed to StartupInfoEx. -func (al *ProcThreadAttributeListContainer) List() *ProcThreadAttributeList { - return al.data -} diff --git a/vendor/golang.org/x/sys/windows/memory_windows.go b/vendor/golang.org/x/sys/windows/memory_windows.go deleted file mode 100644 index 6dc0920a8..000000000 --- a/vendor/golang.org/x/sys/windows/memory_windows.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package windows - -const ( - MEM_COMMIT = 0x00001000 - MEM_RESERVE = 0x00002000 - MEM_DECOMMIT = 0x00004000 - MEM_RELEASE = 0x00008000 - MEM_RESET = 0x00080000 - MEM_TOP_DOWN = 0x00100000 - MEM_WRITE_WATCH = 0x00200000 - MEM_PHYSICAL = 0x00400000 - MEM_RESET_UNDO = 0x01000000 - MEM_LARGE_PAGES = 0x20000000 - - PAGE_NOACCESS = 0x00000001 - PAGE_READONLY = 0x00000002 - PAGE_READWRITE = 0x00000004 - PAGE_WRITECOPY = 0x00000008 - PAGE_EXECUTE = 0x00000010 - PAGE_EXECUTE_READ = 0x00000020 - PAGE_EXECUTE_READWRITE = 0x00000040 - PAGE_EXECUTE_WRITECOPY = 0x00000080 - PAGE_GUARD = 0x00000100 - PAGE_NOCACHE = 0x00000200 - PAGE_WRITECOMBINE = 0x00000400 - PAGE_TARGETS_INVALID = 0x40000000 - PAGE_TARGETS_NO_UPDATE = 0x40000000 - - QUOTA_LIMITS_HARDWS_MIN_DISABLE = 0x00000002 - QUOTA_LIMITS_HARDWS_MIN_ENABLE = 0x00000001 - QUOTA_LIMITS_HARDWS_MAX_DISABLE = 0x00000008 - QUOTA_LIMITS_HARDWS_MAX_ENABLE = 0x00000004 -) - -type MemoryBasicInformation struct { - BaseAddress uintptr - AllocationBase uintptr - AllocationProtect uint32 - PartitionId uint16 - RegionSize uintptr - State uint32 - Protect uint32 - Type uint32 -} diff --git a/vendor/golang.org/x/sys/windows/mkerrors.bash b/vendor/golang.org/x/sys/windows/mkerrors.bash deleted file mode 100644 index 58e0188fb..000000000 --- a/vendor/golang.org/x/sys/windows/mkerrors.bash +++ /dev/null @@ -1,70 +0,0 @@ -#!/bin/bash - -# Copyright 2019 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -set -e -shopt -s nullglob - -winerror="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/shared/winerror.h | sort -Vr | head -n 1)" -[[ -n $winerror ]] || { echo "Unable to find winerror.h" >&2; exit 1; } -ntstatus="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/shared/ntstatus.h | sort -Vr | head -n 1)" -[[ -n $ntstatus ]] || { echo "Unable to find ntstatus.h" >&2; exit 1; } - -declare -A errors - -{ - echo "// Code generated by 'mkerrors.bash'; DO NOT EDIT." 
- echo - echo "package windows" - echo "import \"syscall\"" - echo "const (" - - while read -r line; do - unset vtype - if [[ $line =~ ^#define\ +([A-Z0-9_]+k?)\ +([A-Z0-9_]+\()?([A-Z][A-Z0-9_]+k?)\)? ]]; then - key="${BASH_REMATCH[1]}" - value="${BASH_REMATCH[3]}" - elif [[ $line =~ ^#define\ +([A-Z0-9_]+k?)\ +([A-Z0-9_]+\()?((0x)?[0-9A-Fa-f]+)L?\)? ]]; then - key="${BASH_REMATCH[1]}" - value="${BASH_REMATCH[3]}" - vtype="${BASH_REMATCH[2]}" - elif [[ $line =~ ^#define\ +([A-Z0-9_]+k?)\ +\(\(([A-Z]+)\)((0x)?[0-9A-Fa-f]+)L?\) ]]; then - key="${BASH_REMATCH[1]}" - value="${BASH_REMATCH[3]}" - vtype="${BASH_REMATCH[2]}" - else - continue - fi - [[ -n $key && -n $value ]] || continue - [[ -z ${errors["$key"]} ]] || continue - errors["$key"]="$value" - if [[ -v vtype ]]; then - if [[ $key == FACILITY_* || $key == NO_ERROR ]]; then - vtype="" - elif [[ $vtype == *HANDLE* || $vtype == *HRESULT* ]]; then - vtype="Handle" - else - vtype="syscall.Errno" - fi - last_vtype="$vtype" - else - vtype="" - if [[ $last_vtype == Handle && $value == NO_ERROR ]]; then - value="S_OK" - elif [[ $last_vtype == syscall.Errno && $value == NO_ERROR ]]; then - value="ERROR_SUCCESS" - fi - fi - - echo "$key $vtype = $value" - done < "$winerror" - - while read -r line; do - [[ $line =~ ^#define\ (STATUS_[^\s]+)\ +\(\(NTSTATUS\)((0x)?[0-9a-fA-F]+)L?\) ]] || continue - echo "${BASH_REMATCH[1]} NTStatus = ${BASH_REMATCH[2]}" - done < "$ntstatus" - - echo ")" -} | gofmt > "zerrors_windows.go" diff --git a/vendor/golang.org/x/sys/windows/mkknownfolderids.bash b/vendor/golang.org/x/sys/windows/mkknownfolderids.bash deleted file mode 100644 index ab8924e93..000000000 --- a/vendor/golang.org/x/sys/windows/mkknownfolderids.bash +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -# Copyright 2019 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -set -e -shopt -s nullglob - -knownfolders="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/um/KnownFolders.h | sort -Vr | head -n 1)" -[[ -n $knownfolders ]] || { echo "Unable to find KnownFolders.h" >&2; exit 1; } - -{ - echo "// Code generated by 'mkknownfolderids.bash'; DO NOT EDIT." - echo - echo "package windows" - echo "type KNOWNFOLDERID GUID" - echo "var (" - while read -r line; do - [[ $line =~ DEFINE_KNOWN_FOLDER\((FOLDERID_[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+)\) ]] || continue - printf "%s = &KNOWNFOLDERID{0x%08x, 0x%04x, 0x%04x, [8]byte{0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x}}\n" \ - "${BASH_REMATCH[1]}" $(( "${BASH_REMATCH[2]}" )) $(( "${BASH_REMATCH[3]}" )) $(( "${BASH_REMATCH[4]}" )) \ - $(( "${BASH_REMATCH[5]}" )) $(( "${BASH_REMATCH[6]}" )) $(( "${BASH_REMATCH[7]}" )) $(( "${BASH_REMATCH[8]}" )) \ - $(( "${BASH_REMATCH[9]}" )) $(( "${BASH_REMATCH[10]}" )) $(( "${BASH_REMATCH[11]}" )) $(( "${BASH_REMATCH[12]}" )) - done < "$knownfolders" - echo ")" -} | gofmt > "zknownfolderids_windows.go" diff --git a/vendor/golang.org/x/sys/windows/mksyscall.go b/vendor/golang.org/x/sys/windows/mksyscall.go deleted file mode 100644 index 8563f79c5..000000000 --- a/vendor/golang.org/x/sys/windows/mksyscall.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build generate -// +build generate - -package windows - -//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go eventlog.go service.go syscall_windows.go security_windows.go setupapi_windows.go diff --git a/vendor/golang.org/x/sys/windows/race.go b/vendor/golang.org/x/sys/windows/race.go deleted file mode 100644 index 9196b089c..000000000 --- a/vendor/golang.org/x/sys/windows/race.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build windows && race -// +build windows,race - -package windows - -import ( - "runtime" - "unsafe" -) - -const raceenabled = true - -func raceAcquire(addr unsafe.Pointer) { - runtime.RaceAcquire(addr) -} - -func raceReleaseMerge(addr unsafe.Pointer) { - runtime.RaceReleaseMerge(addr) -} - -func raceReadRange(addr unsafe.Pointer, len int) { - runtime.RaceReadRange(addr, len) -} - -func raceWriteRange(addr unsafe.Pointer, len int) { - runtime.RaceWriteRange(addr, len) -} diff --git a/vendor/golang.org/x/sys/windows/race0.go b/vendor/golang.org/x/sys/windows/race0.go deleted file mode 100644 index 7bae4817a..000000000 --- a/vendor/golang.org/x/sys/windows/race0.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build windows && !race -// +build windows,!race - -package windows - -import ( - "unsafe" -) - -const raceenabled = false - -func raceAcquire(addr unsafe.Pointer) { -} - -func raceReleaseMerge(addr unsafe.Pointer) { -} - -func raceReadRange(addr unsafe.Pointer, len int) { -} - -func raceWriteRange(addr unsafe.Pointer, len int) { -} diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go deleted file mode 100644 index d414ef13b..000000000 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ /dev/null @@ -1,1444 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package windows - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/internal/unsafeheader" -) - -const ( - NameUnknown = 0 - NameFullyQualifiedDN = 1 - NameSamCompatible = 2 - NameDisplay = 3 - NameUniqueId = 6 - NameCanonical = 7 - NameUserPrincipal = 8 - NameCanonicalEx = 9 - NameServicePrincipal = 10 - NameDnsDomain = 12 -) - -// This function returns 1 byte BOOLEAN rather than the 4 byte BOOL. -// http://blogs.msdn.com/b/drnick/archive/2007/12/19/windows-and-upn-format-credentials.aspx -//sys TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) [failretval&0xff==0] = secur32.TranslateNameW -//sys GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) [failretval&0xff==0] = secur32.GetUserNameExW - -// TranslateAccountName converts a directory service -// object name from one format to another. 
-func TranslateAccountName(username string, from, to uint32, initSize int) (string, error) { - u, e := UTF16PtrFromString(username) - if e != nil { - return "", e - } - n := uint32(50) - for { - b := make([]uint16, n) - e = TranslateName(u, from, to, &b[0], &n) - if e == nil { - return UTF16ToString(b[:n]), nil - } - if e != ERROR_INSUFFICIENT_BUFFER { - return "", e - } - if n <= uint32(len(b)) { - return "", e - } - } -} - -const ( - // do not reorder - NetSetupUnknownStatus = iota - NetSetupUnjoined - NetSetupWorkgroupName - NetSetupDomainName -) - -type UserInfo10 struct { - Name *uint16 - Comment *uint16 - UsrComment *uint16 - FullName *uint16 -} - -//sys NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) = netapi32.NetUserGetInfo -//sys NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) = netapi32.NetGetJoinInformation -//sys NetApiBufferFree(buf *byte) (neterr error) = netapi32.NetApiBufferFree - -const ( - // do not reorder - SidTypeUser = 1 + iota - SidTypeGroup - SidTypeDomain - SidTypeAlias - SidTypeWellKnownGroup - SidTypeDeletedAccount - SidTypeInvalid - SidTypeUnknown - SidTypeComputer - SidTypeLabel -) - -type SidIdentifierAuthority struct { - Value [6]byte -} - -var ( - SECURITY_NULL_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 0}} - SECURITY_WORLD_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 1}} - SECURITY_LOCAL_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 2}} - SECURITY_CREATOR_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 3}} - SECURITY_NON_UNIQUE_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 4}} - SECURITY_NT_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 5}} - SECURITY_MANDATORY_LABEL_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 16}} -) - -const ( - SECURITY_NULL_RID = 0 - SECURITY_WORLD_RID = 0 - SECURITY_LOCAL_RID = 0 - SECURITY_CREATOR_OWNER_RID = 0 - SECURITY_CREATOR_GROUP_RID = 1 - SECURITY_DIALUP_RID = 1 - SECURITY_NETWORK_RID = 2 - SECURITY_BATCH_RID = 3 - SECURITY_INTERACTIVE_RID = 4 - SECURITY_LOGON_IDS_RID = 5 - SECURITY_SERVICE_RID = 6 - SECURITY_LOCAL_SYSTEM_RID = 18 - SECURITY_BUILTIN_DOMAIN_RID = 32 - SECURITY_PRINCIPAL_SELF_RID = 10 - SECURITY_CREATOR_OWNER_SERVER_RID = 0x2 - SECURITY_CREATOR_GROUP_SERVER_RID = 0x3 - SECURITY_LOGON_IDS_RID_COUNT = 0x3 - SECURITY_ANONYMOUS_LOGON_RID = 0x7 - SECURITY_PROXY_RID = 0x8 - SECURITY_ENTERPRISE_CONTROLLERS_RID = 0x9 - SECURITY_SERVER_LOGON_RID = SECURITY_ENTERPRISE_CONTROLLERS_RID - SECURITY_AUTHENTICATED_USER_RID = 0xb - SECURITY_RESTRICTED_CODE_RID = 0xc - SECURITY_NT_NON_UNIQUE_RID = 0x15 -) - -// Predefined domain-relative RIDs for local groups. 
-// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa379649(v=vs.85).aspx -const ( - DOMAIN_ALIAS_RID_ADMINS = 0x220 - DOMAIN_ALIAS_RID_USERS = 0x221 - DOMAIN_ALIAS_RID_GUESTS = 0x222 - DOMAIN_ALIAS_RID_POWER_USERS = 0x223 - DOMAIN_ALIAS_RID_ACCOUNT_OPS = 0x224 - DOMAIN_ALIAS_RID_SYSTEM_OPS = 0x225 - DOMAIN_ALIAS_RID_PRINT_OPS = 0x226 - DOMAIN_ALIAS_RID_BACKUP_OPS = 0x227 - DOMAIN_ALIAS_RID_REPLICATOR = 0x228 - DOMAIN_ALIAS_RID_RAS_SERVERS = 0x229 - DOMAIN_ALIAS_RID_PREW2KCOMPACCESS = 0x22a - DOMAIN_ALIAS_RID_REMOTE_DESKTOP_USERS = 0x22b - DOMAIN_ALIAS_RID_NETWORK_CONFIGURATION_OPS = 0x22c - DOMAIN_ALIAS_RID_INCOMING_FOREST_TRUST_BUILDERS = 0x22d - DOMAIN_ALIAS_RID_MONITORING_USERS = 0x22e - DOMAIN_ALIAS_RID_LOGGING_USERS = 0x22f - DOMAIN_ALIAS_RID_AUTHORIZATIONACCESS = 0x230 - DOMAIN_ALIAS_RID_TS_LICENSE_SERVERS = 0x231 - DOMAIN_ALIAS_RID_DCOM_USERS = 0x232 - DOMAIN_ALIAS_RID_IUSERS = 0x238 - DOMAIN_ALIAS_RID_CRYPTO_OPERATORS = 0x239 - DOMAIN_ALIAS_RID_CACHEABLE_PRINCIPALS_GROUP = 0x23b - DOMAIN_ALIAS_RID_NON_CACHEABLE_PRINCIPALS_GROUP = 0x23c - DOMAIN_ALIAS_RID_EVENT_LOG_READERS_GROUP = 0x23d - DOMAIN_ALIAS_RID_CERTSVC_DCOM_ACCESS_GROUP = 0x23e -) - -//sys LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) = advapi32.LookupAccountSidW -//sys LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) = advapi32.LookupAccountNameW -//sys ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) = advapi32.ConvertSidToStringSidW -//sys ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) = advapi32.ConvertStringSidToSidW -//sys GetLengthSid(sid *SID) (len uint32) = advapi32.GetLengthSid -//sys CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) = advapi32.CopySid -//sys AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) = advapi32.AllocateAndInitializeSid -//sys createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) = advapi32.CreateWellKnownSid -//sys isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) = advapi32.IsWellKnownSid -//sys FreeSid(sid *SID) (err error) [failretval!=0] = advapi32.FreeSid -//sys EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) = advapi32.EqualSid -//sys getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) = advapi32.GetSidIdentifierAuthority -//sys getSidSubAuthorityCount(sid *SID) (count *uint8) = advapi32.GetSidSubAuthorityCount -//sys getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) = advapi32.GetSidSubAuthority -//sys isValidSid(sid *SID) (isValid bool) = advapi32.IsValidSid - -// The security identifier (SID) structure is a variable-length -// structure used to uniquely identify users or groups. -type SID struct{} - -// StringToSid converts a string-format security identifier -// SID into a valid, functional SID. 
-func StringToSid(s string) (*SID, error) { - var sid *SID - p, e := UTF16PtrFromString(s) - if e != nil { - return nil, e - } - e = ConvertStringSidToSid(p, &sid) - if e != nil { - return nil, e - } - defer LocalFree((Handle)(unsafe.Pointer(sid))) - return sid.Copy() -} - -// LookupSID retrieves a security identifier SID for the account -// and the name of the domain on which the account was found. -// System specify target computer to search. -func LookupSID(system, account string) (sid *SID, domain string, accType uint32, err error) { - if len(account) == 0 { - return nil, "", 0, syscall.EINVAL - } - acc, e := UTF16PtrFromString(account) - if e != nil { - return nil, "", 0, e - } - var sys *uint16 - if len(system) > 0 { - sys, e = UTF16PtrFromString(system) - if e != nil { - return nil, "", 0, e - } - } - n := uint32(50) - dn := uint32(50) - for { - b := make([]byte, n) - db := make([]uint16, dn) - sid = (*SID)(unsafe.Pointer(&b[0])) - e = LookupAccountName(sys, acc, sid, &n, &db[0], &dn, &accType) - if e == nil { - return sid, UTF16ToString(db), accType, nil - } - if e != ERROR_INSUFFICIENT_BUFFER { - return nil, "", 0, e - } - if n <= uint32(len(b)) { - return nil, "", 0, e - } - } -} - -// String converts SID to a string format suitable for display, storage, or transmission. -func (sid *SID) String() string { - var s *uint16 - e := ConvertSidToStringSid(sid, &s) - if e != nil { - return "" - } - defer LocalFree((Handle)(unsafe.Pointer(s))) - return UTF16ToString((*[256]uint16)(unsafe.Pointer(s))[:]) -} - -// Len returns the length, in bytes, of a valid security identifier SID. -func (sid *SID) Len() int { - return int(GetLengthSid(sid)) -} - -// Copy creates a duplicate of security identifier SID. -func (sid *SID) Copy() (*SID, error) { - b := make([]byte, sid.Len()) - sid2 := (*SID)(unsafe.Pointer(&b[0])) - e := CopySid(uint32(len(b)), sid2, sid) - if e != nil { - return nil, e - } - return sid2, nil -} - -// IdentifierAuthority returns the identifier authority of the SID. -func (sid *SID) IdentifierAuthority() SidIdentifierAuthority { - return *getSidIdentifierAuthority(sid) -} - -// SubAuthorityCount returns the number of sub-authorities in the SID. -func (sid *SID) SubAuthorityCount() uint8 { - return *getSidSubAuthorityCount(sid) -} - -// SubAuthority returns the sub-authority of the SID as specified by -// the index, which must be less than sid.SubAuthorityCount(). -func (sid *SID) SubAuthority(idx uint32) uint32 { - if idx >= uint32(sid.SubAuthorityCount()) { - panic("sub-authority index out of range") - } - return *getSidSubAuthority(sid, idx) -} - -// IsValid returns whether the SID has a valid revision and length. -func (sid *SID) IsValid() bool { - return isValidSid(sid) -} - -// Equals compares two SIDs for equality. -func (sid *SID) Equals(sid2 *SID) bool { - return EqualSid(sid, sid2) -} - -// IsWellKnown determines whether the SID matches the well-known sidType. -func (sid *SID) IsWellKnown(sidType WELL_KNOWN_SID_TYPE) bool { - return isWellKnownSid(sid, sidType) -} - -// LookupAccount retrieves the name of the account for this SID -// and the name of the first domain on which this SID is found. -// System specify target computer to search for. 
-func (sid *SID) LookupAccount(system string) (account, domain string, accType uint32, err error) { - var sys *uint16 - if len(system) > 0 { - sys, err = UTF16PtrFromString(system) - if err != nil { - return "", "", 0, err - } - } - n := uint32(50) - dn := uint32(50) - for { - b := make([]uint16, n) - db := make([]uint16, dn) - e := LookupAccountSid(sys, sid, &b[0], &n, &db[0], &dn, &accType) - if e == nil { - return UTF16ToString(b), UTF16ToString(db), accType, nil - } - if e != ERROR_INSUFFICIENT_BUFFER { - return "", "", 0, e - } - if n <= uint32(len(b)) { - return "", "", 0, e - } - } -} - -// Various types of pre-specified SIDs that can be synthesized and compared at runtime. -type WELL_KNOWN_SID_TYPE uint32 - -const ( - WinNullSid = 0 - WinWorldSid = 1 - WinLocalSid = 2 - WinCreatorOwnerSid = 3 - WinCreatorGroupSid = 4 - WinCreatorOwnerServerSid = 5 - WinCreatorGroupServerSid = 6 - WinNtAuthoritySid = 7 - WinDialupSid = 8 - WinNetworkSid = 9 - WinBatchSid = 10 - WinInteractiveSid = 11 - WinServiceSid = 12 - WinAnonymousSid = 13 - WinProxySid = 14 - WinEnterpriseControllersSid = 15 - WinSelfSid = 16 - WinAuthenticatedUserSid = 17 - WinRestrictedCodeSid = 18 - WinTerminalServerSid = 19 - WinRemoteLogonIdSid = 20 - WinLogonIdsSid = 21 - WinLocalSystemSid = 22 - WinLocalServiceSid = 23 - WinNetworkServiceSid = 24 - WinBuiltinDomainSid = 25 - WinBuiltinAdministratorsSid = 26 - WinBuiltinUsersSid = 27 - WinBuiltinGuestsSid = 28 - WinBuiltinPowerUsersSid = 29 - WinBuiltinAccountOperatorsSid = 30 - WinBuiltinSystemOperatorsSid = 31 - WinBuiltinPrintOperatorsSid = 32 - WinBuiltinBackupOperatorsSid = 33 - WinBuiltinReplicatorSid = 34 - WinBuiltinPreWindows2000CompatibleAccessSid = 35 - WinBuiltinRemoteDesktopUsersSid = 36 - WinBuiltinNetworkConfigurationOperatorsSid = 37 - WinAccountAdministratorSid = 38 - WinAccountGuestSid = 39 - WinAccountKrbtgtSid = 40 - WinAccountDomainAdminsSid = 41 - WinAccountDomainUsersSid = 42 - WinAccountDomainGuestsSid = 43 - WinAccountComputersSid = 44 - WinAccountControllersSid = 45 - WinAccountCertAdminsSid = 46 - WinAccountSchemaAdminsSid = 47 - WinAccountEnterpriseAdminsSid = 48 - WinAccountPolicyAdminsSid = 49 - WinAccountRasAndIasServersSid = 50 - WinNTLMAuthenticationSid = 51 - WinDigestAuthenticationSid = 52 - WinSChannelAuthenticationSid = 53 - WinThisOrganizationSid = 54 - WinOtherOrganizationSid = 55 - WinBuiltinIncomingForestTrustBuildersSid = 56 - WinBuiltinPerfMonitoringUsersSid = 57 - WinBuiltinPerfLoggingUsersSid = 58 - WinBuiltinAuthorizationAccessSid = 59 - WinBuiltinTerminalServerLicenseServersSid = 60 - WinBuiltinDCOMUsersSid = 61 - WinBuiltinIUsersSid = 62 - WinIUserSid = 63 - WinBuiltinCryptoOperatorsSid = 64 - WinUntrustedLabelSid = 65 - WinLowLabelSid = 66 - WinMediumLabelSid = 67 - WinHighLabelSid = 68 - WinSystemLabelSid = 69 - WinWriteRestrictedCodeSid = 70 - WinCreatorOwnerRightsSid = 71 - WinCacheablePrincipalsGroupSid = 72 - WinNonCacheablePrincipalsGroupSid = 73 - WinEnterpriseReadonlyControllersSid = 74 - WinAccountReadonlyControllersSid = 75 - WinBuiltinEventLogReadersGroup = 76 - WinNewEnterpriseReadonlyControllersSid = 77 - WinBuiltinCertSvcDComAccessGroup = 78 - WinMediumPlusLabelSid = 79 - WinLocalLogonSid = 80 - WinConsoleLogonSid = 81 - WinThisOrganizationCertificateSid = 82 - WinApplicationPackageAuthoritySid = 83 - WinBuiltinAnyPackageSid = 84 - WinCapabilityInternetClientSid = 85 - WinCapabilityInternetClientServerSid = 86 - WinCapabilityPrivateNetworkClientServerSid = 87 - WinCapabilityPicturesLibrarySid = 88 - 
WinCapabilityVideosLibrarySid = 89 - WinCapabilityMusicLibrarySid = 90 - WinCapabilityDocumentsLibrarySid = 91 - WinCapabilitySharedUserCertificatesSid = 92 - WinCapabilityEnterpriseAuthenticationSid = 93 - WinCapabilityRemovableStorageSid = 94 - WinBuiltinRDSRemoteAccessServersSid = 95 - WinBuiltinRDSEndpointServersSid = 96 - WinBuiltinRDSManagementServersSid = 97 - WinUserModeDriversSid = 98 - WinBuiltinHyperVAdminsSid = 99 - WinAccountCloneableControllersSid = 100 - WinBuiltinAccessControlAssistanceOperatorsSid = 101 - WinBuiltinRemoteManagementUsersSid = 102 - WinAuthenticationAuthorityAssertedSid = 103 - WinAuthenticationServiceAssertedSid = 104 - WinLocalAccountSid = 105 - WinLocalAccountAndAdministratorSid = 106 - WinAccountProtectedUsersSid = 107 - WinCapabilityAppointmentsSid = 108 - WinCapabilityContactsSid = 109 - WinAccountDefaultSystemManagedSid = 110 - WinBuiltinDefaultSystemManagedGroupSid = 111 - WinBuiltinStorageReplicaAdminsSid = 112 - WinAccountKeyAdminsSid = 113 - WinAccountEnterpriseKeyAdminsSid = 114 - WinAuthenticationKeyTrustSid = 115 - WinAuthenticationKeyPropertyMFASid = 116 - WinAuthenticationKeyPropertyAttestationSid = 117 - WinAuthenticationFreshKeyAuthSid = 118 - WinBuiltinDeviceOwnersSid = 119 -) - -// Creates a SID for a well-known predefined alias, generally using the constants of the form -// Win*Sid, for the local machine. -func CreateWellKnownSid(sidType WELL_KNOWN_SID_TYPE) (*SID, error) { - return CreateWellKnownDomainSid(sidType, nil) -} - -// Creates a SID for a well-known predefined alias, generally using the constants of the form -// Win*Sid, for the domain specified by the domainSid parameter. -func CreateWellKnownDomainSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID) (*SID, error) { - n := uint32(50) - for { - b := make([]byte, n) - sid := (*SID)(unsafe.Pointer(&b[0])) - err := createWellKnownSid(sidType, domainSid, sid, &n) - if err == nil { - return sid, nil - } - if err != ERROR_INSUFFICIENT_BUFFER { - return nil, err - } - if n <= uint32(len(b)) { - return nil, err - } - } -} - -const ( - // do not reorder - TOKEN_ASSIGN_PRIMARY = 1 << iota - TOKEN_DUPLICATE - TOKEN_IMPERSONATE - TOKEN_QUERY - TOKEN_QUERY_SOURCE - TOKEN_ADJUST_PRIVILEGES - TOKEN_ADJUST_GROUPS - TOKEN_ADJUST_DEFAULT - TOKEN_ADJUST_SESSIONID - - TOKEN_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | - TOKEN_ASSIGN_PRIMARY | - TOKEN_DUPLICATE | - TOKEN_IMPERSONATE | - TOKEN_QUERY | - TOKEN_QUERY_SOURCE | - TOKEN_ADJUST_PRIVILEGES | - TOKEN_ADJUST_GROUPS | - TOKEN_ADJUST_DEFAULT | - TOKEN_ADJUST_SESSIONID - TOKEN_READ = STANDARD_RIGHTS_READ | TOKEN_QUERY - TOKEN_WRITE = STANDARD_RIGHTS_WRITE | - TOKEN_ADJUST_PRIVILEGES | - TOKEN_ADJUST_GROUPS | - TOKEN_ADJUST_DEFAULT - TOKEN_EXECUTE = STANDARD_RIGHTS_EXECUTE -) - -const ( - // do not reorder - TokenUser = 1 + iota - TokenGroups - TokenPrivileges - TokenOwner - TokenPrimaryGroup - TokenDefaultDacl - TokenSource - TokenType - TokenImpersonationLevel - TokenStatistics - TokenRestrictedSids - TokenSessionId - TokenGroupsAndPrivileges - TokenSessionReference - TokenSandBoxInert - TokenAuditPolicy - TokenOrigin - TokenElevationType - TokenLinkedToken - TokenElevation - TokenHasRestrictions - TokenAccessInformation - TokenVirtualizationAllowed - TokenVirtualizationEnabled - TokenIntegrityLevel - TokenUIAccess - TokenMandatoryPolicy - TokenLogonSid - MaxTokenInfoClass -) - -// Group attributes inside of Tokengroups.Groups[i].Attributes -const ( - SE_GROUP_MANDATORY = 0x00000001 - SE_GROUP_ENABLED_BY_DEFAULT = 0x00000002 - SE_GROUP_ENABLED = 
0x00000004 - SE_GROUP_OWNER = 0x00000008 - SE_GROUP_USE_FOR_DENY_ONLY = 0x00000010 - SE_GROUP_INTEGRITY = 0x00000020 - SE_GROUP_INTEGRITY_ENABLED = 0x00000040 - SE_GROUP_LOGON_ID = 0xC0000000 - SE_GROUP_RESOURCE = 0x20000000 - SE_GROUP_VALID_ATTRIBUTES = SE_GROUP_MANDATORY | SE_GROUP_ENABLED_BY_DEFAULT | SE_GROUP_ENABLED | SE_GROUP_OWNER | SE_GROUP_USE_FOR_DENY_ONLY | SE_GROUP_LOGON_ID | SE_GROUP_RESOURCE | SE_GROUP_INTEGRITY | SE_GROUP_INTEGRITY_ENABLED -) - -// Privilege attributes -const ( - SE_PRIVILEGE_ENABLED_BY_DEFAULT = 0x00000001 - SE_PRIVILEGE_ENABLED = 0x00000002 - SE_PRIVILEGE_REMOVED = 0x00000004 - SE_PRIVILEGE_USED_FOR_ACCESS = 0x80000000 - SE_PRIVILEGE_VALID_ATTRIBUTES = SE_PRIVILEGE_ENABLED_BY_DEFAULT | SE_PRIVILEGE_ENABLED | SE_PRIVILEGE_REMOVED | SE_PRIVILEGE_USED_FOR_ACCESS -) - -// Token types -const ( - TokenPrimary = 1 - TokenImpersonation = 2 -) - -// Impersonation levels -const ( - SecurityAnonymous = 0 - SecurityIdentification = 1 - SecurityImpersonation = 2 - SecurityDelegation = 3 -) - -type LUID struct { - LowPart uint32 - HighPart int32 -} - -type LUIDAndAttributes struct { - Luid LUID - Attributes uint32 -} - -type SIDAndAttributes struct { - Sid *SID - Attributes uint32 -} - -type Tokenuser struct { - User SIDAndAttributes -} - -type Tokenprimarygroup struct { - PrimaryGroup *SID -} - -type Tokengroups struct { - GroupCount uint32 - Groups [1]SIDAndAttributes // Use AllGroups() for iterating. -} - -// AllGroups returns a slice that can be used to iterate over the groups in g. -func (g *Tokengroups) AllGroups() []SIDAndAttributes { - return (*[(1 << 28) - 1]SIDAndAttributes)(unsafe.Pointer(&g.Groups[0]))[:g.GroupCount:g.GroupCount] -} - -type Tokenprivileges struct { - PrivilegeCount uint32 - Privileges [1]LUIDAndAttributes // Use AllPrivileges() for iterating. -} - -// AllPrivileges returns a slice that can be used to iterate over the privileges in p. 
-func (p *Tokenprivileges) AllPrivileges() []LUIDAndAttributes { - return (*[(1 << 27) - 1]LUIDAndAttributes)(unsafe.Pointer(&p.Privileges[0]))[:p.PrivilegeCount:p.PrivilegeCount] -} - -type Tokenmandatorylabel struct { - Label SIDAndAttributes -} - -func (tml *Tokenmandatorylabel) Size() uint32 { - return uint32(unsafe.Sizeof(Tokenmandatorylabel{})) + GetLengthSid(tml.Label.Sid) -} - -// Authorization Functions -//sys checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) = advapi32.CheckTokenMembership -//sys isTokenRestricted(tokenHandle Token) (ret bool, err error) [!failretval] = advapi32.IsTokenRestricted -//sys OpenProcessToken(process Handle, access uint32, token *Token) (err error) = advapi32.OpenProcessToken -//sys OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token) (err error) = advapi32.OpenThreadToken -//sys ImpersonateSelf(impersonationlevel uint32) (err error) = advapi32.ImpersonateSelf -//sys RevertToSelf() (err error) = advapi32.RevertToSelf -//sys SetThreadToken(thread *Handle, token Token) (err error) = advapi32.SetThreadToken -//sys LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) = advapi32.LookupPrivilegeValueW -//sys AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tokenprivileges, buflen uint32, prevstate *Tokenprivileges, returnlen *uint32) (err error) = advapi32.AdjustTokenPrivileges -//sys AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, buflen uint32, prevstate *Tokengroups, returnlen *uint32) (err error) = advapi32.AdjustTokenGroups -//sys GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) = advapi32.GetTokenInformation -//sys SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) = advapi32.SetTokenInformation -//sys DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) = advapi32.DuplicateTokenEx -//sys GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) = userenv.GetUserProfileDirectoryW -//sys getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetSystemDirectoryW -//sys getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetWindowsDirectoryW -//sys getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetSystemWindowsDirectoryW - -// An access token contains the security information for a logon session. -// The system creates an access token when a user logs on, and every -// process executed on behalf of the user has a copy of the token. -// The token identifies the user, the user's groups, and the user's -// privileges. The system uses the token to control access to securable -// objects and to control the ability of the user to perform various -// system-related operations on the local computer. -type Token Handle - -// OpenCurrentProcessToken opens an access token associated with current -// process with TOKEN_QUERY access. It is a real token that needs to be closed. -// -// Deprecated: Explicitly call OpenProcessToken(CurrentProcess(), ...) -// with the desired access instead, or use GetCurrentProcessToken for a -// TOKEN_QUERY token. 
-func OpenCurrentProcessToken() (Token, error) { - var token Token - err := OpenProcessToken(CurrentProcess(), TOKEN_QUERY, &token) - return token, err -} - -// GetCurrentProcessToken returns the access token associated with -// the current process. It is a pseudo token that does not need -// to be closed. -func GetCurrentProcessToken() Token { - return Token(^uintptr(4 - 1)) -} - -// GetCurrentThreadToken return the access token associated with -// the current thread. It is a pseudo token that does not need -// to be closed. -func GetCurrentThreadToken() Token { - return Token(^uintptr(5 - 1)) -} - -// GetCurrentThreadEffectiveToken returns the effective access token -// associated with the current thread. It is a pseudo token that does -// not need to be closed. -func GetCurrentThreadEffectiveToken() Token { - return Token(^uintptr(6 - 1)) -} - -// Close releases access to access token. -func (t Token) Close() error { - return CloseHandle(Handle(t)) -} - -// getInfo retrieves a specified type of information about an access token. -func (t Token) getInfo(class uint32, initSize int) (unsafe.Pointer, error) { - n := uint32(initSize) - for { - b := make([]byte, n) - e := GetTokenInformation(t, class, &b[0], uint32(len(b)), &n) - if e == nil { - return unsafe.Pointer(&b[0]), nil - } - if e != ERROR_INSUFFICIENT_BUFFER { - return nil, e - } - if n <= uint32(len(b)) { - return nil, e - } - } -} - -// GetTokenUser retrieves access token t user account information. -func (t Token) GetTokenUser() (*Tokenuser, error) { - i, e := t.getInfo(TokenUser, 50) - if e != nil { - return nil, e - } - return (*Tokenuser)(i), nil -} - -// GetTokenGroups retrieves group accounts associated with access token t. -func (t Token) GetTokenGroups() (*Tokengroups, error) { - i, e := t.getInfo(TokenGroups, 50) - if e != nil { - return nil, e - } - return (*Tokengroups)(i), nil -} - -// GetTokenPrimaryGroup retrieves access token t primary group information. -// A pointer to a SID structure representing a group that will become -// the primary group of any objects created by a process using this access token. -func (t Token) GetTokenPrimaryGroup() (*Tokenprimarygroup, error) { - i, e := t.getInfo(TokenPrimaryGroup, 50) - if e != nil { - return nil, e - } - return (*Tokenprimarygroup)(i), nil -} - -// GetUserProfileDirectory retrieves path to the -// root directory of the access token t user's profile. -func (t Token) GetUserProfileDirectory() (string, error) { - n := uint32(100) - for { - b := make([]uint16, n) - e := GetUserProfileDirectory(t, &b[0], &n) - if e == nil { - return UTF16ToString(b), nil - } - if e != ERROR_INSUFFICIENT_BUFFER { - return "", e - } - if n <= uint32(len(b)) { - return "", e - } - } -} - -// IsElevated returns whether the current token is elevated from a UAC perspective. -func (token Token) IsElevated() bool { - var isElevated uint32 - var outLen uint32 - err := GetTokenInformation(token, TokenElevation, (*byte)(unsafe.Pointer(&isElevated)), uint32(unsafe.Sizeof(isElevated)), &outLen) - if err != nil { - return false - } - return outLen == uint32(unsafe.Sizeof(isElevated)) && isElevated != 0 -} - -// GetLinkedToken returns the linked token, which may be an elevated UAC token. 
-func (token Token) GetLinkedToken() (Token, error) { - var linkedToken Token - var outLen uint32 - err := GetTokenInformation(token, TokenLinkedToken, (*byte)(unsafe.Pointer(&linkedToken)), uint32(unsafe.Sizeof(linkedToken)), &outLen) - if err != nil { - return Token(0), err - } - return linkedToken, nil -} - -// GetSystemDirectory retrieves the path to current location of the system -// directory, which is typically, though not always, `C:\Windows\System32`. -func GetSystemDirectory() (string, error) { - n := uint32(MAX_PATH) - for { - b := make([]uint16, n) - l, e := getSystemDirectory(&b[0], n) - if e != nil { - return "", e - } - if l <= n { - return UTF16ToString(b[:l]), nil - } - n = l - } -} - -// GetWindowsDirectory retrieves the path to current location of the Windows -// directory, which is typically, though not always, `C:\Windows`. This may -// be a private user directory in the case that the application is running -// under a terminal server. -func GetWindowsDirectory() (string, error) { - n := uint32(MAX_PATH) - for { - b := make([]uint16, n) - l, e := getWindowsDirectory(&b[0], n) - if e != nil { - return "", e - } - if l <= n { - return UTF16ToString(b[:l]), nil - } - n = l - } -} - -// GetSystemWindowsDirectory retrieves the path to current location of the -// Windows directory, which is typically, though not always, `C:\Windows`. -func GetSystemWindowsDirectory() (string, error) { - n := uint32(MAX_PATH) - for { - b := make([]uint16, n) - l, e := getSystemWindowsDirectory(&b[0], n) - if e != nil { - return "", e - } - if l <= n { - return UTF16ToString(b[:l]), nil - } - n = l - } -} - -// IsMember reports whether the access token t is a member of the provided SID. -func (t Token) IsMember(sid *SID) (bool, error) { - var b int32 - if e := checkTokenMembership(t, sid, &b); e != nil { - return false, e - } - return b != 0, nil -} - -// IsRestricted reports whether the access token t is a restricted token. -func (t Token) IsRestricted() (isRestricted bool, err error) { - isRestricted, err = isTokenRestricted(t) - if !isRestricted && err == syscall.EINVAL { - // If err is EINVAL, this returned ERROR_SUCCESS indicating a non-restricted token. 
- err = nil - } - return -} - -const ( - WTS_CONSOLE_CONNECT = 0x1 - WTS_CONSOLE_DISCONNECT = 0x2 - WTS_REMOTE_CONNECT = 0x3 - WTS_REMOTE_DISCONNECT = 0x4 - WTS_SESSION_LOGON = 0x5 - WTS_SESSION_LOGOFF = 0x6 - WTS_SESSION_LOCK = 0x7 - WTS_SESSION_UNLOCK = 0x8 - WTS_SESSION_REMOTE_CONTROL = 0x9 - WTS_SESSION_CREATE = 0xa - WTS_SESSION_TERMINATE = 0xb -) - -const ( - WTSActive = 0 - WTSConnected = 1 - WTSConnectQuery = 2 - WTSShadow = 3 - WTSDisconnected = 4 - WTSIdle = 5 - WTSListen = 6 - WTSReset = 7 - WTSDown = 8 - WTSInit = 9 -) - -type WTSSESSION_NOTIFICATION struct { - Size uint32 - SessionID uint32 -} - -type WTS_SESSION_INFO struct { - SessionID uint32 - WindowStationName *uint16 - State uint32 -} - -//sys WTSQueryUserToken(session uint32, token *Token) (err error) = wtsapi32.WTSQueryUserToken -//sys WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) = wtsapi32.WTSEnumerateSessionsW -//sys WTSFreeMemory(ptr uintptr) = wtsapi32.WTSFreeMemory -//sys WTSGetActiveConsoleSessionId() (sessionID uint32) - -type ACL struct { - aclRevision byte - sbz1 byte - aclSize uint16 - aceCount uint16 - sbz2 uint16 -} - -type SECURITY_DESCRIPTOR struct { - revision byte - sbz1 byte - control SECURITY_DESCRIPTOR_CONTROL - owner *SID - group *SID - sacl *ACL - dacl *ACL -} - -type SECURITY_QUALITY_OF_SERVICE struct { - Length uint32 - ImpersonationLevel uint32 - ContextTrackingMode byte - EffectiveOnly byte -} - -// Constants for the ContextTrackingMode field of SECURITY_QUALITY_OF_SERVICE. -const ( - SECURITY_STATIC_TRACKING = 0 - SECURITY_DYNAMIC_TRACKING = 1 -) - -type SecurityAttributes struct { - Length uint32 - SecurityDescriptor *SECURITY_DESCRIPTOR - InheritHandle uint32 -} - -type SE_OBJECT_TYPE uint32 - -// Constants for type SE_OBJECT_TYPE -const ( - SE_UNKNOWN_OBJECT_TYPE = 0 - SE_FILE_OBJECT = 1 - SE_SERVICE = 2 - SE_PRINTER = 3 - SE_REGISTRY_KEY = 4 - SE_LMSHARE = 5 - SE_KERNEL_OBJECT = 6 - SE_WINDOW_OBJECT = 7 - SE_DS_OBJECT = 8 - SE_DS_OBJECT_ALL = 9 - SE_PROVIDER_DEFINED_OBJECT = 10 - SE_WMIGUID_OBJECT = 11 - SE_REGISTRY_WOW64_32KEY = 12 - SE_REGISTRY_WOW64_64KEY = 13 -) - -type SECURITY_INFORMATION uint32 - -// Constants for type SECURITY_INFORMATION -const ( - OWNER_SECURITY_INFORMATION = 0x00000001 - GROUP_SECURITY_INFORMATION = 0x00000002 - DACL_SECURITY_INFORMATION = 0x00000004 - SACL_SECURITY_INFORMATION = 0x00000008 - LABEL_SECURITY_INFORMATION = 0x00000010 - ATTRIBUTE_SECURITY_INFORMATION = 0x00000020 - SCOPE_SECURITY_INFORMATION = 0x00000040 - BACKUP_SECURITY_INFORMATION = 0x00010000 - PROTECTED_DACL_SECURITY_INFORMATION = 0x80000000 - PROTECTED_SACL_SECURITY_INFORMATION = 0x40000000 - UNPROTECTED_DACL_SECURITY_INFORMATION = 0x20000000 - UNPROTECTED_SACL_SECURITY_INFORMATION = 0x10000000 -) - -type SECURITY_DESCRIPTOR_CONTROL uint16 - -// Constants for type SECURITY_DESCRIPTOR_CONTROL -const ( - SE_OWNER_DEFAULTED = 0x0001 - SE_GROUP_DEFAULTED = 0x0002 - SE_DACL_PRESENT = 0x0004 - SE_DACL_DEFAULTED = 0x0008 - SE_SACL_PRESENT = 0x0010 - SE_SACL_DEFAULTED = 0x0020 - SE_DACL_AUTO_INHERIT_REQ = 0x0100 - SE_SACL_AUTO_INHERIT_REQ = 0x0200 - SE_DACL_AUTO_INHERITED = 0x0400 - SE_SACL_AUTO_INHERITED = 0x0800 - SE_DACL_PROTECTED = 0x1000 - SE_SACL_PROTECTED = 0x2000 - SE_RM_CONTROL_VALID = 0x4000 - SE_SELF_RELATIVE = 0x8000 -) - -type ACCESS_MASK uint32 - -// Constants for type ACCESS_MASK -const ( - DELETE = 0x00010000 - READ_CONTROL = 0x00020000 - WRITE_DAC = 0x00040000 - WRITE_OWNER = 0x00080000 - SYNCHRONIZE = 
0x00100000 - STANDARD_RIGHTS_REQUIRED = 0x000F0000 - STANDARD_RIGHTS_READ = READ_CONTROL - STANDARD_RIGHTS_WRITE = READ_CONTROL - STANDARD_RIGHTS_EXECUTE = READ_CONTROL - STANDARD_RIGHTS_ALL = 0x001F0000 - SPECIFIC_RIGHTS_ALL = 0x0000FFFF - ACCESS_SYSTEM_SECURITY = 0x01000000 - MAXIMUM_ALLOWED = 0x02000000 - GENERIC_READ = 0x80000000 - GENERIC_WRITE = 0x40000000 - GENERIC_EXECUTE = 0x20000000 - GENERIC_ALL = 0x10000000 -) - -type ACCESS_MODE uint32 - -// Constants for type ACCESS_MODE -const ( - NOT_USED_ACCESS = 0 - GRANT_ACCESS = 1 - SET_ACCESS = 2 - DENY_ACCESS = 3 - REVOKE_ACCESS = 4 - SET_AUDIT_SUCCESS = 5 - SET_AUDIT_FAILURE = 6 -) - -// Constants for AceFlags and Inheritance fields -const ( - NO_INHERITANCE = 0x0 - SUB_OBJECTS_ONLY_INHERIT = 0x1 - SUB_CONTAINERS_ONLY_INHERIT = 0x2 - SUB_CONTAINERS_AND_OBJECTS_INHERIT = 0x3 - INHERIT_NO_PROPAGATE = 0x4 - INHERIT_ONLY = 0x8 - INHERITED_ACCESS_ENTRY = 0x10 - INHERITED_PARENT = 0x10000000 - INHERITED_GRANDPARENT = 0x20000000 - OBJECT_INHERIT_ACE = 0x1 - CONTAINER_INHERIT_ACE = 0x2 - NO_PROPAGATE_INHERIT_ACE = 0x4 - INHERIT_ONLY_ACE = 0x8 - INHERITED_ACE = 0x10 - VALID_INHERIT_FLAGS = 0x1F -) - -type MULTIPLE_TRUSTEE_OPERATION uint32 - -// Constants for MULTIPLE_TRUSTEE_OPERATION -const ( - NO_MULTIPLE_TRUSTEE = 0 - TRUSTEE_IS_IMPERSONATE = 1 -) - -type TRUSTEE_FORM uint32 - -// Constants for TRUSTEE_FORM -const ( - TRUSTEE_IS_SID = 0 - TRUSTEE_IS_NAME = 1 - TRUSTEE_BAD_FORM = 2 - TRUSTEE_IS_OBJECTS_AND_SID = 3 - TRUSTEE_IS_OBJECTS_AND_NAME = 4 -) - -type TRUSTEE_TYPE uint32 - -// Constants for TRUSTEE_TYPE -const ( - TRUSTEE_IS_UNKNOWN = 0 - TRUSTEE_IS_USER = 1 - TRUSTEE_IS_GROUP = 2 - TRUSTEE_IS_DOMAIN = 3 - TRUSTEE_IS_ALIAS = 4 - TRUSTEE_IS_WELL_KNOWN_GROUP = 5 - TRUSTEE_IS_DELETED = 6 - TRUSTEE_IS_INVALID = 7 - TRUSTEE_IS_COMPUTER = 8 -) - -// Constants for ObjectsPresent field -const ( - ACE_OBJECT_TYPE_PRESENT = 0x1 - ACE_INHERITED_OBJECT_TYPE_PRESENT = 0x2 -) - -type EXPLICIT_ACCESS struct { - AccessPermissions ACCESS_MASK - AccessMode ACCESS_MODE - Inheritance uint32 - Trustee TRUSTEE -} - -// This type is the union inside of TRUSTEE and must be created using one of the TrusteeValueFrom* functions. 
-type TrusteeValue uintptr - -func TrusteeValueFromString(str string) TrusteeValue { - return TrusteeValue(unsafe.Pointer(StringToUTF16Ptr(str))) -} -func TrusteeValueFromSID(sid *SID) TrusteeValue { - return TrusteeValue(unsafe.Pointer(sid)) -} -func TrusteeValueFromObjectsAndSid(objectsAndSid *OBJECTS_AND_SID) TrusteeValue { - return TrusteeValue(unsafe.Pointer(objectsAndSid)) -} -func TrusteeValueFromObjectsAndName(objectsAndName *OBJECTS_AND_NAME) TrusteeValue { - return TrusteeValue(unsafe.Pointer(objectsAndName)) -} - -type TRUSTEE struct { - MultipleTrustee *TRUSTEE - MultipleTrusteeOperation MULTIPLE_TRUSTEE_OPERATION - TrusteeForm TRUSTEE_FORM - TrusteeType TRUSTEE_TYPE - TrusteeValue TrusteeValue -} - -type OBJECTS_AND_SID struct { - ObjectsPresent uint32 - ObjectTypeGuid GUID - InheritedObjectTypeGuid GUID - Sid *SID -} - -type OBJECTS_AND_NAME struct { - ObjectsPresent uint32 - ObjectType SE_OBJECT_TYPE - ObjectTypeName *uint16 - InheritedObjectTypeName *uint16 - Name *uint16 -} - -//sys getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) = advapi32.GetSecurityInfo -//sys SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) = advapi32.SetSecurityInfo -//sys getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) = advapi32.GetNamedSecurityInfoW -//sys SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) = advapi32.SetNamedSecurityInfoW -//sys SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) = advapi32.SetKernelObjectSecurity - -//sys buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) = advapi32.BuildSecurityDescriptorW -//sys initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) = advapi32.InitializeSecurityDescriptor - -//sys getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) = advapi32.GetSecurityDescriptorControl -//sys getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl **ACL, daclDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorDacl -//sys getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl **ACL, saclDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorSacl -//sys getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorOwner -//sys getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorGroup -//sys getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) = advapi32.GetSecurityDescriptorLength -//sys getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) [failretval!=0] = 
advapi32.GetSecurityDescriptorRMControl -//sys isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) = advapi32.IsValidSecurityDescriptor - -//sys setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) = advapi32.SetSecurityDescriptorControl -//sys setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl *ACL, daclDefaulted bool) (err error) = advapi32.SetSecurityDescriptorDacl -//sys setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl *ACL, saclDefaulted bool) (err error) = advapi32.SetSecurityDescriptorSacl -//sys setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaulted bool) (err error) = advapi32.SetSecurityDescriptorOwner -//sys setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaulted bool) (err error) = advapi32.SetSecurityDescriptorGroup -//sys setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) = advapi32.SetSecurityDescriptorRMControl - -//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW -//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW - -//sys makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) = advapi32.MakeAbsoluteSD -//sys makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) = advapi32.MakeSelfRelativeSD - -//sys setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) = advapi32.SetEntriesInAclW - -// Control returns the security descriptor control bits. -func (sd *SECURITY_DESCRIPTOR) Control() (control SECURITY_DESCRIPTOR_CONTROL, revision uint32, err error) { - err = getSecurityDescriptorControl(sd, &control, &revision) - return -} - -// SetControl sets the security descriptor control bits. -func (sd *SECURITY_DESCRIPTOR) SetControl(controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) error { - return setSecurityDescriptorControl(sd, controlBitsOfInterest, controlBitsToSet) -} - -// RMControl returns the security descriptor resource manager control bits. -func (sd *SECURITY_DESCRIPTOR) RMControl() (control uint8, err error) { - err = getSecurityDescriptorRMControl(sd, &control) - return -} - -// SetRMControl sets the security descriptor resource manager control bits. -func (sd *SECURITY_DESCRIPTOR) SetRMControl(rmControl uint8) { - setSecurityDescriptorRMControl(sd, &rmControl) -} - -// DACL returns the security descriptor DACL and whether it was defaulted. The dacl return value may be nil -// if a DACL exists but is an "empty DACL", meaning fully permissive. If the DACL does not exist, err returns -// ERROR_OBJECT_NOT_FOUND. 
-func (sd *SECURITY_DESCRIPTOR) DACL() (dacl *ACL, defaulted bool, err error) { - var present bool - err = getSecurityDescriptorDacl(sd, &present, &dacl, &defaulted) - if !present { - err = ERROR_OBJECT_NOT_FOUND - } - return -} - -// SetDACL sets the absolute security descriptor DACL. -func (absoluteSD *SECURITY_DESCRIPTOR) SetDACL(dacl *ACL, present, defaulted bool) error { - return setSecurityDescriptorDacl(absoluteSD, present, dacl, defaulted) -} - -// SACL returns the security descriptor SACL and whether it was defaulted. The sacl return value may be nil -// if a SACL exists but is an "empty SACL", meaning fully permissive. If the SACL does not exist, err returns -// ERROR_OBJECT_NOT_FOUND. -func (sd *SECURITY_DESCRIPTOR) SACL() (sacl *ACL, defaulted bool, err error) { - var present bool - err = getSecurityDescriptorSacl(sd, &present, &sacl, &defaulted) - if !present { - err = ERROR_OBJECT_NOT_FOUND - } - return -} - -// SetSACL sets the absolute security descriptor SACL. -func (absoluteSD *SECURITY_DESCRIPTOR) SetSACL(sacl *ACL, present, defaulted bool) error { - return setSecurityDescriptorSacl(absoluteSD, present, sacl, defaulted) -} - -// Owner returns the security descriptor owner and whether it was defaulted. -func (sd *SECURITY_DESCRIPTOR) Owner() (owner *SID, defaulted bool, err error) { - err = getSecurityDescriptorOwner(sd, &owner, &defaulted) - return -} - -// SetOwner sets the absolute security descriptor owner. -func (absoluteSD *SECURITY_DESCRIPTOR) SetOwner(owner *SID, defaulted bool) error { - return setSecurityDescriptorOwner(absoluteSD, owner, defaulted) -} - -// Group returns the security descriptor group and whether it was defaulted. -func (sd *SECURITY_DESCRIPTOR) Group() (group *SID, defaulted bool, err error) { - err = getSecurityDescriptorGroup(sd, &group, &defaulted) - return -} - -// SetGroup sets the absolute security descriptor owner. -func (absoluteSD *SECURITY_DESCRIPTOR) SetGroup(group *SID, defaulted bool) error { - return setSecurityDescriptorGroup(absoluteSD, group, defaulted) -} - -// Length returns the length of the security descriptor. -func (sd *SECURITY_DESCRIPTOR) Length() uint32 { - return getSecurityDescriptorLength(sd) -} - -// IsValid returns whether the security descriptor is valid. -func (sd *SECURITY_DESCRIPTOR) IsValid() bool { - return isValidSecurityDescriptor(sd) -} - -// String returns the SDDL form of the security descriptor, with a function signature that can be -// used with %v formatting directives. -func (sd *SECURITY_DESCRIPTOR) String() string { - var sddl *uint16 - err := convertSecurityDescriptorToStringSecurityDescriptor(sd, 1, 0xff, &sddl, nil) - if err != nil { - return "" - } - defer LocalFree(Handle(unsafe.Pointer(sddl))) - return UTF16PtrToString(sddl) -} - -// ToAbsolute converts a self-relative security descriptor into an absolute one. -func (selfRelativeSD *SECURITY_DESCRIPTOR) ToAbsolute() (absoluteSD *SECURITY_DESCRIPTOR, err error) { - control, _, err := selfRelativeSD.Control() - if err != nil { - return - } - if control&SE_SELF_RELATIVE == 0 { - err = ERROR_INVALID_PARAMETER - return - } - var absoluteSDSize, daclSize, saclSize, ownerSize, groupSize uint32 - err = makeAbsoluteSD(selfRelativeSD, nil, &absoluteSDSize, - nil, &daclSize, nil, &saclSize, nil, &ownerSize, nil, &groupSize) - switch err { - case ERROR_INSUFFICIENT_BUFFER: - case nil: - // makeAbsoluteSD is expected to fail, but it succeeds. 
- return nil, ERROR_INTERNAL_ERROR - default: - return nil, err - } - if absoluteSDSize > 0 { - absoluteSD = (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&make([]byte, absoluteSDSize)[0])) - } - var ( - dacl *ACL - sacl *ACL - owner *SID - group *SID - ) - if daclSize > 0 { - dacl = (*ACL)(unsafe.Pointer(&make([]byte, daclSize)[0])) - } - if saclSize > 0 { - sacl = (*ACL)(unsafe.Pointer(&make([]byte, saclSize)[0])) - } - if ownerSize > 0 { - owner = (*SID)(unsafe.Pointer(&make([]byte, ownerSize)[0])) - } - if groupSize > 0 { - group = (*SID)(unsafe.Pointer(&make([]byte, groupSize)[0])) - } - err = makeAbsoluteSD(selfRelativeSD, absoluteSD, &absoluteSDSize, - dacl, &daclSize, sacl, &saclSize, owner, &ownerSize, group, &groupSize) - return -} - -// ToSelfRelative converts an absolute security descriptor into a self-relative one. -func (absoluteSD *SECURITY_DESCRIPTOR) ToSelfRelative() (selfRelativeSD *SECURITY_DESCRIPTOR, err error) { - control, _, err := absoluteSD.Control() - if err != nil { - return - } - if control&SE_SELF_RELATIVE != 0 { - err = ERROR_INVALID_PARAMETER - return - } - var selfRelativeSDSize uint32 - err = makeSelfRelativeSD(absoluteSD, nil, &selfRelativeSDSize) - switch err { - case ERROR_INSUFFICIENT_BUFFER: - case nil: - // makeSelfRelativeSD is expected to fail, but it succeeds. - return nil, ERROR_INTERNAL_ERROR - default: - return nil, err - } - if selfRelativeSDSize > 0 { - selfRelativeSD = (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&make([]byte, selfRelativeSDSize)[0])) - } - err = makeSelfRelativeSD(absoluteSD, selfRelativeSD, &selfRelativeSDSize) - return -} - -func (selfRelativeSD *SECURITY_DESCRIPTOR) copySelfRelativeSecurityDescriptor() *SECURITY_DESCRIPTOR { - sdLen := int(selfRelativeSD.Length()) - const min = int(unsafe.Sizeof(SECURITY_DESCRIPTOR{})) - if sdLen < min { - sdLen = min - } - - var src []byte - h := (*unsafeheader.Slice)(unsafe.Pointer(&src)) - h.Data = unsafe.Pointer(selfRelativeSD) - h.Len = sdLen - h.Cap = sdLen - - const psize = int(unsafe.Sizeof(uintptr(0))) - - var dst []byte - h = (*unsafeheader.Slice)(unsafe.Pointer(&dst)) - alloc := make([]uintptr, (sdLen+psize-1)/psize) - h.Data = (*unsafeheader.Slice)(unsafe.Pointer(&alloc)).Data - h.Len = sdLen - h.Cap = sdLen - - copy(dst, src) - return (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&dst[0])) -} - -// SecurityDescriptorFromString converts an SDDL string describing a security descriptor into a -// self-relative security descriptor object allocated on the Go heap. -func SecurityDescriptorFromString(sddl string) (sd *SECURITY_DESCRIPTOR, err error) { - var winHeapSD *SECURITY_DESCRIPTOR - err = convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &winHeapSD, nil) - if err != nil { - return - } - defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) - return winHeapSD.copySelfRelativeSecurityDescriptor(), nil -} - -// GetSecurityInfo queries the security information for a given handle and returns the self-relative security -// descriptor result on the Go heap. 
-func GetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION) (sd *SECURITY_DESCRIPTOR, err error) { - var winHeapSD *SECURITY_DESCRIPTOR - err = getSecurityInfo(handle, objectType, securityInformation, nil, nil, nil, nil, &winHeapSD) - if err != nil { - return - } - defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) - return winHeapSD.copySelfRelativeSecurityDescriptor(), nil -} - -// GetNamedSecurityInfo queries the security information for a given named object and returns the self-relative security -// descriptor result on the Go heap. -func GetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION) (sd *SECURITY_DESCRIPTOR, err error) { - var winHeapSD *SECURITY_DESCRIPTOR - err = getNamedSecurityInfo(objectName, objectType, securityInformation, nil, nil, nil, nil, &winHeapSD) - if err != nil { - return - } - defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) - return winHeapSD.copySelfRelativeSecurityDescriptor(), nil -} - -// BuildSecurityDescriptor makes a new security descriptor using the input trustees, explicit access lists, and -// prior security descriptor to be merged, any of which can be nil, returning the self-relative security descriptor -// result on the Go heap. -func BuildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, accessEntries []EXPLICIT_ACCESS, auditEntries []EXPLICIT_ACCESS, mergedSecurityDescriptor *SECURITY_DESCRIPTOR) (sd *SECURITY_DESCRIPTOR, err error) { - var winHeapSD *SECURITY_DESCRIPTOR - var winHeapSDSize uint32 - var firstAccessEntry *EXPLICIT_ACCESS - if len(accessEntries) > 0 { - firstAccessEntry = &accessEntries[0] - } - var firstAuditEntry *EXPLICIT_ACCESS - if len(auditEntries) > 0 { - firstAuditEntry = &auditEntries[0] - } - err = buildSecurityDescriptor(owner, group, uint32(len(accessEntries)), firstAccessEntry, uint32(len(auditEntries)), firstAuditEntry, mergedSecurityDescriptor, &winHeapSDSize, &winHeapSD) - if err != nil { - return - } - defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) - return winHeapSD.copySelfRelativeSecurityDescriptor(), nil -} - -// NewSecurityDescriptor creates and initializes a new absolute security descriptor. -func NewSecurityDescriptor() (absoluteSD *SECURITY_DESCRIPTOR, err error) { - absoluteSD = &SECURITY_DESCRIPTOR{} - err = initializeSecurityDescriptor(absoluteSD, 1) - return -} - -// ACLFromEntries returns a new ACL on the Go heap containing a list of explicit entries as well as those of another ACL. -// Both explicitEntries and mergedACL are optional and can be nil. -func ACLFromEntries(explicitEntries []EXPLICIT_ACCESS, mergedACL *ACL) (acl *ACL, err error) { - var firstExplicitEntry *EXPLICIT_ACCESS - if len(explicitEntries) > 0 { - firstExplicitEntry = &explicitEntries[0] - } - var winHeapACL *ACL - err = setEntriesInAcl(uint32(len(explicitEntries)), firstExplicitEntry, mergedACL, &winHeapACL) - if err != nil { - return - } - defer LocalFree(Handle(unsafe.Pointer(winHeapACL))) - aclBytes := make([]byte, winHeapACL.aclSize) - copy(aclBytes, (*[(1 << 31) - 1]byte)(unsafe.Pointer(winHeapACL))[:len(aclBytes):len(aclBytes)]) - return (*ACL)(unsafe.Pointer(&aclBytes[0])), nil -} diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go deleted file mode 100644 index c964b6848..000000000 --- a/vendor/golang.org/x/sys/windows/service.go +++ /dev/null @@ -1,254 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build windows -// +build windows - -package windows - -const ( - SC_MANAGER_CONNECT = 1 - SC_MANAGER_CREATE_SERVICE = 2 - SC_MANAGER_ENUMERATE_SERVICE = 4 - SC_MANAGER_LOCK = 8 - SC_MANAGER_QUERY_LOCK_STATUS = 16 - SC_MANAGER_MODIFY_BOOT_CONFIG = 32 - SC_MANAGER_ALL_ACCESS = 0xf003f -) - -const ( - SERVICE_KERNEL_DRIVER = 1 - SERVICE_FILE_SYSTEM_DRIVER = 2 - SERVICE_ADAPTER = 4 - SERVICE_RECOGNIZER_DRIVER = 8 - SERVICE_WIN32_OWN_PROCESS = 16 - SERVICE_WIN32_SHARE_PROCESS = 32 - SERVICE_WIN32 = SERVICE_WIN32_OWN_PROCESS | SERVICE_WIN32_SHARE_PROCESS - SERVICE_INTERACTIVE_PROCESS = 256 - SERVICE_DRIVER = SERVICE_KERNEL_DRIVER | SERVICE_FILE_SYSTEM_DRIVER | SERVICE_RECOGNIZER_DRIVER - SERVICE_TYPE_ALL = SERVICE_WIN32 | SERVICE_ADAPTER | SERVICE_DRIVER | SERVICE_INTERACTIVE_PROCESS - - SERVICE_BOOT_START = 0 - SERVICE_SYSTEM_START = 1 - SERVICE_AUTO_START = 2 - SERVICE_DEMAND_START = 3 - SERVICE_DISABLED = 4 - - SERVICE_ERROR_IGNORE = 0 - SERVICE_ERROR_NORMAL = 1 - SERVICE_ERROR_SEVERE = 2 - SERVICE_ERROR_CRITICAL = 3 - - SC_STATUS_PROCESS_INFO = 0 - - SC_ACTION_NONE = 0 - SC_ACTION_RESTART = 1 - SC_ACTION_REBOOT = 2 - SC_ACTION_RUN_COMMAND = 3 - - SERVICE_STOPPED = 1 - SERVICE_START_PENDING = 2 - SERVICE_STOP_PENDING = 3 - SERVICE_RUNNING = 4 - SERVICE_CONTINUE_PENDING = 5 - SERVICE_PAUSE_PENDING = 6 - SERVICE_PAUSED = 7 - SERVICE_NO_CHANGE = 0xffffffff - - SERVICE_ACCEPT_STOP = 1 - SERVICE_ACCEPT_PAUSE_CONTINUE = 2 - SERVICE_ACCEPT_SHUTDOWN = 4 - SERVICE_ACCEPT_PARAMCHANGE = 8 - SERVICE_ACCEPT_NETBINDCHANGE = 16 - SERVICE_ACCEPT_HARDWAREPROFILECHANGE = 32 - SERVICE_ACCEPT_POWEREVENT = 64 - SERVICE_ACCEPT_SESSIONCHANGE = 128 - SERVICE_ACCEPT_PRESHUTDOWN = 256 - - SERVICE_CONTROL_STOP = 1 - SERVICE_CONTROL_PAUSE = 2 - SERVICE_CONTROL_CONTINUE = 3 - SERVICE_CONTROL_INTERROGATE = 4 - SERVICE_CONTROL_SHUTDOWN = 5 - SERVICE_CONTROL_PARAMCHANGE = 6 - SERVICE_CONTROL_NETBINDADD = 7 - SERVICE_CONTROL_NETBINDREMOVE = 8 - SERVICE_CONTROL_NETBINDENABLE = 9 - SERVICE_CONTROL_NETBINDDISABLE = 10 - SERVICE_CONTROL_DEVICEEVENT = 11 - SERVICE_CONTROL_HARDWAREPROFILECHANGE = 12 - SERVICE_CONTROL_POWEREVENT = 13 - SERVICE_CONTROL_SESSIONCHANGE = 14 - SERVICE_CONTROL_PRESHUTDOWN = 15 - - SERVICE_ACTIVE = 1 - SERVICE_INACTIVE = 2 - SERVICE_STATE_ALL = 3 - - SERVICE_QUERY_CONFIG = 1 - SERVICE_CHANGE_CONFIG = 2 - SERVICE_QUERY_STATUS = 4 - SERVICE_ENUMERATE_DEPENDENTS = 8 - SERVICE_START = 16 - SERVICE_STOP = 32 - SERVICE_PAUSE_CONTINUE = 64 - SERVICE_INTERROGATE = 128 - SERVICE_USER_DEFINED_CONTROL = 256 - SERVICE_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SERVICE_QUERY_CONFIG | SERVICE_CHANGE_CONFIG | SERVICE_QUERY_STATUS | SERVICE_ENUMERATE_DEPENDENTS | SERVICE_START | SERVICE_STOP | SERVICE_PAUSE_CONTINUE | SERVICE_INTERROGATE | SERVICE_USER_DEFINED_CONTROL - - SERVICE_RUNS_IN_SYSTEM_PROCESS = 1 - - SERVICE_CONFIG_DESCRIPTION = 1 - SERVICE_CONFIG_FAILURE_ACTIONS = 2 - SERVICE_CONFIG_DELAYED_AUTO_START_INFO = 3 - SERVICE_CONFIG_FAILURE_ACTIONS_FLAG = 4 - SERVICE_CONFIG_SERVICE_SID_INFO = 5 - SERVICE_CONFIG_REQUIRED_PRIVILEGES_INFO = 6 - SERVICE_CONFIG_PRESHUTDOWN_INFO = 7 - SERVICE_CONFIG_TRIGGER_INFO = 8 - SERVICE_CONFIG_PREFERRED_NODE = 9 - SERVICE_CONFIG_LAUNCH_PROTECTED = 12 - - SERVICE_SID_TYPE_NONE = 0 - SERVICE_SID_TYPE_UNRESTRICTED = 1 - SERVICE_SID_TYPE_RESTRICTED = 2 | SERVICE_SID_TYPE_UNRESTRICTED - - SC_ENUM_PROCESS_INFO = 0 - - SERVICE_NOTIFY_STATUS_CHANGE = 2 - SERVICE_NOTIFY_STOPPED = 
0x00000001 - SERVICE_NOTIFY_START_PENDING = 0x00000002 - SERVICE_NOTIFY_STOP_PENDING = 0x00000004 - SERVICE_NOTIFY_RUNNING = 0x00000008 - SERVICE_NOTIFY_CONTINUE_PENDING = 0x00000010 - SERVICE_NOTIFY_PAUSE_PENDING = 0x00000020 - SERVICE_NOTIFY_PAUSED = 0x00000040 - SERVICE_NOTIFY_CREATED = 0x00000080 - SERVICE_NOTIFY_DELETED = 0x00000100 - SERVICE_NOTIFY_DELETE_PENDING = 0x00000200 - - SC_EVENT_DATABASE_CHANGE = 0 - SC_EVENT_PROPERTY_CHANGE = 1 - SC_EVENT_STATUS_CHANGE = 2 - - SERVICE_START_REASON_DEMAND = 0x00000001 - SERVICE_START_REASON_AUTO = 0x00000002 - SERVICE_START_REASON_TRIGGER = 0x00000004 - SERVICE_START_REASON_RESTART_ON_FAILURE = 0x00000008 - SERVICE_START_REASON_DELAYEDAUTO = 0x00000010 - - SERVICE_DYNAMIC_INFORMATION_LEVEL_START_REASON = 1 -) - -type ENUM_SERVICE_STATUS struct { - ServiceName *uint16 - DisplayName *uint16 - ServiceStatus SERVICE_STATUS -} - -type SERVICE_STATUS struct { - ServiceType uint32 - CurrentState uint32 - ControlsAccepted uint32 - Win32ExitCode uint32 - ServiceSpecificExitCode uint32 - CheckPoint uint32 - WaitHint uint32 -} - -type SERVICE_TABLE_ENTRY struct { - ServiceName *uint16 - ServiceProc uintptr -} - -type QUERY_SERVICE_CONFIG struct { - ServiceType uint32 - StartType uint32 - ErrorControl uint32 - BinaryPathName *uint16 - LoadOrderGroup *uint16 - TagId uint32 - Dependencies *uint16 - ServiceStartName *uint16 - DisplayName *uint16 -} - -type SERVICE_DESCRIPTION struct { - Description *uint16 -} - -type SERVICE_DELAYED_AUTO_START_INFO struct { - IsDelayedAutoStartUp uint32 -} - -type SERVICE_STATUS_PROCESS struct { - ServiceType uint32 - CurrentState uint32 - ControlsAccepted uint32 - Win32ExitCode uint32 - ServiceSpecificExitCode uint32 - CheckPoint uint32 - WaitHint uint32 - ProcessId uint32 - ServiceFlags uint32 -} - -type ENUM_SERVICE_STATUS_PROCESS struct { - ServiceName *uint16 - DisplayName *uint16 - ServiceStatusProcess SERVICE_STATUS_PROCESS -} - -type SERVICE_NOTIFY struct { - Version uint32 - NotifyCallback uintptr - Context uintptr - NotificationStatus uint32 - ServiceStatus SERVICE_STATUS_PROCESS - NotificationTriggered uint32 - ServiceNames *uint16 -} - -type SERVICE_FAILURE_ACTIONS struct { - ResetPeriod uint32 - RebootMsg *uint16 - Command *uint16 - ActionsCount uint32 - Actions *SC_ACTION -} - -type SC_ACTION struct { - Type uint32 - Delay uint32 -} - -type QUERY_SERVICE_LOCK_STATUS struct { - IsLocked uint32 - LockOwner *uint16 - LockDuration uint32 -} - -//sys OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenSCManagerW -//sys CloseServiceHandle(handle Handle) (err error) = advapi32.CloseServiceHandle -//sys CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) [failretval==0] = advapi32.CreateServiceW -//sys OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenServiceW -//sys DeleteService(service Handle) (err error) = advapi32.DeleteService -//sys StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) = advapi32.StartServiceW -//sys QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) = advapi32.QueryServiceStatus -//sys QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, 
bytesNeeded *uint32) (err error) = advapi32.QueryServiceLockStatusW -//sys ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) = advapi32.ControlService -//sys StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) = advapi32.StartServiceCtrlDispatcherW -//sys SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) = advapi32.SetServiceStatus -//sys ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) = advapi32.ChangeServiceConfigW -//sys QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceConfigW -//sys ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) = advapi32.ChangeServiceConfig2W -//sys QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceConfig2W -//sys EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) = advapi32.EnumServicesStatusExW -//sys QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceStatusEx -//sys NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) = advapi32.NotifyServiceStatusChangeW -//sys SubscribeServiceChangeNotifications(service Handle, eventType uint32, callback uintptr, callbackCtx uintptr, subscription *uintptr) (ret error) = sechost.SubscribeServiceChangeNotifications? -//sys UnsubscribeServiceChangeNotifications(subscription uintptr) = sechost.UnsubscribeServiceChangeNotifications? -//sys RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, context uintptr) (handle Handle, err error) = advapi32.RegisterServiceCtrlHandlerExW -//sys QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInfo unsafe.Pointer) (err error) = advapi32.QueryServiceDynamicInformation? -//sys EnumDependentServices(service Handle, activityState uint32, services *ENUM_SERVICE_STATUS, buffSize uint32, bytesNeeded *uint32, servicesReturned *uint32) (err error) = advapi32.EnumDependentServicesW diff --git a/vendor/golang.org/x/sys/windows/setupapi_windows.go b/vendor/golang.org/x/sys/windows/setupapi_windows.go deleted file mode 100644 index f8126482f..000000000 --- a/vendor/golang.org/x/sys/windows/setupapi_windows.go +++ /dev/null @@ -1,1425 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package windows - -import ( - "encoding/binary" - "errors" - "fmt" - "runtime" - "strings" - "syscall" - "unsafe" -) - -// This file contains functions that wrap SetupAPI.dll and CfgMgr32.dll, -// core system functions for managing hardware devices, drivers, and the PnP tree. 
-// Information about these APIs can be found at: -// https://docs.microsoft.com/en-us/windows-hardware/drivers/install/setupapi -// https://docs.microsoft.com/en-us/windows/win32/devinst/cfgmgr32- - -const ( - ERROR_EXPECTED_SECTION_NAME Errno = 0x20000000 | 0xC0000000 | 0 - ERROR_BAD_SECTION_NAME_LINE Errno = 0x20000000 | 0xC0000000 | 1 - ERROR_SECTION_NAME_TOO_LONG Errno = 0x20000000 | 0xC0000000 | 2 - ERROR_GENERAL_SYNTAX Errno = 0x20000000 | 0xC0000000 | 3 - ERROR_WRONG_INF_STYLE Errno = 0x20000000 | 0xC0000000 | 0x100 - ERROR_SECTION_NOT_FOUND Errno = 0x20000000 | 0xC0000000 | 0x101 - ERROR_LINE_NOT_FOUND Errno = 0x20000000 | 0xC0000000 | 0x102 - ERROR_NO_BACKUP Errno = 0x20000000 | 0xC0000000 | 0x103 - ERROR_NO_ASSOCIATED_CLASS Errno = 0x20000000 | 0xC0000000 | 0x200 - ERROR_CLASS_MISMATCH Errno = 0x20000000 | 0xC0000000 | 0x201 - ERROR_DUPLICATE_FOUND Errno = 0x20000000 | 0xC0000000 | 0x202 - ERROR_NO_DRIVER_SELECTED Errno = 0x20000000 | 0xC0000000 | 0x203 - ERROR_KEY_DOES_NOT_EXIST Errno = 0x20000000 | 0xC0000000 | 0x204 - ERROR_INVALID_DEVINST_NAME Errno = 0x20000000 | 0xC0000000 | 0x205 - ERROR_INVALID_CLASS Errno = 0x20000000 | 0xC0000000 | 0x206 - ERROR_DEVINST_ALREADY_EXISTS Errno = 0x20000000 | 0xC0000000 | 0x207 - ERROR_DEVINFO_NOT_REGISTERED Errno = 0x20000000 | 0xC0000000 | 0x208 - ERROR_INVALID_REG_PROPERTY Errno = 0x20000000 | 0xC0000000 | 0x209 - ERROR_NO_INF Errno = 0x20000000 | 0xC0000000 | 0x20A - ERROR_NO_SUCH_DEVINST Errno = 0x20000000 | 0xC0000000 | 0x20B - ERROR_CANT_LOAD_CLASS_ICON Errno = 0x20000000 | 0xC0000000 | 0x20C - ERROR_INVALID_CLASS_INSTALLER Errno = 0x20000000 | 0xC0000000 | 0x20D - ERROR_DI_DO_DEFAULT Errno = 0x20000000 | 0xC0000000 | 0x20E - ERROR_DI_NOFILECOPY Errno = 0x20000000 | 0xC0000000 | 0x20F - ERROR_INVALID_HWPROFILE Errno = 0x20000000 | 0xC0000000 | 0x210 - ERROR_NO_DEVICE_SELECTED Errno = 0x20000000 | 0xC0000000 | 0x211 - ERROR_DEVINFO_LIST_LOCKED Errno = 0x20000000 | 0xC0000000 | 0x212 - ERROR_DEVINFO_DATA_LOCKED Errno = 0x20000000 | 0xC0000000 | 0x213 - ERROR_DI_BAD_PATH Errno = 0x20000000 | 0xC0000000 | 0x214 - ERROR_NO_CLASSINSTALL_PARAMS Errno = 0x20000000 | 0xC0000000 | 0x215 - ERROR_FILEQUEUE_LOCKED Errno = 0x20000000 | 0xC0000000 | 0x216 - ERROR_BAD_SERVICE_INSTALLSECT Errno = 0x20000000 | 0xC0000000 | 0x217 - ERROR_NO_CLASS_DRIVER_LIST Errno = 0x20000000 | 0xC0000000 | 0x218 - ERROR_NO_ASSOCIATED_SERVICE Errno = 0x20000000 | 0xC0000000 | 0x219 - ERROR_NO_DEFAULT_DEVICE_INTERFACE Errno = 0x20000000 | 0xC0000000 | 0x21A - ERROR_DEVICE_INTERFACE_ACTIVE Errno = 0x20000000 | 0xC0000000 | 0x21B - ERROR_DEVICE_INTERFACE_REMOVED Errno = 0x20000000 | 0xC0000000 | 0x21C - ERROR_BAD_INTERFACE_INSTALLSECT Errno = 0x20000000 | 0xC0000000 | 0x21D - ERROR_NO_SUCH_INTERFACE_CLASS Errno = 0x20000000 | 0xC0000000 | 0x21E - ERROR_INVALID_REFERENCE_STRING Errno = 0x20000000 | 0xC0000000 | 0x21F - ERROR_INVALID_MACHINENAME Errno = 0x20000000 | 0xC0000000 | 0x220 - ERROR_REMOTE_COMM_FAILURE Errno = 0x20000000 | 0xC0000000 | 0x221 - ERROR_MACHINE_UNAVAILABLE Errno = 0x20000000 | 0xC0000000 | 0x222 - ERROR_NO_CONFIGMGR_SERVICES Errno = 0x20000000 | 0xC0000000 | 0x223 - ERROR_INVALID_PROPPAGE_PROVIDER Errno = 0x20000000 | 0xC0000000 | 0x224 - ERROR_NO_SUCH_DEVICE_INTERFACE Errno = 0x20000000 | 0xC0000000 | 0x225 - ERROR_DI_POSTPROCESSING_REQUIRED Errno = 0x20000000 | 0xC0000000 | 0x226 - ERROR_INVALID_COINSTALLER Errno = 0x20000000 | 0xC0000000 | 0x227 - ERROR_NO_COMPAT_DRIVERS Errno = 0x20000000 | 0xC0000000 | 0x228 - ERROR_NO_DEVICE_ICON Errno = 
0x20000000 | 0xC0000000 | 0x229 - ERROR_INVALID_INF_LOGCONFIG Errno = 0x20000000 | 0xC0000000 | 0x22A - ERROR_DI_DONT_INSTALL Errno = 0x20000000 | 0xC0000000 | 0x22B - ERROR_INVALID_FILTER_DRIVER Errno = 0x20000000 | 0xC0000000 | 0x22C - ERROR_NON_WINDOWS_NT_DRIVER Errno = 0x20000000 | 0xC0000000 | 0x22D - ERROR_NON_WINDOWS_DRIVER Errno = 0x20000000 | 0xC0000000 | 0x22E - ERROR_NO_CATALOG_FOR_OEM_INF Errno = 0x20000000 | 0xC0000000 | 0x22F - ERROR_DEVINSTALL_QUEUE_NONNATIVE Errno = 0x20000000 | 0xC0000000 | 0x230 - ERROR_NOT_DISABLEABLE Errno = 0x20000000 | 0xC0000000 | 0x231 - ERROR_CANT_REMOVE_DEVINST Errno = 0x20000000 | 0xC0000000 | 0x232 - ERROR_INVALID_TARGET Errno = 0x20000000 | 0xC0000000 | 0x233 - ERROR_DRIVER_NONNATIVE Errno = 0x20000000 | 0xC0000000 | 0x234 - ERROR_IN_WOW64 Errno = 0x20000000 | 0xC0000000 | 0x235 - ERROR_SET_SYSTEM_RESTORE_POINT Errno = 0x20000000 | 0xC0000000 | 0x236 - ERROR_SCE_DISABLED Errno = 0x20000000 | 0xC0000000 | 0x238 - ERROR_UNKNOWN_EXCEPTION Errno = 0x20000000 | 0xC0000000 | 0x239 - ERROR_PNP_REGISTRY_ERROR Errno = 0x20000000 | 0xC0000000 | 0x23A - ERROR_REMOTE_REQUEST_UNSUPPORTED Errno = 0x20000000 | 0xC0000000 | 0x23B - ERROR_NOT_AN_INSTALLED_OEM_INF Errno = 0x20000000 | 0xC0000000 | 0x23C - ERROR_INF_IN_USE_BY_DEVICES Errno = 0x20000000 | 0xC0000000 | 0x23D - ERROR_DI_FUNCTION_OBSOLETE Errno = 0x20000000 | 0xC0000000 | 0x23E - ERROR_NO_AUTHENTICODE_CATALOG Errno = 0x20000000 | 0xC0000000 | 0x23F - ERROR_AUTHENTICODE_DISALLOWED Errno = 0x20000000 | 0xC0000000 | 0x240 - ERROR_AUTHENTICODE_TRUSTED_PUBLISHER Errno = 0x20000000 | 0xC0000000 | 0x241 - ERROR_AUTHENTICODE_TRUST_NOT_ESTABLISHED Errno = 0x20000000 | 0xC0000000 | 0x242 - ERROR_AUTHENTICODE_PUBLISHER_NOT_TRUSTED Errno = 0x20000000 | 0xC0000000 | 0x243 - ERROR_SIGNATURE_OSATTRIBUTE_MISMATCH Errno = 0x20000000 | 0xC0000000 | 0x244 - ERROR_ONLY_VALIDATE_VIA_AUTHENTICODE Errno = 0x20000000 | 0xC0000000 | 0x245 - ERROR_DEVICE_INSTALLER_NOT_READY Errno = 0x20000000 | 0xC0000000 | 0x246 - ERROR_DRIVER_STORE_ADD_FAILED Errno = 0x20000000 | 0xC0000000 | 0x247 - ERROR_DEVICE_INSTALL_BLOCKED Errno = 0x20000000 | 0xC0000000 | 0x248 - ERROR_DRIVER_INSTALL_BLOCKED Errno = 0x20000000 | 0xC0000000 | 0x249 - ERROR_WRONG_INF_TYPE Errno = 0x20000000 | 0xC0000000 | 0x24A - ERROR_FILE_HASH_NOT_IN_CATALOG Errno = 0x20000000 | 0xC0000000 | 0x24B - ERROR_DRIVER_STORE_DELETE_FAILED Errno = 0x20000000 | 0xC0000000 | 0x24C - ERROR_UNRECOVERABLE_STACK_OVERFLOW Errno = 0x20000000 | 0xC0000000 | 0x300 - EXCEPTION_SPAPI_UNRECOVERABLE_STACK_OVERFLOW Errno = ERROR_UNRECOVERABLE_STACK_OVERFLOW - ERROR_NO_DEFAULT_INTERFACE_DEVICE Errno = ERROR_NO_DEFAULT_DEVICE_INTERFACE - ERROR_INTERFACE_DEVICE_ACTIVE Errno = ERROR_DEVICE_INTERFACE_ACTIVE - ERROR_INTERFACE_DEVICE_REMOVED Errno = ERROR_DEVICE_INTERFACE_REMOVED - ERROR_NO_SUCH_INTERFACE_DEVICE Errno = ERROR_NO_SUCH_DEVICE_INTERFACE -) - -const ( - MAX_DEVICE_ID_LEN = 200 - MAX_DEVNODE_ID_LEN = MAX_DEVICE_ID_LEN - MAX_GUID_STRING_LEN = 39 // 38 chars + terminator null - MAX_CLASS_NAME_LEN = 32 - MAX_PROFILE_LEN = 80 - MAX_CONFIG_VALUE = 9999 - MAX_INSTANCE_VALUE = 9999 - CONFIGMG_VERSION = 0x0400 -) - -// Maximum string length constants -const ( - LINE_LEN = 256 // Windows 9x-compatible maximum for displayable strings coming from a device INF. - MAX_INF_STRING_LENGTH = 4096 // Actual maximum size of an INF string (including string substitutions). - MAX_INF_SECTION_NAME_LENGTH = 255 // For Windows 9x compatibility, INF section names should be constrained to 32 characters. 
- MAX_TITLE_LEN = 60 - MAX_INSTRUCTION_LEN = 256 - MAX_LABEL_LEN = 30 - MAX_SERVICE_NAME_LEN = 256 - MAX_SUBTITLE_LEN = 256 -) - -const ( - // SP_MAX_MACHINENAME_LENGTH defines maximum length of a machine name in the format expected by ConfigMgr32 CM_Connect_Machine (i.e., "\\\\MachineName\0"). - SP_MAX_MACHINENAME_LENGTH = MAX_PATH + 3 -) - -// HSPFILEQ is type for setup file queue -type HSPFILEQ uintptr - -// DevInfo holds reference to device information set -type DevInfo Handle - -// DEVINST is a handle usually recognized by cfgmgr32 APIs -type DEVINST uint32 - -// DevInfoData is a device information structure (references a device instance that is a member of a device information set) -type DevInfoData struct { - size uint32 - ClassGUID GUID - DevInst DEVINST - _ uintptr -} - -// DevInfoListDetailData is a structure for detailed information on a device information set (used for SetupDiGetDeviceInfoListDetail which supersedes the functionality of SetupDiGetDeviceInfoListClass). -type DevInfoListDetailData struct { - size uint32 // Use unsafeSizeOf method - ClassGUID GUID - RemoteMachineHandle Handle - remoteMachineName [SP_MAX_MACHINENAME_LENGTH]uint16 -} - -func (*DevInfoListDetailData) unsafeSizeOf() uint32 { - if unsafe.Sizeof(uintptr(0)) == 4 { - // Windows declares this with pshpack1.h - return uint32(unsafe.Offsetof(DevInfoListDetailData{}.remoteMachineName) + unsafe.Sizeof(DevInfoListDetailData{}.remoteMachineName)) - } - return uint32(unsafe.Sizeof(DevInfoListDetailData{})) -} - -func (data *DevInfoListDetailData) RemoteMachineName() string { - return UTF16ToString(data.remoteMachineName[:]) -} - -func (data *DevInfoListDetailData) SetRemoteMachineName(remoteMachineName string) error { - str, err := UTF16FromString(remoteMachineName) - if err != nil { - return err - } - copy(data.remoteMachineName[:], str) - return nil -} - -// DI_FUNCTION is function type for device installer -type DI_FUNCTION uint32 - -const ( - DIF_SELECTDEVICE DI_FUNCTION = 0x00000001 - DIF_INSTALLDEVICE DI_FUNCTION = 0x00000002 - DIF_ASSIGNRESOURCES DI_FUNCTION = 0x00000003 - DIF_PROPERTIES DI_FUNCTION = 0x00000004 - DIF_REMOVE DI_FUNCTION = 0x00000005 - DIF_FIRSTTIMESETUP DI_FUNCTION = 0x00000006 - DIF_FOUNDDEVICE DI_FUNCTION = 0x00000007 - DIF_SELECTCLASSDRIVERS DI_FUNCTION = 0x00000008 - DIF_VALIDATECLASSDRIVERS DI_FUNCTION = 0x00000009 - DIF_INSTALLCLASSDRIVERS DI_FUNCTION = 0x0000000A - DIF_CALCDISKSPACE DI_FUNCTION = 0x0000000B - DIF_DESTROYPRIVATEDATA DI_FUNCTION = 0x0000000C - DIF_VALIDATEDRIVER DI_FUNCTION = 0x0000000D - DIF_DETECT DI_FUNCTION = 0x0000000F - DIF_INSTALLWIZARD DI_FUNCTION = 0x00000010 - DIF_DESTROYWIZARDDATA DI_FUNCTION = 0x00000011 - DIF_PROPERTYCHANGE DI_FUNCTION = 0x00000012 - DIF_ENABLECLASS DI_FUNCTION = 0x00000013 - DIF_DETECTVERIFY DI_FUNCTION = 0x00000014 - DIF_INSTALLDEVICEFILES DI_FUNCTION = 0x00000015 - DIF_UNREMOVE DI_FUNCTION = 0x00000016 - DIF_SELECTBESTCOMPATDRV DI_FUNCTION = 0x00000017 - DIF_ALLOW_INSTALL DI_FUNCTION = 0x00000018 - DIF_REGISTERDEVICE DI_FUNCTION = 0x00000019 - DIF_NEWDEVICEWIZARD_PRESELECT DI_FUNCTION = 0x0000001A - DIF_NEWDEVICEWIZARD_SELECT DI_FUNCTION = 0x0000001B - DIF_NEWDEVICEWIZARD_PREANALYZE DI_FUNCTION = 0x0000001C - DIF_NEWDEVICEWIZARD_POSTANALYZE DI_FUNCTION = 0x0000001D - DIF_NEWDEVICEWIZARD_FINISHINSTALL DI_FUNCTION = 0x0000001E - DIF_INSTALLINTERFACES DI_FUNCTION = 0x00000020 - DIF_DETECTCANCEL DI_FUNCTION = 0x00000021 - DIF_REGISTER_COINSTALLERS DI_FUNCTION = 0x00000022 - DIF_ADDPROPERTYPAGE_ADVANCED DI_FUNCTION = 0x00000023 - 
DIF_ADDPROPERTYPAGE_BASIC DI_FUNCTION = 0x00000024 - DIF_TROUBLESHOOTER DI_FUNCTION = 0x00000026 - DIF_POWERMESSAGEWAKE DI_FUNCTION = 0x00000027 - DIF_ADDREMOTEPROPERTYPAGE_ADVANCED DI_FUNCTION = 0x00000028 - DIF_UPDATEDRIVER_UI DI_FUNCTION = 0x00000029 - DIF_FINISHINSTALL_ACTION DI_FUNCTION = 0x0000002A -) - -// DevInstallParams is device installation parameters structure (associated with a particular device information element, or globally with a device information set) -type DevInstallParams struct { - size uint32 - Flags DI_FLAGS - FlagsEx DI_FLAGSEX - hwndParent uintptr - InstallMsgHandler uintptr - InstallMsgHandlerContext uintptr - FileQueue HSPFILEQ - _ uintptr - _ uint32 - driverPath [MAX_PATH]uint16 -} - -func (params *DevInstallParams) DriverPath() string { - return UTF16ToString(params.driverPath[:]) -} - -func (params *DevInstallParams) SetDriverPath(driverPath string) error { - str, err := UTF16FromString(driverPath) - if err != nil { - return err - } - copy(params.driverPath[:], str) - return nil -} - -// DI_FLAGS is SP_DEVINSTALL_PARAMS.Flags values -type DI_FLAGS uint32 - -const ( - // Flags for choosing a device - DI_SHOWOEM DI_FLAGS = 0x00000001 // support Other... button - DI_SHOWCOMPAT DI_FLAGS = 0x00000002 // show compatibility list - DI_SHOWCLASS DI_FLAGS = 0x00000004 // show class list - DI_SHOWALL DI_FLAGS = 0x00000007 // both class & compat list shown - DI_NOVCP DI_FLAGS = 0x00000008 // don't create a new copy queue--use caller-supplied FileQueue - DI_DIDCOMPAT DI_FLAGS = 0x00000010 // Searched for compatible devices - DI_DIDCLASS DI_FLAGS = 0x00000020 // Searched for class devices - DI_AUTOASSIGNRES DI_FLAGS = 0x00000040 // No UI for resources if possible - - // Flags returned by DiInstallDevice to indicate need to reboot/restart - DI_NEEDRESTART DI_FLAGS = 0x00000080 // Reboot required to take effect - DI_NEEDREBOOT DI_FLAGS = 0x00000100 // "" - - // Flags for device installation - DI_NOBROWSE DI_FLAGS = 0x00000200 // no Browse... in InsertDisk - - // Flags set by DiBuildDriverInfoList - DI_MULTMFGS DI_FLAGS = 0x00000400 // Set if multiple manufacturers in class driver list - - // Flag indicates that device is disabled - DI_DISABLED DI_FLAGS = 0x00000800 // Set if device disabled - - // Flags for Device/Class Properties - DI_GENERALPAGE_ADDED DI_FLAGS = 0x00001000 - DI_RESOURCEPAGE_ADDED DI_FLAGS = 0x00002000 - - // Flag to indicate the setting properties for this Device (or class) caused a change so the Dev Mgr UI probably needs to be updated. - DI_PROPERTIES_CHANGE DI_FLAGS = 0x00004000 - - // Flag to indicate that the sorting from the INF file should be used. - DI_INF_IS_SORTED DI_FLAGS = 0x00008000 - - // Flag to indicate that only the INF specified by SP_DEVINSTALL_PARAMS.DriverPath should be searched. - DI_ENUMSINGLEINF DI_FLAGS = 0x00010000 - - // Flag that prevents ConfigMgr from removing/re-enumerating devices during device - // registration, installation, and deletion. - DI_DONOTCALLCONFIGMG DI_FLAGS = 0x00020000 - - // The following flag can be used to install a device disabled - DI_INSTALLDISABLED DI_FLAGS = 0x00040000 - - // Flag that causes SetupDiBuildDriverInfoList to build a device's compatible driver - // list from its existing class driver list, instead of the normal INF search. - DI_COMPAT_FROM_CLASS DI_FLAGS = 0x00080000 - - // This flag is set if the Class Install params should be used. 
- DI_CLASSINSTALLPARAMS DI_FLAGS = 0x00100000 - - // This flag is set if the caller of DiCallClassInstaller does NOT want the internal default action performed if the Class installer returns ERROR_DI_DO_DEFAULT. - DI_NODI_DEFAULTACTION DI_FLAGS = 0x00200000 - - // Flags for device installation - DI_QUIETINSTALL DI_FLAGS = 0x00800000 // don't confuse the user with questions or excess info - DI_NOFILECOPY DI_FLAGS = 0x01000000 // No file Copy necessary - DI_FORCECOPY DI_FLAGS = 0x02000000 // Force files to be copied from install path - DI_DRIVERPAGE_ADDED DI_FLAGS = 0x04000000 // Prop provider added Driver page. - DI_USECI_SELECTSTRINGS DI_FLAGS = 0x08000000 // Use Class Installer Provided strings in the Select Device Dlg - DI_OVERRIDE_INFFLAGS DI_FLAGS = 0x10000000 // Override INF flags - DI_PROPS_NOCHANGEUSAGE DI_FLAGS = 0x20000000 // No Enable/Disable in General Props - - DI_NOSELECTICONS DI_FLAGS = 0x40000000 // No small icons in select device dialogs - - DI_NOWRITE_IDS DI_FLAGS = 0x80000000 // Don't write HW & Compat IDs on install -) - -// DI_FLAGSEX is SP_DEVINSTALL_PARAMS.FlagsEx values -type DI_FLAGSEX uint32 - -const ( - DI_FLAGSEX_CI_FAILED DI_FLAGSEX = 0x00000004 // Failed to Load/Call class installer - DI_FLAGSEX_FINISHINSTALL_ACTION DI_FLAGSEX = 0x00000008 // Class/co-installer wants to get a DIF_FINISH_INSTALL action in client context. - DI_FLAGSEX_DIDINFOLIST DI_FLAGSEX = 0x00000010 // Did the Class Info List - DI_FLAGSEX_DIDCOMPATINFO DI_FLAGSEX = 0x00000020 // Did the Compat Info List - DI_FLAGSEX_FILTERCLASSES DI_FLAGSEX = 0x00000040 - DI_FLAGSEX_SETFAILEDINSTALL DI_FLAGSEX = 0x00000080 - DI_FLAGSEX_DEVICECHANGE DI_FLAGSEX = 0x00000100 - DI_FLAGSEX_ALWAYSWRITEIDS DI_FLAGSEX = 0x00000200 - DI_FLAGSEX_PROPCHANGE_PENDING DI_FLAGSEX = 0x00000400 // One or more device property sheets have had changes made to them, and need to have a DIF_PROPERTYCHANGE occur. - DI_FLAGSEX_ALLOWEXCLUDEDDRVS DI_FLAGSEX = 0x00000800 - DI_FLAGSEX_NOUIONQUERYREMOVE DI_FLAGSEX = 0x00001000 - DI_FLAGSEX_USECLASSFORCOMPAT DI_FLAGSEX = 0x00002000 // Use the device's class when building compat drv list. (Ignored if DI_COMPAT_FROM_CLASS flag is specified.) - DI_FLAGSEX_NO_DRVREG_MODIFY DI_FLAGSEX = 0x00008000 // Don't run AddReg and DelReg for device's software (driver) key. - DI_FLAGSEX_IN_SYSTEM_SETUP DI_FLAGSEX = 0x00010000 // Installation is occurring during initial system setup. - DI_FLAGSEX_INET_DRIVER DI_FLAGSEX = 0x00020000 // Driver came from Windows Update - DI_FLAGSEX_APPENDDRIVERLIST DI_FLAGSEX = 0x00040000 // Cause SetupDiBuildDriverInfoList to append a new driver list to an existing list. - DI_FLAGSEX_PREINSTALLBACKUP DI_FLAGSEX = 0x00080000 // not used - DI_FLAGSEX_BACKUPONREPLACE DI_FLAGSEX = 0x00100000 // not used - DI_FLAGSEX_DRIVERLIST_FROM_URL DI_FLAGSEX = 0x00200000 // build driver list from INF(s) retrieved from URL specified in SP_DEVINSTALL_PARAMS.DriverPath (empty string means Windows Update website) - DI_FLAGSEX_EXCLUDE_OLD_INET_DRIVERS DI_FLAGSEX = 0x00800000 // Don't include old Internet drivers when building a driver list. Ignored on Windows Vista and later. - DI_FLAGSEX_POWERPAGE_ADDED DI_FLAGSEX = 0x01000000 // class installer added their own power page - DI_FLAGSEX_FILTERSIMILARDRIVERS DI_FLAGSEX = 0x02000000 // only include similar drivers in class list - DI_FLAGSEX_INSTALLEDDRIVER DI_FLAGSEX = 0x04000000 // only add the installed driver to the class or compat driver list. 
Used in calls to SetupDiBuildDriverInfoList - DI_FLAGSEX_NO_CLASSLIST_NODE_MERGE DI_FLAGSEX = 0x08000000 // Don't remove identical driver nodes from the class list - DI_FLAGSEX_ALTPLATFORM_DRVSEARCH DI_FLAGSEX = 0x10000000 // Build driver list based on alternate platform information specified in associated file queue - DI_FLAGSEX_RESTART_DEVICE_ONLY DI_FLAGSEX = 0x20000000 // only restart the device drivers are being installed on as opposed to restarting all devices using those drivers. - DI_FLAGSEX_RECURSIVESEARCH DI_FLAGSEX = 0x40000000 // Tell SetupDiBuildDriverInfoList to do a recursive search - DI_FLAGSEX_SEARCH_PUBLISHED_INFS DI_FLAGSEX = 0x80000000 // Tell SetupDiBuildDriverInfoList to do a "published INF" search -) - -// ClassInstallHeader is the first member of any class install parameters structure. It contains the device installation request code that defines the format of the rest of the install parameters structure. -type ClassInstallHeader struct { - size uint32 - InstallFunction DI_FUNCTION -} - -func MakeClassInstallHeader(installFunction DI_FUNCTION) *ClassInstallHeader { - hdr := &ClassInstallHeader{InstallFunction: installFunction} - hdr.size = uint32(unsafe.Sizeof(*hdr)) - return hdr -} - -// DICS_STATE specifies values indicating a change in a device's state -type DICS_STATE uint32 - -const ( - DICS_ENABLE DICS_STATE = 0x00000001 // The device is being enabled. - DICS_DISABLE DICS_STATE = 0x00000002 // The device is being disabled. - DICS_PROPCHANGE DICS_STATE = 0x00000003 // The properties of the device have changed. - DICS_START DICS_STATE = 0x00000004 // The device is being started (if the request is for the currently active hardware profile). - DICS_STOP DICS_STATE = 0x00000005 // The device is being stopped. The driver stack will be unloaded and the CSCONFIGFLAG_DO_NOT_START flag will be set for the device. -) - -// DICS_FLAG specifies the scope of a device property change -type DICS_FLAG uint32 - -const ( - DICS_FLAG_GLOBAL DICS_FLAG = 0x00000001 // make change in all hardware profiles - DICS_FLAG_CONFIGSPECIFIC DICS_FLAG = 0x00000002 // make change in specified profile only - DICS_FLAG_CONFIGGENERAL DICS_FLAG = 0x00000004 // 1 or more hardware profile-specific changes to follow (obsolete) -) - -// PropChangeParams is a structure corresponding to a DIF_PROPERTYCHANGE install function. -type PropChangeParams struct { - ClassInstallHeader ClassInstallHeader - StateChange DICS_STATE - Scope DICS_FLAG - HwProfile uint32 -} - -// DI_REMOVEDEVICE specifies the scope of the device removal -type DI_REMOVEDEVICE uint32 - -const ( - DI_REMOVEDEVICE_GLOBAL DI_REMOVEDEVICE = 0x00000001 // Make this change in all hardware profiles. Remove information about the device from the registry. - DI_REMOVEDEVICE_CONFIGSPECIFIC DI_REMOVEDEVICE = 0x00000002 // Make this change to only the hardware profile specified by HwProfile. this flag only applies to root-enumerated devices. When Windows removes the device from the last hardware profile in which it was configured, Windows performs a global removal. -) - -// RemoveDeviceParams is a structure corresponding to a DIF_REMOVE install function. 
-type RemoveDeviceParams struct { - ClassInstallHeader ClassInstallHeader - Scope DI_REMOVEDEVICE - HwProfile uint32 -} - -// DrvInfoData is driver information structure (member of a driver info list that may be associated with a particular device instance, or (globally) with a device information set) -type DrvInfoData struct { - size uint32 - DriverType uint32 - _ uintptr - description [LINE_LEN]uint16 - mfgName [LINE_LEN]uint16 - providerName [LINE_LEN]uint16 - DriverDate Filetime - DriverVersion uint64 -} - -func (data *DrvInfoData) Description() string { - return UTF16ToString(data.description[:]) -} - -func (data *DrvInfoData) SetDescription(description string) error { - str, err := UTF16FromString(description) - if err != nil { - return err - } - copy(data.description[:], str) - return nil -} - -func (data *DrvInfoData) MfgName() string { - return UTF16ToString(data.mfgName[:]) -} - -func (data *DrvInfoData) SetMfgName(mfgName string) error { - str, err := UTF16FromString(mfgName) - if err != nil { - return err - } - copy(data.mfgName[:], str) - return nil -} - -func (data *DrvInfoData) ProviderName() string { - return UTF16ToString(data.providerName[:]) -} - -func (data *DrvInfoData) SetProviderName(providerName string) error { - str, err := UTF16FromString(providerName) - if err != nil { - return err - } - copy(data.providerName[:], str) - return nil -} - -// IsNewer method returns true if DrvInfoData date and version is newer than supplied parameters. -func (data *DrvInfoData) IsNewer(driverDate Filetime, driverVersion uint64) bool { - if data.DriverDate.HighDateTime > driverDate.HighDateTime { - return true - } - if data.DriverDate.HighDateTime < driverDate.HighDateTime { - return false - } - - if data.DriverDate.LowDateTime > driverDate.LowDateTime { - return true - } - if data.DriverDate.LowDateTime < driverDate.LowDateTime { - return false - } - - if data.DriverVersion > driverVersion { - return true - } - if data.DriverVersion < driverVersion { - return false - } - - return false -} - -// DrvInfoDetailData is driver information details structure (provides detailed information about a particular driver information structure) -type DrvInfoDetailData struct { - size uint32 // Use unsafeSizeOf method - InfDate Filetime - compatIDsOffset uint32 - compatIDsLength uint32 - _ uintptr - sectionName [LINE_LEN]uint16 - infFileName [MAX_PATH]uint16 - drvDescription [LINE_LEN]uint16 - hardwareID [1]uint16 -} - -func (*DrvInfoDetailData) unsafeSizeOf() uint32 { - if unsafe.Sizeof(uintptr(0)) == 4 { - // Windows declares this with pshpack1.h - return uint32(unsafe.Offsetof(DrvInfoDetailData{}.hardwareID) + unsafe.Sizeof(DrvInfoDetailData{}.hardwareID)) - } - return uint32(unsafe.Sizeof(DrvInfoDetailData{})) -} - -func (data *DrvInfoDetailData) SectionName() string { - return UTF16ToString(data.sectionName[:]) -} - -func (data *DrvInfoDetailData) InfFileName() string { - return UTF16ToString(data.infFileName[:]) -} - -func (data *DrvInfoDetailData) DrvDescription() string { - return UTF16ToString(data.drvDescription[:]) -} - -func (data *DrvInfoDetailData) HardwareID() string { - if data.compatIDsOffset > 1 { - bufW := data.getBuf() - return UTF16ToString(bufW[:wcslen(bufW)]) - } - - return "" -} - -func (data *DrvInfoDetailData) CompatIDs() []string { - a := make([]string, 0) - - if data.compatIDsLength > 0 { - bufW := data.getBuf() - bufW = bufW[data.compatIDsOffset : data.compatIDsOffset+data.compatIDsLength] - for i := 0; i < len(bufW); { - j := i + wcslen(bufW[i:]) - if i < j { - a = 
append(a, UTF16ToString(bufW[i:j])) - } - i = j + 1 - } - } - - return a -} - -func (data *DrvInfoDetailData) getBuf() []uint16 { - len := (data.size - uint32(unsafe.Offsetof(data.hardwareID))) / 2 - sl := struct { - addr *uint16 - len int - cap int - }{&data.hardwareID[0], int(len), int(len)} - return *(*[]uint16)(unsafe.Pointer(&sl)) -} - -// IsCompatible method tests if given hardware ID matches the driver or is listed on the compatible ID list. -func (data *DrvInfoDetailData) IsCompatible(hwid string) bool { - hwidLC := strings.ToLower(hwid) - if strings.ToLower(data.HardwareID()) == hwidLC { - return true - } - a := data.CompatIDs() - for i := range a { - if strings.ToLower(a[i]) == hwidLC { - return true - } - } - - return false -} - -// DICD flags control SetupDiCreateDeviceInfo -type DICD uint32 - -const ( - DICD_GENERATE_ID DICD = 0x00000001 - DICD_INHERIT_CLASSDRVS DICD = 0x00000002 -) - -// SUOI flags control SetupUninstallOEMInf -type SUOI uint32 - -const ( - SUOI_FORCEDELETE SUOI = 0x0001 -) - -// SPDIT flags to distinguish between class drivers and -// device drivers. (Passed in 'DriverType' parameter of -// driver information list APIs) -type SPDIT uint32 - -const ( - SPDIT_NODRIVER SPDIT = 0x00000000 - SPDIT_CLASSDRIVER SPDIT = 0x00000001 - SPDIT_COMPATDRIVER SPDIT = 0x00000002 -) - -// DIGCF flags control what is included in the device information set built by SetupDiGetClassDevs -type DIGCF uint32 - -const ( - DIGCF_DEFAULT DIGCF = 0x00000001 // only valid with DIGCF_DEVICEINTERFACE - DIGCF_PRESENT DIGCF = 0x00000002 - DIGCF_ALLCLASSES DIGCF = 0x00000004 - DIGCF_PROFILE DIGCF = 0x00000008 - DIGCF_DEVICEINTERFACE DIGCF = 0x00000010 -) - -// DIREG specifies values for SetupDiCreateDevRegKey, SetupDiOpenDevRegKey, and SetupDiDeleteDevRegKey. -type DIREG uint32 - -const ( - DIREG_DEV DIREG = 0x00000001 // Open/Create/Delete device key - DIREG_DRV DIREG = 0x00000002 // Open/Create/Delete driver key - DIREG_BOTH DIREG = 0x00000004 // Delete both driver and Device key -) - -// SPDRP specifies device registry property codes -// (Codes marked as read-only (R) may only be used for -// SetupDiGetDeviceRegistryProperty) -// -// These values should cover the same set of registry properties -// as defined by the CM_DRP codes in cfgmgr32.h. -// -// Note that SPDRP codes are zero based while CM_DRP codes are one based! 
-type SPDRP uint32 - -const ( - SPDRP_DEVICEDESC SPDRP = 0x00000000 // DeviceDesc (R/W) - SPDRP_HARDWAREID SPDRP = 0x00000001 // HardwareID (R/W) - SPDRP_COMPATIBLEIDS SPDRP = 0x00000002 // CompatibleIDs (R/W) - SPDRP_SERVICE SPDRP = 0x00000004 // Service (R/W) - SPDRP_CLASS SPDRP = 0x00000007 // Class (R--tied to ClassGUID) - SPDRP_CLASSGUID SPDRP = 0x00000008 // ClassGUID (R/W) - SPDRP_DRIVER SPDRP = 0x00000009 // Driver (R/W) - SPDRP_CONFIGFLAGS SPDRP = 0x0000000A // ConfigFlags (R/W) - SPDRP_MFG SPDRP = 0x0000000B // Mfg (R/W) - SPDRP_FRIENDLYNAME SPDRP = 0x0000000C // FriendlyName (R/W) - SPDRP_LOCATION_INFORMATION SPDRP = 0x0000000D // LocationInformation (R/W) - SPDRP_PHYSICAL_DEVICE_OBJECT_NAME SPDRP = 0x0000000E // PhysicalDeviceObjectName (R) - SPDRP_CAPABILITIES SPDRP = 0x0000000F // Capabilities (R) - SPDRP_UI_NUMBER SPDRP = 0x00000010 // UiNumber (R) - SPDRP_UPPERFILTERS SPDRP = 0x00000011 // UpperFilters (R/W) - SPDRP_LOWERFILTERS SPDRP = 0x00000012 // LowerFilters (R/W) - SPDRP_BUSTYPEGUID SPDRP = 0x00000013 // BusTypeGUID (R) - SPDRP_LEGACYBUSTYPE SPDRP = 0x00000014 // LegacyBusType (R) - SPDRP_BUSNUMBER SPDRP = 0x00000015 // BusNumber (R) - SPDRP_ENUMERATOR_NAME SPDRP = 0x00000016 // Enumerator Name (R) - SPDRP_SECURITY SPDRP = 0x00000017 // Security (R/W, binary form) - SPDRP_SECURITY_SDS SPDRP = 0x00000018 // Security (W, SDS form) - SPDRP_DEVTYPE SPDRP = 0x00000019 // Device Type (R/W) - SPDRP_EXCLUSIVE SPDRP = 0x0000001A // Device is exclusive-access (R/W) - SPDRP_CHARACTERISTICS SPDRP = 0x0000001B // Device Characteristics (R/W) - SPDRP_ADDRESS SPDRP = 0x0000001C // Device Address (R) - SPDRP_UI_NUMBER_DESC_FORMAT SPDRP = 0x0000001D // UiNumberDescFormat (R/W) - SPDRP_DEVICE_POWER_DATA SPDRP = 0x0000001E // Device Power Data (R) - SPDRP_REMOVAL_POLICY SPDRP = 0x0000001F // Removal Policy (R) - SPDRP_REMOVAL_POLICY_HW_DEFAULT SPDRP = 0x00000020 // Hardware Removal Policy (R) - SPDRP_REMOVAL_POLICY_OVERRIDE SPDRP = 0x00000021 // Removal Policy Override (RW) - SPDRP_INSTALL_STATE SPDRP = 0x00000022 // Device Install State (R) - SPDRP_LOCATION_PATHS SPDRP = 0x00000023 // Device Location Paths (R) - SPDRP_BASE_CONTAINERID SPDRP = 0x00000024 // Base ContainerID (R) - - SPDRP_MAXIMUM_PROPERTY SPDRP = 0x00000025 // Upper bound on ordinals -) - -// DEVPROPTYPE represents the property-data-type identifier that specifies the -// data type of a device property value in the unified device property model. 
-type DEVPROPTYPE uint32 - -const ( - DEVPROP_TYPEMOD_ARRAY DEVPROPTYPE = 0x00001000 - DEVPROP_TYPEMOD_LIST DEVPROPTYPE = 0x00002000 - - DEVPROP_TYPE_EMPTY DEVPROPTYPE = 0x00000000 - DEVPROP_TYPE_NULL DEVPROPTYPE = 0x00000001 - DEVPROP_TYPE_SBYTE DEVPROPTYPE = 0x00000002 - DEVPROP_TYPE_BYTE DEVPROPTYPE = 0x00000003 - DEVPROP_TYPE_INT16 DEVPROPTYPE = 0x00000004 - DEVPROP_TYPE_UINT16 DEVPROPTYPE = 0x00000005 - DEVPROP_TYPE_INT32 DEVPROPTYPE = 0x00000006 - DEVPROP_TYPE_UINT32 DEVPROPTYPE = 0x00000007 - DEVPROP_TYPE_INT64 DEVPROPTYPE = 0x00000008 - DEVPROP_TYPE_UINT64 DEVPROPTYPE = 0x00000009 - DEVPROP_TYPE_FLOAT DEVPROPTYPE = 0x0000000A - DEVPROP_TYPE_DOUBLE DEVPROPTYPE = 0x0000000B - DEVPROP_TYPE_DECIMAL DEVPROPTYPE = 0x0000000C - DEVPROP_TYPE_GUID DEVPROPTYPE = 0x0000000D - DEVPROP_TYPE_CURRENCY DEVPROPTYPE = 0x0000000E - DEVPROP_TYPE_DATE DEVPROPTYPE = 0x0000000F - DEVPROP_TYPE_FILETIME DEVPROPTYPE = 0x00000010 - DEVPROP_TYPE_BOOLEAN DEVPROPTYPE = 0x00000011 - DEVPROP_TYPE_STRING DEVPROPTYPE = 0x00000012 - DEVPROP_TYPE_STRING_LIST DEVPROPTYPE = DEVPROP_TYPE_STRING | DEVPROP_TYPEMOD_LIST - DEVPROP_TYPE_SECURITY_DESCRIPTOR DEVPROPTYPE = 0x00000013 - DEVPROP_TYPE_SECURITY_DESCRIPTOR_STRING DEVPROPTYPE = 0x00000014 - DEVPROP_TYPE_DEVPROPKEY DEVPROPTYPE = 0x00000015 - DEVPROP_TYPE_DEVPROPTYPE DEVPROPTYPE = 0x00000016 - DEVPROP_TYPE_BINARY DEVPROPTYPE = DEVPROP_TYPE_BYTE | DEVPROP_TYPEMOD_ARRAY - DEVPROP_TYPE_ERROR DEVPROPTYPE = 0x00000017 - DEVPROP_TYPE_NTSTATUS DEVPROPTYPE = 0x00000018 - DEVPROP_TYPE_STRING_INDIRECT DEVPROPTYPE = 0x00000019 - - MAX_DEVPROP_TYPE DEVPROPTYPE = 0x00000019 - MAX_DEVPROP_TYPEMOD DEVPROPTYPE = 0x00002000 - - DEVPROP_MASK_TYPE DEVPROPTYPE = 0x00000FFF - DEVPROP_MASK_TYPEMOD DEVPROPTYPE = 0x0000F000 -) - -// DEVPROPGUID specifies a property category. -type DEVPROPGUID GUID - -// DEVPROPID uniquely identifies the property within the property category. -type DEVPROPID uint32 - -const DEVPROPID_FIRST_USABLE DEVPROPID = 2 - -// DEVPROPKEY represents a device property key for a device property in the -// unified device property model. 
-type DEVPROPKEY struct { - FmtID DEVPROPGUID - PID DEVPROPID -} - -// CONFIGRET is a return value or error code from cfgmgr32 APIs -type CONFIGRET uint32 - -func (ret CONFIGRET) Error() string { - if win32Error, ok := ret.Unwrap().(Errno); ok { - return fmt.Sprintf("%s (CfgMgr error: 0x%08x)", win32Error.Error(), uint32(ret)) - } - return fmt.Sprintf("CfgMgr error: 0x%08x", uint32(ret)) -} - -func (ret CONFIGRET) Win32Error(defaultError Errno) Errno { - return cm_MapCrToWin32Err(ret, defaultError) -} - -func (ret CONFIGRET) Unwrap() error { - const noMatch = Errno(^uintptr(0)) - win32Error := ret.Win32Error(noMatch) - if win32Error == noMatch { - return nil - } - return win32Error -} - -const ( - CR_SUCCESS CONFIGRET = 0x00000000 - CR_DEFAULT CONFIGRET = 0x00000001 - CR_OUT_OF_MEMORY CONFIGRET = 0x00000002 - CR_INVALID_POINTER CONFIGRET = 0x00000003 - CR_INVALID_FLAG CONFIGRET = 0x00000004 - CR_INVALID_DEVNODE CONFIGRET = 0x00000005 - CR_INVALID_DEVINST = CR_INVALID_DEVNODE - CR_INVALID_RES_DES CONFIGRET = 0x00000006 - CR_INVALID_LOG_CONF CONFIGRET = 0x00000007 - CR_INVALID_ARBITRATOR CONFIGRET = 0x00000008 - CR_INVALID_NODELIST CONFIGRET = 0x00000009 - CR_DEVNODE_HAS_REQS CONFIGRET = 0x0000000A - CR_DEVINST_HAS_REQS = CR_DEVNODE_HAS_REQS - CR_INVALID_RESOURCEID CONFIGRET = 0x0000000B - CR_DLVXD_NOT_FOUND CONFIGRET = 0x0000000C - CR_NO_SUCH_DEVNODE CONFIGRET = 0x0000000D - CR_NO_SUCH_DEVINST = CR_NO_SUCH_DEVNODE - CR_NO_MORE_LOG_CONF CONFIGRET = 0x0000000E - CR_NO_MORE_RES_DES CONFIGRET = 0x0000000F - CR_ALREADY_SUCH_DEVNODE CONFIGRET = 0x00000010 - CR_ALREADY_SUCH_DEVINST = CR_ALREADY_SUCH_DEVNODE - CR_INVALID_RANGE_LIST CONFIGRET = 0x00000011 - CR_INVALID_RANGE CONFIGRET = 0x00000012 - CR_FAILURE CONFIGRET = 0x00000013 - CR_NO_SUCH_LOGICAL_DEV CONFIGRET = 0x00000014 - CR_CREATE_BLOCKED CONFIGRET = 0x00000015 - CR_NOT_SYSTEM_VM CONFIGRET = 0x00000016 - CR_REMOVE_VETOED CONFIGRET = 0x00000017 - CR_APM_VETOED CONFIGRET = 0x00000018 - CR_INVALID_LOAD_TYPE CONFIGRET = 0x00000019 - CR_BUFFER_SMALL CONFIGRET = 0x0000001A - CR_NO_ARBITRATOR CONFIGRET = 0x0000001B - CR_NO_REGISTRY_HANDLE CONFIGRET = 0x0000001C - CR_REGISTRY_ERROR CONFIGRET = 0x0000001D - CR_INVALID_DEVICE_ID CONFIGRET = 0x0000001E - CR_INVALID_DATA CONFIGRET = 0x0000001F - CR_INVALID_API CONFIGRET = 0x00000020 - CR_DEVLOADER_NOT_READY CONFIGRET = 0x00000021 - CR_NEED_RESTART CONFIGRET = 0x00000022 - CR_NO_MORE_HW_PROFILES CONFIGRET = 0x00000023 - CR_DEVICE_NOT_THERE CONFIGRET = 0x00000024 - CR_NO_SUCH_VALUE CONFIGRET = 0x00000025 - CR_WRONG_TYPE CONFIGRET = 0x00000026 - CR_INVALID_PRIORITY CONFIGRET = 0x00000027 - CR_NOT_DISABLEABLE CONFIGRET = 0x00000028 - CR_FREE_RESOURCES CONFIGRET = 0x00000029 - CR_QUERY_VETOED CONFIGRET = 0x0000002A - CR_CANT_SHARE_IRQ CONFIGRET = 0x0000002B - CR_NO_DEPENDENT CONFIGRET = 0x0000002C - CR_SAME_RESOURCES CONFIGRET = 0x0000002D - CR_NO_SUCH_REGISTRY_KEY CONFIGRET = 0x0000002E - CR_INVALID_MACHINENAME CONFIGRET = 0x0000002F - CR_REMOTE_COMM_FAILURE CONFIGRET = 0x00000030 - CR_MACHINE_UNAVAILABLE CONFIGRET = 0x00000031 - CR_NO_CM_SERVICES CONFIGRET = 0x00000032 - CR_ACCESS_DENIED CONFIGRET = 0x00000033 - CR_CALL_NOT_IMPLEMENTED CONFIGRET = 0x00000034 - CR_INVALID_PROPERTY CONFIGRET = 0x00000035 - CR_DEVICE_INTERFACE_ACTIVE CONFIGRET = 0x00000036 - CR_NO_SUCH_DEVICE_INTERFACE CONFIGRET = 0x00000037 - CR_INVALID_REFERENCE_STRING CONFIGRET = 0x00000038 - CR_INVALID_CONFLICT_LIST CONFIGRET = 0x00000039 - CR_INVALID_INDEX CONFIGRET = 0x0000003A - CR_INVALID_STRUCTURE_SIZE CONFIGRET = 0x0000003B - 
NUM_CR_RESULTS CONFIGRET = 0x0000003C -) - -const ( - CM_GET_DEVICE_INTERFACE_LIST_PRESENT = 0 // only currently 'live' device interfaces - CM_GET_DEVICE_INTERFACE_LIST_ALL_DEVICES = 1 // all registered device interfaces, live or not -) - -const ( - DN_ROOT_ENUMERATED = 0x00000001 // Was enumerated by ROOT - DN_DRIVER_LOADED = 0x00000002 // Has Register_Device_Driver - DN_ENUM_LOADED = 0x00000004 // Has Register_Enumerator - DN_STARTED = 0x00000008 // Is currently configured - DN_MANUAL = 0x00000010 // Manually installed - DN_NEED_TO_ENUM = 0x00000020 // May need reenumeration - DN_NOT_FIRST_TIME = 0x00000040 // Has received a config - DN_HARDWARE_ENUM = 0x00000080 // Enum generates hardware ID - DN_LIAR = 0x00000100 // Lied about can reconfig once - DN_HAS_MARK = 0x00000200 // Not CM_Create_DevInst lately - DN_HAS_PROBLEM = 0x00000400 // Need device installer - DN_FILTERED = 0x00000800 // Is filtered - DN_MOVED = 0x00001000 // Has been moved - DN_DISABLEABLE = 0x00002000 // Can be disabled - DN_REMOVABLE = 0x00004000 // Can be removed - DN_PRIVATE_PROBLEM = 0x00008000 // Has a private problem - DN_MF_PARENT = 0x00010000 // Multi function parent - DN_MF_CHILD = 0x00020000 // Multi function child - DN_WILL_BE_REMOVED = 0x00040000 // DevInst is being removed - DN_NOT_FIRST_TIMEE = 0x00080000 // Has received a config enumerate - DN_STOP_FREE_RES = 0x00100000 // When child is stopped, free resources - DN_REBAL_CANDIDATE = 0x00200000 // Don't skip during rebalance - DN_BAD_PARTIAL = 0x00400000 // This devnode's log_confs do not have same resources - DN_NT_ENUMERATOR = 0x00800000 // This devnode's is an NT enumerator - DN_NT_DRIVER = 0x01000000 // This devnode's is an NT driver - DN_NEEDS_LOCKING = 0x02000000 // Devnode need lock resume processing - DN_ARM_WAKEUP = 0x04000000 // Devnode can be the wakeup device - DN_APM_ENUMERATOR = 0x08000000 // APM aware enumerator - DN_APM_DRIVER = 0x10000000 // APM aware driver - DN_SILENT_INSTALL = 0x20000000 // Silent install - DN_NO_SHOW_IN_DM = 0x40000000 // No show in device manager - DN_BOOT_LOG_PROB = 0x80000000 // Had a problem during preassignment of boot log conf - DN_NEED_RESTART = DN_LIAR // System needs to be restarted for this Devnode to work properly - DN_DRIVER_BLOCKED = DN_NOT_FIRST_TIME // One or more drivers are blocked from loading for this Devnode - DN_LEGACY_DRIVER = DN_MOVED // This device is using a legacy driver - DN_CHILD_WITH_INVALID_ID = DN_HAS_MARK // One or more children have invalid IDs - DN_DEVICE_DISCONNECTED = DN_NEEDS_LOCKING // The function driver for a device reported that the device is not connected. Typically this means a wireless device is out of range. 
- DN_QUERY_REMOVE_PENDING = DN_MF_PARENT // Device is part of a set of related devices collectively pending query-removal - DN_QUERY_REMOVE_ACTIVE = DN_MF_CHILD // Device is actively engaged in a query-remove IRP - DN_CHANGEABLE_FLAGS = DN_NOT_FIRST_TIME | DN_HARDWARE_ENUM | DN_HAS_MARK | DN_DISABLEABLE | DN_REMOVABLE | DN_MF_CHILD | DN_MF_PARENT | DN_NOT_FIRST_TIMEE | DN_STOP_FREE_RES | DN_REBAL_CANDIDATE | DN_NT_ENUMERATOR | DN_NT_DRIVER | DN_SILENT_INSTALL | DN_NO_SHOW_IN_DM -) - -//sys setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineName *uint16, reserved uintptr) (handle DevInfo, err error) [failretval==DevInfo(InvalidHandle)] = setupapi.SetupDiCreateDeviceInfoListExW - -// SetupDiCreateDeviceInfoListEx function creates an empty device information set on a remote or a local computer and optionally associates the set with a device setup class. -func SetupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineName string) (deviceInfoSet DevInfo, err error) { - var machineNameUTF16 *uint16 - if machineName != "" { - machineNameUTF16, err = UTF16PtrFromString(machineName) - if err != nil { - return - } - } - return setupDiCreateDeviceInfoListEx(classGUID, hwndParent, machineNameUTF16, 0) -} - -//sys setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailData *DevInfoListDetailData) (err error) = setupapi.SetupDiGetDeviceInfoListDetailW - -// SetupDiGetDeviceInfoListDetail function retrieves information associated with a device information set including the class GUID, remote computer handle, and remote computer name. -func SetupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo) (deviceInfoSetDetailData *DevInfoListDetailData, err error) { - data := &DevInfoListDetailData{} - data.size = data.unsafeSizeOf() - - return data, setupDiGetDeviceInfoListDetail(deviceInfoSet, data) -} - -// DeviceInfoListDetail method retrieves information associated with a device information set including the class GUID, remote computer handle, and remote computer name. -func (deviceInfoSet DevInfo) DeviceInfoListDetail() (*DevInfoListDetailData, error) { - return SetupDiGetDeviceInfoListDetail(deviceInfoSet) -} - -//sys setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUID *GUID, DeviceDescription *uint16, hwndParent uintptr, CreationFlags DICD, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiCreateDeviceInfoW - -// SetupDiCreateDeviceInfo function creates a new device information element and adds it as a new member to the specified device information set. -func SetupDiCreateDeviceInfo(deviceInfoSet DevInfo, deviceName string, classGUID *GUID, deviceDescription string, hwndParent uintptr, creationFlags DICD) (deviceInfoData *DevInfoData, err error) { - deviceNameUTF16, err := UTF16PtrFromString(deviceName) - if err != nil { - return - } - - var deviceDescriptionUTF16 *uint16 - if deviceDescription != "" { - deviceDescriptionUTF16, err = UTF16PtrFromString(deviceDescription) - if err != nil { - return - } - } - - data := &DevInfoData{} - data.size = uint32(unsafe.Sizeof(*data)) - - return data, setupDiCreateDeviceInfo(deviceInfoSet, deviceNameUTF16, classGUID, deviceDescriptionUTF16, hwndParent, creationFlags, data) -} - -// CreateDeviceInfo method creates a new device information element and adds it as a new member to the specified device information set. 
-func (deviceInfoSet DevInfo) CreateDeviceInfo(deviceName string, classGUID *GUID, deviceDescription string, hwndParent uintptr, creationFlags DICD) (*DevInfoData, error) { - return SetupDiCreateDeviceInfo(deviceInfoSet, deviceName, classGUID, deviceDescription, hwndParent, creationFlags) -} - -//sys setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiEnumDeviceInfo - -// SetupDiEnumDeviceInfo function returns a DevInfoData structure that specifies a device information element in a device information set. -func SetupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex int) (*DevInfoData, error) { - data := &DevInfoData{} - data.size = uint32(unsafe.Sizeof(*data)) - - return data, setupDiEnumDeviceInfo(deviceInfoSet, uint32(memberIndex), data) -} - -// EnumDeviceInfo method returns a DevInfoData structure that specifies a device information element in a device information set. -func (deviceInfoSet DevInfo) EnumDeviceInfo(memberIndex int) (*DevInfoData, error) { - return SetupDiEnumDeviceInfo(deviceInfoSet, memberIndex) -} - -// SetupDiDestroyDeviceInfoList function deletes a device information set and frees all associated memory. -//sys SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) = setupapi.SetupDiDestroyDeviceInfoList - -// Close method deletes a device information set and frees all associated memory. -func (deviceInfoSet DevInfo) Close() error { - return SetupDiDestroyDeviceInfoList(deviceInfoSet) -} - -//sys SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) = setupapi.SetupDiBuildDriverInfoList - -// BuildDriverInfoList method builds a list of drivers that is associated with a specific device or with the global class driver list for a device information set. -func (deviceInfoSet DevInfo) BuildDriverInfoList(deviceInfoData *DevInfoData, driverType SPDIT) error { - return SetupDiBuildDriverInfoList(deviceInfoSet, deviceInfoData, driverType) -} - -//sys SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) = setupapi.SetupDiCancelDriverInfoSearch - -// CancelDriverInfoSearch method cancels a driver list search that is currently in progress in a different thread. -func (deviceInfoSet DevInfo) CancelDriverInfoSearch() error { - return SetupDiCancelDriverInfoSearch(deviceInfoSet) -} - -//sys setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex uint32, driverInfoData *DrvInfoData) (err error) = setupapi.SetupDiEnumDriverInfoW - -// SetupDiEnumDriverInfo function enumerates the members of a driver list. -func SetupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex int) (*DrvInfoData, error) { - data := &DrvInfoData{} - data.size = uint32(unsafe.Sizeof(*data)) - - return data, setupDiEnumDriverInfo(deviceInfoSet, deviceInfoData, driverType, uint32(memberIndex), data) -} - -// EnumDriverInfo method enumerates the members of a driver list. 
-func (deviceInfoSet DevInfo) EnumDriverInfo(deviceInfoData *DevInfoData, driverType SPDIT, memberIndex int) (*DrvInfoData, error) { - return SetupDiEnumDriverInfo(deviceInfoSet, deviceInfoData, driverType, memberIndex) -} - -//sys setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) = setupapi.SetupDiGetSelectedDriverW - -// SetupDiGetSelectedDriver function retrieves the selected driver for a device information set or a particular device information element. -func SetupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (*DrvInfoData, error) { - data := &DrvInfoData{} - data.size = uint32(unsafe.Sizeof(*data)) - - return data, setupDiGetSelectedDriver(deviceInfoSet, deviceInfoData, data) -} - -// SelectedDriver method retrieves the selected driver for a device information set or a particular device information element. -func (deviceInfoSet DevInfo) SelectedDriver(deviceInfoData *DevInfoData) (*DrvInfoData, error) { - return SetupDiGetSelectedDriver(deviceInfoSet, deviceInfoData) -} - -//sys SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) = setupapi.SetupDiSetSelectedDriverW - -// SetSelectedDriver method sets, or resets, the selected driver for a device information element or the selected class driver for a device information set. -func (deviceInfoSet DevInfo) SetSelectedDriver(deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) error { - return SetupDiSetSelectedDriver(deviceInfoSet, deviceInfoData, driverInfoData) -} - -//sys setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData, driverInfoDetailData *DrvInfoDetailData, driverInfoDetailDataSize uint32, requiredSize *uint32) (err error) = setupapi.SetupDiGetDriverInfoDetailW - -// SetupDiGetDriverInfoDetail function retrieves driver information detail for a device information set or a particular device information element in the device information set. -func SetupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (*DrvInfoDetailData, error) { - reqSize := uint32(2048) - for { - buf := make([]byte, reqSize) - data := (*DrvInfoDetailData)(unsafe.Pointer(&buf[0])) - data.size = data.unsafeSizeOf() - err := setupDiGetDriverInfoDetail(deviceInfoSet, deviceInfoData, driverInfoData, data, uint32(len(buf)), &reqSize) - if err == ERROR_INSUFFICIENT_BUFFER { - continue - } - if err != nil { - return nil, err - } - data.size = reqSize - return data, nil - } -} - -// DriverInfoDetail method retrieves driver information detail for a device information set or a particular device information element in the device information set. -func (deviceInfoSet DevInfo) DriverInfoDetail(deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (*DrvInfoDetailData, error) { - return SetupDiGetDriverInfoDetail(deviceInfoSet, deviceInfoData, driverInfoData) -} - -//sys SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) = setupapi.SetupDiDestroyDriverInfoList - -// DestroyDriverInfoList method deletes a driver list. 
-func (deviceInfoSet DevInfo) DestroyDriverInfoList(deviceInfoData *DevInfoData, driverType SPDIT) error { - return SetupDiDestroyDriverInfoList(deviceInfoSet, deviceInfoData, driverType) -} - -//sys setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintptr, Flags DIGCF, deviceInfoSet DevInfo, machineName *uint16, reserved uintptr) (handle DevInfo, err error) [failretval==DevInfo(InvalidHandle)] = setupapi.SetupDiGetClassDevsExW - -// SetupDiGetClassDevsEx function returns a handle to a device information set that contains requested device information elements for a local or a remote computer. -func SetupDiGetClassDevsEx(classGUID *GUID, enumerator string, hwndParent uintptr, flags DIGCF, deviceInfoSet DevInfo, machineName string) (handle DevInfo, err error) { - var enumeratorUTF16 *uint16 - if enumerator != "" { - enumeratorUTF16, err = UTF16PtrFromString(enumerator) - if err != nil { - return - } - } - var machineNameUTF16 *uint16 - if machineName != "" { - machineNameUTF16, err = UTF16PtrFromString(machineName) - if err != nil { - return - } - } - return setupDiGetClassDevsEx(classGUID, enumeratorUTF16, hwndParent, flags, deviceInfoSet, machineNameUTF16, 0) -} - -// SetupDiCallClassInstaller function calls the appropriate class installer, and any registered co-installers, with the specified installation request (DIF code). -//sys SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiCallClassInstaller - -// CallClassInstaller member calls the appropriate class installer, and any registered co-installers, with the specified installation request (DIF code). -func (deviceInfoSet DevInfo) CallClassInstaller(installFunction DI_FUNCTION, deviceInfoData *DevInfoData) error { - return SetupDiCallClassInstaller(installFunction, deviceInfoSet, deviceInfoData) -} - -// SetupDiOpenDevRegKey function opens a registry key for device-specific configuration information. -//sys SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (key Handle, err error) [failretval==InvalidHandle] = setupapi.SetupDiOpenDevRegKey - -// OpenDevRegKey method opens a registry key for device-specific configuration information. -func (deviceInfoSet DevInfo) OpenDevRegKey(DeviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (Handle, error) { - return SetupDiOpenDevRegKey(deviceInfoSet, DeviceInfoData, Scope, HwProfile, KeyType, samDesired) -} - -//sys setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, propertyKey *DEVPROPKEY, propertyType *DEVPROPTYPE, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32, flags uint32) (err error) = setupapi.SetupDiGetDevicePropertyW - -// SetupDiGetDeviceProperty function retrieves a specified device instance property. 
-func SetupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, propertyKey *DEVPROPKEY) (value interface{}, err error) { - reqSize := uint32(256) - for { - var dataType DEVPROPTYPE - buf := make([]byte, reqSize) - err = setupDiGetDeviceProperty(deviceInfoSet, deviceInfoData, propertyKey, &dataType, &buf[0], uint32(len(buf)), &reqSize, 0) - if err == ERROR_INSUFFICIENT_BUFFER { - continue - } - if err != nil { - return - } - switch dataType { - case DEVPROP_TYPE_STRING: - ret := UTF16ToString(bufToUTF16(buf)) - runtime.KeepAlive(buf) - return ret, nil - } - return nil, errors.New("unimplemented property type") - } -} - -//sys setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyRegDataType *uint32, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32) (err error) = setupapi.SetupDiGetDeviceRegistryPropertyW - -// SetupDiGetDeviceRegistryProperty function retrieves a specified Plug and Play device property. -func SetupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP) (value interface{}, err error) { - reqSize := uint32(256) - for { - var dataType uint32 - buf := make([]byte, reqSize) - err = setupDiGetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property, &dataType, &buf[0], uint32(len(buf)), &reqSize) - if err == ERROR_INSUFFICIENT_BUFFER { - continue - } - if err != nil { - return - } - return getRegistryValue(buf[:reqSize], dataType) - } -} - -func getRegistryValue(buf []byte, dataType uint32) (interface{}, error) { - switch dataType { - case REG_SZ: - ret := UTF16ToString(bufToUTF16(buf)) - runtime.KeepAlive(buf) - return ret, nil - case REG_EXPAND_SZ: - value := UTF16ToString(bufToUTF16(buf)) - if value == "" { - return "", nil - } - p, err := syscall.UTF16PtrFromString(value) - if err != nil { - return "", err - } - ret := make([]uint16, 100) - for { - n, err := ExpandEnvironmentStrings(p, &ret[0], uint32(len(ret))) - if err != nil { - return "", err - } - if n <= uint32(len(ret)) { - return UTF16ToString(ret[:n]), nil - } - ret = make([]uint16, n) - } - case REG_BINARY: - return buf, nil - case REG_DWORD_LITTLE_ENDIAN: - return binary.LittleEndian.Uint32(buf), nil - case REG_DWORD_BIG_ENDIAN: - return binary.BigEndian.Uint32(buf), nil - case REG_MULTI_SZ: - bufW := bufToUTF16(buf) - a := []string{} - for i := 0; i < len(bufW); { - j := i + wcslen(bufW[i:]) - if i < j { - a = append(a, UTF16ToString(bufW[i:j])) - } - i = j + 1 - } - runtime.KeepAlive(buf) - return a, nil - case REG_QWORD_LITTLE_ENDIAN: - return binary.LittleEndian.Uint64(buf), nil - default: - return nil, fmt.Errorf("Unsupported registry value type: %v", dataType) - } -} - -// bufToUTF16 function reinterprets []byte buffer as []uint16 -func bufToUTF16(buf []byte) []uint16 { - sl := struct { - addr *uint16 - len int - cap int - }{(*uint16)(unsafe.Pointer(&buf[0])), len(buf) / 2, cap(buf) / 2} - return *(*[]uint16)(unsafe.Pointer(&sl)) -} - -// utf16ToBuf function reinterprets []uint16 as []byte -func utf16ToBuf(buf []uint16) []byte { - sl := struct { - addr *byte - len int - cap int - }{(*byte)(unsafe.Pointer(&buf[0])), len(buf) * 2, cap(buf) * 2} - return *(*[]byte)(unsafe.Pointer(&sl)) -} - -func wcslen(str []uint16) int { - for i := 0; i < len(str); i++ { - if str[i] == 0 { - return i - } - } - return len(str) -} - -// DeviceRegistryProperty method retrieves a specified Plug and Play device property. 
-func (deviceInfoSet DevInfo) DeviceRegistryProperty(deviceInfoData *DevInfoData, property SPDRP) (interface{}, error) { - return SetupDiGetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property) -} - -//sys setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffer *byte, propertyBufferSize uint32) (err error) = setupapi.SetupDiSetDeviceRegistryPropertyW - -// SetupDiSetDeviceRegistryProperty function sets a Plug and Play device property for a device. -func SetupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffers []byte) error { - return setupDiSetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property, &propertyBuffers[0], uint32(len(propertyBuffers))) -} - -// SetDeviceRegistryProperty function sets a Plug and Play device property for a device. -func (deviceInfoSet DevInfo) SetDeviceRegistryProperty(deviceInfoData *DevInfoData, property SPDRP, propertyBuffers []byte) error { - return SetupDiSetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property, propertyBuffers) -} - -// SetDeviceRegistryPropertyString method sets a Plug and Play device property string for a device. -func (deviceInfoSet DevInfo) SetDeviceRegistryPropertyString(deviceInfoData *DevInfoData, property SPDRP, str string) error { - str16, err := UTF16FromString(str) - if err != nil { - return err - } - err = SetupDiSetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property, utf16ToBuf(append(str16, 0))) - runtime.KeepAlive(str16) - return err -} - -//sys setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) = setupapi.SetupDiGetDeviceInstallParamsW - -// SetupDiGetDeviceInstallParams function retrieves device installation parameters for a device information set or a particular device information element. -func SetupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (*DevInstallParams, error) { - params := &DevInstallParams{} - params.size = uint32(unsafe.Sizeof(*params)) - - return params, setupDiGetDeviceInstallParams(deviceInfoSet, deviceInfoData, params) -} - -// DeviceInstallParams method retrieves device installation parameters for a device information set or a particular device information element. -func (deviceInfoSet DevInfo) DeviceInstallParams(deviceInfoData *DevInfoData) (*DevInstallParams, error) { - return SetupDiGetDeviceInstallParams(deviceInfoSet, deviceInfoData) -} - -//sys setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, instanceId *uint16, instanceIdSize uint32, instanceIdRequiredSize *uint32) (err error) = setupapi.SetupDiGetDeviceInstanceIdW - -// SetupDiGetDeviceInstanceId function retrieves the instance ID of the device. -func SetupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (string, error) { - reqSize := uint32(1024) - for { - buf := make([]uint16, reqSize) - err := setupDiGetDeviceInstanceId(deviceInfoSet, deviceInfoData, &buf[0], uint32(len(buf)), &reqSize) - if err == ERROR_INSUFFICIENT_BUFFER { - continue - } - if err != nil { - return "", err - } - return UTF16ToString(buf), nil - } -} - -// DeviceInstanceID method retrieves the instance ID of the device. 
-func (deviceInfoSet DevInfo) DeviceInstanceID(deviceInfoData *DevInfoData) (string, error) { - return SetupDiGetDeviceInstanceId(deviceInfoSet, deviceInfoData) -} - -// SetupDiGetClassInstallParams function retrieves class installation parameters for a device information set or a particular device information element. -//sys SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) (err error) = setupapi.SetupDiGetClassInstallParamsW - -// ClassInstallParams method retrieves class installation parameters for a device information set or a particular device information element. -func (deviceInfoSet DevInfo) ClassInstallParams(deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) error { - return SetupDiGetClassInstallParams(deviceInfoSet, deviceInfoData, classInstallParams, classInstallParamsSize, requiredSize) -} - -//sys SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) = setupapi.SetupDiSetDeviceInstallParamsW - -// SetDeviceInstallParams member sets device installation parameters for a device information set or a particular device information element. -func (deviceInfoSet DevInfo) SetDeviceInstallParams(deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) error { - return SetupDiSetDeviceInstallParams(deviceInfoSet, deviceInfoData, deviceInstallParams) -} - -// SetupDiSetClassInstallParams function sets or clears class install parameters for a device information set or a particular device information element. -//sys SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) (err error) = setupapi.SetupDiSetClassInstallParamsW - -// SetClassInstallParams method sets or clears class install parameters for a device information set or a particular device information element. -func (deviceInfoSet DevInfo) SetClassInstallParams(deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) error { - return SetupDiSetClassInstallParams(deviceInfoSet, deviceInfoData, classInstallParams, classInstallParamsSize) -} - -//sys setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) = setupapi.SetupDiClassNameFromGuidExW - -// SetupDiClassNameFromGuidEx function retrieves the class name associated with a class GUID. The class can be installed on a local or remote computer. -func SetupDiClassNameFromGuidEx(classGUID *GUID, machineName string) (className string, err error) { - var classNameUTF16 [MAX_CLASS_NAME_LEN]uint16 - - var machineNameUTF16 *uint16 - if machineName != "" { - machineNameUTF16, err = UTF16PtrFromString(machineName) - if err != nil { - return - } - } - - err = setupDiClassNameFromGuidEx(classGUID, &classNameUTF16[0], MAX_CLASS_NAME_LEN, nil, machineNameUTF16, 0) - if err != nil { - return - } - - className = UTF16ToString(classNameUTF16[:]) - return -} - -//sys setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGuidListSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) = setupapi.SetupDiClassGuidsFromNameExW - -// SetupDiClassGuidsFromNameEx function retrieves the GUIDs associated with the specified class name. 
This resulting list contains the classes currently installed on a local or remote computer. -func SetupDiClassGuidsFromNameEx(className string, machineName string) ([]GUID, error) { - classNameUTF16, err := UTF16PtrFromString(className) - if err != nil { - return nil, err - } - - var machineNameUTF16 *uint16 - if machineName != "" { - machineNameUTF16, err = UTF16PtrFromString(machineName) - if err != nil { - return nil, err - } - } - - reqSize := uint32(4) - for { - buf := make([]GUID, reqSize) - err = setupDiClassGuidsFromNameEx(classNameUTF16, &buf[0], uint32(len(buf)), &reqSize, machineNameUTF16, 0) - if err == ERROR_INSUFFICIENT_BUFFER { - continue - } - if err != nil { - return nil, err - } - return buf[:reqSize], nil - } -} - -//sys setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiGetSelectedDevice - -// SetupDiGetSelectedDevice function retrieves the selected device information element in a device information set. -func SetupDiGetSelectedDevice(deviceInfoSet DevInfo) (*DevInfoData, error) { - data := &DevInfoData{} - data.size = uint32(unsafe.Sizeof(*data)) - - return data, setupDiGetSelectedDevice(deviceInfoSet, data) -} - -// SelectedDevice method retrieves the selected device information element in a device information set. -func (deviceInfoSet DevInfo) SelectedDevice() (*DevInfoData, error) { - return SetupDiGetSelectedDevice(deviceInfoSet) -} - -// SetupDiSetSelectedDevice function sets a device information element as the selected member of a device information set. This function is typically used by an installation wizard. -//sys SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiSetSelectedDevice - -// SetSelectedDevice method sets a device information element as the selected member of a device information set. This function is typically used by an installation wizard. -func (deviceInfoSet DevInfo) SetSelectedDevice(deviceInfoData *DevInfoData) error { - return SetupDiSetSelectedDevice(deviceInfoSet, deviceInfoData) -} - -//sys setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (err error) = setupapi.SetupUninstallOEMInfW - -// SetupUninstallOEMInf uninstalls the specified driver. 
-func SetupUninstallOEMInf(infFileName string, flags SUOI) error { - infFileName16, err := UTF16PtrFromString(infFileName) - if err != nil { - return err - } - return setupUninstallOEMInf(infFileName16, flags, 0) -} - -//sys cm_MapCrToWin32Err(configRet CONFIGRET, defaultWin32Error Errno) (ret Errno) = CfgMgr32.CM_MapCrToWin32Err - -//sys cm_Get_Device_Interface_List_Size(len *uint32, interfaceClass *GUID, deviceID *uint16, flags uint32) (ret CONFIGRET) = CfgMgr32.CM_Get_Device_Interface_List_SizeW -//sys cm_Get_Device_Interface_List(interfaceClass *GUID, deviceID *uint16, buffer *uint16, bufferLen uint32, flags uint32) (ret CONFIGRET) = CfgMgr32.CM_Get_Device_Interface_ListW - -func CM_Get_Device_Interface_List(deviceID string, interfaceClass *GUID, flags uint32) ([]string, error) { - deviceID16, err := UTF16PtrFromString(deviceID) - if err != nil { - return nil, err - } - var buf []uint16 - var buflen uint32 - for { - if ret := cm_Get_Device_Interface_List_Size(&buflen, interfaceClass, deviceID16, flags); ret != CR_SUCCESS { - return nil, ret - } - buf = make([]uint16, buflen) - if ret := cm_Get_Device_Interface_List(interfaceClass, deviceID16, &buf[0], buflen, flags); ret == CR_SUCCESS { - break - } else if ret != CR_BUFFER_SMALL { - return nil, ret - } - } - var interfaces []string - for i := 0; i < len(buf); { - j := i + wcslen(buf[i:]) - if i < j { - interfaces = append(interfaces, UTF16ToString(buf[i:j])) - } - i = j + 1 - } - if interfaces == nil { - return nil, ERROR_NO_SUCH_DEVICE_INTERFACE - } - return interfaces, nil -} - -//sys cm_Get_DevNode_Status(status *uint32, problemNumber *uint32, devInst DEVINST, flags uint32) (ret CONFIGRET) = CfgMgr32.CM_Get_DevNode_Status - -func CM_Get_DevNode_Status(status *uint32, problemNumber *uint32, devInst DEVINST, flags uint32) error { - ret := cm_Get_DevNode_Status(status, problemNumber, devInst, flags) - if ret == CR_SUCCESS { - return nil - } - return ret -} diff --git a/vendor/golang.org/x/sys/windows/str.go b/vendor/golang.org/x/sys/windows/str.go deleted file mode 100644 index 4fc01434e..000000000 --- a/vendor/golang.org/x/sys/windows/str.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build windows -// +build windows - -package windows - -func itoa(val int) string { // do it here rather than with fmt to avoid dependency - if val < 0 { - return "-" + itoa(-val) - } - var buf [32]byte // big enough for int64 - i := len(buf) - 1 - for val >= 10 { - buf[i] = byte(val%10 + '0') - i-- - val /= 10 - } - buf[i] = byte(val + '0') - return string(buf[i:]) -} diff --git a/vendor/golang.org/x/sys/windows/syscall.go b/vendor/golang.org/x/sys/windows/syscall.go deleted file mode 100644 index 8732cdb95..000000000 --- a/vendor/golang.org/x/sys/windows/syscall.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build windows -// +build windows - -// Package windows contains an interface to the low-level operating system -// primitives. OS details vary depending on the underlying system, and -// by default, godoc will display the OS-specific documentation for the current -// system. If you want godoc to display syscall documentation for another -// system, set $GOOS and $GOARCH to the desired system. 
For example, if -// you want to view documentation for freebsd/arm on linux/amd64, set $GOOS -// to freebsd and $GOARCH to arm. -// -// The primary use of this package is inside other packages that provide a more -// portable interface to the system, such as "os", "time" and "net". Use -// those packages rather than this one if you can. -// -// For details of the functions and data types in this package consult -// the manuals for the appropriate operating system. -// -// These calls return err == nil to indicate success; otherwise -// err represents an operating system error describing the failure and -// holds a value of type syscall.Errno. -package windows // import "golang.org/x/sys/windows" - -import ( - "bytes" - "strings" - "syscall" - "unsafe" -) - -// ByteSliceFromString returns a NUL-terminated slice of bytes -// containing the text of s. If s contains a NUL byte at any -// location, it returns (nil, syscall.EINVAL). -func ByteSliceFromString(s string) ([]byte, error) { - if strings.IndexByte(s, 0) != -1 { - return nil, syscall.EINVAL - } - a := make([]byte, len(s)+1) - copy(a, s) - return a, nil -} - -// BytePtrFromString returns a pointer to a NUL-terminated array of -// bytes containing the text of s. If s contains a NUL byte at any -// location, it returns (nil, syscall.EINVAL). -func BytePtrFromString(s string) (*byte, error) { - a, err := ByteSliceFromString(s) - if err != nil { - return nil, err - } - return &a[0], nil -} - -// ByteSliceToString returns a string form of the text represented by the slice s, with a terminating NUL and any -// bytes after the NUL removed. -func ByteSliceToString(s []byte) string { - if i := bytes.IndexByte(s, 0); i != -1 { - s = s[:i] - } - return string(s) -} - -// BytePtrToString takes a pointer to a sequence of text and returns the corresponding string. -// If the pointer is nil, it returns the empty string. It assumes that the text sequence is terminated -// at a zero byte; if the zero byte is not present, the program may crash. -func BytePtrToString(p *byte) string { - if p == nil { - return "" - } - if *p == 0 { - return "" - } - - // Find NUL terminator. - n := 0 - for ptr := unsafe.Pointer(p); *(*byte)(ptr) != 0; n++ { - ptr = unsafe.Pointer(uintptr(ptr) + 1) - } - - return string(unsafe.Slice(p, n)) -} - -// Single-word zero for use when we need a valid pointer to 0 bytes. -// See mksyscall.pl. -var _zero uintptr - -func (ts *Timespec) Unix() (sec int64, nsec int64) { - return int64(ts.Sec), int64(ts.Nsec) -} - -func (tv *Timeval) Unix() (sec int64, nsec int64) { - return int64(tv.Sec), int64(tv.Usec) * 1000 -} - -func (ts *Timespec) Nano() int64 { - return int64(ts.Sec)*1e9 + int64(ts.Nsec) -} - -func (tv *Timeval) Nano() int64 { - return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000 -} diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go deleted file mode 100644 index 3723b2c22..000000000 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ /dev/null @@ -1,1808 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Windows system calls. 
- -package windows - -import ( - errorspkg "errors" - "fmt" - "runtime" - "sync" - "syscall" - "time" - "unicode/utf16" - "unsafe" - - "golang.org/x/sys/internal/unsafeheader" -) - -type Handle uintptr -type HWND uintptr - -const ( - InvalidHandle = ^Handle(0) - InvalidHWND = ^HWND(0) - - // Flags for DefineDosDevice. - DDD_EXACT_MATCH_ON_REMOVE = 0x00000004 - DDD_NO_BROADCAST_SYSTEM = 0x00000008 - DDD_RAW_TARGET_PATH = 0x00000001 - DDD_REMOVE_DEFINITION = 0x00000002 - - // Return values for GetDriveType. - DRIVE_UNKNOWN = 0 - DRIVE_NO_ROOT_DIR = 1 - DRIVE_REMOVABLE = 2 - DRIVE_FIXED = 3 - DRIVE_REMOTE = 4 - DRIVE_CDROM = 5 - DRIVE_RAMDISK = 6 - - // File system flags from GetVolumeInformation and GetVolumeInformationByHandle. - FILE_CASE_SENSITIVE_SEARCH = 0x00000001 - FILE_CASE_PRESERVED_NAMES = 0x00000002 - FILE_FILE_COMPRESSION = 0x00000010 - FILE_DAX_VOLUME = 0x20000000 - FILE_NAMED_STREAMS = 0x00040000 - FILE_PERSISTENT_ACLS = 0x00000008 - FILE_READ_ONLY_VOLUME = 0x00080000 - FILE_SEQUENTIAL_WRITE_ONCE = 0x00100000 - FILE_SUPPORTS_ENCRYPTION = 0x00020000 - FILE_SUPPORTS_EXTENDED_ATTRIBUTES = 0x00800000 - FILE_SUPPORTS_HARD_LINKS = 0x00400000 - FILE_SUPPORTS_OBJECT_IDS = 0x00010000 - FILE_SUPPORTS_OPEN_BY_FILE_ID = 0x01000000 - FILE_SUPPORTS_REPARSE_POINTS = 0x00000080 - FILE_SUPPORTS_SPARSE_FILES = 0x00000040 - FILE_SUPPORTS_TRANSACTIONS = 0x00200000 - FILE_SUPPORTS_USN_JOURNAL = 0x02000000 - FILE_UNICODE_ON_DISK = 0x00000004 - FILE_VOLUME_IS_COMPRESSED = 0x00008000 - FILE_VOLUME_QUOTAS = 0x00000020 - - // Flags for LockFileEx. - LOCKFILE_FAIL_IMMEDIATELY = 0x00000001 - LOCKFILE_EXCLUSIVE_LOCK = 0x00000002 - - // Return value of SleepEx and other APC functions - WAIT_IO_COMPLETION = 0x000000C0 -) - -// StringToUTF16 is deprecated. Use UTF16FromString instead. -// If s contains a NUL byte this function panics instead of -// returning an error. -func StringToUTF16(s string) []uint16 { - a, err := UTF16FromString(s) - if err != nil { - panic("windows: string with NUL passed to StringToUTF16") - } - return a -} - -// UTF16FromString returns the UTF-16 encoding of the UTF-8 string -// s, with a terminating NUL added. If s contains a NUL byte at any -// location, it returns (nil, syscall.EINVAL). -func UTF16FromString(s string) ([]uint16, error) { - return syscall.UTF16FromString(s) -} - -// UTF16ToString returns the UTF-8 encoding of the UTF-16 sequence s, -// with a terminating NUL and any bytes after the NUL removed. -func UTF16ToString(s []uint16) string { - return syscall.UTF16ToString(s) -} - -// StringToUTF16Ptr is deprecated. Use UTF16PtrFromString instead. -// If s contains a NUL byte this function panics instead of -// returning an error. -func StringToUTF16Ptr(s string) *uint16 { return &StringToUTF16(s)[0] } - -// UTF16PtrFromString returns pointer to the UTF-16 encoding of -// the UTF-8 string s, with a terminating NUL added. If s -// contains a NUL byte at any location, it returns (nil, syscall.EINVAL). -func UTF16PtrFromString(s string) (*uint16, error) { - a, err := UTF16FromString(s) - if err != nil { - return nil, err - } - return &a[0], nil -} - -// UTF16PtrToString takes a pointer to a UTF-16 sequence and returns the corresponding UTF-8 encoded string. -// If the pointer is nil, it returns the empty string. It assumes that the UTF-16 sequence is terminated -// at a zero word; if the zero word is not present, the program may crash. -func UTF16PtrToString(p *uint16) string { - if p == nil { - return "" - } - if *p == 0 { - return "" - } - - // Find NUL terminator. 
- n := 0 - for ptr := unsafe.Pointer(p); *(*uint16)(ptr) != 0; n++ { - ptr = unsafe.Pointer(uintptr(ptr) + unsafe.Sizeof(*p)) - } - - return string(utf16.Decode(unsafe.Slice(p, n))) -} - -func Getpagesize() int { return 4096 } - -// NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention. -// This is useful when interoperating with Windows code requiring callbacks. -// The argument is expected to be a function with with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. -func NewCallback(fn interface{}) uintptr { - return syscall.NewCallback(fn) -} - -// NewCallbackCDecl converts a Go function to a function pointer conforming to the cdecl calling convention. -// This is useful when interoperating with Windows code requiring callbacks. -// The argument is expected to be a function with with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. -func NewCallbackCDecl(fn interface{}) uintptr { - return syscall.NewCallbackCDecl(fn) -} - -// windows api calls - -//sys GetLastError() (lasterr error) -//sys LoadLibrary(libname string) (handle Handle, err error) = LoadLibraryW -//sys LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) = LoadLibraryExW -//sys FreeLibrary(handle Handle) (err error) -//sys GetProcAddress(module Handle, procname string) (proc uintptr, err error) -//sys GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) = kernel32.GetModuleFileNameW -//sys GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) = kernel32.GetModuleHandleExW -//sys SetDefaultDllDirectories(directoryFlags uint32) (err error) -//sys SetDllDirectory(path string) (err error) = kernel32.SetDllDirectoryW -//sys GetVersion() (ver uint32, err error) -//sys FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) = FormatMessageW -//sys ExitProcess(exitcode uint32) -//sys IsWow64Process(handle Handle, isWow64 *bool) (err error) = IsWow64Process -//sys IsWow64Process2(handle Handle, processMachine *uint16, nativeMachine *uint16) (err error) = IsWow64Process2? 
-//sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW -//sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW -//sys ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) -//sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) -//sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW -//sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState -//sys readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) = ReadFile -//sys writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) = WriteFile -//sys GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) -//sys SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) [failretval==0xffffffff] -//sys CloseHandle(handle Handle) (err error) -//sys GetStdHandle(stdhandle uint32) (handle Handle, err error) [failretval==InvalidHandle] -//sys SetStdHandle(stdhandle uint32, handle Handle) (err error) -//sys findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) [failretval==InvalidHandle] = FindFirstFileW -//sys findNextFile1(handle Handle, data *win32finddata1) (err error) = FindNextFileW -//sys FindClose(handle Handle) (err error) -//sys GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) -//sys GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) -//sys SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inBufferLen uint32) (err error) -//sys GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) = GetCurrentDirectoryW -//sys SetCurrentDirectory(path *uint16) (err error) = SetCurrentDirectoryW -//sys CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) = CreateDirectoryW -//sys RemoveDirectory(path *uint16) (err error) = RemoveDirectoryW -//sys DeleteFile(path *uint16) (err error) = DeleteFileW -//sys MoveFile(from *uint16, to *uint16) (err error) = MoveFileW -//sys MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) = MoveFileExW -//sys LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) -//sys UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) -//sys GetComputerName(buf *uint16, n *uint32) (err error) = GetComputerNameW -//sys GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW -//sys SetEndOfFile(handle Handle) (err error) -//sys GetSystemTimeAsFileTime(time *Filetime) -//sys GetSystemTimePreciseAsFileTime(time *Filetime) -//sys GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) [failretval==0xffffffff] -//sys CreateIoCompletionPort(filehandle Handle, cphandle Handle, key 
uintptr, threadcnt uint32) (handle Handle, err error) -//sys GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overlapped **Overlapped, timeout uint32) (err error) -//sys PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overlapped *Overlapped) (err error) -//sys CancelIo(s Handle) (err error) -//sys CancelIoEx(s Handle, o *Overlapped) (err error) -//sys CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) = CreateProcessW -//sys CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) = advapi32.CreateProcessAsUserW -//sys initializeProcThreadAttributeList(attrlist *ProcThreadAttributeList, attrcount uint32, flags uint32, size *uintptr) (err error) = InitializeProcThreadAttributeList -//sys deleteProcThreadAttributeList(attrlist *ProcThreadAttributeList) = DeleteProcThreadAttributeList -//sys updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, attr uintptr, value unsafe.Pointer, size uintptr, prevvalue unsafe.Pointer, returnedsize *uintptr) (err error) = UpdateProcThreadAttribute -//sys OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) -//sys ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) [failretval<=32] = shell32.ShellExecuteW -//sys GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) = user32.GetWindowThreadProcessId -//sys GetShellWindow() (shellWindow HWND) = user32.GetShellWindow -//sys MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) [failretval==0] = user32.MessageBoxW -//sys ExitWindowsEx(flags uint32, reason uint32) (err error) = user32.ExitWindowsEx -//sys shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) = shell32.SHGetKnownFolderPath -//sys TerminateProcess(handle Handle, exitcode uint32) (err error) -//sys GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) -//sys GetStartupInfo(startupInfo *StartupInfo) (err error) = GetStartupInfoW -//sys GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) -//sys DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) -//sys WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff] -//sys waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff] = WaitForMultipleObjects -//sys GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) = GetTempPathW -//sys CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) -//sys GetFileType(filehandle Handle) (n uint32, err error) -//sys CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) = 
advapi32.CryptAcquireContextW -//sys CryptReleaseContext(provhandle Handle, flags uint32) (err error) = advapi32.CryptReleaseContext -//sys CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) = advapi32.CryptGenRandom -//sys GetEnvironmentStrings() (envs *uint16, err error) [failretval==nil] = kernel32.GetEnvironmentStringsW -//sys FreeEnvironmentStrings(envs *uint16) (err error) = kernel32.FreeEnvironmentStringsW -//sys GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) = kernel32.GetEnvironmentVariableW -//sys SetEnvironmentVariable(name *uint16, value *uint16) (err error) = kernel32.SetEnvironmentVariableW -//sys ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW -//sys CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) = userenv.CreateEnvironmentBlock -//sys DestroyEnvironmentBlock(block *uint16) (err error) = userenv.DestroyEnvironmentBlock -//sys getTickCount64() (ms uint64) = kernel32.GetTickCount64 -//sys SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) -//sys GetFileAttributes(name *uint16) (attrs uint32, err error) [failretval==INVALID_FILE_ATTRIBUTES] = kernel32.GetFileAttributesW -//sys SetFileAttributes(name *uint16, attrs uint32) (err error) = kernel32.SetFileAttributesW -//sys GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) = kernel32.GetFileAttributesExW -//sys GetCommandLine() (cmd *uint16) = kernel32.GetCommandLineW -//sys CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) [failretval==nil] = shell32.CommandLineToArgvW -//sys LocalFree(hmem Handle) (handle Handle, err error) [failretval!=0] -//sys LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) -//sys SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) -//sys FlushFileBuffers(handle Handle) (err error) -//sys GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) = kernel32.GetFullPathNameW -//sys GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) = kernel32.GetLongPathNameW -//sys GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) = kernel32.GetShortPathNameW -//sys GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) = kernel32.GetFinalPathNameByHandleW -//sys CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateFileMappingW -//sys MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) -//sys UnmapViewOfFile(addr uintptr) (err error) -//sys FlushViewOfFile(addr uintptr, length uintptr) (err error) -//sys VirtualLock(addr uintptr, length uintptr) (err error) -//sys VirtualUnlock(addr uintptr, length uintptr) (err error) -//sys VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) = kernel32.VirtualAlloc -//sys VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) = kernel32.VirtualFree -//sys VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) = kernel32.VirtualProtect -//sys VirtualProtectEx(process Handle, address 
uintptr, size uintptr, newProtect uint32, oldProtect *uint32) (err error) = kernel32.VirtualProtectEx -//sys VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) = kernel32.VirtualQuery -//sys VirtualQueryEx(process Handle, address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) = kernel32.VirtualQueryEx -//sys ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesRead *uintptr) (err error) = kernel32.ReadProcessMemory -//sys WriteProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesWritten *uintptr) (err error) = kernel32.WriteProcessMemory -//sys TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) = mswsock.TransmitFile -//sys ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) = kernel32.ReadDirectoryChangesW -//sys FindFirstChangeNotification(path string, watchSubtree bool, notifyFilter uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.FindFirstChangeNotificationW -//sys FindNextChangeNotification(handle Handle) (err error) -//sys FindCloseChangeNotification(handle Handle) (err error) -//sys CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) = crypt32.CertOpenSystemStoreW -//sys CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) = crypt32.CertOpenStore -//sys CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) [failretval==nil] = crypt32.CertEnumCertificatesInStore -//sys CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) = crypt32.CertAddCertificateContextToStore -//sys CertCloseStore(store Handle, flags uint32) (err error) = crypt32.CertCloseStore -//sys CertDeleteCertificateFromStore(certContext *CertContext) (err error) = crypt32.CertDeleteCertificateFromStore -//sys CertDuplicateCertificateContext(certContext *CertContext) (dupContext *CertContext) = crypt32.CertDuplicateCertificateContext -//sys PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (store Handle, err error) = crypt32.PFXImportCertStore -//sys CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) = crypt32.CertGetCertificateChain -//sys CertFreeCertificateChain(ctx *CertChainContext) = crypt32.CertFreeCertificateChain -//sys CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) [failretval==nil] = crypt32.CertCreateCertificateContext -//sys CertFreeCertificateContext(ctx *CertContext) (err error) = crypt32.CertFreeCertificateContext -//sys CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) = crypt32.CertVerifyCertificateChainPolicy -//sys CertGetNameString(certContext *CertContext, nameType uint32, flags uint32, typePara unsafe.Pointer, name *uint16, size uint32) (chars uint32) = crypt32.CertGetNameStringW -//sys CertFindExtension(objId *byte, countExtensions 
uint32, extensions *CertExtension) (ret *CertExtension) = crypt32.CertFindExtension -//sys CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevCertContext *CertContext) (cert *CertContext, err error) [failretval==nil] = crypt32.CertFindCertificateInStore -//sys CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevChainContext *CertChainContext) (certchain *CertChainContext, err error) [failretval==nil] = crypt32.CertFindChainInStore -//sys CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, parameters unsafe.Pointer, cryptProvOrNCryptKey *Handle, keySpec *uint32, callerFreeProvOrNCryptKey *bool) (err error) = crypt32.CryptAcquireCertificatePrivateKey -//sys CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentTypeFlags uint32, expectedFormatTypeFlags uint32, flags uint32, msgAndCertEncodingType *uint32, contentType *uint32, formatType *uint32, certStore *Handle, msg *Handle, context *unsafe.Pointer) (err error) = crypt32.CryptQueryObject -//sys CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte, lenEncodedBytes uint32, flags uint32, decoded unsafe.Pointer, decodedLen *uint32) (err error) = crypt32.CryptDecodeObject -//sys CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) = crypt32.CryptProtectData -//sys CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) = crypt32.CryptUnprotectData -//sys WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) = wintrust.WinVerifyTrustEx -//sys RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) = advapi32.RegOpenKeyExW -//sys RegCloseKey(key Handle) (regerrno error) = advapi32.RegCloseKey -//sys RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegQueryInfoKeyW -//sys RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegEnumKeyExW -//sys RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegQueryValueExW -//sys RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, event Handle, asynchronous bool) (regerrno error) = advapi32.RegNotifyChangeKeyValue -//sys GetCurrentProcessId() (pid uint32) = kernel32.GetCurrentProcessId -//sys ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) = kernel32.ProcessIdToSessionId -//sys GetConsoleMode(console Handle, mode *uint32) (err error) = kernel32.GetConsoleMode -//sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode -//sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo -//sys setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition -//sys WriteConsole(console Handle, 
buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW -//sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW -//sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot -//sys Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32FirstW -//sys Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32NextW -//sys Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32FirstW -//sys Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32NextW -//sys Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) -//sys Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) -//sys DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) -// This function returns 1 byte BOOLEAN rather than the 4 byte BOOL. -//sys CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) [failretval&0xff==0] = CreateSymbolicLinkW -//sys CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) [failretval&0xff==0] = CreateHardLinkW -//sys GetCurrentThreadId() (id uint32) -//sys CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateEventW -//sys CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateEventExW -//sys OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) = kernel32.OpenEventW -//sys SetEvent(event Handle) (err error) = kernel32.SetEvent -//sys ResetEvent(event Handle) (err error) = kernel32.ResetEvent -//sys PulseEvent(event Handle) (err error) = kernel32.PulseEvent -//sys CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateMutexW -//sys CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateMutexExW -//sys OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) = kernel32.OpenMutexW -//sys ReleaseMutex(mutex Handle) (err error) = kernel32.ReleaseMutex -//sys SleepEx(milliseconds uint32, alertable bool) (ret uint32) = kernel32.SleepEx -//sys CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) = kernel32.CreateJobObjectW -//sys AssignProcessToJobObject(job Handle, process Handle) (err error) = kernel32.AssignProcessToJobObject -//sys TerminateJobObject(job Handle, exitCode uint32) (err error) = kernel32.TerminateJobObject -//sys SetErrorMode(mode uint32) (ret uint32) = kernel32.SetErrorMode -//sys ResumeThread(thread Handle) (ret uint32, err error) [failretval==0xffffffff] = kernel32.ResumeThread -//sys SetPriorityClass(process Handle, priorityClass uint32) (err error) = kernel32.SetPriorityClass -//sys 
GetPriorityClass(process Handle) (ret uint32, err error) = kernel32.GetPriorityClass -//sys QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobObjectInformation uintptr, JobObjectInformationLength uint32, retlen *uint32) (err error) = kernel32.QueryInformationJobObject -//sys SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) -//sys GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) -//sys GetProcessId(process Handle) (id uint32, err error) -//sys QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size *uint32) (err error) = kernel32.QueryFullProcessImageNameW -//sys OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) -//sys SetProcessPriorityBoost(process Handle, disable bool) (err error) = kernel32.SetProcessPriorityBoost -//sys GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) -//sys SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) -//sys GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) -//sys SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) -//sys GetActiveProcessorCount(groupNumber uint16) (ret uint32) -//sys GetMaximumProcessorCount(groupNumber uint16) (ret uint32) -//sys EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) = user32.EnumWindows -//sys EnumChildWindows(hwnd HWND, enumFunc uintptr, param unsafe.Pointer) = user32.EnumChildWindows -//sys GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, err error) = user32.GetClassNameW -//sys GetDesktopWindow() (hwnd HWND) = user32.GetDesktopWindow -//sys GetForegroundWindow() (hwnd HWND) = user32.GetForegroundWindow -//sys IsWindow(hwnd HWND) (isWindow bool) = user32.IsWindow -//sys IsWindowUnicode(hwnd HWND) (isUnicode bool) = user32.IsWindowUnicode -//sys IsWindowVisible(hwnd HWND) (isVisible bool) = user32.IsWindowVisible -//sys GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) = user32.GetGUIThreadInfo -//sys GetLargePageMinimum() (size uintptr) - -// Volume Management Functions -//sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW -//sys DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) = DeleteVolumeMountPointW -//sys FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) [failretval==InvalidHandle] = FindFirstVolumeW -//sys FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) [failretval==InvalidHandle] = FindFirstVolumeMountPointW -//sys FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) = FindNextVolumeW -//sys FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) = FindNextVolumeMountPointW -//sys FindVolumeClose(findVolume Handle) (err error) -//sys FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) -//sys GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) = GetDiskFreeSpaceExW -//sys GetDriveType(rootPathName *uint16) (driveType uint32) = GetDriveTypeW -//sys GetLogicalDrives() (drivesBitMask 
uint32, err error) [failretval==0] -//sys GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) [failretval==0] = GetLogicalDriveStringsW -//sys GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) = GetVolumeInformationW -//sys GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) = GetVolumeInformationByHandleW -//sys GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) = GetVolumeNameForVolumeMountPointW -//sys GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) = GetVolumePathNameW -//sys GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) = GetVolumePathNamesForVolumeNameW -//sys QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) [failretval==0] = QueryDosDeviceW -//sys SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) = SetVolumeLabelW -//sys SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) = SetVolumeMountPointW -//sys InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint32, forceAppsClosed bool, rebootAfterShutdown bool, reason uint32) (err error) = advapi32.InitiateSystemShutdownExW -//sys SetProcessShutdownParameters(level uint32, flags uint32) (err error) = kernel32.SetProcessShutdownParameters -//sys GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) = kernel32.GetProcessShutdownParameters -//sys clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) = ole32.CLSIDFromString -//sys stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) = ole32.StringFromGUID2 -//sys coCreateGuid(pguid *GUID) (ret error) = ole32.CoCreateGuid -//sys CoTaskMemFree(address unsafe.Pointer) = ole32.CoTaskMemFree -//sys CoInitializeEx(reserved uintptr, coInit uint32) (ret error) = ole32.CoInitializeEx -//sys CoUninitialize() = ole32.CoUninitialize -//sys CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable **uintptr) (ret error) = ole32.CoGetObject -//sys getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetProcessPreferredUILanguages -//sys getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetThreadPreferredUILanguages -//sys getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetUserPreferredUILanguages -//sys getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetSystemPreferredUILanguages -//sys findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, err error) = kernel32.FindResourceW -//sys SizeofResource(module Handle, resInfo Handle) (size uint32, err error) = kernel32.SizeofResource -//sys LoadResource(module Handle, resInfo Handle) (resData Handle, err error) = kernel32.LoadResource -//sys LockResource(resData Handle) (addr uintptr, err error) = 
kernel32.LockResource - -// Version APIs -//sys GetFileVersionInfoSize(filename string, zeroHandle *Handle) (bufSize uint32, err error) = version.GetFileVersionInfoSizeW -//sys GetFileVersionInfo(filename string, handle uint32, bufSize uint32, buffer unsafe.Pointer) (err error) = version.GetFileVersionInfoW -//sys VerQueryValue(block unsafe.Pointer, subBlock string, pointerToBufferPointer unsafe.Pointer, bufSize *uint32) (err error) = version.VerQueryValueW - -// Process Status API (PSAPI) -//sys EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) = psapi.EnumProcesses -//sys EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uint32) (err error) = psapi.EnumProcessModules -//sys EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *uint32, filterFlag uint32) (err error) = psapi.EnumProcessModulesEx -//sys GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb uint32) (err error) = psapi.GetModuleInformation -//sys GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size uint32) (err error) = psapi.GetModuleFileNameExW -//sys GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uint32) (err error) = psapi.GetModuleBaseNameW -//sys QueryWorkingSetEx(process Handle, pv uintptr, cb uint32) (err error) = psapi.QueryWorkingSetEx - -// NT Native APIs -//sys rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) = ntdll.RtlNtStatusToDosErrorNoTeb -//sys rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) = ntdll.RtlGetVersion -//sys rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) = ntdll.RtlGetNtVersionNumbers -//sys RtlGetCurrentPeb() (peb *PEB) = ntdll.RtlGetCurrentPeb -//sys RtlInitUnicodeString(destinationString *NTUnicodeString, sourceString *uint16) = ntdll.RtlInitUnicodeString -//sys RtlInitString(destinationString *NTString, sourceString *byte) = ntdll.RtlInitString -//sys NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, allocationSize *int64, attributes uint32, share uint32, disposition uint32, options uint32, eabuffer uintptr, ealength uint32) (ntstatus error) = ntdll.NtCreateFile -//sys NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (ntstatus error) = ntdll.NtCreateNamedPipeFile -//sys NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte, inBufferLen uint32, class uint32) (ntstatus error) = ntdll.NtSetInformationFile -//sys RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) = ntdll.RtlDosPathNameToNtPathName_U_WithStatus -//sys RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) = ntdll.RtlDosPathNameToRelativeNtPathName_U_WithStatus -//sys RtlDefaultNpAcl(acl **ACL) (ntstatus error) = ntdll.RtlDefaultNpAcl -//sys NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32, retLen *uint32) (ntstatus error) = ntdll.NtQueryInformationProcess -//sys NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32) (ntstatus error) = ntdll.NtSetInformationProcess -//sys 
NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32, retLen *uint32) (ntstatus error) = ntdll.NtQuerySystemInformation -//sys NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32) (ntstatus error) = ntdll.NtSetSystemInformation -//sys RtlAddFunctionTable(functionTable *RUNTIME_FUNCTION, entryCount uint32, baseAddress uintptr) (ret bool) = ntdll.RtlAddFunctionTable -//sys RtlDeleteFunctionTable(functionTable *RUNTIME_FUNCTION) (ret bool) = ntdll.RtlDeleteFunctionTable - -// Desktop Window Manager API (Dwmapi) -//sys DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmGetWindowAttribute -//sys DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmSetWindowAttribute - -// syscall interface implementation for other packages - -// GetCurrentProcess returns the handle for the current process. -// It is a pseudo handle that does not need to be closed. -// The returned error is always nil. -// -// Deprecated: use CurrentProcess for the same Handle without the nil -// error. -func GetCurrentProcess() (Handle, error) { - return CurrentProcess(), nil -} - -// CurrentProcess returns the handle for the current process. -// It is a pseudo handle that does not need to be closed. -func CurrentProcess() Handle { return Handle(^uintptr(1 - 1)) } - -// GetCurrentThread returns the handle for the current thread. -// It is a pseudo handle that does not need to be closed. -// The returned error is always nil. -// -// Deprecated: use CurrentThread for the same Handle without the nil -// error. -func GetCurrentThread() (Handle, error) { - return CurrentThread(), nil -} - -// CurrentThread returns the handle for the current thread. -// It is a pseudo handle that does not need to be closed. -func CurrentThread() Handle { return Handle(^uintptr(2 - 1)) } - -// GetProcAddressByOrdinal retrieves the address of the exported -// function from module by ordinal. 
-func GetProcAddressByOrdinal(module Handle, ordinal uintptr) (proc uintptr, err error) { - r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), ordinal, 0) - proc = uintptr(r0) - if proc == 0 { - err = errnoErr(e1) - } - return -} - -func Exit(code int) { ExitProcess(uint32(code)) } - -func makeInheritSa() *SecurityAttributes { - var sa SecurityAttributes - sa.Length = uint32(unsafe.Sizeof(sa)) - sa.InheritHandle = 1 - return &sa -} - -func Open(path string, mode int, perm uint32) (fd Handle, err error) { - if len(path) == 0 { - return InvalidHandle, ERROR_FILE_NOT_FOUND - } - pathp, err := UTF16PtrFromString(path) - if err != nil { - return InvalidHandle, err - } - var access uint32 - switch mode & (O_RDONLY | O_WRONLY | O_RDWR) { - case O_RDONLY: - access = GENERIC_READ - case O_WRONLY: - access = GENERIC_WRITE - case O_RDWR: - access = GENERIC_READ | GENERIC_WRITE - } - if mode&O_CREAT != 0 { - access |= GENERIC_WRITE - } - if mode&O_APPEND != 0 { - access &^= GENERIC_WRITE - access |= FILE_APPEND_DATA - } - sharemode := uint32(FILE_SHARE_READ | FILE_SHARE_WRITE) - var sa *SecurityAttributes - if mode&O_CLOEXEC == 0 { - sa = makeInheritSa() - } - var createmode uint32 - switch { - case mode&(O_CREAT|O_EXCL) == (O_CREAT | O_EXCL): - createmode = CREATE_NEW - case mode&(O_CREAT|O_TRUNC) == (O_CREAT | O_TRUNC): - createmode = CREATE_ALWAYS - case mode&O_CREAT == O_CREAT: - createmode = OPEN_ALWAYS - case mode&O_TRUNC == O_TRUNC: - createmode = TRUNCATE_EXISTING - default: - createmode = OPEN_EXISTING - } - var attrs uint32 = FILE_ATTRIBUTE_NORMAL - if perm&S_IWRITE == 0 { - attrs = FILE_ATTRIBUTE_READONLY - } - h, e := CreateFile(pathp, access, sharemode, sa, createmode, attrs, 0) - return h, e -} - -func Read(fd Handle, p []byte) (n int, err error) { - var done uint32 - e := ReadFile(fd, p, &done, nil) - if e != nil { - if e == ERROR_BROKEN_PIPE { - // NOTE(brainman): work around ERROR_BROKEN_PIPE is returned on reading EOF from stdin - return 0, nil - } - return 0, e - } - return int(done), nil -} - -func Write(fd Handle, p []byte) (n int, err error) { - if raceenabled { - raceReleaseMerge(unsafe.Pointer(&ioSync)) - } - var done uint32 - e := WriteFile(fd, p, &done, nil) - if e != nil { - return 0, e - } - return int(done), nil -} - -func ReadFile(fd Handle, p []byte, done *uint32, overlapped *Overlapped) error { - err := readFile(fd, p, done, overlapped) - if raceenabled { - if *done > 0 { - raceWriteRange(unsafe.Pointer(&p[0]), int(*done)) - } - raceAcquire(unsafe.Pointer(&ioSync)) - } - return err -} - -func WriteFile(fd Handle, p []byte, done *uint32, overlapped *Overlapped) error { - if raceenabled { - raceReleaseMerge(unsafe.Pointer(&ioSync)) - } - err := writeFile(fd, p, done, overlapped) - if raceenabled && *done > 0 { - raceReadRange(unsafe.Pointer(&p[0]), int(*done)) - } - return err -} - -var ioSync int64 - -func Seek(fd Handle, offset int64, whence int) (newoffset int64, err error) { - var w uint32 - switch whence { - case 0: - w = FILE_BEGIN - case 1: - w = FILE_CURRENT - case 2: - w = FILE_END - } - hi := int32(offset >> 32) - lo := int32(offset) - // use GetFileType to check pipe, pipe can't do seek - ft, _ := GetFileType(fd) - if ft == FILE_TYPE_PIPE { - return 0, syscall.EPIPE - } - rlo, e := SetFilePointer(fd, lo, &hi, w) - if e != nil { - return 0, e - } - return int64(hi)<<32 + int64(rlo), nil -} - -func Close(fd Handle) (err error) { - return CloseHandle(fd) -} - -var ( - Stdin = getStdHandle(STD_INPUT_HANDLE) - Stdout = 
getStdHandle(STD_OUTPUT_HANDLE) - Stderr = getStdHandle(STD_ERROR_HANDLE) -) - -func getStdHandle(stdhandle uint32) (fd Handle) { - r, _ := GetStdHandle(stdhandle) - return r -} - -const ImplementsGetwd = true - -func Getwd() (wd string, err error) { - b := make([]uint16, 300) - n, e := GetCurrentDirectory(uint32(len(b)), &b[0]) - if e != nil { - return "", e - } - return string(utf16.Decode(b[0:n])), nil -} - -func Chdir(path string) (err error) { - pathp, err := UTF16PtrFromString(path) - if err != nil { - return err - } - return SetCurrentDirectory(pathp) -} - -func Mkdir(path string, mode uint32) (err error) { - pathp, err := UTF16PtrFromString(path) - if err != nil { - return err - } - return CreateDirectory(pathp, nil) -} - -func Rmdir(path string) (err error) { - pathp, err := UTF16PtrFromString(path) - if err != nil { - return err - } - return RemoveDirectory(pathp) -} - -func Unlink(path string) (err error) { - pathp, err := UTF16PtrFromString(path) - if err != nil { - return err - } - return DeleteFile(pathp) -} - -func Rename(oldpath, newpath string) (err error) { - from, err := UTF16PtrFromString(oldpath) - if err != nil { - return err - } - to, err := UTF16PtrFromString(newpath) - if err != nil { - return err - } - return MoveFileEx(from, to, MOVEFILE_REPLACE_EXISTING) -} - -func ComputerName() (name string, err error) { - var n uint32 = MAX_COMPUTERNAME_LENGTH + 1 - b := make([]uint16, n) - e := GetComputerName(&b[0], &n) - if e != nil { - return "", e - } - return string(utf16.Decode(b[0:n])), nil -} - -func DurationSinceBoot() time.Duration { - return time.Duration(getTickCount64()) * time.Millisecond -} - -func Ftruncate(fd Handle, length int64) (err error) { - curoffset, e := Seek(fd, 0, 1) - if e != nil { - return e - } - defer Seek(fd, curoffset, 0) - _, e = Seek(fd, length, 0) - if e != nil { - return e - } - e = SetEndOfFile(fd) - if e != nil { - return e - } - return nil -} - -func Gettimeofday(tv *Timeval) (err error) { - var ft Filetime - GetSystemTimeAsFileTime(&ft) - *tv = NsecToTimeval(ft.Nanoseconds()) - return nil -} - -func Pipe(p []Handle) (err error) { - if len(p) != 2 { - return syscall.EINVAL - } - var r, w Handle - e := CreatePipe(&r, &w, makeInheritSa(), 0) - if e != nil { - return e - } - p[0] = r - p[1] = w - return nil -} - -func Utimes(path string, tv []Timeval) (err error) { - if len(tv) != 2 { - return syscall.EINVAL - } - pathp, e := UTF16PtrFromString(path) - if e != nil { - return e - } - h, e := CreateFile(pathp, - FILE_WRITE_ATTRIBUTES, FILE_SHARE_WRITE, nil, - OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, 0) - if e != nil { - return e - } - defer CloseHandle(h) - a := NsecToFiletime(tv[0].Nanoseconds()) - w := NsecToFiletime(tv[1].Nanoseconds()) - return SetFileTime(h, nil, &a, &w) -} - -func UtimesNano(path string, ts []Timespec) (err error) { - if len(ts) != 2 { - return syscall.EINVAL - } - pathp, e := UTF16PtrFromString(path) - if e != nil { - return e - } - h, e := CreateFile(pathp, - FILE_WRITE_ATTRIBUTES, FILE_SHARE_WRITE, nil, - OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, 0) - if e != nil { - return e - } - defer CloseHandle(h) - a := NsecToFiletime(TimespecToNsec(ts[0])) - w := NsecToFiletime(TimespecToNsec(ts[1])) - return SetFileTime(h, nil, &a, &w) -} - -func Fsync(fd Handle) (err error) { - return FlushFileBuffers(fd) -} - -func Chmod(path string, mode uint32) (err error) { - p, e := UTF16PtrFromString(path) - if e != nil { - return e - } - attrs, e := GetFileAttributes(p) - if e != nil { - return e - } - if mode&S_IWRITE != 0 { - 
attrs &^= FILE_ATTRIBUTE_READONLY - } else { - attrs |= FILE_ATTRIBUTE_READONLY - } - return SetFileAttributes(p, attrs) -} - -func LoadGetSystemTimePreciseAsFileTime() error { - return procGetSystemTimePreciseAsFileTime.Find() -} - -func LoadCancelIoEx() error { - return procCancelIoEx.Find() -} - -func LoadSetFileCompletionNotificationModes() error { - return procSetFileCompletionNotificationModes.Find() -} - -func WaitForMultipleObjects(handles []Handle, waitAll bool, waitMilliseconds uint32) (event uint32, err error) { - // Every other win32 array API takes arguments as "pointer, count", except for this function. So we - // can't declare it as a usual [] type, because mksyscall will use the opposite order. We therefore - // trivially stub this ourselves. - - var handlePtr *Handle - if len(handles) > 0 { - handlePtr = &handles[0] - } - return waitForMultipleObjects(uint32(len(handles)), uintptr(unsafe.Pointer(handlePtr)), waitAll, waitMilliseconds) -} - -// net api calls - -const socket_error = uintptr(^uint32(0)) - -//sys WSAStartup(verreq uint32, data *WSAData) (sockerr error) = ws2_32.WSAStartup -//sys WSACleanup() (err error) [failretval==socket_error] = ws2_32.WSACleanup -//sys WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) [failretval==socket_error] = ws2_32.WSAIoctl -//sys WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) (err error) [failretval==socket_error] = ws2_32.WSALookupServiceBeginW -//sys WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WSAQUERYSET) (err error) [failretval==socket_error] = ws2_32.WSALookupServiceNextW -//sys WSALookupServiceEnd(handle Handle) (err error) [failretval==socket_error] = ws2_32.WSALookupServiceEnd -//sys socket(af int32, typ int32, protocol int32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.socket -//sys sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) (err error) [failretval==socket_error] = ws2_32.sendto -//sys recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen *int32) (n int32, err error) [failretval==-1] = ws2_32.recvfrom -//sys Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) [failretval==socket_error] = ws2_32.setsockopt -//sys Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) [failretval==socket_error] = ws2_32.getsockopt -//sys bind(s Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socket_error] = ws2_32.bind -//sys connect(s Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socket_error] = ws2_32.connect -//sys getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) [failretval==socket_error] = ws2_32.getsockname -//sys getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) [failretval==socket_error] = ws2_32.getpeername -//sys listen(s Handle, backlog int32) (err error) [failretval==socket_error] = ws2_32.listen -//sys shutdown(s Handle, how int32) (err error) [failretval==socket_error] = ws2_32.shutdown -//sys Closesocket(s Handle) (err error) [failretval==socket_error] = ws2_32.closesocket -//sys AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) = mswsock.AcceptEx -//sys GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa 
**RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) = mswsock.GetAcceptExSockaddrs -//sys WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecv -//sys WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASend -//sys WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecvFrom -//sys WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASendTo -//sys WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.WSASocketW -//sys GetHostByName(name string) (h *Hostent, err error) [failretval==nil] = ws2_32.gethostbyname -//sys GetServByName(name string, proto string) (s *Servent, err error) [failretval==nil] = ws2_32.getservbyname -//sys Ntohs(netshort uint16) (u uint16) = ws2_32.ntohs -//sys GetProtoByName(name string) (p *Protoent, err error) [failretval==nil] = ws2_32.getprotobyname -//sys DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) = dnsapi.DnsQuery_W -//sys DnsRecordListFree(rl *DNSRecord, freetype uint32) = dnsapi.DnsRecordListFree -//sys DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) = dnsapi.DnsNameCompare_W -//sys GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) = ws2_32.GetAddrInfoW -//sys FreeAddrInfoW(addrinfo *AddrinfoW) = ws2_32.FreeAddrInfoW -//sys GetIfEntry(pIfRow *MibIfRow) (errcode error) = iphlpapi.GetIfEntry -//sys GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) = iphlpapi.GetAdaptersInfo -//sys SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) = kernel32.SetFileCompletionNotificationModes -//sys WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) [failretval==-1] = ws2_32.WSAEnumProtocolsW -//sys WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult -//sys GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) = iphlpapi.GetAdaptersAddresses -//sys GetACP() (acp uint32) = kernel32.GetACP -//sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar -//sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx - -// For testing: clients can set this flag to force -// creation of IPv6 sockets to return EAFNOSUPPORT. 
-var SocketDisableIPv6 bool - -type RawSockaddrInet4 struct { - Family uint16 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]uint8 -} - -type RawSockaddrInet6 struct { - Family uint16 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddr struct { - Family uint16 - Data [14]int8 -} - -type RawSockaddrAny struct { - Addr RawSockaddr - Pad [100]int8 -} - -type Sockaddr interface { - sockaddr() (ptr unsafe.Pointer, len int32, err error) // lowercase; only we can define Sockaddrs -} - -type SockaddrInet4 struct { - Port int - Addr [4]byte - raw RawSockaddrInet4 -} - -func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, int32, error) { - if sa.Port < 0 || sa.Port > 0xFFFF { - return nil, 0, syscall.EINVAL - } - sa.raw.Family = AF_INET - p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) - p[0] = byte(sa.Port >> 8) - p[1] = byte(sa.Port) - sa.raw.Addr = sa.Addr - return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil -} - -type SockaddrInet6 struct { - Port int - ZoneId uint32 - Addr [16]byte - raw RawSockaddrInet6 -} - -func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, int32, error) { - if sa.Port < 0 || sa.Port > 0xFFFF { - return nil, 0, syscall.EINVAL - } - sa.raw.Family = AF_INET6 - p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) - p[0] = byte(sa.Port >> 8) - p[1] = byte(sa.Port) - sa.raw.Scope_id = sa.ZoneId - sa.raw.Addr = sa.Addr - return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil -} - -type RawSockaddrUnix struct { - Family uint16 - Path [UNIX_PATH_MAX]int8 -} - -type SockaddrUnix struct { - Name string - raw RawSockaddrUnix -} - -func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, int32, error) { - name := sa.Name - n := len(name) - if n > len(sa.raw.Path) { - return nil, 0, syscall.EINVAL - } - if n == len(sa.raw.Path) && name[0] != '@' { - return nil, 0, syscall.EINVAL - } - sa.raw.Family = AF_UNIX - for i := 0; i < n; i++ { - sa.raw.Path[i] = int8(name[i]) - } - // length is family (uint16), name, NUL. - sl := int32(2) - if n > 0 { - sl += int32(n) + 1 - } - if sa.raw.Path[0] == '@' { - sa.raw.Path[0] = 0 - // Don't count trailing NUL for abstract address. - sl-- - } - - return unsafe.Pointer(&sa.raw), sl, nil -} - -type RawSockaddrBth struct { - AddressFamily [2]byte - BtAddr [8]byte - ServiceClassId [16]byte - Port [4]byte -} - -type SockaddrBth struct { - BtAddr uint64 - ServiceClassId GUID - Port uint32 - - raw RawSockaddrBth -} - -func (sa *SockaddrBth) sockaddr() (unsafe.Pointer, int32, error) { - family := AF_BTH - sa.raw = RawSockaddrBth{ - AddressFamily: *(*[2]byte)(unsafe.Pointer(&family)), - BtAddr: *(*[8]byte)(unsafe.Pointer(&sa.BtAddr)), - Port: *(*[4]byte)(unsafe.Pointer(&sa.Port)), - ServiceClassId: *(*[16]byte)(unsafe.Pointer(&sa.ServiceClassId)), - } - return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil -} - -func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) { - switch rsa.Addr.Family { - case AF_UNIX: - pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa)) - sa := new(SockaddrUnix) - if pp.Path[0] == 0 { - // "Abstract" Unix domain socket. - // Rewrite leading NUL as @ for textual display. - // (This is the standard convention.) - // Not friendly to overwrite in place, - // but the callers below don't care. - pp.Path[0] = '@' - } - - // Assume path ends at NUL. - // This is not technically the Linux semantics for - // abstract Unix domain sockets--they are supposed - // to be uninterpreted fixed-size binary blobs--but - // everyone uses this convention. 
- n := 0 - for n < len(pp.Path) && pp.Path[n] != 0 { - n++ - } - sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n)) - return sa, nil - - case AF_INET: - pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa)) - sa := new(SockaddrInet4) - p := (*[2]byte)(unsafe.Pointer(&pp.Port)) - sa.Port = int(p[0])<<8 + int(p[1]) - sa.Addr = pp.Addr - return sa, nil - - case AF_INET6: - pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa)) - sa := new(SockaddrInet6) - p := (*[2]byte)(unsafe.Pointer(&pp.Port)) - sa.Port = int(p[0])<<8 + int(p[1]) - sa.ZoneId = pp.Scope_id - sa.Addr = pp.Addr - return sa, nil - } - return nil, syscall.EAFNOSUPPORT -} - -func Socket(domain, typ, proto int) (fd Handle, err error) { - if domain == AF_INET6 && SocketDisableIPv6 { - return InvalidHandle, syscall.EAFNOSUPPORT - } - return socket(int32(domain), int32(typ), int32(proto)) -} - -func SetsockoptInt(fd Handle, level, opt int, value int) (err error) { - v := int32(value) - return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&v)), int32(unsafe.Sizeof(v))) -} - -func Bind(fd Handle, sa Sockaddr) (err error) { - ptr, n, err := sa.sockaddr() - if err != nil { - return err - } - return bind(fd, ptr, n) -} - -func Connect(fd Handle, sa Sockaddr) (err error) { - ptr, n, err := sa.sockaddr() - if err != nil { - return err - } - return connect(fd, ptr, n) -} - -func GetBestInterfaceEx(sa Sockaddr, pdwBestIfIndex *uint32) (err error) { - ptr, _, err := sa.sockaddr() - if err != nil { - return err - } - return getBestInterfaceEx(ptr, pdwBestIfIndex) -} - -func Getsockname(fd Handle) (sa Sockaddr, err error) { - var rsa RawSockaddrAny - l := int32(unsafe.Sizeof(rsa)) - if err = getsockname(fd, &rsa, &l); err != nil { - return - } - return rsa.Sockaddr() -} - -func Getpeername(fd Handle) (sa Sockaddr, err error) { - var rsa RawSockaddrAny - l := int32(unsafe.Sizeof(rsa)) - if err = getpeername(fd, &rsa, &l); err != nil { - return - } - return rsa.Sockaddr() -} - -func Listen(s Handle, n int) (err error) { - return listen(s, int32(n)) -} - -func Shutdown(fd Handle, how int) (err error) { - return shutdown(fd, int32(how)) -} - -func WSASendto(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to Sockaddr, overlapped *Overlapped, croutine *byte) (err error) { - var rsa unsafe.Pointer - var l int32 - if to != nil { - rsa, l, err = to.sockaddr() - if err != nil { - return err - } - } - return WSASendTo(s, bufs, bufcnt, sent, flags, (*RawSockaddrAny)(unsafe.Pointer(rsa)), l, overlapped, croutine) -} - -func LoadGetAddrInfo() error { - return procGetAddrInfoW.Find() -} - -var connectExFunc struct { - once sync.Once - addr uintptr - err error -} - -func LoadConnectEx() error { - connectExFunc.once.Do(func() { - var s Handle - s, connectExFunc.err = Socket(AF_INET, SOCK_STREAM, IPPROTO_TCP) - if connectExFunc.err != nil { - return - } - defer CloseHandle(s) - var n uint32 - connectExFunc.err = WSAIoctl(s, - SIO_GET_EXTENSION_FUNCTION_POINTER, - (*byte)(unsafe.Pointer(&WSAID_CONNECTEX)), - uint32(unsafe.Sizeof(WSAID_CONNECTEX)), - (*byte)(unsafe.Pointer(&connectExFunc.addr)), - uint32(unsafe.Sizeof(connectExFunc.addr)), - &n, nil, 0) - }) - return connectExFunc.err -} - -func connectEx(s Handle, name unsafe.Pointer, namelen int32, sendBuf *byte, sendDataLen uint32, bytesSent *uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall9(connectExFunc.addr, 7, uintptr(s), uintptr(name), uintptr(namelen), uintptr(unsafe.Pointer(sendBuf)), uintptr(sendDataLen), 
uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func ConnectEx(fd Handle, sa Sockaddr, sendBuf *byte, sendDataLen uint32, bytesSent *uint32, overlapped *Overlapped) error { - err := LoadConnectEx() - if err != nil { - return errorspkg.New("failed to find ConnectEx: " + err.Error()) - } - ptr, n, err := sa.sockaddr() - if err != nil { - return err - } - return connectEx(fd, ptr, n, sendBuf, sendDataLen, bytesSent, overlapped) -} - -var sendRecvMsgFunc struct { - once sync.Once - sendAddr uintptr - recvAddr uintptr - err error -} - -func loadWSASendRecvMsg() error { - sendRecvMsgFunc.once.Do(func() { - var s Handle - s, sendRecvMsgFunc.err = Socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP) - if sendRecvMsgFunc.err != nil { - return - } - defer CloseHandle(s) - var n uint32 - sendRecvMsgFunc.err = WSAIoctl(s, - SIO_GET_EXTENSION_FUNCTION_POINTER, - (*byte)(unsafe.Pointer(&WSAID_WSARECVMSG)), - uint32(unsafe.Sizeof(WSAID_WSARECVMSG)), - (*byte)(unsafe.Pointer(&sendRecvMsgFunc.recvAddr)), - uint32(unsafe.Sizeof(sendRecvMsgFunc.recvAddr)), - &n, nil, 0) - if sendRecvMsgFunc.err != nil { - return - } - sendRecvMsgFunc.err = WSAIoctl(s, - SIO_GET_EXTENSION_FUNCTION_POINTER, - (*byte)(unsafe.Pointer(&WSAID_WSASENDMSG)), - uint32(unsafe.Sizeof(WSAID_WSASENDMSG)), - (*byte)(unsafe.Pointer(&sendRecvMsgFunc.sendAddr)), - uint32(unsafe.Sizeof(sendRecvMsgFunc.sendAddr)), - &n, nil, 0) - }) - return sendRecvMsgFunc.err -} - -func WSASendMsg(fd Handle, msg *WSAMsg, flags uint32, bytesSent *uint32, overlapped *Overlapped, croutine *byte) error { - err := loadWSASendRecvMsg() - if err != nil { - return err - } - r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.sendAddr, 6, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(flags), uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) - if r1 == socket_error { - err = errnoErr(e1) - } - return err -} - -func WSARecvMsg(fd Handle, msg *WSAMsg, bytesReceived *uint32, overlapped *Overlapped, croutine *byte) error { - err := loadWSASendRecvMsg() - if err != nil { - return err - } - r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.recvAddr, 5, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(bytesReceived)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0) - if r1 == socket_error { - err = errnoErr(e1) - } - return err -} - -// Invented structures to support what package os expects. -type Rusage struct { - CreationTime Filetime - ExitTime Filetime - KernelTime Filetime - UserTime Filetime -} - -type WaitStatus struct { - ExitCode uint32 -} - -func (w WaitStatus) Exited() bool { return true } - -func (w WaitStatus) ExitStatus() int { return int(w.ExitCode) } - -func (w WaitStatus) Signal() Signal { return -1 } - -func (w WaitStatus) CoreDump() bool { return false } - -func (w WaitStatus) Stopped() bool { return false } - -func (w WaitStatus) Continued() bool { return false } - -func (w WaitStatus) StopSignal() Signal { return -1 } - -func (w WaitStatus) Signaled() bool { return false } - -func (w WaitStatus) TrapCause() int { return -1 } - -// Timespec is an invented structure on Windows, but here for -// consistency with the corresponding package for other operating systems. 
-type Timespec struct { - Sec int64 - Nsec int64 -} - -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = nsec / 1e9 - ts.Nsec = nsec % 1e9 - return -} - -// TODO(brainman): fix all needed for net - -func Accept(fd Handle) (nfd Handle, sa Sockaddr, err error) { return 0, nil, syscall.EWINDOWS } - -func Recvfrom(fd Handle, p []byte, flags int) (n int, from Sockaddr, err error) { - var rsa RawSockaddrAny - l := int32(unsafe.Sizeof(rsa)) - n32, err := recvfrom(fd, p, int32(flags), &rsa, &l) - n = int(n32) - if err != nil { - return - } - from, err = rsa.Sockaddr() - return -} - -func Sendto(fd Handle, p []byte, flags int, to Sockaddr) (err error) { - ptr, l, err := to.sockaddr() - if err != nil { - return err - } - return sendto(fd, p, int32(flags), ptr, l) -} - -func SetsockoptTimeval(fd Handle, level, opt int, tv *Timeval) (err error) { return syscall.EWINDOWS } - -// The Linger struct is wrong but we only noticed after Go 1. -// sysLinger is the real system call structure. - -// BUG(brainman): The definition of Linger is not appropriate for direct use -// with Setsockopt and Getsockopt. -// Use SetsockoptLinger instead. - -type Linger struct { - Onoff int32 - Linger int32 -} - -type sysLinger struct { - Onoff uint16 - Linger uint16 -} - -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - Interface [4]byte /* in_addr */ -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - -func GetsockoptInt(fd Handle, level, opt int) (int, error) { - v := int32(0) - l := int32(unsafe.Sizeof(v)) - err := Getsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&v)), &l) - return int(v), err -} - -func SetsockoptLinger(fd Handle, level, opt int, l *Linger) (err error) { - sys := sysLinger{Onoff: uint16(l.Onoff), Linger: uint16(l.Linger)} - return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&sys)), int32(unsafe.Sizeof(sys))) -} - -func SetsockoptInet4Addr(fd Handle, level, opt int, value [4]byte) (err error) { - return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&value[0])), 4) -} -func SetsockoptIPMreq(fd Handle, level, opt int, mreq *IPMreq) (err error) { - return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(mreq)), int32(unsafe.Sizeof(*mreq))) -} -func SetsockoptIPv6Mreq(fd Handle, level, opt int, mreq *IPv6Mreq) (err error) { - return syscall.EWINDOWS -} - -func Getpid() (pid int) { return int(GetCurrentProcessId()) } - -func FindFirstFile(name *uint16, data *Win32finddata) (handle Handle, err error) { - // NOTE(rsc): The Win32finddata struct is wrong for the system call: - // the two paths are each one uint16 short. Use the correct struct, - // a win32finddata1, and then copy the results out. - // There is no loss of expressivity here, because the final - // uint16, if it is used, is supposed to be a NUL, and Go doesn't need that. - // For Go 1.1, we might avoid the allocation of win32finddata1 here - // by adding a final Bug [2]uint16 field to the struct and then - // adjusting the fields in the result directly. 
- var data1 win32finddata1 - handle, err = findFirstFile1(name, &data1) - if err == nil { - copyFindData(data, &data1) - } - return -} - -func FindNextFile(handle Handle, data *Win32finddata) (err error) { - var data1 win32finddata1 - err = findNextFile1(handle, &data1) - if err == nil { - copyFindData(data, &data1) - } - return -} - -func getProcessEntry(pid int) (*ProcessEntry32, error) { - snapshot, err := CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0) - if err != nil { - return nil, err - } - defer CloseHandle(snapshot) - var procEntry ProcessEntry32 - procEntry.Size = uint32(unsafe.Sizeof(procEntry)) - if err = Process32First(snapshot, &procEntry); err != nil { - return nil, err - } - for { - if procEntry.ProcessID == uint32(pid) { - return &procEntry, nil - } - err = Process32Next(snapshot, &procEntry) - if err != nil { - return nil, err - } - } -} - -func Getppid() (ppid int) { - pe, err := getProcessEntry(Getpid()) - if err != nil { - return -1 - } - return int(pe.ParentProcessID) -} - -// TODO(brainman): fix all needed for os -func Fchdir(fd Handle) (err error) { return syscall.EWINDOWS } -func Link(oldpath, newpath string) (err error) { return syscall.EWINDOWS } -func Symlink(path, link string) (err error) { return syscall.EWINDOWS } - -func Fchmod(fd Handle, mode uint32) (err error) { return syscall.EWINDOWS } -func Chown(path string, uid int, gid int) (err error) { return syscall.EWINDOWS } -func Lchown(path string, uid int, gid int) (err error) { return syscall.EWINDOWS } -func Fchown(fd Handle, uid int, gid int) (err error) { return syscall.EWINDOWS } - -func Getuid() (uid int) { return -1 } -func Geteuid() (euid int) { return -1 } -func Getgid() (gid int) { return -1 } -func Getegid() (egid int) { return -1 } -func Getgroups() (gids []int, err error) { return nil, syscall.EWINDOWS } - -type Signal int - -func (s Signal) Signal() {} - -func (s Signal) String() string { - if 0 <= s && int(s) < len(signals) { - str := signals[s] - if str != "" { - return str - } - } - return "signal " + itoa(int(s)) -} - -func LoadCreateSymbolicLink() error { - return procCreateSymbolicLinkW.Find() -} - -// Readlink returns the destination of the named symbolic link. 
-func Readlink(path string, buf []byte) (n int, err error) { - fd, err := CreateFile(StringToUTF16Ptr(path), GENERIC_READ, 0, nil, OPEN_EXISTING, - FILE_FLAG_OPEN_REPARSE_POINT|FILE_FLAG_BACKUP_SEMANTICS, 0) - if err != nil { - return -1, err - } - defer CloseHandle(fd) - - rdbbuf := make([]byte, MAXIMUM_REPARSE_DATA_BUFFER_SIZE) - var bytesReturned uint32 - err = DeviceIoControl(fd, FSCTL_GET_REPARSE_POINT, nil, 0, &rdbbuf[0], uint32(len(rdbbuf)), &bytesReturned, nil) - if err != nil { - return -1, err - } - - rdb := (*reparseDataBuffer)(unsafe.Pointer(&rdbbuf[0])) - var s string - switch rdb.ReparseTag { - case IO_REPARSE_TAG_SYMLINK: - data := (*symbolicLinkReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer)) - p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0])) - s = UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameLength-data.PrintNameOffset)/2]) - case IO_REPARSE_TAG_MOUNT_POINT: - data := (*mountPointReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer)) - p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0])) - s = UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameLength-data.PrintNameOffset)/2]) - default: - // the path is not a symlink or junction but another type of reparse - // point - return -1, syscall.ENOENT - } - n = copy(buf, []byte(s)) - - return n, nil -} - -// GUIDFromString parses a string in the form of -// "{XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}" into a GUID. -func GUIDFromString(str string) (GUID, error) { - guid := GUID{} - str16, err := syscall.UTF16PtrFromString(str) - if err != nil { - return guid, err - } - err = clsidFromString(str16, &guid) - if err != nil { - return guid, err - } - return guid, nil -} - -// GenerateGUID creates a new random GUID. -func GenerateGUID() (GUID, error) { - guid := GUID{} - err := coCreateGuid(&guid) - if err != nil { - return guid, err - } - return guid, nil -} - -// String returns the canonical string form of the GUID, -// in the form of "{XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}". -func (guid GUID) String() string { - var str [100]uint16 - chars := stringFromGUID2(&guid, &str[0], int32(len(str))) - if chars <= 1 { - return "" - } - return string(utf16.Decode(str[:chars-1])) -} - -// KnownFolderPath returns a well-known folder path for the current user, specified by one of -// the FOLDERID_ constants, and chosen and optionally created based on a KF_ flag. -func KnownFolderPath(folderID *KNOWNFOLDERID, flags uint32) (string, error) { - return Token(0).KnownFolderPath(folderID, flags) -} - -// KnownFolderPath returns a well-known folder path for the user token, specified by one of -// the FOLDERID_ constants, and chosen and optionally created based on a KF_ flag. -func (t Token) KnownFolderPath(folderID *KNOWNFOLDERID, flags uint32) (string, error) { - var p *uint16 - err := shGetKnownFolderPath(folderID, flags, t, &p) - if err != nil { - return "", err - } - defer CoTaskMemFree(unsafe.Pointer(p)) - return UTF16PtrToString(p), nil -} - -// RtlGetVersion returns the version of the underlying operating system, ignoring -// manifest semantics but is affected by the application compatibility layer. -func RtlGetVersion() *OsVersionInfoEx { - info := &OsVersionInfoEx{} - info.osVersionInfoSize = uint32(unsafe.Sizeof(*info)) - // According to documentation, this function always succeeds. - // The function doesn't even check the validity of the - // osVersionInfoSize member. Disassembling ntdll.dll indicates - // that the documentation is indeed correct about that. 
- _ = rtlGetVersion(info) - return info -} - -// RtlGetNtVersionNumbers returns the version of the underlying operating system, -// ignoring manifest semantics and the application compatibility layer. -func RtlGetNtVersionNumbers() (majorVersion, minorVersion, buildNumber uint32) { - rtlGetNtVersionNumbers(&majorVersion, &minorVersion, &buildNumber) - buildNumber &= 0xffff - return -} - -// GetProcessPreferredUILanguages retrieves the process preferred UI languages. -func GetProcessPreferredUILanguages(flags uint32) ([]string, error) { - return getUILanguages(flags, getProcessPreferredUILanguages) -} - -// GetThreadPreferredUILanguages retrieves the thread preferred UI languages for the current thread. -func GetThreadPreferredUILanguages(flags uint32) ([]string, error) { - return getUILanguages(flags, getThreadPreferredUILanguages) -} - -// GetUserPreferredUILanguages retrieves information about the user preferred UI languages. -func GetUserPreferredUILanguages(flags uint32) ([]string, error) { - return getUILanguages(flags, getUserPreferredUILanguages) -} - -// GetSystemPreferredUILanguages retrieves the system preferred UI languages. -func GetSystemPreferredUILanguages(flags uint32) ([]string, error) { - return getUILanguages(flags, getSystemPreferredUILanguages) -} - -func getUILanguages(flags uint32, f func(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) error) ([]string, error) { - size := uint32(128) - for { - var numLanguages uint32 - buf := make([]uint16, size) - err := f(flags, &numLanguages, &buf[0], &size) - if err == ERROR_INSUFFICIENT_BUFFER { - continue - } - if err != nil { - return nil, err - } - buf = buf[:size] - if numLanguages == 0 || len(buf) == 0 { // GetProcessPreferredUILanguages may return numLanguages==0 with "\0\0" - return []string{}, nil - } - if buf[len(buf)-1] == 0 { - buf = buf[:len(buf)-1] // remove terminating null - } - languages := make([]string, 0, numLanguages) - from := 0 - for i, c := range buf { - if c == 0 { - languages = append(languages, string(utf16.Decode(buf[from:i]))) - from = i + 1 - } - } - return languages, nil - } -} - -func SetConsoleCursorPosition(console Handle, position Coord) error { - return setConsoleCursorPosition(console, *((*uint32)(unsafe.Pointer(&position)))) -} - -func (s NTStatus) Errno() syscall.Errno { - return rtlNtStatusToDosErrorNoTeb(s) -} - -func langID(pri, sub uint16) uint32 { return uint32(sub)<<10 | uint32(pri) } - -func (s NTStatus) Error() string { - b := make([]uint16, 300) - n, err := FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_FROM_HMODULE|FORMAT_MESSAGE_ARGUMENT_ARRAY, modntdll.Handle(), uint32(s), langID(LANG_ENGLISH, SUBLANG_ENGLISH_US), b, nil) - if err != nil { - return fmt.Sprintf("NTSTATUS 0x%08x", uint32(s)) - } - // trim terminating \r and \n - for ; n > 0 && (b[n-1] == '\n' || b[n-1] == '\r'); n-- { - } - return string(utf16.Decode(b[:n])) -} - -// NewNTUnicodeString returns a new NTUnicodeString structure for use with native -// NT APIs that work over the NTUnicodeString type. Note that most Windows APIs -// do not use NTUnicodeString, and instead UTF16PtrFromString should be used for -// the more common *uint16 string type. -func NewNTUnicodeString(s string) (*NTUnicodeString, error) { - var u NTUnicodeString - s16, err := UTF16PtrFromString(s) - if err != nil { - return nil, err - } - RtlInitUnicodeString(&u, s16) - return &u, nil -} - -// Slice returns a uint16 slice that aliases the data in the NTUnicodeString. 
-func (s *NTUnicodeString) Slice() []uint16 { - var slice []uint16 - hdr := (*unsafeheader.Slice)(unsafe.Pointer(&slice)) - hdr.Data = unsafe.Pointer(s.Buffer) - hdr.Len = int(s.Length) - hdr.Cap = int(s.MaximumLength) - return slice -} - -func (s *NTUnicodeString) String() string { - return UTF16ToString(s.Slice()) -} - -// NewNTString returns a new NTString structure for use with native -// NT APIs that work over the NTString type. Note that most Windows APIs -// do not use NTString, and instead UTF16PtrFromString should be used for -// the more common *uint16 string type. -func NewNTString(s string) (*NTString, error) { - var nts NTString - s8, err := BytePtrFromString(s) - if err != nil { - return nil, err - } - RtlInitString(&nts, s8) - return &nts, nil -} - -// Slice returns a byte slice that aliases the data in the NTString. -func (s *NTString) Slice() []byte { - var slice []byte - hdr := (*unsafeheader.Slice)(unsafe.Pointer(&slice)) - hdr.Data = unsafe.Pointer(s.Buffer) - hdr.Len = int(s.Length) - hdr.Cap = int(s.MaximumLength) - return slice -} - -func (s *NTString) String() string { - return ByteSliceToString(s.Slice()) -} - -// FindResource resolves a resource of the given name and resource type. -func FindResource(module Handle, name, resType ResourceIDOrString) (Handle, error) { - var namePtr, resTypePtr uintptr - var name16, resType16 *uint16 - var err error - resolvePtr := func(i interface{}, keep **uint16) (uintptr, error) { - switch v := i.(type) { - case string: - *keep, err = UTF16PtrFromString(v) - if err != nil { - return 0, err - } - return uintptr(unsafe.Pointer(*keep)), nil - case ResourceID: - return uintptr(v), nil - } - return 0, errorspkg.New("parameter must be a ResourceID or a string") - } - namePtr, err = resolvePtr(name, &name16) - if err != nil { - return 0, err - } - resTypePtr, err = resolvePtr(resType, &resType16) - if err != nil { - return 0, err - } - resInfo, err := findResource(module, namePtr, resTypePtr) - runtime.KeepAlive(name16) - runtime.KeepAlive(resType16) - return resInfo, err -} - -func LoadResourceData(module, resInfo Handle) (data []byte, err error) { - size, err := SizeofResource(module, resInfo) - if err != nil { - return - } - resData, err := LoadResource(module, resInfo) - if err != nil { - return - } - ptr, err := LockResource(resData) - if err != nil { - return - } - h := (*unsafeheader.Slice)(unsafe.Pointer(&data)) - h.Data = unsafe.Pointer(ptr) - h.Len = int(size) - h.Cap = int(size) - return -} - -// PSAPI_WORKING_SET_EX_BLOCK contains extended working set information for a page. -type PSAPI_WORKING_SET_EX_BLOCK uint64 - -// Valid returns the validity of this page. -// If this bit is 1, the subsequent members are valid; otherwise they should be ignored. -func (b PSAPI_WORKING_SET_EX_BLOCK) Valid() bool { - return (b & 1) == 1 -} - -// ShareCount is the number of processes that share this page. The maximum value of this member is 7. -func (b PSAPI_WORKING_SET_EX_BLOCK) ShareCount() uint64 { - return b.intField(1, 3) -} - -// Win32Protection is the memory protection attributes of the page. For a list of values, see -// https://docs.microsoft.com/en-us/windows/win32/memory/memory-protection-constants -func (b PSAPI_WORKING_SET_EX_BLOCK) Win32Protection() uint64 { - return b.intField(4, 11) -} - -// Shared returns the shared status of this page. -// If this bit is 1, the page can be shared. -func (b PSAPI_WORKING_SET_EX_BLOCK) Shared() bool { - return (b & (1 << 15)) == 1 -} - -// Node is the NUMA node. 
The maximum value of this member is 63. -func (b PSAPI_WORKING_SET_EX_BLOCK) Node() uint64 { - return b.intField(16, 6) -} - -// Locked returns the locked status of this page. -// If this bit is 1, the virtual page is locked in physical memory. -func (b PSAPI_WORKING_SET_EX_BLOCK) Locked() bool { - return (b & (1 << 22)) == 1 -} - -// LargePage returns the large page status of this page. -// If this bit is 1, the page is a large page. -func (b PSAPI_WORKING_SET_EX_BLOCK) LargePage() bool { - return (b & (1 << 23)) == 1 -} - -// Bad returns the bad status of this page. -// If this bit is 1, the page is has been reported as bad. -func (b PSAPI_WORKING_SET_EX_BLOCK) Bad() bool { - return (b & (1 << 31)) == 1 -} - -// intField extracts an integer field in the PSAPI_WORKING_SET_EX_BLOCK union. -func (b PSAPI_WORKING_SET_EX_BLOCK) intField(start, length int) uint64 { - var mask PSAPI_WORKING_SET_EX_BLOCK - for pos := start; pos < start+length; pos++ { - mask |= (1 << pos) - } - - masked := b & mask - return uint64(masked >> start) -} - -// PSAPI_WORKING_SET_EX_INFORMATION contains extended working set information for a process. -type PSAPI_WORKING_SET_EX_INFORMATION struct { - // The virtual address. - VirtualAddress Pointer - // A PSAPI_WORKING_SET_EX_BLOCK union that indicates the attributes of the page at VirtualAddress. - VirtualAttributes PSAPI_WORKING_SET_EX_BLOCK -} diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go deleted file mode 100644 index 88e62a638..000000000 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ /dev/null @@ -1,3349 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package windows - -import ( - "net" - "syscall" - "unsafe" -) - -// NTStatus corresponds with NTSTATUS, error values returned by ntdll.dll and -// other native functions. -type NTStatus uint32 - -const ( - // Invented values to support what package os expects. 
- O_RDONLY = 0x00000 - O_WRONLY = 0x00001 - O_RDWR = 0x00002 - O_CREAT = 0x00040 - O_EXCL = 0x00080 - O_NOCTTY = 0x00100 - O_TRUNC = 0x00200 - O_NONBLOCK = 0x00800 - O_APPEND = 0x00400 - O_SYNC = 0x01000 - O_ASYNC = 0x02000 - O_CLOEXEC = 0x80000 -) - -const ( - // More invented values for signals - SIGHUP = Signal(0x1) - SIGINT = Signal(0x2) - SIGQUIT = Signal(0x3) - SIGILL = Signal(0x4) - SIGTRAP = Signal(0x5) - SIGABRT = Signal(0x6) - SIGBUS = Signal(0x7) - SIGFPE = Signal(0x8) - SIGKILL = Signal(0x9) - SIGSEGV = Signal(0xb) - SIGPIPE = Signal(0xd) - SIGALRM = Signal(0xe) - SIGTERM = Signal(0xf) -) - -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/breakpoint trap", - 6: "aborted", - 7: "bus error", - 8: "floating point exception", - 9: "killed", - 10: "user defined signal 1", - 11: "segmentation fault", - 12: "user defined signal 2", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", -} - -const ( - FILE_READ_DATA = 0x00000001 - FILE_READ_ATTRIBUTES = 0x00000080 - FILE_READ_EA = 0x00000008 - FILE_WRITE_DATA = 0x00000002 - FILE_WRITE_ATTRIBUTES = 0x00000100 - FILE_WRITE_EA = 0x00000010 - FILE_APPEND_DATA = 0x00000004 - FILE_EXECUTE = 0x00000020 - - FILE_GENERIC_READ = STANDARD_RIGHTS_READ | FILE_READ_DATA | FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE - FILE_GENERIC_WRITE = STANDARD_RIGHTS_WRITE | FILE_WRITE_DATA | FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA | SYNCHRONIZE - FILE_GENERIC_EXECUTE = STANDARD_RIGHTS_EXECUTE | FILE_READ_ATTRIBUTES | FILE_EXECUTE | SYNCHRONIZE - - FILE_LIST_DIRECTORY = 0x00000001 - FILE_TRAVERSE = 0x00000020 - - FILE_SHARE_READ = 0x00000001 - FILE_SHARE_WRITE = 0x00000002 - FILE_SHARE_DELETE = 0x00000004 - - FILE_ATTRIBUTE_READONLY = 0x00000001 - FILE_ATTRIBUTE_HIDDEN = 0x00000002 - FILE_ATTRIBUTE_SYSTEM = 0x00000004 - FILE_ATTRIBUTE_DIRECTORY = 0x00000010 - FILE_ATTRIBUTE_ARCHIVE = 0x00000020 - FILE_ATTRIBUTE_DEVICE = 0x00000040 - FILE_ATTRIBUTE_NORMAL = 0x00000080 - FILE_ATTRIBUTE_TEMPORARY = 0x00000100 - FILE_ATTRIBUTE_SPARSE_FILE = 0x00000200 - FILE_ATTRIBUTE_REPARSE_POINT = 0x00000400 - FILE_ATTRIBUTE_COMPRESSED = 0x00000800 - FILE_ATTRIBUTE_OFFLINE = 0x00001000 - FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 0x00002000 - FILE_ATTRIBUTE_ENCRYPTED = 0x00004000 - FILE_ATTRIBUTE_INTEGRITY_STREAM = 0x00008000 - FILE_ATTRIBUTE_VIRTUAL = 0x00010000 - FILE_ATTRIBUTE_NO_SCRUB_DATA = 0x00020000 - FILE_ATTRIBUTE_RECALL_ON_OPEN = 0x00040000 - FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS = 0x00400000 - - INVALID_FILE_ATTRIBUTES = 0xffffffff - - CREATE_NEW = 1 - CREATE_ALWAYS = 2 - OPEN_EXISTING = 3 - OPEN_ALWAYS = 4 - TRUNCATE_EXISTING = 5 - - FILE_FLAG_OPEN_REQUIRING_OPLOCK = 0x00040000 - FILE_FLAG_FIRST_PIPE_INSTANCE = 0x00080000 - FILE_FLAG_OPEN_NO_RECALL = 0x00100000 - FILE_FLAG_OPEN_REPARSE_POINT = 0x00200000 - FILE_FLAG_SESSION_AWARE = 0x00800000 - FILE_FLAG_POSIX_SEMANTICS = 0x01000000 - FILE_FLAG_BACKUP_SEMANTICS = 0x02000000 - FILE_FLAG_DELETE_ON_CLOSE = 0x04000000 - FILE_FLAG_SEQUENTIAL_SCAN = 0x08000000 - FILE_FLAG_RANDOM_ACCESS = 0x10000000 - FILE_FLAG_NO_BUFFERING = 0x20000000 - FILE_FLAG_OVERLAPPED = 0x40000000 - FILE_FLAG_WRITE_THROUGH = 0x80000000 - - HANDLE_FLAG_INHERIT = 0x00000001 - STARTF_USESTDHANDLES = 0x00000100 - STARTF_USESHOWWINDOW = 0x00000001 - DUPLICATE_CLOSE_SOURCE = 0x00000001 - DUPLICATE_SAME_ACCESS = 0x00000002 - - STD_INPUT_HANDLE = -10 & (1<<32 - 1) - STD_OUTPUT_HANDLE = -11 & (1<<32 - 1) - STD_ERROR_HANDLE = -12 & (1<<32 - 1) - - FILE_BEGIN = 0 - 
FILE_CURRENT = 1 - FILE_END = 2 - - LANG_ENGLISH = 0x09 - SUBLANG_ENGLISH_US = 0x01 - - FORMAT_MESSAGE_ALLOCATE_BUFFER = 256 - FORMAT_MESSAGE_IGNORE_INSERTS = 512 - FORMAT_MESSAGE_FROM_STRING = 1024 - FORMAT_MESSAGE_FROM_HMODULE = 2048 - FORMAT_MESSAGE_FROM_SYSTEM = 4096 - FORMAT_MESSAGE_ARGUMENT_ARRAY = 8192 - FORMAT_MESSAGE_MAX_WIDTH_MASK = 255 - - MAX_PATH = 260 - MAX_LONG_PATH = 32768 - - MAX_MODULE_NAME32 = 255 - - MAX_COMPUTERNAME_LENGTH = 15 - - MAX_DHCPV6_DUID_LENGTH = 130 - - MAX_DNS_SUFFIX_STRING_LENGTH = 256 - - TIME_ZONE_ID_UNKNOWN = 0 - TIME_ZONE_ID_STANDARD = 1 - - TIME_ZONE_ID_DAYLIGHT = 2 - IGNORE = 0 - INFINITE = 0xffffffff - - WAIT_ABANDONED = 0x00000080 - WAIT_OBJECT_0 = 0x00000000 - WAIT_FAILED = 0xFFFFFFFF - - // Access rights for process. - PROCESS_CREATE_PROCESS = 0x0080 - PROCESS_CREATE_THREAD = 0x0002 - PROCESS_DUP_HANDLE = 0x0040 - PROCESS_QUERY_INFORMATION = 0x0400 - PROCESS_QUERY_LIMITED_INFORMATION = 0x1000 - PROCESS_SET_INFORMATION = 0x0200 - PROCESS_SET_QUOTA = 0x0100 - PROCESS_SUSPEND_RESUME = 0x0800 - PROCESS_TERMINATE = 0x0001 - PROCESS_VM_OPERATION = 0x0008 - PROCESS_VM_READ = 0x0010 - PROCESS_VM_WRITE = 0x0020 - - // Access rights for thread. - THREAD_DIRECT_IMPERSONATION = 0x0200 - THREAD_GET_CONTEXT = 0x0008 - THREAD_IMPERSONATE = 0x0100 - THREAD_QUERY_INFORMATION = 0x0040 - THREAD_QUERY_LIMITED_INFORMATION = 0x0800 - THREAD_SET_CONTEXT = 0x0010 - THREAD_SET_INFORMATION = 0x0020 - THREAD_SET_LIMITED_INFORMATION = 0x0400 - THREAD_SET_THREAD_TOKEN = 0x0080 - THREAD_SUSPEND_RESUME = 0x0002 - THREAD_TERMINATE = 0x0001 - - FILE_MAP_COPY = 0x01 - FILE_MAP_WRITE = 0x02 - FILE_MAP_READ = 0x04 - FILE_MAP_EXECUTE = 0x20 - - CTRL_C_EVENT = 0 - CTRL_BREAK_EVENT = 1 - CTRL_CLOSE_EVENT = 2 - CTRL_LOGOFF_EVENT = 5 - CTRL_SHUTDOWN_EVENT = 6 - - // Windows reserves errors >= 1<<29 for application use. - APPLICATION_ERROR = 1 << 29 -) - -const ( - // Process creation flags. 
- CREATE_BREAKAWAY_FROM_JOB = 0x01000000 - CREATE_DEFAULT_ERROR_MODE = 0x04000000 - CREATE_NEW_CONSOLE = 0x00000010 - CREATE_NEW_PROCESS_GROUP = 0x00000200 - CREATE_NO_WINDOW = 0x08000000 - CREATE_PROTECTED_PROCESS = 0x00040000 - CREATE_PRESERVE_CODE_AUTHZ_LEVEL = 0x02000000 - CREATE_SEPARATE_WOW_VDM = 0x00000800 - CREATE_SHARED_WOW_VDM = 0x00001000 - CREATE_SUSPENDED = 0x00000004 - CREATE_UNICODE_ENVIRONMENT = 0x00000400 - DEBUG_ONLY_THIS_PROCESS = 0x00000002 - DEBUG_PROCESS = 0x00000001 - DETACHED_PROCESS = 0x00000008 - EXTENDED_STARTUPINFO_PRESENT = 0x00080000 - INHERIT_PARENT_AFFINITY = 0x00010000 -) - -const ( - // attributes for ProcThreadAttributeList - PROC_THREAD_ATTRIBUTE_PARENT_PROCESS = 0x00020000 - PROC_THREAD_ATTRIBUTE_HANDLE_LIST = 0x00020002 - PROC_THREAD_ATTRIBUTE_GROUP_AFFINITY = 0x00030003 - PROC_THREAD_ATTRIBUTE_PREFERRED_NODE = 0x00020004 - PROC_THREAD_ATTRIBUTE_IDEAL_PROCESSOR = 0x00030005 - PROC_THREAD_ATTRIBUTE_MITIGATION_POLICY = 0x00020007 - PROC_THREAD_ATTRIBUTE_UMS_THREAD = 0x00030006 - PROC_THREAD_ATTRIBUTE_PROTECTION_LEVEL = 0x0002000b -) - -const ( - // flags for CreateToolhelp32Snapshot - TH32CS_SNAPHEAPLIST = 0x01 - TH32CS_SNAPPROCESS = 0x02 - TH32CS_SNAPTHREAD = 0x04 - TH32CS_SNAPMODULE = 0x08 - TH32CS_SNAPMODULE32 = 0x10 - TH32CS_SNAPALL = TH32CS_SNAPHEAPLIST | TH32CS_SNAPMODULE | TH32CS_SNAPPROCESS | TH32CS_SNAPTHREAD - TH32CS_INHERIT = 0x80000000 -) - -const ( - // flags for EnumProcessModulesEx - LIST_MODULES_32BIT = 0x01 - LIST_MODULES_64BIT = 0x02 - LIST_MODULES_ALL = 0x03 - LIST_MODULES_DEFAULT = 0x00 -) - -const ( - // filters for ReadDirectoryChangesW and FindFirstChangeNotificationW - FILE_NOTIFY_CHANGE_FILE_NAME = 0x001 - FILE_NOTIFY_CHANGE_DIR_NAME = 0x002 - FILE_NOTIFY_CHANGE_ATTRIBUTES = 0x004 - FILE_NOTIFY_CHANGE_SIZE = 0x008 - FILE_NOTIFY_CHANGE_LAST_WRITE = 0x010 - FILE_NOTIFY_CHANGE_LAST_ACCESS = 0x020 - FILE_NOTIFY_CHANGE_CREATION = 0x040 - FILE_NOTIFY_CHANGE_SECURITY = 0x100 -) - -const ( - // do not reorder - FILE_ACTION_ADDED = iota + 1 - FILE_ACTION_REMOVED - FILE_ACTION_MODIFIED - FILE_ACTION_RENAMED_OLD_NAME - FILE_ACTION_RENAMED_NEW_NAME -) - -const ( - // wincrypt.h - /* certenrolld_begin -- PROV_RSA_*/ - PROV_RSA_FULL = 1 - PROV_RSA_SIG = 2 - PROV_DSS = 3 - PROV_FORTEZZA = 4 - PROV_MS_EXCHANGE = 5 - PROV_SSL = 6 - PROV_RSA_SCHANNEL = 12 - PROV_DSS_DH = 13 - PROV_EC_ECDSA_SIG = 14 - PROV_EC_ECNRA_SIG = 15 - PROV_EC_ECDSA_FULL = 16 - PROV_EC_ECNRA_FULL = 17 - PROV_DH_SCHANNEL = 18 - PROV_SPYRUS_LYNKS = 20 - PROV_RNG = 21 - PROV_INTEL_SEC = 22 - PROV_REPLACE_OWF = 23 - PROV_RSA_AES = 24 - - /* dwFlags definitions for CryptAcquireContext */ - CRYPT_VERIFYCONTEXT = 0xF0000000 - CRYPT_NEWKEYSET = 0x00000008 - CRYPT_DELETEKEYSET = 0x00000010 - CRYPT_MACHINE_KEYSET = 0x00000020 - CRYPT_SILENT = 0x00000040 - CRYPT_DEFAULT_CONTAINER_OPTIONAL = 0x00000080 - - /* Flags for PFXImportCertStore */ - CRYPT_EXPORTABLE = 0x00000001 - CRYPT_USER_PROTECTED = 0x00000002 - CRYPT_USER_KEYSET = 0x00001000 - PKCS12_PREFER_CNG_KSP = 0x00000100 - PKCS12_ALWAYS_CNG_KSP = 0x00000200 - PKCS12_ALLOW_OVERWRITE_KEY = 0x00004000 - PKCS12_NO_PERSIST_KEY = 0x00008000 - PKCS12_INCLUDE_EXTENDED_PROPERTIES = 0x00000010 - - /* Flags for CryptAcquireCertificatePrivateKey */ - CRYPT_ACQUIRE_CACHE_FLAG = 0x00000001 - CRYPT_ACQUIRE_USE_PROV_INFO_FLAG = 0x00000002 - CRYPT_ACQUIRE_COMPARE_KEY_FLAG = 0x00000004 - CRYPT_ACQUIRE_NO_HEALING = 0x00000008 - CRYPT_ACQUIRE_SILENT_FLAG = 0x00000040 - CRYPT_ACQUIRE_WINDOW_HANDLE_FLAG = 0x00000080 - CRYPT_ACQUIRE_NCRYPT_KEY_FLAGS_MASK 
= 0x00070000 - CRYPT_ACQUIRE_ALLOW_NCRYPT_KEY_FLAG = 0x00010000 - CRYPT_ACQUIRE_PREFER_NCRYPT_KEY_FLAG = 0x00020000 - CRYPT_ACQUIRE_ONLY_NCRYPT_KEY_FLAG = 0x00040000 - - /* pdwKeySpec for CryptAcquireCertificatePrivateKey */ - AT_KEYEXCHANGE = 1 - AT_SIGNATURE = 2 - CERT_NCRYPT_KEY_SPEC = 0xFFFFFFFF - - /* Default usage match type is AND with value zero */ - USAGE_MATCH_TYPE_AND = 0 - USAGE_MATCH_TYPE_OR = 1 - - /* msgAndCertEncodingType values for CertOpenStore function */ - X509_ASN_ENCODING = 0x00000001 - PKCS_7_ASN_ENCODING = 0x00010000 - - /* storeProvider values for CertOpenStore function */ - CERT_STORE_PROV_MSG = 1 - CERT_STORE_PROV_MEMORY = 2 - CERT_STORE_PROV_FILE = 3 - CERT_STORE_PROV_REG = 4 - CERT_STORE_PROV_PKCS7 = 5 - CERT_STORE_PROV_SERIALIZED = 6 - CERT_STORE_PROV_FILENAME_A = 7 - CERT_STORE_PROV_FILENAME_W = 8 - CERT_STORE_PROV_FILENAME = CERT_STORE_PROV_FILENAME_W - CERT_STORE_PROV_SYSTEM_A = 9 - CERT_STORE_PROV_SYSTEM_W = 10 - CERT_STORE_PROV_SYSTEM = CERT_STORE_PROV_SYSTEM_W - CERT_STORE_PROV_COLLECTION = 11 - CERT_STORE_PROV_SYSTEM_REGISTRY_A = 12 - CERT_STORE_PROV_SYSTEM_REGISTRY_W = 13 - CERT_STORE_PROV_SYSTEM_REGISTRY = CERT_STORE_PROV_SYSTEM_REGISTRY_W - CERT_STORE_PROV_PHYSICAL_W = 14 - CERT_STORE_PROV_PHYSICAL = CERT_STORE_PROV_PHYSICAL_W - CERT_STORE_PROV_SMART_CARD_W = 15 - CERT_STORE_PROV_SMART_CARD = CERT_STORE_PROV_SMART_CARD_W - CERT_STORE_PROV_LDAP_W = 16 - CERT_STORE_PROV_LDAP = CERT_STORE_PROV_LDAP_W - CERT_STORE_PROV_PKCS12 = 17 - - /* store characteristics (low WORD of flag) for CertOpenStore function */ - CERT_STORE_NO_CRYPT_RELEASE_FLAG = 0x00000001 - CERT_STORE_SET_LOCALIZED_NAME_FLAG = 0x00000002 - CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG = 0x00000004 - CERT_STORE_DELETE_FLAG = 0x00000010 - CERT_STORE_UNSAFE_PHYSICAL_FLAG = 0x00000020 - CERT_STORE_SHARE_STORE_FLAG = 0x00000040 - CERT_STORE_SHARE_CONTEXT_FLAG = 0x00000080 - CERT_STORE_MANIFOLD_FLAG = 0x00000100 - CERT_STORE_ENUM_ARCHIVED_FLAG = 0x00000200 - CERT_STORE_UPDATE_KEYID_FLAG = 0x00000400 - CERT_STORE_BACKUP_RESTORE_FLAG = 0x00000800 - CERT_STORE_MAXIMUM_ALLOWED_FLAG = 0x00001000 - CERT_STORE_CREATE_NEW_FLAG = 0x00002000 - CERT_STORE_OPEN_EXISTING_FLAG = 0x00004000 - CERT_STORE_READONLY_FLAG = 0x00008000 - - /* store locations (high WORD of flag) for CertOpenStore function */ - CERT_SYSTEM_STORE_CURRENT_USER = 0x00010000 - CERT_SYSTEM_STORE_LOCAL_MACHINE = 0x00020000 - CERT_SYSTEM_STORE_CURRENT_SERVICE = 0x00040000 - CERT_SYSTEM_STORE_SERVICES = 0x00050000 - CERT_SYSTEM_STORE_USERS = 0x00060000 - CERT_SYSTEM_STORE_CURRENT_USER_GROUP_POLICY = 0x00070000 - CERT_SYSTEM_STORE_LOCAL_MACHINE_GROUP_POLICY = 0x00080000 - CERT_SYSTEM_STORE_LOCAL_MACHINE_ENTERPRISE = 0x00090000 - CERT_SYSTEM_STORE_UNPROTECTED_FLAG = 0x40000000 - CERT_SYSTEM_STORE_RELOCATE_FLAG = 0x80000000 - - /* Miscellaneous high-WORD flags for CertOpenStore function */ - CERT_REGISTRY_STORE_REMOTE_FLAG = 0x00010000 - CERT_REGISTRY_STORE_SERIALIZED_FLAG = 0x00020000 - CERT_REGISTRY_STORE_ROAMING_FLAG = 0x00040000 - CERT_REGISTRY_STORE_MY_IE_DIRTY_FLAG = 0x00080000 - CERT_REGISTRY_STORE_LM_GPT_FLAG = 0x01000000 - CERT_REGISTRY_STORE_CLIENT_GPT_FLAG = 0x80000000 - CERT_FILE_STORE_COMMIT_ENABLE_FLAG = 0x00010000 - CERT_LDAP_STORE_SIGN_FLAG = 0x00010000 - CERT_LDAP_STORE_AREC_EXCLUSIVE_FLAG = 0x00020000 - CERT_LDAP_STORE_OPENED_FLAG = 0x00040000 - CERT_LDAP_STORE_UNBIND_FLAG = 0x00080000 - - /* addDisposition values for CertAddCertificateContextToStore function */ - CERT_STORE_ADD_NEW = 1 - CERT_STORE_ADD_USE_EXISTING = 2 - 
CERT_STORE_ADD_REPLACE_EXISTING = 3 - CERT_STORE_ADD_ALWAYS = 4 - CERT_STORE_ADD_REPLACE_EXISTING_INHERIT_PROPERTIES = 5 - CERT_STORE_ADD_NEWER = 6 - CERT_STORE_ADD_NEWER_INHERIT_PROPERTIES = 7 - - /* ErrorStatus values for CertTrustStatus struct */ - CERT_TRUST_NO_ERROR = 0x00000000 - CERT_TRUST_IS_NOT_TIME_VALID = 0x00000001 - CERT_TRUST_IS_REVOKED = 0x00000004 - CERT_TRUST_IS_NOT_SIGNATURE_VALID = 0x00000008 - CERT_TRUST_IS_NOT_VALID_FOR_USAGE = 0x00000010 - CERT_TRUST_IS_UNTRUSTED_ROOT = 0x00000020 - CERT_TRUST_REVOCATION_STATUS_UNKNOWN = 0x00000040 - CERT_TRUST_IS_CYCLIC = 0x00000080 - CERT_TRUST_INVALID_EXTENSION = 0x00000100 - CERT_TRUST_INVALID_POLICY_CONSTRAINTS = 0x00000200 - CERT_TRUST_INVALID_BASIC_CONSTRAINTS = 0x00000400 - CERT_TRUST_INVALID_NAME_CONSTRAINTS = 0x00000800 - CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT = 0x00001000 - CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT = 0x00002000 - CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT = 0x00004000 - CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT = 0x00008000 - CERT_TRUST_IS_PARTIAL_CHAIN = 0x00010000 - CERT_TRUST_CTL_IS_NOT_TIME_VALID = 0x00020000 - CERT_TRUST_CTL_IS_NOT_SIGNATURE_VALID = 0x00040000 - CERT_TRUST_CTL_IS_NOT_VALID_FOR_USAGE = 0x00080000 - CERT_TRUST_HAS_WEAK_SIGNATURE = 0x00100000 - CERT_TRUST_IS_OFFLINE_REVOCATION = 0x01000000 - CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY = 0x02000000 - CERT_TRUST_IS_EXPLICIT_DISTRUST = 0x04000000 - CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT = 0x08000000 - - /* InfoStatus values for CertTrustStatus struct */ - CERT_TRUST_HAS_EXACT_MATCH_ISSUER = 0x00000001 - CERT_TRUST_HAS_KEY_MATCH_ISSUER = 0x00000002 - CERT_TRUST_HAS_NAME_MATCH_ISSUER = 0x00000004 - CERT_TRUST_IS_SELF_SIGNED = 0x00000008 - CERT_TRUST_HAS_PREFERRED_ISSUER = 0x00000100 - CERT_TRUST_HAS_ISSUANCE_CHAIN_POLICY = 0x00000400 - CERT_TRUST_HAS_VALID_NAME_CONSTRAINTS = 0x00000400 - CERT_TRUST_IS_PEER_TRUSTED = 0x00000800 - CERT_TRUST_HAS_CRL_VALIDITY_EXTENDED = 0x00001000 - CERT_TRUST_IS_FROM_EXCLUSIVE_TRUST_STORE = 0x00002000 - CERT_TRUST_IS_CA_TRUSTED = 0x00004000 - CERT_TRUST_IS_COMPLEX_CHAIN = 0x00010000 - - /* Certificate Information Flags */ - CERT_INFO_VERSION_FLAG = 1 - CERT_INFO_SERIAL_NUMBER_FLAG = 2 - CERT_INFO_SIGNATURE_ALGORITHM_FLAG = 3 - CERT_INFO_ISSUER_FLAG = 4 - CERT_INFO_NOT_BEFORE_FLAG = 5 - CERT_INFO_NOT_AFTER_FLAG = 6 - CERT_INFO_SUBJECT_FLAG = 7 - CERT_INFO_SUBJECT_PUBLIC_KEY_INFO_FLAG = 8 - CERT_INFO_ISSUER_UNIQUE_ID_FLAG = 9 - CERT_INFO_SUBJECT_UNIQUE_ID_FLAG = 10 - CERT_INFO_EXTENSION_FLAG = 11 - - /* dwFindType for CertFindCertificateInStore */ - CERT_COMPARE_MASK = 0xFFFF - CERT_COMPARE_SHIFT = 16 - CERT_COMPARE_ANY = 0 - CERT_COMPARE_SHA1_HASH = 1 - CERT_COMPARE_NAME = 2 - CERT_COMPARE_ATTR = 3 - CERT_COMPARE_MD5_HASH = 4 - CERT_COMPARE_PROPERTY = 5 - CERT_COMPARE_PUBLIC_KEY = 6 - CERT_COMPARE_HASH = CERT_COMPARE_SHA1_HASH - CERT_COMPARE_NAME_STR_A = 7 - CERT_COMPARE_NAME_STR_W = 8 - CERT_COMPARE_KEY_SPEC = 9 - CERT_COMPARE_ENHKEY_USAGE = 10 - CERT_COMPARE_CTL_USAGE = CERT_COMPARE_ENHKEY_USAGE - CERT_COMPARE_SUBJECT_CERT = 11 - CERT_COMPARE_ISSUER_OF = 12 - CERT_COMPARE_EXISTING = 13 - CERT_COMPARE_SIGNATURE_HASH = 14 - CERT_COMPARE_KEY_IDENTIFIER = 15 - CERT_COMPARE_CERT_ID = 16 - CERT_COMPARE_CROSS_CERT_DIST_POINTS = 17 - CERT_COMPARE_PUBKEY_MD5_HASH = 18 - CERT_COMPARE_SUBJECT_INFO_ACCESS = 19 - CERT_COMPARE_HASH_STR = 20 - CERT_COMPARE_HAS_PRIVATE_KEY = 21 - CERT_FIND_ANY = (CERT_COMPARE_ANY << CERT_COMPARE_SHIFT) - CERT_FIND_SHA1_HASH = (CERT_COMPARE_SHA1_HASH << CERT_COMPARE_SHIFT) - 
CERT_FIND_MD5_HASH = (CERT_COMPARE_MD5_HASH << CERT_COMPARE_SHIFT) - CERT_FIND_SIGNATURE_HASH = (CERT_COMPARE_SIGNATURE_HASH << CERT_COMPARE_SHIFT) - CERT_FIND_KEY_IDENTIFIER = (CERT_COMPARE_KEY_IDENTIFIER << CERT_COMPARE_SHIFT) - CERT_FIND_HASH = CERT_FIND_SHA1_HASH - CERT_FIND_PROPERTY = (CERT_COMPARE_PROPERTY << CERT_COMPARE_SHIFT) - CERT_FIND_PUBLIC_KEY = (CERT_COMPARE_PUBLIC_KEY << CERT_COMPARE_SHIFT) - CERT_FIND_SUBJECT_NAME = (CERT_COMPARE_NAME<> 32 & 0xffffffff) - return ft -} - -type Win32finddata struct { - FileAttributes uint32 - CreationTime Filetime - LastAccessTime Filetime - LastWriteTime Filetime - FileSizeHigh uint32 - FileSizeLow uint32 - Reserved0 uint32 - Reserved1 uint32 - FileName [MAX_PATH - 1]uint16 - AlternateFileName [13]uint16 -} - -// This is the actual system call structure. -// Win32finddata is what we committed to in Go 1. -type win32finddata1 struct { - FileAttributes uint32 - CreationTime Filetime - LastAccessTime Filetime - LastWriteTime Filetime - FileSizeHigh uint32 - FileSizeLow uint32 - Reserved0 uint32 - Reserved1 uint32 - FileName [MAX_PATH]uint16 - AlternateFileName [14]uint16 - - // The Microsoft documentation for this struct¹ describes three additional - // fields: dwFileType, dwCreatorType, and wFinderFlags. However, those fields - // are empirically only present in the macOS port of the Win32 API,² and thus - // not needed for binaries built for Windows. - // - // ¹ https://docs.microsoft.com/en-us/windows/win32/api/minwinbase/ns-minwinbase-win32_find_dataw describe - // ² https://golang.org/issue/42637#issuecomment-760715755. -} - -func copyFindData(dst *Win32finddata, src *win32finddata1) { - dst.FileAttributes = src.FileAttributes - dst.CreationTime = src.CreationTime - dst.LastAccessTime = src.LastAccessTime - dst.LastWriteTime = src.LastWriteTime - dst.FileSizeHigh = src.FileSizeHigh - dst.FileSizeLow = src.FileSizeLow - dst.Reserved0 = src.Reserved0 - dst.Reserved1 = src.Reserved1 - - // The src is 1 element bigger than dst, but it must be NUL. - copy(dst.FileName[:], src.FileName[:]) - copy(dst.AlternateFileName[:], src.AlternateFileName[:]) -} - -type ByHandleFileInformation struct { - FileAttributes uint32 - CreationTime Filetime - LastAccessTime Filetime - LastWriteTime Filetime - VolumeSerialNumber uint32 - FileSizeHigh uint32 - FileSizeLow uint32 - NumberOfLinks uint32 - FileIndexHigh uint32 - FileIndexLow uint32 -} - -const ( - GetFileExInfoStandard = 0 - GetFileExMaxInfoLevel = 1 -) - -type Win32FileAttributeData struct { - FileAttributes uint32 - CreationTime Filetime - LastAccessTime Filetime - LastWriteTime Filetime - FileSizeHigh uint32 - FileSizeLow uint32 -} - -// ShowWindow constants -const ( - // winuser.h - SW_HIDE = 0 - SW_NORMAL = 1 - SW_SHOWNORMAL = 1 - SW_SHOWMINIMIZED = 2 - SW_SHOWMAXIMIZED = 3 - SW_MAXIMIZE = 3 - SW_SHOWNOACTIVATE = 4 - SW_SHOW = 5 - SW_MINIMIZE = 6 - SW_SHOWMINNOACTIVE = 7 - SW_SHOWNA = 8 - SW_RESTORE = 9 - SW_SHOWDEFAULT = 10 - SW_FORCEMINIMIZE = 11 -) - -type StartupInfo struct { - Cb uint32 - _ *uint16 - Desktop *uint16 - Title *uint16 - X uint32 - Y uint32 - XSize uint32 - YSize uint32 - XCountChars uint32 - YCountChars uint32 - FillAttribute uint32 - Flags uint32 - ShowWindow uint16 - _ uint16 - _ *byte - StdInput Handle - StdOutput Handle - StdErr Handle -} - -type StartupInfoEx struct { - StartupInfo - ProcThreadAttributeList *ProcThreadAttributeList -} - -// ProcThreadAttributeList is a placeholder type to represent a PROC_THREAD_ATTRIBUTE_LIST. 
-// -// To create a *ProcThreadAttributeList, use NewProcThreadAttributeList, update -// it with ProcThreadAttributeListContainer.Update, free its memory using -// ProcThreadAttributeListContainer.Delete, and access the list itself using -// ProcThreadAttributeListContainer.List. -type ProcThreadAttributeList struct{} - -type ProcThreadAttributeListContainer struct { - data *ProcThreadAttributeList - pointers []unsafe.Pointer -} - -type ProcessInformation struct { - Process Handle - Thread Handle - ProcessId uint32 - ThreadId uint32 -} - -type ProcessEntry32 struct { - Size uint32 - Usage uint32 - ProcessID uint32 - DefaultHeapID uintptr - ModuleID uint32 - Threads uint32 - ParentProcessID uint32 - PriClassBase int32 - Flags uint32 - ExeFile [MAX_PATH]uint16 -} - -type ThreadEntry32 struct { - Size uint32 - Usage uint32 - ThreadID uint32 - OwnerProcessID uint32 - BasePri int32 - DeltaPri int32 - Flags uint32 -} - -type ModuleEntry32 struct { - Size uint32 - ModuleID uint32 - ProcessID uint32 - GlblcntUsage uint32 - ProccntUsage uint32 - ModBaseAddr uintptr - ModBaseSize uint32 - ModuleHandle Handle - Module [MAX_MODULE_NAME32 + 1]uint16 - ExePath [MAX_PATH]uint16 -} - -const SizeofModuleEntry32 = unsafe.Sizeof(ModuleEntry32{}) - -type Systemtime struct { - Year uint16 - Month uint16 - DayOfWeek uint16 - Day uint16 - Hour uint16 - Minute uint16 - Second uint16 - Milliseconds uint16 -} - -type Timezoneinformation struct { - Bias int32 - StandardName [32]uint16 - StandardDate Systemtime - StandardBias int32 - DaylightName [32]uint16 - DaylightDate Systemtime - DaylightBias int32 -} - -// Socket related. - -const ( - AF_UNSPEC = 0 - AF_UNIX = 1 - AF_INET = 2 - AF_NETBIOS = 17 - AF_INET6 = 23 - AF_IRDA = 26 - AF_BTH = 32 - - SOCK_STREAM = 1 - SOCK_DGRAM = 2 - SOCK_RAW = 3 - SOCK_RDM = 4 - SOCK_SEQPACKET = 5 - - IPPROTO_IP = 0 - IPPROTO_ICMP = 1 - IPPROTO_IGMP = 2 - BTHPROTO_RFCOMM = 3 - IPPROTO_TCP = 6 - IPPROTO_UDP = 17 - IPPROTO_IPV6 = 41 - IPPROTO_ICMPV6 = 58 - IPPROTO_RM = 113 - - SOL_SOCKET = 0xffff - SO_REUSEADDR = 4 - SO_KEEPALIVE = 8 - SO_DONTROUTE = 16 - SO_BROADCAST = 32 - SO_LINGER = 128 - SO_RCVBUF = 0x1002 - SO_RCVTIMEO = 0x1006 - SO_SNDBUF = 0x1001 - SO_UPDATE_ACCEPT_CONTEXT = 0x700b - SO_UPDATE_CONNECT_CONTEXT = 0x7010 - - IOC_OUT = 0x40000000 - IOC_IN = 0x80000000 - IOC_VENDOR = 0x18000000 - IOC_INOUT = IOC_IN | IOC_OUT - IOC_WS2 = 0x08000000 - SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_INOUT | IOC_WS2 | 6 - SIO_KEEPALIVE_VALS = IOC_IN | IOC_VENDOR | 4 - SIO_UDP_CONNRESET = IOC_IN | IOC_VENDOR | 12 - - // cf. 
http://support.microsoft.com/default.aspx?scid=kb;en-us;257460 - - IP_HDRINCL = 0x2 - IP_TOS = 0x3 - IP_TTL = 0x4 - IP_MULTICAST_IF = 0x9 - IP_MULTICAST_TTL = 0xa - IP_MULTICAST_LOOP = 0xb - IP_ADD_MEMBERSHIP = 0xc - IP_DROP_MEMBERSHIP = 0xd - IP_PKTINFO = 0x13 - - IPV6_V6ONLY = 0x1b - IPV6_UNICAST_HOPS = 0x4 - IPV6_MULTICAST_IF = 0x9 - IPV6_MULTICAST_HOPS = 0xa - IPV6_MULTICAST_LOOP = 0xb - IPV6_JOIN_GROUP = 0xc - IPV6_LEAVE_GROUP = 0xd - IPV6_PKTINFO = 0x13 - - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_DONTROUTE = 0x4 - MSG_WAITALL = 0x8 - - MSG_TRUNC = 0x0100 - MSG_CTRUNC = 0x0200 - MSG_BCAST = 0x0400 - MSG_MCAST = 0x0800 - - SOMAXCONN = 0x7fffffff - - TCP_NODELAY = 1 - - SHUT_RD = 0 - SHUT_WR = 1 - SHUT_RDWR = 2 - - WSADESCRIPTION_LEN = 256 - WSASYS_STATUS_LEN = 128 -) - -type WSABuf struct { - Len uint32 - Buf *byte -} - -type WSAMsg struct { - Name *syscall.RawSockaddrAny - Namelen int32 - Buffers *WSABuf - BufferCount uint32 - Control WSABuf - Flags uint32 -} - -// Flags for WSASocket -const ( - WSA_FLAG_OVERLAPPED = 0x01 - WSA_FLAG_MULTIPOINT_C_ROOT = 0x02 - WSA_FLAG_MULTIPOINT_C_LEAF = 0x04 - WSA_FLAG_MULTIPOINT_D_ROOT = 0x08 - WSA_FLAG_MULTIPOINT_D_LEAF = 0x10 - WSA_FLAG_ACCESS_SYSTEM_SECURITY = 0x40 - WSA_FLAG_NO_HANDLE_INHERIT = 0x80 - WSA_FLAG_REGISTERED_IO = 0x100 -) - -// Invented values to support what package os expects. -const ( - S_IFMT = 0x1f000 - S_IFIFO = 0x1000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFBLK = 0x6000 - S_IFREG = 0x8000 - S_IFLNK = 0xa000 - S_IFSOCK = 0xc000 - S_ISUID = 0x800 - S_ISGID = 0x400 - S_ISVTX = 0x200 - S_IRUSR = 0x100 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXUSR = 0x40 -) - -const ( - FILE_TYPE_CHAR = 0x0002 - FILE_TYPE_DISK = 0x0001 - FILE_TYPE_PIPE = 0x0003 - FILE_TYPE_REMOTE = 0x8000 - FILE_TYPE_UNKNOWN = 0x0000 -) - -type Hostent struct { - Name *byte - Aliases **byte - AddrType uint16 - Length uint16 - AddrList **byte -} - -type Protoent struct { - Name *byte - Aliases **byte - Proto uint16 -} - -const ( - DNS_TYPE_A = 0x0001 - DNS_TYPE_NS = 0x0002 - DNS_TYPE_MD = 0x0003 - DNS_TYPE_MF = 0x0004 - DNS_TYPE_CNAME = 0x0005 - DNS_TYPE_SOA = 0x0006 - DNS_TYPE_MB = 0x0007 - DNS_TYPE_MG = 0x0008 - DNS_TYPE_MR = 0x0009 - DNS_TYPE_NULL = 0x000a - DNS_TYPE_WKS = 0x000b - DNS_TYPE_PTR = 0x000c - DNS_TYPE_HINFO = 0x000d - DNS_TYPE_MINFO = 0x000e - DNS_TYPE_MX = 0x000f - DNS_TYPE_TEXT = 0x0010 - DNS_TYPE_RP = 0x0011 - DNS_TYPE_AFSDB = 0x0012 - DNS_TYPE_X25 = 0x0013 - DNS_TYPE_ISDN = 0x0014 - DNS_TYPE_RT = 0x0015 - DNS_TYPE_NSAP = 0x0016 - DNS_TYPE_NSAPPTR = 0x0017 - DNS_TYPE_SIG = 0x0018 - DNS_TYPE_KEY = 0x0019 - DNS_TYPE_PX = 0x001a - DNS_TYPE_GPOS = 0x001b - DNS_TYPE_AAAA = 0x001c - DNS_TYPE_LOC = 0x001d - DNS_TYPE_NXT = 0x001e - DNS_TYPE_EID = 0x001f - DNS_TYPE_NIMLOC = 0x0020 - DNS_TYPE_SRV = 0x0021 - DNS_TYPE_ATMA = 0x0022 - DNS_TYPE_NAPTR = 0x0023 - DNS_TYPE_KX = 0x0024 - DNS_TYPE_CERT = 0x0025 - DNS_TYPE_A6 = 0x0026 - DNS_TYPE_DNAME = 0x0027 - DNS_TYPE_SINK = 0x0028 - DNS_TYPE_OPT = 0x0029 - DNS_TYPE_DS = 0x002B - DNS_TYPE_RRSIG = 0x002E - DNS_TYPE_NSEC = 0x002F - DNS_TYPE_DNSKEY = 0x0030 - DNS_TYPE_DHCID = 0x0031 - DNS_TYPE_UINFO = 0x0064 - DNS_TYPE_UID = 0x0065 - DNS_TYPE_GID = 0x0066 - DNS_TYPE_UNSPEC = 0x0067 - DNS_TYPE_ADDRS = 0x00f8 - DNS_TYPE_TKEY = 0x00f9 - DNS_TYPE_TSIG = 0x00fa - DNS_TYPE_IXFR = 0x00fb - DNS_TYPE_AXFR = 0x00fc - DNS_TYPE_MAILB = 0x00fd - DNS_TYPE_MAILA = 0x00fe - DNS_TYPE_ALL = 0x00ff - DNS_TYPE_ANY = 0x00ff - DNS_TYPE_WINS = 0xff01 - DNS_TYPE_WINSR = 0xff02 - DNS_TYPE_NBSTAT = 0xff01 -) - -const ( - // flags inside 
DNSRecord.Dw - DnsSectionQuestion = 0x0000 - DnsSectionAnswer = 0x0001 - DnsSectionAuthority = 0x0002 - DnsSectionAdditional = 0x0003 -) - -const ( - // flags of WSALookupService - LUP_DEEP = 0x0001 - LUP_CONTAINERS = 0x0002 - LUP_NOCONTAINERS = 0x0004 - LUP_NEAREST = 0x0008 - LUP_RETURN_NAME = 0x0010 - LUP_RETURN_TYPE = 0x0020 - LUP_RETURN_VERSION = 0x0040 - LUP_RETURN_COMMENT = 0x0080 - LUP_RETURN_ADDR = 0x0100 - LUP_RETURN_BLOB = 0x0200 - LUP_RETURN_ALIASES = 0x0400 - LUP_RETURN_QUERY_STRING = 0x0800 - LUP_RETURN_ALL = 0x0FF0 - LUP_RES_SERVICE = 0x8000 - - LUP_FLUSHCACHE = 0x1000 - LUP_FLUSHPREVIOUS = 0x2000 - - LUP_NON_AUTHORITATIVE = 0x4000 - LUP_SECURE = 0x8000 - LUP_RETURN_PREFERRED_NAMES = 0x10000 - LUP_DNS_ONLY = 0x20000 - - LUP_ADDRCONFIG = 0x100000 - LUP_DUAL_ADDR = 0x200000 - LUP_FILESERVER = 0x400000 - LUP_DISABLE_IDN_ENCODING = 0x00800000 - LUP_API_ANSI = 0x01000000 - - LUP_RESOLUTION_HANDLE = 0x80000000 -) - -const ( - // values of WSAQUERYSET's namespace - NS_ALL = 0 - NS_DNS = 12 - NS_NLA = 15 - NS_BTH = 16 - NS_EMAIL = 37 - NS_PNRPNAME = 38 - NS_PNRPCLOUD = 39 -) - -type DNSSRVData struct { - Target *uint16 - Priority uint16 - Weight uint16 - Port uint16 - Pad uint16 -} - -type DNSPTRData struct { - Host *uint16 -} - -type DNSMXData struct { - NameExchange *uint16 - Preference uint16 - Pad uint16 -} - -type DNSTXTData struct { - StringCount uint16 - StringArray [1]*uint16 -} - -type DNSRecord struct { - Next *DNSRecord - Name *uint16 - Type uint16 - Length uint16 - Dw uint32 - Ttl uint32 - Reserved uint32 - Data [40]byte -} - -const ( - TF_DISCONNECT = 1 - TF_REUSE_SOCKET = 2 - TF_WRITE_BEHIND = 4 - TF_USE_DEFAULT_WORKER = 0 - TF_USE_SYSTEM_THREAD = 16 - TF_USE_KERNEL_APC = 32 -) - -type TransmitFileBuffers struct { - Head uintptr - HeadLength uint32 - Tail uintptr - TailLength uint32 -} - -const ( - IFF_UP = 1 - IFF_BROADCAST = 2 - IFF_LOOPBACK = 4 - IFF_POINTTOPOINT = 8 - IFF_MULTICAST = 16 -) - -const SIO_GET_INTERFACE_LIST = 0x4004747F - -// TODO(mattn): SockaddrGen is union of sockaddr/sockaddr_in/sockaddr_in6_old. -// will be fixed to change variable type as suitable. 
- -type SockaddrGen [24]byte - -type InterfaceInfo struct { - Flags uint32 - Address SockaddrGen - BroadcastAddress SockaddrGen - Netmask SockaddrGen -} - -type IpAddressString struct { - String [16]byte -} - -type IpMaskString IpAddressString - -type IpAddrString struct { - Next *IpAddrString - IpAddress IpAddressString - IpMask IpMaskString - Context uint32 -} - -const MAX_ADAPTER_NAME_LENGTH = 256 -const MAX_ADAPTER_DESCRIPTION_LENGTH = 128 -const MAX_ADAPTER_ADDRESS_LENGTH = 8 - -type IpAdapterInfo struct { - Next *IpAdapterInfo - ComboIndex uint32 - AdapterName [MAX_ADAPTER_NAME_LENGTH + 4]byte - Description [MAX_ADAPTER_DESCRIPTION_LENGTH + 4]byte - AddressLength uint32 - Address [MAX_ADAPTER_ADDRESS_LENGTH]byte - Index uint32 - Type uint32 - DhcpEnabled uint32 - CurrentIpAddress *IpAddrString - IpAddressList IpAddrString - GatewayList IpAddrString - DhcpServer IpAddrString - HaveWins bool - PrimaryWinsServer IpAddrString - SecondaryWinsServer IpAddrString - LeaseObtained int64 - LeaseExpires int64 -} - -const MAXLEN_PHYSADDR = 8 -const MAX_INTERFACE_NAME_LEN = 256 -const MAXLEN_IFDESCR = 256 - -type MibIfRow struct { - Name [MAX_INTERFACE_NAME_LEN]uint16 - Index uint32 - Type uint32 - Mtu uint32 - Speed uint32 - PhysAddrLen uint32 - PhysAddr [MAXLEN_PHYSADDR]byte - AdminStatus uint32 - OperStatus uint32 - LastChange uint32 - InOctets uint32 - InUcastPkts uint32 - InNUcastPkts uint32 - InDiscards uint32 - InErrors uint32 - InUnknownProtos uint32 - OutOctets uint32 - OutUcastPkts uint32 - OutNUcastPkts uint32 - OutDiscards uint32 - OutErrors uint32 - OutQLen uint32 - DescrLen uint32 - Descr [MAXLEN_IFDESCR]byte -} - -type CertInfo struct { - Version uint32 - SerialNumber CryptIntegerBlob - SignatureAlgorithm CryptAlgorithmIdentifier - Issuer CertNameBlob - NotBefore Filetime - NotAfter Filetime - Subject CertNameBlob - SubjectPublicKeyInfo CertPublicKeyInfo - IssuerUniqueId CryptBitBlob - SubjectUniqueId CryptBitBlob - CountExtensions uint32 - Extensions *CertExtension -} - -type CertExtension struct { - ObjId *byte - Critical int32 - Value CryptObjidBlob -} - -type CryptAlgorithmIdentifier struct { - ObjId *byte - Parameters CryptObjidBlob -} - -type CertPublicKeyInfo struct { - Algorithm CryptAlgorithmIdentifier - PublicKey CryptBitBlob -} - -type DataBlob struct { - Size uint32 - Data *byte -} -type CryptIntegerBlob DataBlob -type CryptUintBlob DataBlob -type CryptObjidBlob DataBlob -type CertNameBlob DataBlob -type CertRdnValueBlob DataBlob -type CertBlob DataBlob -type CrlBlob DataBlob -type CryptDataBlob DataBlob -type CryptHashBlob DataBlob -type CryptDigestBlob DataBlob -type CryptDerBlob DataBlob -type CryptAttrBlob DataBlob - -type CryptBitBlob struct { - Size uint32 - Data *byte - UnusedBits uint32 -} - -type CertContext struct { - EncodingType uint32 - EncodedCert *byte - Length uint32 - CertInfo *CertInfo - Store Handle -} - -type CertChainContext struct { - Size uint32 - TrustStatus CertTrustStatus - ChainCount uint32 - Chains **CertSimpleChain - LowerQualityChainCount uint32 - LowerQualityChains **CertChainContext - HasRevocationFreshnessTime uint32 - RevocationFreshnessTime uint32 -} - -type CertTrustListInfo struct { - // Not implemented -} - -type CertSimpleChain struct { - Size uint32 - TrustStatus CertTrustStatus - NumElements uint32 - Elements **CertChainElement - TrustListInfo *CertTrustListInfo - HasRevocationFreshnessTime uint32 - RevocationFreshnessTime uint32 -} - -type CertChainElement struct { - Size uint32 - CertContext *CertContext - TrustStatus 
CertTrustStatus - RevocationInfo *CertRevocationInfo - IssuanceUsage *CertEnhKeyUsage - ApplicationUsage *CertEnhKeyUsage - ExtendedErrorInfo *uint16 -} - -type CertRevocationCrlInfo struct { - // Not implemented -} - -type CertRevocationInfo struct { - Size uint32 - RevocationResult uint32 - RevocationOid *byte - OidSpecificInfo Pointer - HasFreshnessTime uint32 - FreshnessTime uint32 - CrlInfo *CertRevocationCrlInfo -} - -type CertTrustStatus struct { - ErrorStatus uint32 - InfoStatus uint32 -} - -type CertUsageMatch struct { - Type uint32 - Usage CertEnhKeyUsage -} - -type CertEnhKeyUsage struct { - Length uint32 - UsageIdentifiers **byte -} - -type CertChainPara struct { - Size uint32 - RequestedUsage CertUsageMatch - RequstedIssuancePolicy CertUsageMatch - URLRetrievalTimeout uint32 - CheckRevocationFreshnessTime uint32 - RevocationFreshnessTime uint32 - CacheResync *Filetime -} - -type CertChainPolicyPara struct { - Size uint32 - Flags uint32 - ExtraPolicyPara Pointer -} - -type SSLExtraCertChainPolicyPara struct { - Size uint32 - AuthType uint32 - Checks uint32 - ServerName *uint16 -} - -type CertChainPolicyStatus struct { - Size uint32 - Error uint32 - ChainIndex uint32 - ElementIndex uint32 - ExtraPolicyStatus Pointer -} - -type CertPolicyInfo struct { - Identifier *byte - CountQualifiers uint32 - Qualifiers *CertPolicyQualifierInfo -} - -type CertPoliciesInfo struct { - Count uint32 - PolicyInfos *CertPolicyInfo -} - -type CertPolicyQualifierInfo struct { - // Not implemented -} - -type CertStrongSignPara struct { - Size uint32 - InfoChoice uint32 - InfoOrSerializedInfoOrOID unsafe.Pointer -} - -type CryptProtectPromptStruct struct { - Size uint32 - PromptFlags uint32 - App HWND - Prompt *uint16 -} - -type CertChainFindByIssuerPara struct { - Size uint32 - UsageIdentifier *byte - KeySpec uint32 - AcquirePrivateKeyFlags uint32 - IssuerCount uint32 - Issuer Pointer - FindCallback Pointer - FindArg Pointer - IssuerChainIndex *uint32 - IssuerElementIndex *uint32 -} - -type WinTrustData struct { - Size uint32 - PolicyCallbackData uintptr - SIPClientData uintptr - UIChoice uint32 - RevocationChecks uint32 - UnionChoice uint32 - FileOrCatalogOrBlobOrSgnrOrCert unsafe.Pointer - StateAction uint32 - StateData Handle - URLReference *uint16 - ProvFlags uint32 - UIContext uint32 - SignatureSettings *WinTrustSignatureSettings -} - -type WinTrustFileInfo struct { - Size uint32 - FilePath *uint16 - File Handle - KnownSubject *GUID -} - -type WinTrustSignatureSettings struct { - Size uint32 - Index uint32 - Flags uint32 - SecondarySigs uint32 - VerifiedSigIndex uint32 - CryptoPolicy *CertStrongSignPara -} - -const ( - // do not reorder - HKEY_CLASSES_ROOT = 0x80000000 + iota - HKEY_CURRENT_USER - HKEY_LOCAL_MACHINE - HKEY_USERS - HKEY_PERFORMANCE_DATA - HKEY_CURRENT_CONFIG - HKEY_DYN_DATA - - KEY_QUERY_VALUE = 1 - KEY_SET_VALUE = 2 - KEY_CREATE_SUB_KEY = 4 - KEY_ENUMERATE_SUB_KEYS = 8 - KEY_NOTIFY = 16 - KEY_CREATE_LINK = 32 - KEY_WRITE = 0x20006 - KEY_EXECUTE = 0x20019 - KEY_READ = 0x20019 - KEY_WOW64_64KEY = 0x0100 - KEY_WOW64_32KEY = 0x0200 - KEY_ALL_ACCESS = 0xf003f -) - -const ( - // do not reorder - REG_NONE = iota - REG_SZ - REG_EXPAND_SZ - REG_BINARY - REG_DWORD_LITTLE_ENDIAN - REG_DWORD_BIG_ENDIAN - REG_LINK - REG_MULTI_SZ - REG_RESOURCE_LIST - REG_FULL_RESOURCE_DESCRIPTOR - REG_RESOURCE_REQUIREMENTS_LIST - REG_QWORD_LITTLE_ENDIAN - REG_DWORD = REG_DWORD_LITTLE_ENDIAN - REG_QWORD = REG_QWORD_LITTLE_ENDIAN -) - -const ( - EVENT_MODIFY_STATE = 0x0002 - EVENT_ALL_ACCESS = 
STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x3 - - MUTANT_QUERY_STATE = 0x0001 - MUTANT_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | MUTANT_QUERY_STATE - - SEMAPHORE_MODIFY_STATE = 0x0002 - SEMAPHORE_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x3 - - TIMER_QUERY_STATE = 0x0001 - TIMER_MODIFY_STATE = 0x0002 - TIMER_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | TIMER_QUERY_STATE | TIMER_MODIFY_STATE - - MUTEX_MODIFY_STATE = MUTANT_QUERY_STATE - MUTEX_ALL_ACCESS = MUTANT_ALL_ACCESS - - CREATE_EVENT_MANUAL_RESET = 0x1 - CREATE_EVENT_INITIAL_SET = 0x2 - CREATE_MUTEX_INITIAL_OWNER = 0x1 -) - -type AddrinfoW struct { - Flags int32 - Family int32 - Socktype int32 - Protocol int32 - Addrlen uintptr - Canonname *uint16 - Addr uintptr - Next *AddrinfoW -} - -const ( - AI_PASSIVE = 1 - AI_CANONNAME = 2 - AI_NUMERICHOST = 4 -) - -type GUID struct { - Data1 uint32 - Data2 uint16 - Data3 uint16 - Data4 [8]byte -} - -var WSAID_CONNECTEX = GUID{ - 0x25a207b9, - 0xddf3, - 0x4660, - [8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e}, -} - -var WSAID_WSASENDMSG = GUID{ - 0xa441e712, - 0x754f, - 0x43ca, - [8]byte{0x84, 0xa7, 0x0d, 0xee, 0x44, 0xcf, 0x60, 0x6d}, -} - -var WSAID_WSARECVMSG = GUID{ - 0xf689d7c8, - 0x6f1f, - 0x436b, - [8]byte{0x8a, 0x53, 0xe5, 0x4f, 0xe3, 0x51, 0xc3, 0x22}, -} - -const ( - FILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1 - FILE_SKIP_SET_EVENT_ON_HANDLE = 2 -) - -const ( - WSAPROTOCOL_LEN = 255 - MAX_PROTOCOL_CHAIN = 7 - BASE_PROTOCOL = 1 - LAYERED_PROTOCOL = 0 - - XP1_CONNECTIONLESS = 0x00000001 - XP1_GUARANTEED_DELIVERY = 0x00000002 - XP1_GUARANTEED_ORDER = 0x00000004 - XP1_MESSAGE_ORIENTED = 0x00000008 - XP1_PSEUDO_STREAM = 0x00000010 - XP1_GRACEFUL_CLOSE = 0x00000020 - XP1_EXPEDITED_DATA = 0x00000040 - XP1_CONNECT_DATA = 0x00000080 - XP1_DISCONNECT_DATA = 0x00000100 - XP1_SUPPORT_BROADCAST = 0x00000200 - XP1_SUPPORT_MULTIPOINT = 0x00000400 - XP1_MULTIPOINT_CONTROL_PLANE = 0x00000800 - XP1_MULTIPOINT_DATA_PLANE = 0x00001000 - XP1_QOS_SUPPORTED = 0x00002000 - XP1_UNI_SEND = 0x00008000 - XP1_UNI_RECV = 0x00010000 - XP1_IFS_HANDLES = 0x00020000 - XP1_PARTIAL_MESSAGE = 0x00040000 - XP1_SAN_SUPPORT_SDP = 0x00080000 - - PFL_MULTIPLE_PROTO_ENTRIES = 0x00000001 - PFL_RECOMMENDED_PROTO_ENTRY = 0x00000002 - PFL_HIDDEN = 0x00000004 - PFL_MATCHES_PROTOCOL_ZERO = 0x00000008 - PFL_NETWORKDIRECT_PROVIDER = 0x00000010 -) - -type WSAProtocolInfo struct { - ServiceFlags1 uint32 - ServiceFlags2 uint32 - ServiceFlags3 uint32 - ServiceFlags4 uint32 - ProviderFlags uint32 - ProviderId GUID - CatalogEntryId uint32 - ProtocolChain WSAProtocolChain - Version int32 - AddressFamily int32 - MaxSockAddr int32 - MinSockAddr int32 - SocketType int32 - Protocol int32 - ProtocolMaxOffset int32 - NetworkByteOrder int32 - SecurityScheme int32 - MessageSize uint32 - ProviderReserved uint32 - ProtocolName [WSAPROTOCOL_LEN + 1]uint16 -} - -type WSAProtocolChain struct { - ChainLen int32 - ChainEntries [MAX_PROTOCOL_CHAIN]uint32 -} - -type TCPKeepalive struct { - OnOff uint32 - Time uint32 - Interval uint32 -} - -type symbolicLinkReparseBuffer struct { - SubstituteNameOffset uint16 - SubstituteNameLength uint16 - PrintNameOffset uint16 - PrintNameLength uint16 - Flags uint32 - PathBuffer [1]uint16 -} - -type mountPointReparseBuffer struct { - SubstituteNameOffset uint16 - SubstituteNameLength uint16 - PrintNameOffset uint16 - PrintNameLength uint16 - PathBuffer [1]uint16 -} - -type reparseDataBuffer struct { - ReparseTag uint32 - ReparseDataLength uint16 - Reserved uint16 - - // 
GenericReparseBuffer - reparseBuffer byte -} - -const ( - FSCTL_CREATE_OR_GET_OBJECT_ID = 0x0900C0 - FSCTL_DELETE_OBJECT_ID = 0x0900A0 - FSCTL_DELETE_REPARSE_POINT = 0x0900AC - FSCTL_DUPLICATE_EXTENTS_TO_FILE = 0x098344 - FSCTL_DUPLICATE_EXTENTS_TO_FILE_EX = 0x0983E8 - FSCTL_FILESYSTEM_GET_STATISTICS = 0x090060 - FSCTL_FILE_LEVEL_TRIM = 0x098208 - FSCTL_FIND_FILES_BY_SID = 0x09008F - FSCTL_GET_COMPRESSION = 0x09003C - FSCTL_GET_INTEGRITY_INFORMATION = 0x09027C - FSCTL_GET_NTFS_VOLUME_DATA = 0x090064 - FSCTL_GET_REFS_VOLUME_DATA = 0x0902D8 - FSCTL_GET_OBJECT_ID = 0x09009C - FSCTL_GET_REPARSE_POINT = 0x0900A8 - FSCTL_GET_RETRIEVAL_POINTER_COUNT = 0x09042B - FSCTL_GET_RETRIEVAL_POINTERS = 0x090073 - FSCTL_GET_RETRIEVAL_POINTERS_AND_REFCOUNT = 0x0903D3 - FSCTL_IS_PATHNAME_VALID = 0x09002C - FSCTL_LMR_SET_LINK_TRACKING_INFORMATION = 0x1400EC - FSCTL_MARK_HANDLE = 0x0900FC - FSCTL_OFFLOAD_READ = 0x094264 - FSCTL_OFFLOAD_WRITE = 0x098268 - FSCTL_PIPE_PEEK = 0x11400C - FSCTL_PIPE_TRANSCEIVE = 0x11C017 - FSCTL_PIPE_WAIT = 0x110018 - FSCTL_QUERY_ALLOCATED_RANGES = 0x0940CF - FSCTL_QUERY_FAT_BPB = 0x090058 - FSCTL_QUERY_FILE_REGIONS = 0x090284 - FSCTL_QUERY_ON_DISK_VOLUME_INFO = 0x09013C - FSCTL_QUERY_SPARING_INFO = 0x090138 - FSCTL_READ_FILE_USN_DATA = 0x0900EB - FSCTL_RECALL_FILE = 0x090117 - FSCTL_REFS_STREAM_SNAPSHOT_MANAGEMENT = 0x090440 - FSCTL_SET_COMPRESSION = 0x09C040 - FSCTL_SET_DEFECT_MANAGEMENT = 0x098134 - FSCTL_SET_ENCRYPTION = 0x0900D7 - FSCTL_SET_INTEGRITY_INFORMATION = 0x09C280 - FSCTL_SET_INTEGRITY_INFORMATION_EX = 0x090380 - FSCTL_SET_OBJECT_ID = 0x090098 - FSCTL_SET_OBJECT_ID_EXTENDED = 0x0900BC - FSCTL_SET_REPARSE_POINT = 0x0900A4 - FSCTL_SET_SPARSE = 0x0900C4 - FSCTL_SET_ZERO_DATA = 0x0980C8 - FSCTL_SET_ZERO_ON_DEALLOCATION = 0x090194 - FSCTL_SIS_COPYFILE = 0x090100 - FSCTL_WRITE_USN_CLOSE_RECORD = 0x0900EF - - MAXIMUM_REPARSE_DATA_BUFFER_SIZE = 16 * 1024 - IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003 - IO_REPARSE_TAG_SYMLINK = 0xA000000C - SYMBOLIC_LINK_FLAG_DIRECTORY = 0x1 -) - -const ( - ComputerNameNetBIOS = 0 - ComputerNameDnsHostname = 1 - ComputerNameDnsDomain = 2 - ComputerNameDnsFullyQualified = 3 - ComputerNamePhysicalNetBIOS = 4 - ComputerNamePhysicalDnsHostname = 5 - ComputerNamePhysicalDnsDomain = 6 - ComputerNamePhysicalDnsFullyQualified = 7 - ComputerNameMax = 8 -) - -// For MessageBox() -const ( - MB_OK = 0x00000000 - MB_OKCANCEL = 0x00000001 - MB_ABORTRETRYIGNORE = 0x00000002 - MB_YESNOCANCEL = 0x00000003 - MB_YESNO = 0x00000004 - MB_RETRYCANCEL = 0x00000005 - MB_CANCELTRYCONTINUE = 0x00000006 - MB_ICONHAND = 0x00000010 - MB_ICONQUESTION = 0x00000020 - MB_ICONEXCLAMATION = 0x00000030 - MB_ICONASTERISK = 0x00000040 - MB_USERICON = 0x00000080 - MB_ICONWARNING = MB_ICONEXCLAMATION - MB_ICONERROR = MB_ICONHAND - MB_ICONINFORMATION = MB_ICONASTERISK - MB_ICONSTOP = MB_ICONHAND - MB_DEFBUTTON1 = 0x00000000 - MB_DEFBUTTON2 = 0x00000100 - MB_DEFBUTTON3 = 0x00000200 - MB_DEFBUTTON4 = 0x00000300 - MB_APPLMODAL = 0x00000000 - MB_SYSTEMMODAL = 0x00001000 - MB_TASKMODAL = 0x00002000 - MB_HELP = 0x00004000 - MB_NOFOCUS = 0x00008000 - MB_SETFOREGROUND = 0x00010000 - MB_DEFAULT_DESKTOP_ONLY = 0x00020000 - MB_TOPMOST = 0x00040000 - MB_RIGHT = 0x00080000 - MB_RTLREADING = 0x00100000 - MB_SERVICE_NOTIFICATION = 0x00200000 -) - -const ( - MOVEFILE_REPLACE_EXISTING = 0x1 - MOVEFILE_COPY_ALLOWED = 0x2 - MOVEFILE_DELAY_UNTIL_REBOOT = 0x4 - MOVEFILE_WRITE_THROUGH = 0x8 - MOVEFILE_CREATE_HARDLINK = 0x10 - MOVEFILE_FAIL_IF_NOT_TRACKABLE = 0x20 -) - -const GAA_FLAG_INCLUDE_PREFIX = 0x00000010 
- -const ( - IF_TYPE_OTHER = 1 - IF_TYPE_ETHERNET_CSMACD = 6 - IF_TYPE_ISO88025_TOKENRING = 9 - IF_TYPE_PPP = 23 - IF_TYPE_SOFTWARE_LOOPBACK = 24 - IF_TYPE_ATM = 37 - IF_TYPE_IEEE80211 = 71 - IF_TYPE_TUNNEL = 131 - IF_TYPE_IEEE1394 = 144 -) - -type SocketAddress struct { - Sockaddr *syscall.RawSockaddrAny - SockaddrLength int32 -} - -// IP returns an IPv4 or IPv6 address, or nil if the underlying SocketAddress is neither. -func (addr *SocketAddress) IP() net.IP { - if uintptr(addr.SockaddrLength) >= unsafe.Sizeof(RawSockaddrInet4{}) && addr.Sockaddr.Addr.Family == AF_INET { - return (*RawSockaddrInet4)(unsafe.Pointer(addr.Sockaddr)).Addr[:] - } else if uintptr(addr.SockaddrLength) >= unsafe.Sizeof(RawSockaddrInet6{}) && addr.Sockaddr.Addr.Family == AF_INET6 { - return (*RawSockaddrInet6)(unsafe.Pointer(addr.Sockaddr)).Addr[:] - } - return nil -} - -type IpAdapterUnicastAddress struct { - Length uint32 - Flags uint32 - Next *IpAdapterUnicastAddress - Address SocketAddress - PrefixOrigin int32 - SuffixOrigin int32 - DadState int32 - ValidLifetime uint32 - PreferredLifetime uint32 - LeaseLifetime uint32 - OnLinkPrefixLength uint8 -} - -type IpAdapterAnycastAddress struct { - Length uint32 - Flags uint32 - Next *IpAdapterAnycastAddress - Address SocketAddress -} - -type IpAdapterMulticastAddress struct { - Length uint32 - Flags uint32 - Next *IpAdapterMulticastAddress - Address SocketAddress -} - -type IpAdapterDnsServerAdapter struct { - Length uint32 - Reserved uint32 - Next *IpAdapterDnsServerAdapter - Address SocketAddress -} - -type IpAdapterPrefix struct { - Length uint32 - Flags uint32 - Next *IpAdapterPrefix - Address SocketAddress - PrefixLength uint32 -} - -type IpAdapterAddresses struct { - Length uint32 - IfIndex uint32 - Next *IpAdapterAddresses - AdapterName *byte - FirstUnicastAddress *IpAdapterUnicastAddress - FirstAnycastAddress *IpAdapterAnycastAddress - FirstMulticastAddress *IpAdapterMulticastAddress - FirstDnsServerAddress *IpAdapterDnsServerAdapter - DnsSuffix *uint16 - Description *uint16 - FriendlyName *uint16 - PhysicalAddress [syscall.MAX_ADAPTER_ADDRESS_LENGTH]byte - PhysicalAddressLength uint32 - Flags uint32 - Mtu uint32 - IfType uint32 - OperStatus uint32 - Ipv6IfIndex uint32 - ZoneIndices [16]uint32 - FirstPrefix *IpAdapterPrefix - TransmitLinkSpeed uint64 - ReceiveLinkSpeed uint64 - FirstWinsServerAddress *IpAdapterWinsServerAddress - FirstGatewayAddress *IpAdapterGatewayAddress - Ipv4Metric uint32 - Ipv6Metric uint32 - Luid uint64 - Dhcpv4Server SocketAddress - CompartmentId uint32 - NetworkGuid GUID - ConnectionType uint32 - TunnelType uint32 - Dhcpv6Server SocketAddress - Dhcpv6ClientDuid [MAX_DHCPV6_DUID_LENGTH]byte - Dhcpv6ClientDuidLength uint32 - Dhcpv6Iaid uint32 - FirstDnsSuffix *IpAdapterDNSSuffix -} - -type IpAdapterWinsServerAddress struct { - Length uint32 - Reserved uint32 - Next *IpAdapterWinsServerAddress - Address SocketAddress -} - -type IpAdapterGatewayAddress struct { - Length uint32 - Reserved uint32 - Next *IpAdapterGatewayAddress - Address SocketAddress -} - -type IpAdapterDNSSuffix struct { - Next *IpAdapterDNSSuffix - String [MAX_DNS_SUFFIX_STRING_LENGTH]uint16 -} - -const ( - IfOperStatusUp = 1 - IfOperStatusDown = 2 - IfOperStatusTesting = 3 - IfOperStatusUnknown = 4 - IfOperStatusDormant = 5 - IfOperStatusNotPresent = 6 - IfOperStatusLowerLayerDown = 7 -) - -// Console related constants used for the mode parameter to SetConsoleMode. See -// https://docs.microsoft.com/en-us/windows/console/setconsolemode for details. 
- -const ( - ENABLE_PROCESSED_INPUT = 0x1 - ENABLE_LINE_INPUT = 0x2 - ENABLE_ECHO_INPUT = 0x4 - ENABLE_WINDOW_INPUT = 0x8 - ENABLE_MOUSE_INPUT = 0x10 - ENABLE_INSERT_MODE = 0x20 - ENABLE_QUICK_EDIT_MODE = 0x40 - ENABLE_EXTENDED_FLAGS = 0x80 - ENABLE_AUTO_POSITION = 0x100 - ENABLE_VIRTUAL_TERMINAL_INPUT = 0x200 - - ENABLE_PROCESSED_OUTPUT = 0x1 - ENABLE_WRAP_AT_EOL_OUTPUT = 0x2 - ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4 - DISABLE_NEWLINE_AUTO_RETURN = 0x8 - ENABLE_LVB_GRID_WORLDWIDE = 0x10 -) - -type Coord struct { - X int16 - Y int16 -} - -type SmallRect struct { - Left int16 - Top int16 - Right int16 - Bottom int16 -} - -// Used with GetConsoleScreenBuffer to retrieve information about a console -// screen buffer. See -// https://docs.microsoft.com/en-us/windows/console/console-screen-buffer-info-str -// for details. - -type ConsoleScreenBufferInfo struct { - Size Coord - CursorPosition Coord - Attributes uint16 - Window SmallRect - MaximumWindowSize Coord -} - -const UNIX_PATH_MAX = 108 // defined in afunix.h - -const ( - // flags for JOBOBJECT_BASIC_LIMIT_INFORMATION.LimitFlags - JOB_OBJECT_LIMIT_ACTIVE_PROCESS = 0x00000008 - JOB_OBJECT_LIMIT_AFFINITY = 0x00000010 - JOB_OBJECT_LIMIT_BREAKAWAY_OK = 0x00000800 - JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION = 0x00000400 - JOB_OBJECT_LIMIT_JOB_MEMORY = 0x00000200 - JOB_OBJECT_LIMIT_JOB_TIME = 0x00000004 - JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE = 0x00002000 - JOB_OBJECT_LIMIT_PRESERVE_JOB_TIME = 0x00000040 - JOB_OBJECT_LIMIT_PRIORITY_CLASS = 0x00000020 - JOB_OBJECT_LIMIT_PROCESS_MEMORY = 0x00000100 - JOB_OBJECT_LIMIT_PROCESS_TIME = 0x00000002 - JOB_OBJECT_LIMIT_SCHEDULING_CLASS = 0x00000080 - JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK = 0x00001000 - JOB_OBJECT_LIMIT_SUBSET_AFFINITY = 0x00004000 - JOB_OBJECT_LIMIT_WORKINGSET = 0x00000001 -) - -type IO_COUNTERS struct { - ReadOperationCount uint64 - WriteOperationCount uint64 - OtherOperationCount uint64 - ReadTransferCount uint64 - WriteTransferCount uint64 - OtherTransferCount uint64 -} - -type JOBOBJECT_EXTENDED_LIMIT_INFORMATION struct { - BasicLimitInformation JOBOBJECT_BASIC_LIMIT_INFORMATION - IoInfo IO_COUNTERS - ProcessMemoryLimit uintptr - JobMemoryLimit uintptr - PeakProcessMemoryUsed uintptr - PeakJobMemoryUsed uintptr -} - -const ( - // UIRestrictionsClass - JOB_OBJECT_UILIMIT_DESKTOP = 0x00000040 - JOB_OBJECT_UILIMIT_DISPLAYSETTINGS = 0x00000010 - JOB_OBJECT_UILIMIT_EXITWINDOWS = 0x00000080 - JOB_OBJECT_UILIMIT_GLOBALATOMS = 0x00000020 - JOB_OBJECT_UILIMIT_HANDLES = 0x00000001 - JOB_OBJECT_UILIMIT_READCLIPBOARD = 0x00000002 - JOB_OBJECT_UILIMIT_SYSTEMPARAMETERS = 0x00000008 - JOB_OBJECT_UILIMIT_WRITECLIPBOARD = 0x00000004 -) - -type JOBOBJECT_BASIC_UI_RESTRICTIONS struct { - UIRestrictionsClass uint32 -} - -const ( - // JobObjectInformationClass for QueryInformationJobObject and SetInformationJobObject - JobObjectAssociateCompletionPortInformation = 7 - JobObjectBasicAccountingInformation = 1 - JobObjectBasicAndIoAccountingInformation = 8 - JobObjectBasicLimitInformation = 2 - JobObjectBasicProcessIdList = 3 - JobObjectBasicUIRestrictions = 4 - JobObjectCpuRateControlInformation = 15 - JobObjectEndOfJobTimeInformation = 6 - JobObjectExtendedLimitInformation = 9 - JobObjectGroupInformation = 11 - JobObjectGroupInformationEx = 14 - JobObjectLimitViolationInformation = 13 - JobObjectLimitViolationInformation2 = 34 - JobObjectNetRateControlInformation = 32 - JobObjectNotificationLimitInformation = 12 - JobObjectNotificationLimitInformation2 = 33 - JobObjectSecurityLimitInformation = 5 -) - 
-const ( - KF_FLAG_DEFAULT = 0x00000000 - KF_FLAG_FORCE_APP_DATA_REDIRECTION = 0x00080000 - KF_FLAG_RETURN_FILTER_REDIRECTION_TARGET = 0x00040000 - KF_FLAG_FORCE_PACKAGE_REDIRECTION = 0x00020000 - KF_FLAG_NO_PACKAGE_REDIRECTION = 0x00010000 - KF_FLAG_FORCE_APPCONTAINER_REDIRECTION = 0x00020000 - KF_FLAG_NO_APPCONTAINER_REDIRECTION = 0x00010000 - KF_FLAG_CREATE = 0x00008000 - KF_FLAG_DONT_VERIFY = 0x00004000 - KF_FLAG_DONT_UNEXPAND = 0x00002000 - KF_FLAG_NO_ALIAS = 0x00001000 - KF_FLAG_INIT = 0x00000800 - KF_FLAG_DEFAULT_PATH = 0x00000400 - KF_FLAG_NOT_PARENT_RELATIVE = 0x00000200 - KF_FLAG_SIMPLE_IDLIST = 0x00000100 - KF_FLAG_ALIAS_ONLY = 0x80000000 -) - -type OsVersionInfoEx struct { - osVersionInfoSize uint32 - MajorVersion uint32 - MinorVersion uint32 - BuildNumber uint32 - PlatformId uint32 - CsdVersion [128]uint16 - ServicePackMajor uint16 - ServicePackMinor uint16 - SuiteMask uint16 - ProductType byte - _ byte -} - -const ( - EWX_LOGOFF = 0x00000000 - EWX_SHUTDOWN = 0x00000001 - EWX_REBOOT = 0x00000002 - EWX_FORCE = 0x00000004 - EWX_POWEROFF = 0x00000008 - EWX_FORCEIFHUNG = 0x00000010 - EWX_QUICKRESOLVE = 0x00000020 - EWX_RESTARTAPPS = 0x00000040 - EWX_HYBRID_SHUTDOWN = 0x00400000 - EWX_BOOTOPTIONS = 0x01000000 - - SHTDN_REASON_FLAG_COMMENT_REQUIRED = 0x01000000 - SHTDN_REASON_FLAG_DIRTY_PROBLEM_ID_REQUIRED = 0x02000000 - SHTDN_REASON_FLAG_CLEAN_UI = 0x04000000 - SHTDN_REASON_FLAG_DIRTY_UI = 0x08000000 - SHTDN_REASON_FLAG_USER_DEFINED = 0x40000000 - SHTDN_REASON_FLAG_PLANNED = 0x80000000 - SHTDN_REASON_MAJOR_OTHER = 0x00000000 - SHTDN_REASON_MAJOR_NONE = 0x00000000 - SHTDN_REASON_MAJOR_HARDWARE = 0x00010000 - SHTDN_REASON_MAJOR_OPERATINGSYSTEM = 0x00020000 - SHTDN_REASON_MAJOR_SOFTWARE = 0x00030000 - SHTDN_REASON_MAJOR_APPLICATION = 0x00040000 - SHTDN_REASON_MAJOR_SYSTEM = 0x00050000 - SHTDN_REASON_MAJOR_POWER = 0x00060000 - SHTDN_REASON_MAJOR_LEGACY_API = 0x00070000 - SHTDN_REASON_MINOR_OTHER = 0x00000000 - SHTDN_REASON_MINOR_NONE = 0x000000ff - SHTDN_REASON_MINOR_MAINTENANCE = 0x00000001 - SHTDN_REASON_MINOR_INSTALLATION = 0x00000002 - SHTDN_REASON_MINOR_UPGRADE = 0x00000003 - SHTDN_REASON_MINOR_RECONFIG = 0x00000004 - SHTDN_REASON_MINOR_HUNG = 0x00000005 - SHTDN_REASON_MINOR_UNSTABLE = 0x00000006 - SHTDN_REASON_MINOR_DISK = 0x00000007 - SHTDN_REASON_MINOR_PROCESSOR = 0x00000008 - SHTDN_REASON_MINOR_NETWORKCARD = 0x00000009 - SHTDN_REASON_MINOR_POWER_SUPPLY = 0x0000000a - SHTDN_REASON_MINOR_CORDUNPLUGGED = 0x0000000b - SHTDN_REASON_MINOR_ENVIRONMENT = 0x0000000c - SHTDN_REASON_MINOR_HARDWARE_DRIVER = 0x0000000d - SHTDN_REASON_MINOR_OTHERDRIVER = 0x0000000e - SHTDN_REASON_MINOR_BLUESCREEN = 0x0000000F - SHTDN_REASON_MINOR_SERVICEPACK = 0x00000010 - SHTDN_REASON_MINOR_HOTFIX = 0x00000011 - SHTDN_REASON_MINOR_SECURITYFIX = 0x00000012 - SHTDN_REASON_MINOR_SECURITY = 0x00000013 - SHTDN_REASON_MINOR_NETWORK_CONNECTIVITY = 0x00000014 - SHTDN_REASON_MINOR_WMI = 0x00000015 - SHTDN_REASON_MINOR_SERVICEPACK_UNINSTALL = 0x00000016 - SHTDN_REASON_MINOR_HOTFIX_UNINSTALL = 0x00000017 - SHTDN_REASON_MINOR_SECURITYFIX_UNINSTALL = 0x00000018 - SHTDN_REASON_MINOR_MMC = 0x00000019 - SHTDN_REASON_MINOR_SYSTEMRESTORE = 0x0000001a - SHTDN_REASON_MINOR_TERMSRV = 0x00000020 - SHTDN_REASON_MINOR_DC_PROMOTION = 0x00000021 - SHTDN_REASON_MINOR_DC_DEMOTION = 0x00000022 - SHTDN_REASON_UNKNOWN = SHTDN_REASON_MINOR_NONE - SHTDN_REASON_LEGACY_API = SHTDN_REASON_MAJOR_LEGACY_API | SHTDN_REASON_FLAG_PLANNED - SHTDN_REASON_VALID_BIT_MASK = 0xc0ffffff - - SHUTDOWN_NORETRY = 0x1 -) - -// Flags used for 
GetModuleHandleEx -const ( - GET_MODULE_HANDLE_EX_FLAG_PIN = 1 - GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT = 2 - GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS = 4 -) - -// MUI function flag values -const ( - MUI_LANGUAGE_ID = 0x4 - MUI_LANGUAGE_NAME = 0x8 - MUI_MERGE_SYSTEM_FALLBACK = 0x10 - MUI_MERGE_USER_FALLBACK = 0x20 - MUI_UI_FALLBACK = MUI_MERGE_SYSTEM_FALLBACK | MUI_MERGE_USER_FALLBACK - MUI_THREAD_LANGUAGES = 0x40 - MUI_CONSOLE_FILTER = 0x100 - MUI_COMPLEX_SCRIPT_FILTER = 0x200 - MUI_RESET_FILTERS = 0x001 - MUI_USER_PREFERRED_UI_LANGUAGES = 0x10 - MUI_USE_INSTALLED_LANGUAGES = 0x20 - MUI_USE_SEARCH_ALL_LANGUAGES = 0x40 - MUI_LANG_NEUTRAL_PE_FILE = 0x100 - MUI_NON_LANG_NEUTRAL_FILE = 0x200 - MUI_MACHINE_LANGUAGE_SETTINGS = 0x400 - MUI_FILETYPE_NOT_LANGUAGE_NEUTRAL = 0x001 - MUI_FILETYPE_LANGUAGE_NEUTRAL_MAIN = 0x002 - MUI_FILETYPE_LANGUAGE_NEUTRAL_MUI = 0x004 - MUI_QUERY_TYPE = 0x001 - MUI_QUERY_CHECKSUM = 0x002 - MUI_QUERY_LANGUAGE_NAME = 0x004 - MUI_QUERY_RESOURCE_TYPES = 0x008 - MUI_FILEINFO_VERSION = 0x001 - - MUI_FULL_LANGUAGE = 0x01 - MUI_PARTIAL_LANGUAGE = 0x02 - MUI_LIP_LANGUAGE = 0x04 - MUI_LANGUAGE_INSTALLED = 0x20 - MUI_LANGUAGE_LICENSED = 0x40 -) - -// FILE_INFO_BY_HANDLE_CLASS constants for SetFileInformationByHandle/GetFileInformationByHandleEx -const ( - FileBasicInfo = 0 - FileStandardInfo = 1 - FileNameInfo = 2 - FileRenameInfo = 3 - FileDispositionInfo = 4 - FileAllocationInfo = 5 - FileEndOfFileInfo = 6 - FileStreamInfo = 7 - FileCompressionInfo = 8 - FileAttributeTagInfo = 9 - FileIdBothDirectoryInfo = 10 - FileIdBothDirectoryRestartInfo = 11 - FileIoPriorityHintInfo = 12 - FileRemoteProtocolInfo = 13 - FileFullDirectoryInfo = 14 - FileFullDirectoryRestartInfo = 15 - FileStorageInfo = 16 - FileAlignmentInfo = 17 - FileIdInfo = 18 - FileIdExtdDirectoryInfo = 19 - FileIdExtdDirectoryRestartInfo = 20 - FileDispositionInfoEx = 21 - FileRenameInfoEx = 22 - FileCaseSensitiveInfo = 23 - FileNormalizedNameInfo = 24 -) - -// LoadLibrary flags for determining from where to search for a DLL -const ( - DONT_RESOLVE_DLL_REFERENCES = 0x1 - LOAD_LIBRARY_AS_DATAFILE = 0x2 - LOAD_WITH_ALTERED_SEARCH_PATH = 0x8 - LOAD_IGNORE_CODE_AUTHZ_LEVEL = 0x10 - LOAD_LIBRARY_AS_IMAGE_RESOURCE = 0x20 - LOAD_LIBRARY_AS_DATAFILE_EXCLUSIVE = 0x40 - LOAD_LIBRARY_REQUIRE_SIGNED_TARGET = 0x80 - LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR = 0x100 - LOAD_LIBRARY_SEARCH_APPLICATION_DIR = 0x200 - LOAD_LIBRARY_SEARCH_USER_DIRS = 0x400 - LOAD_LIBRARY_SEARCH_SYSTEM32 = 0x800 - LOAD_LIBRARY_SEARCH_DEFAULT_DIRS = 0x1000 - LOAD_LIBRARY_SAFE_CURRENT_DIRS = 0x00002000 - LOAD_LIBRARY_SEARCH_SYSTEM32_NO_FORWARDER = 0x00004000 - LOAD_LIBRARY_OS_INTEGRITY_CONTINUITY = 0x00008000 -) - -// RegNotifyChangeKeyValue notifyFilter flags. -const ( - // REG_NOTIFY_CHANGE_NAME notifies the caller if a subkey is added or deleted. - REG_NOTIFY_CHANGE_NAME = 0x00000001 - - // REG_NOTIFY_CHANGE_ATTRIBUTES notifies the caller of changes to the attributes of the key, such as the security descriptor information. - REG_NOTIFY_CHANGE_ATTRIBUTES = 0x00000002 - - // REG_NOTIFY_CHANGE_LAST_SET notifies the caller of changes to a value of the key. This can include adding or deleting a value, or changing an existing value. - REG_NOTIFY_CHANGE_LAST_SET = 0x00000004 - - // REG_NOTIFY_CHANGE_SECURITY notifies the caller of changes to the security descriptor of the key. 
- REG_NOTIFY_CHANGE_SECURITY = 0x00000008 - - // REG_NOTIFY_THREAD_AGNOSTIC indicates that the lifetime of the registration must not be tied to the lifetime of the thread issuing the RegNotifyChangeKeyValue call. Note: This flag value is only supported in Windows 8 and later. - REG_NOTIFY_THREAD_AGNOSTIC = 0x10000000 -) - -type CommTimeouts struct { - ReadIntervalTimeout uint32 - ReadTotalTimeoutMultiplier uint32 - ReadTotalTimeoutConstant uint32 - WriteTotalTimeoutMultiplier uint32 - WriteTotalTimeoutConstant uint32 -} - -// NTUnicodeString is a UTF-16 string for NT native APIs, corresponding to UNICODE_STRING. -type NTUnicodeString struct { - Length uint16 - MaximumLength uint16 - Buffer *uint16 -} - -// NTString is an ANSI string for NT native APIs, corresponding to STRING. -type NTString struct { - Length uint16 - MaximumLength uint16 - Buffer *byte -} - -type LIST_ENTRY struct { - Flink *LIST_ENTRY - Blink *LIST_ENTRY -} - -type RUNTIME_FUNCTION struct { - BeginAddress uint32 - EndAddress uint32 - UnwindData uint32 -} - -type LDR_DATA_TABLE_ENTRY struct { - reserved1 [2]uintptr - InMemoryOrderLinks LIST_ENTRY - reserved2 [2]uintptr - DllBase uintptr - reserved3 [2]uintptr - FullDllName NTUnicodeString - reserved4 [8]byte - reserved5 [3]uintptr - reserved6 uintptr - TimeDateStamp uint32 -} - -type PEB_LDR_DATA struct { - reserved1 [8]byte - reserved2 [3]uintptr - InMemoryOrderModuleList LIST_ENTRY -} - -type CURDIR struct { - DosPath NTUnicodeString - Handle Handle -} - -type RTL_DRIVE_LETTER_CURDIR struct { - Flags uint16 - Length uint16 - TimeStamp uint32 - DosPath NTString -} - -type RTL_USER_PROCESS_PARAMETERS struct { - MaximumLength, Length uint32 - - Flags, DebugFlags uint32 - - ConsoleHandle Handle - ConsoleFlags uint32 - StandardInput, StandardOutput, StandardError Handle - - CurrentDirectory CURDIR - DllPath NTUnicodeString - ImagePathName NTUnicodeString - CommandLine NTUnicodeString - Environment unsafe.Pointer - - StartingX, StartingY, CountX, CountY, CountCharsX, CountCharsY, FillAttribute uint32 - - WindowFlags, ShowWindowFlags uint32 - WindowTitle, DesktopInfo, ShellInfo, RuntimeData NTUnicodeString - CurrentDirectories [32]RTL_DRIVE_LETTER_CURDIR - - EnvironmentSize, EnvironmentVersion uintptr - - PackageDependencyData unsafe.Pointer - ProcessGroupId uint32 - LoaderThreads uint32 - - RedirectionDllName NTUnicodeString - HeapPartitionName NTUnicodeString - DefaultThreadpoolCpuSetMasks uintptr - DefaultThreadpoolCpuSetMaskCount uint32 -} - -type PEB struct { - reserved1 [2]byte - BeingDebugged byte - BitField byte - reserved3 uintptr - ImageBaseAddress uintptr - Ldr *PEB_LDR_DATA - ProcessParameters *RTL_USER_PROCESS_PARAMETERS - reserved4 [3]uintptr - AtlThunkSListPtr uintptr - reserved5 uintptr - reserved6 uint32 - reserved7 uintptr - reserved8 uint32 - AtlThunkSListPtr32 uint32 - reserved9 [45]uintptr - reserved10 [96]byte - PostProcessInitRoutine uintptr - reserved11 [128]byte - reserved12 [1]uintptr - SessionId uint32 -} - -type OBJECT_ATTRIBUTES struct { - Length uint32 - RootDirectory Handle - ObjectName *NTUnicodeString - Attributes uint32 - SecurityDescriptor *SECURITY_DESCRIPTOR - SecurityQoS *SECURITY_QUALITY_OF_SERVICE -} - -// Values for the Attributes member of OBJECT_ATTRIBUTES. 
-const ( - OBJ_INHERIT = 0x00000002 - OBJ_PERMANENT = 0x00000010 - OBJ_EXCLUSIVE = 0x00000020 - OBJ_CASE_INSENSITIVE = 0x00000040 - OBJ_OPENIF = 0x00000080 - OBJ_OPENLINK = 0x00000100 - OBJ_KERNEL_HANDLE = 0x00000200 - OBJ_FORCE_ACCESS_CHECK = 0x00000400 - OBJ_IGNORE_IMPERSONATED_DEVICEMAP = 0x00000800 - OBJ_DONT_REPARSE = 0x00001000 - OBJ_VALID_ATTRIBUTES = 0x00001FF2 -) - -type IO_STATUS_BLOCK struct { - Status NTStatus - Information uintptr -} - -type RTLP_CURDIR_REF struct { - RefCount int32 - Handle Handle -} - -type RTL_RELATIVE_NAME struct { - RelativeName NTUnicodeString - ContainingDirectory Handle - CurDirRef *RTLP_CURDIR_REF -} - -const ( - // CreateDisposition flags for NtCreateFile and NtCreateNamedPipeFile. - FILE_SUPERSEDE = 0x00000000 - FILE_OPEN = 0x00000001 - FILE_CREATE = 0x00000002 - FILE_OPEN_IF = 0x00000003 - FILE_OVERWRITE = 0x00000004 - FILE_OVERWRITE_IF = 0x00000005 - FILE_MAXIMUM_DISPOSITION = 0x00000005 - - // CreateOptions flags for NtCreateFile and NtCreateNamedPipeFile. - FILE_DIRECTORY_FILE = 0x00000001 - FILE_WRITE_THROUGH = 0x00000002 - FILE_SEQUENTIAL_ONLY = 0x00000004 - FILE_NO_INTERMEDIATE_BUFFERING = 0x00000008 - FILE_SYNCHRONOUS_IO_ALERT = 0x00000010 - FILE_SYNCHRONOUS_IO_NONALERT = 0x00000020 - FILE_NON_DIRECTORY_FILE = 0x00000040 - FILE_CREATE_TREE_CONNECTION = 0x00000080 - FILE_COMPLETE_IF_OPLOCKED = 0x00000100 - FILE_NO_EA_KNOWLEDGE = 0x00000200 - FILE_OPEN_REMOTE_INSTANCE = 0x00000400 - FILE_RANDOM_ACCESS = 0x00000800 - FILE_DELETE_ON_CLOSE = 0x00001000 - FILE_OPEN_BY_FILE_ID = 0x00002000 - FILE_OPEN_FOR_BACKUP_INTENT = 0x00004000 - FILE_NO_COMPRESSION = 0x00008000 - FILE_OPEN_REQUIRING_OPLOCK = 0x00010000 - FILE_DISALLOW_EXCLUSIVE = 0x00020000 - FILE_RESERVE_OPFILTER = 0x00100000 - FILE_OPEN_REPARSE_POINT = 0x00200000 - FILE_OPEN_NO_RECALL = 0x00400000 - FILE_OPEN_FOR_FREE_SPACE_QUERY = 0x00800000 - - // Parameter constants for NtCreateNamedPipeFile. 
- - FILE_PIPE_BYTE_STREAM_TYPE = 0x00000000 - FILE_PIPE_MESSAGE_TYPE = 0x00000001 - - FILE_PIPE_ACCEPT_REMOTE_CLIENTS = 0x00000000 - FILE_PIPE_REJECT_REMOTE_CLIENTS = 0x00000002 - - FILE_PIPE_TYPE_VALID_MASK = 0x00000003 - - FILE_PIPE_BYTE_STREAM_MODE = 0x00000000 - FILE_PIPE_MESSAGE_MODE = 0x00000001 - - FILE_PIPE_QUEUE_OPERATION = 0x00000000 - FILE_PIPE_COMPLETE_OPERATION = 0x00000001 - - FILE_PIPE_INBOUND = 0x00000000 - FILE_PIPE_OUTBOUND = 0x00000001 - FILE_PIPE_FULL_DUPLEX = 0x00000002 - - FILE_PIPE_DISCONNECTED_STATE = 0x00000001 - FILE_PIPE_LISTENING_STATE = 0x00000002 - FILE_PIPE_CONNECTED_STATE = 0x00000003 - FILE_PIPE_CLOSING_STATE = 0x00000004 - - FILE_PIPE_CLIENT_END = 0x00000000 - FILE_PIPE_SERVER_END = 0x00000001 -) - -const ( - // FileInformationClass for NtSetInformationFile - FileBasicInformation = 4 - FileRenameInformation = 10 - FileDispositionInformation = 13 - FilePositionInformation = 14 - FileEndOfFileInformation = 20 - FileValidDataLengthInformation = 39 - FileShortNameInformation = 40 - FileIoPriorityHintInformation = 43 - FileReplaceCompletionInformation = 61 - FileDispositionInformationEx = 64 - FileCaseSensitiveInformation = 71 - FileLinkInformation = 72 - FileCaseSensitiveInformationForceAccessCheck = 75 - FileKnownFolderInformation = 76 - - // Flags for FILE_RENAME_INFORMATION - FILE_RENAME_REPLACE_IF_EXISTS = 0x00000001 - FILE_RENAME_POSIX_SEMANTICS = 0x00000002 - FILE_RENAME_SUPPRESS_PIN_STATE_INHERITANCE = 0x00000004 - FILE_RENAME_SUPPRESS_STORAGE_RESERVE_INHERITANCE = 0x00000008 - FILE_RENAME_NO_INCREASE_AVAILABLE_SPACE = 0x00000010 - FILE_RENAME_NO_DECREASE_AVAILABLE_SPACE = 0x00000020 - FILE_RENAME_PRESERVE_AVAILABLE_SPACE = 0x00000030 - FILE_RENAME_IGNORE_READONLY_ATTRIBUTE = 0x00000040 - FILE_RENAME_FORCE_RESIZE_TARGET_SR = 0x00000080 - FILE_RENAME_FORCE_RESIZE_SOURCE_SR = 0x00000100 - FILE_RENAME_FORCE_RESIZE_SR = 0x00000180 - - // Flags for FILE_DISPOSITION_INFORMATION_EX - FILE_DISPOSITION_DO_NOT_DELETE = 0x00000000 - FILE_DISPOSITION_DELETE = 0x00000001 - FILE_DISPOSITION_POSIX_SEMANTICS = 0x00000002 - FILE_DISPOSITION_FORCE_IMAGE_SECTION_CHECK = 0x00000004 - FILE_DISPOSITION_ON_CLOSE = 0x00000008 - FILE_DISPOSITION_IGNORE_READONLY_ATTRIBUTE = 0x00000010 - - // Flags for FILE_CASE_SENSITIVE_INFORMATION - FILE_CS_FLAG_CASE_SENSITIVE_DIR = 0x00000001 - - // Flags for FILE_LINK_INFORMATION - FILE_LINK_REPLACE_IF_EXISTS = 0x00000001 - FILE_LINK_POSIX_SEMANTICS = 0x00000002 - FILE_LINK_SUPPRESS_STORAGE_RESERVE_INHERITANCE = 0x00000008 - FILE_LINK_NO_INCREASE_AVAILABLE_SPACE = 0x00000010 - FILE_LINK_NO_DECREASE_AVAILABLE_SPACE = 0x00000020 - FILE_LINK_PRESERVE_AVAILABLE_SPACE = 0x00000030 - FILE_LINK_IGNORE_READONLY_ATTRIBUTE = 0x00000040 - FILE_LINK_FORCE_RESIZE_TARGET_SR = 0x00000080 - FILE_LINK_FORCE_RESIZE_SOURCE_SR = 0x00000100 - FILE_LINK_FORCE_RESIZE_SR = 0x00000180 -) - -// ProcessInformationClasses for NtQueryInformationProcess and NtSetInformationProcess. 
-const ( - ProcessBasicInformation = iota - ProcessQuotaLimits - ProcessIoCounters - ProcessVmCounters - ProcessTimes - ProcessBasePriority - ProcessRaisePriority - ProcessDebugPort - ProcessExceptionPort - ProcessAccessToken - ProcessLdtInformation - ProcessLdtSize - ProcessDefaultHardErrorMode - ProcessIoPortHandlers - ProcessPooledUsageAndLimits - ProcessWorkingSetWatch - ProcessUserModeIOPL - ProcessEnableAlignmentFaultFixup - ProcessPriorityClass - ProcessWx86Information - ProcessHandleCount - ProcessAffinityMask - ProcessPriorityBoost - ProcessDeviceMap - ProcessSessionInformation - ProcessForegroundInformation - ProcessWow64Information - ProcessImageFileName - ProcessLUIDDeviceMapsEnabled - ProcessBreakOnTermination - ProcessDebugObjectHandle - ProcessDebugFlags - ProcessHandleTracing - ProcessIoPriority - ProcessExecuteFlags - ProcessTlsInformation - ProcessCookie - ProcessImageInformation - ProcessCycleTime - ProcessPagePriority - ProcessInstrumentationCallback - ProcessThreadStackAllocation - ProcessWorkingSetWatchEx - ProcessImageFileNameWin32 - ProcessImageFileMapping - ProcessAffinityUpdateMode - ProcessMemoryAllocationMode - ProcessGroupInformation - ProcessTokenVirtualizationEnabled - ProcessConsoleHostProcess - ProcessWindowInformation - ProcessHandleInformation - ProcessMitigationPolicy - ProcessDynamicFunctionTableInformation - ProcessHandleCheckingMode - ProcessKeepAliveCount - ProcessRevokeFileHandles - ProcessWorkingSetControl - ProcessHandleTable - ProcessCheckStackExtentsMode - ProcessCommandLineInformation - ProcessProtectionInformation - ProcessMemoryExhaustion - ProcessFaultInformation - ProcessTelemetryIdInformation - ProcessCommitReleaseInformation - ProcessDefaultCpuSetsInformation - ProcessAllowedCpuSetsInformation - ProcessSubsystemProcess - ProcessJobMemoryInformation - ProcessInPrivate - ProcessRaiseUMExceptionOnInvalidHandleClose - ProcessIumChallengeResponse - ProcessChildProcessInformation - ProcessHighGraphicsPriorityInformation - ProcessSubsystemInformation - ProcessEnergyValues - ProcessActivityThrottleState - ProcessActivityThrottlePolicy - ProcessWin32kSyscallFilterInformation - ProcessDisableSystemAllowedCpuSets - ProcessWakeInformation - ProcessEnergyTrackingState - ProcessManageWritesToExecutableMemory - ProcessCaptureTrustletLiveDump - ProcessTelemetryCoverage - ProcessEnclaveInformation - ProcessEnableReadWriteVmLogging - ProcessUptimeInformation - ProcessImageSection - ProcessDebugAuthInformation - ProcessSystemResourceManagement - ProcessSequenceNumber - ProcessLoaderDetour - ProcessSecurityDomainInformation - ProcessCombineSecurityDomainsInformation - ProcessEnableLogging - ProcessLeapSecondInformation - ProcessFiberShadowStackAllocation - ProcessFreeFiberShadowStackAllocation - ProcessAltSystemCallInformation - ProcessDynamicEHContinuationTargets - ProcessDynamicEnforcedCetCompatibleRanges -) - -type PROCESS_BASIC_INFORMATION struct { - ExitStatus NTStatus - PebBaseAddress *PEB - AffinityMask uintptr - BasePriority int32 - UniqueProcessId uintptr - InheritedFromUniqueProcessId uintptr -} - -type SYSTEM_PROCESS_INFORMATION struct { - NextEntryOffset uint32 - NumberOfThreads uint32 - WorkingSetPrivateSize int64 - HardFaultCount uint32 - NumberOfThreadsHighWatermark uint32 - CycleTime uint64 - CreateTime int64 - UserTime int64 - KernelTime int64 - ImageName NTUnicodeString - BasePriority int32 - UniqueProcessID uintptr - InheritedFromUniqueProcessID uintptr - HandleCount uint32 - SessionID uint32 - UniqueProcessKey *uint32 - PeakVirtualSize 
uintptr - VirtualSize uintptr - PageFaultCount uint32 - PeakWorkingSetSize uintptr - WorkingSetSize uintptr - QuotaPeakPagedPoolUsage uintptr - QuotaPagedPoolUsage uintptr - QuotaPeakNonPagedPoolUsage uintptr - QuotaNonPagedPoolUsage uintptr - PagefileUsage uintptr - PeakPagefileUsage uintptr - PrivatePageCount uintptr - ReadOperationCount int64 - WriteOperationCount int64 - OtherOperationCount int64 - ReadTransferCount int64 - WriteTransferCount int64 - OtherTransferCount int64 -} - -// SystemInformationClasses for NtQuerySystemInformation and NtSetSystemInformation -const ( - SystemBasicInformation = iota - SystemProcessorInformation - SystemPerformanceInformation - SystemTimeOfDayInformation - SystemPathInformation - SystemProcessInformation - SystemCallCountInformation - SystemDeviceInformation - SystemProcessorPerformanceInformation - SystemFlagsInformation - SystemCallTimeInformation - SystemModuleInformation - SystemLocksInformation - SystemStackTraceInformation - SystemPagedPoolInformation - SystemNonPagedPoolInformation - SystemHandleInformation - SystemObjectInformation - SystemPageFileInformation - SystemVdmInstemulInformation - SystemVdmBopInformation - SystemFileCacheInformation - SystemPoolTagInformation - SystemInterruptInformation - SystemDpcBehaviorInformation - SystemFullMemoryInformation - SystemLoadGdiDriverInformation - SystemUnloadGdiDriverInformation - SystemTimeAdjustmentInformation - SystemSummaryMemoryInformation - SystemMirrorMemoryInformation - SystemPerformanceTraceInformation - systemObsolete0 - SystemExceptionInformation - SystemCrashDumpStateInformation - SystemKernelDebuggerInformation - SystemContextSwitchInformation - SystemRegistryQuotaInformation - SystemExtendServiceTableInformation - SystemPrioritySeperation - SystemVerifierAddDriverInformation - SystemVerifierRemoveDriverInformation - SystemProcessorIdleInformation - SystemLegacyDriverInformation - SystemCurrentTimeZoneInformation - SystemLookasideInformation - SystemTimeSlipNotification - SystemSessionCreate - SystemSessionDetach - SystemSessionInformation - SystemRangeStartInformation - SystemVerifierInformation - SystemVerifierThunkExtend - SystemSessionProcessInformation - SystemLoadGdiDriverInSystemSpace - SystemNumaProcessorMap - SystemPrefetcherInformation - SystemExtendedProcessInformation - SystemRecommendedSharedDataAlignment - SystemComPlusPackage - SystemNumaAvailableMemory - SystemProcessorPowerInformation - SystemEmulationBasicInformation - SystemEmulationProcessorInformation - SystemExtendedHandleInformation - SystemLostDelayedWriteInformation - SystemBigPoolInformation - SystemSessionPoolTagInformation - SystemSessionMappedViewInformation - SystemHotpatchInformation - SystemObjectSecurityMode - SystemWatchdogTimerHandler - SystemWatchdogTimerInformation - SystemLogicalProcessorInformation - SystemWow64SharedInformationObsolete - SystemRegisterFirmwareTableInformationHandler - SystemFirmwareTableInformation - SystemModuleInformationEx - SystemVerifierTriageInformation - SystemSuperfetchInformation - SystemMemoryListInformation - SystemFileCacheInformationEx - SystemThreadPriorityClientIdInformation - SystemProcessorIdleCycleTimeInformation - SystemVerifierCancellationInformation - SystemProcessorPowerInformationEx - SystemRefTraceInformation - SystemSpecialPoolInformation - SystemProcessIdInformation - SystemErrorPortInformation - SystemBootEnvironmentInformation - SystemHypervisorInformation - SystemVerifierInformationEx - SystemTimeZoneInformation - 
SystemImageFileExecutionOptionsInformation - SystemCoverageInformation - SystemPrefetchPatchInformation - SystemVerifierFaultsInformation - SystemSystemPartitionInformation - SystemSystemDiskInformation - SystemProcessorPerformanceDistribution - SystemNumaProximityNodeInformation - SystemDynamicTimeZoneInformation - SystemCodeIntegrityInformation - SystemProcessorMicrocodeUpdateInformation - SystemProcessorBrandString - SystemVirtualAddressInformation - SystemLogicalProcessorAndGroupInformation - SystemProcessorCycleTimeInformation - SystemStoreInformation - SystemRegistryAppendString - SystemAitSamplingValue - SystemVhdBootInformation - SystemCpuQuotaInformation - SystemNativeBasicInformation - systemSpare1 - SystemLowPriorityIoInformation - SystemTpmBootEntropyInformation - SystemVerifierCountersInformation - SystemPagedPoolInformationEx - SystemSystemPtesInformationEx - SystemNodeDistanceInformation - SystemAcpiAuditInformation - SystemBasicPerformanceInformation - SystemQueryPerformanceCounterInformation - SystemSessionBigPoolInformation - SystemBootGraphicsInformation - SystemScrubPhysicalMemoryInformation - SystemBadPageInformation - SystemProcessorProfileControlArea - SystemCombinePhysicalMemoryInformation - SystemEntropyInterruptTimingCallback - SystemConsoleInformation - SystemPlatformBinaryInformation - SystemThrottleNotificationInformation - SystemHypervisorProcessorCountInformation - SystemDeviceDataInformation - SystemDeviceDataEnumerationInformation - SystemMemoryTopologyInformation - SystemMemoryChannelInformation - SystemBootLogoInformation - SystemProcessorPerformanceInformationEx - systemSpare0 - SystemSecureBootPolicyInformation - SystemPageFileInformationEx - SystemSecureBootInformation - SystemEntropyInterruptTimingRawInformation - SystemPortableWorkspaceEfiLauncherInformation - SystemFullProcessInformation - SystemKernelDebuggerInformationEx - SystemBootMetadataInformation - SystemSoftRebootInformation - SystemElamCertificateInformation - SystemOfflineDumpConfigInformation - SystemProcessorFeaturesInformation - SystemRegistryReconciliationInformation - SystemEdidInformation - SystemManufacturingInformation - SystemEnergyEstimationConfigInformation - SystemHypervisorDetailInformation - SystemProcessorCycleStatsInformation - SystemVmGenerationCountInformation - SystemTrustedPlatformModuleInformation - SystemKernelDebuggerFlags - SystemCodeIntegrityPolicyInformation - SystemIsolatedUserModeInformation - SystemHardwareSecurityTestInterfaceResultsInformation - SystemSingleModuleInformation - SystemAllowedCpuSetsInformation - SystemDmaProtectionInformation - SystemInterruptCpuSetsInformation - SystemSecureBootPolicyFullInformation - SystemCodeIntegrityPolicyFullInformation - SystemAffinitizedInterruptProcessorInformation - SystemRootSiloInformation -) - -type RTL_PROCESS_MODULE_INFORMATION struct { - Section Handle - MappedBase uintptr - ImageBase uintptr - ImageSize uint32 - Flags uint32 - LoadOrderIndex uint16 - InitOrderIndex uint16 - LoadCount uint16 - OffsetToFileName uint16 - FullPathName [256]byte -} - -type RTL_PROCESS_MODULES struct { - NumberOfModules uint32 - Modules [1]RTL_PROCESS_MODULE_INFORMATION -} - -// Constants for LocalAlloc flags. 
-const ( - LMEM_FIXED = 0x0 - LMEM_MOVEABLE = 0x2 - LMEM_NOCOMPACT = 0x10 - LMEM_NODISCARD = 0x20 - LMEM_ZEROINIT = 0x40 - LMEM_MODIFY = 0x80 - LMEM_DISCARDABLE = 0xf00 - LMEM_VALID_FLAGS = 0xf72 - LMEM_INVALID_HANDLE = 0x8000 - LHND = LMEM_MOVEABLE | LMEM_ZEROINIT - LPTR = LMEM_FIXED | LMEM_ZEROINIT - NONZEROLHND = LMEM_MOVEABLE - NONZEROLPTR = LMEM_FIXED -) - -// Constants for the CreateNamedPipe-family of functions. -const ( - PIPE_ACCESS_INBOUND = 0x1 - PIPE_ACCESS_OUTBOUND = 0x2 - PIPE_ACCESS_DUPLEX = 0x3 - - PIPE_CLIENT_END = 0x0 - PIPE_SERVER_END = 0x1 - - PIPE_WAIT = 0x0 - PIPE_NOWAIT = 0x1 - PIPE_READMODE_BYTE = 0x0 - PIPE_READMODE_MESSAGE = 0x2 - PIPE_TYPE_BYTE = 0x0 - PIPE_TYPE_MESSAGE = 0x4 - PIPE_ACCEPT_REMOTE_CLIENTS = 0x0 - PIPE_REJECT_REMOTE_CLIENTS = 0x8 - - PIPE_UNLIMITED_INSTANCES = 255 -) - -// Constants for security attributes when opening named pipes. -const ( - SECURITY_ANONYMOUS = SecurityAnonymous << 16 - SECURITY_IDENTIFICATION = SecurityIdentification << 16 - SECURITY_IMPERSONATION = SecurityImpersonation << 16 - SECURITY_DELEGATION = SecurityDelegation << 16 - - SECURITY_CONTEXT_TRACKING = 0x40000 - SECURITY_EFFECTIVE_ONLY = 0x80000 - - SECURITY_SQOS_PRESENT = 0x100000 - SECURITY_VALID_SQOS_FLAGS = 0x1f0000 -) - -// ResourceID represents a 16-bit resource identifier, traditionally created with the MAKEINTRESOURCE macro. -type ResourceID uint16 - -// ResourceIDOrString must be either a ResourceID, to specify a resource or resource type by ID, -// or a string, to specify a resource or resource type by name. -type ResourceIDOrString interface{} - -// Predefined resource names and types. -var ( - // Predefined names. - CREATEPROCESS_MANIFEST_RESOURCE_ID ResourceID = 1 - ISOLATIONAWARE_MANIFEST_RESOURCE_ID ResourceID = 2 - ISOLATIONAWARE_NOSTATICIMPORT_MANIFEST_RESOURCE_ID ResourceID = 3 - ISOLATIONPOLICY_MANIFEST_RESOURCE_ID ResourceID = 4 - ISOLATIONPOLICY_BROWSER_MANIFEST_RESOURCE_ID ResourceID = 5 - MINIMUM_RESERVED_MANIFEST_RESOURCE_ID ResourceID = 1 // inclusive - MAXIMUM_RESERVED_MANIFEST_RESOURCE_ID ResourceID = 16 // inclusive - - // Predefined types. 
- RT_CURSOR ResourceID = 1 - RT_BITMAP ResourceID = 2 - RT_ICON ResourceID = 3 - RT_MENU ResourceID = 4 - RT_DIALOG ResourceID = 5 - RT_STRING ResourceID = 6 - RT_FONTDIR ResourceID = 7 - RT_FONT ResourceID = 8 - RT_ACCELERATOR ResourceID = 9 - RT_RCDATA ResourceID = 10 - RT_MESSAGETABLE ResourceID = 11 - RT_GROUP_CURSOR ResourceID = 12 - RT_GROUP_ICON ResourceID = 14 - RT_VERSION ResourceID = 16 - RT_DLGINCLUDE ResourceID = 17 - RT_PLUGPLAY ResourceID = 19 - RT_VXD ResourceID = 20 - RT_ANICURSOR ResourceID = 21 - RT_ANIICON ResourceID = 22 - RT_HTML ResourceID = 23 - RT_MANIFEST ResourceID = 24 -) - -type VS_FIXEDFILEINFO struct { - Signature uint32 - StrucVersion uint32 - FileVersionMS uint32 - FileVersionLS uint32 - ProductVersionMS uint32 - ProductVersionLS uint32 - FileFlagsMask uint32 - FileFlags uint32 - FileOS uint32 - FileType uint32 - FileSubtype uint32 - FileDateMS uint32 - FileDateLS uint32 -} - -type COAUTHIDENTITY struct { - User *uint16 - UserLength uint32 - Domain *uint16 - DomainLength uint32 - Password *uint16 - PasswordLength uint32 - Flags uint32 -} - -type COAUTHINFO struct { - AuthnSvc uint32 - AuthzSvc uint32 - ServerPrincName *uint16 - AuthnLevel uint32 - ImpersonationLevel uint32 - AuthIdentityData *COAUTHIDENTITY - Capabilities uint32 -} - -type COSERVERINFO struct { - Reserved1 uint32 - Aame *uint16 - AuthInfo *COAUTHINFO - Reserved2 uint32 -} - -type BIND_OPTS3 struct { - CbStruct uint32 - Flags uint32 - Mode uint32 - TickCountDeadline uint32 - TrackFlags uint32 - ClassContext uint32 - Locale uint32 - ServerInfo *COSERVERINFO - Hwnd HWND -} - -const ( - CLSCTX_INPROC_SERVER = 0x1 - CLSCTX_INPROC_HANDLER = 0x2 - CLSCTX_LOCAL_SERVER = 0x4 - CLSCTX_INPROC_SERVER16 = 0x8 - CLSCTX_REMOTE_SERVER = 0x10 - CLSCTX_INPROC_HANDLER16 = 0x20 - CLSCTX_RESERVED1 = 0x40 - CLSCTX_RESERVED2 = 0x80 - CLSCTX_RESERVED3 = 0x100 - CLSCTX_RESERVED4 = 0x200 - CLSCTX_NO_CODE_DOWNLOAD = 0x400 - CLSCTX_RESERVED5 = 0x800 - CLSCTX_NO_CUSTOM_MARSHAL = 0x1000 - CLSCTX_ENABLE_CODE_DOWNLOAD = 0x2000 - CLSCTX_NO_FAILURE_LOG = 0x4000 - CLSCTX_DISABLE_AAA = 0x8000 - CLSCTX_ENABLE_AAA = 0x10000 - CLSCTX_FROM_DEFAULT_CONTEXT = 0x20000 - CLSCTX_ACTIVATE_32_BIT_SERVER = 0x40000 - CLSCTX_ACTIVATE_64_BIT_SERVER = 0x80000 - CLSCTX_ENABLE_CLOAKING = 0x100000 - CLSCTX_APPCONTAINER = 0x400000 - CLSCTX_ACTIVATE_AAA_AS_IU = 0x800000 - CLSCTX_PS_DLL = 0x80000000 - - COINIT_MULTITHREADED = 0x0 - COINIT_APARTMENTTHREADED = 0x2 - COINIT_DISABLE_OLE1DDE = 0x4 - COINIT_SPEED_OVER_MEMORY = 0x8 -) - -// Flag for QueryFullProcessImageName. 
-const PROCESS_NAME_NATIVE = 1 - -type ModuleInfo struct { - BaseOfDll uintptr - SizeOfImage uint32 - EntryPoint uintptr -} - -const ALL_PROCESSOR_GROUPS = 0xFFFF - -type Rect struct { - Left int32 - Top int32 - Right int32 - Bottom int32 -} - -type GUIThreadInfo struct { - Size uint32 - Flags uint32 - Active HWND - Focus HWND - Capture HWND - MenuOwner HWND - MoveSize HWND - CaretHandle HWND - CaretRect Rect -} - -const ( - DWMWA_NCRENDERING_ENABLED = 1 - DWMWA_NCRENDERING_POLICY = 2 - DWMWA_TRANSITIONS_FORCEDISABLED = 3 - DWMWA_ALLOW_NCPAINT = 4 - DWMWA_CAPTION_BUTTON_BOUNDS = 5 - DWMWA_NONCLIENT_RTL_LAYOUT = 6 - DWMWA_FORCE_ICONIC_REPRESENTATION = 7 - DWMWA_FLIP3D_POLICY = 8 - DWMWA_EXTENDED_FRAME_BOUNDS = 9 - DWMWA_HAS_ICONIC_BITMAP = 10 - DWMWA_DISALLOW_PEEK = 11 - DWMWA_EXCLUDED_FROM_PEEK = 12 - DWMWA_CLOAK = 13 - DWMWA_CLOAKED = 14 - DWMWA_FREEZE_REPRESENTATION = 15 - DWMWA_PASSIVE_UPDATE_MODE = 16 - DWMWA_USE_HOSTBACKDROPBRUSH = 17 - DWMWA_USE_IMMERSIVE_DARK_MODE = 20 - DWMWA_WINDOW_CORNER_PREFERENCE = 33 - DWMWA_BORDER_COLOR = 34 - DWMWA_CAPTION_COLOR = 35 - DWMWA_TEXT_COLOR = 36 - DWMWA_VISIBLE_FRAME_BORDER_THICKNESS = 37 -) - -type WSAQUERYSET struct { - Size uint32 - ServiceInstanceName *uint16 - ServiceClassId *GUID - Version *WSAVersion - Comment *uint16 - NameSpace uint32 - NSProviderId *GUID - Context *uint16 - NumberOfProtocols uint32 - AfpProtocols *AFProtocols - QueryString *uint16 - NumberOfCsAddrs uint32 - SaBuffer *CSAddrInfo - OutputFlags uint32 - Blob *BLOB -} - -type WSAVersion struct { - Version uint32 - EnumerationOfComparison int32 -} - -type AFProtocols struct { - AddressFamily int32 - Protocol int32 -} - -type CSAddrInfo struct { - LocalAddr SocketAddress - RemoteAddr SocketAddress - SocketType int32 - Protocol int32 -} - -type BLOB struct { - Size uint32 - BlobData *byte -} diff --git a/vendor/golang.org/x/sys/windows/types_windows_386.go b/vendor/golang.org/x/sys/windows/types_windows_386.go deleted file mode 100644 index 8bce3e2fc..000000000 --- a/vendor/golang.org/x/sys/windows/types_windows_386.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package windows - -type WSAData struct { - Version uint16 - HighVersion uint16 - Description [WSADESCRIPTION_LEN + 1]byte - SystemStatus [WSASYS_STATUS_LEN + 1]byte - MaxSockets uint16 - MaxUdpDg uint16 - VendorInfo *byte -} - -type Servent struct { - Name *byte - Aliases **byte - Port uint16 - Proto *byte -} - -type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { - PerProcessUserTimeLimit int64 - PerJobUserTimeLimit int64 - LimitFlags uint32 - MinimumWorkingSetSize uintptr - MaximumWorkingSetSize uintptr - ActiveProcessLimit uint32 - Affinity uintptr - PriorityClass uint32 - SchedulingClass uint32 - _ uint32 // pad to 8 byte boundary -} diff --git a/vendor/golang.org/x/sys/windows/types_windows_amd64.go b/vendor/golang.org/x/sys/windows/types_windows_amd64.go deleted file mode 100644 index fdddc0c70..000000000 --- a/vendor/golang.org/x/sys/windows/types_windows_amd64.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package windows - -type WSAData struct { - Version uint16 - HighVersion uint16 - MaxSockets uint16 - MaxUdpDg uint16 - VendorInfo *byte - Description [WSADESCRIPTION_LEN + 1]byte - SystemStatus [WSASYS_STATUS_LEN + 1]byte -} - -type Servent struct { - Name *byte - Aliases **byte - Proto *byte - Port uint16 -} - -type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { - PerProcessUserTimeLimit int64 - PerJobUserTimeLimit int64 - LimitFlags uint32 - MinimumWorkingSetSize uintptr - MaximumWorkingSetSize uintptr - ActiveProcessLimit uint32 - Affinity uintptr - PriorityClass uint32 - SchedulingClass uint32 -} diff --git a/vendor/golang.org/x/sys/windows/types_windows_arm.go b/vendor/golang.org/x/sys/windows/types_windows_arm.go deleted file mode 100644 index 321872c3e..000000000 --- a/vendor/golang.org/x/sys/windows/types_windows_arm.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package windows - -type WSAData struct { - Version uint16 - HighVersion uint16 - Description [WSADESCRIPTION_LEN + 1]byte - SystemStatus [WSASYS_STATUS_LEN + 1]byte - MaxSockets uint16 - MaxUdpDg uint16 - VendorInfo *byte -} - -type Servent struct { - Name *byte - Aliases **byte - Port uint16 - Proto *byte -} - -type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { - PerProcessUserTimeLimit int64 - PerJobUserTimeLimit int64 - LimitFlags uint32 - MinimumWorkingSetSize uintptr - MaximumWorkingSetSize uintptr - ActiveProcessLimit uint32 - Affinity uintptr - PriorityClass uint32 - SchedulingClass uint32 - _ uint32 // pad to 8 byte boundary -} diff --git a/vendor/golang.org/x/sys/windows/types_windows_arm64.go b/vendor/golang.org/x/sys/windows/types_windows_arm64.go deleted file mode 100644 index fdddc0c70..000000000 --- a/vendor/golang.org/x/sys/windows/types_windows_arm64.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package windows - -type WSAData struct { - Version uint16 - HighVersion uint16 - MaxSockets uint16 - MaxUdpDg uint16 - VendorInfo *byte - Description [WSADESCRIPTION_LEN + 1]byte - SystemStatus [WSASYS_STATUS_LEN + 1]byte -} - -type Servent struct { - Name *byte - Aliases **byte - Proto *byte - Port uint16 -} - -type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { - PerProcessUserTimeLimit int64 - PerJobUserTimeLimit int64 - LimitFlags uint32 - MinimumWorkingSetSize uintptr - MaximumWorkingSetSize uintptr - ActiveProcessLimit uint32 - Affinity uintptr - PriorityClass uint32 - SchedulingClass uint32 -} diff --git a/vendor/golang.org/x/sys/windows/zerrors_windows.go b/vendor/golang.org/x/sys/windows/zerrors_windows.go deleted file mode 100644 index 0cf658fbd..000000000 --- a/vendor/golang.org/x/sys/windows/zerrors_windows.go +++ /dev/null @@ -1,9468 +0,0 @@ -// Code generated by 'mkerrors.bash'; DO NOT EDIT. 
- -package windows - -import "syscall" - -const ( - FACILITY_NULL = 0 - FACILITY_RPC = 1 - FACILITY_DISPATCH = 2 - FACILITY_STORAGE = 3 - FACILITY_ITF = 4 - FACILITY_WIN32 = 7 - FACILITY_WINDOWS = 8 - FACILITY_SSPI = 9 - FACILITY_SECURITY = 9 - FACILITY_CONTROL = 10 - FACILITY_CERT = 11 - FACILITY_INTERNET = 12 - FACILITY_MEDIASERVER = 13 - FACILITY_MSMQ = 14 - FACILITY_SETUPAPI = 15 - FACILITY_SCARD = 16 - FACILITY_COMPLUS = 17 - FACILITY_AAF = 18 - FACILITY_URT = 19 - FACILITY_ACS = 20 - FACILITY_DPLAY = 21 - FACILITY_UMI = 22 - FACILITY_SXS = 23 - FACILITY_WINDOWS_CE = 24 - FACILITY_HTTP = 25 - FACILITY_USERMODE_COMMONLOG = 26 - FACILITY_WER = 27 - FACILITY_USERMODE_FILTER_MANAGER = 31 - FACILITY_BACKGROUNDCOPY = 32 - FACILITY_CONFIGURATION = 33 - FACILITY_WIA = 33 - FACILITY_STATE_MANAGEMENT = 34 - FACILITY_METADIRECTORY = 35 - FACILITY_WINDOWSUPDATE = 36 - FACILITY_DIRECTORYSERVICE = 37 - FACILITY_GRAPHICS = 38 - FACILITY_SHELL = 39 - FACILITY_NAP = 39 - FACILITY_TPM_SERVICES = 40 - FACILITY_TPM_SOFTWARE = 41 - FACILITY_UI = 42 - FACILITY_XAML = 43 - FACILITY_ACTION_QUEUE = 44 - FACILITY_PLA = 48 - FACILITY_WINDOWS_SETUP = 48 - FACILITY_FVE = 49 - FACILITY_FWP = 50 - FACILITY_WINRM = 51 - FACILITY_NDIS = 52 - FACILITY_USERMODE_HYPERVISOR = 53 - FACILITY_CMI = 54 - FACILITY_USERMODE_VIRTUALIZATION = 55 - FACILITY_USERMODE_VOLMGR = 56 - FACILITY_BCD = 57 - FACILITY_USERMODE_VHD = 58 - FACILITY_USERMODE_HNS = 59 - FACILITY_SDIAG = 60 - FACILITY_WEBSERVICES = 61 - FACILITY_WINPE = 61 - FACILITY_WPN = 62 - FACILITY_WINDOWS_STORE = 63 - FACILITY_INPUT = 64 - FACILITY_EAP = 66 - FACILITY_WINDOWS_DEFENDER = 80 - FACILITY_OPC = 81 - FACILITY_XPS = 82 - FACILITY_MBN = 84 - FACILITY_POWERSHELL = 84 - FACILITY_RAS = 83 - FACILITY_P2P_INT = 98 - FACILITY_P2P = 99 - FACILITY_DAF = 100 - FACILITY_BLUETOOTH_ATT = 101 - FACILITY_AUDIO = 102 - FACILITY_STATEREPOSITORY = 103 - FACILITY_VISUALCPP = 109 - FACILITY_SCRIPT = 112 - FACILITY_PARSE = 113 - FACILITY_BLB = 120 - FACILITY_BLB_CLI = 121 - FACILITY_WSBAPP = 122 - FACILITY_BLBUI = 128 - FACILITY_USN = 129 - FACILITY_USERMODE_VOLSNAP = 130 - FACILITY_TIERING = 131 - FACILITY_WSB_ONLINE = 133 - FACILITY_ONLINE_ID = 134 - FACILITY_DEVICE_UPDATE_AGENT = 135 - FACILITY_DRVSERVICING = 136 - FACILITY_DLS = 153 - FACILITY_DELIVERY_OPTIMIZATION = 208 - FACILITY_USERMODE_SPACES = 231 - FACILITY_USER_MODE_SECURITY_CORE = 232 - FACILITY_USERMODE_LICENSING = 234 - FACILITY_SOS = 160 - FACILITY_DEBUGGERS = 176 - FACILITY_SPP = 256 - FACILITY_RESTORE = 256 - FACILITY_DMSERVER = 256 - FACILITY_DEPLOYMENT_SERVICES_SERVER = 257 - FACILITY_DEPLOYMENT_SERVICES_IMAGING = 258 - FACILITY_DEPLOYMENT_SERVICES_MANAGEMENT = 259 - FACILITY_DEPLOYMENT_SERVICES_UTIL = 260 - FACILITY_DEPLOYMENT_SERVICES_BINLSVC = 261 - FACILITY_DEPLOYMENT_SERVICES_PXE = 263 - FACILITY_DEPLOYMENT_SERVICES_TFTP = 264 - FACILITY_DEPLOYMENT_SERVICES_TRANSPORT_MANAGEMENT = 272 - FACILITY_DEPLOYMENT_SERVICES_DRIVER_PROVISIONING = 278 - FACILITY_DEPLOYMENT_SERVICES_MULTICAST_SERVER = 289 - FACILITY_DEPLOYMENT_SERVICES_MULTICAST_CLIENT = 290 - FACILITY_DEPLOYMENT_SERVICES_CONTENT_PROVIDER = 293 - FACILITY_LINGUISTIC_SERVICES = 305 - FACILITY_AUDIOSTREAMING = 1094 - FACILITY_ACCELERATOR = 1536 - FACILITY_WMAAECMA = 1996 - FACILITY_DIRECTMUSIC = 2168 - FACILITY_DIRECT3D10 = 2169 - FACILITY_DXGI = 2170 - FACILITY_DXGI_DDI = 2171 - FACILITY_DIRECT3D11 = 2172 - FACILITY_DIRECT3D11_DEBUG = 2173 - FACILITY_DIRECT3D12 = 2174 - FACILITY_DIRECT3D12_DEBUG = 2175 - FACILITY_LEAP = 2184 - FACILITY_AUDCLNT = 2185 - 
FACILITY_WINCODEC_DWRITE_DWM = 2200 - FACILITY_WINML = 2192 - FACILITY_DIRECT2D = 2201 - FACILITY_DEFRAG = 2304 - FACILITY_USERMODE_SDBUS = 2305 - FACILITY_JSCRIPT = 2306 - FACILITY_PIDGENX = 2561 - FACILITY_EAS = 85 - FACILITY_WEB = 885 - FACILITY_WEB_SOCKET = 886 - FACILITY_MOBILE = 1793 - FACILITY_SQLITE = 1967 - FACILITY_UTC = 1989 - FACILITY_WEP = 2049 - FACILITY_SYNCENGINE = 2050 - FACILITY_XBOX = 2339 - FACILITY_GAME = 2340 - FACILITY_PIX = 2748 - ERROR_SUCCESS syscall.Errno = 0 - NO_ERROR = 0 - SEC_E_OK Handle = 0x00000000 - ERROR_INVALID_FUNCTION syscall.Errno = 1 - ERROR_FILE_NOT_FOUND syscall.Errno = 2 - ERROR_PATH_NOT_FOUND syscall.Errno = 3 - ERROR_TOO_MANY_OPEN_FILES syscall.Errno = 4 - ERROR_ACCESS_DENIED syscall.Errno = 5 - ERROR_INVALID_HANDLE syscall.Errno = 6 - ERROR_ARENA_TRASHED syscall.Errno = 7 - ERROR_NOT_ENOUGH_MEMORY syscall.Errno = 8 - ERROR_INVALID_BLOCK syscall.Errno = 9 - ERROR_BAD_ENVIRONMENT syscall.Errno = 10 - ERROR_BAD_FORMAT syscall.Errno = 11 - ERROR_INVALID_ACCESS syscall.Errno = 12 - ERROR_INVALID_DATA syscall.Errno = 13 - ERROR_OUTOFMEMORY syscall.Errno = 14 - ERROR_INVALID_DRIVE syscall.Errno = 15 - ERROR_CURRENT_DIRECTORY syscall.Errno = 16 - ERROR_NOT_SAME_DEVICE syscall.Errno = 17 - ERROR_NO_MORE_FILES syscall.Errno = 18 - ERROR_WRITE_PROTECT syscall.Errno = 19 - ERROR_BAD_UNIT syscall.Errno = 20 - ERROR_NOT_READY syscall.Errno = 21 - ERROR_BAD_COMMAND syscall.Errno = 22 - ERROR_CRC syscall.Errno = 23 - ERROR_BAD_LENGTH syscall.Errno = 24 - ERROR_SEEK syscall.Errno = 25 - ERROR_NOT_DOS_DISK syscall.Errno = 26 - ERROR_SECTOR_NOT_FOUND syscall.Errno = 27 - ERROR_OUT_OF_PAPER syscall.Errno = 28 - ERROR_WRITE_FAULT syscall.Errno = 29 - ERROR_READ_FAULT syscall.Errno = 30 - ERROR_GEN_FAILURE syscall.Errno = 31 - ERROR_SHARING_VIOLATION syscall.Errno = 32 - ERROR_LOCK_VIOLATION syscall.Errno = 33 - ERROR_WRONG_DISK syscall.Errno = 34 - ERROR_SHARING_BUFFER_EXCEEDED syscall.Errno = 36 - ERROR_HANDLE_EOF syscall.Errno = 38 - ERROR_HANDLE_DISK_FULL syscall.Errno = 39 - ERROR_NOT_SUPPORTED syscall.Errno = 50 - ERROR_REM_NOT_LIST syscall.Errno = 51 - ERROR_DUP_NAME syscall.Errno = 52 - ERROR_BAD_NETPATH syscall.Errno = 53 - ERROR_NETWORK_BUSY syscall.Errno = 54 - ERROR_DEV_NOT_EXIST syscall.Errno = 55 - ERROR_TOO_MANY_CMDS syscall.Errno = 56 - ERROR_ADAP_HDW_ERR syscall.Errno = 57 - ERROR_BAD_NET_RESP syscall.Errno = 58 - ERROR_UNEXP_NET_ERR syscall.Errno = 59 - ERROR_BAD_REM_ADAP syscall.Errno = 60 - ERROR_PRINTQ_FULL syscall.Errno = 61 - ERROR_NO_SPOOL_SPACE syscall.Errno = 62 - ERROR_PRINT_CANCELLED syscall.Errno = 63 - ERROR_NETNAME_DELETED syscall.Errno = 64 - ERROR_NETWORK_ACCESS_DENIED syscall.Errno = 65 - ERROR_BAD_DEV_TYPE syscall.Errno = 66 - ERROR_BAD_NET_NAME syscall.Errno = 67 - ERROR_TOO_MANY_NAMES syscall.Errno = 68 - ERROR_TOO_MANY_SESS syscall.Errno = 69 - ERROR_SHARING_PAUSED syscall.Errno = 70 - ERROR_REQ_NOT_ACCEP syscall.Errno = 71 - ERROR_REDIR_PAUSED syscall.Errno = 72 - ERROR_FILE_EXISTS syscall.Errno = 80 - ERROR_CANNOT_MAKE syscall.Errno = 82 - ERROR_FAIL_I24 syscall.Errno = 83 - ERROR_OUT_OF_STRUCTURES syscall.Errno = 84 - ERROR_ALREADY_ASSIGNED syscall.Errno = 85 - ERROR_INVALID_PASSWORD syscall.Errno = 86 - ERROR_INVALID_PARAMETER syscall.Errno = 87 - ERROR_NET_WRITE_FAULT syscall.Errno = 88 - ERROR_NO_PROC_SLOTS syscall.Errno = 89 - ERROR_TOO_MANY_SEMAPHORES syscall.Errno = 100 - ERROR_EXCL_SEM_ALREADY_OWNED syscall.Errno = 101 - ERROR_SEM_IS_SET syscall.Errno = 102 - ERROR_TOO_MANY_SEM_REQUESTS syscall.Errno = 103 - 
ERROR_INVALID_AT_INTERRUPT_TIME syscall.Errno = 104 - ERROR_SEM_OWNER_DIED syscall.Errno = 105 - ERROR_SEM_USER_LIMIT syscall.Errno = 106 - ERROR_DISK_CHANGE syscall.Errno = 107 - ERROR_DRIVE_LOCKED syscall.Errno = 108 - ERROR_BROKEN_PIPE syscall.Errno = 109 - ERROR_OPEN_FAILED syscall.Errno = 110 - ERROR_BUFFER_OVERFLOW syscall.Errno = 111 - ERROR_DISK_FULL syscall.Errno = 112 - ERROR_NO_MORE_SEARCH_HANDLES syscall.Errno = 113 - ERROR_INVALID_TARGET_HANDLE syscall.Errno = 114 - ERROR_INVALID_CATEGORY syscall.Errno = 117 - ERROR_INVALID_VERIFY_SWITCH syscall.Errno = 118 - ERROR_BAD_DRIVER_LEVEL syscall.Errno = 119 - ERROR_CALL_NOT_IMPLEMENTED syscall.Errno = 120 - ERROR_SEM_TIMEOUT syscall.Errno = 121 - ERROR_INSUFFICIENT_BUFFER syscall.Errno = 122 - ERROR_INVALID_NAME syscall.Errno = 123 - ERROR_INVALID_LEVEL syscall.Errno = 124 - ERROR_NO_VOLUME_LABEL syscall.Errno = 125 - ERROR_MOD_NOT_FOUND syscall.Errno = 126 - ERROR_PROC_NOT_FOUND syscall.Errno = 127 - ERROR_WAIT_NO_CHILDREN syscall.Errno = 128 - ERROR_CHILD_NOT_COMPLETE syscall.Errno = 129 - ERROR_DIRECT_ACCESS_HANDLE syscall.Errno = 130 - ERROR_NEGATIVE_SEEK syscall.Errno = 131 - ERROR_SEEK_ON_DEVICE syscall.Errno = 132 - ERROR_IS_JOIN_TARGET syscall.Errno = 133 - ERROR_IS_JOINED syscall.Errno = 134 - ERROR_IS_SUBSTED syscall.Errno = 135 - ERROR_NOT_JOINED syscall.Errno = 136 - ERROR_NOT_SUBSTED syscall.Errno = 137 - ERROR_JOIN_TO_JOIN syscall.Errno = 138 - ERROR_SUBST_TO_SUBST syscall.Errno = 139 - ERROR_JOIN_TO_SUBST syscall.Errno = 140 - ERROR_SUBST_TO_JOIN syscall.Errno = 141 - ERROR_BUSY_DRIVE syscall.Errno = 142 - ERROR_SAME_DRIVE syscall.Errno = 143 - ERROR_DIR_NOT_ROOT syscall.Errno = 144 - ERROR_DIR_NOT_EMPTY syscall.Errno = 145 - ERROR_IS_SUBST_PATH syscall.Errno = 146 - ERROR_IS_JOIN_PATH syscall.Errno = 147 - ERROR_PATH_BUSY syscall.Errno = 148 - ERROR_IS_SUBST_TARGET syscall.Errno = 149 - ERROR_SYSTEM_TRACE syscall.Errno = 150 - ERROR_INVALID_EVENT_COUNT syscall.Errno = 151 - ERROR_TOO_MANY_MUXWAITERS syscall.Errno = 152 - ERROR_INVALID_LIST_FORMAT syscall.Errno = 153 - ERROR_LABEL_TOO_LONG syscall.Errno = 154 - ERROR_TOO_MANY_TCBS syscall.Errno = 155 - ERROR_SIGNAL_REFUSED syscall.Errno = 156 - ERROR_DISCARDED syscall.Errno = 157 - ERROR_NOT_LOCKED syscall.Errno = 158 - ERROR_BAD_THREADID_ADDR syscall.Errno = 159 - ERROR_BAD_ARGUMENTS syscall.Errno = 160 - ERROR_BAD_PATHNAME syscall.Errno = 161 - ERROR_SIGNAL_PENDING syscall.Errno = 162 - ERROR_MAX_THRDS_REACHED syscall.Errno = 164 - ERROR_LOCK_FAILED syscall.Errno = 167 - ERROR_BUSY syscall.Errno = 170 - ERROR_DEVICE_SUPPORT_IN_PROGRESS syscall.Errno = 171 - ERROR_CANCEL_VIOLATION syscall.Errno = 173 - ERROR_ATOMIC_LOCKS_NOT_SUPPORTED syscall.Errno = 174 - ERROR_INVALID_SEGMENT_NUMBER syscall.Errno = 180 - ERROR_INVALID_ORDINAL syscall.Errno = 182 - ERROR_ALREADY_EXISTS syscall.Errno = 183 - ERROR_INVALID_FLAG_NUMBER syscall.Errno = 186 - ERROR_SEM_NOT_FOUND syscall.Errno = 187 - ERROR_INVALID_STARTING_CODESEG syscall.Errno = 188 - ERROR_INVALID_STACKSEG syscall.Errno = 189 - ERROR_INVALID_MODULETYPE syscall.Errno = 190 - ERROR_INVALID_EXE_SIGNATURE syscall.Errno = 191 - ERROR_EXE_MARKED_INVALID syscall.Errno = 192 - ERROR_BAD_EXE_FORMAT syscall.Errno = 193 - ERROR_ITERATED_DATA_EXCEEDS_64k syscall.Errno = 194 - ERROR_INVALID_MINALLOCSIZE syscall.Errno = 195 - ERROR_DYNLINK_FROM_INVALID_RING syscall.Errno = 196 - ERROR_IOPL_NOT_ENABLED syscall.Errno = 197 - ERROR_INVALID_SEGDPL syscall.Errno = 198 - ERROR_AUTODATASEG_EXCEEDS_64k syscall.Errno = 199 - 
ERROR_RING2SEG_MUST_BE_MOVABLE syscall.Errno = 200 - ERROR_RELOC_CHAIN_XEEDS_SEGLIM syscall.Errno = 201 - ERROR_INFLOOP_IN_RELOC_CHAIN syscall.Errno = 202 - ERROR_ENVVAR_NOT_FOUND syscall.Errno = 203 - ERROR_NO_SIGNAL_SENT syscall.Errno = 205 - ERROR_FILENAME_EXCED_RANGE syscall.Errno = 206 - ERROR_RING2_STACK_IN_USE syscall.Errno = 207 - ERROR_META_EXPANSION_TOO_LONG syscall.Errno = 208 - ERROR_INVALID_SIGNAL_NUMBER syscall.Errno = 209 - ERROR_THREAD_1_INACTIVE syscall.Errno = 210 - ERROR_LOCKED syscall.Errno = 212 - ERROR_TOO_MANY_MODULES syscall.Errno = 214 - ERROR_NESTING_NOT_ALLOWED syscall.Errno = 215 - ERROR_EXE_MACHINE_TYPE_MISMATCH syscall.Errno = 216 - ERROR_EXE_CANNOT_MODIFY_SIGNED_BINARY syscall.Errno = 217 - ERROR_EXE_CANNOT_MODIFY_STRONG_SIGNED_BINARY syscall.Errno = 218 - ERROR_FILE_CHECKED_OUT syscall.Errno = 220 - ERROR_CHECKOUT_REQUIRED syscall.Errno = 221 - ERROR_BAD_FILE_TYPE syscall.Errno = 222 - ERROR_FILE_TOO_LARGE syscall.Errno = 223 - ERROR_FORMS_AUTH_REQUIRED syscall.Errno = 224 - ERROR_VIRUS_INFECTED syscall.Errno = 225 - ERROR_VIRUS_DELETED syscall.Errno = 226 - ERROR_PIPE_LOCAL syscall.Errno = 229 - ERROR_BAD_PIPE syscall.Errno = 230 - ERROR_PIPE_BUSY syscall.Errno = 231 - ERROR_NO_DATA syscall.Errno = 232 - ERROR_PIPE_NOT_CONNECTED syscall.Errno = 233 - ERROR_MORE_DATA syscall.Errno = 234 - ERROR_NO_WORK_DONE syscall.Errno = 235 - ERROR_VC_DISCONNECTED syscall.Errno = 240 - ERROR_INVALID_EA_NAME syscall.Errno = 254 - ERROR_EA_LIST_INCONSISTENT syscall.Errno = 255 - WAIT_TIMEOUT syscall.Errno = 258 - ERROR_NO_MORE_ITEMS syscall.Errno = 259 - ERROR_CANNOT_COPY syscall.Errno = 266 - ERROR_DIRECTORY syscall.Errno = 267 - ERROR_EAS_DIDNT_FIT syscall.Errno = 275 - ERROR_EA_FILE_CORRUPT syscall.Errno = 276 - ERROR_EA_TABLE_FULL syscall.Errno = 277 - ERROR_INVALID_EA_HANDLE syscall.Errno = 278 - ERROR_EAS_NOT_SUPPORTED syscall.Errno = 282 - ERROR_NOT_OWNER syscall.Errno = 288 - ERROR_TOO_MANY_POSTS syscall.Errno = 298 - ERROR_PARTIAL_COPY syscall.Errno = 299 - ERROR_OPLOCK_NOT_GRANTED syscall.Errno = 300 - ERROR_INVALID_OPLOCK_PROTOCOL syscall.Errno = 301 - ERROR_DISK_TOO_FRAGMENTED syscall.Errno = 302 - ERROR_DELETE_PENDING syscall.Errno = 303 - ERROR_INCOMPATIBLE_WITH_GLOBAL_SHORT_NAME_REGISTRY_SETTING syscall.Errno = 304 - ERROR_SHORT_NAMES_NOT_ENABLED_ON_VOLUME syscall.Errno = 305 - ERROR_SECURITY_STREAM_IS_INCONSISTENT syscall.Errno = 306 - ERROR_INVALID_LOCK_RANGE syscall.Errno = 307 - ERROR_IMAGE_SUBSYSTEM_NOT_PRESENT syscall.Errno = 308 - ERROR_NOTIFICATION_GUID_ALREADY_DEFINED syscall.Errno = 309 - ERROR_INVALID_EXCEPTION_HANDLER syscall.Errno = 310 - ERROR_DUPLICATE_PRIVILEGES syscall.Errno = 311 - ERROR_NO_RANGES_PROCESSED syscall.Errno = 312 - ERROR_NOT_ALLOWED_ON_SYSTEM_FILE syscall.Errno = 313 - ERROR_DISK_RESOURCES_EXHAUSTED syscall.Errno = 314 - ERROR_INVALID_TOKEN syscall.Errno = 315 - ERROR_DEVICE_FEATURE_NOT_SUPPORTED syscall.Errno = 316 - ERROR_MR_MID_NOT_FOUND syscall.Errno = 317 - ERROR_SCOPE_NOT_FOUND syscall.Errno = 318 - ERROR_UNDEFINED_SCOPE syscall.Errno = 319 - ERROR_INVALID_CAP syscall.Errno = 320 - ERROR_DEVICE_UNREACHABLE syscall.Errno = 321 - ERROR_DEVICE_NO_RESOURCES syscall.Errno = 322 - ERROR_DATA_CHECKSUM_ERROR syscall.Errno = 323 - ERROR_INTERMIXED_KERNEL_EA_OPERATION syscall.Errno = 324 - ERROR_FILE_LEVEL_TRIM_NOT_SUPPORTED syscall.Errno = 326 - ERROR_OFFSET_ALIGNMENT_VIOLATION syscall.Errno = 327 - ERROR_INVALID_FIELD_IN_PARAMETER_LIST syscall.Errno = 328 - ERROR_OPERATION_IN_PROGRESS syscall.Errno = 329 - ERROR_BAD_DEVICE_PATH 
syscall.Errno = 330 - ERROR_TOO_MANY_DESCRIPTORS syscall.Errno = 331 - ERROR_SCRUB_DATA_DISABLED syscall.Errno = 332 - ERROR_NOT_REDUNDANT_STORAGE syscall.Errno = 333 - ERROR_RESIDENT_FILE_NOT_SUPPORTED syscall.Errno = 334 - ERROR_COMPRESSED_FILE_NOT_SUPPORTED syscall.Errno = 335 - ERROR_DIRECTORY_NOT_SUPPORTED syscall.Errno = 336 - ERROR_NOT_READ_FROM_COPY syscall.Errno = 337 - ERROR_FT_WRITE_FAILURE syscall.Errno = 338 - ERROR_FT_DI_SCAN_REQUIRED syscall.Errno = 339 - ERROR_INVALID_KERNEL_INFO_VERSION syscall.Errno = 340 - ERROR_INVALID_PEP_INFO_VERSION syscall.Errno = 341 - ERROR_OBJECT_NOT_EXTERNALLY_BACKED syscall.Errno = 342 - ERROR_EXTERNAL_BACKING_PROVIDER_UNKNOWN syscall.Errno = 343 - ERROR_COMPRESSION_NOT_BENEFICIAL syscall.Errno = 344 - ERROR_STORAGE_TOPOLOGY_ID_MISMATCH syscall.Errno = 345 - ERROR_BLOCKED_BY_PARENTAL_CONTROLS syscall.Errno = 346 - ERROR_BLOCK_TOO_MANY_REFERENCES syscall.Errno = 347 - ERROR_MARKED_TO_DISALLOW_WRITES syscall.Errno = 348 - ERROR_ENCLAVE_FAILURE syscall.Errno = 349 - ERROR_FAIL_NOACTION_REBOOT syscall.Errno = 350 - ERROR_FAIL_SHUTDOWN syscall.Errno = 351 - ERROR_FAIL_RESTART syscall.Errno = 352 - ERROR_MAX_SESSIONS_REACHED syscall.Errno = 353 - ERROR_NETWORK_ACCESS_DENIED_EDP syscall.Errno = 354 - ERROR_DEVICE_HINT_NAME_BUFFER_TOO_SMALL syscall.Errno = 355 - ERROR_EDP_POLICY_DENIES_OPERATION syscall.Errno = 356 - ERROR_EDP_DPL_POLICY_CANT_BE_SATISFIED syscall.Errno = 357 - ERROR_CLOUD_FILE_SYNC_ROOT_METADATA_CORRUPT syscall.Errno = 358 - ERROR_DEVICE_IN_MAINTENANCE syscall.Errno = 359 - ERROR_NOT_SUPPORTED_ON_DAX syscall.Errno = 360 - ERROR_DAX_MAPPING_EXISTS syscall.Errno = 361 - ERROR_CLOUD_FILE_PROVIDER_NOT_RUNNING syscall.Errno = 362 - ERROR_CLOUD_FILE_METADATA_CORRUPT syscall.Errno = 363 - ERROR_CLOUD_FILE_METADATA_TOO_LARGE syscall.Errno = 364 - ERROR_CLOUD_FILE_PROPERTY_BLOB_TOO_LARGE syscall.Errno = 365 - ERROR_CLOUD_FILE_PROPERTY_BLOB_CHECKSUM_MISMATCH syscall.Errno = 366 - ERROR_CHILD_PROCESS_BLOCKED syscall.Errno = 367 - ERROR_STORAGE_LOST_DATA_PERSISTENCE syscall.Errno = 368 - ERROR_FILE_SYSTEM_VIRTUALIZATION_UNAVAILABLE syscall.Errno = 369 - ERROR_FILE_SYSTEM_VIRTUALIZATION_METADATA_CORRUPT syscall.Errno = 370 - ERROR_FILE_SYSTEM_VIRTUALIZATION_BUSY syscall.Errno = 371 - ERROR_FILE_SYSTEM_VIRTUALIZATION_PROVIDER_UNKNOWN syscall.Errno = 372 - ERROR_GDI_HANDLE_LEAK syscall.Errno = 373 - ERROR_CLOUD_FILE_TOO_MANY_PROPERTY_BLOBS syscall.Errno = 374 - ERROR_CLOUD_FILE_PROPERTY_VERSION_NOT_SUPPORTED syscall.Errno = 375 - ERROR_NOT_A_CLOUD_FILE syscall.Errno = 376 - ERROR_CLOUD_FILE_NOT_IN_SYNC syscall.Errno = 377 - ERROR_CLOUD_FILE_ALREADY_CONNECTED syscall.Errno = 378 - ERROR_CLOUD_FILE_NOT_SUPPORTED syscall.Errno = 379 - ERROR_CLOUD_FILE_INVALID_REQUEST syscall.Errno = 380 - ERROR_CLOUD_FILE_READ_ONLY_VOLUME syscall.Errno = 381 - ERROR_CLOUD_FILE_CONNECTED_PROVIDER_ONLY syscall.Errno = 382 - ERROR_CLOUD_FILE_VALIDATION_FAILED syscall.Errno = 383 - ERROR_SMB1_NOT_AVAILABLE syscall.Errno = 384 - ERROR_FILE_SYSTEM_VIRTUALIZATION_INVALID_OPERATION syscall.Errno = 385 - ERROR_CLOUD_FILE_AUTHENTICATION_FAILED syscall.Errno = 386 - ERROR_CLOUD_FILE_INSUFFICIENT_RESOURCES syscall.Errno = 387 - ERROR_CLOUD_FILE_NETWORK_UNAVAILABLE syscall.Errno = 388 - ERROR_CLOUD_FILE_UNSUCCESSFUL syscall.Errno = 389 - ERROR_CLOUD_FILE_NOT_UNDER_SYNC_ROOT syscall.Errno = 390 - ERROR_CLOUD_FILE_IN_USE syscall.Errno = 391 - ERROR_CLOUD_FILE_PINNED syscall.Errno = 392 - ERROR_CLOUD_FILE_REQUEST_ABORTED syscall.Errno = 393 - ERROR_CLOUD_FILE_PROPERTY_CORRUPT 
syscall.Errno = 394 - ERROR_CLOUD_FILE_ACCESS_DENIED syscall.Errno = 395 - ERROR_CLOUD_FILE_INCOMPATIBLE_HARDLINKS syscall.Errno = 396 - ERROR_CLOUD_FILE_PROPERTY_LOCK_CONFLICT syscall.Errno = 397 - ERROR_CLOUD_FILE_REQUEST_CANCELED syscall.Errno = 398 - ERROR_EXTERNAL_SYSKEY_NOT_SUPPORTED syscall.Errno = 399 - ERROR_THREAD_MODE_ALREADY_BACKGROUND syscall.Errno = 400 - ERROR_THREAD_MODE_NOT_BACKGROUND syscall.Errno = 401 - ERROR_PROCESS_MODE_ALREADY_BACKGROUND syscall.Errno = 402 - ERROR_PROCESS_MODE_NOT_BACKGROUND syscall.Errno = 403 - ERROR_CLOUD_FILE_PROVIDER_TERMINATED syscall.Errno = 404 - ERROR_NOT_A_CLOUD_SYNC_ROOT syscall.Errno = 405 - ERROR_FILE_PROTECTED_UNDER_DPL syscall.Errno = 406 - ERROR_VOLUME_NOT_CLUSTER_ALIGNED syscall.Errno = 407 - ERROR_NO_PHYSICALLY_ALIGNED_FREE_SPACE_FOUND syscall.Errno = 408 - ERROR_APPX_FILE_NOT_ENCRYPTED syscall.Errno = 409 - ERROR_RWRAW_ENCRYPTED_FILE_NOT_ENCRYPTED syscall.Errno = 410 - ERROR_RWRAW_ENCRYPTED_INVALID_EDATAINFO_FILEOFFSET syscall.Errno = 411 - ERROR_RWRAW_ENCRYPTED_INVALID_EDATAINFO_FILERANGE syscall.Errno = 412 - ERROR_RWRAW_ENCRYPTED_INVALID_EDATAINFO_PARAMETER syscall.Errno = 413 - ERROR_LINUX_SUBSYSTEM_NOT_PRESENT syscall.Errno = 414 - ERROR_FT_READ_FAILURE syscall.Errno = 415 - ERROR_STORAGE_RESERVE_ID_INVALID syscall.Errno = 416 - ERROR_STORAGE_RESERVE_DOES_NOT_EXIST syscall.Errno = 417 - ERROR_STORAGE_RESERVE_ALREADY_EXISTS syscall.Errno = 418 - ERROR_STORAGE_RESERVE_NOT_EMPTY syscall.Errno = 419 - ERROR_NOT_A_DAX_VOLUME syscall.Errno = 420 - ERROR_NOT_DAX_MAPPABLE syscall.Errno = 421 - ERROR_TIME_SENSITIVE_THREAD syscall.Errno = 422 - ERROR_DPL_NOT_SUPPORTED_FOR_USER syscall.Errno = 423 - ERROR_CASE_DIFFERING_NAMES_IN_DIR syscall.Errno = 424 - ERROR_FILE_NOT_SUPPORTED syscall.Errno = 425 - ERROR_CLOUD_FILE_REQUEST_TIMEOUT syscall.Errno = 426 - ERROR_NO_TASK_QUEUE syscall.Errno = 427 - ERROR_SRC_SRV_DLL_LOAD_FAILED syscall.Errno = 428 - ERROR_NOT_SUPPORTED_WITH_BTT syscall.Errno = 429 - ERROR_ENCRYPTION_DISABLED syscall.Errno = 430 - ERROR_ENCRYPTING_METADATA_DISALLOWED syscall.Errno = 431 - ERROR_CANT_CLEAR_ENCRYPTION_FLAG syscall.Errno = 432 - ERROR_NO_SUCH_DEVICE syscall.Errno = 433 - ERROR_CAPAUTHZ_NOT_DEVUNLOCKED syscall.Errno = 450 - ERROR_CAPAUTHZ_CHANGE_TYPE syscall.Errno = 451 - ERROR_CAPAUTHZ_NOT_PROVISIONED syscall.Errno = 452 - ERROR_CAPAUTHZ_NOT_AUTHORIZED syscall.Errno = 453 - ERROR_CAPAUTHZ_NO_POLICY syscall.Errno = 454 - ERROR_CAPAUTHZ_DB_CORRUPTED syscall.Errno = 455 - ERROR_CAPAUTHZ_SCCD_INVALID_CATALOG syscall.Errno = 456 - ERROR_CAPAUTHZ_SCCD_NO_AUTH_ENTITY syscall.Errno = 457 - ERROR_CAPAUTHZ_SCCD_PARSE_ERROR syscall.Errno = 458 - ERROR_CAPAUTHZ_SCCD_DEV_MODE_REQUIRED syscall.Errno = 459 - ERROR_CAPAUTHZ_SCCD_NO_CAPABILITY_MATCH syscall.Errno = 460 - ERROR_PNP_QUERY_REMOVE_DEVICE_TIMEOUT syscall.Errno = 480 - ERROR_PNP_QUERY_REMOVE_RELATED_DEVICE_TIMEOUT syscall.Errno = 481 - ERROR_PNP_QUERY_REMOVE_UNRELATED_DEVICE_TIMEOUT syscall.Errno = 482 - ERROR_DEVICE_HARDWARE_ERROR syscall.Errno = 483 - ERROR_INVALID_ADDRESS syscall.Errno = 487 - ERROR_VRF_CFG_ENABLED syscall.Errno = 1183 - ERROR_PARTITION_TERMINATING syscall.Errno = 1184 - ERROR_USER_PROFILE_LOAD syscall.Errno = 500 - ERROR_ARITHMETIC_OVERFLOW syscall.Errno = 534 - ERROR_PIPE_CONNECTED syscall.Errno = 535 - ERROR_PIPE_LISTENING syscall.Errno = 536 - ERROR_VERIFIER_STOP syscall.Errno = 537 - ERROR_ABIOS_ERROR syscall.Errno = 538 - ERROR_WX86_WARNING syscall.Errno = 539 - ERROR_WX86_ERROR syscall.Errno = 540 - ERROR_TIMER_NOT_CANCELED syscall.Errno = 
541 - ERROR_UNWIND syscall.Errno = 542 - ERROR_BAD_STACK syscall.Errno = 543 - ERROR_INVALID_UNWIND_TARGET syscall.Errno = 544 - ERROR_INVALID_PORT_ATTRIBUTES syscall.Errno = 545 - ERROR_PORT_MESSAGE_TOO_LONG syscall.Errno = 546 - ERROR_INVALID_QUOTA_LOWER syscall.Errno = 547 - ERROR_DEVICE_ALREADY_ATTACHED syscall.Errno = 548 - ERROR_INSTRUCTION_MISALIGNMENT syscall.Errno = 549 - ERROR_PROFILING_NOT_STARTED syscall.Errno = 550 - ERROR_PROFILING_NOT_STOPPED syscall.Errno = 551 - ERROR_COULD_NOT_INTERPRET syscall.Errno = 552 - ERROR_PROFILING_AT_LIMIT syscall.Errno = 553 - ERROR_CANT_WAIT syscall.Errno = 554 - ERROR_CANT_TERMINATE_SELF syscall.Errno = 555 - ERROR_UNEXPECTED_MM_CREATE_ERR syscall.Errno = 556 - ERROR_UNEXPECTED_MM_MAP_ERROR syscall.Errno = 557 - ERROR_UNEXPECTED_MM_EXTEND_ERR syscall.Errno = 558 - ERROR_BAD_FUNCTION_TABLE syscall.Errno = 559 - ERROR_NO_GUID_TRANSLATION syscall.Errno = 560 - ERROR_INVALID_LDT_SIZE syscall.Errno = 561 - ERROR_INVALID_LDT_OFFSET syscall.Errno = 563 - ERROR_INVALID_LDT_DESCRIPTOR syscall.Errno = 564 - ERROR_TOO_MANY_THREADS syscall.Errno = 565 - ERROR_THREAD_NOT_IN_PROCESS syscall.Errno = 566 - ERROR_PAGEFILE_QUOTA_EXCEEDED syscall.Errno = 567 - ERROR_LOGON_SERVER_CONFLICT syscall.Errno = 568 - ERROR_SYNCHRONIZATION_REQUIRED syscall.Errno = 569 - ERROR_NET_OPEN_FAILED syscall.Errno = 570 - ERROR_IO_PRIVILEGE_FAILED syscall.Errno = 571 - ERROR_CONTROL_C_EXIT syscall.Errno = 572 - ERROR_MISSING_SYSTEMFILE syscall.Errno = 573 - ERROR_UNHANDLED_EXCEPTION syscall.Errno = 574 - ERROR_APP_INIT_FAILURE syscall.Errno = 575 - ERROR_PAGEFILE_CREATE_FAILED syscall.Errno = 576 - ERROR_INVALID_IMAGE_HASH syscall.Errno = 577 - ERROR_NO_PAGEFILE syscall.Errno = 578 - ERROR_ILLEGAL_FLOAT_CONTEXT syscall.Errno = 579 - ERROR_NO_EVENT_PAIR syscall.Errno = 580 - ERROR_DOMAIN_CTRLR_CONFIG_ERROR syscall.Errno = 581 - ERROR_ILLEGAL_CHARACTER syscall.Errno = 582 - ERROR_UNDEFINED_CHARACTER syscall.Errno = 583 - ERROR_FLOPPY_VOLUME syscall.Errno = 584 - ERROR_BIOS_FAILED_TO_CONNECT_INTERRUPT syscall.Errno = 585 - ERROR_BACKUP_CONTROLLER syscall.Errno = 586 - ERROR_MUTANT_LIMIT_EXCEEDED syscall.Errno = 587 - ERROR_FS_DRIVER_REQUIRED syscall.Errno = 588 - ERROR_CANNOT_LOAD_REGISTRY_FILE syscall.Errno = 589 - ERROR_DEBUG_ATTACH_FAILED syscall.Errno = 590 - ERROR_SYSTEM_PROCESS_TERMINATED syscall.Errno = 591 - ERROR_DATA_NOT_ACCEPTED syscall.Errno = 592 - ERROR_VDM_HARD_ERROR syscall.Errno = 593 - ERROR_DRIVER_CANCEL_TIMEOUT syscall.Errno = 594 - ERROR_REPLY_MESSAGE_MISMATCH syscall.Errno = 595 - ERROR_LOST_WRITEBEHIND_DATA syscall.Errno = 596 - ERROR_CLIENT_SERVER_PARAMETERS_INVALID syscall.Errno = 597 - ERROR_NOT_TINY_STREAM syscall.Errno = 598 - ERROR_STACK_OVERFLOW_READ syscall.Errno = 599 - ERROR_CONVERT_TO_LARGE syscall.Errno = 600 - ERROR_FOUND_OUT_OF_SCOPE syscall.Errno = 601 - ERROR_ALLOCATE_BUCKET syscall.Errno = 602 - ERROR_MARSHALL_OVERFLOW syscall.Errno = 603 - ERROR_INVALID_VARIANT syscall.Errno = 604 - ERROR_BAD_COMPRESSION_BUFFER syscall.Errno = 605 - ERROR_AUDIT_FAILED syscall.Errno = 606 - ERROR_TIMER_RESOLUTION_NOT_SET syscall.Errno = 607 - ERROR_INSUFFICIENT_LOGON_INFO syscall.Errno = 608 - ERROR_BAD_DLL_ENTRYPOINT syscall.Errno = 609 - ERROR_BAD_SERVICE_ENTRYPOINT syscall.Errno = 610 - ERROR_IP_ADDRESS_CONFLICT1 syscall.Errno = 611 - ERROR_IP_ADDRESS_CONFLICT2 syscall.Errno = 612 - ERROR_REGISTRY_QUOTA_LIMIT syscall.Errno = 613 - ERROR_NO_CALLBACK_ACTIVE syscall.Errno = 614 - ERROR_PWD_TOO_SHORT syscall.Errno = 615 - ERROR_PWD_TOO_RECENT syscall.Errno = 
616 - ERROR_PWD_HISTORY_CONFLICT syscall.Errno = 617 - ERROR_UNSUPPORTED_COMPRESSION syscall.Errno = 618 - ERROR_INVALID_HW_PROFILE syscall.Errno = 619 - ERROR_INVALID_PLUGPLAY_DEVICE_PATH syscall.Errno = 620 - ERROR_QUOTA_LIST_INCONSISTENT syscall.Errno = 621 - ERROR_EVALUATION_EXPIRATION syscall.Errno = 622 - ERROR_ILLEGAL_DLL_RELOCATION syscall.Errno = 623 - ERROR_DLL_INIT_FAILED_LOGOFF syscall.Errno = 624 - ERROR_VALIDATE_CONTINUE syscall.Errno = 625 - ERROR_NO_MORE_MATCHES syscall.Errno = 626 - ERROR_RANGE_LIST_CONFLICT syscall.Errno = 627 - ERROR_SERVER_SID_MISMATCH syscall.Errno = 628 - ERROR_CANT_ENABLE_DENY_ONLY syscall.Errno = 629 - ERROR_FLOAT_MULTIPLE_FAULTS syscall.Errno = 630 - ERROR_FLOAT_MULTIPLE_TRAPS syscall.Errno = 631 - ERROR_NOINTERFACE syscall.Errno = 632 - ERROR_DRIVER_FAILED_SLEEP syscall.Errno = 633 - ERROR_CORRUPT_SYSTEM_FILE syscall.Errno = 634 - ERROR_COMMITMENT_MINIMUM syscall.Errno = 635 - ERROR_PNP_RESTART_ENUMERATION syscall.Errno = 636 - ERROR_SYSTEM_IMAGE_BAD_SIGNATURE syscall.Errno = 637 - ERROR_PNP_REBOOT_REQUIRED syscall.Errno = 638 - ERROR_INSUFFICIENT_POWER syscall.Errno = 639 - ERROR_MULTIPLE_FAULT_VIOLATION syscall.Errno = 640 - ERROR_SYSTEM_SHUTDOWN syscall.Errno = 641 - ERROR_PORT_NOT_SET syscall.Errno = 642 - ERROR_DS_VERSION_CHECK_FAILURE syscall.Errno = 643 - ERROR_RANGE_NOT_FOUND syscall.Errno = 644 - ERROR_NOT_SAFE_MODE_DRIVER syscall.Errno = 646 - ERROR_FAILED_DRIVER_ENTRY syscall.Errno = 647 - ERROR_DEVICE_ENUMERATION_ERROR syscall.Errno = 648 - ERROR_MOUNT_POINT_NOT_RESOLVED syscall.Errno = 649 - ERROR_INVALID_DEVICE_OBJECT_PARAMETER syscall.Errno = 650 - ERROR_MCA_OCCURED syscall.Errno = 651 - ERROR_DRIVER_DATABASE_ERROR syscall.Errno = 652 - ERROR_SYSTEM_HIVE_TOO_LARGE syscall.Errno = 653 - ERROR_DRIVER_FAILED_PRIOR_UNLOAD syscall.Errno = 654 - ERROR_VOLSNAP_PREPARE_HIBERNATE syscall.Errno = 655 - ERROR_HIBERNATION_FAILURE syscall.Errno = 656 - ERROR_PWD_TOO_LONG syscall.Errno = 657 - ERROR_FILE_SYSTEM_LIMITATION syscall.Errno = 665 - ERROR_ASSERTION_FAILURE syscall.Errno = 668 - ERROR_ACPI_ERROR syscall.Errno = 669 - ERROR_WOW_ASSERTION syscall.Errno = 670 - ERROR_PNP_BAD_MPS_TABLE syscall.Errno = 671 - ERROR_PNP_TRANSLATION_FAILED syscall.Errno = 672 - ERROR_PNP_IRQ_TRANSLATION_FAILED syscall.Errno = 673 - ERROR_PNP_INVALID_ID syscall.Errno = 674 - ERROR_WAKE_SYSTEM_DEBUGGER syscall.Errno = 675 - ERROR_HANDLES_CLOSED syscall.Errno = 676 - ERROR_EXTRANEOUS_INFORMATION syscall.Errno = 677 - ERROR_RXACT_COMMIT_NECESSARY syscall.Errno = 678 - ERROR_MEDIA_CHECK syscall.Errno = 679 - ERROR_GUID_SUBSTITUTION_MADE syscall.Errno = 680 - ERROR_STOPPED_ON_SYMLINK syscall.Errno = 681 - ERROR_LONGJUMP syscall.Errno = 682 - ERROR_PLUGPLAY_QUERY_VETOED syscall.Errno = 683 - ERROR_UNWIND_CONSOLIDATE syscall.Errno = 684 - ERROR_REGISTRY_HIVE_RECOVERED syscall.Errno = 685 - ERROR_DLL_MIGHT_BE_INSECURE syscall.Errno = 686 - ERROR_DLL_MIGHT_BE_INCOMPATIBLE syscall.Errno = 687 - ERROR_DBG_EXCEPTION_NOT_HANDLED syscall.Errno = 688 - ERROR_DBG_REPLY_LATER syscall.Errno = 689 - ERROR_DBG_UNABLE_TO_PROVIDE_HANDLE syscall.Errno = 690 - ERROR_DBG_TERMINATE_THREAD syscall.Errno = 691 - ERROR_DBG_TERMINATE_PROCESS syscall.Errno = 692 - ERROR_DBG_CONTROL_C syscall.Errno = 693 - ERROR_DBG_PRINTEXCEPTION_C syscall.Errno = 694 - ERROR_DBG_RIPEXCEPTION syscall.Errno = 695 - ERROR_DBG_CONTROL_BREAK syscall.Errno = 696 - ERROR_DBG_COMMAND_EXCEPTION syscall.Errno = 697 - ERROR_OBJECT_NAME_EXISTS syscall.Errno = 698 - ERROR_THREAD_WAS_SUSPENDED syscall.Errno = 699 - 
ERROR_IMAGE_NOT_AT_BASE syscall.Errno = 700 - ERROR_RXACT_STATE_CREATED syscall.Errno = 701 - ERROR_SEGMENT_NOTIFICATION syscall.Errno = 702 - ERROR_BAD_CURRENT_DIRECTORY syscall.Errno = 703 - ERROR_FT_READ_RECOVERY_FROM_BACKUP syscall.Errno = 704 - ERROR_FT_WRITE_RECOVERY syscall.Errno = 705 - ERROR_IMAGE_MACHINE_TYPE_MISMATCH syscall.Errno = 706 - ERROR_RECEIVE_PARTIAL syscall.Errno = 707 - ERROR_RECEIVE_EXPEDITED syscall.Errno = 708 - ERROR_RECEIVE_PARTIAL_EXPEDITED syscall.Errno = 709 - ERROR_EVENT_DONE syscall.Errno = 710 - ERROR_EVENT_PENDING syscall.Errno = 711 - ERROR_CHECKING_FILE_SYSTEM syscall.Errno = 712 - ERROR_FATAL_APP_EXIT syscall.Errno = 713 - ERROR_PREDEFINED_HANDLE syscall.Errno = 714 - ERROR_WAS_UNLOCKED syscall.Errno = 715 - ERROR_SERVICE_NOTIFICATION syscall.Errno = 716 - ERROR_WAS_LOCKED syscall.Errno = 717 - ERROR_LOG_HARD_ERROR syscall.Errno = 718 - ERROR_ALREADY_WIN32 syscall.Errno = 719 - ERROR_IMAGE_MACHINE_TYPE_MISMATCH_EXE syscall.Errno = 720 - ERROR_NO_YIELD_PERFORMED syscall.Errno = 721 - ERROR_TIMER_RESUME_IGNORED syscall.Errno = 722 - ERROR_ARBITRATION_UNHANDLED syscall.Errno = 723 - ERROR_CARDBUS_NOT_SUPPORTED syscall.Errno = 724 - ERROR_MP_PROCESSOR_MISMATCH syscall.Errno = 725 - ERROR_HIBERNATED syscall.Errno = 726 - ERROR_RESUME_HIBERNATION syscall.Errno = 727 - ERROR_FIRMWARE_UPDATED syscall.Errno = 728 - ERROR_DRIVERS_LEAKING_LOCKED_PAGES syscall.Errno = 729 - ERROR_WAKE_SYSTEM syscall.Errno = 730 - ERROR_WAIT_1 syscall.Errno = 731 - ERROR_WAIT_2 syscall.Errno = 732 - ERROR_WAIT_3 syscall.Errno = 733 - ERROR_WAIT_63 syscall.Errno = 734 - ERROR_ABANDONED_WAIT_0 syscall.Errno = 735 - ERROR_ABANDONED_WAIT_63 syscall.Errno = 736 - ERROR_USER_APC syscall.Errno = 737 - ERROR_KERNEL_APC syscall.Errno = 738 - ERROR_ALERTED syscall.Errno = 739 - ERROR_ELEVATION_REQUIRED syscall.Errno = 740 - ERROR_REPARSE syscall.Errno = 741 - ERROR_OPLOCK_BREAK_IN_PROGRESS syscall.Errno = 742 - ERROR_VOLUME_MOUNTED syscall.Errno = 743 - ERROR_RXACT_COMMITTED syscall.Errno = 744 - ERROR_NOTIFY_CLEANUP syscall.Errno = 745 - ERROR_PRIMARY_TRANSPORT_CONNECT_FAILED syscall.Errno = 746 - ERROR_PAGE_FAULT_TRANSITION syscall.Errno = 747 - ERROR_PAGE_FAULT_DEMAND_ZERO syscall.Errno = 748 - ERROR_PAGE_FAULT_COPY_ON_WRITE syscall.Errno = 749 - ERROR_PAGE_FAULT_GUARD_PAGE syscall.Errno = 750 - ERROR_PAGE_FAULT_PAGING_FILE syscall.Errno = 751 - ERROR_CACHE_PAGE_LOCKED syscall.Errno = 752 - ERROR_CRASH_DUMP syscall.Errno = 753 - ERROR_BUFFER_ALL_ZEROS syscall.Errno = 754 - ERROR_REPARSE_OBJECT syscall.Errno = 755 - ERROR_RESOURCE_REQUIREMENTS_CHANGED syscall.Errno = 756 - ERROR_TRANSLATION_COMPLETE syscall.Errno = 757 - ERROR_NOTHING_TO_TERMINATE syscall.Errno = 758 - ERROR_PROCESS_NOT_IN_JOB syscall.Errno = 759 - ERROR_PROCESS_IN_JOB syscall.Errno = 760 - ERROR_VOLSNAP_HIBERNATE_READY syscall.Errno = 761 - ERROR_FSFILTER_OP_COMPLETED_SUCCESSFULLY syscall.Errno = 762 - ERROR_INTERRUPT_VECTOR_ALREADY_CONNECTED syscall.Errno = 763 - ERROR_INTERRUPT_STILL_CONNECTED syscall.Errno = 764 - ERROR_WAIT_FOR_OPLOCK syscall.Errno = 765 - ERROR_DBG_EXCEPTION_HANDLED syscall.Errno = 766 - ERROR_DBG_CONTINUE syscall.Errno = 767 - ERROR_CALLBACK_POP_STACK syscall.Errno = 768 - ERROR_COMPRESSION_DISABLED syscall.Errno = 769 - ERROR_CANTFETCHBACKWARDS syscall.Errno = 770 - ERROR_CANTSCROLLBACKWARDS syscall.Errno = 771 - ERROR_ROWSNOTRELEASED syscall.Errno = 772 - ERROR_BAD_ACCESSOR_FLAGS syscall.Errno = 773 - ERROR_ERRORS_ENCOUNTERED syscall.Errno = 774 - ERROR_NOT_CAPABLE syscall.Errno = 775 - 
ERROR_REQUEST_OUT_OF_SEQUENCE syscall.Errno = 776 - ERROR_VERSION_PARSE_ERROR syscall.Errno = 777 - ERROR_BADSTARTPOSITION syscall.Errno = 778 - ERROR_MEMORY_HARDWARE syscall.Errno = 779 - ERROR_DISK_REPAIR_DISABLED syscall.Errno = 780 - ERROR_INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE syscall.Errno = 781 - ERROR_SYSTEM_POWERSTATE_TRANSITION syscall.Errno = 782 - ERROR_SYSTEM_POWERSTATE_COMPLEX_TRANSITION syscall.Errno = 783 - ERROR_MCA_EXCEPTION syscall.Errno = 784 - ERROR_ACCESS_AUDIT_BY_POLICY syscall.Errno = 785 - ERROR_ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY syscall.Errno = 786 - ERROR_ABANDON_HIBERFILE syscall.Errno = 787 - ERROR_LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED syscall.Errno = 788 - ERROR_LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR syscall.Errno = 789 - ERROR_LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR syscall.Errno = 790 - ERROR_BAD_MCFG_TABLE syscall.Errno = 791 - ERROR_DISK_REPAIR_REDIRECTED syscall.Errno = 792 - ERROR_DISK_REPAIR_UNSUCCESSFUL syscall.Errno = 793 - ERROR_CORRUPT_LOG_OVERFULL syscall.Errno = 794 - ERROR_CORRUPT_LOG_CORRUPTED syscall.Errno = 795 - ERROR_CORRUPT_LOG_UNAVAILABLE syscall.Errno = 796 - ERROR_CORRUPT_LOG_DELETED_FULL syscall.Errno = 797 - ERROR_CORRUPT_LOG_CLEARED syscall.Errno = 798 - ERROR_ORPHAN_NAME_EXHAUSTED syscall.Errno = 799 - ERROR_OPLOCK_SWITCHED_TO_NEW_HANDLE syscall.Errno = 800 - ERROR_CANNOT_GRANT_REQUESTED_OPLOCK syscall.Errno = 801 - ERROR_CANNOT_BREAK_OPLOCK syscall.Errno = 802 - ERROR_OPLOCK_HANDLE_CLOSED syscall.Errno = 803 - ERROR_NO_ACE_CONDITION syscall.Errno = 804 - ERROR_INVALID_ACE_CONDITION syscall.Errno = 805 - ERROR_FILE_HANDLE_REVOKED syscall.Errno = 806 - ERROR_IMAGE_AT_DIFFERENT_BASE syscall.Errno = 807 - ERROR_ENCRYPTED_IO_NOT_POSSIBLE syscall.Errno = 808 - ERROR_FILE_METADATA_OPTIMIZATION_IN_PROGRESS syscall.Errno = 809 - ERROR_QUOTA_ACTIVITY syscall.Errno = 810 - ERROR_HANDLE_REVOKED syscall.Errno = 811 - ERROR_CALLBACK_INVOKE_INLINE syscall.Errno = 812 - ERROR_CPU_SET_INVALID syscall.Errno = 813 - ERROR_ENCLAVE_NOT_TERMINATED syscall.Errno = 814 - ERROR_ENCLAVE_VIOLATION syscall.Errno = 815 - ERROR_EA_ACCESS_DENIED syscall.Errno = 994 - ERROR_OPERATION_ABORTED syscall.Errno = 995 - ERROR_IO_INCOMPLETE syscall.Errno = 996 - ERROR_IO_PENDING syscall.Errno = 997 - ERROR_NOACCESS syscall.Errno = 998 - ERROR_SWAPERROR syscall.Errno = 999 - ERROR_STACK_OVERFLOW syscall.Errno = 1001 - ERROR_INVALID_MESSAGE syscall.Errno = 1002 - ERROR_CAN_NOT_COMPLETE syscall.Errno = 1003 - ERROR_INVALID_FLAGS syscall.Errno = 1004 - ERROR_UNRECOGNIZED_VOLUME syscall.Errno = 1005 - ERROR_FILE_INVALID syscall.Errno = 1006 - ERROR_FULLSCREEN_MODE syscall.Errno = 1007 - ERROR_NO_TOKEN syscall.Errno = 1008 - ERROR_BADDB syscall.Errno = 1009 - ERROR_BADKEY syscall.Errno = 1010 - ERROR_CANTOPEN syscall.Errno = 1011 - ERROR_CANTREAD syscall.Errno = 1012 - ERROR_CANTWRITE syscall.Errno = 1013 - ERROR_REGISTRY_RECOVERED syscall.Errno = 1014 - ERROR_REGISTRY_CORRUPT syscall.Errno = 1015 - ERROR_REGISTRY_IO_FAILED syscall.Errno = 1016 - ERROR_NOT_REGISTRY_FILE syscall.Errno = 1017 - ERROR_KEY_DELETED syscall.Errno = 1018 - ERROR_NO_LOG_SPACE syscall.Errno = 1019 - ERROR_KEY_HAS_CHILDREN syscall.Errno = 1020 - ERROR_CHILD_MUST_BE_VOLATILE syscall.Errno = 1021 - ERROR_NOTIFY_ENUM_DIR syscall.Errno = 1022 - ERROR_DEPENDENT_SERVICES_RUNNING syscall.Errno = 1051 - ERROR_INVALID_SERVICE_CONTROL syscall.Errno = 1052 - ERROR_SERVICE_REQUEST_TIMEOUT syscall.Errno = 1053 - ERROR_SERVICE_NO_THREAD syscall.Errno = 1054 - ERROR_SERVICE_DATABASE_LOCKED 
syscall.Errno = 1055 - ERROR_SERVICE_ALREADY_RUNNING syscall.Errno = 1056 - ERROR_INVALID_SERVICE_ACCOUNT syscall.Errno = 1057 - ERROR_SERVICE_DISABLED syscall.Errno = 1058 - ERROR_CIRCULAR_DEPENDENCY syscall.Errno = 1059 - ERROR_SERVICE_DOES_NOT_EXIST syscall.Errno = 1060 - ERROR_SERVICE_CANNOT_ACCEPT_CTRL syscall.Errno = 1061 - ERROR_SERVICE_NOT_ACTIVE syscall.Errno = 1062 - ERROR_FAILED_SERVICE_CONTROLLER_CONNECT syscall.Errno = 1063 - ERROR_EXCEPTION_IN_SERVICE syscall.Errno = 1064 - ERROR_DATABASE_DOES_NOT_EXIST syscall.Errno = 1065 - ERROR_SERVICE_SPECIFIC_ERROR syscall.Errno = 1066 - ERROR_PROCESS_ABORTED syscall.Errno = 1067 - ERROR_SERVICE_DEPENDENCY_FAIL syscall.Errno = 1068 - ERROR_SERVICE_LOGON_FAILED syscall.Errno = 1069 - ERROR_SERVICE_START_HANG syscall.Errno = 1070 - ERROR_INVALID_SERVICE_LOCK syscall.Errno = 1071 - ERROR_SERVICE_MARKED_FOR_DELETE syscall.Errno = 1072 - ERROR_SERVICE_EXISTS syscall.Errno = 1073 - ERROR_ALREADY_RUNNING_LKG syscall.Errno = 1074 - ERROR_SERVICE_DEPENDENCY_DELETED syscall.Errno = 1075 - ERROR_BOOT_ALREADY_ACCEPTED syscall.Errno = 1076 - ERROR_SERVICE_NEVER_STARTED syscall.Errno = 1077 - ERROR_DUPLICATE_SERVICE_NAME syscall.Errno = 1078 - ERROR_DIFFERENT_SERVICE_ACCOUNT syscall.Errno = 1079 - ERROR_CANNOT_DETECT_DRIVER_FAILURE syscall.Errno = 1080 - ERROR_CANNOT_DETECT_PROCESS_ABORT syscall.Errno = 1081 - ERROR_NO_RECOVERY_PROGRAM syscall.Errno = 1082 - ERROR_SERVICE_NOT_IN_EXE syscall.Errno = 1083 - ERROR_NOT_SAFEBOOT_SERVICE syscall.Errno = 1084 - ERROR_END_OF_MEDIA syscall.Errno = 1100 - ERROR_FILEMARK_DETECTED syscall.Errno = 1101 - ERROR_BEGINNING_OF_MEDIA syscall.Errno = 1102 - ERROR_SETMARK_DETECTED syscall.Errno = 1103 - ERROR_NO_DATA_DETECTED syscall.Errno = 1104 - ERROR_PARTITION_FAILURE syscall.Errno = 1105 - ERROR_INVALID_BLOCK_LENGTH syscall.Errno = 1106 - ERROR_DEVICE_NOT_PARTITIONED syscall.Errno = 1107 - ERROR_UNABLE_TO_LOCK_MEDIA syscall.Errno = 1108 - ERROR_UNABLE_TO_UNLOAD_MEDIA syscall.Errno = 1109 - ERROR_MEDIA_CHANGED syscall.Errno = 1110 - ERROR_BUS_RESET syscall.Errno = 1111 - ERROR_NO_MEDIA_IN_DRIVE syscall.Errno = 1112 - ERROR_NO_UNICODE_TRANSLATION syscall.Errno = 1113 - ERROR_DLL_INIT_FAILED syscall.Errno = 1114 - ERROR_SHUTDOWN_IN_PROGRESS syscall.Errno = 1115 - ERROR_NO_SHUTDOWN_IN_PROGRESS syscall.Errno = 1116 - ERROR_IO_DEVICE syscall.Errno = 1117 - ERROR_SERIAL_NO_DEVICE syscall.Errno = 1118 - ERROR_IRQ_BUSY syscall.Errno = 1119 - ERROR_MORE_WRITES syscall.Errno = 1120 - ERROR_COUNTER_TIMEOUT syscall.Errno = 1121 - ERROR_FLOPPY_ID_MARK_NOT_FOUND syscall.Errno = 1122 - ERROR_FLOPPY_WRONG_CYLINDER syscall.Errno = 1123 - ERROR_FLOPPY_UNKNOWN_ERROR syscall.Errno = 1124 - ERROR_FLOPPY_BAD_REGISTERS syscall.Errno = 1125 - ERROR_DISK_RECALIBRATE_FAILED syscall.Errno = 1126 - ERROR_DISK_OPERATION_FAILED syscall.Errno = 1127 - ERROR_DISK_RESET_FAILED syscall.Errno = 1128 - ERROR_EOM_OVERFLOW syscall.Errno = 1129 - ERROR_NOT_ENOUGH_SERVER_MEMORY syscall.Errno = 1130 - ERROR_POSSIBLE_DEADLOCK syscall.Errno = 1131 - ERROR_MAPPED_ALIGNMENT syscall.Errno = 1132 - ERROR_SET_POWER_STATE_VETOED syscall.Errno = 1140 - ERROR_SET_POWER_STATE_FAILED syscall.Errno = 1141 - ERROR_TOO_MANY_LINKS syscall.Errno = 1142 - ERROR_OLD_WIN_VERSION syscall.Errno = 1150 - ERROR_APP_WRONG_OS syscall.Errno = 1151 - ERROR_SINGLE_INSTANCE_APP syscall.Errno = 1152 - ERROR_RMODE_APP syscall.Errno = 1153 - ERROR_INVALID_DLL syscall.Errno = 1154 - ERROR_NO_ASSOCIATION syscall.Errno = 1155 - ERROR_DDE_FAIL syscall.Errno = 1156 - ERROR_DLL_NOT_FOUND 
syscall.Errno = 8516 - ERROR_DS_GLOBAL_CANT_HAVE_UNIVERSAL_MEMBER syscall.Errno = 8517 - ERROR_DS_UNIVERSAL_CANT_HAVE_LOCAL_MEMBER syscall.Errno = 8518 - ERROR_DS_GLOBAL_CANT_HAVE_CROSSDOMAIN_MEMBER syscall.Errno = 8519 - ERROR_DS_LOCAL_CANT_HAVE_CROSSDOMAIN_LOCAL_MEMBER syscall.Errno = 8520 - ERROR_DS_HAVE_PRIMARY_MEMBERS syscall.Errno = 8521 - ERROR_DS_STRING_SD_CONVERSION_FAILED syscall.Errno = 8522 - ERROR_DS_NAMING_MASTER_GC syscall.Errno = 8523 - ERROR_DS_DNS_LOOKUP_FAILURE syscall.Errno = 8524 - ERROR_DS_COULDNT_UPDATE_SPNS syscall.Errno = 8525 - ERROR_DS_CANT_RETRIEVE_SD syscall.Errno = 8526 - ERROR_DS_KEY_NOT_UNIQUE syscall.Errno = 8527 - ERROR_DS_WRONG_LINKED_ATT_SYNTAX syscall.Errno = 8528 - ERROR_DS_SAM_NEED_BOOTKEY_PASSWORD syscall.Errno = 8529 - ERROR_DS_SAM_NEED_BOOTKEY_FLOPPY syscall.Errno = 8530 - ERROR_DS_CANT_START syscall.Errno = 8531 - ERROR_DS_INIT_FAILURE syscall.Errno = 8532 - ERROR_DS_NO_PKT_PRIVACY_ON_CONNECTION syscall.Errno = 8533 - ERROR_DS_SOURCE_DOMAIN_IN_FOREST syscall.Errno = 8534 - ERROR_DS_DESTINATION_DOMAIN_NOT_IN_FOREST syscall.Errno = 8535 - ERROR_DS_DESTINATION_AUDITING_NOT_ENABLED syscall.Errno = 8536 - ERROR_DS_CANT_FIND_DC_FOR_SRC_DOMAIN syscall.Errno = 8537 - ERROR_DS_SRC_OBJ_NOT_GROUP_OR_USER syscall.Errno = 8538 - ERROR_DS_SRC_SID_EXISTS_IN_FOREST syscall.Errno = 8539 - ERROR_DS_SRC_AND_DST_OBJECT_CLASS_MISMATCH syscall.Errno = 8540 - ERROR_SAM_INIT_FAILURE syscall.Errno = 8541 - ERROR_DS_DRA_SCHEMA_INFO_SHIP syscall.Errno = 8542 - ERROR_DS_DRA_SCHEMA_CONFLICT syscall.Errno = 8543 - ERROR_DS_DRA_EARLIER_SCHEMA_CONFLICT syscall.Errno = 8544 - ERROR_DS_DRA_OBJ_NC_MISMATCH syscall.Errno = 8545 - ERROR_DS_NC_STILL_HAS_DSAS syscall.Errno = 8546 - ERROR_DS_GC_REQUIRED syscall.Errno = 8547 - ERROR_DS_LOCAL_MEMBER_OF_LOCAL_ONLY syscall.Errno = 8548 - ERROR_DS_NO_FPO_IN_UNIVERSAL_GROUPS syscall.Errno = 8549 - ERROR_DS_CANT_ADD_TO_GC syscall.Errno = 8550 - ERROR_DS_NO_CHECKPOINT_WITH_PDC syscall.Errno = 8551 - ERROR_DS_SOURCE_AUDITING_NOT_ENABLED syscall.Errno = 8552 - ERROR_DS_CANT_CREATE_IN_NONDOMAIN_NC syscall.Errno = 8553 - ERROR_DS_INVALID_NAME_FOR_SPN syscall.Errno = 8554 - ERROR_DS_FILTER_USES_CONTRUCTED_ATTRS syscall.Errno = 8555 - ERROR_DS_UNICODEPWD_NOT_IN_QUOTES syscall.Errno = 8556 - ERROR_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED syscall.Errno = 8557 - ERROR_DS_MUST_BE_RUN_ON_DST_DC syscall.Errno = 8558 - ERROR_DS_SRC_DC_MUST_BE_SP4_OR_GREATER syscall.Errno = 8559 - ERROR_DS_CANT_TREE_DELETE_CRITICAL_OBJ syscall.Errno = 8560 - ERROR_DS_INIT_FAILURE_CONSOLE syscall.Errno = 8561 - ERROR_DS_SAM_INIT_FAILURE_CONSOLE syscall.Errno = 8562 - ERROR_DS_FOREST_VERSION_TOO_HIGH syscall.Errno = 8563 - ERROR_DS_DOMAIN_VERSION_TOO_HIGH syscall.Errno = 8564 - ERROR_DS_FOREST_VERSION_TOO_LOW syscall.Errno = 8565 - ERROR_DS_DOMAIN_VERSION_TOO_LOW syscall.Errno = 8566 - ERROR_DS_INCOMPATIBLE_VERSION syscall.Errno = 8567 - ERROR_DS_LOW_DSA_VERSION syscall.Errno = 8568 - ERROR_DS_NO_BEHAVIOR_VERSION_IN_MIXEDDOMAIN syscall.Errno = 8569 - ERROR_DS_NOT_SUPPORTED_SORT_ORDER syscall.Errno = 8570 - ERROR_DS_NAME_NOT_UNIQUE syscall.Errno = 8571 - ERROR_DS_MACHINE_ACCOUNT_CREATED_PRENT4 syscall.Errno = 8572 - ERROR_DS_OUT_OF_VERSION_STORE syscall.Errno = 8573 - ERROR_DS_INCOMPATIBLE_CONTROLS_USED syscall.Errno = 8574 - ERROR_DS_NO_REF_DOMAIN syscall.Errno = 8575 - ERROR_DS_RESERVED_LINK_ID syscall.Errno = 8576 - ERROR_DS_LINK_ID_NOT_AVAILABLE syscall.Errno = 8577 - ERROR_DS_AG_CANT_HAVE_UNIVERSAL_MEMBER syscall.Errno = 8578 - ERROR_DS_MODIFYDN_DISALLOWED_BY_INSTANCE_TYPE 
syscall.Errno = 8579 - ERROR_DS_NO_OBJECT_MOVE_IN_SCHEMA_NC syscall.Errno = 8580 - ERROR_DS_MODIFYDN_DISALLOWED_BY_FLAG syscall.Errno = 8581 - ERROR_DS_MODIFYDN_WRONG_GRANDPARENT syscall.Errno = 8582 - ERROR_DS_NAME_ERROR_TRUST_REFERRAL syscall.Errno = 8583 - ERROR_NOT_SUPPORTED_ON_STANDARD_SERVER syscall.Errno = 8584 - ERROR_DS_CANT_ACCESS_REMOTE_PART_OF_AD syscall.Errno = 8585 - ERROR_DS_CR_IMPOSSIBLE_TO_VALIDATE_V2 syscall.Errno = 8586 - ERROR_DS_THREAD_LIMIT_EXCEEDED syscall.Errno = 8587 - ERROR_DS_NOT_CLOSEST syscall.Errno = 8588 - ERROR_DS_CANT_DERIVE_SPN_WITHOUT_SERVER_REF syscall.Errno = 8589 - ERROR_DS_SINGLE_USER_MODE_FAILED syscall.Errno = 8590 - ERROR_DS_NTDSCRIPT_SYNTAX_ERROR syscall.Errno = 8591 - ERROR_DS_NTDSCRIPT_PROCESS_ERROR syscall.Errno = 8592 - ERROR_DS_DIFFERENT_REPL_EPOCHS syscall.Errno = 8593 - ERROR_DS_DRS_EXTENSIONS_CHANGED syscall.Errno = 8594 - ERROR_DS_REPLICA_SET_CHANGE_NOT_ALLOWED_ON_DISABLED_CR syscall.Errno = 8595 - ERROR_DS_NO_MSDS_INTID syscall.Errno = 8596 - ERROR_DS_DUP_MSDS_INTID syscall.Errno = 8597 - ERROR_DS_EXISTS_IN_RDNATTID syscall.Errno = 8598 - ERROR_DS_AUTHORIZATION_FAILED syscall.Errno = 8599 - ERROR_DS_INVALID_SCRIPT syscall.Errno = 8600 - ERROR_DS_REMOTE_CROSSREF_OP_FAILED syscall.Errno = 8601 - ERROR_DS_CROSS_REF_BUSY syscall.Errno = 8602 - ERROR_DS_CANT_DERIVE_SPN_FOR_DELETED_DOMAIN syscall.Errno = 8603 - ERROR_DS_CANT_DEMOTE_WITH_WRITEABLE_NC syscall.Errno = 8604 - ERROR_DS_DUPLICATE_ID_FOUND syscall.Errno = 8605 - ERROR_DS_INSUFFICIENT_ATTR_TO_CREATE_OBJECT syscall.Errno = 8606 - ERROR_DS_GROUP_CONVERSION_ERROR syscall.Errno = 8607 - ERROR_DS_CANT_MOVE_APP_BASIC_GROUP syscall.Errno = 8608 - ERROR_DS_CANT_MOVE_APP_QUERY_GROUP syscall.Errno = 8609 - ERROR_DS_ROLE_NOT_VERIFIED syscall.Errno = 8610 - ERROR_DS_WKO_CONTAINER_CANNOT_BE_SPECIAL syscall.Errno = 8611 - ERROR_DS_DOMAIN_RENAME_IN_PROGRESS syscall.Errno = 8612 - ERROR_DS_EXISTING_AD_CHILD_NC syscall.Errno = 8613 - ERROR_DS_REPL_LIFETIME_EXCEEDED syscall.Errno = 8614 - ERROR_DS_DISALLOWED_IN_SYSTEM_CONTAINER syscall.Errno = 8615 - ERROR_DS_LDAP_SEND_QUEUE_FULL syscall.Errno = 8616 - ERROR_DS_DRA_OUT_SCHEDULE_WINDOW syscall.Errno = 8617 - ERROR_DS_POLICY_NOT_KNOWN syscall.Errno = 8618 - ERROR_NO_SITE_SETTINGS_OBJECT syscall.Errno = 8619 - ERROR_NO_SECRETS syscall.Errno = 8620 - ERROR_NO_WRITABLE_DC_FOUND syscall.Errno = 8621 - ERROR_DS_NO_SERVER_OBJECT syscall.Errno = 8622 - ERROR_DS_NO_NTDSA_OBJECT syscall.Errno = 8623 - ERROR_DS_NON_ASQ_SEARCH syscall.Errno = 8624 - ERROR_DS_AUDIT_FAILURE syscall.Errno = 8625 - ERROR_DS_INVALID_SEARCH_FLAG_SUBTREE syscall.Errno = 8626 - ERROR_DS_INVALID_SEARCH_FLAG_TUPLE syscall.Errno = 8627 - ERROR_DS_HIERARCHY_TABLE_TOO_DEEP syscall.Errno = 8628 - ERROR_DS_DRA_CORRUPT_UTD_VECTOR syscall.Errno = 8629 - ERROR_DS_DRA_SECRETS_DENIED syscall.Errno = 8630 - ERROR_DS_RESERVED_MAPI_ID syscall.Errno = 8631 - ERROR_DS_MAPI_ID_NOT_AVAILABLE syscall.Errno = 8632 - ERROR_DS_DRA_MISSING_KRBTGT_SECRET syscall.Errno = 8633 - ERROR_DS_DOMAIN_NAME_EXISTS_IN_FOREST syscall.Errno = 8634 - ERROR_DS_FLAT_NAME_EXISTS_IN_FOREST syscall.Errno = 8635 - ERROR_INVALID_USER_PRINCIPAL_NAME syscall.Errno = 8636 - ERROR_DS_OID_MAPPED_GROUP_CANT_HAVE_MEMBERS syscall.Errno = 8637 - ERROR_DS_OID_NOT_FOUND syscall.Errno = 8638 - ERROR_DS_DRA_RECYCLED_TARGET syscall.Errno = 8639 - ERROR_DS_DISALLOWED_NC_REDIRECT syscall.Errno = 8640 - ERROR_DS_HIGH_ADLDS_FFL syscall.Errno = 8641 - ERROR_DS_HIGH_DSA_VERSION syscall.Errno = 8642 - ERROR_DS_LOW_ADLDS_FFL syscall.Errno = 8643 - 
ERROR_DOMAIN_SID_SAME_AS_LOCAL_WORKSTATION syscall.Errno = 8644 - ERROR_DS_UNDELETE_SAM_VALIDATION_FAILED syscall.Errno = 8645 - ERROR_INCORRECT_ACCOUNT_TYPE syscall.Errno = 8646 - ERROR_DS_SPN_VALUE_NOT_UNIQUE_IN_FOREST syscall.Errno = 8647 - ERROR_DS_UPN_VALUE_NOT_UNIQUE_IN_FOREST syscall.Errno = 8648 - ERROR_DS_MISSING_FOREST_TRUST syscall.Errno = 8649 - ERROR_DS_VALUE_KEY_NOT_UNIQUE syscall.Errno = 8650 - DNS_ERROR_RESPONSE_CODES_BASE syscall.Errno = 9000 - DNS_ERROR_RCODE_NO_ERROR = ERROR_SUCCESS - DNS_ERROR_MASK syscall.Errno = 0x00002328 - DNS_ERROR_RCODE_FORMAT_ERROR syscall.Errno = 9001 - DNS_ERROR_RCODE_SERVER_FAILURE syscall.Errno = 9002 - DNS_ERROR_RCODE_NAME_ERROR syscall.Errno = 9003 - DNS_ERROR_RCODE_NOT_IMPLEMENTED syscall.Errno = 9004 - DNS_ERROR_RCODE_REFUSED syscall.Errno = 9005 - DNS_ERROR_RCODE_YXDOMAIN syscall.Errno = 9006 - DNS_ERROR_RCODE_YXRRSET syscall.Errno = 9007 - DNS_ERROR_RCODE_NXRRSET syscall.Errno = 9008 - DNS_ERROR_RCODE_NOTAUTH syscall.Errno = 9009 - DNS_ERROR_RCODE_NOTZONE syscall.Errno = 9010 - DNS_ERROR_RCODE_BADSIG syscall.Errno = 9016 - DNS_ERROR_RCODE_BADKEY syscall.Errno = 9017 - DNS_ERROR_RCODE_BADTIME syscall.Errno = 9018 - DNS_ERROR_RCODE_LAST = DNS_ERROR_RCODE_BADTIME - DNS_ERROR_DNSSEC_BASE syscall.Errno = 9100 - DNS_ERROR_KEYMASTER_REQUIRED syscall.Errno = 9101 - DNS_ERROR_NOT_ALLOWED_ON_SIGNED_ZONE syscall.Errno = 9102 - DNS_ERROR_NSEC3_INCOMPATIBLE_WITH_RSA_SHA1 syscall.Errno = 9103 - DNS_ERROR_NOT_ENOUGH_SIGNING_KEY_DESCRIPTORS syscall.Errno = 9104 - DNS_ERROR_UNSUPPORTED_ALGORITHM syscall.Errno = 9105 - DNS_ERROR_INVALID_KEY_SIZE syscall.Errno = 9106 - DNS_ERROR_SIGNING_KEY_NOT_ACCESSIBLE syscall.Errno = 9107 - DNS_ERROR_KSP_DOES_NOT_SUPPORT_PROTECTION syscall.Errno = 9108 - DNS_ERROR_UNEXPECTED_DATA_PROTECTION_ERROR syscall.Errno = 9109 - DNS_ERROR_UNEXPECTED_CNG_ERROR syscall.Errno = 9110 - DNS_ERROR_UNKNOWN_SIGNING_PARAMETER_VERSION syscall.Errno = 9111 - DNS_ERROR_KSP_NOT_ACCESSIBLE syscall.Errno = 9112 - DNS_ERROR_TOO_MANY_SKDS syscall.Errno = 9113 - DNS_ERROR_INVALID_ROLLOVER_PERIOD syscall.Errno = 9114 - DNS_ERROR_INVALID_INITIAL_ROLLOVER_OFFSET syscall.Errno = 9115 - DNS_ERROR_ROLLOVER_IN_PROGRESS syscall.Errno = 9116 - DNS_ERROR_STANDBY_KEY_NOT_PRESENT syscall.Errno = 9117 - DNS_ERROR_NOT_ALLOWED_ON_ZSK syscall.Errno = 9118 - DNS_ERROR_NOT_ALLOWED_ON_ACTIVE_SKD syscall.Errno = 9119 - DNS_ERROR_ROLLOVER_ALREADY_QUEUED syscall.Errno = 9120 - DNS_ERROR_NOT_ALLOWED_ON_UNSIGNED_ZONE syscall.Errno = 9121 - DNS_ERROR_BAD_KEYMASTER syscall.Errno = 9122 - DNS_ERROR_INVALID_SIGNATURE_VALIDITY_PERIOD syscall.Errno = 9123 - DNS_ERROR_INVALID_NSEC3_ITERATION_COUNT syscall.Errno = 9124 - DNS_ERROR_DNSSEC_IS_DISABLED syscall.Errno = 9125 - DNS_ERROR_INVALID_XML syscall.Errno = 9126 - DNS_ERROR_NO_VALID_TRUST_ANCHORS syscall.Errno = 9127 - DNS_ERROR_ROLLOVER_NOT_POKEABLE syscall.Errno = 9128 - DNS_ERROR_NSEC3_NAME_COLLISION syscall.Errno = 9129 - DNS_ERROR_NSEC_INCOMPATIBLE_WITH_NSEC3_RSA_SHA1 syscall.Errno = 9130 - DNS_ERROR_PACKET_FMT_BASE syscall.Errno = 9500 - DNS_INFO_NO_RECORDS syscall.Errno = 9501 - DNS_ERROR_BAD_PACKET syscall.Errno = 9502 - DNS_ERROR_NO_PACKET syscall.Errno = 9503 - DNS_ERROR_RCODE syscall.Errno = 9504 - DNS_ERROR_UNSECURE_PACKET syscall.Errno = 9505 - DNS_STATUS_PACKET_UNSECURE = DNS_ERROR_UNSECURE_PACKET - DNS_REQUEST_PENDING syscall.Errno = 9506 - DNS_ERROR_NO_MEMORY = ERROR_OUTOFMEMORY - DNS_ERROR_INVALID_NAME = ERROR_INVALID_NAME - DNS_ERROR_INVALID_DATA = ERROR_INVALID_DATA - DNS_ERROR_GENERAL_API_BASE 
syscall.Errno = 9550 - DNS_ERROR_INVALID_TYPE syscall.Errno = 9551 - DNS_ERROR_INVALID_IP_ADDRESS syscall.Errno = 9552 - DNS_ERROR_INVALID_PROPERTY syscall.Errno = 9553 - DNS_ERROR_TRY_AGAIN_LATER syscall.Errno = 9554 - DNS_ERROR_NOT_UNIQUE syscall.Errno = 9555 - DNS_ERROR_NON_RFC_NAME syscall.Errno = 9556 - DNS_STATUS_FQDN syscall.Errno = 9557 - DNS_STATUS_DOTTED_NAME syscall.Errno = 9558 - DNS_STATUS_SINGLE_PART_NAME syscall.Errno = 9559 - DNS_ERROR_INVALID_NAME_CHAR syscall.Errno = 9560 - DNS_ERROR_NUMERIC_NAME syscall.Errno = 9561 - DNS_ERROR_NOT_ALLOWED_ON_ROOT_SERVER syscall.Errno = 9562 - DNS_ERROR_NOT_ALLOWED_UNDER_DELEGATION syscall.Errno = 9563 - DNS_ERROR_CANNOT_FIND_ROOT_HINTS syscall.Errno = 9564 - DNS_ERROR_INCONSISTENT_ROOT_HINTS syscall.Errno = 9565 - DNS_ERROR_DWORD_VALUE_TOO_SMALL syscall.Errno = 9566 - DNS_ERROR_DWORD_VALUE_TOO_LARGE syscall.Errno = 9567 - DNS_ERROR_BACKGROUND_LOADING syscall.Errno = 9568 - DNS_ERROR_NOT_ALLOWED_ON_RODC syscall.Errno = 9569 - DNS_ERROR_NOT_ALLOWED_UNDER_DNAME syscall.Errno = 9570 - DNS_ERROR_DELEGATION_REQUIRED syscall.Errno = 9571 - DNS_ERROR_INVALID_POLICY_TABLE syscall.Errno = 9572 - DNS_ERROR_ADDRESS_REQUIRED syscall.Errno = 9573 - DNS_ERROR_ZONE_BASE syscall.Errno = 9600 - DNS_ERROR_ZONE_DOES_NOT_EXIST syscall.Errno = 9601 - DNS_ERROR_NO_ZONE_INFO syscall.Errno = 9602 - DNS_ERROR_INVALID_ZONE_OPERATION syscall.Errno = 9603 - DNS_ERROR_ZONE_CONFIGURATION_ERROR syscall.Errno = 9604 - DNS_ERROR_ZONE_HAS_NO_SOA_RECORD syscall.Errno = 9605 - DNS_ERROR_ZONE_HAS_NO_NS_RECORDS syscall.Errno = 9606 - DNS_ERROR_ZONE_LOCKED syscall.Errno = 9607 - DNS_ERROR_ZONE_CREATION_FAILED syscall.Errno = 9608 - DNS_ERROR_ZONE_ALREADY_EXISTS syscall.Errno = 9609 - DNS_ERROR_AUTOZONE_ALREADY_EXISTS syscall.Errno = 9610 - DNS_ERROR_INVALID_ZONE_TYPE syscall.Errno = 9611 - DNS_ERROR_SECONDARY_REQUIRES_MASTER_IP syscall.Errno = 9612 - DNS_ERROR_ZONE_NOT_SECONDARY syscall.Errno = 9613 - DNS_ERROR_NEED_SECONDARY_ADDRESSES syscall.Errno = 9614 - DNS_ERROR_WINS_INIT_FAILED syscall.Errno = 9615 - DNS_ERROR_NEED_WINS_SERVERS syscall.Errno = 9616 - DNS_ERROR_NBSTAT_INIT_FAILED syscall.Errno = 9617 - DNS_ERROR_SOA_DELETE_INVALID syscall.Errno = 9618 - DNS_ERROR_FORWARDER_ALREADY_EXISTS syscall.Errno = 9619 - DNS_ERROR_ZONE_REQUIRES_MASTER_IP syscall.Errno = 9620 - DNS_ERROR_ZONE_IS_SHUTDOWN syscall.Errno = 9621 - DNS_ERROR_ZONE_LOCKED_FOR_SIGNING syscall.Errno = 9622 - DNS_ERROR_DATAFILE_BASE syscall.Errno = 9650 - DNS_ERROR_PRIMARY_REQUIRES_DATAFILE syscall.Errno = 9651 - DNS_ERROR_INVALID_DATAFILE_NAME syscall.Errno = 9652 - DNS_ERROR_DATAFILE_OPEN_FAILURE syscall.Errno = 9653 - DNS_ERROR_FILE_WRITEBACK_FAILED syscall.Errno = 9654 - DNS_ERROR_DATAFILE_PARSING syscall.Errno = 9655 - DNS_ERROR_DATABASE_BASE syscall.Errno = 9700 - DNS_ERROR_RECORD_DOES_NOT_EXIST syscall.Errno = 9701 - DNS_ERROR_RECORD_FORMAT syscall.Errno = 9702 - DNS_ERROR_NODE_CREATION_FAILED syscall.Errno = 9703 - DNS_ERROR_UNKNOWN_RECORD_TYPE syscall.Errno = 9704 - DNS_ERROR_RECORD_TIMED_OUT syscall.Errno = 9705 - DNS_ERROR_NAME_NOT_IN_ZONE syscall.Errno = 9706 - DNS_ERROR_CNAME_LOOP syscall.Errno = 9707 - DNS_ERROR_NODE_IS_CNAME syscall.Errno = 9708 - DNS_ERROR_CNAME_COLLISION syscall.Errno = 9709 - DNS_ERROR_RECORD_ONLY_AT_ZONE_ROOT syscall.Errno = 9710 - DNS_ERROR_RECORD_ALREADY_EXISTS syscall.Errno = 9711 - DNS_ERROR_SECONDARY_DATA syscall.Errno = 9712 - DNS_ERROR_NO_CREATE_CACHE_DATA syscall.Errno = 9713 - DNS_ERROR_NAME_DOES_NOT_EXIST syscall.Errno = 9714 - DNS_WARNING_PTR_CREATE_FAILED 
syscall.Errno = 9715 - DNS_WARNING_DOMAIN_UNDELETED syscall.Errno = 9716 - DNS_ERROR_DS_UNAVAILABLE syscall.Errno = 9717 - DNS_ERROR_DS_ZONE_ALREADY_EXISTS syscall.Errno = 9718 - DNS_ERROR_NO_BOOTFILE_IF_DS_ZONE syscall.Errno = 9719 - DNS_ERROR_NODE_IS_DNAME syscall.Errno = 9720 - DNS_ERROR_DNAME_COLLISION syscall.Errno = 9721 - DNS_ERROR_ALIAS_LOOP syscall.Errno = 9722 - DNS_ERROR_OPERATION_BASE syscall.Errno = 9750 - DNS_INFO_AXFR_COMPLETE syscall.Errno = 9751 - DNS_ERROR_AXFR syscall.Errno = 9752 - DNS_INFO_ADDED_LOCAL_WINS syscall.Errno = 9753 - DNS_ERROR_SECURE_BASE syscall.Errno = 9800 - DNS_STATUS_CONTINUE_NEEDED syscall.Errno = 9801 - DNS_ERROR_SETUP_BASE syscall.Errno = 9850 - DNS_ERROR_NO_TCPIP syscall.Errno = 9851 - DNS_ERROR_NO_DNS_SERVERS syscall.Errno = 9852 - DNS_ERROR_DP_BASE syscall.Errno = 9900 - DNS_ERROR_DP_DOES_NOT_EXIST syscall.Errno = 9901 - DNS_ERROR_DP_ALREADY_EXISTS syscall.Errno = 9902 - DNS_ERROR_DP_NOT_ENLISTED syscall.Errno = 9903 - DNS_ERROR_DP_ALREADY_ENLISTED syscall.Errno = 9904 - DNS_ERROR_DP_NOT_AVAILABLE syscall.Errno = 9905 - DNS_ERROR_DP_FSMO_ERROR syscall.Errno = 9906 - DNS_ERROR_RRL_NOT_ENABLED syscall.Errno = 9911 - DNS_ERROR_RRL_INVALID_WINDOW_SIZE syscall.Errno = 9912 - DNS_ERROR_RRL_INVALID_IPV4_PREFIX syscall.Errno = 9913 - DNS_ERROR_RRL_INVALID_IPV6_PREFIX syscall.Errno = 9914 - DNS_ERROR_RRL_INVALID_TC_RATE syscall.Errno = 9915 - DNS_ERROR_RRL_INVALID_LEAK_RATE syscall.Errno = 9916 - DNS_ERROR_RRL_LEAK_RATE_LESSTHAN_TC_RATE syscall.Errno = 9917 - DNS_ERROR_VIRTUALIZATION_INSTANCE_ALREADY_EXISTS syscall.Errno = 9921 - DNS_ERROR_VIRTUALIZATION_INSTANCE_DOES_NOT_EXIST syscall.Errno = 9922 - DNS_ERROR_VIRTUALIZATION_TREE_LOCKED syscall.Errno = 9923 - DNS_ERROR_INVAILD_VIRTUALIZATION_INSTANCE_NAME syscall.Errno = 9924 - DNS_ERROR_DEFAULT_VIRTUALIZATION_INSTANCE syscall.Errno = 9925 - DNS_ERROR_ZONESCOPE_ALREADY_EXISTS syscall.Errno = 9951 - DNS_ERROR_ZONESCOPE_DOES_NOT_EXIST syscall.Errno = 9952 - DNS_ERROR_DEFAULT_ZONESCOPE syscall.Errno = 9953 - DNS_ERROR_INVALID_ZONESCOPE_NAME syscall.Errno = 9954 - DNS_ERROR_NOT_ALLOWED_WITH_ZONESCOPES syscall.Errno = 9955 - DNS_ERROR_LOAD_ZONESCOPE_FAILED syscall.Errno = 9956 - DNS_ERROR_ZONESCOPE_FILE_WRITEBACK_FAILED syscall.Errno = 9957 - DNS_ERROR_INVALID_SCOPE_NAME syscall.Errno = 9958 - DNS_ERROR_SCOPE_DOES_NOT_EXIST syscall.Errno = 9959 - DNS_ERROR_DEFAULT_SCOPE syscall.Errno = 9960 - DNS_ERROR_INVALID_SCOPE_OPERATION syscall.Errno = 9961 - DNS_ERROR_SCOPE_LOCKED syscall.Errno = 9962 - DNS_ERROR_SCOPE_ALREADY_EXISTS syscall.Errno = 9963 - DNS_ERROR_POLICY_ALREADY_EXISTS syscall.Errno = 9971 - DNS_ERROR_POLICY_DOES_NOT_EXIST syscall.Errno = 9972 - DNS_ERROR_POLICY_INVALID_CRITERIA syscall.Errno = 9973 - DNS_ERROR_POLICY_INVALID_SETTINGS syscall.Errno = 9974 - DNS_ERROR_CLIENT_SUBNET_IS_ACCESSED syscall.Errno = 9975 - DNS_ERROR_CLIENT_SUBNET_DOES_NOT_EXIST syscall.Errno = 9976 - DNS_ERROR_CLIENT_SUBNET_ALREADY_EXISTS syscall.Errno = 9977 - DNS_ERROR_SUBNET_DOES_NOT_EXIST syscall.Errno = 9978 - DNS_ERROR_SUBNET_ALREADY_EXISTS syscall.Errno = 9979 - DNS_ERROR_POLICY_LOCKED syscall.Errno = 9980 - DNS_ERROR_POLICY_INVALID_WEIGHT syscall.Errno = 9981 - DNS_ERROR_POLICY_INVALID_NAME syscall.Errno = 9982 - DNS_ERROR_POLICY_MISSING_CRITERIA syscall.Errno = 9983 - DNS_ERROR_INVALID_CLIENT_SUBNET_NAME syscall.Errno = 9984 - DNS_ERROR_POLICY_PROCESSING_ORDER_INVALID syscall.Errno = 9985 - DNS_ERROR_POLICY_SCOPE_MISSING syscall.Errno = 9986 - DNS_ERROR_POLICY_SCOPE_NOT_ALLOWED syscall.Errno = 9987 - 
DNS_ERROR_SERVERSCOPE_IS_REFERENCED syscall.Errno = 9988 - DNS_ERROR_ZONESCOPE_IS_REFERENCED syscall.Errno = 9989 - DNS_ERROR_POLICY_INVALID_CRITERIA_CLIENT_SUBNET syscall.Errno = 9990 - DNS_ERROR_POLICY_INVALID_CRITERIA_TRANSPORT_PROTOCOL syscall.Errno = 9991 - DNS_ERROR_POLICY_INVALID_CRITERIA_NETWORK_PROTOCOL syscall.Errno = 9992 - DNS_ERROR_POLICY_INVALID_CRITERIA_INTERFACE syscall.Errno = 9993 - DNS_ERROR_POLICY_INVALID_CRITERIA_FQDN syscall.Errno = 9994 - DNS_ERROR_POLICY_INVALID_CRITERIA_QUERY_TYPE syscall.Errno = 9995 - DNS_ERROR_POLICY_INVALID_CRITERIA_TIME_OF_DAY syscall.Errno = 9996 - WSABASEERR syscall.Errno = 10000 - WSAEINTR syscall.Errno = 10004 - WSAEBADF syscall.Errno = 10009 - WSAEACCES syscall.Errno = 10013 - WSAEFAULT syscall.Errno = 10014 - WSAEINVAL syscall.Errno = 10022 - WSAEMFILE syscall.Errno = 10024 - WSAEWOULDBLOCK syscall.Errno = 10035 - WSAEINPROGRESS syscall.Errno = 10036 - WSAEALREADY syscall.Errno = 10037 - WSAENOTSOCK syscall.Errno = 10038 - WSAEDESTADDRREQ syscall.Errno = 10039 - WSAEMSGSIZE syscall.Errno = 10040 - WSAEPROTOTYPE syscall.Errno = 10041 - WSAENOPROTOOPT syscall.Errno = 10042 - WSAEPROTONOSUPPORT syscall.Errno = 10043 - WSAESOCKTNOSUPPORT syscall.Errno = 10044 - WSAEOPNOTSUPP syscall.Errno = 10045 - WSAEPFNOSUPPORT syscall.Errno = 10046 - WSAEAFNOSUPPORT syscall.Errno = 10047 - WSAEADDRINUSE syscall.Errno = 10048 - WSAEADDRNOTAVAIL syscall.Errno = 10049 - WSAENETDOWN syscall.Errno = 10050 - WSAENETUNREACH syscall.Errno = 10051 - WSAENETRESET syscall.Errno = 10052 - WSAECONNABORTED syscall.Errno = 10053 - WSAECONNRESET syscall.Errno = 10054 - WSAENOBUFS syscall.Errno = 10055 - WSAEISCONN syscall.Errno = 10056 - WSAENOTCONN syscall.Errno = 10057 - WSAESHUTDOWN syscall.Errno = 10058 - WSAETOOMANYREFS syscall.Errno = 10059 - WSAETIMEDOUT syscall.Errno = 10060 - WSAECONNREFUSED syscall.Errno = 10061 - WSAELOOP syscall.Errno = 10062 - WSAENAMETOOLONG syscall.Errno = 10063 - WSAEHOSTDOWN syscall.Errno = 10064 - WSAEHOSTUNREACH syscall.Errno = 10065 - WSAENOTEMPTY syscall.Errno = 10066 - WSAEPROCLIM syscall.Errno = 10067 - WSAEUSERS syscall.Errno = 10068 - WSAEDQUOT syscall.Errno = 10069 - WSAESTALE syscall.Errno = 10070 - WSAEREMOTE syscall.Errno = 10071 - WSASYSNOTREADY syscall.Errno = 10091 - WSAVERNOTSUPPORTED syscall.Errno = 10092 - WSANOTINITIALISED syscall.Errno = 10093 - WSAEDISCON syscall.Errno = 10101 - WSAENOMORE syscall.Errno = 10102 - WSAECANCELLED syscall.Errno = 10103 - WSAEINVALIDPROCTABLE syscall.Errno = 10104 - WSAEINVALIDPROVIDER syscall.Errno = 10105 - WSAEPROVIDERFAILEDINIT syscall.Errno = 10106 - WSASYSCALLFAILURE syscall.Errno = 10107 - WSASERVICE_NOT_FOUND syscall.Errno = 10108 - WSATYPE_NOT_FOUND syscall.Errno = 10109 - WSA_E_NO_MORE syscall.Errno = 10110 - WSA_E_CANCELLED syscall.Errno = 10111 - WSAEREFUSED syscall.Errno = 10112 - WSAHOST_NOT_FOUND syscall.Errno = 11001 - WSATRY_AGAIN syscall.Errno = 11002 - WSANO_RECOVERY syscall.Errno = 11003 - WSANO_DATA syscall.Errno = 11004 - WSA_QOS_RECEIVERS syscall.Errno = 11005 - WSA_QOS_SENDERS syscall.Errno = 11006 - WSA_QOS_NO_SENDERS syscall.Errno = 11007 - WSA_QOS_NO_RECEIVERS syscall.Errno = 11008 - WSA_QOS_REQUEST_CONFIRMED syscall.Errno = 11009 - WSA_QOS_ADMISSION_FAILURE syscall.Errno = 11010 - WSA_QOS_POLICY_FAILURE syscall.Errno = 11011 - WSA_QOS_BAD_STYLE syscall.Errno = 11012 - WSA_QOS_BAD_OBJECT syscall.Errno = 11013 - WSA_QOS_TRAFFIC_CTRL_ERROR syscall.Errno = 11014 - WSA_QOS_GENERIC_ERROR syscall.Errno = 11015 - WSA_QOS_ESERVICETYPE syscall.Errno = 11016 - 
WSA_QOS_EFLOWSPEC syscall.Errno = 11017 - WSA_QOS_EPROVSPECBUF syscall.Errno = 11018 - WSA_QOS_EFILTERSTYLE syscall.Errno = 11019 - WSA_QOS_EFILTERTYPE syscall.Errno = 11020 - WSA_QOS_EFILTERCOUNT syscall.Errno = 11021 - WSA_QOS_EOBJLENGTH syscall.Errno = 11022 - WSA_QOS_EFLOWCOUNT syscall.Errno = 11023 - WSA_QOS_EUNKOWNPSOBJ syscall.Errno = 11024 - WSA_QOS_EPOLICYOBJ syscall.Errno = 11025 - WSA_QOS_EFLOWDESC syscall.Errno = 11026 - WSA_QOS_EPSFLOWSPEC syscall.Errno = 11027 - WSA_QOS_EPSFILTERSPEC syscall.Errno = 11028 - WSA_QOS_ESDMODEOBJ syscall.Errno = 11029 - WSA_QOS_ESHAPERATEOBJ syscall.Errno = 11030 - WSA_QOS_RESERVED_PETYPE syscall.Errno = 11031 - WSA_SECURE_HOST_NOT_FOUND syscall.Errno = 11032 - WSA_IPSEC_NAME_POLICY_ERROR syscall.Errno = 11033 - ERROR_IPSEC_QM_POLICY_EXISTS syscall.Errno = 13000 - ERROR_IPSEC_QM_POLICY_NOT_FOUND syscall.Errno = 13001 - ERROR_IPSEC_QM_POLICY_IN_USE syscall.Errno = 13002 - ERROR_IPSEC_MM_POLICY_EXISTS syscall.Errno = 13003 - ERROR_IPSEC_MM_POLICY_NOT_FOUND syscall.Errno = 13004 - ERROR_IPSEC_MM_POLICY_IN_USE syscall.Errno = 13005 - ERROR_IPSEC_MM_FILTER_EXISTS syscall.Errno = 13006 - ERROR_IPSEC_MM_FILTER_NOT_FOUND syscall.Errno = 13007 - ERROR_IPSEC_TRANSPORT_FILTER_EXISTS syscall.Errno = 13008 - ERROR_IPSEC_TRANSPORT_FILTER_NOT_FOUND syscall.Errno = 13009 - ERROR_IPSEC_MM_AUTH_EXISTS syscall.Errno = 13010 - ERROR_IPSEC_MM_AUTH_NOT_FOUND syscall.Errno = 13011 - ERROR_IPSEC_MM_AUTH_IN_USE syscall.Errno = 13012 - ERROR_IPSEC_DEFAULT_MM_POLICY_NOT_FOUND syscall.Errno = 13013 - ERROR_IPSEC_DEFAULT_MM_AUTH_NOT_FOUND syscall.Errno = 13014 - ERROR_IPSEC_DEFAULT_QM_POLICY_NOT_FOUND syscall.Errno = 13015 - ERROR_IPSEC_TUNNEL_FILTER_EXISTS syscall.Errno = 13016 - ERROR_IPSEC_TUNNEL_FILTER_NOT_FOUND syscall.Errno = 13017 - ERROR_IPSEC_MM_FILTER_PENDING_DELETION syscall.Errno = 13018 - ERROR_IPSEC_TRANSPORT_FILTER_PENDING_DELETION syscall.Errno = 13019 - ERROR_IPSEC_TUNNEL_FILTER_PENDING_DELETION syscall.Errno = 13020 - ERROR_IPSEC_MM_POLICY_PENDING_DELETION syscall.Errno = 13021 - ERROR_IPSEC_MM_AUTH_PENDING_DELETION syscall.Errno = 13022 - ERROR_IPSEC_QM_POLICY_PENDING_DELETION syscall.Errno = 13023 - WARNING_IPSEC_MM_POLICY_PRUNED syscall.Errno = 13024 - WARNING_IPSEC_QM_POLICY_PRUNED syscall.Errno = 13025 - ERROR_IPSEC_IKE_NEG_STATUS_BEGIN syscall.Errno = 13800 - ERROR_IPSEC_IKE_AUTH_FAIL syscall.Errno = 13801 - ERROR_IPSEC_IKE_ATTRIB_FAIL syscall.Errno = 13802 - ERROR_IPSEC_IKE_NEGOTIATION_PENDING syscall.Errno = 13803 - ERROR_IPSEC_IKE_GENERAL_PROCESSING_ERROR syscall.Errno = 13804 - ERROR_IPSEC_IKE_TIMED_OUT syscall.Errno = 13805 - ERROR_IPSEC_IKE_NO_CERT syscall.Errno = 13806 - ERROR_IPSEC_IKE_SA_DELETED syscall.Errno = 13807 - ERROR_IPSEC_IKE_SA_REAPED syscall.Errno = 13808 - ERROR_IPSEC_IKE_MM_ACQUIRE_DROP syscall.Errno = 13809 - ERROR_IPSEC_IKE_QM_ACQUIRE_DROP syscall.Errno = 13810 - ERROR_IPSEC_IKE_QUEUE_DROP_MM syscall.Errno = 13811 - ERROR_IPSEC_IKE_QUEUE_DROP_NO_MM syscall.Errno = 13812 - ERROR_IPSEC_IKE_DROP_NO_RESPONSE syscall.Errno = 13813 - ERROR_IPSEC_IKE_MM_DELAY_DROP syscall.Errno = 13814 - ERROR_IPSEC_IKE_QM_DELAY_DROP syscall.Errno = 13815 - ERROR_IPSEC_IKE_ERROR syscall.Errno = 13816 - ERROR_IPSEC_IKE_CRL_FAILED syscall.Errno = 13817 - ERROR_IPSEC_IKE_INVALID_KEY_USAGE syscall.Errno = 13818 - ERROR_IPSEC_IKE_INVALID_CERT_TYPE syscall.Errno = 13819 - ERROR_IPSEC_IKE_NO_PRIVATE_KEY syscall.Errno = 13820 - ERROR_IPSEC_IKE_SIMULTANEOUS_REKEY syscall.Errno = 13821 - ERROR_IPSEC_IKE_DH_FAIL syscall.Errno = 13822 - 
ERROR_IPSEC_IKE_CRITICAL_PAYLOAD_NOT_RECOGNIZED syscall.Errno = 13823 - ERROR_IPSEC_IKE_INVALID_HEADER syscall.Errno = 13824 - ERROR_IPSEC_IKE_NO_POLICY syscall.Errno = 13825 - ERROR_IPSEC_IKE_INVALID_SIGNATURE syscall.Errno = 13826 - ERROR_IPSEC_IKE_KERBEROS_ERROR syscall.Errno = 13827 - ERROR_IPSEC_IKE_NO_PUBLIC_KEY syscall.Errno = 13828 - ERROR_IPSEC_IKE_PROCESS_ERR syscall.Errno = 13829 - ERROR_IPSEC_IKE_PROCESS_ERR_SA syscall.Errno = 13830 - ERROR_IPSEC_IKE_PROCESS_ERR_PROP syscall.Errno = 13831 - ERROR_IPSEC_IKE_PROCESS_ERR_TRANS syscall.Errno = 13832 - ERROR_IPSEC_IKE_PROCESS_ERR_KE syscall.Errno = 13833 - ERROR_IPSEC_IKE_PROCESS_ERR_ID syscall.Errno = 13834 - ERROR_IPSEC_IKE_PROCESS_ERR_CERT syscall.Errno = 13835 - ERROR_IPSEC_IKE_PROCESS_ERR_CERT_REQ syscall.Errno = 13836 - ERROR_IPSEC_IKE_PROCESS_ERR_HASH syscall.Errno = 13837 - ERROR_IPSEC_IKE_PROCESS_ERR_SIG syscall.Errno = 13838 - ERROR_IPSEC_IKE_PROCESS_ERR_NONCE syscall.Errno = 13839 - ERROR_IPSEC_IKE_PROCESS_ERR_NOTIFY syscall.Errno = 13840 - ERROR_IPSEC_IKE_PROCESS_ERR_DELETE syscall.Errno = 13841 - ERROR_IPSEC_IKE_PROCESS_ERR_VENDOR syscall.Errno = 13842 - ERROR_IPSEC_IKE_INVALID_PAYLOAD syscall.Errno = 13843 - ERROR_IPSEC_IKE_LOAD_SOFT_SA syscall.Errno = 13844 - ERROR_IPSEC_IKE_SOFT_SA_TORN_DOWN syscall.Errno = 13845 - ERROR_IPSEC_IKE_INVALID_COOKIE syscall.Errno = 13846 - ERROR_IPSEC_IKE_NO_PEER_CERT syscall.Errno = 13847 - ERROR_IPSEC_IKE_PEER_CRL_FAILED syscall.Errno = 13848 - ERROR_IPSEC_IKE_POLICY_CHANGE syscall.Errno = 13849 - ERROR_IPSEC_IKE_NO_MM_POLICY syscall.Errno = 13850 - ERROR_IPSEC_IKE_NOTCBPRIV syscall.Errno = 13851 - ERROR_IPSEC_IKE_SECLOADFAIL syscall.Errno = 13852 - ERROR_IPSEC_IKE_FAILSSPINIT syscall.Errno = 13853 - ERROR_IPSEC_IKE_FAILQUERYSSP syscall.Errno = 13854 - ERROR_IPSEC_IKE_SRVACQFAIL syscall.Errno = 13855 - ERROR_IPSEC_IKE_SRVQUERYCRED syscall.Errno = 13856 - ERROR_IPSEC_IKE_GETSPIFAIL syscall.Errno = 13857 - ERROR_IPSEC_IKE_INVALID_FILTER syscall.Errno = 13858 - ERROR_IPSEC_IKE_OUT_OF_MEMORY syscall.Errno = 13859 - ERROR_IPSEC_IKE_ADD_UPDATE_KEY_FAILED syscall.Errno = 13860 - ERROR_IPSEC_IKE_INVALID_POLICY syscall.Errno = 13861 - ERROR_IPSEC_IKE_UNKNOWN_DOI syscall.Errno = 13862 - ERROR_IPSEC_IKE_INVALID_SITUATION syscall.Errno = 13863 - ERROR_IPSEC_IKE_DH_FAILURE syscall.Errno = 13864 - ERROR_IPSEC_IKE_INVALID_GROUP syscall.Errno = 13865 - ERROR_IPSEC_IKE_ENCRYPT syscall.Errno = 13866 - ERROR_IPSEC_IKE_DECRYPT syscall.Errno = 13867 - ERROR_IPSEC_IKE_POLICY_MATCH syscall.Errno = 13868 - ERROR_IPSEC_IKE_UNSUPPORTED_ID syscall.Errno = 13869 - ERROR_IPSEC_IKE_INVALID_HASH syscall.Errno = 13870 - ERROR_IPSEC_IKE_INVALID_HASH_ALG syscall.Errno = 13871 - ERROR_IPSEC_IKE_INVALID_HASH_SIZE syscall.Errno = 13872 - ERROR_IPSEC_IKE_INVALID_ENCRYPT_ALG syscall.Errno = 13873 - ERROR_IPSEC_IKE_INVALID_AUTH_ALG syscall.Errno = 13874 - ERROR_IPSEC_IKE_INVALID_SIG syscall.Errno = 13875 - ERROR_IPSEC_IKE_LOAD_FAILED syscall.Errno = 13876 - ERROR_IPSEC_IKE_RPC_DELETE syscall.Errno = 13877 - ERROR_IPSEC_IKE_BENIGN_REINIT syscall.Errno = 13878 - ERROR_IPSEC_IKE_INVALID_RESPONDER_LIFETIME_NOTIFY syscall.Errno = 13879 - ERROR_IPSEC_IKE_INVALID_MAJOR_VERSION syscall.Errno = 13880 - ERROR_IPSEC_IKE_INVALID_CERT_KEYLEN syscall.Errno = 13881 - ERROR_IPSEC_IKE_MM_LIMIT syscall.Errno = 13882 - ERROR_IPSEC_IKE_NEGOTIATION_DISABLED syscall.Errno = 13883 - ERROR_IPSEC_IKE_QM_LIMIT syscall.Errno = 13884 - ERROR_IPSEC_IKE_MM_EXPIRED syscall.Errno = 13885 - ERROR_IPSEC_IKE_PEER_MM_ASSUMED_INVALID syscall.Errno = 13886 - 
ERROR_IPSEC_IKE_CERT_CHAIN_POLICY_MISMATCH syscall.Errno = 13887 - ERROR_IPSEC_IKE_UNEXPECTED_MESSAGE_ID syscall.Errno = 13888 - ERROR_IPSEC_IKE_INVALID_AUTH_PAYLOAD syscall.Errno = 13889 - ERROR_IPSEC_IKE_DOS_COOKIE_SENT syscall.Errno = 13890 - ERROR_IPSEC_IKE_SHUTTING_DOWN syscall.Errno = 13891 - ERROR_IPSEC_IKE_CGA_AUTH_FAILED syscall.Errno = 13892 - ERROR_IPSEC_IKE_PROCESS_ERR_NATOA syscall.Errno = 13893 - ERROR_IPSEC_IKE_INVALID_MM_FOR_QM syscall.Errno = 13894 - ERROR_IPSEC_IKE_QM_EXPIRED syscall.Errno = 13895 - ERROR_IPSEC_IKE_TOO_MANY_FILTERS syscall.Errno = 13896 - ERROR_IPSEC_IKE_NEG_STATUS_END syscall.Errno = 13897 - ERROR_IPSEC_IKE_KILL_DUMMY_NAP_TUNNEL syscall.Errno = 13898 - ERROR_IPSEC_IKE_INNER_IP_ASSIGNMENT_FAILURE syscall.Errno = 13899 - ERROR_IPSEC_IKE_REQUIRE_CP_PAYLOAD_MISSING syscall.Errno = 13900 - ERROR_IPSEC_KEY_MODULE_IMPERSONATION_NEGOTIATION_PENDING syscall.Errno = 13901 - ERROR_IPSEC_IKE_COEXISTENCE_SUPPRESS syscall.Errno = 13902 - ERROR_IPSEC_IKE_RATELIMIT_DROP syscall.Errno = 13903 - ERROR_IPSEC_IKE_PEER_DOESNT_SUPPORT_MOBIKE syscall.Errno = 13904 - ERROR_IPSEC_IKE_AUTHORIZATION_FAILURE syscall.Errno = 13905 - ERROR_IPSEC_IKE_STRONG_CRED_AUTHORIZATION_FAILURE syscall.Errno = 13906 - ERROR_IPSEC_IKE_AUTHORIZATION_FAILURE_WITH_OPTIONAL_RETRY syscall.Errno = 13907 - ERROR_IPSEC_IKE_STRONG_CRED_AUTHORIZATION_AND_CERTMAP_FAILURE syscall.Errno = 13908 - ERROR_IPSEC_IKE_NEG_STATUS_EXTENDED_END syscall.Errno = 13909 - ERROR_IPSEC_BAD_SPI syscall.Errno = 13910 - ERROR_IPSEC_SA_LIFETIME_EXPIRED syscall.Errno = 13911 - ERROR_IPSEC_WRONG_SA syscall.Errno = 13912 - ERROR_IPSEC_REPLAY_CHECK_FAILED syscall.Errno = 13913 - ERROR_IPSEC_INVALID_PACKET syscall.Errno = 13914 - ERROR_IPSEC_INTEGRITY_CHECK_FAILED syscall.Errno = 13915 - ERROR_IPSEC_CLEAR_TEXT_DROP syscall.Errno = 13916 - ERROR_IPSEC_AUTH_FIREWALL_DROP syscall.Errno = 13917 - ERROR_IPSEC_THROTTLE_DROP syscall.Errno = 13918 - ERROR_IPSEC_DOSP_BLOCK syscall.Errno = 13925 - ERROR_IPSEC_DOSP_RECEIVED_MULTICAST syscall.Errno = 13926 - ERROR_IPSEC_DOSP_INVALID_PACKET syscall.Errno = 13927 - ERROR_IPSEC_DOSP_STATE_LOOKUP_FAILED syscall.Errno = 13928 - ERROR_IPSEC_DOSP_MAX_ENTRIES syscall.Errno = 13929 - ERROR_IPSEC_DOSP_KEYMOD_NOT_ALLOWED syscall.Errno = 13930 - ERROR_IPSEC_DOSP_NOT_INSTALLED syscall.Errno = 13931 - ERROR_IPSEC_DOSP_MAX_PER_IP_RATELIMIT_QUEUES syscall.Errno = 13932 - ERROR_SXS_SECTION_NOT_FOUND syscall.Errno = 14000 - ERROR_SXS_CANT_GEN_ACTCTX syscall.Errno = 14001 - ERROR_SXS_INVALID_ACTCTXDATA_FORMAT syscall.Errno = 14002 - ERROR_SXS_ASSEMBLY_NOT_FOUND syscall.Errno = 14003 - ERROR_SXS_MANIFEST_FORMAT_ERROR syscall.Errno = 14004 - ERROR_SXS_MANIFEST_PARSE_ERROR syscall.Errno = 14005 - ERROR_SXS_ACTIVATION_CONTEXT_DISABLED syscall.Errno = 14006 - ERROR_SXS_KEY_NOT_FOUND syscall.Errno = 14007 - ERROR_SXS_VERSION_CONFLICT syscall.Errno = 14008 - ERROR_SXS_WRONG_SECTION_TYPE syscall.Errno = 14009 - ERROR_SXS_THREAD_QUERIES_DISABLED syscall.Errno = 14010 - ERROR_SXS_PROCESS_DEFAULT_ALREADY_SET syscall.Errno = 14011 - ERROR_SXS_UNKNOWN_ENCODING_GROUP syscall.Errno = 14012 - ERROR_SXS_UNKNOWN_ENCODING syscall.Errno = 14013 - ERROR_SXS_INVALID_XML_NAMESPACE_URI syscall.Errno = 14014 - ERROR_SXS_ROOT_MANIFEST_DEPENDENCY_NOT_INSTALLED syscall.Errno = 14015 - ERROR_SXS_LEAF_MANIFEST_DEPENDENCY_NOT_INSTALLED syscall.Errno = 14016 - ERROR_SXS_INVALID_ASSEMBLY_IDENTITY_ATTRIBUTE syscall.Errno = 14017 - ERROR_SXS_MANIFEST_MISSING_REQUIRED_DEFAULT_NAMESPACE syscall.Errno = 14018 - 
ERROR_SXS_MANIFEST_INVALID_REQUIRED_DEFAULT_NAMESPACE syscall.Errno = 14019 - ERROR_SXS_PRIVATE_MANIFEST_CROSS_PATH_WITH_REPARSE_POINT syscall.Errno = 14020 - ERROR_SXS_DUPLICATE_DLL_NAME syscall.Errno = 14021 - ERROR_SXS_DUPLICATE_WINDOWCLASS_NAME syscall.Errno = 14022 - ERROR_SXS_DUPLICATE_CLSID syscall.Errno = 14023 - ERROR_SXS_DUPLICATE_IID syscall.Errno = 14024 - ERROR_SXS_DUPLICATE_TLBID syscall.Errno = 14025 - ERROR_SXS_DUPLICATE_PROGID syscall.Errno = 14026 - ERROR_SXS_DUPLICATE_ASSEMBLY_NAME syscall.Errno = 14027 - ERROR_SXS_FILE_HASH_MISMATCH syscall.Errno = 14028 - ERROR_SXS_POLICY_PARSE_ERROR syscall.Errno = 14029 - ERROR_SXS_XML_E_MISSINGQUOTE syscall.Errno = 14030 - ERROR_SXS_XML_E_COMMENTSYNTAX syscall.Errno = 14031 - ERROR_SXS_XML_E_BADSTARTNAMECHAR syscall.Errno = 14032 - ERROR_SXS_XML_E_BADNAMECHAR syscall.Errno = 14033 - ERROR_SXS_XML_E_BADCHARINSTRING syscall.Errno = 14034 - ERROR_SXS_XML_E_XMLDECLSYNTAX syscall.Errno = 14035 - ERROR_SXS_XML_E_BADCHARDATA syscall.Errno = 14036 - ERROR_SXS_XML_E_MISSINGWHITESPACE syscall.Errno = 14037 - ERROR_SXS_XML_E_EXPECTINGTAGEND syscall.Errno = 14038 - ERROR_SXS_XML_E_MISSINGSEMICOLON syscall.Errno = 14039 - ERROR_SXS_XML_E_UNBALANCEDPAREN syscall.Errno = 14040 - ERROR_SXS_XML_E_INTERNALERROR syscall.Errno = 14041 - ERROR_SXS_XML_E_UNEXPECTED_WHITESPACE syscall.Errno = 14042 - ERROR_SXS_XML_E_INCOMPLETE_ENCODING syscall.Errno = 14043 - ERROR_SXS_XML_E_MISSING_PAREN syscall.Errno = 14044 - ERROR_SXS_XML_E_EXPECTINGCLOSEQUOTE syscall.Errno = 14045 - ERROR_SXS_XML_E_MULTIPLE_COLONS syscall.Errno = 14046 - ERROR_SXS_XML_E_INVALID_DECIMAL syscall.Errno = 14047 - ERROR_SXS_XML_E_INVALID_HEXIDECIMAL syscall.Errno = 14048 - ERROR_SXS_XML_E_INVALID_UNICODE syscall.Errno = 14049 - ERROR_SXS_XML_E_WHITESPACEORQUESTIONMARK syscall.Errno = 14050 - ERROR_SXS_XML_E_UNEXPECTEDENDTAG syscall.Errno = 14051 - ERROR_SXS_XML_E_UNCLOSEDTAG syscall.Errno = 14052 - ERROR_SXS_XML_E_DUPLICATEATTRIBUTE syscall.Errno = 14053 - ERROR_SXS_XML_E_MULTIPLEROOTS syscall.Errno = 14054 - ERROR_SXS_XML_E_INVALIDATROOTLEVEL syscall.Errno = 14055 - ERROR_SXS_XML_E_BADXMLDECL syscall.Errno = 14056 - ERROR_SXS_XML_E_MISSINGROOT syscall.Errno = 14057 - ERROR_SXS_XML_E_UNEXPECTEDEOF syscall.Errno = 14058 - ERROR_SXS_XML_E_BADPEREFINSUBSET syscall.Errno = 14059 - ERROR_SXS_XML_E_UNCLOSEDSTARTTAG syscall.Errno = 14060 - ERROR_SXS_XML_E_UNCLOSEDENDTAG syscall.Errno = 14061 - ERROR_SXS_XML_E_UNCLOSEDSTRING syscall.Errno = 14062 - ERROR_SXS_XML_E_UNCLOSEDCOMMENT syscall.Errno = 14063 - ERROR_SXS_XML_E_UNCLOSEDDECL syscall.Errno = 14064 - ERROR_SXS_XML_E_UNCLOSEDCDATA syscall.Errno = 14065 - ERROR_SXS_XML_E_RESERVEDNAMESPACE syscall.Errno = 14066 - ERROR_SXS_XML_E_INVALIDENCODING syscall.Errno = 14067 - ERROR_SXS_XML_E_INVALIDSWITCH syscall.Errno = 14068 - ERROR_SXS_XML_E_BADXMLCASE syscall.Errno = 14069 - ERROR_SXS_XML_E_INVALID_STANDALONE syscall.Errno = 14070 - ERROR_SXS_XML_E_UNEXPECTED_STANDALONE syscall.Errno = 14071 - ERROR_SXS_XML_E_INVALID_VERSION syscall.Errno = 14072 - ERROR_SXS_XML_E_MISSINGEQUALS syscall.Errno = 14073 - ERROR_SXS_PROTECTION_RECOVERY_FAILED syscall.Errno = 14074 - ERROR_SXS_PROTECTION_PUBLIC_KEY_TOO_SHORT syscall.Errno = 14075 - ERROR_SXS_PROTECTION_CATALOG_NOT_VALID syscall.Errno = 14076 - ERROR_SXS_UNTRANSLATABLE_HRESULT syscall.Errno = 14077 - ERROR_SXS_PROTECTION_CATALOG_FILE_MISSING syscall.Errno = 14078 - ERROR_SXS_MISSING_ASSEMBLY_IDENTITY_ATTRIBUTE syscall.Errno = 14079 - ERROR_SXS_INVALID_ASSEMBLY_IDENTITY_ATTRIBUTE_NAME syscall.Errno = 14080 
- ERROR_SXS_ASSEMBLY_MISSING syscall.Errno = 14081 - ERROR_SXS_CORRUPT_ACTIVATION_STACK syscall.Errno = 14082 - ERROR_SXS_CORRUPTION syscall.Errno = 14083 - ERROR_SXS_EARLY_DEACTIVATION syscall.Errno = 14084 - ERROR_SXS_INVALID_DEACTIVATION syscall.Errno = 14085 - ERROR_SXS_MULTIPLE_DEACTIVATION syscall.Errno = 14086 - ERROR_SXS_PROCESS_TERMINATION_REQUESTED syscall.Errno = 14087 - ERROR_SXS_RELEASE_ACTIVATION_CONTEXT syscall.Errno = 14088 - ERROR_SXS_SYSTEM_DEFAULT_ACTIVATION_CONTEXT_EMPTY syscall.Errno = 14089 - ERROR_SXS_INVALID_IDENTITY_ATTRIBUTE_VALUE syscall.Errno = 14090 - ERROR_SXS_INVALID_IDENTITY_ATTRIBUTE_NAME syscall.Errno = 14091 - ERROR_SXS_IDENTITY_DUPLICATE_ATTRIBUTE syscall.Errno = 14092 - ERROR_SXS_IDENTITY_PARSE_ERROR syscall.Errno = 14093 - ERROR_MALFORMED_SUBSTITUTION_STRING syscall.Errno = 14094 - ERROR_SXS_INCORRECT_PUBLIC_KEY_TOKEN syscall.Errno = 14095 - ERROR_UNMAPPED_SUBSTITUTION_STRING syscall.Errno = 14096 - ERROR_SXS_ASSEMBLY_NOT_LOCKED syscall.Errno = 14097 - ERROR_SXS_COMPONENT_STORE_CORRUPT syscall.Errno = 14098 - ERROR_ADVANCED_INSTALLER_FAILED syscall.Errno = 14099 - ERROR_XML_ENCODING_MISMATCH syscall.Errno = 14100 - ERROR_SXS_MANIFEST_IDENTITY_SAME_BUT_CONTENTS_DIFFERENT syscall.Errno = 14101 - ERROR_SXS_IDENTITIES_DIFFERENT syscall.Errno = 14102 - ERROR_SXS_ASSEMBLY_IS_NOT_A_DEPLOYMENT syscall.Errno = 14103 - ERROR_SXS_FILE_NOT_PART_OF_ASSEMBLY syscall.Errno = 14104 - ERROR_SXS_MANIFEST_TOO_BIG syscall.Errno = 14105 - ERROR_SXS_SETTING_NOT_REGISTERED syscall.Errno = 14106 - ERROR_SXS_TRANSACTION_CLOSURE_INCOMPLETE syscall.Errno = 14107 - ERROR_SMI_PRIMITIVE_INSTALLER_FAILED syscall.Errno = 14108 - ERROR_GENERIC_COMMAND_FAILED syscall.Errno = 14109 - ERROR_SXS_FILE_HASH_MISSING syscall.Errno = 14110 - ERROR_SXS_DUPLICATE_ACTIVATABLE_CLASS syscall.Errno = 14111 - ERROR_EVT_INVALID_CHANNEL_PATH syscall.Errno = 15000 - ERROR_EVT_INVALID_QUERY syscall.Errno = 15001 - ERROR_EVT_PUBLISHER_METADATA_NOT_FOUND syscall.Errno = 15002 - ERROR_EVT_EVENT_TEMPLATE_NOT_FOUND syscall.Errno = 15003 - ERROR_EVT_INVALID_PUBLISHER_NAME syscall.Errno = 15004 - ERROR_EVT_INVALID_EVENT_DATA syscall.Errno = 15005 - ERROR_EVT_CHANNEL_NOT_FOUND syscall.Errno = 15007 - ERROR_EVT_MALFORMED_XML_TEXT syscall.Errno = 15008 - ERROR_EVT_SUBSCRIPTION_TO_DIRECT_CHANNEL syscall.Errno = 15009 - ERROR_EVT_CONFIGURATION_ERROR syscall.Errno = 15010 - ERROR_EVT_QUERY_RESULT_STALE syscall.Errno = 15011 - ERROR_EVT_QUERY_RESULT_INVALID_POSITION syscall.Errno = 15012 - ERROR_EVT_NON_VALIDATING_MSXML syscall.Errno = 15013 - ERROR_EVT_FILTER_ALREADYSCOPED syscall.Errno = 15014 - ERROR_EVT_FILTER_NOTELTSET syscall.Errno = 15015 - ERROR_EVT_FILTER_INVARG syscall.Errno = 15016 - ERROR_EVT_FILTER_INVTEST syscall.Errno = 15017 - ERROR_EVT_FILTER_INVTYPE syscall.Errno = 15018 - ERROR_EVT_FILTER_PARSEERR syscall.Errno = 15019 - ERROR_EVT_FILTER_UNSUPPORTEDOP syscall.Errno = 15020 - ERROR_EVT_FILTER_UNEXPECTEDTOKEN syscall.Errno = 15021 - ERROR_EVT_INVALID_OPERATION_OVER_ENABLED_DIRECT_CHANNEL syscall.Errno = 15022 - ERROR_EVT_INVALID_CHANNEL_PROPERTY_VALUE syscall.Errno = 15023 - ERROR_EVT_INVALID_PUBLISHER_PROPERTY_VALUE syscall.Errno = 15024 - ERROR_EVT_CHANNEL_CANNOT_ACTIVATE syscall.Errno = 15025 - ERROR_EVT_FILTER_TOO_COMPLEX syscall.Errno = 15026 - ERROR_EVT_MESSAGE_NOT_FOUND syscall.Errno = 15027 - ERROR_EVT_MESSAGE_ID_NOT_FOUND syscall.Errno = 15028 - ERROR_EVT_UNRESOLVED_VALUE_INSERT syscall.Errno = 15029 - ERROR_EVT_UNRESOLVED_PARAMETER_INSERT syscall.Errno = 15030 - ERROR_EVT_MAX_INSERTS_REACHED 
syscall.Errno = 15031 - ERROR_EVT_EVENT_DEFINITION_NOT_FOUND syscall.Errno = 15032 - ERROR_EVT_MESSAGE_LOCALE_NOT_FOUND syscall.Errno = 15033 - ERROR_EVT_VERSION_TOO_OLD syscall.Errno = 15034 - ERROR_EVT_VERSION_TOO_NEW syscall.Errno = 15035 - ERROR_EVT_CANNOT_OPEN_CHANNEL_OF_QUERY syscall.Errno = 15036 - ERROR_EVT_PUBLISHER_DISABLED syscall.Errno = 15037 - ERROR_EVT_FILTER_OUT_OF_RANGE syscall.Errno = 15038 - ERROR_EC_SUBSCRIPTION_CANNOT_ACTIVATE syscall.Errno = 15080 - ERROR_EC_LOG_DISABLED syscall.Errno = 15081 - ERROR_EC_CIRCULAR_FORWARDING syscall.Errno = 15082 - ERROR_EC_CREDSTORE_FULL syscall.Errno = 15083 - ERROR_EC_CRED_NOT_FOUND syscall.Errno = 15084 - ERROR_EC_NO_ACTIVE_CHANNEL syscall.Errno = 15085 - ERROR_MUI_FILE_NOT_FOUND syscall.Errno = 15100 - ERROR_MUI_INVALID_FILE syscall.Errno = 15101 - ERROR_MUI_INVALID_RC_CONFIG syscall.Errno = 15102 - ERROR_MUI_INVALID_LOCALE_NAME syscall.Errno = 15103 - ERROR_MUI_INVALID_ULTIMATEFALLBACK_NAME syscall.Errno = 15104 - ERROR_MUI_FILE_NOT_LOADED syscall.Errno = 15105 - ERROR_RESOURCE_ENUM_USER_STOP syscall.Errno = 15106 - ERROR_MUI_INTLSETTINGS_UILANG_NOT_INSTALLED syscall.Errno = 15107 - ERROR_MUI_INTLSETTINGS_INVALID_LOCALE_NAME syscall.Errno = 15108 - ERROR_MRM_RUNTIME_NO_DEFAULT_OR_NEUTRAL_RESOURCE syscall.Errno = 15110 - ERROR_MRM_INVALID_PRICONFIG syscall.Errno = 15111 - ERROR_MRM_INVALID_FILE_TYPE syscall.Errno = 15112 - ERROR_MRM_UNKNOWN_QUALIFIER syscall.Errno = 15113 - ERROR_MRM_INVALID_QUALIFIER_VALUE syscall.Errno = 15114 - ERROR_MRM_NO_CANDIDATE syscall.Errno = 15115 - ERROR_MRM_NO_MATCH_OR_DEFAULT_CANDIDATE syscall.Errno = 15116 - ERROR_MRM_RESOURCE_TYPE_MISMATCH syscall.Errno = 15117 - ERROR_MRM_DUPLICATE_MAP_NAME syscall.Errno = 15118 - ERROR_MRM_DUPLICATE_ENTRY syscall.Errno = 15119 - ERROR_MRM_INVALID_RESOURCE_IDENTIFIER syscall.Errno = 15120 - ERROR_MRM_FILEPATH_TOO_LONG syscall.Errno = 15121 - ERROR_MRM_UNSUPPORTED_DIRECTORY_TYPE syscall.Errno = 15122 - ERROR_MRM_INVALID_PRI_FILE syscall.Errno = 15126 - ERROR_MRM_NAMED_RESOURCE_NOT_FOUND syscall.Errno = 15127 - ERROR_MRM_MAP_NOT_FOUND syscall.Errno = 15135 - ERROR_MRM_UNSUPPORTED_PROFILE_TYPE syscall.Errno = 15136 - ERROR_MRM_INVALID_QUALIFIER_OPERATOR syscall.Errno = 15137 - ERROR_MRM_INDETERMINATE_QUALIFIER_VALUE syscall.Errno = 15138 - ERROR_MRM_AUTOMERGE_ENABLED syscall.Errno = 15139 - ERROR_MRM_TOO_MANY_RESOURCES syscall.Errno = 15140 - ERROR_MRM_UNSUPPORTED_FILE_TYPE_FOR_MERGE syscall.Errno = 15141 - ERROR_MRM_UNSUPPORTED_FILE_TYPE_FOR_LOAD_UNLOAD_PRI_FILE syscall.Errno = 15142 - ERROR_MRM_NO_CURRENT_VIEW_ON_THREAD syscall.Errno = 15143 - ERROR_DIFFERENT_PROFILE_RESOURCE_MANAGER_EXIST syscall.Errno = 15144 - ERROR_OPERATION_NOT_ALLOWED_FROM_SYSTEM_COMPONENT syscall.Errno = 15145 - ERROR_MRM_DIRECT_REF_TO_NON_DEFAULT_RESOURCE syscall.Errno = 15146 - ERROR_MRM_GENERATION_COUNT_MISMATCH syscall.Errno = 15147 - ERROR_PRI_MERGE_VERSION_MISMATCH syscall.Errno = 15148 - ERROR_PRI_MERGE_MISSING_SCHEMA syscall.Errno = 15149 - ERROR_PRI_MERGE_LOAD_FILE_FAILED syscall.Errno = 15150 - ERROR_PRI_MERGE_ADD_FILE_FAILED syscall.Errno = 15151 - ERROR_PRI_MERGE_WRITE_FILE_FAILED syscall.Errno = 15152 - ERROR_PRI_MERGE_MULTIPLE_PACKAGE_FAMILIES_NOT_ALLOWED syscall.Errno = 15153 - ERROR_PRI_MERGE_MULTIPLE_MAIN_PACKAGES_NOT_ALLOWED syscall.Errno = 15154 - ERROR_PRI_MERGE_BUNDLE_PACKAGES_NOT_ALLOWED syscall.Errno = 15155 - ERROR_PRI_MERGE_MAIN_PACKAGE_REQUIRED syscall.Errno = 15156 - ERROR_PRI_MERGE_RESOURCE_PACKAGE_REQUIRED syscall.Errno = 15157 - ERROR_PRI_MERGE_INVALID_FILE_NAME 
syscall.Errno = 15158 - ERROR_MRM_PACKAGE_NOT_FOUND syscall.Errno = 15159 - ERROR_MRM_MISSING_DEFAULT_LANGUAGE syscall.Errno = 15160 - ERROR_MCA_INVALID_CAPABILITIES_STRING syscall.Errno = 15200 - ERROR_MCA_INVALID_VCP_VERSION syscall.Errno = 15201 - ERROR_MCA_MONITOR_VIOLATES_MCCS_SPECIFICATION syscall.Errno = 15202 - ERROR_MCA_MCCS_VERSION_MISMATCH syscall.Errno = 15203 - ERROR_MCA_UNSUPPORTED_MCCS_VERSION syscall.Errno = 15204 - ERROR_MCA_INTERNAL_ERROR syscall.Errno = 15205 - ERROR_MCA_INVALID_TECHNOLOGY_TYPE_RETURNED syscall.Errno = 15206 - ERROR_MCA_UNSUPPORTED_COLOR_TEMPERATURE syscall.Errno = 15207 - ERROR_AMBIGUOUS_SYSTEM_DEVICE syscall.Errno = 15250 - ERROR_SYSTEM_DEVICE_NOT_FOUND syscall.Errno = 15299 - ERROR_HASH_NOT_SUPPORTED syscall.Errno = 15300 - ERROR_HASH_NOT_PRESENT syscall.Errno = 15301 - ERROR_SECONDARY_IC_PROVIDER_NOT_REGISTERED syscall.Errno = 15321 - ERROR_GPIO_CLIENT_INFORMATION_INVALID syscall.Errno = 15322 - ERROR_GPIO_VERSION_NOT_SUPPORTED syscall.Errno = 15323 - ERROR_GPIO_INVALID_REGISTRATION_PACKET syscall.Errno = 15324 - ERROR_GPIO_OPERATION_DENIED syscall.Errno = 15325 - ERROR_GPIO_INCOMPATIBLE_CONNECT_MODE syscall.Errno = 15326 - ERROR_GPIO_INTERRUPT_ALREADY_UNMASKED syscall.Errno = 15327 - ERROR_CANNOT_SWITCH_RUNLEVEL syscall.Errno = 15400 - ERROR_INVALID_RUNLEVEL_SETTING syscall.Errno = 15401 - ERROR_RUNLEVEL_SWITCH_TIMEOUT syscall.Errno = 15402 - ERROR_RUNLEVEL_SWITCH_AGENT_TIMEOUT syscall.Errno = 15403 - ERROR_RUNLEVEL_SWITCH_IN_PROGRESS syscall.Errno = 15404 - ERROR_SERVICES_FAILED_AUTOSTART syscall.Errno = 15405 - ERROR_COM_TASK_STOP_PENDING syscall.Errno = 15501 - ERROR_INSTALL_OPEN_PACKAGE_FAILED syscall.Errno = 15600 - ERROR_INSTALL_PACKAGE_NOT_FOUND syscall.Errno = 15601 - ERROR_INSTALL_INVALID_PACKAGE syscall.Errno = 15602 - ERROR_INSTALL_RESOLVE_DEPENDENCY_FAILED syscall.Errno = 15603 - ERROR_INSTALL_OUT_OF_DISK_SPACE syscall.Errno = 15604 - ERROR_INSTALL_NETWORK_FAILURE syscall.Errno = 15605 - ERROR_INSTALL_REGISTRATION_FAILURE syscall.Errno = 15606 - ERROR_INSTALL_DEREGISTRATION_FAILURE syscall.Errno = 15607 - ERROR_INSTALL_CANCEL syscall.Errno = 15608 - ERROR_INSTALL_FAILED syscall.Errno = 15609 - ERROR_REMOVE_FAILED syscall.Errno = 15610 - ERROR_PACKAGE_ALREADY_EXISTS syscall.Errno = 15611 - ERROR_NEEDS_REMEDIATION syscall.Errno = 15612 - ERROR_INSTALL_PREREQUISITE_FAILED syscall.Errno = 15613 - ERROR_PACKAGE_REPOSITORY_CORRUPTED syscall.Errno = 15614 - ERROR_INSTALL_POLICY_FAILURE syscall.Errno = 15615 - ERROR_PACKAGE_UPDATING syscall.Errno = 15616 - ERROR_DEPLOYMENT_BLOCKED_BY_POLICY syscall.Errno = 15617 - ERROR_PACKAGES_IN_USE syscall.Errno = 15618 - ERROR_RECOVERY_FILE_CORRUPT syscall.Errno = 15619 - ERROR_INVALID_STAGED_SIGNATURE syscall.Errno = 15620 - ERROR_DELETING_EXISTING_APPLICATIONDATA_STORE_FAILED syscall.Errno = 15621 - ERROR_INSTALL_PACKAGE_DOWNGRADE syscall.Errno = 15622 - ERROR_SYSTEM_NEEDS_REMEDIATION syscall.Errno = 15623 - ERROR_APPX_INTEGRITY_FAILURE_CLR_NGEN syscall.Errno = 15624 - ERROR_RESILIENCY_FILE_CORRUPT syscall.Errno = 15625 - ERROR_INSTALL_FIREWALL_SERVICE_NOT_RUNNING syscall.Errno = 15626 - ERROR_PACKAGE_MOVE_FAILED syscall.Errno = 15627 - ERROR_INSTALL_VOLUME_NOT_EMPTY syscall.Errno = 15628 - ERROR_INSTALL_VOLUME_OFFLINE syscall.Errno = 15629 - ERROR_INSTALL_VOLUME_CORRUPT syscall.Errno = 15630 - ERROR_NEEDS_REGISTRATION syscall.Errno = 15631 - ERROR_INSTALL_WRONG_PROCESSOR_ARCHITECTURE syscall.Errno = 15632 - ERROR_DEV_SIDELOAD_LIMIT_EXCEEDED syscall.Errno = 15633 - 
[Flattened listing of Windows error-code constants, each declared as a syscall.Errno or Handle with its numeric value (apparently the vendored golang.org/x/sys/windows zerrors_windows.go tables). In order, the span covers: AppX/deployment and app-model errors (ERROR_INSTALL_*, ERROR_PACKAGE*, ERROR_DEPLOYMENT_*, APPMODEL_ERROR_*, values 15634-15707); state, settings, and store errors (ERROR_STATE_*, ERROR_API_UNAVAILABLE, STORE_ERROR_*, values 15800-15864); COM/OLE/automation HRESULTs (E_*, CO_E_*, OLE_E_*, DV_E_*, DRAGDROP_*, CLASS_E_*, REGDB_E_*, CS_E_*, MK_E_*, EVENT_E_*, XACT_*, CONTEXT_E_*, SCHED_*, APPX_E_*, DISP_E_*, TYPE_E_*, STG_E_*, RPC_E_*); security and crypto HRESULTs (NTE_*, SEC_E_*/SEC_I_*, CRYPT_E_*, OSS_*, CERTSRV_E_*, XENROLL_E_*, TRUST_E_*, MSSIPOTF_E_*, CERT_E_*); and setup API (SPAPI_E_*), smart card (SCARD_*), COM+ administration (COMADMIN_E_*, COMQC_E_*), Windows Error Reporting (WER_*), filter manager (ERROR_FLT_*), and DWM/monitor/graphics (DWM_*, ERROR_MONITOR_*, ERROR_GRAPHICS_*) codes.]
ERROR_GRAPHICS_INVALID_VIDPN_TARGET_SUBSET_TYPE Handle = 0xC026232F - ERROR_GRAPHICS_RESOURCES_NOT_RELATED Handle = 0xC0262330 - ERROR_GRAPHICS_SOURCE_ID_MUST_BE_UNIQUE Handle = 0xC0262331 - ERROR_GRAPHICS_TARGET_ID_MUST_BE_UNIQUE Handle = 0xC0262332 - ERROR_GRAPHICS_NO_AVAILABLE_VIDPN_TARGET Handle = 0xC0262333 - ERROR_GRAPHICS_MONITOR_COULD_NOT_BE_ASSOCIATED_WITH_ADAPTER Handle = 0xC0262334 - ERROR_GRAPHICS_NO_VIDPNMGR Handle = 0xC0262335 - ERROR_GRAPHICS_NO_ACTIVE_VIDPN Handle = 0xC0262336 - ERROR_GRAPHICS_STALE_VIDPN_TOPOLOGY Handle = 0xC0262337 - ERROR_GRAPHICS_MONITOR_NOT_CONNECTED Handle = 0xC0262338 - ERROR_GRAPHICS_SOURCE_NOT_IN_TOPOLOGY Handle = 0xC0262339 - ERROR_GRAPHICS_INVALID_PRIMARYSURFACE_SIZE Handle = 0xC026233A - ERROR_GRAPHICS_INVALID_VISIBLEREGION_SIZE Handle = 0xC026233B - ERROR_GRAPHICS_INVALID_STRIDE Handle = 0xC026233C - ERROR_GRAPHICS_INVALID_PIXELFORMAT Handle = 0xC026233D - ERROR_GRAPHICS_INVALID_COLORBASIS Handle = 0xC026233E - ERROR_GRAPHICS_INVALID_PIXELVALUEACCESSMODE Handle = 0xC026233F - ERROR_GRAPHICS_TARGET_NOT_IN_TOPOLOGY Handle = 0xC0262340 - ERROR_GRAPHICS_NO_DISPLAY_MODE_MANAGEMENT_SUPPORT Handle = 0xC0262341 - ERROR_GRAPHICS_VIDPN_SOURCE_IN_USE Handle = 0xC0262342 - ERROR_GRAPHICS_CANT_ACCESS_ACTIVE_VIDPN Handle = 0xC0262343 - ERROR_GRAPHICS_INVALID_PATH_IMPORTANCE_ORDINAL Handle = 0xC0262344 - ERROR_GRAPHICS_INVALID_PATH_CONTENT_GEOMETRY_TRANSFORMATION Handle = 0xC0262345 - ERROR_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_SUPPORTED Handle = 0xC0262346 - ERROR_GRAPHICS_INVALID_GAMMA_RAMP Handle = 0xC0262347 - ERROR_GRAPHICS_GAMMA_RAMP_NOT_SUPPORTED Handle = 0xC0262348 - ERROR_GRAPHICS_MULTISAMPLING_NOT_SUPPORTED Handle = 0xC0262349 - ERROR_GRAPHICS_MODE_NOT_IN_MODESET Handle = 0xC026234A - ERROR_GRAPHICS_DATASET_IS_EMPTY Handle = 0x0026234B - ERROR_GRAPHICS_NO_MORE_ELEMENTS_IN_DATASET Handle = 0x0026234C - ERROR_GRAPHICS_INVALID_VIDPN_TOPOLOGY_RECOMMENDATION_REASON Handle = 0xC026234D - ERROR_GRAPHICS_INVALID_PATH_CONTENT_TYPE Handle = 0xC026234E - ERROR_GRAPHICS_INVALID_COPYPROTECTION_TYPE Handle = 0xC026234F - ERROR_GRAPHICS_UNASSIGNED_MODESET_ALREADY_EXISTS Handle = 0xC0262350 - ERROR_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_PINNED Handle = 0x00262351 - ERROR_GRAPHICS_INVALID_SCANLINE_ORDERING Handle = 0xC0262352 - ERROR_GRAPHICS_TOPOLOGY_CHANGES_NOT_ALLOWED Handle = 0xC0262353 - ERROR_GRAPHICS_NO_AVAILABLE_IMPORTANCE_ORDINALS Handle = 0xC0262354 - ERROR_GRAPHICS_INCOMPATIBLE_PRIVATE_FORMAT Handle = 0xC0262355 - ERROR_GRAPHICS_INVALID_MODE_PRUNING_ALGORITHM Handle = 0xC0262356 - ERROR_GRAPHICS_INVALID_MONITOR_CAPABILITY_ORIGIN Handle = 0xC0262357 - ERROR_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE_CONSTRAINT Handle = 0xC0262358 - ERROR_GRAPHICS_MAX_NUM_PATHS_REACHED Handle = 0xC0262359 - ERROR_GRAPHICS_CANCEL_VIDPN_TOPOLOGY_AUGMENTATION Handle = 0xC026235A - ERROR_GRAPHICS_INVALID_CLIENT_TYPE Handle = 0xC026235B - ERROR_GRAPHICS_CLIENTVIDPN_NOT_SET Handle = 0xC026235C - ERROR_GRAPHICS_SPECIFIED_CHILD_ALREADY_CONNECTED Handle = 0xC0262400 - ERROR_GRAPHICS_CHILD_DESCRIPTOR_NOT_SUPPORTED Handle = 0xC0262401 - ERROR_GRAPHICS_UNKNOWN_CHILD_STATUS Handle = 0x4026242F - ERROR_GRAPHICS_NOT_A_LINKED_ADAPTER Handle = 0xC0262430 - ERROR_GRAPHICS_LEADLINK_NOT_ENUMERATED Handle = 0xC0262431 - ERROR_GRAPHICS_CHAINLINKS_NOT_ENUMERATED Handle = 0xC0262432 - ERROR_GRAPHICS_ADAPTER_CHAIN_NOT_READY Handle = 0xC0262433 - ERROR_GRAPHICS_CHAINLINKS_NOT_STARTED Handle = 0xC0262434 - ERROR_GRAPHICS_CHAINLINKS_NOT_POWERED_ON Handle = 0xC0262435 - 
ERROR_GRAPHICS_INCONSISTENT_DEVICE_LINK_STATE Handle = 0xC0262436 - ERROR_GRAPHICS_LEADLINK_START_DEFERRED Handle = 0x40262437 - ERROR_GRAPHICS_NOT_POST_DEVICE_DRIVER Handle = 0xC0262438 - ERROR_GRAPHICS_POLLING_TOO_FREQUENTLY Handle = 0x40262439 - ERROR_GRAPHICS_START_DEFERRED Handle = 0x4026243A - ERROR_GRAPHICS_ADAPTER_ACCESS_NOT_EXCLUDED Handle = 0xC026243B - ERROR_GRAPHICS_DEPENDABLE_CHILD_STATUS Handle = 0x4026243C - ERROR_GRAPHICS_OPM_NOT_SUPPORTED Handle = 0xC0262500 - ERROR_GRAPHICS_COPP_NOT_SUPPORTED Handle = 0xC0262501 - ERROR_GRAPHICS_UAB_NOT_SUPPORTED Handle = 0xC0262502 - ERROR_GRAPHICS_OPM_INVALID_ENCRYPTED_PARAMETERS Handle = 0xC0262503 - ERROR_GRAPHICS_OPM_NO_VIDEO_OUTPUTS_EXIST Handle = 0xC0262505 - ERROR_GRAPHICS_OPM_INTERNAL_ERROR Handle = 0xC026250B - ERROR_GRAPHICS_OPM_INVALID_HANDLE Handle = 0xC026250C - ERROR_GRAPHICS_PVP_INVALID_CERTIFICATE_LENGTH Handle = 0xC026250E - ERROR_GRAPHICS_OPM_SPANNING_MODE_ENABLED Handle = 0xC026250F - ERROR_GRAPHICS_OPM_THEATER_MODE_ENABLED Handle = 0xC0262510 - ERROR_GRAPHICS_PVP_HFS_FAILED Handle = 0xC0262511 - ERROR_GRAPHICS_OPM_INVALID_SRM Handle = 0xC0262512 - ERROR_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_HDCP Handle = 0xC0262513 - ERROR_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_ACP Handle = 0xC0262514 - ERROR_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_CGMSA Handle = 0xC0262515 - ERROR_GRAPHICS_OPM_HDCP_SRM_NEVER_SET Handle = 0xC0262516 - ERROR_GRAPHICS_OPM_RESOLUTION_TOO_HIGH Handle = 0xC0262517 - ERROR_GRAPHICS_OPM_ALL_HDCP_HARDWARE_ALREADY_IN_USE Handle = 0xC0262518 - ERROR_GRAPHICS_OPM_VIDEO_OUTPUT_NO_LONGER_EXISTS Handle = 0xC026251A - ERROR_GRAPHICS_OPM_SESSION_TYPE_CHANGE_IN_PROGRESS Handle = 0xC026251B - ERROR_GRAPHICS_OPM_VIDEO_OUTPUT_DOES_NOT_HAVE_COPP_SEMANTICS Handle = 0xC026251C - ERROR_GRAPHICS_OPM_INVALID_INFORMATION_REQUEST Handle = 0xC026251D - ERROR_GRAPHICS_OPM_DRIVER_INTERNAL_ERROR Handle = 0xC026251E - ERROR_GRAPHICS_OPM_VIDEO_OUTPUT_DOES_NOT_HAVE_OPM_SEMANTICS Handle = 0xC026251F - ERROR_GRAPHICS_OPM_SIGNALING_NOT_SUPPORTED Handle = 0xC0262520 - ERROR_GRAPHICS_OPM_INVALID_CONFIGURATION_REQUEST Handle = 0xC0262521 - ERROR_GRAPHICS_I2C_NOT_SUPPORTED Handle = 0xC0262580 - ERROR_GRAPHICS_I2C_DEVICE_DOES_NOT_EXIST Handle = 0xC0262581 - ERROR_GRAPHICS_I2C_ERROR_TRANSMITTING_DATA Handle = 0xC0262582 - ERROR_GRAPHICS_I2C_ERROR_RECEIVING_DATA Handle = 0xC0262583 - ERROR_GRAPHICS_DDCCI_VCP_NOT_SUPPORTED Handle = 0xC0262584 - ERROR_GRAPHICS_DDCCI_INVALID_DATA Handle = 0xC0262585 - ERROR_GRAPHICS_DDCCI_MONITOR_RETURNED_INVALID_TIMING_STATUS_BYTE Handle = 0xC0262586 - ERROR_GRAPHICS_MCA_INVALID_CAPABILITIES_STRING Handle = 0xC0262587 - ERROR_GRAPHICS_MCA_INTERNAL_ERROR Handle = 0xC0262588 - ERROR_GRAPHICS_DDCCI_INVALID_MESSAGE_COMMAND Handle = 0xC0262589 - ERROR_GRAPHICS_DDCCI_INVALID_MESSAGE_LENGTH Handle = 0xC026258A - ERROR_GRAPHICS_DDCCI_INVALID_MESSAGE_CHECKSUM Handle = 0xC026258B - ERROR_GRAPHICS_INVALID_PHYSICAL_MONITOR_HANDLE Handle = 0xC026258C - ERROR_GRAPHICS_MONITOR_NO_LONGER_EXISTS Handle = 0xC026258D - ERROR_GRAPHICS_DDCCI_CURRENT_CURRENT_VALUE_GREATER_THAN_MAXIMUM_VALUE Handle = 0xC02625D8 - ERROR_GRAPHICS_MCA_INVALID_VCP_VERSION Handle = 0xC02625D9 - ERROR_GRAPHICS_MCA_MONITOR_VIOLATES_MCCS_SPECIFICATION Handle = 0xC02625DA - ERROR_GRAPHICS_MCA_MCCS_VERSION_MISMATCH Handle = 0xC02625DB - ERROR_GRAPHICS_MCA_UNSUPPORTED_MCCS_VERSION Handle = 0xC02625DC - ERROR_GRAPHICS_MCA_INVALID_TECHNOLOGY_TYPE_RETURNED Handle = 0xC02625DE - ERROR_GRAPHICS_MCA_UNSUPPORTED_COLOR_TEMPERATURE Handle = 0xC02625DF - 
ERROR_GRAPHICS_ONLY_CONSOLE_SESSION_SUPPORTED Handle = 0xC02625E0 - ERROR_GRAPHICS_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME Handle = 0xC02625E1 - ERROR_GRAPHICS_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP Handle = 0xC02625E2 - ERROR_GRAPHICS_MIRRORING_DEVICES_NOT_SUPPORTED Handle = 0xC02625E3 - ERROR_GRAPHICS_INVALID_POINTER Handle = 0xC02625E4 - ERROR_GRAPHICS_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE Handle = 0xC02625E5 - ERROR_GRAPHICS_PARAMETER_ARRAY_TOO_SMALL Handle = 0xC02625E6 - ERROR_GRAPHICS_INTERNAL_ERROR Handle = 0xC02625E7 - ERROR_GRAPHICS_SESSION_TYPE_CHANGE_IN_PROGRESS Handle = 0xC02605E8 - NAP_E_INVALID_PACKET Handle = 0x80270001 - NAP_E_MISSING_SOH Handle = 0x80270002 - NAP_E_CONFLICTING_ID Handle = 0x80270003 - NAP_E_NO_CACHED_SOH Handle = 0x80270004 - NAP_E_STILL_BOUND Handle = 0x80270005 - NAP_E_NOT_REGISTERED Handle = 0x80270006 - NAP_E_NOT_INITIALIZED Handle = 0x80270007 - NAP_E_MISMATCHED_ID Handle = 0x80270008 - NAP_E_NOT_PENDING Handle = 0x80270009 - NAP_E_ID_NOT_FOUND Handle = 0x8027000A - NAP_E_MAXSIZE_TOO_SMALL Handle = 0x8027000B - NAP_E_SERVICE_NOT_RUNNING Handle = 0x8027000C - NAP_S_CERT_ALREADY_PRESENT Handle = 0x0027000D - NAP_E_ENTITY_DISABLED Handle = 0x8027000E - NAP_E_NETSH_GROUPPOLICY_ERROR Handle = 0x8027000F - NAP_E_TOO_MANY_CALLS Handle = 0x80270010 - NAP_E_SHV_CONFIG_EXISTED Handle = 0x80270011 - NAP_E_SHV_CONFIG_NOT_FOUND Handle = 0x80270012 - NAP_E_SHV_TIMEOUT Handle = 0x80270013 - TPM_E_ERROR_MASK Handle = 0x80280000 - TPM_E_AUTHFAIL Handle = 0x80280001 - TPM_E_BADINDEX Handle = 0x80280002 - TPM_E_BAD_PARAMETER Handle = 0x80280003 - TPM_E_AUDITFAILURE Handle = 0x80280004 - TPM_E_CLEAR_DISABLED Handle = 0x80280005 - TPM_E_DEACTIVATED Handle = 0x80280006 - TPM_E_DISABLED Handle = 0x80280007 - TPM_E_DISABLED_CMD Handle = 0x80280008 - TPM_E_FAIL Handle = 0x80280009 - TPM_E_BAD_ORDINAL Handle = 0x8028000A - TPM_E_INSTALL_DISABLED Handle = 0x8028000B - TPM_E_INVALID_KEYHANDLE Handle = 0x8028000C - TPM_E_KEYNOTFOUND Handle = 0x8028000D - TPM_E_INAPPROPRIATE_ENC Handle = 0x8028000E - TPM_E_MIGRATEFAIL Handle = 0x8028000F - TPM_E_INVALID_PCR_INFO Handle = 0x80280010 - TPM_E_NOSPACE Handle = 0x80280011 - TPM_E_NOSRK Handle = 0x80280012 - TPM_E_NOTSEALED_BLOB Handle = 0x80280013 - TPM_E_OWNER_SET Handle = 0x80280014 - TPM_E_RESOURCES Handle = 0x80280015 - TPM_E_SHORTRANDOM Handle = 0x80280016 - TPM_E_SIZE Handle = 0x80280017 - TPM_E_WRONGPCRVAL Handle = 0x80280018 - TPM_E_BAD_PARAM_SIZE Handle = 0x80280019 - TPM_E_SHA_THREAD Handle = 0x8028001A - TPM_E_SHA_ERROR Handle = 0x8028001B - TPM_E_FAILEDSELFTEST Handle = 0x8028001C - TPM_E_AUTH2FAIL Handle = 0x8028001D - TPM_E_BADTAG Handle = 0x8028001E - TPM_E_IOERROR Handle = 0x8028001F - TPM_E_ENCRYPT_ERROR Handle = 0x80280020 - TPM_E_DECRYPT_ERROR Handle = 0x80280021 - TPM_E_INVALID_AUTHHANDLE Handle = 0x80280022 - TPM_E_NO_ENDORSEMENT Handle = 0x80280023 - TPM_E_INVALID_KEYUSAGE Handle = 0x80280024 - TPM_E_WRONG_ENTITYTYPE Handle = 0x80280025 - TPM_E_INVALID_POSTINIT Handle = 0x80280026 - TPM_E_INAPPROPRIATE_SIG Handle = 0x80280027 - TPM_E_BAD_KEY_PROPERTY Handle = 0x80280028 - TPM_E_BAD_MIGRATION Handle = 0x80280029 - TPM_E_BAD_SCHEME Handle = 0x8028002A - TPM_E_BAD_DATASIZE Handle = 0x8028002B - TPM_E_BAD_MODE Handle = 0x8028002C - TPM_E_BAD_PRESENCE Handle = 0x8028002D - TPM_E_BAD_VERSION Handle = 0x8028002E - TPM_E_NO_WRAP_TRANSPORT Handle = 0x8028002F - TPM_E_AUDITFAIL_UNSUCCESSFUL Handle = 0x80280030 - TPM_E_AUDITFAIL_SUCCESSFUL Handle = 0x80280031 - TPM_E_NOTRESETABLE Handle = 0x80280032 - TPM_E_NOTLOCAL Handle 
= 0x80280033 - TPM_E_BAD_TYPE Handle = 0x80280034 - TPM_E_INVALID_RESOURCE Handle = 0x80280035 - TPM_E_NOTFIPS Handle = 0x80280036 - TPM_E_INVALID_FAMILY Handle = 0x80280037 - TPM_E_NO_NV_PERMISSION Handle = 0x80280038 - TPM_E_REQUIRES_SIGN Handle = 0x80280039 - TPM_E_KEY_NOTSUPPORTED Handle = 0x8028003A - TPM_E_AUTH_CONFLICT Handle = 0x8028003B - TPM_E_AREA_LOCKED Handle = 0x8028003C - TPM_E_BAD_LOCALITY Handle = 0x8028003D - TPM_E_READ_ONLY Handle = 0x8028003E - TPM_E_PER_NOWRITE Handle = 0x8028003F - TPM_E_FAMILYCOUNT Handle = 0x80280040 - TPM_E_WRITE_LOCKED Handle = 0x80280041 - TPM_E_BAD_ATTRIBUTES Handle = 0x80280042 - TPM_E_INVALID_STRUCTURE Handle = 0x80280043 - TPM_E_KEY_OWNER_CONTROL Handle = 0x80280044 - TPM_E_BAD_COUNTER Handle = 0x80280045 - TPM_E_NOT_FULLWRITE Handle = 0x80280046 - TPM_E_CONTEXT_GAP Handle = 0x80280047 - TPM_E_MAXNVWRITES Handle = 0x80280048 - TPM_E_NOOPERATOR Handle = 0x80280049 - TPM_E_RESOURCEMISSING Handle = 0x8028004A - TPM_E_DELEGATE_LOCK Handle = 0x8028004B - TPM_E_DELEGATE_FAMILY Handle = 0x8028004C - TPM_E_DELEGATE_ADMIN Handle = 0x8028004D - TPM_E_TRANSPORT_NOTEXCLUSIVE Handle = 0x8028004E - TPM_E_OWNER_CONTROL Handle = 0x8028004F - TPM_E_DAA_RESOURCES Handle = 0x80280050 - TPM_E_DAA_INPUT_DATA0 Handle = 0x80280051 - TPM_E_DAA_INPUT_DATA1 Handle = 0x80280052 - TPM_E_DAA_ISSUER_SETTINGS Handle = 0x80280053 - TPM_E_DAA_TPM_SETTINGS Handle = 0x80280054 - TPM_E_DAA_STAGE Handle = 0x80280055 - TPM_E_DAA_ISSUER_VALIDITY Handle = 0x80280056 - TPM_E_DAA_WRONG_W Handle = 0x80280057 - TPM_E_BAD_HANDLE Handle = 0x80280058 - TPM_E_BAD_DELEGATE Handle = 0x80280059 - TPM_E_BADCONTEXT Handle = 0x8028005A - TPM_E_TOOMANYCONTEXTS Handle = 0x8028005B - TPM_E_MA_TICKET_SIGNATURE Handle = 0x8028005C - TPM_E_MA_DESTINATION Handle = 0x8028005D - TPM_E_MA_SOURCE Handle = 0x8028005E - TPM_E_MA_AUTHORITY Handle = 0x8028005F - TPM_E_PERMANENTEK Handle = 0x80280061 - TPM_E_BAD_SIGNATURE Handle = 0x80280062 - TPM_E_NOCONTEXTSPACE Handle = 0x80280063 - TPM_20_E_ASYMMETRIC Handle = 0x80280081 - TPM_20_E_ATTRIBUTES Handle = 0x80280082 - TPM_20_E_HASH Handle = 0x80280083 - TPM_20_E_VALUE Handle = 0x80280084 - TPM_20_E_HIERARCHY Handle = 0x80280085 - TPM_20_E_KEY_SIZE Handle = 0x80280087 - TPM_20_E_MGF Handle = 0x80280088 - TPM_20_E_MODE Handle = 0x80280089 - TPM_20_E_TYPE Handle = 0x8028008A - TPM_20_E_HANDLE Handle = 0x8028008B - TPM_20_E_KDF Handle = 0x8028008C - TPM_20_E_RANGE Handle = 0x8028008D - TPM_20_E_AUTH_FAIL Handle = 0x8028008E - TPM_20_E_NONCE Handle = 0x8028008F - TPM_20_E_PP Handle = 0x80280090 - TPM_20_E_SCHEME Handle = 0x80280092 - TPM_20_E_SIZE Handle = 0x80280095 - TPM_20_E_SYMMETRIC Handle = 0x80280096 - TPM_20_E_TAG Handle = 0x80280097 - TPM_20_E_SELECTOR Handle = 0x80280098 - TPM_20_E_INSUFFICIENT Handle = 0x8028009A - TPM_20_E_SIGNATURE Handle = 0x8028009B - TPM_20_E_KEY Handle = 0x8028009C - TPM_20_E_POLICY_FAIL Handle = 0x8028009D - TPM_20_E_INTEGRITY Handle = 0x8028009F - TPM_20_E_TICKET Handle = 0x802800A0 - TPM_20_E_RESERVED_BITS Handle = 0x802800A1 - TPM_20_E_BAD_AUTH Handle = 0x802800A2 - TPM_20_E_EXPIRED Handle = 0x802800A3 - TPM_20_E_POLICY_CC Handle = 0x802800A4 - TPM_20_E_BINDING Handle = 0x802800A5 - TPM_20_E_CURVE Handle = 0x802800A6 - TPM_20_E_ECC_POINT Handle = 0x802800A7 - TPM_20_E_INITIALIZE Handle = 0x80280100 - TPM_20_E_FAILURE Handle = 0x80280101 - TPM_20_E_SEQUENCE Handle = 0x80280103 - TPM_20_E_PRIVATE Handle = 0x8028010B - TPM_20_E_HMAC Handle = 0x80280119 - TPM_20_E_DISABLED Handle = 0x80280120 - TPM_20_E_EXCLUSIVE Handle = 0x80280121 
- TPM_20_E_ECC_CURVE Handle = 0x80280123 - TPM_20_E_AUTH_TYPE Handle = 0x80280124 - TPM_20_E_AUTH_MISSING Handle = 0x80280125 - TPM_20_E_POLICY Handle = 0x80280126 - TPM_20_E_PCR Handle = 0x80280127 - TPM_20_E_PCR_CHANGED Handle = 0x80280128 - TPM_20_E_UPGRADE Handle = 0x8028012D - TPM_20_E_TOO_MANY_CONTEXTS Handle = 0x8028012E - TPM_20_E_AUTH_UNAVAILABLE Handle = 0x8028012F - TPM_20_E_REBOOT Handle = 0x80280130 - TPM_20_E_UNBALANCED Handle = 0x80280131 - TPM_20_E_COMMAND_SIZE Handle = 0x80280142 - TPM_20_E_COMMAND_CODE Handle = 0x80280143 - TPM_20_E_AUTHSIZE Handle = 0x80280144 - TPM_20_E_AUTH_CONTEXT Handle = 0x80280145 - TPM_20_E_NV_RANGE Handle = 0x80280146 - TPM_20_E_NV_SIZE Handle = 0x80280147 - TPM_20_E_NV_LOCKED Handle = 0x80280148 - TPM_20_E_NV_AUTHORIZATION Handle = 0x80280149 - TPM_20_E_NV_UNINITIALIZED Handle = 0x8028014A - TPM_20_E_NV_SPACE Handle = 0x8028014B - TPM_20_E_NV_DEFINED Handle = 0x8028014C - TPM_20_E_BAD_CONTEXT Handle = 0x80280150 - TPM_20_E_CPHASH Handle = 0x80280151 - TPM_20_E_PARENT Handle = 0x80280152 - TPM_20_E_NEEDS_TEST Handle = 0x80280153 - TPM_20_E_NO_RESULT Handle = 0x80280154 - TPM_20_E_SENSITIVE Handle = 0x80280155 - TPM_E_COMMAND_BLOCKED Handle = 0x80280400 - TPM_E_INVALID_HANDLE Handle = 0x80280401 - TPM_E_DUPLICATE_VHANDLE Handle = 0x80280402 - TPM_E_EMBEDDED_COMMAND_BLOCKED Handle = 0x80280403 - TPM_E_EMBEDDED_COMMAND_UNSUPPORTED Handle = 0x80280404 - TPM_E_RETRY Handle = 0x80280800 - TPM_E_NEEDS_SELFTEST Handle = 0x80280801 - TPM_E_DOING_SELFTEST Handle = 0x80280802 - TPM_E_DEFEND_LOCK_RUNNING Handle = 0x80280803 - TPM_20_E_CONTEXT_GAP Handle = 0x80280901 - TPM_20_E_OBJECT_MEMORY Handle = 0x80280902 - TPM_20_E_SESSION_MEMORY Handle = 0x80280903 - TPM_20_E_MEMORY Handle = 0x80280904 - TPM_20_E_SESSION_HANDLES Handle = 0x80280905 - TPM_20_E_OBJECT_HANDLES Handle = 0x80280906 - TPM_20_E_LOCALITY Handle = 0x80280907 - TPM_20_E_YIELDED Handle = 0x80280908 - TPM_20_E_CANCELED Handle = 0x80280909 - TPM_20_E_TESTING Handle = 0x8028090A - TPM_20_E_NV_RATE Handle = 0x80280920 - TPM_20_E_LOCKOUT Handle = 0x80280921 - TPM_20_E_RETRY Handle = 0x80280922 - TPM_20_E_NV_UNAVAILABLE Handle = 0x80280923 - TBS_E_INTERNAL_ERROR Handle = 0x80284001 - TBS_E_BAD_PARAMETER Handle = 0x80284002 - TBS_E_INVALID_OUTPUT_POINTER Handle = 0x80284003 - TBS_E_INVALID_CONTEXT Handle = 0x80284004 - TBS_E_INSUFFICIENT_BUFFER Handle = 0x80284005 - TBS_E_IOERROR Handle = 0x80284006 - TBS_E_INVALID_CONTEXT_PARAM Handle = 0x80284007 - TBS_E_SERVICE_NOT_RUNNING Handle = 0x80284008 - TBS_E_TOO_MANY_TBS_CONTEXTS Handle = 0x80284009 - TBS_E_TOO_MANY_RESOURCES Handle = 0x8028400A - TBS_E_SERVICE_START_PENDING Handle = 0x8028400B - TBS_E_PPI_NOT_SUPPORTED Handle = 0x8028400C - TBS_E_COMMAND_CANCELED Handle = 0x8028400D - TBS_E_BUFFER_TOO_LARGE Handle = 0x8028400E - TBS_E_TPM_NOT_FOUND Handle = 0x8028400F - TBS_E_SERVICE_DISABLED Handle = 0x80284010 - TBS_E_NO_EVENT_LOG Handle = 0x80284011 - TBS_E_ACCESS_DENIED Handle = 0x80284012 - TBS_E_PROVISIONING_NOT_ALLOWED Handle = 0x80284013 - TBS_E_PPI_FUNCTION_UNSUPPORTED Handle = 0x80284014 - TBS_E_OWNERAUTH_NOT_FOUND Handle = 0x80284015 - TBS_E_PROVISIONING_INCOMPLETE Handle = 0x80284016 - TPMAPI_E_INVALID_STATE Handle = 0x80290100 - TPMAPI_E_NOT_ENOUGH_DATA Handle = 0x80290101 - TPMAPI_E_TOO_MUCH_DATA Handle = 0x80290102 - TPMAPI_E_INVALID_OUTPUT_POINTER Handle = 0x80290103 - TPMAPI_E_INVALID_PARAMETER Handle = 0x80290104 - TPMAPI_E_OUT_OF_MEMORY Handle = 0x80290105 - TPMAPI_E_BUFFER_TOO_SMALL Handle = 0x80290106 - TPMAPI_E_INTERNAL_ERROR Handle = 
0x80290107 - TPMAPI_E_ACCESS_DENIED Handle = 0x80290108 - TPMAPI_E_AUTHORIZATION_FAILED Handle = 0x80290109 - TPMAPI_E_INVALID_CONTEXT_HANDLE Handle = 0x8029010A - TPMAPI_E_TBS_COMMUNICATION_ERROR Handle = 0x8029010B - TPMAPI_E_TPM_COMMAND_ERROR Handle = 0x8029010C - TPMAPI_E_MESSAGE_TOO_LARGE Handle = 0x8029010D - TPMAPI_E_INVALID_ENCODING Handle = 0x8029010E - TPMAPI_E_INVALID_KEY_SIZE Handle = 0x8029010F - TPMAPI_E_ENCRYPTION_FAILED Handle = 0x80290110 - TPMAPI_E_INVALID_KEY_PARAMS Handle = 0x80290111 - TPMAPI_E_INVALID_MIGRATION_AUTHORIZATION_BLOB Handle = 0x80290112 - TPMAPI_E_INVALID_PCR_INDEX Handle = 0x80290113 - TPMAPI_E_INVALID_DELEGATE_BLOB Handle = 0x80290114 - TPMAPI_E_INVALID_CONTEXT_PARAMS Handle = 0x80290115 - TPMAPI_E_INVALID_KEY_BLOB Handle = 0x80290116 - TPMAPI_E_INVALID_PCR_DATA Handle = 0x80290117 - TPMAPI_E_INVALID_OWNER_AUTH Handle = 0x80290118 - TPMAPI_E_FIPS_RNG_CHECK_FAILED Handle = 0x80290119 - TPMAPI_E_EMPTY_TCG_LOG Handle = 0x8029011A - TPMAPI_E_INVALID_TCG_LOG_ENTRY Handle = 0x8029011B - TPMAPI_E_TCG_SEPARATOR_ABSENT Handle = 0x8029011C - TPMAPI_E_TCG_INVALID_DIGEST_ENTRY Handle = 0x8029011D - TPMAPI_E_POLICY_DENIES_OPERATION Handle = 0x8029011E - TPMAPI_E_NV_BITS_NOT_DEFINED Handle = 0x8029011F - TPMAPI_E_NV_BITS_NOT_READY Handle = 0x80290120 - TPMAPI_E_SEALING_KEY_NOT_AVAILABLE Handle = 0x80290121 - TPMAPI_E_NO_AUTHORIZATION_CHAIN_FOUND Handle = 0x80290122 - TPMAPI_E_SVN_COUNTER_NOT_AVAILABLE Handle = 0x80290123 - TPMAPI_E_OWNER_AUTH_NOT_NULL Handle = 0x80290124 - TPMAPI_E_ENDORSEMENT_AUTH_NOT_NULL Handle = 0x80290125 - TPMAPI_E_AUTHORIZATION_REVOKED Handle = 0x80290126 - TPMAPI_E_MALFORMED_AUTHORIZATION_KEY Handle = 0x80290127 - TPMAPI_E_AUTHORIZING_KEY_NOT_SUPPORTED Handle = 0x80290128 - TPMAPI_E_INVALID_AUTHORIZATION_SIGNATURE Handle = 0x80290129 - TPMAPI_E_MALFORMED_AUTHORIZATION_POLICY Handle = 0x8029012A - TPMAPI_E_MALFORMED_AUTHORIZATION_OTHER Handle = 0x8029012B - TPMAPI_E_SEALING_KEY_CHANGED Handle = 0x8029012C - TBSIMP_E_BUFFER_TOO_SMALL Handle = 0x80290200 - TBSIMP_E_CLEANUP_FAILED Handle = 0x80290201 - TBSIMP_E_INVALID_CONTEXT_HANDLE Handle = 0x80290202 - TBSIMP_E_INVALID_CONTEXT_PARAM Handle = 0x80290203 - TBSIMP_E_TPM_ERROR Handle = 0x80290204 - TBSIMP_E_HASH_BAD_KEY Handle = 0x80290205 - TBSIMP_E_DUPLICATE_VHANDLE Handle = 0x80290206 - TBSIMP_E_INVALID_OUTPUT_POINTER Handle = 0x80290207 - TBSIMP_E_INVALID_PARAMETER Handle = 0x80290208 - TBSIMP_E_RPC_INIT_FAILED Handle = 0x80290209 - TBSIMP_E_SCHEDULER_NOT_RUNNING Handle = 0x8029020A - TBSIMP_E_COMMAND_CANCELED Handle = 0x8029020B - TBSIMP_E_OUT_OF_MEMORY Handle = 0x8029020C - TBSIMP_E_LIST_NO_MORE_ITEMS Handle = 0x8029020D - TBSIMP_E_LIST_NOT_FOUND Handle = 0x8029020E - TBSIMP_E_NOT_ENOUGH_SPACE Handle = 0x8029020F - TBSIMP_E_NOT_ENOUGH_TPM_CONTEXTS Handle = 0x80290210 - TBSIMP_E_COMMAND_FAILED Handle = 0x80290211 - TBSIMP_E_UNKNOWN_ORDINAL Handle = 0x80290212 - TBSIMP_E_RESOURCE_EXPIRED Handle = 0x80290213 - TBSIMP_E_INVALID_RESOURCE Handle = 0x80290214 - TBSIMP_E_NOTHING_TO_UNLOAD Handle = 0x80290215 - TBSIMP_E_HASH_TABLE_FULL Handle = 0x80290216 - TBSIMP_E_TOO_MANY_TBS_CONTEXTS Handle = 0x80290217 - TBSIMP_E_TOO_MANY_RESOURCES Handle = 0x80290218 - TBSIMP_E_PPI_NOT_SUPPORTED Handle = 0x80290219 - TBSIMP_E_TPM_INCOMPATIBLE Handle = 0x8029021A - TBSIMP_E_NO_EVENT_LOG Handle = 0x8029021B - TPM_E_PPI_ACPI_FAILURE Handle = 0x80290300 - TPM_E_PPI_USER_ABORT Handle = 0x80290301 - TPM_E_PPI_BIOS_FAILURE Handle = 0x80290302 - TPM_E_PPI_NOT_SUPPORTED Handle = 0x80290303 - TPM_E_PPI_BLOCKED_IN_BIOS 
Handle = 0x80290304 - TPM_E_PCP_ERROR_MASK Handle = 0x80290400 - TPM_E_PCP_DEVICE_NOT_READY Handle = 0x80290401 - TPM_E_PCP_INVALID_HANDLE Handle = 0x80290402 - TPM_E_PCP_INVALID_PARAMETER Handle = 0x80290403 - TPM_E_PCP_FLAG_NOT_SUPPORTED Handle = 0x80290404 - TPM_E_PCP_NOT_SUPPORTED Handle = 0x80290405 - TPM_E_PCP_BUFFER_TOO_SMALL Handle = 0x80290406 - TPM_E_PCP_INTERNAL_ERROR Handle = 0x80290407 - TPM_E_PCP_AUTHENTICATION_FAILED Handle = 0x80290408 - TPM_E_PCP_AUTHENTICATION_IGNORED Handle = 0x80290409 - TPM_E_PCP_POLICY_NOT_FOUND Handle = 0x8029040A - TPM_E_PCP_PROFILE_NOT_FOUND Handle = 0x8029040B - TPM_E_PCP_VALIDATION_FAILED Handle = 0x8029040C - TPM_E_PCP_WRONG_PARENT Handle = 0x8029040E - TPM_E_KEY_NOT_LOADED Handle = 0x8029040F - TPM_E_NO_KEY_CERTIFICATION Handle = 0x80290410 - TPM_E_KEY_NOT_FINALIZED Handle = 0x80290411 - TPM_E_ATTESTATION_CHALLENGE_NOT_SET Handle = 0x80290412 - TPM_E_NOT_PCR_BOUND Handle = 0x80290413 - TPM_E_KEY_ALREADY_FINALIZED Handle = 0x80290414 - TPM_E_KEY_USAGE_POLICY_NOT_SUPPORTED Handle = 0x80290415 - TPM_E_KEY_USAGE_POLICY_INVALID Handle = 0x80290416 - TPM_E_SOFT_KEY_ERROR Handle = 0x80290417 - TPM_E_KEY_NOT_AUTHENTICATED Handle = 0x80290418 - TPM_E_PCP_KEY_NOT_AIK Handle = 0x80290419 - TPM_E_KEY_NOT_SIGNING_KEY Handle = 0x8029041A - TPM_E_LOCKED_OUT Handle = 0x8029041B - TPM_E_CLAIM_TYPE_NOT_SUPPORTED Handle = 0x8029041C - TPM_E_VERSION_NOT_SUPPORTED Handle = 0x8029041D - TPM_E_BUFFER_LENGTH_MISMATCH Handle = 0x8029041E - TPM_E_PCP_IFX_RSA_KEY_CREATION_BLOCKED Handle = 0x8029041F - TPM_E_PCP_TICKET_MISSING Handle = 0x80290420 - TPM_E_PCP_RAW_POLICY_NOT_SUPPORTED Handle = 0x80290421 - TPM_E_PCP_KEY_HANDLE_INVALIDATED Handle = 0x80290422 - TPM_E_PCP_UNSUPPORTED_PSS_SALT Handle = 0x40290423 - TPM_E_ZERO_EXHAUST_ENABLED Handle = 0x80290500 - PLA_E_DCS_NOT_FOUND Handle = 0x80300002 - PLA_E_DCS_IN_USE Handle = 0x803000AA - PLA_E_TOO_MANY_FOLDERS Handle = 0x80300045 - PLA_E_NO_MIN_DISK Handle = 0x80300070 - PLA_E_DCS_ALREADY_EXISTS Handle = 0x803000B7 - PLA_S_PROPERTY_IGNORED Handle = 0x00300100 - PLA_E_PROPERTY_CONFLICT Handle = 0x80300101 - PLA_E_DCS_SINGLETON_REQUIRED Handle = 0x80300102 - PLA_E_CREDENTIALS_REQUIRED Handle = 0x80300103 - PLA_E_DCS_NOT_RUNNING Handle = 0x80300104 - PLA_E_CONFLICT_INCL_EXCL_API Handle = 0x80300105 - PLA_E_NETWORK_EXE_NOT_VALID Handle = 0x80300106 - PLA_E_EXE_ALREADY_CONFIGURED Handle = 0x80300107 - PLA_E_EXE_PATH_NOT_VALID Handle = 0x80300108 - PLA_E_DC_ALREADY_EXISTS Handle = 0x80300109 - PLA_E_DCS_START_WAIT_TIMEOUT Handle = 0x8030010A - PLA_E_DC_START_WAIT_TIMEOUT Handle = 0x8030010B - PLA_E_REPORT_WAIT_TIMEOUT Handle = 0x8030010C - PLA_E_NO_DUPLICATES Handle = 0x8030010D - PLA_E_EXE_FULL_PATH_REQUIRED Handle = 0x8030010E - PLA_E_INVALID_SESSION_NAME Handle = 0x8030010F - PLA_E_PLA_CHANNEL_NOT_ENABLED Handle = 0x80300110 - PLA_E_TASKSCHED_CHANNEL_NOT_ENABLED Handle = 0x80300111 - PLA_E_RULES_MANAGER_FAILED Handle = 0x80300112 - PLA_E_CABAPI_FAILURE Handle = 0x80300113 - FVE_E_LOCKED_VOLUME Handle = 0x80310000 - FVE_E_NOT_ENCRYPTED Handle = 0x80310001 - FVE_E_NO_TPM_BIOS Handle = 0x80310002 - FVE_E_NO_MBR_METRIC Handle = 0x80310003 - FVE_E_NO_BOOTSECTOR_METRIC Handle = 0x80310004 - FVE_E_NO_BOOTMGR_METRIC Handle = 0x80310005 - FVE_E_WRONG_BOOTMGR Handle = 0x80310006 - FVE_E_SECURE_KEY_REQUIRED Handle = 0x80310007 - FVE_E_NOT_ACTIVATED Handle = 0x80310008 - FVE_E_ACTION_NOT_ALLOWED Handle = 0x80310009 - FVE_E_AD_SCHEMA_NOT_INSTALLED Handle = 0x8031000A - FVE_E_AD_INVALID_DATATYPE Handle = 0x8031000B - 
FVE_E_AD_INVALID_DATASIZE Handle = 0x8031000C - FVE_E_AD_NO_VALUES Handle = 0x8031000D - FVE_E_AD_ATTR_NOT_SET Handle = 0x8031000E - FVE_E_AD_GUID_NOT_FOUND Handle = 0x8031000F - FVE_E_BAD_INFORMATION Handle = 0x80310010 - FVE_E_TOO_SMALL Handle = 0x80310011 - FVE_E_SYSTEM_VOLUME Handle = 0x80310012 - FVE_E_FAILED_WRONG_FS Handle = 0x80310013 - FVE_E_BAD_PARTITION_SIZE Handle = 0x80310014 - FVE_E_NOT_SUPPORTED Handle = 0x80310015 - FVE_E_BAD_DATA Handle = 0x80310016 - FVE_E_VOLUME_NOT_BOUND Handle = 0x80310017 - FVE_E_TPM_NOT_OWNED Handle = 0x80310018 - FVE_E_NOT_DATA_VOLUME Handle = 0x80310019 - FVE_E_AD_INSUFFICIENT_BUFFER Handle = 0x8031001A - FVE_E_CONV_READ Handle = 0x8031001B - FVE_E_CONV_WRITE Handle = 0x8031001C - FVE_E_KEY_REQUIRED Handle = 0x8031001D - FVE_E_CLUSTERING_NOT_SUPPORTED Handle = 0x8031001E - FVE_E_VOLUME_BOUND_ALREADY Handle = 0x8031001F - FVE_E_OS_NOT_PROTECTED Handle = 0x80310020 - FVE_E_PROTECTION_DISABLED Handle = 0x80310021 - FVE_E_RECOVERY_KEY_REQUIRED Handle = 0x80310022 - FVE_E_FOREIGN_VOLUME Handle = 0x80310023 - FVE_E_OVERLAPPED_UPDATE Handle = 0x80310024 - FVE_E_TPM_SRK_AUTH_NOT_ZERO Handle = 0x80310025 - FVE_E_FAILED_SECTOR_SIZE Handle = 0x80310026 - FVE_E_FAILED_AUTHENTICATION Handle = 0x80310027 - FVE_E_NOT_OS_VOLUME Handle = 0x80310028 - FVE_E_AUTOUNLOCK_ENABLED Handle = 0x80310029 - FVE_E_WRONG_BOOTSECTOR Handle = 0x8031002A - FVE_E_WRONG_SYSTEM_FS Handle = 0x8031002B - FVE_E_POLICY_PASSWORD_REQUIRED Handle = 0x8031002C - FVE_E_CANNOT_SET_FVEK_ENCRYPTED Handle = 0x8031002D - FVE_E_CANNOT_ENCRYPT_NO_KEY Handle = 0x8031002E - FVE_E_BOOTABLE_CDDVD Handle = 0x80310030 - FVE_E_PROTECTOR_EXISTS Handle = 0x80310031 - FVE_E_RELATIVE_PATH Handle = 0x80310032 - FVE_E_PROTECTOR_NOT_FOUND Handle = 0x80310033 - FVE_E_INVALID_KEY_FORMAT Handle = 0x80310034 - FVE_E_INVALID_PASSWORD_FORMAT Handle = 0x80310035 - FVE_E_FIPS_RNG_CHECK_FAILED Handle = 0x80310036 - FVE_E_FIPS_PREVENTS_RECOVERY_PASSWORD Handle = 0x80310037 - FVE_E_FIPS_PREVENTS_EXTERNAL_KEY_EXPORT Handle = 0x80310038 - FVE_E_NOT_DECRYPTED Handle = 0x80310039 - FVE_E_INVALID_PROTECTOR_TYPE Handle = 0x8031003A - FVE_E_NO_PROTECTORS_TO_TEST Handle = 0x8031003B - FVE_E_KEYFILE_NOT_FOUND Handle = 0x8031003C - FVE_E_KEYFILE_INVALID Handle = 0x8031003D - FVE_E_KEYFILE_NO_VMK Handle = 0x8031003E - FVE_E_TPM_DISABLED Handle = 0x8031003F - FVE_E_NOT_ALLOWED_IN_SAFE_MODE Handle = 0x80310040 - FVE_E_TPM_INVALID_PCR Handle = 0x80310041 - FVE_E_TPM_NO_VMK Handle = 0x80310042 - FVE_E_PIN_INVALID Handle = 0x80310043 - FVE_E_AUTH_INVALID_APPLICATION Handle = 0x80310044 - FVE_E_AUTH_INVALID_CONFIG Handle = 0x80310045 - FVE_E_FIPS_DISABLE_PROTECTION_NOT_ALLOWED Handle = 0x80310046 - FVE_E_FS_NOT_EXTENDED Handle = 0x80310047 - FVE_E_FIRMWARE_TYPE_NOT_SUPPORTED Handle = 0x80310048 - FVE_E_NO_LICENSE Handle = 0x80310049 - FVE_E_NOT_ON_STACK Handle = 0x8031004A - FVE_E_FS_MOUNTED Handle = 0x8031004B - FVE_E_TOKEN_NOT_IMPERSONATED Handle = 0x8031004C - FVE_E_DRY_RUN_FAILED Handle = 0x8031004D - FVE_E_REBOOT_REQUIRED Handle = 0x8031004E - FVE_E_DEBUGGER_ENABLED Handle = 0x8031004F - FVE_E_RAW_ACCESS Handle = 0x80310050 - FVE_E_RAW_BLOCKED Handle = 0x80310051 - FVE_E_BCD_APPLICATIONS_PATH_INCORRECT Handle = 0x80310052 - FVE_E_NOT_ALLOWED_IN_VERSION Handle = 0x80310053 - FVE_E_NO_AUTOUNLOCK_MASTER_KEY Handle = 0x80310054 - FVE_E_MOR_FAILED Handle = 0x80310055 - FVE_E_HIDDEN_VOLUME Handle = 0x80310056 - FVE_E_TRANSIENT_STATE Handle = 0x80310057 - FVE_E_PUBKEY_NOT_ALLOWED Handle = 0x80310058 - FVE_E_VOLUME_HANDLE_OPEN Handle = 
0x80310059 - FVE_E_NO_FEATURE_LICENSE Handle = 0x8031005A - FVE_E_INVALID_STARTUP_OPTIONS Handle = 0x8031005B - FVE_E_POLICY_RECOVERY_PASSWORD_NOT_ALLOWED Handle = 0x8031005C - FVE_E_POLICY_RECOVERY_PASSWORD_REQUIRED Handle = 0x8031005D - FVE_E_POLICY_RECOVERY_KEY_NOT_ALLOWED Handle = 0x8031005E - FVE_E_POLICY_RECOVERY_KEY_REQUIRED Handle = 0x8031005F - FVE_E_POLICY_STARTUP_PIN_NOT_ALLOWED Handle = 0x80310060 - FVE_E_POLICY_STARTUP_PIN_REQUIRED Handle = 0x80310061 - FVE_E_POLICY_STARTUP_KEY_NOT_ALLOWED Handle = 0x80310062 - FVE_E_POLICY_STARTUP_KEY_REQUIRED Handle = 0x80310063 - FVE_E_POLICY_STARTUP_PIN_KEY_NOT_ALLOWED Handle = 0x80310064 - FVE_E_POLICY_STARTUP_PIN_KEY_REQUIRED Handle = 0x80310065 - FVE_E_POLICY_STARTUP_TPM_NOT_ALLOWED Handle = 0x80310066 - FVE_E_POLICY_STARTUP_TPM_REQUIRED Handle = 0x80310067 - FVE_E_POLICY_INVALID_PIN_LENGTH Handle = 0x80310068 - FVE_E_KEY_PROTECTOR_NOT_SUPPORTED Handle = 0x80310069 - FVE_E_POLICY_PASSPHRASE_NOT_ALLOWED Handle = 0x8031006A - FVE_E_POLICY_PASSPHRASE_REQUIRED Handle = 0x8031006B - FVE_E_FIPS_PREVENTS_PASSPHRASE Handle = 0x8031006C - FVE_E_OS_VOLUME_PASSPHRASE_NOT_ALLOWED Handle = 0x8031006D - FVE_E_INVALID_BITLOCKER_OID Handle = 0x8031006E - FVE_E_VOLUME_TOO_SMALL Handle = 0x8031006F - FVE_E_DV_NOT_SUPPORTED_ON_FS Handle = 0x80310070 - FVE_E_DV_NOT_ALLOWED_BY_GP Handle = 0x80310071 - FVE_E_POLICY_USER_CERTIFICATE_NOT_ALLOWED Handle = 0x80310072 - FVE_E_POLICY_USER_CERTIFICATE_REQUIRED Handle = 0x80310073 - FVE_E_POLICY_USER_CERT_MUST_BE_HW Handle = 0x80310074 - FVE_E_POLICY_USER_CONFIGURE_FDV_AUTOUNLOCK_NOT_ALLOWED Handle = 0x80310075 - FVE_E_POLICY_USER_CONFIGURE_RDV_AUTOUNLOCK_NOT_ALLOWED Handle = 0x80310076 - FVE_E_POLICY_USER_CONFIGURE_RDV_NOT_ALLOWED Handle = 0x80310077 - FVE_E_POLICY_USER_ENABLE_RDV_NOT_ALLOWED Handle = 0x80310078 - FVE_E_POLICY_USER_DISABLE_RDV_NOT_ALLOWED Handle = 0x80310079 - FVE_E_POLICY_INVALID_PASSPHRASE_LENGTH Handle = 0x80310080 - FVE_E_POLICY_PASSPHRASE_TOO_SIMPLE Handle = 0x80310081 - FVE_E_RECOVERY_PARTITION Handle = 0x80310082 - FVE_E_POLICY_CONFLICT_FDV_RK_OFF_AUK_ON Handle = 0x80310083 - FVE_E_POLICY_CONFLICT_RDV_RK_OFF_AUK_ON Handle = 0x80310084 - FVE_E_NON_BITLOCKER_OID Handle = 0x80310085 - FVE_E_POLICY_PROHIBITS_SELFSIGNED Handle = 0x80310086 - FVE_E_POLICY_CONFLICT_RO_AND_STARTUP_KEY_REQUIRED Handle = 0x80310087 - FVE_E_CONV_RECOVERY_FAILED Handle = 0x80310088 - FVE_E_VIRTUALIZED_SPACE_TOO_BIG Handle = 0x80310089 - FVE_E_POLICY_CONFLICT_OSV_RP_OFF_ADB_ON Handle = 0x80310090 - FVE_E_POLICY_CONFLICT_FDV_RP_OFF_ADB_ON Handle = 0x80310091 - FVE_E_POLICY_CONFLICT_RDV_RP_OFF_ADB_ON Handle = 0x80310092 - FVE_E_NON_BITLOCKER_KU Handle = 0x80310093 - FVE_E_PRIVATEKEY_AUTH_FAILED Handle = 0x80310094 - FVE_E_REMOVAL_OF_DRA_FAILED Handle = 0x80310095 - FVE_E_OPERATION_NOT_SUPPORTED_ON_VISTA_VOLUME Handle = 0x80310096 - FVE_E_CANT_LOCK_AUTOUNLOCK_ENABLED_VOLUME Handle = 0x80310097 - FVE_E_FIPS_HASH_KDF_NOT_ALLOWED Handle = 0x80310098 - FVE_E_ENH_PIN_INVALID Handle = 0x80310099 - FVE_E_INVALID_PIN_CHARS Handle = 0x8031009A - FVE_E_INVALID_DATUM_TYPE Handle = 0x8031009B - FVE_E_EFI_ONLY Handle = 0x8031009C - FVE_E_MULTIPLE_NKP_CERTS Handle = 0x8031009D - FVE_E_REMOVAL_OF_NKP_FAILED Handle = 0x8031009E - FVE_E_INVALID_NKP_CERT Handle = 0x8031009F - FVE_E_NO_EXISTING_PIN Handle = 0x803100A0 - FVE_E_PROTECTOR_CHANGE_PIN_MISMATCH Handle = 0x803100A1 - FVE_E_PIN_PROTECTOR_CHANGE_BY_STD_USER_DISALLOWED Handle = 0x803100A2 - FVE_E_PROTECTOR_CHANGE_MAX_PIN_CHANGE_ATTEMPTS_REACHED Handle = 0x803100A3 - 
FVE_E_POLICY_PASSPHRASE_REQUIRES_ASCII Handle = 0x803100A4 - FVE_E_FULL_ENCRYPTION_NOT_ALLOWED_ON_TP_STORAGE Handle = 0x803100A5 - FVE_E_WIPE_NOT_ALLOWED_ON_TP_STORAGE Handle = 0x803100A6 - FVE_E_KEY_LENGTH_NOT_SUPPORTED_BY_EDRIVE Handle = 0x803100A7 - FVE_E_NO_EXISTING_PASSPHRASE Handle = 0x803100A8 - FVE_E_PROTECTOR_CHANGE_PASSPHRASE_MISMATCH Handle = 0x803100A9 - FVE_E_PASSPHRASE_TOO_LONG Handle = 0x803100AA - FVE_E_NO_PASSPHRASE_WITH_TPM Handle = 0x803100AB - FVE_E_NO_TPM_WITH_PASSPHRASE Handle = 0x803100AC - FVE_E_NOT_ALLOWED_ON_CSV_STACK Handle = 0x803100AD - FVE_E_NOT_ALLOWED_ON_CLUSTER Handle = 0x803100AE - FVE_E_EDRIVE_NO_FAILOVER_TO_SW Handle = 0x803100AF - FVE_E_EDRIVE_BAND_IN_USE Handle = 0x803100B0 - FVE_E_EDRIVE_DISALLOWED_BY_GP Handle = 0x803100B1 - FVE_E_EDRIVE_INCOMPATIBLE_VOLUME Handle = 0x803100B2 - FVE_E_NOT_ALLOWED_TO_UPGRADE_WHILE_CONVERTING Handle = 0x803100B3 - FVE_E_EDRIVE_DV_NOT_SUPPORTED Handle = 0x803100B4 - FVE_E_NO_PREBOOT_KEYBOARD_DETECTED Handle = 0x803100B5 - FVE_E_NO_PREBOOT_KEYBOARD_OR_WINRE_DETECTED Handle = 0x803100B6 - FVE_E_POLICY_REQUIRES_STARTUP_PIN_ON_TOUCH_DEVICE Handle = 0x803100B7 - FVE_E_POLICY_REQUIRES_RECOVERY_PASSWORD_ON_TOUCH_DEVICE Handle = 0x803100B8 - FVE_E_WIPE_CANCEL_NOT_APPLICABLE Handle = 0x803100B9 - FVE_E_SECUREBOOT_DISABLED Handle = 0x803100BA - FVE_E_SECUREBOOT_CONFIGURATION_INVALID Handle = 0x803100BB - FVE_E_EDRIVE_DRY_RUN_FAILED Handle = 0x803100BC - FVE_E_SHADOW_COPY_PRESENT Handle = 0x803100BD - FVE_E_POLICY_INVALID_ENHANCED_BCD_SETTINGS Handle = 0x803100BE - FVE_E_EDRIVE_INCOMPATIBLE_FIRMWARE Handle = 0x803100BF - FVE_E_PROTECTOR_CHANGE_MAX_PASSPHRASE_CHANGE_ATTEMPTS_REACHED Handle = 0x803100C0 - FVE_E_PASSPHRASE_PROTECTOR_CHANGE_BY_STD_USER_DISALLOWED Handle = 0x803100C1 - FVE_E_LIVEID_ACCOUNT_SUSPENDED Handle = 0x803100C2 - FVE_E_LIVEID_ACCOUNT_BLOCKED Handle = 0x803100C3 - FVE_E_NOT_PROVISIONED_ON_ALL_VOLUMES Handle = 0x803100C4 - FVE_E_DE_FIXED_DATA_NOT_SUPPORTED Handle = 0x803100C5 - FVE_E_DE_HARDWARE_NOT_COMPLIANT Handle = 0x803100C6 - FVE_E_DE_WINRE_NOT_CONFIGURED Handle = 0x803100C7 - FVE_E_DE_PROTECTION_SUSPENDED Handle = 0x803100C8 - FVE_E_DE_OS_VOLUME_NOT_PROTECTED Handle = 0x803100C9 - FVE_E_DE_DEVICE_LOCKEDOUT Handle = 0x803100CA - FVE_E_DE_PROTECTION_NOT_YET_ENABLED Handle = 0x803100CB - FVE_E_INVALID_PIN_CHARS_DETAILED Handle = 0x803100CC - FVE_E_DEVICE_LOCKOUT_COUNTER_UNAVAILABLE Handle = 0x803100CD - FVE_E_DEVICELOCKOUT_COUNTER_MISMATCH Handle = 0x803100CE - FVE_E_BUFFER_TOO_LARGE Handle = 0x803100CF - FVE_E_NO_SUCH_CAPABILITY_ON_TARGET Handle = 0x803100D0 - FVE_E_DE_PREVENTED_FOR_OS Handle = 0x803100D1 - FVE_E_DE_VOLUME_OPTED_OUT Handle = 0x803100D2 - FVE_E_DE_VOLUME_NOT_SUPPORTED Handle = 0x803100D3 - FVE_E_EOW_NOT_SUPPORTED_IN_VERSION Handle = 0x803100D4 - FVE_E_ADBACKUP_NOT_ENABLED Handle = 0x803100D5 - FVE_E_VOLUME_EXTEND_PREVENTS_EOW_DECRYPT Handle = 0x803100D6 - FVE_E_NOT_DE_VOLUME Handle = 0x803100D7 - FVE_E_PROTECTION_CANNOT_BE_DISABLED Handle = 0x803100D8 - FVE_E_OSV_KSR_NOT_ALLOWED Handle = 0x803100D9 - FVE_E_AD_BACKUP_REQUIRED_POLICY_NOT_SET_OS_DRIVE Handle = 0x803100DA - FVE_E_AD_BACKUP_REQUIRED_POLICY_NOT_SET_FIXED_DRIVE Handle = 0x803100DB - FVE_E_AD_BACKUP_REQUIRED_POLICY_NOT_SET_REMOVABLE_DRIVE Handle = 0x803100DC - FVE_E_KEY_ROTATION_NOT_SUPPORTED Handle = 0x803100DD - FVE_E_EXECUTE_REQUEST_SENT_TOO_SOON Handle = 0x803100DE - FVE_E_KEY_ROTATION_NOT_ENABLED Handle = 0x803100DF - FVE_E_DEVICE_NOT_JOINED Handle = 0x803100E0 - FWP_E_CALLOUT_NOT_FOUND Handle = 0x80320001 - 
FWP_E_CONDITION_NOT_FOUND Handle = 0x80320002 - FWP_E_FILTER_NOT_FOUND Handle = 0x80320003 - FWP_E_LAYER_NOT_FOUND Handle = 0x80320004 - FWP_E_PROVIDER_NOT_FOUND Handle = 0x80320005 - FWP_E_PROVIDER_CONTEXT_NOT_FOUND Handle = 0x80320006 - FWP_E_SUBLAYER_NOT_FOUND Handle = 0x80320007 - FWP_E_NOT_FOUND Handle = 0x80320008 - FWP_E_ALREADY_EXISTS Handle = 0x80320009 - FWP_E_IN_USE Handle = 0x8032000A - FWP_E_DYNAMIC_SESSION_IN_PROGRESS Handle = 0x8032000B - FWP_E_WRONG_SESSION Handle = 0x8032000C - FWP_E_NO_TXN_IN_PROGRESS Handle = 0x8032000D - FWP_E_TXN_IN_PROGRESS Handle = 0x8032000E - FWP_E_TXN_ABORTED Handle = 0x8032000F - FWP_E_SESSION_ABORTED Handle = 0x80320010 - FWP_E_INCOMPATIBLE_TXN Handle = 0x80320011 - FWP_E_TIMEOUT Handle = 0x80320012 - FWP_E_NET_EVENTS_DISABLED Handle = 0x80320013 - FWP_E_INCOMPATIBLE_LAYER Handle = 0x80320014 - FWP_E_KM_CLIENTS_ONLY Handle = 0x80320015 - FWP_E_LIFETIME_MISMATCH Handle = 0x80320016 - FWP_E_BUILTIN_OBJECT Handle = 0x80320017 - FWP_E_TOO_MANY_CALLOUTS Handle = 0x80320018 - FWP_E_NOTIFICATION_DROPPED Handle = 0x80320019 - FWP_E_TRAFFIC_MISMATCH Handle = 0x8032001A - FWP_E_INCOMPATIBLE_SA_STATE Handle = 0x8032001B - FWP_E_NULL_POINTER Handle = 0x8032001C - FWP_E_INVALID_ENUMERATOR Handle = 0x8032001D - FWP_E_INVALID_FLAGS Handle = 0x8032001E - FWP_E_INVALID_NET_MASK Handle = 0x8032001F - FWP_E_INVALID_RANGE Handle = 0x80320020 - FWP_E_INVALID_INTERVAL Handle = 0x80320021 - FWP_E_ZERO_LENGTH_ARRAY Handle = 0x80320022 - FWP_E_NULL_DISPLAY_NAME Handle = 0x80320023 - FWP_E_INVALID_ACTION_TYPE Handle = 0x80320024 - FWP_E_INVALID_WEIGHT Handle = 0x80320025 - FWP_E_MATCH_TYPE_MISMATCH Handle = 0x80320026 - FWP_E_TYPE_MISMATCH Handle = 0x80320027 - FWP_E_OUT_OF_BOUNDS Handle = 0x80320028 - FWP_E_RESERVED Handle = 0x80320029 - FWP_E_DUPLICATE_CONDITION Handle = 0x8032002A - FWP_E_DUPLICATE_KEYMOD Handle = 0x8032002B - FWP_E_ACTION_INCOMPATIBLE_WITH_LAYER Handle = 0x8032002C - FWP_E_ACTION_INCOMPATIBLE_WITH_SUBLAYER Handle = 0x8032002D - FWP_E_CONTEXT_INCOMPATIBLE_WITH_LAYER Handle = 0x8032002E - FWP_E_CONTEXT_INCOMPATIBLE_WITH_CALLOUT Handle = 0x8032002F - FWP_E_INCOMPATIBLE_AUTH_METHOD Handle = 0x80320030 - FWP_E_INCOMPATIBLE_DH_GROUP Handle = 0x80320031 - FWP_E_EM_NOT_SUPPORTED Handle = 0x80320032 - FWP_E_NEVER_MATCH Handle = 0x80320033 - FWP_E_PROVIDER_CONTEXT_MISMATCH Handle = 0x80320034 - FWP_E_INVALID_PARAMETER Handle = 0x80320035 - FWP_E_TOO_MANY_SUBLAYERS Handle = 0x80320036 - FWP_E_CALLOUT_NOTIFICATION_FAILED Handle = 0x80320037 - FWP_E_INVALID_AUTH_TRANSFORM Handle = 0x80320038 - FWP_E_INVALID_CIPHER_TRANSFORM Handle = 0x80320039 - FWP_E_INCOMPATIBLE_CIPHER_TRANSFORM Handle = 0x8032003A - FWP_E_INVALID_TRANSFORM_COMBINATION Handle = 0x8032003B - FWP_E_DUPLICATE_AUTH_METHOD Handle = 0x8032003C - FWP_E_INVALID_TUNNEL_ENDPOINT Handle = 0x8032003D - FWP_E_L2_DRIVER_NOT_READY Handle = 0x8032003E - FWP_E_KEY_DICTATOR_ALREADY_REGISTERED Handle = 0x8032003F - FWP_E_KEY_DICTATION_INVALID_KEYING_MATERIAL Handle = 0x80320040 - FWP_E_CONNECTIONS_DISABLED Handle = 0x80320041 - FWP_E_INVALID_DNS_NAME Handle = 0x80320042 - FWP_E_STILL_ON Handle = 0x80320043 - FWP_E_IKEEXT_NOT_RUNNING Handle = 0x80320044 - FWP_E_DROP_NOICMP Handle = 0x80320104 - WS_S_ASYNC Handle = 0x003D0000 - WS_S_END Handle = 0x003D0001 - WS_E_INVALID_FORMAT Handle = 0x803D0000 - WS_E_OBJECT_FAULTED Handle = 0x803D0001 - WS_E_NUMERIC_OVERFLOW Handle = 0x803D0002 - WS_E_INVALID_OPERATION Handle = 0x803D0003 - WS_E_OPERATION_ABORTED Handle = 0x803D0004 - WS_E_ENDPOINT_ACCESS_DENIED Handle = 
0x803D0005 - WS_E_OPERATION_TIMED_OUT Handle = 0x803D0006 - WS_E_OPERATION_ABANDONED Handle = 0x803D0007 - WS_E_QUOTA_EXCEEDED Handle = 0x803D0008 - WS_E_NO_TRANSLATION_AVAILABLE Handle = 0x803D0009 - WS_E_SECURITY_VERIFICATION_FAILURE Handle = 0x803D000A - WS_E_ADDRESS_IN_USE Handle = 0x803D000B - WS_E_ADDRESS_NOT_AVAILABLE Handle = 0x803D000C - WS_E_ENDPOINT_NOT_FOUND Handle = 0x803D000D - WS_E_ENDPOINT_NOT_AVAILABLE Handle = 0x803D000E - WS_E_ENDPOINT_FAILURE Handle = 0x803D000F - WS_E_ENDPOINT_UNREACHABLE Handle = 0x803D0010 - WS_E_ENDPOINT_ACTION_NOT_SUPPORTED Handle = 0x803D0011 - WS_E_ENDPOINT_TOO_BUSY Handle = 0x803D0012 - WS_E_ENDPOINT_FAULT_RECEIVED Handle = 0x803D0013 - WS_E_ENDPOINT_DISCONNECTED Handle = 0x803D0014 - WS_E_PROXY_FAILURE Handle = 0x803D0015 - WS_E_PROXY_ACCESS_DENIED Handle = 0x803D0016 - WS_E_NOT_SUPPORTED Handle = 0x803D0017 - WS_E_PROXY_REQUIRES_BASIC_AUTH Handle = 0x803D0018 - WS_E_PROXY_REQUIRES_DIGEST_AUTH Handle = 0x803D0019 - WS_E_PROXY_REQUIRES_NTLM_AUTH Handle = 0x803D001A - WS_E_PROXY_REQUIRES_NEGOTIATE_AUTH Handle = 0x803D001B - WS_E_SERVER_REQUIRES_BASIC_AUTH Handle = 0x803D001C - WS_E_SERVER_REQUIRES_DIGEST_AUTH Handle = 0x803D001D - WS_E_SERVER_REQUIRES_NTLM_AUTH Handle = 0x803D001E - WS_E_SERVER_REQUIRES_NEGOTIATE_AUTH Handle = 0x803D001F - WS_E_INVALID_ENDPOINT_URL Handle = 0x803D0020 - WS_E_OTHER Handle = 0x803D0021 - WS_E_SECURITY_TOKEN_EXPIRED Handle = 0x803D0022 - WS_E_SECURITY_SYSTEM_FAILURE Handle = 0x803D0023 - ERROR_NDIS_INTERFACE_CLOSING syscall.Errno = 0x80340002 - ERROR_NDIS_BAD_VERSION syscall.Errno = 0x80340004 - ERROR_NDIS_BAD_CHARACTERISTICS syscall.Errno = 0x80340005 - ERROR_NDIS_ADAPTER_NOT_FOUND syscall.Errno = 0x80340006 - ERROR_NDIS_OPEN_FAILED syscall.Errno = 0x80340007 - ERROR_NDIS_DEVICE_FAILED syscall.Errno = 0x80340008 - ERROR_NDIS_MULTICAST_FULL syscall.Errno = 0x80340009 - ERROR_NDIS_MULTICAST_EXISTS syscall.Errno = 0x8034000A - ERROR_NDIS_MULTICAST_NOT_FOUND syscall.Errno = 0x8034000B - ERROR_NDIS_REQUEST_ABORTED syscall.Errno = 0x8034000C - ERROR_NDIS_RESET_IN_PROGRESS syscall.Errno = 0x8034000D - ERROR_NDIS_NOT_SUPPORTED syscall.Errno = 0x803400BB - ERROR_NDIS_INVALID_PACKET syscall.Errno = 0x8034000F - ERROR_NDIS_ADAPTER_NOT_READY syscall.Errno = 0x80340011 - ERROR_NDIS_INVALID_LENGTH syscall.Errno = 0x80340014 - ERROR_NDIS_INVALID_DATA syscall.Errno = 0x80340015 - ERROR_NDIS_BUFFER_TOO_SHORT syscall.Errno = 0x80340016 - ERROR_NDIS_INVALID_OID syscall.Errno = 0x80340017 - ERROR_NDIS_ADAPTER_REMOVED syscall.Errno = 0x80340018 - ERROR_NDIS_UNSUPPORTED_MEDIA syscall.Errno = 0x80340019 - ERROR_NDIS_GROUP_ADDRESS_IN_USE syscall.Errno = 0x8034001A - ERROR_NDIS_FILE_NOT_FOUND syscall.Errno = 0x8034001B - ERROR_NDIS_ERROR_READING_FILE syscall.Errno = 0x8034001C - ERROR_NDIS_ALREADY_MAPPED syscall.Errno = 0x8034001D - ERROR_NDIS_RESOURCE_CONFLICT syscall.Errno = 0x8034001E - ERROR_NDIS_MEDIA_DISCONNECTED syscall.Errno = 0x8034001F - ERROR_NDIS_INVALID_ADDRESS syscall.Errno = 0x80340022 - ERROR_NDIS_INVALID_DEVICE_REQUEST syscall.Errno = 0x80340010 - ERROR_NDIS_PAUSED syscall.Errno = 0x8034002A - ERROR_NDIS_INTERFACE_NOT_FOUND syscall.Errno = 0x8034002B - ERROR_NDIS_UNSUPPORTED_REVISION syscall.Errno = 0x8034002C - ERROR_NDIS_INVALID_PORT syscall.Errno = 0x8034002D - ERROR_NDIS_INVALID_PORT_STATE syscall.Errno = 0x8034002E - ERROR_NDIS_LOW_POWER_STATE syscall.Errno = 0x8034002F - ERROR_NDIS_REINIT_REQUIRED syscall.Errno = 0x80340030 - ERROR_NDIS_NO_QUEUES syscall.Errno = 0x80340031 - ERROR_NDIS_DOT11_AUTO_CONFIG_ENABLED 
syscall.Errno = 0x80342000 - ERROR_NDIS_DOT11_MEDIA_IN_USE syscall.Errno = 0x80342001 - ERROR_NDIS_DOT11_POWER_STATE_INVALID syscall.Errno = 0x80342002 - ERROR_NDIS_PM_WOL_PATTERN_LIST_FULL syscall.Errno = 0x80342003 - ERROR_NDIS_PM_PROTOCOL_OFFLOAD_LIST_FULL syscall.Errno = 0x80342004 - ERROR_NDIS_DOT11_AP_CHANNEL_CURRENTLY_NOT_AVAILABLE syscall.Errno = 0x80342005 - ERROR_NDIS_DOT11_AP_BAND_CURRENTLY_NOT_AVAILABLE syscall.Errno = 0x80342006 - ERROR_NDIS_DOT11_AP_CHANNEL_NOT_ALLOWED syscall.Errno = 0x80342007 - ERROR_NDIS_DOT11_AP_BAND_NOT_ALLOWED syscall.Errno = 0x80342008 - ERROR_NDIS_INDICATION_REQUIRED syscall.Errno = 0x00340001 - ERROR_NDIS_OFFLOAD_POLICY syscall.Errno = 0xC034100F - ERROR_NDIS_OFFLOAD_CONNECTION_REJECTED syscall.Errno = 0xC0341012 - ERROR_NDIS_OFFLOAD_PATH_REJECTED syscall.Errno = 0xC0341013 - ERROR_HV_INVALID_HYPERCALL_CODE syscall.Errno = 0xC0350002 - ERROR_HV_INVALID_HYPERCALL_INPUT syscall.Errno = 0xC0350003 - ERROR_HV_INVALID_ALIGNMENT syscall.Errno = 0xC0350004 - ERROR_HV_INVALID_PARAMETER syscall.Errno = 0xC0350005 - ERROR_HV_ACCESS_DENIED syscall.Errno = 0xC0350006 - ERROR_HV_INVALID_PARTITION_STATE syscall.Errno = 0xC0350007 - ERROR_HV_OPERATION_DENIED syscall.Errno = 0xC0350008 - ERROR_HV_UNKNOWN_PROPERTY syscall.Errno = 0xC0350009 - ERROR_HV_PROPERTY_VALUE_OUT_OF_RANGE syscall.Errno = 0xC035000A - ERROR_HV_INSUFFICIENT_MEMORY syscall.Errno = 0xC035000B - ERROR_HV_PARTITION_TOO_DEEP syscall.Errno = 0xC035000C - ERROR_HV_INVALID_PARTITION_ID syscall.Errno = 0xC035000D - ERROR_HV_INVALID_VP_INDEX syscall.Errno = 0xC035000E - ERROR_HV_INVALID_PORT_ID syscall.Errno = 0xC0350011 - ERROR_HV_INVALID_CONNECTION_ID syscall.Errno = 0xC0350012 - ERROR_HV_INSUFFICIENT_BUFFERS syscall.Errno = 0xC0350013 - ERROR_HV_NOT_ACKNOWLEDGED syscall.Errno = 0xC0350014 - ERROR_HV_INVALID_VP_STATE syscall.Errno = 0xC0350015 - ERROR_HV_ACKNOWLEDGED syscall.Errno = 0xC0350016 - ERROR_HV_INVALID_SAVE_RESTORE_STATE syscall.Errno = 0xC0350017 - ERROR_HV_INVALID_SYNIC_STATE syscall.Errno = 0xC0350018 - ERROR_HV_OBJECT_IN_USE syscall.Errno = 0xC0350019 - ERROR_HV_INVALID_PROXIMITY_DOMAIN_INFO syscall.Errno = 0xC035001A - ERROR_HV_NO_DATA syscall.Errno = 0xC035001B - ERROR_HV_INACTIVE syscall.Errno = 0xC035001C - ERROR_HV_NO_RESOURCES syscall.Errno = 0xC035001D - ERROR_HV_FEATURE_UNAVAILABLE syscall.Errno = 0xC035001E - ERROR_HV_INSUFFICIENT_BUFFER syscall.Errno = 0xC0350033 - ERROR_HV_INSUFFICIENT_DEVICE_DOMAINS syscall.Errno = 0xC0350038 - ERROR_HV_CPUID_FEATURE_VALIDATION syscall.Errno = 0xC035003C - ERROR_HV_CPUID_XSAVE_FEATURE_VALIDATION syscall.Errno = 0xC035003D - ERROR_HV_PROCESSOR_STARTUP_TIMEOUT syscall.Errno = 0xC035003E - ERROR_HV_SMX_ENABLED syscall.Errno = 0xC035003F - ERROR_HV_INVALID_LP_INDEX syscall.Errno = 0xC0350041 - ERROR_HV_INVALID_REGISTER_VALUE syscall.Errno = 0xC0350050 - ERROR_HV_INVALID_VTL_STATE syscall.Errno = 0xC0350051 - ERROR_HV_NX_NOT_DETECTED syscall.Errno = 0xC0350055 - ERROR_HV_INVALID_DEVICE_ID syscall.Errno = 0xC0350057 - ERROR_HV_INVALID_DEVICE_STATE syscall.Errno = 0xC0350058 - ERROR_HV_PENDING_PAGE_REQUESTS syscall.Errno = 0x00350059 - ERROR_HV_PAGE_REQUEST_INVALID syscall.Errno = 0xC0350060 - ERROR_HV_INVALID_CPU_GROUP_ID syscall.Errno = 0xC035006F - ERROR_HV_INVALID_CPU_GROUP_STATE syscall.Errno = 0xC0350070 - ERROR_HV_OPERATION_FAILED syscall.Errno = 0xC0350071 - ERROR_HV_NOT_ALLOWED_WITH_NESTED_VIRT_ACTIVE syscall.Errno = 0xC0350072 - ERROR_HV_INSUFFICIENT_ROOT_MEMORY syscall.Errno = 0xC0350073 - ERROR_HV_NOT_PRESENT syscall.Errno = 0xC0351000 - 
ERROR_VID_DUPLICATE_HANDLER syscall.Errno = 0xC0370001 - ERROR_VID_TOO_MANY_HANDLERS syscall.Errno = 0xC0370002 - ERROR_VID_QUEUE_FULL syscall.Errno = 0xC0370003 - ERROR_VID_HANDLER_NOT_PRESENT syscall.Errno = 0xC0370004 - ERROR_VID_INVALID_OBJECT_NAME syscall.Errno = 0xC0370005 - ERROR_VID_PARTITION_NAME_TOO_LONG syscall.Errno = 0xC0370006 - ERROR_VID_MESSAGE_QUEUE_NAME_TOO_LONG syscall.Errno = 0xC0370007 - ERROR_VID_PARTITION_ALREADY_EXISTS syscall.Errno = 0xC0370008 - ERROR_VID_PARTITION_DOES_NOT_EXIST syscall.Errno = 0xC0370009 - ERROR_VID_PARTITION_NAME_NOT_FOUND syscall.Errno = 0xC037000A - ERROR_VID_MESSAGE_QUEUE_ALREADY_EXISTS syscall.Errno = 0xC037000B - ERROR_VID_EXCEEDED_MBP_ENTRY_MAP_LIMIT syscall.Errno = 0xC037000C - ERROR_VID_MB_STILL_REFERENCED syscall.Errno = 0xC037000D - ERROR_VID_CHILD_GPA_PAGE_SET_CORRUPTED syscall.Errno = 0xC037000E - ERROR_VID_INVALID_NUMA_SETTINGS syscall.Errno = 0xC037000F - ERROR_VID_INVALID_NUMA_NODE_INDEX syscall.Errno = 0xC0370010 - ERROR_VID_NOTIFICATION_QUEUE_ALREADY_ASSOCIATED syscall.Errno = 0xC0370011 - ERROR_VID_INVALID_MEMORY_BLOCK_HANDLE syscall.Errno = 0xC0370012 - ERROR_VID_PAGE_RANGE_OVERFLOW syscall.Errno = 0xC0370013 - ERROR_VID_INVALID_MESSAGE_QUEUE_HANDLE syscall.Errno = 0xC0370014 - ERROR_VID_INVALID_GPA_RANGE_HANDLE syscall.Errno = 0xC0370015 - ERROR_VID_NO_MEMORY_BLOCK_NOTIFICATION_QUEUE syscall.Errno = 0xC0370016 - ERROR_VID_MEMORY_BLOCK_LOCK_COUNT_EXCEEDED syscall.Errno = 0xC0370017 - ERROR_VID_INVALID_PPM_HANDLE syscall.Errno = 0xC0370018 - ERROR_VID_MBPS_ARE_LOCKED syscall.Errno = 0xC0370019 - ERROR_VID_MESSAGE_QUEUE_CLOSED syscall.Errno = 0xC037001A - ERROR_VID_VIRTUAL_PROCESSOR_LIMIT_EXCEEDED syscall.Errno = 0xC037001B - ERROR_VID_STOP_PENDING syscall.Errno = 0xC037001C - ERROR_VID_INVALID_PROCESSOR_STATE syscall.Errno = 0xC037001D - ERROR_VID_EXCEEDED_KM_CONTEXT_COUNT_LIMIT syscall.Errno = 0xC037001E - ERROR_VID_KM_INTERFACE_ALREADY_INITIALIZED syscall.Errno = 0xC037001F - ERROR_VID_MB_PROPERTY_ALREADY_SET_RESET syscall.Errno = 0xC0370020 - ERROR_VID_MMIO_RANGE_DESTROYED syscall.Errno = 0xC0370021 - ERROR_VID_INVALID_CHILD_GPA_PAGE_SET syscall.Errno = 0xC0370022 - ERROR_VID_RESERVE_PAGE_SET_IS_BEING_USED syscall.Errno = 0xC0370023 - ERROR_VID_RESERVE_PAGE_SET_TOO_SMALL syscall.Errno = 0xC0370024 - ERROR_VID_MBP_ALREADY_LOCKED_USING_RESERVED_PAGE syscall.Errno = 0xC0370025 - ERROR_VID_MBP_COUNT_EXCEEDED_LIMIT syscall.Errno = 0xC0370026 - ERROR_VID_SAVED_STATE_CORRUPT syscall.Errno = 0xC0370027 - ERROR_VID_SAVED_STATE_UNRECOGNIZED_ITEM syscall.Errno = 0xC0370028 - ERROR_VID_SAVED_STATE_INCOMPATIBLE syscall.Errno = 0xC0370029 - ERROR_VID_VTL_ACCESS_DENIED syscall.Errno = 0xC037002A - ERROR_VMCOMPUTE_TERMINATED_DURING_START syscall.Errno = 0xC0370100 - ERROR_VMCOMPUTE_IMAGE_MISMATCH syscall.Errno = 0xC0370101 - ERROR_VMCOMPUTE_HYPERV_NOT_INSTALLED syscall.Errno = 0xC0370102 - ERROR_VMCOMPUTE_OPERATION_PENDING syscall.Errno = 0xC0370103 - ERROR_VMCOMPUTE_TOO_MANY_NOTIFICATIONS syscall.Errno = 0xC0370104 - ERROR_VMCOMPUTE_INVALID_STATE syscall.Errno = 0xC0370105 - ERROR_VMCOMPUTE_UNEXPECTED_EXIT syscall.Errno = 0xC0370106 - ERROR_VMCOMPUTE_TERMINATED syscall.Errno = 0xC0370107 - ERROR_VMCOMPUTE_CONNECT_FAILED syscall.Errno = 0xC0370108 - ERROR_VMCOMPUTE_TIMEOUT syscall.Errno = 0xC0370109 - ERROR_VMCOMPUTE_CONNECTION_CLOSED syscall.Errno = 0xC037010A - ERROR_VMCOMPUTE_UNKNOWN_MESSAGE syscall.Errno = 0xC037010B - ERROR_VMCOMPUTE_UNSUPPORTED_PROTOCOL_VERSION syscall.Errno = 0xC037010C - ERROR_VMCOMPUTE_INVALID_JSON syscall.Errno = 
0xC037010D - ERROR_VMCOMPUTE_SYSTEM_NOT_FOUND syscall.Errno = 0xC037010E - ERROR_VMCOMPUTE_SYSTEM_ALREADY_EXISTS syscall.Errno = 0xC037010F - ERROR_VMCOMPUTE_SYSTEM_ALREADY_STOPPED syscall.Errno = 0xC0370110 - ERROR_VMCOMPUTE_PROTOCOL_ERROR syscall.Errno = 0xC0370111 - ERROR_VMCOMPUTE_INVALID_LAYER syscall.Errno = 0xC0370112 - ERROR_VMCOMPUTE_WINDOWS_INSIDER_REQUIRED syscall.Errno = 0xC0370113 - HCS_E_TERMINATED_DURING_START Handle = 0x80370100 - HCS_E_IMAGE_MISMATCH Handle = 0x80370101 - HCS_E_HYPERV_NOT_INSTALLED Handle = 0x80370102 - HCS_E_INVALID_STATE Handle = 0x80370105 - HCS_E_UNEXPECTED_EXIT Handle = 0x80370106 - HCS_E_TERMINATED Handle = 0x80370107 - HCS_E_CONNECT_FAILED Handle = 0x80370108 - HCS_E_CONNECTION_TIMEOUT Handle = 0x80370109 - HCS_E_CONNECTION_CLOSED Handle = 0x8037010A - HCS_E_UNKNOWN_MESSAGE Handle = 0x8037010B - HCS_E_UNSUPPORTED_PROTOCOL_VERSION Handle = 0x8037010C - HCS_E_INVALID_JSON Handle = 0x8037010D - HCS_E_SYSTEM_NOT_FOUND Handle = 0x8037010E - HCS_E_SYSTEM_ALREADY_EXISTS Handle = 0x8037010F - HCS_E_SYSTEM_ALREADY_STOPPED Handle = 0x80370110 - HCS_E_PROTOCOL_ERROR Handle = 0x80370111 - HCS_E_INVALID_LAYER Handle = 0x80370112 - HCS_E_WINDOWS_INSIDER_REQUIRED Handle = 0x80370113 - HCS_E_SERVICE_NOT_AVAILABLE Handle = 0x80370114 - HCS_E_OPERATION_NOT_STARTED Handle = 0x80370115 - HCS_E_OPERATION_ALREADY_STARTED Handle = 0x80370116 - HCS_E_OPERATION_PENDING Handle = 0x80370117 - HCS_E_OPERATION_TIMEOUT Handle = 0x80370118 - HCS_E_OPERATION_SYSTEM_CALLBACK_ALREADY_SET Handle = 0x80370119 - HCS_E_OPERATION_RESULT_ALLOCATION_FAILED Handle = 0x8037011A - HCS_E_ACCESS_DENIED Handle = 0x8037011B - HCS_E_GUEST_CRITICAL_ERROR Handle = 0x8037011C - ERROR_VNET_VIRTUAL_SWITCH_NAME_NOT_FOUND syscall.Errno = 0xC0370200 - ERROR_VID_REMOTE_NODE_PARENT_GPA_PAGES_USED syscall.Errno = 0x80370001 - WHV_E_UNKNOWN_CAPABILITY Handle = 0x80370300 - WHV_E_INSUFFICIENT_BUFFER Handle = 0x80370301 - WHV_E_UNKNOWN_PROPERTY Handle = 0x80370302 - WHV_E_UNSUPPORTED_HYPERVISOR_CONFIG Handle = 0x80370303 - WHV_E_INVALID_PARTITION_CONFIG Handle = 0x80370304 - WHV_E_GPA_RANGE_NOT_FOUND Handle = 0x80370305 - WHV_E_VP_ALREADY_EXISTS Handle = 0x80370306 - WHV_E_VP_DOES_NOT_EXIST Handle = 0x80370307 - WHV_E_INVALID_VP_STATE Handle = 0x80370308 - WHV_E_INVALID_VP_REGISTER_NAME Handle = 0x80370309 - ERROR_VSMB_SAVED_STATE_FILE_NOT_FOUND syscall.Errno = 0xC0370400 - ERROR_VSMB_SAVED_STATE_CORRUPT syscall.Errno = 0xC0370401 - ERROR_VOLMGR_INCOMPLETE_REGENERATION syscall.Errno = 0x80380001 - ERROR_VOLMGR_INCOMPLETE_DISK_MIGRATION syscall.Errno = 0x80380002 - ERROR_VOLMGR_DATABASE_FULL syscall.Errno = 0xC0380001 - ERROR_VOLMGR_DISK_CONFIGURATION_CORRUPTED syscall.Errno = 0xC0380002 - ERROR_VOLMGR_DISK_CONFIGURATION_NOT_IN_SYNC syscall.Errno = 0xC0380003 - ERROR_VOLMGR_PACK_CONFIG_UPDATE_FAILED syscall.Errno = 0xC0380004 - ERROR_VOLMGR_DISK_CONTAINS_NON_SIMPLE_VOLUME syscall.Errno = 0xC0380005 - ERROR_VOLMGR_DISK_DUPLICATE syscall.Errno = 0xC0380006 - ERROR_VOLMGR_DISK_DYNAMIC syscall.Errno = 0xC0380007 - ERROR_VOLMGR_DISK_ID_INVALID syscall.Errno = 0xC0380008 - ERROR_VOLMGR_DISK_INVALID syscall.Errno = 0xC0380009 - ERROR_VOLMGR_DISK_LAST_VOTER syscall.Errno = 0xC038000A - ERROR_VOLMGR_DISK_LAYOUT_INVALID syscall.Errno = 0xC038000B - ERROR_VOLMGR_DISK_LAYOUT_NON_BASIC_BETWEEN_BASIC_PARTITIONS syscall.Errno = 0xC038000C - ERROR_VOLMGR_DISK_LAYOUT_NOT_CYLINDER_ALIGNED syscall.Errno = 0xC038000D - ERROR_VOLMGR_DISK_LAYOUT_PARTITIONS_TOO_SMALL syscall.Errno = 0xC038000E - 
ERROR_VOLMGR_DISK_LAYOUT_PRIMARY_BETWEEN_LOGICAL_PARTITIONS syscall.Errno = 0xC038000F - ERROR_VOLMGR_DISK_LAYOUT_TOO_MANY_PARTITIONS syscall.Errno = 0xC0380010 - ERROR_VOLMGR_DISK_MISSING syscall.Errno = 0xC0380011 - ERROR_VOLMGR_DISK_NOT_EMPTY syscall.Errno = 0xC0380012 - ERROR_VOLMGR_DISK_NOT_ENOUGH_SPACE syscall.Errno = 0xC0380013 - ERROR_VOLMGR_DISK_REVECTORING_FAILED syscall.Errno = 0xC0380014 - ERROR_VOLMGR_DISK_SECTOR_SIZE_INVALID syscall.Errno = 0xC0380015 - ERROR_VOLMGR_DISK_SET_NOT_CONTAINED syscall.Errno = 0xC0380016 - ERROR_VOLMGR_DISK_USED_BY_MULTIPLE_MEMBERS syscall.Errno = 0xC0380017 - ERROR_VOLMGR_DISK_USED_BY_MULTIPLE_PLEXES syscall.Errno = 0xC0380018 - ERROR_VOLMGR_DYNAMIC_DISK_NOT_SUPPORTED syscall.Errno = 0xC0380019 - ERROR_VOLMGR_EXTENT_ALREADY_USED syscall.Errno = 0xC038001A - ERROR_VOLMGR_EXTENT_NOT_CONTIGUOUS syscall.Errno = 0xC038001B - ERROR_VOLMGR_EXTENT_NOT_IN_PUBLIC_REGION syscall.Errno = 0xC038001C - ERROR_VOLMGR_EXTENT_NOT_SECTOR_ALIGNED syscall.Errno = 0xC038001D - ERROR_VOLMGR_EXTENT_OVERLAPS_EBR_PARTITION syscall.Errno = 0xC038001E - ERROR_VOLMGR_EXTENT_VOLUME_LENGTHS_DO_NOT_MATCH syscall.Errno = 0xC038001F - ERROR_VOLMGR_FAULT_TOLERANT_NOT_SUPPORTED syscall.Errno = 0xC0380020 - ERROR_VOLMGR_INTERLEAVE_LENGTH_INVALID syscall.Errno = 0xC0380021 - ERROR_VOLMGR_MAXIMUM_REGISTERED_USERS syscall.Errno = 0xC0380022 - ERROR_VOLMGR_MEMBER_IN_SYNC syscall.Errno = 0xC0380023 - ERROR_VOLMGR_MEMBER_INDEX_DUPLICATE syscall.Errno = 0xC0380024 - ERROR_VOLMGR_MEMBER_INDEX_INVALID syscall.Errno = 0xC0380025 - ERROR_VOLMGR_MEMBER_MISSING syscall.Errno = 0xC0380026 - ERROR_VOLMGR_MEMBER_NOT_DETACHED syscall.Errno = 0xC0380027 - ERROR_VOLMGR_MEMBER_REGENERATING syscall.Errno = 0xC0380028 - ERROR_VOLMGR_ALL_DISKS_FAILED syscall.Errno = 0xC0380029 - ERROR_VOLMGR_NO_REGISTERED_USERS syscall.Errno = 0xC038002A - ERROR_VOLMGR_NO_SUCH_USER syscall.Errno = 0xC038002B - ERROR_VOLMGR_NOTIFICATION_RESET syscall.Errno = 0xC038002C - ERROR_VOLMGR_NUMBER_OF_MEMBERS_INVALID syscall.Errno = 0xC038002D - ERROR_VOLMGR_NUMBER_OF_PLEXES_INVALID syscall.Errno = 0xC038002E - ERROR_VOLMGR_PACK_DUPLICATE syscall.Errno = 0xC038002F - ERROR_VOLMGR_PACK_ID_INVALID syscall.Errno = 0xC0380030 - ERROR_VOLMGR_PACK_INVALID syscall.Errno = 0xC0380031 - ERROR_VOLMGR_PACK_NAME_INVALID syscall.Errno = 0xC0380032 - ERROR_VOLMGR_PACK_OFFLINE syscall.Errno = 0xC0380033 - ERROR_VOLMGR_PACK_HAS_QUORUM syscall.Errno = 0xC0380034 - ERROR_VOLMGR_PACK_WITHOUT_QUORUM syscall.Errno = 0xC0380035 - ERROR_VOLMGR_PARTITION_STYLE_INVALID syscall.Errno = 0xC0380036 - ERROR_VOLMGR_PARTITION_UPDATE_FAILED syscall.Errno = 0xC0380037 - ERROR_VOLMGR_PLEX_IN_SYNC syscall.Errno = 0xC0380038 - ERROR_VOLMGR_PLEX_INDEX_DUPLICATE syscall.Errno = 0xC0380039 - ERROR_VOLMGR_PLEX_INDEX_INVALID syscall.Errno = 0xC038003A - ERROR_VOLMGR_PLEX_LAST_ACTIVE syscall.Errno = 0xC038003B - ERROR_VOLMGR_PLEX_MISSING syscall.Errno = 0xC038003C - ERROR_VOLMGR_PLEX_REGENERATING syscall.Errno = 0xC038003D - ERROR_VOLMGR_PLEX_TYPE_INVALID syscall.Errno = 0xC038003E - ERROR_VOLMGR_PLEX_NOT_RAID5 syscall.Errno = 0xC038003F - ERROR_VOLMGR_PLEX_NOT_SIMPLE syscall.Errno = 0xC0380040 - ERROR_VOLMGR_STRUCTURE_SIZE_INVALID syscall.Errno = 0xC0380041 - ERROR_VOLMGR_TOO_MANY_NOTIFICATION_REQUESTS syscall.Errno = 0xC0380042 - ERROR_VOLMGR_TRANSACTION_IN_PROGRESS syscall.Errno = 0xC0380043 - ERROR_VOLMGR_UNEXPECTED_DISK_LAYOUT_CHANGE syscall.Errno = 0xC0380044 - ERROR_VOLMGR_VOLUME_CONTAINS_MISSING_DISK syscall.Errno = 0xC0380045 - ERROR_VOLMGR_VOLUME_ID_INVALID 
syscall.Errno = 0xC0380046 - ERROR_VOLMGR_VOLUME_LENGTH_INVALID syscall.Errno = 0xC0380047 - ERROR_VOLMGR_VOLUME_LENGTH_NOT_SECTOR_SIZE_MULTIPLE syscall.Errno = 0xC0380048 - ERROR_VOLMGR_VOLUME_NOT_MIRRORED syscall.Errno = 0xC0380049 - ERROR_VOLMGR_VOLUME_NOT_RETAINED syscall.Errno = 0xC038004A - ERROR_VOLMGR_VOLUME_OFFLINE syscall.Errno = 0xC038004B - ERROR_VOLMGR_VOLUME_RETAINED syscall.Errno = 0xC038004C - ERROR_VOLMGR_NUMBER_OF_EXTENTS_INVALID syscall.Errno = 0xC038004D - ERROR_VOLMGR_DIFFERENT_SECTOR_SIZE syscall.Errno = 0xC038004E - ERROR_VOLMGR_BAD_BOOT_DISK syscall.Errno = 0xC038004F - ERROR_VOLMGR_PACK_CONFIG_OFFLINE syscall.Errno = 0xC0380050 - ERROR_VOLMGR_PACK_CONFIG_ONLINE syscall.Errno = 0xC0380051 - ERROR_VOLMGR_NOT_PRIMARY_PACK syscall.Errno = 0xC0380052 - ERROR_VOLMGR_PACK_LOG_UPDATE_FAILED syscall.Errno = 0xC0380053 - ERROR_VOLMGR_NUMBER_OF_DISKS_IN_PLEX_INVALID syscall.Errno = 0xC0380054 - ERROR_VOLMGR_NUMBER_OF_DISKS_IN_MEMBER_INVALID syscall.Errno = 0xC0380055 - ERROR_VOLMGR_VOLUME_MIRRORED syscall.Errno = 0xC0380056 - ERROR_VOLMGR_PLEX_NOT_SIMPLE_SPANNED syscall.Errno = 0xC0380057 - ERROR_VOLMGR_NO_VALID_LOG_COPIES syscall.Errno = 0xC0380058 - ERROR_VOLMGR_PRIMARY_PACK_PRESENT syscall.Errno = 0xC0380059 - ERROR_VOLMGR_NUMBER_OF_DISKS_INVALID syscall.Errno = 0xC038005A - ERROR_VOLMGR_MIRROR_NOT_SUPPORTED syscall.Errno = 0xC038005B - ERROR_VOLMGR_RAID5_NOT_SUPPORTED syscall.Errno = 0xC038005C - ERROR_BCD_NOT_ALL_ENTRIES_IMPORTED syscall.Errno = 0x80390001 - ERROR_BCD_TOO_MANY_ELEMENTS syscall.Errno = 0xC0390002 - ERROR_BCD_NOT_ALL_ENTRIES_SYNCHRONIZED syscall.Errno = 0x80390003 - ERROR_VHD_DRIVE_FOOTER_MISSING syscall.Errno = 0xC03A0001 - ERROR_VHD_DRIVE_FOOTER_CHECKSUM_MISMATCH syscall.Errno = 0xC03A0002 - ERROR_VHD_DRIVE_FOOTER_CORRUPT syscall.Errno = 0xC03A0003 - ERROR_VHD_FORMAT_UNKNOWN syscall.Errno = 0xC03A0004 - ERROR_VHD_FORMAT_UNSUPPORTED_VERSION syscall.Errno = 0xC03A0005 - ERROR_VHD_SPARSE_HEADER_CHECKSUM_MISMATCH syscall.Errno = 0xC03A0006 - ERROR_VHD_SPARSE_HEADER_UNSUPPORTED_VERSION syscall.Errno = 0xC03A0007 - ERROR_VHD_SPARSE_HEADER_CORRUPT syscall.Errno = 0xC03A0008 - ERROR_VHD_BLOCK_ALLOCATION_FAILURE syscall.Errno = 0xC03A0009 - ERROR_VHD_BLOCK_ALLOCATION_TABLE_CORRUPT syscall.Errno = 0xC03A000A - ERROR_VHD_INVALID_BLOCK_SIZE syscall.Errno = 0xC03A000B - ERROR_VHD_BITMAP_MISMATCH syscall.Errno = 0xC03A000C - ERROR_VHD_PARENT_VHD_NOT_FOUND syscall.Errno = 0xC03A000D - ERROR_VHD_CHILD_PARENT_ID_MISMATCH syscall.Errno = 0xC03A000E - ERROR_VHD_CHILD_PARENT_TIMESTAMP_MISMATCH syscall.Errno = 0xC03A000F - ERROR_VHD_METADATA_READ_FAILURE syscall.Errno = 0xC03A0010 - ERROR_VHD_METADATA_WRITE_FAILURE syscall.Errno = 0xC03A0011 - ERROR_VHD_INVALID_SIZE syscall.Errno = 0xC03A0012 - ERROR_VHD_INVALID_FILE_SIZE syscall.Errno = 0xC03A0013 - ERROR_VIRTDISK_PROVIDER_NOT_FOUND syscall.Errno = 0xC03A0014 - ERROR_VIRTDISK_NOT_VIRTUAL_DISK syscall.Errno = 0xC03A0015 - ERROR_VHD_PARENT_VHD_ACCESS_DENIED syscall.Errno = 0xC03A0016 - ERROR_VHD_CHILD_PARENT_SIZE_MISMATCH syscall.Errno = 0xC03A0017 - ERROR_VHD_DIFFERENCING_CHAIN_CYCLE_DETECTED syscall.Errno = 0xC03A0018 - ERROR_VHD_DIFFERENCING_CHAIN_ERROR_IN_PARENT syscall.Errno = 0xC03A0019 - ERROR_VIRTUAL_DISK_LIMITATION syscall.Errno = 0xC03A001A - ERROR_VHD_INVALID_TYPE syscall.Errno = 0xC03A001B - ERROR_VHD_INVALID_STATE syscall.Errno = 0xC03A001C - ERROR_VIRTDISK_UNSUPPORTED_DISK_SECTOR_SIZE syscall.Errno = 0xC03A001D - ERROR_VIRTDISK_DISK_ALREADY_OWNED syscall.Errno = 0xC03A001E - 
ERROR_VIRTDISK_DISK_ONLINE_AND_WRITABLE syscall.Errno = 0xC03A001F - ERROR_CTLOG_TRACKING_NOT_INITIALIZED syscall.Errno = 0xC03A0020 - ERROR_CTLOG_LOGFILE_SIZE_EXCEEDED_MAXSIZE syscall.Errno = 0xC03A0021 - ERROR_CTLOG_VHD_CHANGED_OFFLINE syscall.Errno = 0xC03A0022 - ERROR_CTLOG_INVALID_TRACKING_STATE syscall.Errno = 0xC03A0023 - ERROR_CTLOG_INCONSISTENT_TRACKING_FILE syscall.Errno = 0xC03A0024 - ERROR_VHD_RESIZE_WOULD_TRUNCATE_DATA syscall.Errno = 0xC03A0025 - ERROR_VHD_COULD_NOT_COMPUTE_MINIMUM_VIRTUAL_SIZE syscall.Errno = 0xC03A0026 - ERROR_VHD_ALREADY_AT_OR_BELOW_MINIMUM_VIRTUAL_SIZE syscall.Errno = 0xC03A0027 - ERROR_VHD_METADATA_FULL syscall.Errno = 0xC03A0028 - ERROR_VHD_INVALID_CHANGE_TRACKING_ID syscall.Errno = 0xC03A0029 - ERROR_VHD_CHANGE_TRACKING_DISABLED syscall.Errno = 0xC03A002A - ERROR_VHD_MISSING_CHANGE_TRACKING_INFORMATION syscall.Errno = 0xC03A0030 - ERROR_QUERY_STORAGE_ERROR syscall.Errno = 0x803A0001 - HCN_E_NETWORK_NOT_FOUND Handle = 0x803B0001 - HCN_E_ENDPOINT_NOT_FOUND Handle = 0x803B0002 - HCN_E_LAYER_NOT_FOUND Handle = 0x803B0003 - HCN_E_SWITCH_NOT_FOUND Handle = 0x803B0004 - HCN_E_SUBNET_NOT_FOUND Handle = 0x803B0005 - HCN_E_ADAPTER_NOT_FOUND Handle = 0x803B0006 - HCN_E_PORT_NOT_FOUND Handle = 0x803B0007 - HCN_E_POLICY_NOT_FOUND Handle = 0x803B0008 - HCN_E_VFP_PORTSETTING_NOT_FOUND Handle = 0x803B0009 - HCN_E_INVALID_NETWORK Handle = 0x803B000A - HCN_E_INVALID_NETWORK_TYPE Handle = 0x803B000B - HCN_E_INVALID_ENDPOINT Handle = 0x803B000C - HCN_E_INVALID_POLICY Handle = 0x803B000D - HCN_E_INVALID_POLICY_TYPE Handle = 0x803B000E - HCN_E_INVALID_REMOTE_ENDPOINT_OPERATION Handle = 0x803B000F - HCN_E_NETWORK_ALREADY_EXISTS Handle = 0x803B0010 - HCN_E_LAYER_ALREADY_EXISTS Handle = 0x803B0011 - HCN_E_POLICY_ALREADY_EXISTS Handle = 0x803B0012 - HCN_E_PORT_ALREADY_EXISTS Handle = 0x803B0013 - HCN_E_ENDPOINT_ALREADY_ATTACHED Handle = 0x803B0014 - HCN_E_REQUEST_UNSUPPORTED Handle = 0x803B0015 - HCN_E_MAPPING_NOT_SUPPORTED Handle = 0x803B0016 - HCN_E_DEGRADED_OPERATION Handle = 0x803B0017 - HCN_E_SHARED_SWITCH_MODIFICATION Handle = 0x803B0018 - HCN_E_GUID_CONVERSION_FAILURE Handle = 0x803B0019 - HCN_E_REGKEY_FAILURE Handle = 0x803B001A - HCN_E_INVALID_JSON Handle = 0x803B001B - HCN_E_INVALID_JSON_REFERENCE Handle = 0x803B001C - HCN_E_ENDPOINT_SHARING_DISABLED Handle = 0x803B001D - HCN_E_INVALID_IP Handle = 0x803B001E - HCN_E_SWITCH_EXTENSION_NOT_FOUND Handle = 0x803B001F - HCN_E_MANAGER_STOPPED Handle = 0x803B0020 - GCN_E_MODULE_NOT_FOUND Handle = 0x803B0021 - GCN_E_NO_REQUEST_HANDLERS Handle = 0x803B0022 - GCN_E_REQUEST_UNSUPPORTED Handle = 0x803B0023 - GCN_E_RUNTIMEKEYS_FAILED Handle = 0x803B0024 - GCN_E_NETADAPTER_TIMEOUT Handle = 0x803B0025 - GCN_E_NETADAPTER_NOT_FOUND Handle = 0x803B0026 - GCN_E_NETCOMPARTMENT_NOT_FOUND Handle = 0x803B0027 - GCN_E_NETINTERFACE_NOT_FOUND Handle = 0x803B0028 - GCN_E_DEFAULTNAMESPACE_EXISTS Handle = 0x803B0029 - HCN_E_ICS_DISABLED Handle = 0x803B002A - HCN_E_ENDPOINT_NAMESPACE_ALREADY_EXISTS Handle = 0x803B002B - HCN_E_ENTITY_HAS_REFERENCES Handle = 0x803B002C - HCN_E_INVALID_INTERNAL_PORT Handle = 0x803B002D - HCN_E_NAMESPACE_ATTACH_FAILED Handle = 0x803B002E - HCN_E_ADDR_INVALID_OR_RESERVED Handle = 0x803B002F - SDIAG_E_CANCELLED syscall.Errno = 0x803C0100 - SDIAG_E_SCRIPT syscall.Errno = 0x803C0101 - SDIAG_E_POWERSHELL syscall.Errno = 0x803C0102 - SDIAG_E_MANAGEDHOST syscall.Errno = 0x803C0103 - SDIAG_E_NOVERIFIER syscall.Errno = 0x803C0104 - SDIAG_S_CANNOTRUN syscall.Errno = 0x003C0105 - SDIAG_E_DISABLED syscall.Errno = 0x803C0106 - 
SDIAG_E_TRUST syscall.Errno = 0x803C0107 - SDIAG_E_CANNOTRUN syscall.Errno = 0x803C0108 - SDIAG_E_VERSION syscall.Errno = 0x803C0109 - SDIAG_E_RESOURCE syscall.Errno = 0x803C010A - SDIAG_E_ROOTCAUSE syscall.Errno = 0x803C010B - WPN_E_CHANNEL_CLOSED Handle = 0x803E0100 - WPN_E_CHANNEL_REQUEST_NOT_COMPLETE Handle = 0x803E0101 - WPN_E_INVALID_APP Handle = 0x803E0102 - WPN_E_OUTSTANDING_CHANNEL_REQUEST Handle = 0x803E0103 - WPN_E_DUPLICATE_CHANNEL Handle = 0x803E0104 - WPN_E_PLATFORM_UNAVAILABLE Handle = 0x803E0105 - WPN_E_NOTIFICATION_POSTED Handle = 0x803E0106 - WPN_E_NOTIFICATION_HIDDEN Handle = 0x803E0107 - WPN_E_NOTIFICATION_NOT_POSTED Handle = 0x803E0108 - WPN_E_CLOUD_DISABLED Handle = 0x803E0109 - WPN_E_CLOUD_INCAPABLE Handle = 0x803E0110 - WPN_E_CLOUD_AUTH_UNAVAILABLE Handle = 0x803E011A - WPN_E_CLOUD_SERVICE_UNAVAILABLE Handle = 0x803E011B - WPN_E_FAILED_LOCK_SCREEN_UPDATE_INTIALIZATION Handle = 0x803E011C - WPN_E_NOTIFICATION_DISABLED Handle = 0x803E0111 - WPN_E_NOTIFICATION_INCAPABLE Handle = 0x803E0112 - WPN_E_INTERNET_INCAPABLE Handle = 0x803E0113 - WPN_E_NOTIFICATION_TYPE_DISABLED Handle = 0x803E0114 - WPN_E_NOTIFICATION_SIZE Handle = 0x803E0115 - WPN_E_TAG_SIZE Handle = 0x803E0116 - WPN_E_ACCESS_DENIED Handle = 0x803E0117 - WPN_E_DUPLICATE_REGISTRATION Handle = 0x803E0118 - WPN_E_PUSH_NOTIFICATION_INCAPABLE Handle = 0x803E0119 - WPN_E_DEV_ID_SIZE Handle = 0x803E0120 - WPN_E_TAG_ALPHANUMERIC Handle = 0x803E012A - WPN_E_INVALID_HTTP_STATUS_CODE Handle = 0x803E012B - WPN_E_OUT_OF_SESSION Handle = 0x803E0200 - WPN_E_POWER_SAVE Handle = 0x803E0201 - WPN_E_IMAGE_NOT_FOUND_IN_CACHE Handle = 0x803E0202 - WPN_E_ALL_URL_NOT_COMPLETED Handle = 0x803E0203 - WPN_E_INVALID_CLOUD_IMAGE Handle = 0x803E0204 - WPN_E_NOTIFICATION_ID_MATCHED Handle = 0x803E0205 - WPN_E_CALLBACK_ALREADY_REGISTERED Handle = 0x803E0206 - WPN_E_TOAST_NOTIFICATION_DROPPED Handle = 0x803E0207 - WPN_E_STORAGE_LOCKED Handle = 0x803E0208 - WPN_E_GROUP_SIZE Handle = 0x803E0209 - WPN_E_GROUP_ALPHANUMERIC Handle = 0x803E020A - WPN_E_CLOUD_DISABLED_FOR_APP Handle = 0x803E020B - E_MBN_CONTEXT_NOT_ACTIVATED Handle = 0x80548201 - E_MBN_BAD_SIM Handle = 0x80548202 - E_MBN_DATA_CLASS_NOT_AVAILABLE Handle = 0x80548203 - E_MBN_INVALID_ACCESS_STRING Handle = 0x80548204 - E_MBN_MAX_ACTIVATED_CONTEXTS Handle = 0x80548205 - E_MBN_PACKET_SVC_DETACHED Handle = 0x80548206 - E_MBN_PROVIDER_NOT_VISIBLE Handle = 0x80548207 - E_MBN_RADIO_POWER_OFF Handle = 0x80548208 - E_MBN_SERVICE_NOT_ACTIVATED Handle = 0x80548209 - E_MBN_SIM_NOT_INSERTED Handle = 0x8054820A - E_MBN_VOICE_CALL_IN_PROGRESS Handle = 0x8054820B - E_MBN_INVALID_CACHE Handle = 0x8054820C - E_MBN_NOT_REGISTERED Handle = 0x8054820D - E_MBN_PROVIDERS_NOT_FOUND Handle = 0x8054820E - E_MBN_PIN_NOT_SUPPORTED Handle = 0x8054820F - E_MBN_PIN_REQUIRED Handle = 0x80548210 - E_MBN_PIN_DISABLED Handle = 0x80548211 - E_MBN_FAILURE Handle = 0x80548212 - E_MBN_INVALID_PROFILE Handle = 0x80548218 - E_MBN_DEFAULT_PROFILE_EXIST Handle = 0x80548219 - E_MBN_SMS_ENCODING_NOT_SUPPORTED Handle = 0x80548220 - E_MBN_SMS_FILTER_NOT_SUPPORTED Handle = 0x80548221 - E_MBN_SMS_INVALID_MEMORY_INDEX Handle = 0x80548222 - E_MBN_SMS_LANG_NOT_SUPPORTED Handle = 0x80548223 - E_MBN_SMS_MEMORY_FAILURE Handle = 0x80548224 - E_MBN_SMS_NETWORK_TIMEOUT Handle = 0x80548225 - E_MBN_SMS_UNKNOWN_SMSC_ADDRESS Handle = 0x80548226 - E_MBN_SMS_FORMAT_NOT_SUPPORTED Handle = 0x80548227 - E_MBN_SMS_OPERATION_NOT_ALLOWED Handle = 0x80548228 - E_MBN_SMS_MEMORY_FULL Handle = 0x80548229 - PEER_E_IPV6_NOT_INSTALLED Handle = 0x80630001 - 
PEER_E_NOT_INITIALIZED Handle = 0x80630002 - PEER_E_CANNOT_START_SERVICE Handle = 0x80630003 - PEER_E_NOT_LICENSED Handle = 0x80630004 - PEER_E_INVALID_GRAPH Handle = 0x80630010 - PEER_E_DBNAME_CHANGED Handle = 0x80630011 - PEER_E_DUPLICATE_GRAPH Handle = 0x80630012 - PEER_E_GRAPH_NOT_READY Handle = 0x80630013 - PEER_E_GRAPH_SHUTTING_DOWN Handle = 0x80630014 - PEER_E_GRAPH_IN_USE Handle = 0x80630015 - PEER_E_INVALID_DATABASE Handle = 0x80630016 - PEER_E_TOO_MANY_ATTRIBUTES Handle = 0x80630017 - PEER_E_CONNECTION_NOT_FOUND Handle = 0x80630103 - PEER_E_CONNECT_SELF Handle = 0x80630106 - PEER_E_ALREADY_LISTENING Handle = 0x80630107 - PEER_E_NODE_NOT_FOUND Handle = 0x80630108 - PEER_E_CONNECTION_FAILED Handle = 0x80630109 - PEER_E_CONNECTION_NOT_AUTHENTICATED Handle = 0x8063010A - PEER_E_CONNECTION_REFUSED Handle = 0x8063010B - PEER_E_CLASSIFIER_TOO_LONG Handle = 0x80630201 - PEER_E_TOO_MANY_IDENTITIES Handle = 0x80630202 - PEER_E_NO_KEY_ACCESS Handle = 0x80630203 - PEER_E_GROUPS_EXIST Handle = 0x80630204 - PEER_E_RECORD_NOT_FOUND Handle = 0x80630301 - PEER_E_DATABASE_ACCESSDENIED Handle = 0x80630302 - PEER_E_DBINITIALIZATION_FAILED Handle = 0x80630303 - PEER_E_MAX_RECORD_SIZE_EXCEEDED Handle = 0x80630304 - PEER_E_DATABASE_ALREADY_PRESENT Handle = 0x80630305 - PEER_E_DATABASE_NOT_PRESENT Handle = 0x80630306 - PEER_E_IDENTITY_NOT_FOUND Handle = 0x80630401 - PEER_E_EVENT_HANDLE_NOT_FOUND Handle = 0x80630501 - PEER_E_INVALID_SEARCH Handle = 0x80630601 - PEER_E_INVALID_ATTRIBUTES Handle = 0x80630602 - PEER_E_INVITATION_NOT_TRUSTED Handle = 0x80630701 - PEER_E_CHAIN_TOO_LONG Handle = 0x80630703 - PEER_E_INVALID_TIME_PERIOD Handle = 0x80630705 - PEER_E_CIRCULAR_CHAIN_DETECTED Handle = 0x80630706 - PEER_E_CERT_STORE_CORRUPTED Handle = 0x80630801 - PEER_E_NO_CLOUD Handle = 0x80631001 - PEER_E_CLOUD_NAME_AMBIGUOUS Handle = 0x80631005 - PEER_E_INVALID_RECORD Handle = 0x80632010 - PEER_E_NOT_AUTHORIZED Handle = 0x80632020 - PEER_E_PASSWORD_DOES_NOT_MEET_POLICY Handle = 0x80632021 - PEER_E_DEFERRED_VALIDATION Handle = 0x80632030 - PEER_E_INVALID_GROUP_PROPERTIES Handle = 0x80632040 - PEER_E_INVALID_PEER_NAME Handle = 0x80632050 - PEER_E_INVALID_CLASSIFIER Handle = 0x80632060 - PEER_E_INVALID_FRIENDLY_NAME Handle = 0x80632070 - PEER_E_INVALID_ROLE_PROPERTY Handle = 0x80632071 - PEER_E_INVALID_CLASSIFIER_PROPERTY Handle = 0x80632072 - PEER_E_INVALID_RECORD_EXPIRATION Handle = 0x80632080 - PEER_E_INVALID_CREDENTIAL_INFO Handle = 0x80632081 - PEER_E_INVALID_CREDENTIAL Handle = 0x80632082 - PEER_E_INVALID_RECORD_SIZE Handle = 0x80632083 - PEER_E_UNSUPPORTED_VERSION Handle = 0x80632090 - PEER_E_GROUP_NOT_READY Handle = 0x80632091 - PEER_E_GROUP_IN_USE Handle = 0x80632092 - PEER_E_INVALID_GROUP Handle = 0x80632093 - PEER_E_NO_MEMBERS_FOUND Handle = 0x80632094 - PEER_E_NO_MEMBER_CONNECTIONS Handle = 0x80632095 - PEER_E_UNABLE_TO_LISTEN Handle = 0x80632096 - PEER_E_IDENTITY_DELETED Handle = 0x806320A0 - PEER_E_SERVICE_NOT_AVAILABLE Handle = 0x806320A1 - PEER_E_CONTACT_NOT_FOUND Handle = 0x80636001 - PEER_S_GRAPH_DATA_CREATED Handle = 0x00630001 - PEER_S_NO_EVENT_DATA Handle = 0x00630002 - PEER_S_ALREADY_CONNECTED Handle = 0x00632000 - PEER_S_SUBSCRIPTION_EXISTS Handle = 0x00636000 - PEER_S_NO_CONNECTIVITY Handle = 0x00630005 - PEER_S_ALREADY_A_MEMBER Handle = 0x00630006 - PEER_E_CANNOT_CONVERT_PEER_NAME Handle = 0x80634001 - PEER_E_INVALID_PEER_HOST_NAME Handle = 0x80634002 - PEER_E_NO_MORE Handle = 0x80634003 - PEER_E_PNRP_DUPLICATE_PEER_NAME Handle = 0x80634005 - PEER_E_INVITE_CANCELLED Handle = 0x80637000 - 
PEER_E_INVITE_RESPONSE_NOT_AVAILABLE Handle = 0x80637001 - PEER_E_NOT_SIGNED_IN Handle = 0x80637003 - PEER_E_PRIVACY_DECLINED Handle = 0x80637004 - PEER_E_TIMEOUT Handle = 0x80637005 - PEER_E_INVALID_ADDRESS Handle = 0x80637007 - PEER_E_FW_EXCEPTION_DISABLED Handle = 0x80637008 - PEER_E_FW_BLOCKED_BY_POLICY Handle = 0x80637009 - PEER_E_FW_BLOCKED_BY_SHIELDS_UP Handle = 0x8063700A - PEER_E_FW_DECLINED Handle = 0x8063700B - UI_E_CREATE_FAILED Handle = 0x802A0001 - UI_E_SHUTDOWN_CALLED Handle = 0x802A0002 - UI_E_ILLEGAL_REENTRANCY Handle = 0x802A0003 - UI_E_OBJECT_SEALED Handle = 0x802A0004 - UI_E_VALUE_NOT_SET Handle = 0x802A0005 - UI_E_VALUE_NOT_DETERMINED Handle = 0x802A0006 - UI_E_INVALID_OUTPUT Handle = 0x802A0007 - UI_E_BOOLEAN_EXPECTED Handle = 0x802A0008 - UI_E_DIFFERENT_OWNER Handle = 0x802A0009 - UI_E_AMBIGUOUS_MATCH Handle = 0x802A000A - UI_E_FP_OVERFLOW Handle = 0x802A000B - UI_E_WRONG_THREAD Handle = 0x802A000C - UI_E_STORYBOARD_ACTIVE Handle = 0x802A0101 - UI_E_STORYBOARD_NOT_PLAYING Handle = 0x802A0102 - UI_E_START_KEYFRAME_AFTER_END Handle = 0x802A0103 - UI_E_END_KEYFRAME_NOT_DETERMINED Handle = 0x802A0104 - UI_E_LOOPS_OVERLAP Handle = 0x802A0105 - UI_E_TRANSITION_ALREADY_USED Handle = 0x802A0106 - UI_E_TRANSITION_NOT_IN_STORYBOARD Handle = 0x802A0107 - UI_E_TRANSITION_ECLIPSED Handle = 0x802A0108 - UI_E_TIME_BEFORE_LAST_UPDATE Handle = 0x802A0109 - UI_E_TIMER_CLIENT_ALREADY_CONNECTED Handle = 0x802A010A - UI_E_INVALID_DIMENSION Handle = 0x802A010B - UI_E_PRIMITIVE_OUT_OF_BOUNDS Handle = 0x802A010C - UI_E_WINDOW_CLOSED Handle = 0x802A0201 - E_BLUETOOTH_ATT_INVALID_HANDLE Handle = 0x80650001 - E_BLUETOOTH_ATT_READ_NOT_PERMITTED Handle = 0x80650002 - E_BLUETOOTH_ATT_WRITE_NOT_PERMITTED Handle = 0x80650003 - E_BLUETOOTH_ATT_INVALID_PDU Handle = 0x80650004 - E_BLUETOOTH_ATT_INSUFFICIENT_AUTHENTICATION Handle = 0x80650005 - E_BLUETOOTH_ATT_REQUEST_NOT_SUPPORTED Handle = 0x80650006 - E_BLUETOOTH_ATT_INVALID_OFFSET Handle = 0x80650007 - E_BLUETOOTH_ATT_INSUFFICIENT_AUTHORIZATION Handle = 0x80650008 - E_BLUETOOTH_ATT_PREPARE_QUEUE_FULL Handle = 0x80650009 - E_BLUETOOTH_ATT_ATTRIBUTE_NOT_FOUND Handle = 0x8065000A - E_BLUETOOTH_ATT_ATTRIBUTE_NOT_LONG Handle = 0x8065000B - E_BLUETOOTH_ATT_INSUFFICIENT_ENCRYPTION_KEY_SIZE Handle = 0x8065000C - E_BLUETOOTH_ATT_INVALID_ATTRIBUTE_VALUE_LENGTH Handle = 0x8065000D - E_BLUETOOTH_ATT_UNLIKELY Handle = 0x8065000E - E_BLUETOOTH_ATT_INSUFFICIENT_ENCRYPTION Handle = 0x8065000F - E_BLUETOOTH_ATT_UNSUPPORTED_GROUP_TYPE Handle = 0x80650010 - E_BLUETOOTH_ATT_INSUFFICIENT_RESOURCES Handle = 0x80650011 - E_BLUETOOTH_ATT_UNKNOWN_ERROR Handle = 0x80651000 - E_AUDIO_ENGINE_NODE_NOT_FOUND Handle = 0x80660001 - E_HDAUDIO_EMPTY_CONNECTION_LIST Handle = 0x80660002 - E_HDAUDIO_CONNECTION_LIST_NOT_SUPPORTED Handle = 0x80660003 - E_HDAUDIO_NO_LOGICAL_DEVICES_CREATED Handle = 0x80660004 - E_HDAUDIO_NULL_LINKED_LIST_ENTRY Handle = 0x80660005 - STATEREPOSITORY_E_CONCURRENCY_LOCKING_FAILURE Handle = 0x80670001 - STATEREPOSITORY_E_STATEMENT_INPROGRESS Handle = 0x80670002 - STATEREPOSITORY_E_CONFIGURATION_INVALID Handle = 0x80670003 - STATEREPOSITORY_E_UNKNOWN_SCHEMA_VERSION Handle = 0x80670004 - STATEREPOSITORY_ERROR_DICTIONARY_CORRUPTED Handle = 0x80670005 - STATEREPOSITORY_E_BLOCKED Handle = 0x80670006 - STATEREPOSITORY_E_BUSY_RETRY Handle = 0x80670007 - STATEREPOSITORY_E_BUSY_RECOVERY_RETRY Handle = 0x80670008 - STATEREPOSITORY_E_LOCKED_RETRY Handle = 0x80670009 - STATEREPOSITORY_E_LOCKED_SHAREDCACHE_RETRY Handle = 0x8067000A - STATEREPOSITORY_E_TRANSACTION_REQUIRED 
Handle = 0x8067000B - STATEREPOSITORY_E_BUSY_TIMEOUT_EXCEEDED Handle = 0x8067000C - STATEREPOSITORY_E_BUSY_RECOVERY_TIMEOUT_EXCEEDED Handle = 0x8067000D - STATEREPOSITORY_E_LOCKED_TIMEOUT_EXCEEDED Handle = 0x8067000E - STATEREPOSITORY_E_LOCKED_SHAREDCACHE_TIMEOUT_EXCEEDED Handle = 0x8067000F - STATEREPOSITORY_E_SERVICE_STOP_IN_PROGRESS Handle = 0x80670010 - STATEREPOSTORY_E_NESTED_TRANSACTION_NOT_SUPPORTED Handle = 0x80670011 - STATEREPOSITORY_ERROR_CACHE_CORRUPTED Handle = 0x80670012 - STATEREPOSITORY_TRANSACTION_CALLER_ID_CHANGED Handle = 0x00670013 - STATEREPOSITORY_TRANSACTION_IN_PROGRESS Handle = 0x00670014 - ERROR_SPACES_POOL_WAS_DELETED Handle = 0x00E70001 - ERROR_SPACES_FAULT_DOMAIN_TYPE_INVALID Handle = 0x80E70001 - ERROR_SPACES_INTERNAL_ERROR Handle = 0x80E70002 - ERROR_SPACES_RESILIENCY_TYPE_INVALID Handle = 0x80E70003 - ERROR_SPACES_DRIVE_SECTOR_SIZE_INVALID Handle = 0x80E70004 - ERROR_SPACES_DRIVE_REDUNDANCY_INVALID Handle = 0x80E70006 - ERROR_SPACES_NUMBER_OF_DATA_COPIES_INVALID Handle = 0x80E70007 - ERROR_SPACES_PARITY_LAYOUT_INVALID Handle = 0x80E70008 - ERROR_SPACES_INTERLEAVE_LENGTH_INVALID Handle = 0x80E70009 - ERROR_SPACES_NUMBER_OF_COLUMNS_INVALID Handle = 0x80E7000A - ERROR_SPACES_NOT_ENOUGH_DRIVES Handle = 0x80E7000B - ERROR_SPACES_EXTENDED_ERROR Handle = 0x80E7000C - ERROR_SPACES_PROVISIONING_TYPE_INVALID Handle = 0x80E7000D - ERROR_SPACES_ALLOCATION_SIZE_INVALID Handle = 0x80E7000E - ERROR_SPACES_ENCLOSURE_AWARE_INVALID Handle = 0x80E7000F - ERROR_SPACES_WRITE_CACHE_SIZE_INVALID Handle = 0x80E70010 - ERROR_SPACES_NUMBER_OF_GROUPS_INVALID Handle = 0x80E70011 - ERROR_SPACES_DRIVE_OPERATIONAL_STATE_INVALID Handle = 0x80E70012 - ERROR_SPACES_ENTRY_INCOMPLETE Handle = 0x80E70013 - ERROR_SPACES_ENTRY_INVALID Handle = 0x80E70014 - ERROR_VOLSNAP_BOOTFILE_NOT_VALID Handle = 0x80820001 - ERROR_VOLSNAP_ACTIVATION_TIMEOUT Handle = 0x80820002 - ERROR_TIERING_NOT_SUPPORTED_ON_VOLUME Handle = 0x80830001 - ERROR_TIERING_VOLUME_DISMOUNT_IN_PROGRESS Handle = 0x80830002 - ERROR_TIERING_STORAGE_TIER_NOT_FOUND Handle = 0x80830003 - ERROR_TIERING_INVALID_FILE_ID Handle = 0x80830004 - ERROR_TIERING_WRONG_CLUSTER_NODE Handle = 0x80830005 - ERROR_TIERING_ALREADY_PROCESSING Handle = 0x80830006 - ERROR_TIERING_CANNOT_PIN_OBJECT Handle = 0x80830007 - ERROR_TIERING_FILE_IS_NOT_PINNED Handle = 0x80830008 - ERROR_NOT_A_TIERED_VOLUME Handle = 0x80830009 - ERROR_ATTRIBUTE_NOT_PRESENT Handle = 0x8083000A - ERROR_SECCORE_INVALID_COMMAND Handle = 0xC0E80000 - ERROR_NO_APPLICABLE_APP_LICENSES_FOUND Handle = 0xC0EA0001 - ERROR_CLIP_LICENSE_NOT_FOUND Handle = 0xC0EA0002 - ERROR_CLIP_DEVICE_LICENSE_MISSING Handle = 0xC0EA0003 - ERROR_CLIP_LICENSE_INVALID_SIGNATURE Handle = 0xC0EA0004 - ERROR_CLIP_KEYHOLDER_LICENSE_MISSING_OR_INVALID Handle = 0xC0EA0005 - ERROR_CLIP_LICENSE_EXPIRED Handle = 0xC0EA0006 - ERROR_CLIP_LICENSE_SIGNED_BY_UNKNOWN_SOURCE Handle = 0xC0EA0007 - ERROR_CLIP_LICENSE_NOT_SIGNED Handle = 0xC0EA0008 - ERROR_CLIP_LICENSE_HARDWARE_ID_OUT_OF_TOLERANCE Handle = 0xC0EA0009 - ERROR_CLIP_LICENSE_DEVICE_ID_MISMATCH Handle = 0xC0EA000A - DXGI_STATUS_OCCLUDED Handle = 0x087A0001 - DXGI_STATUS_CLIPPED Handle = 0x087A0002 - DXGI_STATUS_NO_REDIRECTION Handle = 0x087A0004 - DXGI_STATUS_NO_DESKTOP_ACCESS Handle = 0x087A0005 - DXGI_STATUS_GRAPHICS_VIDPN_SOURCE_IN_USE Handle = 0x087A0006 - DXGI_STATUS_MODE_CHANGED Handle = 0x087A0007 - DXGI_STATUS_MODE_CHANGE_IN_PROGRESS Handle = 0x087A0008 - DXGI_ERROR_INVALID_CALL Handle = 0x887A0001 - DXGI_ERROR_NOT_FOUND Handle = 0x887A0002 - DXGI_ERROR_MORE_DATA 
Handle = 0x887A0003 - DXGI_ERROR_UNSUPPORTED Handle = 0x887A0004 - DXGI_ERROR_DEVICE_REMOVED Handle = 0x887A0005 - DXGI_ERROR_DEVICE_HUNG Handle = 0x887A0006 - DXGI_ERROR_DEVICE_RESET Handle = 0x887A0007 - DXGI_ERROR_WAS_STILL_DRAWING Handle = 0x887A000A - DXGI_ERROR_FRAME_STATISTICS_DISJOINT Handle = 0x887A000B - DXGI_ERROR_GRAPHICS_VIDPN_SOURCE_IN_USE Handle = 0x887A000C - DXGI_ERROR_DRIVER_INTERNAL_ERROR Handle = 0x887A0020 - DXGI_ERROR_NONEXCLUSIVE Handle = 0x887A0021 - DXGI_ERROR_NOT_CURRENTLY_AVAILABLE Handle = 0x887A0022 - DXGI_ERROR_REMOTE_CLIENT_DISCONNECTED Handle = 0x887A0023 - DXGI_ERROR_REMOTE_OUTOFMEMORY Handle = 0x887A0024 - DXGI_ERROR_ACCESS_LOST Handle = 0x887A0026 - DXGI_ERROR_WAIT_TIMEOUT Handle = 0x887A0027 - DXGI_ERROR_SESSION_DISCONNECTED Handle = 0x887A0028 - DXGI_ERROR_RESTRICT_TO_OUTPUT_STALE Handle = 0x887A0029 - DXGI_ERROR_CANNOT_PROTECT_CONTENT Handle = 0x887A002A - DXGI_ERROR_ACCESS_DENIED Handle = 0x887A002B - DXGI_ERROR_NAME_ALREADY_EXISTS Handle = 0x887A002C - DXGI_ERROR_SDK_COMPONENT_MISSING Handle = 0x887A002D - DXGI_ERROR_NOT_CURRENT Handle = 0x887A002E - DXGI_ERROR_HW_PROTECTION_OUTOFMEMORY Handle = 0x887A0030 - DXGI_ERROR_DYNAMIC_CODE_POLICY_VIOLATION Handle = 0x887A0031 - DXGI_ERROR_NON_COMPOSITED_UI Handle = 0x887A0032 - DXGI_STATUS_UNOCCLUDED Handle = 0x087A0009 - DXGI_STATUS_DDA_WAS_STILL_DRAWING Handle = 0x087A000A - DXGI_ERROR_MODE_CHANGE_IN_PROGRESS Handle = 0x887A0025 - DXGI_STATUS_PRESENT_REQUIRED Handle = 0x087A002F - DXGI_ERROR_CACHE_CORRUPT Handle = 0x887A0033 - DXGI_ERROR_CACHE_FULL Handle = 0x887A0034 - DXGI_ERROR_CACHE_HASH_COLLISION Handle = 0x887A0035 - DXGI_ERROR_ALREADY_EXISTS Handle = 0x887A0036 - DXGI_DDI_ERR_WASSTILLDRAWING Handle = 0x887B0001 - DXGI_DDI_ERR_UNSUPPORTED Handle = 0x887B0002 - DXGI_DDI_ERR_NONEXCLUSIVE Handle = 0x887B0003 - D3D10_ERROR_TOO_MANY_UNIQUE_STATE_OBJECTS Handle = 0x88790001 - D3D10_ERROR_FILE_NOT_FOUND Handle = 0x88790002 - D3D11_ERROR_TOO_MANY_UNIQUE_STATE_OBJECTS Handle = 0x887C0001 - D3D11_ERROR_FILE_NOT_FOUND Handle = 0x887C0002 - D3D11_ERROR_TOO_MANY_UNIQUE_VIEW_OBJECTS Handle = 0x887C0003 - D3D11_ERROR_DEFERRED_CONTEXT_MAP_WITHOUT_INITIAL_DISCARD Handle = 0x887C0004 - D3D12_ERROR_ADAPTER_NOT_FOUND Handle = 0x887E0001 - D3D12_ERROR_DRIVER_VERSION_MISMATCH Handle = 0x887E0002 - D2DERR_WRONG_STATE Handle = 0x88990001 - D2DERR_NOT_INITIALIZED Handle = 0x88990002 - D2DERR_UNSUPPORTED_OPERATION Handle = 0x88990003 - D2DERR_SCANNER_FAILED Handle = 0x88990004 - D2DERR_SCREEN_ACCESS_DENIED Handle = 0x88990005 - D2DERR_DISPLAY_STATE_INVALID Handle = 0x88990006 - D2DERR_ZERO_VECTOR Handle = 0x88990007 - D2DERR_INTERNAL_ERROR Handle = 0x88990008 - D2DERR_DISPLAY_FORMAT_NOT_SUPPORTED Handle = 0x88990009 - D2DERR_INVALID_CALL Handle = 0x8899000A - D2DERR_NO_HARDWARE_DEVICE Handle = 0x8899000B - D2DERR_RECREATE_TARGET Handle = 0x8899000C - D2DERR_TOO_MANY_SHADER_ELEMENTS Handle = 0x8899000D - D2DERR_SHADER_COMPILE_FAILED Handle = 0x8899000E - D2DERR_MAX_TEXTURE_SIZE_EXCEEDED Handle = 0x8899000F - D2DERR_UNSUPPORTED_VERSION Handle = 0x88990010 - D2DERR_BAD_NUMBER Handle = 0x88990011 - D2DERR_WRONG_FACTORY Handle = 0x88990012 - D2DERR_LAYER_ALREADY_IN_USE Handle = 0x88990013 - D2DERR_POP_CALL_DID_NOT_MATCH_PUSH Handle = 0x88990014 - D2DERR_WRONG_RESOURCE_DOMAIN Handle = 0x88990015 - D2DERR_PUSH_POP_UNBALANCED Handle = 0x88990016 - D2DERR_RENDER_TARGET_HAS_LAYER_OR_CLIPRECT Handle = 0x88990017 - D2DERR_INCOMPATIBLE_BRUSH_TYPES Handle = 0x88990018 - D2DERR_WIN32_ERROR Handle = 0x88990019 - 
D2DERR_TARGET_NOT_GDI_COMPATIBLE Handle = 0x8899001A - D2DERR_TEXT_EFFECT_IS_WRONG_TYPE Handle = 0x8899001B - D2DERR_TEXT_RENDERER_NOT_RELEASED Handle = 0x8899001C - D2DERR_EXCEEDS_MAX_BITMAP_SIZE Handle = 0x8899001D - D2DERR_INVALID_GRAPH_CONFIGURATION Handle = 0x8899001E - D2DERR_INVALID_INTERNAL_GRAPH_CONFIGURATION Handle = 0x8899001F - D2DERR_CYCLIC_GRAPH Handle = 0x88990020 - D2DERR_BITMAP_CANNOT_DRAW Handle = 0x88990021 - D2DERR_OUTSTANDING_BITMAP_REFERENCES Handle = 0x88990022 - D2DERR_ORIGINAL_TARGET_NOT_BOUND Handle = 0x88990023 - D2DERR_INVALID_TARGET Handle = 0x88990024 - D2DERR_BITMAP_BOUND_AS_TARGET Handle = 0x88990025 - D2DERR_INSUFFICIENT_DEVICE_CAPABILITIES Handle = 0x88990026 - D2DERR_INTERMEDIATE_TOO_LARGE Handle = 0x88990027 - D2DERR_EFFECT_IS_NOT_REGISTERED Handle = 0x88990028 - D2DERR_INVALID_PROPERTY Handle = 0x88990029 - D2DERR_NO_SUBPROPERTIES Handle = 0x8899002A - D2DERR_PRINT_JOB_CLOSED Handle = 0x8899002B - D2DERR_PRINT_FORMAT_NOT_SUPPORTED Handle = 0x8899002C - D2DERR_TOO_MANY_TRANSFORM_INPUTS Handle = 0x8899002D - D2DERR_INVALID_GLYPH_IMAGE Handle = 0x8899002E - DWRITE_E_FILEFORMAT Handle = 0x88985000 - DWRITE_E_UNEXPECTED Handle = 0x88985001 - DWRITE_E_NOFONT Handle = 0x88985002 - DWRITE_E_FILENOTFOUND Handle = 0x88985003 - DWRITE_E_FILEACCESS Handle = 0x88985004 - DWRITE_E_FONTCOLLECTIONOBSOLETE Handle = 0x88985005 - DWRITE_E_ALREADYREGISTERED Handle = 0x88985006 - DWRITE_E_CACHEFORMAT Handle = 0x88985007 - DWRITE_E_CACHEVERSION Handle = 0x88985008 - DWRITE_E_UNSUPPORTEDOPERATION Handle = 0x88985009 - DWRITE_E_TEXTRENDERERINCOMPATIBLE Handle = 0x8898500A - DWRITE_E_FLOWDIRECTIONCONFLICTS Handle = 0x8898500B - DWRITE_E_NOCOLOR Handle = 0x8898500C - DWRITE_E_REMOTEFONT Handle = 0x8898500D - DWRITE_E_DOWNLOADCANCELLED Handle = 0x8898500E - DWRITE_E_DOWNLOADFAILED Handle = 0x8898500F - DWRITE_E_TOOMANYDOWNLOADS Handle = 0x88985010 - WINCODEC_ERR_WRONGSTATE Handle = 0x88982F04 - WINCODEC_ERR_VALUEOUTOFRANGE Handle = 0x88982F05 - WINCODEC_ERR_UNKNOWNIMAGEFORMAT Handle = 0x88982F07 - WINCODEC_ERR_UNSUPPORTEDVERSION Handle = 0x88982F0B - WINCODEC_ERR_NOTINITIALIZED Handle = 0x88982F0C - WINCODEC_ERR_ALREADYLOCKED Handle = 0x88982F0D - WINCODEC_ERR_PROPERTYNOTFOUND Handle = 0x88982F40 - WINCODEC_ERR_PROPERTYNOTSUPPORTED Handle = 0x88982F41 - WINCODEC_ERR_PROPERTYSIZE Handle = 0x88982F42 - WINCODEC_ERR_CODECPRESENT Handle = 0x88982F43 - WINCODEC_ERR_CODECNOTHUMBNAIL Handle = 0x88982F44 - WINCODEC_ERR_PALETTEUNAVAILABLE Handle = 0x88982F45 - WINCODEC_ERR_CODECTOOMANYSCANLINES Handle = 0x88982F46 - WINCODEC_ERR_INTERNALERROR Handle = 0x88982F48 - WINCODEC_ERR_SOURCERECTDOESNOTMATCHDIMENSIONS Handle = 0x88982F49 - WINCODEC_ERR_COMPONENTNOTFOUND Handle = 0x88982F50 - WINCODEC_ERR_IMAGESIZEOUTOFRANGE Handle = 0x88982F51 - WINCODEC_ERR_TOOMUCHMETADATA Handle = 0x88982F52 - WINCODEC_ERR_BADIMAGE Handle = 0x88982F60 - WINCODEC_ERR_BADHEADER Handle = 0x88982F61 - WINCODEC_ERR_FRAMEMISSING Handle = 0x88982F62 - WINCODEC_ERR_BADMETADATAHEADER Handle = 0x88982F63 - WINCODEC_ERR_BADSTREAMDATA Handle = 0x88982F70 - WINCODEC_ERR_STREAMWRITE Handle = 0x88982F71 - WINCODEC_ERR_STREAMREAD Handle = 0x88982F72 - WINCODEC_ERR_STREAMNOTAVAILABLE Handle = 0x88982F73 - WINCODEC_ERR_UNSUPPORTEDPIXELFORMAT Handle = 0x88982F80 - WINCODEC_ERR_UNSUPPORTEDOPERATION Handle = 0x88982F81 - WINCODEC_ERR_INVALIDREGISTRATION Handle = 0x88982F8A - WINCODEC_ERR_COMPONENTINITIALIZEFAILURE Handle = 0x88982F8B - WINCODEC_ERR_INSUFFICIENTBUFFER Handle = 0x88982F8C - WINCODEC_ERR_DUPLICATEMETADATAPRESENT Handle 
= 0x88982F8D - WINCODEC_ERR_PROPERTYUNEXPECTEDTYPE Handle = 0x88982F8E - WINCODEC_ERR_UNEXPECTEDSIZE Handle = 0x88982F8F - WINCODEC_ERR_INVALIDQUERYREQUEST Handle = 0x88982F90 - WINCODEC_ERR_UNEXPECTEDMETADATATYPE Handle = 0x88982F91 - WINCODEC_ERR_REQUESTONLYVALIDATMETADATAROOT Handle = 0x88982F92 - WINCODEC_ERR_INVALIDQUERYCHARACTER Handle = 0x88982F93 - WINCODEC_ERR_WIN32ERROR Handle = 0x88982F94 - WINCODEC_ERR_INVALIDPROGRESSIVELEVEL Handle = 0x88982F95 - WINCODEC_ERR_INVALIDJPEGSCANINDEX Handle = 0x88982F96 - MILERR_OBJECTBUSY Handle = 0x88980001 - MILERR_INSUFFICIENTBUFFER Handle = 0x88980002 - MILERR_WIN32ERROR Handle = 0x88980003 - MILERR_SCANNER_FAILED Handle = 0x88980004 - MILERR_SCREENACCESSDENIED Handle = 0x88980005 - MILERR_DISPLAYSTATEINVALID Handle = 0x88980006 - MILERR_NONINVERTIBLEMATRIX Handle = 0x88980007 - MILERR_ZEROVECTOR Handle = 0x88980008 - MILERR_TERMINATED Handle = 0x88980009 - MILERR_BADNUMBER Handle = 0x8898000A - MILERR_INTERNALERROR Handle = 0x88980080 - MILERR_DISPLAYFORMATNOTSUPPORTED Handle = 0x88980084 - MILERR_INVALIDCALL Handle = 0x88980085 - MILERR_ALREADYLOCKED Handle = 0x88980086 - MILERR_NOTLOCKED Handle = 0x88980087 - MILERR_DEVICECANNOTRENDERTEXT Handle = 0x88980088 - MILERR_GLYPHBITMAPMISSED Handle = 0x88980089 - MILERR_MALFORMEDGLYPHCACHE Handle = 0x8898008A - MILERR_GENERIC_IGNORE Handle = 0x8898008B - MILERR_MALFORMED_GUIDELINE_DATA Handle = 0x8898008C - MILERR_NO_HARDWARE_DEVICE Handle = 0x8898008D - MILERR_NEED_RECREATE_AND_PRESENT Handle = 0x8898008E - MILERR_ALREADY_INITIALIZED Handle = 0x8898008F - MILERR_MISMATCHED_SIZE Handle = 0x88980090 - MILERR_NO_REDIRECTION_SURFACE_AVAILABLE Handle = 0x88980091 - MILERR_REMOTING_NOT_SUPPORTED Handle = 0x88980092 - MILERR_QUEUED_PRESENT_NOT_SUPPORTED Handle = 0x88980093 - MILERR_NOT_QUEUING_PRESENTS Handle = 0x88980094 - MILERR_NO_REDIRECTION_SURFACE_RETRY_LATER Handle = 0x88980095 - MILERR_TOOMANYSHADERELEMNTS Handle = 0x88980096 - MILERR_MROW_READLOCK_FAILED Handle = 0x88980097 - MILERR_MROW_UPDATE_FAILED Handle = 0x88980098 - MILERR_SHADER_COMPILE_FAILED Handle = 0x88980099 - MILERR_MAX_TEXTURE_SIZE_EXCEEDED Handle = 0x8898009A - MILERR_QPC_TIME_WENT_BACKWARD Handle = 0x8898009B - MILERR_DXGI_ENUMERATION_OUT_OF_SYNC Handle = 0x8898009D - MILERR_ADAPTER_NOT_FOUND Handle = 0x8898009E - MILERR_COLORSPACE_NOT_SUPPORTED Handle = 0x8898009F - MILERR_PREFILTER_NOT_SUPPORTED Handle = 0x889800A0 - MILERR_DISPLAYID_ACCESS_DENIED Handle = 0x889800A1 - UCEERR_INVALIDPACKETHEADER Handle = 0x88980400 - UCEERR_UNKNOWNPACKET Handle = 0x88980401 - UCEERR_ILLEGALPACKET Handle = 0x88980402 - UCEERR_MALFORMEDPACKET Handle = 0x88980403 - UCEERR_ILLEGALHANDLE Handle = 0x88980404 - UCEERR_HANDLELOOKUPFAILED Handle = 0x88980405 - UCEERR_RENDERTHREADFAILURE Handle = 0x88980406 - UCEERR_CTXSTACKFRSTTARGETNULL Handle = 0x88980407 - UCEERR_CONNECTIONIDLOOKUPFAILED Handle = 0x88980408 - UCEERR_BLOCKSFULL Handle = 0x88980409 - UCEERR_MEMORYFAILURE Handle = 0x8898040A - UCEERR_PACKETRECORDOUTOFRANGE Handle = 0x8898040B - UCEERR_ILLEGALRECORDTYPE Handle = 0x8898040C - UCEERR_OUTOFHANDLES Handle = 0x8898040D - UCEERR_UNCHANGABLE_UPDATE_ATTEMPTED Handle = 0x8898040E - UCEERR_NO_MULTIPLE_WORKER_THREADS Handle = 0x8898040F - UCEERR_REMOTINGNOTSUPPORTED Handle = 0x88980410 - UCEERR_MISSINGENDCOMMAND Handle = 0x88980411 - UCEERR_MISSINGBEGINCOMMAND Handle = 0x88980412 - UCEERR_CHANNELSYNCTIMEDOUT Handle = 0x88980413 - UCEERR_CHANNELSYNCABANDONED Handle = 0x88980414 - UCEERR_UNSUPPORTEDTRANSPORTVERSION Handle = 0x88980415 - 
UCEERR_TRANSPORTUNAVAILABLE Handle = 0x88980416 - UCEERR_FEEDBACK_UNSUPPORTED Handle = 0x88980417 - UCEERR_COMMANDTRANSPORTDENIED Handle = 0x88980418 - UCEERR_GRAPHICSSTREAMUNAVAILABLE Handle = 0x88980419 - UCEERR_GRAPHICSSTREAMALREADYOPEN Handle = 0x88980420 - UCEERR_TRANSPORTDISCONNECTED Handle = 0x88980421 - UCEERR_TRANSPORTOVERLOADED Handle = 0x88980422 - UCEERR_PARTITION_ZOMBIED Handle = 0x88980423 - MILAVERR_NOCLOCK Handle = 0x88980500 - MILAVERR_NOMEDIATYPE Handle = 0x88980501 - MILAVERR_NOVIDEOMIXER Handle = 0x88980502 - MILAVERR_NOVIDEOPRESENTER Handle = 0x88980503 - MILAVERR_NOREADYFRAMES Handle = 0x88980504 - MILAVERR_MODULENOTLOADED Handle = 0x88980505 - MILAVERR_WMPFACTORYNOTREGISTERED Handle = 0x88980506 - MILAVERR_INVALIDWMPVERSION Handle = 0x88980507 - MILAVERR_INSUFFICIENTVIDEORESOURCES Handle = 0x88980508 - MILAVERR_VIDEOACCELERATIONNOTAVAILABLE Handle = 0x88980509 - MILAVERR_REQUESTEDTEXTURETOOBIG Handle = 0x8898050A - MILAVERR_SEEKFAILED Handle = 0x8898050B - MILAVERR_UNEXPECTEDWMPFAILURE Handle = 0x8898050C - MILAVERR_MEDIAPLAYERCLOSED Handle = 0x8898050D - MILAVERR_UNKNOWNHARDWAREERROR Handle = 0x8898050E - MILEFFECTSERR_UNKNOWNPROPERTY Handle = 0x8898060E - MILEFFECTSERR_EFFECTNOTPARTOFGROUP Handle = 0x8898060F - MILEFFECTSERR_NOINPUTSOURCEATTACHED Handle = 0x88980610 - MILEFFECTSERR_CONNECTORNOTCONNECTED Handle = 0x88980611 - MILEFFECTSERR_CONNECTORNOTASSOCIATEDWITHEFFECT Handle = 0x88980612 - MILEFFECTSERR_RESERVED Handle = 0x88980613 - MILEFFECTSERR_CYCLEDETECTED Handle = 0x88980614 - MILEFFECTSERR_EFFECTINMORETHANONEGRAPH Handle = 0x88980615 - MILEFFECTSERR_EFFECTALREADYINAGRAPH Handle = 0x88980616 - MILEFFECTSERR_EFFECTHASNOCHILDREN Handle = 0x88980617 - MILEFFECTSERR_ALREADYATTACHEDTOLISTENER Handle = 0x88980618 - MILEFFECTSERR_NOTAFFINETRANSFORM Handle = 0x88980619 - MILEFFECTSERR_EMPTYBOUNDS Handle = 0x8898061A - MILEFFECTSERR_OUTPUTSIZETOOLARGE Handle = 0x8898061B - DWMERR_STATE_TRANSITION_FAILED Handle = 0x88980700 - DWMERR_THEME_FAILED Handle = 0x88980701 - DWMERR_CATASTROPHIC_FAILURE Handle = 0x88980702 - DCOMPOSITION_ERROR_WINDOW_ALREADY_COMPOSED Handle = 0x88980800 - DCOMPOSITION_ERROR_SURFACE_BEING_RENDERED Handle = 0x88980801 - DCOMPOSITION_ERROR_SURFACE_NOT_BEING_RENDERED Handle = 0x88980802 - ONL_E_INVALID_AUTHENTICATION_TARGET Handle = 0x80860001 - ONL_E_ACCESS_DENIED_BY_TOU Handle = 0x80860002 - ONL_E_INVALID_APPLICATION Handle = 0x80860003 - ONL_E_PASSWORD_UPDATE_REQUIRED Handle = 0x80860004 - ONL_E_ACCOUNT_UPDATE_REQUIRED Handle = 0x80860005 - ONL_E_FORCESIGNIN Handle = 0x80860006 - ONL_E_ACCOUNT_LOCKED Handle = 0x80860007 - ONL_E_PARENTAL_CONSENT_REQUIRED Handle = 0x80860008 - ONL_E_EMAIL_VERIFICATION_REQUIRED Handle = 0x80860009 - ONL_E_ACCOUNT_SUSPENDED_COMPROIMISE Handle = 0x8086000A - ONL_E_ACCOUNT_SUSPENDED_ABUSE Handle = 0x8086000B - ONL_E_ACTION_REQUIRED Handle = 0x8086000C - ONL_CONNECTION_COUNT_LIMIT Handle = 0x8086000D - ONL_E_CONNECTED_ACCOUNT_CAN_NOT_SIGNOUT Handle = 0x8086000E - ONL_E_USER_AUTHENTICATION_REQUIRED Handle = 0x8086000F - ONL_E_REQUEST_THROTTLED Handle = 0x80860010 - FA_E_MAX_PERSISTED_ITEMS_REACHED Handle = 0x80270220 - FA_E_HOMEGROUP_NOT_AVAILABLE Handle = 0x80270222 - E_MONITOR_RESOLUTION_TOO_LOW Handle = 0x80270250 - E_ELEVATED_ACTIVATION_NOT_SUPPORTED Handle = 0x80270251 - E_UAC_DISABLED Handle = 0x80270252 - E_FULL_ADMIN_NOT_SUPPORTED Handle = 0x80270253 - E_APPLICATION_NOT_REGISTERED Handle = 0x80270254 - E_MULTIPLE_EXTENSIONS_FOR_APPLICATION Handle = 0x80270255 - E_MULTIPLE_PACKAGES_FOR_FAMILY Handle = 
0x80270256 - E_APPLICATION_MANAGER_NOT_RUNNING Handle = 0x80270257 - S_STORE_LAUNCHED_FOR_REMEDIATION Handle = 0x00270258 - S_APPLICATION_ACTIVATION_ERROR_HANDLED_BY_DIALOG Handle = 0x00270259 - E_APPLICATION_ACTIVATION_TIMED_OUT Handle = 0x8027025A - E_APPLICATION_ACTIVATION_EXEC_FAILURE Handle = 0x8027025B - E_APPLICATION_TEMPORARY_LICENSE_ERROR Handle = 0x8027025C - E_APPLICATION_TRIAL_LICENSE_EXPIRED Handle = 0x8027025D - E_SKYDRIVE_ROOT_TARGET_FILE_SYSTEM_NOT_SUPPORTED Handle = 0x80270260 - E_SKYDRIVE_ROOT_TARGET_OVERLAP Handle = 0x80270261 - E_SKYDRIVE_ROOT_TARGET_CANNOT_INDEX Handle = 0x80270262 - E_SKYDRIVE_FILE_NOT_UPLOADED Handle = 0x80270263 - E_SKYDRIVE_UPDATE_AVAILABILITY_FAIL Handle = 0x80270264 - E_SKYDRIVE_ROOT_TARGET_VOLUME_ROOT_NOT_SUPPORTED Handle = 0x80270265 - E_SYNCENGINE_FILE_SIZE_OVER_LIMIT Handle = 0x8802B001 - E_SYNCENGINE_FILE_SIZE_EXCEEDS_REMAINING_QUOTA Handle = 0x8802B002 - E_SYNCENGINE_UNSUPPORTED_FILE_NAME Handle = 0x8802B003 - E_SYNCENGINE_FOLDER_ITEM_COUNT_LIMIT_EXCEEDED Handle = 0x8802B004 - E_SYNCENGINE_FILE_SYNC_PARTNER_ERROR Handle = 0x8802B005 - E_SYNCENGINE_SYNC_PAUSED_BY_SERVICE Handle = 0x8802B006 - E_SYNCENGINE_FILE_IDENTIFIER_UNKNOWN Handle = 0x8802C002 - E_SYNCENGINE_SERVICE_AUTHENTICATION_FAILED Handle = 0x8802C003 - E_SYNCENGINE_UNKNOWN_SERVICE_ERROR Handle = 0x8802C004 - E_SYNCENGINE_SERVICE_RETURNED_UNEXPECTED_SIZE Handle = 0x8802C005 - E_SYNCENGINE_REQUEST_BLOCKED_BY_SERVICE Handle = 0x8802C006 - E_SYNCENGINE_REQUEST_BLOCKED_DUE_TO_CLIENT_ERROR Handle = 0x8802C007 - E_SYNCENGINE_FOLDER_INACCESSIBLE Handle = 0x8802D001 - E_SYNCENGINE_UNSUPPORTED_FOLDER_NAME Handle = 0x8802D002 - E_SYNCENGINE_UNSUPPORTED_MARKET Handle = 0x8802D003 - E_SYNCENGINE_PATH_LENGTH_LIMIT_EXCEEDED Handle = 0x8802D004 - E_SYNCENGINE_REMOTE_PATH_LENGTH_LIMIT_EXCEEDED Handle = 0x8802D005 - E_SYNCENGINE_CLIENT_UPDATE_NEEDED Handle = 0x8802D006 - E_SYNCENGINE_PROXY_AUTHENTICATION_REQUIRED Handle = 0x8802D007 - E_SYNCENGINE_STORAGE_SERVICE_PROVISIONING_FAILED Handle = 0x8802D008 - E_SYNCENGINE_UNSUPPORTED_REPARSE_POINT Handle = 0x8802D009 - E_SYNCENGINE_STORAGE_SERVICE_BLOCKED Handle = 0x8802D00A - E_SYNCENGINE_FOLDER_IN_REDIRECTION Handle = 0x8802D00B - EAS_E_POLICY_NOT_MANAGED_BY_OS Handle = 0x80550001 - EAS_E_POLICY_COMPLIANT_WITH_ACTIONS Handle = 0x80550002 - EAS_E_REQUESTED_POLICY_NOT_ENFORCEABLE Handle = 0x80550003 - EAS_E_CURRENT_USER_HAS_BLANK_PASSWORD Handle = 0x80550004 - EAS_E_REQUESTED_POLICY_PASSWORD_EXPIRATION_INCOMPATIBLE Handle = 0x80550005 - EAS_E_USER_CANNOT_CHANGE_PASSWORD Handle = 0x80550006 - EAS_E_ADMINS_HAVE_BLANK_PASSWORD Handle = 0x80550007 - EAS_E_ADMINS_CANNOT_CHANGE_PASSWORD Handle = 0x80550008 - EAS_E_LOCAL_CONTROLLED_USERS_CANNOT_CHANGE_PASSWORD Handle = 0x80550009 - EAS_E_PASSWORD_POLICY_NOT_ENFORCEABLE_FOR_CONNECTED_ADMINS Handle = 0x8055000A - EAS_E_CONNECTED_ADMINS_NEED_TO_CHANGE_PASSWORD Handle = 0x8055000B - EAS_E_PASSWORD_POLICY_NOT_ENFORCEABLE_FOR_CURRENT_CONNECTED_USER Handle = 0x8055000C - EAS_E_CURRENT_CONNECTED_USER_NEED_TO_CHANGE_PASSWORD Handle = 0x8055000D - WEB_E_UNSUPPORTED_FORMAT Handle = 0x83750001 - WEB_E_INVALID_XML Handle = 0x83750002 - WEB_E_MISSING_REQUIRED_ELEMENT Handle = 0x83750003 - WEB_E_MISSING_REQUIRED_ATTRIBUTE Handle = 0x83750004 - WEB_E_UNEXPECTED_CONTENT Handle = 0x83750005 - WEB_E_RESOURCE_TOO_LARGE Handle = 0x83750006 - WEB_E_INVALID_JSON_STRING Handle = 0x83750007 - WEB_E_INVALID_JSON_NUMBER Handle = 0x83750008 - WEB_E_JSON_VALUE_NOT_FOUND Handle = 0x83750009 - HTTP_E_STATUS_UNEXPECTED Handle = 0x80190001 - 
HTTP_E_STATUS_UNEXPECTED_REDIRECTION Handle = 0x80190003 - HTTP_E_STATUS_UNEXPECTED_CLIENT_ERROR Handle = 0x80190004 - HTTP_E_STATUS_UNEXPECTED_SERVER_ERROR Handle = 0x80190005 - HTTP_E_STATUS_AMBIGUOUS Handle = 0x8019012C - HTTP_E_STATUS_MOVED Handle = 0x8019012D - HTTP_E_STATUS_REDIRECT Handle = 0x8019012E - HTTP_E_STATUS_REDIRECT_METHOD Handle = 0x8019012F - HTTP_E_STATUS_NOT_MODIFIED Handle = 0x80190130 - HTTP_E_STATUS_USE_PROXY Handle = 0x80190131 - HTTP_E_STATUS_REDIRECT_KEEP_VERB Handle = 0x80190133 - HTTP_E_STATUS_BAD_REQUEST Handle = 0x80190190 - HTTP_E_STATUS_DENIED Handle = 0x80190191 - HTTP_E_STATUS_PAYMENT_REQ Handle = 0x80190192 - HTTP_E_STATUS_FORBIDDEN Handle = 0x80190193 - HTTP_E_STATUS_NOT_FOUND Handle = 0x80190194 - HTTP_E_STATUS_BAD_METHOD Handle = 0x80190195 - HTTP_E_STATUS_NONE_ACCEPTABLE Handle = 0x80190196 - HTTP_E_STATUS_PROXY_AUTH_REQ Handle = 0x80190197 - HTTP_E_STATUS_REQUEST_TIMEOUT Handle = 0x80190198 - HTTP_E_STATUS_CONFLICT Handle = 0x80190199 - HTTP_E_STATUS_GONE Handle = 0x8019019A - HTTP_E_STATUS_LENGTH_REQUIRED Handle = 0x8019019B - HTTP_E_STATUS_PRECOND_FAILED Handle = 0x8019019C - HTTP_E_STATUS_REQUEST_TOO_LARGE Handle = 0x8019019D - HTTP_E_STATUS_URI_TOO_LONG Handle = 0x8019019E - HTTP_E_STATUS_UNSUPPORTED_MEDIA Handle = 0x8019019F - HTTP_E_STATUS_RANGE_NOT_SATISFIABLE Handle = 0x801901A0 - HTTP_E_STATUS_EXPECTATION_FAILED Handle = 0x801901A1 - HTTP_E_STATUS_SERVER_ERROR Handle = 0x801901F4 - HTTP_E_STATUS_NOT_SUPPORTED Handle = 0x801901F5 - HTTP_E_STATUS_BAD_GATEWAY Handle = 0x801901F6 - HTTP_E_STATUS_SERVICE_UNAVAIL Handle = 0x801901F7 - HTTP_E_STATUS_GATEWAY_TIMEOUT Handle = 0x801901F8 - HTTP_E_STATUS_VERSION_NOT_SUP Handle = 0x801901F9 - E_INVALID_PROTOCOL_OPERATION Handle = 0x83760001 - E_INVALID_PROTOCOL_FORMAT Handle = 0x83760002 - E_PROTOCOL_EXTENSIONS_NOT_SUPPORTED Handle = 0x83760003 - E_SUBPROTOCOL_NOT_SUPPORTED Handle = 0x83760004 - E_PROTOCOL_VERSION_NOT_SUPPORTED Handle = 0x83760005 - INPUT_E_OUT_OF_ORDER Handle = 0x80400000 - INPUT_E_REENTRANCY Handle = 0x80400001 - INPUT_E_MULTIMODAL Handle = 0x80400002 - INPUT_E_PACKET Handle = 0x80400003 - INPUT_E_FRAME Handle = 0x80400004 - INPUT_E_HISTORY Handle = 0x80400005 - INPUT_E_DEVICE_INFO Handle = 0x80400006 - INPUT_E_TRANSFORM Handle = 0x80400007 - INPUT_E_DEVICE_PROPERTY Handle = 0x80400008 - INET_E_INVALID_URL Handle = 0x800C0002 - INET_E_NO_SESSION Handle = 0x800C0003 - INET_E_CANNOT_CONNECT Handle = 0x800C0004 - INET_E_RESOURCE_NOT_FOUND Handle = 0x800C0005 - INET_E_OBJECT_NOT_FOUND Handle = 0x800C0006 - INET_E_DATA_NOT_AVAILABLE Handle = 0x800C0007 - INET_E_DOWNLOAD_FAILURE Handle = 0x800C0008 - INET_E_AUTHENTICATION_REQUIRED Handle = 0x800C0009 - INET_E_NO_VALID_MEDIA Handle = 0x800C000A - INET_E_CONNECTION_TIMEOUT Handle = 0x800C000B - INET_E_INVALID_REQUEST Handle = 0x800C000C - INET_E_UNKNOWN_PROTOCOL Handle = 0x800C000D - INET_E_SECURITY_PROBLEM Handle = 0x800C000E - INET_E_CANNOT_LOAD_DATA Handle = 0x800C000F - INET_E_CANNOT_INSTANTIATE_OBJECT Handle = 0x800C0010 - INET_E_INVALID_CERTIFICATE Handle = 0x800C0019 - INET_E_REDIRECT_FAILED Handle = 0x800C0014 - INET_E_REDIRECT_TO_DIR Handle = 0x800C0015 - ERROR_DBG_CREATE_PROCESS_FAILURE_LOCKDOWN Handle = 0x80B00001 - ERROR_DBG_ATTACH_PROCESS_FAILURE_LOCKDOWN Handle = 0x80B00002 - ERROR_DBG_CONNECT_SERVER_FAILURE_LOCKDOWN Handle = 0x80B00003 - ERROR_DBG_START_SERVER_FAILURE_LOCKDOWN Handle = 0x80B00004 - ERROR_IO_PREEMPTED Handle = 0x89010001 - JSCRIPT_E_CANTEXECUTE Handle = 0x89020001 - WEP_E_NOT_PROVISIONED_ON_ALL_VOLUMES Handle = 
0x88010001 - WEP_E_FIXED_DATA_NOT_SUPPORTED Handle = 0x88010002 - WEP_E_HARDWARE_NOT_COMPLIANT Handle = 0x88010003 - WEP_E_LOCK_NOT_CONFIGURED Handle = 0x88010004 - WEP_E_PROTECTION_SUSPENDED Handle = 0x88010005 - WEP_E_NO_LICENSE Handle = 0x88010006 - WEP_E_OS_NOT_PROTECTED Handle = 0x88010007 - WEP_E_UNEXPECTED_FAIL Handle = 0x88010008 - WEP_E_BUFFER_TOO_LARGE Handle = 0x88010009 - ERROR_SVHDX_ERROR_STORED Handle = 0xC05C0000 - ERROR_SVHDX_ERROR_NOT_AVAILABLE Handle = 0xC05CFF00 - ERROR_SVHDX_UNIT_ATTENTION_AVAILABLE Handle = 0xC05CFF01 - ERROR_SVHDX_UNIT_ATTENTION_CAPACITY_DATA_CHANGED Handle = 0xC05CFF02 - ERROR_SVHDX_UNIT_ATTENTION_RESERVATIONS_PREEMPTED Handle = 0xC05CFF03 - ERROR_SVHDX_UNIT_ATTENTION_RESERVATIONS_RELEASED Handle = 0xC05CFF04 - ERROR_SVHDX_UNIT_ATTENTION_REGISTRATIONS_PREEMPTED Handle = 0xC05CFF05 - ERROR_SVHDX_UNIT_ATTENTION_OPERATING_DEFINITION_CHANGED Handle = 0xC05CFF06 - ERROR_SVHDX_RESERVATION_CONFLICT Handle = 0xC05CFF07 - ERROR_SVHDX_WRONG_FILE_TYPE Handle = 0xC05CFF08 - ERROR_SVHDX_VERSION_MISMATCH Handle = 0xC05CFF09 - ERROR_VHD_SHARED Handle = 0xC05CFF0A - ERROR_SVHDX_NO_INITIATOR Handle = 0xC05CFF0B - ERROR_VHDSET_BACKING_STORAGE_NOT_FOUND Handle = 0xC05CFF0C - ERROR_SMB_NO_PREAUTH_INTEGRITY_HASH_OVERLAP Handle = 0xC05D0000 - ERROR_SMB_BAD_CLUSTER_DIALECT Handle = 0xC05D0001 - WININET_E_OUT_OF_HANDLES Handle = 0x80072EE1 - WININET_E_TIMEOUT Handle = 0x80072EE2 - WININET_E_EXTENDED_ERROR Handle = 0x80072EE3 - WININET_E_INTERNAL_ERROR Handle = 0x80072EE4 - WININET_E_INVALID_URL Handle = 0x80072EE5 - WININET_E_UNRECOGNIZED_SCHEME Handle = 0x80072EE6 - WININET_E_NAME_NOT_RESOLVED Handle = 0x80072EE7 - WININET_E_PROTOCOL_NOT_FOUND Handle = 0x80072EE8 - WININET_E_INVALID_OPTION Handle = 0x80072EE9 - WININET_E_BAD_OPTION_LENGTH Handle = 0x80072EEA - WININET_E_OPTION_NOT_SETTABLE Handle = 0x80072EEB - WININET_E_SHUTDOWN Handle = 0x80072EEC - WININET_E_INCORRECT_USER_NAME Handle = 0x80072EED - WININET_E_INCORRECT_PASSWORD Handle = 0x80072EEE - WININET_E_LOGIN_FAILURE Handle = 0x80072EEF - WININET_E_INVALID_OPERATION Handle = 0x80072EF0 - WININET_E_OPERATION_CANCELLED Handle = 0x80072EF1 - WININET_E_INCORRECT_HANDLE_TYPE Handle = 0x80072EF2 - WININET_E_INCORRECT_HANDLE_STATE Handle = 0x80072EF3 - WININET_E_NOT_PROXY_REQUEST Handle = 0x80072EF4 - WININET_E_REGISTRY_VALUE_NOT_FOUND Handle = 0x80072EF5 - WININET_E_BAD_REGISTRY_PARAMETER Handle = 0x80072EF6 - WININET_E_NO_DIRECT_ACCESS Handle = 0x80072EF7 - WININET_E_NO_CONTEXT Handle = 0x80072EF8 - WININET_E_NO_CALLBACK Handle = 0x80072EF9 - WININET_E_REQUEST_PENDING Handle = 0x80072EFA - WININET_E_INCORRECT_FORMAT Handle = 0x80072EFB - WININET_E_ITEM_NOT_FOUND Handle = 0x80072EFC - WININET_E_CANNOT_CONNECT Handle = 0x80072EFD - WININET_E_CONNECTION_ABORTED Handle = 0x80072EFE - WININET_E_CONNECTION_RESET Handle = 0x80072EFF - WININET_E_FORCE_RETRY Handle = 0x80072F00 - WININET_E_INVALID_PROXY_REQUEST Handle = 0x80072F01 - WININET_E_NEED_UI Handle = 0x80072F02 - WININET_E_HANDLE_EXISTS Handle = 0x80072F04 - WININET_E_SEC_CERT_DATE_INVALID Handle = 0x80072F05 - WININET_E_SEC_CERT_CN_INVALID Handle = 0x80072F06 - WININET_E_HTTP_TO_HTTPS_ON_REDIR Handle = 0x80072F07 - WININET_E_HTTPS_TO_HTTP_ON_REDIR Handle = 0x80072F08 - WININET_E_MIXED_SECURITY Handle = 0x80072F09 - WININET_E_CHG_POST_IS_NON_SECURE Handle = 0x80072F0A - WININET_E_POST_IS_NON_SECURE Handle = 0x80072F0B - WININET_E_CLIENT_AUTH_CERT_NEEDED Handle = 0x80072F0C - WININET_E_INVALID_CA Handle = 0x80072F0D - WININET_E_CLIENT_AUTH_NOT_SETUP Handle = 0x80072F0E 
- WININET_E_ASYNC_THREAD_FAILED Handle = 0x80072F0F - WININET_E_REDIRECT_SCHEME_CHANGE Handle = 0x80072F10 - WININET_E_DIALOG_PENDING Handle = 0x80072F11 - WININET_E_RETRY_DIALOG Handle = 0x80072F12 - WININET_E_NO_NEW_CONTAINERS Handle = 0x80072F13 - WININET_E_HTTPS_HTTP_SUBMIT_REDIR Handle = 0x80072F14 - WININET_E_SEC_CERT_ERRORS Handle = 0x80072F17 - WININET_E_SEC_CERT_REV_FAILED Handle = 0x80072F19 - WININET_E_HEADER_NOT_FOUND Handle = 0x80072F76 - WININET_E_DOWNLEVEL_SERVER Handle = 0x80072F77 - WININET_E_INVALID_SERVER_RESPONSE Handle = 0x80072F78 - WININET_E_INVALID_HEADER Handle = 0x80072F79 - WININET_E_INVALID_QUERY_REQUEST Handle = 0x80072F7A - WININET_E_HEADER_ALREADY_EXISTS Handle = 0x80072F7B - WININET_E_REDIRECT_FAILED Handle = 0x80072F7C - WININET_E_SECURITY_CHANNEL_ERROR Handle = 0x80072F7D - WININET_E_UNABLE_TO_CACHE_FILE Handle = 0x80072F7E - WININET_E_TCPIP_NOT_INSTALLED Handle = 0x80072F7F - WININET_E_DISCONNECTED Handle = 0x80072F83 - WININET_E_SERVER_UNREACHABLE Handle = 0x80072F84 - WININET_E_PROXY_SERVER_UNREACHABLE Handle = 0x80072F85 - WININET_E_BAD_AUTO_PROXY_SCRIPT Handle = 0x80072F86 - WININET_E_UNABLE_TO_DOWNLOAD_SCRIPT Handle = 0x80072F87 - WININET_E_SEC_INVALID_CERT Handle = 0x80072F89 - WININET_E_SEC_CERT_REVOKED Handle = 0x80072F8A - WININET_E_FAILED_DUETOSECURITYCHECK Handle = 0x80072F8B - WININET_E_NOT_INITIALIZED Handle = 0x80072F8C - WININET_E_LOGIN_FAILURE_DISPLAY_ENTITY_BODY Handle = 0x80072F8E - WININET_E_DECODING_FAILED Handle = 0x80072F8F - WININET_E_NOT_REDIRECTED Handle = 0x80072F80 - WININET_E_COOKIE_NEEDS_CONFIRMATION Handle = 0x80072F81 - WININET_E_COOKIE_DECLINED Handle = 0x80072F82 - WININET_E_REDIRECT_NEEDS_CONFIRMATION Handle = 0x80072F88 - SQLITE_E_ERROR Handle = 0x87AF0001 - SQLITE_E_INTERNAL Handle = 0x87AF0002 - SQLITE_E_PERM Handle = 0x87AF0003 - SQLITE_E_ABORT Handle = 0x87AF0004 - SQLITE_E_BUSY Handle = 0x87AF0005 - SQLITE_E_LOCKED Handle = 0x87AF0006 - SQLITE_E_NOMEM Handle = 0x87AF0007 - SQLITE_E_READONLY Handle = 0x87AF0008 - SQLITE_E_INTERRUPT Handle = 0x87AF0009 - SQLITE_E_IOERR Handle = 0x87AF000A - SQLITE_E_CORRUPT Handle = 0x87AF000B - SQLITE_E_NOTFOUND Handle = 0x87AF000C - SQLITE_E_FULL Handle = 0x87AF000D - SQLITE_E_CANTOPEN Handle = 0x87AF000E - SQLITE_E_PROTOCOL Handle = 0x87AF000F - SQLITE_E_EMPTY Handle = 0x87AF0010 - SQLITE_E_SCHEMA Handle = 0x87AF0011 - SQLITE_E_TOOBIG Handle = 0x87AF0012 - SQLITE_E_CONSTRAINT Handle = 0x87AF0013 - SQLITE_E_MISMATCH Handle = 0x87AF0014 - SQLITE_E_MISUSE Handle = 0x87AF0015 - SQLITE_E_NOLFS Handle = 0x87AF0016 - SQLITE_E_AUTH Handle = 0x87AF0017 - SQLITE_E_FORMAT Handle = 0x87AF0018 - SQLITE_E_RANGE Handle = 0x87AF0019 - SQLITE_E_NOTADB Handle = 0x87AF001A - SQLITE_E_NOTICE Handle = 0x87AF001B - SQLITE_E_WARNING Handle = 0x87AF001C - SQLITE_E_ROW Handle = 0x87AF0064 - SQLITE_E_DONE Handle = 0x87AF0065 - SQLITE_E_IOERR_READ Handle = 0x87AF010A - SQLITE_E_IOERR_SHORT_READ Handle = 0x87AF020A - SQLITE_E_IOERR_WRITE Handle = 0x87AF030A - SQLITE_E_IOERR_FSYNC Handle = 0x87AF040A - SQLITE_E_IOERR_DIR_FSYNC Handle = 0x87AF050A - SQLITE_E_IOERR_TRUNCATE Handle = 0x87AF060A - SQLITE_E_IOERR_FSTAT Handle = 0x87AF070A - SQLITE_E_IOERR_UNLOCK Handle = 0x87AF080A - SQLITE_E_IOERR_RDLOCK Handle = 0x87AF090A - SQLITE_E_IOERR_DELETE Handle = 0x87AF0A0A - SQLITE_E_IOERR_BLOCKED Handle = 0x87AF0B0A - SQLITE_E_IOERR_NOMEM Handle = 0x87AF0C0A - SQLITE_E_IOERR_ACCESS Handle = 0x87AF0D0A - SQLITE_E_IOERR_CHECKRESERVEDLOCK Handle = 0x87AF0E0A - SQLITE_E_IOERR_LOCK Handle = 0x87AF0F0A - SQLITE_E_IOERR_CLOSE 
Handle = 0x87AF100A
(Windows error-constant table, continued; each entry has the form NAME Type = hex value. This span covers the SQLITE_E_* extended SQLite result codes (Handle, 0x87AFxxxx), the UTC_E_* codes (Handle, 0x87C51001 through 0x87C5105C), the WINML_ERR_* codes (Handle, 0x88900001 through 0x88900004), and the STATUS_* codes (NTStatus), running from STATUS_SUCCESS (0x00000000) through the informational 0x4xxxxxxx, warning 0x8xxxxxxx, and error 0xCxxxxxxx ranges into the STATUS_CTX_* codes starting at 0xC00A0001. The listing continues below.)
STATUS_CTX_TD_ERROR NTStatus =
0xC00A0010 - STATUS_CTX_LICENSE_CLIENT_INVALID NTStatus = 0xC00A0012 - STATUS_CTX_LICENSE_NOT_AVAILABLE NTStatus = 0xC00A0013 - STATUS_CTX_LICENSE_EXPIRED NTStatus = 0xC00A0014 - STATUS_CTX_WINSTATION_NOT_FOUND NTStatus = 0xC00A0015 - STATUS_CTX_WINSTATION_NAME_COLLISION NTStatus = 0xC00A0016 - STATUS_CTX_WINSTATION_BUSY NTStatus = 0xC00A0017 - STATUS_CTX_BAD_VIDEO_MODE NTStatus = 0xC00A0018 - STATUS_CTX_GRAPHICS_INVALID NTStatus = 0xC00A0022 - STATUS_CTX_NOT_CONSOLE NTStatus = 0xC00A0024 - STATUS_CTX_CLIENT_QUERY_TIMEOUT NTStatus = 0xC00A0026 - STATUS_CTX_CONSOLE_DISCONNECT NTStatus = 0xC00A0027 - STATUS_CTX_CONSOLE_CONNECT NTStatus = 0xC00A0028 - STATUS_CTX_SHADOW_DENIED NTStatus = 0xC00A002A - STATUS_CTX_WINSTATION_ACCESS_DENIED NTStatus = 0xC00A002B - STATUS_CTX_INVALID_WD NTStatus = 0xC00A002E - STATUS_CTX_WD_NOT_FOUND NTStatus = 0xC00A002F - STATUS_CTX_SHADOW_INVALID NTStatus = 0xC00A0030 - STATUS_CTX_SHADOW_DISABLED NTStatus = 0xC00A0031 - STATUS_RDP_PROTOCOL_ERROR NTStatus = 0xC00A0032 - STATUS_CTX_CLIENT_LICENSE_NOT_SET NTStatus = 0xC00A0033 - STATUS_CTX_CLIENT_LICENSE_IN_USE NTStatus = 0xC00A0034 - STATUS_CTX_SHADOW_ENDED_BY_MODE_CHANGE NTStatus = 0xC00A0035 - STATUS_CTX_SHADOW_NOT_RUNNING NTStatus = 0xC00A0036 - STATUS_CTX_LOGON_DISABLED NTStatus = 0xC00A0037 - STATUS_CTX_SECURITY_LAYER_ERROR NTStatus = 0xC00A0038 - STATUS_TS_INCOMPATIBLE_SESSIONS NTStatus = 0xC00A0039 - STATUS_TS_VIDEO_SUBSYSTEM_ERROR NTStatus = 0xC00A003A - STATUS_PNP_BAD_MPS_TABLE NTStatus = 0xC0040035 - STATUS_PNP_TRANSLATION_FAILED NTStatus = 0xC0040036 - STATUS_PNP_IRQ_TRANSLATION_FAILED NTStatus = 0xC0040037 - STATUS_PNP_INVALID_ID NTStatus = 0xC0040038 - STATUS_IO_REISSUE_AS_CACHED NTStatus = 0xC0040039 - STATUS_MUI_FILE_NOT_FOUND NTStatus = 0xC00B0001 - STATUS_MUI_INVALID_FILE NTStatus = 0xC00B0002 - STATUS_MUI_INVALID_RC_CONFIG NTStatus = 0xC00B0003 - STATUS_MUI_INVALID_LOCALE_NAME NTStatus = 0xC00B0004 - STATUS_MUI_INVALID_ULTIMATEFALLBACK_NAME NTStatus = 0xC00B0005 - STATUS_MUI_FILE_NOT_LOADED NTStatus = 0xC00B0006 - STATUS_RESOURCE_ENUM_USER_STOP NTStatus = 0xC00B0007 - STATUS_FLT_NO_HANDLER_DEFINED NTStatus = 0xC01C0001 - STATUS_FLT_CONTEXT_ALREADY_DEFINED NTStatus = 0xC01C0002 - STATUS_FLT_INVALID_ASYNCHRONOUS_REQUEST NTStatus = 0xC01C0003 - STATUS_FLT_DISALLOW_FAST_IO NTStatus = 0xC01C0004 - STATUS_FLT_INVALID_NAME_REQUEST NTStatus = 0xC01C0005 - STATUS_FLT_NOT_SAFE_TO_POST_OPERATION NTStatus = 0xC01C0006 - STATUS_FLT_NOT_INITIALIZED NTStatus = 0xC01C0007 - STATUS_FLT_FILTER_NOT_READY NTStatus = 0xC01C0008 - STATUS_FLT_POST_OPERATION_CLEANUP NTStatus = 0xC01C0009 - STATUS_FLT_INTERNAL_ERROR NTStatus = 0xC01C000A - STATUS_FLT_DELETING_OBJECT NTStatus = 0xC01C000B - STATUS_FLT_MUST_BE_NONPAGED_POOL NTStatus = 0xC01C000C - STATUS_FLT_DUPLICATE_ENTRY NTStatus = 0xC01C000D - STATUS_FLT_CBDQ_DISABLED NTStatus = 0xC01C000E - STATUS_FLT_DO_NOT_ATTACH NTStatus = 0xC01C000F - STATUS_FLT_DO_NOT_DETACH NTStatus = 0xC01C0010 - STATUS_FLT_INSTANCE_ALTITUDE_COLLISION NTStatus = 0xC01C0011 - STATUS_FLT_INSTANCE_NAME_COLLISION NTStatus = 0xC01C0012 - STATUS_FLT_FILTER_NOT_FOUND NTStatus = 0xC01C0013 - STATUS_FLT_VOLUME_NOT_FOUND NTStatus = 0xC01C0014 - STATUS_FLT_INSTANCE_NOT_FOUND NTStatus = 0xC01C0015 - STATUS_FLT_CONTEXT_ALLOCATION_NOT_FOUND NTStatus = 0xC01C0016 - STATUS_FLT_INVALID_CONTEXT_REGISTRATION NTStatus = 0xC01C0017 - STATUS_FLT_NAME_CACHE_MISS NTStatus = 0xC01C0018 - STATUS_FLT_NO_DEVICE_OBJECT NTStatus = 0xC01C0019 - STATUS_FLT_VOLUME_ALREADY_MOUNTED NTStatus = 0xC01C001A - 
STATUS_FLT_ALREADY_ENLISTED NTStatus = 0xC01C001B - STATUS_FLT_CONTEXT_ALREADY_LINKED NTStatus = 0xC01C001C - STATUS_FLT_NO_WAITER_FOR_REPLY NTStatus = 0xC01C0020 - STATUS_FLT_REGISTRATION_BUSY NTStatus = 0xC01C0023 - STATUS_SXS_SECTION_NOT_FOUND NTStatus = 0xC0150001 - STATUS_SXS_CANT_GEN_ACTCTX NTStatus = 0xC0150002 - STATUS_SXS_INVALID_ACTCTXDATA_FORMAT NTStatus = 0xC0150003 - STATUS_SXS_ASSEMBLY_NOT_FOUND NTStatus = 0xC0150004 - STATUS_SXS_MANIFEST_FORMAT_ERROR NTStatus = 0xC0150005 - STATUS_SXS_MANIFEST_PARSE_ERROR NTStatus = 0xC0150006 - STATUS_SXS_ACTIVATION_CONTEXT_DISABLED NTStatus = 0xC0150007 - STATUS_SXS_KEY_NOT_FOUND NTStatus = 0xC0150008 - STATUS_SXS_VERSION_CONFLICT NTStatus = 0xC0150009 - STATUS_SXS_WRONG_SECTION_TYPE NTStatus = 0xC015000A - STATUS_SXS_THREAD_QUERIES_DISABLED NTStatus = 0xC015000B - STATUS_SXS_ASSEMBLY_MISSING NTStatus = 0xC015000C - STATUS_SXS_RELEASE_ACTIVATION_CONTEXT NTStatus = 0x4015000D - STATUS_SXS_PROCESS_DEFAULT_ALREADY_SET NTStatus = 0xC015000E - STATUS_SXS_EARLY_DEACTIVATION NTStatus = 0xC015000F - STATUS_SXS_INVALID_DEACTIVATION NTStatus = 0xC0150010 - STATUS_SXS_MULTIPLE_DEACTIVATION NTStatus = 0xC0150011 - STATUS_SXS_SYSTEM_DEFAULT_ACTIVATION_CONTEXT_EMPTY NTStatus = 0xC0150012 - STATUS_SXS_PROCESS_TERMINATION_REQUESTED NTStatus = 0xC0150013 - STATUS_SXS_CORRUPT_ACTIVATION_STACK NTStatus = 0xC0150014 - STATUS_SXS_CORRUPTION NTStatus = 0xC0150015 - STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_VALUE NTStatus = 0xC0150016 - STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_NAME NTStatus = 0xC0150017 - STATUS_SXS_IDENTITY_DUPLICATE_ATTRIBUTE NTStatus = 0xC0150018 - STATUS_SXS_IDENTITY_PARSE_ERROR NTStatus = 0xC0150019 - STATUS_SXS_COMPONENT_STORE_CORRUPT NTStatus = 0xC015001A - STATUS_SXS_FILE_HASH_MISMATCH NTStatus = 0xC015001B - STATUS_SXS_MANIFEST_IDENTITY_SAME_BUT_CONTENTS_DIFFERENT NTStatus = 0xC015001C - STATUS_SXS_IDENTITIES_DIFFERENT NTStatus = 0xC015001D - STATUS_SXS_ASSEMBLY_IS_NOT_A_DEPLOYMENT NTStatus = 0xC015001E - STATUS_SXS_FILE_NOT_PART_OF_ASSEMBLY NTStatus = 0xC015001F - STATUS_ADVANCED_INSTALLER_FAILED NTStatus = 0xC0150020 - STATUS_XML_ENCODING_MISMATCH NTStatus = 0xC0150021 - STATUS_SXS_MANIFEST_TOO_BIG NTStatus = 0xC0150022 - STATUS_SXS_SETTING_NOT_REGISTERED NTStatus = 0xC0150023 - STATUS_SXS_TRANSACTION_CLOSURE_INCOMPLETE NTStatus = 0xC0150024 - STATUS_SMI_PRIMITIVE_INSTALLER_FAILED NTStatus = 0xC0150025 - STATUS_GENERIC_COMMAND_FAILED NTStatus = 0xC0150026 - STATUS_SXS_FILE_HASH_MISSING NTStatus = 0xC0150027 - STATUS_CLUSTER_INVALID_NODE NTStatus = 0xC0130001 - STATUS_CLUSTER_NODE_EXISTS NTStatus = 0xC0130002 - STATUS_CLUSTER_JOIN_IN_PROGRESS NTStatus = 0xC0130003 - STATUS_CLUSTER_NODE_NOT_FOUND NTStatus = 0xC0130004 - STATUS_CLUSTER_LOCAL_NODE_NOT_FOUND NTStatus = 0xC0130005 - STATUS_CLUSTER_NETWORK_EXISTS NTStatus = 0xC0130006 - STATUS_CLUSTER_NETWORK_NOT_FOUND NTStatus = 0xC0130007 - STATUS_CLUSTER_NETINTERFACE_EXISTS NTStatus = 0xC0130008 - STATUS_CLUSTER_NETINTERFACE_NOT_FOUND NTStatus = 0xC0130009 - STATUS_CLUSTER_INVALID_REQUEST NTStatus = 0xC013000A - STATUS_CLUSTER_INVALID_NETWORK_PROVIDER NTStatus = 0xC013000B - STATUS_CLUSTER_NODE_DOWN NTStatus = 0xC013000C - STATUS_CLUSTER_NODE_UNREACHABLE NTStatus = 0xC013000D - STATUS_CLUSTER_NODE_NOT_MEMBER NTStatus = 0xC013000E - STATUS_CLUSTER_JOIN_NOT_IN_PROGRESS NTStatus = 0xC013000F - STATUS_CLUSTER_INVALID_NETWORK NTStatus = 0xC0130010 - STATUS_CLUSTER_NO_NET_ADAPTERS NTStatus = 0xC0130011 - STATUS_CLUSTER_NODE_UP NTStatus = 0xC0130012 - STATUS_CLUSTER_NODE_PAUSED NTStatus = 0xC0130013 - 
STATUS_CLUSTER_NODE_NOT_PAUSED NTStatus = 0xC0130014 - STATUS_CLUSTER_NO_SECURITY_CONTEXT NTStatus = 0xC0130015 - STATUS_CLUSTER_NETWORK_NOT_INTERNAL NTStatus = 0xC0130016 - STATUS_CLUSTER_POISONED NTStatus = 0xC0130017 - STATUS_CLUSTER_NON_CSV_PATH NTStatus = 0xC0130018 - STATUS_CLUSTER_CSV_VOLUME_NOT_LOCAL NTStatus = 0xC0130019 - STATUS_CLUSTER_CSV_READ_OPLOCK_BREAK_IN_PROGRESS NTStatus = 0xC0130020 - STATUS_CLUSTER_CSV_AUTO_PAUSE_ERROR NTStatus = 0xC0130021 - STATUS_CLUSTER_CSV_REDIRECTED NTStatus = 0xC0130022 - STATUS_CLUSTER_CSV_NOT_REDIRECTED NTStatus = 0xC0130023 - STATUS_CLUSTER_CSV_VOLUME_DRAINING NTStatus = 0xC0130024 - STATUS_CLUSTER_CSV_SNAPSHOT_CREATION_IN_PROGRESS NTStatus = 0xC0130025 - STATUS_CLUSTER_CSV_VOLUME_DRAINING_SUCCEEDED_DOWNLEVEL NTStatus = 0xC0130026 - STATUS_CLUSTER_CSV_NO_SNAPSHOTS NTStatus = 0xC0130027 - STATUS_CSV_IO_PAUSE_TIMEOUT NTStatus = 0xC0130028 - STATUS_CLUSTER_CSV_INVALID_HANDLE NTStatus = 0xC0130029 - STATUS_CLUSTER_CSV_SUPPORTED_ONLY_ON_COORDINATOR NTStatus = 0xC0130030 - STATUS_CLUSTER_CAM_TICKET_REPLAY_DETECTED NTStatus = 0xC0130031 - STATUS_TRANSACTIONAL_CONFLICT NTStatus = 0xC0190001 - STATUS_INVALID_TRANSACTION NTStatus = 0xC0190002 - STATUS_TRANSACTION_NOT_ACTIVE NTStatus = 0xC0190003 - STATUS_TM_INITIALIZATION_FAILED NTStatus = 0xC0190004 - STATUS_RM_NOT_ACTIVE NTStatus = 0xC0190005 - STATUS_RM_METADATA_CORRUPT NTStatus = 0xC0190006 - STATUS_TRANSACTION_NOT_JOINED NTStatus = 0xC0190007 - STATUS_DIRECTORY_NOT_RM NTStatus = 0xC0190008 - STATUS_COULD_NOT_RESIZE_LOG NTStatus = 0x80190009 - STATUS_TRANSACTIONS_UNSUPPORTED_REMOTE NTStatus = 0xC019000A - STATUS_LOG_RESIZE_INVALID_SIZE NTStatus = 0xC019000B - STATUS_REMOTE_FILE_VERSION_MISMATCH NTStatus = 0xC019000C - STATUS_CRM_PROTOCOL_ALREADY_EXISTS NTStatus = 0xC019000F - STATUS_TRANSACTION_PROPAGATION_FAILED NTStatus = 0xC0190010 - STATUS_CRM_PROTOCOL_NOT_FOUND NTStatus = 0xC0190011 - STATUS_TRANSACTION_SUPERIOR_EXISTS NTStatus = 0xC0190012 - STATUS_TRANSACTION_REQUEST_NOT_VALID NTStatus = 0xC0190013 - STATUS_TRANSACTION_NOT_REQUESTED NTStatus = 0xC0190014 - STATUS_TRANSACTION_ALREADY_ABORTED NTStatus = 0xC0190015 - STATUS_TRANSACTION_ALREADY_COMMITTED NTStatus = 0xC0190016 - STATUS_TRANSACTION_INVALID_MARSHALL_BUFFER NTStatus = 0xC0190017 - STATUS_CURRENT_TRANSACTION_NOT_VALID NTStatus = 0xC0190018 - STATUS_LOG_GROWTH_FAILED NTStatus = 0xC0190019 - STATUS_OBJECT_NO_LONGER_EXISTS NTStatus = 0xC0190021 - STATUS_STREAM_MINIVERSION_NOT_FOUND NTStatus = 0xC0190022 - STATUS_STREAM_MINIVERSION_NOT_VALID NTStatus = 0xC0190023 - STATUS_MINIVERSION_INACCESSIBLE_FROM_SPECIFIED_TRANSACTION NTStatus = 0xC0190024 - STATUS_CANT_OPEN_MINIVERSION_WITH_MODIFY_INTENT NTStatus = 0xC0190025 - STATUS_CANT_CREATE_MORE_STREAM_MINIVERSIONS NTStatus = 0xC0190026 - STATUS_HANDLE_NO_LONGER_VALID NTStatus = 0xC0190028 - STATUS_NO_TXF_METADATA NTStatus = 0x80190029 - STATUS_LOG_CORRUPTION_DETECTED NTStatus = 0xC0190030 - STATUS_CANT_RECOVER_WITH_HANDLE_OPEN NTStatus = 0x80190031 - STATUS_RM_DISCONNECTED NTStatus = 0xC0190032 - STATUS_ENLISTMENT_NOT_SUPERIOR NTStatus = 0xC0190033 - STATUS_RECOVERY_NOT_NEEDED NTStatus = 0x40190034 - STATUS_RM_ALREADY_STARTED NTStatus = 0x40190035 - STATUS_FILE_IDENTITY_NOT_PERSISTENT NTStatus = 0xC0190036 - STATUS_CANT_BREAK_TRANSACTIONAL_DEPENDENCY NTStatus = 0xC0190037 - STATUS_CANT_CROSS_RM_BOUNDARY NTStatus = 0xC0190038 - STATUS_TXF_DIR_NOT_EMPTY NTStatus = 0xC0190039 - STATUS_INDOUBT_TRANSACTIONS_EXIST NTStatus = 0xC019003A - STATUS_TM_VOLATILE NTStatus = 0xC019003B - 
STATUS_ROLLBACK_TIMER_EXPIRED NTStatus = 0xC019003C - STATUS_TXF_ATTRIBUTE_CORRUPT NTStatus = 0xC019003D - STATUS_EFS_NOT_ALLOWED_IN_TRANSACTION NTStatus = 0xC019003E - STATUS_TRANSACTIONAL_OPEN_NOT_ALLOWED NTStatus = 0xC019003F - STATUS_TRANSACTED_MAPPING_UNSUPPORTED_REMOTE NTStatus = 0xC0190040 - STATUS_TXF_METADATA_ALREADY_PRESENT NTStatus = 0x80190041 - STATUS_TRANSACTION_SCOPE_CALLBACKS_NOT_SET NTStatus = 0x80190042 - STATUS_TRANSACTION_REQUIRED_PROMOTION NTStatus = 0xC0190043 - STATUS_CANNOT_EXECUTE_FILE_IN_TRANSACTION NTStatus = 0xC0190044 - STATUS_TRANSACTIONS_NOT_FROZEN NTStatus = 0xC0190045 - STATUS_TRANSACTION_FREEZE_IN_PROGRESS NTStatus = 0xC0190046 - STATUS_NOT_SNAPSHOT_VOLUME NTStatus = 0xC0190047 - STATUS_NO_SAVEPOINT_WITH_OPEN_FILES NTStatus = 0xC0190048 - STATUS_SPARSE_NOT_ALLOWED_IN_TRANSACTION NTStatus = 0xC0190049 - STATUS_TM_IDENTITY_MISMATCH NTStatus = 0xC019004A - STATUS_FLOATED_SECTION NTStatus = 0xC019004B - STATUS_CANNOT_ACCEPT_TRANSACTED_WORK NTStatus = 0xC019004C - STATUS_CANNOT_ABORT_TRANSACTIONS NTStatus = 0xC019004D - STATUS_TRANSACTION_NOT_FOUND NTStatus = 0xC019004E - STATUS_RESOURCEMANAGER_NOT_FOUND NTStatus = 0xC019004F - STATUS_ENLISTMENT_NOT_FOUND NTStatus = 0xC0190050 - STATUS_TRANSACTIONMANAGER_NOT_FOUND NTStatus = 0xC0190051 - STATUS_TRANSACTIONMANAGER_NOT_ONLINE NTStatus = 0xC0190052 - STATUS_TRANSACTIONMANAGER_RECOVERY_NAME_COLLISION NTStatus = 0xC0190053 - STATUS_TRANSACTION_NOT_ROOT NTStatus = 0xC0190054 - STATUS_TRANSACTION_OBJECT_EXPIRED NTStatus = 0xC0190055 - STATUS_COMPRESSION_NOT_ALLOWED_IN_TRANSACTION NTStatus = 0xC0190056 - STATUS_TRANSACTION_RESPONSE_NOT_ENLISTED NTStatus = 0xC0190057 - STATUS_TRANSACTION_RECORD_TOO_LONG NTStatus = 0xC0190058 - STATUS_NO_LINK_TRACKING_IN_TRANSACTION NTStatus = 0xC0190059 - STATUS_OPERATION_NOT_SUPPORTED_IN_TRANSACTION NTStatus = 0xC019005A - STATUS_TRANSACTION_INTEGRITY_VIOLATED NTStatus = 0xC019005B - STATUS_TRANSACTIONMANAGER_IDENTITY_MISMATCH NTStatus = 0xC019005C - STATUS_RM_CANNOT_BE_FROZEN_FOR_SNAPSHOT NTStatus = 0xC019005D - STATUS_TRANSACTION_MUST_WRITETHROUGH NTStatus = 0xC019005E - STATUS_TRANSACTION_NO_SUPERIOR NTStatus = 0xC019005F - STATUS_EXPIRED_HANDLE NTStatus = 0xC0190060 - STATUS_TRANSACTION_NOT_ENLISTED NTStatus = 0xC0190061 - STATUS_LOG_SECTOR_INVALID NTStatus = 0xC01A0001 - STATUS_LOG_SECTOR_PARITY_INVALID NTStatus = 0xC01A0002 - STATUS_LOG_SECTOR_REMAPPED NTStatus = 0xC01A0003 - STATUS_LOG_BLOCK_INCOMPLETE NTStatus = 0xC01A0004 - STATUS_LOG_INVALID_RANGE NTStatus = 0xC01A0005 - STATUS_LOG_BLOCKS_EXHAUSTED NTStatus = 0xC01A0006 - STATUS_LOG_READ_CONTEXT_INVALID NTStatus = 0xC01A0007 - STATUS_LOG_RESTART_INVALID NTStatus = 0xC01A0008 - STATUS_LOG_BLOCK_VERSION NTStatus = 0xC01A0009 - STATUS_LOG_BLOCK_INVALID NTStatus = 0xC01A000A - STATUS_LOG_READ_MODE_INVALID NTStatus = 0xC01A000B - STATUS_LOG_NO_RESTART NTStatus = 0x401A000C - STATUS_LOG_METADATA_CORRUPT NTStatus = 0xC01A000D - STATUS_LOG_METADATA_INVALID NTStatus = 0xC01A000E - STATUS_LOG_METADATA_INCONSISTENT NTStatus = 0xC01A000F - STATUS_LOG_RESERVATION_INVALID NTStatus = 0xC01A0010 - STATUS_LOG_CANT_DELETE NTStatus = 0xC01A0011 - STATUS_LOG_CONTAINER_LIMIT_EXCEEDED NTStatus = 0xC01A0012 - STATUS_LOG_START_OF_LOG NTStatus = 0xC01A0013 - STATUS_LOG_POLICY_ALREADY_INSTALLED NTStatus = 0xC01A0014 - STATUS_LOG_POLICY_NOT_INSTALLED NTStatus = 0xC01A0015 - STATUS_LOG_POLICY_INVALID NTStatus = 0xC01A0016 - STATUS_LOG_POLICY_CONFLICT NTStatus = 0xC01A0017 - STATUS_LOG_PINNED_ARCHIVE_TAIL NTStatus = 0xC01A0018 - 
STATUS_LOG_RECORD_NONEXISTENT NTStatus = 0xC01A0019 - STATUS_LOG_RECORDS_RESERVED_INVALID NTStatus = 0xC01A001A - STATUS_LOG_SPACE_RESERVED_INVALID NTStatus = 0xC01A001B - STATUS_LOG_TAIL_INVALID NTStatus = 0xC01A001C - STATUS_LOG_FULL NTStatus = 0xC01A001D - STATUS_LOG_MULTIPLEXED NTStatus = 0xC01A001E - STATUS_LOG_DEDICATED NTStatus = 0xC01A001F - STATUS_LOG_ARCHIVE_NOT_IN_PROGRESS NTStatus = 0xC01A0020 - STATUS_LOG_ARCHIVE_IN_PROGRESS NTStatus = 0xC01A0021 - STATUS_LOG_EPHEMERAL NTStatus = 0xC01A0022 - STATUS_LOG_NOT_ENOUGH_CONTAINERS NTStatus = 0xC01A0023 - STATUS_LOG_CLIENT_ALREADY_REGISTERED NTStatus = 0xC01A0024 - STATUS_LOG_CLIENT_NOT_REGISTERED NTStatus = 0xC01A0025 - STATUS_LOG_FULL_HANDLER_IN_PROGRESS NTStatus = 0xC01A0026 - STATUS_LOG_CONTAINER_READ_FAILED NTStatus = 0xC01A0027 - STATUS_LOG_CONTAINER_WRITE_FAILED NTStatus = 0xC01A0028 - STATUS_LOG_CONTAINER_OPEN_FAILED NTStatus = 0xC01A0029 - STATUS_LOG_CONTAINER_STATE_INVALID NTStatus = 0xC01A002A - STATUS_LOG_STATE_INVALID NTStatus = 0xC01A002B - STATUS_LOG_PINNED NTStatus = 0xC01A002C - STATUS_LOG_METADATA_FLUSH_FAILED NTStatus = 0xC01A002D - STATUS_LOG_INCONSISTENT_SECURITY NTStatus = 0xC01A002E - STATUS_LOG_APPENDED_FLUSH_FAILED NTStatus = 0xC01A002F - STATUS_LOG_PINNED_RESERVATION NTStatus = 0xC01A0030 - STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD NTStatus = 0xC01B00EA - STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD_RECOVERED NTStatus = 0x801B00EB - STATUS_VIDEO_DRIVER_DEBUG_REPORT_REQUEST NTStatus = 0x401B00EC - STATUS_MONITOR_NO_DESCRIPTOR NTStatus = 0xC01D0001 - STATUS_MONITOR_UNKNOWN_DESCRIPTOR_FORMAT NTStatus = 0xC01D0002 - STATUS_MONITOR_INVALID_DESCRIPTOR_CHECKSUM NTStatus = 0xC01D0003 - STATUS_MONITOR_INVALID_STANDARD_TIMING_BLOCK NTStatus = 0xC01D0004 - STATUS_MONITOR_WMI_DATABLOCK_REGISTRATION_FAILED NTStatus = 0xC01D0005 - STATUS_MONITOR_INVALID_SERIAL_NUMBER_MONDSC_BLOCK NTStatus = 0xC01D0006 - STATUS_MONITOR_INVALID_USER_FRIENDLY_MONDSC_BLOCK NTStatus = 0xC01D0007 - STATUS_MONITOR_NO_MORE_DESCRIPTOR_DATA NTStatus = 0xC01D0008 - STATUS_MONITOR_INVALID_DETAILED_TIMING_BLOCK NTStatus = 0xC01D0009 - STATUS_MONITOR_INVALID_MANUFACTURE_DATE NTStatus = 0xC01D000A - STATUS_GRAPHICS_NOT_EXCLUSIVE_MODE_OWNER NTStatus = 0xC01E0000 - STATUS_GRAPHICS_INSUFFICIENT_DMA_BUFFER NTStatus = 0xC01E0001 - STATUS_GRAPHICS_INVALID_DISPLAY_ADAPTER NTStatus = 0xC01E0002 - STATUS_GRAPHICS_ADAPTER_WAS_RESET NTStatus = 0xC01E0003 - STATUS_GRAPHICS_INVALID_DRIVER_MODEL NTStatus = 0xC01E0004 - STATUS_GRAPHICS_PRESENT_MODE_CHANGED NTStatus = 0xC01E0005 - STATUS_GRAPHICS_PRESENT_OCCLUDED NTStatus = 0xC01E0006 - STATUS_GRAPHICS_PRESENT_DENIED NTStatus = 0xC01E0007 - STATUS_GRAPHICS_CANNOTCOLORCONVERT NTStatus = 0xC01E0008 - STATUS_GRAPHICS_DRIVER_MISMATCH NTStatus = 0xC01E0009 - STATUS_GRAPHICS_PARTIAL_DATA_POPULATED NTStatus = 0x401E000A - STATUS_GRAPHICS_PRESENT_REDIRECTION_DISABLED NTStatus = 0xC01E000B - STATUS_GRAPHICS_PRESENT_UNOCCLUDED NTStatus = 0xC01E000C - STATUS_GRAPHICS_WINDOWDC_NOT_AVAILABLE NTStatus = 0xC01E000D - STATUS_GRAPHICS_WINDOWLESS_PRESENT_DISABLED NTStatus = 0xC01E000E - STATUS_GRAPHICS_PRESENT_INVALID_WINDOW NTStatus = 0xC01E000F - STATUS_GRAPHICS_PRESENT_BUFFER_NOT_BOUND NTStatus = 0xC01E0010 - STATUS_GRAPHICS_VAIL_STATE_CHANGED NTStatus = 0xC01E0011 - STATUS_GRAPHICS_INDIRECT_DISPLAY_ABANDON_SWAPCHAIN NTStatus = 0xC01E0012 - STATUS_GRAPHICS_INDIRECT_DISPLAY_DEVICE_STOPPED NTStatus = 0xC01E0013 - STATUS_GRAPHICS_NO_VIDEO_MEMORY NTStatus = 0xC01E0100 - STATUS_GRAPHICS_CANT_LOCK_MEMORY NTStatus = 0xC01E0101 - 
STATUS_GRAPHICS_ALLOCATION_BUSY NTStatus = 0xC01E0102 - STATUS_GRAPHICS_TOO_MANY_REFERENCES NTStatus = 0xC01E0103 - STATUS_GRAPHICS_TRY_AGAIN_LATER NTStatus = 0xC01E0104 - STATUS_GRAPHICS_TRY_AGAIN_NOW NTStatus = 0xC01E0105 - STATUS_GRAPHICS_ALLOCATION_INVALID NTStatus = 0xC01E0106 - STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNAVAILABLE NTStatus = 0xC01E0107 - STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNSUPPORTED NTStatus = 0xC01E0108 - STATUS_GRAPHICS_CANT_EVICT_PINNED_ALLOCATION NTStatus = 0xC01E0109 - STATUS_GRAPHICS_INVALID_ALLOCATION_USAGE NTStatus = 0xC01E0110 - STATUS_GRAPHICS_CANT_RENDER_LOCKED_ALLOCATION NTStatus = 0xC01E0111 - STATUS_GRAPHICS_ALLOCATION_CLOSED NTStatus = 0xC01E0112 - STATUS_GRAPHICS_INVALID_ALLOCATION_INSTANCE NTStatus = 0xC01E0113 - STATUS_GRAPHICS_INVALID_ALLOCATION_HANDLE NTStatus = 0xC01E0114 - STATUS_GRAPHICS_WRONG_ALLOCATION_DEVICE NTStatus = 0xC01E0115 - STATUS_GRAPHICS_ALLOCATION_CONTENT_LOST NTStatus = 0xC01E0116 - STATUS_GRAPHICS_GPU_EXCEPTION_ON_DEVICE NTStatus = 0xC01E0200 - STATUS_GRAPHICS_SKIP_ALLOCATION_PREPARATION NTStatus = 0x401E0201 - STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY NTStatus = 0xC01E0300 - STATUS_GRAPHICS_VIDPN_TOPOLOGY_NOT_SUPPORTED NTStatus = 0xC01E0301 - STATUS_GRAPHICS_VIDPN_TOPOLOGY_CURRENTLY_NOT_SUPPORTED NTStatus = 0xC01E0302 - STATUS_GRAPHICS_INVALID_VIDPN NTStatus = 0xC01E0303 - STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE NTStatus = 0xC01E0304 - STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET NTStatus = 0xC01E0305 - STATUS_GRAPHICS_VIDPN_MODALITY_NOT_SUPPORTED NTStatus = 0xC01E0306 - STATUS_GRAPHICS_MODE_NOT_PINNED NTStatus = 0x401E0307 - STATUS_GRAPHICS_INVALID_VIDPN_SOURCEMODESET NTStatus = 0xC01E0308 - STATUS_GRAPHICS_INVALID_VIDPN_TARGETMODESET NTStatus = 0xC01E0309 - STATUS_GRAPHICS_INVALID_FREQUENCY NTStatus = 0xC01E030A - STATUS_GRAPHICS_INVALID_ACTIVE_REGION NTStatus = 0xC01E030B - STATUS_GRAPHICS_INVALID_TOTAL_REGION NTStatus = 0xC01E030C - STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE_MODE NTStatus = 0xC01E0310 - STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET_MODE NTStatus = 0xC01E0311 - STATUS_GRAPHICS_PINNED_MODE_MUST_REMAIN_IN_SET NTStatus = 0xC01E0312 - STATUS_GRAPHICS_PATH_ALREADY_IN_TOPOLOGY NTStatus = 0xC01E0313 - STATUS_GRAPHICS_MODE_ALREADY_IN_MODESET NTStatus = 0xC01E0314 - STATUS_GRAPHICS_INVALID_VIDEOPRESENTSOURCESET NTStatus = 0xC01E0315 - STATUS_GRAPHICS_INVALID_VIDEOPRESENTTARGETSET NTStatus = 0xC01E0316 - STATUS_GRAPHICS_SOURCE_ALREADY_IN_SET NTStatus = 0xC01E0317 - STATUS_GRAPHICS_TARGET_ALREADY_IN_SET NTStatus = 0xC01E0318 - STATUS_GRAPHICS_INVALID_VIDPN_PRESENT_PATH NTStatus = 0xC01E0319 - STATUS_GRAPHICS_NO_RECOMMENDED_VIDPN_TOPOLOGY NTStatus = 0xC01E031A - STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGESET NTStatus = 0xC01E031B - STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE NTStatus = 0xC01E031C - STATUS_GRAPHICS_FREQUENCYRANGE_NOT_IN_SET NTStatus = 0xC01E031D - STATUS_GRAPHICS_NO_PREFERRED_MODE NTStatus = 0x401E031E - STATUS_GRAPHICS_FREQUENCYRANGE_ALREADY_IN_SET NTStatus = 0xC01E031F - STATUS_GRAPHICS_STALE_MODESET NTStatus = 0xC01E0320 - STATUS_GRAPHICS_INVALID_MONITOR_SOURCEMODESET NTStatus = 0xC01E0321 - STATUS_GRAPHICS_INVALID_MONITOR_SOURCE_MODE NTStatus = 0xC01E0322 - STATUS_GRAPHICS_NO_RECOMMENDED_FUNCTIONAL_VIDPN NTStatus = 0xC01E0323 - STATUS_GRAPHICS_MODE_ID_MUST_BE_UNIQUE NTStatus = 0xC01E0324 - STATUS_GRAPHICS_EMPTY_ADAPTER_MONITOR_MODE_SUPPORT_INTERSECTION NTStatus = 0xC01E0325 - STATUS_GRAPHICS_VIDEO_PRESENT_TARGETS_LESS_THAN_SOURCES NTStatus = 0xC01E0326 - STATUS_GRAPHICS_PATH_NOT_IN_TOPOLOGY 
NTStatus = 0xC01E0327 - STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_SOURCE NTStatus = 0xC01E0328 - STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_TARGET NTStatus = 0xC01E0329 - STATUS_GRAPHICS_INVALID_MONITORDESCRIPTORSET NTStatus = 0xC01E032A - STATUS_GRAPHICS_INVALID_MONITORDESCRIPTOR NTStatus = 0xC01E032B - STATUS_GRAPHICS_MONITORDESCRIPTOR_NOT_IN_SET NTStatus = 0xC01E032C - STATUS_GRAPHICS_MONITORDESCRIPTOR_ALREADY_IN_SET NTStatus = 0xC01E032D - STATUS_GRAPHICS_MONITORDESCRIPTOR_ID_MUST_BE_UNIQUE NTStatus = 0xC01E032E - STATUS_GRAPHICS_INVALID_VIDPN_TARGET_SUBSET_TYPE NTStatus = 0xC01E032F - STATUS_GRAPHICS_RESOURCES_NOT_RELATED NTStatus = 0xC01E0330 - STATUS_GRAPHICS_SOURCE_ID_MUST_BE_UNIQUE NTStatus = 0xC01E0331 - STATUS_GRAPHICS_TARGET_ID_MUST_BE_UNIQUE NTStatus = 0xC01E0332 - STATUS_GRAPHICS_NO_AVAILABLE_VIDPN_TARGET NTStatus = 0xC01E0333 - STATUS_GRAPHICS_MONITOR_COULD_NOT_BE_ASSOCIATED_WITH_ADAPTER NTStatus = 0xC01E0334 - STATUS_GRAPHICS_NO_VIDPNMGR NTStatus = 0xC01E0335 - STATUS_GRAPHICS_NO_ACTIVE_VIDPN NTStatus = 0xC01E0336 - STATUS_GRAPHICS_STALE_VIDPN_TOPOLOGY NTStatus = 0xC01E0337 - STATUS_GRAPHICS_MONITOR_NOT_CONNECTED NTStatus = 0xC01E0338 - STATUS_GRAPHICS_SOURCE_NOT_IN_TOPOLOGY NTStatus = 0xC01E0339 - STATUS_GRAPHICS_INVALID_PRIMARYSURFACE_SIZE NTStatus = 0xC01E033A - STATUS_GRAPHICS_INVALID_VISIBLEREGION_SIZE NTStatus = 0xC01E033B - STATUS_GRAPHICS_INVALID_STRIDE NTStatus = 0xC01E033C - STATUS_GRAPHICS_INVALID_PIXELFORMAT NTStatus = 0xC01E033D - STATUS_GRAPHICS_INVALID_COLORBASIS NTStatus = 0xC01E033E - STATUS_GRAPHICS_INVALID_PIXELVALUEACCESSMODE NTStatus = 0xC01E033F - STATUS_GRAPHICS_TARGET_NOT_IN_TOPOLOGY NTStatus = 0xC01E0340 - STATUS_GRAPHICS_NO_DISPLAY_MODE_MANAGEMENT_SUPPORT NTStatus = 0xC01E0341 - STATUS_GRAPHICS_VIDPN_SOURCE_IN_USE NTStatus = 0xC01E0342 - STATUS_GRAPHICS_CANT_ACCESS_ACTIVE_VIDPN NTStatus = 0xC01E0343 - STATUS_GRAPHICS_INVALID_PATH_IMPORTANCE_ORDINAL NTStatus = 0xC01E0344 - STATUS_GRAPHICS_INVALID_PATH_CONTENT_GEOMETRY_TRANSFORMATION NTStatus = 0xC01E0345 - STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_SUPPORTED NTStatus = 0xC01E0346 - STATUS_GRAPHICS_INVALID_GAMMA_RAMP NTStatus = 0xC01E0347 - STATUS_GRAPHICS_GAMMA_RAMP_NOT_SUPPORTED NTStatus = 0xC01E0348 - STATUS_GRAPHICS_MULTISAMPLING_NOT_SUPPORTED NTStatus = 0xC01E0349 - STATUS_GRAPHICS_MODE_NOT_IN_MODESET NTStatus = 0xC01E034A - STATUS_GRAPHICS_DATASET_IS_EMPTY NTStatus = 0x401E034B - STATUS_GRAPHICS_NO_MORE_ELEMENTS_IN_DATASET NTStatus = 0x401E034C - STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY_RECOMMENDATION_REASON NTStatus = 0xC01E034D - STATUS_GRAPHICS_INVALID_PATH_CONTENT_TYPE NTStatus = 0xC01E034E - STATUS_GRAPHICS_INVALID_COPYPROTECTION_TYPE NTStatus = 0xC01E034F - STATUS_GRAPHICS_UNASSIGNED_MODESET_ALREADY_EXISTS NTStatus = 0xC01E0350 - STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_PINNED NTStatus = 0x401E0351 - STATUS_GRAPHICS_INVALID_SCANLINE_ORDERING NTStatus = 0xC01E0352 - STATUS_GRAPHICS_TOPOLOGY_CHANGES_NOT_ALLOWED NTStatus = 0xC01E0353 - STATUS_GRAPHICS_NO_AVAILABLE_IMPORTANCE_ORDINALS NTStatus = 0xC01E0354 - STATUS_GRAPHICS_INCOMPATIBLE_PRIVATE_FORMAT NTStatus = 0xC01E0355 - STATUS_GRAPHICS_INVALID_MODE_PRUNING_ALGORITHM NTStatus = 0xC01E0356 - STATUS_GRAPHICS_INVALID_MONITOR_CAPABILITY_ORIGIN NTStatus = 0xC01E0357 - STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE_CONSTRAINT NTStatus = 0xC01E0358 - STATUS_GRAPHICS_MAX_NUM_PATHS_REACHED NTStatus = 0xC01E0359 - STATUS_GRAPHICS_CANCEL_VIDPN_TOPOLOGY_AUGMENTATION NTStatus = 0xC01E035A - 
STATUS_GRAPHICS_INVALID_CLIENT_TYPE NTStatus = 0xC01E035B - STATUS_GRAPHICS_CLIENTVIDPN_NOT_SET NTStatus = 0xC01E035C - STATUS_GRAPHICS_SPECIFIED_CHILD_ALREADY_CONNECTED NTStatus = 0xC01E0400 - STATUS_GRAPHICS_CHILD_DESCRIPTOR_NOT_SUPPORTED NTStatus = 0xC01E0401 - STATUS_GRAPHICS_UNKNOWN_CHILD_STATUS NTStatus = 0x401E042F - STATUS_GRAPHICS_NOT_A_LINKED_ADAPTER NTStatus = 0xC01E0430 - STATUS_GRAPHICS_LEADLINK_NOT_ENUMERATED NTStatus = 0xC01E0431 - STATUS_GRAPHICS_CHAINLINKS_NOT_ENUMERATED NTStatus = 0xC01E0432 - STATUS_GRAPHICS_ADAPTER_CHAIN_NOT_READY NTStatus = 0xC01E0433 - STATUS_GRAPHICS_CHAINLINKS_NOT_STARTED NTStatus = 0xC01E0434 - STATUS_GRAPHICS_CHAINLINKS_NOT_POWERED_ON NTStatus = 0xC01E0435 - STATUS_GRAPHICS_INCONSISTENT_DEVICE_LINK_STATE NTStatus = 0xC01E0436 - STATUS_GRAPHICS_LEADLINK_START_DEFERRED NTStatus = 0x401E0437 - STATUS_GRAPHICS_NOT_POST_DEVICE_DRIVER NTStatus = 0xC01E0438 - STATUS_GRAPHICS_POLLING_TOO_FREQUENTLY NTStatus = 0x401E0439 - STATUS_GRAPHICS_START_DEFERRED NTStatus = 0x401E043A - STATUS_GRAPHICS_ADAPTER_ACCESS_NOT_EXCLUDED NTStatus = 0xC01E043B - STATUS_GRAPHICS_DEPENDABLE_CHILD_STATUS NTStatus = 0x401E043C - STATUS_GRAPHICS_OPM_NOT_SUPPORTED NTStatus = 0xC01E0500 - STATUS_GRAPHICS_COPP_NOT_SUPPORTED NTStatus = 0xC01E0501 - STATUS_GRAPHICS_UAB_NOT_SUPPORTED NTStatus = 0xC01E0502 - STATUS_GRAPHICS_OPM_INVALID_ENCRYPTED_PARAMETERS NTStatus = 0xC01E0503 - STATUS_GRAPHICS_OPM_NO_PROTECTED_OUTPUTS_EXIST NTStatus = 0xC01E0505 - STATUS_GRAPHICS_OPM_INTERNAL_ERROR NTStatus = 0xC01E050B - STATUS_GRAPHICS_OPM_INVALID_HANDLE NTStatus = 0xC01E050C - STATUS_GRAPHICS_PVP_INVALID_CERTIFICATE_LENGTH NTStatus = 0xC01E050E - STATUS_GRAPHICS_OPM_SPANNING_MODE_ENABLED NTStatus = 0xC01E050F - STATUS_GRAPHICS_OPM_THEATER_MODE_ENABLED NTStatus = 0xC01E0510 - STATUS_GRAPHICS_PVP_HFS_FAILED NTStatus = 0xC01E0511 - STATUS_GRAPHICS_OPM_INVALID_SRM NTStatus = 0xC01E0512 - STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_HDCP NTStatus = 0xC01E0513 - STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_ACP NTStatus = 0xC01E0514 - STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_CGMSA NTStatus = 0xC01E0515 - STATUS_GRAPHICS_OPM_HDCP_SRM_NEVER_SET NTStatus = 0xC01E0516 - STATUS_GRAPHICS_OPM_RESOLUTION_TOO_HIGH NTStatus = 0xC01E0517 - STATUS_GRAPHICS_OPM_ALL_HDCP_HARDWARE_ALREADY_IN_USE NTStatus = 0xC01E0518 - STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_NO_LONGER_EXISTS NTStatus = 0xC01E051A - STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_COPP_SEMANTICS NTStatus = 0xC01E051C - STATUS_GRAPHICS_OPM_INVALID_INFORMATION_REQUEST NTStatus = 0xC01E051D - STATUS_GRAPHICS_OPM_DRIVER_INTERNAL_ERROR NTStatus = 0xC01E051E - STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_OPM_SEMANTICS NTStatus = 0xC01E051F - STATUS_GRAPHICS_OPM_SIGNALING_NOT_SUPPORTED NTStatus = 0xC01E0520 - STATUS_GRAPHICS_OPM_INVALID_CONFIGURATION_REQUEST NTStatus = 0xC01E0521 - STATUS_GRAPHICS_I2C_NOT_SUPPORTED NTStatus = 0xC01E0580 - STATUS_GRAPHICS_I2C_DEVICE_DOES_NOT_EXIST NTStatus = 0xC01E0581 - STATUS_GRAPHICS_I2C_ERROR_TRANSMITTING_DATA NTStatus = 0xC01E0582 - STATUS_GRAPHICS_I2C_ERROR_RECEIVING_DATA NTStatus = 0xC01E0583 - STATUS_GRAPHICS_DDCCI_VCP_NOT_SUPPORTED NTStatus = 0xC01E0584 - STATUS_GRAPHICS_DDCCI_INVALID_DATA NTStatus = 0xC01E0585 - STATUS_GRAPHICS_DDCCI_MONITOR_RETURNED_INVALID_TIMING_STATUS_BYTE NTStatus = 0xC01E0586 - STATUS_GRAPHICS_DDCCI_INVALID_CAPABILITIES_STRING NTStatus = 0xC01E0587 - STATUS_GRAPHICS_MCA_INTERNAL_ERROR NTStatus = 0xC01E0588 - STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_COMMAND NTStatus = 0xC01E0589 - 
STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_LENGTH NTStatus = 0xC01E058A - STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_CHECKSUM NTStatus = 0xC01E058B - STATUS_GRAPHICS_INVALID_PHYSICAL_MONITOR_HANDLE NTStatus = 0xC01E058C - STATUS_GRAPHICS_MONITOR_NO_LONGER_EXISTS NTStatus = 0xC01E058D - STATUS_GRAPHICS_ONLY_CONSOLE_SESSION_SUPPORTED NTStatus = 0xC01E05E0 - STATUS_GRAPHICS_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME NTStatus = 0xC01E05E1 - STATUS_GRAPHICS_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP NTStatus = 0xC01E05E2 - STATUS_GRAPHICS_MIRRORING_DEVICES_NOT_SUPPORTED NTStatus = 0xC01E05E3 - STATUS_GRAPHICS_INVALID_POINTER NTStatus = 0xC01E05E4 - STATUS_GRAPHICS_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE NTStatus = 0xC01E05E5 - STATUS_GRAPHICS_PARAMETER_ARRAY_TOO_SMALL NTStatus = 0xC01E05E6 - STATUS_GRAPHICS_INTERNAL_ERROR NTStatus = 0xC01E05E7 - STATUS_GRAPHICS_SESSION_TYPE_CHANGE_IN_PROGRESS NTStatus = 0xC01E05E8 - STATUS_FVE_LOCKED_VOLUME NTStatus = 0xC0210000 - STATUS_FVE_NOT_ENCRYPTED NTStatus = 0xC0210001 - STATUS_FVE_BAD_INFORMATION NTStatus = 0xC0210002 - STATUS_FVE_TOO_SMALL NTStatus = 0xC0210003 - STATUS_FVE_FAILED_WRONG_FS NTStatus = 0xC0210004 - STATUS_FVE_BAD_PARTITION_SIZE NTStatus = 0xC0210005 - STATUS_FVE_FS_NOT_EXTENDED NTStatus = 0xC0210006 - STATUS_FVE_FS_MOUNTED NTStatus = 0xC0210007 - STATUS_FVE_NO_LICENSE NTStatus = 0xC0210008 - STATUS_FVE_ACTION_NOT_ALLOWED NTStatus = 0xC0210009 - STATUS_FVE_BAD_DATA NTStatus = 0xC021000A - STATUS_FVE_VOLUME_NOT_BOUND NTStatus = 0xC021000B - STATUS_FVE_NOT_DATA_VOLUME NTStatus = 0xC021000C - STATUS_FVE_CONV_READ_ERROR NTStatus = 0xC021000D - STATUS_FVE_CONV_WRITE_ERROR NTStatus = 0xC021000E - STATUS_FVE_OVERLAPPED_UPDATE NTStatus = 0xC021000F - STATUS_FVE_FAILED_SECTOR_SIZE NTStatus = 0xC0210010 - STATUS_FVE_FAILED_AUTHENTICATION NTStatus = 0xC0210011 - STATUS_FVE_NOT_OS_VOLUME NTStatus = 0xC0210012 - STATUS_FVE_KEYFILE_NOT_FOUND NTStatus = 0xC0210013 - STATUS_FVE_KEYFILE_INVALID NTStatus = 0xC0210014 - STATUS_FVE_KEYFILE_NO_VMK NTStatus = 0xC0210015 - STATUS_FVE_TPM_DISABLED NTStatus = 0xC0210016 - STATUS_FVE_TPM_SRK_AUTH_NOT_ZERO NTStatus = 0xC0210017 - STATUS_FVE_TPM_INVALID_PCR NTStatus = 0xC0210018 - STATUS_FVE_TPM_NO_VMK NTStatus = 0xC0210019 - STATUS_FVE_PIN_INVALID NTStatus = 0xC021001A - STATUS_FVE_AUTH_INVALID_APPLICATION NTStatus = 0xC021001B - STATUS_FVE_AUTH_INVALID_CONFIG NTStatus = 0xC021001C - STATUS_FVE_DEBUGGER_ENABLED NTStatus = 0xC021001D - STATUS_FVE_DRY_RUN_FAILED NTStatus = 0xC021001E - STATUS_FVE_BAD_METADATA_POINTER NTStatus = 0xC021001F - STATUS_FVE_OLD_METADATA_COPY NTStatus = 0xC0210020 - STATUS_FVE_REBOOT_REQUIRED NTStatus = 0xC0210021 - STATUS_FVE_RAW_ACCESS NTStatus = 0xC0210022 - STATUS_FVE_RAW_BLOCKED NTStatus = 0xC0210023 - STATUS_FVE_NO_AUTOUNLOCK_MASTER_KEY NTStatus = 0xC0210024 - STATUS_FVE_MOR_FAILED NTStatus = 0xC0210025 - STATUS_FVE_NO_FEATURE_LICENSE NTStatus = 0xC0210026 - STATUS_FVE_POLICY_USER_DISABLE_RDV_NOT_ALLOWED NTStatus = 0xC0210027 - STATUS_FVE_CONV_RECOVERY_FAILED NTStatus = 0xC0210028 - STATUS_FVE_VIRTUALIZED_SPACE_TOO_BIG NTStatus = 0xC0210029 - STATUS_FVE_INVALID_DATUM_TYPE NTStatus = 0xC021002A - STATUS_FVE_VOLUME_TOO_SMALL NTStatus = 0xC0210030 - STATUS_FVE_ENH_PIN_INVALID NTStatus = 0xC0210031 - STATUS_FVE_FULL_ENCRYPTION_NOT_ALLOWED_ON_TP_STORAGE NTStatus = 0xC0210032 - STATUS_FVE_WIPE_NOT_ALLOWED_ON_TP_STORAGE NTStatus = 0xC0210033 - STATUS_FVE_NOT_ALLOWED_ON_CSV_STACK NTStatus = 0xC0210034 - STATUS_FVE_NOT_ALLOWED_ON_CLUSTER NTStatus = 0xC0210035 - 
STATUS_FVE_NOT_ALLOWED_TO_UPGRADE_WHILE_CONVERTING NTStatus = 0xC0210036 - STATUS_FVE_WIPE_CANCEL_NOT_APPLICABLE NTStatus = 0xC0210037 - STATUS_FVE_EDRIVE_DRY_RUN_FAILED NTStatus = 0xC0210038 - STATUS_FVE_SECUREBOOT_DISABLED NTStatus = 0xC0210039 - STATUS_FVE_SECUREBOOT_CONFIG_CHANGE NTStatus = 0xC021003A - STATUS_FVE_DEVICE_LOCKEDOUT NTStatus = 0xC021003B - STATUS_FVE_VOLUME_EXTEND_PREVENTS_EOW_DECRYPT NTStatus = 0xC021003C - STATUS_FVE_NOT_DE_VOLUME NTStatus = 0xC021003D - STATUS_FVE_PROTECTION_DISABLED NTStatus = 0xC021003E - STATUS_FVE_PROTECTION_CANNOT_BE_DISABLED NTStatus = 0xC021003F - STATUS_FVE_OSV_KSR_NOT_ALLOWED NTStatus = 0xC0210040 - STATUS_FWP_CALLOUT_NOT_FOUND NTStatus = 0xC0220001 - STATUS_FWP_CONDITION_NOT_FOUND NTStatus = 0xC0220002 - STATUS_FWP_FILTER_NOT_FOUND NTStatus = 0xC0220003 - STATUS_FWP_LAYER_NOT_FOUND NTStatus = 0xC0220004 - STATUS_FWP_PROVIDER_NOT_FOUND NTStatus = 0xC0220005 - STATUS_FWP_PROVIDER_CONTEXT_NOT_FOUND NTStatus = 0xC0220006 - STATUS_FWP_SUBLAYER_NOT_FOUND NTStatus = 0xC0220007 - STATUS_FWP_NOT_FOUND NTStatus = 0xC0220008 - STATUS_FWP_ALREADY_EXISTS NTStatus = 0xC0220009 - STATUS_FWP_IN_USE NTStatus = 0xC022000A - STATUS_FWP_DYNAMIC_SESSION_IN_PROGRESS NTStatus = 0xC022000B - STATUS_FWP_WRONG_SESSION NTStatus = 0xC022000C - STATUS_FWP_NO_TXN_IN_PROGRESS NTStatus = 0xC022000D - STATUS_FWP_TXN_IN_PROGRESS NTStatus = 0xC022000E - STATUS_FWP_TXN_ABORTED NTStatus = 0xC022000F - STATUS_FWP_SESSION_ABORTED NTStatus = 0xC0220010 - STATUS_FWP_INCOMPATIBLE_TXN NTStatus = 0xC0220011 - STATUS_FWP_TIMEOUT NTStatus = 0xC0220012 - STATUS_FWP_NET_EVENTS_DISABLED NTStatus = 0xC0220013 - STATUS_FWP_INCOMPATIBLE_LAYER NTStatus = 0xC0220014 - STATUS_FWP_KM_CLIENTS_ONLY NTStatus = 0xC0220015 - STATUS_FWP_LIFETIME_MISMATCH NTStatus = 0xC0220016 - STATUS_FWP_BUILTIN_OBJECT NTStatus = 0xC0220017 - STATUS_FWP_TOO_MANY_CALLOUTS NTStatus = 0xC0220018 - STATUS_FWP_NOTIFICATION_DROPPED NTStatus = 0xC0220019 - STATUS_FWP_TRAFFIC_MISMATCH NTStatus = 0xC022001A - STATUS_FWP_INCOMPATIBLE_SA_STATE NTStatus = 0xC022001B - STATUS_FWP_NULL_POINTER NTStatus = 0xC022001C - STATUS_FWP_INVALID_ENUMERATOR NTStatus = 0xC022001D - STATUS_FWP_INVALID_FLAGS NTStatus = 0xC022001E - STATUS_FWP_INVALID_NET_MASK NTStatus = 0xC022001F - STATUS_FWP_INVALID_RANGE NTStatus = 0xC0220020 - STATUS_FWP_INVALID_INTERVAL NTStatus = 0xC0220021 - STATUS_FWP_ZERO_LENGTH_ARRAY NTStatus = 0xC0220022 - STATUS_FWP_NULL_DISPLAY_NAME NTStatus = 0xC0220023 - STATUS_FWP_INVALID_ACTION_TYPE NTStatus = 0xC0220024 - STATUS_FWP_INVALID_WEIGHT NTStatus = 0xC0220025 - STATUS_FWP_MATCH_TYPE_MISMATCH NTStatus = 0xC0220026 - STATUS_FWP_TYPE_MISMATCH NTStatus = 0xC0220027 - STATUS_FWP_OUT_OF_BOUNDS NTStatus = 0xC0220028 - STATUS_FWP_RESERVED NTStatus = 0xC0220029 - STATUS_FWP_DUPLICATE_CONDITION NTStatus = 0xC022002A - STATUS_FWP_DUPLICATE_KEYMOD NTStatus = 0xC022002B - STATUS_FWP_ACTION_INCOMPATIBLE_WITH_LAYER NTStatus = 0xC022002C - STATUS_FWP_ACTION_INCOMPATIBLE_WITH_SUBLAYER NTStatus = 0xC022002D - STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_LAYER NTStatus = 0xC022002E - STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_CALLOUT NTStatus = 0xC022002F - STATUS_FWP_INCOMPATIBLE_AUTH_METHOD NTStatus = 0xC0220030 - STATUS_FWP_INCOMPATIBLE_DH_GROUP NTStatus = 0xC0220031 - STATUS_FWP_EM_NOT_SUPPORTED NTStatus = 0xC0220032 - STATUS_FWP_NEVER_MATCH NTStatus = 0xC0220033 - STATUS_FWP_PROVIDER_CONTEXT_MISMATCH NTStatus = 0xC0220034 - STATUS_FWP_INVALID_PARAMETER NTStatus = 0xC0220035 - STATUS_FWP_TOO_MANY_SUBLAYERS NTStatus = 0xC0220036 - 
STATUS_FWP_CALLOUT_NOTIFICATION_FAILED NTStatus = 0xC0220037 - STATUS_FWP_INVALID_AUTH_TRANSFORM NTStatus = 0xC0220038 - STATUS_FWP_INVALID_CIPHER_TRANSFORM NTStatus = 0xC0220039 - STATUS_FWP_INCOMPATIBLE_CIPHER_TRANSFORM NTStatus = 0xC022003A - STATUS_FWP_INVALID_TRANSFORM_COMBINATION NTStatus = 0xC022003B - STATUS_FWP_DUPLICATE_AUTH_METHOD NTStatus = 0xC022003C - STATUS_FWP_INVALID_TUNNEL_ENDPOINT NTStatus = 0xC022003D - STATUS_FWP_L2_DRIVER_NOT_READY NTStatus = 0xC022003E - STATUS_FWP_KEY_DICTATOR_ALREADY_REGISTERED NTStatus = 0xC022003F - STATUS_FWP_KEY_DICTATION_INVALID_KEYING_MATERIAL NTStatus = 0xC0220040 - STATUS_FWP_CONNECTIONS_DISABLED NTStatus = 0xC0220041 - STATUS_FWP_INVALID_DNS_NAME NTStatus = 0xC0220042 - STATUS_FWP_STILL_ON NTStatus = 0xC0220043 - STATUS_FWP_IKEEXT_NOT_RUNNING NTStatus = 0xC0220044 - STATUS_FWP_TCPIP_NOT_READY NTStatus = 0xC0220100 - STATUS_FWP_INJECT_HANDLE_CLOSING NTStatus = 0xC0220101 - STATUS_FWP_INJECT_HANDLE_STALE NTStatus = 0xC0220102 - STATUS_FWP_CANNOT_PEND NTStatus = 0xC0220103 - STATUS_FWP_DROP_NOICMP NTStatus = 0xC0220104 - STATUS_NDIS_CLOSING NTStatus = 0xC0230002 - STATUS_NDIS_BAD_VERSION NTStatus = 0xC0230004 - STATUS_NDIS_BAD_CHARACTERISTICS NTStatus = 0xC0230005 - STATUS_NDIS_ADAPTER_NOT_FOUND NTStatus = 0xC0230006 - STATUS_NDIS_OPEN_FAILED NTStatus = 0xC0230007 - STATUS_NDIS_DEVICE_FAILED NTStatus = 0xC0230008 - STATUS_NDIS_MULTICAST_FULL NTStatus = 0xC0230009 - STATUS_NDIS_MULTICAST_EXISTS NTStatus = 0xC023000A - STATUS_NDIS_MULTICAST_NOT_FOUND NTStatus = 0xC023000B - STATUS_NDIS_REQUEST_ABORTED NTStatus = 0xC023000C - STATUS_NDIS_RESET_IN_PROGRESS NTStatus = 0xC023000D - STATUS_NDIS_NOT_SUPPORTED NTStatus = 0xC02300BB - STATUS_NDIS_INVALID_PACKET NTStatus = 0xC023000F - STATUS_NDIS_ADAPTER_NOT_READY NTStatus = 0xC0230011 - STATUS_NDIS_INVALID_LENGTH NTStatus = 0xC0230014 - STATUS_NDIS_INVALID_DATA NTStatus = 0xC0230015 - STATUS_NDIS_BUFFER_TOO_SHORT NTStatus = 0xC0230016 - STATUS_NDIS_INVALID_OID NTStatus = 0xC0230017 - STATUS_NDIS_ADAPTER_REMOVED NTStatus = 0xC0230018 - STATUS_NDIS_UNSUPPORTED_MEDIA NTStatus = 0xC0230019 - STATUS_NDIS_GROUP_ADDRESS_IN_USE NTStatus = 0xC023001A - STATUS_NDIS_FILE_NOT_FOUND NTStatus = 0xC023001B - STATUS_NDIS_ERROR_READING_FILE NTStatus = 0xC023001C - STATUS_NDIS_ALREADY_MAPPED NTStatus = 0xC023001D - STATUS_NDIS_RESOURCE_CONFLICT NTStatus = 0xC023001E - STATUS_NDIS_MEDIA_DISCONNECTED NTStatus = 0xC023001F - STATUS_NDIS_INVALID_ADDRESS NTStatus = 0xC0230022 - STATUS_NDIS_INVALID_DEVICE_REQUEST NTStatus = 0xC0230010 - STATUS_NDIS_PAUSED NTStatus = 0xC023002A - STATUS_NDIS_INTERFACE_NOT_FOUND NTStatus = 0xC023002B - STATUS_NDIS_UNSUPPORTED_REVISION NTStatus = 0xC023002C - STATUS_NDIS_INVALID_PORT NTStatus = 0xC023002D - STATUS_NDIS_INVALID_PORT_STATE NTStatus = 0xC023002E - STATUS_NDIS_LOW_POWER_STATE NTStatus = 0xC023002F - STATUS_NDIS_REINIT_REQUIRED NTStatus = 0xC0230030 - STATUS_NDIS_NO_QUEUES NTStatus = 0xC0230031 - STATUS_NDIS_DOT11_AUTO_CONFIG_ENABLED NTStatus = 0xC0232000 - STATUS_NDIS_DOT11_MEDIA_IN_USE NTStatus = 0xC0232001 - STATUS_NDIS_DOT11_POWER_STATE_INVALID NTStatus = 0xC0232002 - STATUS_NDIS_PM_WOL_PATTERN_LIST_FULL NTStatus = 0xC0232003 - STATUS_NDIS_PM_PROTOCOL_OFFLOAD_LIST_FULL NTStatus = 0xC0232004 - STATUS_NDIS_DOT11_AP_CHANNEL_CURRENTLY_NOT_AVAILABLE NTStatus = 0xC0232005 - STATUS_NDIS_DOT11_AP_BAND_CURRENTLY_NOT_AVAILABLE NTStatus = 0xC0232006 - STATUS_NDIS_DOT11_AP_CHANNEL_NOT_ALLOWED NTStatus = 0xC0232007 - STATUS_NDIS_DOT11_AP_BAND_NOT_ALLOWED NTStatus = 0xC0232008 - 
STATUS_NDIS_INDICATION_REQUIRED NTStatus = 0x40230001 - STATUS_NDIS_OFFLOAD_POLICY NTStatus = 0xC023100F - STATUS_NDIS_OFFLOAD_CONNECTION_REJECTED NTStatus = 0xC0231012 - STATUS_NDIS_OFFLOAD_PATH_REJECTED NTStatus = 0xC0231013 - STATUS_TPM_ERROR_MASK NTStatus = 0xC0290000 - STATUS_TPM_AUTHFAIL NTStatus = 0xC0290001 - STATUS_TPM_BADINDEX NTStatus = 0xC0290002 - STATUS_TPM_BAD_PARAMETER NTStatus = 0xC0290003 - STATUS_TPM_AUDITFAILURE NTStatus = 0xC0290004 - STATUS_TPM_CLEAR_DISABLED NTStatus = 0xC0290005 - STATUS_TPM_DEACTIVATED NTStatus = 0xC0290006 - STATUS_TPM_DISABLED NTStatus = 0xC0290007 - STATUS_TPM_DISABLED_CMD NTStatus = 0xC0290008 - STATUS_TPM_FAIL NTStatus = 0xC0290009 - STATUS_TPM_BAD_ORDINAL NTStatus = 0xC029000A - STATUS_TPM_INSTALL_DISABLED NTStatus = 0xC029000B - STATUS_TPM_INVALID_KEYHANDLE NTStatus = 0xC029000C - STATUS_TPM_KEYNOTFOUND NTStatus = 0xC029000D - STATUS_TPM_INAPPROPRIATE_ENC NTStatus = 0xC029000E - STATUS_TPM_MIGRATEFAIL NTStatus = 0xC029000F - STATUS_TPM_INVALID_PCR_INFO NTStatus = 0xC0290010 - STATUS_TPM_NOSPACE NTStatus = 0xC0290011 - STATUS_TPM_NOSRK NTStatus = 0xC0290012 - STATUS_TPM_NOTSEALED_BLOB NTStatus = 0xC0290013 - STATUS_TPM_OWNER_SET NTStatus = 0xC0290014 - STATUS_TPM_RESOURCES NTStatus = 0xC0290015 - STATUS_TPM_SHORTRANDOM NTStatus = 0xC0290016 - STATUS_TPM_SIZE NTStatus = 0xC0290017 - STATUS_TPM_WRONGPCRVAL NTStatus = 0xC0290018 - STATUS_TPM_BAD_PARAM_SIZE NTStatus = 0xC0290019 - STATUS_TPM_SHA_THREAD NTStatus = 0xC029001A - STATUS_TPM_SHA_ERROR NTStatus = 0xC029001B - STATUS_TPM_FAILEDSELFTEST NTStatus = 0xC029001C - STATUS_TPM_AUTH2FAIL NTStatus = 0xC029001D - STATUS_TPM_BADTAG NTStatus = 0xC029001E - STATUS_TPM_IOERROR NTStatus = 0xC029001F - STATUS_TPM_ENCRYPT_ERROR NTStatus = 0xC0290020 - STATUS_TPM_DECRYPT_ERROR NTStatus = 0xC0290021 - STATUS_TPM_INVALID_AUTHHANDLE NTStatus = 0xC0290022 - STATUS_TPM_NO_ENDORSEMENT NTStatus = 0xC0290023 - STATUS_TPM_INVALID_KEYUSAGE NTStatus = 0xC0290024 - STATUS_TPM_WRONG_ENTITYTYPE NTStatus = 0xC0290025 - STATUS_TPM_INVALID_POSTINIT NTStatus = 0xC0290026 - STATUS_TPM_INAPPROPRIATE_SIG NTStatus = 0xC0290027 - STATUS_TPM_BAD_KEY_PROPERTY NTStatus = 0xC0290028 - STATUS_TPM_BAD_MIGRATION NTStatus = 0xC0290029 - STATUS_TPM_BAD_SCHEME NTStatus = 0xC029002A - STATUS_TPM_BAD_DATASIZE NTStatus = 0xC029002B - STATUS_TPM_BAD_MODE NTStatus = 0xC029002C - STATUS_TPM_BAD_PRESENCE NTStatus = 0xC029002D - STATUS_TPM_BAD_VERSION NTStatus = 0xC029002E - STATUS_TPM_NO_WRAP_TRANSPORT NTStatus = 0xC029002F - STATUS_TPM_AUDITFAIL_UNSUCCESSFUL NTStatus = 0xC0290030 - STATUS_TPM_AUDITFAIL_SUCCESSFUL NTStatus = 0xC0290031 - STATUS_TPM_NOTRESETABLE NTStatus = 0xC0290032 - STATUS_TPM_NOTLOCAL NTStatus = 0xC0290033 - STATUS_TPM_BAD_TYPE NTStatus = 0xC0290034 - STATUS_TPM_INVALID_RESOURCE NTStatus = 0xC0290035 - STATUS_TPM_NOTFIPS NTStatus = 0xC0290036 - STATUS_TPM_INVALID_FAMILY NTStatus = 0xC0290037 - STATUS_TPM_NO_NV_PERMISSION NTStatus = 0xC0290038 - STATUS_TPM_REQUIRES_SIGN NTStatus = 0xC0290039 - STATUS_TPM_KEY_NOTSUPPORTED NTStatus = 0xC029003A - STATUS_TPM_AUTH_CONFLICT NTStatus = 0xC029003B - STATUS_TPM_AREA_LOCKED NTStatus = 0xC029003C - STATUS_TPM_BAD_LOCALITY NTStatus = 0xC029003D - STATUS_TPM_READ_ONLY NTStatus = 0xC029003E - STATUS_TPM_PER_NOWRITE NTStatus = 0xC029003F - STATUS_TPM_FAMILYCOUNT NTStatus = 0xC0290040 - STATUS_TPM_WRITE_LOCKED NTStatus = 0xC0290041 - STATUS_TPM_BAD_ATTRIBUTES NTStatus = 0xC0290042 - STATUS_TPM_INVALID_STRUCTURE NTStatus = 0xC0290043 - STATUS_TPM_KEY_OWNER_CONTROL NTStatus = 0xC0290044 - 
STATUS_TPM_BAD_COUNTER NTStatus = 0xC0290045 - STATUS_TPM_NOT_FULLWRITE NTStatus = 0xC0290046 - STATUS_TPM_CONTEXT_GAP NTStatus = 0xC0290047 - STATUS_TPM_MAXNVWRITES NTStatus = 0xC0290048 - STATUS_TPM_NOOPERATOR NTStatus = 0xC0290049 - STATUS_TPM_RESOURCEMISSING NTStatus = 0xC029004A - STATUS_TPM_DELEGATE_LOCK NTStatus = 0xC029004B - STATUS_TPM_DELEGATE_FAMILY NTStatus = 0xC029004C - STATUS_TPM_DELEGATE_ADMIN NTStatus = 0xC029004D - STATUS_TPM_TRANSPORT_NOTEXCLUSIVE NTStatus = 0xC029004E - STATUS_TPM_OWNER_CONTROL NTStatus = 0xC029004F - STATUS_TPM_DAA_RESOURCES NTStatus = 0xC0290050 - STATUS_TPM_DAA_INPUT_DATA0 NTStatus = 0xC0290051 - STATUS_TPM_DAA_INPUT_DATA1 NTStatus = 0xC0290052 - STATUS_TPM_DAA_ISSUER_SETTINGS NTStatus = 0xC0290053 - STATUS_TPM_DAA_TPM_SETTINGS NTStatus = 0xC0290054 - STATUS_TPM_DAA_STAGE NTStatus = 0xC0290055 - STATUS_TPM_DAA_ISSUER_VALIDITY NTStatus = 0xC0290056 - STATUS_TPM_DAA_WRONG_W NTStatus = 0xC0290057 - STATUS_TPM_BAD_HANDLE NTStatus = 0xC0290058 - STATUS_TPM_BAD_DELEGATE NTStatus = 0xC0290059 - STATUS_TPM_BADCONTEXT NTStatus = 0xC029005A - STATUS_TPM_TOOMANYCONTEXTS NTStatus = 0xC029005B - STATUS_TPM_MA_TICKET_SIGNATURE NTStatus = 0xC029005C - STATUS_TPM_MA_DESTINATION NTStatus = 0xC029005D - STATUS_TPM_MA_SOURCE NTStatus = 0xC029005E - STATUS_TPM_MA_AUTHORITY NTStatus = 0xC029005F - STATUS_TPM_PERMANENTEK NTStatus = 0xC0290061 - STATUS_TPM_BAD_SIGNATURE NTStatus = 0xC0290062 - STATUS_TPM_NOCONTEXTSPACE NTStatus = 0xC0290063 - STATUS_TPM_20_E_ASYMMETRIC NTStatus = 0xC0290081 - STATUS_TPM_20_E_ATTRIBUTES NTStatus = 0xC0290082 - STATUS_TPM_20_E_HASH NTStatus = 0xC0290083 - STATUS_TPM_20_E_VALUE NTStatus = 0xC0290084 - STATUS_TPM_20_E_HIERARCHY NTStatus = 0xC0290085 - STATUS_TPM_20_E_KEY_SIZE NTStatus = 0xC0290087 - STATUS_TPM_20_E_MGF NTStatus = 0xC0290088 - STATUS_TPM_20_E_MODE NTStatus = 0xC0290089 - STATUS_TPM_20_E_TYPE NTStatus = 0xC029008A - STATUS_TPM_20_E_HANDLE NTStatus = 0xC029008B - STATUS_TPM_20_E_KDF NTStatus = 0xC029008C - STATUS_TPM_20_E_RANGE NTStatus = 0xC029008D - STATUS_TPM_20_E_AUTH_FAIL NTStatus = 0xC029008E - STATUS_TPM_20_E_NONCE NTStatus = 0xC029008F - STATUS_TPM_20_E_PP NTStatus = 0xC0290090 - STATUS_TPM_20_E_SCHEME NTStatus = 0xC0290092 - STATUS_TPM_20_E_SIZE NTStatus = 0xC0290095 - STATUS_TPM_20_E_SYMMETRIC NTStatus = 0xC0290096 - STATUS_TPM_20_E_TAG NTStatus = 0xC0290097 - STATUS_TPM_20_E_SELECTOR NTStatus = 0xC0290098 - STATUS_TPM_20_E_INSUFFICIENT NTStatus = 0xC029009A - STATUS_TPM_20_E_SIGNATURE NTStatus = 0xC029009B - STATUS_TPM_20_E_KEY NTStatus = 0xC029009C - STATUS_TPM_20_E_POLICY_FAIL NTStatus = 0xC029009D - STATUS_TPM_20_E_INTEGRITY NTStatus = 0xC029009F - STATUS_TPM_20_E_TICKET NTStatus = 0xC02900A0 - STATUS_TPM_20_E_RESERVED_BITS NTStatus = 0xC02900A1 - STATUS_TPM_20_E_BAD_AUTH NTStatus = 0xC02900A2 - STATUS_TPM_20_E_EXPIRED NTStatus = 0xC02900A3 - STATUS_TPM_20_E_POLICY_CC NTStatus = 0xC02900A4 - STATUS_TPM_20_E_BINDING NTStatus = 0xC02900A5 - STATUS_TPM_20_E_CURVE NTStatus = 0xC02900A6 - STATUS_TPM_20_E_ECC_POINT NTStatus = 0xC02900A7 - STATUS_TPM_20_E_INITIALIZE NTStatus = 0xC0290100 - STATUS_TPM_20_E_FAILURE NTStatus = 0xC0290101 - STATUS_TPM_20_E_SEQUENCE NTStatus = 0xC0290103 - STATUS_TPM_20_E_PRIVATE NTStatus = 0xC029010B - STATUS_TPM_20_E_HMAC NTStatus = 0xC0290119 - STATUS_TPM_20_E_DISABLED NTStatus = 0xC0290120 - STATUS_TPM_20_E_EXCLUSIVE NTStatus = 0xC0290121 - STATUS_TPM_20_E_ECC_CURVE NTStatus = 0xC0290123 - STATUS_TPM_20_E_AUTH_TYPE NTStatus = 0xC0290124 - STATUS_TPM_20_E_AUTH_MISSING NTStatus = 0xC0290125 - 
STATUS_TPM_20_E_POLICY NTStatus = 0xC0290126 - STATUS_TPM_20_E_PCR NTStatus = 0xC0290127 - STATUS_TPM_20_E_PCR_CHANGED NTStatus = 0xC0290128 - STATUS_TPM_20_E_UPGRADE NTStatus = 0xC029012D - STATUS_TPM_20_E_TOO_MANY_CONTEXTS NTStatus = 0xC029012E - STATUS_TPM_20_E_AUTH_UNAVAILABLE NTStatus = 0xC029012F - STATUS_TPM_20_E_REBOOT NTStatus = 0xC0290130 - STATUS_TPM_20_E_UNBALANCED NTStatus = 0xC0290131 - STATUS_TPM_20_E_COMMAND_SIZE NTStatus = 0xC0290142 - STATUS_TPM_20_E_COMMAND_CODE NTStatus = 0xC0290143 - STATUS_TPM_20_E_AUTHSIZE NTStatus = 0xC0290144 - STATUS_TPM_20_E_AUTH_CONTEXT NTStatus = 0xC0290145 - STATUS_TPM_20_E_NV_RANGE NTStatus = 0xC0290146 - STATUS_TPM_20_E_NV_SIZE NTStatus = 0xC0290147 - STATUS_TPM_20_E_NV_LOCKED NTStatus = 0xC0290148 - STATUS_TPM_20_E_NV_AUTHORIZATION NTStatus = 0xC0290149 - STATUS_TPM_20_E_NV_UNINITIALIZED NTStatus = 0xC029014A - STATUS_TPM_20_E_NV_SPACE NTStatus = 0xC029014B - STATUS_TPM_20_E_NV_DEFINED NTStatus = 0xC029014C - STATUS_TPM_20_E_BAD_CONTEXT NTStatus = 0xC0290150 - STATUS_TPM_20_E_CPHASH NTStatus = 0xC0290151 - STATUS_TPM_20_E_PARENT NTStatus = 0xC0290152 - STATUS_TPM_20_E_NEEDS_TEST NTStatus = 0xC0290153 - STATUS_TPM_20_E_NO_RESULT NTStatus = 0xC0290154 - STATUS_TPM_20_E_SENSITIVE NTStatus = 0xC0290155 - STATUS_TPM_COMMAND_BLOCKED NTStatus = 0xC0290400 - STATUS_TPM_INVALID_HANDLE NTStatus = 0xC0290401 - STATUS_TPM_DUPLICATE_VHANDLE NTStatus = 0xC0290402 - STATUS_TPM_EMBEDDED_COMMAND_BLOCKED NTStatus = 0xC0290403 - STATUS_TPM_EMBEDDED_COMMAND_UNSUPPORTED NTStatus = 0xC0290404 - STATUS_TPM_RETRY NTStatus = 0xC0290800 - STATUS_TPM_NEEDS_SELFTEST NTStatus = 0xC0290801 - STATUS_TPM_DOING_SELFTEST NTStatus = 0xC0290802 - STATUS_TPM_DEFEND_LOCK_RUNNING NTStatus = 0xC0290803 - STATUS_TPM_COMMAND_CANCELED NTStatus = 0xC0291001 - STATUS_TPM_TOO_MANY_CONTEXTS NTStatus = 0xC0291002 - STATUS_TPM_NOT_FOUND NTStatus = 0xC0291003 - STATUS_TPM_ACCESS_DENIED NTStatus = 0xC0291004 - STATUS_TPM_INSUFFICIENT_BUFFER NTStatus = 0xC0291005 - STATUS_TPM_PPI_FUNCTION_UNSUPPORTED NTStatus = 0xC0291006 - STATUS_PCP_ERROR_MASK NTStatus = 0xC0292000 - STATUS_PCP_DEVICE_NOT_READY NTStatus = 0xC0292001 - STATUS_PCP_INVALID_HANDLE NTStatus = 0xC0292002 - STATUS_PCP_INVALID_PARAMETER NTStatus = 0xC0292003 - STATUS_PCP_FLAG_NOT_SUPPORTED NTStatus = 0xC0292004 - STATUS_PCP_NOT_SUPPORTED NTStatus = 0xC0292005 - STATUS_PCP_BUFFER_TOO_SMALL NTStatus = 0xC0292006 - STATUS_PCP_INTERNAL_ERROR NTStatus = 0xC0292007 - STATUS_PCP_AUTHENTICATION_FAILED NTStatus = 0xC0292008 - STATUS_PCP_AUTHENTICATION_IGNORED NTStatus = 0xC0292009 - STATUS_PCP_POLICY_NOT_FOUND NTStatus = 0xC029200A - STATUS_PCP_PROFILE_NOT_FOUND NTStatus = 0xC029200B - STATUS_PCP_VALIDATION_FAILED NTStatus = 0xC029200C - STATUS_PCP_DEVICE_NOT_FOUND NTStatus = 0xC029200D - STATUS_PCP_WRONG_PARENT NTStatus = 0xC029200E - STATUS_PCP_KEY_NOT_LOADED NTStatus = 0xC029200F - STATUS_PCP_NO_KEY_CERTIFICATION NTStatus = 0xC0292010 - STATUS_PCP_KEY_NOT_FINALIZED NTStatus = 0xC0292011 - STATUS_PCP_ATTESTATION_CHALLENGE_NOT_SET NTStatus = 0xC0292012 - STATUS_PCP_NOT_PCR_BOUND NTStatus = 0xC0292013 - STATUS_PCP_KEY_ALREADY_FINALIZED NTStatus = 0xC0292014 - STATUS_PCP_KEY_USAGE_POLICY_NOT_SUPPORTED NTStatus = 0xC0292015 - STATUS_PCP_KEY_USAGE_POLICY_INVALID NTStatus = 0xC0292016 - STATUS_PCP_SOFT_KEY_ERROR NTStatus = 0xC0292017 - STATUS_PCP_KEY_NOT_AUTHENTICATED NTStatus = 0xC0292018 - STATUS_PCP_KEY_NOT_AIK NTStatus = 0xC0292019 - STATUS_PCP_KEY_NOT_SIGNING_KEY NTStatus = 0xC029201A - STATUS_PCP_LOCKED_OUT NTStatus = 0xC029201B - 
STATUS_PCP_CLAIM_TYPE_NOT_SUPPORTED NTStatus = 0xC029201C - STATUS_PCP_TPM_VERSION_NOT_SUPPORTED NTStatus = 0xC029201D - STATUS_PCP_BUFFER_LENGTH_MISMATCH NTStatus = 0xC029201E - STATUS_PCP_IFX_RSA_KEY_CREATION_BLOCKED NTStatus = 0xC029201F - STATUS_PCP_TICKET_MISSING NTStatus = 0xC0292020 - STATUS_PCP_RAW_POLICY_NOT_SUPPORTED NTStatus = 0xC0292021 - STATUS_PCP_KEY_HANDLE_INVALIDATED NTStatus = 0xC0292022 - STATUS_PCP_UNSUPPORTED_PSS_SALT NTStatus = 0x40292023 - STATUS_RTPM_CONTEXT_CONTINUE NTStatus = 0x00293000 - STATUS_RTPM_CONTEXT_COMPLETE NTStatus = 0x00293001 - STATUS_RTPM_NO_RESULT NTStatus = 0xC0293002 - STATUS_RTPM_PCR_READ_INCOMPLETE NTStatus = 0xC0293003 - STATUS_RTPM_INVALID_CONTEXT NTStatus = 0xC0293004 - STATUS_RTPM_UNSUPPORTED_CMD NTStatus = 0xC0293005 - STATUS_TPM_ZERO_EXHAUST_ENABLED NTStatus = 0xC0294000 - STATUS_HV_INVALID_HYPERCALL_CODE NTStatus = 0xC0350002 - STATUS_HV_INVALID_HYPERCALL_INPUT NTStatus = 0xC0350003 - STATUS_HV_INVALID_ALIGNMENT NTStatus = 0xC0350004 - STATUS_HV_INVALID_PARAMETER NTStatus = 0xC0350005 - STATUS_HV_ACCESS_DENIED NTStatus = 0xC0350006 - STATUS_HV_INVALID_PARTITION_STATE NTStatus = 0xC0350007 - STATUS_HV_OPERATION_DENIED NTStatus = 0xC0350008 - STATUS_HV_UNKNOWN_PROPERTY NTStatus = 0xC0350009 - STATUS_HV_PROPERTY_VALUE_OUT_OF_RANGE NTStatus = 0xC035000A - STATUS_HV_INSUFFICIENT_MEMORY NTStatus = 0xC035000B - STATUS_HV_PARTITION_TOO_DEEP NTStatus = 0xC035000C - STATUS_HV_INVALID_PARTITION_ID NTStatus = 0xC035000D - STATUS_HV_INVALID_VP_INDEX NTStatus = 0xC035000E - STATUS_HV_INVALID_PORT_ID NTStatus = 0xC0350011 - STATUS_HV_INVALID_CONNECTION_ID NTStatus = 0xC0350012 - STATUS_HV_INSUFFICIENT_BUFFERS NTStatus = 0xC0350013 - STATUS_HV_NOT_ACKNOWLEDGED NTStatus = 0xC0350014 - STATUS_HV_INVALID_VP_STATE NTStatus = 0xC0350015 - STATUS_HV_ACKNOWLEDGED NTStatus = 0xC0350016 - STATUS_HV_INVALID_SAVE_RESTORE_STATE NTStatus = 0xC0350017 - STATUS_HV_INVALID_SYNIC_STATE NTStatus = 0xC0350018 - STATUS_HV_OBJECT_IN_USE NTStatus = 0xC0350019 - STATUS_HV_INVALID_PROXIMITY_DOMAIN_INFO NTStatus = 0xC035001A - STATUS_HV_NO_DATA NTStatus = 0xC035001B - STATUS_HV_INACTIVE NTStatus = 0xC035001C - STATUS_HV_NO_RESOURCES NTStatus = 0xC035001D - STATUS_HV_FEATURE_UNAVAILABLE NTStatus = 0xC035001E - STATUS_HV_INSUFFICIENT_BUFFER NTStatus = 0xC0350033 - STATUS_HV_INSUFFICIENT_DEVICE_DOMAINS NTStatus = 0xC0350038 - STATUS_HV_CPUID_FEATURE_VALIDATION_ERROR NTStatus = 0xC035003C - STATUS_HV_CPUID_XSAVE_FEATURE_VALIDATION_ERROR NTStatus = 0xC035003D - STATUS_HV_PROCESSOR_STARTUP_TIMEOUT NTStatus = 0xC035003E - STATUS_HV_SMX_ENABLED NTStatus = 0xC035003F - STATUS_HV_INVALID_LP_INDEX NTStatus = 0xC0350041 - STATUS_HV_INVALID_REGISTER_VALUE NTStatus = 0xC0350050 - STATUS_HV_INVALID_VTL_STATE NTStatus = 0xC0350051 - STATUS_HV_NX_NOT_DETECTED NTStatus = 0xC0350055 - STATUS_HV_INVALID_DEVICE_ID NTStatus = 0xC0350057 - STATUS_HV_INVALID_DEVICE_STATE NTStatus = 0xC0350058 - STATUS_HV_PENDING_PAGE_REQUESTS NTStatus = 0x00350059 - STATUS_HV_PAGE_REQUEST_INVALID NTStatus = 0xC0350060 - STATUS_HV_INVALID_CPU_GROUP_ID NTStatus = 0xC035006F - STATUS_HV_INVALID_CPU_GROUP_STATE NTStatus = 0xC0350070 - STATUS_HV_OPERATION_FAILED NTStatus = 0xC0350071 - STATUS_HV_NOT_ALLOWED_WITH_NESTED_VIRT_ACTIVE NTStatus = 0xC0350072 - STATUS_HV_INSUFFICIENT_ROOT_MEMORY NTStatus = 0xC0350073 - STATUS_HV_NOT_PRESENT NTStatus = 0xC0351000 - STATUS_VID_DUPLICATE_HANDLER NTStatus = 0xC0370001 - STATUS_VID_TOO_MANY_HANDLERS NTStatus = 0xC0370002 - STATUS_VID_QUEUE_FULL NTStatus = 0xC0370003 - 
STATUS_VID_HANDLER_NOT_PRESENT NTStatus = 0xC0370004 - STATUS_VID_INVALID_OBJECT_NAME NTStatus = 0xC0370005 - STATUS_VID_PARTITION_NAME_TOO_LONG NTStatus = 0xC0370006 - STATUS_VID_MESSAGE_QUEUE_NAME_TOO_LONG NTStatus = 0xC0370007 - STATUS_VID_PARTITION_ALREADY_EXISTS NTStatus = 0xC0370008 - STATUS_VID_PARTITION_DOES_NOT_EXIST NTStatus = 0xC0370009 - STATUS_VID_PARTITION_NAME_NOT_FOUND NTStatus = 0xC037000A - STATUS_VID_MESSAGE_QUEUE_ALREADY_EXISTS NTStatus = 0xC037000B - STATUS_VID_EXCEEDED_MBP_ENTRY_MAP_LIMIT NTStatus = 0xC037000C - STATUS_VID_MB_STILL_REFERENCED NTStatus = 0xC037000D - STATUS_VID_CHILD_GPA_PAGE_SET_CORRUPTED NTStatus = 0xC037000E - STATUS_VID_INVALID_NUMA_SETTINGS NTStatus = 0xC037000F - STATUS_VID_INVALID_NUMA_NODE_INDEX NTStatus = 0xC0370010 - STATUS_VID_NOTIFICATION_QUEUE_ALREADY_ASSOCIATED NTStatus = 0xC0370011 - STATUS_VID_INVALID_MEMORY_BLOCK_HANDLE NTStatus = 0xC0370012 - STATUS_VID_PAGE_RANGE_OVERFLOW NTStatus = 0xC0370013 - STATUS_VID_INVALID_MESSAGE_QUEUE_HANDLE NTStatus = 0xC0370014 - STATUS_VID_INVALID_GPA_RANGE_HANDLE NTStatus = 0xC0370015 - STATUS_VID_NO_MEMORY_BLOCK_NOTIFICATION_QUEUE NTStatus = 0xC0370016 - STATUS_VID_MEMORY_BLOCK_LOCK_COUNT_EXCEEDED NTStatus = 0xC0370017 - STATUS_VID_INVALID_PPM_HANDLE NTStatus = 0xC0370018 - STATUS_VID_MBPS_ARE_LOCKED NTStatus = 0xC0370019 - STATUS_VID_MESSAGE_QUEUE_CLOSED NTStatus = 0xC037001A - STATUS_VID_VIRTUAL_PROCESSOR_LIMIT_EXCEEDED NTStatus = 0xC037001B - STATUS_VID_STOP_PENDING NTStatus = 0xC037001C - STATUS_VID_INVALID_PROCESSOR_STATE NTStatus = 0xC037001D - STATUS_VID_EXCEEDED_KM_CONTEXT_COUNT_LIMIT NTStatus = 0xC037001E - STATUS_VID_KM_INTERFACE_ALREADY_INITIALIZED NTStatus = 0xC037001F - STATUS_VID_MB_PROPERTY_ALREADY_SET_RESET NTStatus = 0xC0370020 - STATUS_VID_MMIO_RANGE_DESTROYED NTStatus = 0xC0370021 - STATUS_VID_INVALID_CHILD_GPA_PAGE_SET NTStatus = 0xC0370022 - STATUS_VID_RESERVE_PAGE_SET_IS_BEING_USED NTStatus = 0xC0370023 - STATUS_VID_RESERVE_PAGE_SET_TOO_SMALL NTStatus = 0xC0370024 - STATUS_VID_MBP_ALREADY_LOCKED_USING_RESERVED_PAGE NTStatus = 0xC0370025 - STATUS_VID_MBP_COUNT_EXCEEDED_LIMIT NTStatus = 0xC0370026 - STATUS_VID_SAVED_STATE_CORRUPT NTStatus = 0xC0370027 - STATUS_VID_SAVED_STATE_UNRECOGNIZED_ITEM NTStatus = 0xC0370028 - STATUS_VID_SAVED_STATE_INCOMPATIBLE NTStatus = 0xC0370029 - STATUS_VID_VTL_ACCESS_DENIED NTStatus = 0xC037002A - STATUS_VID_REMOTE_NODE_PARENT_GPA_PAGES_USED NTStatus = 0x80370001 - STATUS_IPSEC_BAD_SPI NTStatus = 0xC0360001 - STATUS_IPSEC_SA_LIFETIME_EXPIRED NTStatus = 0xC0360002 - STATUS_IPSEC_WRONG_SA NTStatus = 0xC0360003 - STATUS_IPSEC_REPLAY_CHECK_FAILED NTStatus = 0xC0360004 - STATUS_IPSEC_INVALID_PACKET NTStatus = 0xC0360005 - STATUS_IPSEC_INTEGRITY_CHECK_FAILED NTStatus = 0xC0360006 - STATUS_IPSEC_CLEAR_TEXT_DROP NTStatus = 0xC0360007 - STATUS_IPSEC_AUTH_FIREWALL_DROP NTStatus = 0xC0360008 - STATUS_IPSEC_THROTTLE_DROP NTStatus = 0xC0360009 - STATUS_IPSEC_DOSP_BLOCK NTStatus = 0xC0368000 - STATUS_IPSEC_DOSP_RECEIVED_MULTICAST NTStatus = 0xC0368001 - STATUS_IPSEC_DOSP_INVALID_PACKET NTStatus = 0xC0368002 - STATUS_IPSEC_DOSP_STATE_LOOKUP_FAILED NTStatus = 0xC0368003 - STATUS_IPSEC_DOSP_MAX_ENTRIES NTStatus = 0xC0368004 - STATUS_IPSEC_DOSP_KEYMOD_NOT_ALLOWED NTStatus = 0xC0368005 - STATUS_IPSEC_DOSP_MAX_PER_IP_RATELIMIT_QUEUES NTStatus = 0xC0368006 - STATUS_VOLMGR_INCOMPLETE_REGENERATION NTStatus = 0x80380001 - STATUS_VOLMGR_INCOMPLETE_DISK_MIGRATION NTStatus = 0x80380002 - STATUS_VOLMGR_DATABASE_FULL NTStatus = 0xC0380001 - 
STATUS_VOLMGR_DISK_CONFIGURATION_CORRUPTED NTStatus = 0xC0380002 - STATUS_VOLMGR_DISK_CONFIGURATION_NOT_IN_SYNC NTStatus = 0xC0380003 - STATUS_VOLMGR_PACK_CONFIG_UPDATE_FAILED NTStatus = 0xC0380004 - STATUS_VOLMGR_DISK_CONTAINS_NON_SIMPLE_VOLUME NTStatus = 0xC0380005 - STATUS_VOLMGR_DISK_DUPLICATE NTStatus = 0xC0380006 - STATUS_VOLMGR_DISK_DYNAMIC NTStatus = 0xC0380007 - STATUS_VOLMGR_DISK_ID_INVALID NTStatus = 0xC0380008 - STATUS_VOLMGR_DISK_INVALID NTStatus = 0xC0380009 - STATUS_VOLMGR_DISK_LAST_VOTER NTStatus = 0xC038000A - STATUS_VOLMGR_DISK_LAYOUT_INVALID NTStatus = 0xC038000B - STATUS_VOLMGR_DISK_LAYOUT_NON_BASIC_BETWEEN_BASIC_PARTITIONS NTStatus = 0xC038000C - STATUS_VOLMGR_DISK_LAYOUT_NOT_CYLINDER_ALIGNED NTStatus = 0xC038000D - STATUS_VOLMGR_DISK_LAYOUT_PARTITIONS_TOO_SMALL NTStatus = 0xC038000E - STATUS_VOLMGR_DISK_LAYOUT_PRIMARY_BETWEEN_LOGICAL_PARTITIONS NTStatus = 0xC038000F - STATUS_VOLMGR_DISK_LAYOUT_TOO_MANY_PARTITIONS NTStatus = 0xC0380010 - STATUS_VOLMGR_DISK_MISSING NTStatus = 0xC0380011 - STATUS_VOLMGR_DISK_NOT_EMPTY NTStatus = 0xC0380012 - STATUS_VOLMGR_DISK_NOT_ENOUGH_SPACE NTStatus = 0xC0380013 - STATUS_VOLMGR_DISK_REVECTORING_FAILED NTStatus = 0xC0380014 - STATUS_VOLMGR_DISK_SECTOR_SIZE_INVALID NTStatus = 0xC0380015 - STATUS_VOLMGR_DISK_SET_NOT_CONTAINED NTStatus = 0xC0380016 - STATUS_VOLMGR_DISK_USED_BY_MULTIPLE_MEMBERS NTStatus = 0xC0380017 - STATUS_VOLMGR_DISK_USED_BY_MULTIPLE_PLEXES NTStatus = 0xC0380018 - STATUS_VOLMGR_DYNAMIC_DISK_NOT_SUPPORTED NTStatus = 0xC0380019 - STATUS_VOLMGR_EXTENT_ALREADY_USED NTStatus = 0xC038001A - STATUS_VOLMGR_EXTENT_NOT_CONTIGUOUS NTStatus = 0xC038001B - STATUS_VOLMGR_EXTENT_NOT_IN_PUBLIC_REGION NTStatus = 0xC038001C - STATUS_VOLMGR_EXTENT_NOT_SECTOR_ALIGNED NTStatus = 0xC038001D - STATUS_VOLMGR_EXTENT_OVERLAPS_EBR_PARTITION NTStatus = 0xC038001E - STATUS_VOLMGR_EXTENT_VOLUME_LENGTHS_DO_NOT_MATCH NTStatus = 0xC038001F - STATUS_VOLMGR_FAULT_TOLERANT_NOT_SUPPORTED NTStatus = 0xC0380020 - STATUS_VOLMGR_INTERLEAVE_LENGTH_INVALID NTStatus = 0xC0380021 - STATUS_VOLMGR_MAXIMUM_REGISTERED_USERS NTStatus = 0xC0380022 - STATUS_VOLMGR_MEMBER_IN_SYNC NTStatus = 0xC0380023 - STATUS_VOLMGR_MEMBER_INDEX_DUPLICATE NTStatus = 0xC0380024 - STATUS_VOLMGR_MEMBER_INDEX_INVALID NTStatus = 0xC0380025 - STATUS_VOLMGR_MEMBER_MISSING NTStatus = 0xC0380026 - STATUS_VOLMGR_MEMBER_NOT_DETACHED NTStatus = 0xC0380027 - STATUS_VOLMGR_MEMBER_REGENERATING NTStatus = 0xC0380028 - STATUS_VOLMGR_ALL_DISKS_FAILED NTStatus = 0xC0380029 - STATUS_VOLMGR_NO_REGISTERED_USERS NTStatus = 0xC038002A - STATUS_VOLMGR_NO_SUCH_USER NTStatus = 0xC038002B - STATUS_VOLMGR_NOTIFICATION_RESET NTStatus = 0xC038002C - STATUS_VOLMGR_NUMBER_OF_MEMBERS_INVALID NTStatus = 0xC038002D - STATUS_VOLMGR_NUMBER_OF_PLEXES_INVALID NTStatus = 0xC038002E - STATUS_VOLMGR_PACK_DUPLICATE NTStatus = 0xC038002F - STATUS_VOLMGR_PACK_ID_INVALID NTStatus = 0xC0380030 - STATUS_VOLMGR_PACK_INVALID NTStatus = 0xC0380031 - STATUS_VOLMGR_PACK_NAME_INVALID NTStatus = 0xC0380032 - STATUS_VOLMGR_PACK_OFFLINE NTStatus = 0xC0380033 - STATUS_VOLMGR_PACK_HAS_QUORUM NTStatus = 0xC0380034 - STATUS_VOLMGR_PACK_WITHOUT_QUORUM NTStatus = 0xC0380035 - STATUS_VOLMGR_PARTITION_STYLE_INVALID NTStatus = 0xC0380036 - STATUS_VOLMGR_PARTITION_UPDATE_FAILED NTStatus = 0xC0380037 - STATUS_VOLMGR_PLEX_IN_SYNC NTStatus = 0xC0380038 - STATUS_VOLMGR_PLEX_INDEX_DUPLICATE NTStatus = 0xC0380039 - STATUS_VOLMGR_PLEX_INDEX_INVALID NTStatus = 0xC038003A - STATUS_VOLMGR_PLEX_LAST_ACTIVE NTStatus = 0xC038003B - STATUS_VOLMGR_PLEX_MISSING NTStatus 
= 0xC038003C - STATUS_VOLMGR_PLEX_REGENERATING NTStatus = 0xC038003D - STATUS_VOLMGR_PLEX_TYPE_INVALID NTStatus = 0xC038003E - STATUS_VOLMGR_PLEX_NOT_RAID5 NTStatus = 0xC038003F - STATUS_VOLMGR_PLEX_NOT_SIMPLE NTStatus = 0xC0380040 - STATUS_VOLMGR_STRUCTURE_SIZE_INVALID NTStatus = 0xC0380041 - STATUS_VOLMGR_TOO_MANY_NOTIFICATION_REQUESTS NTStatus = 0xC0380042 - STATUS_VOLMGR_TRANSACTION_IN_PROGRESS NTStatus = 0xC0380043 - STATUS_VOLMGR_UNEXPECTED_DISK_LAYOUT_CHANGE NTStatus = 0xC0380044 - STATUS_VOLMGR_VOLUME_CONTAINS_MISSING_DISK NTStatus = 0xC0380045 - STATUS_VOLMGR_VOLUME_ID_INVALID NTStatus = 0xC0380046 - STATUS_VOLMGR_VOLUME_LENGTH_INVALID NTStatus = 0xC0380047 - STATUS_VOLMGR_VOLUME_LENGTH_NOT_SECTOR_SIZE_MULTIPLE NTStatus = 0xC0380048 - STATUS_VOLMGR_VOLUME_NOT_MIRRORED NTStatus = 0xC0380049 - STATUS_VOLMGR_VOLUME_NOT_RETAINED NTStatus = 0xC038004A - STATUS_VOLMGR_VOLUME_OFFLINE NTStatus = 0xC038004B - STATUS_VOLMGR_VOLUME_RETAINED NTStatus = 0xC038004C - STATUS_VOLMGR_NUMBER_OF_EXTENTS_INVALID NTStatus = 0xC038004D - STATUS_VOLMGR_DIFFERENT_SECTOR_SIZE NTStatus = 0xC038004E - STATUS_VOLMGR_BAD_BOOT_DISK NTStatus = 0xC038004F - STATUS_VOLMGR_PACK_CONFIG_OFFLINE NTStatus = 0xC0380050 - STATUS_VOLMGR_PACK_CONFIG_ONLINE NTStatus = 0xC0380051 - STATUS_VOLMGR_NOT_PRIMARY_PACK NTStatus = 0xC0380052 - STATUS_VOLMGR_PACK_LOG_UPDATE_FAILED NTStatus = 0xC0380053 - STATUS_VOLMGR_NUMBER_OF_DISKS_IN_PLEX_INVALID NTStatus = 0xC0380054 - STATUS_VOLMGR_NUMBER_OF_DISKS_IN_MEMBER_INVALID NTStatus = 0xC0380055 - STATUS_VOLMGR_VOLUME_MIRRORED NTStatus = 0xC0380056 - STATUS_VOLMGR_PLEX_NOT_SIMPLE_SPANNED NTStatus = 0xC0380057 - STATUS_VOLMGR_NO_VALID_LOG_COPIES NTStatus = 0xC0380058 - STATUS_VOLMGR_PRIMARY_PACK_PRESENT NTStatus = 0xC0380059 - STATUS_VOLMGR_NUMBER_OF_DISKS_INVALID NTStatus = 0xC038005A - STATUS_VOLMGR_MIRROR_NOT_SUPPORTED NTStatus = 0xC038005B - STATUS_VOLMGR_RAID5_NOT_SUPPORTED NTStatus = 0xC038005C - STATUS_BCD_NOT_ALL_ENTRIES_IMPORTED NTStatus = 0x80390001 - STATUS_BCD_TOO_MANY_ELEMENTS NTStatus = 0xC0390002 - STATUS_BCD_NOT_ALL_ENTRIES_SYNCHRONIZED NTStatus = 0x80390003 - STATUS_VHD_DRIVE_FOOTER_MISSING NTStatus = 0xC03A0001 - STATUS_VHD_DRIVE_FOOTER_CHECKSUM_MISMATCH NTStatus = 0xC03A0002 - STATUS_VHD_DRIVE_FOOTER_CORRUPT NTStatus = 0xC03A0003 - STATUS_VHD_FORMAT_UNKNOWN NTStatus = 0xC03A0004 - STATUS_VHD_FORMAT_UNSUPPORTED_VERSION NTStatus = 0xC03A0005 - STATUS_VHD_SPARSE_HEADER_CHECKSUM_MISMATCH NTStatus = 0xC03A0006 - STATUS_VHD_SPARSE_HEADER_UNSUPPORTED_VERSION NTStatus = 0xC03A0007 - STATUS_VHD_SPARSE_HEADER_CORRUPT NTStatus = 0xC03A0008 - STATUS_VHD_BLOCK_ALLOCATION_FAILURE NTStatus = 0xC03A0009 - STATUS_VHD_BLOCK_ALLOCATION_TABLE_CORRUPT NTStatus = 0xC03A000A - STATUS_VHD_INVALID_BLOCK_SIZE NTStatus = 0xC03A000B - STATUS_VHD_BITMAP_MISMATCH NTStatus = 0xC03A000C - STATUS_VHD_PARENT_VHD_NOT_FOUND NTStatus = 0xC03A000D - STATUS_VHD_CHILD_PARENT_ID_MISMATCH NTStatus = 0xC03A000E - STATUS_VHD_CHILD_PARENT_TIMESTAMP_MISMATCH NTStatus = 0xC03A000F - STATUS_VHD_METADATA_READ_FAILURE NTStatus = 0xC03A0010 - STATUS_VHD_METADATA_WRITE_FAILURE NTStatus = 0xC03A0011 - STATUS_VHD_INVALID_SIZE NTStatus = 0xC03A0012 - STATUS_VHD_INVALID_FILE_SIZE NTStatus = 0xC03A0013 - STATUS_VIRTDISK_PROVIDER_NOT_FOUND NTStatus = 0xC03A0014 - STATUS_VIRTDISK_NOT_VIRTUAL_DISK NTStatus = 0xC03A0015 - STATUS_VHD_PARENT_VHD_ACCESS_DENIED NTStatus = 0xC03A0016 - STATUS_VHD_CHILD_PARENT_SIZE_MISMATCH NTStatus = 0xC03A0017 - STATUS_VHD_DIFFERENCING_CHAIN_CYCLE_DETECTED NTStatus = 0xC03A0018 - 
STATUS_VHD_DIFFERENCING_CHAIN_ERROR_IN_PARENT NTStatus = 0xC03A0019 - STATUS_VIRTUAL_DISK_LIMITATION NTStatus = 0xC03A001A - STATUS_VHD_INVALID_TYPE NTStatus = 0xC03A001B - STATUS_VHD_INVALID_STATE NTStatus = 0xC03A001C - STATUS_VIRTDISK_UNSUPPORTED_DISK_SECTOR_SIZE NTStatus = 0xC03A001D - STATUS_VIRTDISK_DISK_ALREADY_OWNED NTStatus = 0xC03A001E - STATUS_VIRTDISK_DISK_ONLINE_AND_WRITABLE NTStatus = 0xC03A001F - STATUS_CTLOG_TRACKING_NOT_INITIALIZED NTStatus = 0xC03A0020 - STATUS_CTLOG_LOGFILE_SIZE_EXCEEDED_MAXSIZE NTStatus = 0xC03A0021 - STATUS_CTLOG_VHD_CHANGED_OFFLINE NTStatus = 0xC03A0022 - STATUS_CTLOG_INVALID_TRACKING_STATE NTStatus = 0xC03A0023 - STATUS_CTLOG_INCONSISTENT_TRACKING_FILE NTStatus = 0xC03A0024 - STATUS_VHD_METADATA_FULL NTStatus = 0xC03A0028 - STATUS_VHD_INVALID_CHANGE_TRACKING_ID NTStatus = 0xC03A0029 - STATUS_VHD_CHANGE_TRACKING_DISABLED NTStatus = 0xC03A002A - STATUS_VHD_MISSING_CHANGE_TRACKING_INFORMATION NTStatus = 0xC03A0030 - STATUS_VHD_RESIZE_WOULD_TRUNCATE_DATA NTStatus = 0xC03A0031 - STATUS_VHD_COULD_NOT_COMPUTE_MINIMUM_VIRTUAL_SIZE NTStatus = 0xC03A0032 - STATUS_VHD_ALREADY_AT_OR_BELOW_MINIMUM_VIRTUAL_SIZE NTStatus = 0xC03A0033 - STATUS_QUERY_STORAGE_ERROR NTStatus = 0x803A0001 - STATUS_GDI_HANDLE_LEAK NTStatus = 0x803F0001 - STATUS_RKF_KEY_NOT_FOUND NTStatus = 0xC0400001 - STATUS_RKF_DUPLICATE_KEY NTStatus = 0xC0400002 - STATUS_RKF_BLOB_FULL NTStatus = 0xC0400003 - STATUS_RKF_STORE_FULL NTStatus = 0xC0400004 - STATUS_RKF_FILE_BLOCKED NTStatus = 0xC0400005 - STATUS_RKF_ACTIVE_KEY NTStatus = 0xC0400006 - STATUS_RDBSS_RESTART_OPERATION NTStatus = 0xC0410001 - STATUS_RDBSS_CONTINUE_OPERATION NTStatus = 0xC0410002 - STATUS_RDBSS_POST_OPERATION NTStatus = 0xC0410003 - STATUS_RDBSS_RETRY_LOOKUP NTStatus = 0xC0410004 - STATUS_BTH_ATT_INVALID_HANDLE NTStatus = 0xC0420001 - STATUS_BTH_ATT_READ_NOT_PERMITTED NTStatus = 0xC0420002 - STATUS_BTH_ATT_WRITE_NOT_PERMITTED NTStatus = 0xC0420003 - STATUS_BTH_ATT_INVALID_PDU NTStatus = 0xC0420004 - STATUS_BTH_ATT_INSUFFICIENT_AUTHENTICATION NTStatus = 0xC0420005 - STATUS_BTH_ATT_REQUEST_NOT_SUPPORTED NTStatus = 0xC0420006 - STATUS_BTH_ATT_INVALID_OFFSET NTStatus = 0xC0420007 - STATUS_BTH_ATT_INSUFFICIENT_AUTHORIZATION NTStatus = 0xC0420008 - STATUS_BTH_ATT_PREPARE_QUEUE_FULL NTStatus = 0xC0420009 - STATUS_BTH_ATT_ATTRIBUTE_NOT_FOUND NTStatus = 0xC042000A - STATUS_BTH_ATT_ATTRIBUTE_NOT_LONG NTStatus = 0xC042000B - STATUS_BTH_ATT_INSUFFICIENT_ENCRYPTION_KEY_SIZE NTStatus = 0xC042000C - STATUS_BTH_ATT_INVALID_ATTRIBUTE_VALUE_LENGTH NTStatus = 0xC042000D - STATUS_BTH_ATT_UNLIKELY NTStatus = 0xC042000E - STATUS_BTH_ATT_INSUFFICIENT_ENCRYPTION NTStatus = 0xC042000F - STATUS_BTH_ATT_UNSUPPORTED_GROUP_TYPE NTStatus = 0xC0420010 - STATUS_BTH_ATT_INSUFFICIENT_RESOURCES NTStatus = 0xC0420011 - STATUS_BTH_ATT_UNKNOWN_ERROR NTStatus = 0xC0421000 - STATUS_SECUREBOOT_ROLLBACK_DETECTED NTStatus = 0xC0430001 - STATUS_SECUREBOOT_POLICY_VIOLATION NTStatus = 0xC0430002 - STATUS_SECUREBOOT_INVALID_POLICY NTStatus = 0xC0430003 - STATUS_SECUREBOOT_POLICY_PUBLISHER_NOT_FOUND NTStatus = 0xC0430004 - STATUS_SECUREBOOT_POLICY_NOT_SIGNED NTStatus = 0xC0430005 - STATUS_SECUREBOOT_NOT_ENABLED NTStatus = 0x80430006 - STATUS_SECUREBOOT_FILE_REPLACED NTStatus = 0xC0430007 - STATUS_SECUREBOOT_POLICY_NOT_AUTHORIZED NTStatus = 0xC0430008 - STATUS_SECUREBOOT_POLICY_UNKNOWN NTStatus = 0xC0430009 - STATUS_SECUREBOOT_POLICY_MISSING_ANTIROLLBACKVERSION NTStatus = 0xC043000A - STATUS_SECUREBOOT_PLATFORM_ID_MISMATCH NTStatus = 0xC043000B - 
STATUS_SECUREBOOT_POLICY_ROLLBACK_DETECTED NTStatus = 0xC043000C - STATUS_SECUREBOOT_POLICY_UPGRADE_MISMATCH NTStatus = 0xC043000D - STATUS_SECUREBOOT_REQUIRED_POLICY_FILE_MISSING NTStatus = 0xC043000E - STATUS_SECUREBOOT_NOT_BASE_POLICY NTStatus = 0xC043000F - STATUS_SECUREBOOT_NOT_SUPPLEMENTAL_POLICY NTStatus = 0xC0430010 - STATUS_PLATFORM_MANIFEST_NOT_AUTHORIZED NTStatus = 0xC0EB0001 - STATUS_PLATFORM_MANIFEST_INVALID NTStatus = 0xC0EB0002 - STATUS_PLATFORM_MANIFEST_FILE_NOT_AUTHORIZED NTStatus = 0xC0EB0003 - STATUS_PLATFORM_MANIFEST_CATALOG_NOT_AUTHORIZED NTStatus = 0xC0EB0004 - STATUS_PLATFORM_MANIFEST_BINARY_ID_NOT_FOUND NTStatus = 0xC0EB0005 - STATUS_PLATFORM_MANIFEST_NOT_ACTIVE NTStatus = 0xC0EB0006 - STATUS_PLATFORM_MANIFEST_NOT_SIGNED NTStatus = 0xC0EB0007 - STATUS_SYSTEM_INTEGRITY_ROLLBACK_DETECTED NTStatus = 0xC0E90001 - STATUS_SYSTEM_INTEGRITY_POLICY_VIOLATION NTStatus = 0xC0E90002 - STATUS_SYSTEM_INTEGRITY_INVALID_POLICY NTStatus = 0xC0E90003 - STATUS_SYSTEM_INTEGRITY_POLICY_NOT_SIGNED NTStatus = 0xC0E90004 - STATUS_SYSTEM_INTEGRITY_TOO_MANY_POLICIES NTStatus = 0xC0E90005 - STATUS_SYSTEM_INTEGRITY_SUPPLEMENTAL_POLICY_NOT_AUTHORIZED NTStatus = 0xC0E90006 - STATUS_NO_APPLICABLE_APP_LICENSES_FOUND NTStatus = 0xC0EA0001 - STATUS_CLIP_LICENSE_NOT_FOUND NTStatus = 0xC0EA0002 - STATUS_CLIP_DEVICE_LICENSE_MISSING NTStatus = 0xC0EA0003 - STATUS_CLIP_LICENSE_INVALID_SIGNATURE NTStatus = 0xC0EA0004 - STATUS_CLIP_KEYHOLDER_LICENSE_MISSING_OR_INVALID NTStatus = 0xC0EA0005 - STATUS_CLIP_LICENSE_EXPIRED NTStatus = 0xC0EA0006 - STATUS_CLIP_LICENSE_SIGNED_BY_UNKNOWN_SOURCE NTStatus = 0xC0EA0007 - STATUS_CLIP_LICENSE_NOT_SIGNED NTStatus = 0xC0EA0008 - STATUS_CLIP_LICENSE_HARDWARE_ID_OUT_OF_TOLERANCE NTStatus = 0xC0EA0009 - STATUS_CLIP_LICENSE_DEVICE_ID_MISMATCH NTStatus = 0xC0EA000A - STATUS_AUDIO_ENGINE_NODE_NOT_FOUND NTStatus = 0xC0440001 - STATUS_HDAUDIO_EMPTY_CONNECTION_LIST NTStatus = 0xC0440002 - STATUS_HDAUDIO_CONNECTION_LIST_NOT_SUPPORTED NTStatus = 0xC0440003 - STATUS_HDAUDIO_NO_LOGICAL_DEVICES_CREATED NTStatus = 0xC0440004 - STATUS_HDAUDIO_NULL_LINKED_LIST_ENTRY NTStatus = 0xC0440005 - STATUS_SPACES_REPAIRED NTStatus = 0x00E70000 - STATUS_SPACES_PAUSE NTStatus = 0x00E70001 - STATUS_SPACES_COMPLETE NTStatus = 0x00E70002 - STATUS_SPACES_REDIRECT NTStatus = 0x00E70003 - STATUS_SPACES_FAULT_DOMAIN_TYPE_INVALID NTStatus = 0xC0E70001 - STATUS_SPACES_RESILIENCY_TYPE_INVALID NTStatus = 0xC0E70003 - STATUS_SPACES_DRIVE_SECTOR_SIZE_INVALID NTStatus = 0xC0E70004 - STATUS_SPACES_DRIVE_REDUNDANCY_INVALID NTStatus = 0xC0E70006 - STATUS_SPACES_NUMBER_OF_DATA_COPIES_INVALID NTStatus = 0xC0E70007 - STATUS_SPACES_INTERLEAVE_LENGTH_INVALID NTStatus = 0xC0E70009 - STATUS_SPACES_NUMBER_OF_COLUMNS_INVALID NTStatus = 0xC0E7000A - STATUS_SPACES_NOT_ENOUGH_DRIVES NTStatus = 0xC0E7000B - STATUS_SPACES_EXTENDED_ERROR NTStatus = 0xC0E7000C - STATUS_SPACES_PROVISIONING_TYPE_INVALID NTStatus = 0xC0E7000D - STATUS_SPACES_ALLOCATION_SIZE_INVALID NTStatus = 0xC0E7000E - STATUS_SPACES_ENCLOSURE_AWARE_INVALID NTStatus = 0xC0E7000F - STATUS_SPACES_WRITE_CACHE_SIZE_INVALID NTStatus = 0xC0E70010 - STATUS_SPACES_NUMBER_OF_GROUPS_INVALID NTStatus = 0xC0E70011 - STATUS_SPACES_DRIVE_OPERATIONAL_STATE_INVALID NTStatus = 0xC0E70012 - STATUS_SPACES_UPDATE_COLUMN_STATE NTStatus = 0xC0E70013 - STATUS_SPACES_MAP_REQUIRED NTStatus = 0xC0E70014 - STATUS_SPACES_UNSUPPORTED_VERSION NTStatus = 0xC0E70015 - STATUS_SPACES_CORRUPT_METADATA NTStatus = 0xC0E70016 - STATUS_SPACES_DRT_FULL NTStatus = 0xC0E70017 - STATUS_SPACES_INCONSISTENCY 
NTStatus = 0xC0E70018 - STATUS_SPACES_LOG_NOT_READY NTStatus = 0xC0E70019 - STATUS_SPACES_NO_REDUNDANCY NTStatus = 0xC0E7001A - STATUS_SPACES_DRIVE_NOT_READY NTStatus = 0xC0E7001B - STATUS_SPACES_DRIVE_SPLIT NTStatus = 0xC0E7001C - STATUS_SPACES_DRIVE_LOST_DATA NTStatus = 0xC0E7001D - STATUS_SPACES_ENTRY_INCOMPLETE NTStatus = 0xC0E7001E - STATUS_SPACES_ENTRY_INVALID NTStatus = 0xC0E7001F - STATUS_SPACES_MARK_DIRTY NTStatus = 0xC0E70020 - STATUS_VOLSNAP_BOOTFILE_NOT_VALID NTStatus = 0xC0500003 - STATUS_VOLSNAP_ACTIVATION_TIMEOUT NTStatus = 0xC0500004 - STATUS_IO_PREEMPTED NTStatus = 0xC0510001 - STATUS_SVHDX_ERROR_STORED NTStatus = 0xC05C0000 - STATUS_SVHDX_ERROR_NOT_AVAILABLE NTStatus = 0xC05CFF00 - STATUS_SVHDX_UNIT_ATTENTION_AVAILABLE NTStatus = 0xC05CFF01 - STATUS_SVHDX_UNIT_ATTENTION_CAPACITY_DATA_CHANGED NTStatus = 0xC05CFF02 - STATUS_SVHDX_UNIT_ATTENTION_RESERVATIONS_PREEMPTED NTStatus = 0xC05CFF03 - STATUS_SVHDX_UNIT_ATTENTION_RESERVATIONS_RELEASED NTStatus = 0xC05CFF04 - STATUS_SVHDX_UNIT_ATTENTION_REGISTRATIONS_PREEMPTED NTStatus = 0xC05CFF05 - STATUS_SVHDX_UNIT_ATTENTION_OPERATING_DEFINITION_CHANGED NTStatus = 0xC05CFF06 - STATUS_SVHDX_RESERVATION_CONFLICT NTStatus = 0xC05CFF07 - STATUS_SVHDX_WRONG_FILE_TYPE NTStatus = 0xC05CFF08 - STATUS_SVHDX_VERSION_MISMATCH NTStatus = 0xC05CFF09 - STATUS_VHD_SHARED NTStatus = 0xC05CFF0A - STATUS_SVHDX_NO_INITIATOR NTStatus = 0xC05CFF0B - STATUS_VHDSET_BACKING_STORAGE_NOT_FOUND NTStatus = 0xC05CFF0C - STATUS_SMB_NO_PREAUTH_INTEGRITY_HASH_OVERLAP NTStatus = 0xC05D0000 - STATUS_SMB_BAD_CLUSTER_DIALECT NTStatus = 0xC05D0001 - STATUS_SMB_GUEST_LOGON_BLOCKED NTStatus = 0xC05D0002 - STATUS_SECCORE_INVALID_COMMAND NTStatus = 0xC0E80000 - STATUS_VSM_NOT_INITIALIZED NTStatus = 0xC0450000 - STATUS_VSM_DMA_PROTECTION_NOT_IN_USE NTStatus = 0xC0450001 - STATUS_APPEXEC_CONDITION_NOT_SATISFIED NTStatus = 0xC0EC0000 - STATUS_APPEXEC_HANDLE_INVALIDATED NTStatus = 0xC0EC0001 - STATUS_APPEXEC_INVALID_HOST_GENERATION NTStatus = 0xC0EC0002 - STATUS_APPEXEC_UNEXPECTED_PROCESS_REGISTRATION NTStatus = 0xC0EC0003 - STATUS_APPEXEC_INVALID_HOST_STATE NTStatus = 0xC0EC0004 - STATUS_APPEXEC_NO_DONOR NTStatus = 0xC0EC0005 - STATUS_APPEXEC_HOST_ID_MISMATCH NTStatus = 0xC0EC0006 - STATUS_APPEXEC_UNKNOWN_USER NTStatus = 0xC0EC0007 -) diff --git a/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go b/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go deleted file mode 100644 index 6048ac679..000000000 --- a/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go +++ /dev/null @@ -1,149 +0,0 @@ -// Code generated by 'mkknownfolderids.bash'; DO NOT EDIT. 
- -package windows - -type KNOWNFOLDERID GUID - -var ( - FOLDERID_NetworkFolder = &KNOWNFOLDERID{0xd20beec4, 0x5ca8, 0x4905, [8]byte{0xae, 0x3b, 0xbf, 0x25, 0x1e, 0xa0, 0x9b, 0x53}} - FOLDERID_ComputerFolder = &KNOWNFOLDERID{0x0ac0837c, 0xbbf8, 0x452a, [8]byte{0x85, 0x0d, 0x79, 0xd0, 0x8e, 0x66, 0x7c, 0xa7}} - FOLDERID_InternetFolder = &KNOWNFOLDERID{0x4d9f7874, 0x4e0c, 0x4904, [8]byte{0x96, 0x7b, 0x40, 0xb0, 0xd2, 0x0c, 0x3e, 0x4b}} - FOLDERID_ControlPanelFolder = &KNOWNFOLDERID{0x82a74aeb, 0xaeb4, 0x465c, [8]byte{0xa0, 0x14, 0xd0, 0x97, 0xee, 0x34, 0x6d, 0x63}} - FOLDERID_PrintersFolder = &KNOWNFOLDERID{0x76fc4e2d, 0xd6ad, 0x4519, [8]byte{0xa6, 0x63, 0x37, 0xbd, 0x56, 0x06, 0x81, 0x85}} - FOLDERID_SyncManagerFolder = &KNOWNFOLDERID{0x43668bf8, 0xc14e, 0x49b2, [8]byte{0x97, 0xc9, 0x74, 0x77, 0x84, 0xd7, 0x84, 0xb7}} - FOLDERID_SyncSetupFolder = &KNOWNFOLDERID{0x0f214138, 0xb1d3, 0x4a90, [8]byte{0xbb, 0xa9, 0x27, 0xcb, 0xc0, 0xc5, 0x38, 0x9a}} - FOLDERID_ConflictFolder = &KNOWNFOLDERID{0x4bfefb45, 0x347d, 0x4006, [8]byte{0xa5, 0xbe, 0xac, 0x0c, 0xb0, 0x56, 0x71, 0x92}} - FOLDERID_SyncResultsFolder = &KNOWNFOLDERID{0x289a9a43, 0xbe44, 0x4057, [8]byte{0xa4, 0x1b, 0x58, 0x7a, 0x76, 0xd7, 0xe7, 0xf9}} - FOLDERID_RecycleBinFolder = &KNOWNFOLDERID{0xb7534046, 0x3ecb, 0x4c18, [8]byte{0xbe, 0x4e, 0x64, 0xcd, 0x4c, 0xb7, 0xd6, 0xac}} - FOLDERID_ConnectionsFolder = &KNOWNFOLDERID{0x6f0cd92b, 0x2e97, 0x45d1, [8]byte{0x88, 0xff, 0xb0, 0xd1, 0x86, 0xb8, 0xde, 0xdd}} - FOLDERID_Fonts = &KNOWNFOLDERID{0xfd228cb7, 0xae11, 0x4ae3, [8]byte{0x86, 0x4c, 0x16, 0xf3, 0x91, 0x0a, 0xb8, 0xfe}} - FOLDERID_Desktop = &KNOWNFOLDERID{0xb4bfcc3a, 0xdb2c, 0x424c, [8]byte{0xb0, 0x29, 0x7f, 0xe9, 0x9a, 0x87, 0xc6, 0x41}} - FOLDERID_Startup = &KNOWNFOLDERID{0xb97d20bb, 0xf46a, 0x4c97, [8]byte{0xba, 0x10, 0x5e, 0x36, 0x08, 0x43, 0x08, 0x54}} - FOLDERID_Programs = &KNOWNFOLDERID{0xa77f5d77, 0x2e2b, 0x44c3, [8]byte{0xa6, 0xa2, 0xab, 0xa6, 0x01, 0x05, 0x4a, 0x51}} - FOLDERID_StartMenu = &KNOWNFOLDERID{0x625b53c3, 0xab48, 0x4ec1, [8]byte{0xba, 0x1f, 0xa1, 0xef, 0x41, 0x46, 0xfc, 0x19}} - FOLDERID_Recent = &KNOWNFOLDERID{0xae50c081, 0xebd2, 0x438a, [8]byte{0x86, 0x55, 0x8a, 0x09, 0x2e, 0x34, 0x98, 0x7a}} - FOLDERID_SendTo = &KNOWNFOLDERID{0x8983036c, 0x27c0, 0x404b, [8]byte{0x8f, 0x08, 0x10, 0x2d, 0x10, 0xdc, 0xfd, 0x74}} - FOLDERID_Documents = &KNOWNFOLDERID{0xfdd39ad0, 0x238f, 0x46af, [8]byte{0xad, 0xb4, 0x6c, 0x85, 0x48, 0x03, 0x69, 0xc7}} - FOLDERID_Favorites = &KNOWNFOLDERID{0x1777f761, 0x68ad, 0x4d8a, [8]byte{0x87, 0xbd, 0x30, 0xb7, 0x59, 0xfa, 0x33, 0xdd}} - FOLDERID_NetHood = &KNOWNFOLDERID{0xc5abbf53, 0xe17f, 0x4121, [8]byte{0x89, 0x00, 0x86, 0x62, 0x6f, 0xc2, 0xc9, 0x73}} - FOLDERID_PrintHood = &KNOWNFOLDERID{0x9274bd8d, 0xcfd1, 0x41c3, [8]byte{0xb3, 0x5e, 0xb1, 0x3f, 0x55, 0xa7, 0x58, 0xf4}} - FOLDERID_Templates = &KNOWNFOLDERID{0xa63293e8, 0x664e, 0x48db, [8]byte{0xa0, 0x79, 0xdf, 0x75, 0x9e, 0x05, 0x09, 0xf7}} - FOLDERID_CommonStartup = &KNOWNFOLDERID{0x82a5ea35, 0xd9cd, 0x47c5, [8]byte{0x96, 0x29, 0xe1, 0x5d, 0x2f, 0x71, 0x4e, 0x6e}} - FOLDERID_CommonPrograms = &KNOWNFOLDERID{0x0139d44e, 0x6afe, 0x49f2, [8]byte{0x86, 0x90, 0x3d, 0xaf, 0xca, 0xe6, 0xff, 0xb8}} - FOLDERID_CommonStartMenu = &KNOWNFOLDERID{0xa4115719, 0xd62e, 0x491d, [8]byte{0xaa, 0x7c, 0xe7, 0x4b, 0x8b, 0xe3, 0xb0, 0x67}} - FOLDERID_PublicDesktop = &KNOWNFOLDERID{0xc4aa340d, 0xf20f, 0x4863, [8]byte{0xaf, 0xef, 0xf8, 0x7e, 0xf2, 0xe6, 0xba, 0x25}} - FOLDERID_ProgramData = &KNOWNFOLDERID{0x62ab5d82, 0xfdc1, 0x4dc3, [8]byte{0xa9, 0xdd, 0x07, 0x0d, 0x1d, 
0x49, 0x5d, 0x97}} - FOLDERID_CommonTemplates = &KNOWNFOLDERID{0xb94237e7, 0x57ac, 0x4347, [8]byte{0x91, 0x51, 0xb0, 0x8c, 0x6c, 0x32, 0xd1, 0xf7}} - FOLDERID_PublicDocuments = &KNOWNFOLDERID{0xed4824af, 0xdce4, 0x45a8, [8]byte{0x81, 0xe2, 0xfc, 0x79, 0x65, 0x08, 0x36, 0x34}} - FOLDERID_RoamingAppData = &KNOWNFOLDERID{0x3eb685db, 0x65f9, 0x4cf6, [8]byte{0xa0, 0x3a, 0xe3, 0xef, 0x65, 0x72, 0x9f, 0x3d}} - FOLDERID_LocalAppData = &KNOWNFOLDERID{0xf1b32785, 0x6fba, 0x4fcf, [8]byte{0x9d, 0x55, 0x7b, 0x8e, 0x7f, 0x15, 0x70, 0x91}} - FOLDERID_LocalAppDataLow = &KNOWNFOLDERID{0xa520a1a4, 0x1780, 0x4ff6, [8]byte{0xbd, 0x18, 0x16, 0x73, 0x43, 0xc5, 0xaf, 0x16}} - FOLDERID_InternetCache = &KNOWNFOLDERID{0x352481e8, 0x33be, 0x4251, [8]byte{0xba, 0x85, 0x60, 0x07, 0xca, 0xed, 0xcf, 0x9d}} - FOLDERID_Cookies = &KNOWNFOLDERID{0x2b0f765d, 0xc0e9, 0x4171, [8]byte{0x90, 0x8e, 0x08, 0xa6, 0x11, 0xb8, 0x4f, 0xf6}} - FOLDERID_History = &KNOWNFOLDERID{0xd9dc8a3b, 0xb784, 0x432e, [8]byte{0xa7, 0x81, 0x5a, 0x11, 0x30, 0xa7, 0x59, 0x63}} - FOLDERID_System = &KNOWNFOLDERID{0x1ac14e77, 0x02e7, 0x4e5d, [8]byte{0xb7, 0x44, 0x2e, 0xb1, 0xae, 0x51, 0x98, 0xb7}} - FOLDERID_SystemX86 = &KNOWNFOLDERID{0xd65231b0, 0xb2f1, 0x4857, [8]byte{0xa4, 0xce, 0xa8, 0xe7, 0xc6, 0xea, 0x7d, 0x27}} - FOLDERID_Windows = &KNOWNFOLDERID{0xf38bf404, 0x1d43, 0x42f2, [8]byte{0x93, 0x05, 0x67, 0xde, 0x0b, 0x28, 0xfc, 0x23}} - FOLDERID_Profile = &KNOWNFOLDERID{0x5e6c858f, 0x0e22, 0x4760, [8]byte{0x9a, 0xfe, 0xea, 0x33, 0x17, 0xb6, 0x71, 0x73}} - FOLDERID_Pictures = &KNOWNFOLDERID{0x33e28130, 0x4e1e, 0x4676, [8]byte{0x83, 0x5a, 0x98, 0x39, 0x5c, 0x3b, 0xc3, 0xbb}} - FOLDERID_ProgramFilesX86 = &KNOWNFOLDERID{0x7c5a40ef, 0xa0fb, 0x4bfc, [8]byte{0x87, 0x4a, 0xc0, 0xf2, 0xe0, 0xb9, 0xfa, 0x8e}} - FOLDERID_ProgramFilesCommonX86 = &KNOWNFOLDERID{0xde974d24, 0xd9c6, 0x4d3e, [8]byte{0xbf, 0x91, 0xf4, 0x45, 0x51, 0x20, 0xb9, 0x17}} - FOLDERID_ProgramFilesX64 = &KNOWNFOLDERID{0x6d809377, 0x6af0, 0x444b, [8]byte{0x89, 0x57, 0xa3, 0x77, 0x3f, 0x02, 0x20, 0x0e}} - FOLDERID_ProgramFilesCommonX64 = &KNOWNFOLDERID{0x6365d5a7, 0x0f0d, 0x45e5, [8]byte{0x87, 0xf6, 0x0d, 0xa5, 0x6b, 0x6a, 0x4f, 0x7d}} - FOLDERID_ProgramFiles = &KNOWNFOLDERID{0x905e63b6, 0xc1bf, 0x494e, [8]byte{0xb2, 0x9c, 0x65, 0xb7, 0x32, 0xd3, 0xd2, 0x1a}} - FOLDERID_ProgramFilesCommon = &KNOWNFOLDERID{0xf7f1ed05, 0x9f6d, 0x47a2, [8]byte{0xaa, 0xae, 0x29, 0xd3, 0x17, 0xc6, 0xf0, 0x66}} - FOLDERID_UserProgramFiles = &KNOWNFOLDERID{0x5cd7aee2, 0x2219, 0x4a67, [8]byte{0xb8, 0x5d, 0x6c, 0x9c, 0xe1, 0x56, 0x60, 0xcb}} - FOLDERID_UserProgramFilesCommon = &KNOWNFOLDERID{0xbcbd3057, 0xca5c, 0x4622, [8]byte{0xb4, 0x2d, 0xbc, 0x56, 0xdb, 0x0a, 0xe5, 0x16}} - FOLDERID_AdminTools = &KNOWNFOLDERID{0x724ef170, 0xa42d, 0x4fef, [8]byte{0x9f, 0x26, 0xb6, 0x0e, 0x84, 0x6f, 0xba, 0x4f}} - FOLDERID_CommonAdminTools = &KNOWNFOLDERID{0xd0384e7d, 0xbac3, 0x4797, [8]byte{0x8f, 0x14, 0xcb, 0xa2, 0x29, 0xb3, 0x92, 0xb5}} - FOLDERID_Music = &KNOWNFOLDERID{0x4bd8d571, 0x6d19, 0x48d3, [8]byte{0xbe, 0x97, 0x42, 0x22, 0x20, 0x08, 0x0e, 0x43}} - FOLDERID_Videos = &KNOWNFOLDERID{0x18989b1d, 0x99b5, 0x455b, [8]byte{0x84, 0x1c, 0xab, 0x7c, 0x74, 0xe4, 0xdd, 0xfc}} - FOLDERID_Ringtones = &KNOWNFOLDERID{0xc870044b, 0xf49e, 0x4126, [8]byte{0xa9, 0xc3, 0xb5, 0x2a, 0x1f, 0xf4, 0x11, 0xe8}} - FOLDERID_PublicPictures = &KNOWNFOLDERID{0xb6ebfb86, 0x6907, 0x413c, [8]byte{0x9a, 0xf7, 0x4f, 0xc2, 0xab, 0xf0, 0x7c, 0xc5}} - FOLDERID_PublicMusic = &KNOWNFOLDERID{0x3214fab5, 0x9757, 0x4298, [8]byte{0xbb, 0x61, 0x92, 0xa9, 0xde, 0xaa, 0x44, 
0xff}} - FOLDERID_PublicVideos = &KNOWNFOLDERID{0x2400183a, 0x6185, 0x49fb, [8]byte{0xa2, 0xd8, 0x4a, 0x39, 0x2a, 0x60, 0x2b, 0xa3}} - FOLDERID_PublicRingtones = &KNOWNFOLDERID{0xe555ab60, 0x153b, 0x4d17, [8]byte{0x9f, 0x04, 0xa5, 0xfe, 0x99, 0xfc, 0x15, 0xec}} - FOLDERID_ResourceDir = &KNOWNFOLDERID{0x8ad10c31, 0x2adb, 0x4296, [8]byte{0xa8, 0xf7, 0xe4, 0x70, 0x12, 0x32, 0xc9, 0x72}} - FOLDERID_LocalizedResourcesDir = &KNOWNFOLDERID{0x2a00375e, 0x224c, 0x49de, [8]byte{0xb8, 0xd1, 0x44, 0x0d, 0xf7, 0xef, 0x3d, 0xdc}} - FOLDERID_CommonOEMLinks = &KNOWNFOLDERID{0xc1bae2d0, 0x10df, 0x4334, [8]byte{0xbe, 0xdd, 0x7a, 0xa2, 0x0b, 0x22, 0x7a, 0x9d}} - FOLDERID_CDBurning = &KNOWNFOLDERID{0x9e52ab10, 0xf80d, 0x49df, [8]byte{0xac, 0xb8, 0x43, 0x30, 0xf5, 0x68, 0x78, 0x55}} - FOLDERID_UserProfiles = &KNOWNFOLDERID{0x0762d272, 0xc50a, 0x4bb0, [8]byte{0xa3, 0x82, 0x69, 0x7d, 0xcd, 0x72, 0x9b, 0x80}} - FOLDERID_Playlists = &KNOWNFOLDERID{0xde92c1c7, 0x837f, 0x4f69, [8]byte{0xa3, 0xbb, 0x86, 0xe6, 0x31, 0x20, 0x4a, 0x23}} - FOLDERID_SamplePlaylists = &KNOWNFOLDERID{0x15ca69b3, 0x30ee, 0x49c1, [8]byte{0xac, 0xe1, 0x6b, 0x5e, 0xc3, 0x72, 0xaf, 0xb5}} - FOLDERID_SampleMusic = &KNOWNFOLDERID{0xb250c668, 0xf57d, 0x4ee1, [8]byte{0xa6, 0x3c, 0x29, 0x0e, 0xe7, 0xd1, 0xaa, 0x1f}} - FOLDERID_SamplePictures = &KNOWNFOLDERID{0xc4900540, 0x2379, 0x4c75, [8]byte{0x84, 0x4b, 0x64, 0xe6, 0xfa, 0xf8, 0x71, 0x6b}} - FOLDERID_SampleVideos = &KNOWNFOLDERID{0x859ead94, 0x2e85, 0x48ad, [8]byte{0xa7, 0x1a, 0x09, 0x69, 0xcb, 0x56, 0xa6, 0xcd}} - FOLDERID_PhotoAlbums = &KNOWNFOLDERID{0x69d2cf90, 0xfc33, 0x4fb7, [8]byte{0x9a, 0x0c, 0xeb, 0xb0, 0xf0, 0xfc, 0xb4, 0x3c}} - FOLDERID_Public = &KNOWNFOLDERID{0xdfdf76a2, 0xc82a, 0x4d63, [8]byte{0x90, 0x6a, 0x56, 0x44, 0xac, 0x45, 0x73, 0x85}} - FOLDERID_ChangeRemovePrograms = &KNOWNFOLDERID{0xdf7266ac, 0x9274, 0x4867, [8]byte{0x8d, 0x55, 0x3b, 0xd6, 0x61, 0xde, 0x87, 0x2d}} - FOLDERID_AppUpdates = &KNOWNFOLDERID{0xa305ce99, 0xf527, 0x492b, [8]byte{0x8b, 0x1a, 0x7e, 0x76, 0xfa, 0x98, 0xd6, 0xe4}} - FOLDERID_AddNewPrograms = &KNOWNFOLDERID{0xde61d971, 0x5ebc, 0x4f02, [8]byte{0xa3, 0xa9, 0x6c, 0x82, 0x89, 0x5e, 0x5c, 0x04}} - FOLDERID_Downloads = &KNOWNFOLDERID{0x374de290, 0x123f, 0x4565, [8]byte{0x91, 0x64, 0x39, 0xc4, 0x92, 0x5e, 0x46, 0x7b}} - FOLDERID_PublicDownloads = &KNOWNFOLDERID{0x3d644c9b, 0x1fb8, 0x4f30, [8]byte{0x9b, 0x45, 0xf6, 0x70, 0x23, 0x5f, 0x79, 0xc0}} - FOLDERID_SavedSearches = &KNOWNFOLDERID{0x7d1d3a04, 0xdebb, 0x4115, [8]byte{0x95, 0xcf, 0x2f, 0x29, 0xda, 0x29, 0x20, 0xda}} - FOLDERID_QuickLaunch = &KNOWNFOLDERID{0x52a4f021, 0x7b75, 0x48a9, [8]byte{0x9f, 0x6b, 0x4b, 0x87, 0xa2, 0x10, 0xbc, 0x8f}} - FOLDERID_Contacts = &KNOWNFOLDERID{0x56784854, 0xc6cb, 0x462b, [8]byte{0x81, 0x69, 0x88, 0xe3, 0x50, 0xac, 0xb8, 0x82}} - FOLDERID_SidebarParts = &KNOWNFOLDERID{0xa75d362e, 0x50fc, 0x4fb7, [8]byte{0xac, 0x2c, 0xa8, 0xbe, 0xaa, 0x31, 0x44, 0x93}} - FOLDERID_SidebarDefaultParts = &KNOWNFOLDERID{0x7b396e54, 0x9ec5, 0x4300, [8]byte{0xbe, 0x0a, 0x24, 0x82, 0xeb, 0xae, 0x1a, 0x26}} - FOLDERID_PublicGameTasks = &KNOWNFOLDERID{0xdebf2536, 0xe1a8, 0x4c59, [8]byte{0xb6, 0xa2, 0x41, 0x45, 0x86, 0x47, 0x6a, 0xea}} - FOLDERID_GameTasks = &KNOWNFOLDERID{0x054fae61, 0x4dd8, 0x4787, [8]byte{0x80, 0xb6, 0x09, 0x02, 0x20, 0xc4, 0xb7, 0x00}} - FOLDERID_SavedGames = &KNOWNFOLDERID{0x4c5c32ff, 0xbb9d, 0x43b0, [8]byte{0xb5, 0xb4, 0x2d, 0x72, 0xe5, 0x4e, 0xaa, 0xa4}} - FOLDERID_Games = &KNOWNFOLDERID{0xcac52c1a, 0xb53d, 0x4edc, [8]byte{0x92, 0xd7, 0x6b, 0x2e, 0x8a, 0xc1, 0x94, 0x34}} - 
FOLDERID_SEARCH_MAPI = &KNOWNFOLDERID{0x98ec0e18, 0x2098, 0x4d44, [8]byte{0x86, 0x44, 0x66, 0x97, 0x93, 0x15, 0xa2, 0x81}} - FOLDERID_SEARCH_CSC = &KNOWNFOLDERID{0xee32e446, 0x31ca, 0x4aba, [8]byte{0x81, 0x4f, 0xa5, 0xeb, 0xd2, 0xfd, 0x6d, 0x5e}} - FOLDERID_Links = &KNOWNFOLDERID{0xbfb9d5e0, 0xc6a9, 0x404c, [8]byte{0xb2, 0xb2, 0xae, 0x6d, 0xb6, 0xaf, 0x49, 0x68}} - FOLDERID_UsersFiles = &KNOWNFOLDERID{0xf3ce0f7c, 0x4901, 0x4acc, [8]byte{0x86, 0x48, 0xd5, 0xd4, 0x4b, 0x04, 0xef, 0x8f}} - FOLDERID_UsersLibraries = &KNOWNFOLDERID{0xa302545d, 0xdeff, 0x464b, [8]byte{0xab, 0xe8, 0x61, 0xc8, 0x64, 0x8d, 0x93, 0x9b}} - FOLDERID_SearchHome = &KNOWNFOLDERID{0x190337d1, 0xb8ca, 0x4121, [8]byte{0xa6, 0x39, 0x6d, 0x47, 0x2d, 0x16, 0x97, 0x2a}} - FOLDERID_OriginalImages = &KNOWNFOLDERID{0x2c36c0aa, 0x5812, 0x4b87, [8]byte{0xbf, 0xd0, 0x4c, 0xd0, 0xdf, 0xb1, 0x9b, 0x39}} - FOLDERID_DocumentsLibrary = &KNOWNFOLDERID{0x7b0db17d, 0x9cd2, 0x4a93, [8]byte{0x97, 0x33, 0x46, 0xcc, 0x89, 0x02, 0x2e, 0x7c}} - FOLDERID_MusicLibrary = &KNOWNFOLDERID{0x2112ab0a, 0xc86a, 0x4ffe, [8]byte{0xa3, 0x68, 0x0d, 0xe9, 0x6e, 0x47, 0x01, 0x2e}} - FOLDERID_PicturesLibrary = &KNOWNFOLDERID{0xa990ae9f, 0xa03b, 0x4e80, [8]byte{0x94, 0xbc, 0x99, 0x12, 0xd7, 0x50, 0x41, 0x04}} - FOLDERID_VideosLibrary = &KNOWNFOLDERID{0x491e922f, 0x5643, 0x4af4, [8]byte{0xa7, 0xeb, 0x4e, 0x7a, 0x13, 0x8d, 0x81, 0x74}} - FOLDERID_RecordedTVLibrary = &KNOWNFOLDERID{0x1a6fdba2, 0xf42d, 0x4358, [8]byte{0xa7, 0x98, 0xb7, 0x4d, 0x74, 0x59, 0x26, 0xc5}} - FOLDERID_HomeGroup = &KNOWNFOLDERID{0x52528a6b, 0xb9e3, 0x4add, [8]byte{0xb6, 0x0d, 0x58, 0x8c, 0x2d, 0xba, 0x84, 0x2d}} - FOLDERID_HomeGroupCurrentUser = &KNOWNFOLDERID{0x9b74b6a3, 0x0dfd, 0x4f11, [8]byte{0x9e, 0x78, 0x5f, 0x78, 0x00, 0xf2, 0xe7, 0x72}} - FOLDERID_DeviceMetadataStore = &KNOWNFOLDERID{0x5ce4a5e9, 0xe4eb, 0x479d, [8]byte{0xb8, 0x9f, 0x13, 0x0c, 0x02, 0x88, 0x61, 0x55}} - FOLDERID_Libraries = &KNOWNFOLDERID{0x1b3ea5dc, 0xb587, 0x4786, [8]byte{0xb4, 0xef, 0xbd, 0x1d, 0xc3, 0x32, 0xae, 0xae}} - FOLDERID_PublicLibraries = &KNOWNFOLDERID{0x48daf80b, 0xe6cf, 0x4f4e, [8]byte{0xb8, 0x00, 0x0e, 0x69, 0xd8, 0x4e, 0xe3, 0x84}} - FOLDERID_UserPinned = &KNOWNFOLDERID{0x9e3995ab, 0x1f9c, 0x4f13, [8]byte{0xb8, 0x27, 0x48, 0xb2, 0x4b, 0x6c, 0x71, 0x74}} - FOLDERID_ImplicitAppShortcuts = &KNOWNFOLDERID{0xbcb5256f, 0x79f6, 0x4cee, [8]byte{0xb7, 0x25, 0xdc, 0x34, 0xe4, 0x02, 0xfd, 0x46}} - FOLDERID_AccountPictures = &KNOWNFOLDERID{0x008ca0b1, 0x55b4, 0x4c56, [8]byte{0xb8, 0xa8, 0x4d, 0xe4, 0xb2, 0x99, 0xd3, 0xbe}} - FOLDERID_PublicUserTiles = &KNOWNFOLDERID{0x0482af6c, 0x08f1, 0x4c34, [8]byte{0x8c, 0x90, 0xe1, 0x7e, 0xc9, 0x8b, 0x1e, 0x17}} - FOLDERID_AppsFolder = &KNOWNFOLDERID{0x1e87508d, 0x89c2, 0x42f0, [8]byte{0x8a, 0x7e, 0x64, 0x5a, 0x0f, 0x50, 0xca, 0x58}} - FOLDERID_StartMenuAllPrograms = &KNOWNFOLDERID{0xf26305ef, 0x6948, 0x40b9, [8]byte{0xb2, 0x55, 0x81, 0x45, 0x3d, 0x09, 0xc7, 0x85}} - FOLDERID_CommonStartMenuPlaces = &KNOWNFOLDERID{0xa440879f, 0x87a0, 0x4f7d, [8]byte{0xb7, 0x00, 0x02, 0x07, 0xb9, 0x66, 0x19, 0x4a}} - FOLDERID_ApplicationShortcuts = &KNOWNFOLDERID{0xa3918781, 0xe5f2, 0x4890, [8]byte{0xb3, 0xd9, 0xa7, 0xe5, 0x43, 0x32, 0x32, 0x8c}} - FOLDERID_RoamingTiles = &KNOWNFOLDERID{0x00bcfc5a, 0xed94, 0x4e48, [8]byte{0x96, 0xa1, 0x3f, 0x62, 0x17, 0xf2, 0x19, 0x90}} - FOLDERID_RoamedTileImages = &KNOWNFOLDERID{0xaaa8d5a5, 0xf1d6, 0x4259, [8]byte{0xba, 0xa8, 0x78, 0xe7, 0xef, 0x60, 0x83, 0x5e}} - FOLDERID_Screenshots = &KNOWNFOLDERID{0xb7bede81, 0xdf94, 0x4682, [8]byte{0xa7, 0xd8, 0x57, 0xa5, 
0x26, 0x20, 0xb8, 0x6f}} - FOLDERID_CameraRoll = &KNOWNFOLDERID{0xab5fb87b, 0x7ce2, 0x4f83, [8]byte{0x91, 0x5d, 0x55, 0x08, 0x46, 0xc9, 0x53, 0x7b}} - FOLDERID_SkyDrive = &KNOWNFOLDERID{0xa52bba46, 0xe9e1, 0x435f, [8]byte{0xb3, 0xd9, 0x28, 0xda, 0xa6, 0x48, 0xc0, 0xf6}} - FOLDERID_OneDrive = &KNOWNFOLDERID{0xa52bba46, 0xe9e1, 0x435f, [8]byte{0xb3, 0xd9, 0x28, 0xda, 0xa6, 0x48, 0xc0, 0xf6}} - FOLDERID_SkyDriveDocuments = &KNOWNFOLDERID{0x24d89e24, 0x2f19, 0x4534, [8]byte{0x9d, 0xde, 0x6a, 0x66, 0x71, 0xfb, 0xb8, 0xfe}} - FOLDERID_SkyDrivePictures = &KNOWNFOLDERID{0x339719b5, 0x8c47, 0x4894, [8]byte{0x94, 0xc2, 0xd8, 0xf7, 0x7a, 0xdd, 0x44, 0xa6}} - FOLDERID_SkyDriveMusic = &KNOWNFOLDERID{0xc3f2459e, 0x80d6, 0x45dc, [8]byte{0xbf, 0xef, 0x1f, 0x76, 0x9f, 0x2b, 0xe7, 0x30}} - FOLDERID_SkyDriveCameraRoll = &KNOWNFOLDERID{0x767e6811, 0x49cb, 0x4273, [8]byte{0x87, 0xc2, 0x20, 0xf3, 0x55, 0xe1, 0x08, 0x5b}} - FOLDERID_SearchHistory = &KNOWNFOLDERID{0x0d4c3db6, 0x03a3, 0x462f, [8]byte{0xa0, 0xe6, 0x08, 0x92, 0x4c, 0x41, 0xb5, 0xd4}} - FOLDERID_SearchTemplates = &KNOWNFOLDERID{0x7e636bfe, 0xdfa9, 0x4d5e, [8]byte{0xb4, 0x56, 0xd7, 0xb3, 0x98, 0x51, 0xd8, 0xa9}} - FOLDERID_CameraRollLibrary = &KNOWNFOLDERID{0x2b20df75, 0x1eda, 0x4039, [8]byte{0x80, 0x97, 0x38, 0x79, 0x82, 0x27, 0xd5, 0xb7}} - FOLDERID_SavedPictures = &KNOWNFOLDERID{0x3b193882, 0xd3ad, 0x4eab, [8]byte{0x96, 0x5a, 0x69, 0x82, 0x9d, 0x1f, 0xb5, 0x9f}} - FOLDERID_SavedPicturesLibrary = &KNOWNFOLDERID{0xe25b5812, 0xbe88, 0x4bd9, [8]byte{0x94, 0xb0, 0x29, 0x23, 0x34, 0x77, 0xb6, 0xc3}} - FOLDERID_RetailDemo = &KNOWNFOLDERID{0x12d4c69e, 0x24ad, 0x4923, [8]byte{0xbe, 0x19, 0x31, 0x32, 0x1c, 0x43, 0xa7, 0x67}} - FOLDERID_Device = &KNOWNFOLDERID{0x1c2ac1dc, 0x4358, 0x4b6c, [8]byte{0x97, 0x33, 0xaf, 0x21, 0x15, 0x65, 0x76, 0xf0}} - FOLDERID_DevelopmentFiles = &KNOWNFOLDERID{0xdbe8e08e, 0x3053, 0x4bbc, [8]byte{0xb1, 0x83, 0x2a, 0x7b, 0x2b, 0x19, 0x1e, 0x59}} - FOLDERID_Objects3D = &KNOWNFOLDERID{0x31c0dd25, 0x9439, 0x4f12, [8]byte{0xbf, 0x41, 0x7f, 0xf4, 0xed, 0xa3, 0x87, 0x22}} - FOLDERID_AppCaptures = &KNOWNFOLDERID{0xedc0fe71, 0x98d8, 0x4f4a, [8]byte{0xb9, 0x20, 0xc8, 0xdc, 0x13, 0x3c, 0xb1, 0x65}} - FOLDERID_LocalDocuments = &KNOWNFOLDERID{0xf42ee2d3, 0x909f, 0x4907, [8]byte{0x88, 0x71, 0x4c, 0x22, 0xfc, 0x0b, 0xf7, 0x56}} - FOLDERID_LocalPictures = &KNOWNFOLDERID{0x0ddd015d, 0xb06c, 0x45d5, [8]byte{0x8c, 0x4c, 0xf5, 0x97, 0x13, 0x85, 0x46, 0x39}} - FOLDERID_LocalVideos = &KNOWNFOLDERID{0x35286a68, 0x3c57, 0x41a1, [8]byte{0xbb, 0xb1, 0x0e, 0xae, 0x73, 0xd7, 0x6c, 0x95}} - FOLDERID_LocalMusic = &KNOWNFOLDERID{0xa0c69a99, 0x21c8, 0x4671, [8]byte{0x87, 0x03, 0x79, 0x34, 0x16, 0x2f, 0xcf, 0x1d}} - FOLDERID_LocalDownloads = &KNOWNFOLDERID{0x7d83ee9b, 0x2244, 0x4e70, [8]byte{0xb1, 0xf5, 0x53, 0x93, 0x04, 0x2a, 0xf1, 0xe4}} - FOLDERID_RecordedCalls = &KNOWNFOLDERID{0x2f8b40c2, 0x83ed, 0x48ee, [8]byte{0xb3, 0x83, 0xa1, 0xf1, 0x57, 0xec, 0x6f, 0x9a}} - FOLDERID_AllAppMods = &KNOWNFOLDERID{0x7ad67899, 0x66af, 0x43ba, [8]byte{0x91, 0x56, 0x6a, 0xad, 0x42, 0xe6, 0xc5, 0x96}} - FOLDERID_CurrentAppMods = &KNOWNFOLDERID{0x3db40b20, 0x2a30, 0x4dbe, [8]byte{0x91, 0x7e, 0x77, 0x1d, 0xd2, 0x1d, 0xd0, 0x99}} - FOLDERID_AppDataDesktop = &KNOWNFOLDERID{0xb2c5e279, 0x7add, 0x439f, [8]byte{0xb2, 0x8c, 0xc4, 0x1f, 0xe1, 0xbb, 0xf6, 0x72}} - FOLDERID_AppDataDocuments = &KNOWNFOLDERID{0x7be16610, 0x1f7f, 0x44ac, [8]byte{0xbf, 0xf0, 0x83, 0xe1, 0x5f, 0x2f, 0xfc, 0xa1}} - FOLDERID_AppDataFavorites = &KNOWNFOLDERID{0x7cfbefbc, 0xde1f, 0x45aa, [8]byte{0xb8, 0x43, 0xa5, 
0x42, 0xac, 0x53, 0x6c, 0xc9}} - FOLDERID_AppDataProgramData = &KNOWNFOLDERID{0x559d40a3, 0xa036, 0x40fa, [8]byte{0xaf, 0x61, 0x84, 0xcb, 0x43, 0x0a, 0x4d, 0x34}} -) diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go deleted file mode 100644 index a81ea2c70..000000000 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ /dev/null @@ -1,4345 +0,0 @@ -// Code generated by 'go generate'; DO NOT EDIT. - -package windows - -import ( - "syscall" - "unsafe" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) - errERROR_EINVAL error = syscall.EINVAL -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return errERROR_EINVAL - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) - return e -} - -var ( - modCfgMgr32 = NewLazySystemDLL("CfgMgr32.dll") - modadvapi32 = NewLazySystemDLL("advapi32.dll") - modcrypt32 = NewLazySystemDLL("crypt32.dll") - moddnsapi = NewLazySystemDLL("dnsapi.dll") - moddwmapi = NewLazySystemDLL("dwmapi.dll") - modiphlpapi = NewLazySystemDLL("iphlpapi.dll") - modkernel32 = NewLazySystemDLL("kernel32.dll") - modmswsock = NewLazySystemDLL("mswsock.dll") - modnetapi32 = NewLazySystemDLL("netapi32.dll") - modntdll = NewLazySystemDLL("ntdll.dll") - modole32 = NewLazySystemDLL("ole32.dll") - modpsapi = NewLazySystemDLL("psapi.dll") - modsechost = NewLazySystemDLL("sechost.dll") - modsecur32 = NewLazySystemDLL("secur32.dll") - modsetupapi = NewLazySystemDLL("setupapi.dll") - modshell32 = NewLazySystemDLL("shell32.dll") - moduser32 = NewLazySystemDLL("user32.dll") - moduserenv = NewLazySystemDLL("userenv.dll") - modversion = NewLazySystemDLL("version.dll") - modwintrust = NewLazySystemDLL("wintrust.dll") - modws2_32 = NewLazySystemDLL("ws2_32.dll") - modwtsapi32 = NewLazySystemDLL("wtsapi32.dll") - - procCM_Get_DevNode_Status = modCfgMgr32.NewProc("CM_Get_DevNode_Status") - procCM_Get_Device_Interface_ListW = modCfgMgr32.NewProc("CM_Get_Device_Interface_ListW") - procCM_Get_Device_Interface_List_SizeW = modCfgMgr32.NewProc("CM_Get_Device_Interface_List_SizeW") - procCM_MapCrToWin32Err = modCfgMgr32.NewProc("CM_MapCrToWin32Err") - procAdjustTokenGroups = modadvapi32.NewProc("AdjustTokenGroups") - procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") - procAllocateAndInitializeSid = modadvapi32.NewProc("AllocateAndInitializeSid") - procBuildSecurityDescriptorW = modadvapi32.NewProc("BuildSecurityDescriptorW") - procChangeServiceConfig2W = modadvapi32.NewProc("ChangeServiceConfig2W") - procChangeServiceConfigW = modadvapi32.NewProc("ChangeServiceConfigW") - procCheckTokenMembership = modadvapi32.NewProc("CheckTokenMembership") - procCloseServiceHandle = modadvapi32.NewProc("CloseServiceHandle") - procControlService = modadvapi32.NewProc("ControlService") - procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") - procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") - procConvertStringSecurityDescriptorToSecurityDescriptorW = 
modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") - procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") - procCopySid = modadvapi32.NewProc("CopySid") - procCreateProcessAsUserW = modadvapi32.NewProc("CreateProcessAsUserW") - procCreateServiceW = modadvapi32.NewProc("CreateServiceW") - procCreateWellKnownSid = modadvapi32.NewProc("CreateWellKnownSid") - procCryptAcquireContextW = modadvapi32.NewProc("CryptAcquireContextW") - procCryptGenRandom = modadvapi32.NewProc("CryptGenRandom") - procCryptReleaseContext = modadvapi32.NewProc("CryptReleaseContext") - procDeleteService = modadvapi32.NewProc("DeleteService") - procDeregisterEventSource = modadvapi32.NewProc("DeregisterEventSource") - procDuplicateTokenEx = modadvapi32.NewProc("DuplicateTokenEx") - procEnumDependentServicesW = modadvapi32.NewProc("EnumDependentServicesW") - procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") - procEqualSid = modadvapi32.NewProc("EqualSid") - procFreeSid = modadvapi32.NewProc("FreeSid") - procGetLengthSid = modadvapi32.NewProc("GetLengthSid") - procGetNamedSecurityInfoW = modadvapi32.NewProc("GetNamedSecurityInfoW") - procGetSecurityDescriptorControl = modadvapi32.NewProc("GetSecurityDescriptorControl") - procGetSecurityDescriptorDacl = modadvapi32.NewProc("GetSecurityDescriptorDacl") - procGetSecurityDescriptorGroup = modadvapi32.NewProc("GetSecurityDescriptorGroup") - procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") - procGetSecurityDescriptorOwner = modadvapi32.NewProc("GetSecurityDescriptorOwner") - procGetSecurityDescriptorRMControl = modadvapi32.NewProc("GetSecurityDescriptorRMControl") - procGetSecurityDescriptorSacl = modadvapi32.NewProc("GetSecurityDescriptorSacl") - procGetSecurityInfo = modadvapi32.NewProc("GetSecurityInfo") - procGetSidIdentifierAuthority = modadvapi32.NewProc("GetSidIdentifierAuthority") - procGetSidSubAuthority = modadvapi32.NewProc("GetSidSubAuthority") - procGetSidSubAuthorityCount = modadvapi32.NewProc("GetSidSubAuthorityCount") - procGetTokenInformation = modadvapi32.NewProc("GetTokenInformation") - procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") - procInitializeSecurityDescriptor = modadvapi32.NewProc("InitializeSecurityDescriptor") - procInitiateSystemShutdownExW = modadvapi32.NewProc("InitiateSystemShutdownExW") - procIsTokenRestricted = modadvapi32.NewProc("IsTokenRestricted") - procIsValidSecurityDescriptor = modadvapi32.NewProc("IsValidSecurityDescriptor") - procIsValidSid = modadvapi32.NewProc("IsValidSid") - procIsWellKnownSid = modadvapi32.NewProc("IsWellKnownSid") - procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") - procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") - procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") - procMakeAbsoluteSD = modadvapi32.NewProc("MakeAbsoluteSD") - procMakeSelfRelativeSD = modadvapi32.NewProc("MakeSelfRelativeSD") - procNotifyServiceStatusChangeW = modadvapi32.NewProc("NotifyServiceStatusChangeW") - procOpenProcessToken = modadvapi32.NewProc("OpenProcessToken") - procOpenSCManagerW = modadvapi32.NewProc("OpenSCManagerW") - procOpenServiceW = modadvapi32.NewProc("OpenServiceW") - procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") - procQueryServiceConfig2W = modadvapi32.NewProc("QueryServiceConfig2W") - procQueryServiceConfigW = modadvapi32.NewProc("QueryServiceConfigW") - procQueryServiceDynamicInformation = 
modadvapi32.NewProc("QueryServiceDynamicInformation") - procQueryServiceLockStatusW = modadvapi32.NewProc("QueryServiceLockStatusW") - procQueryServiceStatus = modadvapi32.NewProc("QueryServiceStatus") - procQueryServiceStatusEx = modadvapi32.NewProc("QueryServiceStatusEx") - procRegCloseKey = modadvapi32.NewProc("RegCloseKey") - procRegEnumKeyExW = modadvapi32.NewProc("RegEnumKeyExW") - procRegNotifyChangeKeyValue = modadvapi32.NewProc("RegNotifyChangeKeyValue") - procRegOpenKeyExW = modadvapi32.NewProc("RegOpenKeyExW") - procRegQueryInfoKeyW = modadvapi32.NewProc("RegQueryInfoKeyW") - procRegQueryValueExW = modadvapi32.NewProc("RegQueryValueExW") - procRegisterEventSourceW = modadvapi32.NewProc("RegisterEventSourceW") - procRegisterServiceCtrlHandlerExW = modadvapi32.NewProc("RegisterServiceCtrlHandlerExW") - procReportEventW = modadvapi32.NewProc("ReportEventW") - procRevertToSelf = modadvapi32.NewProc("RevertToSelf") - procSetEntriesInAclW = modadvapi32.NewProc("SetEntriesInAclW") - procSetKernelObjectSecurity = modadvapi32.NewProc("SetKernelObjectSecurity") - procSetNamedSecurityInfoW = modadvapi32.NewProc("SetNamedSecurityInfoW") - procSetSecurityDescriptorControl = modadvapi32.NewProc("SetSecurityDescriptorControl") - procSetSecurityDescriptorDacl = modadvapi32.NewProc("SetSecurityDescriptorDacl") - procSetSecurityDescriptorGroup = modadvapi32.NewProc("SetSecurityDescriptorGroup") - procSetSecurityDescriptorOwner = modadvapi32.NewProc("SetSecurityDescriptorOwner") - procSetSecurityDescriptorRMControl = modadvapi32.NewProc("SetSecurityDescriptorRMControl") - procSetSecurityDescriptorSacl = modadvapi32.NewProc("SetSecurityDescriptorSacl") - procSetSecurityInfo = modadvapi32.NewProc("SetSecurityInfo") - procSetServiceStatus = modadvapi32.NewProc("SetServiceStatus") - procSetThreadToken = modadvapi32.NewProc("SetThreadToken") - procSetTokenInformation = modadvapi32.NewProc("SetTokenInformation") - procStartServiceCtrlDispatcherW = modadvapi32.NewProc("StartServiceCtrlDispatcherW") - procStartServiceW = modadvapi32.NewProc("StartServiceW") - procCertAddCertificateContextToStore = modcrypt32.NewProc("CertAddCertificateContextToStore") - procCertCloseStore = modcrypt32.NewProc("CertCloseStore") - procCertCreateCertificateContext = modcrypt32.NewProc("CertCreateCertificateContext") - procCertDeleteCertificateFromStore = modcrypt32.NewProc("CertDeleteCertificateFromStore") - procCertDuplicateCertificateContext = modcrypt32.NewProc("CertDuplicateCertificateContext") - procCertEnumCertificatesInStore = modcrypt32.NewProc("CertEnumCertificatesInStore") - procCertFindCertificateInStore = modcrypt32.NewProc("CertFindCertificateInStore") - procCertFindChainInStore = modcrypt32.NewProc("CertFindChainInStore") - procCertFindExtension = modcrypt32.NewProc("CertFindExtension") - procCertFreeCertificateChain = modcrypt32.NewProc("CertFreeCertificateChain") - procCertFreeCertificateContext = modcrypt32.NewProc("CertFreeCertificateContext") - procCertGetCertificateChain = modcrypt32.NewProc("CertGetCertificateChain") - procCertGetNameStringW = modcrypt32.NewProc("CertGetNameStringW") - procCertOpenStore = modcrypt32.NewProc("CertOpenStore") - procCertOpenSystemStoreW = modcrypt32.NewProc("CertOpenSystemStoreW") - procCertVerifyCertificateChainPolicy = modcrypt32.NewProc("CertVerifyCertificateChainPolicy") - procCryptAcquireCertificatePrivateKey = modcrypt32.NewProc("CryptAcquireCertificatePrivateKey") - procCryptDecodeObject = modcrypt32.NewProc("CryptDecodeObject") - procCryptProtectData = 
modcrypt32.NewProc("CryptProtectData") - procCryptQueryObject = modcrypt32.NewProc("CryptQueryObject") - procCryptUnprotectData = modcrypt32.NewProc("CryptUnprotectData") - procPFXImportCertStore = modcrypt32.NewProc("PFXImportCertStore") - procDnsNameCompare_W = moddnsapi.NewProc("DnsNameCompare_W") - procDnsQuery_W = moddnsapi.NewProc("DnsQuery_W") - procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") - procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute") - procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute") - procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") - procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") - procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") - procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") - procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") - procCancelIo = modkernel32.NewProc("CancelIo") - procCancelIoEx = modkernel32.NewProc("CancelIoEx") - procCloseHandle = modkernel32.NewProc("CloseHandle") - procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") - procCreateDirectoryW = modkernel32.NewProc("CreateDirectoryW") - procCreateEventExW = modkernel32.NewProc("CreateEventExW") - procCreateEventW = modkernel32.NewProc("CreateEventW") - procCreateFileMappingW = modkernel32.NewProc("CreateFileMappingW") - procCreateFileW = modkernel32.NewProc("CreateFileW") - procCreateHardLinkW = modkernel32.NewProc("CreateHardLinkW") - procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") - procCreateJobObjectW = modkernel32.NewProc("CreateJobObjectW") - procCreateMutexExW = modkernel32.NewProc("CreateMutexExW") - procCreateMutexW = modkernel32.NewProc("CreateMutexW") - procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") - procCreatePipe = modkernel32.NewProc("CreatePipe") - procCreateProcessW = modkernel32.NewProc("CreateProcessW") - procCreateSymbolicLinkW = modkernel32.NewProc("CreateSymbolicLinkW") - procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot") - procDefineDosDeviceW = modkernel32.NewProc("DefineDosDeviceW") - procDeleteFileW = modkernel32.NewProc("DeleteFileW") - procDeleteProcThreadAttributeList = modkernel32.NewProc("DeleteProcThreadAttributeList") - procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") - procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") - procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") - procExitProcess = modkernel32.NewProc("ExitProcess") - procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") - procFindClose = modkernel32.NewProc("FindClose") - procFindCloseChangeNotification = modkernel32.NewProc("FindCloseChangeNotification") - procFindFirstChangeNotificationW = modkernel32.NewProc("FindFirstChangeNotificationW") - procFindFirstFileW = modkernel32.NewProc("FindFirstFileW") - procFindFirstVolumeMountPointW = modkernel32.NewProc("FindFirstVolumeMountPointW") - procFindFirstVolumeW = modkernel32.NewProc("FindFirstVolumeW") - procFindNextChangeNotification = modkernel32.NewProc("FindNextChangeNotification") - procFindNextFileW = modkernel32.NewProc("FindNextFileW") - procFindNextVolumeMountPointW = modkernel32.NewProc("FindNextVolumeMountPointW") - procFindNextVolumeW = modkernel32.NewProc("FindNextVolumeW") - procFindResourceW = modkernel32.NewProc("FindResourceW") - procFindVolumeClose = modkernel32.NewProc("FindVolumeClose") - procFindVolumeMountPointClose = 
modkernel32.NewProc("FindVolumeMountPointClose") - procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers") - procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile") - procFormatMessageW = modkernel32.NewProc("FormatMessageW") - procFreeEnvironmentStringsW = modkernel32.NewProc("FreeEnvironmentStringsW") - procFreeLibrary = modkernel32.NewProc("FreeLibrary") - procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") - procGetACP = modkernel32.NewProc("GetACP") - procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") - procGetCommTimeouts = modkernel32.NewProc("GetCommTimeouts") - procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") - procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") - procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") - procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") - procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") - procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") - procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") - procGetCurrentThreadId = modkernel32.NewProc("GetCurrentThreadId") - procGetDiskFreeSpaceExW = modkernel32.NewProc("GetDiskFreeSpaceExW") - procGetDriveTypeW = modkernel32.NewProc("GetDriveTypeW") - procGetEnvironmentStringsW = modkernel32.NewProc("GetEnvironmentStringsW") - procGetEnvironmentVariableW = modkernel32.NewProc("GetEnvironmentVariableW") - procGetExitCodeProcess = modkernel32.NewProc("GetExitCodeProcess") - procGetFileAttributesExW = modkernel32.NewProc("GetFileAttributesExW") - procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW") - procGetFileInformationByHandle = modkernel32.NewProc("GetFileInformationByHandle") - procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") - procGetFileType = modkernel32.NewProc("GetFileType") - procGetFinalPathNameByHandleW = modkernel32.NewProc("GetFinalPathNameByHandleW") - procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW") - procGetLargePageMinimum = modkernel32.NewProc("GetLargePageMinimum") - procGetLastError = modkernel32.NewProc("GetLastError") - procGetLogicalDriveStringsW = modkernel32.NewProc("GetLogicalDriveStringsW") - procGetLogicalDrives = modkernel32.NewProc("GetLogicalDrives") - procGetLongPathNameW = modkernel32.NewProc("GetLongPathNameW") - procGetMaximumProcessorCount = modkernel32.NewProc("GetMaximumProcessorCount") - procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW") - procGetModuleHandleExW = modkernel32.NewProc("GetModuleHandleExW") - procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") - procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") - procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") - procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") - procGetProcAddress = modkernel32.NewProc("GetProcAddress") - procGetProcessId = modkernel32.NewProc("GetProcessId") - procGetProcessPreferredUILanguages = modkernel32.NewProc("GetProcessPreferredUILanguages") - procGetProcessShutdownParameters = modkernel32.NewProc("GetProcessShutdownParameters") - procGetProcessTimes = modkernel32.NewProc("GetProcessTimes") - procGetProcessWorkingSetSizeEx = modkernel32.NewProc("GetProcessWorkingSetSizeEx") - procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") - procGetShortPathNameW = modkernel32.NewProc("GetShortPathNameW") - procGetStartupInfoW = 
modkernel32.NewProc("GetStartupInfoW") - procGetStdHandle = modkernel32.NewProc("GetStdHandle") - procGetSystemDirectoryW = modkernel32.NewProc("GetSystemDirectoryW") - procGetSystemPreferredUILanguages = modkernel32.NewProc("GetSystemPreferredUILanguages") - procGetSystemTimeAsFileTime = modkernel32.NewProc("GetSystemTimeAsFileTime") - procGetSystemTimePreciseAsFileTime = modkernel32.NewProc("GetSystemTimePreciseAsFileTime") - procGetSystemWindowsDirectoryW = modkernel32.NewProc("GetSystemWindowsDirectoryW") - procGetTempPathW = modkernel32.NewProc("GetTempPathW") - procGetThreadPreferredUILanguages = modkernel32.NewProc("GetThreadPreferredUILanguages") - procGetTickCount64 = modkernel32.NewProc("GetTickCount64") - procGetTimeZoneInformation = modkernel32.NewProc("GetTimeZoneInformation") - procGetUserPreferredUILanguages = modkernel32.NewProc("GetUserPreferredUILanguages") - procGetVersion = modkernel32.NewProc("GetVersion") - procGetVolumeInformationByHandleW = modkernel32.NewProc("GetVolumeInformationByHandleW") - procGetVolumeInformationW = modkernel32.NewProc("GetVolumeInformationW") - procGetVolumeNameForVolumeMountPointW = modkernel32.NewProc("GetVolumeNameForVolumeMountPointW") - procGetVolumePathNameW = modkernel32.NewProc("GetVolumePathNameW") - procGetVolumePathNamesForVolumeNameW = modkernel32.NewProc("GetVolumePathNamesForVolumeNameW") - procGetWindowsDirectoryW = modkernel32.NewProc("GetWindowsDirectoryW") - procInitializeProcThreadAttributeList = modkernel32.NewProc("InitializeProcThreadAttributeList") - procIsWow64Process = modkernel32.NewProc("IsWow64Process") - procIsWow64Process2 = modkernel32.NewProc("IsWow64Process2") - procLoadLibraryExW = modkernel32.NewProc("LoadLibraryExW") - procLoadLibraryW = modkernel32.NewProc("LoadLibraryW") - procLoadResource = modkernel32.NewProc("LoadResource") - procLocalAlloc = modkernel32.NewProc("LocalAlloc") - procLocalFree = modkernel32.NewProc("LocalFree") - procLockFileEx = modkernel32.NewProc("LockFileEx") - procLockResource = modkernel32.NewProc("LockResource") - procMapViewOfFile = modkernel32.NewProc("MapViewOfFile") - procModule32FirstW = modkernel32.NewProc("Module32FirstW") - procModule32NextW = modkernel32.NewProc("Module32NextW") - procMoveFileExW = modkernel32.NewProc("MoveFileExW") - procMoveFileW = modkernel32.NewProc("MoveFileW") - procMultiByteToWideChar = modkernel32.NewProc("MultiByteToWideChar") - procOpenEventW = modkernel32.NewProc("OpenEventW") - procOpenMutexW = modkernel32.NewProc("OpenMutexW") - procOpenProcess = modkernel32.NewProc("OpenProcess") - procOpenThread = modkernel32.NewProc("OpenThread") - procPostQueuedCompletionStatus = modkernel32.NewProc("PostQueuedCompletionStatus") - procProcess32FirstW = modkernel32.NewProc("Process32FirstW") - procProcess32NextW = modkernel32.NewProc("Process32NextW") - procProcessIdToSessionId = modkernel32.NewProc("ProcessIdToSessionId") - procPulseEvent = modkernel32.NewProc("PulseEvent") - procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW") - procQueryFullProcessImageNameW = modkernel32.NewProc("QueryFullProcessImageNameW") - procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") - procReadConsoleW = modkernel32.NewProc("ReadConsoleW") - procReadDirectoryChangesW = modkernel32.NewProc("ReadDirectoryChangesW") - procReadFile = modkernel32.NewProc("ReadFile") - procReadProcessMemory = modkernel32.NewProc("ReadProcessMemory") - procReleaseMutex = modkernel32.NewProc("ReleaseMutex") - procRemoveDirectoryW = 
modkernel32.NewProc("RemoveDirectoryW") - procResetEvent = modkernel32.NewProc("ResetEvent") - procResumeThread = modkernel32.NewProc("ResumeThread") - procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") - procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") - procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") - procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") - procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") - procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") - procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") - procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") - procSetErrorMode = modkernel32.NewProc("SetErrorMode") - procSetEvent = modkernel32.NewProc("SetEvent") - procSetFileAttributesW = modkernel32.NewProc("SetFileAttributesW") - procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") - procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") - procSetFilePointer = modkernel32.NewProc("SetFilePointer") - procSetFileTime = modkernel32.NewProc("SetFileTime") - procSetHandleInformation = modkernel32.NewProc("SetHandleInformation") - procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject") - procSetNamedPipeHandleState = modkernel32.NewProc("SetNamedPipeHandleState") - procSetPriorityClass = modkernel32.NewProc("SetPriorityClass") - procSetProcessPriorityBoost = modkernel32.NewProc("SetProcessPriorityBoost") - procSetProcessShutdownParameters = modkernel32.NewProc("SetProcessShutdownParameters") - procSetProcessWorkingSetSizeEx = modkernel32.NewProc("SetProcessWorkingSetSizeEx") - procSetStdHandle = modkernel32.NewProc("SetStdHandle") - procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW") - procSetVolumeMountPointW = modkernel32.NewProc("SetVolumeMountPointW") - procSizeofResource = modkernel32.NewProc("SizeofResource") - procSleepEx = modkernel32.NewProc("SleepEx") - procTerminateJobObject = modkernel32.NewProc("TerminateJobObject") - procTerminateProcess = modkernel32.NewProc("TerminateProcess") - procThread32First = modkernel32.NewProc("Thread32First") - procThread32Next = modkernel32.NewProc("Thread32Next") - procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") - procUnmapViewOfFile = modkernel32.NewProc("UnmapViewOfFile") - procUpdateProcThreadAttribute = modkernel32.NewProc("UpdateProcThreadAttribute") - procVirtualAlloc = modkernel32.NewProc("VirtualAlloc") - procVirtualFree = modkernel32.NewProc("VirtualFree") - procVirtualLock = modkernel32.NewProc("VirtualLock") - procVirtualProtect = modkernel32.NewProc("VirtualProtect") - procVirtualProtectEx = modkernel32.NewProc("VirtualProtectEx") - procVirtualQuery = modkernel32.NewProc("VirtualQuery") - procVirtualQueryEx = modkernel32.NewProc("VirtualQueryEx") - procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") - procWTSGetActiveConsoleSessionId = modkernel32.NewProc("WTSGetActiveConsoleSessionId") - procWaitForMultipleObjects = modkernel32.NewProc("WaitForMultipleObjects") - procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") - procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") - procWriteFile = modkernel32.NewProc("WriteFile") - procWriteProcessMemory = modkernel32.NewProc("WriteProcessMemory") - procAcceptEx = modmswsock.NewProc("AcceptEx") - procGetAcceptExSockaddrs = modmswsock.NewProc("GetAcceptExSockaddrs") - procTransmitFile = modmswsock.NewProc("TransmitFile") - 
procNetApiBufferFree = modnetapi32.NewProc("NetApiBufferFree") - procNetGetJoinInformation = modnetapi32.NewProc("NetGetJoinInformation") - procNetUserGetInfo = modnetapi32.NewProc("NetUserGetInfo") - procNtCreateFile = modntdll.NewProc("NtCreateFile") - procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") - procNtQueryInformationProcess = modntdll.NewProc("NtQueryInformationProcess") - procNtQuerySystemInformation = modntdll.NewProc("NtQuerySystemInformation") - procNtSetInformationFile = modntdll.NewProc("NtSetInformationFile") - procNtSetInformationProcess = modntdll.NewProc("NtSetInformationProcess") - procNtSetSystemInformation = modntdll.NewProc("NtSetSystemInformation") - procRtlAddFunctionTable = modntdll.NewProc("RtlAddFunctionTable") - procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") - procRtlDeleteFunctionTable = modntdll.NewProc("RtlDeleteFunctionTable") - procRtlDosPathNameToNtPathName_U_WithStatus = modntdll.NewProc("RtlDosPathNameToNtPathName_U_WithStatus") - procRtlDosPathNameToRelativeNtPathName_U_WithStatus = modntdll.NewProc("RtlDosPathNameToRelativeNtPathName_U_WithStatus") - procRtlGetCurrentPeb = modntdll.NewProc("RtlGetCurrentPeb") - procRtlGetNtVersionNumbers = modntdll.NewProc("RtlGetNtVersionNumbers") - procRtlGetVersion = modntdll.NewProc("RtlGetVersion") - procRtlInitString = modntdll.NewProc("RtlInitString") - procRtlInitUnicodeString = modntdll.NewProc("RtlInitUnicodeString") - procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") - procCLSIDFromString = modole32.NewProc("CLSIDFromString") - procCoCreateGuid = modole32.NewProc("CoCreateGuid") - procCoGetObject = modole32.NewProc("CoGetObject") - procCoInitializeEx = modole32.NewProc("CoInitializeEx") - procCoTaskMemFree = modole32.NewProc("CoTaskMemFree") - procCoUninitialize = modole32.NewProc("CoUninitialize") - procStringFromGUID2 = modole32.NewProc("StringFromGUID2") - procEnumProcessModules = modpsapi.NewProc("EnumProcessModules") - procEnumProcessModulesEx = modpsapi.NewProc("EnumProcessModulesEx") - procEnumProcesses = modpsapi.NewProc("EnumProcesses") - procGetModuleBaseNameW = modpsapi.NewProc("GetModuleBaseNameW") - procGetModuleFileNameExW = modpsapi.NewProc("GetModuleFileNameExW") - procGetModuleInformation = modpsapi.NewProc("GetModuleInformation") - procQueryWorkingSetEx = modpsapi.NewProc("QueryWorkingSetEx") - procSubscribeServiceChangeNotifications = modsechost.NewProc("SubscribeServiceChangeNotifications") - procUnsubscribeServiceChangeNotifications = modsechost.NewProc("UnsubscribeServiceChangeNotifications") - procGetUserNameExW = modsecur32.NewProc("GetUserNameExW") - procTranslateNameW = modsecur32.NewProc("TranslateNameW") - procSetupDiBuildDriverInfoList = modsetupapi.NewProc("SetupDiBuildDriverInfoList") - procSetupDiCallClassInstaller = modsetupapi.NewProc("SetupDiCallClassInstaller") - procSetupDiCancelDriverInfoSearch = modsetupapi.NewProc("SetupDiCancelDriverInfoSearch") - procSetupDiClassGuidsFromNameExW = modsetupapi.NewProc("SetupDiClassGuidsFromNameExW") - procSetupDiClassNameFromGuidExW = modsetupapi.NewProc("SetupDiClassNameFromGuidExW") - procSetupDiCreateDeviceInfoListExW = modsetupapi.NewProc("SetupDiCreateDeviceInfoListExW") - procSetupDiCreateDeviceInfoW = modsetupapi.NewProc("SetupDiCreateDeviceInfoW") - procSetupDiDestroyDeviceInfoList = modsetupapi.NewProc("SetupDiDestroyDeviceInfoList") - procSetupDiDestroyDriverInfoList = modsetupapi.NewProc("SetupDiDestroyDriverInfoList") - procSetupDiEnumDeviceInfo = 
modsetupapi.NewProc("SetupDiEnumDeviceInfo") - procSetupDiEnumDriverInfoW = modsetupapi.NewProc("SetupDiEnumDriverInfoW") - procSetupDiGetClassDevsExW = modsetupapi.NewProc("SetupDiGetClassDevsExW") - procSetupDiGetClassInstallParamsW = modsetupapi.NewProc("SetupDiGetClassInstallParamsW") - procSetupDiGetDeviceInfoListDetailW = modsetupapi.NewProc("SetupDiGetDeviceInfoListDetailW") - procSetupDiGetDeviceInstallParamsW = modsetupapi.NewProc("SetupDiGetDeviceInstallParamsW") - procSetupDiGetDeviceInstanceIdW = modsetupapi.NewProc("SetupDiGetDeviceInstanceIdW") - procSetupDiGetDevicePropertyW = modsetupapi.NewProc("SetupDiGetDevicePropertyW") - procSetupDiGetDeviceRegistryPropertyW = modsetupapi.NewProc("SetupDiGetDeviceRegistryPropertyW") - procSetupDiGetDriverInfoDetailW = modsetupapi.NewProc("SetupDiGetDriverInfoDetailW") - procSetupDiGetSelectedDevice = modsetupapi.NewProc("SetupDiGetSelectedDevice") - procSetupDiGetSelectedDriverW = modsetupapi.NewProc("SetupDiGetSelectedDriverW") - procSetupDiOpenDevRegKey = modsetupapi.NewProc("SetupDiOpenDevRegKey") - procSetupDiSetClassInstallParamsW = modsetupapi.NewProc("SetupDiSetClassInstallParamsW") - procSetupDiSetDeviceInstallParamsW = modsetupapi.NewProc("SetupDiSetDeviceInstallParamsW") - procSetupDiSetDeviceRegistryPropertyW = modsetupapi.NewProc("SetupDiSetDeviceRegistryPropertyW") - procSetupDiSetSelectedDevice = modsetupapi.NewProc("SetupDiSetSelectedDevice") - procSetupDiSetSelectedDriverW = modsetupapi.NewProc("SetupDiSetSelectedDriverW") - procSetupUninstallOEMInfW = modsetupapi.NewProc("SetupUninstallOEMInfW") - procCommandLineToArgvW = modshell32.NewProc("CommandLineToArgvW") - procSHGetKnownFolderPath = modshell32.NewProc("SHGetKnownFolderPath") - procShellExecuteW = modshell32.NewProc("ShellExecuteW") - procEnumChildWindows = moduser32.NewProc("EnumChildWindows") - procEnumWindows = moduser32.NewProc("EnumWindows") - procExitWindowsEx = moduser32.NewProc("ExitWindowsEx") - procGetClassNameW = moduser32.NewProc("GetClassNameW") - procGetDesktopWindow = moduser32.NewProc("GetDesktopWindow") - procGetForegroundWindow = moduser32.NewProc("GetForegroundWindow") - procGetGUIThreadInfo = moduser32.NewProc("GetGUIThreadInfo") - procGetShellWindow = moduser32.NewProc("GetShellWindow") - procGetWindowThreadProcessId = moduser32.NewProc("GetWindowThreadProcessId") - procIsWindow = moduser32.NewProc("IsWindow") - procIsWindowUnicode = moduser32.NewProc("IsWindowUnicode") - procIsWindowVisible = moduser32.NewProc("IsWindowVisible") - procMessageBoxW = moduser32.NewProc("MessageBoxW") - procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") - procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") - procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") - procGetFileVersionInfoSizeW = modversion.NewProc("GetFileVersionInfoSizeW") - procGetFileVersionInfoW = modversion.NewProc("GetFileVersionInfoW") - procVerQueryValueW = modversion.NewProc("VerQueryValueW") - procWinVerifyTrustEx = modwintrust.NewProc("WinVerifyTrustEx") - procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW") - procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW") - procWSACleanup = modws2_32.NewProc("WSACleanup") - procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW") - procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") - procWSAIoctl = modws2_32.NewProc("WSAIoctl") - procWSALookupServiceBeginW = modws2_32.NewProc("WSALookupServiceBeginW") - procWSALookupServiceEnd = 
modws2_32.NewProc("WSALookupServiceEnd") - procWSALookupServiceNextW = modws2_32.NewProc("WSALookupServiceNextW") - procWSARecv = modws2_32.NewProc("WSARecv") - procWSARecvFrom = modws2_32.NewProc("WSARecvFrom") - procWSASend = modws2_32.NewProc("WSASend") - procWSASendTo = modws2_32.NewProc("WSASendTo") - procWSASocketW = modws2_32.NewProc("WSASocketW") - procWSAStartup = modws2_32.NewProc("WSAStartup") - procbind = modws2_32.NewProc("bind") - procclosesocket = modws2_32.NewProc("closesocket") - procconnect = modws2_32.NewProc("connect") - procgethostbyname = modws2_32.NewProc("gethostbyname") - procgetpeername = modws2_32.NewProc("getpeername") - procgetprotobyname = modws2_32.NewProc("getprotobyname") - procgetservbyname = modws2_32.NewProc("getservbyname") - procgetsockname = modws2_32.NewProc("getsockname") - procgetsockopt = modws2_32.NewProc("getsockopt") - proclisten = modws2_32.NewProc("listen") - procntohs = modws2_32.NewProc("ntohs") - procrecvfrom = modws2_32.NewProc("recvfrom") - procsendto = modws2_32.NewProc("sendto") - procsetsockopt = modws2_32.NewProc("setsockopt") - procshutdown = modws2_32.NewProc("shutdown") - procsocket = modws2_32.NewProc("socket") - procWTSEnumerateSessionsW = modwtsapi32.NewProc("WTSEnumerateSessionsW") - procWTSFreeMemory = modwtsapi32.NewProc("WTSFreeMemory") - procWTSQueryUserToken = modwtsapi32.NewProc("WTSQueryUserToken") -) - -func cm_Get_DevNode_Status(status *uint32, problemNumber *uint32, devInst DEVINST, flags uint32) (ret CONFIGRET) { - r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_Status.Addr(), 4, uintptr(unsafe.Pointer(status)), uintptr(unsafe.Pointer(problemNumber)), uintptr(devInst), uintptr(flags), 0, 0) - ret = CONFIGRET(r0) - return -} - -func cm_Get_Device_Interface_List(interfaceClass *GUID, deviceID *uint16, buffer *uint16, bufferLen uint32, flags uint32) (ret CONFIGRET) { - r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_ListW.Addr(), 5, uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(flags), 0) - ret = CONFIGRET(r0) - return -} - -func cm_Get_Device_Interface_List_Size(len *uint32, interfaceClass *GUID, deviceID *uint16, flags uint32) (ret CONFIGRET) { - r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_List_SizeW.Addr(), 4, uintptr(unsafe.Pointer(len)), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(flags), 0, 0) - ret = CONFIGRET(r0) - return -} - -func cm_MapCrToWin32Err(configRet CONFIGRET, defaultWin32Error Errno) (ret Errno) { - r0, _, _ := syscall.Syscall(procCM_MapCrToWin32Err.Addr(), 2, uintptr(configRet), uintptr(defaultWin32Error), 0) - ret = Errno(r0) - return -} - -func AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, buflen uint32, prevstate *Tokengroups, returnlen *uint32) (err error) { - var _p0 uint32 - if resetToDefault { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall6(procAdjustTokenGroups.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tokenprivileges, buflen uint32, prevstate *Tokenprivileges, returnlen *uint32) (err error) { - var _p0 uint32 - if disableAllPrivileges { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), 
uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) { - r1, _, e1 := syscall.Syscall12(procAllocateAndInitializeSid.Addr(), 11, uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procBuildSecurityDescriptorW.Addr(), 9, uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor))) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - -func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) { - r1, _, e1 := syscall.Syscall(procChangeServiceConfig2W.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) { - r1, _, e1 := syscall.Syscall12(procChangeServiceConfigW.Addr(), 11, uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) { - r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func CloseServiceHandle(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCloseServiceHandle.Addr(), 1, uintptr(handle), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) 
(err error) { - r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(str) - if err != nil { - return - } - return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) -} - -func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) { - r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) { - r1, _, e1 := syscall.Syscall(procCopySid.Addr(), 3, uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) { - var _p0 uint32 - if inheritHandles { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall12(procCreateProcessAsUserW.Addr(), 11, uintptr(token), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall15(procCreateServiceW.Addr(), 13, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), 0, 0) - handle = Handle(r0) - if handle == 0 { - err = errnoErr(e1) - } - return -} - -func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, 
domainSid *SID, sid *SID, sizeSid *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCreateWellKnownSid.Addr(), 4, uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCryptAcquireContextW.Addr(), 5, uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { - r1, _, e1 := syscall.Syscall(procCryptGenRandom.Addr(), 3, uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func CryptReleaseContext(provhandle Handle, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCryptReleaseContext.Addr(), 2, uintptr(provhandle), uintptr(flags), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func DeleteService(service Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteService.Addr(), 1, uintptr(service), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func DeregisterEventSource(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDeregisterEventSource.Addr(), 1, uintptr(handle), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) { - r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func EnumDependentServices(service Handle, activityState uint32, services *ENUM_SERVICE_STATUS, buffSize uint32, bytesNeeded *uint32, servicesReturned *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procEnumDependentServicesW.Addr(), 6, uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) { - r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) { - r0, _, _ := syscall.Syscall(procEqualSid.Addr(), 2, uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)), 0) - isEqual = r0 != 0 - return -} - -func FreeSid(sid *SID) (err error) { - r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) - if r1 != 0 { - err = errnoErr(e1) - 
} - return -} - -func GetLengthSid(sid *SID) (len uint32) { - r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) - len = uint32(r0) - return -} - -func getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - var _p0 *uint16 - _p0, ret = syscall.UTF16PtrFromString(objectName) - if ret != nil { - return - } - return _getNamedSecurityInfo(_p0, objectType, securityInformation, owner, group, dacl, sacl, sd) -} - -func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procGetNamedSecurityInfoW.Addr(), 8, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - -func getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl **ACL, daclDefaulted *bool) (err error) { - var _p0 uint32 - if *daclPresent { - _p0 = 1 - } - var _p1 uint32 - if *daclDefaulted { - _p1 = 1 - } - r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) - *daclPresent = _p0 != 0 - *daclDefaulted = _p1 != 0 - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefaulted *bool) (err error) { - var _p0 uint32 - if *groupDefaulted { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0))) - *groupDefaulted = _p0 != 0 - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) - len = uint32(r0) - return -} - -func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefaulted *bool) (err error) { - var _p0 uint32 - if *ownerDefaulted { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0))) - *ownerDefaulted = _p0 != 0 - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - -func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl **ACL, saclDefaulted *bool) (err error) { - var _p0 uint32 - if 
*saclPresent { - _p0 = 1 - } - var _p1 uint32 - if *saclDefaulted { - _p1 = 1 - } - r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) - *saclPresent = _p0 != 0 - *saclDefaulted = _p1 != 0 - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - -func getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) { - r0, _, _ := syscall.Syscall(procGetSidIdentifierAuthority.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) - authority = (*SidIdentifierAuthority)(unsafe.Pointer(r0)) - return -} - -func getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) { - r0, _, _ := syscall.Syscall(procGetSidSubAuthority.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(index), 0) - subAuthority = (*uint32)(unsafe.Pointer(r0)) - return -} - -func getSidSubAuthorityCount(sid *SID) (count *uint8) { - r0, _, _ := syscall.Syscall(procGetSidSubAuthorityCount.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) - count = (*uint8)(unsafe.Pointer(r0)) - return -} - -func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func ImpersonateSelf(impersonationlevel uint32) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) { - r1, _, e1 := syscall.Syscall(procInitializeSecurityDescriptor.Addr(), 2, uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint32, forceAppsClosed bool, rebootAfterShutdown bool, reason uint32) (err error) { - var _p0 uint32 - if forceAppsClosed { - _p0 = 1 - } - var _p1 uint32 - if rebootAfterShutdown { - _p1 = 1 - } - r1, _, e1 := syscall.Syscall6(procInitiateSystemShutdownExW.Addr(), 6, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func isTokenRestricted(tokenHandle Token) (ret bool, err error) { - r0, _, e1 := syscall.Syscall(procIsTokenRestricted.Addr(), 1, uintptr(tokenHandle), 0, 0) - ret = r0 != 0 - if !ret { - err = errnoErr(e1) - } - return -} - -func isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) { - r0, _, _ := syscall.Syscall(procIsValidSecurityDescriptor.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) - isValid = r0 != 0 - return -} - -func isValidSid(sid *SID) (isValid bool) { - r0, _, 
_ := syscall.Syscall(procIsValidSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) - isValid = r0 != 0 - return -} - -func isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) { - r0, _, _ := syscall.Syscall(procIsWellKnownSid.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(sidType), 0) - isWellKnown = r0 != 0 - return -} - -func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) { - r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall12(procMakeAbsoluteSD.Addr(), 11, uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procMakeSelfRelativeSD.Addr(), 3, uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) { - r0, _, _ := syscall.Syscall(procNotifyServiceStatusChangeW.Addr(), 3, uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier))) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - -func OpenProcessToken(process Handle, access uint32, token *Token) (err error) { - r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err 
error) { - r0, _, e1 := syscall.Syscall(procOpenSCManagerW.Addr(), 3, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) - handle = Handle(r0) - if handle == 0 { - err = errnoErr(e1) - } - return -} - -func OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procOpenServiceW.Addr(), 3, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access)) - handle = Handle(r0) - if handle == 0 { - err = errnoErr(e1) - } - return -} - -func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token) (err error) { - var _p0 uint32 - if openAsSelf { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceConfig2W.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceConfigW.Addr(), 4, uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInfo unsafe.Pointer) (err error) { - err = procQueryServiceDynamicInformation.Find() - if err != nil { - return - } - r1, _, e1 := syscall.Syscall(procQueryServiceDynamicInformation.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(dynamicInfo)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceLockStatusW.Addr(), 4, uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procQueryServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(status)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceStatusEx.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func RegCloseKey(key Handle) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegCloseKey.Addr(), 1, uintptr(key), 0, 0) - if r0 != 0 { - regerrno = syscall.Errno(r0) - } - return -} - -func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegEnumKeyExW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), 
uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)), 0) - if r0 != 0 { - regerrno = syscall.Errno(r0) - } - return -} - -func RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, event Handle, asynchronous bool) (regerrno error) { - var _p0 uint32 - if watchSubtree { - _p0 = 1 - } - var _p1 uint32 - if asynchronous { - _p1 = 1 - } - r0, _, _ := syscall.Syscall6(procRegNotifyChangeKeyValue.Addr(), 5, uintptr(key), uintptr(_p0), uintptr(notifyFilter), uintptr(event), uintptr(_p1), 0) - if r0 != 0 { - regerrno = syscall.Errno(r0) - } - return -} - -func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegOpenKeyExW.Addr(), 5, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)), 0) - if r0 != 0 { - regerrno = syscall.Errno(r0) - } - return -} - -func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) { - r0, _, _ := syscall.Syscall12(procRegQueryInfoKeyW.Addr(), 12, uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) - if r0 != 0 { - regerrno = syscall.Errno(r0) - } - return -} - -func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegQueryValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) - if r0 != 0 { - regerrno = syscall.Errno(r0) - } - return -} - -func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procRegisterEventSourceW.Addr(), 2, uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)), 0) - handle = Handle(r0) - if handle == 0 { - err = errnoErr(e1) - } - return -} - -func RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, context uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procRegisterServiceCtrlHandlerExW.Addr(), 3, uintptr(unsafe.Pointer(serviceName)), uintptr(handlerProc), uintptr(context)) - handle = Handle(r0) - if handle == 0 { - err = errnoErr(e1) - } - return -} - -func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procReportEventW.Addr(), 9, uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func RevertToSelf() (err error) { - r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) - 
if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) { - r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL)), 0, 0) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - -func SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) { - r1, _, e1 := syscall.Syscall(procSetKernelObjectSecurity.Addr(), 3, uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { - var _p0 *uint16 - _p0, ret = syscall.UTF16PtrFromString(objectName) - if ret != nil { - return - } - return _SetNamedSecurityInfo(_p0, objectType, securityInformation, owner, group, dacl, sacl) -} - -func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { - r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfoW.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - -func setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) { - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl *ACL, daclDefaulted bool) (err error) { - var _p0 uint32 - if daclPresent { - _p0 = 1 - } - var _p1 uint32 - if daclDefaulted { - _p1 = 1 - } - r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaulted bool) (err error) { - var _p0 uint32 - if groupDefaulted { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaulted bool) (err error) { - var _p0 uint32 - if ownerDefaulted { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) { - syscall.Syscall(procSetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) - return -} - -func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent 
bool, sacl *ACL, saclDefaulted bool) (err error) { - var _p0 uint32 - if saclPresent { - _p0 = 1 - } - var _p1 uint32 - if saclDefaulted { - _p1 = 1 - } - r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { - r0, _, _ := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - -func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procSetServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(serviceStatus)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetThreadToken(thread *Handle, token Token) (err error) { - r1, _, e1 := syscall.Syscall(procSetThreadToken.Addr(), 2, uintptr(unsafe.Pointer(thread)), uintptr(token), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) { - r1, _, e1 := syscall.Syscall(procStartServiceCtrlDispatcherW.Addr(), 1, uintptr(unsafe.Pointer(serviceTable)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procStartServiceW.Addr(), 3, uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) { - r1, _, e1 := syscall.Syscall6(procCertAddCertificateContextToStore.Addr(), 4, uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func CertCloseStore(store Handle, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCertCloseStore.Addr(), 2, uintptr(store), uintptr(flags), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) { - r0, _, e1 := syscall.Syscall(procCertCreateCertificateContext.Addr(), 3, uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) - context = (*CertContext)(unsafe.Pointer(r0)) - if context == nil { - err = errnoErr(e1) - } - return -} - -func CertDeleteCertificateFromStore(certContext *CertContext) (err error) { - r1, _, e1 := syscall.Syscall(procCertDeleteCertificateFromStore.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func CertDuplicateCertificateContext(certContext *CertContext) (dupContext *CertContext) { 
- r0, _, _ := syscall.Syscall(procCertDuplicateCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0) - dupContext = (*CertContext)(unsafe.Pointer(r0)) - return -} - -func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) { - r0, _, e1 := syscall.Syscall(procCertEnumCertificatesInStore.Addr(), 2, uintptr(store), uintptr(unsafe.Pointer(prevContext)), 0) - context = (*CertContext)(unsafe.Pointer(r0)) - if context == nil { - err = errnoErr(e1) - } - return -} - -func CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevCertContext *CertContext) (cert *CertContext, err error) { - r0, _, e1 := syscall.Syscall6(procCertFindCertificateInStore.Addr(), 6, uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevCertContext))) - cert = (*CertContext)(unsafe.Pointer(r0)) - if cert == nil { - err = errnoErr(e1) - } - return -} - -func CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevChainContext *CertChainContext) (certchain *CertChainContext, err error) { - r0, _, e1 := syscall.Syscall6(procCertFindChainInStore.Addr(), 6, uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevChainContext))) - certchain = (*CertChainContext)(unsafe.Pointer(r0)) - if certchain == nil { - err = errnoErr(e1) - } - return -} - -func CertFindExtension(objId *byte, countExtensions uint32, extensions *CertExtension) (ret *CertExtension) { - r0, _, _ := syscall.Syscall(procCertFindExtension.Addr(), 3, uintptr(unsafe.Pointer(objId)), uintptr(countExtensions), uintptr(unsafe.Pointer(extensions))) - ret = (*CertExtension)(unsafe.Pointer(r0)) - return -} - -func CertFreeCertificateChain(ctx *CertChainContext) { - syscall.Syscall(procCertFreeCertificateChain.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) - return -} - -func CertFreeCertificateContext(ctx *CertContext) (err error) { - r1, _, e1 := syscall.Syscall(procCertFreeCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) { - r1, _, e1 := syscall.Syscall9(procCertGetCertificateChain.Addr(), 8, uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func CertGetNameString(certContext *CertContext, nameType uint32, flags uint32, typePara unsafe.Pointer, name *uint16, size uint32) (chars uint32) { - r0, _, _ := syscall.Syscall6(procCertGetNameStringW.Addr(), 6, uintptr(unsafe.Pointer(certContext)), uintptr(nameType), uintptr(flags), uintptr(typePara), uintptr(unsafe.Pointer(name)), uintptr(size)) - chars = uint32(r0) - return -} - -func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCertOpenStore.Addr(), 5, uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para), 0) - handle 
= Handle(r0) - if handle == 0 { - err = errnoErr(e1) - } - return -} - -func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { - r0, _, e1 := syscall.Syscall(procCertOpenSystemStoreW.Addr(), 2, uintptr(hprov), uintptr(unsafe.Pointer(name)), 0) - store = Handle(r0) - if store == 0 { - err = errnoErr(e1) - } - return -} - -func CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) { - r1, _, e1 := syscall.Syscall6(procCertVerifyCertificateChainPolicy.Addr(), 4, uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, parameters unsafe.Pointer, cryptProvOrNCryptKey *Handle, keySpec *uint32, callerFreeProvOrNCryptKey *bool) (err error) { - var _p0 uint32 - if *callerFreeProvOrNCryptKey { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall6(procCryptAcquireCertificatePrivateKey.Addr(), 6, uintptr(unsafe.Pointer(cert)), uintptr(flags), uintptr(parameters), uintptr(unsafe.Pointer(cryptProvOrNCryptKey)), uintptr(unsafe.Pointer(keySpec)), uintptr(unsafe.Pointer(&_p0))) - *callerFreeProvOrNCryptKey = _p0 != 0 - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte, lenEncodedBytes uint32, flags uint32, decoded unsafe.Pointer, decodedLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptDecodeObject.Addr(), 7, uintptr(encodingType), uintptr(unsafe.Pointer(structType)), uintptr(unsafe.Pointer(encodedBytes)), uintptr(lenEncodedBytes), uintptr(flags), uintptr(decoded), uintptr(unsafe.Pointer(decodedLen)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptProtectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentTypeFlags uint32, expectedFormatTypeFlags uint32, flags uint32, msgAndCertEncodingType *uint32, contentType *uint32, formatType *uint32, certStore *Handle, msg *Handle, context *unsafe.Pointer) (err error) { - r1, _, e1 := syscall.Syscall12(procCryptQueryObject.Addr(), 11, uintptr(objectType), uintptr(object), uintptr(expectedContentTypeFlags), uintptr(expectedFormatTypeFlags), uintptr(flags), uintptr(unsafe.Pointer(msgAndCertEncodingType)), uintptr(unsafe.Pointer(contentType)), uintptr(unsafe.Pointer(formatType)), uintptr(unsafe.Pointer(certStore)), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(context)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptUnprotectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), 
uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (store Handle, err error) { - r0, _, e1 := syscall.Syscall(procPFXImportCertStore.Addr(), 3, uintptr(unsafe.Pointer(pfx)), uintptr(unsafe.Pointer(password)), uintptr(flags)) - store = Handle(r0) - if store == 0 { - err = errnoErr(e1) - } - return -} - -func DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) { - r0, _, _ := syscall.Syscall(procDnsNameCompare_W.Addr(), 2, uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)), 0) - same = r0 != 0 - return -} - -func DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { - var _p0 *uint16 - _p0, status = syscall.UTF16PtrFromString(name) - if status != nil { - return - } - return _DnsQuery(_p0, qtype, options, extra, qrs, pr) -} - -func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { - r0, _, _ := syscall.Syscall6(procDnsQuery_W.Addr(), 6, uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) - if r0 != 0 { - status = syscall.Errno(r0) - } - return -} - -func DnsRecordListFree(rl *DNSRecord, freetype uint32) { - syscall.Syscall(procDnsRecordListFree.Addr(), 2, uintptr(unsafe.Pointer(rl)), uintptr(freetype), 0) - return -} - -func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) { - r0, _, _ := syscall.Syscall6(procDwmGetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - -func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) { - r0, _, _ := syscall.Syscall6(procDwmSetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - -func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { - r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) - if r0 != 0 { - errcode = syscall.Errno(r0) - } - return -} - -func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { - r0, _, _ := syscall.Syscall(procGetAdaptersInfo.Addr(), 2, uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)), 0) - if r0 != 0 { - errcode = syscall.Errno(r0) - } - return -} - -func getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) { - r0, _, _ := syscall.Syscall(procGetBestInterfaceEx.Addr(), 2, uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex)), 0) - if r0 != 0 { - errcode = syscall.Errno(r0) - } - return -} - -func GetIfEntry(pIfRow *MibIfRow) (errcode error) { - r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0) - if r0 != 0 { - errcode = syscall.Errno(r0) - } - return -} - -func AssignProcessToJobObject(job Handle, process Handle) (err error) { - r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - 
-func CancelIo(s Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIo.Addr(), 1, uintptr(s), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func CancelIoEx(s Handle, o *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(s), uintptr(unsafe.Pointer(o)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func CloseHandle(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(overlapped)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) { - r1, _, e1 := syscall.Syscall(procCreateDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateEventExW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) - handle = Handle(r0) - if handle == 0 || e1 == ERROR_ALREADY_EXISTS { - err = errnoErr(e1) - } - return -} - -func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 0) - handle = Handle(r0) - if handle == 0 || e1 == ERROR_ALREADY_EXISTS { - err = errnoErr(e1) - } - return -} - -func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateFileMappingW.Addr(), 6, uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name))) - handle = Handle(r0) - if handle == 0 || e1 == ERROR_ALREADY_EXISTS { - err = errnoErr(e1) - } - return -} - -func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) - handle = Handle(r0) - if handle == InvalidHandle { - err = errnoErr(e1) - } - return -} - -func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procCreateHardLinkW.Addr(), 3, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved)) - if r1&0xff == 0 { - err = errnoErr(e1) - } - return -} - -func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, threadcnt uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt), 0, 0) - handle = Handle(r0) - if handle == 0 { - err = errnoErr(e1) - } - return -} - -func CreateJobObject(jobAttr 
*SecurityAttributes, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procCreateJobObjectW.Addr(), 2, uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)), 0) - handle = Handle(r0) - if handle == 0 { - err = errnoErr(e1) - } - return -} - -func CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateMutexExW.Addr(), 4, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) - handle = Handle(r0) - if handle == 0 || e1 == ERROR_ALREADY_EXISTS { - err = errnoErr(e1) - } - return -} - -func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) { - var _p0 uint32 - if initialOwner { - _p0 = 1 - } - r0, _, e1 := syscall.Syscall(procCreateMutexW.Addr(), 3, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name))) - handle = Handle(r0) - if handle == 0 || e1 == ERROR_ALREADY_EXISTS { - err = errnoErr(e1) - } - return -} - -func CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) - handle = Handle(r0) - if handle == InvalidHandle { - err = errnoErr(e1) - } - return -} - -func CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCreatePipe.Addr(), 4, uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) { - var _p0 uint32 - if inheritHandles { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall12(procCreateProcessW.Addr(), 10, uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) - if r1&0xff == 0 { - err = errnoErr(e1) - } - return -} - -func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procCreateToolhelp32Snapshot.Addr(), 2, uintptr(flags), uintptr(processId), 0) - handle = Handle(r0) - if handle == InvalidHandle { - err = errnoErr(e1) - } - return -} - -func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, 
uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func DeleteFile(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteFileW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func deleteProcThreadAttributeList(attrlist *ProcThreadAttributeList) { - syscall.Syscall(procDeleteProcThreadAttributeList.Addr(), 1, uintptr(unsafe.Pointer(attrlist)), 0, 0) - return -} - -func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteVolumeMountPointW.Addr(), 1, uintptr(unsafe.Pointer(volumeMountPoint)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) { - var _p0 uint32 - if bInheritHandle { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall9(procDuplicateHandle.Addr(), 7, uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func ExitProcess(exitcode uint32) { - syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) - return -} - -func ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) - n = uint32(r0) - if n == 0 { - err = errnoErr(e1) - } - return -} - -func FindClose(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindClose.Addr(), 1, uintptr(handle), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func FindCloseChangeNotification(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindCloseChangeNotification.Addr(), 1, uintptr(handle), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func FindFirstChangeNotification(path string, watchSubtree bool, notifyFilter uint32) (handle Handle, err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(path) - if err != nil { - return - } - return _FindFirstChangeNotification(_p0, watchSubtree, notifyFilter) -} - -func _FindFirstChangeNotification(path *uint16, watchSubtree bool, notifyFilter uint32) (handle Handle, err error) { - var _p1 uint32 - if watchSubtree { - _p1 = 1 - } - r0, _, e1 := syscall.Syscall(procFindFirstChangeNotificationW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(_p1), uintptr(notifyFilter)) - handle = Handle(r0) - if handle == InvalidHandle { - err = errnoErr(e1) - } - return -} - -func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstFileW.Addr(), 2, 
uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)), 0) - handle = Handle(r0) - if handle == InvalidHandle { - err = errnoErr(e1) - } - return -} - -func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) - handle = Handle(r0) - if handle == InvalidHandle { - err = errnoErr(e1) - } - return -} - -func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstVolumeW.Addr(), 2, uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength), 0) - handle = Handle(r0) - if handle == InvalidHandle { - err = errnoErr(e1) - } - return -} - -func FindNextChangeNotification(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextChangeNotification.Addr(), 1, uintptr(handle), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func findNextFile1(handle Handle, data *win32finddata1) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextFileW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextVolumeMountPointW.Addr(), 3, uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextVolumeW.Addr(), 3, uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindResourceW.Addr(), 3, uintptr(module), uintptr(name), uintptr(resType)) - resInfo = Handle(r0) - if resInfo == 0 { - err = errnoErr(e1) - } - return -} - -func FindVolumeClose(findVolume Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindVolumeClose.Addr(), 1, uintptr(findVolume), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindVolumeMountPointClose.Addr(), 1, uintptr(findVolumeMountPoint), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func FlushFileBuffers(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFlushFileBuffers.Addr(), 1, uintptr(handle), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func FlushViewOfFile(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procFlushViewOfFile.Addr(), 2, uintptr(addr), uintptr(length), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) { - var _p0 *uint16 - if len(buf) > 0 { - _p0 = &buf[0] - } - r0, _, e1 := syscall.Syscall9(procFormatMessageW.Addr(), 7, uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)), 0, 0) - n = uint32(r0) - if n == 0 { - err = errnoErr(e1) - } - return -} - 
-func FreeEnvironmentStrings(envs *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procFreeEnvironmentStringsW.Addr(), 1, uintptr(unsafe.Pointer(envs)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func FreeLibrary(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFreeLibrary.Addr(), 1, uintptr(handle), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGenerateConsoleCtrlEvent.Addr(), 2, uintptr(ctrlEvent), uintptr(processGroupID), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetACP() (acp uint32) { - r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0) - acp = uint32(r0) - return -} - -func GetActiveProcessorCount(groupNumber uint16) (ret uint32) { - r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) - ret = uint32(r0) - return -} - -func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { - r1, _, e1 := syscall.Syscall(procGetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetCommandLine() (cmd *uint16) { - r0, _, _ := syscall.Syscall(procGetCommandLineW.Addr(), 0, 0, 0, 0) - cmd = (*uint16)(unsafe.Pointer(r0)) - return -} - -func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetComputerName(buf *uint16, n *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetComputerNameW.Addr(), 2, uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetConsoleMode(console Handle, mode *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { - r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetCurrentDirectoryW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) - n = uint32(r0) - if n == 0 { - err = errnoErr(e1) - } - return -} - -func GetCurrentProcessId() (pid uint32) { - r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0) - pid = uint32(r0) - return -} - -func GetCurrentThreadId() (id uint32) { - r0, _, _ := syscall.Syscall(procGetCurrentThreadId.Addr(), 0, 0, 0, 0) - id = uint32(r0) - return -} - -func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) { - r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetDriveType(rootPathName *uint16) (driveType uint32) { - r0, _, _ := syscall.Syscall(procGetDriveTypeW.Addr(), 
1, uintptr(unsafe.Pointer(rootPathName)), 0, 0) - driveType = uint32(r0) - return -} - -func GetEnvironmentStrings() (envs *uint16, err error) { - r0, _, e1 := syscall.Syscall(procGetEnvironmentStringsW.Addr(), 0, 0, 0, 0) - envs = (*uint16)(unsafe.Pointer(r0)) - if envs == nil { - err = errnoErr(e1) - } - return -} - -func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetEnvironmentVariableW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size)) - n = uint32(r0) - if n == 0 { - err = errnoErr(e1) - } - return -} - -func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetExitCodeProcess.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(exitcode)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) { - r1, _, e1 := syscall.Syscall(procGetFileAttributesExW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetFileAttributes(name *uint16) (attrs uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileAttributesW.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) - attrs = uint32(r0) - if attrs == INVALID_FILE_ATTRIBUTES { - err = errnoErr(e1) - } - return -} - -func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) { - r1, _, e1 := syscall.Syscall(procGetFileInformationByHandle.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetFileType(filehandle Handle) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0) - n = uint32(r0) - if n == 0 { - err = errnoErr(e1) - } - return -} - -func GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall6(procGetFinalPathNameByHandleW.Addr(), 4, uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags), 0, 0) - n = uint32(r0) - if n == 0 { - err = errnoErr(e1) - } - return -} - -func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall6(procGetFullPathNameW.Addr(), 4, uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)), 0, 0) - n = uint32(r0) - if n == 0 { - err = errnoErr(e1) - } - return -} - -func GetLargePageMinimum() (size uintptr) { - r0, _, _ := syscall.Syscall(procGetLargePageMinimum.Addr(), 0, 0, 0, 0) - size = uintptr(r0) - return -} - -func GetLastError() (lasterr error) { - r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0) - if r0 != 0 { - lasterr = syscall.Errno(r0) - } - return -} - -func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLogicalDriveStringsW.Addr(), 2, uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)), 0) - n = uint32(r0) - if n == 0 { - err = 
errnoErr(e1) - } - return -} - -func GetLogicalDrives() (drivesBitMask uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLogicalDrives.Addr(), 0, 0, 0, 0) - drivesBitMask = uint32(r0) - if drivesBitMask == 0 { - err = errnoErr(e1) - } - return -} - -func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLongPathNameW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen)) - n = uint32(r0) - if n == 0 { - err = errnoErr(e1) - } - return -} - -func GetMaximumProcessorCount(groupNumber uint16) (ret uint32) { - r0, _, _ := syscall.Syscall(procGetMaximumProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) - ret = uint32(r0) - return -} - -func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) - n = uint32(r0) - if n == 0 { - err = errnoErr(e1) - } - return -} - -func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) { - r1, _, e1 := syscall.Syscall(procGetModuleHandleExW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) { - var _p0 uint32 - if wait { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall6(procGetOverlappedResult.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetPriorityClass(process Handle) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetPriorityClass.Addr(), 1, uintptr(process), 0, 0) - ret = uint32(r0) - if ret == 0 { - err = errnoErr(e1) - } - return -} - -func GetProcAddress(module Handle, procname string) (proc uintptr, err error) { - var _p0 *byte - _p0, err = syscall.BytePtrFromString(procname) - if err != nil { - return - } - return _GetProcAddress(module, _p0) -} - -func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) { - r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), uintptr(unsafe.Pointer(procname)), 0) - proc = uintptr(r0) - if proc == 0 { - err = errnoErr(e1) - } - return -} - -func GetProcessId(process Handle) (id uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetProcessId.Addr(), 1, uintptr(process), 0, 0) - id = uint32(r0) - 
if id == 0 { - err = errnoErr(e1) - } - return -} - -func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetProcessPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetProcessShutdownParameters.Addr(), 2, uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) { - syscall.Syscall6(procGetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags)), 0, 0) - return -} - -func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overlapped **Overlapped, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetShortPathNameW.Addr(), 3, uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen)) - n = uint32(r0) - if n == 0 { - err = errnoErr(e1) - } - return -} - -func GetStartupInfo(startupInfo *StartupInfo) (err error) { - r1, _, e1 := syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetStdHandle(stdhandle uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procGetStdHandle.Addr(), 1, uintptr(stdhandle), 0, 0) - handle = Handle(r0) - if handle == InvalidHandle { - err = errnoErr(e1) - } - return -} - -func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetSystemDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) - len = uint32(r0) - if len == 0 { - err = errnoErr(e1) - } - return -} - -func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetSystemPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetSystemTimeAsFileTime(time *Filetime) { - syscall.Syscall(procGetSystemTimeAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) - return -} - -func GetSystemTimePreciseAsFileTime(time *Filetime) { - 
syscall.Syscall(procGetSystemTimePreciseAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) - return -} - -func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetSystemWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) - len = uint32(r0) - if len == 0 { - err = errnoErr(e1) - } - return -} - -func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetTempPathW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) - n = uint32(r0) - if n == 0 { - err = errnoErr(e1) - } - return -} - -func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetThreadPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func getTickCount64() (ms uint64) { - r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0) - ms = uint64(r0) - return -} - -func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetTimeZoneInformation.Addr(), 1, uintptr(unsafe.Pointer(tzi)), 0, 0) - rc = uint32(r0) - if rc == 0xffffffff { - err = errnoErr(e1) - } - return -} - -func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetUserPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetVersion() (ver uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetVersion.Addr(), 0, 0, 0, 0) - ver = uint32(r0) - if ver == 0 { - err = errnoErr(e1) - } - return -} - -func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetVolumeInformationByHandleW.Addr(), 8, uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetVolumeInformationW.Addr(), 8, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) { - r1, _, e1 := 
syscall.Syscall(procGetVolumeNameForVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetVolumePathNameW.Addr(), 3, uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetVolumePathNamesForVolumeNameW.Addr(), 4, uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) - len = uint32(r0) - if len == 0 { - err = errnoErr(e1) - } - return -} - -func initializeProcThreadAttributeList(attrlist *ProcThreadAttributeList, attrcount uint32, flags uint32, size *uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procInitializeProcThreadAttributeList.Addr(), 4, uintptr(unsafe.Pointer(attrlist)), uintptr(attrcount), uintptr(flags), uintptr(unsafe.Pointer(size)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func IsWow64Process(handle Handle, isWow64 *bool) (err error) { - var _p0 uint32 - if *isWow64 { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall(procIsWow64Process.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(&_p0)), 0) - *isWow64 = _p0 != 0 - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func IsWow64Process2(handle Handle, processMachine *uint16, nativeMachine *uint16) (err error) { - err = procIsWow64Process2.Find() - if err != nil { - return - } - r1, _, e1 := syscall.Syscall(procIsWow64Process2.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(libname) - if err != nil { - return - } - return _LoadLibraryEx(_p0, zero, flags) -} - -func _LoadLibraryEx(libname *uint16, zero Handle, flags uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadLibraryExW.Addr(), 3, uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags)) - handle = Handle(r0) - if handle == 0 { - err = errnoErr(e1) - } - return -} - -func LoadLibrary(libname string) (handle Handle, err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(libname) - if err != nil { - return - } - return _LoadLibrary(_p0) -} - -func _LoadLibrary(libname *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadLibraryW.Addr(), 1, uintptr(unsafe.Pointer(libname)), 0, 0) - handle = Handle(r0) - if handle == 0 { - err = errnoErr(e1) - } - return -} - -func LoadResource(module Handle, resInfo Handle) (resData Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) - resData = Handle(r0) - if resData == 0 { - err = errnoErr(e1) - } - return -} - -func LocalAlloc(flags 
uint32, length uint32) (ptr uintptr, err error) { - r0, _, e1 := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(length), 0) - ptr = uintptr(r0) - if ptr == 0 { - err = errnoErr(e1) - } - return -} - -func LocalFree(hmem Handle) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLocalFree.Addr(), 1, uintptr(hmem), 0, 0) - handle = Handle(r0) - if handle != 0 { - err = errnoErr(e1) - } - return -} - -func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func LockResource(resData Handle) (addr uintptr, err error) { - r0, _, e1 := syscall.Syscall(procLockResource.Addr(), 1, uintptr(resData), 0, 0) - addr = uintptr(r0) - if addr == 0 { - err = errnoErr(e1) - } - return -} - -func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) { - r0, _, e1 := syscall.Syscall6(procMapViewOfFile.Addr(), 5, uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length), 0) - addr = uintptr(r0) - if addr == 0 { - err = errnoErr(e1) - } - return -} - -func Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procModule32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procModule32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func MoveFile(from *uint16, to *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procMoveFileW.Addr(), 2, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) { - r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) - nwrite = int32(r0) - if nwrite == 0 { - err = errnoErr(e1) - } - return -} - -func OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) { - var _p0 uint32 - if inheritHandle { - _p0 = 1 - } - r0, _, e1 := syscall.Syscall(procOpenEventW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) - handle = Handle(r0) - if handle == 0 { - err = errnoErr(e1) - } - return -} - -func OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) { - var _p0 uint32 - if inheritHandle { - _p0 = 1 - } - r0, _, e1 := syscall.Syscall(procOpenMutexW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) - handle = Handle(r0) - if handle == 0 { - err = errnoErr(e1) - } - return -} - -func OpenProcess(desiredAccess 
uint32, inheritHandle bool, processId uint32) (handle Handle, err error) { - var _p0 uint32 - if inheritHandle { - _p0 = 1 - } - r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(processId)) - handle = Handle(r0) - if handle == 0 { - err = errnoErr(e1) - } - return -} - -func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) { - var _p0 uint32 - if inheritHandle { - _p0 = 1 - } - r0, _, e1 := syscall.Syscall(procOpenThread.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(threadId)) - handle = Handle(r0) - if handle == 0 { - err = errnoErr(e1) - } - return -} - -func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procPostQueuedCompletionStatus.Addr(), 4, uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procProcess32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procProcess32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procProcessIdToSessionId.Addr(), 2, uintptr(pid), uintptr(unsafe.Pointer(sessionid)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func PulseEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procPulseEvent.Addr(), 1, uintptr(event), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) - n = uint32(r0) - if n == 0 { - err = errnoErr(e1) - } - return -} - -func QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryFullProcessImageNameW.Addr(), 4, uintptr(proc), uintptr(flags), uintptr(unsafe.Pointer(exeName)), uintptr(unsafe.Pointer(size)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobObjectInformation uintptr, JobObjectInformationLength uint32, retlen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) { - r1, _, e1 := syscall.Syscall6(procReadConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, 
completionRoutine uintptr) (err error) { - var _p0 uint32 - if watchSubTree { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall9(procReadDirectoryChangesW.Addr(), 8, uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { - var _p0 *byte - if len(buf) > 0 { - _p0 = &buf[0] - } - r1, _, e1 := syscall.Syscall6(procReadFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesRead *uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procReadProcessMemory.Addr(), 5, uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesRead)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func ReleaseMutex(mutex Handle) (err error) { - r1, _, e1 := syscall.Syscall(procReleaseMutex.Addr(), 1, uintptr(mutex), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func RemoveDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procRemoveDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func ResetEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func ResumeThread(thread Handle) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0) - ret = uint32(r0) - if ret == 0xffffffff { - err = errnoErr(e1) - } - return -} - -func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func setConsoleCursorPosition(console Handle, position uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetConsoleMode(console Handle, mode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(console), uintptr(mode), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetCurrentDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetDefaultDllDirectories(directoryFlags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetDefaultDllDirectories.Addr(), 1, uintptr(directoryFlags), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetDllDirectory(path string) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(path) - if err != nil { - return - } - return _SetDllDirectory(_p0) -} - -func _SetDllDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetDllDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetEndOfFile(handle Handle) (err error) { - 
r1, _, e1 := syscall.Syscall(procSetEndOfFile.Addr(), 1, uintptr(handle), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetErrorMode(mode uint32) (ret uint32) { - r0, _, _ := syscall.Syscall(procSetErrorMode.Addr(), 1, uintptr(mode), 0, 0) - ret = uint32(r0) - return -} - -func SetEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetEvent.Addr(), 1, uintptr(event), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetFileAttributes(name *uint16, attrs uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileAttributesW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(attrs), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(handle), uintptr(flags), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inBufferLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) { - r0, _, e1 := syscall.Syscall6(procSetFilePointer.Addr(), 4, uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence), 0, 0) - newlowoffset = uint32(r0) - if newlowoffset == 0xffffffff { - err = errnoErr(e1) - } - return -} - -func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) { - r0, _, e1 := syscall.Syscall6(procSetInformationJobObject.Addr(), 4, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), 0, 0) - ret = int(r0) - if ret == 0 { - err = errnoErr(e1) - } - return -} - -func SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetNamedPipeHandleState.Addr(), 4, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetPriorityClass(process Handle, priorityClass uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetPriorityClass.Addr(), 2, uintptr(process), uintptr(priorityClass), 0) - if r1 == 0 { - err 
= errnoErr(e1) - } - return -} - -func SetProcessPriorityBoost(process Handle, disable bool) (err error) { - var _p0 uint32 - if disable { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall(procSetProcessPriorityBoost.Addr(), 2, uintptr(process), uintptr(_p0), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetProcessShutdownParameters(level uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetProcessShutdownParameters.Addr(), 2, uintptr(level), uintptr(flags), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetStdHandle(stdhandle uint32, handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetVolumeLabelW.Addr(), 2, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetVolumeMountPointW.Addr(), 2, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SizeofResource(module Handle, resInfo Handle) (size uint32, err error) { - r0, _, e1 := syscall.Syscall(procSizeofResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) - size = uint32(r0) - if size == 0 { - err = errnoErr(e1) - } - return -} - -func SleepEx(milliseconds uint32, alertable bool) (ret uint32) { - var _p0 uint32 - if alertable { - _p0 = 1 - } - r0, _, _ := syscall.Syscall(procSleepEx.Addr(), 2, uintptr(milliseconds), uintptr(_p0), 0) - ret = uint32(r0) - return -} - -func TerminateJobObject(job Handle, exitCode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procTerminateJobObject.Addr(), 2, uintptr(job), uintptr(exitCode), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func TerminateProcess(handle Handle, exitcode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procThread32First.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procThread32Next.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func UnmapViewOfFile(addr uintptr) (err 
error) { - r1, _, e1 := syscall.Syscall(procUnmapViewOfFile.Addr(), 1, uintptr(addr), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, attr uintptr, value unsafe.Pointer, size uintptr, prevvalue unsafe.Pointer, returnedsize *uintptr) (err error) { - r1, _, e1 := syscall.Syscall9(procUpdateProcThreadAttribute.Addr(), 7, uintptr(unsafe.Pointer(attrlist)), uintptr(flags), uintptr(attr), uintptr(value), uintptr(size), uintptr(prevvalue), uintptr(unsafe.Pointer(returnedsize)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) { - r0, _, e1 := syscall.Syscall6(procVirtualAlloc.Addr(), 4, uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect), 0, 0) - value = uintptr(r0) - if value == 0 { - err = errnoErr(e1) - } - return -} - -func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualFree.Addr(), 3, uintptr(address), uintptr(size), uintptr(freetype)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func VirtualLock(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualLock.Addr(), 2, uintptr(addr), uintptr(length), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procVirtualProtect.Addr(), 4, uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func VirtualProtectEx(process Handle, address uintptr, size uintptr, newProtect uint32, oldProtect *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procVirtualProtectEx.Addr(), 5, uintptr(process), uintptr(address), uintptr(size), uintptr(newProtect), uintptr(unsafe.Pointer(oldProtect)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualQuery.Addr(), 3, uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func VirtualQueryEx(process Handle, address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procVirtualQueryEx.Addr(), 4, uintptr(process), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func VirtualUnlock(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualUnlock.Addr(), 2, uintptr(addr), uintptr(length), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func WTSGetActiveConsoleSessionId() (sessionID uint32) { - r0, _, _ := syscall.Syscall(procWTSGetActiveConsoleSessionId.Addr(), 0, 0, 0, 0) - sessionID = uint32(r0) - return -} - -func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) { - var _p0 uint32 - if waitAll { - _p0 = 1 - } - r0, _, e1 := syscall.Syscall6(procWaitForMultipleObjects.Addr(), 4, uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds), 0, 0) - event = uint32(r0) - if event == 0xffffffff { - err = errnoErr(e1) - } - return -} - -func WaitForSingleObject(handle Handle, waitMilliseconds uint32) 
(event uint32, err error) { - r0, _, e1 := syscall.Syscall(procWaitForSingleObject.Addr(), 2, uintptr(handle), uintptr(waitMilliseconds), 0) - event = uint32(r0) - if event == 0xffffffff { - err = errnoErr(e1) - } - return -} - -func WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) { - r1, _, e1 := syscall.Syscall6(procWriteConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { - var _p0 *byte - if len(buf) > 0 { - _p0 = &buf[0] - } - r1, _, e1 := syscall.Syscall6(procWriteFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func WriteProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesWritten *uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procWriteProcessMemory.Addr(), 5, uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesWritten)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall9(procAcceptEx.Addr(), 8, uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) { - syscall.Syscall9(procGetAcceptExSockaddrs.Addr(), 8, uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)), 0) - return -} - -func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procTransmitFile.Addr(), 7, uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func NetApiBufferFree(buf *byte) (neterr error) { - r0, _, _ := syscall.Syscall(procNetApiBufferFree.Addr(), 1, uintptr(unsafe.Pointer(buf)), 0, 0) - if r0 != 0 { - neterr = syscall.Errno(r0) - } - return -} - -func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) { - r0, _, _ := syscall.Syscall(procNetGetJoinInformation.Addr(), 3, uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType))) - if r0 != 0 { - neterr = syscall.Errno(r0) - } - return -} - -func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) { - r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), 
uintptr(unsafe.Pointer(buf)), 0, 0) - if r0 != 0 { - neterr = syscall.Errno(r0) - } - return -} - -func NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, allocationSize *int64, attributes uint32, share uint32, disposition uint32, options uint32, eabuffer uintptr, ealength uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(attributes), uintptr(share), uintptr(disposition), uintptr(options), uintptr(eabuffer), uintptr(ealength), 0) - if r0 != 0 { - ntstatus = NTStatus(r0) - } - return -} - -func NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (ntstatus error) { - r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) - if r0 != 0 { - ntstatus = NTStatus(r0) - } - return -} - -func NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32, retLen *uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), uintptr(unsafe.Pointer(retLen)), 0) - if r0 != 0 { - ntstatus = NTStatus(r0) - } - return -} - -func NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32, retLen *uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen), uintptr(unsafe.Pointer(retLen)), 0, 0) - if r0 != 0 { - ntstatus = NTStatus(r0) - } - return -} - -func NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte, inBufferLen uint32, class uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), uintptr(class), 0) - if r0 != 0 { - ntstatus = NTStatus(r0) - } - return -} - -func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procNtSetInformationProcess.Addr(), 4, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), 0, 0) - if r0 != 0 { - ntstatus = NTStatus(r0) - } - return -} - -func NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32) (ntstatus error) { - r0, _, _ := syscall.Syscall(procNtSetSystemInformation.Addr(), 3, uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen)) - if r0 != 0 { - ntstatus = NTStatus(r0) - } - return -} - -func RtlAddFunctionTable(functionTable *RUNTIME_FUNCTION, entryCount uint32, baseAddress uintptr) (ret bool) { - r0, _, _ := syscall.Syscall(procRtlAddFunctionTable.Addr(), 3, uintptr(unsafe.Pointer(functionTable)), uintptr(entryCount), uintptr(baseAddress)) - ret = r0 != 0 - return -} 
- -func RtlDefaultNpAcl(acl **ACL) (ntstatus error) { - r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(acl)), 0, 0) - if r0 != 0 { - ntstatus = NTStatus(r0) - } - return -} - -func RtlDeleteFunctionTable(functionTable *RUNTIME_FUNCTION) (ret bool) { - r0, _, _ := syscall.Syscall(procRtlDeleteFunctionTable.Addr(), 1, uintptr(unsafe.Pointer(functionTable)), 0, 0) - ret = r0 != 0 - return -} - -func RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0) - if r0 != 0 { - ntstatus = NTStatus(r0) - } - return -} - -func RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) { - r0, _, _ := syscall.Syscall6(procRtlDosPathNameToRelativeNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0) - if r0 != 0 { - ntstatus = NTStatus(r0) - } - return -} - -func RtlGetCurrentPeb() (peb *PEB) { - r0, _, _ := syscall.Syscall(procRtlGetCurrentPeb.Addr(), 0, 0, 0, 0) - peb = (*PEB)(unsafe.Pointer(r0)) - return -} - -func rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) { - syscall.Syscall(procRtlGetNtVersionNumbers.Addr(), 3, uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber))) - return -} - -func rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) { - r0, _, _ := syscall.Syscall(procRtlGetVersion.Addr(), 1, uintptr(unsafe.Pointer(info)), 0, 0) - if r0 != 0 { - ntstatus = NTStatus(r0) - } - return -} - -func RtlInitString(destinationString *NTString, sourceString *byte) { - syscall.Syscall(procRtlInitString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0) - return -} - -func RtlInitUnicodeString(destinationString *NTUnicodeString, sourceString *uint16) { - syscall.Syscall(procRtlInitUnicodeString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0) - return -} - -func rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) { - r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(ntstatus), 0, 0) - ret = syscall.Errno(r0) - return -} - -func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) { - r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)), 0) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - -func coCreateGuid(pguid *GUID) (ret error) { - r0, _, _ := syscall.Syscall(procCoCreateGuid.Addr(), 1, uintptr(unsafe.Pointer(pguid)), 0, 0) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - -func CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable **uintptr) (ret error) { - r0, _, _ := syscall.Syscall6(procCoGetObject.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bindOpts)), uintptr(unsafe.Pointer(guid)), uintptr(unsafe.Pointer(functionTable)), 0, 0) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - -func CoInitializeEx(reserved uintptr, coInit uint32) (ret error) { 
- r0, _, _ := syscall.Syscall(procCoInitializeEx.Addr(), 2, uintptr(reserved), uintptr(coInit), 0) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - -func CoTaskMemFree(address unsafe.Pointer) { - syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(address), 0, 0) - return -} - -func CoUninitialize() { - syscall.Syscall(procCoUninitialize.Addr(), 0, 0, 0, 0) - return -} - -func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) { - r0, _, _ := syscall.Syscall(procStringFromGUID2.Addr(), 3, uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax)) - chars = int32(r0) - return -} - -func EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procEnumProcessModules.Addr(), 4, uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *uint32, filterFlag uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procEnumProcessModulesEx.Addr(), 5, uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), uintptr(filterFlag), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) { - var _p0 *uint32 - if len(processIds) > 0 { - _p0 = &processIds[0] - } - r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(processIds)), uintptr(unsafe.Pointer(bytesReturned))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetModuleBaseNameW.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(baseName)), uintptr(size), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetModuleFileNameExW.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetModuleInformation.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(modinfo)), uintptr(cb), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func QueryWorkingSetEx(process Handle, pv uintptr, cb uint32) (err error) { - r1, _, e1 := syscall.Syscall(procQueryWorkingSetEx.Addr(), 3, uintptr(process), uintptr(pv), uintptr(cb)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SubscribeServiceChangeNotifications(service Handle, eventType uint32, callback uintptr, callbackCtx uintptr, subscription *uintptr) (ret error) { - ret = procSubscribeServiceChangeNotifications.Find() - if ret != nil { - return - } - r0, _, _ := syscall.Syscall6(procSubscribeServiceChangeNotifications.Addr(), 5, uintptr(service), uintptr(eventType), uintptr(callback), uintptr(callbackCtx), uintptr(unsafe.Pointer(subscription)), 0) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - -func UnsubscribeServiceChangeNotifications(subscription uintptr) (err error) { - err = procUnsubscribeServiceChangeNotifications.Find() - if err != nil { - return - } - 
syscall.Syscall(procUnsubscribeServiceChangeNotifications.Addr(), 1, uintptr(subscription), 0, 0) - return -} - -func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetUserNameExW.Addr(), 3, uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize))) - if r1&0xff == 0 { - err = errnoErr(e1) - } - return -} - -func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procTranslateNameW.Addr(), 5, uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)), 0) - if r1&0xff == 0 { - err = errnoErr(e1) - } - return -} - -func SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiBuildDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiCallClassInstaller.Addr(), 3, uintptr(installFunction), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiCancelDriverInfoSearch.Addr(), 1, uintptr(deviceInfoSet), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGuidListSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiClassGuidsFromNameExW.Addr(), 6, uintptr(unsafe.Pointer(className)), uintptr(unsafe.Pointer(classGuidList)), uintptr(classGuidListSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiClassNameFromGuidExW.Addr(), 6, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(className)), uintptr(classNameSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineName *uint16, reserved uintptr) (handle DevInfo, err error) { - r0, _, e1 := syscall.Syscall6(procSetupDiCreateDeviceInfoListExW.Addr(), 4, uintptr(unsafe.Pointer(classGUID)), uintptr(hwndParent), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0) - handle = DevInfo(r0) - if handle == DevInfo(InvalidHandle) { - err = errnoErr(e1) - } - return -} - -func setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUID *GUID, DeviceDescription *uint16, hwndParent uintptr, CreationFlags DICD, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall9(procSetupDiCreateDeviceInfoW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(DeviceName)), uintptr(unsafe.Pointer(classGUID)), 
uintptr(unsafe.Pointer(DeviceDescription)), uintptr(hwndParent), uintptr(CreationFlags), uintptr(unsafe.Pointer(deviceInfoData)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiDestroyDeviceInfoList.Addr(), 1, uintptr(deviceInfoSet), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiDestroyDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiEnumDeviceInfo.Addr(), 3, uintptr(deviceInfoSet), uintptr(memberIndex), uintptr(unsafe.Pointer(deviceInfoData))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex uint32, driverInfoData *DrvInfoData) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiEnumDriverInfoW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType), uintptr(memberIndex), uintptr(unsafe.Pointer(driverInfoData)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintptr, Flags DIGCF, deviceInfoSet DevInfo, machineName *uint16, reserved uintptr) (handle DevInfo, err error) { - r0, _, e1 := syscall.Syscall9(procSetupDiGetClassDevsExW.Addr(), 7, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(Enumerator)), uintptr(hwndParent), uintptr(Flags), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0) - handle = DevInfo(r0) - if handle == DevInfo(InvalidHandle) { - err = errnoErr(e1) - } - return -} - -func SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiGetClassInstallParamsW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), uintptr(unsafe.Pointer(requiredSize)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailData *DevInfoListDetailData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInfoListDetailW.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoSetDetailData)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, instanceId *uint16, instanceIdSize uint32, instanceIdRequiredSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiGetDeviceInstanceIdW.Addr(), 5, 
uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(instanceId)), uintptr(instanceIdSize), uintptr(unsafe.Pointer(instanceIdRequiredSize)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, propertyKey *DEVPROPKEY, propertyType *DEVPROPTYPE, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procSetupDiGetDevicePropertyW.Addr(), 8, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(flags), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyRegDataType *uint32, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procSetupDiGetDeviceRegistryPropertyW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyRegDataType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData, driverInfoDetailData *DrvInfoDetailData, driverInfoDetailDataSize uint32, requiredSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiGetDriverInfoDetailW.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)), uintptr(unsafe.Pointer(driverInfoDetailData)), uintptr(driverInfoDetailDataSize), uintptr(unsafe.Pointer(requiredSize))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (key Handle, err error) { - r0, _, e1 := syscall.Syscall6(procSetupDiOpenDevRegKey.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(Scope), uintptr(HwProfile), uintptr(KeyType), uintptr(samDesired)) - key = Handle(r0) - if key == InvalidHandle { - err = errnoErr(e1) - } - return -} - -func SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiSetClassInstallParamsW.Addr(), 4, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), 
0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiSetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffer *byte, propertyBufferSize uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetupDiSetDeviceRegistryPropertyW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) { - r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procSetupUninstallOEMInfW.Addr(), 3, uintptr(unsafe.Pointer(infFileName)), uintptr(flags), uintptr(reserved)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) { - r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0) - argv = (*[8192]*[8192]uint16)(unsafe.Pointer(r0)) - if argv == nil { - err = errnoErr(e1) - } - return -} - -func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) { - r0, _, _ := syscall.Syscall6(procSHGetKnownFolderPath.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)), 0, 0) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - -func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) { - r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd)) - if r1 <= 32 { - err = errnoErr(e1) - } - return -} - -func EnumChildWindows(hwnd HWND, enumFunc uintptr, param unsafe.Pointer) { - syscall.Syscall(procEnumChildWindows.Addr(), 3, uintptr(hwnd), uintptr(enumFunc), uintptr(param)) - return -} - -func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) { - r1, _, e1 := syscall.Syscall(procEnumWindows.Addr(), 2, uintptr(enumFunc), uintptr(param), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func ExitWindowsEx(flags uint32, reason uint32) (err error) { - r1, _, e1 := syscall.Syscall(procExitWindowsEx.Addr(), 2, uintptr(flags), uintptr(reason), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetClassName(hwnd HWND, className 
*uint16, maxCount int32) (copied int32, err error) { - r0, _, e1 := syscall.Syscall(procGetClassNameW.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount)) - copied = int32(r0) - if copied == 0 { - err = errnoErr(e1) - } - return -} - -func GetDesktopWindow() (hwnd HWND) { - r0, _, _ := syscall.Syscall(procGetDesktopWindow.Addr(), 0, 0, 0, 0) - hwnd = HWND(r0) - return -} - -func GetForegroundWindow() (hwnd HWND) { - r0, _, _ := syscall.Syscall(procGetForegroundWindow.Addr(), 0, 0, 0, 0) - hwnd = HWND(r0) - return -} - -func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) { - r1, _, e1 := syscall.Syscall(procGetGUIThreadInfo.Addr(), 2, uintptr(thread), uintptr(unsafe.Pointer(info)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetShellWindow() (shellWindow HWND) { - r0, _, _ := syscall.Syscall(procGetShellWindow.Addr(), 0, 0, 0, 0) - shellWindow = HWND(r0) - return -} - -func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetWindowThreadProcessId.Addr(), 2, uintptr(hwnd), uintptr(unsafe.Pointer(pid)), 0) - tid = uint32(r0) - if tid == 0 { - err = errnoErr(e1) - } - return -} - -func IsWindow(hwnd HWND) (isWindow bool) { - r0, _, _ := syscall.Syscall(procIsWindow.Addr(), 1, uintptr(hwnd), 0, 0) - isWindow = r0 != 0 - return -} - -func IsWindowUnicode(hwnd HWND) (isUnicode bool) { - r0, _, _ := syscall.Syscall(procIsWindowUnicode.Addr(), 1, uintptr(hwnd), 0, 0) - isUnicode = r0 != 0 - return -} - -func IsWindowVisible(hwnd HWND) (isVisible bool) { - r0, _, _ := syscall.Syscall(procIsWindowVisible.Addr(), 1, uintptr(hwnd), 0, 0) - isVisible = r0 != 0 - return -} - -func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { - r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) - ret = int32(r0) - if ret == 0 { - err = errnoErr(e1) - } - return -} - -func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) { - var _p0 uint32 - if inheritExisting { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall(procCreateEnvironmentBlock.Addr(), 3, uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func DestroyEnvironmentBlock(block *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDestroyEnvironmentBlock.Addr(), 1, uintptr(unsafe.Pointer(block)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetUserProfileDirectoryW.Addr(), 3, uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func GetFileVersionInfoSize(filename string, zeroHandle *Handle) (bufSize uint32, err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(filename) - if err != nil { - return - } - return _GetFileVersionInfoSize(_p0, zeroHandle) -} - -func _GetFileVersionInfoSize(filename *uint16, zeroHandle *Handle) (bufSize uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileVersionInfoSizeW.Addr(), 2, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(zeroHandle)), 0) - bufSize = uint32(r0) - if bufSize == 0 { - err = errnoErr(e1) - } - return -} - -func GetFileVersionInfo(filename string, handle uint32, bufSize uint32, buffer 
unsafe.Pointer) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(filename) - if err != nil { - return - } - return _GetFileVersionInfo(_p0, handle, bufSize, buffer) -} - -func _GetFileVersionInfo(filename *uint16, handle uint32, bufSize uint32, buffer unsafe.Pointer) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileVersionInfoW.Addr(), 4, uintptr(unsafe.Pointer(filename)), uintptr(handle), uintptr(bufSize), uintptr(buffer), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func VerQueryValue(block unsafe.Pointer, subBlock string, pointerToBufferPointer unsafe.Pointer, bufSize *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(subBlock) - if err != nil { - return - } - return _VerQueryValue(block, _p0, pointerToBufferPointer, bufSize) -} - -func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPointer unsafe.Pointer, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procVerQueryValueW.Addr(), 4, uintptr(block), uintptr(unsafe.Pointer(subBlock)), uintptr(pointerToBufferPointer), uintptr(unsafe.Pointer(bufSize)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) { - r0, _, _ := syscall.Syscall(procWinVerifyTrustEx.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data))) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - -func FreeAddrInfoW(addrinfo *AddrinfoW) { - syscall.Syscall(procFreeAddrInfoW.Addr(), 1, uintptr(unsafe.Pointer(addrinfo)), 0, 0) - return -} - -func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) { - r0, _, _ := syscall.Syscall6(procGetAddrInfoW.Addr(), 4, uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)), 0, 0) - if r0 != 0 { - sockerr = syscall.Errno(r0) - } - return -} - -func WSACleanup() (err error) { - r1, _, e1 := syscall.Syscall(procWSACleanup.Addr(), 0, 0, 0, 0) - if r1 == socket_error { - err = errnoErr(e1) - } - return -} - -func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) { - r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength))) - n = int32(r0) - if n == -1 { - err = errnoErr(e1) - } - return -} - -func WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { - var _p0 uint32 - if wait { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { - r1, _, e1 := syscall.Syscall9(procWSAIoctl.Addr(), 9, uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) - if r1 == socket_error { - err = errnoErr(e1) - } - return -} - -func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) (err error) { - r1, _, e1 := 
syscall.Syscall(procWSALookupServiceBeginW.Addr(), 3, uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle))) - if r1 == socket_error { - err = errnoErr(e1) - } - return -} - -func WSALookupServiceEnd(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procWSALookupServiceEnd.Addr(), 1, uintptr(handle), 0, 0) - if r1 == socket_error { - err = errnoErr(e1) - } - return -} - -func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WSAQUERYSET) (err error) { - r1, _, e1 := syscall.Syscall6(procWSALookupServiceNextW.Addr(), 4, uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet)), 0, 0) - if r1 == socket_error { - err = errnoErr(e1) - } - return -} - -func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) - if r1 == socket_error { - err = errnoErr(e1) - } - return -} - -func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSARecvFrom.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) - if r1 == socket_error { - err = errnoErr(e1) - } - return -} - -func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSASend.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) - if r1 == socket_error { - err = errnoErr(e1) - } - return -} - -func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSASendTo.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) - if r1 == socket_error { - err = errnoErr(e1) - } - return -} - -func WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procWSASocketW.Addr(), 6, uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protoInfo)), uintptr(group), uintptr(flags)) - handle = Handle(r0) - if handle == InvalidHandle { - err = errnoErr(e1) - } - return -} - -func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { - r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0) - if r0 != 0 { - sockerr = syscall.Errno(r0) - } - return -} - -func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) - if r1 == 
socket_error { - err = errnoErr(e1) - } - return -} - -func Closesocket(s Handle) (err error) { - r1, _, e1 := syscall.Syscall(procclosesocket.Addr(), 1, uintptr(s), 0, 0) - if r1 == socket_error { - err = errnoErr(e1) - } - return -} - -func connect(s Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procconnect.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) - if r1 == socket_error { - err = errnoErr(e1) - } - return -} - -func GetHostByName(name string) (h *Hostent, err error) { - var _p0 *byte - _p0, err = syscall.BytePtrFromString(name) - if err != nil { - return - } - return _GetHostByName(_p0) -} - -func _GetHostByName(name *byte) (h *Hostent, err error) { - r0, _, e1 := syscall.Syscall(procgethostbyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) - h = (*Hostent)(unsafe.Pointer(r0)) - if h == nil { - err = errnoErr(e1) - } - return -} - -func getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if r1 == socket_error { - err = errnoErr(e1) - } - return -} - -func GetProtoByName(name string) (p *Protoent, err error) { - var _p0 *byte - _p0, err = syscall.BytePtrFromString(name) - if err != nil { - return - } - return _GetProtoByName(_p0) -} - -func _GetProtoByName(name *byte) (p *Protoent, err error) { - r0, _, e1 := syscall.Syscall(procgetprotobyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) - p = (*Protoent)(unsafe.Pointer(r0)) - if p == nil { - err = errnoErr(e1) - } - return -} - -func GetServByName(name string, proto string) (s *Servent, err error) { - var _p0 *byte - _p0, err = syscall.BytePtrFromString(name) - if err != nil { - return - } - var _p1 *byte - _p1, err = syscall.BytePtrFromString(proto) - if err != nil { - return - } - return _GetServByName(_p0, _p1) -} - -func _GetServByName(name *byte, proto *byte) (s *Servent, err error) { - r0, _, e1 := syscall.Syscall(procgetservbyname.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)), 0) - s = (*Servent)(unsafe.Pointer(r0)) - if s == nil { - err = errnoErr(e1) - } - return -} - -func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if r1 == socket_error { - err = errnoErr(e1) - } - return -} - -func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) { - r1, _, e1 := syscall.Syscall6(procgetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)), 0) - if r1 == socket_error { - err = errnoErr(e1) - } - return -} - -func listen(s Handle, backlog int32) (err error) { - r1, _, e1 := syscall.Syscall(proclisten.Addr(), 2, uintptr(s), uintptr(backlog), 0) - if r1 == socket_error { - err = errnoErr(e1) - } - return -} - -func Ntohs(netshort uint16) (u uint16) { - r0, _, _ := syscall.Syscall(procntohs.Addr(), 1, uintptr(netshort), 0, 0) - u = uint16(r0) - return -} - -func recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen *int32) (n int32, err error) { - var _p0 *byte - if len(buf) > 0 { - _p0 = &buf[0] - } - r0, _, e1 := syscall.Syscall6(procrecvfrom.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) - n = 
int32(r0) - if n == -1 { - err = errnoErr(e1) - } - return -} - -func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) (err error) { - var _p0 *byte - if len(buf) > 0 { - _p0 = &buf[0] - } - r1, _, e1 := syscall.Syscall6(procsendto.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen)) - if r1 == socket_error { - err = errnoErr(e1) - } - return -} - -func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) { - r1, _, e1 := syscall.Syscall6(procsetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen), 0) - if r1 == socket_error { - err = errnoErr(e1) - } - return -} - -func shutdown(s Handle, how int32) (err error) { - r1, _, e1 := syscall.Syscall(procshutdown.Addr(), 2, uintptr(s), uintptr(how), 0) - if r1 == socket_error { - err = errnoErr(e1) - } - return -} - -func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procsocket.Addr(), 3, uintptr(af), uintptr(typ), uintptr(protocol)) - handle = Handle(r0) - if handle == InvalidHandle { - err = errnoErr(e1) - } - return -} - -func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procWTSEnumerateSessionsW.Addr(), 5, uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func WTSFreeMemory(ptr uintptr) { - syscall.Syscall(procWTSFreeMemory.Addr(), 1, uintptr(ptr), 0, 0) - return -} - -func WTSQueryUserToken(session uint32, token *Token) (err error) { - r1, _, e1 := syscall.Syscall(procWTSQueryUserToken.Addr(), 2, uintptr(session), uintptr(unsafe.Pointer(token)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} diff --git a/vendor/google.golang.org/genproto/LICENSE b/vendor/google.golang.org/genproto/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/google.golang.org/genproto/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go deleted file mode 100644 index f34a38e4e..000000000 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.26.0 -// protoc v3.12.2 -// source: google/rpc/status.proto - -package status - -import ( - reflect "reflect" - sync "sync" - - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - anypb "google.golang.org/protobuf/types/known/anypb" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// The `Status` type defines a logical error model that is suitable for -// different programming environments, including REST APIs and RPC APIs. It is -// used by [gRPC](https://github.com/grpc). Each `Status` message contains -// three pieces of data: error code, error message, and error details. -// -// You can find out more about this error model and how to work with it in the -// [API Design Guide](https://cloud.google.com/apis/design/errors). -type Status struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. - Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` - // A developer-facing error message, which should be in English. Any - // user-facing error message should be localized and sent in the - // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` - // A list of messages that carry the error details. There is a common set of - // message types for APIs to use. - Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` -} - -func (x *Status) Reset() { - *x = Status{} - if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_status_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Status) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Status) ProtoMessage() {} - -func (x *Status) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_status_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Status.ProtoReflect.Descriptor instead. 
-func (*Status) Descriptor() ([]byte, []int) { - return file_google_rpc_status_proto_rawDescGZIP(), []int{0} -} - -func (x *Status) GetCode() int32 { - if x != nil { - return x.Code - } - return 0 -} - -func (x *Status) GetMessage() string { - if x != nil { - return x.Message - } - return "" -} - -func (x *Status) GetDetails() []*anypb.Any { - if x != nil { - return x.Details - } - return nil -} - -var File_google_rpc_status_proto protoreflect.FileDescriptor - -var file_google_rpc_status_proto_rawDesc = []byte{ - 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, - 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, - 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, - 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, - 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x61, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x0b, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3b, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} - -var ( - file_google_rpc_status_proto_rawDescOnce sync.Once - file_google_rpc_status_proto_rawDescData = file_google_rpc_status_proto_rawDesc -) - -func file_google_rpc_status_proto_rawDescGZIP() []byte { - file_google_rpc_status_proto_rawDescOnce.Do(func() { - file_google_rpc_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_status_proto_rawDescData) - }) - return file_google_rpc_status_proto_rawDescData -} - -var file_google_rpc_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_rpc_status_proto_goTypes = []interface{}{ - (*Status)(nil), // 0: google.rpc.Status - (*anypb.Any)(nil), // 1: google.protobuf.Any -} -var file_google_rpc_status_proto_depIdxs = []int32{ - 1, // 0: google.rpc.Status.details:type_name -> google.protobuf.Any - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_google_rpc_status_proto_init() } -func file_google_rpc_status_proto_init() { - if File_google_rpc_status_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_rpc_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*Status); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_rpc_status_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_rpc_status_proto_goTypes, - DependencyIndexes: file_google_rpc_status_proto_depIdxs, - MessageInfos: file_google_rpc_status_proto_msgTypes, - }.Build() - File_google_rpc_status_proto = out.File - file_google_rpc_status_proto_rawDesc = nil - file_google_rpc_status_proto_goTypes = nil - file_google_rpc_status_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/grpc/AUTHORS b/vendor/google.golang.org/grpc/AUTHORS deleted file mode 100644 index e491a9e7f..000000000 --- a/vendor/google.golang.org/grpc/AUTHORS +++ /dev/null @@ -1 +0,0 @@ -Google Inc. diff --git a/vendor/google.golang.org/grpc/LICENSE b/vendor/google.golang.org/grpc/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/google.golang.org/grpc/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/google.golang.org/grpc/NOTICE.txt b/vendor/google.golang.org/grpc/NOTICE.txt deleted file mode 100644 index 530197749..000000000 --- a/vendor/google.golang.org/grpc/NOTICE.txt +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2014 gRPC authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go deleted file mode 100644 index 0b206a578..000000000 --- a/vendor/google.golang.org/grpc/codes/code_string.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package codes - -import "strconv" - -func (c Code) String() string { - switch c { - case OK: - return "OK" - case Canceled: - return "Canceled" - case Unknown: - return "Unknown" - case InvalidArgument: - return "InvalidArgument" - case DeadlineExceeded: - return "DeadlineExceeded" - case NotFound: - return "NotFound" - case AlreadyExists: - return "AlreadyExists" - case PermissionDenied: - return "PermissionDenied" - case ResourceExhausted: - return "ResourceExhausted" - case FailedPrecondition: - return "FailedPrecondition" - case Aborted: - return "Aborted" - case OutOfRange: - return "OutOfRange" - case Unimplemented: - return "Unimplemented" - case Internal: - return "Internal" - case Unavailable: - return "Unavailable" - case DataLoss: - return "DataLoss" - case Unauthenticated: - return "Unauthenticated" - default: - return "Code(" + strconv.FormatInt(int64(c), 10) + ")" - } -} diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go deleted file mode 100644 index 11b106182..000000000 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ /dev/null @@ -1,244 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package codes defines the canonical error codes used by gRPC. It is -// consistent across various languages. -package codes // import "google.golang.org/grpc/codes" - -import ( - "fmt" - "strconv" -) - -// A Code is an unsigned 32-bit error code as defined in the gRPC spec. -type Code uint32 - -const ( - // OK is returned on success. - OK Code = 0 - - // Canceled indicates the operation was canceled (typically by the caller). - // - // The gRPC framework will generate this error code when cancellation - // is requested. - Canceled Code = 1 - - // Unknown error. An example of where this error may be returned is - // if a Status value received from another address space belongs to - // an error-space that is not known in this address space. Also - // errors raised by APIs that do not return enough error information - // may be converted to this error. 
- // - // The gRPC framework will generate this error code in the above two - // mentioned cases. - Unknown Code = 2 - - // InvalidArgument indicates client specified an invalid argument. - // Note that this differs from FailedPrecondition. It indicates arguments - // that are problematic regardless of the state of the system - // (e.g., a malformed file name). - // - // This error code will not be generated by the gRPC framework. - InvalidArgument Code = 3 - - // DeadlineExceeded means operation expired before completion. - // For operations that change the state of the system, this error may be - // returned even if the operation has completed successfully. For - // example, a successful response from a server could have been delayed - // long enough for the deadline to expire. - // - // The gRPC framework will generate this error code when the deadline is - // exceeded. - DeadlineExceeded Code = 4 - - // NotFound means some requested entity (e.g., file or directory) was - // not found. - // - // This error code will not be generated by the gRPC framework. - NotFound Code = 5 - - // AlreadyExists means an attempt to create an entity failed because one - // already exists. - // - // This error code will not be generated by the gRPC framework. - AlreadyExists Code = 6 - - // PermissionDenied indicates the caller does not have permission to - // execute the specified operation. It must not be used for rejections - // caused by exhausting some resource (use ResourceExhausted - // instead for those errors). It must not be - // used if the caller cannot be identified (use Unauthenticated - // instead for those errors). - // - // This error code will not be generated by the gRPC core framework, - // but expect authentication middleware to use it. - PermissionDenied Code = 7 - - // ResourceExhausted indicates some resource has been exhausted, perhaps - // a per-user quota, or perhaps the entire file system is out of space. - // - // This error code will be generated by the gRPC framework in - // out-of-memory and server overload situations, or when a message is - // larger than the configured maximum size. - ResourceExhausted Code = 8 - - // FailedPrecondition indicates operation was rejected because the - // system is not in a state required for the operation's execution. - // For example, directory to be deleted may be non-empty, an rmdir - // operation is applied to a non-directory, etc. - // - // A litmus test that may help a service implementor in deciding - // between FailedPrecondition, Aborted, and Unavailable: - // (a) Use Unavailable if the client can retry just the failing call. - // (b) Use Aborted if the client should retry at a higher-level - // (e.g., restarting a read-modify-write sequence). - // (c) Use FailedPrecondition if the client should not retry until - // the system state has been explicitly fixed. E.g., if an "rmdir" - // fails because the directory is non-empty, FailedPrecondition - // should be returned since the client should not retry unless - // they have first fixed up the directory by deleting files from it. - // (d) Use FailedPrecondition if the client performs conditional - // REST Get/Update/Delete on a resource and the resource on the - // server does not match the condition. E.g., conflicting - // read-modify-write on the same resource. - // - // This error code will not be generated by the gRPC framework. 
- FailedPrecondition Code = 9 - - // Aborted indicates the operation was aborted, typically due to a - // concurrency issue like sequencer check failures, transaction aborts, - // etc. - // - // See litmus test above for deciding between FailedPrecondition, - // Aborted, and Unavailable. - // - // This error code will not be generated by the gRPC framework. - Aborted Code = 10 - - // OutOfRange means operation was attempted past the valid range. - // E.g., seeking or reading past end of file. - // - // Unlike InvalidArgument, this error indicates a problem that may - // be fixed if the system state changes. For example, a 32-bit file - // system will generate InvalidArgument if asked to read at an - // offset that is not in the range [0,2^32-1], but it will generate - // OutOfRange if asked to read from an offset past the current - // file size. - // - // There is a fair bit of overlap between FailedPrecondition and - // OutOfRange. We recommend using OutOfRange (the more specific - // error) when it applies so that callers who are iterating through - // a space can easily look for an OutOfRange error to detect when - // they are done. - // - // This error code will not be generated by the gRPC framework. - OutOfRange Code = 11 - - // Unimplemented indicates operation is not implemented or not - // supported/enabled in this service. - // - // This error code will be generated by the gRPC framework. Most - // commonly, you will see this error code when a method implementation - // is missing on the server. It can also be generated for unknown - // compression algorithms or a disagreement as to whether an RPC should - // be streaming. - Unimplemented Code = 12 - - // Internal errors. Means some invariants expected by underlying - // system has been broken. If you see one of these errors, - // something is very broken. - // - // This error code will be generated by the gRPC framework in several - // internal error conditions. - Internal Code = 13 - - // Unavailable indicates the service is currently unavailable. - // This is a most likely a transient condition and may be corrected - // by retrying with a backoff. Note that it is not always safe to retry - // non-idempotent operations. - // - // See litmus test above for deciding between FailedPrecondition, - // Aborted, and Unavailable. - // - // This error code will be generated by the gRPC framework during - // abrupt shutdown of a server process or network connection. - Unavailable Code = 14 - - // DataLoss indicates unrecoverable data loss or corruption. - // - // This error code will not be generated by the gRPC framework. - DataLoss Code = 15 - - // Unauthenticated indicates the request does not have valid - // authentication credentials for the operation. - // - // The gRPC framework will generate this error code when the - // authentication metadata is invalid or a Credentials callback fails, - // but also expect authentication middleware to generate it. 
- Unauthenticated Code = 16 - - _maxCode = 17 -) - -var strToCode = map[string]Code{ - `"OK"`: OK, - `"CANCELLED"`:/* [sic] */ Canceled, - `"UNKNOWN"`: Unknown, - `"INVALID_ARGUMENT"`: InvalidArgument, - `"DEADLINE_EXCEEDED"`: DeadlineExceeded, - `"NOT_FOUND"`: NotFound, - `"ALREADY_EXISTS"`: AlreadyExists, - `"PERMISSION_DENIED"`: PermissionDenied, - `"RESOURCE_EXHAUSTED"`: ResourceExhausted, - `"FAILED_PRECONDITION"`: FailedPrecondition, - `"ABORTED"`: Aborted, - `"OUT_OF_RANGE"`: OutOfRange, - `"UNIMPLEMENTED"`: Unimplemented, - `"INTERNAL"`: Internal, - `"UNAVAILABLE"`: Unavailable, - `"DATA_LOSS"`: DataLoss, - `"UNAUTHENTICATED"`: Unauthenticated, -} - -// UnmarshalJSON unmarshals b into the Code. -func (c *Code) UnmarshalJSON(b []byte) error { - // From json.Unmarshaler: By convention, to approximate the behavior of - // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as - // a no-op. - if string(b) == "null" { - return nil - } - if c == nil { - return fmt.Errorf("nil receiver passed to UnmarshalJSON") - } - - if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { - if ci >= _maxCode { - return fmt.Errorf("invalid code: %q", ci) - } - - *c = Code(ci) - return nil - } - - if jc, ok := strToCode[string(b)]; ok { - *c = jc - return nil - } - return fmt.Errorf("invalid code: %q", string(b)) -} diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go deleted file mode 100644 index e5c6513ed..000000000 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ /dev/null @@ -1,166 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package status implements errors returned by gRPC. These errors are -// serialized and transmitted on the wire between server and client, and allow -// for additional data to be transmitted via the Details field in the status -// proto. gRPC service handlers should return an error created by this -// package, and gRPC clients should expect a corresponding error to be -// returned from the RPC call. -// -// This package upholds the invariants that a non-nil error may not -// contain an OK code, and an OK code must result in a nil error. -package status - -import ( - "errors" - "fmt" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" - spb "google.golang.org/genproto/googleapis/rpc/status" - "google.golang.org/grpc/codes" -) - -// Status represents an RPC status code, message, and details. It is immutable -// and should be created with New, Newf, or FromProto. -type Status struct { - s *spb.Status -} - -// New returns a Status representing c and msg. -func New(c codes.Code, msg string) *Status { - return &Status{s: &spb.Status{Code: int32(c), Message: msg}} -} - -// Newf returns New(c, fmt.Sprintf(format, a...)). 
-func Newf(c codes.Code, format string, a ...interface{}) *Status { - return New(c, fmt.Sprintf(format, a...)) -} - -// FromProto returns a Status representing s. -func FromProto(s *spb.Status) *Status { - return &Status{s: proto.Clone(s).(*spb.Status)} -} - -// Err returns an error representing c and msg. If c is OK, returns nil. -func Err(c codes.Code, msg string) error { - return New(c, msg).Err() -} - -// Errorf returns Error(c, fmt.Sprintf(format, a...)). -func Errorf(c codes.Code, format string, a ...interface{}) error { - return Err(c, fmt.Sprintf(format, a...)) -} - -// Code returns the status code contained in s. -func (s *Status) Code() codes.Code { - if s == nil || s.s == nil { - return codes.OK - } - return codes.Code(s.s.Code) -} - -// Message returns the message contained in s. -func (s *Status) Message() string { - if s == nil || s.s == nil { - return "" - } - return s.s.Message -} - -// Proto returns s's status as an spb.Status proto message. -func (s *Status) Proto() *spb.Status { - if s == nil { - return nil - } - return proto.Clone(s.s).(*spb.Status) -} - -// Err returns an immutable error representing s; returns nil if s.Code() is OK. -func (s *Status) Err() error { - if s.Code() == codes.OK { - return nil - } - return &Error{s: s} -} - -// WithDetails returns a new status with the provided details messages appended to the status. -// If any errors are encountered, it returns nil and the first error encountered. -func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { - if s.Code() == codes.OK { - return nil, errors.New("no error details for status with code OK") - } - // s.Code() != OK implies that s.Proto() != nil. - p := s.Proto() - for _, detail := range details { - any, err := ptypes.MarshalAny(detail) - if err != nil { - return nil, err - } - p.Details = append(p.Details, any) - } - return &Status{s: p}, nil -} - -// Details returns a slice of details messages attached to the status. -// If a detail cannot be decoded, the error is returned in place of the detail. -func (s *Status) Details() []interface{} { - if s == nil || s.s == nil { - return nil - } - details := make([]interface{}, 0, len(s.s.Details)) - for _, any := range s.s.Details { - detail := &ptypes.DynamicAny{} - if err := ptypes.UnmarshalAny(any, detail); err != nil { - details = append(details, err) - continue - } - details = append(details, detail.Message) - } - return details -} - -func (s *Status) String() string { - return fmt.Sprintf("rpc error: code = %s desc = %s", s.Code(), s.Message()) -} - -// Error wraps a pointer of a status proto. It implements error and Status, -// and a nil *Error should never be returned by this package. -type Error struct { - s *Status -} - -func (e *Error) Error() string { - return e.s.String() -} - -// GRPCStatus returns the Status represented by se. -func (e *Error) GRPCStatus() *Status { - return e.s -} - -// Is implements future error.Is functionality. -// A Error is equivalent if the code and message are identical. -func (e *Error) Is(target error) bool { - tse, ok := target.(*Error) - if !ok { - return false - } - return proto.Equal(e.s.s, tse.s.s) -} diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go deleted file mode 100644 index 6d163b6e3..000000000 --- a/vendor/google.golang.org/grpc/status/status.go +++ /dev/null @@ -1,135 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package status implements errors returned by gRPC. These errors are -// serialized and transmitted on the wire between server and client, and allow -// for additional data to be transmitted via the Details field in the status -// proto. gRPC service handlers should return an error created by this -// package, and gRPC clients should expect a corresponding error to be -// returned from the RPC call. -// -// This package upholds the invariants that a non-nil error may not -// contain an OK code, and an OK code must result in a nil error. -package status - -import ( - "context" - "errors" - "fmt" - - spb "google.golang.org/genproto/googleapis/rpc/status" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal/status" -) - -// Status references google.golang.org/grpc/internal/status. It represents an -// RPC status code, message, and details. It is immutable and should be -// created with New, Newf, or FromProto. -// https://godoc.org/google.golang.org/grpc/internal/status -type Status = status.Status - -// New returns a Status representing c and msg. -func New(c codes.Code, msg string) *Status { - return status.New(c, msg) -} - -// Newf returns New(c, fmt.Sprintf(format, a...)). -func Newf(c codes.Code, format string, a ...interface{}) *Status { - return New(c, fmt.Sprintf(format, a...)) -} - -// Error returns an error representing c and msg. If c is OK, returns nil. -func Error(c codes.Code, msg string) error { - return New(c, msg).Err() -} - -// Errorf returns Error(c, fmt.Sprintf(format, a...)). -func Errorf(c codes.Code, format string, a ...interface{}) error { - return Error(c, fmt.Sprintf(format, a...)) -} - -// ErrorProto returns an error representing s. If s.Code is OK, returns nil. -func ErrorProto(s *spb.Status) error { - return FromProto(s).Err() -} - -// FromProto returns a Status representing s. -func FromProto(s *spb.Status) *Status { - return status.FromProto(s) -} - -// FromError returns a Status representation of err. -// -// - If err was produced by this package or implements the method `GRPCStatus() -// *Status`, the appropriate Status is returned. -// -// - If err is nil, a Status is returned with codes.OK and no message. -// -// - Otherwise, err is an error not compatible with this package. In this -// case, a Status is returned with codes.Unknown and err's Error() message, -// and ok is false. -func FromError(err error) (s *Status, ok bool) { - if err == nil { - return nil, true - } - if se, ok := err.(interface { - GRPCStatus() *Status - }); ok { - return se.GRPCStatus(), true - } - return New(codes.Unknown, err.Error()), false -} - -// Convert is a convenience function which removes the need to handle the -// boolean return value from FromError. -func Convert(err error) *Status { - s, _ := FromError(err) - return s -} - -// Code returns the Code of the error if it is a Status error, codes.OK if err -// is nil, or codes.Unknown otherwise. 
-func Code(err error) codes.Code { - // Don't use FromError to avoid allocation of OK status. - if err == nil { - return codes.OK - } - if se, ok := err.(interface { - GRPCStatus() *Status - }); ok { - return se.GRPCStatus().Code() - } - return codes.Unknown -} - -// FromContextError converts a context error or wrapped context error into a -// Status. It returns a Status with codes.OK if err is nil, or a Status with -// codes.Unknown if err is non-nil and not a context error. -func FromContextError(err error) *Status { - if err == nil { - return nil - } - if errors.Is(err, context.DeadlineExceeded) { - return New(codes.DeadlineExceeded, err.Error()) - } - if errors.Is(err, context.Canceled) { - return New(codes.Canceled, err.Error()) - } - return New(codes.Unknown, err.Error()) -} diff --git a/vendor/google.golang.org/protobuf/AUTHORS b/vendor/google.golang.org/protobuf/AUTHORS deleted file mode 100644 index 2b00ddba0..000000000 --- a/vendor/google.golang.org/protobuf/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/google.golang.org/protobuf/CONTRIBUTORS b/vendor/google.golang.org/protobuf/CONTRIBUTORS deleted file mode 100644 index 1fbd3e976..000000000 --- a/vendor/google.golang.org/protobuf/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/google.golang.org/protobuf/LICENSE b/vendor/google.golang.org/protobuf/LICENSE deleted file mode 100644 index 49ea0f928..000000000 --- a/vendor/google.golang.org/protobuf/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2018 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/google.golang.org/protobuf/PATENTS b/vendor/google.golang.org/protobuf/PATENTS deleted file mode 100644 index 733099041..000000000 --- a/vendor/google.golang.org/protobuf/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go deleted file mode 100644 index 179d6e8fc..000000000 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ /dev/null @@ -1,770 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package prototext - -import ( - "fmt" - "unicode/utf8" - - "google.golang.org/protobuf/internal/encoding/messageset" - "google.golang.org/protobuf/internal/encoding/text" - "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/flags" - "google.golang.org/protobuf/internal/genid" - "google.golang.org/protobuf/internal/pragma" - "google.golang.org/protobuf/internal/set" - "google.golang.org/protobuf/internal/strs" - "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -// Unmarshal reads the given []byte into the given proto.Message. -// The provided message must be mutable (e.g., a non-nil pointer to a message). -func Unmarshal(b []byte, m proto.Message) error { - return UnmarshalOptions{}.Unmarshal(b, m) -} - -// UnmarshalOptions is a configurable textproto format unmarshaler. -type UnmarshalOptions struct { - pragma.NoUnkeyedLiterals - - // AllowPartial accepts input for messages that will result in missing - // required fields. If AllowPartial is false (the default), Unmarshal will - // return error if there are any missing required fields. - AllowPartial bool - - // DiscardUnknown specifies whether to ignore unknown fields when parsing. - // An unknown field is any field whose field name or field number does not - // resolve to any known or extension field in the message. - // By default, unmarshal rejects unknown fields as an error. 
- DiscardUnknown bool - - // Resolver is used for looking up types when unmarshaling - // google.protobuf.Any messages or extension fields. - // If nil, this defaults to using protoregistry.GlobalTypes. - Resolver interface { - protoregistry.MessageTypeResolver - protoregistry.ExtensionTypeResolver - } -} - -// Unmarshal reads the given []byte and populates the given proto.Message -// using options in the UnmarshalOptions object. -// The provided message must be mutable (e.g., a non-nil pointer to a message). -func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { - return o.unmarshal(b, m) -} - -// unmarshal is a centralized function that all unmarshal operations go through. -// For profiling purposes, avoid changing the name of this function or -// introducing other code paths for unmarshal that do not go through this. -func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error { - proto.Reset(m) - - if o.Resolver == nil { - o.Resolver = protoregistry.GlobalTypes - } - - dec := decoder{text.NewDecoder(b), o} - if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil { - return err - } - if o.AllowPartial { - return nil - } - return proto.CheckInitialized(m) -} - -type decoder struct { - *text.Decoder - opts UnmarshalOptions -} - -// newError returns an error object with position info. -func (d decoder) newError(pos int, f string, x ...interface{}) error { - line, column := d.Position(pos) - head := fmt.Sprintf("(line %d:%d): ", line, column) - return errors.New(head+f, x...) -} - -// unexpectedTokenError returns a syntax error for the given unexpected token. -func (d decoder) unexpectedTokenError(tok text.Token) error { - return d.syntaxError(tok.Pos(), "unexpected token: %s", tok.RawString()) -} - -// syntaxError returns a syntax error for given position. -func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { - line, column := d.Position(pos) - head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) - return errors.New(head+f, x...) -} - -// unmarshalMessage unmarshals into the given protoreflect.Message. -func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { - messageDesc := m.Descriptor() - if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { - return errors.New("no support for proto1 MessageSets") - } - - if messageDesc.FullName() == genid.Any_message_fullname { - return d.unmarshalAny(m, checkDelims) - } - - if checkDelims { - tok, err := d.Read() - if err != nil { - return err - } - - if tok.Kind() != text.MessageOpen { - return d.unexpectedTokenError(tok) - } - } - - var seenNums set.Ints - var seenOneofs set.Ints - fieldDescs := messageDesc.Fields() - - for { - // Read field name. - tok, err := d.Read() - if err != nil { - return err - } - switch typ := tok.Kind(); typ { - case text.Name: - // Continue below. - case text.EOF: - if checkDelims { - return text.ErrUnexpectedEOF - } - return nil - default: - if checkDelims && typ == text.MessageClose { - return nil - } - return d.unexpectedTokenError(tok) - } - - // Resolve the field descriptor. - var name pref.Name - var fd pref.FieldDescriptor - var xt pref.ExtensionType - var xtErr error - var isFieldNumberName bool - - switch tok.NameKind() { - case text.IdentName: - name = pref.Name(tok.IdentName()) - fd = fieldDescs.ByTextName(string(name)) - - case text.TypeName: - // Handle extensions only. This code path is not for Any. 
- xt, xtErr = d.opts.Resolver.FindExtensionByName(pref.FullName(tok.TypeName())) - - case text.FieldNumber: - isFieldNumberName = true - num := pref.FieldNumber(tok.FieldNumber()) - if !num.IsValid() { - return d.newError(tok.Pos(), "invalid field number: %d", num) - } - fd = fieldDescs.ByNumber(num) - if fd == nil { - xt, xtErr = d.opts.Resolver.FindExtensionByNumber(messageDesc.FullName(), num) - } - } - - if xt != nil { - fd = xt.TypeDescriptor() - if !messageDesc.ExtensionRanges().Has(fd.Number()) || fd.ContainingMessage().FullName() != messageDesc.FullName() { - return d.newError(tok.Pos(), "message %v cannot be extended by %v", messageDesc.FullName(), fd.FullName()) - } - } else if xtErr != nil && xtErr != protoregistry.NotFound { - return d.newError(tok.Pos(), "unable to resolve [%s]: %v", tok.RawString(), xtErr) - } - if flags.ProtoLegacy { - if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { - fd = nil // reset since the weak reference is not linked in - } - } - - // Handle unknown fields. - if fd == nil { - if d.opts.DiscardUnknown || messageDesc.ReservedNames().Has(name) { - d.skipValue() - continue - } - return d.newError(tok.Pos(), "unknown field: %v", tok.RawString()) - } - - // Handle fields identified by field number. - if isFieldNumberName { - // TODO: Add an option to permit parsing field numbers. - // - // This requires careful thought as the MarshalOptions.EmitUnknown - // option allows formatting unknown fields as the field number and the - // best-effort textual representation of the field value. In that case, - // it may not be possible to unmarshal the value from a parser that does - // have information about the unknown field. - return d.newError(tok.Pos(), "cannot specify field by number: %v", tok.RawString()) - } - - switch { - case fd.IsList(): - kind := fd.Kind() - if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() { - return d.syntaxError(tok.Pos(), "missing field separator :") - } - - list := m.Mutable(fd).List() - if err := d.unmarshalList(fd, list); err != nil { - return err - } - - case fd.IsMap(): - mmap := m.Mutable(fd).Map() - if err := d.unmarshalMap(fd, mmap); err != nil { - return err - } - - default: - kind := fd.Kind() - if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() { - return d.syntaxError(tok.Pos(), "missing field separator :") - } - - // If field is a oneof, check if it has already been set. - if od := fd.ContainingOneof(); od != nil { - idx := uint64(od.Index()) - if seenOneofs.Has(idx) { - return d.newError(tok.Pos(), "error parsing %q, oneof %v is already set", tok.RawString(), od.FullName()) - } - seenOneofs.Set(idx) - } - - num := uint64(fd.Number()) - if seenNums.Has(num) { - return d.newError(tok.Pos(), "non-repeated field %q is repeated", tok.RawString()) - } - - if err := d.unmarshalSingular(fd, m); err != nil { - return err - } - seenNums.Set(num) - } - } - - return nil -} - -// unmarshalSingular unmarshals a non-repeated field value specified by the -// given FieldDescriptor. -func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) error { - var val pref.Value - var err error - switch fd.Kind() { - case pref.MessageKind, pref.GroupKind: - val = m.NewField(fd) - err = d.unmarshalMessage(val.Message(), true) - default: - val, err = d.unmarshalScalar(fd) - } - if err == nil { - m.Set(fd, val) - } - return err -} - -// unmarshalScalar unmarshals a scalar/enum protoreflect.Value specified by the -// given FieldDescriptor. 
-func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) { - tok, err := d.Read() - if err != nil { - return pref.Value{}, err - } - - if tok.Kind() != text.Scalar { - return pref.Value{}, d.unexpectedTokenError(tok) - } - - kind := fd.Kind() - switch kind { - case pref.BoolKind: - if b, ok := tok.Bool(); ok { - return pref.ValueOfBool(b), nil - } - - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: - if n, ok := tok.Int32(); ok { - return pref.ValueOfInt32(n), nil - } - - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: - if n, ok := tok.Int64(); ok { - return pref.ValueOfInt64(n), nil - } - - case pref.Uint32Kind, pref.Fixed32Kind: - if n, ok := tok.Uint32(); ok { - return pref.ValueOfUint32(n), nil - } - - case pref.Uint64Kind, pref.Fixed64Kind: - if n, ok := tok.Uint64(); ok { - return pref.ValueOfUint64(n), nil - } - - case pref.FloatKind: - if n, ok := tok.Float32(); ok { - return pref.ValueOfFloat32(n), nil - } - - case pref.DoubleKind: - if n, ok := tok.Float64(); ok { - return pref.ValueOfFloat64(n), nil - } - - case pref.StringKind: - if s, ok := tok.String(); ok { - if strs.EnforceUTF8(fd) && !utf8.ValidString(s) { - return pref.Value{}, d.newError(tok.Pos(), "contains invalid UTF-8") - } - return pref.ValueOfString(s), nil - } - - case pref.BytesKind: - if b, ok := tok.String(); ok { - return pref.ValueOfBytes([]byte(b)), nil - } - - case pref.EnumKind: - if lit, ok := tok.Enum(); ok { - // Lookup EnumNumber based on name. - if enumVal := fd.Enum().Values().ByName(pref.Name(lit)); enumVal != nil { - return pref.ValueOfEnum(enumVal.Number()), nil - } - } - if num, ok := tok.Int32(); ok { - return pref.ValueOfEnum(pref.EnumNumber(num)), nil - } - - default: - panic(fmt.Sprintf("invalid scalar kind %v", kind)) - } - - return pref.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) -} - -// unmarshalList unmarshals into given protoreflect.List. A list value can -// either be in [] syntax or simply just a single scalar/message value. -func (d decoder) unmarshalList(fd pref.FieldDescriptor, list pref.List) error { - tok, err := d.Peek() - if err != nil { - return err - } - - switch fd.Kind() { - case pref.MessageKind, pref.GroupKind: - switch tok.Kind() { - case text.ListOpen: - d.Read() - for { - tok, err := d.Peek() - if err != nil { - return err - } - - switch tok.Kind() { - case text.ListClose: - d.Read() - return nil - case text.MessageOpen: - pval := list.NewElement() - if err := d.unmarshalMessage(pval.Message(), true); err != nil { - return err - } - list.Append(pval) - default: - return d.unexpectedTokenError(tok) - } - } - - case text.MessageOpen: - pval := list.NewElement() - if err := d.unmarshalMessage(pval.Message(), true); err != nil { - return err - } - list.Append(pval) - return nil - } - - default: - switch tok.Kind() { - case text.ListOpen: - d.Read() - for { - tok, err := d.Peek() - if err != nil { - return err - } - - switch tok.Kind() { - case text.ListClose: - d.Read() - return nil - case text.Scalar: - pval, err := d.unmarshalScalar(fd) - if err != nil { - return err - } - list.Append(pval) - default: - return d.unexpectedTokenError(tok) - } - } - - case text.Scalar: - pval, err := d.unmarshalScalar(fd) - if err != nil { - return err - } - list.Append(pval) - return nil - } - } - - return d.unexpectedTokenError(tok) -} - -// unmarshalMap unmarshals into given protoreflect.Map. A map value is a -// textproto message containing {key: , value: }. 
-func (d decoder) unmarshalMap(fd pref.FieldDescriptor, mmap pref.Map) error { - // Determine ahead whether map entry is a scalar type or a message type in - // order to call the appropriate unmarshalMapValue func inside - // unmarshalMapEntry. - var unmarshalMapValue func() (pref.Value, error) - switch fd.MapValue().Kind() { - case pref.MessageKind, pref.GroupKind: - unmarshalMapValue = func() (pref.Value, error) { - pval := mmap.NewValue() - if err := d.unmarshalMessage(pval.Message(), true); err != nil { - return pref.Value{}, err - } - return pval, nil - } - default: - unmarshalMapValue = func() (pref.Value, error) { - return d.unmarshalScalar(fd.MapValue()) - } - } - - tok, err := d.Read() - if err != nil { - return err - } - switch tok.Kind() { - case text.MessageOpen: - return d.unmarshalMapEntry(fd, mmap, unmarshalMapValue) - - case text.ListOpen: - for { - tok, err := d.Read() - if err != nil { - return err - } - switch tok.Kind() { - case text.ListClose: - return nil - case text.MessageOpen: - if err := d.unmarshalMapEntry(fd, mmap, unmarshalMapValue); err != nil { - return err - } - default: - return d.unexpectedTokenError(tok) - } - } - - default: - return d.unexpectedTokenError(tok) - } -} - -// unmarshalMap unmarshals into given protoreflect.Map. A map value is a -// textproto message containing {key: , value: }. -func (d decoder) unmarshalMapEntry(fd pref.FieldDescriptor, mmap pref.Map, unmarshalMapValue func() (pref.Value, error)) error { - var key pref.MapKey - var pval pref.Value -Loop: - for { - // Read field name. - tok, err := d.Read() - if err != nil { - return err - } - switch tok.Kind() { - case text.Name: - if tok.NameKind() != text.IdentName { - if !d.opts.DiscardUnknown { - return d.newError(tok.Pos(), "unknown map entry field %q", tok.RawString()) - } - d.skipValue() - continue Loop - } - // Continue below. - case text.MessageClose: - break Loop - default: - return d.unexpectedTokenError(tok) - } - - switch name := pref.Name(tok.IdentName()); name { - case genid.MapEntry_Key_field_name: - if !tok.HasSeparator() { - return d.syntaxError(tok.Pos(), "missing field separator :") - } - if key.IsValid() { - return d.newError(tok.Pos(), "map entry %q cannot be repeated", name) - } - val, err := d.unmarshalScalar(fd.MapKey()) - if err != nil { - return err - } - key = val.MapKey() - - case genid.MapEntry_Value_field_name: - if kind := fd.MapValue().Kind(); (kind != pref.MessageKind) && (kind != pref.GroupKind) { - if !tok.HasSeparator() { - return d.syntaxError(tok.Pos(), "missing field separator :") - } - } - if pval.IsValid() { - return d.newError(tok.Pos(), "map entry %q cannot be repeated", name) - } - pval, err = unmarshalMapValue() - if err != nil { - return err - } - - default: - if !d.opts.DiscardUnknown { - return d.newError(tok.Pos(), "unknown map entry field %q", name) - } - d.skipValue() - } - } - - if !key.IsValid() { - key = fd.MapKey().Default().MapKey() - } - if !pval.IsValid() { - switch fd.MapValue().Kind() { - case pref.MessageKind, pref.GroupKind: - // If value field is not set for message/group types, construct an - // empty one as default. - pval = mmap.NewValue() - default: - pval = fd.MapValue().Default() - } - } - mmap.Set(key, pval) - return nil -} - -// unmarshalAny unmarshals an Any textproto. It can either be in expanded form -// or non-expanded form. 
-func (d decoder) unmarshalAny(m pref.Message, checkDelims bool) error { - var typeURL string - var bValue []byte - var seenTypeUrl bool - var seenValue bool - var isExpanded bool - - if checkDelims { - tok, err := d.Read() - if err != nil { - return err - } - - if tok.Kind() != text.MessageOpen { - return d.unexpectedTokenError(tok) - } - } - -Loop: - for { - // Read field name. Can only have 3 possible field names, i.e. type_url, - // value and type URL name inside []. - tok, err := d.Read() - if err != nil { - return err - } - if typ := tok.Kind(); typ != text.Name { - if checkDelims { - if typ == text.MessageClose { - break Loop - } - } else if typ == text.EOF { - break Loop - } - return d.unexpectedTokenError(tok) - } - - switch tok.NameKind() { - case text.IdentName: - // Both type_url and value fields require field separator :. - if !tok.HasSeparator() { - return d.syntaxError(tok.Pos(), "missing field separator :") - } - - switch name := pref.Name(tok.IdentName()); name { - case genid.Any_TypeUrl_field_name: - if seenTypeUrl { - return d.newError(tok.Pos(), "duplicate %v field", genid.Any_TypeUrl_field_fullname) - } - if isExpanded { - return d.newError(tok.Pos(), "conflict with [%s] field", typeURL) - } - tok, err := d.Read() - if err != nil { - return err - } - var ok bool - typeURL, ok = tok.String() - if !ok { - return d.newError(tok.Pos(), "invalid %v field value: %v", genid.Any_TypeUrl_field_fullname, tok.RawString()) - } - seenTypeUrl = true - - case genid.Any_Value_field_name: - if seenValue { - return d.newError(tok.Pos(), "duplicate %v field", genid.Any_Value_field_fullname) - } - if isExpanded { - return d.newError(tok.Pos(), "conflict with [%s] field", typeURL) - } - tok, err := d.Read() - if err != nil { - return err - } - s, ok := tok.String() - if !ok { - return d.newError(tok.Pos(), "invalid %v field value: %v", genid.Any_Value_field_fullname, tok.RawString()) - } - bValue = []byte(s) - seenValue = true - - default: - if !d.opts.DiscardUnknown { - return d.newError(tok.Pos(), "invalid field name %q in %v message", tok.RawString(), genid.Any_message_fullname) - } - } - - case text.TypeName: - if isExpanded { - return d.newError(tok.Pos(), "cannot have more than one type") - } - if seenTypeUrl { - return d.newError(tok.Pos(), "conflict with type_url field") - } - typeURL = tok.TypeName() - var err error - bValue, err = d.unmarshalExpandedAny(typeURL, tok.Pos()) - if err != nil { - return err - } - isExpanded = true - - default: - if !d.opts.DiscardUnknown { - return d.newError(tok.Pos(), "invalid field name %q in %v message", tok.RawString(), genid.Any_message_fullname) - } - } - } - - fds := m.Descriptor().Fields() - if len(typeURL) > 0 { - m.Set(fds.ByNumber(genid.Any_TypeUrl_field_number), pref.ValueOfString(typeURL)) - } - if len(bValue) > 0 { - m.Set(fds.ByNumber(genid.Any_Value_field_number), pref.ValueOfBytes(bValue)) - } - return nil -} - -func (d decoder) unmarshalExpandedAny(typeURL string, pos int) ([]byte, error) { - mt, err := d.opts.Resolver.FindMessageByURL(typeURL) - if err != nil { - return nil, d.newError(pos, "unable to resolve message [%v]: %v", typeURL, err) - } - // Create new message for the embedded message type and unmarshal the value - // field into it. - m := mt.New() - if err := d.unmarshalMessage(m, true); err != nil { - return nil, err - } - // Serialize the embedded message and return the resulting bytes. - b, err := proto.MarshalOptions{ - AllowPartial: true, // Never check required fields inside an Any. 
- Deterministic: true, - }.Marshal(m.Interface()) - if err != nil { - return nil, d.newError(pos, "error in marshaling message into Any.value: %v", err) - } - return b, nil -} - -// skipValue makes the decoder parse a field value in order to advance the read -// to the next field. It relies on Read returning an error if the types are not -// in valid sequence. -func (d decoder) skipValue() error { - tok, err := d.Read() - if err != nil { - return err - } - // Only need to continue reading for messages and lists. - switch tok.Kind() { - case text.MessageOpen: - return d.skipMessageValue() - - case text.ListOpen: - for { - tok, err := d.Read() - if err != nil { - return err - } - switch tok.Kind() { - case text.ListClose: - return nil - case text.MessageOpen: - return d.skipMessageValue() - default: - // Skip items. This will not validate whether skipped values are - // of the same type or not, same behavior as C++ - // TextFormat::Parser::AllowUnknownField(true) version 3.8.0. - } - } - } - return nil -} - -// skipMessageValue makes the decoder parse and skip over all fields in a -// message. It assumes that the previous read type is MessageOpen. -func (d decoder) skipMessageValue() error { - for { - tok, err := d.Read() - if err != nil { - return err - } - switch tok.Kind() { - case text.MessageClose: - return nil - case text.Name: - if err := d.skipValue(); err != nil { - return err - } - } - } -} diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/doc.go b/vendor/google.golang.org/protobuf/encoding/prototext/doc.go deleted file mode 100644 index 162b4f98a..000000000 --- a/vendor/google.golang.org/protobuf/encoding/prototext/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package prototext marshals and unmarshals protocol buffer messages as the -// textproto format. -package prototext diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go deleted file mode 100644 index 8d5304dc5..000000000 --- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ /dev/null @@ -1,371 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package prototext - -import ( - "fmt" - "strconv" - "unicode/utf8" - - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/encoding/messageset" - "google.golang.org/protobuf/internal/encoding/text" - "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/flags" - "google.golang.org/protobuf/internal/genid" - "google.golang.org/protobuf/internal/order" - "google.golang.org/protobuf/internal/pragma" - "google.golang.org/protobuf/internal/strs" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -const defaultIndent = " " - -// Format formats the message as a multiline string. -// This function is only intended for human consumption and ignores errors. -// Do not depend on the output being stable. It may change over time across -// different versions of the program. 
-func Format(m proto.Message) string { - return MarshalOptions{Multiline: true}.Format(m) -} - -// Marshal writes the given proto.Message in textproto format using default -// options. Do not depend on the output being stable. It may change over time -// across different versions of the program. -func Marshal(m proto.Message) ([]byte, error) { - return MarshalOptions{}.Marshal(m) -} - -// MarshalOptions is a configurable text format marshaler. -type MarshalOptions struct { - pragma.NoUnkeyedLiterals - - // Multiline specifies whether the marshaler should format the output in - // indented-form with every textual element on a new line. - // If Indent is an empty string, then an arbitrary indent is chosen. - Multiline bool - - // Indent specifies the set of indentation characters to use in a multiline - // formatted output such that every entry is preceded by Indent and - // terminated by a newline. If non-empty, then Multiline is treated as true. - // Indent can only be composed of space or tab characters. - Indent string - - // EmitASCII specifies whether to format strings and bytes as ASCII only - // as opposed to using UTF-8 encoding when possible. - EmitASCII bool - - // allowInvalidUTF8 specifies whether to permit the encoding of strings - // with invalid UTF-8. This is unexported as it is intended to only - // be specified by the Format method. - allowInvalidUTF8 bool - - // AllowPartial allows messages that have missing required fields to marshal - // without returning an error. If AllowPartial is false (the default), - // Marshal will return error if there are any missing required fields. - AllowPartial bool - - // EmitUnknown specifies whether to emit unknown fields in the output. - // If specified, the unmarshaler may be unable to parse the output. - // The default is to exclude unknown fields. - EmitUnknown bool - - // Resolver is used for looking up types when expanding google.protobuf.Any - // messages. If nil, this defaults to using protoregistry.GlobalTypes. - Resolver interface { - protoregistry.ExtensionTypeResolver - protoregistry.MessageTypeResolver - } -} - -// Format formats the message as a string. -// This method is only intended for human consumption and ignores errors. -// Do not depend on the output being stable. It may change over time across -// different versions of the program. -func (o MarshalOptions) Format(m proto.Message) string { - if m == nil || !m.ProtoReflect().IsValid() { - return "" // invalid syntax, but okay since this is for debugging - } - o.allowInvalidUTF8 = true - o.AllowPartial = true - o.EmitUnknown = true - b, _ := o.Marshal(m) - return string(b) -} - -// Marshal writes the given proto.Message in textproto format using options in -// MarshalOptions object. Do not depend on the output being stable. It may -// change over time across different versions of the program. -func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { - return o.marshal(m) -} - -// marshal is a centralized function that all marshal operations go through. -// For profiling purposes, avoid changing the name of this function or -// introducing other code paths for marshal that do not go through this. 
-func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { - var delims = [2]byte{'{', '}'} - - if o.Multiline && o.Indent == "" { - o.Indent = defaultIndent - } - if o.Resolver == nil { - o.Resolver = protoregistry.GlobalTypes - } - - internalEnc, err := text.NewEncoder(o.Indent, delims, o.EmitASCII) - if err != nil { - return nil, err - } - - // Treat nil message interface as an empty message, - // in which case there is nothing to output. - if m == nil { - return []byte{}, nil - } - - enc := encoder{internalEnc, o} - err = enc.marshalMessage(m.ProtoReflect(), false) - if err != nil { - return nil, err - } - out := enc.Bytes() - if len(o.Indent) > 0 && len(out) > 0 { - out = append(out, '\n') - } - if o.AllowPartial { - return out, nil - } - return out, proto.CheckInitialized(m) -} - -type encoder struct { - *text.Encoder - opts MarshalOptions -} - -// marshalMessage marshals the given protoreflect.Message. -func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error { - messageDesc := m.Descriptor() - if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { - return errors.New("no support for proto1 MessageSets") - } - - if inclDelims { - e.StartMessage() - defer e.EndMessage() - } - - // Handle Any expansion. - if messageDesc.FullName() == genid.Any_message_fullname { - if e.marshalAny(m) { - return nil - } - // If unable to expand, continue on to marshal Any as a regular message. - } - - // Marshal fields. - var err error - order.RangeFields(m, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - if err = e.marshalField(fd.TextName(), v, fd); err != nil { - return false - } - return true - }) - if err != nil { - return err - } - - // Marshal unknown fields. - if e.opts.EmitUnknown { - e.marshalUnknown(m.GetUnknown()) - } - - return nil -} - -// marshalField marshals the given field with protoreflect.Value. -func (e encoder) marshalField(name string, val pref.Value, fd pref.FieldDescriptor) error { - switch { - case fd.IsList(): - return e.marshalList(name, val.List(), fd) - case fd.IsMap(): - return e.marshalMap(name, val.Map(), fd) - default: - e.WriteName(name) - return e.marshalSingular(val, fd) - } -} - -// marshalSingular marshals the given non-repeated field value. This includes -// all scalar types, enums, messages, and groups. -func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error { - kind := fd.Kind() - switch kind { - case pref.BoolKind: - e.WriteBool(val.Bool()) - - case pref.StringKind: - s := val.String() - if !e.opts.allowInvalidUTF8 && strs.EnforceUTF8(fd) && !utf8.ValidString(s) { - return errors.InvalidUTF8(string(fd.FullName())) - } - e.WriteString(s) - - case pref.Int32Kind, pref.Int64Kind, - pref.Sint32Kind, pref.Sint64Kind, - pref.Sfixed32Kind, pref.Sfixed64Kind: - e.WriteInt(val.Int()) - - case pref.Uint32Kind, pref.Uint64Kind, - pref.Fixed32Kind, pref.Fixed64Kind: - e.WriteUint(val.Uint()) - - case pref.FloatKind: - // Encoder.WriteFloat handles the special numbers NaN and infinites. - e.WriteFloat(val.Float(), 32) - - case pref.DoubleKind: - // Encoder.WriteFloat handles the special numbers NaN and infinites. - e.WriteFloat(val.Float(), 64) - - case pref.BytesKind: - e.WriteString(string(val.Bytes())) - - case pref.EnumKind: - num := val.Enum() - if desc := fd.Enum().Values().ByNumber(num); desc != nil { - e.WriteLiteral(string(desc.Name())) - } else { - // Use numeric value if there is no enum description. 
- e.WriteInt(int64(num)) - } - - case pref.MessageKind, pref.GroupKind: - return e.marshalMessage(val.Message(), true) - - default: - panic(fmt.Sprintf("%v has unknown kind: %v", fd.FullName(), kind)) - } - return nil -} - -// marshalList marshals the given protoreflect.List as multiple name-value fields. -func (e encoder) marshalList(name string, list pref.List, fd pref.FieldDescriptor) error { - size := list.Len() - for i := 0; i < size; i++ { - e.WriteName(name) - if err := e.marshalSingular(list.Get(i), fd); err != nil { - return err - } - } - return nil -} - -// marshalMap marshals the given protoreflect.Map as multiple name-value fields. -func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) error { - var err error - order.RangeEntries(mmap, order.GenericKeyOrder, func(key pref.MapKey, val pref.Value) bool { - e.WriteName(name) - e.StartMessage() - defer e.EndMessage() - - e.WriteName(string(genid.MapEntry_Key_field_name)) - err = e.marshalSingular(key.Value(), fd.MapKey()) - if err != nil { - return false - } - - e.WriteName(string(genid.MapEntry_Value_field_name)) - err = e.marshalSingular(val, fd.MapValue()) - if err != nil { - return false - } - return true - }) - return err -} - -// marshalUnknown parses the given []byte and marshals fields out. -// This function assumes proper encoding in the given []byte. -func (e encoder) marshalUnknown(b []byte) { - const dec = 10 - const hex = 16 - for len(b) > 0 { - num, wtype, n := protowire.ConsumeTag(b) - b = b[n:] - e.WriteName(strconv.FormatInt(int64(num), dec)) - - switch wtype { - case protowire.VarintType: - var v uint64 - v, n = protowire.ConsumeVarint(b) - e.WriteUint(v) - case protowire.Fixed32Type: - var v uint32 - v, n = protowire.ConsumeFixed32(b) - e.WriteLiteral("0x" + strconv.FormatUint(uint64(v), hex)) - case protowire.Fixed64Type: - var v uint64 - v, n = protowire.ConsumeFixed64(b) - e.WriteLiteral("0x" + strconv.FormatUint(v, hex)) - case protowire.BytesType: - var v []byte - v, n = protowire.ConsumeBytes(b) - e.WriteString(string(v)) - case protowire.StartGroupType: - e.StartMessage() - var v []byte - v, n = protowire.ConsumeGroup(num, b) - e.marshalUnknown(v) - e.EndMessage() - default: - panic(fmt.Sprintf("prototext: error parsing unknown field wire type: %v", wtype)) - } - - b = b[n:] - } -} - -// marshalAny marshals the given google.protobuf.Any message in expanded form. -// It returns true if it was able to marshal, else false. -func (e encoder) marshalAny(any pref.Message) bool { - // Construct the embedded message. - fds := any.Descriptor().Fields() - fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) - typeURL := any.Get(fdType).String() - mt, err := e.opts.Resolver.FindMessageByURL(typeURL) - if err != nil { - return false - } - m := mt.New().Interface() - - // Unmarshal bytes into embedded message. - fdValue := fds.ByNumber(genid.Any_Value_field_number) - value := any.Get(fdValue) - err = proto.UnmarshalOptions{ - AllowPartial: true, - Resolver: e.opts.Resolver, - }.Unmarshal(value.Bytes(), m) - if err != nil { - return false - } - - // Get current encoder position. If marshaling fails, reset encoder output - // back to this position. - pos := e.Snapshot() - - // Field name is the proto field name enclosed in []. 
- e.WriteName("[" + typeURL + "]") - err = e.marshalMessage(m.ProtoReflect(), true) - if err != nil { - e.Reset(pos) - return false - } - return true -} diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go deleted file mode 100644 index 9c61112f5..000000000 --- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go +++ /dev/null @@ -1,547 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package protowire parses and formats the raw wire encoding. -// See https://developers.google.com/protocol-buffers/docs/encoding. -// -// For marshaling and unmarshaling entire protobuf messages, -// use the "google.golang.org/protobuf/proto" package instead. -package protowire - -import ( - "io" - "math" - "math/bits" - - "google.golang.org/protobuf/internal/errors" -) - -// Number represents the field number. -type Number int32 - -const ( - MinValidNumber Number = 1 - FirstReservedNumber Number = 19000 - LastReservedNumber Number = 19999 - MaxValidNumber Number = 1<<29 - 1 - DefaultRecursionLimit = 10000 -) - -// IsValid reports whether the field number is semantically valid. -// -// Note that while numbers within the reserved range are semantically invalid, -// they are syntactically valid in the wire format. -// Implementations may treat records with reserved field numbers as unknown. -func (n Number) IsValid() bool { - return MinValidNumber <= n && n < FirstReservedNumber || LastReservedNumber < n && n <= MaxValidNumber -} - -// Type represents the wire type. -type Type int8 - -const ( - VarintType Type = 0 - Fixed32Type Type = 5 - Fixed64Type Type = 1 - BytesType Type = 2 - StartGroupType Type = 3 - EndGroupType Type = 4 -) - -const ( - _ = -iota - errCodeTruncated - errCodeFieldNumber - errCodeOverflow - errCodeReserved - errCodeEndGroup - errCodeRecursionDepth -) - -var ( - errFieldNumber = errors.New("invalid field number") - errOverflow = errors.New("variable length integer overflow") - errReserved = errors.New("cannot parse reserved wire type") - errEndGroup = errors.New("mismatching end group marker") - errParse = errors.New("parse error") -) - -// ParseError converts an error code into an error value. -// This returns nil if n is a non-negative number. -func ParseError(n int) error { - if n >= 0 { - return nil - } - switch n { - case errCodeTruncated: - return io.ErrUnexpectedEOF - case errCodeFieldNumber: - return errFieldNumber - case errCodeOverflow: - return errOverflow - case errCodeReserved: - return errReserved - case errCodeEndGroup: - return errEndGroup - default: - return errParse - } -} - -// ConsumeField parses an entire field record (both tag and value) and returns -// the field number, the wire type, and the total length. -// This returns a negative length upon an error (see ParseError). -// -// The total length includes the tag header and the end group marker (if the -// field is a group). -func ConsumeField(b []byte) (Number, Type, int) { - num, typ, n := ConsumeTag(b) - if n < 0 { - return 0, 0, n // forward error code - } - m := ConsumeFieldValue(num, typ, b[n:]) - if m < 0 { - return 0, 0, m // forward error code - } - return num, typ, n + m -} - -// ConsumeFieldValue parses a field value and returns its length. -// This assumes that the field Number and wire Type have already been parsed. -// This returns a negative length upon an error (see ParseError). 
-// -// When parsing a group, the length includes the end group marker and -// the end group is verified to match the starting field number. -func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { - return consumeFieldValueD(num, typ, b, DefaultRecursionLimit) -} - -func consumeFieldValueD(num Number, typ Type, b []byte, depth int) (n int) { - switch typ { - case VarintType: - _, n = ConsumeVarint(b) - return n - case Fixed32Type: - _, n = ConsumeFixed32(b) - return n - case Fixed64Type: - _, n = ConsumeFixed64(b) - return n - case BytesType: - _, n = ConsumeBytes(b) - return n - case StartGroupType: - if depth < 0 { - return errCodeRecursionDepth - } - n0 := len(b) - for { - num2, typ2, n := ConsumeTag(b) - if n < 0 { - return n // forward error code - } - b = b[n:] - if typ2 == EndGroupType { - if num != num2 { - return errCodeEndGroup - } - return n0 - len(b) - } - - n = consumeFieldValueD(num2, typ2, b, depth-1) - if n < 0 { - return n // forward error code - } - b = b[n:] - } - case EndGroupType: - return errCodeEndGroup - default: - return errCodeReserved - } -} - -// AppendTag encodes num and typ as a varint-encoded tag and appends it to b. -func AppendTag(b []byte, num Number, typ Type) []byte { - return AppendVarint(b, EncodeTag(num, typ)) -} - -// ConsumeTag parses b as a varint-encoded tag, reporting its length. -// This returns a negative length upon an error (see ParseError). -func ConsumeTag(b []byte) (Number, Type, int) { - v, n := ConsumeVarint(b) - if n < 0 { - return 0, 0, n // forward error code - } - num, typ := DecodeTag(v) - if num < MinValidNumber { - return 0, 0, errCodeFieldNumber - } - return num, typ, n -} - -func SizeTag(num Number) int { - return SizeVarint(EncodeTag(num, 0)) // wire type has no effect on size -} - -// AppendVarint appends v to b as a varint-encoded uint64. 
-func AppendVarint(b []byte, v uint64) []byte { - switch { - case v < 1<<7: - b = append(b, byte(v)) - case v < 1<<14: - b = append(b, - byte((v>>0)&0x7f|0x80), - byte(v>>7)) - case v < 1<<21: - b = append(b, - byte((v>>0)&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte(v>>14)) - case v < 1<<28: - b = append(b, - byte((v>>0)&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte(v>>21)) - case v < 1<<35: - b = append(b, - byte((v>>0)&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte(v>>28)) - case v < 1<<42: - b = append(b, - byte((v>>0)&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte(v>>35)) - case v < 1<<49: - b = append(b, - byte((v>>0)&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte(v>>42)) - case v < 1<<56: - b = append(b, - byte((v>>0)&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte(v>>49)) - case v < 1<<63: - b = append(b, - byte((v>>0)&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte(v>>56)) - default: - b = append(b, - byte((v>>0)&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte((v>>56)&0x7f|0x80), - 1) - } - return b -} - -// ConsumeVarint parses b as a varint-encoded uint64, reporting its length. -// This returns a negative length upon an error (see ParseError). -func ConsumeVarint(b []byte) (v uint64, n int) { - var y uint64 - if len(b) <= 0 { - return 0, errCodeTruncated - } - v = uint64(b[0]) - if v < 0x80 { - return v, 1 - } - v -= 0x80 - - if len(b) <= 1 { - return 0, errCodeTruncated - } - y = uint64(b[1]) - v += y << 7 - if y < 0x80 { - return v, 2 - } - v -= 0x80 << 7 - - if len(b) <= 2 { - return 0, errCodeTruncated - } - y = uint64(b[2]) - v += y << 14 - if y < 0x80 { - return v, 3 - } - v -= 0x80 << 14 - - if len(b) <= 3 { - return 0, errCodeTruncated - } - y = uint64(b[3]) - v += y << 21 - if y < 0x80 { - return v, 4 - } - v -= 0x80 << 21 - - if len(b) <= 4 { - return 0, errCodeTruncated - } - y = uint64(b[4]) - v += y << 28 - if y < 0x80 { - return v, 5 - } - v -= 0x80 << 28 - - if len(b) <= 5 { - return 0, errCodeTruncated - } - y = uint64(b[5]) - v += y << 35 - if y < 0x80 { - return v, 6 - } - v -= 0x80 << 35 - - if len(b) <= 6 { - return 0, errCodeTruncated - } - y = uint64(b[6]) - v += y << 42 - if y < 0x80 { - return v, 7 - } - v -= 0x80 << 42 - - if len(b) <= 7 { - return 0, errCodeTruncated - } - y = uint64(b[7]) - v += y << 49 - if y < 0x80 { - return v, 8 - } - v -= 0x80 << 49 - - if len(b) <= 8 { - return 0, errCodeTruncated - } - y = uint64(b[8]) - v += y << 56 - if y < 0x80 { - return v, 9 - } - v -= 0x80 << 56 - - if len(b) <= 9 { - return 0, errCodeTruncated - } - y = uint64(b[9]) - v += y << 63 - if y < 2 { - return v, 10 - } - return 0, errCodeOverflow -} - -// SizeVarint returns the encoded size of a varint. -// The size is guaranteed to be within 1 and 10, inclusive. -func SizeVarint(v uint64) int { - // This computes 1 + (bits.Len64(v)-1)/7. 
- // 9/64 is a good enough approximation of 1/7 - return int(9*uint32(bits.Len64(v))+64) / 64 -} - -// AppendFixed32 appends v to b as a little-endian uint32. -func AppendFixed32(b []byte, v uint32) []byte { - return append(b, - byte(v>>0), - byte(v>>8), - byte(v>>16), - byte(v>>24)) -} - -// ConsumeFixed32 parses b as a little-endian uint32, reporting its length. -// This returns a negative length upon an error (see ParseError). -func ConsumeFixed32(b []byte) (v uint32, n int) { - if len(b) < 4 { - return 0, errCodeTruncated - } - v = uint32(b[0])<<0 | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - return v, 4 -} - -// SizeFixed32 returns the encoded size of a fixed32; which is always 4. -func SizeFixed32() int { - return 4 -} - -// AppendFixed64 appends v to b as a little-endian uint64. -func AppendFixed64(b []byte, v uint64) []byte { - return append(b, - byte(v>>0), - byte(v>>8), - byte(v>>16), - byte(v>>24), - byte(v>>32), - byte(v>>40), - byte(v>>48), - byte(v>>56)) -} - -// ConsumeFixed64 parses b as a little-endian uint64, reporting its length. -// This returns a negative length upon an error (see ParseError). -func ConsumeFixed64(b []byte) (v uint64, n int) { - if len(b) < 8 { - return 0, errCodeTruncated - } - v = uint64(b[0])<<0 | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - return v, 8 -} - -// SizeFixed64 returns the encoded size of a fixed64; which is always 8. -func SizeFixed64() int { - return 8 -} - -// AppendBytes appends v to b as a length-prefixed bytes value. -func AppendBytes(b []byte, v []byte) []byte { - return append(AppendVarint(b, uint64(len(v))), v...) -} - -// ConsumeBytes parses b as a length-prefixed bytes value, reporting its length. -// This returns a negative length upon an error (see ParseError). -func ConsumeBytes(b []byte) (v []byte, n int) { - m, n := ConsumeVarint(b) - if n < 0 { - return nil, n // forward error code - } - if m > uint64(len(b[n:])) { - return nil, errCodeTruncated - } - return b[n:][:m], n + int(m) -} - -// SizeBytes returns the encoded size of a length-prefixed bytes value, -// given only the length. -func SizeBytes(n int) int { - return SizeVarint(uint64(n)) + n -} - -// AppendString appends v to b as a length-prefixed bytes value. -func AppendString(b []byte, v string) []byte { - return append(AppendVarint(b, uint64(len(v))), v...) -} - -// ConsumeString parses b as a length-prefixed bytes value, reporting its length. -// This returns a negative length upon an error (see ParseError). -func ConsumeString(b []byte) (v string, n int) { - bb, n := ConsumeBytes(b) - return string(bb), n -} - -// AppendGroup appends v to b as group value, with a trailing end group marker. -// The value v must not contain the end marker. -func AppendGroup(b []byte, num Number, v []byte) []byte { - return AppendVarint(append(b, v...), EncodeTag(num, EndGroupType)) -} - -// ConsumeGroup parses b as a group value until the trailing end group marker, -// and verifies that the end marker matches the provided num. The value v -// does not contain the end marker, while the length does contain the end marker. -// This returns a negative length upon an error (see ParseError). -func ConsumeGroup(num Number, b []byte) (v []byte, n int) { - n = ConsumeFieldValue(num, StartGroupType, b) - if n < 0 { - return nil, n // forward error code - } - b = b[:n] - - // Truncate off end group marker, but need to handle denormalized varints. 
- // Assuming end marker is never 0 (which is always the case since - // EndGroupType is non-zero), we can truncate all trailing bytes where the - // lower 7 bits are all zero (implying that the varint is denormalized). - for len(b) > 0 && b[len(b)-1]&0x7f == 0 { - b = b[:len(b)-1] - } - b = b[:len(b)-SizeTag(num)] - return b, n -} - -// SizeGroup returns the encoded size of a group, given only the length. -func SizeGroup(num Number, n int) int { - return n + SizeTag(num) -} - -// DecodeTag decodes the field Number and wire Type from its unified form. -// The Number is -1 if the decoded field number overflows int32. -// Other than overflow, this does not check for field number validity. -func DecodeTag(x uint64) (Number, Type) { - // NOTE: MessageSet allows for larger field numbers than normal. - if x>>3 > uint64(math.MaxInt32) { - return -1, 0 - } - return Number(x >> 3), Type(x & 7) -} - -// EncodeTag encodes the field Number and wire Type into its unified form. -func EncodeTag(num Number, typ Type) uint64 { - return uint64(num)<<3 | uint64(typ&7) -} - -// DecodeZigZag decodes a zig-zag-encoded uint64 as an int64. -// Input: {…, 5, 3, 1, 0, 2, 4, 6, …} -// Output: {…, -3, -2, -1, 0, +1, +2, +3, …} -func DecodeZigZag(x uint64) int64 { - return int64(x>>1) ^ int64(x)<<63>>63 -} - -// EncodeZigZag encodes an int64 as a zig-zag-encoded uint64. -// Input: {…, -3, -2, -1, 0, +1, +2, +3, …} -// Output: {…, 5, 3, 1, 0, 2, 4, 6, …} -func EncodeZigZag(x int64) uint64 { - return uint64(x<<1) ^ uint64(x>>63) -} - -// DecodeBool decodes a uint64 as a bool. -// Input: { 0, 1, 2, …} -// Output: {false, true, true, …} -func DecodeBool(x uint64) bool { - return x != 0 -} - -// EncodeBool encodes a bool as a uint64. -// Input: {false, true} -// Output: { 0, 1} -func EncodeBool(x bool) uint64 { - if x { - return 1 - } - return 0 -} diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go deleted file mode 100644 index 360c63329..000000000 --- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go +++ /dev/null @@ -1,318 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package descfmt provides functionality to format descriptors. 
-package descfmt - -import ( - "fmt" - "io" - "reflect" - "strconv" - "strings" - - "google.golang.org/protobuf/internal/detrand" - "google.golang.org/protobuf/internal/pragma" - pref "google.golang.org/protobuf/reflect/protoreflect" -) - -type list interface { - Len() int - pragma.DoNotImplement -} - -func FormatList(s fmt.State, r rune, vs list) { - io.WriteString(s, formatListOpt(vs, true, r == 'v' && (s.Flag('+') || s.Flag('#')))) -} -func formatListOpt(vs list, isRoot, allowMulti bool) string { - start, end := "[", "]" - if isRoot { - var name string - switch vs.(type) { - case pref.Names: - name = "Names" - case pref.FieldNumbers: - name = "FieldNumbers" - case pref.FieldRanges: - name = "FieldRanges" - case pref.EnumRanges: - name = "EnumRanges" - case pref.FileImports: - name = "FileImports" - case pref.Descriptor: - name = reflect.ValueOf(vs).MethodByName("Get").Type().Out(0).Name() + "s" - default: - name = reflect.ValueOf(vs).Elem().Type().Name() - } - start, end = name+"{", "}" - } - - var ss []string - switch vs := vs.(type) { - case pref.Names: - for i := 0; i < vs.Len(); i++ { - ss = append(ss, fmt.Sprint(vs.Get(i))) - } - return start + joinStrings(ss, false) + end - case pref.FieldNumbers: - for i := 0; i < vs.Len(); i++ { - ss = append(ss, fmt.Sprint(vs.Get(i))) - } - return start + joinStrings(ss, false) + end - case pref.FieldRanges: - for i := 0; i < vs.Len(); i++ { - r := vs.Get(i) - if r[0]+1 == r[1] { - ss = append(ss, fmt.Sprintf("%d", r[0])) - } else { - ss = append(ss, fmt.Sprintf("%d:%d", r[0], r[1])) // enum ranges are end exclusive - } - } - return start + joinStrings(ss, false) + end - case pref.EnumRanges: - for i := 0; i < vs.Len(); i++ { - r := vs.Get(i) - if r[0] == r[1] { - ss = append(ss, fmt.Sprintf("%d", r[0])) - } else { - ss = append(ss, fmt.Sprintf("%d:%d", r[0], int64(r[1])+1)) // enum ranges are end inclusive - } - } - return start + joinStrings(ss, false) + end - case pref.FileImports: - for i := 0; i < vs.Len(); i++ { - var rs records - rs.Append(reflect.ValueOf(vs.Get(i)), "Path", "Package", "IsPublic", "IsWeak") - ss = append(ss, "{"+rs.Join()+"}") - } - return start + joinStrings(ss, allowMulti) + end - default: - _, isEnumValue := vs.(pref.EnumValueDescriptors) - for i := 0; i < vs.Len(); i++ { - m := reflect.ValueOf(vs).MethodByName("Get") - v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface() - ss = append(ss, formatDescOpt(v.(pref.Descriptor), false, allowMulti && !isEnumValue)) - } - return start + joinStrings(ss, allowMulti && isEnumValue) + end - } -} - -// descriptorAccessors is a list of accessors to print for each descriptor. -// -// Do not print all accessors since some contain redundant information, -// while others are pointers that we do not want to follow since the descriptor -// is actually a cyclic graph. -// -// Using a list allows us to print the accessors in a sensible order. 
-var descriptorAccessors = map[reflect.Type][]string{ - reflect.TypeOf((*pref.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"}, - reflect.TypeOf((*pref.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"}, - reflect.TypeOf((*pref.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"}, - reflect.TypeOf((*pref.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt - reflect.TypeOf((*pref.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"}, - reflect.TypeOf((*pref.EnumValueDescriptor)(nil)).Elem(): {"Number"}, - reflect.TypeOf((*pref.ServiceDescriptor)(nil)).Elem(): {"Methods"}, - reflect.TypeOf((*pref.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"}, -} - -func FormatDesc(s fmt.State, r rune, t pref.Descriptor) { - io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')))) -} -func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { - rv := reflect.ValueOf(t) - rt := rv.MethodByName("ProtoType").Type().In(0) - - start, end := "{", "}" - if isRoot { - start = rt.Name() + "{" - } - - _, isFile := t.(pref.FileDescriptor) - rs := records{allowMulti: allowMulti} - if t.IsPlaceholder() { - if isFile { - rs.Append(rv, "Path", "Package", "IsPlaceholder") - } else { - rs.Append(rv, "FullName", "IsPlaceholder") - } - } else { - switch { - case isFile: - rs.Append(rv, "Syntax") - case isRoot: - rs.Append(rv, "Syntax", "FullName") - default: - rs.Append(rv, "Name") - } - switch t := t.(type) { - case pref.FieldDescriptor: - for _, s := range descriptorAccessors[rt] { - switch s { - case "MapKey": - if k := t.MapKey(); k != nil { - rs.recs = append(rs.recs, [2]string{"MapKey", k.Kind().String()}) - } - case "MapValue": - if v := t.MapValue(); v != nil { - switch v.Kind() { - case pref.EnumKind: - rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Enum().FullName())}) - case pref.MessageKind, pref.GroupKind: - rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Message().FullName())}) - default: - rs.recs = append(rs.recs, [2]string{"MapValue", v.Kind().String()}) - } - } - case "ContainingOneof": - if od := t.ContainingOneof(); od != nil { - rs.recs = append(rs.recs, [2]string{"Oneof", string(od.Name())}) - } - case "ContainingMessage": - if t.IsExtension() { - rs.recs = append(rs.recs, [2]string{"Extendee", string(t.ContainingMessage().FullName())}) - } - case "Message": - if !t.IsMap() { - rs.Append(rv, s) - } - default: - rs.Append(rv, s) - } - } - case pref.OneofDescriptor: - var ss []string - fs := t.Fields() - for i := 0; i < fs.Len(); i++ { - ss = append(ss, string(fs.Get(i).Name())) - } - if len(ss) > 0 { - rs.recs = append(rs.recs, [2]string{"Fields", "[" + joinStrings(ss, false) + "]"}) - } - default: - rs.Append(rv, descriptorAccessors[rt]...) 
- } - if rv.MethodByName("GoType").IsValid() { - rs.Append(rv, "GoType") - } - } - return start + rs.Join() + end -} - -type records struct { - recs [][2]string - allowMulti bool -} - -func (rs *records) Append(v reflect.Value, accessors ...string) { - for _, a := range accessors { - var rv reflect.Value - if m := v.MethodByName(a); m.IsValid() { - rv = m.Call(nil)[0] - } - if v.Kind() == reflect.Struct && !rv.IsValid() { - rv = v.FieldByName(a) - } - if !rv.IsValid() { - panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a)) - } - if _, ok := rv.Interface().(pref.Value); ok { - rv = rv.MethodByName("Interface").Call(nil)[0] - if !rv.IsNil() { - rv = rv.Elem() - } - } - - // Ignore zero values. - var isZero bool - switch rv.Kind() { - case reflect.Interface, reflect.Slice: - isZero = rv.IsNil() - case reflect.Bool: - isZero = rv.Bool() == false - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - isZero = rv.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - isZero = rv.Uint() == 0 - case reflect.String: - isZero = rv.String() == "" - } - if n, ok := rv.Interface().(list); ok { - isZero = n.Len() == 0 - } - if isZero { - continue - } - - // Format the value. - var s string - v := rv.Interface() - switch v := v.(type) { - case list: - s = formatListOpt(v, false, rs.allowMulti) - case pref.FieldDescriptor, pref.OneofDescriptor, pref.EnumValueDescriptor, pref.MethodDescriptor: - s = string(v.(pref.Descriptor).Name()) - case pref.Descriptor: - s = string(v.FullName()) - case string: - s = strconv.Quote(v) - case []byte: - s = fmt.Sprintf("%q", v) - default: - s = fmt.Sprint(v) - } - rs.recs = append(rs.recs, [2]string{a, s}) - } -} - -func (rs *records) Join() string { - var ss []string - - // In single line mode, simply join all records with commas. - if !rs.allowMulti { - for _, r := range rs.recs { - ss = append(ss, r[0]+formatColon(0)+r[1]) - } - return joinStrings(ss, false) - } - - // In allowMulti line mode, align single line records for more readable output. - var maxLen int - flush := func(i int) { - for _, r := range rs.recs[len(ss):i] { - ss = append(ss, r[0]+formatColon(maxLen-len(r[0]))+r[1]) - } - maxLen = 0 - } - for i, r := range rs.recs { - if isMulti := strings.Contains(r[1], "\n"); isMulti { - flush(i) - ss = append(ss, r[0]+formatColon(0)+strings.Join(strings.Split(r[1], "\n"), "\n\t")) - } else if maxLen < len(r[0]) { - maxLen = len(r[0]) - } - } - flush(len(rs.recs)) - return joinStrings(ss, true) -} - -func formatColon(padding int) string { - // Deliberately introduce instability into the debug output to - // discourage users from performing string comparisons. - // This provides us flexibility to change the output in the future. - if detrand.Bool() { - return ":" + strings.Repeat(" ", 1+padding) // use non-breaking spaces (U+00a0) - } else { - return ":" + strings.Repeat(" ", 1+padding) // use regular spaces (U+0020) - } -} - -func joinStrings(ss []string, isMulti bool) string { - if len(ss) == 0 { - return "" - } - if isMulti { - return "\n\t" + strings.Join(ss, "\n\t") + "\n" - } - return strings.Join(ss, ", ") -} diff --git a/vendor/google.golang.org/protobuf/internal/descopts/options.go b/vendor/google.golang.org/protobuf/internal/descopts/options.go deleted file mode 100644 index 8401be8c8..000000000 --- a/vendor/google.golang.org/protobuf/internal/descopts/options.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package descopts contains the nil pointers to concrete descriptor options. -// -// This package exists as a form of reverse dependency injection so that certain -// packages (e.g., internal/filedesc and internal/filetype can avoid a direct -// dependency on the descriptor proto package). -package descopts - -import pref "google.golang.org/protobuf/reflect/protoreflect" - -// These variables are set by the init function in descriptor.pb.go via logic -// in internal/filetype. In other words, so long as the descriptor proto package -// is linked in, these variables will be populated. -// -// Each variable is populated with a nil pointer to the options struct. -var ( - File pref.ProtoMessage - Enum pref.ProtoMessage - EnumValue pref.ProtoMessage - Message pref.ProtoMessage - Field pref.ProtoMessage - Oneof pref.ProtoMessage - ExtensionRange pref.ProtoMessage - Service pref.ProtoMessage - Method pref.ProtoMessage -) diff --git a/vendor/google.golang.org/protobuf/internal/detrand/rand.go b/vendor/google.golang.org/protobuf/internal/detrand/rand.go deleted file mode 100644 index 49c8676d4..000000000 --- a/vendor/google.golang.org/protobuf/internal/detrand/rand.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package detrand provides deterministically random functionality. -// -// The pseudo-randomness of these functions is seeded by the program binary -// itself and guarantees that the output does not change within a program, -// while ensuring that the output is unstable across different builds. -package detrand - -import ( - "encoding/binary" - "hash/fnv" - "os" -) - -// Disable disables detrand such that all functions returns the zero value. -// This function is not concurrent-safe and must be called during program init. -func Disable() { - randSeed = 0 -} - -// Bool returns a deterministically random boolean. -func Bool() bool { - return randSeed%2 == 1 -} - -// Intn returns a deterministically random integer between 0 and n-1, inclusive. -func Intn(n int) int { - if n <= 0 { - panic("must be positive") - } - return int(randSeed % uint64(n)) -} - -// randSeed is a best-effort at an approximate hash of the Go binary. -var randSeed = binaryHash() - -func binaryHash() uint64 { - // Open the Go binary. - s, err := os.Executable() - if err != nil { - return 0 - } - f, err := os.Open(s) - if err != nil { - return 0 - } - defer f.Close() - - // Hash the size and several samples of the Go binary. - const numSamples = 8 - var buf [64]byte - h := fnv.New64() - fi, err := f.Stat() - if err != nil { - return 0 - } - binary.LittleEndian.PutUint64(buf[:8], uint64(fi.Size())) - h.Write(buf[:8]) - for i := int64(0); i < numSamples; i++ { - if _, err := f.ReadAt(buf[:], i*fi.Size()/numSamples); err != nil { - return 0 - } - h.Write(buf[:]) - } - return h.Sum64() -} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go b/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go deleted file mode 100644 index fdd9b13f2..000000000 --- a/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go +++ /dev/null @@ -1,213 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// Package defval marshals and unmarshals textual forms of default values. -// -// This package handles both the form historically used in Go struct field tags -// and also the form used by google.protobuf.FieldDescriptorProto.default_value -// since they differ in superficial ways. -package defval - -import ( - "fmt" - "math" - "strconv" - - ptext "google.golang.org/protobuf/internal/encoding/text" - errors "google.golang.org/protobuf/internal/errors" - pref "google.golang.org/protobuf/reflect/protoreflect" -) - -// Format is the serialization format used to represent the default value. -type Format int - -const ( - _ Format = iota - - // Descriptor uses the serialization format that protoc uses with the - // google.protobuf.FieldDescriptorProto.default_value field. - Descriptor - - // GoTag uses the historical serialization format in Go struct field tags. - GoTag -) - -// Unmarshal deserializes the default string s according to the given kind k. -// When k is an enum, a list of enum value descriptors must be provided. -func Unmarshal(s string, k pref.Kind, evs pref.EnumValueDescriptors, f Format) (pref.Value, pref.EnumValueDescriptor, error) { - switch k { - case pref.BoolKind: - if f == GoTag { - switch s { - case "1": - return pref.ValueOfBool(true), nil, nil - case "0": - return pref.ValueOfBool(false), nil, nil - } - } else { - switch s { - case "true": - return pref.ValueOfBool(true), nil, nil - case "false": - return pref.ValueOfBool(false), nil, nil - } - } - case pref.EnumKind: - if f == GoTag { - // Go tags use the numeric form of the enum value. - if n, err := strconv.ParseInt(s, 10, 32); err == nil { - if ev := evs.ByNumber(pref.EnumNumber(n)); ev != nil { - return pref.ValueOfEnum(ev.Number()), ev, nil - } - } - } else { - // Descriptor default_value use the enum identifier. - ev := evs.ByName(pref.Name(s)) - if ev != nil { - return pref.ValueOfEnum(ev.Number()), ev, nil - } - } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: - if v, err := strconv.ParseInt(s, 10, 32); err == nil { - return pref.ValueOfInt32(int32(v)), nil, nil - } - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: - if v, err := strconv.ParseInt(s, 10, 64); err == nil { - return pref.ValueOfInt64(int64(v)), nil, nil - } - case pref.Uint32Kind, pref.Fixed32Kind: - if v, err := strconv.ParseUint(s, 10, 32); err == nil { - return pref.ValueOfUint32(uint32(v)), nil, nil - } - case pref.Uint64Kind, pref.Fixed64Kind: - if v, err := strconv.ParseUint(s, 10, 64); err == nil { - return pref.ValueOfUint64(uint64(v)), nil, nil - } - case pref.FloatKind, pref.DoubleKind: - var v float64 - var err error - switch s { - case "-inf": - v = math.Inf(-1) - case "inf": - v = math.Inf(+1) - case "nan": - v = math.NaN() - default: - v, err = strconv.ParseFloat(s, 64) - } - if err == nil { - if k == pref.FloatKind { - return pref.ValueOfFloat32(float32(v)), nil, nil - } else { - return pref.ValueOfFloat64(float64(v)), nil, nil - } - } - case pref.StringKind: - // String values are already unescaped and can be used as is. - return pref.ValueOfString(s), nil, nil - case pref.BytesKind: - if b, ok := unmarshalBytes(s); ok { - return pref.ValueOfBytes(b), nil, nil - } - } - return pref.Value{}, nil, errors.New("could not parse value for %v: %q", k, s) -} - -// Marshal serializes v as the default string according to the given kind k. -// When specifying the Descriptor format for an enum kind, the associated -// enum value descriptor must be provided. 
-func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) (string, error) { - switch k { - case pref.BoolKind: - if f == GoTag { - if v.Bool() { - return "1", nil - } else { - return "0", nil - } - } else { - if v.Bool() { - return "true", nil - } else { - return "false", nil - } - } - case pref.EnumKind: - if f == GoTag { - return strconv.FormatInt(int64(v.Enum()), 10), nil - } else { - return string(ev.Name()), nil - } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind, pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: - return strconv.FormatInt(v.Int(), 10), nil - case pref.Uint32Kind, pref.Fixed32Kind, pref.Uint64Kind, pref.Fixed64Kind: - return strconv.FormatUint(v.Uint(), 10), nil - case pref.FloatKind, pref.DoubleKind: - f := v.Float() - switch { - case math.IsInf(f, -1): - return "-inf", nil - case math.IsInf(f, +1): - return "inf", nil - case math.IsNaN(f): - return "nan", nil - default: - if k == pref.FloatKind { - return strconv.FormatFloat(f, 'g', -1, 32), nil - } else { - return strconv.FormatFloat(f, 'g', -1, 64), nil - } - } - case pref.StringKind: - // String values are serialized as is without any escaping. - return v.String(), nil - case pref.BytesKind: - if s, ok := marshalBytes(v.Bytes()); ok { - return s, nil - } - } - return "", errors.New("could not format value for %v: %v", k, v) -} - -// unmarshalBytes deserializes bytes by applying C unescaping. -func unmarshalBytes(s string) ([]byte, bool) { - // Bytes values use the same escaping as the text format, - // however they lack the surrounding double quotes. - v, err := ptext.UnmarshalString(`"` + s + `"`) - if err != nil { - return nil, false - } - return []byte(v), true -} - -// marshalBytes serializes bytes by using C escaping. -// To match the exact output of protoc, this is identical to the -// CEscape function in strutil.cc of the protoc source code. -func marshalBytes(b []byte) (string, bool) { - var s []byte - for _, c := range b { - switch c { - case '\n': - s = append(s, `\n`...) - case '\r': - s = append(s, `\r`...) - case '\t': - s = append(s, `\t`...) - case '"': - s = append(s, `\"`...) - case '\'': - s = append(s, `\'`...) - case '\\': - s = append(s, `\\`...) - default: - if printableASCII := c >= 0x20 && c <= 0x7e; printableASCII { - s = append(s, c) - } else { - s = append(s, fmt.Sprintf(`\%03o`, c)...) - } - } - } - return string(s), true -} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go deleted file mode 100644 index c1866f3c1..000000000 --- a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package messageset encodes and decodes the obsolete MessageSet wire format. -package messageset - -import ( - "math" - - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/errors" - pref "google.golang.org/protobuf/reflect/protoreflect" -) - -// The MessageSet wire format is equivalent to a message defined as follows, -// where each Item defines an extension field with a field number of 'type_id' -// and content of 'message'. MessageSet extensions must be non-repeated message -// fields. 
-// -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// } -// } -const ( - FieldItem = protowire.Number(1) - FieldTypeID = protowire.Number(2) - FieldMessage = protowire.Number(3) -) - -// ExtensionName is the field name for extensions of MessageSet. -// -// A valid MessageSet extension must be of the form: -// message MyMessage { -// extend proto2.bridge.MessageSet { -// optional MyMessage message_set_extension = 1234; -// } -// ... -// } -const ExtensionName = "message_set_extension" - -// IsMessageSet returns whether the message uses the MessageSet wire format. -func IsMessageSet(md pref.MessageDescriptor) bool { - xmd, ok := md.(interface{ IsMessageSet() bool }) - return ok && xmd.IsMessageSet() -} - -// IsMessageSetExtension reports this field properly extends a MessageSet. -func IsMessageSetExtension(fd pref.FieldDescriptor) bool { - switch { - case fd.Name() != ExtensionName: - return false - case !IsMessageSet(fd.ContainingMessage()): - return false - case fd.FullName().Parent() != fd.Message().FullName(): - return false - } - return true -} - -// SizeField returns the size of a MessageSet item field containing an extension -// with the given field number, not counting the contents of the message subfield. -func SizeField(num protowire.Number) int { - return 2*protowire.SizeTag(FieldItem) + protowire.SizeTag(FieldTypeID) + protowire.SizeVarint(uint64(num)) -} - -// Unmarshal parses a MessageSet. -// -// It calls fn with the type ID and value of each item in the MessageSet. -// Unknown fields are discarded. -// -// If wantLen is true, the item values include the varint length prefix. -// This is ugly, but simplifies the fast-path decoder in internal/impl. -func Unmarshal(b []byte, wantLen bool, fn func(typeID protowire.Number, value []byte) error) error { - for len(b) > 0 { - num, wtyp, n := protowire.ConsumeTag(b) - if n < 0 { - return protowire.ParseError(n) - } - b = b[n:] - if num != FieldItem || wtyp != protowire.StartGroupType { - n := protowire.ConsumeFieldValue(num, wtyp, b) - if n < 0 { - return protowire.ParseError(n) - } - b = b[n:] - continue - } - typeID, value, n, err := ConsumeFieldValue(b, wantLen) - if err != nil { - return err - } - b = b[n:] - if typeID == 0 { - continue - } - if err := fn(typeID, value); err != nil { - return err - } - } - return nil -} - -// ConsumeFieldValue parses b as a MessageSet item field value until and including -// the trailing end group marker. It assumes the start group tag has already been parsed. -// It returns the contents of the type_id and message subfields and the total -// item length. -// -// If wantLen is true, the returned message value includes the length prefix. -func ConsumeFieldValue(b []byte, wantLen bool) (typeid protowire.Number, message []byte, n int, err error) { - ilen := len(b) - for { - num, wtyp, n := protowire.ConsumeTag(b) - if n < 0 { - return 0, nil, 0, protowire.ParseError(n) - } - b = b[n:] - switch { - case num == FieldItem && wtyp == protowire.EndGroupType: - if wantLen && len(message) == 0 { - // The message field was missing, which should never happen. - // Be prepared for this case anyway. 
- message = protowire.AppendVarint(message, 0) - } - return typeid, message, ilen - len(b), nil - case num == FieldTypeID && wtyp == protowire.VarintType: - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return 0, nil, 0, protowire.ParseError(n) - } - b = b[n:] - if v < 1 || v > math.MaxInt32 { - return 0, nil, 0, errors.New("invalid type_id in message set") - } - typeid = protowire.Number(v) - case num == FieldMessage && wtyp == protowire.BytesType: - m, n := protowire.ConsumeBytes(b) - if n < 0 { - return 0, nil, 0, protowire.ParseError(n) - } - if message == nil { - if wantLen { - message = b[:n:n] - } else { - message = m[:len(m):len(m)] - } - } else { - // This case should never happen in practice, but handle it for - // correctness: The MessageSet item contains multiple message - // fields, which need to be merged. - // - // In the case where we're returning the length, this becomes - // quite inefficient since we need to strip the length off - // the existing data and reconstruct it with the combined length. - if wantLen { - _, nn := protowire.ConsumeVarint(message) - m0 := message[nn:] - message = nil - message = protowire.AppendVarint(message, uint64(len(m0)+len(m))) - message = append(message, m0...) - message = append(message, m...) - } else { - message = append(message, m...) - } - } - b = b[n:] - default: - // We have no place to put it, so we just ignore unknown fields. - n := protowire.ConsumeFieldValue(num, wtyp, b) - if n < 0 { - return 0, nil, 0, protowire.ParseError(n) - } - b = b[n:] - } - } -} - -// AppendFieldStart appends the start of a MessageSet item field containing -// an extension with the given number. The caller must add the message -// subfield (including the tag). -func AppendFieldStart(b []byte, num protowire.Number) []byte { - b = protowire.AppendTag(b, FieldItem, protowire.StartGroupType) - b = protowire.AppendTag(b, FieldTypeID, protowire.VarintType) - b = protowire.AppendVarint(b, uint64(num)) - return b -} - -// AppendFieldEnd appends the trailing end group marker for a MessageSet item field. -func AppendFieldEnd(b []byte) []byte { - return protowire.AppendTag(b, FieldItem, protowire.EndGroupType) -} - -// SizeUnknown returns the size of an unknown fields section in MessageSet format. -// -// See AppendUnknown. -func SizeUnknown(unknown []byte) (size int) { - for len(unknown) > 0 { - num, typ, n := protowire.ConsumeTag(unknown) - if n < 0 || typ != protowire.BytesType { - return 0 - } - unknown = unknown[n:] - _, n = protowire.ConsumeBytes(unknown) - if n < 0 { - return 0 - } - unknown = unknown[n:] - size += SizeField(num) + protowire.SizeTag(FieldMessage) + n - } - return size -} - -// AppendUnknown appends unknown fields to b in MessageSet format. -// -// For historic reasons, unresolved items in a MessageSet are stored in a -// message's unknown fields section in non-MessageSet format. That is, an -// unknown item with typeID T and value V appears in the unknown fields as -// a field with number T and value V. -// -// This function converts the unknown fields back into MessageSet form. 
-func AppendUnknown(b, unknown []byte) ([]byte, error) { - for len(unknown) > 0 { - num, typ, n := protowire.ConsumeTag(unknown) - if n < 0 || typ != protowire.BytesType { - return nil, errors.New("invalid data in message set unknown fields") - } - unknown = unknown[n:] - _, n = protowire.ConsumeBytes(unknown) - if n < 0 { - return nil, errors.New("invalid data in message set unknown fields") - } - b = AppendFieldStart(b, num) - b = protowire.AppendTag(b, FieldMessage, protowire.BytesType) - b = append(b, unknown[:n]...) - b = AppendFieldEnd(b) - unknown = unknown[n:] - } - return b, nil -} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go deleted file mode 100644 index 38f1931c6..000000000 --- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package tag marshals and unmarshals the legacy struct tags as generated -// by historical versions of protoc-gen-go. -package tag - -import ( - "reflect" - "strconv" - "strings" - - defval "google.golang.org/protobuf/internal/encoding/defval" - fdesc "google.golang.org/protobuf/internal/filedesc" - "google.golang.org/protobuf/internal/strs" - pref "google.golang.org/protobuf/reflect/protoreflect" -) - -var byteType = reflect.TypeOf(byte(0)) - -// Unmarshal decodes the tag into a prototype.Field. -// -// The goType is needed to determine the original protoreflect.Kind since the -// tag does not record sufficient information to determine that. -// The type is the underlying field type (e.g., a repeated field may be -// represented by []T, but the Go type passed in is just T). -// A list of enum value descriptors must be provided for enum fields. -// This does not populate the Enum or Message (except for weak message). -// -// This function is a best effort attempt; parsing errors are ignored. 
-func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) pref.FieldDescriptor { - f := new(fdesc.Field) - f.L0.ParentFile = fdesc.SurrogateProto2 - for len(tag) > 0 { - i := strings.IndexByte(tag, ',') - if i < 0 { - i = len(tag) - } - switch s := tag[:i]; { - case strings.HasPrefix(s, "name="): - f.L0.FullName = pref.FullName(s[len("name="):]) - case strings.Trim(s, "0123456789") == "": - n, _ := strconv.ParseUint(s, 10, 32) - f.L1.Number = pref.FieldNumber(n) - case s == "opt": - f.L1.Cardinality = pref.Optional - case s == "req": - f.L1.Cardinality = pref.Required - case s == "rep": - f.L1.Cardinality = pref.Repeated - case s == "varint": - switch goType.Kind() { - case reflect.Bool: - f.L1.Kind = pref.BoolKind - case reflect.Int32: - f.L1.Kind = pref.Int32Kind - case reflect.Int64: - f.L1.Kind = pref.Int64Kind - case reflect.Uint32: - f.L1.Kind = pref.Uint32Kind - case reflect.Uint64: - f.L1.Kind = pref.Uint64Kind - } - case s == "zigzag32": - if goType.Kind() == reflect.Int32 { - f.L1.Kind = pref.Sint32Kind - } - case s == "zigzag64": - if goType.Kind() == reflect.Int64 { - f.L1.Kind = pref.Sint64Kind - } - case s == "fixed32": - switch goType.Kind() { - case reflect.Int32: - f.L1.Kind = pref.Sfixed32Kind - case reflect.Uint32: - f.L1.Kind = pref.Fixed32Kind - case reflect.Float32: - f.L1.Kind = pref.FloatKind - } - case s == "fixed64": - switch goType.Kind() { - case reflect.Int64: - f.L1.Kind = pref.Sfixed64Kind - case reflect.Uint64: - f.L1.Kind = pref.Fixed64Kind - case reflect.Float64: - f.L1.Kind = pref.DoubleKind - } - case s == "bytes": - switch { - case goType.Kind() == reflect.String: - f.L1.Kind = pref.StringKind - case goType.Kind() == reflect.Slice && goType.Elem() == byteType: - f.L1.Kind = pref.BytesKind - default: - f.L1.Kind = pref.MessageKind - } - case s == "group": - f.L1.Kind = pref.GroupKind - case strings.HasPrefix(s, "enum="): - f.L1.Kind = pref.EnumKind - case strings.HasPrefix(s, "json="): - jsonName := s[len("json="):] - if jsonName != strs.JSONCamelCase(string(f.L0.FullName.Name())) { - f.L1.StringName.InitJSON(jsonName) - } - case s == "packed": - f.L1.HasPacked = true - f.L1.IsPacked = true - case strings.HasPrefix(s, "weak="): - f.L1.IsWeak = true - f.L1.Message = fdesc.PlaceholderMessage(pref.FullName(s[len("weak="):])) - case strings.HasPrefix(s, "def="): - // The default tag is special in that everything afterwards is the - // default regardless of the presence of commas. - s, i = tag[len("def="):], len(tag) - v, ev, _ := defval.Unmarshal(s, f.L1.Kind, evs, defval.GoTag) - f.L1.Default = fdesc.DefaultValue(v, ev) - case s == "proto3": - f.L0.ParentFile = fdesc.SurrogateProto3 - } - tag = strings.TrimPrefix(tag[i:], ",") - } - - // The generator uses the group message name instead of the field name. - // We obtain the real field name by lowercasing the group name. - if f.L1.Kind == pref.GroupKind { - f.L0.FullName = pref.FullName(strings.ToLower(string(f.L0.FullName))) - } - return f -} - -// Marshal encodes the protoreflect.FieldDescriptor as a tag. -// -// The enumName must be provided if the kind is an enum. -// Historically, the formulation of the enum "name" was the proto package -// dot-concatenated with the generated Go identifier for the enum type. -// Depending on the context on how Marshal is called, there are different ways -// through which that information is determined. As such it is the caller's -// responsibility to provide a function to obtain that information. 
-func Marshal(fd pref.FieldDescriptor, enumName string) string { - var tag []string - switch fd.Kind() { - case pref.BoolKind, pref.EnumKind, pref.Int32Kind, pref.Uint32Kind, pref.Int64Kind, pref.Uint64Kind: - tag = append(tag, "varint") - case pref.Sint32Kind: - tag = append(tag, "zigzag32") - case pref.Sint64Kind: - tag = append(tag, "zigzag64") - case pref.Sfixed32Kind, pref.Fixed32Kind, pref.FloatKind: - tag = append(tag, "fixed32") - case pref.Sfixed64Kind, pref.Fixed64Kind, pref.DoubleKind: - tag = append(tag, "fixed64") - case pref.StringKind, pref.BytesKind, pref.MessageKind: - tag = append(tag, "bytes") - case pref.GroupKind: - tag = append(tag, "group") - } - tag = append(tag, strconv.Itoa(int(fd.Number()))) - switch fd.Cardinality() { - case pref.Optional: - tag = append(tag, "opt") - case pref.Required: - tag = append(tag, "req") - case pref.Repeated: - tag = append(tag, "rep") - } - if fd.IsPacked() { - tag = append(tag, "packed") - } - name := string(fd.Name()) - if fd.Kind() == pref.GroupKind { - // The name of the FieldDescriptor for a group field is - // lowercased. To find the original capitalization, we - // look in the field's MessageType. - name = string(fd.Message().Name()) - } - tag = append(tag, "name="+name) - if jsonName := fd.JSONName(); jsonName != "" && jsonName != name && !fd.IsExtension() { - // NOTE: The jsonName != name condition is suspect, but it preserve - // the exact same semantics from the previous generator. - tag = append(tag, "json="+jsonName) - } - if fd.IsWeak() { - tag = append(tag, "weak="+string(fd.Message().FullName())) - } - // The previous implementation does not tag extension fields as proto3, - // even when the field is defined in a proto3 file. Match that behavior - // for consistency. - if fd.Syntax() == pref.Proto3 && !fd.IsExtension() { - tag = append(tag, "proto3") - } - if fd.Kind() == pref.EnumKind && enumName != "" { - tag = append(tag, "enum="+enumName) - } - if fd.ContainingOneof() != nil { - tag = append(tag, "oneof") - } - // This must appear last in the tag, since commas in strings aren't escaped. - if fd.HasDefault() { - def, _ := defval.Marshal(fd.Default(), fd.DefaultEnumValue(), fd.Kind(), defval.GoTag) - tag = append(tag, "def="+def) - } - return strings.Join(tag, ",") -} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go deleted file mode 100644 index 37803773f..000000000 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go +++ /dev/null @@ -1,665 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package text - -import ( - "bytes" - "fmt" - "io" - "regexp" - "strconv" - "unicode/utf8" - - "google.golang.org/protobuf/internal/errors" -) - -// Decoder is a token-based textproto decoder. -type Decoder struct { - // lastCall is last method called, either readCall or peekCall. - // Initial value is readCall. - lastCall call - - // lastToken contains the last read token. - lastToken Token - - // lastErr contains the last read error. - lastErr error - - // openStack is a stack containing the byte characters for MessageOpen and - // ListOpen kinds. The top of stack represents the message or the list that - // the current token is nested in. An empty stack means the current token is - // at the top level message. The characters '{' and '<' both represent the - // MessageOpen kind. 
- openStack []byte - - // orig is used in reporting line and column. - orig []byte - // in contains the unconsumed input. - in []byte -} - -// NewDecoder returns a Decoder to read the given []byte. -func NewDecoder(b []byte) *Decoder { - return &Decoder{orig: b, in: b} -} - -// ErrUnexpectedEOF means that EOF was encountered in the middle of the input. -var ErrUnexpectedEOF = errors.New("%v", io.ErrUnexpectedEOF) - -// call specifies which Decoder method was invoked. -type call uint8 - -const ( - readCall call = iota - peekCall -) - -// Peek looks ahead and returns the next token and error without advancing a read. -func (d *Decoder) Peek() (Token, error) { - defer func() { d.lastCall = peekCall }() - if d.lastCall == readCall { - d.lastToken, d.lastErr = d.Read() - } - return d.lastToken, d.lastErr -} - -// Read returns the next token. -// It will return an error if there is no valid token. -func (d *Decoder) Read() (Token, error) { - defer func() { d.lastCall = readCall }() - if d.lastCall == peekCall { - return d.lastToken, d.lastErr - } - - tok, err := d.parseNext(d.lastToken.kind) - if err != nil { - return Token{}, err - } - - switch tok.kind { - case comma, semicolon: - tok, err = d.parseNext(tok.kind) - if err != nil { - return Token{}, err - } - } - d.lastToken = tok - return tok, nil -} - -const ( - mismatchedFmt = "mismatched close character %q" - unexpectedFmt = "unexpected character %q" -) - -// parseNext parses the next Token based on given last kind. -func (d *Decoder) parseNext(lastKind Kind) (Token, error) { - // Trim leading spaces. - d.consume(0) - isEOF := false - if len(d.in) == 0 { - isEOF = true - } - - switch lastKind { - case EOF: - return d.consumeToken(EOF, 0, 0), nil - - case bof: - // Start of top level message. Next token can be EOF or Name. - if isEOF { - return d.consumeToken(EOF, 0, 0), nil - } - return d.parseFieldName() - - case Name: - // Next token can be MessageOpen, ListOpen or Scalar. - if isEOF { - return Token{}, ErrUnexpectedEOF - } - switch ch := d.in[0]; ch { - case '{', '<': - d.pushOpenStack(ch) - return d.consumeToken(MessageOpen, 1, 0), nil - case '[': - d.pushOpenStack(ch) - return d.consumeToken(ListOpen, 1, 0), nil - default: - return d.parseScalar() - } - - case Scalar: - openKind, closeCh := d.currentOpenKind() - switch openKind { - case bof: - // Top level message. - // Next token can be EOF, comma, semicolon or Name. - if isEOF { - return d.consumeToken(EOF, 0, 0), nil - } - switch d.in[0] { - case ',': - return d.consumeToken(comma, 1, 0), nil - case ';': - return d.consumeToken(semicolon, 1, 0), nil - default: - return d.parseFieldName() - } - - case MessageOpen: - // Next token can be MessageClose, comma, semicolon or Name. - if isEOF { - return Token{}, ErrUnexpectedEOF - } - switch ch := d.in[0]; ch { - case closeCh: - d.popOpenStack() - return d.consumeToken(MessageClose, 1, 0), nil - case otherCloseChar[closeCh]: - return Token{}, d.newSyntaxError(mismatchedFmt, ch) - case ',': - return d.consumeToken(comma, 1, 0), nil - case ';': - return d.consumeToken(semicolon, 1, 0), nil - default: - return d.parseFieldName() - } - - case ListOpen: - // Next token can be ListClose or comma. - if isEOF { - return Token{}, ErrUnexpectedEOF - } - switch ch := d.in[0]; ch { - case ']': - d.popOpenStack() - return d.consumeToken(ListClose, 1, 0), nil - case ',': - return d.consumeToken(comma, 1, 0), nil - default: - return Token{}, d.newSyntaxError(unexpectedFmt, ch) - } - } - - case MessageOpen: - // Next token can be MessageClose or Name. 
- if isEOF { - return Token{}, ErrUnexpectedEOF - } - _, closeCh := d.currentOpenKind() - switch ch := d.in[0]; ch { - case closeCh: - d.popOpenStack() - return d.consumeToken(MessageClose, 1, 0), nil - case otherCloseChar[closeCh]: - return Token{}, d.newSyntaxError(mismatchedFmt, ch) - default: - return d.parseFieldName() - } - - case MessageClose: - openKind, closeCh := d.currentOpenKind() - switch openKind { - case bof: - // Top level message. - // Next token can be EOF, comma, semicolon or Name. - if isEOF { - return d.consumeToken(EOF, 0, 0), nil - } - switch ch := d.in[0]; ch { - case ',': - return d.consumeToken(comma, 1, 0), nil - case ';': - return d.consumeToken(semicolon, 1, 0), nil - default: - return d.parseFieldName() - } - - case MessageOpen: - // Next token can be MessageClose, comma, semicolon or Name. - if isEOF { - return Token{}, ErrUnexpectedEOF - } - switch ch := d.in[0]; ch { - case closeCh: - d.popOpenStack() - return d.consumeToken(MessageClose, 1, 0), nil - case otherCloseChar[closeCh]: - return Token{}, d.newSyntaxError(mismatchedFmt, ch) - case ',': - return d.consumeToken(comma, 1, 0), nil - case ';': - return d.consumeToken(semicolon, 1, 0), nil - default: - return d.parseFieldName() - } - - case ListOpen: - // Next token can be ListClose or comma - if isEOF { - return Token{}, ErrUnexpectedEOF - } - switch ch := d.in[0]; ch { - case closeCh: - d.popOpenStack() - return d.consumeToken(ListClose, 1, 0), nil - case ',': - return d.consumeToken(comma, 1, 0), nil - default: - return Token{}, d.newSyntaxError(unexpectedFmt, ch) - } - } - - case ListOpen: - // Next token can be ListClose, MessageStart or Scalar. - if isEOF { - return Token{}, ErrUnexpectedEOF - } - switch ch := d.in[0]; ch { - case ']': - d.popOpenStack() - return d.consumeToken(ListClose, 1, 0), nil - case '{', '<': - d.pushOpenStack(ch) - return d.consumeToken(MessageOpen, 1, 0), nil - default: - return d.parseScalar() - } - - case ListClose: - openKind, closeCh := d.currentOpenKind() - switch openKind { - case bof: - // Top level message. - // Next token can be EOF, comma, semicolon or Name. - if isEOF { - return d.consumeToken(EOF, 0, 0), nil - } - switch ch := d.in[0]; ch { - case ',': - return d.consumeToken(comma, 1, 0), nil - case ';': - return d.consumeToken(semicolon, 1, 0), nil - default: - return d.parseFieldName() - } - - case MessageOpen: - // Next token can be MessageClose, comma, semicolon or Name. - if isEOF { - return Token{}, ErrUnexpectedEOF - } - switch ch := d.in[0]; ch { - case closeCh: - d.popOpenStack() - return d.consumeToken(MessageClose, 1, 0), nil - case otherCloseChar[closeCh]: - return Token{}, d.newSyntaxError(mismatchedFmt, ch) - case ',': - return d.consumeToken(comma, 1, 0), nil - case ';': - return d.consumeToken(semicolon, 1, 0), nil - default: - return d.parseFieldName() - } - - default: - // It is not possible to have this case. Let it panic below. - } - - case comma, semicolon: - openKind, closeCh := d.currentOpenKind() - switch openKind { - case bof: - // Top level message. Next token can be EOF or Name. - if isEOF { - return d.consumeToken(EOF, 0, 0), nil - } - return d.parseFieldName() - - case MessageOpen: - // Next token can be MessageClose or Name. 
- if isEOF { - return Token{}, ErrUnexpectedEOF - } - switch ch := d.in[0]; ch { - case closeCh: - d.popOpenStack() - return d.consumeToken(MessageClose, 1, 0), nil - case otherCloseChar[closeCh]: - return Token{}, d.newSyntaxError(mismatchedFmt, ch) - default: - return d.parseFieldName() - } - - case ListOpen: - if lastKind == semicolon { - // It is not be possible to have this case as logic here - // should not have produced a semicolon Token when inside a - // list. Let it panic below. - break - } - // Next token can be MessageOpen or Scalar. - if isEOF { - return Token{}, ErrUnexpectedEOF - } - switch ch := d.in[0]; ch { - case '{', '<': - d.pushOpenStack(ch) - return d.consumeToken(MessageOpen, 1, 0), nil - default: - return d.parseScalar() - } - } - } - - line, column := d.Position(len(d.orig) - len(d.in)) - panic(fmt.Sprintf("Decoder.parseNext: bug at handling line %d:%d with lastKind=%v", line, column, lastKind)) -} - -var otherCloseChar = map[byte]byte{ - '}': '>', - '>': '}', -} - -// currentOpenKind indicates whether current position is inside a message, list -// or top-level message by returning MessageOpen, ListOpen or bof respectively. -// If the returned kind is either a MessageOpen or ListOpen, it also returns the -// corresponding closing character. -func (d *Decoder) currentOpenKind() (Kind, byte) { - if len(d.openStack) == 0 { - return bof, 0 - } - openCh := d.openStack[len(d.openStack)-1] - switch openCh { - case '{': - return MessageOpen, '}' - case '<': - return MessageOpen, '>' - case '[': - return ListOpen, ']' - } - panic(fmt.Sprintf("Decoder: openStack contains invalid byte %c", openCh)) -} - -func (d *Decoder) pushOpenStack(ch byte) { - d.openStack = append(d.openStack, ch) -} - -func (d *Decoder) popOpenStack() { - d.openStack = d.openStack[:len(d.openStack)-1] -} - -// parseFieldName parses field name and separator. -func (d *Decoder) parseFieldName() (tok Token, err error) { - defer func() { - if err == nil && d.tryConsumeChar(':') { - tok.attrs |= hasSeparator - } - }() - - // Extension or Any type URL. - if d.in[0] == '[' { - return d.parseTypeName() - } - - // Identifier. - if size := parseIdent(d.in, false); size > 0 { - return d.consumeToken(Name, size, uint8(IdentName)), nil - } - - // Field number. Identify if input is a valid number that is not negative - // and is decimal integer within 32-bit range. - if num := parseNumber(d.in); num.size > 0 { - if !num.neg && num.kind == numDec { - if _, err := strconv.ParseInt(string(d.in[:num.size]), 10, 32); err == nil { - return d.consumeToken(Name, num.size, uint8(FieldNumber)), nil - } - } - return Token{}, d.newSyntaxError("invalid field number: %s", d.in[:num.size]) - } - - return Token{}, d.newSyntaxError("invalid field name: %s", errRegexp.Find(d.in)) -} - -// parseTypeName parses Any type URL or extension field name. The name is -// enclosed in [ and ] characters. The C++ parser does not handle many legal URL -// strings. This implementation is more liberal and allows for the pattern -// ^[-_a-zA-Z0-9]+([./][-_a-zA-Z0-9]+)*`). Whitespaces and comments are allowed -// in between [ ], '.', '/' and the sub names. -func (d *Decoder) parseTypeName() (Token, error) { - startPos := len(d.orig) - len(d.in) - // Use alias s to advance first in order to use d.in for error handling. - // Caller already checks for [ as first character. 
- s := consume(d.in[1:], 0) - if len(s) == 0 { - return Token{}, ErrUnexpectedEOF - } - - var name []byte - for len(s) > 0 && isTypeNameChar(s[0]) { - name = append(name, s[0]) - s = s[1:] - } - s = consume(s, 0) - - var closed bool - for len(s) > 0 && !closed { - switch { - case s[0] == ']': - s = s[1:] - closed = true - - case s[0] == '/', s[0] == '.': - if len(name) > 0 && (name[len(name)-1] == '/' || name[len(name)-1] == '.') { - return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s", - d.orig[startPos:len(d.orig)-len(s)+1]) - } - name = append(name, s[0]) - s = s[1:] - s = consume(s, 0) - for len(s) > 0 && isTypeNameChar(s[0]) { - name = append(name, s[0]) - s = s[1:] - } - s = consume(s, 0) - - default: - return Token{}, d.newSyntaxError( - "invalid type URL/extension field name: %s", d.orig[startPos:len(d.orig)-len(s)+1]) - } - } - - if !closed { - return Token{}, ErrUnexpectedEOF - } - - // First character cannot be '.'. Last character cannot be '.' or '/'. - size := len(name) - if size == 0 || name[0] == '.' || name[size-1] == '.' || name[size-1] == '/' { - return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s", - d.orig[startPos:len(d.orig)-len(s)]) - } - - d.in = s - endPos := len(d.orig) - len(d.in) - d.consume(0) - - return Token{ - kind: Name, - attrs: uint8(TypeName), - pos: startPos, - raw: d.orig[startPos:endPos], - str: string(name), - }, nil -} - -func isTypeNameChar(b byte) bool { - return (b == '-' || b == '_' || - ('0' <= b && b <= '9') || - ('a' <= b && b <= 'z') || - ('A' <= b && b <= 'Z')) -} - -func isWhiteSpace(b byte) bool { - switch b { - case ' ', '\n', '\r', '\t': - return true - default: - return false - } -} - -// parseIdent parses an unquoted proto identifier and returns size. -// If allowNeg is true, it allows '-' to be the first character in the -// identifier. This is used when parsing literal values like -infinity, etc. -// Regular expression matches an identifier: `^[_a-zA-Z][_a-zA-Z0-9]*` -func parseIdent(input []byte, allowNeg bool) int { - var size int - - s := input - if len(s) == 0 { - return 0 - } - - if allowNeg && s[0] == '-' { - s = s[1:] - size++ - if len(s) == 0 { - return 0 - } - } - - switch { - case s[0] == '_', - 'a' <= s[0] && s[0] <= 'z', - 'A' <= s[0] && s[0] <= 'Z': - s = s[1:] - size++ - default: - return 0 - } - - for len(s) > 0 && (s[0] == '_' || - 'a' <= s[0] && s[0] <= 'z' || - 'A' <= s[0] && s[0] <= 'Z' || - '0' <= s[0] && s[0] <= '9') { - s = s[1:] - size++ - } - - if len(s) > 0 && !isDelim(s[0]) { - return 0 - } - - return size -} - -// parseScalar parses for a string, literal or number value. -func (d *Decoder) parseScalar() (Token, error) { - if d.in[0] == '"' || d.in[0] == '\'' { - return d.parseStringValue() - } - - if tok, ok := d.parseLiteralValue(); ok { - return tok, nil - } - - if tok, ok := d.parseNumberValue(); ok { - return tok, nil - } - - return Token{}, d.newSyntaxError("invalid scalar value: %s", errRegexp.Find(d.in)) -} - -// parseLiteralValue parses a literal value. A literal value is used for -// bools, special floats and enums. This function simply identifies that the -// field value is a literal. -func (d *Decoder) parseLiteralValue() (Token, bool) { - size := parseIdent(d.in, true) - if size == 0 { - return Token{}, false - } - return d.consumeToken(Scalar, size, literalValue), true -} - -// consumeToken constructs a Token for given Kind from d.in and consumes given -// size-length from it. 
-func (d *Decoder) consumeToken(kind Kind, size int, attrs uint8) Token { - // Important to compute raw and pos before consuming. - tok := Token{ - kind: kind, - attrs: attrs, - pos: len(d.orig) - len(d.in), - raw: d.in[:size], - } - d.consume(size) - return tok -} - -// newSyntaxError returns a syntax error with line and column information for -// current position. -func (d *Decoder) newSyntaxError(f string, x ...interface{}) error { - e := errors.New(f, x...) - line, column := d.Position(len(d.orig) - len(d.in)) - return errors.New("syntax error (line %d:%d): %v", line, column, e) -} - -// Position returns line and column number of given index of the original input. -// It will panic if index is out of range. -func (d *Decoder) Position(idx int) (line int, column int) { - b := d.orig[:idx] - line = bytes.Count(b, []byte("\n")) + 1 - if i := bytes.LastIndexByte(b, '\n'); i >= 0 { - b = b[i+1:] - } - column = utf8.RuneCount(b) + 1 // ignore multi-rune characters - return line, column -} - -func (d *Decoder) tryConsumeChar(c byte) bool { - if len(d.in) > 0 && d.in[0] == c { - d.consume(1) - return true - } - return false -} - -// consume consumes n bytes of input and any subsequent whitespace or comments. -func (d *Decoder) consume(n int) { - d.in = consume(d.in, n) - return -} - -// consume consumes n bytes of input and any subsequent whitespace or comments. -func consume(b []byte, n int) []byte { - b = b[n:] - for len(b) > 0 { - switch b[0] { - case ' ', '\n', '\r', '\t': - b = b[1:] - case '#': - if i := bytes.IndexByte(b, '\n'); i >= 0 { - b = b[i+len("\n"):] - } else { - b = nil - } - default: - return b - } - } - return b -} - -// Any sequence that looks like a non-delimiter (for error reporting). -var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9\/]+|.)`) - -// isDelim returns true if given byte is a delimiter character. -func isDelim(c byte) bool { - return !(c == '-' || c == '+' || c == '.' || c == '_' || - ('a' <= c && c <= 'z') || - ('A' <= c && c <= 'Z') || - ('0' <= c && c <= '9')) -} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go deleted file mode 100644 index f2d90b789..000000000 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package text - -// parseNumberValue parses a number from the input and returns a Token object. -func (d *Decoder) parseNumberValue() (Token, bool) { - in := d.in - num := parseNumber(in) - if num.size == 0 { - return Token{}, false - } - numAttrs := num.kind - if num.neg { - numAttrs |= isNegative - } - strSize := num.size - last := num.size - 1 - if num.kind == numFloat && (d.in[last] == 'f' || d.in[last] == 'F') { - strSize = last - } - tok := Token{ - kind: Scalar, - attrs: numberValue, - pos: len(d.orig) - len(d.in), - raw: d.in[:num.size], - str: string(d.in[:strSize]), - numAttrs: numAttrs, - } - d.consume(num.size) - return tok, true -} - -const ( - numDec uint8 = (1 << iota) / 2 - numHex - numOct - numFloat -) - -// number is the result of parsing out a valid number from parseNumber. It -// contains data for doing float or integer conversion via the strconv package -// in conjunction with the input bytes. 
-type number struct { - kind uint8 - neg bool - size int -} - -// parseNumber constructs a number object from given input. It allows for the -// following patterns: -// integer: ^-?([1-9][0-9]*|0[xX][0-9a-fA-F]+|0[0-7]*) -// float: ^-?((0|[1-9][0-9]*)?([.][0-9]*)?([eE][+-]?[0-9]+)?[fF]?) -// It also returns the number of parsed bytes for the given number, 0 if it is -// not a number. -func parseNumber(input []byte) number { - kind := numDec - var size int - var neg bool - - s := input - if len(s) == 0 { - return number{} - } - - // Optional - - if s[0] == '-' { - neg = true - s = s[1:] - size++ - if len(s) == 0 { - return number{} - } - } - - // C++ allows for whitespace and comments in between the negative sign and - // the rest of the number. This logic currently does not but is consistent - // with v1. - - switch { - case s[0] == '0': - if len(s) > 1 { - switch { - case s[1] == 'x' || s[1] == 'X': - // Parse as hex number. - kind = numHex - n := 2 - s = s[2:] - for len(s) > 0 && (('0' <= s[0] && s[0] <= '9') || - ('a' <= s[0] && s[0] <= 'f') || - ('A' <= s[0] && s[0] <= 'F')) { - s = s[1:] - n++ - } - if n == 2 { - return number{} - } - size += n - - case '0' <= s[1] && s[1] <= '7': - // Parse as octal number. - kind = numOct - n := 2 - s = s[2:] - for len(s) > 0 && '0' <= s[0] && s[0] <= '7' { - s = s[1:] - n++ - } - size += n - } - - if kind&(numHex|numOct) > 0 { - if len(s) > 0 && !isDelim(s[0]) { - return number{} - } - return number{kind: kind, neg: neg, size: size} - } - } - s = s[1:] - size++ - - case '1' <= s[0] && s[0] <= '9': - n := 1 - s = s[1:] - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - n++ - } - size += n - - case s[0] == '.': - // Set kind to numFloat to signify the intent to parse as float. And - // that it needs to have other digits after '.'. - kind = numFloat - - default: - return number{} - } - - // . followed by 0 or more digits. - if len(s) > 0 && s[0] == '.' { - n := 1 - s = s[1:] - // If decimal point was before any digits, it should be followed by - // other digits. - if len(s) == 0 && kind == numFloat { - return number{} - } - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - n++ - } - size += n - kind = numFloat - } - - // e or E followed by an optional - or + and 1 or more digits. - if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { - kind = numFloat - s = s[1:] - n := 1 - if s[0] == '+' || s[0] == '-' { - s = s[1:] - n++ - if len(s) == 0 { - return number{} - } - } - for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { - s = s[1:] - n++ - } - size += n - } - - // Optional suffix f or F for floats. - if len(s) > 0 && (s[0] == 'f' || s[0] == 'F') { - kind = numFloat - s = s[1:] - size++ - } - - // Check that next byte is a delimiter or it is at the end. - if len(s) > 0 && !isDelim(s[0]) { - return number{} - } - - return number{kind: kind, neg: neg, size: size} -} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go deleted file mode 100644 index d4d349023..000000000 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package text - -import ( - "bytes" - "strconv" - "strings" - "unicode" - "unicode/utf16" - "unicode/utf8" - - "google.golang.org/protobuf/internal/strs" -) - -// parseStringValue parses string field token. -// This differs from parseString since the text format allows -// multiple back-to-back string literals where they are semantically treated -// as a single large string with all values concatenated. -// -// E.g., `"foo" "bar" "baz"` => "foobarbaz" -func (d *Decoder) parseStringValue() (Token, error) { - // Note that the ending quote is sufficient to unambiguously mark the end - // of a string. Thus, the text grammar does not require intervening - // whitespace or control characters in-between strings. - // Thus, the following is valid: - // `"foo"'bar'"baz"` => "foobarbaz" - in0 := d.in - var ss []string - for len(d.in) > 0 && (d.in[0] == '"' || d.in[0] == '\'') { - s, err := d.parseString() - if err != nil { - return Token{}, err - } - ss = append(ss, s) - } - // d.in already points to the end of the value at this point. - return Token{ - kind: Scalar, - attrs: stringValue, - pos: len(d.orig) - len(in0), - raw: in0[:len(in0)-len(d.in)], - str: strings.Join(ss, ""), - }, nil -} - -// parseString parses a string value enclosed in " or '. -func (d *Decoder) parseString() (string, error) { - in := d.in - if len(in) == 0 { - return "", ErrUnexpectedEOF - } - quote := in[0] - in = in[1:] - i := indexNeedEscapeInBytes(in) - in, out := in[i:], in[:i:i] // set cap to prevent mutations - for len(in) > 0 { - switch r, n := utf8.DecodeRune(in); { - case r == utf8.RuneError && n == 1: - return "", d.newSyntaxError("invalid UTF-8 detected") - case r == 0 || r == '\n': - return "", d.newSyntaxError("invalid character %q in string", r) - case r == rune(quote): - in = in[1:] - d.consume(len(d.in) - len(in)) - return string(out), nil - case r == '\\': - if len(in) < 2 { - return "", ErrUnexpectedEOF - } - switch r := in[1]; r { - case '"', '\'', '\\', '?': - in, out = in[2:], append(out, r) - case 'a': - in, out = in[2:], append(out, '\a') - case 'b': - in, out = in[2:], append(out, '\b') - case 'n': - in, out = in[2:], append(out, '\n') - case 'r': - in, out = in[2:], append(out, '\r') - case 't': - in, out = in[2:], append(out, '\t') - case 'v': - in, out = in[2:], append(out, '\v') - case 'f': - in, out = in[2:], append(out, '\f') - case '0', '1', '2', '3', '4', '5', '6', '7': - // One, two, or three octal characters. - n := len(in[1:]) - len(bytes.TrimLeft(in[1:], "01234567")) - if n > 3 { - n = 3 - } - v, err := strconv.ParseUint(string(in[1:1+n]), 8, 8) - if err != nil { - return "", d.newSyntaxError("invalid octal escape code %q in string", in[:1+n]) - } - in, out = in[1+n:], append(out, byte(v)) - case 'x': - // One or two hexadecimal characters. 
- n := len(in[2:]) - len(bytes.TrimLeft(in[2:], "0123456789abcdefABCDEF")) - if n > 2 { - n = 2 - } - v, err := strconv.ParseUint(string(in[2:2+n]), 16, 8) - if err != nil { - return "", d.newSyntaxError("invalid hex escape code %q in string", in[:2+n]) - } - in, out = in[2+n:], append(out, byte(v)) - case 'u', 'U': - // Four or eight hexadecimal characters - n := 6 - if r == 'U' { - n = 10 - } - if len(in) < n { - return "", ErrUnexpectedEOF - } - v, err := strconv.ParseUint(string(in[2:n]), 16, 32) - if utf8.MaxRune < v || err != nil { - return "", d.newSyntaxError("invalid Unicode escape code %q in string", in[:n]) - } - in = in[n:] - - r := rune(v) - if utf16.IsSurrogate(r) { - if len(in) < 6 { - return "", ErrUnexpectedEOF - } - v, err := strconv.ParseUint(string(in[2:6]), 16, 16) - r = utf16.DecodeRune(r, rune(v)) - if in[0] != '\\' || in[1] != 'u' || r == unicode.ReplacementChar || err != nil { - return "", d.newSyntaxError("invalid Unicode escape code %q in string", in[:6]) - } - in = in[6:] - } - out = append(out, string(r)...) - default: - return "", d.newSyntaxError("invalid escape code %q in string", in[:2]) - } - default: - i := indexNeedEscapeInBytes(in[n:]) - in, out = in[n+i:], append(out, in[:n+i]...) - } - } - return "", ErrUnexpectedEOF -} - -// indexNeedEscapeInString returns the index of the character that needs -// escaping. If no characters need escaping, this returns the input length. -func indexNeedEscapeInBytes(b []byte) int { return indexNeedEscapeInString(strs.UnsafeString(b)) } - -// UnmarshalString returns an unescaped string given a textproto string value. -// String value needs to contain single or double quotes. This is only used by -// internal/encoding/defval package for unmarshaling bytes. -func UnmarshalString(s string) (string, error) { - d := NewDecoder([]byte(s)) - return d.parseString() -} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go deleted file mode 100644 index 83d2b0d5a..000000000 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go +++ /dev/null @@ -1,373 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package text - -import ( - "bytes" - "fmt" - "math" - "strconv" - "strings" - - "google.golang.org/protobuf/internal/flags" -) - -// Kind represents a token kind expressible in the textproto format. -type Kind uint8 - -// Kind values. -const ( - Invalid Kind = iota - EOF - Name // Name indicates the field name. - Scalar // Scalar are scalar values, e.g. "string", 47, ENUM_LITERAL, true. - MessageOpen - MessageClose - ListOpen - ListClose - - // comma and semi-colon are only for parsing in between values and should not be exposed. - comma - semicolon - - // bof indicates beginning of file, which is the default token - // kind at the beginning of parsing. - bof = Invalid -) - -func (t Kind) String() string { - switch t { - case Invalid: - return "" - case EOF: - return "eof" - case Scalar: - return "scalar" - case Name: - return "name" - case MessageOpen: - return "{" - case MessageClose: - return "}" - case ListOpen: - return "[" - case ListClose: - return "]" - case comma: - return "," - case semicolon: - return ";" - default: - return fmt.Sprintf("", uint8(t)) - } -} - -// NameKind represents different types of field names. -type NameKind uint8 - -// NameKind values. 
-const ( - IdentName NameKind = iota + 1 - TypeName - FieldNumber -) - -func (t NameKind) String() string { - switch t { - case IdentName: - return "IdentName" - case TypeName: - return "TypeName" - case FieldNumber: - return "FieldNumber" - default: - return fmt.Sprintf("", uint8(t)) - } -} - -// Bit mask in Token.attrs to indicate if a Name token is followed by the -// separator char ':'. The field name separator char is optional for message -// field or repeated message field, but required for all other types. Decoder -// simply indicates whether a Name token is followed by separator or not. It is -// up to the prototext package to validate. -const hasSeparator = 1 << 7 - -// Scalar value types. -const ( - numberValue = iota + 1 - stringValue - literalValue -) - -// Bit mask in Token.numAttrs to indicate that the number is a negative. -const isNegative = 1 << 7 - -// Token provides a parsed token kind and value. Values are provided by the -// different accessor methods. -type Token struct { - // Kind of the Token object. - kind Kind - // attrs contains metadata for the following Kinds: - // Name: hasSeparator bit and one of NameKind. - // Scalar: one of numberValue, stringValue, literalValue. - attrs uint8 - // numAttrs contains metadata for numberValue: - // - highest bit is whether negative or positive. - // - lower bits indicate one of numDec, numHex, numOct, numFloat. - numAttrs uint8 - // pos provides the position of the token in the original input. - pos int - // raw bytes of the serialized token. - // This is a subslice into the original input. - raw []byte - // str contains parsed string for the following: - // - stringValue of Scalar kind - // - numberValue of Scalar kind - // - TypeName of Name kind - str string -} - -// Kind returns the token kind. -func (t Token) Kind() Kind { - return t.kind -} - -// RawString returns the read value in string. -func (t Token) RawString() string { - return string(t.raw) -} - -// Pos returns the token position from the input. -func (t Token) Pos() int { - return t.pos -} - -// NameKind returns IdentName, TypeName or FieldNumber. -// It panics if type is not Name. -func (t Token) NameKind() NameKind { - if t.kind == Name { - return NameKind(t.attrs &^ hasSeparator) - } - panic(fmt.Sprintf("Token is not a Name type: %s", t.kind)) -} - -// HasSeparator returns true if the field name is followed by the separator char -// ':', else false. It panics if type is not Name. -func (t Token) HasSeparator() bool { - if t.kind == Name { - return t.attrs&hasSeparator != 0 - } - panic(fmt.Sprintf("Token is not a Name type: %s", t.kind)) -} - -// IdentName returns the value for IdentName type. -func (t Token) IdentName() string { - if t.kind == Name && t.attrs&uint8(IdentName) != 0 { - return string(t.raw) - } - panic(fmt.Sprintf("Token is not an IdentName: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator))) -} - -// TypeName returns the value for TypeName type. -func (t Token) TypeName() string { - if t.kind == Name && t.attrs&uint8(TypeName) != 0 { - return t.str - } - panic(fmt.Sprintf("Token is not a TypeName: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator))) -} - -// FieldNumber returns the value for FieldNumber type. It returns a -// non-negative int32 value. Caller will still need to validate for the correct -// field number range. 
-func (t Token) FieldNumber() int32 { - if t.kind != Name || t.attrs&uint8(FieldNumber) == 0 { - panic(fmt.Sprintf("Token is not a FieldNumber: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator))) - } - // Following should not return an error as it had already been called right - // before this Token was constructed. - num, _ := strconv.ParseInt(string(t.raw), 10, 32) - return int32(num) -} - -// String returns the string value for a Scalar type. -func (t Token) String() (string, bool) { - if t.kind != Scalar || t.attrs != stringValue { - return "", false - } - return t.str, true -} - -// Enum returns the literal value for a Scalar type for use as enum literals. -func (t Token) Enum() (string, bool) { - if t.kind != Scalar || t.attrs != literalValue || (len(t.raw) > 0 && t.raw[0] == '-') { - return "", false - } - return string(t.raw), true -} - -// Bool returns the bool value for a Scalar type. -func (t Token) Bool() (bool, bool) { - if t.kind != Scalar { - return false, false - } - switch t.attrs { - case literalValue: - if b, ok := boolLits[string(t.raw)]; ok { - return b, true - } - case numberValue: - // Unsigned integer representation of 0 or 1 is permitted: 00, 0x0, 01, - // 0x1, etc. - n, err := strconv.ParseUint(t.str, 0, 64) - if err == nil { - switch n { - case 0: - return false, true - case 1: - return true, true - } - } - } - return false, false -} - -// These exact boolean literals are the ones supported in C++. -var boolLits = map[string]bool{ - "t": true, - "true": true, - "True": true, - "f": false, - "false": false, - "False": false, -} - -// Uint64 returns the uint64 value for a Scalar type. -func (t Token) Uint64() (uint64, bool) { - if t.kind != Scalar || t.attrs != numberValue || - t.numAttrs&isNegative > 0 || t.numAttrs&numFloat > 0 { - return 0, false - } - n, err := strconv.ParseUint(t.str, 0, 64) - if err != nil { - return 0, false - } - return n, true -} - -// Uint32 returns the uint32 value for a Scalar type. -func (t Token) Uint32() (uint32, bool) { - if t.kind != Scalar || t.attrs != numberValue || - t.numAttrs&isNegative > 0 || t.numAttrs&numFloat > 0 { - return 0, false - } - n, err := strconv.ParseUint(t.str, 0, 32) - if err != nil { - return 0, false - } - return uint32(n), true -} - -// Int64 returns the int64 value for a Scalar type. -func (t Token) Int64() (int64, bool) { - if t.kind != Scalar || t.attrs != numberValue || t.numAttrs&numFloat > 0 { - return 0, false - } - if n, err := strconv.ParseInt(t.str, 0, 64); err == nil { - return n, true - } - // C++ accepts large positive hex numbers as negative values. - // This feature is here for proto1 backwards compatibility purposes. - if flags.ProtoLegacy && (t.numAttrs == numHex) { - if n, err := strconv.ParseUint(t.str, 0, 64); err == nil { - return int64(n), true - } - } - return 0, false -} - -// Int32 returns the int32 value for a Scalar type. -func (t Token) Int32() (int32, bool) { - if t.kind != Scalar || t.attrs != numberValue || t.numAttrs&numFloat > 0 { - return 0, false - } - if n, err := strconv.ParseInt(t.str, 0, 32); err == nil { - return int32(n), true - } - // C++ accepts large positive hex numbers as negative values. - // This feature is here for proto1 backwards compatibility purposes. - if flags.ProtoLegacy && (t.numAttrs == numHex) { - if n, err := strconv.ParseUint(t.str, 0, 32); err == nil { - return int32(n), true - } - } - return 0, false -} - -// Float64 returns the float64 value for a Scalar type. 
-func (t Token) Float64() (float64, bool) { - if t.kind != Scalar { - return 0, false - } - switch t.attrs { - case literalValue: - if f, ok := floatLits[strings.ToLower(string(t.raw))]; ok { - return f, true - } - case numberValue: - n, err := strconv.ParseFloat(t.str, 64) - if err == nil { - return n, true - } - nerr := err.(*strconv.NumError) - if nerr.Err == strconv.ErrRange { - return n, true - } - } - return 0, false -} - -// Float32 returns the float32 value for a Scalar type. -func (t Token) Float32() (float32, bool) { - if t.kind != Scalar { - return 0, false - } - switch t.attrs { - case literalValue: - if f, ok := floatLits[strings.ToLower(string(t.raw))]; ok { - return float32(f), true - } - case numberValue: - n, err := strconv.ParseFloat(t.str, 64) - if err == nil { - // Overflows are treated as (-)infinity. - return float32(n), true - } - nerr := err.(*strconv.NumError) - if nerr.Err == strconv.ErrRange { - return float32(n), true - } - } - return 0, false -} - -// These are the supported float literals which C++ permits case-insensitive -// variants of these. -var floatLits = map[string]float64{ - "nan": math.NaN(), - "inf": math.Inf(1), - "infinity": math.Inf(1), - "-inf": math.Inf(-1), - "-infinity": math.Inf(-1), -} - -// TokenEquals returns true if given Tokens are equal, else false. -func TokenEquals(x, y Token) bool { - return x.kind == y.kind && - x.attrs == y.attrs && - x.numAttrs == y.numAttrs && - x.pos == y.pos && - bytes.Equal(x.raw, y.raw) && - x.str == y.str -} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go b/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go deleted file mode 100644 index 0ce8d6fb8..000000000 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package text implements the text format for protocol buffers. -// This package has no semantic understanding for protocol buffers and is only -// a parser and composer for the format. -// -// There is no formal specification for the protobuf text format, as such the -// C++ implementation (see google::protobuf::TextFormat) is the reference -// implementation of the text format. -// -// This package is neither a superset nor a subset of the C++ implementation. -// This implementation permits a more liberal grammar in some cases to be -// backwards compatible with the historical Go implementation. -// Future parsings unique to Go should not be added. -// Some grammars allowed by the C++ implementation are deliberately -// not implemented here because they are considered a bug by the protobuf team -// and should not be replicated. -// -// The Go implementation should implement a sufficient amount of the C++ -// grammar such that the default text serialization by C++ can be parsed by Go. -// However, just because the C++ parser accepts some input does not mean that -// the Go implementation should as well. 
-// -// The text format is almost a superset of JSON except: -// * message keys are not quoted strings, but identifiers -// * the top-level value must be a message without the delimiters -package text diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go deleted file mode 100644 index da289ccce..000000000 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go +++ /dev/null @@ -1,270 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package text - -import ( - "math" - "math/bits" - "strconv" - "strings" - "unicode/utf8" - - "google.golang.org/protobuf/internal/detrand" - "google.golang.org/protobuf/internal/errors" -) - -// encType represents an encoding type. -type encType uint8 - -const ( - _ encType = (1 << iota) / 2 - name - scalar - messageOpen - messageClose -) - -// Encoder provides methods to write out textproto constructs and values. The user is -// responsible for producing valid sequences of constructs and values. -type Encoder struct { - encoderState - - indent string - delims [2]byte - outputASCII bool -} - -type encoderState struct { - lastType encType - indents []byte - out []byte -} - -// NewEncoder returns an Encoder. -// -// If indent is a non-empty string, it causes every entry in a List or Message -// to be preceded by the indent and trailed by a newline. -// -// If delims is not the zero value, it controls the delimiter characters used -// for messages (e.g., "{}" vs "<>"). -// -// If outputASCII is true, strings will be serialized in such a way that -// multi-byte UTF-8 sequences are escaped. This property ensures that the -// overall output is ASCII (as opposed to UTF-8). -func NewEncoder(indent string, delims [2]byte, outputASCII bool) (*Encoder, error) { - e := &Encoder{} - if len(indent) > 0 { - if strings.Trim(indent, " \t") != "" { - return nil, errors.New("indent may only be composed of space and tab characters") - } - e.indent = indent - } - switch delims { - case [2]byte{0, 0}: - e.delims = [2]byte{'{', '}'} - case [2]byte{'{', '}'}, [2]byte{'<', '>'}: - e.delims = delims - default: - return nil, errors.New("delimiters may only be \"{}\" or \"<>\"") - } - e.outputASCII = outputASCII - - return e, nil -} - -// Bytes returns the content of the written bytes. -func (e *Encoder) Bytes() []byte { - return e.out -} - -// StartMessage writes out the '{' or '<' symbol. -func (e *Encoder) StartMessage() { - e.prepareNext(messageOpen) - e.out = append(e.out, e.delims[0]) -} - -// EndMessage writes out the '}' or '>' symbol. -func (e *Encoder) EndMessage() { - e.prepareNext(messageClose) - e.out = append(e.out, e.delims[1]) -} - -// WriteName writes out the field name and the separator ':'. -func (e *Encoder) WriteName(s string) { - e.prepareNext(name) - e.out = append(e.out, s...) - e.out = append(e.out, ':') -} - -// WriteBool writes out the given boolean value. -func (e *Encoder) WriteBool(b bool) { - if b { - e.WriteLiteral("true") - } else { - e.WriteLiteral("false") - } -} - -// WriteString writes out the given string value. -func (e *Encoder) WriteString(s string) { - e.prepareNext(scalar) - e.out = appendString(e.out, s, e.outputASCII) -} - -func appendString(out []byte, in string, outputASCII bool) []byte { - out = append(out, '"') - i := indexNeedEscapeInString(in) - in, out = in[i:], append(out, in[:i]...) 
- for len(in) > 0 { - switch r, n := utf8.DecodeRuneInString(in); { - case r == utf8.RuneError && n == 1: - // We do not report invalid UTF-8 because strings in the text format - // are used to represent both the proto string and bytes type. - r = rune(in[0]) - fallthrough - case r < ' ' || r == '"' || r == '\\' || r == 0x7f: - out = append(out, '\\') - switch r { - case '"', '\\': - out = append(out, byte(r)) - case '\n': - out = append(out, 'n') - case '\r': - out = append(out, 'r') - case '\t': - out = append(out, 't') - default: - out = append(out, 'x') - out = append(out, "00"[1+(bits.Len32(uint32(r))-1)/4:]...) - out = strconv.AppendUint(out, uint64(r), 16) - } - in = in[n:] - case r >= utf8.RuneSelf && (outputASCII || r <= 0x009f): - out = append(out, '\\') - if r <= math.MaxUint16 { - out = append(out, 'u') - out = append(out, "0000"[1+(bits.Len32(uint32(r))-1)/4:]...) - out = strconv.AppendUint(out, uint64(r), 16) - } else { - out = append(out, 'U') - out = append(out, "00000000"[1+(bits.Len32(uint32(r))-1)/4:]...) - out = strconv.AppendUint(out, uint64(r), 16) - } - in = in[n:] - default: - i := indexNeedEscapeInString(in[n:]) - in, out = in[n+i:], append(out, in[:n+i]...) - } - } - out = append(out, '"') - return out -} - -// indexNeedEscapeInString returns the index of the character that needs -// escaping. If no characters need escaping, this returns the input length. -func indexNeedEscapeInString(s string) int { - for i := 0; i < len(s); i++ { - if c := s[i]; c < ' ' || c == '"' || c == '\'' || c == '\\' || c >= 0x7f { - return i - } - } - return len(s) -} - -// WriteFloat writes out the given float value for given bitSize. -func (e *Encoder) WriteFloat(n float64, bitSize int) { - e.prepareNext(scalar) - e.out = appendFloat(e.out, n, bitSize) -} - -func appendFloat(out []byte, n float64, bitSize int) []byte { - switch { - case math.IsNaN(n): - return append(out, "nan"...) - case math.IsInf(n, +1): - return append(out, "inf"...) - case math.IsInf(n, -1): - return append(out, "-inf"...) - default: - return strconv.AppendFloat(out, n, 'g', -1, bitSize) - } -} - -// WriteInt writes out the given signed integer value. -func (e *Encoder) WriteInt(n int64) { - e.prepareNext(scalar) - e.out = append(e.out, strconv.FormatInt(n, 10)...) -} - -// WriteUint writes out the given unsigned integer value. -func (e *Encoder) WriteUint(n uint64) { - e.prepareNext(scalar) - e.out = append(e.out, strconv.FormatUint(n, 10)...) -} - -// WriteLiteral writes out the given string as a literal value without quotes. -// This is used for writing enum literal strings. -func (e *Encoder) WriteLiteral(s string) { - e.prepareNext(scalar) - e.out = append(e.out, s...) -} - -// prepareNext adds possible space and indentation for the next value based -// on last encType and indent option. It also updates e.lastType to next. -func (e *Encoder) prepareNext(next encType) { - defer func() { - e.lastType = next - }() - - // Single line. - if len(e.indent) == 0 { - // Add space after each field before the next one. - if e.lastType&(scalar|messageClose) != 0 && next == name { - e.out = append(e.out, ' ') - // Add a random extra space to make output unstable. - if detrand.Bool() { - e.out = append(e.out, ' ') - } - } - return - } - - // Multi-line. - switch { - case e.lastType == name: - e.out = append(e.out, ' ') - // Add a random extra space after name: to make output unstable. 
- if detrand.Bool() { - e.out = append(e.out, ' ') - } - - case e.lastType == messageOpen && next != messageClose: - e.indents = append(e.indents, e.indent...) - e.out = append(e.out, '\n') - e.out = append(e.out, e.indents...) - - case e.lastType&(scalar|messageClose) != 0: - if next == messageClose { - e.indents = e.indents[:len(e.indents)-len(e.indent)] - } - e.out = append(e.out, '\n') - e.out = append(e.out, e.indents...) - } -} - -// Snapshot returns the current snapshot for use in Reset. -func (e *Encoder) Snapshot() encoderState { - return e.encoderState -} - -// Reset resets the Encoder to the given encoderState from a Snapshot. -func (e *Encoder) Reset(es encoderState) { - e.encoderState = es -} - -// AppendString appends the escaped form of the input string to b. -func AppendString(b []byte, s string) []byte { - return appendString(b, s, false) -} diff --git a/vendor/google.golang.org/protobuf/internal/errors/errors.go b/vendor/google.golang.org/protobuf/internal/errors/errors.go deleted file mode 100644 index 20c17b35e..000000000 --- a/vendor/google.golang.org/protobuf/internal/errors/errors.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package errors implements functions to manipulate errors. -package errors - -import ( - "errors" - "fmt" - - "google.golang.org/protobuf/internal/detrand" -) - -// Error is a sentinel matching all errors produced by this package. -var Error = errors.New("protobuf error") - -// New formats a string according to the format specifier and arguments and -// returns an error that has a "proto" prefix. -func New(f string, x ...interface{}) error { - return &prefixError{s: format(f, x...)} -} - -type prefixError struct{ s string } - -var prefix = func() string { - // Deliberately introduce instability into the error message string to - // discourage users from performing error string comparisons. - if detrand.Bool() { - return "proto: " // use non-breaking spaces (U+00a0) - } else { - return "proto: " // use regular spaces (U+0020) - } -}() - -func (e *prefixError) Error() string { - return prefix + e.s -} - -func (e *prefixError) Unwrap() error { - return Error -} - -// Wrap returns an error that has a "proto" prefix, the formatted string described -// by the format specifier and arguments, and a suffix of err. The error wraps err. -func Wrap(err error, f string, x ...interface{}) error { - return &wrapError{ - s: format(f, x...), - err: err, - } -} - -type wrapError struct { - s string - err error -} - -func (e *wrapError) Error() string { - return format("%v%v: %v", prefix, e.s, e.err) -} - -func (e *wrapError) Unwrap() error { - return e.err -} - -func (e *wrapError) Is(target error) bool { - return target == Error -} - -func format(f string, x ...interface{}) string { - // avoid "proto: " prefix when chaining - for i := 0; i < len(x); i++ { - switch e := x[i].(type) { - case *prefixError: - x[i] = e.s - case *wrapError: - x[i] = format("%v: %v", e.s, e.err) - } - } - return fmt.Sprintf(f, x...) 
-} - -func InvalidUTF8(name string) error { - return New("field %v contains invalid UTF-8", name) -} - -func RequiredNotSet(name string) error { - return New("required field %v not set", name) -} diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go deleted file mode 100644 index fbcd34920..000000000 --- a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.13 -// +build !go1.13 - -package errors - -import "reflect" - -// Is is a copy of Go 1.13's errors.Is for use with older Go versions. -func Is(err, target error) bool { - if target == nil { - return err == target - } - - isComparable := reflect.TypeOf(target).Comparable() - for { - if isComparable && err == target { - return true - } - if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) { - return true - } - if err = unwrap(err); err == nil { - return false - } - } -} - -func unwrap(err error) error { - u, ok := err.(interface { - Unwrap() error - }) - if !ok { - return nil - } - return u.Unwrap() -} diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go deleted file mode 100644 index 5e72f1cde..000000000 --- a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.13 -// +build go1.13 - -package errors - -import "errors" - -// Is is errors.Is. -func Is(err, target error) bool { return errors.Is(err, target) } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/build.go b/vendor/google.golang.org/protobuf/internal/filedesc/build.go deleted file mode 100644 index b293b6947..000000000 --- a/vendor/google.golang.org/protobuf/internal/filedesc/build.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package filedesc provides functionality for constructing descriptors. -// -// The types in this package implement interfaces in the protoreflect package -// related to protobuf descripriptors. -package filedesc - -import ( - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/genid" - "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" -) - -// Builder construct a protoreflect.FileDescriptor from the raw descriptor. -type Builder struct { - // GoPackagePath is the Go package path that is invoking this builder. - GoPackagePath string - - // RawDescriptor is the wire-encoded bytes of FileDescriptorProto - // and must be populated. - RawDescriptor []byte - - // NumEnums is the total number of enums declared in the file. - NumEnums int32 - // NumMessages is the total number of messages declared in the file. - // It includes the implicit message declarations for map entries. - NumMessages int32 - // NumExtensions is the total number of extensions declared in the file. 
- NumExtensions int32 - // NumServices is the total number of services declared in the file. - NumServices int32 - - // TypeResolver resolves extension field types for descriptor options. - // If nil, it uses protoregistry.GlobalTypes. - TypeResolver interface { - preg.ExtensionTypeResolver - } - - // FileRegistry is use to lookup file, enum, and message dependencies. - // Once constructed, the file descriptor is registered here. - // If nil, it uses protoregistry.GlobalFiles. - FileRegistry interface { - FindFileByPath(string) (protoreflect.FileDescriptor, error) - FindDescriptorByName(pref.FullName) (pref.Descriptor, error) - RegisterFile(pref.FileDescriptor) error - } -} - -// resolverByIndex is an interface Builder.FileRegistry may implement. -// If so, it permits looking up an enum or message dependency based on the -// sub-list and element index into filetype.Builder.DependencyIndexes. -type resolverByIndex interface { - FindEnumByIndex(int32, int32, []Enum, []Message) pref.EnumDescriptor - FindMessageByIndex(int32, int32, []Enum, []Message) pref.MessageDescriptor -} - -// Indexes of each sub-list in filetype.Builder.DependencyIndexes. -const ( - listFieldDeps int32 = iota - listExtTargets - listExtDeps - listMethInDeps - listMethOutDeps -) - -// Out is the output of the Builder. -type Out struct { - File pref.FileDescriptor - - // Enums is all enum descriptors in "flattened ordering". - Enums []Enum - // Messages is all message descriptors in "flattened ordering". - // It includes the implicit message declarations for map entries. - Messages []Message - // Extensions is all extension descriptors in "flattened ordering". - Extensions []Extension - // Service is all service descriptors in "flattened ordering". - Services []Service -} - -// Build constructs a FileDescriptor given the parameters set in Builder. -// It assumes that the inputs are well-formed and panics if any inconsistencies -// are encountered. -// -// If NumEnums+NumMessages+NumExtensions+NumServices is zero, -// then Build automatically derives them from the raw descriptor. -func (db Builder) Build() (out Out) { - // Populate the counts if uninitialized. - if db.NumEnums+db.NumMessages+db.NumExtensions+db.NumServices == 0 { - db.unmarshalCounts(db.RawDescriptor, true) - } - - // Initialize resolvers and registries if unpopulated. - if db.TypeResolver == nil { - db.TypeResolver = preg.GlobalTypes - } - if db.FileRegistry == nil { - db.FileRegistry = preg.GlobalFiles - } - - fd := newRawFile(db) - out.File = fd - out.Enums = fd.allEnums - out.Messages = fd.allMessages - out.Extensions = fd.allExtensions - out.Services = fd.allServices - - if err := db.FileRegistry.RegisterFile(fd); err != nil { - panic(err) - } - return out -} - -// unmarshalCounts counts the number of enum, message, extension, and service -// declarations in the raw message, which is either a FileDescriptorProto -// or a MessageDescriptorProto depending on whether isFile is set. 
-func (db *Builder) unmarshalCounts(b []byte, isFile bool) { - for len(b) > 0 { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.BytesType: - v, m := protowire.ConsumeBytes(b) - b = b[m:] - if isFile { - switch num { - case genid.FileDescriptorProto_EnumType_field_number: - db.NumEnums++ - case genid.FileDescriptorProto_MessageType_field_number: - db.unmarshalCounts(v, false) - db.NumMessages++ - case genid.FileDescriptorProto_Extension_field_number: - db.NumExtensions++ - case genid.FileDescriptorProto_Service_field_number: - db.NumServices++ - } - } else { - switch num { - case genid.DescriptorProto_EnumType_field_number: - db.NumEnums++ - case genid.DescriptorProto_NestedType_field_number: - db.unmarshalCounts(v, false) - db.NumMessages++ - case genid.DescriptorProto_Extension_field_number: - db.NumExtensions++ - } - } - default: - m := protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - } - } -} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go deleted file mode 100644 index 98ab142ae..000000000 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ /dev/null @@ -1,631 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package filedesc - -import ( - "bytes" - "fmt" - "sync" - "sync/atomic" - - "google.golang.org/protobuf/internal/descfmt" - "google.golang.org/protobuf/internal/descopts" - "google.golang.org/protobuf/internal/encoding/defval" - "google.golang.org/protobuf/internal/encoding/messageset" - "google.golang.org/protobuf/internal/genid" - "google.golang.org/protobuf/internal/pragma" - "google.golang.org/protobuf/internal/strs" - pref "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -// The types in this file may have a suffix: -// • L0: Contains fields common to all descriptors (except File) and -// must be initialized up front. -// • L1: Contains fields specific to a descriptor and -// must be initialized up front. -// • L2: Contains fields that are lazily initialized when constructing -// from the raw file descriptor. When constructing as a literal, the L2 -// fields must be initialized up front. -// -// The types are exported so that packages like reflect/protodesc can -// directly construct descriptors. 
- -type ( - File struct { - fileRaw - L1 FileL1 - - once uint32 // atomically set if L2 is valid - mu sync.Mutex // protects L2 - L2 *FileL2 - } - FileL1 struct { - Syntax pref.Syntax - Path string - Package pref.FullName - - Enums Enums - Messages Messages - Extensions Extensions - Services Services - } - FileL2 struct { - Options func() pref.ProtoMessage - Imports FileImports - Locations SourceLocations - } -) - -func (fd *File) ParentFile() pref.FileDescriptor { return fd } -func (fd *File) Parent() pref.Descriptor { return nil } -func (fd *File) Index() int { return 0 } -func (fd *File) Syntax() pref.Syntax { return fd.L1.Syntax } -func (fd *File) Name() pref.Name { return fd.L1.Package.Name() } -func (fd *File) FullName() pref.FullName { return fd.L1.Package } -func (fd *File) IsPlaceholder() bool { return false } -func (fd *File) Options() pref.ProtoMessage { - if f := fd.lazyInit().Options; f != nil { - return f() - } - return descopts.File -} -func (fd *File) Path() string { return fd.L1.Path } -func (fd *File) Package() pref.FullName { return fd.L1.Package } -func (fd *File) Imports() pref.FileImports { return &fd.lazyInit().Imports } -func (fd *File) Enums() pref.EnumDescriptors { return &fd.L1.Enums } -func (fd *File) Messages() pref.MessageDescriptors { return &fd.L1.Messages } -func (fd *File) Extensions() pref.ExtensionDescriptors { return &fd.L1.Extensions } -func (fd *File) Services() pref.ServiceDescriptors { return &fd.L1.Services } -func (fd *File) SourceLocations() pref.SourceLocations { return &fd.lazyInit().Locations } -func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } -func (fd *File) ProtoType(pref.FileDescriptor) {} -func (fd *File) ProtoInternal(pragma.DoNotImplement) {} - -func (fd *File) lazyInit() *FileL2 { - if atomic.LoadUint32(&fd.once) == 0 { - fd.lazyInitOnce() - } - return fd.L2 -} - -func (fd *File) lazyInitOnce() { - fd.mu.Lock() - if fd.L2 == nil { - fd.lazyRawInit() // recursively initializes all L2 structures - } - atomic.StoreUint32(&fd.once, 1) - fd.mu.Unlock() -} - -// GoPackagePath is a pseudo-internal API for determining the Go package path -// that this file descriptor is declared in. -// -// WARNING: This method is exempt from the compatibility promise and may be -// removed in the future without warning. 
-func (fd *File) GoPackagePath() string { - return fd.builder.GoPackagePath -} - -type ( - Enum struct { - Base - L1 EnumL1 - L2 *EnumL2 // protected by fileDesc.once - } - EnumL1 struct { - eagerValues bool // controls whether EnumL2.Values is already populated - } - EnumL2 struct { - Options func() pref.ProtoMessage - Values EnumValues - ReservedNames Names - ReservedRanges EnumRanges - } - - EnumValue struct { - Base - L1 EnumValueL1 - } - EnumValueL1 struct { - Options func() pref.ProtoMessage - Number pref.EnumNumber - } -) - -func (ed *Enum) Options() pref.ProtoMessage { - if f := ed.lazyInit().Options; f != nil { - return f() - } - return descopts.Enum -} -func (ed *Enum) Values() pref.EnumValueDescriptors { - if ed.L1.eagerValues { - return &ed.L2.Values - } - return &ed.lazyInit().Values -} -func (ed *Enum) ReservedNames() pref.Names { return &ed.lazyInit().ReservedNames } -func (ed *Enum) ReservedRanges() pref.EnumRanges { return &ed.lazyInit().ReservedRanges } -func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } -func (ed *Enum) ProtoType(pref.EnumDescriptor) {} -func (ed *Enum) lazyInit() *EnumL2 { - ed.L0.ParentFile.lazyInit() // implicitly initializes L2 - return ed.L2 -} - -func (ed *EnumValue) Options() pref.ProtoMessage { - if f := ed.L1.Options; f != nil { - return f() - } - return descopts.EnumValue -} -func (ed *EnumValue) Number() pref.EnumNumber { return ed.L1.Number } -func (ed *EnumValue) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } -func (ed *EnumValue) ProtoType(pref.EnumValueDescriptor) {} - -type ( - Message struct { - Base - L1 MessageL1 - L2 *MessageL2 // protected by fileDesc.once - } - MessageL1 struct { - Enums Enums - Messages Messages - Extensions Extensions - IsMapEntry bool // promoted from google.protobuf.MessageOptions - IsMessageSet bool // promoted from google.protobuf.MessageOptions - } - MessageL2 struct { - Options func() pref.ProtoMessage - Fields Fields - Oneofs Oneofs - ReservedNames Names - ReservedRanges FieldRanges - RequiredNumbers FieldNumbers // must be consistent with Fields.Cardinality - ExtensionRanges FieldRanges - ExtensionRangeOptions []func() pref.ProtoMessage // must be same length as ExtensionRanges - } - - Field struct { - Base - L1 FieldL1 - } - FieldL1 struct { - Options func() pref.ProtoMessage - Number pref.FieldNumber - Cardinality pref.Cardinality // must be consistent with Message.RequiredNumbers - Kind pref.Kind - StringName stringName - IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto - IsWeak bool // promoted from google.protobuf.FieldOptions - HasPacked bool // promoted from google.protobuf.FieldOptions - IsPacked bool // promoted from google.protobuf.FieldOptions - HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions - EnforceUTF8 bool // promoted from google.protobuf.FieldOptions - Default defaultValue - ContainingOneof pref.OneofDescriptor // must be consistent with Message.Oneofs.Fields - Enum pref.EnumDescriptor - Message pref.MessageDescriptor - } - - Oneof struct { - Base - L1 OneofL1 - } - OneofL1 struct { - Options func() pref.ProtoMessage - Fields OneofFields // must be consistent with Message.Fields.ContainingOneof - } -) - -func (md *Message) Options() pref.ProtoMessage { - if f := md.lazyInit().Options; f != nil { - return f() - } - return descopts.Message -} -func (md *Message) IsMapEntry() bool { return md.L1.IsMapEntry } -func (md *Message) Fields() pref.FieldDescriptors { return &md.lazyInit().Fields } -func (md 
*Message) Oneofs() pref.OneofDescriptors { return &md.lazyInit().Oneofs } -func (md *Message) ReservedNames() pref.Names { return &md.lazyInit().ReservedNames } -func (md *Message) ReservedRanges() pref.FieldRanges { return &md.lazyInit().ReservedRanges } -func (md *Message) RequiredNumbers() pref.FieldNumbers { return &md.lazyInit().RequiredNumbers } -func (md *Message) ExtensionRanges() pref.FieldRanges { return &md.lazyInit().ExtensionRanges } -func (md *Message) ExtensionRangeOptions(i int) pref.ProtoMessage { - if f := md.lazyInit().ExtensionRangeOptions[i]; f != nil { - return f() - } - return descopts.ExtensionRange -} -func (md *Message) Enums() pref.EnumDescriptors { return &md.L1.Enums } -func (md *Message) Messages() pref.MessageDescriptors { return &md.L1.Messages } -func (md *Message) Extensions() pref.ExtensionDescriptors { return &md.L1.Extensions } -func (md *Message) ProtoType(pref.MessageDescriptor) {} -func (md *Message) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } -func (md *Message) lazyInit() *MessageL2 { - md.L0.ParentFile.lazyInit() // implicitly initializes L2 - return md.L2 -} - -// IsMessageSet is a pseudo-internal API for checking whether a message -// should serialize in the proto1 message format. -// -// WARNING: This method is exempt from the compatibility promise and may be -// removed in the future without warning. -func (md *Message) IsMessageSet() bool { - return md.L1.IsMessageSet -} - -func (fd *Field) Options() pref.ProtoMessage { - if f := fd.L1.Options; f != nil { - return f() - } - return descopts.Field -} -func (fd *Field) Number() pref.FieldNumber { return fd.L1.Number } -func (fd *Field) Cardinality() pref.Cardinality { return fd.L1.Cardinality } -func (fd *Field) Kind() pref.Kind { return fd.L1.Kind } -func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } -func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } -func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } -func (fd *Field) HasPresence() bool { - return fd.L1.Cardinality != pref.Repeated && (fd.L0.ParentFile.L1.Syntax == pref.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) -} -func (fd *Field) HasOptionalKeyword() bool { - return (fd.L0.ParentFile.L1.Syntax == pref.Proto2 && fd.L1.Cardinality == pref.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional -} -func (fd *Field) IsPacked() bool { - if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != pref.Proto2 && fd.L1.Cardinality == pref.Repeated { - switch fd.L1.Kind { - case pref.StringKind, pref.BytesKind, pref.MessageKind, pref.GroupKind: - default: - return true - } - } - return fd.L1.IsPacked -} -func (fd *Field) IsExtension() bool { return false } -func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } -func (fd *Field) IsList() bool { return fd.Cardinality() == pref.Repeated && !fd.IsMap() } -func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } -func (fd *Field) MapKey() pref.FieldDescriptor { - if !fd.IsMap() { - return nil - } - return fd.Message().Fields().ByNumber(genid.MapEntry_Key_field_number) -} -func (fd *Field) MapValue() pref.FieldDescriptor { - if !fd.IsMap() { - return nil - } - return fd.Message().Fields().ByNumber(genid.MapEntry_Value_field_number) -} -func (fd *Field) HasDefault() bool { return fd.L1.Default.has } -func (fd *Field) Default() pref.Value { return fd.L1.Default.get(fd) } -func (fd *Field) DefaultEnumValue() pref.EnumValueDescriptor { return fd.L1.Default.enum } 
-func (fd *Field) ContainingOneof() pref.OneofDescriptor { return fd.L1.ContainingOneof } -func (fd *Field) ContainingMessage() pref.MessageDescriptor { - return fd.L0.Parent.(pref.MessageDescriptor) -} -func (fd *Field) Enum() pref.EnumDescriptor { - return fd.L1.Enum -} -func (fd *Field) Message() pref.MessageDescriptor { - if fd.L1.IsWeak { - if d, _ := protoregistry.GlobalFiles.FindDescriptorByName(fd.L1.Message.FullName()); d != nil { - return d.(pref.MessageDescriptor) - } - } - return fd.L1.Message -} -func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } -func (fd *Field) ProtoType(pref.FieldDescriptor) {} - -// EnforceUTF8 is a pseudo-internal API to determine whether to enforce UTF-8 -// validation for the string field. This exists for Google-internal use only -// since proto3 did not enforce UTF-8 validity prior to the open-source release. -// If this method does not exist, the default is to enforce valid UTF-8. -// -// WARNING: This method is exempt from the compatibility promise and may be -// removed in the future without warning. -func (fd *Field) EnforceUTF8() bool { - if fd.L1.HasEnforceUTF8 { - return fd.L1.EnforceUTF8 - } - return fd.L0.ParentFile.L1.Syntax == pref.Proto3 -} - -func (od *Oneof) IsSynthetic() bool { - return od.L0.ParentFile.L1.Syntax == pref.Proto3 && len(od.L1.Fields.List) == 1 && od.L1.Fields.List[0].HasOptionalKeyword() -} -func (od *Oneof) Options() pref.ProtoMessage { - if f := od.L1.Options; f != nil { - return f() - } - return descopts.Oneof -} -func (od *Oneof) Fields() pref.FieldDescriptors { return &od.L1.Fields } -func (od *Oneof) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, od) } -func (od *Oneof) ProtoType(pref.OneofDescriptor) {} - -type ( - Extension struct { - Base - L1 ExtensionL1 - L2 *ExtensionL2 // protected by fileDesc.once - } - ExtensionL1 struct { - Number pref.FieldNumber - Extendee pref.MessageDescriptor - Cardinality pref.Cardinality - Kind pref.Kind - } - ExtensionL2 struct { - Options func() pref.ProtoMessage - StringName stringName - IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto - IsPacked bool // promoted from google.protobuf.FieldOptions - Default defaultValue - Enum pref.EnumDescriptor - Message pref.MessageDescriptor - } -) - -func (xd *Extension) Options() pref.ProtoMessage { - if f := xd.lazyInit().Options; f != nil { - return f() - } - return descopts.Field -} -func (xd *Extension) Number() pref.FieldNumber { return xd.L1.Number } -func (xd *Extension) Cardinality() pref.Cardinality { return xd.L1.Cardinality } -func (xd *Extension) Kind() pref.Kind { return xd.L1.Kind } -func (xd *Extension) HasJSONName() bool { return xd.lazyInit().StringName.hasJSON } -func (xd *Extension) JSONName() string { return xd.lazyInit().StringName.getJSON(xd) } -func (xd *Extension) TextName() string { return xd.lazyInit().StringName.getText(xd) } -func (xd *Extension) HasPresence() bool { return xd.L1.Cardinality != pref.Repeated } -func (xd *Extension) HasOptionalKeyword() bool { - return (xd.L0.ParentFile.L1.Syntax == pref.Proto2 && xd.L1.Cardinality == pref.Optional) || xd.lazyInit().IsProto3Optional -} -func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked } -func (xd *Extension) IsExtension() bool { return true } -func (xd *Extension) IsWeak() bool { return false } -func (xd *Extension) IsList() bool { return xd.Cardinality() == pref.Repeated } -func (xd *Extension) IsMap() bool { return false } -func (xd *Extension) MapKey() pref.FieldDescriptor { return 
nil } -func (xd *Extension) MapValue() pref.FieldDescriptor { return nil } -func (xd *Extension) HasDefault() bool { return xd.lazyInit().Default.has } -func (xd *Extension) Default() pref.Value { return xd.lazyInit().Default.get(xd) } -func (xd *Extension) DefaultEnumValue() pref.EnumValueDescriptor { return xd.lazyInit().Default.enum } -func (xd *Extension) ContainingOneof() pref.OneofDescriptor { return nil } -func (xd *Extension) ContainingMessage() pref.MessageDescriptor { return xd.L1.Extendee } -func (xd *Extension) Enum() pref.EnumDescriptor { return xd.lazyInit().Enum } -func (xd *Extension) Message() pref.MessageDescriptor { return xd.lazyInit().Message } -func (xd *Extension) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, xd) } -func (xd *Extension) ProtoType(pref.FieldDescriptor) {} -func (xd *Extension) ProtoInternal(pragma.DoNotImplement) {} -func (xd *Extension) lazyInit() *ExtensionL2 { - xd.L0.ParentFile.lazyInit() // implicitly initializes L2 - return xd.L2 -} - -type ( - Service struct { - Base - L1 ServiceL1 - L2 *ServiceL2 // protected by fileDesc.once - } - ServiceL1 struct{} - ServiceL2 struct { - Options func() pref.ProtoMessage - Methods Methods - } - - Method struct { - Base - L1 MethodL1 - } - MethodL1 struct { - Options func() pref.ProtoMessage - Input pref.MessageDescriptor - Output pref.MessageDescriptor - IsStreamingClient bool - IsStreamingServer bool - } -) - -func (sd *Service) Options() pref.ProtoMessage { - if f := sd.lazyInit().Options; f != nil { - return f() - } - return descopts.Service -} -func (sd *Service) Methods() pref.MethodDescriptors { return &sd.lazyInit().Methods } -func (sd *Service) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, sd) } -func (sd *Service) ProtoType(pref.ServiceDescriptor) {} -func (sd *Service) ProtoInternal(pragma.DoNotImplement) {} -func (sd *Service) lazyInit() *ServiceL2 { - sd.L0.ParentFile.lazyInit() // implicitly initializes L2 - return sd.L2 -} - -func (md *Method) Options() pref.ProtoMessage { - if f := md.L1.Options; f != nil { - return f() - } - return descopts.Method -} -func (md *Method) Input() pref.MessageDescriptor { return md.L1.Input } -func (md *Method) Output() pref.MessageDescriptor { return md.L1.Output } -func (md *Method) IsStreamingClient() bool { return md.L1.IsStreamingClient } -func (md *Method) IsStreamingServer() bool { return md.L1.IsStreamingServer } -func (md *Method) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } -func (md *Method) ProtoType(pref.MethodDescriptor) {} -func (md *Method) ProtoInternal(pragma.DoNotImplement) {} - -// Surrogate files are can be used to create standalone descriptors -// where the syntax is only information derived from the parent file. 
-var ( - SurrogateProto2 = &File{L1: FileL1{Syntax: pref.Proto2}, L2: &FileL2{}} - SurrogateProto3 = &File{L1: FileL1{Syntax: pref.Proto3}, L2: &FileL2{}} -) - -type ( - Base struct { - L0 BaseL0 - } - BaseL0 struct { - FullName pref.FullName // must be populated - ParentFile *File // must be populated - Parent pref.Descriptor - Index int - } -) - -func (d *Base) Name() pref.Name { return d.L0.FullName.Name() } -func (d *Base) FullName() pref.FullName { return d.L0.FullName } -func (d *Base) ParentFile() pref.FileDescriptor { - if d.L0.ParentFile == SurrogateProto2 || d.L0.ParentFile == SurrogateProto3 { - return nil // surrogate files are not real parents - } - return d.L0.ParentFile -} -func (d *Base) Parent() pref.Descriptor { return d.L0.Parent } -func (d *Base) Index() int { return d.L0.Index } -func (d *Base) Syntax() pref.Syntax { return d.L0.ParentFile.Syntax() } -func (d *Base) IsPlaceholder() bool { return false } -func (d *Base) ProtoInternal(pragma.DoNotImplement) {} - -type stringName struct { - hasJSON bool - once sync.Once - nameJSON string - nameText string -} - -// InitJSON initializes the name. It is exported for use by other internal packages. -func (s *stringName) InitJSON(name string) { - s.hasJSON = true - s.nameJSON = name -} - -func (s *stringName) lazyInit(fd pref.FieldDescriptor) *stringName { - s.once.Do(func() { - if fd.IsExtension() { - // For extensions, JSON and text are formatted the same way. - var name string - if messageset.IsMessageSetExtension(fd) { - name = string("[" + fd.FullName().Parent() + "]") - } else { - name = string("[" + fd.FullName() + "]") - } - s.nameJSON = name - s.nameText = name - } else { - // Format the JSON name. - if !s.hasJSON { - s.nameJSON = strs.JSONCamelCase(string(fd.Name())) - } - - // Format the text name. - s.nameText = string(fd.Name()) - if fd.Kind() == pref.GroupKind { - s.nameText = string(fd.Message().Name()) - } - } - }) - return s -} - -func (s *stringName) getJSON(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameJSON } -func (s *stringName) getText(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameText } - -func DefaultValue(v pref.Value, ev pref.EnumValueDescriptor) defaultValue { - dv := defaultValue{has: v.IsValid(), val: v, enum: ev} - if b, ok := v.Interface().([]byte); ok { - // Store a copy of the default bytes, so that we can detect - // accidental mutations of the original value. - dv.bytes = append([]byte(nil), b...) - } - return dv -} - -func unmarshalDefault(b []byte, k pref.Kind, pf *File, ed pref.EnumDescriptor) defaultValue { - var evs pref.EnumValueDescriptors - if k == pref.EnumKind { - // If the enum is declared within the same file, be careful not to - // blindly call the Values method, lest we bind ourselves in a deadlock. - if e, ok := ed.(*Enum); ok && e.L0.ParentFile == pf { - evs = &e.L2.Values - } else { - evs = ed.Values() - } - - // If we are unable to resolve the enum dependency, use a placeholder - // enum value since we will not be able to parse the default value. 
- if ed.IsPlaceholder() && pref.Name(b).IsValid() { - v := pref.ValueOfEnum(0) - ev := PlaceholderEnumValue(ed.FullName().Parent().Append(pref.Name(b))) - return DefaultValue(v, ev) - } - } - - v, ev, err := defval.Unmarshal(string(b), k, evs, defval.Descriptor) - if err != nil { - panic(err) - } - return DefaultValue(v, ev) -} - -type defaultValue struct { - has bool - val pref.Value - enum pref.EnumValueDescriptor - bytes []byte -} - -func (dv *defaultValue) get(fd pref.FieldDescriptor) pref.Value { - // Return the zero value as the default if unpopulated. - if !dv.has { - if fd.Cardinality() == pref.Repeated { - return pref.Value{} - } - switch fd.Kind() { - case pref.BoolKind: - return pref.ValueOfBool(false) - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: - return pref.ValueOfInt32(0) - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: - return pref.ValueOfInt64(0) - case pref.Uint32Kind, pref.Fixed32Kind: - return pref.ValueOfUint32(0) - case pref.Uint64Kind, pref.Fixed64Kind: - return pref.ValueOfUint64(0) - case pref.FloatKind: - return pref.ValueOfFloat32(0) - case pref.DoubleKind: - return pref.ValueOfFloat64(0) - case pref.StringKind: - return pref.ValueOfString("") - case pref.BytesKind: - return pref.ValueOfBytes(nil) - case pref.EnumKind: - if evs := fd.Enum().Values(); evs.Len() > 0 { - return pref.ValueOfEnum(evs.Get(0).Number()) - } - return pref.ValueOfEnum(0) - } - } - - if len(dv.bytes) > 0 && !bytes.Equal(dv.bytes, dv.val.Bytes()) { - // TODO: Avoid panic if we're running with the race detector - // and instead spawn a goroutine that periodically resets - // this value back to the original to induce a race. - panic(fmt.Sprintf("detected mutation on the default bytes for %v", fd.FullName())) - } - return dv.val -} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go deleted file mode 100644 index 66e1fee52..000000000 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ /dev/null @@ -1,471 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package filedesc - -import ( - "sync" - - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/genid" - "google.golang.org/protobuf/internal/strs" - pref "google.golang.org/protobuf/reflect/protoreflect" -) - -// fileRaw is a data struct used when initializing a file descriptor from -// a raw FileDescriptorProto. -type fileRaw struct { - builder Builder - allEnums []Enum - allMessages []Message - allExtensions []Extension - allServices []Service -} - -func newRawFile(db Builder) *File { - fd := &File{fileRaw: fileRaw{builder: db}} - fd.initDecls(db.NumEnums, db.NumMessages, db.NumExtensions, db.NumServices) - fd.unmarshalSeed(db.RawDescriptor) - - // Extended message targets are eagerly resolved since registration - // needs this information at program init time. - for i := range fd.allExtensions { - xd := &fd.allExtensions[i] - xd.L1.Extendee = fd.resolveMessageDependency(xd.L1.Extendee, listExtTargets, int32(i)) - } - - fd.checkDecls() - return fd -} - -// initDecls pre-allocates slices for the exact number of enums, messages -// (including map entries), extensions, and services declared in the proto file. -// This is done to avoid regrowing the slice, which would change the address -// for any previously seen declaration. 
-// -// The alloc methods "allocates" slices by pulling from the capacity. -func (fd *File) initDecls(numEnums, numMessages, numExtensions, numServices int32) { - fd.allEnums = make([]Enum, 0, numEnums) - fd.allMessages = make([]Message, 0, numMessages) - fd.allExtensions = make([]Extension, 0, numExtensions) - fd.allServices = make([]Service, 0, numServices) -} - -func (fd *File) allocEnums(n int) []Enum { - total := len(fd.allEnums) - es := fd.allEnums[total : total+n] - fd.allEnums = fd.allEnums[:total+n] - return es -} -func (fd *File) allocMessages(n int) []Message { - total := len(fd.allMessages) - ms := fd.allMessages[total : total+n] - fd.allMessages = fd.allMessages[:total+n] - return ms -} -func (fd *File) allocExtensions(n int) []Extension { - total := len(fd.allExtensions) - xs := fd.allExtensions[total : total+n] - fd.allExtensions = fd.allExtensions[:total+n] - return xs -} -func (fd *File) allocServices(n int) []Service { - total := len(fd.allServices) - xs := fd.allServices[total : total+n] - fd.allServices = fd.allServices[:total+n] - return xs -} - -// checkDecls performs a sanity check that the expected number of expected -// declarations matches the number that were found in the descriptor proto. -func (fd *File) checkDecls() { - switch { - case len(fd.allEnums) != cap(fd.allEnums): - case len(fd.allMessages) != cap(fd.allMessages): - case len(fd.allExtensions) != cap(fd.allExtensions): - case len(fd.allServices) != cap(fd.allServices): - default: - return - } - panic("mismatching cardinality") -} - -func (fd *File) unmarshalSeed(b []byte) { - sb := getBuilder() - defer putBuilder(sb) - - var prevField pref.FieldNumber - var numEnums, numMessages, numExtensions, numServices int - var posEnums, posMessages, posExtensions, posServices int - b0 := b - for len(b) > 0 { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.BytesType: - v, m := protowire.ConsumeBytes(b) - b = b[m:] - switch num { - case genid.FileDescriptorProto_Syntax_field_number: - switch string(v) { - case "proto2": - fd.L1.Syntax = pref.Proto2 - case "proto3": - fd.L1.Syntax = pref.Proto3 - default: - panic("invalid syntax") - } - case genid.FileDescriptorProto_Name_field_number: - fd.L1.Path = sb.MakeString(v) - case genid.FileDescriptorProto_Package_field_number: - fd.L1.Package = pref.FullName(sb.MakeString(v)) - case genid.FileDescriptorProto_EnumType_field_number: - if prevField != genid.FileDescriptorProto_EnumType_field_number { - if numEnums > 0 { - panic("non-contiguous repeated field") - } - posEnums = len(b0) - len(b) - n - m - } - numEnums++ - case genid.FileDescriptorProto_MessageType_field_number: - if prevField != genid.FileDescriptorProto_MessageType_field_number { - if numMessages > 0 { - panic("non-contiguous repeated field") - } - posMessages = len(b0) - len(b) - n - m - } - numMessages++ - case genid.FileDescriptorProto_Extension_field_number: - if prevField != genid.FileDescriptorProto_Extension_field_number { - if numExtensions > 0 { - panic("non-contiguous repeated field") - } - posExtensions = len(b0) - len(b) - n - m - } - numExtensions++ - case genid.FileDescriptorProto_Service_field_number: - if prevField != genid.FileDescriptorProto_Service_field_number { - if numServices > 0 { - panic("non-contiguous repeated field") - } - posServices = len(b0) - len(b) - n - m - } - numServices++ - } - prevField = num - default: - m := protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - prevField = -1 // ignore known field numbers of unknown wire type - } - 
} - - // If syntax is missing, it is assumed to be proto2. - if fd.L1.Syntax == 0 { - fd.L1.Syntax = pref.Proto2 - } - - // Must allocate all declarations before parsing each descriptor type - // to ensure we handled all descriptors in "flattened ordering". - if numEnums > 0 { - fd.L1.Enums.List = fd.allocEnums(numEnums) - } - if numMessages > 0 { - fd.L1.Messages.List = fd.allocMessages(numMessages) - } - if numExtensions > 0 { - fd.L1.Extensions.List = fd.allocExtensions(numExtensions) - } - if numServices > 0 { - fd.L1.Services.List = fd.allocServices(numServices) - } - - if numEnums > 0 { - b := b0[posEnums:] - for i := range fd.L1.Enums.List { - _, n := protowire.ConsumeVarint(b) - v, m := protowire.ConsumeBytes(b[n:]) - fd.L1.Enums.List[i].unmarshalSeed(v, sb, fd, fd, i) - b = b[n+m:] - } - } - if numMessages > 0 { - b := b0[posMessages:] - for i := range fd.L1.Messages.List { - _, n := protowire.ConsumeVarint(b) - v, m := protowire.ConsumeBytes(b[n:]) - fd.L1.Messages.List[i].unmarshalSeed(v, sb, fd, fd, i) - b = b[n+m:] - } - } - if numExtensions > 0 { - b := b0[posExtensions:] - for i := range fd.L1.Extensions.List { - _, n := protowire.ConsumeVarint(b) - v, m := protowire.ConsumeBytes(b[n:]) - fd.L1.Extensions.List[i].unmarshalSeed(v, sb, fd, fd, i) - b = b[n+m:] - } - } - if numServices > 0 { - b := b0[posServices:] - for i := range fd.L1.Services.List { - _, n := protowire.ConsumeVarint(b) - v, m := protowire.ConsumeBytes(b[n:]) - fd.L1.Services.List[i].unmarshalSeed(v, sb, fd, fd, i) - b = b[n+m:] - } - } -} - -func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { - ed.L0.ParentFile = pf - ed.L0.Parent = pd - ed.L0.Index = i - - var numValues int - for b := b; len(b) > 0; { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.BytesType: - v, m := protowire.ConsumeBytes(b) - b = b[m:] - switch num { - case genid.EnumDescriptorProto_Name_field_number: - ed.L0.FullName = appendFullName(sb, pd.FullName(), v) - case genid.EnumDescriptorProto_Value_field_number: - numValues++ - } - default: - m := protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - } - } - - // Only construct enum value descriptors for top-level enums since - // they are needed for registration. 
- if pd != pf { - return - } - ed.L1.eagerValues = true - ed.L2 = new(EnumL2) - ed.L2.Values.List = make([]EnumValue, numValues) - for i := 0; len(b) > 0; { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.BytesType: - v, m := protowire.ConsumeBytes(b) - b = b[m:] - switch num { - case genid.EnumDescriptorProto_Value_field_number: - ed.L2.Values.List[i].unmarshalFull(v, sb, pf, ed, i) - i++ - } - default: - m := protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - } - } -} - -func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { - md.L0.ParentFile = pf - md.L0.Parent = pd - md.L0.Index = i - - var prevField pref.FieldNumber - var numEnums, numMessages, numExtensions int - var posEnums, posMessages, posExtensions int - b0 := b - for len(b) > 0 { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.BytesType: - v, m := protowire.ConsumeBytes(b) - b = b[m:] - switch num { - case genid.DescriptorProto_Name_field_number: - md.L0.FullName = appendFullName(sb, pd.FullName(), v) - case genid.DescriptorProto_EnumType_field_number: - if prevField != genid.DescriptorProto_EnumType_field_number { - if numEnums > 0 { - panic("non-contiguous repeated field") - } - posEnums = len(b0) - len(b) - n - m - } - numEnums++ - case genid.DescriptorProto_NestedType_field_number: - if prevField != genid.DescriptorProto_NestedType_field_number { - if numMessages > 0 { - panic("non-contiguous repeated field") - } - posMessages = len(b0) - len(b) - n - m - } - numMessages++ - case genid.DescriptorProto_Extension_field_number: - if prevField != genid.DescriptorProto_Extension_field_number { - if numExtensions > 0 { - panic("non-contiguous repeated field") - } - posExtensions = len(b0) - len(b) - n - m - } - numExtensions++ - case genid.DescriptorProto_Options_field_number: - md.unmarshalSeedOptions(v) - } - prevField = num - default: - m := protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - prevField = -1 // ignore known field numbers of unknown wire type - } - } - - // Must allocate all declarations before parsing each descriptor type - // to ensure we handled all descriptors in "flattened ordering". 
- if numEnums > 0 { - md.L1.Enums.List = pf.allocEnums(numEnums) - } - if numMessages > 0 { - md.L1.Messages.List = pf.allocMessages(numMessages) - } - if numExtensions > 0 { - md.L1.Extensions.List = pf.allocExtensions(numExtensions) - } - - if numEnums > 0 { - b := b0[posEnums:] - for i := range md.L1.Enums.List { - _, n := protowire.ConsumeVarint(b) - v, m := protowire.ConsumeBytes(b[n:]) - md.L1.Enums.List[i].unmarshalSeed(v, sb, pf, md, i) - b = b[n+m:] - } - } - if numMessages > 0 { - b := b0[posMessages:] - for i := range md.L1.Messages.List { - _, n := protowire.ConsumeVarint(b) - v, m := protowire.ConsumeBytes(b[n:]) - md.L1.Messages.List[i].unmarshalSeed(v, sb, pf, md, i) - b = b[n+m:] - } - } - if numExtensions > 0 { - b := b0[posExtensions:] - for i := range md.L1.Extensions.List { - _, n := protowire.ConsumeVarint(b) - v, m := protowire.ConsumeBytes(b[n:]) - md.L1.Extensions.List[i].unmarshalSeed(v, sb, pf, md, i) - b = b[n+m:] - } - } -} - -func (md *Message) unmarshalSeedOptions(b []byte) { - for len(b) > 0 { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.VarintType: - v, m := protowire.ConsumeVarint(b) - b = b[m:] - switch num { - case genid.MessageOptions_MapEntry_field_number: - md.L1.IsMapEntry = protowire.DecodeBool(v) - case genid.MessageOptions_MessageSetWireFormat_field_number: - md.L1.IsMessageSet = protowire.DecodeBool(v) - } - default: - m := protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - } - } -} - -func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { - xd.L0.ParentFile = pf - xd.L0.Parent = pd - xd.L0.Index = i - - for len(b) > 0 { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.VarintType: - v, m := protowire.ConsumeVarint(b) - b = b[m:] - switch num { - case genid.FieldDescriptorProto_Number_field_number: - xd.L1.Number = pref.FieldNumber(v) - case genid.FieldDescriptorProto_Label_field_number: - xd.L1.Cardinality = pref.Cardinality(v) - case genid.FieldDescriptorProto_Type_field_number: - xd.L1.Kind = pref.Kind(v) - } - case protowire.BytesType: - v, m := protowire.ConsumeBytes(b) - b = b[m:] - switch num { - case genid.FieldDescriptorProto_Name_field_number: - xd.L0.FullName = appendFullName(sb, pd.FullName(), v) - case genid.FieldDescriptorProto_Extendee_field_number: - xd.L1.Extendee = PlaceholderMessage(makeFullName(sb, v)) - } - default: - m := protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - } - } -} - -func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { - sd.L0.ParentFile = pf - sd.L0.Parent = pd - sd.L0.Index = i - - for len(b) > 0 { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.BytesType: - v, m := protowire.ConsumeBytes(b) - b = b[m:] - switch num { - case genid.ServiceDescriptorProto_Name_field_number: - sd.L0.FullName = appendFullName(sb, pd.FullName(), v) - } - default: - m := protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - } - } -} - -var nameBuilderPool = sync.Pool{ - New: func() interface{} { return new(strs.Builder) }, -} - -func getBuilder() *strs.Builder { - return nameBuilderPool.Get().(*strs.Builder) -} -func putBuilder(b *strs.Builder) { - nameBuilderPool.Put(b) -} - -// makeFullName converts b to a protoreflect.FullName, -// where b must start with a leading dot. -func makeFullName(sb *strs.Builder, b []byte) pref.FullName { - if len(b) == 0 || b[0] != '.' 
{ - panic("name reference must be fully qualified") - } - return pref.FullName(sb.MakeString(b[1:])) -} - -func appendFullName(sb *strs.Builder, prefix pref.FullName, suffix []byte) pref.FullName { - return sb.AppendFullName(prefix, pref.Name(strs.UnsafeString(suffix))) -} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go deleted file mode 100644 index 198451e3e..000000000 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ /dev/null @@ -1,704 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package filedesc - -import ( - "reflect" - "sync" - - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/descopts" - "google.golang.org/protobuf/internal/genid" - "google.golang.org/protobuf/internal/strs" - "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" -) - -func (fd *File) lazyRawInit() { - fd.unmarshalFull(fd.builder.RawDescriptor) - fd.resolveMessages() - fd.resolveExtensions() - fd.resolveServices() -} - -func (file *File) resolveMessages() { - var depIdx int32 - for i := range file.allMessages { - md := &file.allMessages[i] - - // Resolve message field dependencies. - for j := range md.L2.Fields.List { - fd := &md.L2.Fields.List[j] - - // Weak fields are resolved upon actual use. - if fd.L1.IsWeak { - continue - } - - // Resolve message field dependency. - switch fd.L1.Kind { - case pref.EnumKind: - fd.L1.Enum = file.resolveEnumDependency(fd.L1.Enum, listFieldDeps, depIdx) - depIdx++ - case pref.MessageKind, pref.GroupKind: - fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx) - depIdx++ - } - - // Default is resolved here since it depends on Enum being resolved. - if v := fd.L1.Default.val; v.IsValid() { - fd.L1.Default = unmarshalDefault(v.Bytes(), fd.L1.Kind, file, fd.L1.Enum) - } - } - } -} - -func (file *File) resolveExtensions() { - var depIdx int32 - for i := range file.allExtensions { - xd := &file.allExtensions[i] - - // Resolve extension field dependency. - switch xd.L1.Kind { - case pref.EnumKind: - xd.L2.Enum = file.resolveEnumDependency(xd.L2.Enum, listExtDeps, depIdx) - depIdx++ - case pref.MessageKind, pref.GroupKind: - xd.L2.Message = file.resolveMessageDependency(xd.L2.Message, listExtDeps, depIdx) - depIdx++ - } - - // Default is resolved here since it depends on Enum being resolved. - if v := xd.L2.Default.val; v.IsValid() { - xd.L2.Default = unmarshalDefault(v.Bytes(), xd.L1.Kind, file, xd.L2.Enum) - } - } -} - -func (file *File) resolveServices() { - var depIdx int32 - for i := range file.allServices { - sd := &file.allServices[i] - - // Resolve method dependencies. 
- for j := range sd.L2.Methods.List { - md := &sd.L2.Methods.List[j] - md.L1.Input = file.resolveMessageDependency(md.L1.Input, listMethInDeps, depIdx) - md.L1.Output = file.resolveMessageDependency(md.L1.Output, listMethOutDeps, depIdx) - depIdx++ - } - } -} - -func (file *File) resolveEnumDependency(ed pref.EnumDescriptor, i, j int32) pref.EnumDescriptor { - r := file.builder.FileRegistry - if r, ok := r.(resolverByIndex); ok { - if ed2 := r.FindEnumByIndex(i, j, file.allEnums, file.allMessages); ed2 != nil { - return ed2 - } - } - for i := range file.allEnums { - if ed2 := &file.allEnums[i]; ed2.L0.FullName == ed.FullName() { - return ed2 - } - } - if d, _ := r.FindDescriptorByName(ed.FullName()); d != nil { - return d.(pref.EnumDescriptor) - } - return ed -} - -func (file *File) resolveMessageDependency(md pref.MessageDescriptor, i, j int32) pref.MessageDescriptor { - r := file.builder.FileRegistry - if r, ok := r.(resolverByIndex); ok { - if md2 := r.FindMessageByIndex(i, j, file.allEnums, file.allMessages); md2 != nil { - return md2 - } - } - for i := range file.allMessages { - if md2 := &file.allMessages[i]; md2.L0.FullName == md.FullName() { - return md2 - } - } - if d, _ := r.FindDescriptorByName(md.FullName()); d != nil { - return d.(pref.MessageDescriptor) - } - return md -} - -func (fd *File) unmarshalFull(b []byte) { - sb := getBuilder() - defer putBuilder(sb) - - var enumIdx, messageIdx, extensionIdx, serviceIdx int - var rawOptions []byte - fd.L2 = new(FileL2) - for len(b) > 0 { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.VarintType: - v, m := protowire.ConsumeVarint(b) - b = b[m:] - switch num { - case genid.FileDescriptorProto_PublicDependency_field_number: - fd.L2.Imports[v].IsPublic = true - case genid.FileDescriptorProto_WeakDependency_field_number: - fd.L2.Imports[v].IsWeak = true - } - case protowire.BytesType: - v, m := protowire.ConsumeBytes(b) - b = b[m:] - switch num { - case genid.FileDescriptorProto_Dependency_field_number: - path := sb.MakeString(v) - imp, _ := fd.builder.FileRegistry.FindFileByPath(path) - if imp == nil { - imp = PlaceholderFile(path) - } - fd.L2.Imports = append(fd.L2.Imports, pref.FileImport{FileDescriptor: imp}) - case genid.FileDescriptorProto_EnumType_field_number: - fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb) - enumIdx++ - case genid.FileDescriptorProto_MessageType_field_number: - fd.L1.Messages.List[messageIdx].unmarshalFull(v, sb) - messageIdx++ - case genid.FileDescriptorProto_Extension_field_number: - fd.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb) - extensionIdx++ - case genid.FileDescriptorProto_Service_field_number: - fd.L1.Services.List[serviceIdx].unmarshalFull(v, sb) - serviceIdx++ - case genid.FileDescriptorProto_Options_field_number: - rawOptions = appendOptions(rawOptions, v) - } - default: - m := protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - } - } - fd.L2.Options = fd.builder.optionsUnmarshaler(&descopts.File, rawOptions) -} - -func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) { - var rawValues [][]byte - var rawOptions []byte - if !ed.L1.eagerValues { - ed.L2 = new(EnumL2) - } - for len(b) > 0 { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.BytesType: - v, m := protowire.ConsumeBytes(b) - b = b[m:] - switch num { - case genid.EnumDescriptorProto_Value_field_number: - rawValues = append(rawValues, v) - case genid.EnumDescriptorProto_ReservedName_field_number: - ed.L2.ReservedNames.List = 
append(ed.L2.ReservedNames.List, pref.Name(sb.MakeString(v))) - case genid.EnumDescriptorProto_ReservedRange_field_number: - ed.L2.ReservedRanges.List = append(ed.L2.ReservedRanges.List, unmarshalEnumReservedRange(v)) - case genid.EnumDescriptorProto_Options_field_number: - rawOptions = appendOptions(rawOptions, v) - } - default: - m := protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - } - } - if !ed.L1.eagerValues && len(rawValues) > 0 { - ed.L2.Values.List = make([]EnumValue, len(rawValues)) - for i, b := range rawValues { - ed.L2.Values.List[i].unmarshalFull(b, sb, ed.L0.ParentFile, ed, i) - } - } - ed.L2.Options = ed.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Enum, rawOptions) -} - -func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) { - for len(b) > 0 { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.VarintType: - v, m := protowire.ConsumeVarint(b) - b = b[m:] - switch num { - case genid.EnumDescriptorProto_EnumReservedRange_Start_field_number: - r[0] = pref.EnumNumber(v) - case genid.EnumDescriptorProto_EnumReservedRange_End_field_number: - r[1] = pref.EnumNumber(v) - } - default: - m := protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - } - } - return r -} - -func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { - vd.L0.ParentFile = pf - vd.L0.Parent = pd - vd.L0.Index = i - - var rawOptions []byte - for len(b) > 0 { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.VarintType: - v, m := protowire.ConsumeVarint(b) - b = b[m:] - switch num { - case genid.EnumValueDescriptorProto_Number_field_number: - vd.L1.Number = pref.EnumNumber(v) - } - case protowire.BytesType: - v, m := protowire.ConsumeBytes(b) - b = b[m:] - switch num { - case genid.EnumValueDescriptorProto_Name_field_number: - // NOTE: Enum values are in the same scope as the enum parent. 
- vd.L0.FullName = appendFullName(sb, pd.Parent().FullName(), v) - case genid.EnumValueDescriptorProto_Options_field_number: - rawOptions = appendOptions(rawOptions, v) - } - default: - m := protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - } - } - vd.L1.Options = pf.builder.optionsUnmarshaler(&descopts.EnumValue, rawOptions) -} - -func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) { - var rawFields, rawOneofs [][]byte - var enumIdx, messageIdx, extensionIdx int - var rawOptions []byte - md.L2 = new(MessageL2) - for len(b) > 0 { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.BytesType: - v, m := protowire.ConsumeBytes(b) - b = b[m:] - switch num { - case genid.DescriptorProto_Field_field_number: - rawFields = append(rawFields, v) - case genid.DescriptorProto_OneofDecl_field_number: - rawOneofs = append(rawOneofs, v) - case genid.DescriptorProto_ReservedName_field_number: - md.L2.ReservedNames.List = append(md.L2.ReservedNames.List, pref.Name(sb.MakeString(v))) - case genid.DescriptorProto_ReservedRange_field_number: - md.L2.ReservedRanges.List = append(md.L2.ReservedRanges.List, unmarshalMessageReservedRange(v)) - case genid.DescriptorProto_ExtensionRange_field_number: - r, rawOptions := unmarshalMessageExtensionRange(v) - opts := md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.ExtensionRange, rawOptions) - md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, r) - md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, opts) - case genid.DescriptorProto_EnumType_field_number: - md.L1.Enums.List[enumIdx].unmarshalFull(v, sb) - enumIdx++ - case genid.DescriptorProto_NestedType_field_number: - md.L1.Messages.List[messageIdx].unmarshalFull(v, sb) - messageIdx++ - case genid.DescriptorProto_Extension_field_number: - md.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb) - extensionIdx++ - case genid.DescriptorProto_Options_field_number: - md.unmarshalOptions(v) - rawOptions = appendOptions(rawOptions, v) - } - default: - m := protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - } - } - if len(rawFields) > 0 || len(rawOneofs) > 0 { - md.L2.Fields.List = make([]Field, len(rawFields)) - md.L2.Oneofs.List = make([]Oneof, len(rawOneofs)) - for i, b := range rawFields { - fd := &md.L2.Fields.List[i] - fd.unmarshalFull(b, sb, md.L0.ParentFile, md, i) - if fd.L1.Cardinality == pref.Required { - md.L2.RequiredNumbers.List = append(md.L2.RequiredNumbers.List, fd.L1.Number) - } - } - for i, b := range rawOneofs { - od := &md.L2.Oneofs.List[i] - od.unmarshalFull(b, sb, md.L0.ParentFile, md, i) - } - } - md.L2.Options = md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Message, rawOptions) -} - -func (md *Message) unmarshalOptions(b []byte) { - for len(b) > 0 { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.VarintType: - v, m := protowire.ConsumeVarint(b) - b = b[m:] - switch num { - case genid.MessageOptions_MapEntry_field_number: - md.L1.IsMapEntry = protowire.DecodeBool(v) - case genid.MessageOptions_MessageSetWireFormat_field_number: - md.L1.IsMessageSet = protowire.DecodeBool(v) - } - default: - m := protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - } - } -} - -func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) { - for len(b) > 0 { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.VarintType: - v, m := protowire.ConsumeVarint(b) - b = b[m:] - switch num { - case genid.DescriptorProto_ReservedRange_Start_field_number: - 
r[0] = pref.FieldNumber(v) - case genid.DescriptorProto_ReservedRange_End_field_number: - r[1] = pref.FieldNumber(v) - } - default: - m := protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - } - } - return r -} - -func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions []byte) { - for len(b) > 0 { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.VarintType: - v, m := protowire.ConsumeVarint(b) - b = b[m:] - switch num { - case genid.DescriptorProto_ExtensionRange_Start_field_number: - r[0] = pref.FieldNumber(v) - case genid.DescriptorProto_ExtensionRange_End_field_number: - r[1] = pref.FieldNumber(v) - } - case protowire.BytesType: - v, m := protowire.ConsumeBytes(b) - b = b[m:] - switch num { - case genid.DescriptorProto_ExtensionRange_Options_field_number: - rawOptions = appendOptions(rawOptions, v) - } - default: - m := protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - } - } - return r, rawOptions -} - -func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { - fd.L0.ParentFile = pf - fd.L0.Parent = pd - fd.L0.Index = i - - var rawTypeName []byte - var rawOptions []byte - for len(b) > 0 { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.VarintType: - v, m := protowire.ConsumeVarint(b) - b = b[m:] - switch num { - case genid.FieldDescriptorProto_Number_field_number: - fd.L1.Number = pref.FieldNumber(v) - case genid.FieldDescriptorProto_Label_field_number: - fd.L1.Cardinality = pref.Cardinality(v) - case genid.FieldDescriptorProto_Type_field_number: - fd.L1.Kind = pref.Kind(v) - case genid.FieldDescriptorProto_OneofIndex_field_number: - // In Message.unmarshalFull, we allocate slices for both - // the field and oneof descriptors before unmarshaling either - // of them. This ensures pointers to slice elements are stable. 
- od := &pd.(*Message).L2.Oneofs.List[v] - od.L1.Fields.List = append(od.L1.Fields.List, fd) - if fd.L1.ContainingOneof != nil { - panic("oneof type already set") - } - fd.L1.ContainingOneof = od - case genid.FieldDescriptorProto_Proto3Optional_field_number: - fd.L1.IsProto3Optional = protowire.DecodeBool(v) - } - case protowire.BytesType: - v, m := protowire.ConsumeBytes(b) - b = b[m:] - switch num { - case genid.FieldDescriptorProto_Name_field_number: - fd.L0.FullName = appendFullName(sb, pd.FullName(), v) - case genid.FieldDescriptorProto_JsonName_field_number: - fd.L1.StringName.InitJSON(sb.MakeString(v)) - case genid.FieldDescriptorProto_DefaultValue_field_number: - fd.L1.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages - case genid.FieldDescriptorProto_TypeName_field_number: - rawTypeName = v - case genid.FieldDescriptorProto_Options_field_number: - fd.unmarshalOptions(v) - rawOptions = appendOptions(rawOptions, v) - } - default: - m := protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - } - } - if rawTypeName != nil { - name := makeFullName(sb, rawTypeName) - switch fd.L1.Kind { - case pref.EnumKind: - fd.L1.Enum = PlaceholderEnum(name) - case pref.MessageKind, pref.GroupKind: - fd.L1.Message = PlaceholderMessage(name) - } - } - fd.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Field, rawOptions) -} - -func (fd *Field) unmarshalOptions(b []byte) { - const FieldOptions_EnforceUTF8 = 13 - - for len(b) > 0 { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.VarintType: - v, m := protowire.ConsumeVarint(b) - b = b[m:] - switch num { - case genid.FieldOptions_Packed_field_number: - fd.L1.HasPacked = true - fd.L1.IsPacked = protowire.DecodeBool(v) - case genid.FieldOptions_Weak_field_number: - fd.L1.IsWeak = protowire.DecodeBool(v) - case FieldOptions_EnforceUTF8: - fd.L1.HasEnforceUTF8 = true - fd.L1.EnforceUTF8 = protowire.DecodeBool(v) - } - default: - m := protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - } - } -} - -func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { - od.L0.ParentFile = pf - od.L0.Parent = pd - od.L0.Index = i - - var rawOptions []byte - for len(b) > 0 { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.BytesType: - v, m := protowire.ConsumeBytes(b) - b = b[m:] - switch num { - case genid.OneofDescriptorProto_Name_field_number: - od.L0.FullName = appendFullName(sb, pd.FullName(), v) - case genid.OneofDescriptorProto_Options_field_number: - rawOptions = appendOptions(rawOptions, v) - } - default: - m := protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - } - } - od.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Oneof, rawOptions) -} - -func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { - var rawTypeName []byte - var rawOptions []byte - xd.L2 = new(ExtensionL2) - for len(b) > 0 { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.VarintType: - v, m := protowire.ConsumeVarint(b) - b = b[m:] - switch num { - case genid.FieldDescriptorProto_Proto3Optional_field_number: - xd.L2.IsProto3Optional = protowire.DecodeBool(v) - } - case protowire.BytesType: - v, m := protowire.ConsumeBytes(b) - b = b[m:] - switch num { - case genid.FieldDescriptorProto_JsonName_field_number: - xd.L2.StringName.InitJSON(sb.MakeString(v)) - case genid.FieldDescriptorProto_DefaultValue_field_number: - xd.L2.Default.val = pref.ValueOfBytes(v) // temporarily store 
as bytes; later resolved in resolveExtensions - case genid.FieldDescriptorProto_TypeName_field_number: - rawTypeName = v - case genid.FieldDescriptorProto_Options_field_number: - xd.unmarshalOptions(v) - rawOptions = appendOptions(rawOptions, v) - } - default: - m := protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - } - } - if rawTypeName != nil { - name := makeFullName(sb, rawTypeName) - switch xd.L1.Kind { - case pref.EnumKind: - xd.L2.Enum = PlaceholderEnum(name) - case pref.MessageKind, pref.GroupKind: - xd.L2.Message = PlaceholderMessage(name) - } - } - xd.L2.Options = xd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Field, rawOptions) -} - -func (xd *Extension) unmarshalOptions(b []byte) { - for len(b) > 0 { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.VarintType: - v, m := protowire.ConsumeVarint(b) - b = b[m:] - switch num { - case genid.FieldOptions_Packed_field_number: - xd.L2.IsPacked = protowire.DecodeBool(v) - } - default: - m := protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - } - } -} - -func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) { - var rawMethods [][]byte - var rawOptions []byte - sd.L2 = new(ServiceL2) - for len(b) > 0 { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.BytesType: - v, m := protowire.ConsumeBytes(b) - b = b[m:] - switch num { - case genid.ServiceDescriptorProto_Method_field_number: - rawMethods = append(rawMethods, v) - case genid.ServiceDescriptorProto_Options_field_number: - rawOptions = appendOptions(rawOptions, v) - } - default: - m := protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - } - } - if len(rawMethods) > 0 { - sd.L2.Methods.List = make([]Method, len(rawMethods)) - for i, b := range rawMethods { - sd.L2.Methods.List[i].unmarshalFull(b, sb, sd.L0.ParentFile, sd, i) - } - } - sd.L2.Options = sd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Service, rawOptions) -} - -func (md *Method) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { - md.L0.ParentFile = pf - md.L0.Parent = pd - md.L0.Index = i - - var rawOptions []byte - for len(b) > 0 { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.VarintType: - v, m := protowire.ConsumeVarint(b) - b = b[m:] - switch num { - case genid.MethodDescriptorProto_ClientStreaming_field_number: - md.L1.IsStreamingClient = protowire.DecodeBool(v) - case genid.MethodDescriptorProto_ServerStreaming_field_number: - md.L1.IsStreamingServer = protowire.DecodeBool(v) - } - case protowire.BytesType: - v, m := protowire.ConsumeBytes(b) - b = b[m:] - switch num { - case genid.MethodDescriptorProto_Name_field_number: - md.L0.FullName = appendFullName(sb, pd.FullName(), v) - case genid.MethodDescriptorProto_InputType_field_number: - md.L1.Input = PlaceholderMessage(makeFullName(sb, v)) - case genid.MethodDescriptorProto_OutputType_field_number: - md.L1.Output = PlaceholderMessage(makeFullName(sb, v)) - case genid.MethodDescriptorProto_Options_field_number: - rawOptions = appendOptions(rawOptions, v) - } - default: - m := protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - } - } - md.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Method, rawOptions) -} - -// appendOptions appends src to dst, where the returned slice is never nil. -// This is necessary to distinguish between empty and unpopulated options. -func appendOptions(dst, src []byte) []byte { - if dst == nil { - dst = []byte{} - } - return append(dst, src...) 
-} - -// optionsUnmarshaler constructs a lazy unmarshal function for an options message. -// -// The type of message to unmarshal to is passed as a pointer since the -// vars in descopts may not yet be populated at the time this function is called. -func (db *Builder) optionsUnmarshaler(p *pref.ProtoMessage, b []byte) func() pref.ProtoMessage { - if b == nil { - return nil - } - var opts pref.ProtoMessage - var once sync.Once - return func() pref.ProtoMessage { - once.Do(func() { - if *p == nil { - panic("Descriptor.Options called without importing the descriptor package") - } - opts = reflect.New(reflect.TypeOf(*p).Elem()).Interface().(pref.ProtoMessage) - if err := (proto.UnmarshalOptions{ - AllowPartial: true, - Resolver: db.TypeResolver, - }).Unmarshal(b, opts); err != nil { - panic(err) - } - }) - return opts - } -} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go deleted file mode 100644 index aa294fff9..000000000 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go +++ /dev/null @@ -1,450 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package filedesc - -import ( - "fmt" - "math" - "sort" - "sync" - - "google.golang.org/protobuf/internal/genid" - - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/descfmt" - "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/pragma" - "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" -) - -type FileImports []pref.FileImport - -func (p *FileImports) Len() int { return len(*p) } -func (p *FileImports) Get(i int) pref.FileImport { return (*p)[i] } -func (p *FileImports) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } -func (p *FileImports) ProtoInternal(pragma.DoNotImplement) {} - -type Names struct { - List []pref.Name - once sync.Once - has map[pref.Name]int // protected by once -} - -func (p *Names) Len() int { return len(p.List) } -func (p *Names) Get(i int) pref.Name { return p.List[i] } -func (p *Names) Has(s pref.Name) bool { return p.lazyInit().has[s] > 0 } -func (p *Names) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } -func (p *Names) ProtoInternal(pragma.DoNotImplement) {} -func (p *Names) lazyInit() *Names { - p.once.Do(func() { - if len(p.List) > 0 { - p.has = make(map[pref.Name]int, len(p.List)) - for _, s := range p.List { - p.has[s] = p.has[s] + 1 - } - } - }) - return p -} - -// CheckValid reports any errors with the set of names with an error message -// that completes the sentence: "ranges is invalid because it has ..." -func (p *Names) CheckValid() error { - for s, n := range p.lazyInit().has { - switch { - case n > 1: - return errors.New("duplicate name: %q", s) - case false && !s.IsValid(): - // NOTE: The C++ implementation does not validate the identifier. - // See https://github.com/protocolbuffers/protobuf/issues/6335. 
- return errors.New("invalid name: %q", s) - } - } - return nil -} - -type EnumRanges struct { - List [][2]pref.EnumNumber // start inclusive; end inclusive - once sync.Once - sorted [][2]pref.EnumNumber // protected by once -} - -func (p *EnumRanges) Len() int { return len(p.List) } -func (p *EnumRanges) Get(i int) [2]pref.EnumNumber { return p.List[i] } -func (p *EnumRanges) Has(n pref.EnumNumber) bool { - for ls := p.lazyInit().sorted; len(ls) > 0; { - i := len(ls) / 2 - switch r := enumRange(ls[i]); { - case n < r.Start(): - ls = ls[:i] // search lower - case n > r.End(): - ls = ls[i+1:] // search upper - default: - return true - } - } - return false -} -func (p *EnumRanges) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } -func (p *EnumRanges) ProtoInternal(pragma.DoNotImplement) {} -func (p *EnumRanges) lazyInit() *EnumRanges { - p.once.Do(func() { - p.sorted = append(p.sorted, p.List...) - sort.Slice(p.sorted, func(i, j int) bool { - return p.sorted[i][0] < p.sorted[j][0] - }) - }) - return p -} - -// CheckValid reports any errors with the set of names with an error message -// that completes the sentence: "ranges is invalid because it has ..." -func (p *EnumRanges) CheckValid() error { - var rp enumRange - for i, r := range p.lazyInit().sorted { - r := enumRange(r) - switch { - case !(r.Start() <= r.End()): - return errors.New("invalid range: %v", r) - case !(rp.End() < r.Start()) && i > 0: - return errors.New("overlapping ranges: %v with %v", rp, r) - } - rp = r - } - return nil -} - -type enumRange [2]protoreflect.EnumNumber - -func (r enumRange) Start() protoreflect.EnumNumber { return r[0] } // inclusive -func (r enumRange) End() protoreflect.EnumNumber { return r[1] } // inclusive -func (r enumRange) String() string { - if r.Start() == r.End() { - return fmt.Sprintf("%d", r.Start()) - } - return fmt.Sprintf("%d to %d", r.Start(), r.End()) -} - -type FieldRanges struct { - List [][2]pref.FieldNumber // start inclusive; end exclusive - once sync.Once - sorted [][2]pref.FieldNumber // protected by once -} - -func (p *FieldRanges) Len() int { return len(p.List) } -func (p *FieldRanges) Get(i int) [2]pref.FieldNumber { return p.List[i] } -func (p *FieldRanges) Has(n pref.FieldNumber) bool { - for ls := p.lazyInit().sorted; len(ls) > 0; { - i := len(ls) / 2 - switch r := fieldRange(ls[i]); { - case n < r.Start(): - ls = ls[:i] // search lower - case n > r.End(): - ls = ls[i+1:] // search upper - default: - return true - } - } - return false -} -func (p *FieldRanges) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } -func (p *FieldRanges) ProtoInternal(pragma.DoNotImplement) {} -func (p *FieldRanges) lazyInit() *FieldRanges { - p.once.Do(func() { - p.sorted = append(p.sorted, p.List...) - sort.Slice(p.sorted, func(i, j int) bool { - return p.sorted[i][0] < p.sorted[j][0] - }) - }) - return p -} - -// CheckValid reports any errors with the set of ranges with an error message -// that completes the sentence: "ranges is invalid because it has ..." 
-func (p *FieldRanges) CheckValid(isMessageSet bool) error { - var rp fieldRange - for i, r := range p.lazyInit().sorted { - r := fieldRange(r) - switch { - case !isValidFieldNumber(r.Start(), isMessageSet): - return errors.New("invalid field number: %d", r.Start()) - case !isValidFieldNumber(r.End(), isMessageSet): - return errors.New("invalid field number: %d", r.End()) - case !(r.Start() <= r.End()): - return errors.New("invalid range: %v", r) - case !(rp.End() < r.Start()) && i > 0: - return errors.New("overlapping ranges: %v with %v", rp, r) - } - rp = r - } - return nil -} - -// isValidFieldNumber reports whether the field number is valid. -// Unlike the FieldNumber.IsValid method, it allows ranges that cover the -// reserved number range. -func isValidFieldNumber(n protoreflect.FieldNumber, isMessageSet bool) bool { - return protowire.MinValidNumber <= n && (n <= protowire.MaxValidNumber || isMessageSet) -} - -// CheckOverlap reports an error if p and q overlap. -func (p *FieldRanges) CheckOverlap(q *FieldRanges) error { - rps := p.lazyInit().sorted - rqs := q.lazyInit().sorted - for pi, qi := 0, 0; pi < len(rps) && qi < len(rqs); { - rp := fieldRange(rps[pi]) - rq := fieldRange(rqs[qi]) - if !(rp.End() < rq.Start() || rq.End() < rp.Start()) { - return errors.New("overlapping ranges: %v with %v", rp, rq) - } - if rp.Start() < rq.Start() { - pi++ - } else { - qi++ - } - } - return nil -} - -type fieldRange [2]protoreflect.FieldNumber - -func (r fieldRange) Start() protoreflect.FieldNumber { return r[0] } // inclusive -func (r fieldRange) End() protoreflect.FieldNumber { return r[1] - 1 } // inclusive -func (r fieldRange) String() string { - if r.Start() == r.End() { - return fmt.Sprintf("%d", r.Start()) - } - return fmt.Sprintf("%d to %d", r.Start(), r.End()) -} - -type FieldNumbers struct { - List []pref.FieldNumber - once sync.Once - has map[pref.FieldNumber]struct{} // protected by once -} - -func (p *FieldNumbers) Len() int { return len(p.List) } -func (p *FieldNumbers) Get(i int) pref.FieldNumber { return p.List[i] } -func (p *FieldNumbers) Has(n pref.FieldNumber) bool { - p.once.Do(func() { - if len(p.List) > 0 { - p.has = make(map[pref.FieldNumber]struct{}, len(p.List)) - for _, n := range p.List { - p.has[n] = struct{}{} - } - } - }) - _, ok := p.has[n] - return ok -} -func (p *FieldNumbers) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } -func (p *FieldNumbers) ProtoInternal(pragma.DoNotImplement) {} - -type OneofFields struct { - List []pref.FieldDescriptor - once sync.Once - byName map[pref.Name]pref.FieldDescriptor // protected by once - byJSON map[string]pref.FieldDescriptor // protected by once - byText map[string]pref.FieldDescriptor // protected by once - byNum map[pref.FieldNumber]pref.FieldDescriptor // protected by once -} - -func (p *OneofFields) Len() int { return len(p.List) } -func (p *OneofFields) Get(i int) pref.FieldDescriptor { return p.List[i] } -func (p *OneofFields) ByName(s pref.Name) pref.FieldDescriptor { return p.lazyInit().byName[s] } -func (p *OneofFields) ByJSONName(s string) pref.FieldDescriptor { return p.lazyInit().byJSON[s] } -func (p *OneofFields) ByTextName(s string) pref.FieldDescriptor { return p.lazyInit().byText[s] } -func (p *OneofFields) ByNumber(n pref.FieldNumber) pref.FieldDescriptor { return p.lazyInit().byNum[n] } -func (p *OneofFields) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } -func (p *OneofFields) ProtoInternal(pragma.DoNotImplement) {} - -func (p *OneofFields) lazyInit() *OneofFields { - 
p.once.Do(func() { - if len(p.List) > 0 { - p.byName = make(map[pref.Name]pref.FieldDescriptor, len(p.List)) - p.byJSON = make(map[string]pref.FieldDescriptor, len(p.List)) - p.byText = make(map[string]pref.FieldDescriptor, len(p.List)) - p.byNum = make(map[pref.FieldNumber]pref.FieldDescriptor, len(p.List)) - for _, f := range p.List { - // Field names and numbers are guaranteed to be unique. - p.byName[f.Name()] = f - p.byJSON[f.JSONName()] = f - p.byText[f.TextName()] = f - p.byNum[f.Number()] = f - } - } - }) - return p -} - -type SourceLocations struct { - // List is a list of SourceLocations. - // The SourceLocation.Next field does not need to be populated - // as it will be lazily populated upon first need. - List []pref.SourceLocation - - // File is the parent file descriptor that these locations are relative to. - // If non-nil, ByDescriptor verifies that the provided descriptor - // is a child of this file descriptor. - File pref.FileDescriptor - - once sync.Once - byPath map[pathKey]int -} - -func (p *SourceLocations) Len() int { return len(p.List) } -func (p *SourceLocations) Get(i int) pref.SourceLocation { return p.lazyInit().List[i] } -func (p *SourceLocations) byKey(k pathKey) pref.SourceLocation { - if i, ok := p.lazyInit().byPath[k]; ok { - return p.List[i] - } - return pref.SourceLocation{} -} -func (p *SourceLocations) ByPath(path pref.SourcePath) pref.SourceLocation { - return p.byKey(newPathKey(path)) -} -func (p *SourceLocations) ByDescriptor(desc pref.Descriptor) pref.SourceLocation { - if p.File != nil && desc != nil && p.File != desc.ParentFile() { - return pref.SourceLocation{} // mismatching parent files - } - var pathArr [16]int32 - path := pathArr[:0] - for { - switch desc.(type) { - case pref.FileDescriptor: - // Reverse the path since it was constructed in reverse. 
- for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 { - path[i], path[j] = path[j], path[i] - } - return p.byKey(newPathKey(path)) - case pref.MessageDescriptor: - path = append(path, int32(desc.Index())) - desc = desc.Parent() - switch desc.(type) { - case pref.FileDescriptor: - path = append(path, int32(genid.FileDescriptorProto_MessageType_field_number)) - case pref.MessageDescriptor: - path = append(path, int32(genid.DescriptorProto_NestedType_field_number)) - default: - return pref.SourceLocation{} - } - case pref.FieldDescriptor: - isExtension := desc.(pref.FieldDescriptor).IsExtension() - path = append(path, int32(desc.Index())) - desc = desc.Parent() - if isExtension { - switch desc.(type) { - case pref.FileDescriptor: - path = append(path, int32(genid.FileDescriptorProto_Extension_field_number)) - case pref.MessageDescriptor: - path = append(path, int32(genid.DescriptorProto_Extension_field_number)) - default: - return pref.SourceLocation{} - } - } else { - switch desc.(type) { - case pref.MessageDescriptor: - path = append(path, int32(genid.DescriptorProto_Field_field_number)) - default: - return pref.SourceLocation{} - } - } - case pref.OneofDescriptor: - path = append(path, int32(desc.Index())) - desc = desc.Parent() - switch desc.(type) { - case pref.MessageDescriptor: - path = append(path, int32(genid.DescriptorProto_OneofDecl_field_number)) - default: - return pref.SourceLocation{} - } - case pref.EnumDescriptor: - path = append(path, int32(desc.Index())) - desc = desc.Parent() - switch desc.(type) { - case pref.FileDescriptor: - path = append(path, int32(genid.FileDescriptorProto_EnumType_field_number)) - case pref.MessageDescriptor: - path = append(path, int32(genid.DescriptorProto_EnumType_field_number)) - default: - return pref.SourceLocation{} - } - case pref.EnumValueDescriptor: - path = append(path, int32(desc.Index())) - desc = desc.Parent() - switch desc.(type) { - case pref.EnumDescriptor: - path = append(path, int32(genid.EnumDescriptorProto_Value_field_number)) - default: - return pref.SourceLocation{} - } - case pref.ServiceDescriptor: - path = append(path, int32(desc.Index())) - desc = desc.Parent() - switch desc.(type) { - case pref.FileDescriptor: - path = append(path, int32(genid.FileDescriptorProto_Service_field_number)) - default: - return pref.SourceLocation{} - } - case pref.MethodDescriptor: - path = append(path, int32(desc.Index())) - desc = desc.Parent() - switch desc.(type) { - case pref.ServiceDescriptor: - path = append(path, int32(genid.ServiceDescriptorProto_Method_field_number)) - default: - return pref.SourceLocation{} - } - default: - return pref.SourceLocation{} - } - } -} -func (p *SourceLocations) lazyInit() *SourceLocations { - p.once.Do(func() { - if len(p.List) > 0 { - // Collect all the indexes for a given path. - pathIdxs := make(map[pathKey][]int, len(p.List)) - for i, l := range p.List { - k := newPathKey(l.Path) - pathIdxs[k] = append(pathIdxs[k], i) - } - - // Update the next index for all locations. - p.byPath = make(map[pathKey]int, len(p.List)) - for k, idxs := range pathIdxs { - for i := 0; i < len(idxs)-1; i++ { - p.List[idxs[i]].Next = idxs[i+1] - } - p.List[idxs[len(idxs)-1]].Next = 0 - p.byPath[k] = idxs[0] // record the first location for this path - } - } - }) - return p -} -func (p *SourceLocations) ProtoInternal(pragma.DoNotImplement) {} - -// pathKey is a comparable representation of protoreflect.SourcePath. 
-type pathKey struct { - arr [16]uint8 // first n-1 path segments; last element is the length - str string // used if the path does not fit in arr -} - -func newPathKey(p pref.SourcePath) (k pathKey) { - if len(p) < len(k.arr) { - for i, ps := range p { - if ps < 0 || math.MaxUint8 <= ps { - return pathKey{str: p.String()} - } - k.arr[i] = uint8(ps) - } - k.arr[len(k.arr)-1] = uint8(len(p)) - return k - } - return pathKey{str: p.String()} -} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go deleted file mode 100644 index 30db19fdc..000000000 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go +++ /dev/null @@ -1,356 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-types. DO NOT EDIT. - -package filedesc - -import ( - "fmt" - "sync" - - "google.golang.org/protobuf/internal/descfmt" - "google.golang.org/protobuf/internal/pragma" - "google.golang.org/protobuf/reflect/protoreflect" -) - -type Enums struct { - List []Enum - once sync.Once - byName map[protoreflect.Name]*Enum // protected by once -} - -func (p *Enums) Len() int { - return len(p.List) -} -func (p *Enums) Get(i int) protoreflect.EnumDescriptor { - return &p.List[i] -} -func (p *Enums) ByName(s protoreflect.Name) protoreflect.EnumDescriptor { - if d := p.lazyInit().byName[s]; d != nil { - return d - } - return nil -} -func (p *Enums) Format(s fmt.State, r rune) { - descfmt.FormatList(s, r, p) -} -func (p *Enums) ProtoInternal(pragma.DoNotImplement) {} -func (p *Enums) lazyInit() *Enums { - p.once.Do(func() { - if len(p.List) > 0 { - p.byName = make(map[protoreflect.Name]*Enum, len(p.List)) - for i := range p.List { - d := &p.List[i] - if _, ok := p.byName[d.Name()]; !ok { - p.byName[d.Name()] = d - } - } - } - }) - return p -} - -type EnumValues struct { - List []EnumValue - once sync.Once - byName map[protoreflect.Name]*EnumValue // protected by once - byNum map[protoreflect.EnumNumber]*EnumValue // protected by once -} - -func (p *EnumValues) Len() int { - return len(p.List) -} -func (p *EnumValues) Get(i int) protoreflect.EnumValueDescriptor { - return &p.List[i] -} -func (p *EnumValues) ByName(s protoreflect.Name) protoreflect.EnumValueDescriptor { - if d := p.lazyInit().byName[s]; d != nil { - return d - } - return nil -} -func (p *EnumValues) ByNumber(n protoreflect.EnumNumber) protoreflect.EnumValueDescriptor { - if d := p.lazyInit().byNum[n]; d != nil { - return d - } - return nil -} -func (p *EnumValues) Format(s fmt.State, r rune) { - descfmt.FormatList(s, r, p) -} -func (p *EnumValues) ProtoInternal(pragma.DoNotImplement) {} -func (p *EnumValues) lazyInit() *EnumValues { - p.once.Do(func() { - if len(p.List) > 0 { - p.byName = make(map[protoreflect.Name]*EnumValue, len(p.List)) - p.byNum = make(map[protoreflect.EnumNumber]*EnumValue, len(p.List)) - for i := range p.List { - d := &p.List[i] - if _, ok := p.byName[d.Name()]; !ok { - p.byName[d.Name()] = d - } - if _, ok := p.byNum[d.Number()]; !ok { - p.byNum[d.Number()] = d - } - } - } - }) - return p -} - -type Messages struct { - List []Message - once sync.Once - byName map[protoreflect.Name]*Message // protected by once -} - -func (p *Messages) Len() int { - return len(p.List) -} -func (p *Messages) Get(i int) protoreflect.MessageDescriptor { - return &p.List[i] -} -func (p *Messages) ByName(s 
protoreflect.Name) protoreflect.MessageDescriptor { - if d := p.lazyInit().byName[s]; d != nil { - return d - } - return nil -} -func (p *Messages) Format(s fmt.State, r rune) { - descfmt.FormatList(s, r, p) -} -func (p *Messages) ProtoInternal(pragma.DoNotImplement) {} -func (p *Messages) lazyInit() *Messages { - p.once.Do(func() { - if len(p.List) > 0 { - p.byName = make(map[protoreflect.Name]*Message, len(p.List)) - for i := range p.List { - d := &p.List[i] - if _, ok := p.byName[d.Name()]; !ok { - p.byName[d.Name()] = d - } - } - } - }) - return p -} - -type Fields struct { - List []Field - once sync.Once - byName map[protoreflect.Name]*Field // protected by once - byJSON map[string]*Field // protected by once - byText map[string]*Field // protected by once - byNum map[protoreflect.FieldNumber]*Field // protected by once -} - -func (p *Fields) Len() int { - return len(p.List) -} -func (p *Fields) Get(i int) protoreflect.FieldDescriptor { - return &p.List[i] -} -func (p *Fields) ByName(s protoreflect.Name) protoreflect.FieldDescriptor { - if d := p.lazyInit().byName[s]; d != nil { - return d - } - return nil -} -func (p *Fields) ByJSONName(s string) protoreflect.FieldDescriptor { - if d := p.lazyInit().byJSON[s]; d != nil { - return d - } - return nil -} -func (p *Fields) ByTextName(s string) protoreflect.FieldDescriptor { - if d := p.lazyInit().byText[s]; d != nil { - return d - } - return nil -} -func (p *Fields) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor { - if d := p.lazyInit().byNum[n]; d != nil { - return d - } - return nil -} -func (p *Fields) Format(s fmt.State, r rune) { - descfmt.FormatList(s, r, p) -} -func (p *Fields) ProtoInternal(pragma.DoNotImplement) {} -func (p *Fields) lazyInit() *Fields { - p.once.Do(func() { - if len(p.List) > 0 { - p.byName = make(map[protoreflect.Name]*Field, len(p.List)) - p.byJSON = make(map[string]*Field, len(p.List)) - p.byText = make(map[string]*Field, len(p.List)) - p.byNum = make(map[protoreflect.FieldNumber]*Field, len(p.List)) - for i := range p.List { - d := &p.List[i] - if _, ok := p.byName[d.Name()]; !ok { - p.byName[d.Name()] = d - } - if _, ok := p.byJSON[d.JSONName()]; !ok { - p.byJSON[d.JSONName()] = d - } - if _, ok := p.byText[d.TextName()]; !ok { - p.byText[d.TextName()] = d - } - if _, ok := p.byNum[d.Number()]; !ok { - p.byNum[d.Number()] = d - } - } - } - }) - return p -} - -type Oneofs struct { - List []Oneof - once sync.Once - byName map[protoreflect.Name]*Oneof // protected by once -} - -func (p *Oneofs) Len() int { - return len(p.List) -} -func (p *Oneofs) Get(i int) protoreflect.OneofDescriptor { - return &p.List[i] -} -func (p *Oneofs) ByName(s protoreflect.Name) protoreflect.OneofDescriptor { - if d := p.lazyInit().byName[s]; d != nil { - return d - } - return nil -} -func (p *Oneofs) Format(s fmt.State, r rune) { - descfmt.FormatList(s, r, p) -} -func (p *Oneofs) ProtoInternal(pragma.DoNotImplement) {} -func (p *Oneofs) lazyInit() *Oneofs { - p.once.Do(func() { - if len(p.List) > 0 { - p.byName = make(map[protoreflect.Name]*Oneof, len(p.List)) - for i := range p.List { - d := &p.List[i] - if _, ok := p.byName[d.Name()]; !ok { - p.byName[d.Name()] = d - } - } - } - }) - return p -} - -type Extensions struct { - List []Extension - once sync.Once - byName map[protoreflect.Name]*Extension // protected by once -} - -func (p *Extensions) Len() int { - return len(p.List) -} -func (p *Extensions) Get(i int) protoreflect.ExtensionDescriptor { - return &p.List[i] -} -func (p *Extensions) ByName(s 
protoreflect.Name) protoreflect.ExtensionDescriptor { - if d := p.lazyInit().byName[s]; d != nil { - return d - } - return nil -} -func (p *Extensions) Format(s fmt.State, r rune) { - descfmt.FormatList(s, r, p) -} -func (p *Extensions) ProtoInternal(pragma.DoNotImplement) {} -func (p *Extensions) lazyInit() *Extensions { - p.once.Do(func() { - if len(p.List) > 0 { - p.byName = make(map[protoreflect.Name]*Extension, len(p.List)) - for i := range p.List { - d := &p.List[i] - if _, ok := p.byName[d.Name()]; !ok { - p.byName[d.Name()] = d - } - } - } - }) - return p -} - -type Services struct { - List []Service - once sync.Once - byName map[protoreflect.Name]*Service // protected by once -} - -func (p *Services) Len() int { - return len(p.List) -} -func (p *Services) Get(i int) protoreflect.ServiceDescriptor { - return &p.List[i] -} -func (p *Services) ByName(s protoreflect.Name) protoreflect.ServiceDescriptor { - if d := p.lazyInit().byName[s]; d != nil { - return d - } - return nil -} -func (p *Services) Format(s fmt.State, r rune) { - descfmt.FormatList(s, r, p) -} -func (p *Services) ProtoInternal(pragma.DoNotImplement) {} -func (p *Services) lazyInit() *Services { - p.once.Do(func() { - if len(p.List) > 0 { - p.byName = make(map[protoreflect.Name]*Service, len(p.List)) - for i := range p.List { - d := &p.List[i] - if _, ok := p.byName[d.Name()]; !ok { - p.byName[d.Name()] = d - } - } - } - }) - return p -} - -type Methods struct { - List []Method - once sync.Once - byName map[protoreflect.Name]*Method // protected by once -} - -func (p *Methods) Len() int { - return len(p.List) -} -func (p *Methods) Get(i int) protoreflect.MethodDescriptor { - return &p.List[i] -} -func (p *Methods) ByName(s protoreflect.Name) protoreflect.MethodDescriptor { - if d := p.lazyInit().byName[s]; d != nil { - return d - } - return nil -} -func (p *Methods) Format(s fmt.State, r rune) { - descfmt.FormatList(s, r, p) -} -func (p *Methods) ProtoInternal(pragma.DoNotImplement) {} -func (p *Methods) lazyInit() *Methods { - p.once.Do(func() { - if len(p.List) > 0 { - p.byName = make(map[protoreflect.Name]*Method, len(p.List)) - for i := range p.List { - d := &p.List[i] - if _, ok := p.byName[d.Name()]; !ok { - p.byName[d.Name()] = d - } - } - } - }) - return p -} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go deleted file mode 100644 index dbf2c605b..000000000 --- a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package filedesc - -import ( - "google.golang.org/protobuf/internal/descopts" - "google.golang.org/protobuf/internal/pragma" - pref "google.golang.org/protobuf/reflect/protoreflect" -) - -var ( - emptyNames = new(Names) - emptyEnumRanges = new(EnumRanges) - emptyFieldRanges = new(FieldRanges) - emptyFieldNumbers = new(FieldNumbers) - emptySourceLocations = new(SourceLocations) - - emptyFiles = new(FileImports) - emptyMessages = new(Messages) - emptyFields = new(Fields) - emptyOneofs = new(Oneofs) - emptyEnums = new(Enums) - emptyEnumValues = new(EnumValues) - emptyExtensions = new(Extensions) - emptyServices = new(Services) -) - -// PlaceholderFile is a placeholder, representing only the file path. 
-type PlaceholderFile string - -func (f PlaceholderFile) ParentFile() pref.FileDescriptor { return f } -func (f PlaceholderFile) Parent() pref.Descriptor { return nil } -func (f PlaceholderFile) Index() int { return 0 } -func (f PlaceholderFile) Syntax() pref.Syntax { return 0 } -func (f PlaceholderFile) Name() pref.Name { return "" } -func (f PlaceholderFile) FullName() pref.FullName { return "" } -func (f PlaceholderFile) IsPlaceholder() bool { return true } -func (f PlaceholderFile) Options() pref.ProtoMessage { return descopts.File } -func (f PlaceholderFile) Path() string { return string(f) } -func (f PlaceholderFile) Package() pref.FullName { return "" } -func (f PlaceholderFile) Imports() pref.FileImports { return emptyFiles } -func (f PlaceholderFile) Messages() pref.MessageDescriptors { return emptyMessages } -func (f PlaceholderFile) Enums() pref.EnumDescriptors { return emptyEnums } -func (f PlaceholderFile) Extensions() pref.ExtensionDescriptors { return emptyExtensions } -func (f PlaceholderFile) Services() pref.ServiceDescriptors { return emptyServices } -func (f PlaceholderFile) SourceLocations() pref.SourceLocations { return emptySourceLocations } -func (f PlaceholderFile) ProtoType(pref.FileDescriptor) { return } -func (f PlaceholderFile) ProtoInternal(pragma.DoNotImplement) { return } - -// PlaceholderEnum is a placeholder, representing only the full name. -type PlaceholderEnum pref.FullName - -func (e PlaceholderEnum) ParentFile() pref.FileDescriptor { return nil } -func (e PlaceholderEnum) Parent() pref.Descriptor { return nil } -func (e PlaceholderEnum) Index() int { return 0 } -func (e PlaceholderEnum) Syntax() pref.Syntax { return 0 } -func (e PlaceholderEnum) Name() pref.Name { return pref.FullName(e).Name() } -func (e PlaceholderEnum) FullName() pref.FullName { return pref.FullName(e) } -func (e PlaceholderEnum) IsPlaceholder() bool { return true } -func (e PlaceholderEnum) Options() pref.ProtoMessage { return descopts.Enum } -func (e PlaceholderEnum) Values() pref.EnumValueDescriptors { return emptyEnumValues } -func (e PlaceholderEnum) ReservedNames() pref.Names { return emptyNames } -func (e PlaceholderEnum) ReservedRanges() pref.EnumRanges { return emptyEnumRanges } -func (e PlaceholderEnum) ProtoType(pref.EnumDescriptor) { return } -func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return } - -// PlaceholderEnumValue is a placeholder, representing only the full name. -type PlaceholderEnumValue pref.FullName - -func (e PlaceholderEnumValue) ParentFile() pref.FileDescriptor { return nil } -func (e PlaceholderEnumValue) Parent() pref.Descriptor { return nil } -func (e PlaceholderEnumValue) Index() int { return 0 } -func (e PlaceholderEnumValue) Syntax() pref.Syntax { return 0 } -func (e PlaceholderEnumValue) Name() pref.Name { return pref.FullName(e).Name() } -func (e PlaceholderEnumValue) FullName() pref.FullName { return pref.FullName(e) } -func (e PlaceholderEnumValue) IsPlaceholder() bool { return true } -func (e PlaceholderEnumValue) Options() pref.ProtoMessage { return descopts.EnumValue } -func (e PlaceholderEnumValue) Number() pref.EnumNumber { return 0 } -func (e PlaceholderEnumValue) ProtoType(pref.EnumValueDescriptor) { return } -func (e PlaceholderEnumValue) ProtoInternal(pragma.DoNotImplement) { return } - -// PlaceholderMessage is a placeholder, representing only the full name. 
-type PlaceholderMessage pref.FullName - -func (m PlaceholderMessage) ParentFile() pref.FileDescriptor { return nil } -func (m PlaceholderMessage) Parent() pref.Descriptor { return nil } -func (m PlaceholderMessage) Index() int { return 0 } -func (m PlaceholderMessage) Syntax() pref.Syntax { return 0 } -func (m PlaceholderMessage) Name() pref.Name { return pref.FullName(m).Name() } -func (m PlaceholderMessage) FullName() pref.FullName { return pref.FullName(m) } -func (m PlaceholderMessage) IsPlaceholder() bool { return true } -func (m PlaceholderMessage) Options() pref.ProtoMessage { return descopts.Message } -func (m PlaceholderMessage) IsMapEntry() bool { return false } -func (m PlaceholderMessage) Fields() pref.FieldDescriptors { return emptyFields } -func (m PlaceholderMessage) Oneofs() pref.OneofDescriptors { return emptyOneofs } -func (m PlaceholderMessage) ReservedNames() pref.Names { return emptyNames } -func (m PlaceholderMessage) ReservedRanges() pref.FieldRanges { return emptyFieldRanges } -func (m PlaceholderMessage) RequiredNumbers() pref.FieldNumbers { return emptyFieldNumbers } -func (m PlaceholderMessage) ExtensionRanges() pref.FieldRanges { return emptyFieldRanges } -func (m PlaceholderMessage) ExtensionRangeOptions(int) pref.ProtoMessage { panic("index out of range") } -func (m PlaceholderMessage) Messages() pref.MessageDescriptors { return emptyMessages } -func (m PlaceholderMessage) Enums() pref.EnumDescriptors { return emptyEnums } -func (m PlaceholderMessage) Extensions() pref.ExtensionDescriptors { return emptyExtensions } -func (m PlaceholderMessage) ProtoType(pref.MessageDescriptor) { return } -func (m PlaceholderMessage) ProtoInternal(pragma.DoNotImplement) { return } diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go deleted file mode 100644 index 0a0dd35de..000000000 --- a/vendor/google.golang.org/protobuf/internal/filetype/build.go +++ /dev/null @@ -1,297 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package filetype provides functionality for wrapping descriptors -// with Go type information. -package filetype - -import ( - "reflect" - - "google.golang.org/protobuf/internal/descopts" - fdesc "google.golang.org/protobuf/internal/filedesc" - pimpl "google.golang.org/protobuf/internal/impl" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" -) - -// Builder constructs type descriptors from a raw file descriptor -// and associated Go types for each enum and message declaration. -// -// -// Flattened Ordering -// -// The protobuf type system represents declarations as a tree. Certain nodes in -// the tree require us to either associate it with a concrete Go type or to -// resolve a dependency, which is information that must be provided separately -// since it cannot be derived from the file descriptor alone. -// -// However, representing a tree as Go literals is difficult to simply do in a -// space and time efficient way. Thus, we store them as a flattened list of -// objects where the serialization order from the tree-based form is important. 
-// -// The "flattened ordering" is defined as a tree traversal of all enum, message, -// extension, and service declarations using the following algorithm: -// -// def VisitFileDecls(fd): -// for e in fd.Enums: yield e -// for m in fd.Messages: yield m -// for x in fd.Extensions: yield x -// for s in fd.Services: yield s -// for m in fd.Messages: yield from VisitMessageDecls(m) -// -// def VisitMessageDecls(md): -// for e in md.Enums: yield e -// for m in md.Messages: yield m -// for x in md.Extensions: yield x -// for m in md.Messages: yield from VisitMessageDecls(m) -// -// The traversal starts at the root file descriptor and yields each direct -// declaration within each node before traversing into sub-declarations -// that children themselves may have. -type Builder struct { - // File is the underlying file descriptor builder. - File fdesc.Builder - - // GoTypes is a unique set of the Go types for all declarations and - // dependencies. Each type is represented as a zero value of the Go type. - // - // Declarations are Go types generated for enums and messages directly - // declared (not publicly imported) in the proto source file. - // Messages for map entries are accounted for, but represented by nil. - // Enum declarations in "flattened ordering" come first, followed by - // message declarations in "flattened ordering". - // - // Dependencies are Go types for enums or messages referenced by - // message fields (excluding weak fields), for parent extended messages of - // extension fields, for enums or messages referenced by extension fields, - // and for input and output messages referenced by service methods. - // Dependencies must come after declarations, but the ordering of - // dependencies themselves is unspecified. - GoTypes []interface{} - - // DependencyIndexes is an ordered list of indexes into GoTypes for the - // dependencies of messages, extensions, or services. - // - // There are 5 sub-lists in "flattened ordering" concatenated back-to-back: - // 0. Message field dependencies: list of the enum or message type - // referred to by every message field. - // 1. Extension field targets: list of the extended parent message of - // every extension. - // 2. Extension field dependencies: list of the enum or message type - // referred to by every extension field. - // 3. Service method inputs: list of the input message type - // referred to by every service method. - // 4. Service method outputs: list of the output message type - // referred to by every service method. - // - // The offset into DependencyIndexes for the start of each sub-list - // is appended to the end in reverse order. - DependencyIndexes []int32 - - // EnumInfos is a list of enum infos in "flattened ordering". - EnumInfos []pimpl.EnumInfo - - // MessageInfos is a list of message infos in "flattened ordering". - // If provided, the GoType and PBType for each element is populated. - // - // Requirement: len(MessageInfos) == len(Build.Messages) - MessageInfos []pimpl.MessageInfo - - // ExtensionInfos is a list of extension infos in "flattened ordering". - // Each element is initialized and registered with the protoregistry package. - // - // Requirement: len(LegacyExtensions) == len(Build.Extensions) - ExtensionInfos []pimpl.ExtensionInfo - - // TypeRegistry is the registry to register each type descriptor. - // If nil, it uses protoregistry.GlobalTypes. 
- TypeRegistry interface { - RegisterMessage(pref.MessageType) error - RegisterEnum(pref.EnumType) error - RegisterExtension(pref.ExtensionType) error - } -} - -// Out is the output of the builder. -type Out struct { - File pref.FileDescriptor -} - -func (tb Builder) Build() (out Out) { - // Replace the resolver with one that resolves dependencies by index, - // which is faster and more reliable than relying on the global registry. - if tb.File.FileRegistry == nil { - tb.File.FileRegistry = preg.GlobalFiles - } - tb.File.FileRegistry = &resolverByIndex{ - goTypes: tb.GoTypes, - depIdxs: tb.DependencyIndexes, - fileRegistry: tb.File.FileRegistry, - } - - // Initialize registry if unpopulated. - if tb.TypeRegistry == nil { - tb.TypeRegistry = preg.GlobalTypes - } - - fbOut := tb.File.Build() - out.File = fbOut.File - - // Process enums. - enumGoTypes := tb.GoTypes[:len(fbOut.Enums)] - if len(tb.EnumInfos) != len(fbOut.Enums) { - panic("mismatching enum lengths") - } - if len(fbOut.Enums) > 0 { - for i := range fbOut.Enums { - tb.EnumInfos[i] = pimpl.EnumInfo{ - GoReflectType: reflect.TypeOf(enumGoTypes[i]), - Desc: &fbOut.Enums[i], - } - // Register enum types. - if err := tb.TypeRegistry.RegisterEnum(&tb.EnumInfos[i]); err != nil { - panic(err) - } - } - } - - // Process messages. - messageGoTypes := tb.GoTypes[len(fbOut.Enums):][:len(fbOut.Messages)] - if len(tb.MessageInfos) != len(fbOut.Messages) { - panic("mismatching message lengths") - } - if len(fbOut.Messages) > 0 { - for i := range fbOut.Messages { - if messageGoTypes[i] == nil { - continue // skip map entry - } - - tb.MessageInfos[i].GoReflectType = reflect.TypeOf(messageGoTypes[i]) - tb.MessageInfos[i].Desc = &fbOut.Messages[i] - - // Register message types. - if err := tb.TypeRegistry.RegisterMessage(&tb.MessageInfos[i]); err != nil { - panic(err) - } - } - - // As a special-case for descriptor.proto, - // locally register concrete message type for the options. - if out.File.Path() == "google/protobuf/descriptor.proto" && out.File.Package() == "google.protobuf" { - for i := range fbOut.Messages { - switch fbOut.Messages[i].Name() { - case "FileOptions": - descopts.File = messageGoTypes[i].(pref.ProtoMessage) - case "EnumOptions": - descopts.Enum = messageGoTypes[i].(pref.ProtoMessage) - case "EnumValueOptions": - descopts.EnumValue = messageGoTypes[i].(pref.ProtoMessage) - case "MessageOptions": - descopts.Message = messageGoTypes[i].(pref.ProtoMessage) - case "FieldOptions": - descopts.Field = messageGoTypes[i].(pref.ProtoMessage) - case "OneofOptions": - descopts.Oneof = messageGoTypes[i].(pref.ProtoMessage) - case "ExtensionRangeOptions": - descopts.ExtensionRange = messageGoTypes[i].(pref.ProtoMessage) - case "ServiceOptions": - descopts.Service = messageGoTypes[i].(pref.ProtoMessage) - case "MethodOptions": - descopts.Method = messageGoTypes[i].(pref.ProtoMessage) - } - } - } - } - - // Process extensions. - if len(tb.ExtensionInfos) != len(fbOut.Extensions) { - panic("mismatching extension lengths") - } - var depIdx int32 - for i := range fbOut.Extensions { - // For enum and message kinds, determine the referent Go type so - // that we can construct their constructors. 
- const listExtDeps = 2 - var goType reflect.Type - switch fbOut.Extensions[i].L1.Kind { - case pref.EnumKind: - j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx) - goType = reflect.TypeOf(tb.GoTypes[j]) - depIdx++ - case pref.MessageKind, pref.GroupKind: - j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx) - goType = reflect.TypeOf(tb.GoTypes[j]) - depIdx++ - default: - goType = goTypeForPBKind[fbOut.Extensions[i].L1.Kind] - } - if fbOut.Extensions[i].IsList() { - goType = reflect.SliceOf(goType) - } - - pimpl.InitExtensionInfo(&tb.ExtensionInfos[i], &fbOut.Extensions[i], goType) - - // Register extension types. - if err := tb.TypeRegistry.RegisterExtension(&tb.ExtensionInfos[i]); err != nil { - panic(err) - } - } - - return out -} - -var goTypeForPBKind = map[pref.Kind]reflect.Type{ - pref.BoolKind: reflect.TypeOf(bool(false)), - pref.Int32Kind: reflect.TypeOf(int32(0)), - pref.Sint32Kind: reflect.TypeOf(int32(0)), - pref.Sfixed32Kind: reflect.TypeOf(int32(0)), - pref.Int64Kind: reflect.TypeOf(int64(0)), - pref.Sint64Kind: reflect.TypeOf(int64(0)), - pref.Sfixed64Kind: reflect.TypeOf(int64(0)), - pref.Uint32Kind: reflect.TypeOf(uint32(0)), - pref.Fixed32Kind: reflect.TypeOf(uint32(0)), - pref.Uint64Kind: reflect.TypeOf(uint64(0)), - pref.Fixed64Kind: reflect.TypeOf(uint64(0)), - pref.FloatKind: reflect.TypeOf(float32(0)), - pref.DoubleKind: reflect.TypeOf(float64(0)), - pref.StringKind: reflect.TypeOf(string("")), - pref.BytesKind: reflect.TypeOf([]byte(nil)), -} - -type depIdxs []int32 - -// Get retrieves the jth element of the ith sub-list. -func (x depIdxs) Get(i, j int32) int32 { - return x[x[int32(len(x))-i-1]+j] -} - -type ( - resolverByIndex struct { - goTypes []interface{} - depIdxs depIdxs - fileRegistry - } - fileRegistry interface { - FindFileByPath(string) (pref.FileDescriptor, error) - FindDescriptorByName(pref.FullName) (pref.Descriptor, error) - RegisterFile(pref.FileDescriptor) error - } -) - -func (r *resolverByIndex) FindEnumByIndex(i, j int32, es []fdesc.Enum, ms []fdesc.Message) pref.EnumDescriptor { - if depIdx := int(r.depIdxs.Get(i, j)); int(depIdx) < len(es)+len(ms) { - return &es[depIdx] - } else { - return pimpl.Export{}.EnumDescriptorOf(r.goTypes[depIdx]) - } -} - -func (r *resolverByIndex) FindMessageByIndex(i, j int32, es []fdesc.Enum, ms []fdesc.Message) pref.MessageDescriptor { - if depIdx := int(r.depIdxs.Get(i, j)); depIdx < len(es)+len(ms) { - return &ms[depIdx-len(es)] - } else { - return pimpl.Export{}.MessageDescriptorOf(r.goTypes[depIdx]) - } -} diff --git a/vendor/google.golang.org/protobuf/internal/flags/flags.go b/vendor/google.golang.org/protobuf/internal/flags/flags.go deleted file mode 100644 index 58372dd34..000000000 --- a/vendor/google.golang.org/protobuf/internal/flags/flags.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package flags provides a set of flags controlled by build tags. -package flags - -// ProtoLegacy specifies whether to enable support for legacy functionality -// such as MessageSets, weak fields, and various other obscure behavior -// that is necessary to maintain backwards compatibility with proto1 or -// the pre-release variants of proto2 and proto3. -// -// This is disabled by default unless built with the "protolegacy" tag. -// -// WARNING: The compatibility agreement covers nothing provided by this flag. 
-// As such, functionality may suddenly be removed or changed at our discretion. -const ProtoLegacy = protoLegacy - -// LazyUnmarshalExtensions specifies whether to lazily unmarshal extensions. -// -// Lazy extension unmarshaling validates the contents of message-valued -// extension fields at unmarshal time, but defers creating the message -// structure until the extension is first accessed. -const LazyUnmarshalExtensions = ProtoLegacy diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go deleted file mode 100644 index bda8e8cf3..000000000 --- a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !protolegacy -// +build !protolegacy - -package flags - -const protoLegacy = false diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go deleted file mode 100644 index 6d8d9bd6b..000000000 --- a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build protolegacy -// +build protolegacy - -package flags - -const protoLegacy = true diff --git a/vendor/google.golang.org/protobuf/internal/genid/any_gen.go b/vendor/google.golang.org/protobuf/internal/genid/any_gen.go deleted file mode 100644 index e6f7d47ab..000000000 --- a/vendor/google.golang.org/protobuf/internal/genid/any_gen.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package genid - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" -) - -const File_google_protobuf_any_proto = "google/protobuf/any.proto" - -// Names for google.protobuf.Any. -const ( - Any_message_name protoreflect.Name = "Any" - Any_message_fullname protoreflect.FullName = "google.protobuf.Any" -) - -// Field names for google.protobuf.Any. -const ( - Any_TypeUrl_field_name protoreflect.Name = "type_url" - Any_Value_field_name protoreflect.Name = "value" - - Any_TypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Any.type_url" - Any_Value_field_fullname protoreflect.FullName = "google.protobuf.Any.value" -) - -// Field numbers for google.protobuf.Any. -const ( - Any_TypeUrl_field_number protoreflect.FieldNumber = 1 - Any_Value_field_number protoreflect.FieldNumber = 2 -) diff --git a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go deleted file mode 100644 index df8f91850..000000000 --- a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. 
- -package genid - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" -) - -const File_google_protobuf_api_proto = "google/protobuf/api.proto" - -// Names for google.protobuf.Api. -const ( - Api_message_name protoreflect.Name = "Api" - Api_message_fullname protoreflect.FullName = "google.protobuf.Api" -) - -// Field names for google.protobuf.Api. -const ( - Api_Name_field_name protoreflect.Name = "name" - Api_Methods_field_name protoreflect.Name = "methods" - Api_Options_field_name protoreflect.Name = "options" - Api_Version_field_name protoreflect.Name = "version" - Api_SourceContext_field_name protoreflect.Name = "source_context" - Api_Mixins_field_name protoreflect.Name = "mixins" - Api_Syntax_field_name protoreflect.Name = "syntax" - - Api_Name_field_fullname protoreflect.FullName = "google.protobuf.Api.name" - Api_Methods_field_fullname protoreflect.FullName = "google.protobuf.Api.methods" - Api_Options_field_fullname protoreflect.FullName = "google.protobuf.Api.options" - Api_Version_field_fullname protoreflect.FullName = "google.protobuf.Api.version" - Api_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Api.source_context" - Api_Mixins_field_fullname protoreflect.FullName = "google.protobuf.Api.mixins" - Api_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Api.syntax" -) - -// Field numbers for google.protobuf.Api. -const ( - Api_Name_field_number protoreflect.FieldNumber = 1 - Api_Methods_field_number protoreflect.FieldNumber = 2 - Api_Options_field_number protoreflect.FieldNumber = 3 - Api_Version_field_number protoreflect.FieldNumber = 4 - Api_SourceContext_field_number protoreflect.FieldNumber = 5 - Api_Mixins_field_number protoreflect.FieldNumber = 6 - Api_Syntax_field_number protoreflect.FieldNumber = 7 -) - -// Names for google.protobuf.Method. -const ( - Method_message_name protoreflect.Name = "Method" - Method_message_fullname protoreflect.FullName = "google.protobuf.Method" -) - -// Field names for google.protobuf.Method. -const ( - Method_Name_field_name protoreflect.Name = "name" - Method_RequestTypeUrl_field_name protoreflect.Name = "request_type_url" - Method_RequestStreaming_field_name protoreflect.Name = "request_streaming" - Method_ResponseTypeUrl_field_name protoreflect.Name = "response_type_url" - Method_ResponseStreaming_field_name protoreflect.Name = "response_streaming" - Method_Options_field_name protoreflect.Name = "options" - Method_Syntax_field_name protoreflect.Name = "syntax" - - Method_Name_field_fullname protoreflect.FullName = "google.protobuf.Method.name" - Method_RequestTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.request_type_url" - Method_RequestStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.request_streaming" - Method_ResponseTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.response_type_url" - Method_ResponseStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.response_streaming" - Method_Options_field_fullname protoreflect.FullName = "google.protobuf.Method.options" - Method_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Method.syntax" -) - -// Field numbers for google.protobuf.Method. 
-const ( - Method_Name_field_number protoreflect.FieldNumber = 1 - Method_RequestTypeUrl_field_number protoreflect.FieldNumber = 2 - Method_RequestStreaming_field_number protoreflect.FieldNumber = 3 - Method_ResponseTypeUrl_field_number protoreflect.FieldNumber = 4 - Method_ResponseStreaming_field_number protoreflect.FieldNumber = 5 - Method_Options_field_number protoreflect.FieldNumber = 6 - Method_Syntax_field_number protoreflect.FieldNumber = 7 -) - -// Names for google.protobuf.Mixin. -const ( - Mixin_message_name protoreflect.Name = "Mixin" - Mixin_message_fullname protoreflect.FullName = "google.protobuf.Mixin" -) - -// Field names for google.protobuf.Mixin. -const ( - Mixin_Name_field_name protoreflect.Name = "name" - Mixin_Root_field_name protoreflect.Name = "root" - - Mixin_Name_field_fullname protoreflect.FullName = "google.protobuf.Mixin.name" - Mixin_Root_field_fullname protoreflect.FullName = "google.protobuf.Mixin.root" -) - -// Field numbers for google.protobuf.Mixin. -const ( - Mixin_Name_field_number protoreflect.FieldNumber = 1 - Mixin_Root_field_number protoreflect.FieldNumber = 2 -) diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go deleted file mode 100644 index e3cdf1c20..000000000 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ /dev/null @@ -1,829 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package genid - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" -) - -const File_google_protobuf_descriptor_proto = "google/protobuf/descriptor.proto" - -// Names for google.protobuf.FileDescriptorSet. -const ( - FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet" - FileDescriptorSet_message_fullname protoreflect.FullName = "google.protobuf.FileDescriptorSet" -) - -// Field names for google.protobuf.FileDescriptorSet. -const ( - FileDescriptorSet_File_field_name protoreflect.Name = "file" - - FileDescriptorSet_File_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorSet.file" -) - -// Field numbers for google.protobuf.FileDescriptorSet. -const ( - FileDescriptorSet_File_field_number protoreflect.FieldNumber = 1 -) - -// Names for google.protobuf.FileDescriptorProto. -const ( - FileDescriptorProto_message_name protoreflect.Name = "FileDescriptorProto" - FileDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto" -) - -// Field names for google.protobuf.FileDescriptorProto. 
-const ( - FileDescriptorProto_Name_field_name protoreflect.Name = "name" - FileDescriptorProto_Package_field_name protoreflect.Name = "package" - FileDescriptorProto_Dependency_field_name protoreflect.Name = "dependency" - FileDescriptorProto_PublicDependency_field_name protoreflect.Name = "public_dependency" - FileDescriptorProto_WeakDependency_field_name protoreflect.Name = "weak_dependency" - FileDescriptorProto_MessageType_field_name protoreflect.Name = "message_type" - FileDescriptorProto_EnumType_field_name protoreflect.Name = "enum_type" - FileDescriptorProto_Service_field_name protoreflect.Name = "service" - FileDescriptorProto_Extension_field_name protoreflect.Name = "extension" - FileDescriptorProto_Options_field_name protoreflect.Name = "options" - FileDescriptorProto_SourceCodeInfo_field_name protoreflect.Name = "source_code_info" - FileDescriptorProto_Syntax_field_name protoreflect.Name = "syntax" - - FileDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.name" - FileDescriptorProto_Package_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.package" - FileDescriptorProto_Dependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.dependency" - FileDescriptorProto_PublicDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.public_dependency" - FileDescriptorProto_WeakDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.weak_dependency" - FileDescriptorProto_MessageType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.message_type" - FileDescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.enum_type" - FileDescriptorProto_Service_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.service" - FileDescriptorProto_Extension_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.extension" - FileDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.options" - FileDescriptorProto_SourceCodeInfo_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.source_code_info" - FileDescriptorProto_Syntax_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.syntax" -) - -// Field numbers for google.protobuf.FileDescriptorProto. -const ( - FileDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 - FileDescriptorProto_Package_field_number protoreflect.FieldNumber = 2 - FileDescriptorProto_Dependency_field_number protoreflect.FieldNumber = 3 - FileDescriptorProto_PublicDependency_field_number protoreflect.FieldNumber = 10 - FileDescriptorProto_WeakDependency_field_number protoreflect.FieldNumber = 11 - FileDescriptorProto_MessageType_field_number protoreflect.FieldNumber = 4 - FileDescriptorProto_EnumType_field_number protoreflect.FieldNumber = 5 - FileDescriptorProto_Service_field_number protoreflect.FieldNumber = 6 - FileDescriptorProto_Extension_field_number protoreflect.FieldNumber = 7 - FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 - FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9 - FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12 -) - -// Names for google.protobuf.DescriptorProto. 
-const ( - DescriptorProto_message_name protoreflect.Name = "DescriptorProto" - DescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto" -) - -// Field names for google.protobuf.DescriptorProto. -const ( - DescriptorProto_Name_field_name protoreflect.Name = "name" - DescriptorProto_Field_field_name protoreflect.Name = "field" - DescriptorProto_Extension_field_name protoreflect.Name = "extension" - DescriptorProto_NestedType_field_name protoreflect.Name = "nested_type" - DescriptorProto_EnumType_field_name protoreflect.Name = "enum_type" - DescriptorProto_ExtensionRange_field_name protoreflect.Name = "extension_range" - DescriptorProto_OneofDecl_field_name protoreflect.Name = "oneof_decl" - DescriptorProto_Options_field_name protoreflect.Name = "options" - DescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range" - DescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name" - - DescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.name" - DescriptorProto_Field_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.field" - DescriptorProto_Extension_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.extension" - DescriptorProto_NestedType_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.nested_type" - DescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.enum_type" - DescriptorProto_ExtensionRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.extension_range" - DescriptorProto_OneofDecl_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.oneof_decl" - DescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.options" - DescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_range" - DescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_name" -) - -// Field numbers for google.protobuf.DescriptorProto. -const ( - DescriptorProto_Name_field_number protoreflect.FieldNumber = 1 - DescriptorProto_Field_field_number protoreflect.FieldNumber = 2 - DescriptorProto_Extension_field_number protoreflect.FieldNumber = 6 - DescriptorProto_NestedType_field_number protoreflect.FieldNumber = 3 - DescriptorProto_EnumType_field_number protoreflect.FieldNumber = 4 - DescriptorProto_ExtensionRange_field_number protoreflect.FieldNumber = 5 - DescriptorProto_OneofDecl_field_number protoreflect.FieldNumber = 8 - DescriptorProto_Options_field_number protoreflect.FieldNumber = 7 - DescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 9 - DescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 10 -) - -// Names for google.protobuf.DescriptorProto.ExtensionRange. -const ( - DescriptorProto_ExtensionRange_message_name protoreflect.Name = "ExtensionRange" - DescriptorProto_ExtensionRange_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange" -) - -// Field names for google.protobuf.DescriptorProto.ExtensionRange. 
-const ( - DescriptorProto_ExtensionRange_Start_field_name protoreflect.Name = "start" - DescriptorProto_ExtensionRange_End_field_name protoreflect.Name = "end" - DescriptorProto_ExtensionRange_Options_field_name protoreflect.Name = "options" - - DescriptorProto_ExtensionRange_Start_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.start" - DescriptorProto_ExtensionRange_End_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.end" - DescriptorProto_ExtensionRange_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.options" -) - -// Field numbers for google.protobuf.DescriptorProto.ExtensionRange. -const ( - DescriptorProto_ExtensionRange_Start_field_number protoreflect.FieldNumber = 1 - DescriptorProto_ExtensionRange_End_field_number protoreflect.FieldNumber = 2 - DescriptorProto_ExtensionRange_Options_field_number protoreflect.FieldNumber = 3 -) - -// Names for google.protobuf.DescriptorProto.ReservedRange. -const ( - DescriptorProto_ReservedRange_message_name protoreflect.Name = "ReservedRange" - DescriptorProto_ReservedRange_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange" -) - -// Field names for google.protobuf.DescriptorProto.ReservedRange. -const ( - DescriptorProto_ReservedRange_Start_field_name protoreflect.Name = "start" - DescriptorProto_ReservedRange_End_field_name protoreflect.Name = "end" - - DescriptorProto_ReservedRange_Start_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange.start" - DescriptorProto_ReservedRange_End_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange.end" -) - -// Field numbers for google.protobuf.DescriptorProto.ReservedRange. -const ( - DescriptorProto_ReservedRange_Start_field_number protoreflect.FieldNumber = 1 - DescriptorProto_ReservedRange_End_field_number protoreflect.FieldNumber = 2 -) - -// Names for google.protobuf.ExtensionRangeOptions. -const ( - ExtensionRangeOptions_message_name protoreflect.Name = "ExtensionRangeOptions" - ExtensionRangeOptions_message_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions" -) - -// Field names for google.protobuf.ExtensionRangeOptions. -const ( - ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" - - ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option" -) - -// Field numbers for google.protobuf.ExtensionRangeOptions. -const ( - ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 -) - -// Names for google.protobuf.FieldDescriptorProto. -const ( - FieldDescriptorProto_message_name protoreflect.Name = "FieldDescriptorProto" - FieldDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto" -) - -// Field names for google.protobuf.FieldDescriptorProto. 
-const ( - FieldDescriptorProto_Name_field_name protoreflect.Name = "name" - FieldDescriptorProto_Number_field_name protoreflect.Name = "number" - FieldDescriptorProto_Label_field_name protoreflect.Name = "label" - FieldDescriptorProto_Type_field_name protoreflect.Name = "type" - FieldDescriptorProto_TypeName_field_name protoreflect.Name = "type_name" - FieldDescriptorProto_Extendee_field_name protoreflect.Name = "extendee" - FieldDescriptorProto_DefaultValue_field_name protoreflect.Name = "default_value" - FieldDescriptorProto_OneofIndex_field_name protoreflect.Name = "oneof_index" - FieldDescriptorProto_JsonName_field_name protoreflect.Name = "json_name" - FieldDescriptorProto_Options_field_name protoreflect.Name = "options" - FieldDescriptorProto_Proto3Optional_field_name protoreflect.Name = "proto3_optional" - - FieldDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.name" - FieldDescriptorProto_Number_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.number" - FieldDescriptorProto_Label_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.label" - FieldDescriptorProto_Type_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.type" - FieldDescriptorProto_TypeName_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.type_name" - FieldDescriptorProto_Extendee_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.extendee" - FieldDescriptorProto_DefaultValue_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.default_value" - FieldDescriptorProto_OneofIndex_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.oneof_index" - FieldDescriptorProto_JsonName_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.json_name" - FieldDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.options" - FieldDescriptorProto_Proto3Optional_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.proto3_optional" -) - -// Field numbers for google.protobuf.FieldDescriptorProto. -const ( - FieldDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 - FieldDescriptorProto_Number_field_number protoreflect.FieldNumber = 3 - FieldDescriptorProto_Label_field_number protoreflect.FieldNumber = 4 - FieldDescriptorProto_Type_field_number protoreflect.FieldNumber = 5 - FieldDescriptorProto_TypeName_field_number protoreflect.FieldNumber = 6 - FieldDescriptorProto_Extendee_field_number protoreflect.FieldNumber = 2 - FieldDescriptorProto_DefaultValue_field_number protoreflect.FieldNumber = 7 - FieldDescriptorProto_OneofIndex_field_number protoreflect.FieldNumber = 9 - FieldDescriptorProto_JsonName_field_number protoreflect.FieldNumber = 10 - FieldDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 - FieldDescriptorProto_Proto3Optional_field_number protoreflect.FieldNumber = 17 -) - -// Full and short names for google.protobuf.FieldDescriptorProto.Type. -const ( - FieldDescriptorProto_Type_enum_fullname = "google.protobuf.FieldDescriptorProto.Type" - FieldDescriptorProto_Type_enum_name = "Type" -) - -// Full and short names for google.protobuf.FieldDescriptorProto.Label. -const ( - FieldDescriptorProto_Label_enum_fullname = "google.protobuf.FieldDescriptorProto.Label" - FieldDescriptorProto_Label_enum_name = "Label" -) - -// Names for google.protobuf.OneofDescriptorProto. 
-const ( - OneofDescriptorProto_message_name protoreflect.Name = "OneofDescriptorProto" - OneofDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto" -) - -// Field names for google.protobuf.OneofDescriptorProto. -const ( - OneofDescriptorProto_Name_field_name protoreflect.Name = "name" - OneofDescriptorProto_Options_field_name protoreflect.Name = "options" - - OneofDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto.name" - OneofDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto.options" -) - -// Field numbers for google.protobuf.OneofDescriptorProto. -const ( - OneofDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 - OneofDescriptorProto_Options_field_number protoreflect.FieldNumber = 2 -) - -// Names for google.protobuf.EnumDescriptorProto. -const ( - EnumDescriptorProto_message_name protoreflect.Name = "EnumDescriptorProto" - EnumDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto" -) - -// Field names for google.protobuf.EnumDescriptorProto. -const ( - EnumDescriptorProto_Name_field_name protoreflect.Name = "name" - EnumDescriptorProto_Value_field_name protoreflect.Name = "value" - EnumDescriptorProto_Options_field_name protoreflect.Name = "options" - EnumDescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range" - EnumDescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name" - - EnumDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.name" - EnumDescriptorProto_Value_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.value" - EnumDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.options" - EnumDescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_range" - EnumDescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_name" -) - -// Field numbers for google.protobuf.EnumDescriptorProto. -const ( - EnumDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 - EnumDescriptorProto_Value_field_number protoreflect.FieldNumber = 2 - EnumDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 - EnumDescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 4 - EnumDescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 5 -) - -// Names for google.protobuf.EnumDescriptorProto.EnumReservedRange. -const ( - EnumDescriptorProto_EnumReservedRange_message_name protoreflect.Name = "EnumReservedRange" - EnumDescriptorProto_EnumReservedRange_message_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange" -) - -// Field names for google.protobuf.EnumDescriptorProto.EnumReservedRange. -const ( - EnumDescriptorProto_EnumReservedRange_Start_field_name protoreflect.Name = "start" - EnumDescriptorProto_EnumReservedRange_End_field_name protoreflect.Name = "end" - - EnumDescriptorProto_EnumReservedRange_Start_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange.start" - EnumDescriptorProto_EnumReservedRange_End_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange.end" -) - -// Field numbers for google.protobuf.EnumDescriptorProto.EnumReservedRange. 
-const ( - EnumDescriptorProto_EnumReservedRange_Start_field_number protoreflect.FieldNumber = 1 - EnumDescriptorProto_EnumReservedRange_End_field_number protoreflect.FieldNumber = 2 -) - -// Names for google.protobuf.EnumValueDescriptorProto. -const ( - EnumValueDescriptorProto_message_name protoreflect.Name = "EnumValueDescriptorProto" - EnumValueDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto" -) - -// Field names for google.protobuf.EnumValueDescriptorProto. -const ( - EnumValueDescriptorProto_Name_field_name protoreflect.Name = "name" - EnumValueDescriptorProto_Number_field_name protoreflect.Name = "number" - EnumValueDescriptorProto_Options_field_name protoreflect.Name = "options" - - EnumValueDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.name" - EnumValueDescriptorProto_Number_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.number" - EnumValueDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.options" -) - -// Field numbers for google.protobuf.EnumValueDescriptorProto. -const ( - EnumValueDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 - EnumValueDescriptorProto_Number_field_number protoreflect.FieldNumber = 2 - EnumValueDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 -) - -// Names for google.protobuf.ServiceDescriptorProto. -const ( - ServiceDescriptorProto_message_name protoreflect.Name = "ServiceDescriptorProto" - ServiceDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto" -) - -// Field names for google.protobuf.ServiceDescriptorProto. -const ( - ServiceDescriptorProto_Name_field_name protoreflect.Name = "name" - ServiceDescriptorProto_Method_field_name protoreflect.Name = "method" - ServiceDescriptorProto_Options_field_name protoreflect.Name = "options" - - ServiceDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.name" - ServiceDescriptorProto_Method_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.method" - ServiceDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.options" -) - -// Field numbers for google.protobuf.ServiceDescriptorProto. -const ( - ServiceDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 - ServiceDescriptorProto_Method_field_number protoreflect.FieldNumber = 2 - ServiceDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 -) - -// Names for google.protobuf.MethodDescriptorProto. -const ( - MethodDescriptorProto_message_name protoreflect.Name = "MethodDescriptorProto" - MethodDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto" -) - -// Field names for google.protobuf.MethodDescriptorProto. 
-const ( - MethodDescriptorProto_Name_field_name protoreflect.Name = "name" - MethodDescriptorProto_InputType_field_name protoreflect.Name = "input_type" - MethodDescriptorProto_OutputType_field_name protoreflect.Name = "output_type" - MethodDescriptorProto_Options_field_name protoreflect.Name = "options" - MethodDescriptorProto_ClientStreaming_field_name protoreflect.Name = "client_streaming" - MethodDescriptorProto_ServerStreaming_field_name protoreflect.Name = "server_streaming" - - MethodDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.name" - MethodDescriptorProto_InputType_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.input_type" - MethodDescriptorProto_OutputType_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.output_type" - MethodDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.options" - MethodDescriptorProto_ClientStreaming_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.client_streaming" - MethodDescriptorProto_ServerStreaming_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.server_streaming" -) - -// Field numbers for google.protobuf.MethodDescriptorProto. -const ( - MethodDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 - MethodDescriptorProto_InputType_field_number protoreflect.FieldNumber = 2 - MethodDescriptorProto_OutputType_field_number protoreflect.FieldNumber = 3 - MethodDescriptorProto_Options_field_number protoreflect.FieldNumber = 4 - MethodDescriptorProto_ClientStreaming_field_number protoreflect.FieldNumber = 5 - MethodDescriptorProto_ServerStreaming_field_number protoreflect.FieldNumber = 6 -) - -// Names for google.protobuf.FileOptions. -const ( - FileOptions_message_name protoreflect.Name = "FileOptions" - FileOptions_message_fullname protoreflect.FullName = "google.protobuf.FileOptions" -) - -// Field names for google.protobuf.FileOptions. 
-const ( - FileOptions_JavaPackage_field_name protoreflect.Name = "java_package" - FileOptions_JavaOuterClassname_field_name protoreflect.Name = "java_outer_classname" - FileOptions_JavaMultipleFiles_field_name protoreflect.Name = "java_multiple_files" - FileOptions_JavaGenerateEqualsAndHash_field_name protoreflect.Name = "java_generate_equals_and_hash" - FileOptions_JavaStringCheckUtf8_field_name protoreflect.Name = "java_string_check_utf8" - FileOptions_OptimizeFor_field_name protoreflect.Name = "optimize_for" - FileOptions_GoPackage_field_name protoreflect.Name = "go_package" - FileOptions_CcGenericServices_field_name protoreflect.Name = "cc_generic_services" - FileOptions_JavaGenericServices_field_name protoreflect.Name = "java_generic_services" - FileOptions_PyGenericServices_field_name protoreflect.Name = "py_generic_services" - FileOptions_PhpGenericServices_field_name protoreflect.Name = "php_generic_services" - FileOptions_Deprecated_field_name protoreflect.Name = "deprecated" - FileOptions_CcEnableArenas_field_name protoreflect.Name = "cc_enable_arenas" - FileOptions_ObjcClassPrefix_field_name protoreflect.Name = "objc_class_prefix" - FileOptions_CsharpNamespace_field_name protoreflect.Name = "csharp_namespace" - FileOptions_SwiftPrefix_field_name protoreflect.Name = "swift_prefix" - FileOptions_PhpClassPrefix_field_name protoreflect.Name = "php_class_prefix" - FileOptions_PhpNamespace_field_name protoreflect.Name = "php_namespace" - FileOptions_PhpMetadataNamespace_field_name protoreflect.Name = "php_metadata_namespace" - FileOptions_RubyPackage_field_name protoreflect.Name = "ruby_package" - FileOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" - - FileOptions_JavaPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_package" - FileOptions_JavaOuterClassname_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_outer_classname" - FileOptions_JavaMultipleFiles_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_multiple_files" - FileOptions_JavaGenerateEqualsAndHash_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generate_equals_and_hash" - FileOptions_JavaStringCheckUtf8_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_string_check_utf8" - FileOptions_OptimizeFor_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.optimize_for" - FileOptions_GoPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.go_package" - FileOptions_CcGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_generic_services" - FileOptions_JavaGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generic_services" - FileOptions_PyGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.py_generic_services" - FileOptions_PhpGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_generic_services" - FileOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.deprecated" - FileOptions_CcEnableArenas_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_enable_arenas" - FileOptions_ObjcClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.objc_class_prefix" - FileOptions_CsharpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.csharp_namespace" - FileOptions_SwiftPrefix_field_fullname protoreflect.FullName 
= "google.protobuf.FileOptions.swift_prefix" - FileOptions_PhpClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_class_prefix" - FileOptions_PhpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_namespace" - FileOptions_PhpMetadataNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_metadata_namespace" - FileOptions_RubyPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.ruby_package" - FileOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.uninterpreted_option" -) - -// Field numbers for google.protobuf.FileOptions. -const ( - FileOptions_JavaPackage_field_number protoreflect.FieldNumber = 1 - FileOptions_JavaOuterClassname_field_number protoreflect.FieldNumber = 8 - FileOptions_JavaMultipleFiles_field_number protoreflect.FieldNumber = 10 - FileOptions_JavaGenerateEqualsAndHash_field_number protoreflect.FieldNumber = 20 - FileOptions_JavaStringCheckUtf8_field_number protoreflect.FieldNumber = 27 - FileOptions_OptimizeFor_field_number protoreflect.FieldNumber = 9 - FileOptions_GoPackage_field_number protoreflect.FieldNumber = 11 - FileOptions_CcGenericServices_field_number protoreflect.FieldNumber = 16 - FileOptions_JavaGenericServices_field_number protoreflect.FieldNumber = 17 - FileOptions_PyGenericServices_field_number protoreflect.FieldNumber = 18 - FileOptions_PhpGenericServices_field_number protoreflect.FieldNumber = 42 - FileOptions_Deprecated_field_number protoreflect.FieldNumber = 23 - FileOptions_CcEnableArenas_field_number protoreflect.FieldNumber = 31 - FileOptions_ObjcClassPrefix_field_number protoreflect.FieldNumber = 36 - FileOptions_CsharpNamespace_field_number protoreflect.FieldNumber = 37 - FileOptions_SwiftPrefix_field_number protoreflect.FieldNumber = 39 - FileOptions_PhpClassPrefix_field_number protoreflect.FieldNumber = 40 - FileOptions_PhpNamespace_field_number protoreflect.FieldNumber = 41 - FileOptions_PhpMetadataNamespace_field_number protoreflect.FieldNumber = 44 - FileOptions_RubyPackage_field_number protoreflect.FieldNumber = 45 - FileOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 -) - -// Full and short names for google.protobuf.FileOptions.OptimizeMode. -const ( - FileOptions_OptimizeMode_enum_fullname = "google.protobuf.FileOptions.OptimizeMode" - FileOptions_OptimizeMode_enum_name = "OptimizeMode" -) - -// Names for google.protobuf.MessageOptions. -const ( - MessageOptions_message_name protoreflect.Name = "MessageOptions" - MessageOptions_message_fullname protoreflect.FullName = "google.protobuf.MessageOptions" -) - -// Field names for google.protobuf.MessageOptions. 
-const ( - MessageOptions_MessageSetWireFormat_field_name protoreflect.Name = "message_set_wire_format" - MessageOptions_NoStandardDescriptorAccessor_field_name protoreflect.Name = "no_standard_descriptor_accessor" - MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" - MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" - MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" - - MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" - MessageOptions_NoStandardDescriptorAccessor_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.no_standard_descriptor_accessor" - MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" - MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" - MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" -) - -// Field numbers for google.protobuf.MessageOptions. -const ( - MessageOptions_MessageSetWireFormat_field_number protoreflect.FieldNumber = 1 - MessageOptions_NoStandardDescriptorAccessor_field_number protoreflect.FieldNumber = 2 - MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 - MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 - MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 -) - -// Names for google.protobuf.FieldOptions. -const ( - FieldOptions_message_name protoreflect.Name = "FieldOptions" - FieldOptions_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions" -) - -// Field names for google.protobuf.FieldOptions. -const ( - FieldOptions_Ctype_field_name protoreflect.Name = "ctype" - FieldOptions_Packed_field_name protoreflect.Name = "packed" - FieldOptions_Jstype_field_name protoreflect.Name = "jstype" - FieldOptions_Lazy_field_name protoreflect.Name = "lazy" - FieldOptions_Deprecated_field_name protoreflect.Name = "deprecated" - FieldOptions_Weak_field_name protoreflect.Name = "weak" - FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" - - FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" - FieldOptions_Packed_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.packed" - FieldOptions_Jstype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.jstype" - FieldOptions_Lazy_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.lazy" - FieldOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.deprecated" - FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak" - FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" -) - -// Field numbers for google.protobuf.FieldOptions. 
-const ( - FieldOptions_Ctype_field_number protoreflect.FieldNumber = 1 - FieldOptions_Packed_field_number protoreflect.FieldNumber = 2 - FieldOptions_Jstype_field_number protoreflect.FieldNumber = 6 - FieldOptions_Lazy_field_number protoreflect.FieldNumber = 5 - FieldOptions_Deprecated_field_number protoreflect.FieldNumber = 3 - FieldOptions_Weak_field_number protoreflect.FieldNumber = 10 - FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 -) - -// Full and short names for google.protobuf.FieldOptions.CType. -const ( - FieldOptions_CType_enum_fullname = "google.protobuf.FieldOptions.CType" - FieldOptions_CType_enum_name = "CType" -) - -// Full and short names for google.protobuf.FieldOptions.JSType. -const ( - FieldOptions_JSType_enum_fullname = "google.protobuf.FieldOptions.JSType" - FieldOptions_JSType_enum_name = "JSType" -) - -// Names for google.protobuf.OneofOptions. -const ( - OneofOptions_message_name protoreflect.Name = "OneofOptions" - OneofOptions_message_fullname protoreflect.FullName = "google.protobuf.OneofOptions" -) - -// Field names for google.protobuf.OneofOptions. -const ( - OneofOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" - - OneofOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.uninterpreted_option" -) - -// Field numbers for google.protobuf.OneofOptions. -const ( - OneofOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 -) - -// Names for google.protobuf.EnumOptions. -const ( - EnumOptions_message_name protoreflect.Name = "EnumOptions" - EnumOptions_message_fullname protoreflect.FullName = "google.protobuf.EnumOptions" -) - -// Field names for google.protobuf.EnumOptions. -const ( - EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" - EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" - EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" - - EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" - EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" - EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" -) - -// Field numbers for google.protobuf.EnumOptions. -const ( - EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 - EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 - EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 -) - -// Names for google.protobuf.EnumValueOptions. -const ( - EnumValueOptions_message_name protoreflect.Name = "EnumValueOptions" - EnumValueOptions_message_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions" -) - -// Field names for google.protobuf.EnumValueOptions. -const ( - EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated" - EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" - - EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated" - EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option" -) - -// Field numbers for google.protobuf.EnumValueOptions. 
-const ( - EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1 - EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 -) - -// Names for google.protobuf.ServiceOptions. -const ( - ServiceOptions_message_name protoreflect.Name = "ServiceOptions" - ServiceOptions_message_fullname protoreflect.FullName = "google.protobuf.ServiceOptions" -) - -// Field names for google.protobuf.ServiceOptions. -const ( - ServiceOptions_Deprecated_field_name protoreflect.Name = "deprecated" - ServiceOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" - - ServiceOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.deprecated" - ServiceOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.uninterpreted_option" -) - -// Field numbers for google.protobuf.ServiceOptions. -const ( - ServiceOptions_Deprecated_field_number protoreflect.FieldNumber = 33 - ServiceOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 -) - -// Names for google.protobuf.MethodOptions. -const ( - MethodOptions_message_name protoreflect.Name = "MethodOptions" - MethodOptions_message_fullname protoreflect.FullName = "google.protobuf.MethodOptions" -) - -// Field names for google.protobuf.MethodOptions. -const ( - MethodOptions_Deprecated_field_name protoreflect.Name = "deprecated" - MethodOptions_IdempotencyLevel_field_name protoreflect.Name = "idempotency_level" - MethodOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" - - MethodOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.deprecated" - MethodOptions_IdempotencyLevel_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.idempotency_level" - MethodOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.uninterpreted_option" -) - -// Field numbers for google.protobuf.MethodOptions. -const ( - MethodOptions_Deprecated_field_number protoreflect.FieldNumber = 33 - MethodOptions_IdempotencyLevel_field_number protoreflect.FieldNumber = 34 - MethodOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 -) - -// Full and short names for google.protobuf.MethodOptions.IdempotencyLevel. -const ( - MethodOptions_IdempotencyLevel_enum_fullname = "google.protobuf.MethodOptions.IdempotencyLevel" - MethodOptions_IdempotencyLevel_enum_name = "IdempotencyLevel" -) - -// Names for google.protobuf.UninterpretedOption. -const ( - UninterpretedOption_message_name protoreflect.Name = "UninterpretedOption" - UninterpretedOption_message_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption" -) - -// Field names for google.protobuf.UninterpretedOption. 
-const ( - UninterpretedOption_Name_field_name protoreflect.Name = "name" - UninterpretedOption_IdentifierValue_field_name protoreflect.Name = "identifier_value" - UninterpretedOption_PositiveIntValue_field_name protoreflect.Name = "positive_int_value" - UninterpretedOption_NegativeIntValue_field_name protoreflect.Name = "negative_int_value" - UninterpretedOption_DoubleValue_field_name protoreflect.Name = "double_value" - UninterpretedOption_StringValue_field_name protoreflect.Name = "string_value" - UninterpretedOption_AggregateValue_field_name protoreflect.Name = "aggregate_value" - - UninterpretedOption_Name_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.name" - UninterpretedOption_IdentifierValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.identifier_value" - UninterpretedOption_PositiveIntValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.positive_int_value" - UninterpretedOption_NegativeIntValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.negative_int_value" - UninterpretedOption_DoubleValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.double_value" - UninterpretedOption_StringValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.string_value" - UninterpretedOption_AggregateValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.aggregate_value" -) - -// Field numbers for google.protobuf.UninterpretedOption. -const ( - UninterpretedOption_Name_field_number protoreflect.FieldNumber = 2 - UninterpretedOption_IdentifierValue_field_number protoreflect.FieldNumber = 3 - UninterpretedOption_PositiveIntValue_field_number protoreflect.FieldNumber = 4 - UninterpretedOption_NegativeIntValue_field_number protoreflect.FieldNumber = 5 - UninterpretedOption_DoubleValue_field_number protoreflect.FieldNumber = 6 - UninterpretedOption_StringValue_field_number protoreflect.FieldNumber = 7 - UninterpretedOption_AggregateValue_field_number protoreflect.FieldNumber = 8 -) - -// Names for google.protobuf.UninterpretedOption.NamePart. -const ( - UninterpretedOption_NamePart_message_name protoreflect.Name = "NamePart" - UninterpretedOption_NamePart_message_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart" -) - -// Field names for google.protobuf.UninterpretedOption.NamePart. -const ( - UninterpretedOption_NamePart_NamePart_field_name protoreflect.Name = "name_part" - UninterpretedOption_NamePart_IsExtension_field_name protoreflect.Name = "is_extension" - - UninterpretedOption_NamePart_NamePart_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart.name_part" - UninterpretedOption_NamePart_IsExtension_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart.is_extension" -) - -// Field numbers for google.protobuf.UninterpretedOption.NamePart. -const ( - UninterpretedOption_NamePart_NamePart_field_number protoreflect.FieldNumber = 1 - UninterpretedOption_NamePart_IsExtension_field_number protoreflect.FieldNumber = 2 -) - -// Names for google.protobuf.SourceCodeInfo. -const ( - SourceCodeInfo_message_name protoreflect.Name = "SourceCodeInfo" - SourceCodeInfo_message_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo" -) - -// Field names for google.protobuf.SourceCodeInfo. 
-const ( - SourceCodeInfo_Location_field_name protoreflect.Name = "location" - - SourceCodeInfo_Location_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.location" -) - -// Field numbers for google.protobuf.SourceCodeInfo. -const ( - SourceCodeInfo_Location_field_number protoreflect.FieldNumber = 1 -) - -// Names for google.protobuf.SourceCodeInfo.Location. -const ( - SourceCodeInfo_Location_message_name protoreflect.Name = "Location" - SourceCodeInfo_Location_message_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location" -) - -// Field names for google.protobuf.SourceCodeInfo.Location. -const ( - SourceCodeInfo_Location_Path_field_name protoreflect.Name = "path" - SourceCodeInfo_Location_Span_field_name protoreflect.Name = "span" - SourceCodeInfo_Location_LeadingComments_field_name protoreflect.Name = "leading_comments" - SourceCodeInfo_Location_TrailingComments_field_name protoreflect.Name = "trailing_comments" - SourceCodeInfo_Location_LeadingDetachedComments_field_name protoreflect.Name = "leading_detached_comments" - - SourceCodeInfo_Location_Path_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.path" - SourceCodeInfo_Location_Span_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.span" - SourceCodeInfo_Location_LeadingComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.leading_comments" - SourceCodeInfo_Location_TrailingComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.trailing_comments" - SourceCodeInfo_Location_LeadingDetachedComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.leading_detached_comments" -) - -// Field numbers for google.protobuf.SourceCodeInfo.Location. -const ( - SourceCodeInfo_Location_Path_field_number protoreflect.FieldNumber = 1 - SourceCodeInfo_Location_Span_field_number protoreflect.FieldNumber = 2 - SourceCodeInfo_Location_LeadingComments_field_number protoreflect.FieldNumber = 3 - SourceCodeInfo_Location_TrailingComments_field_number protoreflect.FieldNumber = 4 - SourceCodeInfo_Location_LeadingDetachedComments_field_number protoreflect.FieldNumber = 6 -) - -// Names for google.protobuf.GeneratedCodeInfo. -const ( - GeneratedCodeInfo_message_name protoreflect.Name = "GeneratedCodeInfo" - GeneratedCodeInfo_message_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo" -) - -// Field names for google.protobuf.GeneratedCodeInfo. -const ( - GeneratedCodeInfo_Annotation_field_name protoreflect.Name = "annotation" - - GeneratedCodeInfo_Annotation_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.annotation" -) - -// Field numbers for google.protobuf.GeneratedCodeInfo. -const ( - GeneratedCodeInfo_Annotation_field_number protoreflect.FieldNumber = 1 -) - -// Names for google.protobuf.GeneratedCodeInfo.Annotation. -const ( - GeneratedCodeInfo_Annotation_message_name protoreflect.Name = "Annotation" - GeneratedCodeInfo_Annotation_message_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation" -) - -// Field names for google.protobuf.GeneratedCodeInfo.Annotation. 
-const ( - GeneratedCodeInfo_Annotation_Path_field_name protoreflect.Name = "path" - GeneratedCodeInfo_Annotation_SourceFile_field_name protoreflect.Name = "source_file" - GeneratedCodeInfo_Annotation_Begin_field_name protoreflect.Name = "begin" - GeneratedCodeInfo_Annotation_End_field_name protoreflect.Name = "end" - - GeneratedCodeInfo_Annotation_Path_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.path" - GeneratedCodeInfo_Annotation_SourceFile_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.source_file" - GeneratedCodeInfo_Annotation_Begin_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.begin" - GeneratedCodeInfo_Annotation_End_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.end" -) - -// Field numbers for google.protobuf.GeneratedCodeInfo.Annotation. -const ( - GeneratedCodeInfo_Annotation_Path_field_number protoreflect.FieldNumber = 1 - GeneratedCodeInfo_Annotation_SourceFile_field_number protoreflect.FieldNumber = 2 - GeneratedCodeInfo_Annotation_Begin_field_number protoreflect.FieldNumber = 3 - GeneratedCodeInfo_Annotation_End_field_number protoreflect.FieldNumber = 4 -) diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go deleted file mode 100644 index 45ccd0121..000000000 --- a/vendor/google.golang.org/protobuf/internal/genid/doc.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package genid contains constants for declarations in descriptor.proto -// and the well-known types. -package genid - -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" - -const GoogleProtobuf_package protoreflect.FullName = "google.protobuf" diff --git a/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go b/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go deleted file mode 100644 index b070ef4fd..000000000 --- a/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package genid - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" -) - -const File_google_protobuf_duration_proto = "google/protobuf/duration.proto" - -// Names for google.protobuf.Duration. -const ( - Duration_message_name protoreflect.Name = "Duration" - Duration_message_fullname protoreflect.FullName = "google.protobuf.Duration" -) - -// Field names for google.protobuf.Duration. -const ( - Duration_Seconds_field_name protoreflect.Name = "seconds" - Duration_Nanos_field_name protoreflect.Name = "nanos" - - Duration_Seconds_field_fullname protoreflect.FullName = "google.protobuf.Duration.seconds" - Duration_Nanos_field_fullname protoreflect.FullName = "google.protobuf.Duration.nanos" -) - -// Field numbers for google.protobuf.Duration. 
-const ( - Duration_Seconds_field_number protoreflect.FieldNumber = 1 - Duration_Nanos_field_number protoreflect.FieldNumber = 2 -) diff --git a/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go b/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go deleted file mode 100644 index 762abb34a..000000000 --- a/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package genid - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" -) - -const File_google_protobuf_empty_proto = "google/protobuf/empty.proto" - -// Names for google.protobuf.Empty. -const ( - Empty_message_name protoreflect.Name = "Empty" - Empty_message_fullname protoreflect.FullName = "google.protobuf.Empty" -) diff --git a/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go b/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go deleted file mode 100644 index 70bed453f..000000000 --- a/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package genid - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" -) - -const File_google_protobuf_field_mask_proto = "google/protobuf/field_mask.proto" - -// Names for google.protobuf.FieldMask. -const ( - FieldMask_message_name protoreflect.Name = "FieldMask" - FieldMask_message_fullname protoreflect.FullName = "google.protobuf.FieldMask" -) - -// Field names for google.protobuf.FieldMask. -const ( - FieldMask_Paths_field_name protoreflect.Name = "paths" - - FieldMask_Paths_field_fullname protoreflect.FullName = "google.protobuf.FieldMask.paths" -) - -// Field numbers for google.protobuf.FieldMask. -const ( - FieldMask_Paths_field_number protoreflect.FieldNumber = 1 -) diff --git a/vendor/google.golang.org/protobuf/internal/genid/goname.go b/vendor/google.golang.org/protobuf/internal/genid/goname.go deleted file mode 100644 index 693d2e9e1..000000000 --- a/vendor/google.golang.org/protobuf/internal/genid/goname.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package genid - -// Go names of implementation-specific struct fields in generated messages. -const ( - State_goname = "state" - - SizeCache_goname = "sizeCache" - SizeCacheA_goname = "XXX_sizecache" - - WeakFields_goname = "weakFields" - WeakFieldsA_goname = "XXX_weak" - - UnknownFields_goname = "unknownFields" - UnknownFieldsA_goname = "XXX_unrecognized" - - ExtensionFields_goname = "extensionFields" - ExtensionFieldsA_goname = "XXX_InternalExtensions" - ExtensionFieldsB_goname = "XXX_extensions" - - WeakFieldPrefix_goname = "XXX_weak_" -) diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go deleted file mode 100644 index 8f9ea02ff..000000000 --- a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package genid - -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" - -// Generic field names and numbers for synthetic map entry messages. -const ( - MapEntry_Key_field_name protoreflect.Name = "key" - MapEntry_Value_field_name protoreflect.Name = "value" - - MapEntry_Key_field_number protoreflect.FieldNumber = 1 - MapEntry_Value_field_number protoreflect.FieldNumber = 2 -) diff --git a/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go b/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go deleted file mode 100644 index 3e99ae16c..000000000 --- a/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package genid - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" -) - -const File_google_protobuf_source_context_proto = "google/protobuf/source_context.proto" - -// Names for google.protobuf.SourceContext. -const ( - SourceContext_message_name protoreflect.Name = "SourceContext" - SourceContext_message_fullname protoreflect.FullName = "google.protobuf.SourceContext" -) - -// Field names for google.protobuf.SourceContext. -const ( - SourceContext_FileName_field_name protoreflect.Name = "file_name" - - SourceContext_FileName_field_fullname protoreflect.FullName = "google.protobuf.SourceContext.file_name" -) - -// Field numbers for google.protobuf.SourceContext. -const ( - SourceContext_FileName_field_number protoreflect.FieldNumber = 1 -) diff --git a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go deleted file mode 100644 index 1a38944b2..000000000 --- a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package genid - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" -) - -const File_google_protobuf_struct_proto = "google/protobuf/struct.proto" - -// Full and short names for google.protobuf.NullValue. -const ( - NullValue_enum_fullname = "google.protobuf.NullValue" - NullValue_enum_name = "NullValue" -) - -// Names for google.protobuf.Struct. -const ( - Struct_message_name protoreflect.Name = "Struct" - Struct_message_fullname protoreflect.FullName = "google.protobuf.Struct" -) - -// Field names for google.protobuf.Struct. -const ( - Struct_Fields_field_name protoreflect.Name = "fields" - - Struct_Fields_field_fullname protoreflect.FullName = "google.protobuf.Struct.fields" -) - -// Field numbers for google.protobuf.Struct. -const ( - Struct_Fields_field_number protoreflect.FieldNumber = 1 -) - -// Names for google.protobuf.Struct.FieldsEntry. -const ( - Struct_FieldsEntry_message_name protoreflect.Name = "FieldsEntry" - Struct_FieldsEntry_message_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry" -) - -// Field names for google.protobuf.Struct.FieldsEntry. 
-const ( - Struct_FieldsEntry_Key_field_name protoreflect.Name = "key" - Struct_FieldsEntry_Value_field_name protoreflect.Name = "value" - - Struct_FieldsEntry_Key_field_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry.key" - Struct_FieldsEntry_Value_field_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry.value" -) - -// Field numbers for google.protobuf.Struct.FieldsEntry. -const ( - Struct_FieldsEntry_Key_field_number protoreflect.FieldNumber = 1 - Struct_FieldsEntry_Value_field_number protoreflect.FieldNumber = 2 -) - -// Names for google.protobuf.Value. -const ( - Value_message_name protoreflect.Name = "Value" - Value_message_fullname protoreflect.FullName = "google.protobuf.Value" -) - -// Field names for google.protobuf.Value. -const ( - Value_NullValue_field_name protoreflect.Name = "null_value" - Value_NumberValue_field_name protoreflect.Name = "number_value" - Value_StringValue_field_name protoreflect.Name = "string_value" - Value_BoolValue_field_name protoreflect.Name = "bool_value" - Value_StructValue_field_name protoreflect.Name = "struct_value" - Value_ListValue_field_name protoreflect.Name = "list_value" - - Value_NullValue_field_fullname protoreflect.FullName = "google.protobuf.Value.null_value" - Value_NumberValue_field_fullname protoreflect.FullName = "google.protobuf.Value.number_value" - Value_StringValue_field_fullname protoreflect.FullName = "google.protobuf.Value.string_value" - Value_BoolValue_field_fullname protoreflect.FullName = "google.protobuf.Value.bool_value" - Value_StructValue_field_fullname protoreflect.FullName = "google.protobuf.Value.struct_value" - Value_ListValue_field_fullname protoreflect.FullName = "google.protobuf.Value.list_value" -) - -// Field numbers for google.protobuf.Value. -const ( - Value_NullValue_field_number protoreflect.FieldNumber = 1 - Value_NumberValue_field_number protoreflect.FieldNumber = 2 - Value_StringValue_field_number protoreflect.FieldNumber = 3 - Value_BoolValue_field_number protoreflect.FieldNumber = 4 - Value_StructValue_field_number protoreflect.FieldNumber = 5 - Value_ListValue_field_number protoreflect.FieldNumber = 6 -) - -// Oneof names for google.protobuf.Value. -const ( - Value_Kind_oneof_name protoreflect.Name = "kind" - - Value_Kind_oneof_fullname protoreflect.FullName = "google.protobuf.Value.kind" -) - -// Names for google.protobuf.ListValue. -const ( - ListValue_message_name protoreflect.Name = "ListValue" - ListValue_message_fullname protoreflect.FullName = "google.protobuf.ListValue" -) - -// Field names for google.protobuf.ListValue. -const ( - ListValue_Values_field_name protoreflect.Name = "values" - - ListValue_Values_field_fullname protoreflect.FullName = "google.protobuf.ListValue.values" -) - -// Field numbers for google.protobuf.ListValue. -const ( - ListValue_Values_field_number protoreflect.FieldNumber = 1 -) diff --git a/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go b/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go deleted file mode 100644 index f5cd5634c..000000000 --- a/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. 
- -package genid - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" -) - -const File_google_protobuf_timestamp_proto = "google/protobuf/timestamp.proto" - -// Names for google.protobuf.Timestamp. -const ( - Timestamp_message_name protoreflect.Name = "Timestamp" - Timestamp_message_fullname protoreflect.FullName = "google.protobuf.Timestamp" -) - -// Field names for google.protobuf.Timestamp. -const ( - Timestamp_Seconds_field_name protoreflect.Name = "seconds" - Timestamp_Nanos_field_name protoreflect.Name = "nanos" - - Timestamp_Seconds_field_fullname protoreflect.FullName = "google.protobuf.Timestamp.seconds" - Timestamp_Nanos_field_fullname protoreflect.FullName = "google.protobuf.Timestamp.nanos" -) - -// Field numbers for google.protobuf.Timestamp. -const ( - Timestamp_Seconds_field_number protoreflect.FieldNumber = 1 - Timestamp_Nanos_field_number protoreflect.FieldNumber = 2 -) diff --git a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go deleted file mode 100644 index 3bc710138..000000000 --- a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package genid - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" -) - -const File_google_protobuf_type_proto = "google/protobuf/type.proto" - -// Full and short names for google.protobuf.Syntax. -const ( - Syntax_enum_fullname = "google.protobuf.Syntax" - Syntax_enum_name = "Syntax" -) - -// Names for google.protobuf.Type. -const ( - Type_message_name protoreflect.Name = "Type" - Type_message_fullname protoreflect.FullName = "google.protobuf.Type" -) - -// Field names for google.protobuf.Type. -const ( - Type_Name_field_name protoreflect.Name = "name" - Type_Fields_field_name protoreflect.Name = "fields" - Type_Oneofs_field_name protoreflect.Name = "oneofs" - Type_Options_field_name protoreflect.Name = "options" - Type_SourceContext_field_name protoreflect.Name = "source_context" - Type_Syntax_field_name protoreflect.Name = "syntax" - - Type_Name_field_fullname protoreflect.FullName = "google.protobuf.Type.name" - Type_Fields_field_fullname protoreflect.FullName = "google.protobuf.Type.fields" - Type_Oneofs_field_fullname protoreflect.FullName = "google.protobuf.Type.oneofs" - Type_Options_field_fullname protoreflect.FullName = "google.protobuf.Type.options" - Type_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Type.source_context" - Type_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Type.syntax" -) - -// Field numbers for google.protobuf.Type. -const ( - Type_Name_field_number protoreflect.FieldNumber = 1 - Type_Fields_field_number protoreflect.FieldNumber = 2 - Type_Oneofs_field_number protoreflect.FieldNumber = 3 - Type_Options_field_number protoreflect.FieldNumber = 4 - Type_SourceContext_field_number protoreflect.FieldNumber = 5 - Type_Syntax_field_number protoreflect.FieldNumber = 6 -) - -// Names for google.protobuf.Field. -const ( - Field_message_name protoreflect.Name = "Field" - Field_message_fullname protoreflect.FullName = "google.protobuf.Field" -) - -// Field names for google.protobuf.Field. 
-const ( - Field_Kind_field_name protoreflect.Name = "kind" - Field_Cardinality_field_name protoreflect.Name = "cardinality" - Field_Number_field_name protoreflect.Name = "number" - Field_Name_field_name protoreflect.Name = "name" - Field_TypeUrl_field_name protoreflect.Name = "type_url" - Field_OneofIndex_field_name protoreflect.Name = "oneof_index" - Field_Packed_field_name protoreflect.Name = "packed" - Field_Options_field_name protoreflect.Name = "options" - Field_JsonName_field_name protoreflect.Name = "json_name" - Field_DefaultValue_field_name protoreflect.Name = "default_value" - - Field_Kind_field_fullname protoreflect.FullName = "google.protobuf.Field.kind" - Field_Cardinality_field_fullname protoreflect.FullName = "google.protobuf.Field.cardinality" - Field_Number_field_fullname protoreflect.FullName = "google.protobuf.Field.number" - Field_Name_field_fullname protoreflect.FullName = "google.protobuf.Field.name" - Field_TypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Field.type_url" - Field_OneofIndex_field_fullname protoreflect.FullName = "google.protobuf.Field.oneof_index" - Field_Packed_field_fullname protoreflect.FullName = "google.protobuf.Field.packed" - Field_Options_field_fullname protoreflect.FullName = "google.protobuf.Field.options" - Field_JsonName_field_fullname protoreflect.FullName = "google.protobuf.Field.json_name" - Field_DefaultValue_field_fullname protoreflect.FullName = "google.protobuf.Field.default_value" -) - -// Field numbers for google.protobuf.Field. -const ( - Field_Kind_field_number protoreflect.FieldNumber = 1 - Field_Cardinality_field_number protoreflect.FieldNumber = 2 - Field_Number_field_number protoreflect.FieldNumber = 3 - Field_Name_field_number protoreflect.FieldNumber = 4 - Field_TypeUrl_field_number protoreflect.FieldNumber = 6 - Field_OneofIndex_field_number protoreflect.FieldNumber = 7 - Field_Packed_field_number protoreflect.FieldNumber = 8 - Field_Options_field_number protoreflect.FieldNumber = 9 - Field_JsonName_field_number protoreflect.FieldNumber = 10 - Field_DefaultValue_field_number protoreflect.FieldNumber = 11 -) - -// Full and short names for google.protobuf.Field.Kind. -const ( - Field_Kind_enum_fullname = "google.protobuf.Field.Kind" - Field_Kind_enum_name = "Kind" -) - -// Full and short names for google.protobuf.Field.Cardinality. -const ( - Field_Cardinality_enum_fullname = "google.protobuf.Field.Cardinality" - Field_Cardinality_enum_name = "Cardinality" -) - -// Names for google.protobuf.Enum. -const ( - Enum_message_name protoreflect.Name = "Enum" - Enum_message_fullname protoreflect.FullName = "google.protobuf.Enum" -) - -// Field names for google.protobuf.Enum. -const ( - Enum_Name_field_name protoreflect.Name = "name" - Enum_Enumvalue_field_name protoreflect.Name = "enumvalue" - Enum_Options_field_name protoreflect.Name = "options" - Enum_SourceContext_field_name protoreflect.Name = "source_context" - Enum_Syntax_field_name protoreflect.Name = "syntax" - - Enum_Name_field_fullname protoreflect.FullName = "google.protobuf.Enum.name" - Enum_Enumvalue_field_fullname protoreflect.FullName = "google.protobuf.Enum.enumvalue" - Enum_Options_field_fullname protoreflect.FullName = "google.protobuf.Enum.options" - Enum_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Enum.source_context" - Enum_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Enum.syntax" -) - -// Field numbers for google.protobuf.Enum. 
-const ( - Enum_Name_field_number protoreflect.FieldNumber = 1 - Enum_Enumvalue_field_number protoreflect.FieldNumber = 2 - Enum_Options_field_number protoreflect.FieldNumber = 3 - Enum_SourceContext_field_number protoreflect.FieldNumber = 4 - Enum_Syntax_field_number protoreflect.FieldNumber = 5 -) - -// Names for google.protobuf.EnumValue. -const ( - EnumValue_message_name protoreflect.Name = "EnumValue" - EnumValue_message_fullname protoreflect.FullName = "google.protobuf.EnumValue" -) - -// Field names for google.protobuf.EnumValue. -const ( - EnumValue_Name_field_name protoreflect.Name = "name" - EnumValue_Number_field_name protoreflect.Name = "number" - EnumValue_Options_field_name protoreflect.Name = "options" - - EnumValue_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.name" - EnumValue_Number_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.number" - EnumValue_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.options" -) - -// Field numbers for google.protobuf.EnumValue. -const ( - EnumValue_Name_field_number protoreflect.FieldNumber = 1 - EnumValue_Number_field_number protoreflect.FieldNumber = 2 - EnumValue_Options_field_number protoreflect.FieldNumber = 3 -) - -// Names for google.protobuf.Option. -const ( - Option_message_name protoreflect.Name = "Option" - Option_message_fullname protoreflect.FullName = "google.protobuf.Option" -) - -// Field names for google.protobuf.Option. -const ( - Option_Name_field_name protoreflect.Name = "name" - Option_Value_field_name protoreflect.Name = "value" - - Option_Name_field_fullname protoreflect.FullName = "google.protobuf.Option.name" - Option_Value_field_fullname protoreflect.FullName = "google.protobuf.Option.value" -) - -// Field numbers for google.protobuf.Option. -const ( - Option_Name_field_number protoreflect.FieldNumber = 1 - Option_Value_field_number protoreflect.FieldNumber = 2 -) diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go deleted file mode 100644 index 429384b85..000000000 --- a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package genid - -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" - -// Generic field name and number for messages in wrappers.proto. -const ( - WrapperValue_Value_field_name protoreflect.Name = "value" - WrapperValue_Value_field_number protoreflect.FieldNumber = 1 -) diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go deleted file mode 100644 index 72527d2ab..000000000 --- a/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. - -package genid - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" -) - -const File_google_protobuf_wrappers_proto = "google/protobuf/wrappers.proto" - -// Names for google.protobuf.DoubleValue. 
-const ( - DoubleValue_message_name protoreflect.Name = "DoubleValue" - DoubleValue_message_fullname protoreflect.FullName = "google.protobuf.DoubleValue" -) - -// Field names for google.protobuf.DoubleValue. -const ( - DoubleValue_Value_field_name protoreflect.Name = "value" - - DoubleValue_Value_field_fullname protoreflect.FullName = "google.protobuf.DoubleValue.value" -) - -// Field numbers for google.protobuf.DoubleValue. -const ( - DoubleValue_Value_field_number protoreflect.FieldNumber = 1 -) - -// Names for google.protobuf.FloatValue. -const ( - FloatValue_message_name protoreflect.Name = "FloatValue" - FloatValue_message_fullname protoreflect.FullName = "google.protobuf.FloatValue" -) - -// Field names for google.protobuf.FloatValue. -const ( - FloatValue_Value_field_name protoreflect.Name = "value" - - FloatValue_Value_field_fullname protoreflect.FullName = "google.protobuf.FloatValue.value" -) - -// Field numbers for google.protobuf.FloatValue. -const ( - FloatValue_Value_field_number protoreflect.FieldNumber = 1 -) - -// Names for google.protobuf.Int64Value. -const ( - Int64Value_message_name protoreflect.Name = "Int64Value" - Int64Value_message_fullname protoreflect.FullName = "google.protobuf.Int64Value" -) - -// Field names for google.protobuf.Int64Value. -const ( - Int64Value_Value_field_name protoreflect.Name = "value" - - Int64Value_Value_field_fullname protoreflect.FullName = "google.protobuf.Int64Value.value" -) - -// Field numbers for google.protobuf.Int64Value. -const ( - Int64Value_Value_field_number protoreflect.FieldNumber = 1 -) - -// Names for google.protobuf.UInt64Value. -const ( - UInt64Value_message_name protoreflect.Name = "UInt64Value" - UInt64Value_message_fullname protoreflect.FullName = "google.protobuf.UInt64Value" -) - -// Field names for google.protobuf.UInt64Value. -const ( - UInt64Value_Value_field_name protoreflect.Name = "value" - - UInt64Value_Value_field_fullname protoreflect.FullName = "google.protobuf.UInt64Value.value" -) - -// Field numbers for google.protobuf.UInt64Value. -const ( - UInt64Value_Value_field_number protoreflect.FieldNumber = 1 -) - -// Names for google.protobuf.Int32Value. -const ( - Int32Value_message_name protoreflect.Name = "Int32Value" - Int32Value_message_fullname protoreflect.FullName = "google.protobuf.Int32Value" -) - -// Field names for google.protobuf.Int32Value. -const ( - Int32Value_Value_field_name protoreflect.Name = "value" - - Int32Value_Value_field_fullname protoreflect.FullName = "google.protobuf.Int32Value.value" -) - -// Field numbers for google.protobuf.Int32Value. -const ( - Int32Value_Value_field_number protoreflect.FieldNumber = 1 -) - -// Names for google.protobuf.UInt32Value. -const ( - UInt32Value_message_name protoreflect.Name = "UInt32Value" - UInt32Value_message_fullname protoreflect.FullName = "google.protobuf.UInt32Value" -) - -// Field names for google.protobuf.UInt32Value. -const ( - UInt32Value_Value_field_name protoreflect.Name = "value" - - UInt32Value_Value_field_fullname protoreflect.FullName = "google.protobuf.UInt32Value.value" -) - -// Field numbers for google.protobuf.UInt32Value. -const ( - UInt32Value_Value_field_number protoreflect.FieldNumber = 1 -) - -// Names for google.protobuf.BoolValue. -const ( - BoolValue_message_name protoreflect.Name = "BoolValue" - BoolValue_message_fullname protoreflect.FullName = "google.protobuf.BoolValue" -) - -// Field names for google.protobuf.BoolValue. 
-const ( - BoolValue_Value_field_name protoreflect.Name = "value" - - BoolValue_Value_field_fullname protoreflect.FullName = "google.protobuf.BoolValue.value" -) - -// Field numbers for google.protobuf.BoolValue. -const ( - BoolValue_Value_field_number protoreflect.FieldNumber = 1 -) - -// Names for google.protobuf.StringValue. -const ( - StringValue_message_name protoreflect.Name = "StringValue" - StringValue_message_fullname protoreflect.FullName = "google.protobuf.StringValue" -) - -// Field names for google.protobuf.StringValue. -const ( - StringValue_Value_field_name protoreflect.Name = "value" - - StringValue_Value_field_fullname protoreflect.FullName = "google.protobuf.StringValue.value" -) - -// Field numbers for google.protobuf.StringValue. -const ( - StringValue_Value_field_number protoreflect.FieldNumber = 1 -) - -// Names for google.protobuf.BytesValue. -const ( - BytesValue_message_name protoreflect.Name = "BytesValue" - BytesValue_message_fullname protoreflect.FullName = "google.protobuf.BytesValue" -) - -// Field names for google.protobuf.BytesValue. -const ( - BytesValue_Value_field_name protoreflect.Name = "value" - - BytesValue_Value_field_fullname protoreflect.FullName = "google.protobuf.BytesValue.value" -) - -// Field numbers for google.protobuf.BytesValue. -const ( - BytesValue_Value_field_number protoreflect.FieldNumber = 1 -) diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go deleted file mode 100644 index abee5f30e..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package impl - -import ( - "fmt" - "reflect" - "strconv" - - "google.golang.org/protobuf/encoding/prototext" - "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" -) - -// Export is a zero-length named type that exists only to export a set of -// functions that we do not want to appear in godoc. -type Export struct{} - -// NewError formats a string according to the format specifier and arguments and -// returns an error that has a "proto" prefix. -func (Export) NewError(f string, x ...interface{}) error { - return errors.New(f, x...) -} - -// enum is any enum type generated by protoc-gen-go -// and must be a named int32 type. -type enum = interface{} - -// EnumOf returns the protoreflect.Enum interface over e. -// It returns nil if e is nil. -func (Export) EnumOf(e enum) pref.Enum { - switch e := e.(type) { - case nil: - return nil - case pref.Enum: - return e - default: - return legacyWrapEnum(reflect.ValueOf(e)) - } -} - -// EnumDescriptorOf returns the protoreflect.EnumDescriptor for e. -// It returns nil if e is nil. -func (Export) EnumDescriptorOf(e enum) pref.EnumDescriptor { - switch e := e.(type) { - case nil: - return nil - case pref.Enum: - return e.Descriptor() - default: - return LegacyLoadEnumDesc(reflect.TypeOf(e)) - } -} - -// EnumTypeOf returns the protoreflect.EnumType for e. -// It returns nil if e is nil. 
-func (Export) EnumTypeOf(e enum) pref.EnumType { - switch e := e.(type) { - case nil: - return nil - case pref.Enum: - return e.Type() - default: - return legacyLoadEnumType(reflect.TypeOf(e)) - } -} - -// EnumStringOf returns the enum value as a string, either as the name if -// the number is resolvable, or the number formatted as a string. -func (Export) EnumStringOf(ed pref.EnumDescriptor, n pref.EnumNumber) string { - ev := ed.Values().ByNumber(n) - if ev != nil { - return string(ev.Name()) - } - return strconv.Itoa(int(n)) -} - -// message is any message type generated by protoc-gen-go -// and must be a pointer to a named struct type. -type message = interface{} - -// legacyMessageWrapper wraps a v2 message as a v1 message. -type legacyMessageWrapper struct{ m pref.ProtoMessage } - -func (m legacyMessageWrapper) Reset() { proto.Reset(m.m) } -func (m legacyMessageWrapper) String() string { return Export{}.MessageStringOf(m.m) } -func (m legacyMessageWrapper) ProtoMessage() {} - -// ProtoMessageV1Of converts either a v1 or v2 message to a v1 message. -// It returns nil if m is nil. -func (Export) ProtoMessageV1Of(m message) piface.MessageV1 { - switch mv := m.(type) { - case nil: - return nil - case piface.MessageV1: - return mv - case unwrapper: - return Export{}.ProtoMessageV1Of(mv.protoUnwrap()) - case pref.ProtoMessage: - return legacyMessageWrapper{mv} - default: - panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m)) - } -} - -func (Export) protoMessageV2Of(m message) pref.ProtoMessage { - switch mv := m.(type) { - case nil: - return nil - case pref.ProtoMessage: - return mv - case legacyMessageWrapper: - return mv.m - case piface.MessageV1: - return nil - default: - panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m)) - } -} - -// ProtoMessageV2Of converts either a v1 or v2 message to a v2 message. -// It returns nil if m is nil. -func (Export) ProtoMessageV2Of(m message) pref.ProtoMessage { - if m == nil { - return nil - } - if mv := (Export{}).protoMessageV2Of(m); mv != nil { - return mv - } - return legacyWrapMessage(reflect.ValueOf(m)).Interface() -} - -// MessageOf returns the protoreflect.Message interface over m. -// It returns nil if m is nil. -func (Export) MessageOf(m message) pref.Message { - if m == nil { - return nil - } - if mv := (Export{}).protoMessageV2Of(m); mv != nil { - return mv.ProtoReflect() - } - return legacyWrapMessage(reflect.ValueOf(m)) -} - -// MessageDescriptorOf returns the protoreflect.MessageDescriptor for m. -// It returns nil if m is nil. -func (Export) MessageDescriptorOf(m message) pref.MessageDescriptor { - if m == nil { - return nil - } - if mv := (Export{}).protoMessageV2Of(m); mv != nil { - return mv.ProtoReflect().Descriptor() - } - return LegacyLoadMessageDesc(reflect.TypeOf(m)) -} - -// MessageTypeOf returns the protoreflect.MessageType for m. -// It returns nil if m is nil. -func (Export) MessageTypeOf(m message) pref.MessageType { - if m == nil { - return nil - } - if mv := (Export{}).protoMessageV2Of(m); mv != nil { - return mv.ProtoReflect().Type() - } - return legacyLoadMessageType(reflect.TypeOf(m), "") -} - -// MessageStringOf returns the message value as a string, -// which is the message serialized in the protobuf text format. 
-func (Export) MessageStringOf(m pref.ProtoMessage) string { - return prototext.MarshalOptions{Multiline: false}.Format(m) -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go deleted file mode 100644 index b82341e57..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package impl - -import ( - "sync" - - "google.golang.org/protobuf/internal/errors" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" -) - -func (mi *MessageInfo) checkInitialized(in piface.CheckInitializedInput) (piface.CheckInitializedOutput, error) { - var p pointer - if ms, ok := in.Message.(*messageState); ok { - p = ms.pointer() - } else { - p = in.Message.(*messageReflectWrapper).pointer() - } - return piface.CheckInitializedOutput{}, mi.checkInitializedPointer(p) -} - -func (mi *MessageInfo) checkInitializedPointer(p pointer) error { - mi.init() - if !mi.needsInitCheck { - return nil - } - if p.IsNil() { - for _, f := range mi.orderedCoderFields { - if f.isRequired { - return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName())) - } - } - return nil - } - if mi.extensionOffset.IsValid() { - e := p.Apply(mi.extensionOffset).Extensions() - if err := mi.isInitExtensions(e); err != nil { - return err - } - } - for _, f := range mi.orderedCoderFields { - if !f.isRequired && f.funcs.isInit == nil { - continue - } - fptr := p.Apply(f.offset) - if f.isPointer && fptr.Elem().IsNil() { - if f.isRequired { - return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName())) - } - continue - } - if f.funcs.isInit == nil { - continue - } - if err := f.funcs.isInit(fptr, f); err != nil { - return err - } - } - return nil -} - -func (mi *MessageInfo) isInitExtensions(ext *map[int32]ExtensionField) error { - if ext == nil { - return nil - } - for _, x := range *ext { - ei := getExtensionFieldInfo(x.Type()) - if ei.funcs.isInit == nil { - continue - } - v := x.Value() - if !v.IsValid() { - continue - } - if err := ei.funcs.isInit(v); err != nil { - return err - } - } - return nil -} - -var ( - needsInitCheckMu sync.Mutex - needsInitCheckMap sync.Map -) - -// needsInitCheck reports whether a message needs to be checked for partial initialization. -// -// It returns true if the message transitively includes any required or extension fields. -func needsInitCheck(md pref.MessageDescriptor) bool { - if v, ok := needsInitCheckMap.Load(md); ok { - if has, ok := v.(bool); ok { - return has - } - } - needsInitCheckMu.Lock() - defer needsInitCheckMu.Unlock() - return needsInitCheckLocked(md) -} - -func needsInitCheckLocked(md pref.MessageDescriptor) (has bool) { - if v, ok := needsInitCheckMap.Load(md); ok { - // If has is true, we've previously determined that this message - // needs init checks. - // - // If has is false, we've previously determined that it can never - // be uninitialized. - // - // If has is not a bool, we've just encountered a cycle in the - // message graph. In this case, it is safe to return false: If - // the message does have required fields, we'll detect them later - // in the graph traversal. 
- has, ok := v.(bool) - return ok && has - } - needsInitCheckMap.Store(md, struct{}{}) // avoid cycles while descending into this message - defer func() { - needsInitCheckMap.Store(md, has) - }() - if md.RequiredNumbers().Len() > 0 { - return true - } - if md.ExtensionRanges().Len() > 0 { - return true - } - for i := 0; i < md.Fields().Len(); i++ { - fd := md.Fields().Get(i) - // Map keys are never messages, so just consider the map value. - if fd.IsMap() { - fd = fd.MapValue() - } - fmd := fd.Message() - if fmd != nil && needsInitCheckLocked(fmd) { - return true - } - } - return false -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go deleted file mode 100644 index 08d35170b..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package impl - -import ( - "sync" - "sync/atomic" - - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/errors" - pref "google.golang.org/protobuf/reflect/protoreflect" -) - -type extensionFieldInfo struct { - wiretag uint64 - tagsize int - unmarshalNeedsValue bool - funcs valueCoderFuncs - validation validationInfo -} - -var legacyExtensionFieldInfoCache sync.Map // map[protoreflect.ExtensionType]*extensionFieldInfo - -func getExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { - if xi, ok := xt.(*ExtensionInfo); ok { - xi.lazyInit() - return xi.info - } - return legacyLoadExtensionFieldInfo(xt) -} - -// legacyLoadExtensionFieldInfo dynamically loads a *ExtensionInfo for xt. -func legacyLoadExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { - if xi, ok := legacyExtensionFieldInfoCache.Load(xt); ok { - return xi.(*extensionFieldInfo) - } - e := makeExtensionFieldInfo(xt.TypeDescriptor()) - if e, ok := legacyMessageTypeCache.LoadOrStore(xt, e); ok { - return e.(*extensionFieldInfo) - } - return e -} - -func makeExtensionFieldInfo(xd pref.ExtensionDescriptor) *extensionFieldInfo { - var wiretag uint64 - if !xd.IsPacked() { - wiretag = protowire.EncodeTag(xd.Number(), wireTypes[xd.Kind()]) - } else { - wiretag = protowire.EncodeTag(xd.Number(), protowire.BytesType) - } - e := &extensionFieldInfo{ - wiretag: wiretag, - tagsize: protowire.SizeVarint(wiretag), - funcs: encoderFuncsForValue(xd), - } - // Does the unmarshal function need a value passed to it? - // This is true for composite types, where we pass in a message, list, or map to fill in, - // and for enums, where we pass in a prototype value to specify the concrete enum type. - switch xd.Kind() { - case pref.MessageKind, pref.GroupKind, pref.EnumKind: - e.unmarshalNeedsValue = true - default: - if xd.Cardinality() == pref.Repeated { - e.unmarshalNeedsValue = true - } - } - return e -} - -type lazyExtensionValue struct { - atomicOnce uint32 // atomically set if value is valid - mu sync.Mutex - xi *extensionFieldInfo - value pref.Value - b []byte - fn func() pref.Value -} - -type ExtensionField struct { - typ pref.ExtensionType - - // value is either the value of GetValue, - // or a *lazyExtensionValue that then returns the value of GetValue. 
- value pref.Value - lazy *lazyExtensionValue -} - -func (f *ExtensionField) appendLazyBytes(xt pref.ExtensionType, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, b []byte) { - if f.lazy == nil { - f.lazy = &lazyExtensionValue{xi: xi} - } - f.typ = xt - f.lazy.xi = xi - f.lazy.b = protowire.AppendTag(f.lazy.b, num, wtyp) - f.lazy.b = append(f.lazy.b, b...) -} - -func (f *ExtensionField) canLazy(xt pref.ExtensionType) bool { - if f.typ == nil { - return true - } - if f.typ == xt && f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0 { - return true - } - return false -} - -func (f *ExtensionField) lazyInit() { - f.lazy.mu.Lock() - defer f.lazy.mu.Unlock() - if atomic.LoadUint32(&f.lazy.atomicOnce) == 1 { - return - } - if f.lazy.xi != nil { - b := f.lazy.b - val := f.typ.New() - for len(b) > 0 { - var tag uint64 - if b[0] < 0x80 { - tag = uint64(b[0]) - b = b[1:] - } else if len(b) >= 2 && b[1] < 128 { - tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 - b = b[2:] - } else { - var n int - tag, n = protowire.ConsumeVarint(b) - if n < 0 { - panic(errors.New("bad tag in lazy extension decoding")) - } - b = b[n:] - } - num := protowire.Number(tag >> 3) - wtyp := protowire.Type(tag & 7) - var out unmarshalOutput - var err error - val, out, err = f.lazy.xi.funcs.unmarshal(b, val, num, wtyp, lazyUnmarshalOptions) - if err != nil { - panic(errors.New("decode failure in lazy extension decoding: %v", err)) - } - b = b[out.n:] - } - f.lazy.value = val - } else { - f.lazy.value = f.lazy.fn() - } - f.lazy.xi = nil - f.lazy.fn = nil - f.lazy.b = nil - atomic.StoreUint32(&f.lazy.atomicOnce, 1) -} - -// Set sets the type and value of the extension field. -// This must not be called concurrently. -func (f *ExtensionField) Set(t pref.ExtensionType, v pref.Value) { - f.typ = t - f.value = v - f.lazy = nil -} - -// SetLazy sets the type and a value that is to be lazily evaluated upon first use. -// This must not be called concurrently. -func (f *ExtensionField) SetLazy(t pref.ExtensionType, fn func() pref.Value) { - f.typ = t - f.lazy = &lazyExtensionValue{fn: fn} -} - -// Value returns the value of the extension field. -// This may be called concurrently. -func (f *ExtensionField) Value() pref.Value { - if f.lazy != nil { - if atomic.LoadUint32(&f.lazy.atomicOnce) == 0 { - f.lazyInit() - } - return f.lazy.value - } - return f.value -} - -// Type returns the type of the extension field. -// This may be called concurrently. -func (f ExtensionField) Type() pref.ExtensionType { - return f.typ -} - -// IsSet returns whether the extension field is set. -// This may be called concurrently. -func (f ExtensionField) IsSet() bool { - return f.typ != nil -} - -// IsLazy reports whether a field is lazily encoded. -// It is exported for testing. 
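The ExtensionField plumbing above keeps the raw wire bytes in f.lazy.b and only decodes them on the first Value() call, guarded by a mutex plus an atomic "once" flag. A minimal standalone sketch of that deferred-decode pattern follows; the names (lazyValue, Get, decode) and the string payload are illustrative, not taken from the vendored code.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// lazyValue defers an expensive decode until the first Get and then
// publishes the result behind an atomic flag, mirroring the shape of the
// deleted lazyExtensionValue type. Illustrative only.
type lazyValue struct {
	done uint32     // atomically set to 1 once val is valid
	mu   sync.Mutex // guards the one-time decode
	raw  []byte     // undecoded bytes
	val  string     // decoded result
}

func (l *lazyValue) Get(decode func([]byte) string) string {
	if atomic.LoadUint32(&l.done) == 0 {
		l.mu.Lock()
		if atomic.LoadUint32(&l.done) == 0 { // re-check under the lock
			l.val = decode(l.raw)
			l.raw = nil // drop the buffer once decoded
			atomic.StoreUint32(&l.done, 1)
		}
		l.mu.Unlock()
	}
	return l.val
}

func main() {
	lv := &lazyValue{raw: []byte("payload")}
	decode := func(b []byte) string { return "decoded:" + string(b) }
	fmt.Println(lv.Get(decode)) // decodes on first use
	fmt.Println(lv.Get(decode)) // served from the cached value
}

The second check after taking the lock is what keeps concurrent readers from decoding twice; it is the same double-check the deleted lazyInit performs before storing the atomic flag.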
-func IsLazy(m pref.Message, fd pref.FieldDescriptor) bool { - var mi *MessageInfo - var p pointer - switch m := m.(type) { - case *messageState: - mi = m.messageInfo() - p = m.pointer() - case *messageReflectWrapper: - mi = m.messageInfo() - p = m.pointer() - default: - return false - } - xd, ok := fd.(pref.ExtensionTypeDescriptor) - if !ok { - return false - } - xt := xd.Type() - ext := mi.extensionMap(p) - if ext == nil { - return false - } - f, ok := (*ext)[int32(fd.Number())] - if !ok { - return false - } - return f.typ == xt && f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0 -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go deleted file mode 100644 index cb4b482d1..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ /dev/null @@ -1,830 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package impl - -import ( - "fmt" - "reflect" - "sync" - - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" - piface "google.golang.org/protobuf/runtime/protoiface" -) - -type errInvalidUTF8 struct{} - -func (errInvalidUTF8) Error() string { return "string field contains invalid UTF-8" } -func (errInvalidUTF8) InvalidUTF8() bool { return true } -func (errInvalidUTF8) Unwrap() error { return errors.Error } - -// initOneofFieldCoders initializes the fast-path functions for the fields in a oneof. -// -// For size, marshal, and isInit operations, functions are set only on the first field -// in the oneof. The functions are called when the oneof is non-nil, and will dispatch -// to the appropriate field-specific function as necessary. -// -// The unmarshal function is set on each field individually as usual. -func (mi *MessageInfo) initOneofFieldCoders(od pref.OneofDescriptor, si structInfo) { - fs := si.oneofsByName[od.Name()] - ft := fs.Type - oneofFields := make(map[reflect.Type]*coderFieldInfo) - needIsInit := false - fields := od.Fields() - for i, lim := 0, fields.Len(); i < lim; i++ { - fd := od.Fields().Get(i) - num := fd.Number() - // Make a copy of the original coderFieldInfo for use in unmarshaling. - // - // oneofFields[oneofType].funcs.marshal is the field-specific marshal function. - // - // mi.coderFields[num].marshal is set on only the first field in the oneof, - // and dispatches to the field-specific marshaler in oneofFields. 
- cf := *mi.coderFields[num] - ot := si.oneofWrappersByNumber[num] - cf.ft = ot.Field(0).Type - cf.mi, cf.funcs = fieldCoder(fd, cf.ft) - oneofFields[ot] = &cf - if cf.funcs.isInit != nil { - needIsInit = true - } - mi.coderFields[num].funcs.unmarshal = func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { - var vw reflect.Value // pointer to wrapper type - vi := p.AsValueOf(ft).Elem() // oneof field value of interface kind - if !vi.IsNil() && !vi.Elem().IsNil() && vi.Elem().Elem().Type() == ot { - vw = vi.Elem() - } else { - vw = reflect.New(ot) - } - out, err := cf.funcs.unmarshal(b, pointerOfValue(vw).Apply(zeroOffset), wtyp, &cf, opts) - if err != nil { - return out, err - } - vi.Set(vw) - return out, nil - } - } - getInfo := func(p pointer) (pointer, *coderFieldInfo) { - v := p.AsValueOf(ft).Elem() - if v.IsNil() { - return pointer{}, nil - } - v = v.Elem() // interface -> *struct - if v.IsNil() { - return pointer{}, nil - } - return pointerOfValue(v).Apply(zeroOffset), oneofFields[v.Elem().Type()] - } - first := mi.coderFields[od.Fields().Get(0).Number()] - first.funcs.size = func(p pointer, _ *coderFieldInfo, opts marshalOptions) int { - p, info := getInfo(p) - if info == nil || info.funcs.size == nil { - return 0 - } - return info.funcs.size(p, info, opts) - } - first.funcs.marshal = func(b []byte, p pointer, _ *coderFieldInfo, opts marshalOptions) ([]byte, error) { - p, info := getInfo(p) - if info == nil || info.funcs.marshal == nil { - return b, nil - } - return info.funcs.marshal(b, p, info, opts) - } - first.funcs.merge = func(dst, src pointer, _ *coderFieldInfo, opts mergeOptions) { - srcp, srcinfo := getInfo(src) - if srcinfo == nil || srcinfo.funcs.merge == nil { - return - } - dstp, dstinfo := getInfo(dst) - if dstinfo != srcinfo { - dst.AsValueOf(ft).Elem().Set(reflect.New(src.AsValueOf(ft).Elem().Elem().Elem().Type())) - dstp = pointerOfValue(dst.AsValueOf(ft).Elem().Elem()).Apply(zeroOffset) - } - srcinfo.funcs.merge(dstp, srcp, srcinfo, opts) - } - if needIsInit { - first.funcs.isInit = func(p pointer, _ *coderFieldInfo) error { - p, info := getInfo(p) - if info == nil || info.funcs.isInit == nil { - return nil - } - return info.funcs.isInit(p, info) - } - } -} - -func makeWeakMessageFieldCoder(fd pref.FieldDescriptor) pointerCoderFuncs { - var once sync.Once - var messageType pref.MessageType - lazyInit := func() { - once.Do(func() { - messageName := fd.Message().FullName() - messageType, _ = preg.GlobalTypes.FindMessageByName(messageName) - }) - } - - return pointerCoderFuncs{ - size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { - m, ok := p.WeakFields().get(f.num) - if !ok { - return 0 - } - lazyInit() - if messageType == nil { - panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) - } - return sizeMessage(m, f.tagsize, opts) - }, - marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - m, ok := p.WeakFields().get(f.num) - if !ok { - return b, nil - } - lazyInit() - if messageType == nil { - panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) - } - return appendMessage(b, m, f.wiretag, opts) - }, - unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { - fs := p.WeakFields() - m, ok := fs.get(f.num) - if !ok { - lazyInit() - if messageType == nil { - return unmarshalOutput{}, errUnknown - } - m = 
messageType.New().Interface() - fs.set(f.num, m) - } - return consumeMessage(b, m, wtyp, opts) - }, - isInit: func(p pointer, f *coderFieldInfo) error { - m, ok := p.WeakFields().get(f.num) - if !ok { - return nil - } - return proto.CheckInitialized(m) - }, - merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { - sm, ok := src.WeakFields().get(f.num) - if !ok { - return - } - dm, ok := dst.WeakFields().get(f.num) - if !ok { - lazyInit() - if messageType == nil { - panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) - } - dm = messageType.New().Interface() - dst.WeakFields().set(f.num, dm) - } - opts.Merge(dm, sm) - }, - } -} - -func makeMessageFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { - if mi := getMessageInfo(ft); mi != nil { - funcs := pointerCoderFuncs{ - size: sizeMessageInfo, - marshal: appendMessageInfo, - unmarshal: consumeMessageInfo, - merge: mergeMessage, - } - if needsInitCheck(mi.Desc) { - funcs.isInit = isInitMessageInfo - } - return funcs - } else { - return pointerCoderFuncs{ - size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { - m := asMessage(p.AsValueOf(ft).Elem()) - return sizeMessage(m, f.tagsize, opts) - }, - marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - m := asMessage(p.AsValueOf(ft).Elem()) - return appendMessage(b, m, f.wiretag, opts) - }, - unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { - mp := p.AsValueOf(ft).Elem() - if mp.IsNil() { - mp.Set(reflect.New(ft.Elem())) - } - return consumeMessage(b, asMessage(mp), wtyp, opts) - }, - isInit: func(p pointer, f *coderFieldInfo) error { - m := asMessage(p.AsValueOf(ft).Elem()) - return proto.CheckInitialized(m) - }, - merge: mergeMessage, - } - } -} - -func sizeMessageInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int { - return protowire.SizeBytes(f.mi.sizePointer(p.Elem(), opts)) + f.tagsize -} - -func appendMessageInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(f.mi.sizePointer(p.Elem(), opts))) - return f.mi.marshalAppendPointer(b, p.Elem(), opts) -} - -func consumeMessageInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.BytesType { - return out, errUnknown - } - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - if p.Elem().IsNil() { - p.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) - } - o, err := f.mi.unmarshalPointer(v, p.Elem(), 0, opts) - if err != nil { - return out, err - } - out.n = n - out.initialized = o.initialized - return out, nil -} - -func isInitMessageInfo(p pointer, f *coderFieldInfo) error { - return f.mi.checkInitializedPointer(p.Elem()) -} - -func sizeMessage(m proto.Message, tagsize int, _ marshalOptions) int { - return protowire.SizeBytes(proto.Size(m)) + tagsize -} - -func appendMessage(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) { - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendVarint(b, uint64(proto.Size(m))) - return opts.Options().MarshalAppend(b, m) -} - -func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.BytesType { - return out, errUnknown - } - v, n := 
protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ - Buf: v, - Message: m.ProtoReflect(), - }) - if err != nil { - return out, err - } - out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 - return out, nil -} - -func sizeMessageValue(v pref.Value, tagsize int, opts marshalOptions) int { - m := v.Message().Interface() - return sizeMessage(m, tagsize, opts) -} - -func appendMessageValue(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - m := v.Message().Interface() - return appendMessage(b, m, wiretag, opts) -} - -func consumeMessageValue(b []byte, v pref.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) { - m := v.Message().Interface() - out, err := consumeMessage(b, m, wtyp, opts) - return v, out, err -} - -func isInitMessageValue(v pref.Value) error { - m := v.Message().Interface() - return proto.CheckInitialized(m) -} - -var coderMessageValue = valueCoderFuncs{ - size: sizeMessageValue, - marshal: appendMessageValue, - unmarshal: consumeMessageValue, - isInit: isInitMessageValue, - merge: mergeMessageValue, -} - -func sizeGroupValue(v pref.Value, tagsize int, opts marshalOptions) int { - m := v.Message().Interface() - return sizeGroup(m, tagsize, opts) -} - -func appendGroupValue(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - m := v.Message().Interface() - return appendGroup(b, m, wiretag, opts) -} - -func consumeGroupValue(b []byte, v pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) { - m := v.Message().Interface() - out, err := consumeGroup(b, m, num, wtyp, opts) - return v, out, err -} - -var coderGroupValue = valueCoderFuncs{ - size: sizeGroupValue, - marshal: appendGroupValue, - unmarshal: consumeGroupValue, - isInit: isInitMessageValue, - merge: mergeMessageValue, -} - -func makeGroupFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { - num := fd.Number() - if mi := getMessageInfo(ft); mi != nil { - funcs := pointerCoderFuncs{ - size: sizeGroupType, - marshal: appendGroupType, - unmarshal: consumeGroupType, - merge: mergeMessage, - } - if needsInitCheck(mi.Desc) { - funcs.isInit = isInitMessageInfo - } - return funcs - } else { - return pointerCoderFuncs{ - size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { - m := asMessage(p.AsValueOf(ft).Elem()) - return sizeGroup(m, f.tagsize, opts) - }, - marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - m := asMessage(p.AsValueOf(ft).Elem()) - return appendGroup(b, m, f.wiretag, opts) - }, - unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { - mp := p.AsValueOf(ft).Elem() - if mp.IsNil() { - mp.Set(reflect.New(ft.Elem())) - } - return consumeGroup(b, asMessage(mp), num, wtyp, opts) - }, - isInit: func(p pointer, f *coderFieldInfo) error { - m := asMessage(p.AsValueOf(ft).Elem()) - return proto.CheckInitialized(m) - }, - merge: mergeMessage, - } - } -} - -func sizeGroupType(p pointer, f *coderFieldInfo, opts marshalOptions) int { - return 2*f.tagsize + f.mi.sizePointer(p.Elem(), opts) -} - -func appendGroupType(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - b = protowire.AppendVarint(b, f.wiretag) // start group - b, err := f.mi.marshalAppendPointer(b, p.Elem(), opts) - b 
= protowire.AppendVarint(b, f.wiretag+1) // end group - return b, err -} - -func consumeGroupType(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.StartGroupType { - return out, errUnknown - } - if p.Elem().IsNil() { - p.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) - } - return f.mi.unmarshalPointer(b, p.Elem(), f.num, opts) -} - -func sizeGroup(m proto.Message, tagsize int, _ marshalOptions) int { - return 2*tagsize + proto.Size(m) -} - -func appendGroup(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) { - b = protowire.AppendVarint(b, wiretag) // start group - b, err := opts.Options().MarshalAppend(b, m) - b = protowire.AppendVarint(b, wiretag+1) // end group - return b, err -} - -func consumeGroup(b []byte, m proto.Message, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.StartGroupType { - return out, errUnknown - } - b, n := protowire.ConsumeGroup(num, b) - if n < 0 { - return out, errDecode - } - o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ - Buf: b, - Message: m.ProtoReflect(), - }) - if err != nil { - return out, err - } - out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 - return out, nil -} - -func makeMessageSliceFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { - if mi := getMessageInfo(ft); mi != nil { - funcs := pointerCoderFuncs{ - size: sizeMessageSliceInfo, - marshal: appendMessageSliceInfo, - unmarshal: consumeMessageSliceInfo, - merge: mergeMessageSlice, - } - if needsInitCheck(mi.Desc) { - funcs.isInit = isInitMessageSliceInfo - } - return funcs - } - return pointerCoderFuncs{ - size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { - return sizeMessageSlice(p, ft, f.tagsize, opts) - }, - marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - return appendMessageSlice(b, p, f.wiretag, ft, opts) - }, - unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { - return consumeMessageSlice(b, p, ft, wtyp, opts) - }, - isInit: func(p pointer, f *coderFieldInfo) error { - return isInitMessageSlice(p, ft) - }, - merge: mergeMessageSlice, - } -} - -func sizeMessageSliceInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int { - s := p.PointerSlice() - n := 0 - for _, v := range s { - n += protowire.SizeBytes(f.mi.sizePointer(v, opts)) + f.tagsize - } - return n -} - -func appendMessageSliceInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := p.PointerSlice() - var err error - for _, v := range s { - b = protowire.AppendVarint(b, f.wiretag) - siz := f.mi.sizePointer(v, opts) - b = protowire.AppendVarint(b, uint64(siz)) - b, err = f.mi.marshalAppendPointer(b, v, opts) - if err != nil { - return b, err - } - } - return b, nil -} - -func consumeMessageSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.BytesType { - return out, errUnknown - } - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - m := reflect.New(f.mi.GoReflectType.Elem()).Interface() - mp := pointerOfIface(m) - o, err := f.mi.unmarshalPointer(v, mp, 0, opts) - if err != nil { - return out, err - } - p.AppendPointerSlice(mp) - out.n = n - out.initialized 
= o.initialized - return out, nil -} - -func isInitMessageSliceInfo(p pointer, f *coderFieldInfo) error { - s := p.PointerSlice() - for _, v := range s { - if err := f.mi.checkInitializedPointer(v); err != nil { - return err - } - } - return nil -} - -func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, _ marshalOptions) int { - s := p.PointerSlice() - n := 0 - for _, v := range s { - m := asMessage(v.AsValueOf(goType.Elem())) - n += protowire.SizeBytes(proto.Size(m)) + tagsize - } - return n -} - -func appendMessageSlice(b []byte, p pointer, wiretag uint64, goType reflect.Type, opts marshalOptions) ([]byte, error) { - s := p.PointerSlice() - var err error - for _, v := range s { - m := asMessage(v.AsValueOf(goType.Elem())) - b = protowire.AppendVarint(b, wiretag) - siz := proto.Size(m) - b = protowire.AppendVarint(b, uint64(siz)) - b, err = opts.Options().MarshalAppend(b, m) - if err != nil { - return b, err - } - } - return b, nil -} - -func consumeMessageSlice(b []byte, p pointer, goType reflect.Type, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.BytesType { - return out, errUnknown - } - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - mp := reflect.New(goType.Elem()) - o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ - Buf: v, - Message: asMessage(mp).ProtoReflect(), - }) - if err != nil { - return out, err - } - p.AppendPointerSlice(pointerOfValue(mp)) - out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 - return out, nil -} - -func isInitMessageSlice(p pointer, goType reflect.Type) error { - s := p.PointerSlice() - for _, v := range s { - m := asMessage(v.AsValueOf(goType.Elem())) - if err := proto.CheckInitialized(m); err != nil { - return err - } - } - return nil -} - -// Slices of messages - -func sizeMessageSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int { - list := listv.List() - n := 0 - for i, llen := 0, list.Len(); i < llen; i++ { - m := list.Get(i).Message().Interface() - n += protowire.SizeBytes(proto.Size(m)) + tagsize - } - return n -} - -func appendMessageSliceValue(b []byte, listv pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - list := listv.List() - mopts := opts.Options() - for i, llen := 0, list.Len(); i < llen; i++ { - m := list.Get(i).Message().Interface() - b = protowire.AppendVarint(b, wiretag) - siz := proto.Size(m) - b = protowire.AppendVarint(b, uint64(siz)) - var err error - b, err = mopts.MarshalAppend(b, m) - if err != nil { - return b, err - } - } - return b, nil -} - -func consumeMessageSliceValue(b []byte, listv pref.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ pref.Value, out unmarshalOutput, err error) { - list := listv.List() - if wtyp != protowire.BytesType { - return pref.Value{}, out, errUnknown - } - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return pref.Value{}, out, errDecode - } - m := list.NewElement() - o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ - Buf: v, - Message: m.Message(), - }) - if err != nil { - return pref.Value{}, out, err - } - list.Append(m) - out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 - return listv, out, nil -} - -func isInitMessageSliceValue(listv pref.Value) error { - list := listv.List() - for i, llen := 0, list.Len(); i < llen; i++ { - m := list.Get(i).Message().Interface() - if err := proto.CheckInitialized(m); err != nil { - return err - } - } - return nil -} - -var 
coderMessageSliceValue = valueCoderFuncs{ - size: sizeMessageSliceValue, - marshal: appendMessageSliceValue, - unmarshal: consumeMessageSliceValue, - isInit: isInitMessageSliceValue, - merge: mergeMessageListValue, -} - -func sizeGroupSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int { - list := listv.List() - n := 0 - for i, llen := 0, list.Len(); i < llen; i++ { - m := list.Get(i).Message().Interface() - n += 2*tagsize + proto.Size(m) - } - return n -} - -func appendGroupSliceValue(b []byte, listv pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - list := listv.List() - mopts := opts.Options() - for i, llen := 0, list.Len(); i < llen; i++ { - m := list.Get(i).Message().Interface() - b = protowire.AppendVarint(b, wiretag) // start group - var err error - b, err = mopts.MarshalAppend(b, m) - if err != nil { - return b, err - } - b = protowire.AppendVarint(b, wiretag+1) // end group - } - return b, nil -} - -func consumeGroupSliceValue(b []byte, listv pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ pref.Value, out unmarshalOutput, err error) { - list := listv.List() - if wtyp != protowire.StartGroupType { - return pref.Value{}, out, errUnknown - } - b, n := protowire.ConsumeGroup(num, b) - if n < 0 { - return pref.Value{}, out, errDecode - } - m := list.NewElement() - o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ - Buf: b, - Message: m.Message(), - }) - if err != nil { - return pref.Value{}, out, err - } - list.Append(m) - out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 - return listv, out, nil -} - -var coderGroupSliceValue = valueCoderFuncs{ - size: sizeGroupSliceValue, - marshal: appendGroupSliceValue, - unmarshal: consumeGroupSliceValue, - isInit: isInitMessageSliceValue, - merge: mergeMessageListValue, -} - -func makeGroupSliceFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { - num := fd.Number() - if mi := getMessageInfo(ft); mi != nil { - funcs := pointerCoderFuncs{ - size: sizeGroupSliceInfo, - marshal: appendGroupSliceInfo, - unmarshal: consumeGroupSliceInfo, - merge: mergeMessageSlice, - } - if needsInitCheck(mi.Desc) { - funcs.isInit = isInitMessageSliceInfo - } - return funcs - } - return pointerCoderFuncs{ - size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { - return sizeGroupSlice(p, ft, f.tagsize, opts) - }, - marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - return appendGroupSlice(b, p, f.wiretag, ft, opts) - }, - unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { - return consumeGroupSlice(b, p, num, wtyp, ft, opts) - }, - isInit: func(p pointer, f *coderFieldInfo) error { - return isInitMessageSlice(p, ft) - }, - merge: mergeMessageSlice, - } -} - -func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, _ marshalOptions) int { - s := p.PointerSlice() - n := 0 - for _, v := range s { - m := asMessage(v.AsValueOf(messageType.Elem())) - n += 2*tagsize + proto.Size(m) - } - return n -} - -func appendGroupSlice(b []byte, p pointer, wiretag uint64, messageType reflect.Type, opts marshalOptions) ([]byte, error) { - s := p.PointerSlice() - var err error - for _, v := range s { - m := asMessage(v.AsValueOf(messageType.Elem())) - b = protowire.AppendVarint(b, wiretag) // start group - b, err = opts.Options().MarshalAppend(b, m) - if err != nil { - return b, err - } - b = protowire.AppendVarint(b, 
wiretag+1) // end group - } - return b, nil -} - -func consumeGroupSlice(b []byte, p pointer, num protowire.Number, wtyp protowire.Type, goType reflect.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.StartGroupType { - return out, errUnknown - } - b, n := protowire.ConsumeGroup(num, b) - if n < 0 { - return out, errDecode - } - mp := reflect.New(goType.Elem()) - o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ - Buf: b, - Message: asMessage(mp).ProtoReflect(), - }) - if err != nil { - return out, err - } - p.AppendPointerSlice(pointerOfValue(mp)) - out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 - return out, nil -} - -func sizeGroupSliceInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int { - s := p.PointerSlice() - n := 0 - for _, v := range s { - n += 2*f.tagsize + f.mi.sizePointer(v, opts) - } - return n -} - -func appendGroupSliceInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := p.PointerSlice() - var err error - for _, v := range s { - b = protowire.AppendVarint(b, f.wiretag) // start group - b, err = f.mi.marshalAppendPointer(b, v, opts) - if err != nil { - return b, err - } - b = protowire.AppendVarint(b, f.wiretag+1) // end group - } - return b, nil -} - -func consumeGroupSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { - if wtyp != protowire.StartGroupType { - return unmarshalOutput{}, errUnknown - } - m := reflect.New(f.mi.GoReflectType.Elem()).Interface() - mp := pointerOfIface(m) - out, err := f.mi.unmarshalPointer(b, mp, f.num, opts) - if err != nil { - return out, err - } - p.AppendPointerSlice(mp) - return out, nil -} - -func asMessage(v reflect.Value) pref.ProtoMessage { - if m, ok := v.Interface().(pref.ProtoMessage); ok { - return m - } - return legacyWrapMessage(v).Interface() -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go deleted file mode 100644 index 1a509b63e..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go +++ /dev/null @@ -1,5637 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-types. DO NOT EDIT. - -package impl - -import ( - "math" - "unicode/utf8" - - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/reflect/protoreflect" -) - -// sizeBool returns the size of wire encoding a bool pointer as a Bool. -func sizeBool(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - v := *p.Bool() - return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) -} - -// appendBool wire encodes a bool pointer as a Bool. -func appendBool(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.Bool() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, protowire.EncodeBool(v)) - return b, nil -} - -// consumeBool wire decodes a bool pointer as a Bool. 
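The group coders above bracket the nested message between a start-group tag and a matching end-group tag; the deleted code writes the end tag as wiretag+1 because, for the same field number, the end-group wire type is one greater than start-group. A small round-trip sketch using only the public protowire helpers; the field numbers and the single varint payload are arbitrary choices for illustration.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	const groupNum = protowire.Number(3) // arbitrary group field number
	const innerNum = protowire.Number(1) // arbitrary field inside the group

	// Encode: start-group tag, the group's fields, end-group tag.
	b := protowire.AppendTag(nil, groupNum, protowire.StartGroupType)
	b = protowire.AppendTag(b, innerNum, protowire.VarintType)
	b = protowire.AppendVarint(b, 42)
	b = protowire.AppendTag(b, groupNum, protowire.EndGroupType)

	// Decode: after the start-group tag, ConsumeGroup returns the bytes up
	// to (but not including) the matching end-group tag.
	_, _, n := protowire.ConsumeTag(b)
	inner, m := protowire.ConsumeGroup(groupNum, b[n:])
	fmt.Println("group payload bytes:", len(inner), "consumed:", n+m)

	_, _, k := protowire.ConsumeTag(inner)
	v, _ := protowire.ConsumeVarint(inner[k:])
	fmt.Println("inner field value:", v)
}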
-func consumeBool(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return out, errDecode - } - *p.Bool() = protowire.DecodeBool(v) - out.n = n - return out, nil -} - -var coderBool = pointerCoderFuncs{ - size: sizeBool, - marshal: appendBool, - unmarshal: consumeBool, - merge: mergeBool, -} - -// sizeBoolNoZero returns the size of wire encoding a bool pointer as a Bool. -// The zero value is not encoded. -func sizeBoolNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - v := *p.Bool() - if v == false { - return 0 - } - return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) -} - -// appendBoolNoZero wire encodes a bool pointer as a Bool. -// The zero value is not encoded. -func appendBoolNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.Bool() - if v == false { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, protowire.EncodeBool(v)) - return b, nil -} - -var coderBoolNoZero = pointerCoderFuncs{ - size: sizeBoolNoZero, - marshal: appendBoolNoZero, - unmarshal: consumeBool, - merge: mergeBoolNoZero, -} - -// sizeBoolPtr returns the size of wire encoding a *bool pointer as a Bool. -// It panics if the pointer is nil. -func sizeBoolPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - v := **p.BoolPtr() - return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) -} - -// appendBoolPtr wire encodes a *bool pointer as a Bool. -// It panics if the pointer is nil. -func appendBoolPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := **p.BoolPtr() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, protowire.EncodeBool(v)) - return b, nil -} - -// consumeBoolPtr wire decodes a *bool pointer as a Bool. -func consumeBoolPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return out, errDecode - } - vp := p.BoolPtr() - if *vp == nil { - *vp = new(bool) - } - **vp = protowire.DecodeBool(v) - out.n = n - return out, nil -} - -var coderBoolPtr = pointerCoderFuncs{ - size: sizeBoolPtr, - marshal: appendBoolPtr, - unmarshal: consumeBoolPtr, - merge: mergeBoolPtr, -} - -// sizeBoolSlice returns the size of wire encoding a []bool pointer as a repeated Bool. -func sizeBoolSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := *p.BoolSlice() - for _, v := range s { - size += f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) - } - return size -} - -// appendBoolSlice encodes a []bool pointer as a repeated Bool. 
-func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := *p.BoolSlice() - for _, v := range s { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, protowire.EncodeBool(v)) - } - return b, nil -} - -// consumeBoolSlice wire decodes a []bool pointer as a repeated Bool. -func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - sp := p.BoolSlice() - if wtyp == protowire.BytesType { - s := *sp - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - for len(b) > 0 { - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return out, errDecode - } - s = append(s, protowire.DecodeBool(v)) - b = b[n:] - } - *sp = s - out.n = n - return out, nil - } - if wtyp != protowire.VarintType { - return out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return out, errDecode - } - *sp = append(*sp, protowire.DecodeBool(v)) - out.n = n - return out, nil -} - -var coderBoolSlice = pointerCoderFuncs{ - size: sizeBoolSlice, - marshal: appendBoolSlice, - unmarshal: consumeBoolSlice, - merge: mergeBoolSlice, -} - -// sizeBoolPackedSlice returns the size of wire encoding a []bool pointer as a packed repeated Bool. -func sizeBoolPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := *p.BoolSlice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += protowire.SizeVarint(protowire.EncodeBool(v)) - } - return f.tagsize + protowire.SizeBytes(n) -} - -// appendBoolPackedSlice encodes a []bool pointer as a packed repeated Bool. -func appendBoolPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := *p.BoolSlice() - if len(s) == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - n := 0 - for _, v := range s { - n += protowire.SizeVarint(protowire.EncodeBool(v)) - } - b = protowire.AppendVarint(b, uint64(n)) - for _, v := range s { - b = protowire.AppendVarint(b, protowire.EncodeBool(v)) - } - return b, nil -} - -var coderBoolPackedSlice = pointerCoderFuncs{ - size: sizeBoolPackedSlice, - marshal: appendBoolPackedSlice, - unmarshal: consumeBoolSlice, - merge: mergeBoolSlice, -} - -// sizeBoolValue returns the size of wire encoding a bool value as a Bool. -func sizeBoolValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { - return tagsize + protowire.SizeVarint(protowire.EncodeBool(v.Bool())) -} - -// appendBoolValue encodes a bool value as a Bool. -func appendBoolValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) - return b, nil -} - -// consumeBoolValue decodes a bool value as a Bool. 
-func consumeBoolValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return protoreflect.Value{}, out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - out.n = n - return protoreflect.ValueOfBool(protowire.DecodeBool(v)), out, nil -} - -var coderBoolValue = valueCoderFuncs{ - size: sizeBoolValue, - marshal: appendBoolValue, - unmarshal: consumeBoolValue, - merge: mergeScalarValue, -} - -// sizeBoolSliceValue returns the size of wire encoding a []bool value as a repeated Bool. -func sizeBoolSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { - list := listv.List() - for i, llen := 0, list.Len(); i < llen; i++ { - v := list.Get(i) - size += tagsize + protowire.SizeVarint(protowire.EncodeBool(v.Bool())) - } - return size -} - -// appendBoolSliceValue encodes a []bool value as a repeated Bool. -func appendBoolSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - list := listv.List() - for i, llen := 0, list.Len(); i < llen; i++ { - v := list.Get(i) - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) - } - return b, nil -} - -// consumeBoolSliceValue wire decodes a []bool value as a repeated Bool. -func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - list := listv.List() - if wtyp == protowire.BytesType { - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - for len(b) > 0 { - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) - b = b[n:] - } - out.n = n - return listv, out, nil - } - if wtyp != protowire.VarintType { - return protoreflect.Value{}, out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) - out.n = n - return listv, out, nil -} - -var coderBoolSliceValue = valueCoderFuncs{ - size: sizeBoolSliceValue, - marshal: appendBoolSliceValue, - unmarshal: consumeBoolSliceValue, - merge: mergeListValue, -} - -// sizeBoolPackedSliceValue returns the size of wire encoding a []bool value as a packed repeated Bool. 
-func sizeBoolPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { - list := listv.List() - llen := list.Len() - if llen == 0 { - return 0 - } - n := 0 - for i, llen := 0, llen; i < llen; i++ { - v := list.Get(i) - n += protowire.SizeVarint(protowire.EncodeBool(v.Bool())) - } - return tagsize + protowire.SizeBytes(n) -} - -// appendBoolPackedSliceValue encodes a []bool value as a packed repeated Bool. -func appendBoolPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - list := listv.List() - llen := list.Len() - if llen == 0 { - return b, nil - } - b = protowire.AppendVarint(b, wiretag) - n := 0 - for i := 0; i < llen; i++ { - v := list.Get(i) - n += protowire.SizeVarint(protowire.EncodeBool(v.Bool())) - } - b = protowire.AppendVarint(b, uint64(n)) - for i := 0; i < llen; i++ { - v := list.Get(i) - b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) - } - return b, nil -} - -var coderBoolPackedSliceValue = valueCoderFuncs{ - size: sizeBoolPackedSliceValue, - marshal: appendBoolPackedSliceValue, - unmarshal: consumeBoolSliceValue, - merge: mergeListValue, -} - -// sizeEnumValue returns the size of wire encoding a value as a Enum. -func sizeEnumValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { - return tagsize + protowire.SizeVarint(uint64(v.Enum())) -} - -// appendEnumValue encodes a value as a Enum. -func appendEnumValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendVarint(b, uint64(v.Enum())) - return b, nil -} - -// consumeEnumValue decodes a value as a Enum. -func consumeEnumValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return protoreflect.Value{}, out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - out.n = n - return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), out, nil -} - -var coderEnumValue = valueCoderFuncs{ - size: sizeEnumValue, - marshal: appendEnumValue, - unmarshal: consumeEnumValue, - merge: mergeScalarValue, -} - -// sizeEnumSliceValue returns the size of wire encoding a [] value as a repeated Enum. -func sizeEnumSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { - list := listv.List() - for i, llen := 0, list.Len(); i < llen; i++ { - v := list.Get(i) - size += tagsize + protowire.SizeVarint(uint64(v.Enum())) - } - return size -} - -// appendEnumSliceValue encodes a [] value as a repeated Enum. -func appendEnumSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - list := listv.List() - for i, llen := 0, list.Len(); i < llen; i++ { - v := list.Get(i) - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendVarint(b, uint64(v.Enum())) - } - return b, nil -} - -// consumeEnumSliceValue wire decodes a [] value as a repeated Enum. 
-func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - list := listv.List() - if wtyp == protowire.BytesType { - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - for len(b) > 0 { - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) - b = b[n:] - } - out.n = n - return listv, out, nil - } - if wtyp != protowire.VarintType { - return protoreflect.Value{}, out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) - out.n = n - return listv, out, nil -} - -var coderEnumSliceValue = valueCoderFuncs{ - size: sizeEnumSliceValue, - marshal: appendEnumSliceValue, - unmarshal: consumeEnumSliceValue, - merge: mergeListValue, -} - -// sizeEnumPackedSliceValue returns the size of wire encoding a [] value as a packed repeated Enum. -func sizeEnumPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { - list := listv.List() - llen := list.Len() - if llen == 0 { - return 0 - } - n := 0 - for i, llen := 0, llen; i < llen; i++ { - v := list.Get(i) - n += protowire.SizeVarint(uint64(v.Enum())) - } - return tagsize + protowire.SizeBytes(n) -} - -// appendEnumPackedSliceValue encodes a [] value as a packed repeated Enum. -func appendEnumPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - list := listv.List() - llen := list.Len() - if llen == 0 { - return b, nil - } - b = protowire.AppendVarint(b, wiretag) - n := 0 - for i := 0; i < llen; i++ { - v := list.Get(i) - n += protowire.SizeVarint(uint64(v.Enum())) - } - b = protowire.AppendVarint(b, uint64(n)) - for i := 0; i < llen; i++ { - v := list.Get(i) - b = protowire.AppendVarint(b, uint64(v.Enum())) - } - return b, nil -} - -var coderEnumPackedSliceValue = valueCoderFuncs{ - size: sizeEnumPackedSliceValue, - marshal: appendEnumPackedSliceValue, - unmarshal: consumeEnumSliceValue, - merge: mergeListValue, -} - -// sizeInt32 returns the size of wire encoding a int32 pointer as a Int32. -func sizeInt32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - v := *p.Int32() - return f.tagsize + protowire.SizeVarint(uint64(v)) -} - -// appendInt32 wire encodes a int32 pointer as a Int32. -func appendInt32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.Int32() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(v)) - return b, nil -} - -// consumeInt32 wire decodes a int32 pointer as a Int32. 
-func consumeInt32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return out, errDecode - } - *p.Int32() = int32(v) - out.n = n - return out, nil -} - -var coderInt32 = pointerCoderFuncs{ - size: sizeInt32, - marshal: appendInt32, - unmarshal: consumeInt32, - merge: mergeInt32, -} - -// sizeInt32NoZero returns the size of wire encoding a int32 pointer as a Int32. -// The zero value is not encoded. -func sizeInt32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - v := *p.Int32() - if v == 0 { - return 0 - } - return f.tagsize + protowire.SizeVarint(uint64(v)) -} - -// appendInt32NoZero wire encodes a int32 pointer as a Int32. -// The zero value is not encoded. -func appendInt32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.Int32() - if v == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(v)) - return b, nil -} - -var coderInt32NoZero = pointerCoderFuncs{ - size: sizeInt32NoZero, - marshal: appendInt32NoZero, - unmarshal: consumeInt32, - merge: mergeInt32NoZero, -} - -// sizeInt32Ptr returns the size of wire encoding a *int32 pointer as a Int32. -// It panics if the pointer is nil. -func sizeInt32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - v := **p.Int32Ptr() - return f.tagsize + protowire.SizeVarint(uint64(v)) -} - -// appendInt32Ptr wire encodes a *int32 pointer as a Int32. -// It panics if the pointer is nil. -func appendInt32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := **p.Int32Ptr() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(v)) - return b, nil -} - -// consumeInt32Ptr wire decodes a *int32 pointer as a Int32. -func consumeInt32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return out, errDecode - } - vp := p.Int32Ptr() - if *vp == nil { - *vp = new(int32) - } - **vp = int32(v) - out.n = n - return out, nil -} - -var coderInt32Ptr = pointerCoderFuncs{ - size: sizeInt32Ptr, - marshal: appendInt32Ptr, - unmarshal: consumeInt32Ptr, - merge: mergeInt32Ptr, -} - -// sizeInt32Slice returns the size of wire encoding a []int32 pointer as a repeated Int32. -func sizeInt32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := *p.Int32Slice() - for _, v := range s { - size += f.tagsize + protowire.SizeVarint(uint64(v)) - } - return size -} - -// appendInt32Slice encodes a []int32 pointer as a repeated Int32. 
-func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := *p.Int32Slice() - for _, v := range s { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(v)) - } - return b, nil -} - -// consumeInt32Slice wire decodes a []int32 pointer as a repeated Int32. -func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - sp := p.Int32Slice() - if wtyp == protowire.BytesType { - s := *sp - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - for len(b) > 0 { - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return out, errDecode - } - s = append(s, int32(v)) - b = b[n:] - } - *sp = s - out.n = n - return out, nil - } - if wtyp != protowire.VarintType { - return out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return out, errDecode - } - *sp = append(*sp, int32(v)) - out.n = n - return out, nil -} - -var coderInt32Slice = pointerCoderFuncs{ - size: sizeInt32Slice, - marshal: appendInt32Slice, - unmarshal: consumeInt32Slice, - merge: mergeInt32Slice, -} - -// sizeInt32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Int32. -func sizeInt32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := *p.Int32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += protowire.SizeVarint(uint64(v)) - } - return f.tagsize + protowire.SizeBytes(n) -} - -// appendInt32PackedSlice encodes a []int32 pointer as a packed repeated Int32. -func appendInt32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := *p.Int32Slice() - if len(s) == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - n := 0 - for _, v := range s { - n += protowire.SizeVarint(uint64(v)) - } - b = protowire.AppendVarint(b, uint64(n)) - for _, v := range s { - b = protowire.AppendVarint(b, uint64(v)) - } - return b, nil -} - -var coderInt32PackedSlice = pointerCoderFuncs{ - size: sizeInt32PackedSlice, - marshal: appendInt32PackedSlice, - unmarshal: consumeInt32Slice, - merge: mergeInt32Slice, -} - -// sizeInt32Value returns the size of wire encoding a int32 value as a Int32. -func sizeInt32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { - return tagsize + protowire.SizeVarint(uint64(int32(v.Int()))) -} - -// appendInt32Value encodes a int32 value as a Int32. -func appendInt32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendVarint(b, uint64(int32(v.Int()))) - return b, nil -} - -// consumeInt32Value decodes a int32 value as a Int32. 
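consumeInt32Slice above accepts both wire forms of a repeated int32: a single length-delimited (packed) payload of back-to-back varints, and individually tagged varint records. The sketch below reproduces only the packed branch with plain protowire calls; the names decodePackedInt32 and main are invented for the example.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// decodePackedInt32 reads a packed repeated int32: the bytes value is
// consumed first, then varints are read until the payload is empty.
func decodePackedInt32(b []byte) ([]int32, error) {
	payload, n := protowire.ConsumeBytes(b)
	if n < 0 {
		return nil, protowire.ParseError(n)
	}
	var out []int32
	for len(payload) > 0 {
		v, m := protowire.ConsumeVarint(payload)
		if m < 0 {
			return nil, protowire.ParseError(m)
		}
		out = append(out, int32(v))
		payload = payload[m:]
	}
	return out, nil
}

func main() {
	// Build a packed payload: the varints 1, 2, 300 behind a length prefix.
	var body []byte
	for _, v := range []int32{1, 2, 300} {
		body = protowire.AppendVarint(body, uint64(v))
	}
	b := protowire.AppendBytes(nil, body)
	vals, err := decodePackedInt32(b)
	fmt.Println(vals, err) // [1 2 300] <nil>
}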
-func consumeInt32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return protoreflect.Value{}, out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - out.n = n - return protoreflect.ValueOfInt32(int32(v)), out, nil -} - -var coderInt32Value = valueCoderFuncs{ - size: sizeInt32Value, - marshal: appendInt32Value, - unmarshal: consumeInt32Value, - merge: mergeScalarValue, -} - -// sizeInt32SliceValue returns the size of wire encoding a []int32 value as a repeated Int32. -func sizeInt32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { - list := listv.List() - for i, llen := 0, list.Len(); i < llen; i++ { - v := list.Get(i) - size += tagsize + protowire.SizeVarint(uint64(int32(v.Int()))) - } - return size -} - -// appendInt32SliceValue encodes a []int32 value as a repeated Int32. -func appendInt32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - list := listv.List() - for i, llen := 0, list.Len(); i < llen; i++ { - v := list.Get(i) - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendVarint(b, uint64(int32(v.Int()))) - } - return b, nil -} - -// consumeInt32SliceValue wire decodes a []int32 value as a repeated Int32. -func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - list := listv.List() - if wtyp == protowire.BytesType { - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - for len(b) > 0 { - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - list.Append(protoreflect.ValueOfInt32(int32(v))) - b = b[n:] - } - out.n = n - return listv, out, nil - } - if wtyp != protowire.VarintType { - return protoreflect.Value{}, out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - list.Append(protoreflect.ValueOfInt32(int32(v))) - out.n = n - return listv, out, nil -} - -var coderInt32SliceValue = valueCoderFuncs{ - size: sizeInt32SliceValue, - marshal: appendInt32SliceValue, - unmarshal: consumeInt32SliceValue, - merge: mergeListValue, -} - -// sizeInt32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Int32. 
-func sizeInt32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { - list := listv.List() - llen := list.Len() - if llen == 0 { - return 0 - } - n := 0 - for i, llen := 0, llen; i < llen; i++ { - v := list.Get(i) - n += protowire.SizeVarint(uint64(int32(v.Int()))) - } - return tagsize + protowire.SizeBytes(n) -} - -// appendInt32PackedSliceValue encodes a []int32 value as a packed repeated Int32. -func appendInt32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - list := listv.List() - llen := list.Len() - if llen == 0 { - return b, nil - } - b = protowire.AppendVarint(b, wiretag) - n := 0 - for i := 0; i < llen; i++ { - v := list.Get(i) - n += protowire.SizeVarint(uint64(int32(v.Int()))) - } - b = protowire.AppendVarint(b, uint64(n)) - for i := 0; i < llen; i++ { - v := list.Get(i) - b = protowire.AppendVarint(b, uint64(int32(v.Int()))) - } - return b, nil -} - -var coderInt32PackedSliceValue = valueCoderFuncs{ - size: sizeInt32PackedSliceValue, - marshal: appendInt32PackedSliceValue, - unmarshal: consumeInt32SliceValue, - merge: mergeListValue, -} - -// sizeSint32 returns the size of wire encoding a int32 pointer as a Sint32. -func sizeSint32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - v := *p.Int32() - return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) -} - -// appendSint32 wire encodes a int32 pointer as a Sint32. -func appendSint32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.Int32() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) - return b, nil -} - -// consumeSint32 wire decodes a int32 pointer as a Sint32. -func consumeSint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return out, errDecode - } - *p.Int32() = int32(protowire.DecodeZigZag(v & math.MaxUint32)) - out.n = n - return out, nil -} - -var coderSint32 = pointerCoderFuncs{ - size: sizeSint32, - marshal: appendSint32, - unmarshal: consumeSint32, - merge: mergeInt32, -} - -// sizeSint32NoZero returns the size of wire encoding a int32 pointer as a Sint32. -// The zero value is not encoded. -func sizeSint32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - v := *p.Int32() - if v == 0 { - return 0 - } - return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) -} - -// appendSint32NoZero wire encodes a int32 pointer as a Sint32. -// The zero value is not encoded. -func appendSint32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.Int32() - if v == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) - return b, nil -} - -var coderSint32NoZero = pointerCoderFuncs{ - size: sizeSint32NoZero, - marshal: appendSint32NoZero, - unmarshal: consumeSint32, - merge: mergeInt32NoZero, -} - -// sizeSint32Ptr returns the size of wire encoding a *int32 pointer as a Sint32. -// It panics if the pointer is nil. 
-func sizeSint32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - v := **p.Int32Ptr() - return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) -} - -// appendSint32Ptr wire encodes a *int32 pointer as a Sint32. -// It panics if the pointer is nil. -func appendSint32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := **p.Int32Ptr() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) - return b, nil -} - -// consumeSint32Ptr wire decodes a *int32 pointer as a Sint32. -func consumeSint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return out, errDecode - } - vp := p.Int32Ptr() - if *vp == nil { - *vp = new(int32) - } - **vp = int32(protowire.DecodeZigZag(v & math.MaxUint32)) - out.n = n - return out, nil -} - -var coderSint32Ptr = pointerCoderFuncs{ - size: sizeSint32Ptr, - marshal: appendSint32Ptr, - unmarshal: consumeSint32Ptr, - merge: mergeInt32Ptr, -} - -// sizeSint32Slice returns the size of wire encoding a []int32 pointer as a repeated Sint32. -func sizeSint32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := *p.Int32Slice() - for _, v := range s { - size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) - } - return size -} - -// appendSint32Slice encodes a []int32 pointer as a repeated Sint32. -func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := *p.Int32Slice() - for _, v := range s { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) - } - return b, nil -} - -// consumeSint32Slice wire decodes a []int32 pointer as a repeated Sint32. 
-func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - sp := p.Int32Slice() - if wtyp == protowire.BytesType { - s := *sp - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - for len(b) > 0 { - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return out, errDecode - } - s = append(s, int32(protowire.DecodeZigZag(v&math.MaxUint32))) - b = b[n:] - } - *sp = s - out.n = n - return out, nil - } - if wtyp != protowire.VarintType { - return out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return out, errDecode - } - *sp = append(*sp, int32(protowire.DecodeZigZag(v&math.MaxUint32))) - out.n = n - return out, nil -} - -var coderSint32Slice = pointerCoderFuncs{ - size: sizeSint32Slice, - marshal: appendSint32Slice, - unmarshal: consumeSint32Slice, - merge: mergeInt32Slice, -} - -// sizeSint32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sint32. -func sizeSint32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := *p.Int32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) - } - return f.tagsize + protowire.SizeBytes(n) -} - -// appendSint32PackedSlice encodes a []int32 pointer as a packed repeated Sint32. -func appendSint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := *p.Int32Slice() - if len(s) == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - n := 0 - for _, v := range s { - n += protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) - } - b = protowire.AppendVarint(b, uint64(n)) - for _, v := range s { - b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) - } - return b, nil -} - -var coderSint32PackedSlice = pointerCoderFuncs{ - size: sizeSint32PackedSlice, - marshal: appendSint32PackedSlice, - unmarshal: consumeSint32Slice, - merge: mergeInt32Slice, -} - -// sizeSint32Value returns the size of wire encoding a int32 value as a Sint32. -func sizeSint32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { - return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) -} - -// appendSint32Value encodes a int32 value as a Sint32. -func appendSint32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) - return b, nil -} - -// consumeSint32Value decodes a int32 value as a Sint32. 
-func consumeSint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return protoreflect.Value{}, out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - out.n = n - return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), out, nil -} - -var coderSint32Value = valueCoderFuncs{ - size: sizeSint32Value, - marshal: appendSint32Value, - unmarshal: consumeSint32Value, - merge: mergeScalarValue, -} - -// sizeSint32SliceValue returns the size of wire encoding a []int32 value as a repeated Sint32. -func sizeSint32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { - list := listv.List() - for i, llen := 0, list.Len(); i < llen; i++ { - v := list.Get(i) - size += tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) - } - return size -} - -// appendSint32SliceValue encodes a []int32 value as a repeated Sint32. -func appendSint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - list := listv.List() - for i, llen := 0, list.Len(); i < llen; i++ { - v := list.Get(i) - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) - } - return b, nil -} - -// consumeSint32SliceValue wire decodes a []int32 value as a repeated Sint32. -func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - list := listv.List() - if wtyp == protowire.BytesType { - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - for len(b) > 0 { - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) - b = b[n:] - } - out.n = n - return listv, out, nil - } - if wtyp != protowire.VarintType { - return protoreflect.Value{}, out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) - out.n = n - return listv, out, nil -} - -var coderSint32SliceValue = valueCoderFuncs{ - size: sizeSint32SliceValue, - marshal: appendSint32SliceValue, - unmarshal: consumeSint32SliceValue, - merge: mergeListValue, -} - -// sizeSint32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sint32. 
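The Sint32 coders deleted here differ from the Int32 ones only in the zigzag transform: small negative values map to small unsigned varints, and on decode the value is masked to 32 bits (the v & math.MaxUint32 above) before the zigzag is undone. A quick round-trip sketch using only protowire and the standard library:

package main

import (
	"fmt"
	"math"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Zigzag maps ..., -2, -1, 0, 1, 2, ... to 3, 1, 0, 2, 4 so that small
	// negative values stay short on the wire.
	for _, x := range []int32{0, -1, 1, -2, 150} {
		enc := protowire.EncodeZigZag(int64(x))
		// Decode as the generated Sint32 coders do: truncate to 32 bits first.
		dec := int32(protowire.DecodeZigZag(enc & math.MaxUint32))
		fmt.Printf("sint32 %d -> varint %d -> %d (%d wire bytes)\n",
			x, enc, dec, protowire.SizeVarint(enc))
	}
}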
-func sizeSint32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { - list := listv.List() - llen := list.Len() - if llen == 0 { - return 0 - } - n := 0 - for i, llen := 0, llen; i < llen; i++ { - v := list.Get(i) - n += protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) - } - return tagsize + protowire.SizeBytes(n) -} - -// appendSint32PackedSliceValue encodes a []int32 value as a packed repeated Sint32. -func appendSint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - list := listv.List() - llen := list.Len() - if llen == 0 { - return b, nil - } - b = protowire.AppendVarint(b, wiretag) - n := 0 - for i := 0; i < llen; i++ { - v := list.Get(i) - n += protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) - } - b = protowire.AppendVarint(b, uint64(n)) - for i := 0; i < llen; i++ { - v := list.Get(i) - b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) - } - return b, nil -} - -var coderSint32PackedSliceValue = valueCoderFuncs{ - size: sizeSint32PackedSliceValue, - marshal: appendSint32PackedSliceValue, - unmarshal: consumeSint32SliceValue, - merge: mergeListValue, -} - -// sizeUint32 returns the size of wire encoding a uint32 pointer as a Uint32. -func sizeUint32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - v := *p.Uint32() - return f.tagsize + protowire.SizeVarint(uint64(v)) -} - -// appendUint32 wire encodes a uint32 pointer as a Uint32. -func appendUint32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.Uint32() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(v)) - return b, nil -} - -// consumeUint32 wire decodes a uint32 pointer as a Uint32. -func consumeUint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return out, errDecode - } - *p.Uint32() = uint32(v) - out.n = n - return out, nil -} - -var coderUint32 = pointerCoderFuncs{ - size: sizeUint32, - marshal: appendUint32, - unmarshal: consumeUint32, - merge: mergeUint32, -} - -// sizeUint32NoZero returns the size of wire encoding a uint32 pointer as a Uint32. -// The zero value is not encoded. -func sizeUint32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - v := *p.Uint32() - if v == 0 { - return 0 - } - return f.tagsize + protowire.SizeVarint(uint64(v)) -} - -// appendUint32NoZero wire encodes a uint32 pointer as a Uint32. -// The zero value is not encoded. -func appendUint32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.Uint32() - if v == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(v)) - return b, nil -} - -var coderUint32NoZero = pointerCoderFuncs{ - size: sizeUint32NoZero, - marshal: appendUint32NoZero, - unmarshal: consumeUint32, - merge: mergeUint32NoZero, -} - -// sizeUint32Ptr returns the size of wire encoding a *uint32 pointer as a Uint32. -// It panics if the pointer is nil. 
-func sizeUint32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - v := **p.Uint32Ptr() - return f.tagsize + protowire.SizeVarint(uint64(v)) -} - -// appendUint32Ptr wire encodes a *uint32 pointer as a Uint32. -// It panics if the pointer is nil. -func appendUint32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := **p.Uint32Ptr() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(v)) - return b, nil -} - -// consumeUint32Ptr wire decodes a *uint32 pointer as a Uint32. -func consumeUint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return out, errDecode - } - vp := p.Uint32Ptr() - if *vp == nil { - *vp = new(uint32) - } - **vp = uint32(v) - out.n = n - return out, nil -} - -var coderUint32Ptr = pointerCoderFuncs{ - size: sizeUint32Ptr, - marshal: appendUint32Ptr, - unmarshal: consumeUint32Ptr, - merge: mergeUint32Ptr, -} - -// sizeUint32Slice returns the size of wire encoding a []uint32 pointer as a repeated Uint32. -func sizeUint32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := *p.Uint32Slice() - for _, v := range s { - size += f.tagsize + protowire.SizeVarint(uint64(v)) - } - return size -} - -// appendUint32Slice encodes a []uint32 pointer as a repeated Uint32. -func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := *p.Uint32Slice() - for _, v := range s { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(v)) - } - return b, nil -} - -// consumeUint32Slice wire decodes a []uint32 pointer as a repeated Uint32. -func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - sp := p.Uint32Slice() - if wtyp == protowire.BytesType { - s := *sp - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - for len(b) > 0 { - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return out, errDecode - } - s = append(s, uint32(v)) - b = b[n:] - } - *sp = s - out.n = n - return out, nil - } - if wtyp != protowire.VarintType { - return out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return out, errDecode - } - *sp = append(*sp, uint32(v)) - out.n = n - return out, nil -} - -var coderUint32Slice = pointerCoderFuncs{ - size: sizeUint32Slice, - marshal: appendUint32Slice, - unmarshal: consumeUint32Slice, - merge: mergeUint32Slice, -} - -// sizeUint32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Uint32. 
-func sizeUint32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := *p.Uint32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += protowire.SizeVarint(uint64(v)) - } - return f.tagsize + protowire.SizeBytes(n) -} - -// appendUint32PackedSlice encodes a []uint32 pointer as a packed repeated Uint32. -func appendUint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := *p.Uint32Slice() - if len(s) == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - n := 0 - for _, v := range s { - n += protowire.SizeVarint(uint64(v)) - } - b = protowire.AppendVarint(b, uint64(n)) - for _, v := range s { - b = protowire.AppendVarint(b, uint64(v)) - } - return b, nil -} - -var coderUint32PackedSlice = pointerCoderFuncs{ - size: sizeUint32PackedSlice, - marshal: appendUint32PackedSlice, - unmarshal: consumeUint32Slice, - merge: mergeUint32Slice, -} - -// sizeUint32Value returns the size of wire encoding a uint32 value as a Uint32. -func sizeUint32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { - return tagsize + protowire.SizeVarint(uint64(uint32(v.Uint()))) -} - -// appendUint32Value encodes a uint32 value as a Uint32. -func appendUint32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) - return b, nil -} - -// consumeUint32Value decodes a uint32 value as a Uint32. -func consumeUint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return protoreflect.Value{}, out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - out.n = n - return protoreflect.ValueOfUint32(uint32(v)), out, nil -} - -var coderUint32Value = valueCoderFuncs{ - size: sizeUint32Value, - marshal: appendUint32Value, - unmarshal: consumeUint32Value, - merge: mergeScalarValue, -} - -// sizeUint32SliceValue returns the size of wire encoding a []uint32 value as a repeated Uint32. -func sizeUint32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { - list := listv.List() - for i, llen := 0, list.Len(); i < llen; i++ { - v := list.Get(i) - size += tagsize + protowire.SizeVarint(uint64(uint32(v.Uint()))) - } - return size -} - -// appendUint32SliceValue encodes a []uint32 value as a repeated Uint32. -func appendUint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - list := listv.List() - for i, llen := 0, list.Len(); i < llen; i++ { - v := list.Get(i) - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) - } - return b, nil -} - -// consumeUint32SliceValue wire decodes a []uint32 value as a repeated Uint32. 
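On the marshal side, the *PackedSlice functions above write the tag once, then the byte length of all the varints, then the varints themselves. The sketch below reproduces that shape for a []uint32 with plain protowire calls; the field number 1 and the helper name appendPackedUint32 are illustrative, not from the generated code.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// appendPackedUint32 encodes s as field num in packed form:
// one tag (bytes wire type), the payload length, then one varint per element.
func appendPackedUint32(b []byte, num protowire.Number, s []uint32) []byte {
	if len(s) == 0 {
		return b // an empty packed field is omitted entirely
	}
	b = protowire.AppendTag(b, num, protowire.BytesType)
	n := 0
	for _, v := range s {
		n += protowire.SizeVarint(uint64(v))
	}
	b = protowire.AppendVarint(b, uint64(n))
	for _, v := range s {
		b = protowire.AppendVarint(b, uint64(v))
	}
	return b
}

func main() {
	b := appendPackedUint32(nil, 1, []uint32{1, 2, 300})
	fmt.Printf("% x\n", b) // 0a 04 01 02 ac 02
}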
-func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - list := listv.List() - if wtyp == protowire.BytesType { - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - for len(b) > 0 { - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - list.Append(protoreflect.ValueOfUint32(uint32(v))) - b = b[n:] - } - out.n = n - return listv, out, nil - } - if wtyp != protowire.VarintType { - return protoreflect.Value{}, out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - list.Append(protoreflect.ValueOfUint32(uint32(v))) - out.n = n - return listv, out, nil -} - -var coderUint32SliceValue = valueCoderFuncs{ - size: sizeUint32SliceValue, - marshal: appendUint32SliceValue, - unmarshal: consumeUint32SliceValue, - merge: mergeListValue, -} - -// sizeUint32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Uint32. -func sizeUint32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { - list := listv.List() - llen := list.Len() - if llen == 0 { - return 0 - } - n := 0 - for i, llen := 0, llen; i < llen; i++ { - v := list.Get(i) - n += protowire.SizeVarint(uint64(uint32(v.Uint()))) - } - return tagsize + protowire.SizeBytes(n) -} - -// appendUint32PackedSliceValue encodes a []uint32 value as a packed repeated Uint32. -func appendUint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - list := listv.List() - llen := list.Len() - if llen == 0 { - return b, nil - } - b = protowire.AppendVarint(b, wiretag) - n := 0 - for i := 0; i < llen; i++ { - v := list.Get(i) - n += protowire.SizeVarint(uint64(uint32(v.Uint()))) - } - b = protowire.AppendVarint(b, uint64(n)) - for i := 0; i < llen; i++ { - v := list.Get(i) - b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) - } - return b, nil -} - -var coderUint32PackedSliceValue = valueCoderFuncs{ - size: sizeUint32PackedSliceValue, - marshal: appendUint32PackedSliceValue, - unmarshal: consumeUint32SliceValue, - merge: mergeListValue, -} - -// sizeInt64 returns the size of wire encoding a int64 pointer as a Int64. -func sizeInt64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - v := *p.Int64() - return f.tagsize + protowire.SizeVarint(uint64(v)) -} - -// appendInt64 wire encodes a int64 pointer as a Int64. -func appendInt64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.Int64() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(v)) - return b, nil -} - -// consumeInt64 wire decodes a int64 pointer as a Int64. 
-func consumeInt64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return out, errDecode - } - *p.Int64() = int64(v) - out.n = n - return out, nil -} - -var coderInt64 = pointerCoderFuncs{ - size: sizeInt64, - marshal: appendInt64, - unmarshal: consumeInt64, - merge: mergeInt64, -} - -// sizeInt64NoZero returns the size of wire encoding a int64 pointer as a Int64. -// The zero value is not encoded. -func sizeInt64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - v := *p.Int64() - if v == 0 { - return 0 - } - return f.tagsize + protowire.SizeVarint(uint64(v)) -} - -// appendInt64NoZero wire encodes a int64 pointer as a Int64. -// The zero value is not encoded. -func appendInt64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.Int64() - if v == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(v)) - return b, nil -} - -var coderInt64NoZero = pointerCoderFuncs{ - size: sizeInt64NoZero, - marshal: appendInt64NoZero, - unmarshal: consumeInt64, - merge: mergeInt64NoZero, -} - -// sizeInt64Ptr returns the size of wire encoding a *int64 pointer as a Int64. -// It panics if the pointer is nil. -func sizeInt64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - v := **p.Int64Ptr() - return f.tagsize + protowire.SizeVarint(uint64(v)) -} - -// appendInt64Ptr wire encodes a *int64 pointer as a Int64. -// It panics if the pointer is nil. -func appendInt64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := **p.Int64Ptr() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(v)) - return b, nil -} - -// consumeInt64Ptr wire decodes a *int64 pointer as a Int64. -func consumeInt64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return out, errDecode - } - vp := p.Int64Ptr() - if *vp == nil { - *vp = new(int64) - } - **vp = int64(v) - out.n = n - return out, nil -} - -var coderInt64Ptr = pointerCoderFuncs{ - size: sizeInt64Ptr, - marshal: appendInt64Ptr, - unmarshal: consumeInt64Ptr, - merge: mergeInt64Ptr, -} - -// sizeInt64Slice returns the size of wire encoding a []int64 pointer as a repeated Int64. -func sizeInt64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := *p.Int64Slice() - for _, v := range s { - size += f.tagsize + protowire.SizeVarint(uint64(v)) - } - return size -} - -// appendInt64Slice encodes a []int64 pointer as a repeated Int64. 
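The *NoZero variants in this hunk back proto3 fields with implicit presence: a zero value produces no bytes at all, so both the size and append paths bail out early. A minimal sketch of that behavior for an int64 field; the field number 1 and the function name are made up for the example and do not share the generated functions' signatures.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// writeInt64IfNonZero emits the field only when v is non-zero, mirroring the
// generated NoZero coders used for proto3 implicit-presence fields.
func writeInt64IfNonZero(b []byte, num protowire.Number, v int64) []byte {
	if v == 0 {
		return b
	}
	b = protowire.AppendTag(b, num, protowire.VarintType)
	return protowire.AppendVarint(b, uint64(v))
}

func main() {
	fmt.Println(len(writeInt64IfNonZero(nil, 1, 0)))  // 0: zero value is skipped
	fmt.Println(len(writeInt64IfNonZero(nil, 1, 42))) // 2: tag byte + one varint byte
}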
-func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := *p.Int64Slice() - for _, v := range s { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(v)) - } - return b, nil -} - -// consumeInt64Slice wire decodes a []int64 pointer as a repeated Int64. -func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - sp := p.Int64Slice() - if wtyp == protowire.BytesType { - s := *sp - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - for len(b) > 0 { - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return out, errDecode - } - s = append(s, int64(v)) - b = b[n:] - } - *sp = s - out.n = n - return out, nil - } - if wtyp != protowire.VarintType { - return out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return out, errDecode - } - *sp = append(*sp, int64(v)) - out.n = n - return out, nil -} - -var coderInt64Slice = pointerCoderFuncs{ - size: sizeInt64Slice, - marshal: appendInt64Slice, - unmarshal: consumeInt64Slice, - merge: mergeInt64Slice, -} - -// sizeInt64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Int64. -func sizeInt64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := *p.Int64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += protowire.SizeVarint(uint64(v)) - } - return f.tagsize + protowire.SizeBytes(n) -} - -// appendInt64PackedSlice encodes a []int64 pointer as a packed repeated Int64. -func appendInt64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := *p.Int64Slice() - if len(s) == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - n := 0 - for _, v := range s { - n += protowire.SizeVarint(uint64(v)) - } - b = protowire.AppendVarint(b, uint64(n)) - for _, v := range s { - b = protowire.AppendVarint(b, uint64(v)) - } - return b, nil -} - -var coderInt64PackedSlice = pointerCoderFuncs{ - size: sizeInt64PackedSlice, - marshal: appendInt64PackedSlice, - unmarshal: consumeInt64Slice, - merge: mergeInt64Slice, -} - -// sizeInt64Value returns the size of wire encoding a int64 value as a Int64. -func sizeInt64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { - return tagsize + protowire.SizeVarint(uint64(v.Int())) -} - -// appendInt64Value encodes a int64 value as a Int64. -func appendInt64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendVarint(b, uint64(v.Int())) - return b, nil -} - -// consumeInt64Value decodes a int64 value as a Int64. 
-func consumeInt64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return protoreflect.Value{}, out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - out.n = n - return protoreflect.ValueOfInt64(int64(v)), out, nil -} - -var coderInt64Value = valueCoderFuncs{ - size: sizeInt64Value, - marshal: appendInt64Value, - unmarshal: consumeInt64Value, - merge: mergeScalarValue, -} - -// sizeInt64SliceValue returns the size of wire encoding a []int64 value as a repeated Int64. -func sizeInt64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { - list := listv.List() - for i, llen := 0, list.Len(); i < llen; i++ { - v := list.Get(i) - size += tagsize + protowire.SizeVarint(uint64(v.Int())) - } - return size -} - -// appendInt64SliceValue encodes a []int64 value as a repeated Int64. -func appendInt64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - list := listv.List() - for i, llen := 0, list.Len(); i < llen; i++ { - v := list.Get(i) - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendVarint(b, uint64(v.Int())) - } - return b, nil -} - -// consumeInt64SliceValue wire decodes a []int64 value as a repeated Int64. -func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - list := listv.List() - if wtyp == protowire.BytesType { - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - for len(b) > 0 { - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - list.Append(protoreflect.ValueOfInt64(int64(v))) - b = b[n:] - } - out.n = n - return listv, out, nil - } - if wtyp != protowire.VarintType { - return protoreflect.Value{}, out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - list.Append(protoreflect.ValueOfInt64(int64(v))) - out.n = n - return listv, out, nil -} - -var coderInt64SliceValue = valueCoderFuncs{ - size: sizeInt64SliceValue, - marshal: appendInt64SliceValue, - unmarshal: consumeInt64SliceValue, - merge: mergeListValue, -} - -// sizeInt64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Int64. 
-func sizeInt64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { - list := listv.List() - llen := list.Len() - if llen == 0 { - return 0 - } - n := 0 - for i, llen := 0, llen; i < llen; i++ { - v := list.Get(i) - n += protowire.SizeVarint(uint64(v.Int())) - } - return tagsize + protowire.SizeBytes(n) -} - -// appendInt64PackedSliceValue encodes a []int64 value as a packed repeated Int64. -func appendInt64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - list := listv.List() - llen := list.Len() - if llen == 0 { - return b, nil - } - b = protowire.AppendVarint(b, wiretag) - n := 0 - for i := 0; i < llen; i++ { - v := list.Get(i) - n += protowire.SizeVarint(uint64(v.Int())) - } - b = protowire.AppendVarint(b, uint64(n)) - for i := 0; i < llen; i++ { - v := list.Get(i) - b = protowire.AppendVarint(b, uint64(v.Int())) - } - return b, nil -} - -var coderInt64PackedSliceValue = valueCoderFuncs{ - size: sizeInt64PackedSliceValue, - marshal: appendInt64PackedSliceValue, - unmarshal: consumeInt64SliceValue, - merge: mergeListValue, -} - -// sizeSint64 returns the size of wire encoding a int64 pointer as a Sint64. -func sizeSint64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - v := *p.Int64() - return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) -} - -// appendSint64 wire encodes a int64 pointer as a Sint64. -func appendSint64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.Int64() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) - return b, nil -} - -// consumeSint64 wire decodes a int64 pointer as a Sint64. -func consumeSint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return out, errDecode - } - *p.Int64() = protowire.DecodeZigZag(v) - out.n = n - return out, nil -} - -var coderSint64 = pointerCoderFuncs{ - size: sizeSint64, - marshal: appendSint64, - unmarshal: consumeSint64, - merge: mergeInt64, -} - -// sizeSint64NoZero returns the size of wire encoding a int64 pointer as a Sint64. -// The zero value is not encoded. -func sizeSint64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - v := *p.Int64() - if v == 0 { - return 0 - } - return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) -} - -// appendSint64NoZero wire encodes a int64 pointer as a Sint64. -// The zero value is not encoded. -func appendSint64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.Int64() - if v == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) - return b, nil -} - -var coderSint64NoZero = pointerCoderFuncs{ - size: sizeSint64NoZero, - marshal: appendSint64NoZero, - unmarshal: consumeSint64, - merge: mergeInt64NoZero, -} - -// sizeSint64Ptr returns the size of wire encoding a *int64 pointer as a Sint64. -// It panics if the pointer is nil. 
-func sizeSint64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - v := **p.Int64Ptr() - return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) -} - -// appendSint64Ptr wire encodes a *int64 pointer as a Sint64. -// It panics if the pointer is nil. -func appendSint64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := **p.Int64Ptr() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) - return b, nil -} - -// consumeSint64Ptr wire decodes a *int64 pointer as a Sint64. -func consumeSint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return out, errDecode - } - vp := p.Int64Ptr() - if *vp == nil { - *vp = new(int64) - } - **vp = protowire.DecodeZigZag(v) - out.n = n - return out, nil -} - -var coderSint64Ptr = pointerCoderFuncs{ - size: sizeSint64Ptr, - marshal: appendSint64Ptr, - unmarshal: consumeSint64Ptr, - merge: mergeInt64Ptr, -} - -// sizeSint64Slice returns the size of wire encoding a []int64 pointer as a repeated Sint64. -func sizeSint64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := *p.Int64Slice() - for _, v := range s { - size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) - } - return size -} - -// appendSint64Slice encodes a []int64 pointer as a repeated Sint64. -func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := *p.Int64Slice() - for _, v := range s { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) - } - return b, nil -} - -// consumeSint64Slice wire decodes a []int64 pointer as a repeated Sint64. -func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - sp := p.Int64Slice() - if wtyp == protowire.BytesType { - s := *sp - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - for len(b) > 0 { - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return out, errDecode - } - s = append(s, protowire.DecodeZigZag(v)) - b = b[n:] - } - *sp = s - out.n = n - return out, nil - } - if wtyp != protowire.VarintType { - return out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return out, errDecode - } - *sp = append(*sp, protowire.DecodeZigZag(v)) - out.n = n - return out, nil -} - -var coderSint64Slice = pointerCoderFuncs{ - size: sizeSint64Slice, - marshal: appendSint64Slice, - unmarshal: consumeSint64Slice, - merge: mergeInt64Slice, -} - -// sizeSint64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sint64. 
-func sizeSint64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := *p.Int64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += protowire.SizeVarint(protowire.EncodeZigZag(v)) - } - return f.tagsize + protowire.SizeBytes(n) -} - -// appendSint64PackedSlice encodes a []int64 pointer as a packed repeated Sint64. -func appendSint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := *p.Int64Slice() - if len(s) == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - n := 0 - for _, v := range s { - n += protowire.SizeVarint(protowire.EncodeZigZag(v)) - } - b = protowire.AppendVarint(b, uint64(n)) - for _, v := range s { - b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) - } - return b, nil -} - -var coderSint64PackedSlice = pointerCoderFuncs{ - size: sizeSint64PackedSlice, - marshal: appendSint64PackedSlice, - unmarshal: consumeSint64Slice, - merge: mergeInt64Slice, -} - -// sizeSint64Value returns the size of wire encoding a int64 value as a Sint64. -func sizeSint64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { - return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) -} - -// appendSint64Value encodes a int64 value as a Sint64. -func appendSint64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) - return b, nil -} - -// consumeSint64Value decodes a int64 value as a Sint64. -func consumeSint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return protoreflect.Value{}, out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - out.n = n - return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), out, nil -} - -var coderSint64Value = valueCoderFuncs{ - size: sizeSint64Value, - marshal: appendSint64Value, - unmarshal: consumeSint64Value, - merge: mergeScalarValue, -} - -// sizeSint64SliceValue returns the size of wire encoding a []int64 value as a repeated Sint64. -func sizeSint64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { - list := listv.List() - for i, llen := 0, list.Len(); i < llen; i++ { - v := list.Get(i) - size += tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) - } - return size -} - -// appendSint64SliceValue encodes a []int64 value as a repeated Sint64. -func appendSint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - list := listv.List() - for i, llen := 0, list.Len(); i < llen; i++ { - v := list.Get(i) - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) - } - return b, nil -} - -// consumeSint64SliceValue wire decodes a []int64 value as a repeated Sint64. 
-func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - list := listv.List() - if wtyp == protowire.BytesType { - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - for len(b) > 0 { - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) - b = b[n:] - } - out.n = n - return listv, out, nil - } - if wtyp != protowire.VarintType { - return protoreflect.Value{}, out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) - out.n = n - return listv, out, nil -} - -var coderSint64SliceValue = valueCoderFuncs{ - size: sizeSint64SliceValue, - marshal: appendSint64SliceValue, - unmarshal: consumeSint64SliceValue, - merge: mergeListValue, -} - -// sizeSint64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sint64. -func sizeSint64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { - list := listv.List() - llen := list.Len() - if llen == 0 { - return 0 - } - n := 0 - for i, llen := 0, llen; i < llen; i++ { - v := list.Get(i) - n += protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) - } - return tagsize + protowire.SizeBytes(n) -} - -// appendSint64PackedSliceValue encodes a []int64 value as a packed repeated Sint64. -func appendSint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - list := listv.List() - llen := list.Len() - if llen == 0 { - return b, nil - } - b = protowire.AppendVarint(b, wiretag) - n := 0 - for i := 0; i < llen; i++ { - v := list.Get(i) - n += protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) - } - b = protowire.AppendVarint(b, uint64(n)) - for i := 0; i < llen; i++ { - v := list.Get(i) - b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) - } - return b, nil -} - -var coderSint64PackedSliceValue = valueCoderFuncs{ - size: sizeSint64PackedSliceValue, - marshal: appendSint64PackedSliceValue, - unmarshal: consumeSint64SliceValue, - merge: mergeListValue, -} - -// sizeUint64 returns the size of wire encoding a uint64 pointer as a Uint64. -func sizeUint64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - v := *p.Uint64() - return f.tagsize + protowire.SizeVarint(v) -} - -// appendUint64 wire encodes a uint64 pointer as a Uint64. -func appendUint64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.Uint64() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, v) - return b, nil -} - -// consumeUint64 wire decodes a uint64 pointer as a Uint64. 
-func consumeUint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return out, errDecode - } - *p.Uint64() = v - out.n = n - return out, nil -} - -var coderUint64 = pointerCoderFuncs{ - size: sizeUint64, - marshal: appendUint64, - unmarshal: consumeUint64, - merge: mergeUint64, -} - -// sizeUint64NoZero returns the size of wire encoding a uint64 pointer as a Uint64. -// The zero value is not encoded. -func sizeUint64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - v := *p.Uint64() - if v == 0 { - return 0 - } - return f.tagsize + protowire.SizeVarint(v) -} - -// appendUint64NoZero wire encodes a uint64 pointer as a Uint64. -// The zero value is not encoded. -func appendUint64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.Uint64() - if v == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, v) - return b, nil -} - -var coderUint64NoZero = pointerCoderFuncs{ - size: sizeUint64NoZero, - marshal: appendUint64NoZero, - unmarshal: consumeUint64, - merge: mergeUint64NoZero, -} - -// sizeUint64Ptr returns the size of wire encoding a *uint64 pointer as a Uint64. -// It panics if the pointer is nil. -func sizeUint64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - v := **p.Uint64Ptr() - return f.tagsize + protowire.SizeVarint(v) -} - -// appendUint64Ptr wire encodes a *uint64 pointer as a Uint64. -// It panics if the pointer is nil. -func appendUint64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := **p.Uint64Ptr() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, v) - return b, nil -} - -// consumeUint64Ptr wire decodes a *uint64 pointer as a Uint64. -func consumeUint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return out, errDecode - } - vp := p.Uint64Ptr() - if *vp == nil { - *vp = new(uint64) - } - **vp = v - out.n = n - return out, nil -} - -var coderUint64Ptr = pointerCoderFuncs{ - size: sizeUint64Ptr, - marshal: appendUint64Ptr, - unmarshal: consumeUint64Ptr, - merge: mergeUint64Ptr, -} - -// sizeUint64Slice returns the size of wire encoding a []uint64 pointer as a repeated Uint64. -func sizeUint64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := *p.Uint64Slice() - for _, v := range s { - size += f.tagsize + protowire.SizeVarint(v) - } - return size -} - -// appendUint64Slice encodes a []uint64 pointer as a repeated Uint64. 
-func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := *p.Uint64Slice() - for _, v := range s { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, v) - } - return b, nil -} - -// consumeUint64Slice wire decodes a []uint64 pointer as a repeated Uint64. -func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - sp := p.Uint64Slice() - if wtyp == protowire.BytesType { - s := *sp - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - for len(b) > 0 { - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return out, errDecode - } - s = append(s, v) - b = b[n:] - } - *sp = s - out.n = n - return out, nil - } - if wtyp != protowire.VarintType { - return out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return out, errDecode - } - *sp = append(*sp, v) - out.n = n - return out, nil -} - -var coderUint64Slice = pointerCoderFuncs{ - size: sizeUint64Slice, - marshal: appendUint64Slice, - unmarshal: consumeUint64Slice, - merge: mergeUint64Slice, -} - -// sizeUint64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Uint64. -func sizeUint64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := *p.Uint64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += protowire.SizeVarint(v) - } - return f.tagsize + protowire.SizeBytes(n) -} - -// appendUint64PackedSlice encodes a []uint64 pointer as a packed repeated Uint64. -func appendUint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := *p.Uint64Slice() - if len(s) == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - n := 0 - for _, v := range s { - n += protowire.SizeVarint(v) - } - b = protowire.AppendVarint(b, uint64(n)) - for _, v := range s { - b = protowire.AppendVarint(b, v) - } - return b, nil -} - -var coderUint64PackedSlice = pointerCoderFuncs{ - size: sizeUint64PackedSlice, - marshal: appendUint64PackedSlice, - unmarshal: consumeUint64Slice, - merge: mergeUint64Slice, -} - -// sizeUint64Value returns the size of wire encoding a uint64 value as a Uint64. -func sizeUint64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { - return tagsize + protowire.SizeVarint(v.Uint()) -} - -// appendUint64Value encodes a uint64 value as a Uint64. -func appendUint64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendVarint(b, v.Uint()) - return b, nil -} - -// consumeUint64Value decodes a uint64 value as a Uint64. 
-func consumeUint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return protoreflect.Value{}, out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - out.n = n - return protoreflect.ValueOfUint64(v), out, nil -} - -var coderUint64Value = valueCoderFuncs{ - size: sizeUint64Value, - marshal: appendUint64Value, - unmarshal: consumeUint64Value, - merge: mergeScalarValue, -} - -// sizeUint64SliceValue returns the size of wire encoding a []uint64 value as a repeated Uint64. -func sizeUint64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { - list := listv.List() - for i, llen := 0, list.Len(); i < llen; i++ { - v := list.Get(i) - size += tagsize + protowire.SizeVarint(v.Uint()) - } - return size -} - -// appendUint64SliceValue encodes a []uint64 value as a repeated Uint64. -func appendUint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - list := listv.List() - for i, llen := 0, list.Len(); i < llen; i++ { - v := list.Get(i) - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendVarint(b, v.Uint()) - } - return b, nil -} - -// consumeUint64SliceValue wire decodes a []uint64 value as a repeated Uint64. -func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - list := listv.List() - if wtyp == protowire.BytesType { - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - for len(b) > 0 { - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - list.Append(protoreflect.ValueOfUint64(v)) - b = b[n:] - } - out.n = n - return listv, out, nil - } - if wtyp != protowire.VarintType { - return protoreflect.Value{}, out, errUnknown - } - var v uint64 - var n int - if len(b) >= 1 && b[0] < 0x80 { - v = uint64(b[0]) - n = 1 - } else if len(b) >= 2 && b[1] < 128 { - v = uint64(b[0]&0x7f) + uint64(b[1])<<7 - n = 2 - } else { - v, n = protowire.ConsumeVarint(b) - } - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - list.Append(protoreflect.ValueOfUint64(v)) - out.n = n - return listv, out, nil -} - -var coderUint64SliceValue = valueCoderFuncs{ - size: sizeUint64SliceValue, - marshal: appendUint64SliceValue, - unmarshal: consumeUint64SliceValue, - merge: mergeListValue, -} - -// sizeUint64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Uint64. 
-func sizeUint64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { - list := listv.List() - llen := list.Len() - if llen == 0 { - return 0 - } - n := 0 - for i, llen := 0, llen; i < llen; i++ { - v := list.Get(i) - n += protowire.SizeVarint(v.Uint()) - } - return tagsize + protowire.SizeBytes(n) -} - -// appendUint64PackedSliceValue encodes a []uint64 value as a packed repeated Uint64. -func appendUint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - list := listv.List() - llen := list.Len() - if llen == 0 { - return b, nil - } - b = protowire.AppendVarint(b, wiretag) - n := 0 - for i := 0; i < llen; i++ { - v := list.Get(i) - n += protowire.SizeVarint(v.Uint()) - } - b = protowire.AppendVarint(b, uint64(n)) - for i := 0; i < llen; i++ { - v := list.Get(i) - b = protowire.AppendVarint(b, v.Uint()) - } - return b, nil -} - -var coderUint64PackedSliceValue = valueCoderFuncs{ - size: sizeUint64PackedSliceValue, - marshal: appendUint64PackedSliceValue, - unmarshal: consumeUint64SliceValue, - merge: mergeListValue, -} - -// sizeSfixed32 returns the size of wire encoding a int32 pointer as a Sfixed32. -func sizeSfixed32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - - return f.tagsize + protowire.SizeFixed32() -} - -// appendSfixed32 wire encodes a int32 pointer as a Sfixed32. -func appendSfixed32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.Int32() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendFixed32(b, uint32(v)) - return b, nil -} - -// consumeSfixed32 wire decodes a int32 pointer as a Sfixed32. -func consumeSfixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.Fixed32Type { - return out, errUnknown - } - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return out, errDecode - } - *p.Int32() = int32(v) - out.n = n - return out, nil -} - -var coderSfixed32 = pointerCoderFuncs{ - size: sizeSfixed32, - marshal: appendSfixed32, - unmarshal: consumeSfixed32, - merge: mergeInt32, -} - -// sizeSfixed32NoZero returns the size of wire encoding a int32 pointer as a Sfixed32. -// The zero value is not encoded. -func sizeSfixed32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - v := *p.Int32() - if v == 0 { - return 0 - } - return f.tagsize + protowire.SizeFixed32() -} - -// appendSfixed32NoZero wire encodes a int32 pointer as a Sfixed32. -// The zero value is not encoded. -func appendSfixed32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.Int32() - if v == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendFixed32(b, uint32(v)) - return b, nil -} - -var coderSfixed32NoZero = pointerCoderFuncs{ - size: sizeSfixed32NoZero, - marshal: appendSfixed32NoZero, - unmarshal: consumeSfixed32, - merge: mergeInt32NoZero, -} - -// sizeSfixed32Ptr returns the size of wire encoding a *int32 pointer as a Sfixed32. -// It panics if the pointer is nil. -func sizeSfixed32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - return f.tagsize + protowire.SizeFixed32() -} - -// appendSfixed32Ptr wire encodes a *int32 pointer as a Sfixed32. -// It panics if the pointer is nil. 
-func appendSfixed32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := **p.Int32Ptr() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendFixed32(b, uint32(v)) - return b, nil -} - -// consumeSfixed32Ptr wire decodes a *int32 pointer as a Sfixed32. -func consumeSfixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.Fixed32Type { - return out, errUnknown - } - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return out, errDecode - } - vp := p.Int32Ptr() - if *vp == nil { - *vp = new(int32) - } - **vp = int32(v) - out.n = n - return out, nil -} - -var coderSfixed32Ptr = pointerCoderFuncs{ - size: sizeSfixed32Ptr, - marshal: appendSfixed32Ptr, - unmarshal: consumeSfixed32Ptr, - merge: mergeInt32Ptr, -} - -// sizeSfixed32Slice returns the size of wire encoding a []int32 pointer as a repeated Sfixed32. -func sizeSfixed32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := *p.Int32Slice() - size = len(s) * (f.tagsize + protowire.SizeFixed32()) - return size -} - -// appendSfixed32Slice encodes a []int32 pointer as a repeated Sfixed32. -func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := *p.Int32Slice() - for _, v := range s { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendFixed32(b, uint32(v)) - } - return b, nil -} - -// consumeSfixed32Slice wire decodes a []int32 pointer as a repeated Sfixed32. -func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - sp := p.Int32Slice() - if wtyp == protowire.BytesType { - s := *sp - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - for len(b) > 0 { - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return out, errDecode - } - s = append(s, int32(v)) - b = b[n:] - } - *sp = s - out.n = n - return out, nil - } - if wtyp != protowire.Fixed32Type { - return out, errUnknown - } - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return out, errDecode - } - *sp = append(*sp, int32(v)) - out.n = n - return out, nil -} - -var coderSfixed32Slice = pointerCoderFuncs{ - size: sizeSfixed32Slice, - marshal: appendSfixed32Slice, - unmarshal: consumeSfixed32Slice, - merge: mergeInt32Slice, -} - -// sizeSfixed32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sfixed32. -func sizeSfixed32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := *p.Int32Slice() - if len(s) == 0 { - return 0 - } - n := len(s) * protowire.SizeFixed32() - return f.tagsize + protowire.SizeBytes(n) -} - -// appendSfixed32PackedSlice encodes a []int32 pointer as a packed repeated Sfixed32. -func appendSfixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := *p.Int32Slice() - if len(s) == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - n := len(s) * protowire.SizeFixed32() - b = protowire.AppendVarint(b, uint64(n)) - for _, v := range s { - b = protowire.AppendFixed32(b, uint32(v)) - } - return b, nil -} - -var coderSfixed32PackedSlice = pointerCoderFuncs{ - size: sizeSfixed32PackedSlice, - marshal: appendSfixed32PackedSlice, - unmarshal: consumeSfixed32Slice, - merge: mergeInt32Slice, -} - -// sizeSfixed32Value returns the size of wire encoding a int32 value as a Sfixed32. 
-func sizeSfixed32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { - return tagsize + protowire.SizeFixed32() -} - -// appendSfixed32Value encodes a int32 value as a Sfixed32. -func appendSfixed32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendFixed32(b, uint32(v.Int())) - return b, nil -} - -// consumeSfixed32Value decodes a int32 value as a Sfixed32. -func consumeSfixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - if wtyp != protowire.Fixed32Type { - return protoreflect.Value{}, out, errUnknown - } - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - out.n = n - return protoreflect.ValueOfInt32(int32(v)), out, nil -} - -var coderSfixed32Value = valueCoderFuncs{ - size: sizeSfixed32Value, - marshal: appendSfixed32Value, - unmarshal: consumeSfixed32Value, - merge: mergeScalarValue, -} - -// sizeSfixed32SliceValue returns the size of wire encoding a []int32 value as a repeated Sfixed32. -func sizeSfixed32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { - list := listv.List() - size = list.Len() * (tagsize + protowire.SizeFixed32()) - return size -} - -// appendSfixed32SliceValue encodes a []int32 value as a repeated Sfixed32. -func appendSfixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - list := listv.List() - for i, llen := 0, list.Len(); i < llen; i++ { - v := list.Get(i) - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendFixed32(b, uint32(v.Int())) - } - return b, nil -} - -// consumeSfixed32SliceValue wire decodes a []int32 value as a repeated Sfixed32. -func consumeSfixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - list := listv.List() - if wtyp == protowire.BytesType { - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - for len(b) > 0 { - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - list.Append(protoreflect.ValueOfInt32(int32(v))) - b = b[n:] - } - out.n = n - return listv, out, nil - } - if wtyp != protowire.Fixed32Type { - return protoreflect.Value{}, out, errUnknown - } - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - list.Append(protoreflect.ValueOfInt32(int32(v))) - out.n = n - return listv, out, nil -} - -var coderSfixed32SliceValue = valueCoderFuncs{ - size: sizeSfixed32SliceValue, - marshal: appendSfixed32SliceValue, - unmarshal: consumeSfixed32SliceValue, - merge: mergeListValue, -} - -// sizeSfixed32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sfixed32. -func sizeSfixed32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { - list := listv.List() - llen := list.Len() - if llen == 0 { - return 0 - } - n := llen * protowire.SizeFixed32() - return tagsize + protowire.SizeBytes(n) -} - -// appendSfixed32PackedSliceValue encodes a []int32 value as a packed repeated Sfixed32. 
-func appendSfixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - list := listv.List() - llen := list.Len() - if llen == 0 { - return b, nil - } - b = protowire.AppendVarint(b, wiretag) - n := llen * protowire.SizeFixed32() - b = protowire.AppendVarint(b, uint64(n)) - for i := 0; i < llen; i++ { - v := list.Get(i) - b = protowire.AppendFixed32(b, uint32(v.Int())) - } - return b, nil -} - -var coderSfixed32PackedSliceValue = valueCoderFuncs{ - size: sizeSfixed32PackedSliceValue, - marshal: appendSfixed32PackedSliceValue, - unmarshal: consumeSfixed32SliceValue, - merge: mergeListValue, -} - -// sizeFixed32 returns the size of wire encoding a uint32 pointer as a Fixed32. -func sizeFixed32(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - - return f.tagsize + protowire.SizeFixed32() -} - -// appendFixed32 wire encodes a uint32 pointer as a Fixed32. -func appendFixed32(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.Uint32() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendFixed32(b, v) - return b, nil -} - -// consumeFixed32 wire decodes a uint32 pointer as a Fixed32. -func consumeFixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.Fixed32Type { - return out, errUnknown - } - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return out, errDecode - } - *p.Uint32() = v - out.n = n - return out, nil -} - -var coderFixed32 = pointerCoderFuncs{ - size: sizeFixed32, - marshal: appendFixed32, - unmarshal: consumeFixed32, - merge: mergeUint32, -} - -// sizeFixed32NoZero returns the size of wire encoding a uint32 pointer as a Fixed32. -// The zero value is not encoded. -func sizeFixed32NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - v := *p.Uint32() - if v == 0 { - return 0 - } - return f.tagsize + protowire.SizeFixed32() -} - -// appendFixed32NoZero wire encodes a uint32 pointer as a Fixed32. -// The zero value is not encoded. -func appendFixed32NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.Uint32() - if v == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendFixed32(b, v) - return b, nil -} - -var coderFixed32NoZero = pointerCoderFuncs{ - size: sizeFixed32NoZero, - marshal: appendFixed32NoZero, - unmarshal: consumeFixed32, - merge: mergeUint32NoZero, -} - -// sizeFixed32Ptr returns the size of wire encoding a *uint32 pointer as a Fixed32. -// It panics if the pointer is nil. -func sizeFixed32Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - return f.tagsize + protowire.SizeFixed32() -} - -// appendFixed32Ptr wire encodes a *uint32 pointer as a Fixed32. -// It panics if the pointer is nil. -func appendFixed32Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := **p.Uint32Ptr() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendFixed32(b, v) - return b, nil -} - -// consumeFixed32Ptr wire decodes a *uint32 pointer as a Fixed32. 
-func consumeFixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.Fixed32Type { - return out, errUnknown - } - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return out, errDecode - } - vp := p.Uint32Ptr() - if *vp == nil { - *vp = new(uint32) - } - **vp = v - out.n = n - return out, nil -} - -var coderFixed32Ptr = pointerCoderFuncs{ - size: sizeFixed32Ptr, - marshal: appendFixed32Ptr, - unmarshal: consumeFixed32Ptr, - merge: mergeUint32Ptr, -} - -// sizeFixed32Slice returns the size of wire encoding a []uint32 pointer as a repeated Fixed32. -func sizeFixed32Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := *p.Uint32Slice() - size = len(s) * (f.tagsize + protowire.SizeFixed32()) - return size -} - -// appendFixed32Slice encodes a []uint32 pointer as a repeated Fixed32. -func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := *p.Uint32Slice() - for _, v := range s { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendFixed32(b, v) - } - return b, nil -} - -// consumeFixed32Slice wire decodes a []uint32 pointer as a repeated Fixed32. -func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - sp := p.Uint32Slice() - if wtyp == protowire.BytesType { - s := *sp - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - for len(b) > 0 { - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return out, errDecode - } - s = append(s, v) - b = b[n:] - } - *sp = s - out.n = n - return out, nil - } - if wtyp != protowire.Fixed32Type { - return out, errUnknown - } - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return out, errDecode - } - *sp = append(*sp, v) - out.n = n - return out, nil -} - -var coderFixed32Slice = pointerCoderFuncs{ - size: sizeFixed32Slice, - marshal: appendFixed32Slice, - unmarshal: consumeFixed32Slice, - merge: mergeUint32Slice, -} - -// sizeFixed32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Fixed32. -func sizeFixed32PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := *p.Uint32Slice() - if len(s) == 0 { - return 0 - } - n := len(s) * protowire.SizeFixed32() - return f.tagsize + protowire.SizeBytes(n) -} - -// appendFixed32PackedSlice encodes a []uint32 pointer as a packed repeated Fixed32. -func appendFixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := *p.Uint32Slice() - if len(s) == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - n := len(s) * protowire.SizeFixed32() - b = protowire.AppendVarint(b, uint64(n)) - for _, v := range s { - b = protowire.AppendFixed32(b, v) - } - return b, nil -} - -var coderFixed32PackedSlice = pointerCoderFuncs{ - size: sizeFixed32PackedSlice, - marshal: appendFixed32PackedSlice, - unmarshal: consumeFixed32Slice, - merge: mergeUint32Slice, -} - -// sizeFixed32Value returns the size of wire encoding a uint32 value as a Fixed32. -func sizeFixed32Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { - return tagsize + protowire.SizeFixed32() -} - -// appendFixed32Value encodes a uint32 value as a Fixed32. 
-func appendFixed32Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendFixed32(b, uint32(v.Uint())) - return b, nil -} - -// consumeFixed32Value decodes a uint32 value as a Fixed32. -func consumeFixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - if wtyp != protowire.Fixed32Type { - return protoreflect.Value{}, out, errUnknown - } - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - out.n = n - return protoreflect.ValueOfUint32(uint32(v)), out, nil -} - -var coderFixed32Value = valueCoderFuncs{ - size: sizeFixed32Value, - marshal: appendFixed32Value, - unmarshal: consumeFixed32Value, - merge: mergeScalarValue, -} - -// sizeFixed32SliceValue returns the size of wire encoding a []uint32 value as a repeated Fixed32. -func sizeFixed32SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { - list := listv.List() - size = list.Len() * (tagsize + protowire.SizeFixed32()) - return size -} - -// appendFixed32SliceValue encodes a []uint32 value as a repeated Fixed32. -func appendFixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - list := listv.List() - for i, llen := 0, list.Len(); i < llen; i++ { - v := list.Get(i) - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendFixed32(b, uint32(v.Uint())) - } - return b, nil -} - -// consumeFixed32SliceValue wire decodes a []uint32 value as a repeated Fixed32. -func consumeFixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - list := listv.List() - if wtyp == protowire.BytesType { - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - for len(b) > 0 { - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - list.Append(protoreflect.ValueOfUint32(uint32(v))) - b = b[n:] - } - out.n = n - return listv, out, nil - } - if wtyp != protowire.Fixed32Type { - return protoreflect.Value{}, out, errUnknown - } - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - list.Append(protoreflect.ValueOfUint32(uint32(v))) - out.n = n - return listv, out, nil -} - -var coderFixed32SliceValue = valueCoderFuncs{ - size: sizeFixed32SliceValue, - marshal: appendFixed32SliceValue, - unmarshal: consumeFixed32SliceValue, - merge: mergeListValue, -} - -// sizeFixed32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Fixed32. -func sizeFixed32PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { - list := listv.List() - llen := list.Len() - if llen == 0 { - return 0 - } - n := llen * protowire.SizeFixed32() - return tagsize + protowire.SizeBytes(n) -} - -// appendFixed32PackedSliceValue encodes a []uint32 value as a packed repeated Fixed32. 
-func appendFixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - list := listv.List() - llen := list.Len() - if llen == 0 { - return b, nil - } - b = protowire.AppendVarint(b, wiretag) - n := llen * protowire.SizeFixed32() - b = protowire.AppendVarint(b, uint64(n)) - for i := 0; i < llen; i++ { - v := list.Get(i) - b = protowire.AppendFixed32(b, uint32(v.Uint())) - } - return b, nil -} - -var coderFixed32PackedSliceValue = valueCoderFuncs{ - size: sizeFixed32PackedSliceValue, - marshal: appendFixed32PackedSliceValue, - unmarshal: consumeFixed32SliceValue, - merge: mergeListValue, -} - -// sizeFloat returns the size of wire encoding a float32 pointer as a Float. -func sizeFloat(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - - return f.tagsize + protowire.SizeFixed32() -} - -// appendFloat wire encodes a float32 pointer as a Float. -func appendFloat(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.Float32() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendFixed32(b, math.Float32bits(v)) - return b, nil -} - -// consumeFloat wire decodes a float32 pointer as a Float. -func consumeFloat(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.Fixed32Type { - return out, errUnknown - } - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return out, errDecode - } - *p.Float32() = math.Float32frombits(v) - out.n = n - return out, nil -} - -var coderFloat = pointerCoderFuncs{ - size: sizeFloat, - marshal: appendFloat, - unmarshal: consumeFloat, - merge: mergeFloat32, -} - -// sizeFloatNoZero returns the size of wire encoding a float32 pointer as a Float. -// The zero value is not encoded. -func sizeFloatNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - v := *p.Float32() - if v == 0 && !math.Signbit(float64(v)) { - return 0 - } - return f.tagsize + protowire.SizeFixed32() -} - -// appendFloatNoZero wire encodes a float32 pointer as a Float. -// The zero value is not encoded. -func appendFloatNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.Float32() - if v == 0 && !math.Signbit(float64(v)) { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendFixed32(b, math.Float32bits(v)) - return b, nil -} - -var coderFloatNoZero = pointerCoderFuncs{ - size: sizeFloatNoZero, - marshal: appendFloatNoZero, - unmarshal: consumeFloat, - merge: mergeFloat32NoZero, -} - -// sizeFloatPtr returns the size of wire encoding a *float32 pointer as a Float. -// It panics if the pointer is nil. -func sizeFloatPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - return f.tagsize + protowire.SizeFixed32() -} - -// appendFloatPtr wire encodes a *float32 pointer as a Float. -// It panics if the pointer is nil. -func appendFloatPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := **p.Float32Ptr() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendFixed32(b, math.Float32bits(v)) - return b, nil -} - -// consumeFloatPtr wire decodes a *float32 pointer as a Float. 
-func consumeFloatPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.Fixed32Type { - return out, errUnknown - } - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return out, errDecode - } - vp := p.Float32Ptr() - if *vp == nil { - *vp = new(float32) - } - **vp = math.Float32frombits(v) - out.n = n - return out, nil -} - -var coderFloatPtr = pointerCoderFuncs{ - size: sizeFloatPtr, - marshal: appendFloatPtr, - unmarshal: consumeFloatPtr, - merge: mergeFloat32Ptr, -} - -// sizeFloatSlice returns the size of wire encoding a []float32 pointer as a repeated Float. -func sizeFloatSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := *p.Float32Slice() - size = len(s) * (f.tagsize + protowire.SizeFixed32()) - return size -} - -// appendFloatSlice encodes a []float32 pointer as a repeated Float. -func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := *p.Float32Slice() - for _, v := range s { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendFixed32(b, math.Float32bits(v)) - } - return b, nil -} - -// consumeFloatSlice wire decodes a []float32 pointer as a repeated Float. -func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - sp := p.Float32Slice() - if wtyp == protowire.BytesType { - s := *sp - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - for len(b) > 0 { - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return out, errDecode - } - s = append(s, math.Float32frombits(v)) - b = b[n:] - } - *sp = s - out.n = n - return out, nil - } - if wtyp != protowire.Fixed32Type { - return out, errUnknown - } - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return out, errDecode - } - *sp = append(*sp, math.Float32frombits(v)) - out.n = n - return out, nil -} - -var coderFloatSlice = pointerCoderFuncs{ - size: sizeFloatSlice, - marshal: appendFloatSlice, - unmarshal: consumeFloatSlice, - merge: mergeFloat32Slice, -} - -// sizeFloatPackedSlice returns the size of wire encoding a []float32 pointer as a packed repeated Float. -func sizeFloatPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := *p.Float32Slice() - if len(s) == 0 { - return 0 - } - n := len(s) * protowire.SizeFixed32() - return f.tagsize + protowire.SizeBytes(n) -} - -// appendFloatPackedSlice encodes a []float32 pointer as a packed repeated Float. -func appendFloatPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := *p.Float32Slice() - if len(s) == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - n := len(s) * protowire.SizeFixed32() - b = protowire.AppendVarint(b, uint64(n)) - for _, v := range s { - b = protowire.AppendFixed32(b, math.Float32bits(v)) - } - return b, nil -} - -var coderFloatPackedSlice = pointerCoderFuncs{ - size: sizeFloatPackedSlice, - marshal: appendFloatPackedSlice, - unmarshal: consumeFloatSlice, - merge: mergeFloat32Slice, -} - -// sizeFloatValue returns the size of wire encoding a float32 value as a Float. -func sizeFloatValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { - return tagsize + protowire.SizeFixed32() -} - -// appendFloatValue encodes a float32 value as a Float. 
-func appendFloatValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) - return b, nil -} - -// consumeFloatValue decodes a float32 value as a Float. -func consumeFloatValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - if wtyp != protowire.Fixed32Type { - return protoreflect.Value{}, out, errUnknown - } - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - out.n = n - return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), out, nil -} - -var coderFloatValue = valueCoderFuncs{ - size: sizeFloatValue, - marshal: appendFloatValue, - unmarshal: consumeFloatValue, - merge: mergeScalarValue, -} - -// sizeFloatSliceValue returns the size of wire encoding a []float32 value as a repeated Float. -func sizeFloatSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { - list := listv.List() - size = list.Len() * (tagsize + protowire.SizeFixed32()) - return size -} - -// appendFloatSliceValue encodes a []float32 value as a repeated Float. -func appendFloatSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - list := listv.List() - for i, llen := 0, list.Len(); i < llen; i++ { - v := list.Get(i) - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) - } - return b, nil -} - -// consumeFloatSliceValue wire decodes a []float32 value as a repeated Float. -func consumeFloatSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - list := listv.List() - if wtyp == protowire.BytesType { - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - for len(b) > 0 { - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) - b = b[n:] - } - out.n = n - return listv, out, nil - } - if wtyp != protowire.Fixed32Type { - return protoreflect.Value{}, out, errUnknown - } - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) - out.n = n - return listv, out, nil -} - -var coderFloatSliceValue = valueCoderFuncs{ - size: sizeFloatSliceValue, - marshal: appendFloatSliceValue, - unmarshal: consumeFloatSliceValue, - merge: mergeListValue, -} - -// sizeFloatPackedSliceValue returns the size of wire encoding a []float32 value as a packed repeated Float. -func sizeFloatPackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { - list := listv.List() - llen := list.Len() - if llen == 0 { - return 0 - } - n := llen * protowire.SizeFixed32() - return tagsize + protowire.SizeBytes(n) -} - -// appendFloatPackedSliceValue encodes a []float32 value as a packed repeated Float. 
-func appendFloatPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - list := listv.List() - llen := list.Len() - if llen == 0 { - return b, nil - } - b = protowire.AppendVarint(b, wiretag) - n := llen * protowire.SizeFixed32() - b = protowire.AppendVarint(b, uint64(n)) - for i := 0; i < llen; i++ { - v := list.Get(i) - b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) - } - return b, nil -} - -var coderFloatPackedSliceValue = valueCoderFuncs{ - size: sizeFloatPackedSliceValue, - marshal: appendFloatPackedSliceValue, - unmarshal: consumeFloatSliceValue, - merge: mergeListValue, -} - -// sizeSfixed64 returns the size of wire encoding a int64 pointer as a Sfixed64. -func sizeSfixed64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - - return f.tagsize + protowire.SizeFixed64() -} - -// appendSfixed64 wire encodes a int64 pointer as a Sfixed64. -func appendSfixed64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.Int64() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendFixed64(b, uint64(v)) - return b, nil -} - -// consumeSfixed64 wire decodes a int64 pointer as a Sfixed64. -func consumeSfixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.Fixed64Type { - return out, errUnknown - } - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return out, errDecode - } - *p.Int64() = int64(v) - out.n = n - return out, nil -} - -var coderSfixed64 = pointerCoderFuncs{ - size: sizeSfixed64, - marshal: appendSfixed64, - unmarshal: consumeSfixed64, - merge: mergeInt64, -} - -// sizeSfixed64NoZero returns the size of wire encoding a int64 pointer as a Sfixed64. -// The zero value is not encoded. -func sizeSfixed64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - v := *p.Int64() - if v == 0 { - return 0 - } - return f.tagsize + protowire.SizeFixed64() -} - -// appendSfixed64NoZero wire encodes a int64 pointer as a Sfixed64. -// The zero value is not encoded. -func appendSfixed64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.Int64() - if v == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendFixed64(b, uint64(v)) - return b, nil -} - -var coderSfixed64NoZero = pointerCoderFuncs{ - size: sizeSfixed64NoZero, - marshal: appendSfixed64NoZero, - unmarshal: consumeSfixed64, - merge: mergeInt64NoZero, -} - -// sizeSfixed64Ptr returns the size of wire encoding a *int64 pointer as a Sfixed64. -// It panics if the pointer is nil. -func sizeSfixed64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - return f.tagsize + protowire.SizeFixed64() -} - -// appendSfixed64Ptr wire encodes a *int64 pointer as a Sfixed64. -// It panics if the pointer is nil. -func appendSfixed64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := **p.Int64Ptr() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendFixed64(b, uint64(v)) - return b, nil -} - -// consumeSfixed64Ptr wire decodes a *int64 pointer as a Sfixed64. 
-func consumeSfixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.Fixed64Type { - return out, errUnknown - } - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return out, errDecode - } - vp := p.Int64Ptr() - if *vp == nil { - *vp = new(int64) - } - **vp = int64(v) - out.n = n - return out, nil -} - -var coderSfixed64Ptr = pointerCoderFuncs{ - size: sizeSfixed64Ptr, - marshal: appendSfixed64Ptr, - unmarshal: consumeSfixed64Ptr, - merge: mergeInt64Ptr, -} - -// sizeSfixed64Slice returns the size of wire encoding a []int64 pointer as a repeated Sfixed64. -func sizeSfixed64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := *p.Int64Slice() - size = len(s) * (f.tagsize + protowire.SizeFixed64()) - return size -} - -// appendSfixed64Slice encodes a []int64 pointer as a repeated Sfixed64. -func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := *p.Int64Slice() - for _, v := range s { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendFixed64(b, uint64(v)) - } - return b, nil -} - -// consumeSfixed64Slice wire decodes a []int64 pointer as a repeated Sfixed64. -func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - sp := p.Int64Slice() - if wtyp == protowire.BytesType { - s := *sp - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - for len(b) > 0 { - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return out, errDecode - } - s = append(s, int64(v)) - b = b[n:] - } - *sp = s - out.n = n - return out, nil - } - if wtyp != protowire.Fixed64Type { - return out, errUnknown - } - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return out, errDecode - } - *sp = append(*sp, int64(v)) - out.n = n - return out, nil -} - -var coderSfixed64Slice = pointerCoderFuncs{ - size: sizeSfixed64Slice, - marshal: appendSfixed64Slice, - unmarshal: consumeSfixed64Slice, - merge: mergeInt64Slice, -} - -// sizeSfixed64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sfixed64. -func sizeSfixed64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := *p.Int64Slice() - if len(s) == 0 { - return 0 - } - n := len(s) * protowire.SizeFixed64() - return f.tagsize + protowire.SizeBytes(n) -} - -// appendSfixed64PackedSlice encodes a []int64 pointer as a packed repeated Sfixed64. -func appendSfixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := *p.Int64Slice() - if len(s) == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - n := len(s) * protowire.SizeFixed64() - b = protowire.AppendVarint(b, uint64(n)) - for _, v := range s { - b = protowire.AppendFixed64(b, uint64(v)) - } - return b, nil -} - -var coderSfixed64PackedSlice = pointerCoderFuncs{ - size: sizeSfixed64PackedSlice, - marshal: appendSfixed64PackedSlice, - unmarshal: consumeSfixed64Slice, - merge: mergeInt64Slice, -} - -// sizeSfixed64Value returns the size of wire encoding a int64 value as a Sfixed64. -func sizeSfixed64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { - return tagsize + protowire.SizeFixed64() -} - -// appendSfixed64Value encodes a int64 value as a Sfixed64. 
-func appendSfixed64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendFixed64(b, uint64(v.Int())) - return b, nil -} - -// consumeSfixed64Value decodes a int64 value as a Sfixed64. -func consumeSfixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - if wtyp != protowire.Fixed64Type { - return protoreflect.Value{}, out, errUnknown - } - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - out.n = n - return protoreflect.ValueOfInt64(int64(v)), out, nil -} - -var coderSfixed64Value = valueCoderFuncs{ - size: sizeSfixed64Value, - marshal: appendSfixed64Value, - unmarshal: consumeSfixed64Value, - merge: mergeScalarValue, -} - -// sizeSfixed64SliceValue returns the size of wire encoding a []int64 value as a repeated Sfixed64. -func sizeSfixed64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { - list := listv.List() - size = list.Len() * (tagsize + protowire.SizeFixed64()) - return size -} - -// appendSfixed64SliceValue encodes a []int64 value as a repeated Sfixed64. -func appendSfixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - list := listv.List() - for i, llen := 0, list.Len(); i < llen; i++ { - v := list.Get(i) - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendFixed64(b, uint64(v.Int())) - } - return b, nil -} - -// consumeSfixed64SliceValue wire decodes a []int64 value as a repeated Sfixed64. -func consumeSfixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - list := listv.List() - if wtyp == protowire.BytesType { - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - for len(b) > 0 { - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - list.Append(protoreflect.ValueOfInt64(int64(v))) - b = b[n:] - } - out.n = n - return listv, out, nil - } - if wtyp != protowire.Fixed64Type { - return protoreflect.Value{}, out, errUnknown - } - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - list.Append(protoreflect.ValueOfInt64(int64(v))) - out.n = n - return listv, out, nil -} - -var coderSfixed64SliceValue = valueCoderFuncs{ - size: sizeSfixed64SliceValue, - marshal: appendSfixed64SliceValue, - unmarshal: consumeSfixed64SliceValue, - merge: mergeListValue, -} - -// sizeSfixed64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sfixed64. -func sizeSfixed64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { - list := listv.List() - llen := list.Len() - if llen == 0 { - return 0 - } - n := llen * protowire.SizeFixed64() - return tagsize + protowire.SizeBytes(n) -} - -// appendSfixed64PackedSliceValue encodes a []int64 value as a packed repeated Sfixed64. 
-func appendSfixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - list := listv.List() - llen := list.Len() - if llen == 0 { - return b, nil - } - b = protowire.AppendVarint(b, wiretag) - n := llen * protowire.SizeFixed64() - b = protowire.AppendVarint(b, uint64(n)) - for i := 0; i < llen; i++ { - v := list.Get(i) - b = protowire.AppendFixed64(b, uint64(v.Int())) - } - return b, nil -} - -var coderSfixed64PackedSliceValue = valueCoderFuncs{ - size: sizeSfixed64PackedSliceValue, - marshal: appendSfixed64PackedSliceValue, - unmarshal: consumeSfixed64SliceValue, - merge: mergeListValue, -} - -// sizeFixed64 returns the size of wire encoding a uint64 pointer as a Fixed64. -func sizeFixed64(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - - return f.tagsize + protowire.SizeFixed64() -} - -// appendFixed64 wire encodes a uint64 pointer as a Fixed64. -func appendFixed64(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.Uint64() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendFixed64(b, v) - return b, nil -} - -// consumeFixed64 wire decodes a uint64 pointer as a Fixed64. -func consumeFixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.Fixed64Type { - return out, errUnknown - } - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return out, errDecode - } - *p.Uint64() = v - out.n = n - return out, nil -} - -var coderFixed64 = pointerCoderFuncs{ - size: sizeFixed64, - marshal: appendFixed64, - unmarshal: consumeFixed64, - merge: mergeUint64, -} - -// sizeFixed64NoZero returns the size of wire encoding a uint64 pointer as a Fixed64. -// The zero value is not encoded. -func sizeFixed64NoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - v := *p.Uint64() - if v == 0 { - return 0 - } - return f.tagsize + protowire.SizeFixed64() -} - -// appendFixed64NoZero wire encodes a uint64 pointer as a Fixed64. -// The zero value is not encoded. -func appendFixed64NoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.Uint64() - if v == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendFixed64(b, v) - return b, nil -} - -var coderFixed64NoZero = pointerCoderFuncs{ - size: sizeFixed64NoZero, - marshal: appendFixed64NoZero, - unmarshal: consumeFixed64, - merge: mergeUint64NoZero, -} - -// sizeFixed64Ptr returns the size of wire encoding a *uint64 pointer as a Fixed64. -// It panics if the pointer is nil. -func sizeFixed64Ptr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - return f.tagsize + protowire.SizeFixed64() -} - -// appendFixed64Ptr wire encodes a *uint64 pointer as a Fixed64. -// It panics if the pointer is nil. -func appendFixed64Ptr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := **p.Uint64Ptr() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendFixed64(b, v) - return b, nil -} - -// consumeFixed64Ptr wire decodes a *uint64 pointer as a Fixed64. 
-func consumeFixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.Fixed64Type { - return out, errUnknown - } - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return out, errDecode - } - vp := p.Uint64Ptr() - if *vp == nil { - *vp = new(uint64) - } - **vp = v - out.n = n - return out, nil -} - -var coderFixed64Ptr = pointerCoderFuncs{ - size: sizeFixed64Ptr, - marshal: appendFixed64Ptr, - unmarshal: consumeFixed64Ptr, - merge: mergeUint64Ptr, -} - -// sizeFixed64Slice returns the size of wire encoding a []uint64 pointer as a repeated Fixed64. -func sizeFixed64Slice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := *p.Uint64Slice() - size = len(s) * (f.tagsize + protowire.SizeFixed64()) - return size -} - -// appendFixed64Slice encodes a []uint64 pointer as a repeated Fixed64. -func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := *p.Uint64Slice() - for _, v := range s { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendFixed64(b, v) - } - return b, nil -} - -// consumeFixed64Slice wire decodes a []uint64 pointer as a repeated Fixed64. -func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - sp := p.Uint64Slice() - if wtyp == protowire.BytesType { - s := *sp - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - for len(b) > 0 { - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return out, errDecode - } - s = append(s, v) - b = b[n:] - } - *sp = s - out.n = n - return out, nil - } - if wtyp != protowire.Fixed64Type { - return out, errUnknown - } - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return out, errDecode - } - *sp = append(*sp, v) - out.n = n - return out, nil -} - -var coderFixed64Slice = pointerCoderFuncs{ - size: sizeFixed64Slice, - marshal: appendFixed64Slice, - unmarshal: consumeFixed64Slice, - merge: mergeUint64Slice, -} - -// sizeFixed64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Fixed64. -func sizeFixed64PackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := *p.Uint64Slice() - if len(s) == 0 { - return 0 - } - n := len(s) * protowire.SizeFixed64() - return f.tagsize + protowire.SizeBytes(n) -} - -// appendFixed64PackedSlice encodes a []uint64 pointer as a packed repeated Fixed64. -func appendFixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := *p.Uint64Slice() - if len(s) == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - n := len(s) * protowire.SizeFixed64() - b = protowire.AppendVarint(b, uint64(n)) - for _, v := range s { - b = protowire.AppendFixed64(b, v) - } - return b, nil -} - -var coderFixed64PackedSlice = pointerCoderFuncs{ - size: sizeFixed64PackedSlice, - marshal: appendFixed64PackedSlice, - unmarshal: consumeFixed64Slice, - merge: mergeUint64Slice, -} - -// sizeFixed64Value returns the size of wire encoding a uint64 value as a Fixed64. -func sizeFixed64Value(v protoreflect.Value, tagsize int, opts marshalOptions) int { - return tagsize + protowire.SizeFixed64() -} - -// appendFixed64Value encodes a uint64 value as a Fixed64. 
-func appendFixed64Value(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendFixed64(b, v.Uint()) - return b, nil -} - -// consumeFixed64Value decodes a uint64 value as a Fixed64. -func consumeFixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - if wtyp != protowire.Fixed64Type { - return protoreflect.Value{}, out, errUnknown - } - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - out.n = n - return protoreflect.ValueOfUint64(v), out, nil -} - -var coderFixed64Value = valueCoderFuncs{ - size: sizeFixed64Value, - marshal: appendFixed64Value, - unmarshal: consumeFixed64Value, - merge: mergeScalarValue, -} - -// sizeFixed64SliceValue returns the size of wire encoding a []uint64 value as a repeated Fixed64. -func sizeFixed64SliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { - list := listv.List() - size = list.Len() * (tagsize + protowire.SizeFixed64()) - return size -} - -// appendFixed64SliceValue encodes a []uint64 value as a repeated Fixed64. -func appendFixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - list := listv.List() - for i, llen := 0, list.Len(); i < llen; i++ { - v := list.Get(i) - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendFixed64(b, v.Uint()) - } - return b, nil -} - -// consumeFixed64SliceValue wire decodes a []uint64 value as a repeated Fixed64. -func consumeFixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - list := listv.List() - if wtyp == protowire.BytesType { - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - for len(b) > 0 { - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - list.Append(protoreflect.ValueOfUint64(v)) - b = b[n:] - } - out.n = n - return listv, out, nil - } - if wtyp != protowire.Fixed64Type { - return protoreflect.Value{}, out, errUnknown - } - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - list.Append(protoreflect.ValueOfUint64(v)) - out.n = n - return listv, out, nil -} - -var coderFixed64SliceValue = valueCoderFuncs{ - size: sizeFixed64SliceValue, - marshal: appendFixed64SliceValue, - unmarshal: consumeFixed64SliceValue, - merge: mergeListValue, -} - -// sizeFixed64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Fixed64. -func sizeFixed64PackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { - list := listv.List() - llen := list.Len() - if llen == 0 { - return 0 - } - n := llen * protowire.SizeFixed64() - return tagsize + protowire.SizeBytes(n) -} - -// appendFixed64PackedSliceValue encodes a []uint64 value as a packed repeated Fixed64. 
-func appendFixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - list := listv.List() - llen := list.Len() - if llen == 0 { - return b, nil - } - b = protowire.AppendVarint(b, wiretag) - n := llen * protowire.SizeFixed64() - b = protowire.AppendVarint(b, uint64(n)) - for i := 0; i < llen; i++ { - v := list.Get(i) - b = protowire.AppendFixed64(b, v.Uint()) - } - return b, nil -} - -var coderFixed64PackedSliceValue = valueCoderFuncs{ - size: sizeFixed64PackedSliceValue, - marshal: appendFixed64PackedSliceValue, - unmarshal: consumeFixed64SliceValue, - merge: mergeListValue, -} - -// sizeDouble returns the size of wire encoding a float64 pointer as a Double. -func sizeDouble(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - - return f.tagsize + protowire.SizeFixed64() -} - -// appendDouble wire encodes a float64 pointer as a Double. -func appendDouble(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.Float64() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendFixed64(b, math.Float64bits(v)) - return b, nil -} - -// consumeDouble wire decodes a float64 pointer as a Double. -func consumeDouble(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.Fixed64Type { - return out, errUnknown - } - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return out, errDecode - } - *p.Float64() = math.Float64frombits(v) - out.n = n - return out, nil -} - -var coderDouble = pointerCoderFuncs{ - size: sizeDouble, - marshal: appendDouble, - unmarshal: consumeDouble, - merge: mergeFloat64, -} - -// sizeDoubleNoZero returns the size of wire encoding a float64 pointer as a Double. -// The zero value is not encoded. -func sizeDoubleNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - v := *p.Float64() - if v == 0 && !math.Signbit(float64(v)) { - return 0 - } - return f.tagsize + protowire.SizeFixed64() -} - -// appendDoubleNoZero wire encodes a float64 pointer as a Double. -// The zero value is not encoded. -func appendDoubleNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.Float64() - if v == 0 && !math.Signbit(float64(v)) { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendFixed64(b, math.Float64bits(v)) - return b, nil -} - -var coderDoubleNoZero = pointerCoderFuncs{ - size: sizeDoubleNoZero, - marshal: appendDoubleNoZero, - unmarshal: consumeDouble, - merge: mergeFloat64NoZero, -} - -// sizeDoublePtr returns the size of wire encoding a *float64 pointer as a Double. -// It panics if the pointer is nil. -func sizeDoublePtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - return f.tagsize + protowire.SizeFixed64() -} - -// appendDoublePtr wire encodes a *float64 pointer as a Double. -// It panics if the pointer is nil. -func appendDoublePtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := **p.Float64Ptr() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendFixed64(b, math.Float64bits(v)) - return b, nil -} - -// consumeDoublePtr wire decodes a *float64 pointer as a Double. 
-func consumeDoublePtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.Fixed64Type { - return out, errUnknown - } - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return out, errDecode - } - vp := p.Float64Ptr() - if *vp == nil { - *vp = new(float64) - } - **vp = math.Float64frombits(v) - out.n = n - return out, nil -} - -var coderDoublePtr = pointerCoderFuncs{ - size: sizeDoublePtr, - marshal: appendDoublePtr, - unmarshal: consumeDoublePtr, - merge: mergeFloat64Ptr, -} - -// sizeDoubleSlice returns the size of wire encoding a []float64 pointer as a repeated Double. -func sizeDoubleSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := *p.Float64Slice() - size = len(s) * (f.tagsize + protowire.SizeFixed64()) - return size -} - -// appendDoubleSlice encodes a []float64 pointer as a repeated Double. -func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := *p.Float64Slice() - for _, v := range s { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendFixed64(b, math.Float64bits(v)) - } - return b, nil -} - -// consumeDoubleSlice wire decodes a []float64 pointer as a repeated Double. -func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - sp := p.Float64Slice() - if wtyp == protowire.BytesType { - s := *sp - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - for len(b) > 0 { - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return out, errDecode - } - s = append(s, math.Float64frombits(v)) - b = b[n:] - } - *sp = s - out.n = n - return out, nil - } - if wtyp != protowire.Fixed64Type { - return out, errUnknown - } - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return out, errDecode - } - *sp = append(*sp, math.Float64frombits(v)) - out.n = n - return out, nil -} - -var coderDoubleSlice = pointerCoderFuncs{ - size: sizeDoubleSlice, - marshal: appendDoubleSlice, - unmarshal: consumeDoubleSlice, - merge: mergeFloat64Slice, -} - -// sizeDoublePackedSlice returns the size of wire encoding a []float64 pointer as a packed repeated Double. -func sizeDoublePackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := *p.Float64Slice() - if len(s) == 0 { - return 0 - } - n := len(s) * protowire.SizeFixed64() - return f.tagsize + protowire.SizeBytes(n) -} - -// appendDoublePackedSlice encodes a []float64 pointer as a packed repeated Double. -func appendDoublePackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := *p.Float64Slice() - if len(s) == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - n := len(s) * protowire.SizeFixed64() - b = protowire.AppendVarint(b, uint64(n)) - for _, v := range s { - b = protowire.AppendFixed64(b, math.Float64bits(v)) - } - return b, nil -} - -var coderDoublePackedSlice = pointerCoderFuncs{ - size: sizeDoublePackedSlice, - marshal: appendDoublePackedSlice, - unmarshal: consumeDoubleSlice, - merge: mergeFloat64Slice, -} - -// sizeDoubleValue returns the size of wire encoding a float64 value as a Double. -func sizeDoubleValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { - return tagsize + protowire.SizeFixed64() -} - -// appendDoubleValue encodes a float64 value as a Double. 
-func appendDoubleValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) - return b, nil -} - -// consumeDoubleValue decodes a float64 value as a Double. -func consumeDoubleValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - if wtyp != protowire.Fixed64Type { - return protoreflect.Value{}, out, errUnknown - } - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - out.n = n - return protoreflect.ValueOfFloat64(math.Float64frombits(v)), out, nil -} - -var coderDoubleValue = valueCoderFuncs{ - size: sizeDoubleValue, - marshal: appendDoubleValue, - unmarshal: consumeDoubleValue, - merge: mergeScalarValue, -} - -// sizeDoubleSliceValue returns the size of wire encoding a []float64 value as a repeated Double. -func sizeDoubleSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { - list := listv.List() - size = list.Len() * (tagsize + protowire.SizeFixed64()) - return size -} - -// appendDoubleSliceValue encodes a []float64 value as a repeated Double. -func appendDoubleSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - list := listv.List() - for i, llen := 0, list.Len(); i < llen; i++ { - v := list.Get(i) - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) - } - return b, nil -} - -// consumeDoubleSliceValue wire decodes a []float64 value as a repeated Double. -func consumeDoubleSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - list := listv.List() - if wtyp == protowire.BytesType { - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - for len(b) > 0 { - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) - b = b[n:] - } - out.n = n - return listv, out, nil - } - if wtyp != protowire.Fixed64Type { - return protoreflect.Value{}, out, errUnknown - } - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) - out.n = n - return listv, out, nil -} - -var coderDoubleSliceValue = valueCoderFuncs{ - size: sizeDoubleSliceValue, - marshal: appendDoubleSliceValue, - unmarshal: consumeDoubleSliceValue, - merge: mergeListValue, -} - -// sizeDoublePackedSliceValue returns the size of wire encoding a []float64 value as a packed repeated Double. -func sizeDoublePackedSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { - list := listv.List() - llen := list.Len() - if llen == 0 { - return 0 - } - n := llen * protowire.SizeFixed64() - return tagsize + protowire.SizeBytes(n) -} - -// appendDoublePackedSliceValue encodes a []float64 value as a packed repeated Double. 
-func appendDoublePackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - list := listv.List() - llen := list.Len() - if llen == 0 { - return b, nil - } - b = protowire.AppendVarint(b, wiretag) - n := llen * protowire.SizeFixed64() - b = protowire.AppendVarint(b, uint64(n)) - for i := 0; i < llen; i++ { - v := list.Get(i) - b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) - } - return b, nil -} - -var coderDoublePackedSliceValue = valueCoderFuncs{ - size: sizeDoublePackedSliceValue, - marshal: appendDoublePackedSliceValue, - unmarshal: consumeDoubleSliceValue, - merge: mergeListValue, -} - -// sizeString returns the size of wire encoding a string pointer as a String. -func sizeString(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - v := *p.String() - return f.tagsize + protowire.SizeBytes(len(v)) -} - -// appendString wire encodes a string pointer as a String. -func appendString(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.String() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendString(b, v) - return b, nil -} - -// consumeString wire decodes a string pointer as a String. -func consumeString(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.BytesType { - return out, errUnknown - } - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - *p.String() = string(v) - out.n = n - return out, nil -} - -var coderString = pointerCoderFuncs{ - size: sizeString, - marshal: appendString, - unmarshal: consumeString, - merge: mergeString, -} - -// appendStringValidateUTF8 wire encodes a string pointer as a String. -func appendStringValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.String() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendString(b, v) - if !utf8.ValidString(v) { - return b, errInvalidUTF8{} - } - return b, nil -} - -// consumeStringValidateUTF8 wire decodes a string pointer as a String. -func consumeStringValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.BytesType { - return out, errUnknown - } - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - if !utf8.Valid(v) { - return out, errInvalidUTF8{} - } - *p.String() = string(v) - out.n = n - return out, nil -} - -var coderStringValidateUTF8 = pointerCoderFuncs{ - size: sizeString, - marshal: appendStringValidateUTF8, - unmarshal: consumeStringValidateUTF8, - merge: mergeString, -} - -// sizeStringNoZero returns the size of wire encoding a string pointer as a String. -// The zero value is not encoded. -func sizeStringNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - v := *p.String() - if len(v) == 0 { - return 0 - } - return f.tagsize + protowire.SizeBytes(len(v)) -} - -// appendStringNoZero wire encodes a string pointer as a String. -// The zero value is not encoded. 
-func appendStringNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.String() - if len(v) == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendString(b, v) - return b, nil -} - -var coderStringNoZero = pointerCoderFuncs{ - size: sizeStringNoZero, - marshal: appendStringNoZero, - unmarshal: consumeString, - merge: mergeStringNoZero, -} - -// appendStringNoZeroValidateUTF8 wire encodes a string pointer as a String. -// The zero value is not encoded. -func appendStringNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.String() - if len(v) == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendString(b, v) - if !utf8.ValidString(v) { - return b, errInvalidUTF8{} - } - return b, nil -} - -var coderStringNoZeroValidateUTF8 = pointerCoderFuncs{ - size: sizeStringNoZero, - marshal: appendStringNoZeroValidateUTF8, - unmarshal: consumeStringValidateUTF8, - merge: mergeStringNoZero, -} - -// sizeStringPtr returns the size of wire encoding a *string pointer as a String. -// It panics if the pointer is nil. -func sizeStringPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - v := **p.StringPtr() - return f.tagsize + protowire.SizeBytes(len(v)) -} - -// appendStringPtr wire encodes a *string pointer as a String. -// It panics if the pointer is nil. -func appendStringPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := **p.StringPtr() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendString(b, v) - return b, nil -} - -// consumeStringPtr wire decodes a *string pointer as a String. -func consumeStringPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.BytesType { - return out, errUnknown - } - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - vp := p.StringPtr() - if *vp == nil { - *vp = new(string) - } - **vp = string(v) - out.n = n - return out, nil -} - -var coderStringPtr = pointerCoderFuncs{ - size: sizeStringPtr, - marshal: appendStringPtr, - unmarshal: consumeStringPtr, - merge: mergeStringPtr, -} - -// appendStringPtrValidateUTF8 wire encodes a *string pointer as a String. -// It panics if the pointer is nil. -func appendStringPtrValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := **p.StringPtr() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendString(b, v) - if !utf8.ValidString(v) { - return b, errInvalidUTF8{} - } - return b, nil -} - -// consumeStringPtrValidateUTF8 wire decodes a *string pointer as a String. -func consumeStringPtrValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.BytesType { - return out, errUnknown - } - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - if !utf8.Valid(v) { - return out, errInvalidUTF8{} - } - vp := p.StringPtr() - if *vp == nil { - *vp = new(string) - } - **vp = string(v) - out.n = n - return out, nil -} - -var coderStringPtrValidateUTF8 = pointerCoderFuncs{ - size: sizeStringPtr, - marshal: appendStringPtrValidateUTF8, - unmarshal: consumeStringPtrValidateUTF8, - merge: mergeStringPtr, -} - -// sizeStringSlice returns the size of wire encoding a []string pointer as a repeated String. 
-func sizeStringSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := *p.StringSlice() - for _, v := range s { - size += f.tagsize + protowire.SizeBytes(len(v)) - } - return size -} - -// appendStringSlice encodes a []string pointer as a repeated String. -func appendStringSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := *p.StringSlice() - for _, v := range s { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendString(b, v) - } - return b, nil -} - -// consumeStringSlice wire decodes a []string pointer as a repeated String. -func consumeStringSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - sp := p.StringSlice() - if wtyp != protowire.BytesType { - return out, errUnknown - } - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - *sp = append(*sp, string(v)) - out.n = n - return out, nil -} - -var coderStringSlice = pointerCoderFuncs{ - size: sizeStringSlice, - marshal: appendStringSlice, - unmarshal: consumeStringSlice, - merge: mergeStringSlice, -} - -// appendStringSliceValidateUTF8 encodes a []string pointer as a repeated String. -func appendStringSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := *p.StringSlice() - for _, v := range s { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendString(b, v) - if !utf8.ValidString(v) { - return b, errInvalidUTF8{} - } - } - return b, nil -} - -// consumeStringSliceValidateUTF8 wire decodes a []string pointer as a repeated String. -func consumeStringSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.BytesType { - return out, errUnknown - } - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - if !utf8.Valid(v) { - return out, errInvalidUTF8{} - } - sp := p.StringSlice() - *sp = append(*sp, string(v)) - out.n = n - return out, nil -} - -var coderStringSliceValidateUTF8 = pointerCoderFuncs{ - size: sizeStringSlice, - marshal: appendStringSliceValidateUTF8, - unmarshal: consumeStringSliceValidateUTF8, - merge: mergeStringSlice, -} - -// sizeStringValue returns the size of wire encoding a string value as a String. -func sizeStringValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { - return tagsize + protowire.SizeBytes(len(v.String())) -} - -// appendStringValue encodes a string value as a String. -func appendStringValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendString(b, v.String()) - return b, nil -} - -// consumeStringValue decodes a string value as a String. -func consumeStringValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - if wtyp != protowire.BytesType { - return protoreflect.Value{}, out, errUnknown - } - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - out.n = n - return protoreflect.ValueOfString(string(v)), out, nil -} - -var coderStringValue = valueCoderFuncs{ - size: sizeStringValue, - marshal: appendStringValue, - unmarshal: consumeStringValue, - merge: mergeScalarValue, -} - -// appendStringValueValidateUTF8 encodes a string value as a String. 
-func appendStringValueValidateUTF8(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendString(b, v.String()) - if !utf8.ValidString(v.String()) { - return b, errInvalidUTF8{} - } - return b, nil -} - -// consumeStringValueValidateUTF8 decodes a string value as a String. -func consumeStringValueValidateUTF8(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - if wtyp != protowire.BytesType { - return protoreflect.Value{}, out, errUnknown - } - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - if !utf8.Valid(v) { - return protoreflect.Value{}, out, errInvalidUTF8{} - } - out.n = n - return protoreflect.ValueOfString(string(v)), out, nil -} - -var coderStringValueValidateUTF8 = valueCoderFuncs{ - size: sizeStringValue, - marshal: appendStringValueValidateUTF8, - unmarshal: consumeStringValueValidateUTF8, - merge: mergeScalarValue, -} - -// sizeStringSliceValue returns the size of wire encoding a []string value as a repeated String. -func sizeStringSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { - list := listv.List() - for i, llen := 0, list.Len(); i < llen; i++ { - v := list.Get(i) - size += tagsize + protowire.SizeBytes(len(v.String())) - } - return size -} - -// appendStringSliceValue encodes a []string value as a repeated String. -func appendStringSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - list := listv.List() - for i, llen := 0, list.Len(); i < llen; i++ { - v := list.Get(i) - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendString(b, v.String()) - } - return b, nil -} - -// consumeStringSliceValue wire decodes a []string value as a repeated String. -func consumeStringSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - list := listv.List() - if wtyp != protowire.BytesType { - return protoreflect.Value{}, out, errUnknown - } - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - list.Append(protoreflect.ValueOfString(string(v))) - out.n = n - return listv, out, nil -} - -var coderStringSliceValue = valueCoderFuncs{ - size: sizeStringSliceValue, - marshal: appendStringSliceValue, - unmarshal: consumeStringSliceValue, - merge: mergeListValue, -} - -// sizeBytes returns the size of wire encoding a []byte pointer as a Bytes. -func sizeBytes(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - v := *p.Bytes() - return f.tagsize + protowire.SizeBytes(len(v)) -} - -// appendBytes wire encodes a []byte pointer as a Bytes. -func appendBytes(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.Bytes() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendBytes(b, v) - return b, nil -} - -// consumeBytes wire decodes a []byte pointer as a Bytes. -func consumeBytes(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.BytesType { - return out, errUnknown - } - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - *p.Bytes() = append(emptyBuf[:], v...) 
- out.n = n - return out, nil -} - -var coderBytes = pointerCoderFuncs{ - size: sizeBytes, - marshal: appendBytes, - unmarshal: consumeBytes, - merge: mergeBytes, -} - -// appendBytesValidateUTF8 wire encodes a []byte pointer as a Bytes. -func appendBytesValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.Bytes() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendBytes(b, v) - if !utf8.Valid(v) { - return b, errInvalidUTF8{} - } - return b, nil -} - -// consumeBytesValidateUTF8 wire decodes a []byte pointer as a Bytes. -func consumeBytesValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.BytesType { - return out, errUnknown - } - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - if !utf8.Valid(v) { - return out, errInvalidUTF8{} - } - *p.Bytes() = append(emptyBuf[:], v...) - out.n = n - return out, nil -} - -var coderBytesValidateUTF8 = pointerCoderFuncs{ - size: sizeBytes, - marshal: appendBytesValidateUTF8, - unmarshal: consumeBytesValidateUTF8, - merge: mergeBytes, -} - -// sizeBytesNoZero returns the size of wire encoding a []byte pointer as a Bytes. -// The zero value is not encoded. -func sizeBytesNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - v := *p.Bytes() - if len(v) == 0 { - return 0 - } - return f.tagsize + protowire.SizeBytes(len(v)) -} - -// appendBytesNoZero wire encodes a []byte pointer as a Bytes. -// The zero value is not encoded. -func appendBytesNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.Bytes() - if len(v) == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendBytes(b, v) - return b, nil -} - -// consumeBytesNoZero wire decodes a []byte pointer as a Bytes. -// The zero value is not decoded. -func consumeBytesNoZero(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.BytesType { - return out, errUnknown - } - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - *p.Bytes() = append(([]byte)(nil), v...) - out.n = n - return out, nil -} - -var coderBytesNoZero = pointerCoderFuncs{ - size: sizeBytesNoZero, - marshal: appendBytesNoZero, - unmarshal: consumeBytesNoZero, - merge: mergeBytesNoZero, -} - -// appendBytesNoZeroValidateUTF8 wire encodes a []byte pointer as a Bytes. -// The zero value is not encoded. -func appendBytesNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := *p.Bytes() - if len(v) == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendBytes(b, v) - if !utf8.Valid(v) { - return b, errInvalidUTF8{} - } - return b, nil -} - -// consumeBytesNoZeroValidateUTF8 wire decodes a []byte pointer as a Bytes. -func consumeBytesNoZeroValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.BytesType { - return out, errUnknown - } - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - if !utf8.Valid(v) { - return out, errInvalidUTF8{} - } - *p.Bytes() = append(([]byte)(nil), v...) 
- out.n = n - return out, nil -} - -var coderBytesNoZeroValidateUTF8 = pointerCoderFuncs{ - size: sizeBytesNoZero, - marshal: appendBytesNoZeroValidateUTF8, - unmarshal: consumeBytesNoZeroValidateUTF8, - merge: mergeBytesNoZero, -} - -// sizeBytesSlice returns the size of wire encoding a [][]byte pointer as a repeated Bytes. -func sizeBytesSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := *p.BytesSlice() - for _, v := range s { - size += f.tagsize + protowire.SizeBytes(len(v)) - } - return size -} - -// appendBytesSlice encodes a [][]byte pointer as a repeated Bytes. -func appendBytesSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := *p.BytesSlice() - for _, v := range s { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendBytes(b, v) - } - return b, nil -} - -// consumeBytesSlice wire decodes a [][]byte pointer as a repeated Bytes. -func consumeBytesSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - sp := p.BytesSlice() - if wtyp != protowire.BytesType { - return out, errUnknown - } - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - *sp = append(*sp, append(emptyBuf[:], v...)) - out.n = n - return out, nil -} - -var coderBytesSlice = pointerCoderFuncs{ - size: sizeBytesSlice, - marshal: appendBytesSlice, - unmarshal: consumeBytesSlice, - merge: mergeBytesSlice, -} - -// appendBytesSliceValidateUTF8 encodes a [][]byte pointer as a repeated Bytes. -func appendBytesSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := *p.BytesSlice() - for _, v := range s { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendBytes(b, v) - if !utf8.Valid(v) { - return b, errInvalidUTF8{} - } - } - return b, nil -} - -// consumeBytesSliceValidateUTF8 wire decodes a [][]byte pointer as a repeated Bytes. -func consumeBytesSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.BytesType { - return out, errUnknown - } - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - if !utf8.Valid(v) { - return out, errInvalidUTF8{} - } - sp := p.BytesSlice() - *sp = append(*sp, append(emptyBuf[:], v...)) - out.n = n - return out, nil -} - -var coderBytesSliceValidateUTF8 = pointerCoderFuncs{ - size: sizeBytesSlice, - marshal: appendBytesSliceValidateUTF8, - unmarshal: consumeBytesSliceValidateUTF8, - merge: mergeBytesSlice, -} - -// sizeBytesValue returns the size of wire encoding a []byte value as a Bytes. -func sizeBytesValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { - return tagsize + protowire.SizeBytes(len(v.Bytes())) -} - -// appendBytesValue encodes a []byte value as a Bytes. -func appendBytesValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendBytes(b, v.Bytes()) - return b, nil -} - -// consumeBytesValue decodes a []byte value as a Bytes. 
-func consumeBytesValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - if wtyp != protowire.BytesType { - return protoreflect.Value{}, out, errUnknown - } - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - out.n = n - return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), out, nil -} - -var coderBytesValue = valueCoderFuncs{ - size: sizeBytesValue, - marshal: appendBytesValue, - unmarshal: consumeBytesValue, - merge: mergeBytesValue, -} - -// sizeBytesSliceValue returns the size of wire encoding a [][]byte value as a repeated Bytes. -func sizeBytesSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) (size int) { - list := listv.List() - for i, llen := 0, list.Len(); i < llen; i++ { - v := list.Get(i) - size += tagsize + protowire.SizeBytes(len(v.Bytes())) - } - return size -} - -// appendBytesSliceValue encodes a [][]byte value as a repeated Bytes. -func appendBytesSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { - list := listv.List() - for i, llen := 0, list.Len(); i < llen; i++ { - v := list.Get(i) - b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendBytes(b, v.Bytes()) - } - return b, nil -} - -// consumeBytesSliceValue wire decodes a [][]byte value as a repeated Bytes. -func consumeBytesSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { - list := listv.List() - if wtyp != protowire.BytesType { - return protoreflect.Value{}, out, errUnknown - } - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return protoreflect.Value{}, out, errDecode - } - list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...))) - out.n = n - return listv, out, nil -} - -var coderBytesSliceValue = valueCoderFuncs{ - size: sizeBytesSliceValue, - marshal: appendBytesSliceValue, - unmarshal: consumeBytesSliceValue, - merge: mergeBytesListValue, -} - -// We append to an empty array rather than a nil []byte to get non-nil zero-length byte slices. -var emptyBuf [0]byte - -var wireTypes = map[protoreflect.Kind]protowire.Type{ - protoreflect.BoolKind: protowire.VarintType, - protoreflect.EnumKind: protowire.VarintType, - protoreflect.Int32Kind: protowire.VarintType, - protoreflect.Sint32Kind: protowire.VarintType, - protoreflect.Uint32Kind: protowire.VarintType, - protoreflect.Int64Kind: protowire.VarintType, - protoreflect.Sint64Kind: protowire.VarintType, - protoreflect.Uint64Kind: protowire.VarintType, - protoreflect.Sfixed32Kind: protowire.Fixed32Type, - protoreflect.Fixed32Kind: protowire.Fixed32Type, - protoreflect.FloatKind: protowire.Fixed32Type, - protoreflect.Sfixed64Kind: protowire.Fixed64Type, - protoreflect.Fixed64Kind: protowire.Fixed64Type, - protoreflect.DoubleKind: protowire.Fixed64Type, - protoreflect.StringKind: protowire.BytesType, - protoreflect.BytesKind: protowire.BytesType, - protoreflect.MessageKind: protowire.BytesType, - protoreflect.GroupKind: protowire.StartGroupType, -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go deleted file mode 100644 index c1245fef4..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +++ /dev/null @@ -1,388 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package impl - -import ( - "reflect" - "sort" - - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/genid" - pref "google.golang.org/protobuf/reflect/protoreflect" -) - -type mapInfo struct { - goType reflect.Type - keyWiretag uint64 - valWiretag uint64 - keyFuncs valueCoderFuncs - valFuncs valueCoderFuncs - keyZero pref.Value - keyKind pref.Kind - conv *mapConverter -} - -func encoderFuncsForMap(fd pref.FieldDescriptor, ft reflect.Type) (valueMessage *MessageInfo, funcs pointerCoderFuncs) { - // TODO: Consider generating specialized map coders. - keyField := fd.MapKey() - valField := fd.MapValue() - keyWiretag := protowire.EncodeTag(1, wireTypes[keyField.Kind()]) - valWiretag := protowire.EncodeTag(2, wireTypes[valField.Kind()]) - keyFuncs := encoderFuncsForValue(keyField) - valFuncs := encoderFuncsForValue(valField) - conv := newMapConverter(ft, fd) - - mapi := &mapInfo{ - goType: ft, - keyWiretag: keyWiretag, - valWiretag: valWiretag, - keyFuncs: keyFuncs, - valFuncs: valFuncs, - keyZero: keyField.Default(), - keyKind: keyField.Kind(), - conv: conv, - } - if valField.Kind() == pref.MessageKind { - valueMessage = getMessageInfo(ft.Elem()) - } - - funcs = pointerCoderFuncs{ - size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { - return sizeMap(p.AsValueOf(ft).Elem(), mapi, f, opts) - }, - marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - return appendMap(b, p.AsValueOf(ft).Elem(), mapi, f, opts) - }, - unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { - mp := p.AsValueOf(ft) - if mp.Elem().IsNil() { - mp.Elem().Set(reflect.MakeMap(mapi.goType)) - } - if f.mi == nil { - return consumeMap(b, mp.Elem(), wtyp, mapi, f, opts) - } else { - return consumeMapOfMessage(b, mp.Elem(), wtyp, mapi, f, opts) - } - }, - } - switch valField.Kind() { - case pref.MessageKind: - funcs.merge = mergeMapOfMessage - case pref.BytesKind: - funcs.merge = mergeMapOfBytes - default: - funcs.merge = mergeMap - } - if valFuncs.isInit != nil { - funcs.isInit = func(p pointer, f *coderFieldInfo) error { - return isInitMap(p.AsValueOf(ft).Elem(), mapi, f) - } - } - return valueMessage, funcs -} - -const ( - mapKeyTagSize = 1 // field 1, tag size 1. - mapValTagSize = 1 // field 2, tag size 2. 
-) - -func sizeMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) int { - if mapv.Len() == 0 { - return 0 - } - n := 0 - iter := mapRange(mapv) - for iter.Next() { - key := mapi.conv.keyConv.PBValueOf(iter.Key()).MapKey() - keySize := mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) - var valSize int - value := mapi.conv.valConv.PBValueOf(iter.Value()) - if f.mi == nil { - valSize = mapi.valFuncs.size(value, mapValTagSize, opts) - } else { - p := pointerOfValue(iter.Value()) - valSize += mapValTagSize - valSize += protowire.SizeBytes(f.mi.sizePointer(p, opts)) - } - n += f.tagsize + protowire.SizeBytes(keySize+valSize) - } - return n -} - -func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.BytesType { - return out, errUnknown - } - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - var ( - key = mapi.keyZero - val = mapi.conv.valConv.New() - ) - for len(b) > 0 { - num, wtyp, n := protowire.ConsumeTag(b) - if n < 0 { - return out, errDecode - } - if num > protowire.MaxValidNumber { - return out, errDecode - } - b = b[n:] - err := errUnknown - switch num { - case genid.MapEntry_Key_field_number: - var v pref.Value - var o unmarshalOutput - v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts) - if err != nil { - break - } - key = v - n = o.n - case genid.MapEntry_Value_field_number: - var v pref.Value - var o unmarshalOutput - v, o, err = mapi.valFuncs.unmarshal(b, val, num, wtyp, opts) - if err != nil { - break - } - val = v - n = o.n - } - if err == errUnknown { - n = protowire.ConsumeFieldValue(num, wtyp, b) - if n < 0 { - return out, errDecode - } - } else if err != nil { - return out, err - } - b = b[n:] - } - mapv.SetMapIndex(mapi.conv.keyConv.GoValueOf(key), mapi.conv.valConv.GoValueOf(val)) - out.n = n - return out, nil -} - -func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.BytesType { - return out, errUnknown - } - b, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - var ( - key = mapi.keyZero - val = reflect.New(f.mi.GoReflectType.Elem()) - ) - for len(b) > 0 { - num, wtyp, n := protowire.ConsumeTag(b) - if n < 0 { - return out, errDecode - } - if num > protowire.MaxValidNumber { - return out, errDecode - } - b = b[n:] - err := errUnknown - switch num { - case 1: - var v pref.Value - var o unmarshalOutput - v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts) - if err != nil { - break - } - key = v - n = o.n - case 2: - if wtyp != protowire.BytesType { - break - } - var v []byte - v, n = protowire.ConsumeBytes(b) - if n < 0 { - return out, errDecode - } - var o unmarshalOutput - o, err = f.mi.unmarshalPointer(v, pointerOfValue(val), 0, opts) - if o.initialized { - // Consider this map item initialized so long as we see - // an initialized value. 
- out.initialized = true - } - } - if err == errUnknown { - n = protowire.ConsumeFieldValue(num, wtyp, b) - if n < 0 { - return out, errDecode - } - } else if err != nil { - return out, err - } - b = b[n:] - } - mapv.SetMapIndex(mapi.conv.keyConv.GoValueOf(key), val) - out.n = n - return out, nil -} - -func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - if f.mi == nil { - key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey() - val := mapi.conv.valConv.PBValueOf(valrv) - size := 0 - size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) - size += mapi.valFuncs.size(val, mapValTagSize, opts) - b = protowire.AppendVarint(b, uint64(size)) - b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts) - if err != nil { - return nil, err - } - return mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts) - } else { - key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey() - val := pointerOfValue(valrv) - valSize := f.mi.sizePointer(val, opts) - size := 0 - size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) - size += mapValTagSize + protowire.SizeBytes(valSize) - b = protowire.AppendVarint(b, uint64(size)) - b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts) - if err != nil { - return nil, err - } - b = protowire.AppendVarint(b, mapi.valWiretag) - b = protowire.AppendVarint(b, uint64(valSize)) - return f.mi.marshalAppendPointer(b, val, opts) - } -} - -func appendMap(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - if mapv.Len() == 0 { - return b, nil - } - if opts.Deterministic() { - return appendMapDeterministic(b, mapv, mapi, f, opts) - } - iter := mapRange(mapv) - for iter.Next() { - var err error - b = protowire.AppendVarint(b, f.wiretag) - b, err = appendMapItem(b, iter.Key(), iter.Value(), mapi, f, opts) - if err != nil { - return b, err - } - } - return b, nil -} - -func appendMapDeterministic(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - keys := mapv.MapKeys() - sort.Slice(keys, func(i, j int) bool { - switch keys[i].Kind() { - case reflect.Bool: - return !keys[i].Bool() && keys[j].Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return keys[i].Int() < keys[j].Int() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return keys[i].Uint() < keys[j].Uint() - case reflect.Float32, reflect.Float64: - return keys[i].Float() < keys[j].Float() - case reflect.String: - return keys[i].String() < keys[j].String() - default: - panic("invalid kind: " + keys[i].Kind().String()) - } - }) - for _, key := range keys { - var err error - b = protowire.AppendVarint(b, f.wiretag) - b, err = appendMapItem(b, key, mapv.MapIndex(key), mapi, f, opts) - if err != nil { - return b, err - } - } - return b, nil -} - -func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error { - if mi := f.mi; mi != nil { - mi.init() - if !mi.needsInitCheck { - return nil - } - iter := mapRange(mapv) - for iter.Next() { - val := pointerOfValue(iter.Value()) - if err := mi.checkInitializedPointer(val); err != nil { - return err - } - } - } else { - iter := mapRange(mapv) - for iter.Next() { - val := mapi.conv.valConv.PBValueOf(iter.Value()) - if err := mapi.valFuncs.isInit(val); err != nil { - return err - } - } - } - return nil -} - -func mergeMap(dst, src pointer, f *coderFieldInfo, opts 
mergeOptions) { - dstm := dst.AsValueOf(f.ft).Elem() - srcm := src.AsValueOf(f.ft).Elem() - if srcm.Len() == 0 { - return - } - if dstm.IsNil() { - dstm.Set(reflect.MakeMap(f.ft)) - } - iter := mapRange(srcm) - for iter.Next() { - dstm.SetMapIndex(iter.Key(), iter.Value()) - } -} - -func mergeMapOfBytes(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { - dstm := dst.AsValueOf(f.ft).Elem() - srcm := src.AsValueOf(f.ft).Elem() - if srcm.Len() == 0 { - return - } - if dstm.IsNil() { - dstm.Set(reflect.MakeMap(f.ft)) - } - iter := mapRange(srcm) - for iter.Next() { - dstm.SetMapIndex(iter.Key(), reflect.ValueOf(append(emptyBuf[:], iter.Value().Bytes()...))) - } -} - -func mergeMapOfMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { - dstm := dst.AsValueOf(f.ft).Elem() - srcm := src.AsValueOf(f.ft).Elem() - if srcm.Len() == 0 { - return - } - if dstm.IsNil() { - dstm.Set(reflect.MakeMap(f.ft)) - } - iter := mapRange(srcm) - for iter.Next() { - val := reflect.New(f.ft.Elem().Elem()) - if f.mi != nil { - f.mi.mergePointer(pointerOfValue(val), pointerOfValue(iter.Value()), opts) - } else { - opts.Merge(asMessage(val), asMessage(iter.Value())) - } - dstm.SetMapIndex(iter.Key(), val) - } -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go deleted file mode 100644 index 4b15493f2..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.12 -// +build !go1.12 - -package impl - -import "reflect" - -type mapIter struct { - v reflect.Value - keys []reflect.Value -} - -// mapRange provides a less-efficient equivalent to -// the Go 1.12 reflect.Value.MapRange method. -func mapRange(v reflect.Value) *mapIter { - return &mapIter{v: v} -} - -func (i *mapIter) Next() bool { - if i.keys == nil { - i.keys = i.v.MapKeys() - } else { - i.keys = i.keys[1:] - } - return len(i.keys) > 0 -} - -func (i *mapIter) Key() reflect.Value { - return i.keys[0] -} - -func (i *mapIter) Value() reflect.Value { - return i.v.MapIndex(i.keys[0]) -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go deleted file mode 100644 index 0b31b66ea..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.12 -// +build go1.12 - -package impl - -import "reflect" - -func mapRange(v reflect.Value) *reflect.MapIter { return v.MapRange() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go deleted file mode 100644 index cd40527ff..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ /dev/null @@ -1,217 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package impl - -import ( - "fmt" - "reflect" - "sort" - - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/encoding/messageset" - "google.golang.org/protobuf/internal/order" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" -) - -// coderMessageInfo contains per-message information used by the fast-path functions. -// This is a different type from MessageInfo to keep MessageInfo as general-purpose as -// possible. -type coderMessageInfo struct { - methods piface.Methods - - orderedCoderFields []*coderFieldInfo - denseCoderFields []*coderFieldInfo - coderFields map[protowire.Number]*coderFieldInfo - sizecacheOffset offset - unknownOffset offset - unknownPtrKind bool - extensionOffset offset - needsInitCheck bool - isMessageSet bool - numRequiredFields uint8 -} - -type coderFieldInfo struct { - funcs pointerCoderFuncs // fast-path per-field functions - mi *MessageInfo // field's message - ft reflect.Type - validation validationInfo // information used by message validation - num pref.FieldNumber // field number - offset offset // struct field offset - wiretag uint64 // field tag (number + wire type) - tagsize int // size of the varint-encoded tag - isPointer bool // true if IsNil may be called on the struct field - isRequired bool // true if field is required -} - -func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { - mi.sizecacheOffset = invalidOffset - mi.unknownOffset = invalidOffset - mi.extensionOffset = invalidOffset - - if si.sizecacheOffset.IsValid() && si.sizecacheType == sizecacheType { - mi.sizecacheOffset = si.sizecacheOffset - } - if si.unknownOffset.IsValid() && (si.unknownType == unknownFieldsAType || si.unknownType == unknownFieldsBType) { - mi.unknownOffset = si.unknownOffset - mi.unknownPtrKind = si.unknownType.Kind() == reflect.Ptr - } - if si.extensionOffset.IsValid() && si.extensionType == extensionFieldsType { - mi.extensionOffset = si.extensionOffset - } - - mi.coderFields = make(map[protowire.Number]*coderFieldInfo) - fields := mi.Desc.Fields() - preallocFields := make([]coderFieldInfo, fields.Len()) - for i := 0; i < fields.Len(); i++ { - fd := fields.Get(i) - - fs := si.fieldsByNumber[fd.Number()] - isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() - if isOneof { - fs = si.oneofsByName[fd.ContainingOneof().Name()] - } - ft := fs.Type - var wiretag uint64 - if !fd.IsPacked() { - wiretag = protowire.EncodeTag(fd.Number(), wireTypes[fd.Kind()]) - } else { - wiretag = protowire.EncodeTag(fd.Number(), protowire.BytesType) - } - var fieldOffset offset - var funcs pointerCoderFuncs - var childMessage *MessageInfo - switch { - case ft == nil: - // This never occurs for generated message types. - // It implies that a hand-crafted type has missing Go fields - // for specific protobuf message fields. 
- funcs = pointerCoderFuncs{ - size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { - return 0 - }, - marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - return nil, nil - }, - unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { - panic("missing Go struct field for " + string(fd.FullName())) - }, - isInit: func(p pointer, f *coderFieldInfo) error { - panic("missing Go struct field for " + string(fd.FullName())) - }, - merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { - panic("missing Go struct field for " + string(fd.FullName())) - }, - } - case isOneof: - fieldOffset = offsetOf(fs, mi.Exporter) - case fd.IsWeak(): - fieldOffset = si.weakOffset - funcs = makeWeakMessageFieldCoder(fd) - default: - fieldOffset = offsetOf(fs, mi.Exporter) - childMessage, funcs = fieldCoder(fd, ft) - } - cf := &preallocFields[i] - *cf = coderFieldInfo{ - num: fd.Number(), - offset: fieldOffset, - wiretag: wiretag, - ft: ft, - tagsize: protowire.SizeVarint(wiretag), - funcs: funcs, - mi: childMessage, - validation: newFieldValidationInfo(mi, si, fd, ft), - isPointer: fd.Cardinality() == pref.Repeated || fd.HasPresence(), - isRequired: fd.Cardinality() == pref.Required, - } - mi.orderedCoderFields = append(mi.orderedCoderFields, cf) - mi.coderFields[cf.num] = cf - } - for i, oneofs := 0, mi.Desc.Oneofs(); i < oneofs.Len(); i++ { - if od := oneofs.Get(i); !od.IsSynthetic() { - mi.initOneofFieldCoders(od, si) - } - } - if messageset.IsMessageSet(mi.Desc) { - if !mi.extensionOffset.IsValid() { - panic(fmt.Sprintf("%v: MessageSet with no extensions field", mi.Desc.FullName())) - } - if !mi.unknownOffset.IsValid() { - panic(fmt.Sprintf("%v: MessageSet with no unknown field", mi.Desc.FullName())) - } - mi.isMessageSet = true - } - sort.Slice(mi.orderedCoderFields, func(i, j int) bool { - return mi.orderedCoderFields[i].num < mi.orderedCoderFields[j].num - }) - - var maxDense pref.FieldNumber - for _, cf := range mi.orderedCoderFields { - if cf.num >= 16 && cf.num >= 2*maxDense { - break - } - maxDense = cf.num - } - mi.denseCoderFields = make([]*coderFieldInfo, maxDense+1) - for _, cf := range mi.orderedCoderFields { - if int(cf.num) >= len(mi.denseCoderFields) { - break - } - mi.denseCoderFields[cf.num] = cf - } - - // To preserve compatibility with historic wire output, marshal oneofs last. - if mi.Desc.Oneofs().Len() > 0 { - sort.Slice(mi.orderedCoderFields, func(i, j int) bool { - fi := fields.ByNumber(mi.orderedCoderFields[i].num) - fj := fields.ByNumber(mi.orderedCoderFields[j].num) - return order.LegacyFieldOrder(fi, fj) - }) - } - - mi.needsInitCheck = needsInitCheck(mi.Desc) - if mi.methods.Marshal == nil && mi.methods.Size == nil { - mi.methods.Flags |= piface.SupportMarshalDeterministic - mi.methods.Marshal = mi.marshal - mi.methods.Size = mi.size - } - if mi.methods.Unmarshal == nil { - mi.methods.Flags |= piface.SupportUnmarshalDiscardUnknown - mi.methods.Unmarshal = mi.unmarshal - } - if mi.methods.CheckInitialized == nil { - mi.methods.CheckInitialized = mi.checkInitialized - } - if mi.methods.Merge == nil { - mi.methods.Merge = mi.merge - } -} - -// getUnknownBytes returns a *[]byte for the unknown fields. -// It is the caller's responsibility to check whether the pointer is nil. -// This function is specially designed to be inlineable. 
-func (mi *MessageInfo) getUnknownBytes(p pointer) *[]byte { - if mi.unknownPtrKind { - return *p.Apply(mi.unknownOffset).BytesPtr() - } else { - return p.Apply(mi.unknownOffset).Bytes() - } -} - -// mutableUnknownBytes returns a *[]byte for the unknown fields. -// The returned pointer is guaranteed to not be nil. -func (mi *MessageInfo) mutableUnknownBytes(p pointer) *[]byte { - if mi.unknownPtrKind { - bp := p.Apply(mi.unknownOffset).BytesPtr() - if *bp == nil { - *bp = new([]byte) - } - return *bp - } else { - return p.Apply(mi.unknownOffset).Bytes() - } -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go deleted file mode 100644 index b7a23faf1..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package impl - -import ( - "sort" - - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/encoding/messageset" - "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/flags" -) - -func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int) { - if !flags.ProtoLegacy { - return 0 - } - - ext := *p.Apply(mi.extensionOffset).Extensions() - for _, x := range ext { - xi := getExtensionFieldInfo(x.Type()) - if xi.funcs.size == nil { - continue - } - num, _ := protowire.DecodeTag(xi.wiretag) - size += messageset.SizeField(num) - size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts) - } - - if u := mi.getUnknownBytes(p); u != nil { - size += messageset.SizeUnknown(*u) - } - - return size -} - -func marshalMessageSet(mi *MessageInfo, b []byte, p pointer, opts marshalOptions) ([]byte, error) { - if !flags.ProtoLegacy { - return b, errors.New("no support for message_set_wire_format") - } - - ext := *p.Apply(mi.extensionOffset).Extensions() - switch len(ext) { - case 0: - case 1: - // Fast-path for one extension: Don't bother sorting the keys. - for _, x := range ext { - var err error - b, err = marshalMessageSetField(mi, b, x, opts) - if err != nil { - return b, err - } - } - default: - // Sort the keys to provide a deterministic encoding. - // Not sure this is required, but the old code does it. 
- keys := make([]int, 0, len(ext)) - for k := range ext { - keys = append(keys, int(k)) - } - sort.Ints(keys) - for _, k := range keys { - var err error - b, err = marshalMessageSetField(mi, b, ext[int32(k)], opts) - if err != nil { - return b, err - } - } - } - - if u := mi.getUnknownBytes(p); u != nil { - var err error - b, err = messageset.AppendUnknown(b, *u) - if err != nil { - return b, err - } - } - - return b, nil -} - -func marshalMessageSetField(mi *MessageInfo, b []byte, x ExtensionField, opts marshalOptions) ([]byte, error) { - xi := getExtensionFieldInfo(x.Type()) - num, _ := protowire.DecodeTag(xi.wiretag) - b = messageset.AppendFieldStart(b, num) - b, err := xi.funcs.marshal(b, x.Value(), protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType), opts) - if err != nil { - return b, err - } - b = messageset.AppendFieldEnd(b) - return b, nil -} - -func unmarshalMessageSet(mi *MessageInfo, b []byte, p pointer, opts unmarshalOptions) (out unmarshalOutput, err error) { - if !flags.ProtoLegacy { - return out, errors.New("no support for message_set_wire_format") - } - - ep := p.Apply(mi.extensionOffset).Extensions() - if *ep == nil { - *ep = make(map[int32]ExtensionField) - } - ext := *ep - initialized := true - err = messageset.Unmarshal(b, true, func(num protowire.Number, v []byte) error { - o, err := mi.unmarshalExtension(v, num, protowire.BytesType, ext, opts) - if err == errUnknown { - u := mi.mutableUnknownBytes(p) - *u = protowire.AppendTag(*u, num, protowire.BytesType) - *u = append(*u, v...) - return nil - } - if !o.initialized { - initialized = false - } - return err - }) - out.n = len(b) - out.initialized = initialized - return out, err -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go deleted file mode 100644 index 145c577bd..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build purego || appengine -// +build purego appengine - -package impl - -import ( - "reflect" - - "google.golang.org/protobuf/encoding/protowire" -) - -func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { - v := p.v.Elem().Int() - return f.tagsize + protowire.SizeVarint(uint64(v)) -} - -func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := p.v.Elem().Int() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(v)) - return b, nil -} - -func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - p.v.Elem().SetInt(int64(v)) - out.n = n - return out, nil -} - -func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - dst.v.Elem().Set(src.v.Elem()) -} - -var coderEnum = pointerCoderFuncs{ - size: sizeEnum, - marshal: appendEnum, - unmarshal: consumeEnum, - merge: mergeEnum, -} - -func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - if p.v.Elem().Int() == 0 { - return 0 - } - return sizeEnum(p, f, opts) -} - -func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - if p.v.Elem().Int() == 0 { - return b, nil - } - return appendEnum(b, p, f, opts) -} - -func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - if src.v.Elem().Int() != 0 { - dst.v.Elem().Set(src.v.Elem()) - } -} - -var coderEnumNoZero = pointerCoderFuncs{ - size: sizeEnumNoZero, - marshal: appendEnumNoZero, - unmarshal: consumeEnum, - merge: mergeEnumNoZero, -} - -func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - return sizeEnum(pointer{p.v.Elem()}, f, opts) -} - -func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - return appendEnum(b, pointer{p.v.Elem()}, f, opts) -} - -func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - if p.v.Elem().IsNil() { - p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem())) - } - return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts) -} - -func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - if !src.v.Elem().IsNil() { - v := reflect.New(dst.v.Type().Elem().Elem()) - v.Elem().Set(src.v.Elem().Elem()) - dst.v.Elem().Set(v) - } -} - -var coderEnumPtr = pointerCoderFuncs{ - size: sizeEnumPtr, - marshal: appendEnumPtr, - unmarshal: consumeEnumPtr, - merge: mergeEnumPtr, -} - -func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := p.v.Elem() - for i, llen := 0, s.Len(); i < llen; i++ { - size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize - } - return size -} - -func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := p.v.Elem() - for i, llen := 0, s.Len(); i < llen; i++ { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) - } - return b, nil -} - -func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - s := p.v.Elem() - if wtyp == protowire.BytesType { - b, n := protowire.ConsumeBytes(b) - if n 
< 0 { - return out, errDecode - } - for len(b) > 0 { - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - rv := reflect.New(s.Type().Elem()).Elem() - rv.SetInt(int64(v)) - s.Set(reflect.Append(s, rv)) - b = b[n:] - } - out.n = n - return out, nil - } - if wtyp != protowire.VarintType { - return out, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - rv := reflect.New(s.Type().Elem()).Elem() - rv.SetInt(int64(v)) - s.Set(reflect.Append(s, rv)) - out.n = n - return out, nil -} - -func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem())) -} - -var coderEnumSlice = pointerCoderFuncs{ - size: sizeEnumSlice, - marshal: appendEnumSlice, - unmarshal: consumeEnumSlice, - merge: mergeEnumSlice, -} - -func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := p.v.Elem() - llen := s.Len() - if llen == 0 { - return 0 - } - n := 0 - for i := 0; i < llen; i++ { - n += protowire.SizeVarint(uint64(s.Index(i).Int())) - } - return f.tagsize + protowire.SizeBytes(n) -} - -func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := p.v.Elem() - llen := s.Len() - if llen == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - n := 0 - for i := 0; i < llen; i++ { - n += protowire.SizeVarint(uint64(s.Index(i).Int())) - } - b = protowire.AppendVarint(b, uint64(n)) - for i := 0; i < llen; i++ { - b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) - } - return b, nil -} - -var coderEnumPackedSlice = pointerCoderFuncs{ - size: sizeEnumPackedSlice, - marshal: appendEnumPackedSlice, - unmarshal: consumeEnumSlice, - merge: mergeEnumSlice, -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go deleted file mode 100644 index e89971238..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go +++ /dev/null @@ -1,557 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package impl - -import ( - "fmt" - "reflect" - - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/strs" - pref "google.golang.org/protobuf/reflect/protoreflect" -) - -// pointerCoderFuncs is a set of pointer encoding functions. -type pointerCoderFuncs struct { - mi *MessageInfo - size func(p pointer, f *coderFieldInfo, opts marshalOptions) int - marshal func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) - unmarshal func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) - isInit func(p pointer, f *coderFieldInfo) error - merge func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) -} - -// valueCoderFuncs is a set of protoreflect.Value encoding functions. 
-type valueCoderFuncs struct { - size func(v pref.Value, tagsize int, opts marshalOptions) int - marshal func(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) - unmarshal func(b []byte, v pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) - isInit func(v pref.Value) error - merge func(dst, src pref.Value, opts mergeOptions) pref.Value -} - -// fieldCoder returns pointer functions for a field, used for operating on -// struct fields. -func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) { - switch { - case fd.IsMap(): - return encoderFuncsForMap(fd, ft) - case fd.Cardinality() == pref.Repeated && !fd.IsPacked(): - // Repeated fields (not packed). - if ft.Kind() != reflect.Slice { - break - } - ft := ft.Elem() - switch fd.Kind() { - case pref.BoolKind: - if ft.Kind() == reflect.Bool { - return nil, coderBoolSlice - } - case pref.EnumKind: - if ft.Kind() == reflect.Int32 { - return nil, coderEnumSlice - } - case pref.Int32Kind: - if ft.Kind() == reflect.Int32 { - return nil, coderInt32Slice - } - case pref.Sint32Kind: - if ft.Kind() == reflect.Int32 { - return nil, coderSint32Slice - } - case pref.Uint32Kind: - if ft.Kind() == reflect.Uint32 { - return nil, coderUint32Slice - } - case pref.Int64Kind: - if ft.Kind() == reflect.Int64 { - return nil, coderInt64Slice - } - case pref.Sint64Kind: - if ft.Kind() == reflect.Int64 { - return nil, coderSint64Slice - } - case pref.Uint64Kind: - if ft.Kind() == reflect.Uint64 { - return nil, coderUint64Slice - } - case pref.Sfixed32Kind: - if ft.Kind() == reflect.Int32 { - return nil, coderSfixed32Slice - } - case pref.Fixed32Kind: - if ft.Kind() == reflect.Uint32 { - return nil, coderFixed32Slice - } - case pref.FloatKind: - if ft.Kind() == reflect.Float32 { - return nil, coderFloatSlice - } - case pref.Sfixed64Kind: - if ft.Kind() == reflect.Int64 { - return nil, coderSfixed64Slice - } - case pref.Fixed64Kind: - if ft.Kind() == reflect.Uint64 { - return nil, coderFixed64Slice - } - case pref.DoubleKind: - if ft.Kind() == reflect.Float64 { - return nil, coderDoubleSlice - } - case pref.StringKind: - if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { - return nil, coderStringSliceValidateUTF8 - } - if ft.Kind() == reflect.String { - return nil, coderStringSlice - } - if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 && strs.EnforceUTF8(fd) { - return nil, coderBytesSliceValidateUTF8 - } - if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { - return nil, coderBytesSlice - } - case pref.BytesKind: - if ft.Kind() == reflect.String { - return nil, coderStringSlice - } - if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { - return nil, coderBytesSlice - } - case pref.MessageKind: - return getMessageInfo(ft), makeMessageSliceFieldCoder(fd, ft) - case pref.GroupKind: - return getMessageInfo(ft), makeGroupSliceFieldCoder(fd, ft) - } - case fd.Cardinality() == pref.Repeated && fd.IsPacked(): - // Packed repeated fields. - // - // Only repeated fields of primitive numeric types - // (Varint, Fixed32, or Fixed64 wire type) can be packed. 
- if ft.Kind() != reflect.Slice { - break - } - ft := ft.Elem() - switch fd.Kind() { - case pref.BoolKind: - if ft.Kind() == reflect.Bool { - return nil, coderBoolPackedSlice - } - case pref.EnumKind: - if ft.Kind() == reflect.Int32 { - return nil, coderEnumPackedSlice - } - case pref.Int32Kind: - if ft.Kind() == reflect.Int32 { - return nil, coderInt32PackedSlice - } - case pref.Sint32Kind: - if ft.Kind() == reflect.Int32 { - return nil, coderSint32PackedSlice - } - case pref.Uint32Kind: - if ft.Kind() == reflect.Uint32 { - return nil, coderUint32PackedSlice - } - case pref.Int64Kind: - if ft.Kind() == reflect.Int64 { - return nil, coderInt64PackedSlice - } - case pref.Sint64Kind: - if ft.Kind() == reflect.Int64 { - return nil, coderSint64PackedSlice - } - case pref.Uint64Kind: - if ft.Kind() == reflect.Uint64 { - return nil, coderUint64PackedSlice - } - case pref.Sfixed32Kind: - if ft.Kind() == reflect.Int32 { - return nil, coderSfixed32PackedSlice - } - case pref.Fixed32Kind: - if ft.Kind() == reflect.Uint32 { - return nil, coderFixed32PackedSlice - } - case pref.FloatKind: - if ft.Kind() == reflect.Float32 { - return nil, coderFloatPackedSlice - } - case pref.Sfixed64Kind: - if ft.Kind() == reflect.Int64 { - return nil, coderSfixed64PackedSlice - } - case pref.Fixed64Kind: - if ft.Kind() == reflect.Uint64 { - return nil, coderFixed64PackedSlice - } - case pref.DoubleKind: - if ft.Kind() == reflect.Float64 { - return nil, coderDoublePackedSlice - } - } - case fd.Kind() == pref.MessageKind: - return getMessageInfo(ft), makeMessageFieldCoder(fd, ft) - case fd.Kind() == pref.GroupKind: - return getMessageInfo(ft), makeGroupFieldCoder(fd, ft) - case fd.Syntax() == pref.Proto3 && fd.ContainingOneof() == nil: - // Populated oneof fields always encode even if set to the zero value, - // which normally are not encoded in proto3. 
- switch fd.Kind() { - case pref.BoolKind: - if ft.Kind() == reflect.Bool { - return nil, coderBoolNoZero - } - case pref.EnumKind: - if ft.Kind() == reflect.Int32 { - return nil, coderEnumNoZero - } - case pref.Int32Kind: - if ft.Kind() == reflect.Int32 { - return nil, coderInt32NoZero - } - case pref.Sint32Kind: - if ft.Kind() == reflect.Int32 { - return nil, coderSint32NoZero - } - case pref.Uint32Kind: - if ft.Kind() == reflect.Uint32 { - return nil, coderUint32NoZero - } - case pref.Int64Kind: - if ft.Kind() == reflect.Int64 { - return nil, coderInt64NoZero - } - case pref.Sint64Kind: - if ft.Kind() == reflect.Int64 { - return nil, coderSint64NoZero - } - case pref.Uint64Kind: - if ft.Kind() == reflect.Uint64 { - return nil, coderUint64NoZero - } - case pref.Sfixed32Kind: - if ft.Kind() == reflect.Int32 { - return nil, coderSfixed32NoZero - } - case pref.Fixed32Kind: - if ft.Kind() == reflect.Uint32 { - return nil, coderFixed32NoZero - } - case pref.FloatKind: - if ft.Kind() == reflect.Float32 { - return nil, coderFloatNoZero - } - case pref.Sfixed64Kind: - if ft.Kind() == reflect.Int64 { - return nil, coderSfixed64NoZero - } - case pref.Fixed64Kind: - if ft.Kind() == reflect.Uint64 { - return nil, coderFixed64NoZero - } - case pref.DoubleKind: - if ft.Kind() == reflect.Float64 { - return nil, coderDoubleNoZero - } - case pref.StringKind: - if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { - return nil, coderStringNoZeroValidateUTF8 - } - if ft.Kind() == reflect.String { - return nil, coderStringNoZero - } - if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 && strs.EnforceUTF8(fd) { - return nil, coderBytesNoZeroValidateUTF8 - } - if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { - return nil, coderBytesNoZero - } - case pref.BytesKind: - if ft.Kind() == reflect.String { - return nil, coderStringNoZero - } - if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { - return nil, coderBytesNoZero - } - } - case ft.Kind() == reflect.Ptr: - ft := ft.Elem() - switch fd.Kind() { - case pref.BoolKind: - if ft.Kind() == reflect.Bool { - return nil, coderBoolPtr - } - case pref.EnumKind: - if ft.Kind() == reflect.Int32 { - return nil, coderEnumPtr - } - case pref.Int32Kind: - if ft.Kind() == reflect.Int32 { - return nil, coderInt32Ptr - } - case pref.Sint32Kind: - if ft.Kind() == reflect.Int32 { - return nil, coderSint32Ptr - } - case pref.Uint32Kind: - if ft.Kind() == reflect.Uint32 { - return nil, coderUint32Ptr - } - case pref.Int64Kind: - if ft.Kind() == reflect.Int64 { - return nil, coderInt64Ptr - } - case pref.Sint64Kind: - if ft.Kind() == reflect.Int64 { - return nil, coderSint64Ptr - } - case pref.Uint64Kind: - if ft.Kind() == reflect.Uint64 { - return nil, coderUint64Ptr - } - case pref.Sfixed32Kind: - if ft.Kind() == reflect.Int32 { - return nil, coderSfixed32Ptr - } - case pref.Fixed32Kind: - if ft.Kind() == reflect.Uint32 { - return nil, coderFixed32Ptr - } - case pref.FloatKind: - if ft.Kind() == reflect.Float32 { - return nil, coderFloatPtr - } - case pref.Sfixed64Kind: - if ft.Kind() == reflect.Int64 { - return nil, coderSfixed64Ptr - } - case pref.Fixed64Kind: - if ft.Kind() == reflect.Uint64 { - return nil, coderFixed64Ptr - } - case pref.DoubleKind: - if ft.Kind() == reflect.Float64 { - return nil, coderDoublePtr - } - case pref.StringKind: - if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { - return nil, coderStringPtrValidateUTF8 - } - if ft.Kind() == reflect.String { - return nil, coderStringPtr - } - case 
pref.BytesKind: - if ft.Kind() == reflect.String { - return nil, coderStringPtr - } - } - default: - switch fd.Kind() { - case pref.BoolKind: - if ft.Kind() == reflect.Bool { - return nil, coderBool - } - case pref.EnumKind: - if ft.Kind() == reflect.Int32 { - return nil, coderEnum - } - case pref.Int32Kind: - if ft.Kind() == reflect.Int32 { - return nil, coderInt32 - } - case pref.Sint32Kind: - if ft.Kind() == reflect.Int32 { - return nil, coderSint32 - } - case pref.Uint32Kind: - if ft.Kind() == reflect.Uint32 { - return nil, coderUint32 - } - case pref.Int64Kind: - if ft.Kind() == reflect.Int64 { - return nil, coderInt64 - } - case pref.Sint64Kind: - if ft.Kind() == reflect.Int64 { - return nil, coderSint64 - } - case pref.Uint64Kind: - if ft.Kind() == reflect.Uint64 { - return nil, coderUint64 - } - case pref.Sfixed32Kind: - if ft.Kind() == reflect.Int32 { - return nil, coderSfixed32 - } - case pref.Fixed32Kind: - if ft.Kind() == reflect.Uint32 { - return nil, coderFixed32 - } - case pref.FloatKind: - if ft.Kind() == reflect.Float32 { - return nil, coderFloat - } - case pref.Sfixed64Kind: - if ft.Kind() == reflect.Int64 { - return nil, coderSfixed64 - } - case pref.Fixed64Kind: - if ft.Kind() == reflect.Uint64 { - return nil, coderFixed64 - } - case pref.DoubleKind: - if ft.Kind() == reflect.Float64 { - return nil, coderDouble - } - case pref.StringKind: - if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { - return nil, coderStringValidateUTF8 - } - if ft.Kind() == reflect.String { - return nil, coderString - } - if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 && strs.EnforceUTF8(fd) { - return nil, coderBytesValidateUTF8 - } - if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { - return nil, coderBytes - } - case pref.BytesKind: - if ft.Kind() == reflect.String { - return nil, coderString - } - if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { - return nil, coderBytes - } - } - } - panic(fmt.Sprintf("invalid type: no encoder for %v %v %v/%v", fd.FullName(), fd.Cardinality(), fd.Kind(), ft)) -} - -// encoderFuncsForValue returns value functions for a field, used for -// extension values and map encoding. -func encoderFuncsForValue(fd pref.FieldDescriptor) valueCoderFuncs { - switch { - case fd.Cardinality() == pref.Repeated && !fd.IsPacked(): - switch fd.Kind() { - case pref.BoolKind: - return coderBoolSliceValue - case pref.EnumKind: - return coderEnumSliceValue - case pref.Int32Kind: - return coderInt32SliceValue - case pref.Sint32Kind: - return coderSint32SliceValue - case pref.Uint32Kind: - return coderUint32SliceValue - case pref.Int64Kind: - return coderInt64SliceValue - case pref.Sint64Kind: - return coderSint64SliceValue - case pref.Uint64Kind: - return coderUint64SliceValue - case pref.Sfixed32Kind: - return coderSfixed32SliceValue - case pref.Fixed32Kind: - return coderFixed32SliceValue - case pref.FloatKind: - return coderFloatSliceValue - case pref.Sfixed64Kind: - return coderSfixed64SliceValue - case pref.Fixed64Kind: - return coderFixed64SliceValue - case pref.DoubleKind: - return coderDoubleSliceValue - case pref.StringKind: - // We don't have a UTF-8 validating coder for repeated string fields. - // Value coders are used for extensions and maps. - // Extensions are never proto3, and maps never contain lists. 
- return coderStringSliceValue - case pref.BytesKind: - return coderBytesSliceValue - case pref.MessageKind: - return coderMessageSliceValue - case pref.GroupKind: - return coderGroupSliceValue - } - case fd.Cardinality() == pref.Repeated && fd.IsPacked(): - switch fd.Kind() { - case pref.BoolKind: - return coderBoolPackedSliceValue - case pref.EnumKind: - return coderEnumPackedSliceValue - case pref.Int32Kind: - return coderInt32PackedSliceValue - case pref.Sint32Kind: - return coderSint32PackedSliceValue - case pref.Uint32Kind: - return coderUint32PackedSliceValue - case pref.Int64Kind: - return coderInt64PackedSliceValue - case pref.Sint64Kind: - return coderSint64PackedSliceValue - case pref.Uint64Kind: - return coderUint64PackedSliceValue - case pref.Sfixed32Kind: - return coderSfixed32PackedSliceValue - case pref.Fixed32Kind: - return coderFixed32PackedSliceValue - case pref.FloatKind: - return coderFloatPackedSliceValue - case pref.Sfixed64Kind: - return coderSfixed64PackedSliceValue - case pref.Fixed64Kind: - return coderFixed64PackedSliceValue - case pref.DoubleKind: - return coderDoublePackedSliceValue - } - default: - switch fd.Kind() { - default: - case pref.BoolKind: - return coderBoolValue - case pref.EnumKind: - return coderEnumValue - case pref.Int32Kind: - return coderInt32Value - case pref.Sint32Kind: - return coderSint32Value - case pref.Uint32Kind: - return coderUint32Value - case pref.Int64Kind: - return coderInt64Value - case pref.Sint64Kind: - return coderSint64Value - case pref.Uint64Kind: - return coderUint64Value - case pref.Sfixed32Kind: - return coderSfixed32Value - case pref.Fixed32Kind: - return coderFixed32Value - case pref.FloatKind: - return coderFloatValue - case pref.Sfixed64Kind: - return coderSfixed64Value - case pref.Fixed64Kind: - return coderFixed64Value - case pref.DoubleKind: - return coderDoubleValue - case pref.StringKind: - if strs.EnforceUTF8(fd) { - return coderStringValueValidateUTF8 - } - return coderStringValue - case pref.BytesKind: - return coderBytesValue - case pref.MessageKind: - return coderMessageValue - case pref.GroupKind: - return coderGroupValue - } - } - panic(fmt.Sprintf("invalid field: no encoder for %v %v %v", fd.FullName(), fd.Cardinality(), fd.Kind())) -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go deleted file mode 100644 index 757642e23..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !purego && !appengine -// +build !purego,!appengine - -package impl - -// When using unsafe pointers, we can just treat enum values as int32s. - -var ( - coderEnumNoZero = coderInt32NoZero - coderEnum = coderInt32 - coderEnumPtr = coderInt32Ptr - coderEnumSlice = coderInt32Slice - coderEnumPackedSlice = coderInt32PackedSlice -) diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go deleted file mode 100644 index acd61bb50..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ /dev/null @@ -1,496 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package impl - -import ( - "fmt" - "reflect" - - pref "google.golang.org/protobuf/reflect/protoreflect" -) - -// unwrapper unwraps the value to the underlying value. -// This is implemented by List and Map. -type unwrapper interface { - protoUnwrap() interface{} -} - -// A Converter coverts to/from Go reflect.Value types and protobuf protoreflect.Value types. -type Converter interface { - // PBValueOf converts a reflect.Value to a protoreflect.Value. - PBValueOf(reflect.Value) pref.Value - - // GoValueOf converts a protoreflect.Value to a reflect.Value. - GoValueOf(pref.Value) reflect.Value - - // IsValidPB returns whether a protoreflect.Value is compatible with this type. - IsValidPB(pref.Value) bool - - // IsValidGo returns whether a reflect.Value is compatible with this type. - IsValidGo(reflect.Value) bool - - // New returns a new field value. - // For scalars, it returns the default value of the field. - // For composite types, it returns a new mutable value. - New() pref.Value - - // Zero returns a new field value. - // For scalars, it returns the default value of the field. - // For composite types, it returns an immutable, empty value. - Zero() pref.Value -} - -// NewConverter matches a Go type with a protobuf field and returns a Converter -// that converts between the two. Enums must be a named int32 kind that -// implements protoreflect.Enum, and messages must be pointer to a named -// struct type that implements protoreflect.ProtoMessage. -// -// This matcher deliberately supports a wider range of Go types than what -// protoc-gen-go historically generated to be able to automatically wrap some -// v1 messages generated by other forks of protoc-gen-go. -func NewConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { - switch { - case fd.IsList(): - return newListConverter(t, fd) - case fd.IsMap(): - return newMapConverter(t, fd) - default: - return newSingularConverter(t, fd) - } - panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) -} - -var ( - boolType = reflect.TypeOf(bool(false)) - int32Type = reflect.TypeOf(int32(0)) - int64Type = reflect.TypeOf(int64(0)) - uint32Type = reflect.TypeOf(uint32(0)) - uint64Type = reflect.TypeOf(uint64(0)) - float32Type = reflect.TypeOf(float32(0)) - float64Type = reflect.TypeOf(float64(0)) - stringType = reflect.TypeOf(string("")) - bytesType = reflect.TypeOf([]byte(nil)) - byteType = reflect.TypeOf(byte(0)) -) - -var ( - boolZero = pref.ValueOfBool(false) - int32Zero = pref.ValueOfInt32(0) - int64Zero = pref.ValueOfInt64(0) - uint32Zero = pref.ValueOfUint32(0) - uint64Zero = pref.ValueOfUint64(0) - float32Zero = pref.ValueOfFloat32(0) - float64Zero = pref.ValueOfFloat64(0) - stringZero = pref.ValueOfString("") - bytesZero = pref.ValueOfBytes(nil) -) - -func newSingularConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { - defVal := func(fd pref.FieldDescriptor, zero pref.Value) pref.Value { - if fd.Cardinality() == pref.Repeated { - // Default isn't defined for repeated fields. 
- return zero - } - return fd.Default() - } - switch fd.Kind() { - case pref.BoolKind: - if t.Kind() == reflect.Bool { - return &boolConverter{t, defVal(fd, boolZero)} - } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: - if t.Kind() == reflect.Int32 { - return &int32Converter{t, defVal(fd, int32Zero)} - } - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: - if t.Kind() == reflect.Int64 { - return &int64Converter{t, defVal(fd, int64Zero)} - } - case pref.Uint32Kind, pref.Fixed32Kind: - if t.Kind() == reflect.Uint32 { - return &uint32Converter{t, defVal(fd, uint32Zero)} - } - case pref.Uint64Kind, pref.Fixed64Kind: - if t.Kind() == reflect.Uint64 { - return &uint64Converter{t, defVal(fd, uint64Zero)} - } - case pref.FloatKind: - if t.Kind() == reflect.Float32 { - return &float32Converter{t, defVal(fd, float32Zero)} - } - case pref.DoubleKind: - if t.Kind() == reflect.Float64 { - return &float64Converter{t, defVal(fd, float64Zero)} - } - case pref.StringKind: - if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) { - return &stringConverter{t, defVal(fd, stringZero)} - } - case pref.BytesKind: - if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) { - return &bytesConverter{t, defVal(fd, bytesZero)} - } - case pref.EnumKind: - // Handle enums, which must be a named int32 type. - if t.Kind() == reflect.Int32 { - return newEnumConverter(t, fd) - } - case pref.MessageKind, pref.GroupKind: - return newMessageConverter(t) - } - panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) -} - -type boolConverter struct { - goType reflect.Type - def pref.Value -} - -func (c *boolConverter) PBValueOf(v reflect.Value) pref.Value { - if v.Type() != c.goType { - panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) - } - return pref.ValueOfBool(v.Bool()) -} -func (c *boolConverter) GoValueOf(v pref.Value) reflect.Value { - return reflect.ValueOf(v.Bool()).Convert(c.goType) -} -func (c *boolConverter) IsValidPB(v pref.Value) bool { - _, ok := v.Interface().(bool) - return ok -} -func (c *boolConverter) IsValidGo(v reflect.Value) bool { - return v.IsValid() && v.Type() == c.goType -} -func (c *boolConverter) New() pref.Value { return c.def } -func (c *boolConverter) Zero() pref.Value { return c.def } - -type int32Converter struct { - goType reflect.Type - def pref.Value -} - -func (c *int32Converter) PBValueOf(v reflect.Value) pref.Value { - if v.Type() != c.goType { - panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) - } - return pref.ValueOfInt32(int32(v.Int())) -} -func (c *int32Converter) GoValueOf(v pref.Value) reflect.Value { - return reflect.ValueOf(int32(v.Int())).Convert(c.goType) -} -func (c *int32Converter) IsValidPB(v pref.Value) bool { - _, ok := v.Interface().(int32) - return ok -} -func (c *int32Converter) IsValidGo(v reflect.Value) bool { - return v.IsValid() && v.Type() == c.goType -} -func (c *int32Converter) New() pref.Value { return c.def } -func (c *int32Converter) Zero() pref.Value { return c.def } - -type int64Converter struct { - goType reflect.Type - def pref.Value -} - -func (c *int64Converter) PBValueOf(v reflect.Value) pref.Value { - if v.Type() != c.goType { - panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) - } - return pref.ValueOfInt64(int64(v.Int())) -} -func (c *int64Converter) GoValueOf(v pref.Value) reflect.Value { - return reflect.ValueOf(int64(v.Int())).Convert(c.goType) -} -func (c *int64Converter) IsValidPB(v 
pref.Value) bool { - _, ok := v.Interface().(int64) - return ok -} -func (c *int64Converter) IsValidGo(v reflect.Value) bool { - return v.IsValid() && v.Type() == c.goType -} -func (c *int64Converter) New() pref.Value { return c.def } -func (c *int64Converter) Zero() pref.Value { return c.def } - -type uint32Converter struct { - goType reflect.Type - def pref.Value -} - -func (c *uint32Converter) PBValueOf(v reflect.Value) pref.Value { - if v.Type() != c.goType { - panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) - } - return pref.ValueOfUint32(uint32(v.Uint())) -} -func (c *uint32Converter) GoValueOf(v pref.Value) reflect.Value { - return reflect.ValueOf(uint32(v.Uint())).Convert(c.goType) -} -func (c *uint32Converter) IsValidPB(v pref.Value) bool { - _, ok := v.Interface().(uint32) - return ok -} -func (c *uint32Converter) IsValidGo(v reflect.Value) bool { - return v.IsValid() && v.Type() == c.goType -} -func (c *uint32Converter) New() pref.Value { return c.def } -func (c *uint32Converter) Zero() pref.Value { return c.def } - -type uint64Converter struct { - goType reflect.Type - def pref.Value -} - -func (c *uint64Converter) PBValueOf(v reflect.Value) pref.Value { - if v.Type() != c.goType { - panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) - } - return pref.ValueOfUint64(uint64(v.Uint())) -} -func (c *uint64Converter) GoValueOf(v pref.Value) reflect.Value { - return reflect.ValueOf(uint64(v.Uint())).Convert(c.goType) -} -func (c *uint64Converter) IsValidPB(v pref.Value) bool { - _, ok := v.Interface().(uint64) - return ok -} -func (c *uint64Converter) IsValidGo(v reflect.Value) bool { - return v.IsValid() && v.Type() == c.goType -} -func (c *uint64Converter) New() pref.Value { return c.def } -func (c *uint64Converter) Zero() pref.Value { return c.def } - -type float32Converter struct { - goType reflect.Type - def pref.Value -} - -func (c *float32Converter) PBValueOf(v reflect.Value) pref.Value { - if v.Type() != c.goType { - panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) - } - return pref.ValueOfFloat32(float32(v.Float())) -} -func (c *float32Converter) GoValueOf(v pref.Value) reflect.Value { - return reflect.ValueOf(float32(v.Float())).Convert(c.goType) -} -func (c *float32Converter) IsValidPB(v pref.Value) bool { - _, ok := v.Interface().(float32) - return ok -} -func (c *float32Converter) IsValidGo(v reflect.Value) bool { - return v.IsValid() && v.Type() == c.goType -} -func (c *float32Converter) New() pref.Value { return c.def } -func (c *float32Converter) Zero() pref.Value { return c.def } - -type float64Converter struct { - goType reflect.Type - def pref.Value -} - -func (c *float64Converter) PBValueOf(v reflect.Value) pref.Value { - if v.Type() != c.goType { - panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) - } - return pref.ValueOfFloat64(float64(v.Float())) -} -func (c *float64Converter) GoValueOf(v pref.Value) reflect.Value { - return reflect.ValueOf(float64(v.Float())).Convert(c.goType) -} -func (c *float64Converter) IsValidPB(v pref.Value) bool { - _, ok := v.Interface().(float64) - return ok -} -func (c *float64Converter) IsValidGo(v reflect.Value) bool { - return v.IsValid() && v.Type() == c.goType -} -func (c *float64Converter) New() pref.Value { return c.def } -func (c *float64Converter) Zero() pref.Value { return c.def } - -type stringConverter struct { - goType reflect.Type - def pref.Value -} - -func (c *stringConverter) PBValueOf(v reflect.Value) pref.Value { - if 
v.Type() != c.goType { - panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) - } - return pref.ValueOfString(v.Convert(stringType).String()) -} -func (c *stringConverter) GoValueOf(v pref.Value) reflect.Value { - // pref.Value.String never panics, so we go through an interface - // conversion here to check the type. - s := v.Interface().(string) - if c.goType.Kind() == reflect.Slice && s == "" { - return reflect.Zero(c.goType) // ensure empty string is []byte(nil) - } - return reflect.ValueOf(s).Convert(c.goType) -} -func (c *stringConverter) IsValidPB(v pref.Value) bool { - _, ok := v.Interface().(string) - return ok -} -func (c *stringConverter) IsValidGo(v reflect.Value) bool { - return v.IsValid() && v.Type() == c.goType -} -func (c *stringConverter) New() pref.Value { return c.def } -func (c *stringConverter) Zero() pref.Value { return c.def } - -type bytesConverter struct { - goType reflect.Type - def pref.Value -} - -func (c *bytesConverter) PBValueOf(v reflect.Value) pref.Value { - if v.Type() != c.goType { - panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) - } - if c.goType.Kind() == reflect.String && v.Len() == 0 { - return pref.ValueOfBytes(nil) // ensure empty string is []byte(nil) - } - return pref.ValueOfBytes(v.Convert(bytesType).Bytes()) -} -func (c *bytesConverter) GoValueOf(v pref.Value) reflect.Value { - return reflect.ValueOf(v.Bytes()).Convert(c.goType) -} -func (c *bytesConverter) IsValidPB(v pref.Value) bool { - _, ok := v.Interface().([]byte) - return ok -} -func (c *bytesConverter) IsValidGo(v reflect.Value) bool { - return v.IsValid() && v.Type() == c.goType -} -func (c *bytesConverter) New() pref.Value { return c.def } -func (c *bytesConverter) Zero() pref.Value { return c.def } - -type enumConverter struct { - goType reflect.Type - def pref.Value -} - -func newEnumConverter(goType reflect.Type, fd pref.FieldDescriptor) Converter { - var def pref.Value - if fd.Cardinality() == pref.Repeated { - def = pref.ValueOfEnum(fd.Enum().Values().Get(0).Number()) - } else { - def = fd.Default() - } - return &enumConverter{goType, def} -} - -func (c *enumConverter) PBValueOf(v reflect.Value) pref.Value { - if v.Type() != c.goType { - panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) - } - return pref.ValueOfEnum(pref.EnumNumber(v.Int())) -} - -func (c *enumConverter) GoValueOf(v pref.Value) reflect.Value { - return reflect.ValueOf(v.Enum()).Convert(c.goType) -} - -func (c *enumConverter) IsValidPB(v pref.Value) bool { - _, ok := v.Interface().(pref.EnumNumber) - return ok -} - -func (c *enumConverter) IsValidGo(v reflect.Value) bool { - return v.IsValid() && v.Type() == c.goType -} - -func (c *enumConverter) New() pref.Value { - return c.def -} - -func (c *enumConverter) Zero() pref.Value { - return c.def -} - -type messageConverter struct { - goType reflect.Type -} - -func newMessageConverter(goType reflect.Type) Converter { - return &messageConverter{goType} -} - -func (c *messageConverter) PBValueOf(v reflect.Value) pref.Value { - if v.Type() != c.goType { - panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) - } - if c.isNonPointer() { - if v.CanAddr() { - v = v.Addr() // T => *T - } else { - v = reflect.Zero(reflect.PtrTo(v.Type())) - } - } - if m, ok := v.Interface().(pref.ProtoMessage); ok { - return pref.ValueOfMessage(m.ProtoReflect()) - } - return pref.ValueOfMessage(legacyWrapMessage(v)) -} - -func (c *messageConverter) GoValueOf(v pref.Value) reflect.Value { - m := v.Message() - 
var rv reflect.Value - if u, ok := m.(unwrapper); ok { - rv = reflect.ValueOf(u.protoUnwrap()) - } else { - rv = reflect.ValueOf(m.Interface()) - } - if c.isNonPointer() { - if rv.Type() != reflect.PtrTo(c.goType) { - panic(fmt.Sprintf("invalid type: got %v, want %v", rv.Type(), reflect.PtrTo(c.goType))) - } - if !rv.IsNil() { - rv = rv.Elem() // *T => T - } else { - rv = reflect.Zero(rv.Type().Elem()) - } - } - if rv.Type() != c.goType { - panic(fmt.Sprintf("invalid type: got %v, want %v", rv.Type(), c.goType)) - } - return rv -} - -func (c *messageConverter) IsValidPB(v pref.Value) bool { - m := v.Message() - var rv reflect.Value - if u, ok := m.(unwrapper); ok { - rv = reflect.ValueOf(u.protoUnwrap()) - } else { - rv = reflect.ValueOf(m.Interface()) - } - if c.isNonPointer() { - return rv.Type() == reflect.PtrTo(c.goType) - } - return rv.Type() == c.goType -} - -func (c *messageConverter) IsValidGo(v reflect.Value) bool { - return v.IsValid() && v.Type() == c.goType -} - -func (c *messageConverter) New() pref.Value { - if c.isNonPointer() { - return c.PBValueOf(reflect.New(c.goType).Elem()) - } - return c.PBValueOf(reflect.New(c.goType.Elem())) -} - -func (c *messageConverter) Zero() pref.Value { - return c.PBValueOf(reflect.Zero(c.goType)) -} - -// isNonPointer reports whether the type is a non-pointer type. -// This never occurs for generated message types. -func (c *messageConverter) isNonPointer() bool { - return c.goType.Kind() != reflect.Ptr -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go deleted file mode 100644 index 6fccab520..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package impl - -import ( - "fmt" - "reflect" - - pref "google.golang.org/protobuf/reflect/protoreflect" -) - -func newListConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { - switch { - case t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Slice: - return &listPtrConverter{t, newSingularConverter(t.Elem().Elem(), fd)} - case t.Kind() == reflect.Slice: - return &listConverter{t, newSingularConverter(t.Elem(), fd)} - } - panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) -} - -type listConverter struct { - goType reflect.Type // []T - c Converter -} - -func (c *listConverter) PBValueOf(v reflect.Value) pref.Value { - if v.Type() != c.goType { - panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) - } - pv := reflect.New(c.goType) - pv.Elem().Set(v) - return pref.ValueOfList(&listReflect{pv, c.c}) -} - -func (c *listConverter) GoValueOf(v pref.Value) reflect.Value { - rv := v.List().(*listReflect).v - if rv.IsNil() { - return reflect.Zero(c.goType) - } - return rv.Elem() -} - -func (c *listConverter) IsValidPB(v pref.Value) bool { - list, ok := v.Interface().(*listReflect) - if !ok { - return false - } - return list.v.Type().Elem() == c.goType -} - -func (c *listConverter) IsValidGo(v reflect.Value) bool { - return v.IsValid() && v.Type() == c.goType -} - -func (c *listConverter) New() pref.Value { - return pref.ValueOfList(&listReflect{reflect.New(c.goType), c.c}) -} - -func (c *listConverter) Zero() pref.Value { - return pref.ValueOfList(&listReflect{reflect.Zero(reflect.PtrTo(c.goType)), c.c}) -} - -type listPtrConverter struct { - goType reflect.Type // *[]T - c Converter -} - -func (c *listPtrConverter) PBValueOf(v reflect.Value) pref.Value { - if v.Type() != c.goType { - panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) - } - return pref.ValueOfList(&listReflect{v, c.c}) -} - -func (c *listPtrConverter) GoValueOf(v pref.Value) reflect.Value { - return v.List().(*listReflect).v -} - -func (c *listPtrConverter) IsValidPB(v pref.Value) bool { - list, ok := v.Interface().(*listReflect) - if !ok { - return false - } - return list.v.Type() == c.goType -} - -func (c *listPtrConverter) IsValidGo(v reflect.Value) bool { - return v.IsValid() && v.Type() == c.goType -} - -func (c *listPtrConverter) New() pref.Value { - return c.PBValueOf(reflect.New(c.goType.Elem())) -} - -func (c *listPtrConverter) Zero() pref.Value { - return c.PBValueOf(reflect.Zero(c.goType)) -} - -type listReflect struct { - v reflect.Value // *[]T - conv Converter -} - -func (ls *listReflect) Len() int { - if ls.v.IsNil() { - return 0 - } - return ls.v.Elem().Len() -} -func (ls *listReflect) Get(i int) pref.Value { - return ls.conv.PBValueOf(ls.v.Elem().Index(i)) -} -func (ls *listReflect) Set(i int, v pref.Value) { - ls.v.Elem().Index(i).Set(ls.conv.GoValueOf(v)) -} -func (ls *listReflect) Append(v pref.Value) { - ls.v.Elem().Set(reflect.Append(ls.v.Elem(), ls.conv.GoValueOf(v))) -} -func (ls *listReflect) AppendMutable() pref.Value { - if _, ok := ls.conv.(*messageConverter); !ok { - panic("invalid AppendMutable on list with non-message type") - } - v := ls.NewElement() - ls.Append(v) - return v -} -func (ls *listReflect) Truncate(i int) { - ls.v.Elem().Set(ls.v.Elem().Slice(0, i)) -} -func (ls *listReflect) NewElement() pref.Value { - return ls.conv.New() -} -func (ls *listReflect) IsValid() bool { - return !ls.v.IsNil() -} -func (ls *listReflect) protoUnwrap() interface{} { - return ls.v.Interface() -} diff --git 
a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go deleted file mode 100644 index de06b2593..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package impl - -import ( - "fmt" - "reflect" - - pref "google.golang.org/protobuf/reflect/protoreflect" -) - -type mapConverter struct { - goType reflect.Type // map[K]V - keyConv, valConv Converter -} - -func newMapConverter(t reflect.Type, fd pref.FieldDescriptor) *mapConverter { - if t.Kind() != reflect.Map { - panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) - } - return &mapConverter{ - goType: t, - keyConv: newSingularConverter(t.Key(), fd.MapKey()), - valConv: newSingularConverter(t.Elem(), fd.MapValue()), - } -} - -func (c *mapConverter) PBValueOf(v reflect.Value) pref.Value { - if v.Type() != c.goType { - panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) - } - return pref.ValueOfMap(&mapReflect{v, c.keyConv, c.valConv}) -} - -func (c *mapConverter) GoValueOf(v pref.Value) reflect.Value { - return v.Map().(*mapReflect).v -} - -func (c *mapConverter) IsValidPB(v pref.Value) bool { - mapv, ok := v.Interface().(*mapReflect) - if !ok { - return false - } - return mapv.v.Type() == c.goType -} - -func (c *mapConverter) IsValidGo(v reflect.Value) bool { - return v.IsValid() && v.Type() == c.goType -} - -func (c *mapConverter) New() pref.Value { - return c.PBValueOf(reflect.MakeMap(c.goType)) -} - -func (c *mapConverter) Zero() pref.Value { - return c.PBValueOf(reflect.Zero(c.goType)) -} - -type mapReflect struct { - v reflect.Value // map[K]V - keyConv Converter - valConv Converter -} - -func (ms *mapReflect) Len() int { - return ms.v.Len() -} -func (ms *mapReflect) Has(k pref.MapKey) bool { - rk := ms.keyConv.GoValueOf(k.Value()) - rv := ms.v.MapIndex(rk) - return rv.IsValid() -} -func (ms *mapReflect) Get(k pref.MapKey) pref.Value { - rk := ms.keyConv.GoValueOf(k.Value()) - rv := ms.v.MapIndex(rk) - if !rv.IsValid() { - return pref.Value{} - } - return ms.valConv.PBValueOf(rv) -} -func (ms *mapReflect) Set(k pref.MapKey, v pref.Value) { - rk := ms.keyConv.GoValueOf(k.Value()) - rv := ms.valConv.GoValueOf(v) - ms.v.SetMapIndex(rk, rv) -} -func (ms *mapReflect) Clear(k pref.MapKey) { - rk := ms.keyConv.GoValueOf(k.Value()) - ms.v.SetMapIndex(rk, reflect.Value{}) -} -func (ms *mapReflect) Mutable(k pref.MapKey) pref.Value { - if _, ok := ms.valConv.(*messageConverter); !ok { - panic("invalid Mutable on map with non-message value type") - } - v := ms.Get(k) - if !v.IsValid() { - v = ms.NewValue() - ms.Set(k, v) - } - return v -} -func (ms *mapReflect) Range(f func(pref.MapKey, pref.Value) bool) { - iter := mapRange(ms.v) - for iter.Next() { - k := ms.keyConv.PBValueOf(iter.Key()).MapKey() - v := ms.valConv.PBValueOf(iter.Value()) - if !f(k, v) { - return - } - } -} -func (ms *mapReflect) NewValue() pref.Value { - return ms.valConv.New() -} -func (ms *mapReflect) IsValid() bool { - return !ms.v.IsNil() -} -func (ms *mapReflect) protoUnwrap() interface{} { - return ms.v.Interface() -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go deleted file mode 100644 index c65b0325c..000000000 --- 
a/vendor/google.golang.org/protobuf/internal/impl/decode.go +++ /dev/null @@ -1,284 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package impl - -import ( - "math/bits" - - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/flags" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" - "google.golang.org/protobuf/runtime/protoiface" - piface "google.golang.org/protobuf/runtime/protoiface" -) - -var errDecode = errors.New("cannot parse invalid wire-format data") -var errRecursionDepth = errors.New("exceeded maximum recursion depth") - -type unmarshalOptions struct { - flags protoiface.UnmarshalInputFlags - resolver interface { - FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) - FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) - } - depth int -} - -func (o unmarshalOptions) Options() proto.UnmarshalOptions { - return proto.UnmarshalOptions{ - Merge: true, - AllowPartial: true, - DiscardUnknown: o.DiscardUnknown(), - Resolver: o.resolver, - } -} - -func (o unmarshalOptions) DiscardUnknown() bool { return o.flags&piface.UnmarshalDiscardUnknown != 0 } - -func (o unmarshalOptions) IsDefault() bool { - return o.flags == 0 && o.resolver == preg.GlobalTypes -} - -var lazyUnmarshalOptions = unmarshalOptions{ - resolver: preg.GlobalTypes, - depth: protowire.DefaultRecursionLimit, -} - -type unmarshalOutput struct { - n int // number of bytes consumed - initialized bool -} - -// unmarshal is protoreflect.Methods.Unmarshal. -func (mi *MessageInfo) unmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) { - var p pointer - if ms, ok := in.Message.(*messageState); ok { - p = ms.pointer() - } else { - p = in.Message.(*messageReflectWrapper).pointer() - } - out, err := mi.unmarshalPointer(in.Buf, p, 0, unmarshalOptions{ - flags: in.Flags, - resolver: in.Resolver, - depth: in.Depth, - }) - var flags piface.UnmarshalOutputFlags - if out.initialized { - flags |= piface.UnmarshalInitialized - } - return piface.UnmarshalOutput{ - Flags: flags, - }, err -} - -// errUnknown is returned during unmarshaling to indicate a parse error that -// should result in a field being placed in the unknown fields section (for example, -// when the wire type doesn't match) as opposed to the entire unmarshal operation -// failing (for example, when a field extends past the available input). -// -// This is a sentinel error which should never be visible to the user. -var errUnknown = errors.New("unknown") - -func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) { - mi.init() - opts.depth-- - if opts.depth < 0 { - return out, errRecursionDepth - } - if flags.ProtoLegacy && mi.isMessageSet { - return unmarshalMessageSet(mi, b, p, opts) - } - initialized := true - var requiredMask uint64 - var exts *map[int32]ExtensionField - start := len(b) - for len(b) > 0 { - // Parse the tag (field number and wire type). 
- var tag uint64 - if b[0] < 0x80 { - tag = uint64(b[0]) - b = b[1:] - } else if len(b) >= 2 && b[1] < 128 { - tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 - b = b[2:] - } else { - var n int - tag, n = protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - b = b[n:] - } - var num protowire.Number - if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) { - return out, errDecode - } else { - num = protowire.Number(n) - } - wtyp := protowire.Type(tag & 7) - - if wtyp == protowire.EndGroupType { - if num != groupTag { - return out, errDecode - } - groupTag = 0 - break - } - - var f *coderFieldInfo - if int(num) < len(mi.denseCoderFields) { - f = mi.denseCoderFields[num] - } else { - f = mi.coderFields[num] - } - var n int - err := errUnknown - switch { - case f != nil: - if f.funcs.unmarshal == nil { - break - } - var o unmarshalOutput - o, err = f.funcs.unmarshal(b, p.Apply(f.offset), wtyp, f, opts) - n = o.n - if err != nil { - break - } - requiredMask |= f.validation.requiredBit - if f.funcs.isInit != nil && !o.initialized { - initialized = false - } - default: - // Possible extension. - if exts == nil && mi.extensionOffset.IsValid() { - exts = p.Apply(mi.extensionOffset).Extensions() - if *exts == nil { - *exts = make(map[int32]ExtensionField) - } - } - if exts == nil { - break - } - var o unmarshalOutput - o, err = mi.unmarshalExtension(b, num, wtyp, *exts, opts) - if err != nil { - break - } - n = o.n - if !o.initialized { - initialized = false - } - } - if err != nil { - if err != errUnknown { - return out, err - } - n = protowire.ConsumeFieldValue(num, wtyp, b) - if n < 0 { - return out, errDecode - } - if !opts.DiscardUnknown() && mi.unknownOffset.IsValid() { - u := mi.mutableUnknownBytes(p) - *u = protowire.AppendTag(*u, num, wtyp) - *u = append(*u, b[:n]...) - } - } - b = b[n:] - } - if groupTag != 0 { - return out, errDecode - } - if mi.numRequiredFields > 0 && bits.OnesCount64(requiredMask) != int(mi.numRequiredFields) { - initialized = false - } - if initialized { - out.initialized = true - } - out.n = start - len(b) - return out, nil -} - -func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp protowire.Type, exts map[int32]ExtensionField, opts unmarshalOptions) (out unmarshalOutput, err error) { - x := exts[int32(num)] - xt := x.Type() - if xt == nil { - var err error - xt, err = opts.resolver.FindExtensionByNumber(mi.Desc.FullName(), num) - if err != nil { - if err == preg.NotFound { - return out, errUnknown - } - return out, errors.New("%v: unable to resolve extension %v: %v", mi.Desc.FullName(), num, err) - } - } - xi := getExtensionFieldInfo(xt) - if xi.funcs.unmarshal == nil { - return out, errUnknown - } - if flags.LazyUnmarshalExtensions { - if opts.IsDefault() && x.canLazy(xt) { - out, valid := skipExtension(b, xi, num, wtyp, opts) - switch valid { - case ValidationValid: - if out.initialized { - x.appendLazyBytes(xt, xi, num, wtyp, b[:out.n]) - exts[int32(num)] = x - return out, nil - } - case ValidationInvalid: - return out, errDecode - case ValidationUnknown: - } - } - } - ival := x.Value() - if !ival.IsValid() && xi.unmarshalNeedsValue { - // Create a new message, list, or map value to fill in. - // For enums, create a prototype value to let the unmarshal func know the - // concrete type. 
- ival = xt.New() - } - v, out, err := xi.funcs.unmarshal(b, ival, num, wtyp, opts) - if err != nil { - return out, err - } - if xi.funcs.isInit == nil { - out.initialized = true - } - x.Set(xt, v) - exts[int32(num)] = x - return out, nil -} - -func skipExtension(b []byte, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, _ ValidationStatus) { - if xi.validation.mi == nil { - return out, ValidationUnknown - } - xi.validation.mi.init() - switch xi.validation.typ { - case validationTypeMessage: - if wtyp != protowire.BytesType { - return out, ValidationUnknown - } - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return out, ValidationUnknown - } - out, st := xi.validation.mi.validate(v, 0, opts) - out.n = n - return out, st - case validationTypeGroup: - if wtyp != protowire.StartGroupType { - return out, ValidationUnknown - } - out, st := xi.validation.mi.validate(b, num, opts) - return out, st - default: - return out, ValidationUnknown - } -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go deleted file mode 100644 index 845c67d6e..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/encode.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package impl - -import ( - "math" - "sort" - "sync/atomic" - - "google.golang.org/protobuf/internal/flags" - proto "google.golang.org/protobuf/proto" - piface "google.golang.org/protobuf/runtime/protoiface" -) - -type marshalOptions struct { - flags piface.MarshalInputFlags -} - -func (o marshalOptions) Options() proto.MarshalOptions { - return proto.MarshalOptions{ - AllowPartial: true, - Deterministic: o.Deterministic(), - UseCachedSize: o.UseCachedSize(), - } -} - -func (o marshalOptions) Deterministic() bool { return o.flags&piface.MarshalDeterministic != 0 } -func (o marshalOptions) UseCachedSize() bool { return o.flags&piface.MarshalUseCachedSize != 0 } - -// size is protoreflect.Methods.Size. 
-func (mi *MessageInfo) size(in piface.SizeInput) piface.SizeOutput { - var p pointer - if ms, ok := in.Message.(*messageState); ok { - p = ms.pointer() - } else { - p = in.Message.(*messageReflectWrapper).pointer() - } - size := mi.sizePointer(p, marshalOptions{ - flags: in.Flags, - }) - return piface.SizeOutput{Size: size} -} - -func (mi *MessageInfo) sizePointer(p pointer, opts marshalOptions) (size int) { - mi.init() - if p.IsNil() { - return 0 - } - if opts.UseCachedSize() && mi.sizecacheOffset.IsValid() { - if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size >= 0 { - return int(size) - } - } - return mi.sizePointerSlow(p, opts) -} - -func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int) { - if flags.ProtoLegacy && mi.isMessageSet { - size = sizeMessageSet(mi, p, opts) - if mi.sizecacheOffset.IsValid() { - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) - } - return size - } - if mi.extensionOffset.IsValid() { - e := p.Apply(mi.extensionOffset).Extensions() - size += mi.sizeExtensions(e, opts) - } - for _, f := range mi.orderedCoderFields { - if f.funcs.size == nil { - continue - } - fptr := p.Apply(f.offset) - if f.isPointer && fptr.Elem().IsNil() { - continue - } - size += f.funcs.size(fptr, f, opts) - } - if mi.unknownOffset.IsValid() { - if u := mi.getUnknownBytes(p); u != nil { - size += len(*u) - } - } - if mi.sizecacheOffset.IsValid() { - if size > math.MaxInt32 { - // The size is too large for the int32 sizecache field. - // We will need to recompute the size when encoding; - // unfortunately expensive, but better than invalid output. - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), -1) - } else { - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) - } - } - return size -} - -// marshal is protoreflect.Methods.Marshal. -func (mi *MessageInfo) marshal(in piface.MarshalInput) (out piface.MarshalOutput, err error) { - var p pointer - if ms, ok := in.Message.(*messageState); ok { - p = ms.pointer() - } else { - p = in.Message.(*messageReflectWrapper).pointer() - } - b, err := mi.marshalAppendPointer(in.Buf, p, marshalOptions{ - flags: in.Flags, - }) - return piface.MarshalOutput{Buf: b}, err -} - -func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOptions) ([]byte, error) { - mi.init() - if p.IsNil() { - return b, nil - } - if flags.ProtoLegacy && mi.isMessageSet { - return marshalMessageSet(mi, b, p, opts) - } - var err error - // The old marshaler encodes extensions at beginning. - if mi.extensionOffset.IsValid() { - e := p.Apply(mi.extensionOffset).Extensions() - // TODO: Special handling for MessageSet? - b, err = mi.appendExtensions(b, e, opts) - if err != nil { - return b, err - } - } - for _, f := range mi.orderedCoderFields { - if f.funcs.marshal == nil { - continue - } - fptr := p.Apply(f.offset) - if f.isPointer && fptr.Elem().IsNil() { - continue - } - b, err = f.funcs.marshal(b, fptr, f, opts) - if err != nil { - return b, err - } - } - if mi.unknownOffset.IsValid() && !mi.isMessageSet { - if u := mi.getUnknownBytes(p); u != nil { - b = append(b, (*u)...) 
- } - } - return b, nil -} - -func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) { - if ext == nil { - return 0 - } - for _, x := range *ext { - xi := getExtensionFieldInfo(x.Type()) - if xi.funcs.size == nil { - continue - } - n += xi.funcs.size(x.Value(), xi.tagsize, opts) - } - return n -} - -func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, opts marshalOptions) ([]byte, error) { - if ext == nil { - return b, nil - } - - switch len(*ext) { - case 0: - return b, nil - case 1: - // Fast-path for one extension: Don't bother sorting the keys. - var err error - for _, x := range *ext { - xi := getExtensionFieldInfo(x.Type()) - b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) - } - return b, err - default: - // Sort the keys to provide a deterministic encoding. - // Not sure this is required, but the old code does it. - keys := make([]int, 0, len(*ext)) - for k := range *ext { - keys = append(keys, int(k)) - } - sort.Ints(keys) - var err error - for _, k := range keys { - x := (*ext)[int32(k)] - xi := getExtensionFieldInfo(x.Type()) - b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) - if err != nil { - return b, err - } - } - return b, nil - } -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/enum.go b/vendor/google.golang.org/protobuf/internal/impl/enum.go deleted file mode 100644 index 8c1eab4bf..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/enum.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package impl - -import ( - "reflect" - - pref "google.golang.org/protobuf/reflect/protoreflect" -) - -type EnumInfo struct { - GoReflectType reflect.Type // int32 kind - Desc pref.EnumDescriptor -} - -func (t *EnumInfo) New(n pref.EnumNumber) pref.Enum { - return reflect.ValueOf(n).Convert(t.GoReflectType).Interface().(pref.Enum) -} -func (t *EnumInfo) Descriptor() pref.EnumDescriptor { return t.Desc } diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go deleted file mode 100644 index e904fd993..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/extension.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package impl - -import ( - "reflect" - "sync" - "sync/atomic" - - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" -) - -// ExtensionInfo implements ExtensionType. -// -// This type contains a number of exported fields for legacy compatibility. -// The only non-deprecated use of this type is through the methods of the -// ExtensionType interface. -type ExtensionInfo struct { - // An ExtensionInfo may exist in several stages of initialization. - // - // extensionInfoUninitialized: Some or all of the legacy exported - // fields may be set, but none of the unexported fields have been - // initialized. This is the starting state for an ExtensionInfo - // in legacy generated code. - // - // extensionInfoDescInit: The desc field is set, but other unexported fields - // may not be initialized. Legacy exported fields may or may not be set. - // This is the starting state for an ExtensionInfo in newly generated code. 
- // - // extensionInfoFullInit: The ExtensionInfo is fully initialized. - // This state is only entered after lazy initialization is complete. - init uint32 - mu sync.Mutex - - goType reflect.Type - desc extensionTypeDescriptor - conv Converter - info *extensionFieldInfo // for fast-path method implementations - - // ExtendedType is a typed nil-pointer to the parent message type that - // is being extended. It is possible for this to be unpopulated in v2 - // since the message may no longer implement the MessageV1 interface. - // - // Deprecated: Use the ExtendedType method instead. - ExtendedType piface.MessageV1 - - // ExtensionType is the zero value of the extension type. - // - // For historical reasons, reflect.TypeOf(ExtensionType) and the - // type returned by InterfaceOf may not be identical. - // - // Deprecated: Use InterfaceOf(xt.Zero()) instead. - ExtensionType interface{} - - // Field is the field number of the extension. - // - // Deprecated: Use the Descriptor().Number method instead. - Field int32 - - // Name is the fully qualified name of extension. - // - // Deprecated: Use the Descriptor().FullName method instead. - Name string - - // Tag is the protobuf struct tag used in the v1 API. - // - // Deprecated: Do not use. - Tag string - - // Filename is the proto filename in which the extension is defined. - // - // Deprecated: Use Descriptor().ParentFile().Path() instead. - Filename string -} - -// Stages of initialization: See the ExtensionInfo.init field. -const ( - extensionInfoUninitialized = 0 - extensionInfoDescInit = 1 - extensionInfoFullInit = 2 -) - -func InitExtensionInfo(xi *ExtensionInfo, xd pref.ExtensionDescriptor, goType reflect.Type) { - xi.goType = goType - xi.desc = extensionTypeDescriptor{xd, xi} - xi.init = extensionInfoDescInit -} - -func (xi *ExtensionInfo) New() pref.Value { - return xi.lazyInit().New() -} -func (xi *ExtensionInfo) Zero() pref.Value { - return xi.lazyInit().Zero() -} -func (xi *ExtensionInfo) ValueOf(v interface{}) pref.Value { - return xi.lazyInit().PBValueOf(reflect.ValueOf(v)) -} -func (xi *ExtensionInfo) InterfaceOf(v pref.Value) interface{} { - return xi.lazyInit().GoValueOf(v).Interface() -} -func (xi *ExtensionInfo) IsValidValue(v pref.Value) bool { - return xi.lazyInit().IsValidPB(v) -} -func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool { - return xi.lazyInit().IsValidGo(reflect.ValueOf(v)) -} -func (xi *ExtensionInfo) TypeDescriptor() pref.ExtensionTypeDescriptor { - if atomic.LoadUint32(&xi.init) < extensionInfoDescInit { - xi.lazyInitSlow() - } - return &xi.desc -} - -func (xi *ExtensionInfo) lazyInit() Converter { - if atomic.LoadUint32(&xi.init) < extensionInfoFullInit { - xi.lazyInitSlow() - } - return xi.conv -} - -func (xi *ExtensionInfo) lazyInitSlow() { - xi.mu.Lock() - defer xi.mu.Unlock() - - if xi.init == extensionInfoFullInit { - return - } - defer atomic.StoreUint32(&xi.init, extensionInfoFullInit) - - if xi.desc.ExtensionDescriptor == nil { - xi.initFromLegacy() - } - if !xi.desc.ExtensionDescriptor.IsPlaceholder() { - if xi.ExtensionType == nil { - xi.initToLegacy() - } - xi.conv = NewConverter(xi.goType, xi.desc.ExtensionDescriptor) - xi.info = makeExtensionFieldInfo(xi.desc.ExtensionDescriptor) - xi.info.validation = newValidationInfo(xi.desc.ExtensionDescriptor, xi.goType) - } -} - -type extensionTypeDescriptor struct { - pref.ExtensionDescriptor - xi *ExtensionInfo -} - -func (xtd *extensionTypeDescriptor) Type() pref.ExtensionType { - return xtd.xi -} -func (xtd *extensionTypeDescriptor) 
Descriptor() pref.ExtensionDescriptor { - return xtd.ExtensionDescriptor -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go deleted file mode 100644 index f7d7ffb51..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package impl - -import ( - "fmt" - "reflect" - "strings" - "sync" - - "google.golang.org/protobuf/internal/filedesc" - "google.golang.org/protobuf/internal/strs" - "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" -) - -// legacyEnumName returns the name of enums used in legacy code. -// It is neither the protobuf full name nor the qualified Go name, -// but rather an odd hybrid of both. -func legacyEnumName(ed pref.EnumDescriptor) string { - var protoPkg string - enumName := string(ed.FullName()) - if fd := ed.ParentFile(); fd != nil { - protoPkg = string(fd.Package()) - enumName = strings.TrimPrefix(enumName, protoPkg+".") - } - if protoPkg == "" { - return strs.GoCamelCase(enumName) - } - return protoPkg + "." + strs.GoCamelCase(enumName) -} - -// legacyWrapEnum wraps v as a protoreflect.Enum, -// where v must be a int32 kind and not implement the v2 API already. -func legacyWrapEnum(v reflect.Value) pref.Enum { - et := legacyLoadEnumType(v.Type()) - return et.New(pref.EnumNumber(v.Int())) -} - -var legacyEnumTypeCache sync.Map // map[reflect.Type]protoreflect.EnumType - -// legacyLoadEnumType dynamically loads a protoreflect.EnumType for t, -// where t must be an int32 kind and not implement the v2 API already. -func legacyLoadEnumType(t reflect.Type) pref.EnumType { - // Fast-path: check if a EnumType is cached for this concrete type. - if et, ok := legacyEnumTypeCache.Load(t); ok { - return et.(pref.EnumType) - } - - // Slow-path: derive enum descriptor and initialize EnumType. 
- var et pref.EnumType - ed := LegacyLoadEnumDesc(t) - et = &legacyEnumType{ - desc: ed, - goType: t, - } - if et, ok := legacyEnumTypeCache.LoadOrStore(t, et); ok { - return et.(pref.EnumType) - } - return et -} - -type legacyEnumType struct { - desc pref.EnumDescriptor - goType reflect.Type - m sync.Map // map[protoreflect.EnumNumber]proto.Enum -} - -func (t *legacyEnumType) New(n pref.EnumNumber) pref.Enum { - if e, ok := t.m.Load(n); ok { - return e.(pref.Enum) - } - e := &legacyEnumWrapper{num: n, pbTyp: t, goTyp: t.goType} - t.m.Store(n, e) - return e -} -func (t *legacyEnumType) Descriptor() pref.EnumDescriptor { - return t.desc -} - -type legacyEnumWrapper struct { - num pref.EnumNumber - pbTyp pref.EnumType - goTyp reflect.Type -} - -func (e *legacyEnumWrapper) Descriptor() pref.EnumDescriptor { - return e.pbTyp.Descriptor() -} -func (e *legacyEnumWrapper) Type() pref.EnumType { - return e.pbTyp -} -func (e *legacyEnumWrapper) Number() pref.EnumNumber { - return e.num -} -func (e *legacyEnumWrapper) ProtoReflect() pref.Enum { - return e -} -func (e *legacyEnumWrapper) protoUnwrap() interface{} { - v := reflect.New(e.goTyp).Elem() - v.SetInt(int64(e.num)) - return v.Interface() -} - -var ( - _ pref.Enum = (*legacyEnumWrapper)(nil) - _ unwrapper = (*legacyEnumWrapper)(nil) -) - -var legacyEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor - -// LegacyLoadEnumDesc returns an EnumDescriptor derived from the Go type, -// which must be an int32 kind and not implement the v2 API already. -// -// This is exported for testing purposes. -func LegacyLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { - // Fast-path: check if an EnumDescriptor is cached for this concrete type. - if ed, ok := legacyEnumDescCache.Load(t); ok { - return ed.(pref.EnumDescriptor) - } - - // Slow-path: initialize EnumDescriptor from the raw descriptor. - ev := reflect.Zero(t).Interface() - if _, ok := ev.(pref.Enum); ok { - panic(fmt.Sprintf("%v already implements proto.Enum", t)) - } - edV1, ok := ev.(enumV1) - if !ok { - return aberrantLoadEnumDesc(t) - } - b, idxs := edV1.EnumDescriptor() - - var ed pref.EnumDescriptor - if len(idxs) == 1 { - ed = legacyLoadFileDesc(b).Enums().Get(idxs[0]) - } else { - md := legacyLoadFileDesc(b).Messages().Get(idxs[0]) - for _, i := range idxs[1 : len(idxs)-1] { - md = md.Messages().Get(i) - } - ed = md.Enums().Get(idxs[len(idxs)-1]) - } - if ed, ok := legacyEnumDescCache.LoadOrStore(t, ed); ok { - return ed.(protoreflect.EnumDescriptor) - } - return ed -} - -var aberrantEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor - -// aberrantLoadEnumDesc returns an EnumDescriptor derived from the Go type, -// which must not implement protoreflect.Enum or enumV1. -// -// If the type does not implement enumV1, then there is no reliable -// way to derive the original protobuf type information. -// We are unable to use the global enum registry since it is -// unfortunately keyed by the protobuf full name, which we also do not know. -// Thus, this produces some bogus enum descriptor based on the Go type name. -func aberrantLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { - // Fast-path: check if an EnumDescriptor is cached for this concrete type. - if ed, ok := aberrantEnumDescCache.Load(t); ok { - return ed.(pref.EnumDescriptor) - } - - // Slow-path: construct a bogus, but unique EnumDescriptor. 
- ed := &filedesc.Enum{L2: new(filedesc.EnumL2)} - ed.L0.FullName = AberrantDeriveFullName(t) // e.g., github_com.user.repo.MyEnum - ed.L0.ParentFile = filedesc.SurrogateProto3 - ed.L2.Values.List = append(ed.L2.Values.List, filedesc.EnumValue{}) - - // TODO: Use the presence of a UnmarshalJSON method to determine proto2? - - vd := &ed.L2.Values.List[0] - vd.L0.FullName = ed.L0.FullName + "_UNKNOWN" // e.g., github_com.user.repo.MyEnum_UNKNOWN - vd.L0.ParentFile = ed.L0.ParentFile - vd.L0.Parent = ed - - // TODO: We could use the String method to obtain some enum value names by - // starting at 0 and print the enum until it produces invalid identifiers. - // An exhaustive query is clearly impractical, but can be best-effort. - - if ed, ok := aberrantEnumDescCache.LoadOrStore(t, ed); ok { - return ed.(pref.EnumDescriptor) - } - return ed -} - -// AberrantDeriveFullName derives a fully qualified protobuf name for the given Go type -// The provided name is not guaranteed to be stable nor universally unique. -// It should be sufficiently unique within a program. -// -// This is exported for testing purposes. -func AberrantDeriveFullName(t reflect.Type) pref.FullName { - sanitize := func(r rune) rune { - switch { - case r == '/': - return '.' - case 'a' <= r && r <= 'z', 'A' <= r && r <= 'Z', '0' <= r && r <= '9': - return r - default: - return '_' - } - } - prefix := strings.Map(sanitize, t.PkgPath()) - suffix := strings.Map(sanitize, t.Name()) - if suffix == "" { - suffix = fmt.Sprintf("UnknownX%X", reflect.ValueOf(t).Pointer()) - } - - ss := append(strings.Split(prefix, "."), suffix) - for i, s := range ss { - if s == "" || ('0' <= s[0] && s[0] <= '9') { - ss[i] = "x" + s - } - } - return pref.FullName(strings.Join(ss, ".")) -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go deleted file mode 100644 index e3fb0b578..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package impl - -import ( - "encoding/binary" - "encoding/json" - "hash/crc32" - "math" - "reflect" - - "google.golang.org/protobuf/internal/errors" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" -) - -// These functions exist to support exported APIs in generated protobufs. -// While these are deprecated, they cannot be removed for compatibility reasons. - -// LegacyEnumName returns the name of enums used in legacy code. -func (Export) LegacyEnumName(ed pref.EnumDescriptor) string { - return legacyEnumName(ed) -} - -// LegacyMessageTypeOf returns the protoreflect.MessageType for m, -// with name used as the message name if necessary. -func (Export) LegacyMessageTypeOf(m piface.MessageV1, name pref.FullName) pref.MessageType { - if mv := (Export{}).protoMessageV2Of(m); mv != nil { - return mv.ProtoReflect().Type() - } - return legacyLoadMessageType(reflect.TypeOf(m), name) -} - -// UnmarshalJSONEnum unmarshals an enum from a JSON-encoded input. -// The input can either be a string representing the enum value by name, -// or a number representing the enum number itself. 
-func (Export) UnmarshalJSONEnum(ed pref.EnumDescriptor, b []byte) (pref.EnumNumber, error) { - if b[0] == '"' { - var name pref.Name - if err := json.Unmarshal(b, &name); err != nil { - return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b) - } - ev := ed.Values().ByName(name) - if ev == nil { - return 0, errors.New("invalid value for enum %v: %s", ed.FullName(), name) - } - return ev.Number(), nil - } else { - var num pref.EnumNumber - if err := json.Unmarshal(b, &num); err != nil { - return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b) - } - return num, nil - } -} - -// CompressGZIP compresses the input as a GZIP-encoded file. -// The current implementation does no compression. -func (Export) CompressGZIP(in []byte) (out []byte) { - // RFC 1952, section 2.3.1. - var gzipHeader = [10]byte{0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff} - - // RFC 1951, section 3.2.4. - var blockHeader [5]byte - const maxBlockSize = math.MaxUint16 - numBlocks := 1 + len(in)/maxBlockSize - - // RFC 1952, section 2.3.1. - var gzipFooter [8]byte - binary.LittleEndian.PutUint32(gzipFooter[0:4], crc32.ChecksumIEEE(in)) - binary.LittleEndian.PutUint32(gzipFooter[4:8], uint32(len(in))) - - // Encode the input without compression using raw DEFLATE blocks. - out = make([]byte, 0, len(gzipHeader)+len(blockHeader)*numBlocks+len(in)+len(gzipFooter)) - out = append(out, gzipHeader[:]...) - for blockHeader[0] == 0 { - blockSize := maxBlockSize - if blockSize > len(in) { - blockHeader[0] = 0x01 // final bit per RFC 1951, section 3.2.3. - blockSize = len(in) - } - binary.LittleEndian.PutUint16(blockHeader[1:3], uint16(blockSize)^0x0000) - binary.LittleEndian.PutUint16(blockHeader[3:5], uint16(blockSize)^0xffff) - out = append(out, blockHeader[:]...) - out = append(out, in[:blockSize]...) - in = in[blockSize:] - } - out = append(out, gzipFooter[:]...) - return out -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go deleted file mode 100644 index 49e723161..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package impl - -import ( - "reflect" - - "google.golang.org/protobuf/internal/descopts" - "google.golang.org/protobuf/internal/encoding/messageset" - ptag "google.golang.org/protobuf/internal/encoding/tag" - "google.golang.org/protobuf/internal/filedesc" - "google.golang.org/protobuf/internal/pragma" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" - piface "google.golang.org/protobuf/runtime/protoiface" -) - -func (xi *ExtensionInfo) initToLegacy() { - xd := xi.desc - var parent piface.MessageV1 - messageName := xd.ContainingMessage().FullName() - if mt, _ := preg.GlobalTypes.FindMessageByName(messageName); mt != nil { - // Create a new parent message and unwrap it if possible. - mv := mt.New().Interface() - t := reflect.TypeOf(mv) - if mv, ok := mv.(unwrapper); ok { - t = reflect.TypeOf(mv.protoUnwrap()) - } - - // Check whether the message implements the legacy v1 Message interface. 
- mz := reflect.Zero(t).Interface() - if mz, ok := mz.(piface.MessageV1); ok { - parent = mz - } - } - - // Determine the v1 extension type, which is unfortunately not the same as - // the v2 ExtensionType.GoType. - extType := xi.goType - switch extType.Kind() { - case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - extType = reflect.PtrTo(extType) // T -> *T for singular scalar fields - } - - // Reconstruct the legacy enum full name. - var enumName string - if xd.Kind() == pref.EnumKind { - enumName = legacyEnumName(xd.Enum()) - } - - // Derive the proto file that the extension was declared within. - var filename string - if fd := xd.ParentFile(); fd != nil { - filename = fd.Path() - } - - // For MessageSet extensions, the name used is the parent message. - name := xd.FullName() - if messageset.IsMessageSetExtension(xd) { - name = name.Parent() - } - - xi.ExtendedType = parent - xi.ExtensionType = reflect.Zero(extType).Interface() - xi.Field = int32(xd.Number()) - xi.Name = string(name) - xi.Tag = ptag.Marshal(xd, enumName) - xi.Filename = filename -} - -// initFromLegacy initializes an ExtensionInfo from -// the contents of the deprecated exported fields of the type. -func (xi *ExtensionInfo) initFromLegacy() { - // The v1 API returns "type incomplete" descriptors where only the - // field number is specified. In such a case, use a placeholder. - if xi.ExtendedType == nil || xi.ExtensionType == nil { - xd := placeholderExtension{ - name: pref.FullName(xi.Name), - number: pref.FieldNumber(xi.Field), - } - xi.desc = extensionTypeDescriptor{xd, xi} - return - } - - // Resolve enum or message dependencies. - var ed pref.EnumDescriptor - var md pref.MessageDescriptor - t := reflect.TypeOf(xi.ExtensionType) - isOptional := t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct - isRepeated := t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 - if isOptional || isRepeated { - t = t.Elem() - } - switch v := reflect.Zero(t).Interface().(type) { - case pref.Enum: - ed = v.Descriptor() - case enumV1: - ed = LegacyLoadEnumDesc(t) - case pref.ProtoMessage: - md = v.ProtoReflect().Descriptor() - case messageV1: - md = LegacyLoadMessageDesc(t) - } - - // Derive basic field information from the struct tag. - var evs pref.EnumValueDescriptors - if ed != nil { - evs = ed.Values() - } - fd := ptag.Unmarshal(xi.Tag, t, evs).(*filedesc.Field) - - // Construct a v2 ExtensionType. - xd := &filedesc.Extension{L2: new(filedesc.ExtensionL2)} - xd.L0.ParentFile = filedesc.SurrogateProto2 - xd.L0.FullName = pref.FullName(xi.Name) - xd.L1.Number = pref.FieldNumber(xi.Field) - xd.L1.Cardinality = fd.L1.Cardinality - xd.L1.Kind = fd.L1.Kind - xd.L2.IsPacked = fd.L1.IsPacked - xd.L2.Default = fd.L1.Default - xd.L1.Extendee = Export{}.MessageDescriptorOf(xi.ExtendedType) - xd.L2.Enum = ed - xd.L2.Message = md - - // Derive real extension field name for MessageSets. 
- if messageset.IsMessageSet(xd.L1.Extendee) && md.FullName() == xd.L0.FullName { - xd.L0.FullName = xd.L0.FullName.Append(messageset.ExtensionName) - } - - tt := reflect.TypeOf(xi.ExtensionType) - if isOptional { - tt = tt.Elem() - } - xi.goType = tt - xi.desc = extensionTypeDescriptor{xd, xi} -} - -type placeholderExtension struct { - name pref.FullName - number pref.FieldNumber -} - -func (x placeholderExtension) ParentFile() pref.FileDescriptor { return nil } -func (x placeholderExtension) Parent() pref.Descriptor { return nil } -func (x placeholderExtension) Index() int { return 0 } -func (x placeholderExtension) Syntax() pref.Syntax { return 0 } -func (x placeholderExtension) Name() pref.Name { return x.name.Name() } -func (x placeholderExtension) FullName() pref.FullName { return x.name } -func (x placeholderExtension) IsPlaceholder() bool { return true } -func (x placeholderExtension) Options() pref.ProtoMessage { return descopts.Field } -func (x placeholderExtension) Number() pref.FieldNumber { return x.number } -func (x placeholderExtension) Cardinality() pref.Cardinality { return 0 } -func (x placeholderExtension) Kind() pref.Kind { return 0 } -func (x placeholderExtension) HasJSONName() bool { return false } -func (x placeholderExtension) JSONName() string { return "[" + string(x.name) + "]" } -func (x placeholderExtension) TextName() string { return "[" + string(x.name) + "]" } -func (x placeholderExtension) HasPresence() bool { return false } -func (x placeholderExtension) HasOptionalKeyword() bool { return false } -func (x placeholderExtension) IsExtension() bool { return true } -func (x placeholderExtension) IsWeak() bool { return false } -func (x placeholderExtension) IsPacked() bool { return false } -func (x placeholderExtension) IsList() bool { return false } -func (x placeholderExtension) IsMap() bool { return false } -func (x placeholderExtension) MapKey() pref.FieldDescriptor { return nil } -func (x placeholderExtension) MapValue() pref.FieldDescriptor { return nil } -func (x placeholderExtension) HasDefault() bool { return false } -func (x placeholderExtension) Default() pref.Value { return pref.Value{} } -func (x placeholderExtension) DefaultEnumValue() pref.EnumValueDescriptor { return nil } -func (x placeholderExtension) ContainingOneof() pref.OneofDescriptor { return nil } -func (x placeholderExtension) ContainingMessage() pref.MessageDescriptor { return nil } -func (x placeholderExtension) Enum() pref.EnumDescriptor { return nil } -func (x placeholderExtension) Message() pref.MessageDescriptor { return nil } -func (x placeholderExtension) ProtoType(pref.FieldDescriptor) { return } -func (x placeholderExtension) ProtoInternal(pragma.DoNotImplement) { return } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go deleted file mode 100644 index 9ab091086..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package impl - -import ( - "bytes" - "compress/gzip" - "io/ioutil" - "sync" - - "google.golang.org/protobuf/internal/filedesc" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -// Every enum and message type generated by protoc-gen-go since commit 2fc053c5 -// on February 25th, 2016 has had a method to get the raw descriptor. -// Types that were not generated by protoc-gen-go or were generated prior -// to that version are not supported. -// -// The []byte returned is the encoded form of a FileDescriptorProto message -// compressed using GZIP. The []int is the path from the top-level file -// to the specific message or enum declaration. -type ( - enumV1 interface { - EnumDescriptor() ([]byte, []int) - } - messageV1 interface { - Descriptor() ([]byte, []int) - } -) - -var legacyFileDescCache sync.Map // map[*byte]protoreflect.FileDescriptor - -// legacyLoadFileDesc unmarshals b as a compressed FileDescriptorProto message. -// -// This assumes that b is immutable and that b does not refer to part of a -// concatenated series of GZIP files (which would require shenanigans that -// rely on the concatenation properties of both protobufs and GZIP). -// File descriptors generated by protoc-gen-go do not rely on that property. -func legacyLoadFileDesc(b []byte) protoreflect.FileDescriptor { - // Fast-path: check whether we already have a cached file descriptor. - if fd, ok := legacyFileDescCache.Load(&b[0]); ok { - return fd.(protoreflect.FileDescriptor) - } - - // Slow-path: decompress and unmarshal the file descriptor proto. - zr, err := gzip.NewReader(bytes.NewReader(b)) - if err != nil { - panic(err) - } - b2, err := ioutil.ReadAll(zr) - if err != nil { - panic(err) - } - - fd := filedesc.Builder{ - RawDescriptor: b2, - FileRegistry: resolverOnly{protoregistry.GlobalFiles}, // do not register back to global registry - }.Build().File - if fd, ok := legacyFileDescCache.LoadOrStore(&b[0], fd); ok { - return fd.(protoreflect.FileDescriptor) - } - return fd -} - -type resolverOnly struct { - reg *protoregistry.Files -} - -func (r resolverOnly) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { - return r.reg.FindFileByPath(path) -} -func (r resolverOnly) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) { - return r.reg.FindDescriptorByName(name) -} -func (resolverOnly) RegisterFile(protoreflect.FileDescriptor) error { - return nil -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go deleted file mode 100644 index 029feeefd..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ /dev/null @@ -1,565 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package impl - -import ( - "fmt" - "reflect" - "strings" - "sync" - - "google.golang.org/protobuf/internal/descopts" - ptag "google.golang.org/protobuf/internal/encoding/tag" - "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/filedesc" - "google.golang.org/protobuf/internal/strs" - "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/runtime/protoiface" - piface "google.golang.org/protobuf/runtime/protoiface" -) - -// legacyWrapMessage wraps v as a protoreflect.Message, -// where v must be a *struct kind and not implement the v2 API already. -func legacyWrapMessage(v reflect.Value) pref.Message { - t := v.Type() - if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { - return aberrantMessage{v: v} - } - mt := legacyLoadMessageInfo(t, "") - return mt.MessageOf(v.Interface()) -} - -// legacyLoadMessageType dynamically loads a protoreflect.Type for t, -// where t must be not implement the v2 API already. -// The provided name is used if it cannot be determined from the message. -func legacyLoadMessageType(t reflect.Type, name pref.FullName) protoreflect.MessageType { - if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { - return aberrantMessageType{t} - } - return legacyLoadMessageInfo(t, name) -} - -var legacyMessageTypeCache sync.Map // map[reflect.Type]*MessageInfo - -// legacyLoadMessageInfo dynamically loads a *MessageInfo for t, -// where t must be a *struct kind and not implement the v2 API already. -// The provided name is used if it cannot be determined from the message. -func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { - // Fast-path: check if a MessageInfo is cached for this concrete type. - if mt, ok := legacyMessageTypeCache.Load(t); ok { - return mt.(*MessageInfo) - } - - // Slow-path: derive message descriptor and initialize MessageInfo. - mi := &MessageInfo{ - Desc: legacyLoadMessageDesc(t, name), - GoReflectType: t, - } - - var hasMarshal, hasUnmarshal bool - v := reflect.Zero(t).Interface() - if _, hasMarshal = v.(legacyMarshaler); hasMarshal { - mi.methods.Marshal = legacyMarshal - - // We have no way to tell whether the type's Marshal method - // supports deterministic serialization or not, but this - // preserves the v1 implementation's behavior of always - // calling Marshal methods when present. - mi.methods.Flags |= piface.SupportMarshalDeterministic - } - if _, hasUnmarshal = v.(legacyUnmarshaler); hasUnmarshal { - mi.methods.Unmarshal = legacyUnmarshal - } - if _, hasMerge := v.(legacyMerger); hasMerge || (hasMarshal && hasUnmarshal) { - mi.methods.Merge = legacyMerge - } - - if mi, ok := legacyMessageTypeCache.LoadOrStore(t, mi); ok { - return mi.(*MessageInfo) - } - return mi -} - -var legacyMessageDescCache sync.Map // map[reflect.Type]protoreflect.MessageDescriptor - -// LegacyLoadMessageDesc returns an MessageDescriptor derived from the Go type, -// which should be a *struct kind and must not implement the v2 API already. -// -// This is exported for testing purposes. -func LegacyLoadMessageDesc(t reflect.Type) pref.MessageDescriptor { - return legacyLoadMessageDesc(t, "") -} -func legacyLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescriptor { - // Fast-path: check if a MessageDescriptor is cached for this concrete type. 
- if mi, ok := legacyMessageDescCache.Load(t); ok { - return mi.(pref.MessageDescriptor) - } - - // Slow-path: initialize MessageDescriptor from the raw descriptor. - mv := reflect.Zero(t).Interface() - if _, ok := mv.(pref.ProtoMessage); ok { - panic(fmt.Sprintf("%v already implements proto.Message", t)) - } - mdV1, ok := mv.(messageV1) - if !ok { - return aberrantLoadMessageDesc(t, name) - } - - // If this is a dynamic message type where there isn't a 1-1 mapping between - // Go and protobuf types, calling the Descriptor method on the zero value of - // the message type isn't likely to work. If it panics, swallow the panic and - // continue as if the Descriptor method wasn't present. - b, idxs := func() ([]byte, []int) { - defer func() { - recover() - }() - return mdV1.Descriptor() - }() - if b == nil { - return aberrantLoadMessageDesc(t, name) - } - - // If the Go type has no fields, then this might be a proto3 empty message - // from before the size cache was added. If there are any fields, check to - // see that at least one of them looks like something we generated. - if t.Elem().Kind() == reflect.Struct { - if nfield := t.Elem().NumField(); nfield > 0 { - hasProtoField := false - for i := 0; i < nfield; i++ { - f := t.Elem().Field(i) - if f.Tag.Get("protobuf") != "" || f.Tag.Get("protobuf_oneof") != "" || strings.HasPrefix(f.Name, "XXX_") { - hasProtoField = true - break - } - } - if !hasProtoField { - return aberrantLoadMessageDesc(t, name) - } - } - } - - md := legacyLoadFileDesc(b).Messages().Get(idxs[0]) - for _, i := range idxs[1:] { - md = md.Messages().Get(i) - } - if name != "" && md.FullName() != name { - panic(fmt.Sprintf("mismatching message name: got %v, want %v", md.FullName(), name)) - } - if md, ok := legacyMessageDescCache.LoadOrStore(t, md); ok { - return md.(protoreflect.MessageDescriptor) - } - return md -} - -var ( - aberrantMessageDescLock sync.Mutex - aberrantMessageDescCache map[reflect.Type]protoreflect.MessageDescriptor -) - -// aberrantLoadMessageDesc returns an MessageDescriptor derived from the Go type, -// which must not implement protoreflect.ProtoMessage or messageV1. -// -// This is a best-effort derivation of the message descriptor using the protobuf -// tags on the struct fields. -func aberrantLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescriptor { - aberrantMessageDescLock.Lock() - defer aberrantMessageDescLock.Unlock() - if aberrantMessageDescCache == nil { - aberrantMessageDescCache = make(map[reflect.Type]protoreflect.MessageDescriptor) - } - return aberrantLoadMessageDescReentrant(t, name) -} -func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.MessageDescriptor { - // Fast-path: check if an MessageDescriptor is cached for this concrete type. - if md, ok := aberrantMessageDescCache[t]; ok { - return md - } - - // Slow-path: construct a descriptor from the Go struct type (best-effort). - // Cache the MessageDescriptor early on so that we can resolve internal - // cyclic references. - md := &filedesc.Message{L2: new(filedesc.MessageL2)} - md.L0.FullName = aberrantDeriveMessageName(t, name) - md.L0.ParentFile = filedesc.SurrogateProto2 - aberrantMessageDescCache[t] = md - - if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { - return md - } - - // Try to determine if the message is using proto3 by checking scalars. 
- for i := 0; i < t.Elem().NumField(); i++ { - f := t.Elem().Field(i) - if tag := f.Tag.Get("protobuf"); tag != "" { - switch f.Type.Kind() { - case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - md.L0.ParentFile = filedesc.SurrogateProto3 - } - for _, s := range strings.Split(tag, ",") { - if s == "proto3" { - md.L0.ParentFile = filedesc.SurrogateProto3 - } - } - } - } - - // Obtain a list of oneof wrapper types. - var oneofWrappers []reflect.Type - for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} { - if fn, ok := t.MethodByName(method); ok { - for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { - for _, v := range vs { - oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) - } - } - } - } - } - - // Obtain a list of the extension ranges. - if fn, ok := t.MethodByName("ExtensionRangeArray"); ok { - vs := fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0] - for i := 0; i < vs.Len(); i++ { - v := vs.Index(i) - md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, [2]pref.FieldNumber{ - pref.FieldNumber(v.FieldByName("Start").Int()), - pref.FieldNumber(v.FieldByName("End").Int() + 1), - }) - md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, nil) - } - } - - // Derive the message fields by inspecting the struct fields. - for i := 0; i < t.Elem().NumField(); i++ { - f := t.Elem().Field(i) - if tag := f.Tag.Get("protobuf"); tag != "" { - tagKey := f.Tag.Get("protobuf_key") - tagVal := f.Tag.Get("protobuf_val") - aberrantAppendField(md, f.Type, tag, tagKey, tagVal) - } - if tag := f.Tag.Get("protobuf_oneof"); tag != "" { - n := len(md.L2.Oneofs.List) - md.L2.Oneofs.List = append(md.L2.Oneofs.List, filedesc.Oneof{}) - od := &md.L2.Oneofs.List[n] - od.L0.FullName = md.FullName().Append(pref.Name(tag)) - od.L0.ParentFile = md.L0.ParentFile - od.L0.Parent = md - od.L0.Index = n - - for _, t := range oneofWrappers { - if t.Implements(f.Type) { - f := t.Elem().Field(0) - if tag := f.Tag.Get("protobuf"); tag != "" { - aberrantAppendField(md, f.Type, tag, "", "") - fd := &md.L2.Fields.List[len(md.L2.Fields.List)-1] - fd.L1.ContainingOneof = od - od.L1.Fields.List = append(od.L1.Fields.List, fd) - } - } - } - } - } - - return md -} - -func aberrantDeriveMessageName(t reflect.Type, name pref.FullName) pref.FullName { - if name.IsValid() { - return name - } - func() { - defer func() { recover() }() // swallow possible nil panics - if m, ok := reflect.Zero(t).Interface().(interface{ XXX_MessageName() string }); ok { - name = pref.FullName(m.XXX_MessageName()) - } - }() - if name.IsValid() { - return name - } - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - return AberrantDeriveFullName(t) -} - -func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, tagVal string) { - t := goType - isOptional := t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct - isRepeated := t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 - if isOptional || isRepeated { - t = t.Elem() - } - fd := ptag.Unmarshal(tag, t, placeholderEnumValues{}).(*filedesc.Field) - - // Append field descriptor to the message. 
- n := len(md.L2.Fields.List) - md.L2.Fields.List = append(md.L2.Fields.List, *fd) - fd = &md.L2.Fields.List[n] - fd.L0.FullName = md.FullName().Append(fd.Name()) - fd.L0.ParentFile = md.L0.ParentFile - fd.L0.Parent = md - fd.L0.Index = n - - if fd.L1.IsWeak || fd.L1.HasPacked { - fd.L1.Options = func() pref.ProtoMessage { - opts := descopts.Field.ProtoReflect().New() - if fd.L1.IsWeak { - opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true)) - } - if fd.L1.HasPacked { - opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.IsPacked)) - } - return opts.Interface() - } - } - - // Populate Enum and Message. - if fd.Enum() == nil && fd.Kind() == pref.EnumKind { - switch v := reflect.Zero(t).Interface().(type) { - case pref.Enum: - fd.L1.Enum = v.Descriptor() - default: - fd.L1.Enum = LegacyLoadEnumDesc(t) - } - } - if fd.Message() == nil && (fd.Kind() == pref.MessageKind || fd.Kind() == pref.GroupKind) { - switch v := reflect.Zero(t).Interface().(type) { - case pref.ProtoMessage: - fd.L1.Message = v.ProtoReflect().Descriptor() - case messageV1: - fd.L1.Message = LegacyLoadMessageDesc(t) - default: - if t.Kind() == reflect.Map { - n := len(md.L1.Messages.List) - md.L1.Messages.List = append(md.L1.Messages.List, filedesc.Message{L2: new(filedesc.MessageL2)}) - md2 := &md.L1.Messages.List[n] - md2.L0.FullName = md.FullName().Append(pref.Name(strs.MapEntryName(string(fd.Name())))) - md2.L0.ParentFile = md.L0.ParentFile - md2.L0.Parent = md - md2.L0.Index = n - - md2.L1.IsMapEntry = true - md2.L2.Options = func() pref.ProtoMessage { - opts := descopts.Message.ProtoReflect().New() - opts.Set(opts.Descriptor().Fields().ByName("map_entry"), protoreflect.ValueOfBool(true)) - return opts.Interface() - } - - aberrantAppendField(md2, t.Key(), tagKey, "", "") - aberrantAppendField(md2, t.Elem(), tagVal, "", "") - - fd.L1.Message = md2 - break - } - fd.L1.Message = aberrantLoadMessageDescReentrant(t, "") - } - } -} - -type placeholderEnumValues struct { - protoreflect.EnumValueDescriptors -} - -func (placeholderEnumValues) ByNumber(n pref.EnumNumber) pref.EnumValueDescriptor { - return filedesc.PlaceholderEnumValue(pref.FullName(fmt.Sprintf("UNKNOWN_%d", n))) -} - -// legacyMarshaler is the proto.Marshaler interface superseded by protoiface.Methoder. -type legacyMarshaler interface { - Marshal() ([]byte, error) -} - -// legacyUnmarshaler is the proto.Unmarshaler interface superseded by protoiface.Methoder. -type legacyUnmarshaler interface { - Unmarshal([]byte) error -} - -// legacyMerger is the proto.Merger interface superseded by protoiface.Methoder. -type legacyMerger interface { - Merge(protoiface.MessageV1) -} - -var aberrantProtoMethods = &piface.Methods{ - Marshal: legacyMarshal, - Unmarshal: legacyUnmarshal, - Merge: legacyMerge, - - // We have no way to tell whether the type's Marshal method - // supports deterministic serialization or not, but this - // preserves the v1 implementation's behavior of always - // calling Marshal methods when present. - Flags: piface.SupportMarshalDeterministic, -} - -func legacyMarshal(in piface.MarshalInput) (piface.MarshalOutput, error) { - v := in.Message.(unwrapper).protoUnwrap() - marshaler, ok := v.(legacyMarshaler) - if !ok { - return piface.MarshalOutput{}, errors.New("%T does not implement Marshal", v) - } - out, err := marshaler.Marshal() - if in.Buf != nil { - out = append(in.Buf, out...) 
- } - return piface.MarshalOutput{ - Buf: out, - }, err -} - -func legacyUnmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) { - v := in.Message.(unwrapper).protoUnwrap() - unmarshaler, ok := v.(legacyUnmarshaler) - if !ok { - return piface.UnmarshalOutput{}, errors.New("%T does not implement Unmarshal", v) - } - return piface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf) -} - -func legacyMerge(in piface.MergeInput) piface.MergeOutput { - // Check whether this supports the legacy merger. - dstv := in.Destination.(unwrapper).protoUnwrap() - merger, ok := dstv.(legacyMerger) - if ok { - merger.Merge(Export{}.ProtoMessageV1Of(in.Source)) - return piface.MergeOutput{Flags: piface.MergeComplete} - } - - // If legacy merger is unavailable, implement merge in terms of - // a marshal and unmarshal operation. - srcv := in.Source.(unwrapper).protoUnwrap() - marshaler, ok := srcv.(legacyMarshaler) - if !ok { - return piface.MergeOutput{} - } - dstv = in.Destination.(unwrapper).protoUnwrap() - unmarshaler, ok := dstv.(legacyUnmarshaler) - if !ok { - return piface.MergeOutput{} - } - if !in.Source.IsValid() { - // Legacy Marshal methods may not function on nil messages. - // Check for a typed nil source only after we confirm that - // legacy Marshal/Unmarshal methods are present, for - // consistency. - return piface.MergeOutput{Flags: piface.MergeComplete} - } - b, err := marshaler.Marshal() - if err != nil { - return piface.MergeOutput{} - } - err = unmarshaler.Unmarshal(b) - if err != nil { - return piface.MergeOutput{} - } - return piface.MergeOutput{Flags: piface.MergeComplete} -} - -// aberrantMessageType implements MessageType for all types other than pointer-to-struct. -type aberrantMessageType struct { - t reflect.Type -} - -func (mt aberrantMessageType) New() pref.Message { - if mt.t.Kind() == reflect.Ptr { - return aberrantMessage{reflect.New(mt.t.Elem())} - } - return aberrantMessage{reflect.Zero(mt.t)} -} -func (mt aberrantMessageType) Zero() pref.Message { - return aberrantMessage{reflect.Zero(mt.t)} -} -func (mt aberrantMessageType) GoType() reflect.Type { - return mt.t -} -func (mt aberrantMessageType) Descriptor() pref.MessageDescriptor { - return LegacyLoadMessageDesc(mt.t) -} - -// aberrantMessage implements Message for all types other than pointer-to-struct. -// -// When the underlying type implements legacyMarshaler or legacyUnmarshaler, -// the aberrant Message can be marshaled or unmarshaled. Otherwise, there is -// not much that can be done with values of this type. -type aberrantMessage struct { - v reflect.Value -} - -// Reset implements the v1 proto.Message.Reset method. 
-func (m aberrantMessage) Reset() { - if mr, ok := m.v.Interface().(interface{ Reset() }); ok { - mr.Reset() - return - } - if m.v.Kind() == reflect.Ptr && !m.v.IsNil() { - m.v.Elem().Set(reflect.Zero(m.v.Type().Elem())) - } -} - -func (m aberrantMessage) ProtoReflect() pref.Message { - return m -} - -func (m aberrantMessage) Descriptor() pref.MessageDescriptor { - return LegacyLoadMessageDesc(m.v.Type()) -} -func (m aberrantMessage) Type() pref.MessageType { - return aberrantMessageType{m.v.Type()} -} -func (m aberrantMessage) New() pref.Message { - if m.v.Type().Kind() == reflect.Ptr { - return aberrantMessage{reflect.New(m.v.Type().Elem())} - } - return aberrantMessage{reflect.Zero(m.v.Type())} -} -func (m aberrantMessage) Interface() pref.ProtoMessage { - return m -} -func (m aberrantMessage) Range(f func(pref.FieldDescriptor, pref.Value) bool) { - return -} -func (m aberrantMessage) Has(pref.FieldDescriptor) bool { - return false -} -func (m aberrantMessage) Clear(pref.FieldDescriptor) { - panic("invalid Message.Clear on " + string(m.Descriptor().FullName())) -} -func (m aberrantMessage) Get(fd pref.FieldDescriptor) pref.Value { - if fd.Default().IsValid() { - return fd.Default() - } - panic("invalid Message.Get on " + string(m.Descriptor().FullName())) -} -func (m aberrantMessage) Set(pref.FieldDescriptor, pref.Value) { - panic("invalid Message.Set on " + string(m.Descriptor().FullName())) -} -func (m aberrantMessage) Mutable(pref.FieldDescriptor) pref.Value { - panic("invalid Message.Mutable on " + string(m.Descriptor().FullName())) -} -func (m aberrantMessage) NewField(pref.FieldDescriptor) pref.Value { - panic("invalid Message.NewField on " + string(m.Descriptor().FullName())) -} -func (m aberrantMessage) WhichOneof(pref.OneofDescriptor) pref.FieldDescriptor { - panic("invalid Message.WhichOneof descriptor on " + string(m.Descriptor().FullName())) -} -func (m aberrantMessage) GetUnknown() pref.RawFields { - return nil -} -func (m aberrantMessage) SetUnknown(pref.RawFields) { - // SetUnknown discards its input on messages which don't support unknown field storage. -} -func (m aberrantMessage) IsValid() bool { - if m.v.Kind() == reflect.Ptr { - return !m.v.IsNil() - } - return false -} -func (m aberrantMessage) ProtoMethods() *piface.Methods { - return aberrantProtoMethods -} -func (m aberrantMessage) protoUnwrap() interface{} { - return m.v.Interface() -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go deleted file mode 100644 index c65bbc044..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/merge.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package impl - -import ( - "fmt" - "reflect" - - "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" -) - -type mergeOptions struct{} - -func (o mergeOptions) Merge(dst, src proto.Message) { - proto.Merge(dst, src) -} - -// merge is protoreflect.Methods.Merge. 
-func (mi *MessageInfo) merge(in piface.MergeInput) piface.MergeOutput { - dp, ok := mi.getPointer(in.Destination) - if !ok { - return piface.MergeOutput{} - } - sp, ok := mi.getPointer(in.Source) - if !ok { - return piface.MergeOutput{} - } - mi.mergePointer(dp, sp, mergeOptions{}) - return piface.MergeOutput{Flags: piface.MergeComplete} -} - -func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { - mi.init() - if dst.IsNil() { - panic(fmt.Sprintf("invalid value: merging into nil message")) - } - if src.IsNil() { - return - } - for _, f := range mi.orderedCoderFields { - if f.funcs.merge == nil { - continue - } - sfptr := src.Apply(f.offset) - if f.isPointer && sfptr.Elem().IsNil() { - continue - } - f.funcs.merge(dst.Apply(f.offset), sfptr, f, opts) - } - if mi.extensionOffset.IsValid() { - sext := src.Apply(mi.extensionOffset).Extensions() - dext := dst.Apply(mi.extensionOffset).Extensions() - if *dext == nil { - *dext = make(map[int32]ExtensionField) - } - for num, sx := range *sext { - xt := sx.Type() - xi := getExtensionFieldInfo(xt) - if xi.funcs.merge == nil { - continue - } - dx := (*dext)[num] - var dv pref.Value - if dx.Type() == sx.Type() { - dv = dx.Value() - } - if !dv.IsValid() && xi.unmarshalNeedsValue { - dv = xt.New() - } - dv = xi.funcs.merge(dv, sx.Value(), opts) - dx.Set(sx.Type(), dv) - (*dext)[num] = dx - } - } - if mi.unknownOffset.IsValid() { - su := mi.getUnknownBytes(src) - if su != nil && len(*su) > 0 { - du := mi.mutableUnknownBytes(dst) - *du = append(*du, *su...) - } - } -} - -func mergeScalarValue(dst, src pref.Value, opts mergeOptions) pref.Value { - return src -} - -func mergeBytesValue(dst, src pref.Value, opts mergeOptions) pref.Value { - return pref.ValueOfBytes(append(emptyBuf[:], src.Bytes()...)) -} - -func mergeListValue(dst, src pref.Value, opts mergeOptions) pref.Value { - dstl := dst.List() - srcl := src.List() - for i, llen := 0, srcl.Len(); i < llen; i++ { - dstl.Append(srcl.Get(i)) - } - return dst -} - -func mergeBytesListValue(dst, src pref.Value, opts mergeOptions) pref.Value { - dstl := dst.List() - srcl := src.List() - for i, llen := 0, srcl.Len(); i < llen; i++ { - sb := srcl.Get(i).Bytes() - db := append(emptyBuf[:], sb...) 
- dstl.Append(pref.ValueOfBytes(db)) - } - return dst -} - -func mergeMessageListValue(dst, src pref.Value, opts mergeOptions) pref.Value { - dstl := dst.List() - srcl := src.List() - for i, llen := 0, srcl.Len(); i < llen; i++ { - sm := srcl.Get(i).Message() - dm := proto.Clone(sm.Interface()).ProtoReflect() - dstl.Append(pref.ValueOfMessage(dm)) - } - return dst -} - -func mergeMessageValue(dst, src pref.Value, opts mergeOptions) pref.Value { - opts.Merge(dst.Message().Interface(), src.Message().Interface()) - return dst -} - -func mergeMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { - if f.mi != nil { - if dst.Elem().IsNil() { - dst.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) - } - f.mi.mergePointer(dst.Elem(), src.Elem(), opts) - } else { - dm := dst.AsValueOf(f.ft).Elem() - sm := src.AsValueOf(f.ft).Elem() - if dm.IsNil() { - dm.Set(reflect.New(f.ft.Elem())) - } - opts.Merge(asMessage(dm), asMessage(sm)) - } -} - -func mergeMessageSlice(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { - for _, sp := range src.PointerSlice() { - dm := reflect.New(f.ft.Elem().Elem()) - if f.mi != nil { - f.mi.mergePointer(pointerOfValue(dm), sp, opts) - } else { - opts.Merge(asMessage(dm), asMessage(sp.AsValueOf(f.ft.Elem().Elem()))) - } - dst.AppendPointerSlice(pointerOfValue(dm)) - } -} - -func mergeBytes(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - *dst.Bytes() = append(emptyBuf[:], *src.Bytes()...) -} - -func mergeBytesNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - v := *src.Bytes() - if len(v) > 0 { - *dst.Bytes() = append(emptyBuf[:], v...) - } -} - -func mergeBytesSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - ds := dst.BytesSlice() - for _, v := range *src.BytesSlice() { - *ds = append(*ds, append(emptyBuf[:], v...)) - } -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge_gen.go b/vendor/google.golang.org/protobuf/internal/impl/merge_gen.go deleted file mode 100644 index 8816c274d..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/merge_gen.go +++ /dev/null @@ -1,209 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-types. DO NOT EDIT. - -package impl - -import () - -func mergeBool(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - *dst.Bool() = *src.Bool() -} - -func mergeBoolNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - v := *src.Bool() - if v != false { - *dst.Bool() = v - } -} - -func mergeBoolPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - p := *src.BoolPtr() - if p != nil { - v := *p - *dst.BoolPtr() = &v - } -} - -func mergeBoolSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - ds := dst.BoolSlice() - ss := src.BoolSlice() - *ds = append(*ds, *ss...) -} - -func mergeInt32(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - *dst.Int32() = *src.Int32() -} - -func mergeInt32NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - v := *src.Int32() - if v != 0 { - *dst.Int32() = v - } -} - -func mergeInt32Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - p := *src.Int32Ptr() - if p != nil { - v := *p - *dst.Int32Ptr() = &v - } -} - -func mergeInt32Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - ds := dst.Int32Slice() - ss := src.Int32Slice() - *ds = append(*ds, *ss...) 
-} - -func mergeUint32(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - *dst.Uint32() = *src.Uint32() -} - -func mergeUint32NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - v := *src.Uint32() - if v != 0 { - *dst.Uint32() = v - } -} - -func mergeUint32Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - p := *src.Uint32Ptr() - if p != nil { - v := *p - *dst.Uint32Ptr() = &v - } -} - -func mergeUint32Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - ds := dst.Uint32Slice() - ss := src.Uint32Slice() - *ds = append(*ds, *ss...) -} - -func mergeInt64(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - *dst.Int64() = *src.Int64() -} - -func mergeInt64NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - v := *src.Int64() - if v != 0 { - *dst.Int64() = v - } -} - -func mergeInt64Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - p := *src.Int64Ptr() - if p != nil { - v := *p - *dst.Int64Ptr() = &v - } -} - -func mergeInt64Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - ds := dst.Int64Slice() - ss := src.Int64Slice() - *ds = append(*ds, *ss...) -} - -func mergeUint64(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - *dst.Uint64() = *src.Uint64() -} - -func mergeUint64NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - v := *src.Uint64() - if v != 0 { - *dst.Uint64() = v - } -} - -func mergeUint64Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - p := *src.Uint64Ptr() - if p != nil { - v := *p - *dst.Uint64Ptr() = &v - } -} - -func mergeUint64Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - ds := dst.Uint64Slice() - ss := src.Uint64Slice() - *ds = append(*ds, *ss...) -} - -func mergeFloat32(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - *dst.Float32() = *src.Float32() -} - -func mergeFloat32NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - v := *src.Float32() - if v != 0 { - *dst.Float32() = v - } -} - -func mergeFloat32Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - p := *src.Float32Ptr() - if p != nil { - v := *p - *dst.Float32Ptr() = &v - } -} - -func mergeFloat32Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - ds := dst.Float32Slice() - ss := src.Float32Slice() - *ds = append(*ds, *ss...) -} - -func mergeFloat64(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - *dst.Float64() = *src.Float64() -} - -func mergeFloat64NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - v := *src.Float64() - if v != 0 { - *dst.Float64() = v - } -} - -func mergeFloat64Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - p := *src.Float64Ptr() - if p != nil { - v := *p - *dst.Float64Ptr() = &v - } -} - -func mergeFloat64Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - ds := dst.Float64Slice() - ss := src.Float64Slice() - *ds = append(*ds, *ss...) -} - -func mergeString(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - *dst.String() = *src.String() -} - -func mergeStringNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - v := *src.String() - if v != "" { - *dst.String() = v - } -} - -func mergeStringPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - p := *src.StringPtr() - if p != nil { - v := *p - *dst.StringPtr() = &v - } -} - -func mergeStringSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - ds := dst.StringSlice() - ss := src.StringSlice() - *ds = append(*ds, *ss...) 
-} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go deleted file mode 100644 index a104e28e8..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package impl - -import ( - "fmt" - "reflect" - "strconv" - "strings" - "sync" - "sync/atomic" - - "google.golang.org/protobuf/internal/genid" - "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" -) - -// MessageInfo provides protobuf related functionality for a given Go type -// that represents a message. A given instance of MessageInfo is tied to -// exactly one Go type, which must be a pointer to a struct type. -// -// The exported fields must be populated before any methods are called -// and cannot be mutated after set. -type MessageInfo struct { - // GoReflectType is the underlying message Go type and must be populated. - GoReflectType reflect.Type // pointer to struct - - // Desc is the underlying message descriptor type and must be populated. - Desc pref.MessageDescriptor - - // Exporter must be provided in a purego environment in order to provide - // access to unexported fields. - Exporter exporter - - // OneofWrappers is list of pointers to oneof wrapper struct types. - OneofWrappers []interface{} - - initMu sync.Mutex // protects all unexported fields - initDone uint32 - - reflectMessageInfo // for reflection implementation - coderMessageInfo // for fast-path method implementations -} - -// exporter is a function that returns a reference to the ith field of v, -// where v is a pointer to a struct. It returns nil if it does not support -// exporting the requested field (e.g., already exported). -type exporter func(v interface{}, i int) interface{} - -// getMessageInfo returns the MessageInfo for any message type that -// is generated by our implementation of protoc-gen-go (for v2 and on). -// If it is unable to obtain a MessageInfo, it returns nil. -func getMessageInfo(mt reflect.Type) *MessageInfo { - m, ok := reflect.Zero(mt).Interface().(pref.ProtoMessage) - if !ok { - return nil - } - mr, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *MessageInfo }) - if !ok { - return nil - } - return mr.ProtoMessageInfo() -} - -func (mi *MessageInfo) init() { - // This function is called in the hot path. Inline the sync.Once logic, - // since allocating a closure for Once.Do is expensive. - // Keep init small to ensure that it can be inlined. - if atomic.LoadUint32(&mi.initDone) == 0 { - mi.initOnce() - } -} - -func (mi *MessageInfo) initOnce() { - mi.initMu.Lock() - defer mi.initMu.Unlock() - if mi.initDone == 1 { - return - } - - t := mi.GoReflectType - if t.Kind() != reflect.Ptr && t.Elem().Kind() != reflect.Struct { - panic(fmt.Sprintf("got %v, want *struct kind", t)) - } - t = t.Elem() - - si := mi.makeStructInfo(t) - mi.makeReflectFuncs(t, si) - mi.makeCoderMethods(t, si) - - atomic.StoreUint32(&mi.initDone, 1) -} - -// getPointer returns the pointer for a message, which should be of -// the type of the MessageInfo. If the message is of a different type, -// it returns ok==false. 
-func (mi *MessageInfo) getPointer(m pref.Message) (p pointer, ok bool) { - switch m := m.(type) { - case *messageState: - return m.pointer(), m.messageInfo() == mi - case *messageReflectWrapper: - return m.pointer(), m.messageInfo() == mi - } - return pointer{}, false -} - -type ( - SizeCache = int32 - WeakFields = map[int32]protoreflect.ProtoMessage - UnknownFields = unknownFieldsA // TODO: switch to unknownFieldsB - unknownFieldsA = []byte - unknownFieldsB = *[]byte - ExtensionFields = map[int32]ExtensionField -) - -var ( - sizecacheType = reflect.TypeOf(SizeCache(0)) - weakFieldsType = reflect.TypeOf(WeakFields(nil)) - unknownFieldsAType = reflect.TypeOf(unknownFieldsA(nil)) - unknownFieldsBType = reflect.TypeOf(unknownFieldsB(nil)) - extensionFieldsType = reflect.TypeOf(ExtensionFields(nil)) -) - -type structInfo struct { - sizecacheOffset offset - sizecacheType reflect.Type - weakOffset offset - weakType reflect.Type - unknownOffset offset - unknownType reflect.Type - extensionOffset offset - extensionType reflect.Type - - fieldsByNumber map[pref.FieldNumber]reflect.StructField - oneofsByName map[pref.Name]reflect.StructField - oneofWrappersByType map[reflect.Type]pref.FieldNumber - oneofWrappersByNumber map[pref.FieldNumber]reflect.Type -} - -func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { - si := structInfo{ - sizecacheOffset: invalidOffset, - weakOffset: invalidOffset, - unknownOffset: invalidOffset, - extensionOffset: invalidOffset, - - fieldsByNumber: map[pref.FieldNumber]reflect.StructField{}, - oneofsByName: map[pref.Name]reflect.StructField{}, - oneofWrappersByType: map[reflect.Type]pref.FieldNumber{}, - oneofWrappersByNumber: map[pref.FieldNumber]reflect.Type{}, - } - -fieldLoop: - for i := 0; i < t.NumField(); i++ { - switch f := t.Field(i); f.Name { - case genid.SizeCache_goname, genid.SizeCacheA_goname: - if f.Type == sizecacheType { - si.sizecacheOffset = offsetOf(f, mi.Exporter) - si.sizecacheType = f.Type - } - case genid.WeakFields_goname, genid.WeakFieldsA_goname: - if f.Type == weakFieldsType { - si.weakOffset = offsetOf(f, mi.Exporter) - si.weakType = f.Type - } - case genid.UnknownFields_goname, genid.UnknownFieldsA_goname: - if f.Type == unknownFieldsAType || f.Type == unknownFieldsBType { - si.unknownOffset = offsetOf(f, mi.Exporter) - si.unknownType = f.Type - } - case genid.ExtensionFields_goname, genid.ExtensionFieldsA_goname, genid.ExtensionFieldsB_goname: - if f.Type == extensionFieldsType { - si.extensionOffset = offsetOf(f, mi.Exporter) - si.extensionType = f.Type - } - default: - for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { - if len(s) > 0 && strings.Trim(s, "0123456789") == "" { - n, _ := strconv.ParseUint(s, 10, 64) - si.fieldsByNumber[pref.FieldNumber(n)] = f - continue fieldLoop - } - } - if s := f.Tag.Get("protobuf_oneof"); len(s) > 0 { - si.oneofsByName[pref.Name(s)] = f - continue fieldLoop - } - } - } - - // Derive a mapping of oneof wrappers to fields. 
- oneofWrappers := mi.OneofWrappers - for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} { - if fn, ok := reflect.PtrTo(t).MethodByName(method); ok { - for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { - oneofWrappers = vs - } - } - } - } - for _, v := range oneofWrappers { - tf := reflect.TypeOf(v).Elem() - f := tf.Field(0) - for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { - if len(s) > 0 && strings.Trim(s, "0123456789") == "" { - n, _ := strconv.ParseUint(s, 10, 64) - si.oneofWrappersByType[tf] = pref.FieldNumber(n) - si.oneofWrappersByNumber[pref.FieldNumber(n)] = tf - break - } - } - } - - return si -} - -func (mi *MessageInfo) New() protoreflect.Message { - return mi.MessageOf(reflect.New(mi.GoReflectType.Elem()).Interface()) -} -func (mi *MessageInfo) Zero() protoreflect.Message { - return mi.MessageOf(reflect.Zero(mi.GoReflectType).Interface()) -} -func (mi *MessageInfo) Descriptor() protoreflect.MessageDescriptor { - return mi.Desc -} -func (mi *MessageInfo) Enum(i int) protoreflect.EnumType { - mi.init() - fd := mi.Desc.Fields().Get(i) - return Export{}.EnumTypeOf(mi.fieldTypes[fd.Number()]) -} -func (mi *MessageInfo) Message(i int) protoreflect.MessageType { - mi.init() - fd := mi.Desc.Fields().Get(i) - switch { - case fd.IsWeak(): - mt, _ := preg.GlobalTypes.FindMessageByName(fd.Message().FullName()) - return mt - case fd.IsMap(): - return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]} - default: - return Export{}.MessageTypeOf(mi.fieldTypes[fd.Number()]) - } -} - -type mapEntryType struct { - desc protoreflect.MessageDescriptor - valType interface{} // zero value of enum or message type -} - -func (mt mapEntryType) New() protoreflect.Message { - return nil -} -func (mt mapEntryType) Zero() protoreflect.Message { - return nil -} -func (mt mapEntryType) Descriptor() protoreflect.MessageDescriptor { - return mt.desc -} -func (mt mapEntryType) Enum(i int) protoreflect.EnumType { - fd := mt.desc.Fields().Get(i) - if fd.Enum() == nil { - return nil - } - return Export{}.EnumTypeOf(mt.valType) -} -func (mt mapEntryType) Message(i int) protoreflect.MessageType { - fd := mt.desc.Fields().Get(i) - if fd.Message() == nil { - return nil - } - return Export{}.MessageTypeOf(mt.valType) -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go deleted file mode 100644 index 9488b7261..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ /dev/null @@ -1,465 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package impl - -import ( - "fmt" - "reflect" - - "google.golang.org/protobuf/internal/detrand" - "google.golang.org/protobuf/internal/pragma" - pref "google.golang.org/protobuf/reflect/protoreflect" -) - -type reflectMessageInfo struct { - fields map[pref.FieldNumber]*fieldInfo - oneofs map[pref.Name]*oneofInfo - - // fieldTypes contains the zero value of an enum or message field. - // For lists, it contains the element type. - // For maps, it contains the entry value type. - fieldTypes map[pref.FieldNumber]interface{} - - // denseFields is a subset of fields where: - // 0 < fieldDesc.Number() < len(denseFields) - // It provides faster access to the fieldInfo, but may be incomplete. 
- denseFields []*fieldInfo - - // rangeInfos is a list of all fields (not belonging to a oneof) and oneofs. - rangeInfos []interface{} // either *fieldInfo or *oneofInfo - - getUnknown func(pointer) pref.RawFields - setUnknown func(pointer, pref.RawFields) - extensionMap func(pointer) *extensionMap - - nilMessage atomicNilMessage -} - -// makeReflectFuncs generates the set of functions to support reflection. -func (mi *MessageInfo) makeReflectFuncs(t reflect.Type, si structInfo) { - mi.makeKnownFieldsFunc(si) - mi.makeUnknownFieldsFunc(t, si) - mi.makeExtensionFieldsFunc(t, si) - mi.makeFieldTypes(si) -} - -// makeKnownFieldsFunc generates functions for operations that can be performed -// on each protobuf message field. It takes in a reflect.Type representing the -// Go struct and matches message fields with struct fields. -// -// This code assumes that the struct is well-formed and panics if there are -// any discrepancies. -func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { - mi.fields = map[pref.FieldNumber]*fieldInfo{} - md := mi.Desc - fds := md.Fields() - for i := 0; i < fds.Len(); i++ { - fd := fds.Get(i) - fs := si.fieldsByNumber[fd.Number()] - isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() - if isOneof { - fs = si.oneofsByName[fd.ContainingOneof().Name()] - } - var fi fieldInfo - switch { - case fs.Type == nil: - fi = fieldInfoForMissing(fd) // never occurs for officially generated message types - case isOneof: - fi = fieldInfoForOneof(fd, fs, mi.Exporter, si.oneofWrappersByNumber[fd.Number()]) - case fd.IsMap(): - fi = fieldInfoForMap(fd, fs, mi.Exporter) - case fd.IsList(): - fi = fieldInfoForList(fd, fs, mi.Exporter) - case fd.IsWeak(): - fi = fieldInfoForWeakMessage(fd, si.weakOffset) - case fd.Message() != nil: - fi = fieldInfoForMessage(fd, fs, mi.Exporter) - default: - fi = fieldInfoForScalar(fd, fs, mi.Exporter) - } - mi.fields[fd.Number()] = &fi - } - - mi.oneofs = map[pref.Name]*oneofInfo{} - for i := 0; i < md.Oneofs().Len(); i++ { - od := md.Oneofs().Get(i) - mi.oneofs[od.Name()] = makeOneofInfo(od, si, mi.Exporter) - } - - mi.denseFields = make([]*fieldInfo, fds.Len()*2) - for i := 0; i < fds.Len(); i++ { - if fd := fds.Get(i); int(fd.Number()) < len(mi.denseFields) { - mi.denseFields[fd.Number()] = mi.fields[fd.Number()] - } - } - - for i := 0; i < fds.Len(); { - fd := fds.Get(i) - if od := fd.ContainingOneof(); od != nil && !od.IsSynthetic() { - mi.rangeInfos = append(mi.rangeInfos, mi.oneofs[od.Name()]) - i += od.Fields().Len() - } else { - mi.rangeInfos = append(mi.rangeInfos, mi.fields[fd.Number()]) - i++ - } - } - - // Introduce instability to iteration order, but keep it deterministic. - if len(mi.rangeInfos) > 1 && detrand.Bool() { - i := detrand.Intn(len(mi.rangeInfos) - 1) - mi.rangeInfos[i], mi.rangeInfos[i+1] = mi.rangeInfos[i+1], mi.rangeInfos[i] - } -} - -func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { - switch { - case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsAType: - // Handle as []byte. - mi.getUnknown = func(p pointer) pref.RawFields { - if p.IsNil() { - return nil - } - return *p.Apply(mi.unknownOffset).Bytes() - } - mi.setUnknown = func(p pointer, b pref.RawFields) { - if p.IsNil() { - panic("invalid SetUnknown on nil Message") - } - *p.Apply(mi.unknownOffset).Bytes() = b - } - case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsBType: - // Handle as *[]byte. 
- mi.getUnknown = func(p pointer) pref.RawFields { - if p.IsNil() { - return nil - } - bp := p.Apply(mi.unknownOffset).BytesPtr() - if *bp == nil { - return nil - } - return **bp - } - mi.setUnknown = func(p pointer, b pref.RawFields) { - if p.IsNil() { - panic("invalid SetUnknown on nil Message") - } - bp := p.Apply(mi.unknownOffset).BytesPtr() - if *bp == nil { - *bp = new([]byte) - } - **bp = b - } - default: - mi.getUnknown = func(pointer) pref.RawFields { - return nil - } - mi.setUnknown = func(p pointer, _ pref.RawFields) { - if p.IsNil() { - panic("invalid SetUnknown on nil Message") - } - } - } -} - -func (mi *MessageInfo) makeExtensionFieldsFunc(t reflect.Type, si structInfo) { - if si.extensionOffset.IsValid() { - mi.extensionMap = func(p pointer) *extensionMap { - if p.IsNil() { - return (*extensionMap)(nil) - } - v := p.Apply(si.extensionOffset).AsValueOf(extensionFieldsType) - return (*extensionMap)(v.Interface().(*map[int32]ExtensionField)) - } - } else { - mi.extensionMap = func(pointer) *extensionMap { - return (*extensionMap)(nil) - } - } -} -func (mi *MessageInfo) makeFieldTypes(si structInfo) { - md := mi.Desc - fds := md.Fields() - for i := 0; i < fds.Len(); i++ { - var ft reflect.Type - fd := fds.Get(i) - fs := si.fieldsByNumber[fd.Number()] - isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() - if isOneof { - fs = si.oneofsByName[fd.ContainingOneof().Name()] - } - var isMessage bool - switch { - case fs.Type == nil: - continue // never occurs for officially generated message types - case isOneof: - if fd.Enum() != nil || fd.Message() != nil { - ft = si.oneofWrappersByNumber[fd.Number()].Field(0).Type - } - case fd.IsMap(): - if fd.MapValue().Enum() != nil || fd.MapValue().Message() != nil { - ft = fs.Type.Elem() - } - isMessage = fd.MapValue().Message() != nil - case fd.IsList(): - if fd.Enum() != nil || fd.Message() != nil { - ft = fs.Type.Elem() - } - isMessage = fd.Message() != nil - case fd.Enum() != nil: - ft = fs.Type - if fd.HasPresence() && ft.Kind() == reflect.Ptr { - ft = ft.Elem() - } - case fd.Message() != nil: - ft = fs.Type - if fd.IsWeak() { - ft = nil - } - isMessage = true - } - if isMessage && ft != nil && ft.Kind() != reflect.Ptr { - ft = reflect.PtrTo(ft) // never occurs for officially generated message types - } - if ft != nil { - if mi.fieldTypes == nil { - mi.fieldTypes = make(map[pref.FieldNumber]interface{}) - } - mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface() - } - } -} - -type extensionMap map[int32]ExtensionField - -func (m *extensionMap) Range(f func(pref.FieldDescriptor, pref.Value) bool) { - if m != nil { - for _, x := range *m { - xd := x.Type().TypeDescriptor() - v := x.Value() - if xd.IsList() && v.List().Len() == 0 { - continue - } - if !f(xd, v) { - return - } - } - } -} -func (m *extensionMap) Has(xt pref.ExtensionType) (ok bool) { - if m == nil { - return false - } - xd := xt.TypeDescriptor() - x, ok := (*m)[int32(xd.Number())] - if !ok { - return false - } - switch { - case xd.IsList(): - return x.Value().List().Len() > 0 - case xd.IsMap(): - return x.Value().Map().Len() > 0 - case xd.Message() != nil: - return x.Value().Message().IsValid() - } - return true -} -func (m *extensionMap) Clear(xt pref.ExtensionType) { - delete(*m, int32(xt.TypeDescriptor().Number())) -} -func (m *extensionMap) Get(xt pref.ExtensionType) pref.Value { - xd := xt.TypeDescriptor() - if m != nil { - if x, ok := (*m)[int32(xd.Number())]; ok { - return x.Value() - } - } - return xt.Zero() -} -func (m *extensionMap) 
Set(xt pref.ExtensionType, v pref.Value) { - xd := xt.TypeDescriptor() - isValid := true - switch { - case !xt.IsValidValue(v): - isValid = false - case xd.IsList(): - isValid = v.List().IsValid() - case xd.IsMap(): - isValid = v.Map().IsValid() - case xd.Message() != nil: - isValid = v.Message().IsValid() - } - if !isValid { - panic(fmt.Sprintf("%v: assigning invalid value", xt.TypeDescriptor().FullName())) - } - - if *m == nil { - *m = make(map[int32]ExtensionField) - } - var x ExtensionField - x.Set(xt, v) - (*m)[int32(xd.Number())] = x -} -func (m *extensionMap) Mutable(xt pref.ExtensionType) pref.Value { - xd := xt.TypeDescriptor() - if xd.Kind() != pref.MessageKind && xd.Kind() != pref.GroupKind && !xd.IsList() && !xd.IsMap() { - panic("invalid Mutable on field with non-composite type") - } - if x, ok := (*m)[int32(xd.Number())]; ok { - return x.Value() - } - v := xt.New() - m.Set(xt, v) - return v -} - -// MessageState is a data structure that is nested as the first field in a -// concrete message. It provides a way to implement the ProtoReflect method -// in an allocation-free way without needing to have a shadow Go type generated -// for every message type. This technique only works using unsafe. -// -// -// Example generated code: -// -// type M struct { -// state protoimpl.MessageState -// -// Field1 int32 -// Field2 string -// Field3 *BarMessage -// ... -// } -// -// func (m *M) ProtoReflect() protoreflect.Message { -// mi := &file_fizz_buzz_proto_msgInfos[5] -// if protoimpl.UnsafeEnabled && m != nil { -// ms := protoimpl.X.MessageStateOf(Pointer(m)) -// if ms.LoadMessageInfo() == nil { -// ms.StoreMessageInfo(mi) -// } -// return ms -// } -// return mi.MessageOf(m) -// } -// -// The MessageState type holds a *MessageInfo, which must be atomically set to -// the message info associated with a given message instance. -// By unsafely converting a *M into a *MessageState, the MessageState object -// has access to all the information needed to implement protobuf reflection. -// It has access to the message info as its first field, and a pointer to the -// MessageState is identical to a pointer to the concrete message value. -// -// -// Requirements: -// • The type M must implement protoreflect.ProtoMessage. -// • The address of m must not be nil. -// • The address of m and the address of m.state must be equal, -// even though they are different Go types. -type MessageState struct { - pragma.NoUnkeyedLiterals - pragma.DoNotCompare - pragma.DoNotCopy - - atomicMessageInfo *MessageInfo -} - -type messageState MessageState - -var ( - _ pref.Message = (*messageState)(nil) - _ unwrapper = (*messageState)(nil) -) - -// messageDataType is a tuple of a pointer to the message data and -// a pointer to the message type. It is a generalized way of providing a -// reflective view over a message instance. The disadvantage of this approach -// is the need to allocate this tuple of 16B. -type messageDataType struct { - p pointer - mi *MessageInfo -} - -type ( - messageReflectWrapper messageDataType - messageIfaceWrapper messageDataType -) - -var ( - _ pref.Message = (*messageReflectWrapper)(nil) - _ unwrapper = (*messageReflectWrapper)(nil) - _ pref.ProtoMessage = (*messageIfaceWrapper)(nil) - _ unwrapper = (*messageIfaceWrapper)(nil) -) - -// MessageOf returns a reflective view over a message. The input must be a -// pointer to a named Go struct. If the provided type has a ProtoReflect method, -// it must be implemented by calling this method. 
-func (mi *MessageInfo) MessageOf(m interface{}) pref.Message { - if reflect.TypeOf(m) != mi.GoReflectType { - panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType)) - } - p := pointerOfIface(m) - if p.IsNil() { - return mi.nilMessage.Init(mi) - } - return &messageReflectWrapper{p, mi} -} - -func (m *messageReflectWrapper) pointer() pointer { return m.p } -func (m *messageReflectWrapper) messageInfo() *MessageInfo { return m.mi } - -// Reset implements the v1 proto.Message.Reset method. -func (m *messageIfaceWrapper) Reset() { - if mr, ok := m.protoUnwrap().(interface{ Reset() }); ok { - mr.Reset() - return - } - rv := reflect.ValueOf(m.protoUnwrap()) - if rv.Kind() == reflect.Ptr && !rv.IsNil() { - rv.Elem().Set(reflect.Zero(rv.Type().Elem())) - } -} -func (m *messageIfaceWrapper) ProtoReflect() pref.Message { - return (*messageReflectWrapper)(m) -} -func (m *messageIfaceWrapper) protoUnwrap() interface{} { - return m.p.AsIfaceOf(m.mi.GoReflectType.Elem()) -} - -// checkField verifies that the provided field descriptor is valid. -// Exactly one of the returned values is populated. -func (mi *MessageInfo) checkField(fd pref.FieldDescriptor) (*fieldInfo, pref.ExtensionType) { - var fi *fieldInfo - if n := fd.Number(); 0 < n && int(n) < len(mi.denseFields) { - fi = mi.denseFields[n] - } else { - fi = mi.fields[n] - } - if fi != nil { - if fi.fieldDesc != fd { - if got, want := fd.FullName(), fi.fieldDesc.FullName(); got != want { - panic(fmt.Sprintf("mismatching field: got %v, want %v", got, want)) - } - panic(fmt.Sprintf("mismatching field: %v", fd.FullName())) - } - return fi, nil - } - - if fd.IsExtension() { - if got, want := fd.ContainingMessage().FullName(), mi.Desc.FullName(); got != want { - // TODO: Should this be exact containing message descriptor match? - panic(fmt.Sprintf("extension %v has mismatching containing message: got %v, want %v", fd.FullName(), got, want)) - } - if !mi.Desc.ExtensionRanges().Has(fd.Number()) { - panic(fmt.Sprintf("extension %v extends %v outside the extension range", fd.FullName(), mi.Desc.FullName())) - } - xtd, ok := fd.(pref.ExtensionTypeDescriptor) - if !ok { - panic(fmt.Sprintf("extension %v does not implement protoreflect.ExtensionTypeDescriptor", fd.FullName())) - } - return nil, xtd.Type() - } - panic(fmt.Sprintf("field %v is invalid", fd.FullName())) -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go deleted file mode 100644 index 343cf8721..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ /dev/null @@ -1,543 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package impl - -import ( - "fmt" - "math" - "reflect" - "sync" - - "google.golang.org/protobuf/internal/flags" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" -) - -type fieldInfo struct { - fieldDesc pref.FieldDescriptor - - // These fields are used for protobuf reflection support. - has func(pointer) bool - clear func(pointer) - get func(pointer) pref.Value - set func(pointer, pref.Value) - mutable func(pointer) pref.Value - newMessage func() pref.Message - newField func() pref.Value -} - -func fieldInfoForMissing(fd pref.FieldDescriptor) fieldInfo { - // This never occurs for generated message types. 
- // It implies that a hand-crafted type has missing Go fields - // for specific protobuf message fields. - return fieldInfo{ - fieldDesc: fd, - has: func(p pointer) bool { - return false - }, - clear: func(p pointer) { - panic("missing Go struct field for " + string(fd.FullName())) - }, - get: func(p pointer) pref.Value { - return fd.Default() - }, - set: func(p pointer, v pref.Value) { - panic("missing Go struct field for " + string(fd.FullName())) - }, - mutable: func(p pointer) pref.Value { - panic("missing Go struct field for " + string(fd.FullName())) - }, - newMessage: func() pref.Message { - panic("missing Go struct field for " + string(fd.FullName())) - }, - newField: func() pref.Value { - if v := fd.Default(); v.IsValid() { - return v - } - panic("missing Go struct field for " + string(fd.FullName())) - }, - } -} - -func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x exporter, ot reflect.Type) fieldInfo { - ft := fs.Type - if ft.Kind() != reflect.Interface { - panic(fmt.Sprintf("field %v has invalid type: got %v, want interface kind", fd.FullName(), ft)) - } - if ot.Kind() != reflect.Struct { - panic(fmt.Sprintf("field %v has invalid type: got %v, want struct kind", fd.FullName(), ot)) - } - if !reflect.PtrTo(ot).Implements(ft) { - panic(fmt.Sprintf("field %v has invalid type: %v does not implement %v", fd.FullName(), ot, ft)) - } - conv := NewConverter(ot.Field(0).Type, fd) - isMessage := fd.Message() != nil - - // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) - return fieldInfo{ - // NOTE: The logic below intentionally assumes that oneof fields are - // well-formatted. That is, the oneof interface never contains a - // typed nil pointer to one of the wrapper structs. - - fieldDesc: fd, - has: func(p pointer) bool { - if p.IsNil() { - return false - } - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { - return false - } - return true - }, - clear: func(p pointer) { - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - if rv.IsNil() || rv.Elem().Type().Elem() != ot { - // NOTE: We intentionally don't check for rv.Elem().IsNil() - // so that (*OneofWrapperType)(nil) gets cleared to nil. 
- return - } - rv.Set(reflect.Zero(rv.Type())) - }, - get: func(p pointer) pref.Value { - if p.IsNil() { - return conv.Zero() - } - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { - return conv.Zero() - } - rv = rv.Elem().Elem().Field(0) - return conv.PBValueOf(rv) - }, - set: func(p pointer, v pref.Value) { - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { - rv.Set(reflect.New(ot)) - } - rv = rv.Elem().Elem().Field(0) - rv.Set(conv.GoValueOf(v)) - }, - mutable: func(p pointer) pref.Value { - if !isMessage { - panic(fmt.Sprintf("field %v with invalid Mutable call on field with non-composite type", fd.FullName())) - } - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { - rv.Set(reflect.New(ot)) - } - rv = rv.Elem().Elem().Field(0) - if rv.Kind() == reflect.Ptr && rv.IsNil() { - rv.Set(conv.GoValueOf(pref.ValueOfMessage(conv.New().Message()))) - } - return conv.PBValueOf(rv) - }, - newMessage: func() pref.Message { - return conv.New().Message() - }, - newField: func() pref.Value { - return conv.New() - }, - } -} - -func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { - ft := fs.Type - if ft.Kind() != reflect.Map { - panic(fmt.Sprintf("field %v has invalid type: got %v, want map kind", fd.FullName(), ft)) - } - conv := NewConverter(ft, fd) - - // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) - return fieldInfo{ - fieldDesc: fd, - has: func(p pointer) bool { - if p.IsNil() { - return false - } - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - return rv.Len() > 0 - }, - clear: func(p pointer) { - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - rv.Set(reflect.Zero(rv.Type())) - }, - get: func(p pointer) pref.Value { - if p.IsNil() { - return conv.Zero() - } - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - if rv.Len() == 0 { - return conv.Zero() - } - return conv.PBValueOf(rv) - }, - set: func(p pointer, v pref.Value) { - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - pv := conv.GoValueOf(v) - if pv.IsNil() { - panic(fmt.Sprintf("map field %v cannot be set with read-only value", fd.FullName())) - } - rv.Set(pv) - }, - mutable: func(p pointer) pref.Value { - v := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - if v.IsNil() { - v.Set(reflect.MakeMap(fs.Type)) - } - return conv.PBValueOf(v) - }, - newField: func() pref.Value { - return conv.New() - }, - } -} - -func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { - ft := fs.Type - if ft.Kind() != reflect.Slice { - panic(fmt.Sprintf("field %v has invalid type: got %v, want slice kind", fd.FullName(), ft)) - } - conv := NewConverter(reflect.PtrTo(ft), fd) - - // TODO: Implement unsafe fast path? 
- fieldOffset := offsetOf(fs, x) - return fieldInfo{ - fieldDesc: fd, - has: func(p pointer) bool { - if p.IsNil() { - return false - } - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - return rv.Len() > 0 - }, - clear: func(p pointer) { - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - rv.Set(reflect.Zero(rv.Type())) - }, - get: func(p pointer) pref.Value { - if p.IsNil() { - return conv.Zero() - } - rv := p.Apply(fieldOffset).AsValueOf(fs.Type) - if rv.Elem().Len() == 0 { - return conv.Zero() - } - return conv.PBValueOf(rv) - }, - set: func(p pointer, v pref.Value) { - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - pv := conv.GoValueOf(v) - if pv.IsNil() { - panic(fmt.Sprintf("list field %v cannot be set with read-only value", fd.FullName())) - } - rv.Set(pv.Elem()) - }, - mutable: func(p pointer) pref.Value { - v := p.Apply(fieldOffset).AsValueOf(fs.Type) - return conv.PBValueOf(v) - }, - newField: func() pref.Value { - return conv.New() - }, - } -} - -var ( - nilBytes = reflect.ValueOf([]byte(nil)) - emptyBytes = reflect.ValueOf([]byte{}) -) - -func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { - ft := fs.Type - nullable := fd.HasPresence() - isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 - if nullable { - if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice { - // This never occurs for generated message types. - // Despite the protobuf type system specifying presence, - // the Go field type cannot represent it. - nullable = false - } - if ft.Kind() == reflect.Ptr { - ft = ft.Elem() - } - } - conv := NewConverter(ft, fd) - - // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) - return fieldInfo{ - fieldDesc: fd, - has: func(p pointer) bool { - if p.IsNil() { - return false - } - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - if nullable { - return !rv.IsNil() - } - switch rv.Kind() { - case reflect.Bool: - return rv.Bool() - case reflect.Int32, reflect.Int64: - return rv.Int() != 0 - case reflect.Uint32, reflect.Uint64: - return rv.Uint() != 0 - case reflect.Float32, reflect.Float64: - return rv.Float() != 0 || math.Signbit(rv.Float()) - case reflect.String, reflect.Slice: - return rv.Len() > 0 - default: - panic(fmt.Sprintf("field %v has invalid type: %v", fd.FullName(), rv.Type())) // should never happen - } - }, - clear: func(p pointer) { - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - rv.Set(reflect.Zero(rv.Type())) - }, - get: func(p pointer) pref.Value { - if p.IsNil() { - return conv.Zero() - } - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - if nullable { - if rv.IsNil() { - return conv.Zero() - } - if rv.Kind() == reflect.Ptr { - rv = rv.Elem() - } - } - return conv.PBValueOf(rv) - }, - set: func(p pointer, v pref.Value) { - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - if nullable && rv.Kind() == reflect.Ptr { - if rv.IsNil() { - rv.Set(reflect.New(ft)) - } - rv = rv.Elem() - } - rv.Set(conv.GoValueOf(v)) - if isBytes && rv.Len() == 0 { - if nullable { - rv.Set(emptyBytes) // preserve presence - } else { - rv.Set(nilBytes) // do not preserve presence - } - } - }, - newField: func() pref.Value { - return conv.New() - }, - } -} - -func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldInfo { - if !flags.ProtoLegacy { - panic("no support for proto1 weak fields") - } - - var once sync.Once - var messageType pref.MessageType - lazyInit := func() { - once.Do(func() { - messageName := fd.Message().FullName() - 
messageType, _ = preg.GlobalTypes.FindMessageByName(messageName) - if messageType == nil { - panic(fmt.Sprintf("weak message %v for field %v is not linked in", messageName, fd.FullName())) - } - }) - } - - num := fd.Number() - return fieldInfo{ - fieldDesc: fd, - has: func(p pointer) bool { - if p.IsNil() { - return false - } - _, ok := p.Apply(weakOffset).WeakFields().get(num) - return ok - }, - clear: func(p pointer) { - p.Apply(weakOffset).WeakFields().clear(num) - }, - get: func(p pointer) pref.Value { - lazyInit() - if p.IsNil() { - return pref.ValueOfMessage(messageType.Zero()) - } - m, ok := p.Apply(weakOffset).WeakFields().get(num) - if !ok { - return pref.ValueOfMessage(messageType.Zero()) - } - return pref.ValueOfMessage(m.ProtoReflect()) - }, - set: func(p pointer, v pref.Value) { - lazyInit() - m := v.Message() - if m.Descriptor() != messageType.Descriptor() { - if got, want := m.Descriptor().FullName(), messageType.Descriptor().FullName(); got != want { - panic(fmt.Sprintf("field %v has mismatching message descriptor: got %v, want %v", fd.FullName(), got, want)) - } - panic(fmt.Sprintf("field %v has mismatching message descriptor: %v", fd.FullName(), m.Descriptor().FullName())) - } - p.Apply(weakOffset).WeakFields().set(num, m.Interface()) - }, - mutable: func(p pointer) pref.Value { - lazyInit() - fs := p.Apply(weakOffset).WeakFields() - m, ok := fs.get(num) - if !ok { - m = messageType.New().Interface() - fs.set(num, m) - } - return pref.ValueOfMessage(m.ProtoReflect()) - }, - newMessage: func() pref.Message { - lazyInit() - return messageType.New() - }, - newField: func() pref.Value { - lazyInit() - return pref.ValueOfMessage(messageType.New()) - }, - } -} - -func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { - ft := fs.Type - conv := NewConverter(ft, fd) - - // TODO: Implement unsafe fast path? 
- fieldOffset := offsetOf(fs, x) - return fieldInfo{ - fieldDesc: fd, - has: func(p pointer) bool { - if p.IsNil() { - return false - } - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - if fs.Type.Kind() != reflect.Ptr { - return !isZero(rv) - } - return !rv.IsNil() - }, - clear: func(p pointer) { - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - rv.Set(reflect.Zero(rv.Type())) - }, - get: func(p pointer) pref.Value { - if p.IsNil() { - return conv.Zero() - } - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - return conv.PBValueOf(rv) - }, - set: func(p pointer, v pref.Value) { - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - rv.Set(conv.GoValueOf(v)) - if fs.Type.Kind() == reflect.Ptr && rv.IsNil() { - panic(fmt.Sprintf("field %v has invalid nil pointer", fd.FullName())) - } - }, - mutable: func(p pointer) pref.Value { - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - if fs.Type.Kind() == reflect.Ptr && rv.IsNil() { - rv.Set(conv.GoValueOf(conv.New())) - } - return conv.PBValueOf(rv) - }, - newMessage: func() pref.Message { - return conv.New().Message() - }, - newField: func() pref.Value { - return conv.New() - }, - } -} - -type oneofInfo struct { - oneofDesc pref.OneofDescriptor - which func(pointer) pref.FieldNumber -} - -func makeOneofInfo(od pref.OneofDescriptor, si structInfo, x exporter) *oneofInfo { - oi := &oneofInfo{oneofDesc: od} - if od.IsSynthetic() { - fs := si.fieldsByNumber[od.Fields().Get(0).Number()] - fieldOffset := offsetOf(fs, x) - oi.which = func(p pointer) pref.FieldNumber { - if p.IsNil() { - return 0 - } - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - if rv.IsNil() { // valid on either *T or []byte - return 0 - } - return od.Fields().Get(0).Number() - } - } else { - fs := si.oneofsByName[od.Name()] - fieldOffset := offsetOf(fs, x) - oi.which = func(p pointer) pref.FieldNumber { - if p.IsNil() { - return 0 - } - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - if rv.IsNil() { - return 0 - } - rv = rv.Elem() - if rv.IsNil() { - return 0 - } - return si.oneofWrappersByType[rv.Type().Elem()] - } - } - return oi -} - -// isZero is identical to reflect.Value.IsZero. -// TODO: Remove this when Go1.13 is the minimally supported Go version. 
-func isZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return math.Float64bits(v.Float()) == 0 - case reflect.Complex64, reflect.Complex128: - c := v.Complex() - return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0 - case reflect.Array: - for i := 0; i < v.Len(); i++ { - if !isZero(v.Index(i)) { - return false - } - } - return true - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: - return v.IsNil() - case reflect.String: - return v.Len() == 0 - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - if !isZero(v.Field(i)) { - return false - } - } - return true - default: - panic(&reflect.ValueError{"reflect.Value.IsZero", v.Kind()}) - } -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go deleted file mode 100644 index 741d6e5b6..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go +++ /dev/null @@ -1,249 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-types. DO NOT EDIT. - -package impl - -import ( - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/runtime/protoiface" -) - -func (m *messageState) Descriptor() protoreflect.MessageDescriptor { - return m.messageInfo().Desc -} -func (m *messageState) Type() protoreflect.MessageType { - return m.messageInfo() -} -func (m *messageState) New() protoreflect.Message { - return m.messageInfo().New() -} -func (m *messageState) Interface() protoreflect.ProtoMessage { - return m.protoUnwrap().(protoreflect.ProtoMessage) -} -func (m *messageState) protoUnwrap() interface{} { - return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) -} -func (m *messageState) ProtoMethods() *protoiface.Methods { - m.messageInfo().init() - return &m.messageInfo().methods -} - -// ProtoMessageInfo is a pseudo-internal API for allowing the v1 code -// to be able to retrieve a v2 MessageInfo struct. -// -// WARNING: This method is exempt from the compatibility promise and -// may be removed in the future without warning. 
-func (m *messageState) ProtoMessageInfo() *MessageInfo { - return m.messageInfo() -} - -func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { - m.messageInfo().init() - for _, ri := range m.messageInfo().rangeInfos { - switch ri := ri.(type) { - case *fieldInfo: - if ri.has(m.pointer()) { - if !f(ri.fieldDesc, ri.get(m.pointer())) { - return - } - } - case *oneofInfo: - if n := ri.which(m.pointer()); n > 0 { - fi := m.messageInfo().fields[n] - if !f(fi.fieldDesc, fi.get(m.pointer())) { - return - } - } - } - } - m.messageInfo().extensionMap(m.pointer()).Range(f) -} -func (m *messageState) Has(fd protoreflect.FieldDescriptor) bool { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { - return fi.has(m.pointer()) - } else { - return m.messageInfo().extensionMap(m.pointer()).Has(xt) - } -} -func (m *messageState) Clear(fd protoreflect.FieldDescriptor) { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { - fi.clear(m.pointer()) - } else { - m.messageInfo().extensionMap(m.pointer()).Clear(xt) - } -} -func (m *messageState) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { - return fi.get(m.pointer()) - } else { - return m.messageInfo().extensionMap(m.pointer()).Get(xt) - } -} -func (m *messageState) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { - fi.set(m.pointer(), v) - } else { - m.messageInfo().extensionMap(m.pointer()).Set(xt, v) - } -} -func (m *messageState) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { - return fi.mutable(m.pointer()) - } else { - return m.messageInfo().extensionMap(m.pointer()).Mutable(xt) - } -} -func (m *messageState) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { - return fi.newField() - } else { - return xt.New() - } -} -func (m *messageState) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { - m.messageInfo().init() - if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { - return od.Fields().ByNumber(oi.which(m.pointer())) - } - panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName())) -} -func (m *messageState) GetUnknown() protoreflect.RawFields { - m.messageInfo().init() - return m.messageInfo().getUnknown(m.pointer()) -} -func (m *messageState) SetUnknown(b protoreflect.RawFields) { - m.messageInfo().init() - m.messageInfo().setUnknown(m.pointer(), b) -} -func (m *messageState) IsValid() bool { - return !m.pointer().IsNil() -} - -func (m *messageReflectWrapper) Descriptor() protoreflect.MessageDescriptor { - return m.messageInfo().Desc -} -func (m *messageReflectWrapper) Type() protoreflect.MessageType { - return m.messageInfo() -} -func (m *messageReflectWrapper) New() protoreflect.Message { - return m.messageInfo().New() -} -func (m *messageReflectWrapper) Interface() protoreflect.ProtoMessage { - if m, ok := m.protoUnwrap().(protoreflect.ProtoMessage); ok { - return m - } - return (*messageIfaceWrapper)(m) -} -func (m *messageReflectWrapper) protoUnwrap() interface{} { - return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) -} -func (m 
*messageReflectWrapper) ProtoMethods() *protoiface.Methods { - m.messageInfo().init() - return &m.messageInfo().methods -} - -// ProtoMessageInfo is a pseudo-internal API for allowing the v1 code -// to be able to retrieve a v2 MessageInfo struct. -// -// WARNING: This method is exempt from the compatibility promise and -// may be removed in the future without warning. -func (m *messageReflectWrapper) ProtoMessageInfo() *MessageInfo { - return m.messageInfo() -} - -func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { - m.messageInfo().init() - for _, ri := range m.messageInfo().rangeInfos { - switch ri := ri.(type) { - case *fieldInfo: - if ri.has(m.pointer()) { - if !f(ri.fieldDesc, ri.get(m.pointer())) { - return - } - } - case *oneofInfo: - if n := ri.which(m.pointer()); n > 0 { - fi := m.messageInfo().fields[n] - if !f(fi.fieldDesc, fi.get(m.pointer())) { - return - } - } - } - } - m.messageInfo().extensionMap(m.pointer()).Range(f) -} -func (m *messageReflectWrapper) Has(fd protoreflect.FieldDescriptor) bool { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { - return fi.has(m.pointer()) - } else { - return m.messageInfo().extensionMap(m.pointer()).Has(xt) - } -} -func (m *messageReflectWrapper) Clear(fd protoreflect.FieldDescriptor) { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { - fi.clear(m.pointer()) - } else { - m.messageInfo().extensionMap(m.pointer()).Clear(xt) - } -} -func (m *messageReflectWrapper) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { - return fi.get(m.pointer()) - } else { - return m.messageInfo().extensionMap(m.pointer()).Get(xt) - } -} -func (m *messageReflectWrapper) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { - fi.set(m.pointer(), v) - } else { - m.messageInfo().extensionMap(m.pointer()).Set(xt, v) - } -} -func (m *messageReflectWrapper) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { - return fi.mutable(m.pointer()) - } else { - return m.messageInfo().extensionMap(m.pointer()).Mutable(xt) - } -} -func (m *messageReflectWrapper) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { - return fi.newField() - } else { - return xt.New() - } -} -func (m *messageReflectWrapper) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { - m.messageInfo().init() - if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { - return od.Fields().ByNumber(oi.which(m.pointer())) - } - panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName())) -} -func (m *messageReflectWrapper) GetUnknown() protoreflect.RawFields { - m.messageInfo().init() - return m.messageInfo().getUnknown(m.pointer()) -} -func (m *messageReflectWrapper) SetUnknown(b protoreflect.RawFields) { - m.messageInfo().init() - m.messageInfo().setUnknown(m.pointer(), b) -} -func (m *messageReflectWrapper) IsValid() bool { - return !m.pointer().IsNil() -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go deleted file mode 100644 
index 4c491bdf4..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package impl - -import ( - "fmt" - "reflect" - "sync" -) - -const UnsafeEnabled = false - -// Pointer is an opaque pointer type. -type Pointer interface{} - -// offset represents the offset to a struct field, accessible from a pointer. -// The offset is the field index into a struct. -type offset struct { - index int - export exporter -} - -// offsetOf returns a field offset for the struct field. -func offsetOf(f reflect.StructField, x exporter) offset { - if len(f.Index) != 1 { - panic("embedded structs are not supported") - } - if f.PkgPath == "" { - return offset{index: f.Index[0]} // field is already exported - } - if x == nil { - panic("exporter must be provided for unexported field") - } - return offset{index: f.Index[0], export: x} -} - -// IsValid reports whether the offset is valid. -func (f offset) IsValid() bool { return f.index >= 0 } - -// invalidOffset is an invalid field offset. -var invalidOffset = offset{index: -1} - -// zeroOffset is a noop when calling pointer.Apply. -var zeroOffset = offset{index: 0} - -// pointer is an abstract representation of a pointer to a struct or field. -type pointer struct{ v reflect.Value } - -// pointerOf returns p as a pointer. -func pointerOf(p Pointer) pointer { - return pointerOfIface(p) -} - -// pointerOfValue returns v as a pointer. -func pointerOfValue(v reflect.Value) pointer { - return pointer{v: v} -} - -// pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v interface{}) pointer { - return pointer{v: reflect.ValueOf(v)} -} - -// IsNil reports whether the pointer is nil. -func (p pointer) IsNil() bool { - return p.v.IsNil() -} - -// Apply adds an offset to the pointer to derive a new pointer -// to a specified field. The current pointer must be pointing at a struct. -func (p pointer) Apply(f offset) pointer { - if f.export != nil { - if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() { - return pointer{v: v} - } - } - return pointer{v: p.v.Elem().Field(f.index).Addr()} -} - -// AsValueOf treats p as a pointer to an object of type t and returns the value. -// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t)) -func (p pointer) AsValueOf(t reflect.Type) reflect.Value { - if got := p.v.Type().Elem(); got != t { - panic(fmt.Sprintf("invalid type: got %v, want %v", got, t)) - } - return p.v -} - -// AsIfaceOf treats p as a pointer to an object of type t and returns the value. 
-// It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) interface{} { - return p.AsValueOf(t).Interface() -} - -func (p pointer) Bool() *bool { return p.v.Interface().(*bool) } -func (p pointer) BoolPtr() **bool { return p.v.Interface().(**bool) } -func (p pointer) BoolSlice() *[]bool { return p.v.Interface().(*[]bool) } -func (p pointer) Int32() *int32 { return p.v.Interface().(*int32) } -func (p pointer) Int32Ptr() **int32 { return p.v.Interface().(**int32) } -func (p pointer) Int32Slice() *[]int32 { return p.v.Interface().(*[]int32) } -func (p pointer) Int64() *int64 { return p.v.Interface().(*int64) } -func (p pointer) Int64Ptr() **int64 { return p.v.Interface().(**int64) } -func (p pointer) Int64Slice() *[]int64 { return p.v.Interface().(*[]int64) } -func (p pointer) Uint32() *uint32 { return p.v.Interface().(*uint32) } -func (p pointer) Uint32Ptr() **uint32 { return p.v.Interface().(**uint32) } -func (p pointer) Uint32Slice() *[]uint32 { return p.v.Interface().(*[]uint32) } -func (p pointer) Uint64() *uint64 { return p.v.Interface().(*uint64) } -func (p pointer) Uint64Ptr() **uint64 { return p.v.Interface().(**uint64) } -func (p pointer) Uint64Slice() *[]uint64 { return p.v.Interface().(*[]uint64) } -func (p pointer) Float32() *float32 { return p.v.Interface().(*float32) } -func (p pointer) Float32Ptr() **float32 { return p.v.Interface().(**float32) } -func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) } -func (p pointer) Float64() *float64 { return p.v.Interface().(*float64) } -func (p pointer) Float64Ptr() **float64 { return p.v.Interface().(**float64) } -func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) } -func (p pointer) String() *string { return p.v.Interface().(*string) } -func (p pointer) StringPtr() **string { return p.v.Interface().(**string) } -func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) } -func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) } -func (p pointer) BytesPtr() **[]byte { return p.v.Interface().(**[]byte) } -func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) } -func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) } -func (p pointer) Extensions() *map[int32]ExtensionField { - return p.v.Interface().(*map[int32]ExtensionField) -} - -func (p pointer) Elem() pointer { - return pointer{v: p.v.Elem()} -} - -// PointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) PointerSlice() []pointer { - // TODO: reconsider this - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// AppendPointerSlice appends v to p, which must be a []*T. -func (p pointer) AppendPointerSlice(v pointer) { - sp := p.v.Elem() - sp.Set(reflect.Append(sp, v.v)) -} - -// SetPointer sets *p to v. 
-func (p pointer) SetPointer(v pointer) { - p.v.Elem().Set(v.v) -} - -func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") } -func (ms *messageState) pointer() pointer { panic("not supported") } -func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") } -func (ms *messageState) LoadMessageInfo() *MessageInfo { panic("not supported") } -func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") } - -type atomicNilMessage struct { - once sync.Once - m messageReflectWrapper -} - -func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper { - m.once.Do(func() { - m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface()) - m.m.mi = mi - }) - return &m.m -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go deleted file mode 100644 index ee0e0573e..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !purego && !appengine -// +build !purego,!appengine - -package impl - -import ( - "reflect" - "sync/atomic" - "unsafe" -) - -const UnsafeEnabled = true - -// Pointer is an opaque pointer type. -type Pointer unsafe.Pointer - -// offset represents the offset to a struct field, accessible from a pointer. -// The offset is the byte offset to the field from the start of the struct. -type offset uintptr - -// offsetOf returns a field offset for the struct field. -func offsetOf(f reflect.StructField, x exporter) offset { - return offset(f.Offset) -} - -// IsValid reports whether the offset is valid. -func (f offset) IsValid() bool { return f != invalidOffset } - -// invalidOffset is an invalid field offset. -var invalidOffset = ^offset(0) - -// zeroOffset is a noop when calling pointer.Apply. -var zeroOffset = offset(0) - -// pointer is a pointer to a message struct or field. -type pointer struct{ p unsafe.Pointer } - -// pointerOf returns p as a pointer. -func pointerOf(p Pointer) pointer { - return pointer{p: unsafe.Pointer(p)} -} - -// pointerOfValue returns v as a pointer. -func pointerOfValue(v reflect.Value) pointer { - return pointer{p: unsafe.Pointer(v.Pointer())} -} - -// pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v interface{}) pointer { - type ifaceHeader struct { - Type unsafe.Pointer - Data unsafe.Pointer - } - return pointer{p: (*ifaceHeader)(unsafe.Pointer(&v)).Data} -} - -// IsNil reports whether the pointer is nil. -func (p pointer) IsNil() bool { - return p.p == nil -} - -// Apply adds an offset to the pointer to derive a new pointer -// to a specified field. The pointer must be valid and pointing at a struct. -func (p pointer) Apply(f offset) pointer { - if p.IsNil() { - panic("invalid nil pointer") - } - return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} -} - -// AsValueOf treats p as a pointer to an object of type t and returns the value. -// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t)) -func (p pointer) AsValueOf(t reflect.Type) reflect.Value { - return reflect.NewAt(t, p.p) -} - -// AsIfaceOf treats p as a pointer to an object of type t and returns the value. 
-// It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) interface{} { - // TODO: Use tricky unsafe magic to directly create ifaceHeader. - return p.AsValueOf(t).Interface() -} - -func (p pointer) Bool() *bool { return (*bool)(p.p) } -func (p pointer) BoolPtr() **bool { return (**bool)(p.p) } -func (p pointer) BoolSlice() *[]bool { return (*[]bool)(p.p) } -func (p pointer) Int32() *int32 { return (*int32)(p.p) } -func (p pointer) Int32Ptr() **int32 { return (**int32)(p.p) } -func (p pointer) Int32Slice() *[]int32 { return (*[]int32)(p.p) } -func (p pointer) Int64() *int64 { return (*int64)(p.p) } -func (p pointer) Int64Ptr() **int64 { return (**int64)(p.p) } -func (p pointer) Int64Slice() *[]int64 { return (*[]int64)(p.p) } -func (p pointer) Uint32() *uint32 { return (*uint32)(p.p) } -func (p pointer) Uint32Ptr() **uint32 { return (**uint32)(p.p) } -func (p pointer) Uint32Slice() *[]uint32 { return (*[]uint32)(p.p) } -func (p pointer) Uint64() *uint64 { return (*uint64)(p.p) } -func (p pointer) Uint64Ptr() **uint64 { return (**uint64)(p.p) } -func (p pointer) Uint64Slice() *[]uint64 { return (*[]uint64)(p.p) } -func (p pointer) Float32() *float32 { return (*float32)(p.p) } -func (p pointer) Float32Ptr() **float32 { return (**float32)(p.p) } -func (p pointer) Float32Slice() *[]float32 { return (*[]float32)(p.p) } -func (p pointer) Float64() *float64 { return (*float64)(p.p) } -func (p pointer) Float64Ptr() **float64 { return (**float64)(p.p) } -func (p pointer) Float64Slice() *[]float64 { return (*[]float64)(p.p) } -func (p pointer) String() *string { return (*string)(p.p) } -func (p pointer) StringPtr() **string { return (**string)(p.p) } -func (p pointer) StringSlice() *[]string { return (*[]string)(p.p) } -func (p pointer) Bytes() *[]byte { return (*[]byte)(p.p) } -func (p pointer) BytesPtr() **[]byte { return (**[]byte)(p.p) } -func (p pointer) BytesSlice() *[][]byte { return (*[][]byte)(p.p) } -func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) } -func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) } - -func (p pointer) Elem() pointer { - return pointer{p: *(*unsafe.Pointer)(p.p)} -} - -// PointerSlice loads []*T from p as a []pointer. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) PointerSlice() []pointer { - // Super-tricky - p should point to a []*T where T is a - // message type. We load it as []pointer. - return *(*[]pointer)(p.p) -} - -// AppendPointerSlice appends v to p, which must be a []*T. -func (p pointer) AppendPointerSlice(v pointer) { - *(*[]pointer)(p.p) = append(*(*[]pointer)(p.p), v) -} - -// SetPointer sets *p to v. -func (p pointer) SetPointer(v pointer) { - *(*unsafe.Pointer)(p.p) = (unsafe.Pointer)(v.p) -} - -// Static check that MessageState does not exceed the size of a pointer. -const _ = uint(unsafe.Sizeof(unsafe.Pointer(nil)) - unsafe.Sizeof(MessageState{})) - -func (Export) MessageStateOf(p Pointer) *messageState { - // Super-tricky - see documentation on MessageState. - return (*messageState)(unsafe.Pointer(p)) -} -func (ms *messageState) pointer() pointer { - // Super-tricky - see documentation on MessageState. 
- return pointer{p: unsafe.Pointer(ms)} -} -func (ms *messageState) messageInfo() *MessageInfo { - mi := ms.LoadMessageInfo() - if mi == nil { - panic("invalid nil message info; this suggests memory corruption due to a race or shallow copy on the message struct") - } - return mi -} -func (ms *messageState) LoadMessageInfo() *MessageInfo { - return (*MessageInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&ms.atomicMessageInfo)))) -} -func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&ms.atomicMessageInfo)), unsafe.Pointer(mi)) -} - -type atomicNilMessage struct{ p unsafe.Pointer } // p is a *messageReflectWrapper - -func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper { - if p := atomic.LoadPointer(&m.p); p != nil { - return (*messageReflectWrapper)(p) - } - w := &messageReflectWrapper{mi: mi} - atomic.CompareAndSwapPointer(&m.p, nil, (unsafe.Pointer)(w)) - return (*messageReflectWrapper)(atomic.LoadPointer(&m.p)) -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go deleted file mode 100644 index 08cfb6054..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/validate.go +++ /dev/null @@ -1,576 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package impl - -import ( - "fmt" - "math" - "math/bits" - "reflect" - "unicode/utf8" - - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/encoding/messageset" - "google.golang.org/protobuf/internal/flags" - "google.golang.org/protobuf/internal/genid" - "google.golang.org/protobuf/internal/strs" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" - piface "google.golang.org/protobuf/runtime/protoiface" -) - -// ValidationStatus is the result of validating the wire-format encoding of a message. -type ValidationStatus int - -const ( - // ValidationUnknown indicates that unmarshaling the message might succeed or fail. - // The validator was unable to render a judgement. - // - // The only causes of this status are an aberrant message type appearing somewhere - // in the message or a failure in the extension resolver. - ValidationUnknown ValidationStatus = iota + 1 - - // ValidationInvalid indicates that unmarshaling the message will fail. - ValidationInvalid - - // ValidationValid indicates that unmarshaling the message will succeed. - ValidationValid -) - -func (v ValidationStatus) String() string { - switch v { - case ValidationUnknown: - return "ValidationUnknown" - case ValidationInvalid: - return "ValidationInvalid" - case ValidationValid: - return "ValidationValid" - default: - return fmt.Sprintf("ValidationStatus(%d)", int(v)) - } -} - -// Validate determines whether the contents of the buffer are a valid wire encoding -// of the message type. -// -// This function is exposed for testing. 
-func Validate(mt pref.MessageType, in piface.UnmarshalInput) (out piface.UnmarshalOutput, _ ValidationStatus) { - mi, ok := mt.(*MessageInfo) - if !ok { - return out, ValidationUnknown - } - if in.Resolver == nil { - in.Resolver = preg.GlobalTypes - } - o, st := mi.validate(in.Buf, 0, unmarshalOptions{ - flags: in.Flags, - resolver: in.Resolver, - }) - if o.initialized { - out.Flags |= piface.UnmarshalInitialized - } - return out, st -} - -type validationInfo struct { - mi *MessageInfo - typ validationType - keyType, valType validationType - - // For non-required fields, requiredBit is 0. - // - // For required fields, requiredBit's nth bit is set, where n is a - // unique index in the range [0, MessageInfo.numRequiredFields). - // - // If there are more than 64 required fields, requiredBit is 0. - requiredBit uint64 -} - -type validationType uint8 - -const ( - validationTypeOther validationType = iota - validationTypeMessage - validationTypeGroup - validationTypeMap - validationTypeRepeatedVarint - validationTypeRepeatedFixed32 - validationTypeRepeatedFixed64 - validationTypeVarint - validationTypeFixed32 - validationTypeFixed64 - validationTypeBytes - validationTypeUTF8String - validationTypeMessageSetItem -) - -func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd pref.FieldDescriptor, ft reflect.Type) validationInfo { - var vi validationInfo - switch { - case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): - switch fd.Kind() { - case pref.MessageKind: - vi.typ = validationTypeMessage - if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok { - vi.mi = getMessageInfo(ot.Field(0).Type) - } - case pref.GroupKind: - vi.typ = validationTypeGroup - if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok { - vi.mi = getMessageInfo(ot.Field(0).Type) - } - case pref.StringKind: - if strs.EnforceUTF8(fd) { - vi.typ = validationTypeUTF8String - } - } - default: - vi = newValidationInfo(fd, ft) - } - if fd.Cardinality() == pref.Required { - // Avoid overflow. The required field check is done with a 64-bit mask, with - // any message containing more than 64 required fields always reported as - // potentially uninitialized, so it is not important to get a precise count - // of the required fields past 64. 
- if mi.numRequiredFields < math.MaxUint8 { - mi.numRequiredFields++ - vi.requiredBit = 1 << (mi.numRequiredFields - 1) - } - } - return vi -} - -func newValidationInfo(fd pref.FieldDescriptor, ft reflect.Type) validationInfo { - var vi validationInfo - switch { - case fd.IsList(): - switch fd.Kind() { - case pref.MessageKind: - vi.typ = validationTypeMessage - if ft.Kind() == reflect.Slice { - vi.mi = getMessageInfo(ft.Elem()) - } - case pref.GroupKind: - vi.typ = validationTypeGroup - if ft.Kind() == reflect.Slice { - vi.mi = getMessageInfo(ft.Elem()) - } - case pref.StringKind: - vi.typ = validationTypeBytes - if strs.EnforceUTF8(fd) { - vi.typ = validationTypeUTF8String - } - default: - switch wireTypes[fd.Kind()] { - case protowire.VarintType: - vi.typ = validationTypeRepeatedVarint - case protowire.Fixed32Type: - vi.typ = validationTypeRepeatedFixed32 - case protowire.Fixed64Type: - vi.typ = validationTypeRepeatedFixed64 - } - } - case fd.IsMap(): - vi.typ = validationTypeMap - switch fd.MapKey().Kind() { - case pref.StringKind: - if strs.EnforceUTF8(fd) { - vi.keyType = validationTypeUTF8String - } - } - switch fd.MapValue().Kind() { - case pref.MessageKind: - vi.valType = validationTypeMessage - if ft.Kind() == reflect.Map { - vi.mi = getMessageInfo(ft.Elem()) - } - case pref.StringKind: - if strs.EnforceUTF8(fd) { - vi.valType = validationTypeUTF8String - } - } - default: - switch fd.Kind() { - case pref.MessageKind: - vi.typ = validationTypeMessage - if !fd.IsWeak() { - vi.mi = getMessageInfo(ft) - } - case pref.GroupKind: - vi.typ = validationTypeGroup - vi.mi = getMessageInfo(ft) - case pref.StringKind: - vi.typ = validationTypeBytes - if strs.EnforceUTF8(fd) { - vi.typ = validationTypeUTF8String - } - default: - switch wireTypes[fd.Kind()] { - case protowire.VarintType: - vi.typ = validationTypeVarint - case protowire.Fixed32Type: - vi.typ = validationTypeFixed32 - case protowire.Fixed64Type: - vi.typ = validationTypeFixed64 - case protowire.BytesType: - vi.typ = validationTypeBytes - } - } - } - return vi -} - -func (mi *MessageInfo) validate(b []byte, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, result ValidationStatus) { - mi.init() - type validationState struct { - typ validationType - keyType, valType validationType - endGroup protowire.Number - mi *MessageInfo - tail []byte - requiredMask uint64 - } - - // Pre-allocate some slots to avoid repeated slice reallocation. - states := make([]validationState, 0, 16) - states = append(states, validationState{ - typ: validationTypeMessage, - mi: mi, - }) - if groupTag > 0 { - states[0].typ = validationTypeGroup - states[0].endGroup = groupTag - } - initialized := true - start := len(b) -State: - for len(states) > 0 { - st := &states[len(states)-1] - for len(b) > 0 { - // Parse the tag (field number and wire type). 
- var tag uint64 - if b[0] < 0x80 { - tag = uint64(b[0]) - b = b[1:] - } else if len(b) >= 2 && b[1] < 128 { - tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 - b = b[2:] - } else { - var n int - tag, n = protowire.ConsumeVarint(b) - if n < 0 { - return out, ValidationInvalid - } - b = b[n:] - } - var num protowire.Number - if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) { - return out, ValidationInvalid - } else { - num = protowire.Number(n) - } - wtyp := protowire.Type(tag & 7) - - if wtyp == protowire.EndGroupType { - if st.endGroup == num { - goto PopState - } - return out, ValidationInvalid - } - var vi validationInfo - switch { - case st.typ == validationTypeMap: - switch num { - case genid.MapEntry_Key_field_number: - vi.typ = st.keyType - case genid.MapEntry_Value_field_number: - vi.typ = st.valType - vi.mi = st.mi - vi.requiredBit = 1 - } - case flags.ProtoLegacy && st.mi.isMessageSet: - switch num { - case messageset.FieldItem: - vi.typ = validationTypeMessageSetItem - } - default: - var f *coderFieldInfo - if int(num) < len(st.mi.denseCoderFields) { - f = st.mi.denseCoderFields[num] - } else { - f = st.mi.coderFields[num] - } - if f != nil { - vi = f.validation - if vi.typ == validationTypeMessage && vi.mi == nil { - // Probable weak field. - // - // TODO: Consider storing the results of this lookup somewhere - // rather than recomputing it on every validation. - fd := st.mi.Desc.Fields().ByNumber(num) - if fd == nil || !fd.IsWeak() { - break - } - messageName := fd.Message().FullName() - messageType, err := preg.GlobalTypes.FindMessageByName(messageName) - switch err { - case nil: - vi.mi, _ = messageType.(*MessageInfo) - case preg.NotFound: - vi.typ = validationTypeBytes - default: - return out, ValidationUnknown - } - } - break - } - // Possible extension field. - // - // TODO: We should return ValidationUnknown when: - // 1. The resolver is not frozen. (More extensions may be added to it.) - // 2. The resolver returns preg.NotFound. - // In this case, a type added to the resolver in the future could cause - // unmarshaling to begin failing. Supporting this requires some way to - // determine if the resolver is frozen. - xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), num) - if err != nil && err != preg.NotFound { - return out, ValidationUnknown - } - if err == nil { - vi = getExtensionFieldInfo(xt).validation - } - } - if vi.requiredBit != 0 { - // Check that the field has a compatible wire type. - // We only need to consider non-repeated field types, - // since repeated fields (and maps) can never be required. 
- ok := false - switch vi.typ { - case validationTypeVarint: - ok = wtyp == protowire.VarintType - case validationTypeFixed32: - ok = wtyp == protowire.Fixed32Type - case validationTypeFixed64: - ok = wtyp == protowire.Fixed64Type - case validationTypeBytes, validationTypeUTF8String, validationTypeMessage: - ok = wtyp == protowire.BytesType - case validationTypeGroup: - ok = wtyp == protowire.StartGroupType - } - if ok { - st.requiredMask |= vi.requiredBit - } - } - - switch wtyp { - case protowire.VarintType: - if len(b) >= 10 { - switch { - case b[0] < 0x80: - b = b[1:] - case b[1] < 0x80: - b = b[2:] - case b[2] < 0x80: - b = b[3:] - case b[3] < 0x80: - b = b[4:] - case b[4] < 0x80: - b = b[5:] - case b[5] < 0x80: - b = b[6:] - case b[6] < 0x80: - b = b[7:] - case b[7] < 0x80: - b = b[8:] - case b[8] < 0x80: - b = b[9:] - case b[9] < 0x80 && b[9] < 2: - b = b[10:] - default: - return out, ValidationInvalid - } - } else { - switch { - case len(b) > 0 && b[0] < 0x80: - b = b[1:] - case len(b) > 1 && b[1] < 0x80: - b = b[2:] - case len(b) > 2 && b[2] < 0x80: - b = b[3:] - case len(b) > 3 && b[3] < 0x80: - b = b[4:] - case len(b) > 4 && b[4] < 0x80: - b = b[5:] - case len(b) > 5 && b[5] < 0x80: - b = b[6:] - case len(b) > 6 && b[6] < 0x80: - b = b[7:] - case len(b) > 7 && b[7] < 0x80: - b = b[8:] - case len(b) > 8 && b[8] < 0x80: - b = b[9:] - case len(b) > 9 && b[9] < 2: - b = b[10:] - default: - return out, ValidationInvalid - } - } - continue State - case protowire.BytesType: - var size uint64 - if len(b) >= 1 && b[0] < 0x80 { - size = uint64(b[0]) - b = b[1:] - } else if len(b) >= 2 && b[1] < 128 { - size = uint64(b[0]&0x7f) + uint64(b[1])<<7 - b = b[2:] - } else { - var n int - size, n = protowire.ConsumeVarint(b) - if n < 0 { - return out, ValidationInvalid - } - b = b[n:] - } - if size > uint64(len(b)) { - return out, ValidationInvalid - } - v := b[:size] - b = b[size:] - switch vi.typ { - case validationTypeMessage: - if vi.mi == nil { - return out, ValidationUnknown - } - vi.mi.init() - fallthrough - case validationTypeMap: - if vi.mi != nil { - vi.mi.init() - } - states = append(states, validationState{ - typ: vi.typ, - keyType: vi.keyType, - valType: vi.valType, - mi: vi.mi, - tail: b, - }) - b = v - continue State - case validationTypeRepeatedVarint: - // Packed field. - for len(v) > 0 { - _, n := protowire.ConsumeVarint(v) - if n < 0 { - return out, ValidationInvalid - } - v = v[n:] - } - case validationTypeRepeatedFixed32: - // Packed field. - if len(v)%4 != 0 { - return out, ValidationInvalid - } - case validationTypeRepeatedFixed64: - // Packed field. 
- if len(v)%8 != 0 { - return out, ValidationInvalid - } - case validationTypeUTF8String: - if !utf8.Valid(v) { - return out, ValidationInvalid - } - } - case protowire.Fixed32Type: - if len(b) < 4 { - return out, ValidationInvalid - } - b = b[4:] - case protowire.Fixed64Type: - if len(b) < 8 { - return out, ValidationInvalid - } - b = b[8:] - case protowire.StartGroupType: - switch { - case vi.typ == validationTypeGroup: - if vi.mi == nil { - return out, ValidationUnknown - } - vi.mi.init() - states = append(states, validationState{ - typ: validationTypeGroup, - mi: vi.mi, - endGroup: num, - }) - continue State - case flags.ProtoLegacy && vi.typ == validationTypeMessageSetItem: - typeid, v, n, err := messageset.ConsumeFieldValue(b, false) - if err != nil { - return out, ValidationInvalid - } - xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), typeid) - switch { - case err == preg.NotFound: - b = b[n:] - case err != nil: - return out, ValidationUnknown - default: - xvi := getExtensionFieldInfo(xt).validation - if xvi.mi != nil { - xvi.mi.init() - } - states = append(states, validationState{ - typ: xvi.typ, - mi: xvi.mi, - tail: b[n:], - }) - b = v - continue State - } - default: - n := protowire.ConsumeFieldValue(num, wtyp, b) - if n < 0 { - return out, ValidationInvalid - } - b = b[n:] - } - default: - return out, ValidationInvalid - } - } - if st.endGroup != 0 { - return out, ValidationInvalid - } - if len(b) != 0 { - return out, ValidationInvalid - } - b = st.tail - PopState: - numRequiredFields := 0 - switch st.typ { - case validationTypeMessage, validationTypeGroup: - numRequiredFields = int(st.mi.numRequiredFields) - case validationTypeMap: - // If this is a map field with a message value that contains - // required fields, require that the value be present. - if st.mi != nil && st.mi.numRequiredFields > 0 { - numRequiredFields = 1 - } - } - // If there are more than 64 required fields, this check will - // always fail and we will report that the message is potentially - // uninitialized. - if numRequiredFields > 0 && bits.OnesCount64(st.requiredMask) != numRequiredFields { - initialized = false - } - states = states[:len(states)-1] - } - out.n = start - len(b) - if initialized { - out.initialized = true - } - return out, ValidationValid -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/weak.go b/vendor/google.golang.org/protobuf/internal/impl/weak.go deleted file mode 100644 index 009cbefd1..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/weak.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package impl - -import ( - "fmt" - - pref "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -// weakFields adds methods to the exported WeakFields type for internal use. -// -// The exported type is an alias to an unnamed type, so methods can't be -// defined directly on it. 
-type weakFields WeakFields - -func (w weakFields) get(num pref.FieldNumber) (pref.ProtoMessage, bool) { - m, ok := w[int32(num)] - return m, ok -} - -func (w *weakFields) set(num pref.FieldNumber, m pref.ProtoMessage) { - if *w == nil { - *w = make(weakFields) - } - (*w)[int32(num)] = m -} - -func (w *weakFields) clear(num pref.FieldNumber) { - delete(*w, int32(num)) -} - -func (Export) HasWeak(w WeakFields, num pref.FieldNumber) bool { - _, ok := w[int32(num)] - return ok -} - -func (Export) ClearWeak(w *WeakFields, num pref.FieldNumber) { - delete(*w, int32(num)) -} - -func (Export) GetWeak(w WeakFields, num pref.FieldNumber, name pref.FullName) pref.ProtoMessage { - if m, ok := w[int32(num)]; ok { - return m - } - mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) - if mt == nil { - panic(fmt.Sprintf("message %v for weak field is not linked in", name)) - } - return mt.Zero().Interface() -} - -func (Export) SetWeak(w *WeakFields, num pref.FieldNumber, name pref.FullName, m pref.ProtoMessage) { - if m != nil { - mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) - if mt == nil { - panic(fmt.Sprintf("message %v for weak field is not linked in", name)) - } - if mt != m.ProtoReflect().Type() { - panic(fmt.Sprintf("invalid message type for weak field: got %T, want %T", m, mt.Zero().Interface())) - } - } - if m == nil || !m.ProtoReflect().IsValid() { - delete(*w, int32(num)) - return - } - if *w == nil { - *w = make(weakFields) - } - (*w)[int32(num)] = m -} diff --git a/vendor/google.golang.org/protobuf/internal/order/order.go b/vendor/google.golang.org/protobuf/internal/order/order.go deleted file mode 100644 index 2a24953f6..000000000 --- a/vendor/google.golang.org/protobuf/internal/order/order.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package order - -import ( - pref "google.golang.org/protobuf/reflect/protoreflect" -) - -// FieldOrder specifies the ordering to visit message fields. -// It is a function that reports whether x is ordered before y. -type FieldOrder func(x, y pref.FieldDescriptor) bool - -var ( - // AnyFieldOrder specifies no specific field ordering. - AnyFieldOrder FieldOrder = nil - - // LegacyFieldOrder sorts fields in the same ordering as emitted by - // wire serialization in the github.com/golang/protobuf implementation. - LegacyFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { - ox, oy := x.ContainingOneof(), y.ContainingOneof() - inOneof := func(od pref.OneofDescriptor) bool { - return od != nil && !od.IsSynthetic() - } - - // Extension fields sort before non-extension fields. - if x.IsExtension() != y.IsExtension() { - return x.IsExtension() && !y.IsExtension() - } - // Fields not within a oneof sort before those within a oneof. - if inOneof(ox) != inOneof(oy) { - return !inOneof(ox) && inOneof(oy) - } - // Fields in disjoint oneof sets are sorted by declaration index. - if ox != nil && oy != nil && ox != oy { - return ox.Index() < oy.Index() - } - // Fields sorted by field number. - return x.Number() < y.Number() - } - - // NumberFieldOrder sorts fields by their field number. - NumberFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { - return x.Number() < y.Number() - } - - // IndexNameFieldOrder sorts non-extension fields before extension fields. - // Non-extensions are sorted according to their declaration index. - // Extensions are sorted according to their full name. 
- IndexNameFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { - // Non-extension fields sort before extension fields. - if x.IsExtension() != y.IsExtension() { - return !x.IsExtension() && y.IsExtension() - } - // Extensions sorted by fullname. - if x.IsExtension() && y.IsExtension() { - return x.FullName() < y.FullName() - } - // Non-extensions sorted by declaration index. - return x.Index() < y.Index() - } -) - -// KeyOrder specifies the ordering to visit map entries. -// It is a function that reports whether x is ordered before y. -type KeyOrder func(x, y pref.MapKey) bool - -var ( - // AnyKeyOrder specifies no specific key ordering. - AnyKeyOrder KeyOrder = nil - - // GenericKeyOrder sorts false before true, numeric keys in ascending order, - // and strings in lexicographical ordering according to UTF-8 codepoints. - GenericKeyOrder KeyOrder = func(x, y pref.MapKey) bool { - switch x.Interface().(type) { - case bool: - return !x.Bool() && y.Bool() - case int32, int64: - return x.Int() < y.Int() - case uint32, uint64: - return x.Uint() < y.Uint() - case string: - return x.String() < y.String() - default: - panic("invalid map key type") - } - } -) diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go deleted file mode 100644 index c8090e0c5..000000000 --- a/vendor/google.golang.org/protobuf/internal/order/range.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package order provides ordered access to messages and maps. -package order - -import ( - "sort" - "sync" - - pref "google.golang.org/protobuf/reflect/protoreflect" -) - -type messageField struct { - fd pref.FieldDescriptor - v pref.Value -} - -var messageFieldPool = sync.Pool{ - New: func() interface{} { return new([]messageField) }, -} - -type ( - // FieldRanger is an interface for visiting all fields in a message. - // The protoreflect.Message type implements this interface. - FieldRanger interface{ Range(VisitField) } - // VisitField is called everytime a message field is visited. - VisitField = func(pref.FieldDescriptor, pref.Value) bool -) - -// RangeFields iterates over the fields of fs according to the specified order. -func RangeFields(fs FieldRanger, less FieldOrder, fn VisitField) { - if less == nil { - fs.Range(fn) - return - } - - // Obtain a pre-allocated scratch buffer. - p := messageFieldPool.Get().(*[]messageField) - fields := (*p)[:0] - defer func() { - if cap(fields) < 1024 { - *p = fields - messageFieldPool.Put(p) - } - }() - - // Collect all fields in the message and sort them. - fs.Range(func(fd pref.FieldDescriptor, v pref.Value) bool { - fields = append(fields, messageField{fd, v}) - return true - }) - sort.Slice(fields, func(i, j int) bool { - return less(fields[i].fd, fields[j].fd) - }) - - // Visit the fields in the specified ordering. - for _, f := range fields { - if !fn(f.fd, f.v) { - return - } - } -} - -type mapEntry struct { - k pref.MapKey - v pref.Value -} - -var mapEntryPool = sync.Pool{ - New: func() interface{} { return new([]mapEntry) }, -} - -type ( - // EntryRanger is an interface for visiting all fields in a message. - // The protoreflect.Map type implements this interface. - EntryRanger interface{ Range(VisitEntry) } - // VisitEntry is called everytime a map entry is visited.
- VisitEntry = func(pref.MapKey, pref.Value) bool -) - -// RangeEntries iterates over the entries of es according to the specified order. -func RangeEntries(es EntryRanger, less KeyOrder, fn VisitEntry) { - if less == nil { - es.Range(fn) - return - } - - // Obtain a pre-allocated scratch buffer. - p := mapEntryPool.Get().(*[]mapEntry) - entries := (*p)[:0] - defer func() { - if cap(entries) < 1024 { - *p = entries - mapEntryPool.Put(p) - } - }() - - // Collect all entries in the map and sort them. - es.Range(func(k pref.MapKey, v pref.Value) bool { - entries = append(entries, mapEntry{k, v}) - return true - }) - sort.Slice(entries, func(i, j int) bool { - return less(entries[i].k, entries[j].k) - }) - - // Visit the entries in the specified ordering. - for _, e := range entries { - if !fn(e.k, e.v) { - return - } - } -} diff --git a/vendor/google.golang.org/protobuf/internal/pragma/pragma.go b/vendor/google.golang.org/protobuf/internal/pragma/pragma.go deleted file mode 100644 index 49dc4fcd9..000000000 --- a/vendor/google.golang.org/protobuf/internal/pragma/pragma.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package pragma provides types that can be embedded into a struct to -// statically enforce or prevent certain language properties. -package pragma - -import "sync" - -// NoUnkeyedLiterals can be embedded in a struct to prevent unkeyed literals. -type NoUnkeyedLiterals struct{} - -// DoNotImplement can be embedded in an interface to prevent trivial -// implementations of the interface. -// -// This is useful to prevent unauthorized implementations of an interface -// so that it can be extended in the future for any protobuf language changes. -type DoNotImplement interface{ ProtoInternal(DoNotImplement) } - -// DoNotCompare can be embedded in a struct to prevent comparability. -type DoNotCompare [0]func() - -// DoNotCopy can be embedded in a struct to help prevent shallow copies. -// This does not rely on a Go language feature, but rather a special case -// within the vet checker. -// -// See https://golang.org/issues/8005. -type DoNotCopy [0]sync.Mutex diff --git a/vendor/google.golang.org/protobuf/internal/set/ints.go b/vendor/google.golang.org/protobuf/internal/set/ints.go deleted file mode 100644 index d3d7f89ab..000000000 --- a/vendor/google.golang.org/protobuf/internal/set/ints.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package set provides simple set data structures for uint64s. -package set - -import "math/bits" - -// int64s represents a set of integers within the range of 0..63. -type int64s uint64 - -func (bs *int64s) Len() int { - return bits.OnesCount64(uint64(*bs)) -} -func (bs *int64s) Has(n uint64) bool { - return uint64(*bs)&(uint64(1)<<n) > 0 -} -func (bs *int64s) Set(n uint64) { - *(*uint64)(bs) |= uint64(1) << n -} -func (bs *int64s) Clear(n uint64) { - *(*uint64)(bs) &^= uint64(1) << n -} - -// Ints represents a set of integers within the range of 0..math.MaxUint64.
-type Ints struct { - lo int64s - hi map[uint64]struct{} -} - -func (bs *Ints) Len() int { - return bs.lo.Len() + len(bs.hi) -} -func (bs *Ints) Has(n uint64) bool { - if n < 64 { - return bs.lo.Has(n) - } - _, ok := bs.hi[n] - return ok -} -func (bs *Ints) Set(n uint64) { - if n < 64 { - bs.lo.Set(n) - return - } - if bs.hi == nil { - bs.hi = make(map[uint64]struct{}) - } - bs.hi[n] = struct{}{} -} -func (bs *Ints) Clear(n uint64) { - if n < 64 { - bs.lo.Clear(n) - return - } - delete(bs.hi, n) -} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings.go b/vendor/google.golang.org/protobuf/internal/strs/strings.go deleted file mode 100644 index 0b74e7658..000000000 --- a/vendor/google.golang.org/protobuf/internal/strs/strings.go +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package strs provides string manipulation functionality specific to protobuf. -package strs - -import ( - "go/token" - "strings" - "unicode" - "unicode/utf8" - - "google.golang.org/protobuf/internal/flags" - "google.golang.org/protobuf/reflect/protoreflect" -) - -// EnforceUTF8 reports whether to enforce strict UTF-8 validation. -func EnforceUTF8(fd protoreflect.FieldDescriptor) bool { - if flags.ProtoLegacy { - if fd, ok := fd.(interface{ EnforceUTF8() bool }); ok { - return fd.EnforceUTF8() - } - } - return fd.Syntax() == protoreflect.Proto3 -} - -// GoCamelCase camel-cases a protobuf name for use as a Go identifier. -// -// If there is an interior underscore followed by a lower case letter, -// drop the underscore and convert the letter to upper case. -func GoCamelCase(s string) string { - // Invariant: if the next letter is lower case, it must be converted - // to upper case. - // That is, we process a word at a time, where words are marked by _ or - // upper case letter. Digits are treated as words. - var b []byte - for i := 0; i < len(s); i++ { - c := s[i] - switch { - case c == '.' && i+1 < len(s) && isASCIILower(s[i+1]): - // Skip over '.' in ".{{lowercase}}". - case c == '.': - b = append(b, '_') // convert '.' to '_' - case c == '_' && (i == 0 || s[i-1] == '.'): - // Convert initial '_' to ensure we start with a capital letter. - // Do the same for '_' after '.' to match historic behavior. - b = append(b, 'X') // convert '_' to 'X' - case c == '_' && i+1 < len(s) && isASCIILower(s[i+1]): - // Skip over '_' in "_{{lowercase}}". - case isASCIIDigit(c): - b = append(b, c) - default: - // Assume we have a letter now - if not, it's a bogus identifier. - // The next word is a sequence of characters that must start upper case. - if isASCIILower(c) { - c -= 'a' - 'A' // convert lowercase to uppercase - } - b = append(b, c) - - // Accept lower case sequence that follows. - for ; i+1 < len(s) && isASCIILower(s[i+1]); i++ { - b = append(b, s[i+1]) - } - } - } - return string(b) -} - -// GoSanitized converts a string to a valid Go identifier. -func GoSanitized(s string) string { - // Sanitize the input to the set of valid characters, - // which must be '_' or be in the Unicode L or N categories. - s = strings.Map(func(r rune) rune { - if unicode.IsLetter(r) || unicode.IsDigit(r) { - return r - } - return '_' - }, s) - - // Prepend '_' in the event of a Go keyword conflict or if - // the identifier is invalid (does not start in the Unicode L category). 
- r, _ := utf8.DecodeRuneInString(s) - if token.Lookup(s).IsKeyword() || !unicode.IsLetter(r) { - return "_" + s - } - return s -} - -// JSONCamelCase converts a snake_case identifier to a camelCase identifier, -// according to the protobuf JSON specification. -func JSONCamelCase(s string) string { - var b []byte - var wasUnderscore bool - for i := 0; i < len(s); i++ { // proto identifiers are always ASCII - c := s[i] - if c != '_' { - if wasUnderscore && isASCIILower(c) { - c -= 'a' - 'A' // convert to uppercase - } - b = append(b, c) - } - wasUnderscore = c == '_' - } - return string(b) -} - -// JSONSnakeCase converts a camelCase identifier to a snake_case identifier, -// according to the protobuf JSON specification. -func JSONSnakeCase(s string) string { - var b []byte - for i := 0; i < len(s); i++ { // proto identifiers are always ASCII - c := s[i] - if isASCIIUpper(c) { - b = append(b, '_') - c += 'a' - 'A' // convert to lowercase - } - b = append(b, c) - } - return string(b) -} - -// MapEntryName derives the name of the map entry message given the field name. -// See protoc v3.8.0: src/google/protobuf/descriptor.cc:254-276,6057 -func MapEntryName(s string) string { - var b []byte - upperNext := true - for _, c := range s { - switch { - case c == '_': - upperNext = true - case upperNext: - b = append(b, byte(unicode.ToUpper(c))) - upperNext = false - default: - b = append(b, byte(c)) - } - } - b = append(b, "Entry"...) - return string(b) -} - -// EnumValueName derives the camel-cased enum value name. -// See protoc v3.8.0: src/google/protobuf/descriptor.cc:297-313 -func EnumValueName(s string) string { - var b []byte - upperNext := true - for _, c := range s { - switch { - case c == '_': - upperNext = true - case upperNext: - b = append(b, byte(unicode.ToUpper(c))) - upperNext = false - default: - b = append(b, byte(unicode.ToLower(c))) - upperNext = false - } - } - return string(b) -} - -// TrimEnumPrefix trims the enum name prefix from an enum value name, -// where the prefix is all lowercase without underscores. -// See protoc v3.8.0: src/google/protobuf/descriptor.cc:330-375 -func TrimEnumPrefix(s, prefix string) string { - s0 := s // original input - for len(s) > 0 && len(prefix) > 0 { - if s[0] == '_' { - s = s[1:] - continue - } - if unicode.ToLower(rune(s[0])) != rune(prefix[0]) { - return s0 // no prefix match - } - s, prefix = s[1:], prefix[1:] - } - if len(prefix) > 0 { - return s0 // no prefix match - } - s = strings.TrimLeft(s, "_") - if len(s) == 0 { - return s0 // avoid returning empty string - } - return s -} - -func isASCIILower(c byte) bool { - return 'a' <= c && c <= 'z' -} -func isASCIIUpper(c byte) bool { - return 'A' <= c && c <= 'Z' -} -func isASCIIDigit(c byte) bool { - return '0' <= c && c <= '9' -} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go deleted file mode 100644 index a1f6f3338..000000000 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build purego || appengine -// +build purego appengine - -package strs - -import pref "google.golang.org/protobuf/reflect/protoreflect" - -func UnsafeString(b []byte) string { - return string(b) -} - -func UnsafeBytes(s string) []byte { - return []byte(s) -} - -type Builder struct{} - -func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { - return prefix.Append(name) -} - -func (*Builder) MakeString(b []byte) string { - return string(b) -} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go deleted file mode 100644 index 56a8a4ed3..000000000 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !purego && !appengine -// +build !purego,!appengine - -package strs - -import ( - "unsafe" - - pref "google.golang.org/protobuf/reflect/protoreflect" -) - -type ( - stringHeader struct { - Data unsafe.Pointer - Len int - } - sliceHeader struct { - Data unsafe.Pointer - Len int - Cap int - } -) - -// UnsafeString returns an unsafe string reference of b. -// The caller must treat the input slice as immutable. -// -// WARNING: Use carefully. The returned result must not leak to the end user -// unless the input slice is provably immutable. -func UnsafeString(b []byte) (s string) { - src := (*sliceHeader)(unsafe.Pointer(&b)) - dst := (*stringHeader)(unsafe.Pointer(&s)) - dst.Data = src.Data - dst.Len = src.Len - return s -} - -// UnsafeBytes returns an unsafe bytes slice reference of s. -// The caller must treat returned slice as immutable. -// -// WARNING: Use carefully. The returned result must not leak to the end user. -func UnsafeBytes(s string) (b []byte) { - src := (*stringHeader)(unsafe.Pointer(&s)) - dst := (*sliceHeader)(unsafe.Pointer(&b)) - dst.Data = src.Data - dst.Len = src.Len - dst.Cap = src.Len - return b -} - -// Builder builds a set of strings with shared lifetime. -// This differs from strings.Builder, which is for building a single string. -type Builder struct { - buf []byte -} - -// AppendFullName is equivalent to protoreflect.FullName.Append, -// but optimized for large batches where each name has a shared lifetime. -func (sb *Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { - n := len(prefix) + len(".") + len(name) - if len(prefix) == 0 { - n -= len(".") - } - sb.grow(n) - sb.buf = append(sb.buf, prefix...) - sb.buf = append(sb.buf, '.') - sb.buf = append(sb.buf, name...) - return pref.FullName(sb.last(n)) -} - -// MakeString is equivalent to string(b), but optimized for large batches -// with a shared lifetime. -func (sb *Builder) MakeString(b []byte) string { - sb.grow(len(b)) - sb.buf = append(sb.buf, b...) - return sb.last(len(b)) -} - -func (sb *Builder) grow(n int) { - if cap(sb.buf)-len(sb.buf) >= n { - return - } - - // Unlike strings.Builder, we do not need to copy over the contents - // of the old buffer since our builder provides no API for - // retrieving previously created strings. 
- sb.buf = make([]byte, 2*(cap(sb.buf)+n)) -} - -func (sb *Builder) last(n int) string { - return UnsafeString(sb.buf[len(sb.buf)-n:]) -} diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go deleted file mode 100644 index 3d40d5249..000000000 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package version records versioning information about this module. -package version - -import ( - "fmt" - "strings" -) - -// These constants determine the current version of this module. -// -// -// For our release process, we enforce the following rules: -// * Tagged releases use a tag that is identical to String. -// * Tagged releases never reference a commit where the String -// contains "devel". -// * The set of all commits in this repository where String -// does not contain "devel" must have a unique String. -// -// -// Steps for tagging a new release: -// 1. Create a new CL. -// -// 2. Update Minor, Patch, and/or PreRelease as necessary. -// PreRelease must not contain the string "devel". -// -// 3. Since the last released minor version, have there been any changes to -// generator that relies on new functionality in the runtime? -// If yes, then increment RequiredGenerated. -// -// 4. Since the last released minor version, have there been any changes to -// the runtime that removes support for old .pb.go source code? -// If yes, then increment SupportMinimum. -// -// 5. Send out the CL for review and submit it. -// Note that the next CL in step 8 must be submitted after this CL -// without any other CLs in-between. -// -// 6. Tag a new version, where the tag is is the current String. -// -// 7. Write release notes for all notable changes -// between this release and the last release. -// -// 8. Create a new CL. -// -// 9. Update PreRelease to include the string "devel". -// For example: "" -> "devel" or "rc.1" -> "rc.1.devel" -// -// 10. Send out the CL for review and submit it. -const ( - Major = 1 - Minor = 28 - Patch = 0 - PreRelease = "" -) - -// String formats the version string for this module in semver format. -// -// Examples: -// v1.20.1 -// v1.21.0-rc.1 -func String() string { - v := fmt.Sprintf("v%d.%d.%d", Major, Minor, Patch) - if PreRelease != "" { - v += "-" + PreRelease - - // TODO: Add metadata about the commit or build hash. - // See https://golang.org/issue/29814 - // See https://golang.org/issue/33533 - var metadata string - if strings.Contains(PreRelease, "devel") && metadata != "" { - v += "+" + metadata - } - } - return v -} diff --git a/vendor/google.golang.org/protobuf/proto/checkinit.go b/vendor/google.golang.org/protobuf/proto/checkinit.go deleted file mode 100644 index 3e9a6a2f6..000000000 --- a/vendor/google.golang.org/protobuf/proto/checkinit.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/runtime/protoiface" -) - -// CheckInitialized returns an error if any required fields in m are not set. 
-func CheckInitialized(m Message) error { - // Treat a nil message interface as an "untyped" empty message, - // which we assume to have no required fields. - if m == nil { - return nil - } - - return checkInitialized(m.ProtoReflect()) -} - -// CheckInitialized returns an error if any required fields in m are not set. -func checkInitialized(m protoreflect.Message) error { - if methods := protoMethods(m); methods != nil && methods.CheckInitialized != nil { - _, err := methods.CheckInitialized(protoiface.CheckInitializedInput{ - Message: m, - }) - return err - } - return checkInitializedSlow(m) -} - -func checkInitializedSlow(m protoreflect.Message) error { - md := m.Descriptor() - fds := md.Fields() - for i, nums := 0, md.RequiredNumbers(); i < nums.Len(); i++ { - fd := fds.ByNumber(nums.Get(i)) - if !m.Has(fd) { - return errors.RequiredNotSet(string(fd.FullName())) - } - } - var err error - m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - switch { - case fd.IsList(): - if fd.Message() == nil { - return true - } - for i, list := 0, v.List(); i < list.Len() && err == nil; i++ { - err = checkInitialized(list.Get(i).Message()) - } - case fd.IsMap(): - if fd.MapValue().Message() == nil { - return true - } - v.Map().Range(func(key protoreflect.MapKey, v protoreflect.Value) bool { - err = checkInitialized(v.Message()) - return err == nil - }) - default: - if fd.Message() == nil { - return true - } - err = checkInitialized(v.Message()) - } - return err == nil - }) - return err -} diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go deleted file mode 100644 index 11bf7173b..000000000 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/encoding/messageset" - "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/flags" - "google.golang.org/protobuf/internal/genid" - "google.golang.org/protobuf/internal/pragma" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - "google.golang.org/protobuf/runtime/protoiface" -) - -// UnmarshalOptions configures the unmarshaler. -// -// Example usage: -// err := UnmarshalOptions{DiscardUnknown: true}.Unmarshal(b, m) -type UnmarshalOptions struct { - pragma.NoUnkeyedLiterals - - // Merge merges the input into the destination message. - // The default behavior is to always reset the message before unmarshaling, - // unless Merge is specified. - Merge bool - - // AllowPartial accepts input for messages that will result in missing - // required fields. If AllowPartial is false (the default), Unmarshal will - // return an error if there are any missing required fields. - AllowPartial bool - - // If DiscardUnknown is set, unknown fields are ignored. - DiscardUnknown bool - - // Resolver is used for looking up types when unmarshaling extension fields. - // If nil, this defaults to using protoregistry.GlobalTypes. 
- Resolver interface { - FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) - FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) - } - - // RecursionLimit limits how deeply messages may be nested. - // If zero, a default limit is applied. - RecursionLimit int -} - -// Unmarshal parses the wire-format message in b and places the result in m. -// The provided message must be mutable (e.g., a non-nil pointer to a message). -func Unmarshal(b []byte, m Message) error { - _, err := UnmarshalOptions{RecursionLimit: protowire.DefaultRecursionLimit}.unmarshal(b, m.ProtoReflect()) - return err -} - -// Unmarshal parses the wire-format message in b and places the result in m. -// The provided message must be mutable (e.g., a non-nil pointer to a message). -func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { - if o.RecursionLimit == 0 { - o.RecursionLimit = protowire.DefaultRecursionLimit - } - _, err := o.unmarshal(b, m.ProtoReflect()) - return err -} - -// UnmarshalState parses a wire-format message and places the result in m. -// -// This method permits fine-grained control over the unmarshaler. -// Most users should use Unmarshal instead. -func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { - if o.RecursionLimit == 0 { - o.RecursionLimit = protowire.DefaultRecursionLimit - } - return o.unmarshal(in.Buf, in.Message) -} - -// unmarshal is a centralized function that all unmarshal operations go through. -// For profiling purposes, avoid changing the name of this function or -// introducing other code paths for unmarshal that do not go through this. -func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out protoiface.UnmarshalOutput, err error) { - if o.Resolver == nil { - o.Resolver = protoregistry.GlobalTypes - } - if !o.Merge { - Reset(m.Interface()) - } - allowPartial := o.AllowPartial - o.Merge = true - o.AllowPartial = true - methods := protoMethods(m) - if methods != nil && methods.Unmarshal != nil && - !(o.DiscardUnknown && methods.Flags&protoiface.SupportUnmarshalDiscardUnknown == 0) { - in := protoiface.UnmarshalInput{ - Message: m, - Buf: b, - Resolver: o.Resolver, - Depth: o.RecursionLimit, - } - if o.DiscardUnknown { - in.Flags |= protoiface.UnmarshalDiscardUnknown - } - out, err = methods.Unmarshal(in) - } else { - o.RecursionLimit-- - if o.RecursionLimit < 0 { - return out, errors.New("exceeded max recursion depth") - } - err = o.unmarshalMessageSlow(b, m) - } - if err != nil { - return out, err - } - if allowPartial || (out.Flags&protoiface.UnmarshalInitialized != 0) { - return out, nil - } - return out, checkInitialized(m) -} - -func (o UnmarshalOptions) unmarshalMessage(b []byte, m protoreflect.Message) error { - _, err := o.unmarshal(b, m) - return err -} - -func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) error { - md := m.Descriptor() - if messageset.IsMessageSet(md) { - return o.unmarshalMessageSet(b, m) - } - fields := md.Fields() - for len(b) > 0 { - // Parse the tag (field number and wire type). - num, wtyp, tagLen := protowire.ConsumeTag(b) - if tagLen < 0 { - return errDecode - } - if num > protowire.MaxValidNumber { - return errDecode - } - - // Find the field descriptor for this field number. 
- fd := fields.ByNumber(num) - if fd == nil && md.ExtensionRanges().Has(num) { - extType, err := o.Resolver.FindExtensionByNumber(md.FullName(), num) - if err != nil && err != protoregistry.NotFound { - return errors.New("%v: unable to resolve extension %v: %v", md.FullName(), num, err) - } - if extType != nil { - fd = extType.TypeDescriptor() - } - } - var err error - if fd == nil { - err = errUnknown - } else if flags.ProtoLegacy { - if fd.IsWeak() && fd.Message().IsPlaceholder() { - err = errUnknown // weak referent is not linked in - } - } - - // Parse the field value. - var valLen int - switch { - case err != nil: - case fd.IsList(): - valLen, err = o.unmarshalList(b[tagLen:], wtyp, m.Mutable(fd).List(), fd) - case fd.IsMap(): - valLen, err = o.unmarshalMap(b[tagLen:], wtyp, m.Mutable(fd).Map(), fd) - default: - valLen, err = o.unmarshalSingular(b[tagLen:], wtyp, m, fd) - } - if err != nil { - if err != errUnknown { - return err - } - valLen = protowire.ConsumeFieldValue(num, wtyp, b[tagLen:]) - if valLen < 0 { - return errDecode - } - if !o.DiscardUnknown { - m.SetUnknown(append(m.GetUnknown(), b[:tagLen+valLen]...)) - } - } - b = b[tagLen+valLen:] - } - return nil -} - -func (o UnmarshalOptions) unmarshalSingular(b []byte, wtyp protowire.Type, m protoreflect.Message, fd protoreflect.FieldDescriptor) (n int, err error) { - v, n, err := o.unmarshalScalar(b, wtyp, fd) - if err != nil { - return 0, err - } - switch fd.Kind() { - case protoreflect.GroupKind, protoreflect.MessageKind: - m2 := m.Mutable(fd).Message() - if err := o.unmarshalMessage(v.Bytes(), m2); err != nil { - return n, err - } - default: - // Non-message scalars replace the previous value. - m.Set(fd, v) - } - return n, nil -} - -func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv protoreflect.Map, fd protoreflect.FieldDescriptor) (n int, err error) { - if wtyp != protowire.BytesType { - return 0, errUnknown - } - b, n = protowire.ConsumeBytes(b) - if n < 0 { - return 0, errDecode - } - var ( - keyField = fd.MapKey() - valField = fd.MapValue() - key protoreflect.Value - val protoreflect.Value - haveKey bool - haveVal bool - ) - switch valField.Kind() { - case protoreflect.GroupKind, protoreflect.MessageKind: - val = mapv.NewValue() - } - // Map entries are represented as a two-element message with fields - // containing the key and value. - for len(b) > 0 { - num, wtyp, n := protowire.ConsumeTag(b) - if n < 0 { - return 0, errDecode - } - if num > protowire.MaxValidNumber { - return 0, errDecode - } - b = b[n:] - err = errUnknown - switch num { - case genid.MapEntry_Key_field_number: - key, n, err = o.unmarshalScalar(b, wtyp, keyField) - if err != nil { - break - } - haveKey = true - case genid.MapEntry_Value_field_number: - var v protoreflect.Value - v, n, err = o.unmarshalScalar(b, wtyp, valField) - if err != nil { - break - } - switch valField.Kind() { - case protoreflect.GroupKind, protoreflect.MessageKind: - if err := o.unmarshalMessage(v.Bytes(), val.Message()); err != nil { - return 0, err - } - default: - val = v - } - haveVal = true - } - if err == errUnknown { - n = protowire.ConsumeFieldValue(num, wtyp, b) - if n < 0 { - return 0, errDecode - } - } else if err != nil { - return 0, err - } - b = b[n:] - } - // Every map entry should have entries for key and value, but this is not strictly required. 
- if !haveKey { - key = keyField.Default() - } - if !haveVal { - switch valField.Kind() { - case protoreflect.GroupKind, protoreflect.MessageKind: - default: - val = valField.Default() - } - } - mapv.Set(key.MapKey(), val) - return n, nil -} - -// errUnknown is used internally to indicate fields which should be added -// to the unknown field set of a message. It is never returned from an exported -// function. -var errUnknown = errors.New("BUG: internal error (unknown)") - -var errDecode = errors.New("cannot parse invalid wire-format data") diff --git a/vendor/google.golang.org/protobuf/proto/decode_gen.go b/vendor/google.golang.org/protobuf/proto/decode_gen.go deleted file mode 100644 index 301eeb20f..000000000 --- a/vendor/google.golang.org/protobuf/proto/decode_gen.go +++ /dev/null @@ -1,603 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-types. DO NOT EDIT. - -package proto - -import ( - "math" - "unicode/utf8" - - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/strs" - "google.golang.org/protobuf/reflect/protoreflect" -) - -// unmarshalScalar decodes a value of the given kind. -// -// Message values are decoded into a []byte which aliases the input data. -func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd protoreflect.FieldDescriptor) (val protoreflect.Value, n int, err error) { - switch fd.Kind() { - case protoreflect.BoolKind: - if wtyp != protowire.VarintType { - return val, 0, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return val, 0, errDecode - } - return protoreflect.ValueOfBool(protowire.DecodeBool(v)), n, nil - case protoreflect.EnumKind: - if wtyp != protowire.VarintType { - return val, 0, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return val, 0, errDecode - } - return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), n, nil - case protoreflect.Int32Kind: - if wtyp != protowire.VarintType { - return val, 0, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return val, 0, errDecode - } - return protoreflect.ValueOfInt32(int32(v)), n, nil - case protoreflect.Sint32Kind: - if wtyp != protowire.VarintType { - return val, 0, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return val, 0, errDecode - } - return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), n, nil - case protoreflect.Uint32Kind: - if wtyp != protowire.VarintType { - return val, 0, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return val, 0, errDecode - } - return protoreflect.ValueOfUint32(uint32(v)), n, nil - case protoreflect.Int64Kind: - if wtyp != protowire.VarintType { - return val, 0, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return val, 0, errDecode - } - return protoreflect.ValueOfInt64(int64(v)), n, nil - case protoreflect.Sint64Kind: - if wtyp != protowire.VarintType { - return val, 0, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return val, 0, errDecode - } - return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), n, nil - case protoreflect.Uint64Kind: - if wtyp != protowire.VarintType { - return val, 0, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return val, 0, errDecode - } - return protoreflect.ValueOfUint64(v), n, nil - case 
protoreflect.Sfixed32Kind: - if wtyp != protowire.Fixed32Type { - return val, 0, errUnknown - } - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return val, 0, errDecode - } - return protoreflect.ValueOfInt32(int32(v)), n, nil - case protoreflect.Fixed32Kind: - if wtyp != protowire.Fixed32Type { - return val, 0, errUnknown - } - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return val, 0, errDecode - } - return protoreflect.ValueOfUint32(uint32(v)), n, nil - case protoreflect.FloatKind: - if wtyp != protowire.Fixed32Type { - return val, 0, errUnknown - } - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return val, 0, errDecode - } - return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), n, nil - case protoreflect.Sfixed64Kind: - if wtyp != protowire.Fixed64Type { - return val, 0, errUnknown - } - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return val, 0, errDecode - } - return protoreflect.ValueOfInt64(int64(v)), n, nil - case protoreflect.Fixed64Kind: - if wtyp != protowire.Fixed64Type { - return val, 0, errUnknown - } - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return val, 0, errDecode - } - return protoreflect.ValueOfUint64(v), n, nil - case protoreflect.DoubleKind: - if wtyp != protowire.Fixed64Type { - return val, 0, errUnknown - } - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return val, 0, errDecode - } - return protoreflect.ValueOfFloat64(math.Float64frombits(v)), n, nil - case protoreflect.StringKind: - if wtyp != protowire.BytesType { - return val, 0, errUnknown - } - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return val, 0, errDecode - } - if strs.EnforceUTF8(fd) && !utf8.Valid(v) { - return protoreflect.Value{}, 0, errors.InvalidUTF8(string(fd.FullName())) - } - return protoreflect.ValueOfString(string(v)), n, nil - case protoreflect.BytesKind: - if wtyp != protowire.BytesType { - return val, 0, errUnknown - } - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return val, 0, errDecode - } - return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), n, nil - case protoreflect.MessageKind: - if wtyp != protowire.BytesType { - return val, 0, errUnknown - } - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return val, 0, errDecode - } - return protoreflect.ValueOfBytes(v), n, nil - case protoreflect.GroupKind: - if wtyp != protowire.StartGroupType { - return val, 0, errUnknown - } - v, n := protowire.ConsumeGroup(fd.Number(), b) - if n < 0 { - return val, 0, errDecode - } - return protoreflect.ValueOfBytes(v), n, nil - default: - return val, 0, errUnknown - } -} - -func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list protoreflect.List, fd protoreflect.FieldDescriptor) (n int, err error) { - switch fd.Kind() { - case protoreflect.BoolKind: - if wtyp == protowire.BytesType { - buf, n := protowire.ConsumeBytes(b) - if n < 0 { - return 0, errDecode - } - for len(buf) > 0 { - v, n := protowire.ConsumeVarint(buf) - if n < 0 { - return 0, errDecode - } - buf = buf[n:] - list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) - } - return n, nil - } - if wtyp != protowire.VarintType { - return 0, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return 0, errDecode - } - list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) - return n, nil - case protoreflect.EnumKind: - if wtyp == protowire.BytesType { - buf, n := protowire.ConsumeBytes(b) - if n < 0 { - return 0, errDecode - } - for len(buf) > 0 { - v, n := protowire.ConsumeVarint(buf) - if n < 0 { - return 0, errDecode - } 
- buf = buf[n:] - list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) - } - return n, nil - } - if wtyp != protowire.VarintType { - return 0, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return 0, errDecode - } - list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) - return n, nil - case protoreflect.Int32Kind: - if wtyp == protowire.BytesType { - buf, n := protowire.ConsumeBytes(b) - if n < 0 { - return 0, errDecode - } - for len(buf) > 0 { - v, n := protowire.ConsumeVarint(buf) - if n < 0 { - return 0, errDecode - } - buf = buf[n:] - list.Append(protoreflect.ValueOfInt32(int32(v))) - } - return n, nil - } - if wtyp != protowire.VarintType { - return 0, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return 0, errDecode - } - list.Append(protoreflect.ValueOfInt32(int32(v))) - return n, nil - case protoreflect.Sint32Kind: - if wtyp == protowire.BytesType { - buf, n := protowire.ConsumeBytes(b) - if n < 0 { - return 0, errDecode - } - for len(buf) > 0 { - v, n := protowire.ConsumeVarint(buf) - if n < 0 { - return 0, errDecode - } - buf = buf[n:] - list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) - } - return n, nil - } - if wtyp != protowire.VarintType { - return 0, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return 0, errDecode - } - list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) - return n, nil - case protoreflect.Uint32Kind: - if wtyp == protowire.BytesType { - buf, n := protowire.ConsumeBytes(b) - if n < 0 { - return 0, errDecode - } - for len(buf) > 0 { - v, n := protowire.ConsumeVarint(buf) - if n < 0 { - return 0, errDecode - } - buf = buf[n:] - list.Append(protoreflect.ValueOfUint32(uint32(v))) - } - return n, nil - } - if wtyp != protowire.VarintType { - return 0, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return 0, errDecode - } - list.Append(protoreflect.ValueOfUint32(uint32(v))) - return n, nil - case protoreflect.Int64Kind: - if wtyp == protowire.BytesType { - buf, n := protowire.ConsumeBytes(b) - if n < 0 { - return 0, errDecode - } - for len(buf) > 0 { - v, n := protowire.ConsumeVarint(buf) - if n < 0 { - return 0, errDecode - } - buf = buf[n:] - list.Append(protoreflect.ValueOfInt64(int64(v))) - } - return n, nil - } - if wtyp != protowire.VarintType { - return 0, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return 0, errDecode - } - list.Append(protoreflect.ValueOfInt64(int64(v))) - return n, nil - case protoreflect.Sint64Kind: - if wtyp == protowire.BytesType { - buf, n := protowire.ConsumeBytes(b) - if n < 0 { - return 0, errDecode - } - for len(buf) > 0 { - v, n := protowire.ConsumeVarint(buf) - if n < 0 { - return 0, errDecode - } - buf = buf[n:] - list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) - } - return n, nil - } - if wtyp != protowire.VarintType { - return 0, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return 0, errDecode - } - list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) - return n, nil - case protoreflect.Uint64Kind: - if wtyp == protowire.BytesType { - buf, n := protowire.ConsumeBytes(b) - if n < 0 { - return 0, errDecode - } - for len(buf) > 0 { - v, n := protowire.ConsumeVarint(buf) - if n < 0 { - return 0, errDecode - } - buf = buf[n:] - list.Append(protoreflect.ValueOfUint64(v)) - } - return n, nil - } - if wtyp != protowire.VarintType { - return 0, errUnknown - } - v, n := 
protowire.ConsumeVarint(b) - if n < 0 { - return 0, errDecode - } - list.Append(protoreflect.ValueOfUint64(v)) - return n, nil - case protoreflect.Sfixed32Kind: - if wtyp == protowire.BytesType { - buf, n := protowire.ConsumeBytes(b) - if n < 0 { - return 0, errDecode - } - for len(buf) > 0 { - v, n := protowire.ConsumeFixed32(buf) - if n < 0 { - return 0, errDecode - } - buf = buf[n:] - list.Append(protoreflect.ValueOfInt32(int32(v))) - } - return n, nil - } - if wtyp != protowire.Fixed32Type { - return 0, errUnknown - } - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return 0, errDecode - } - list.Append(protoreflect.ValueOfInt32(int32(v))) - return n, nil - case protoreflect.Fixed32Kind: - if wtyp == protowire.BytesType { - buf, n := protowire.ConsumeBytes(b) - if n < 0 { - return 0, errDecode - } - for len(buf) > 0 { - v, n := protowire.ConsumeFixed32(buf) - if n < 0 { - return 0, errDecode - } - buf = buf[n:] - list.Append(protoreflect.ValueOfUint32(uint32(v))) - } - return n, nil - } - if wtyp != protowire.Fixed32Type { - return 0, errUnknown - } - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return 0, errDecode - } - list.Append(protoreflect.ValueOfUint32(uint32(v))) - return n, nil - case protoreflect.FloatKind: - if wtyp == protowire.BytesType { - buf, n := protowire.ConsumeBytes(b) - if n < 0 { - return 0, errDecode - } - for len(buf) > 0 { - v, n := protowire.ConsumeFixed32(buf) - if n < 0 { - return 0, errDecode - } - buf = buf[n:] - list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) - } - return n, nil - } - if wtyp != protowire.Fixed32Type { - return 0, errUnknown - } - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return 0, errDecode - } - list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) - return n, nil - case protoreflect.Sfixed64Kind: - if wtyp == protowire.BytesType { - buf, n := protowire.ConsumeBytes(b) - if n < 0 { - return 0, errDecode - } - for len(buf) > 0 { - v, n := protowire.ConsumeFixed64(buf) - if n < 0 { - return 0, errDecode - } - buf = buf[n:] - list.Append(protoreflect.ValueOfInt64(int64(v))) - } - return n, nil - } - if wtyp != protowire.Fixed64Type { - return 0, errUnknown - } - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return 0, errDecode - } - list.Append(protoreflect.ValueOfInt64(int64(v))) - return n, nil - case protoreflect.Fixed64Kind: - if wtyp == protowire.BytesType { - buf, n := protowire.ConsumeBytes(b) - if n < 0 { - return 0, errDecode - } - for len(buf) > 0 { - v, n := protowire.ConsumeFixed64(buf) - if n < 0 { - return 0, errDecode - } - buf = buf[n:] - list.Append(protoreflect.ValueOfUint64(v)) - } - return n, nil - } - if wtyp != protowire.Fixed64Type { - return 0, errUnknown - } - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return 0, errDecode - } - list.Append(protoreflect.ValueOfUint64(v)) - return n, nil - case protoreflect.DoubleKind: - if wtyp == protowire.BytesType { - buf, n := protowire.ConsumeBytes(b) - if n < 0 { - return 0, errDecode - } - for len(buf) > 0 { - v, n := protowire.ConsumeFixed64(buf) - if n < 0 { - return 0, errDecode - } - buf = buf[n:] - list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) - } - return n, nil - } - if wtyp != protowire.Fixed64Type { - return 0, errUnknown - } - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return 0, errDecode - } - list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) - return n, nil - case protoreflect.StringKind: - if wtyp != protowire.BytesType { - return 0, 
errUnknown - } - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return 0, errDecode - } - if strs.EnforceUTF8(fd) && !utf8.Valid(v) { - return 0, errors.InvalidUTF8(string(fd.FullName())) - } - list.Append(protoreflect.ValueOfString(string(v))) - return n, nil - case protoreflect.BytesKind: - if wtyp != protowire.BytesType { - return 0, errUnknown - } - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return 0, errDecode - } - list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...))) - return n, nil - case protoreflect.MessageKind: - if wtyp != protowire.BytesType { - return 0, errUnknown - } - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return 0, errDecode - } - m := list.NewElement() - if err := o.unmarshalMessage(v, m.Message()); err != nil { - return 0, err - } - list.Append(m) - return n, nil - case protoreflect.GroupKind: - if wtyp != protowire.StartGroupType { - return 0, errUnknown - } - v, n := protowire.ConsumeGroup(fd.Number(), b) - if n < 0 { - return 0, errDecode - } - m := list.NewElement() - if err := o.unmarshalMessage(v, m.Message()); err != nil { - return 0, err - } - list.Append(m) - return n, nil - default: - return 0, errUnknown - } -} - -// We append to an empty array rather than a nil []byte to get non-nil zero-length byte slices. -var emptyBuf [0]byte diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go deleted file mode 100644 index c52d8c4ab..000000000 --- a/vendor/google.golang.org/protobuf/proto/doc.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package proto provides functions operating on protocol buffer messages. -// -// For documentation on protocol buffers in general, see: -// -// https://developers.google.com/protocol-buffers -// -// For a tutorial on using protocol buffers with Go, see: -// -// https://developers.google.com/protocol-buffers/docs/gotutorial -// -// For a guide to generated Go protocol buffer code, see: -// -// https://developers.google.com/protocol-buffers/docs/reference/go-generated -// -// -// Binary serialization -// -// This package contains functions to convert to and from the wire format, -// an efficient binary serialization of protocol buffers. -// -// • Size reports the size of a message in the wire format. -// -// • Marshal converts a message to the wire format. -// The MarshalOptions type provides more control over wire marshaling. -// -// • Unmarshal converts a message from the wire format. -// The UnmarshalOptions type provides more control over wire unmarshaling. -// -// -// Basic message operations -// -// • Clone makes a deep copy of a message. -// -// • Merge merges the content of a message into another. -// -// • Equal compares two messages. For more control over comparisons -// and detailed reporting of differences, see package -// "google.golang.org/protobuf/testing/protocmp". -// -// • Reset clears the content of a message. -// -// • CheckInitialized reports whether all required fields in a message are set. -// -// -// Optional scalar constructors -// -// The API for some generated messages represents optional scalar fields -// as pointers to a value. For example, an optional string field has the -// Go type *string. 
-// -// • Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, and String -// take a value and return a pointer to a new instance of it, -// to simplify construction of optional field values. -// -// Generated enum types usually have an Enum method which performs the -// same operation. -// -// Optional scalar fields are only supported in proto2. -// -// -// Extension accessors -// -// • HasExtension, GetExtension, SetExtension, and ClearExtension -// access extension field values in a protocol buffer message. -// -// Extension fields are only supported in proto2. -// -// -// Related packages -// -// • Package "google.golang.org/protobuf/encoding/protojson" converts messages to -// and from JSON. -// -// • Package "google.golang.org/protobuf/encoding/prototext" converts messages to -// and from the text format. -// -// • Package "google.golang.org/protobuf/reflect/protoreflect" provides a -// reflection interface for protocol buffer data types. -// -// • Package "google.golang.org/protobuf/testing/protocmp" provides features -// to compare protocol buffer messages with the "github.com/google/go-cmp/cmp" -// package. -// -// • Package "google.golang.org/protobuf/types/dynamicpb" provides a dynamic -// message type, suitable for working with messages where the protocol buffer -// type is only known at runtime. -// -// This module contains additional packages for more specialized use cases. -// Consult the individual package documentation for details. -package proto diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go deleted file mode 100644 index d18239c23..000000000 --- a/vendor/google.golang.org/protobuf/proto/encode.go +++ /dev/null @@ -1,319 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/encoding/messageset" - "google.golang.org/protobuf/internal/order" - "google.golang.org/protobuf/internal/pragma" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/runtime/protoiface" -) - -// MarshalOptions configures the marshaler. -// -// Example usage: -// b, err := MarshalOptions{Deterministic: true}.Marshal(m) -type MarshalOptions struct { - pragma.NoUnkeyedLiterals - - // AllowPartial allows messages that have missing required fields to marshal - // without returning an error. If AllowPartial is false (the default), - // Marshal will return an error if there are any missing required fields. - AllowPartial bool - - // Deterministic controls whether the same message will always be - // serialized to the same bytes within the same binary. - // - // Setting this option guarantees that repeated serialization of - // the same message will return the same bytes, and that different - // processes of the same binary (which may be executing on different - // machines) will serialize equal messages to the same bytes. - // It has no effect on the resulting size of the encoded message compared - // to a non-deterministic marshal. - // - // Note that the deterministic serialization is NOT canonical across - // languages. It is not guaranteed to remain stable over time. It is - // unstable across different builds with schema changes due to unknown - // fields. Users who need canonical serialization (e.g., persistent - // storage in a canonical form, fingerprinting, etc.) 
must define - // their own canonicalization specification and implement their own - // serializer rather than relying on this API. - // - // If deterministic serialization is requested, map entries will be - // sorted by keys in lexographical order. This is an implementation - // detail and subject to change. - Deterministic bool - - // UseCachedSize indicates that the result of a previous Size call - // may be reused. - // - // Setting this option asserts that: - // - // 1. Size has previously been called on this message with identical - // options (except for UseCachedSize itself). - // - // 2. The message and all its submessages have not changed in any - // way since the Size call. - // - // If either of these invariants is violated, - // the results are undefined and may include panics or corrupted output. - // - // Implementations MAY take this option into account to provide - // better performance, but there is no guarantee that they will do so. - // There is absolutely no guarantee that Size followed by Marshal with - // UseCachedSize set will perform equivalently to Marshal alone. - UseCachedSize bool -} - -// Marshal returns the wire-format encoding of m. -func Marshal(m Message) ([]byte, error) { - // Treat nil message interface as an empty message; nothing to output. - if m == nil { - return nil, nil - } - - out, err := MarshalOptions{}.marshal(nil, m.ProtoReflect()) - if len(out.Buf) == 0 && err == nil { - out.Buf = emptyBytesForMessage(m) - } - return out.Buf, err -} - -// Marshal returns the wire-format encoding of m. -func (o MarshalOptions) Marshal(m Message) ([]byte, error) { - // Treat nil message interface as an empty message; nothing to output. - if m == nil { - return nil, nil - } - - out, err := o.marshal(nil, m.ProtoReflect()) - if len(out.Buf) == 0 && err == nil { - out.Buf = emptyBytesForMessage(m) - } - return out.Buf, err -} - -// emptyBytesForMessage returns a nil buffer if and only if m is invalid, -// otherwise it returns a non-nil empty buffer. -// -// This is to assist the edge-case where user-code does the following: -// m1.OptionalBytes, _ = proto.Marshal(m2) -// where they expect the proto2 "optional_bytes" field to be populated -// if any only if m2 is a valid message. -func emptyBytesForMessage(m Message) []byte { - if m == nil || !m.ProtoReflect().IsValid() { - return nil - } - return emptyBuf[:] -} - -// MarshalAppend appends the wire-format encoding of m to b, -// returning the result. -func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) { - // Treat nil message interface as an empty message; nothing to append. - if m == nil { - return b, nil - } - - out, err := o.marshal(b, m.ProtoReflect()) - return out.Buf, err -} - -// MarshalState returns the wire-format encoding of a message. -// -// This method permits fine-grained control over the marshaler. -// Most users should use Marshal instead. -func (o MarshalOptions) MarshalState(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) { - return o.marshal(in.Buf, in.Message) -} - -// marshal is a centralized function that all marshal operations go through. -// For profiling purposes, avoid changing the name of this function or -// introducing other code paths for marshal that do not go through this. 
-func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoiface.MarshalOutput, err error) { - allowPartial := o.AllowPartial - o.AllowPartial = true - if methods := protoMethods(m); methods != nil && methods.Marshal != nil && - !(o.Deterministic && methods.Flags&protoiface.SupportMarshalDeterministic == 0) { - in := protoiface.MarshalInput{ - Message: m, - Buf: b, - } - if o.Deterministic { - in.Flags |= protoiface.MarshalDeterministic - } - if o.UseCachedSize { - in.Flags |= protoiface.MarshalUseCachedSize - } - if methods.Size != nil { - sout := methods.Size(protoiface.SizeInput{ - Message: m, - Flags: in.Flags, - }) - if cap(b) < len(b)+sout.Size { - in.Buf = make([]byte, len(b), growcap(cap(b), len(b)+sout.Size)) - copy(in.Buf, b) - } - in.Flags |= protoiface.MarshalUseCachedSize - } - out, err = methods.Marshal(in) - } else { - out.Buf, err = o.marshalMessageSlow(b, m) - } - if err != nil { - return out, err - } - if allowPartial { - return out, nil - } - return out, checkInitialized(m) -} - -func (o MarshalOptions) marshalMessage(b []byte, m protoreflect.Message) ([]byte, error) { - out, err := o.marshal(b, m) - return out.Buf, err -} - -// growcap scales up the capacity of a slice. -// -// Given a slice with a current capacity of oldcap and a desired -// capacity of wantcap, growcap returns a new capacity >= wantcap. -// -// The algorithm is mostly identical to the one used by append as of Go 1.14. -func growcap(oldcap, wantcap int) (newcap int) { - if wantcap > oldcap*2 { - newcap = wantcap - } else if oldcap < 1024 { - // The Go 1.14 runtime takes this case when len(s) < 1024, - // not when cap(s) < 1024. The difference doesn't seem - // significant here. - newcap = oldcap * 2 - } else { - newcap = oldcap - for 0 < newcap && newcap < wantcap { - newcap += newcap / 4 - } - if newcap <= 0 { - newcap = wantcap - } - } - return newcap -} - -func (o MarshalOptions) marshalMessageSlow(b []byte, m protoreflect.Message) ([]byte, error) { - if messageset.IsMessageSet(m.Descriptor()) { - return o.marshalMessageSet(b, m) - } - fieldOrder := order.AnyFieldOrder - if o.Deterministic { - // TODO: This should use a more natural ordering like NumberFieldOrder, - // but doing so breaks golden tests that make invalid assumption about - // output stability of this implementation. - fieldOrder = order.LegacyFieldOrder - } - var err error - order.RangeFields(m, fieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - b, err = o.marshalField(b, fd, v) - return err == nil - }) - if err != nil { - return b, err - } - b = append(b, m.GetUnknown()...) 
- return b, nil -} - -func (o MarshalOptions) marshalField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) { - switch { - case fd.IsList(): - return o.marshalList(b, fd, value.List()) - case fd.IsMap(): - return o.marshalMap(b, fd, value.Map()) - default: - b = protowire.AppendTag(b, fd.Number(), wireTypes[fd.Kind()]) - return o.marshalSingular(b, fd, value) - } -} - -func (o MarshalOptions) marshalList(b []byte, fd protoreflect.FieldDescriptor, list protoreflect.List) ([]byte, error) { - if fd.IsPacked() && list.Len() > 0 { - b = protowire.AppendTag(b, fd.Number(), protowire.BytesType) - b, pos := appendSpeculativeLength(b) - for i, llen := 0, list.Len(); i < llen; i++ { - var err error - b, err = o.marshalSingular(b, fd, list.Get(i)) - if err != nil { - return b, err - } - } - b = finishSpeculativeLength(b, pos) - return b, nil - } - - kind := fd.Kind() - for i, llen := 0, list.Len(); i < llen; i++ { - var err error - b = protowire.AppendTag(b, fd.Number(), wireTypes[kind]) - b, err = o.marshalSingular(b, fd, list.Get(i)) - if err != nil { - return b, err - } - } - return b, nil -} - -func (o MarshalOptions) marshalMap(b []byte, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) ([]byte, error) { - keyf := fd.MapKey() - valf := fd.MapValue() - keyOrder := order.AnyKeyOrder - if o.Deterministic { - keyOrder = order.GenericKeyOrder - } - var err error - order.RangeEntries(mapv, keyOrder, func(key protoreflect.MapKey, value protoreflect.Value) bool { - b = protowire.AppendTag(b, fd.Number(), protowire.BytesType) - var pos int - b, pos = appendSpeculativeLength(b) - - b, err = o.marshalField(b, keyf, key.Value()) - if err != nil { - return false - } - b, err = o.marshalField(b, valf, value) - if err != nil { - return false - } - b = finishSpeculativeLength(b, pos) - return true - }) - return b, err -} - -// When encoding length-prefixed fields, we speculatively set aside some number of bytes -// for the length, encode the data, and then encode the length (shifting the data if necessary -// to make room). -const speculativeLength = 1 - -func appendSpeculativeLength(b []byte) ([]byte, int) { - pos := len(b) - b = append(b, "\x00\x00\x00\x00"[:speculativeLength]...) - return b, pos -} - -func finishSpeculativeLength(b []byte, pos int) []byte { - mlen := len(b) - pos - speculativeLength - msiz := protowire.SizeVarint(uint64(mlen)) - if msiz != speculativeLength { - for i := 0; i < msiz-speculativeLength; i++ { - b = append(b, 0) - } - copy(b[pos+msiz:], b[pos+speculativeLength:]) - b = b[:pos+msiz+mlen] - } - protowire.AppendVarint(b[:pos], uint64(mlen)) - return b -} diff --git a/vendor/google.golang.org/protobuf/proto/encode_gen.go b/vendor/google.golang.org/protobuf/proto/encode_gen.go deleted file mode 100644 index 185dacfb4..000000000 --- a/vendor/google.golang.org/protobuf/proto/encode_gen.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-types. DO NOT EDIT. 
- -package proto - -import ( - "math" - "unicode/utf8" - - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/strs" - "google.golang.org/protobuf/reflect/protoreflect" -) - -var wireTypes = map[protoreflect.Kind]protowire.Type{ - protoreflect.BoolKind: protowire.VarintType, - protoreflect.EnumKind: protowire.VarintType, - protoreflect.Int32Kind: protowire.VarintType, - protoreflect.Sint32Kind: protowire.VarintType, - protoreflect.Uint32Kind: protowire.VarintType, - protoreflect.Int64Kind: protowire.VarintType, - protoreflect.Sint64Kind: protowire.VarintType, - protoreflect.Uint64Kind: protowire.VarintType, - protoreflect.Sfixed32Kind: protowire.Fixed32Type, - protoreflect.Fixed32Kind: protowire.Fixed32Type, - protoreflect.FloatKind: protowire.Fixed32Type, - protoreflect.Sfixed64Kind: protowire.Fixed64Type, - protoreflect.Fixed64Kind: protowire.Fixed64Type, - protoreflect.DoubleKind: protowire.Fixed64Type, - protoreflect.StringKind: protowire.BytesType, - protoreflect.BytesKind: protowire.BytesType, - protoreflect.MessageKind: protowire.BytesType, - protoreflect.GroupKind: protowire.StartGroupType, -} - -func (o MarshalOptions) marshalSingular(b []byte, fd protoreflect.FieldDescriptor, v protoreflect.Value) ([]byte, error) { - switch fd.Kind() { - case protoreflect.BoolKind: - b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) - case protoreflect.EnumKind: - b = protowire.AppendVarint(b, uint64(v.Enum())) - case protoreflect.Int32Kind: - b = protowire.AppendVarint(b, uint64(int32(v.Int()))) - case protoreflect.Sint32Kind: - b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) - case protoreflect.Uint32Kind: - b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) - case protoreflect.Int64Kind: - b = protowire.AppendVarint(b, uint64(v.Int())) - case protoreflect.Sint64Kind: - b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) - case protoreflect.Uint64Kind: - b = protowire.AppendVarint(b, v.Uint()) - case protoreflect.Sfixed32Kind: - b = protowire.AppendFixed32(b, uint32(v.Int())) - case protoreflect.Fixed32Kind: - b = protowire.AppendFixed32(b, uint32(v.Uint())) - case protoreflect.FloatKind: - b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) - case protoreflect.Sfixed64Kind: - b = protowire.AppendFixed64(b, uint64(v.Int())) - case protoreflect.Fixed64Kind: - b = protowire.AppendFixed64(b, v.Uint()) - case protoreflect.DoubleKind: - b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) - case protoreflect.StringKind: - if strs.EnforceUTF8(fd) && !utf8.ValidString(v.String()) { - return b, errors.InvalidUTF8(string(fd.FullName())) - } - b = protowire.AppendString(b, v.String()) - case protoreflect.BytesKind: - b = protowire.AppendBytes(b, v.Bytes()) - case protoreflect.MessageKind: - var pos int - var err error - b, pos = appendSpeculativeLength(b) - b, err = o.marshalMessage(b, v.Message()) - if err != nil { - return b, err - } - b = finishSpeculativeLength(b, pos) - case protoreflect.GroupKind: - var err error - b, err = o.marshalMessage(b, v.Message()) - if err != nil { - return b, err - } - b = protowire.AppendVarint(b, protowire.EncodeTag(fd.Number(), protowire.EndGroupType)) - default: - return b, errors.New("invalid kind %v", fd.Kind()) - } - return b, nil -} diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go deleted file mode 100644 index 4dba2b969..000000000 --- 
a/vendor/google.golang.org/protobuf/proto/equal.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "bytes" - "math" - "reflect" - - "google.golang.org/protobuf/encoding/protowire" - pref "google.golang.org/protobuf/reflect/protoreflect" -) - -// Equal reports whether two messages are equal. -// If two messages marshal to the same bytes under deterministic serialization, -// then Equal is guaranteed to report true. -// -// Two messages are equal if they belong to the same message descriptor, -// have the same set of populated known and extension field values, -// and the same set of unknown fields values. If either of the top-level -// messages are invalid, then Equal reports true only if both are invalid. -// -// Scalar values are compared with the equivalent of the == operator in Go, -// except bytes values which are compared using bytes.Equal and -// floating point values which specially treat NaNs as equal. -// Message values are compared by recursively calling Equal. -// Lists are equal if each element value is also equal. -// Maps are equal if they have the same set of keys, where the pair of values -// for each key is also equal. -func Equal(x, y Message) bool { - if x == nil || y == nil { - return x == nil && y == nil - } - mx := x.ProtoReflect() - my := y.ProtoReflect() - if mx.IsValid() != my.IsValid() { - return false - } - return equalMessage(mx, my) -} - -// equalMessage compares two messages. -func equalMessage(mx, my pref.Message) bool { - if mx.Descriptor() != my.Descriptor() { - return false - } - - nx := 0 - equal := true - mx.Range(func(fd pref.FieldDescriptor, vx pref.Value) bool { - nx++ - vy := my.Get(fd) - equal = my.Has(fd) && equalField(fd, vx, vy) - return equal - }) - if !equal { - return false - } - ny := 0 - my.Range(func(fd pref.FieldDescriptor, vx pref.Value) bool { - ny++ - return true - }) - if nx != ny { - return false - } - - return equalUnknown(mx.GetUnknown(), my.GetUnknown()) -} - -// equalField compares two fields. -func equalField(fd pref.FieldDescriptor, x, y pref.Value) bool { - switch { - case fd.IsList(): - return equalList(fd, x.List(), y.List()) - case fd.IsMap(): - return equalMap(fd, x.Map(), y.Map()) - default: - return equalValue(fd, x, y) - } -} - -// equalMap compares two maps. -func equalMap(fd pref.FieldDescriptor, x, y pref.Map) bool { - if x.Len() != y.Len() { - return false - } - equal := true - x.Range(func(k pref.MapKey, vx pref.Value) bool { - vy := y.Get(k) - equal = y.Has(k) && equalValue(fd.MapValue(), vx, vy) - return equal - }) - return equal -} - -// equalList compares two lists. -func equalList(fd pref.FieldDescriptor, x, y pref.List) bool { - if x.Len() != y.Len() { - return false - } - for i := x.Len() - 1; i >= 0; i-- { - if !equalValue(fd, x.Get(i), y.Get(i)) { - return false - } - } - return true -} - -// equalValue compares two singular values. 
-func equalValue(fd pref.FieldDescriptor, x, y pref.Value) bool { - switch fd.Kind() { - case pref.BoolKind: - return x.Bool() == y.Bool() - case pref.EnumKind: - return x.Enum() == y.Enum() - case pref.Int32Kind, pref.Sint32Kind, - pref.Int64Kind, pref.Sint64Kind, - pref.Sfixed32Kind, pref.Sfixed64Kind: - return x.Int() == y.Int() - case pref.Uint32Kind, pref.Uint64Kind, - pref.Fixed32Kind, pref.Fixed64Kind: - return x.Uint() == y.Uint() - case pref.FloatKind, pref.DoubleKind: - fx := x.Float() - fy := y.Float() - if math.IsNaN(fx) || math.IsNaN(fy) { - return math.IsNaN(fx) && math.IsNaN(fy) - } - return fx == fy - case pref.StringKind: - return x.String() == y.String() - case pref.BytesKind: - return bytes.Equal(x.Bytes(), y.Bytes()) - case pref.MessageKind, pref.GroupKind: - return equalMessage(x.Message(), y.Message()) - default: - return x.Interface() == y.Interface() - } -} - -// equalUnknown compares unknown fields by direct comparison on the raw bytes -// of each individual field number. -func equalUnknown(x, y pref.RawFields) bool { - if len(x) != len(y) { - return false - } - if bytes.Equal([]byte(x), []byte(y)) { - return true - } - - mx := make(map[pref.FieldNumber]pref.RawFields) - my := make(map[pref.FieldNumber]pref.RawFields) - for len(x) > 0 { - fnum, _, n := protowire.ConsumeField(x) - mx[fnum] = append(mx[fnum], x[:n]...) - x = x[n:] - } - for len(y) > 0 { - fnum, _, n := protowire.ConsumeField(y) - my[fnum] = append(my[fnum], y[:n]...) - y = y[n:] - } - return reflect.DeepEqual(mx, my) -} diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go deleted file mode 100644 index 5f293cda8..000000000 --- a/vendor/google.golang.org/protobuf/proto/extension.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "google.golang.org/protobuf/reflect/protoreflect" -) - -// HasExtension reports whether an extension field is populated. -// It returns false if m is invalid or if xt does not extend m. -func HasExtension(m Message, xt protoreflect.ExtensionType) bool { - // Treat nil message interface as an empty message; no populated fields. - if m == nil { - return false - } - - // As a special-case, we reports invalid or mismatching descriptors - // as always not being populated (since they aren't). - if xt == nil || m.ProtoReflect().Descriptor() != xt.TypeDescriptor().ContainingMessage() { - return false - } - - return m.ProtoReflect().Has(xt.TypeDescriptor()) -} - -// ClearExtension clears an extension field such that subsequent -// HasExtension calls return false. -// It panics if m is invalid or if xt does not extend m. -func ClearExtension(m Message, xt protoreflect.ExtensionType) { - m.ProtoReflect().Clear(xt.TypeDescriptor()) -} - -// GetExtension retrieves the value for an extension field. -// If the field is unpopulated, it returns the default value for -// scalars and an immutable, empty value for lists or messages. -// It panics if xt does not extend m. -func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} { - // Treat nil message interface as an empty message; return the default. - if m == nil { - return xt.InterfaceOf(xt.Zero()) - } - - return xt.InterfaceOf(m.ProtoReflect().Get(xt.TypeDescriptor())) -} - -// SetExtension stores the value of an extension field. 
-// It panics if m is invalid, xt does not extend m, or if type of v -// is invalid for the specified extension field. -func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) { - xd := xt.TypeDescriptor() - pv := xt.ValueOf(v) - - // Specially treat an invalid list, map, or message as clear. - isValid := true - switch { - case xd.IsList(): - isValid = pv.List().IsValid() - case xd.IsMap(): - isValid = pv.Map().IsValid() - case xd.Message() != nil: - isValid = pv.Message().IsValid() - } - if !isValid { - m.ProtoReflect().Clear(xd) - return - } - - m.ProtoReflect().Set(xd, pv) -} - -// RangeExtensions iterates over every populated extension field in m in an -// undefined order, calling f for each extension type and value encountered. -// It returns immediately if f returns false. -// While iterating, mutating operations may only be performed -// on the current extension field. -func RangeExtensions(m Message, f func(protoreflect.ExtensionType, interface{}) bool) { - // Treat nil message interface as an empty message; nothing to range over. - if m == nil { - return - } - - m.ProtoReflect().Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - if fd.IsExtension() { - xt := fd.(protoreflect.ExtensionTypeDescriptor).Type() - vi := xt.InterfaceOf(v) - return f(xt, vi) - } - return true - }) -} diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go deleted file mode 100644 index d761ab331..000000000 --- a/vendor/google.golang.org/protobuf/proto/merge.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "fmt" - - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/runtime/protoiface" -) - -// Merge merges src into dst, which must be a message with the same descriptor. -// -// Populated scalar fields in src are copied to dst, while populated -// singular messages in src are merged into dst by recursively calling Merge. -// The elements of every list field in src is appended to the corresponded -// list fields in dst. The entries of every map field in src is copied into -// the corresponding map field in dst, possibly replacing existing entries. -// The unknown fields of src are appended to the unknown fields of dst. -// -// It is semantically equivalent to unmarshaling the encoded form of src -// into dst with the UnmarshalOptions.Merge option specified. -func Merge(dst, src Message) { - // TODO: Should nil src be treated as semantically equivalent to a - // untyped, read-only, empty message? What about a nil dst? - - dstMsg, srcMsg := dst.ProtoReflect(), src.ProtoReflect() - if dstMsg.Descriptor() != srcMsg.Descriptor() { - if got, want := dstMsg.Descriptor().FullName(), srcMsg.Descriptor().FullName(); got != want { - panic(fmt.Sprintf("descriptor mismatch: %v != %v", got, want)) - } - panic("descriptor mismatch") - } - mergeOptions{}.mergeMessage(dstMsg, srcMsg) -} - -// Clone returns a deep copy of m. -// If the top-level message is invalid, it returns an invalid message as well. 
-func Clone(m Message) Message { - // NOTE: Most usages of Clone assume the following properties: - // t := reflect.TypeOf(m) - // t == reflect.TypeOf(m.ProtoReflect().New().Interface()) - // t == reflect.TypeOf(m.ProtoReflect().Type().Zero().Interface()) - // - // Embedding protobuf messages breaks this since the parent type will have - // a forwarded ProtoReflect method, but the Interface method will return - // the underlying embedded message type. - if m == nil { - return nil - } - src := m.ProtoReflect() - if !src.IsValid() { - return src.Type().Zero().Interface() - } - dst := src.New() - mergeOptions{}.mergeMessage(dst, src) - return dst.Interface() -} - -// mergeOptions provides a namespace for merge functions, and can be -// exported in the future if we add user-visible merge options. -type mergeOptions struct{} - -func (o mergeOptions) mergeMessage(dst, src protoreflect.Message) { - methods := protoMethods(dst) - if methods != nil && methods.Merge != nil { - in := protoiface.MergeInput{ - Destination: dst, - Source: src, - } - out := methods.Merge(in) - if out.Flags&protoiface.MergeComplete != 0 { - return - } - } - - if !dst.IsValid() { - panic(fmt.Sprintf("cannot merge into invalid %v message", dst.Descriptor().FullName())) - } - - src.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - switch { - case fd.IsList(): - o.mergeList(dst.Mutable(fd).List(), v.List(), fd) - case fd.IsMap(): - o.mergeMap(dst.Mutable(fd).Map(), v.Map(), fd.MapValue()) - case fd.Message() != nil: - o.mergeMessage(dst.Mutable(fd).Message(), v.Message()) - case fd.Kind() == protoreflect.BytesKind: - dst.Set(fd, o.cloneBytes(v)) - default: - dst.Set(fd, v) - } - return true - }) - - if len(src.GetUnknown()) > 0 { - dst.SetUnknown(append(dst.GetUnknown(), src.GetUnknown()...)) - } -} - -func (o mergeOptions) mergeList(dst, src protoreflect.List, fd protoreflect.FieldDescriptor) { - // Merge semantics appends to the end of the existing list. - for i, n := 0, src.Len(); i < n; i++ { - switch v := src.Get(i); { - case fd.Message() != nil: - dstv := dst.NewElement() - o.mergeMessage(dstv.Message(), v.Message()) - dst.Append(dstv) - case fd.Kind() == protoreflect.BytesKind: - dst.Append(o.cloneBytes(v)) - default: - dst.Append(v) - } - } -} - -func (o mergeOptions) mergeMap(dst, src protoreflect.Map, fd protoreflect.FieldDescriptor) { - // Merge semantics replaces, rather than merges into existing entries. - src.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { - switch { - case fd.Message() != nil: - dstv := dst.NewValue() - o.mergeMessage(dstv.Message(), v.Message()) - dst.Set(k, dstv) - case fd.Kind() == protoreflect.BytesKind: - dst.Set(k, o.cloneBytes(v)) - default: - dst.Set(k, v) - } - return true - }) -} - -func (o mergeOptions) cloneBytes(v protoreflect.Value) protoreflect.Value { - return protoreflect.ValueOfBytes(append([]byte{}, v.Bytes()...)) -} diff --git a/vendor/google.golang.org/protobuf/proto/messageset.go b/vendor/google.golang.org/protobuf/proto/messageset.go deleted file mode 100644 index 312d5d45c..000000000 --- a/vendor/google.golang.org/protobuf/proto/messageset.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package proto - -import ( - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/encoding/messageset" - "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/flags" - "google.golang.org/protobuf/internal/order" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -func (o MarshalOptions) sizeMessageSet(m protoreflect.Message) (size int) { - m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - size += messageset.SizeField(fd.Number()) - size += protowire.SizeTag(messageset.FieldMessage) - size += protowire.SizeBytes(o.size(v.Message())) - return true - }) - size += messageset.SizeUnknown(m.GetUnknown()) - return size -} - -func (o MarshalOptions) marshalMessageSet(b []byte, m protoreflect.Message) ([]byte, error) { - if !flags.ProtoLegacy { - return b, errors.New("no support for message_set_wire_format") - } - fieldOrder := order.AnyFieldOrder - if o.Deterministic { - fieldOrder = order.NumberFieldOrder - } - var err error - order.RangeFields(m, fieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - b, err = o.marshalMessageSetField(b, fd, v) - return err == nil - }) - if err != nil { - return b, err - } - return messageset.AppendUnknown(b, m.GetUnknown()) -} - -func (o MarshalOptions) marshalMessageSetField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) { - b = messageset.AppendFieldStart(b, fd.Number()) - b = protowire.AppendTag(b, messageset.FieldMessage, protowire.BytesType) - b = protowire.AppendVarint(b, uint64(o.Size(value.Message().Interface()))) - b, err := o.marshalMessage(b, value.Message()) - if err != nil { - return b, err - } - b = messageset.AppendFieldEnd(b) - return b, nil -} - -func (o UnmarshalOptions) unmarshalMessageSet(b []byte, m protoreflect.Message) error { - if !flags.ProtoLegacy { - return errors.New("no support for message_set_wire_format") - } - return messageset.Unmarshal(b, false, func(num protowire.Number, v []byte) error { - err := o.unmarshalMessageSetField(m, num, v) - if err == errUnknown { - unknown := m.GetUnknown() - unknown = protowire.AppendTag(unknown, num, protowire.BytesType) - unknown = protowire.AppendBytes(unknown, v) - m.SetUnknown(unknown) - return nil - } - return err - }) -} - -func (o UnmarshalOptions) unmarshalMessageSetField(m protoreflect.Message, num protowire.Number, v []byte) error { - md := m.Descriptor() - if !md.ExtensionRanges().Has(num) { - return errUnknown - } - xt, err := o.Resolver.FindExtensionByNumber(md.FullName(), num) - if err == protoregistry.NotFound { - return errUnknown - } - if err != nil { - return errors.New("%v: unable to resolve extension %v: %v", md.FullName(), num, err) - } - xd := xt.TypeDescriptor() - if err := o.unmarshalMessage(v, m.Mutable(xd).Message()); err != nil { - return err - } - return nil -} diff --git a/vendor/google.golang.org/protobuf/proto/proto.go b/vendor/google.golang.org/protobuf/proto/proto.go deleted file mode 100644 index 1f0d183b1..000000000 --- a/vendor/google.golang.org/protobuf/proto/proto.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package proto - -import ( - "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/reflect/protoreflect" -) - -// Message is the top-level interface that all messages must implement. -// It provides access to a reflective view of a message. -// Any implementation of this interface may be used with all functions in the -// protobuf module that accept a Message, except where otherwise specified. -// -// This is the v2 interface definition for protobuf messages. -// The v1 interface definition is "github.com/golang/protobuf/proto".Message. -// -// To convert a v1 message to a v2 message, -// use "github.com/golang/protobuf/proto".MessageV2. -// To convert a v2 message to a v1 message, -// use "github.com/golang/protobuf/proto".MessageV1. -type Message = protoreflect.ProtoMessage - -// Error matches all errors produced by packages in the protobuf module. -// -// That is, errors.Is(err, Error) reports whether an error is produced -// by this module. -var Error error - -func init() { - Error = errors.Error -} - -// MessageName returns the full name of m. -// If m is nil, it returns an empty string. -func MessageName(m Message) protoreflect.FullName { - if m == nil { - return "" - } - return m.ProtoReflect().Descriptor().FullName() -} diff --git a/vendor/google.golang.org/protobuf/proto/proto_methods.go b/vendor/google.golang.org/protobuf/proto/proto_methods.go deleted file mode 100644 index 465e057b3..000000000 --- a/vendor/google.golang.org/protobuf/proto/proto_methods.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// The protoreflect build tag disables use of fast-path methods. -//go:build !protoreflect -// +build !protoreflect - -package proto - -import ( - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/runtime/protoiface" -) - -const hasProtoMethods = true - -func protoMethods(m protoreflect.Message) *protoiface.Methods { - return m.ProtoMethods() -} diff --git a/vendor/google.golang.org/protobuf/proto/proto_reflect.go b/vendor/google.golang.org/protobuf/proto/proto_reflect.go deleted file mode 100644 index 494d6ceef..000000000 --- a/vendor/google.golang.org/protobuf/proto/proto_reflect.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// The protoreflect build tag disables use of fast-path methods. -//go:build protoreflect -// +build protoreflect - -package proto - -import ( - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/runtime/protoiface" -) - -const hasProtoMethods = false - -func protoMethods(m protoreflect.Message) *protoiface.Methods { - return nil -} diff --git a/vendor/google.golang.org/protobuf/proto/reset.go b/vendor/google.golang.org/protobuf/proto/reset.go deleted file mode 100644 index 3d7f89436..000000000 --- a/vendor/google.golang.org/protobuf/proto/reset.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "fmt" - - "google.golang.org/protobuf/reflect/protoreflect" -) - -// Reset clears every field in the message. 
-// The resulting message shares no observable memory with its previous state -// other than the memory for the message itself. -func Reset(m Message) { - if mr, ok := m.(interface{ Reset() }); ok && hasProtoMethods { - mr.Reset() - return - } - resetMessage(m.ProtoReflect()) -} - -func resetMessage(m protoreflect.Message) { - if !m.IsValid() { - panic(fmt.Sprintf("cannot reset invalid %v message", m.Descriptor().FullName())) - } - - // Clear all known fields. - fds := m.Descriptor().Fields() - for i := 0; i < fds.Len(); i++ { - m.Clear(fds.Get(i)) - } - - // Clear extension fields. - m.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { - m.Clear(fd) - return true - }) - - // Clear unknown fields. - m.SetUnknown(nil) -} diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go deleted file mode 100644 index 554b9c6c0..000000000 --- a/vendor/google.golang.org/protobuf/proto/size.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/encoding/messageset" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/runtime/protoiface" -) - -// Size returns the size in bytes of the wire-format encoding of m. -func Size(m Message) int { - return MarshalOptions{}.Size(m) -} - -// Size returns the size in bytes of the wire-format encoding of m. -func (o MarshalOptions) Size(m Message) int { - // Treat a nil message interface as an empty message; nothing to output. - if m == nil { - return 0 - } - - return o.size(m.ProtoReflect()) -} - -// size is a centralized function that all size operations go through. -// For profiling purposes, avoid changing the name of this function or -// introducing other code paths for size that do not go through this. -func (o MarshalOptions) size(m protoreflect.Message) (size int) { - methods := protoMethods(m) - if methods != nil && methods.Size != nil { - out := methods.Size(protoiface.SizeInput{ - Message: m, - }) - return out.Size - } - if methods != nil && methods.Marshal != nil { - // This is not efficient, but we don't have any choice. - // This case is mainly used for legacy types with a Marshal method. 
- out, _ := methods.Marshal(protoiface.MarshalInput{ - Message: m, - }) - return len(out.Buf) - } - return o.sizeMessageSlow(m) -} - -func (o MarshalOptions) sizeMessageSlow(m protoreflect.Message) (size int) { - if messageset.IsMessageSet(m.Descriptor()) { - return o.sizeMessageSet(m) - } - m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - size += o.sizeField(fd, v) - return true - }) - size += len(m.GetUnknown()) - return size -} - -func (o MarshalOptions) sizeField(fd protoreflect.FieldDescriptor, value protoreflect.Value) (size int) { - num := fd.Number() - switch { - case fd.IsList(): - return o.sizeList(num, fd, value.List()) - case fd.IsMap(): - return o.sizeMap(num, fd, value.Map()) - default: - return protowire.SizeTag(num) + o.sizeSingular(num, fd.Kind(), value) - } -} - -func (o MarshalOptions) sizeList(num protowire.Number, fd protoreflect.FieldDescriptor, list protoreflect.List) (size int) { - if fd.IsPacked() && list.Len() > 0 { - content := 0 - for i, llen := 0, list.Len(); i < llen; i++ { - content += o.sizeSingular(num, fd.Kind(), list.Get(i)) - } - return protowire.SizeTag(num) + protowire.SizeBytes(content) - } - - for i, llen := 0, list.Len(); i < llen; i++ { - size += protowire.SizeTag(num) + o.sizeSingular(num, fd.Kind(), list.Get(i)) - } - return size -} - -func (o MarshalOptions) sizeMap(num protowire.Number, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) (size int) { - mapv.Range(func(key protoreflect.MapKey, value protoreflect.Value) bool { - size += protowire.SizeTag(num) - size += protowire.SizeBytes(o.sizeField(fd.MapKey(), key.Value()) + o.sizeField(fd.MapValue(), value)) - return true - }) - return size -} diff --git a/vendor/google.golang.org/protobuf/proto/size_gen.go b/vendor/google.golang.org/protobuf/proto/size_gen.go deleted file mode 100644 index 3cf61a824..000000000 --- a/vendor/google.golang.org/protobuf/proto/size_gen.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-types. DO NOT EDIT. 
- -package proto - -import ( - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/reflect/protoreflect" -) - -func (o MarshalOptions) sizeSingular(num protowire.Number, kind protoreflect.Kind, v protoreflect.Value) int { - switch kind { - case protoreflect.BoolKind: - return protowire.SizeVarint(protowire.EncodeBool(v.Bool())) - case protoreflect.EnumKind: - return protowire.SizeVarint(uint64(v.Enum())) - case protoreflect.Int32Kind: - return protowire.SizeVarint(uint64(int32(v.Int()))) - case protoreflect.Sint32Kind: - return protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) - case protoreflect.Uint32Kind: - return protowire.SizeVarint(uint64(uint32(v.Uint()))) - case protoreflect.Int64Kind: - return protowire.SizeVarint(uint64(v.Int())) - case protoreflect.Sint64Kind: - return protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) - case protoreflect.Uint64Kind: - return protowire.SizeVarint(v.Uint()) - case protoreflect.Sfixed32Kind: - return protowire.SizeFixed32() - case protoreflect.Fixed32Kind: - return protowire.SizeFixed32() - case protoreflect.FloatKind: - return protowire.SizeFixed32() - case protoreflect.Sfixed64Kind: - return protowire.SizeFixed64() - case protoreflect.Fixed64Kind: - return protowire.SizeFixed64() - case protoreflect.DoubleKind: - return protowire.SizeFixed64() - case protoreflect.StringKind: - return protowire.SizeBytes(len(v.String())) - case protoreflect.BytesKind: - return protowire.SizeBytes(len(v.Bytes())) - case protoreflect.MessageKind: - return protowire.SizeBytes(o.size(v.Message())) - case protoreflect.GroupKind: - return protowire.SizeGroup(num, o.size(v.Message())) - default: - return 0 - } -} diff --git a/vendor/google.golang.org/protobuf/proto/wrappers.go b/vendor/google.golang.org/protobuf/proto/wrappers.go deleted file mode 100644 index 653b12c3a..000000000 --- a/vendor/google.golang.org/protobuf/proto/wrappers.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -// Bool stores v in a new bool value and returns a pointer to it. -func Bool(v bool) *bool { return &v } - -// Int32 stores v in a new int32 value and returns a pointer to it. -func Int32(v int32) *int32 { return &v } - -// Int64 stores v in a new int64 value and returns a pointer to it. -func Int64(v int64) *int64 { return &v } - -// Float32 stores v in a new float32 value and returns a pointer to it. -func Float32(v float32) *float32 { return &v } - -// Float64 stores v in a new float64 value and returns a pointer to it. -func Float64(v float64) *float64 { return &v } - -// Uint32 stores v in a new uint32 value and returns a pointer to it. -func Uint32(v uint32) *uint32 { return &v } - -// Uint64 stores v in a new uint64 value and returns a pointer to it. -func Uint64(v uint64) *uint64 { return &v } - -// String stores v in a new string value and returns a pointer to it. -func String(v string) *string { return &v } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go deleted file mode 100644 index e4dfb1205..000000000 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// Package protodesc provides functionality for converting -// FileDescriptorProto messages to/from protoreflect.FileDescriptor values. -// -// The google.protobuf.FileDescriptorProto is a protobuf message that describes -// the type information for a .proto file in a form that is easily serializable. -// The protoreflect.FileDescriptor is a more structured representation of -// the FileDescriptorProto message where references and remote dependencies -// can be directly followed. -package protodesc - -import ( - "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/filedesc" - "google.golang.org/protobuf/internal/pragma" - "google.golang.org/protobuf/internal/strs" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - - "google.golang.org/protobuf/types/descriptorpb" -) - -// Resolver is the resolver used by NewFile to resolve dependencies. -// The enums and messages provided must belong to some parent file, -// which is also registered. -// -// It is implemented by protoregistry.Files. -type Resolver interface { - FindFileByPath(string) (protoreflect.FileDescriptor, error) - FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) -} - -// FileOptions configures the construction of file descriptors. -type FileOptions struct { - pragma.NoUnkeyedLiterals - - // AllowUnresolvable configures New to permissively allow unresolvable - // file, enum, or message dependencies. Unresolved dependencies are replaced - // by placeholder equivalents. - // - // The following dependencies may be left unresolved: - // • Resolving an imported file. - // • Resolving the type for a message field or extension field. - // If the kind of the field is unknown, then a placeholder is used for both - // the Enum and Message accessors on the protoreflect.FieldDescriptor. - // • Resolving an enum value set as the default for an optional enum field. - // If unresolvable, the protoreflect.FieldDescriptor.Default is set to the - // first value in the associated enum (or zero if the also enum dependency - // is also unresolvable). The protoreflect.FieldDescriptor.DefaultEnumValue - // is populated with a placeholder. - // • Resolving the extended message type for an extension field. - // • Resolving the input or output message type for a service method. - // - // If the unresolved dependency uses a relative name, - // then the placeholder will contain an invalid FullName with a "*." prefix, - // indicating that the starting prefix of the full name is unknown. - AllowUnresolvable bool -} - -// NewFile creates a new protoreflect.FileDescriptor from the provided -// file descriptor message. See FileOptions.New for more information. -func NewFile(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) { - return FileOptions{}.New(fd, r) -} - -// NewFiles creates a new protoregistry.Files from the provided -// FileDescriptorSet message. See FileOptions.NewFiles for more information. -func NewFiles(fd *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) { - return FileOptions{}.NewFiles(fd) -} - -// New creates a new protoreflect.FileDescriptor from the provided -// file descriptor message. The file must represent a valid proto file according -// to protobuf semantics. The returned descriptor is a deep copy of the input. -// -// Any imported files, enum types, or message types referenced in the file are -// resolved using the provided registry. 
When looking up an import file path, -// the path must be unique. The newly created file descriptor is not registered -// back into the provided file registry. -func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) { - if r == nil { - r = (*protoregistry.Files)(nil) // empty resolver - } - - // Handle the file descriptor content. - f := &filedesc.File{L2: &filedesc.FileL2{}} - switch fd.GetSyntax() { - case "proto2", "": - f.L1.Syntax = protoreflect.Proto2 - case "proto3": - f.L1.Syntax = protoreflect.Proto3 - default: - return nil, errors.New("invalid syntax: %q", fd.GetSyntax()) - } - f.L1.Path = fd.GetName() - if f.L1.Path == "" { - return nil, errors.New("file path must be populated") - } - f.L1.Package = protoreflect.FullName(fd.GetPackage()) - if !f.L1.Package.IsValid() && f.L1.Package != "" { - return nil, errors.New("invalid package: %q", f.L1.Package) - } - if opts := fd.GetOptions(); opts != nil { - opts = proto.Clone(opts).(*descriptorpb.FileOptions) - f.L2.Options = func() protoreflect.ProtoMessage { return opts } - } - - f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency())) - for _, i := range fd.GetPublicDependency() { - if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsPublic { - return nil, errors.New("invalid or duplicate public import index: %d", i) - } - f.L2.Imports[i].IsPublic = true - } - for _, i := range fd.GetWeakDependency() { - if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsWeak { - return nil, errors.New("invalid or duplicate weak import index: %d", i) - } - f.L2.Imports[i].IsWeak = true - } - imps := importSet{f.Path(): true} - for i, path := range fd.GetDependency() { - imp := &f.L2.Imports[i] - f, err := r.FindFileByPath(path) - if err == protoregistry.NotFound && (o.AllowUnresolvable || imp.IsWeak) { - f = filedesc.PlaceholderFile(path) - } else if err != nil { - return nil, errors.New("could not resolve import %q: %v", path, err) - } - imp.FileDescriptor = f - - if imps[imp.Path()] { - return nil, errors.New("already imported %q", path) - } - imps[imp.Path()] = true - } - for i := range fd.GetDependency() { - imp := &f.L2.Imports[i] - imps.importPublic(imp.Imports()) - } - - // Handle source locations. - f.L2.Locations.File = f - for _, loc := range fd.GetSourceCodeInfo().GetLocation() { - var l protoreflect.SourceLocation - // TODO: Validate that the path points to an actual declaration? - l.Path = protoreflect.SourcePath(loc.GetPath()) - s := loc.GetSpan() - switch len(s) { - case 3: - l.StartLine, l.StartColumn, l.EndLine, l.EndColumn = int(s[0]), int(s[1]), int(s[0]), int(s[2]) - case 4: - l.StartLine, l.StartColumn, l.EndLine, l.EndColumn = int(s[0]), int(s[1]), int(s[2]), int(s[3]) - default: - return nil, errors.New("invalid span: %v", s) - } - // TODO: Validate that the span information is sensible? - // See https://github.com/protocolbuffers/protobuf/issues/6378. - if false && (l.EndLine < l.StartLine || l.StartLine < 0 || l.StartColumn < 0 || l.EndColumn < 0 || - (l.StartLine == l.EndLine && l.EndColumn <= l.StartColumn)) { - return nil, errors.New("invalid span: %v", s) - } - l.LeadingDetachedComments = loc.GetLeadingDetachedComments() - l.LeadingComments = loc.GetLeadingComments() - l.TrailingComments = loc.GetTrailingComments() - f.L2.Locations.List = append(f.L2.Locations.List, l) - } - - // Step 1: Allocate and derive the names for all declarations. 
- // This copies all fields from the descriptor proto except: - // google.protobuf.FieldDescriptorProto.type_name - // google.protobuf.FieldDescriptorProto.default_value - // google.protobuf.FieldDescriptorProto.oneof_index - // google.protobuf.FieldDescriptorProto.extendee - // google.protobuf.MethodDescriptorProto.input - // google.protobuf.MethodDescriptorProto.output - var err error - sb := new(strs.Builder) - r1 := make(descsByName) - if f.L1.Enums.List, err = r1.initEnumDeclarations(fd.GetEnumType(), f, sb); err != nil { - return nil, err - } - if f.L1.Messages.List, err = r1.initMessagesDeclarations(fd.GetMessageType(), f, sb); err != nil { - return nil, err - } - if f.L1.Extensions.List, err = r1.initExtensionDeclarations(fd.GetExtension(), f, sb); err != nil { - return nil, err - } - if f.L1.Services.List, err = r1.initServiceDeclarations(fd.GetService(), f, sb); err != nil { - return nil, err - } - - // Step 2: Resolve every dependency reference not handled by step 1. - r2 := &resolver{local: r1, remote: r, imports: imps, allowUnresolvable: o.AllowUnresolvable} - if err := r2.resolveMessageDependencies(f.L1.Messages.List, fd.GetMessageType()); err != nil { - return nil, err - } - if err := r2.resolveExtensionDependencies(f.L1.Extensions.List, fd.GetExtension()); err != nil { - return nil, err - } - if err := r2.resolveServiceDependencies(f.L1.Services.List, fd.GetService()); err != nil { - return nil, err - } - - // Step 3: Validate every enum, message, and extension declaration. - if err := validateEnumDeclarations(f.L1.Enums.List, fd.GetEnumType()); err != nil { - return nil, err - } - if err := validateMessageDeclarations(f.L1.Messages.List, fd.GetMessageType()); err != nil { - return nil, err - } - if err := validateExtensionDeclarations(f.L1.Extensions.List, fd.GetExtension()); err != nil { - return nil, err - } - - return f, nil -} - -type importSet map[string]bool - -func (is importSet) importPublic(imps protoreflect.FileImports) { - for i := 0; i < imps.Len(); i++ { - if imp := imps.Get(i); imp.IsPublic { - is[imp.Path()] = true - is.importPublic(imp.Imports()) - } - } -} - -// NewFiles creates a new protoregistry.Files from the provided -// FileDescriptorSet message. The descriptor set must include only -// valid files according to protobuf semantics. The returned descriptors -// are a deep copy of the input. -func (o FileOptions) NewFiles(fds *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) { - files := make(map[string]*descriptorpb.FileDescriptorProto) - for _, fd := range fds.File { - if _, ok := files[fd.GetName()]; ok { - return nil, errors.New("file appears multiple times: %q", fd.GetName()) - } - files[fd.GetName()] = fd - } - r := &protoregistry.Files{} - for _, fd := range files { - if err := o.addFileDeps(r, fd, files); err != nil { - return nil, err - } - } - return r, nil -} -func (o FileOptions) addFileDeps(r *protoregistry.Files, fd *descriptorpb.FileDescriptorProto, files map[string]*descriptorpb.FileDescriptorProto) error { - // Set the entry to nil while descending into a file's dependencies to detect cycles. - files[fd.GetName()] = nil - for _, dep := range fd.Dependency { - depfd, ok := files[dep] - if depfd == nil { - if ok { - return errors.New("import cycle in file: %q", dep) - } - continue - } - if err := o.addFileDeps(r, depfd, files); err != nil { - return err - } - } - // Delete the entry once dependencies are processed. 
- delete(files, fd.GetName()) - f, err := o.New(fd, r) - if err != nil { - return err - } - return r.RegisterFile(f) -} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go deleted file mode 100644 index 37efda1af..000000000 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package protodesc - -import ( - "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/filedesc" - "google.golang.org/protobuf/internal/strs" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - - "google.golang.org/protobuf/types/descriptorpb" -) - -type descsByName map[protoreflect.FullName]protoreflect.Descriptor - -func (r descsByName) initEnumDeclarations(eds []*descriptorpb.EnumDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (es []filedesc.Enum, err error) { - es = make([]filedesc.Enum, len(eds)) // allocate up-front to ensure stable pointers - for i, ed := range eds { - e := &es[i] - e.L2 = new(filedesc.EnumL2) - if e.L0, err = r.makeBase(e, parent, ed.GetName(), i, sb); err != nil { - return nil, err - } - if opts := ed.GetOptions(); opts != nil { - opts = proto.Clone(opts).(*descriptorpb.EnumOptions) - e.L2.Options = func() protoreflect.ProtoMessage { return opts } - } - for _, s := range ed.GetReservedName() { - e.L2.ReservedNames.List = append(e.L2.ReservedNames.List, protoreflect.Name(s)) - } - for _, rr := range ed.GetReservedRange() { - e.L2.ReservedRanges.List = append(e.L2.ReservedRanges.List, [2]protoreflect.EnumNumber{ - protoreflect.EnumNumber(rr.GetStart()), - protoreflect.EnumNumber(rr.GetEnd()), - }) - } - if e.L2.Values.List, err = r.initEnumValuesFromDescriptorProto(ed.GetValue(), e, sb); err != nil { - return nil, err - } - } - return es, nil -} - -func (r descsByName) initEnumValuesFromDescriptorProto(vds []*descriptorpb.EnumValueDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (vs []filedesc.EnumValue, err error) { - vs = make([]filedesc.EnumValue, len(vds)) // allocate up-front to ensure stable pointers - for i, vd := range vds { - v := &vs[i] - if v.L0, err = r.makeBase(v, parent, vd.GetName(), i, sb); err != nil { - return nil, err - } - if opts := vd.GetOptions(); opts != nil { - opts = proto.Clone(opts).(*descriptorpb.EnumValueOptions) - v.L1.Options = func() protoreflect.ProtoMessage { return opts } - } - v.L1.Number = protoreflect.EnumNumber(vd.GetNumber()) - } - return vs, nil -} - -func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ms []filedesc.Message, err error) { - ms = make([]filedesc.Message, len(mds)) // allocate up-front to ensure stable pointers - for i, md := range mds { - m := &ms[i] - m.L2 = new(filedesc.MessageL2) - if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { - return nil, err - } - if opts := md.GetOptions(); opts != nil { - opts = proto.Clone(opts).(*descriptorpb.MessageOptions) - m.L2.Options = func() protoreflect.ProtoMessage { return opts } - m.L1.IsMapEntry = opts.GetMapEntry() - m.L1.IsMessageSet = opts.GetMessageSetWireFormat() - } - for _, s := range md.GetReservedName() { - m.L2.ReservedNames.List = append(m.L2.ReservedNames.List, 
protoreflect.Name(s)) - } - for _, rr := range md.GetReservedRange() { - m.L2.ReservedRanges.List = append(m.L2.ReservedRanges.List, [2]protoreflect.FieldNumber{ - protoreflect.FieldNumber(rr.GetStart()), - protoreflect.FieldNumber(rr.GetEnd()), - }) - } - for _, xr := range md.GetExtensionRange() { - m.L2.ExtensionRanges.List = append(m.L2.ExtensionRanges.List, [2]protoreflect.FieldNumber{ - protoreflect.FieldNumber(xr.GetStart()), - protoreflect.FieldNumber(xr.GetEnd()), - }) - var optsFunc func() protoreflect.ProtoMessage - if opts := xr.GetOptions(); opts != nil { - opts = proto.Clone(opts).(*descriptorpb.ExtensionRangeOptions) - optsFunc = func() protoreflect.ProtoMessage { return opts } - } - m.L2.ExtensionRangeOptions = append(m.L2.ExtensionRangeOptions, optsFunc) - } - if m.L2.Fields.List, err = r.initFieldsFromDescriptorProto(md.GetField(), m, sb); err != nil { - return nil, err - } - if m.L2.Oneofs.List, err = r.initOneofsFromDescriptorProto(md.GetOneofDecl(), m, sb); err != nil { - return nil, err - } - if m.L1.Enums.List, err = r.initEnumDeclarations(md.GetEnumType(), m, sb); err != nil { - return nil, err - } - if m.L1.Messages.List, err = r.initMessagesDeclarations(md.GetNestedType(), m, sb); err != nil { - return nil, err - } - if m.L1.Extensions.List, err = r.initExtensionDeclarations(md.GetExtension(), m, sb); err != nil { - return nil, err - } - } - return ms, nil -} - -func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (fs []filedesc.Field, err error) { - fs = make([]filedesc.Field, len(fds)) // allocate up-front to ensure stable pointers - for i, fd := range fds { - f := &fs[i] - if f.L0, err = r.makeBase(f, parent, fd.GetName(), i, sb); err != nil { - return nil, err - } - f.L1.IsProto3Optional = fd.GetProto3Optional() - if opts := fd.GetOptions(); opts != nil { - opts = proto.Clone(opts).(*descriptorpb.FieldOptions) - f.L1.Options = func() protoreflect.ProtoMessage { return opts } - f.L1.IsWeak = opts.GetWeak() - f.L1.HasPacked = opts.Packed != nil - f.L1.IsPacked = opts.GetPacked() - } - f.L1.Number = protoreflect.FieldNumber(fd.GetNumber()) - f.L1.Cardinality = protoreflect.Cardinality(fd.GetLabel()) - if fd.Type != nil { - f.L1.Kind = protoreflect.Kind(fd.GetType()) - } - if fd.JsonName != nil { - f.L1.StringName.InitJSON(fd.GetJsonName()) - } - } - return fs, nil -} - -func (r descsByName) initOneofsFromDescriptorProto(ods []*descriptorpb.OneofDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (os []filedesc.Oneof, err error) { - os = make([]filedesc.Oneof, len(ods)) // allocate up-front to ensure stable pointers - for i, od := range ods { - o := &os[i] - if o.L0, err = r.makeBase(o, parent, od.GetName(), i, sb); err != nil { - return nil, err - } - if opts := od.GetOptions(); opts != nil { - opts = proto.Clone(opts).(*descriptorpb.OneofOptions) - o.L1.Options = func() protoreflect.ProtoMessage { return opts } - } - } - return os, nil -} - -func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (xs []filedesc.Extension, err error) { - xs = make([]filedesc.Extension, len(xds)) // allocate up-front to ensure stable pointers - for i, xd := range xds { - x := &xs[i] - x.L2 = new(filedesc.ExtensionL2) - if x.L0, err = r.makeBase(x, parent, xd.GetName(), i, sb); err != nil { - return nil, err - } - if opts := xd.GetOptions(); opts != nil { - opts = 
proto.Clone(opts).(*descriptorpb.FieldOptions) - x.L2.Options = func() protoreflect.ProtoMessage { return opts } - x.L2.IsPacked = opts.GetPacked() - } - x.L1.Number = protoreflect.FieldNumber(xd.GetNumber()) - x.L1.Cardinality = protoreflect.Cardinality(xd.GetLabel()) - if xd.Type != nil { - x.L1.Kind = protoreflect.Kind(xd.GetType()) - } - if xd.JsonName != nil { - x.L2.StringName.InitJSON(xd.GetJsonName()) - } - } - return xs, nil -} - -func (r descsByName) initServiceDeclarations(sds []*descriptorpb.ServiceDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ss []filedesc.Service, err error) { - ss = make([]filedesc.Service, len(sds)) // allocate up-front to ensure stable pointers - for i, sd := range sds { - s := &ss[i] - s.L2 = new(filedesc.ServiceL2) - if s.L0, err = r.makeBase(s, parent, sd.GetName(), i, sb); err != nil { - return nil, err - } - if opts := sd.GetOptions(); opts != nil { - opts = proto.Clone(opts).(*descriptorpb.ServiceOptions) - s.L2.Options = func() protoreflect.ProtoMessage { return opts } - } - if s.L2.Methods.List, err = r.initMethodsFromDescriptorProto(sd.GetMethod(), s, sb); err != nil { - return nil, err - } - } - return ss, nil -} - -func (r descsByName) initMethodsFromDescriptorProto(mds []*descriptorpb.MethodDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ms []filedesc.Method, err error) { - ms = make([]filedesc.Method, len(mds)) // allocate up-front to ensure stable pointers - for i, md := range mds { - m := &ms[i] - if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { - return nil, err - } - if opts := md.GetOptions(); opts != nil { - opts = proto.Clone(opts).(*descriptorpb.MethodOptions) - m.L1.Options = func() protoreflect.ProtoMessage { return opts } - } - m.L1.IsStreamingClient = md.GetClientStreaming() - m.L1.IsStreamingServer = md.GetServerStreaming() - } - return ms, nil -} - -func (r descsByName) makeBase(child, parent protoreflect.Descriptor, name string, idx int, sb *strs.Builder) (filedesc.BaseL0, error) { - if !protoreflect.Name(name).IsValid() { - return filedesc.BaseL0{}, errors.New("descriptor %q has an invalid nested name: %q", parent.FullName(), name) - } - - // Derive the full name of the child. - // Note that enum values are a sibling to the enum parent in the namespace. - var fullName protoreflect.FullName - if _, ok := parent.(protoreflect.EnumDescriptor); ok { - fullName = sb.AppendFullName(parent.FullName().Parent(), protoreflect.Name(name)) - } else { - fullName = sb.AppendFullName(parent.FullName(), protoreflect.Name(name)) - } - if _, ok := r[fullName]; ok { - return filedesc.BaseL0{}, errors.New("descriptor %q already declared", fullName) - } - r[fullName] = child - - // TODO: Verify that the full name does not already exist in the resolver? - // This is not as critical since most usages of NewFile will register - // the created file back into the registry, which will perform this check. - - return filedesc.BaseL0{ - FullName: fullName, - ParentFile: parent.ParentFile().(*filedesc.File), - Parent: parent, - Index: idx, - }, nil -} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go deleted file mode 100644 index cebb36cda..000000000 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +++ /dev/null @@ -1,286 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package protodesc - -import ( - "google.golang.org/protobuf/internal/encoding/defval" - "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/filedesc" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - - "google.golang.org/protobuf/types/descriptorpb" -) - -// resolver is a wrapper around a local registry of declarations within the file -// and the remote resolver. The remote resolver is restricted to only return -// descriptors that have been imported. -type resolver struct { - local descsByName - remote Resolver - imports importSet - - allowUnresolvable bool -} - -func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) (err error) { - for i, md := range mds { - m := &ms[i] - for j, fd := range md.GetField() { - f := &m.L2.Fields.List[j] - if f.L1.Cardinality == protoreflect.Required { - m.L2.RequiredNumbers.List = append(m.L2.RequiredNumbers.List, f.L1.Number) - } - if fd.OneofIndex != nil { - k := int(fd.GetOneofIndex()) - if !(0 <= k && k < len(md.GetOneofDecl())) { - return errors.New("message field %q has an invalid oneof index: %d", f.FullName(), k) - } - o := &m.L2.Oneofs.List[k] - f.L1.ContainingOneof = o - o.L1.Fields.List = append(o.L1.Fields.List, f) - } - - if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil { - return errors.New("message field %q cannot resolve type: %v", f.FullName(), err) - } - if fd.DefaultValue != nil { - v, ev, err := unmarshalDefault(fd.GetDefaultValue(), f, r.allowUnresolvable) - if err != nil { - return errors.New("message field %q has invalid default: %v", f.FullName(), err) - } - f.L1.Default = filedesc.DefaultValue(v, ev) - } - } - - if err := r.resolveMessageDependencies(m.L1.Messages.List, md.GetNestedType()); err != nil { - return err - } - if err := r.resolveExtensionDependencies(m.L1.Extensions.List, md.GetExtension()); err != nil { - return err - } - } - return nil -} - -func (r *resolver) resolveExtensionDependencies(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) (err error) { - for i, xd := range xds { - x := &xs[i] - if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee()), false); err != nil { - return errors.New("extension field %q cannot resolve extendee: %v", x.FullName(), err) - } - if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), partialName(xd.GetTypeName()), false); err != nil { - return errors.New("extension field %q cannot resolve type: %v", x.FullName(), err) - } - if xd.DefaultValue != nil { - v, ev, err := unmarshalDefault(xd.GetDefaultValue(), x, r.allowUnresolvable) - if err != nil { - return errors.New("extension field %q has invalid default: %v", x.FullName(), err) - } - x.L2.Default = filedesc.DefaultValue(v, ev) - } - } - return nil -} - -func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*descriptorpb.ServiceDescriptorProto) (err error) { - for i, sd := range sds { - s := &ss[i] - for j, md := range sd.GetMethod() { - m := &s.L2.Methods.List[j] - m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType()), false) - if err != nil { - return errors.New("service method %q cannot resolve input: %v", m.FullName(), err) - } - 
m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType()), false) - if err != nil { - return errors.New("service method %q cannot resolve output: %v", m.FullName(), err) - } - } - } - return nil -} - -// findTarget finds an enum or message descriptor if k is an enum, message, -// group, or unknown. If unknown, and the name could be resolved, the kind -// returned kind is set based on the type of the resolved descriptor. -func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) { - switch k { - case protoreflect.EnumKind: - ed, err := r.findEnumDescriptor(scope, ref, isWeak) - if err != nil { - return 0, nil, nil, err - } - return k, ed, nil, nil - case protoreflect.MessageKind, protoreflect.GroupKind: - md, err := r.findMessageDescriptor(scope, ref, isWeak) - if err != nil { - return 0, nil, nil, err - } - return k, nil, md, nil - case 0: - // Handle unspecified kinds (possible with parsers that operate - // on a per-file basis without knowledge of dependencies). - d, err := r.findDescriptor(scope, ref) - if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { - return k, filedesc.PlaceholderEnum(ref.FullName()), filedesc.PlaceholderMessage(ref.FullName()), nil - } else if err == protoregistry.NotFound { - return 0, nil, nil, errors.New("%q not found", ref.FullName()) - } else if err != nil { - return 0, nil, nil, err - } - switch d := d.(type) { - case protoreflect.EnumDescriptor: - return protoreflect.EnumKind, d, nil, nil - case protoreflect.MessageDescriptor: - return protoreflect.MessageKind, nil, d, nil - default: - return 0, nil, nil, errors.New("unknown kind") - } - default: - if ref != "" { - return 0, nil, nil, errors.New("target name cannot be specified for %v", k) - } - if !k.IsValid() { - return 0, nil, nil, errors.New("invalid kind: %d", k) - } - return k, nil, nil, nil - } -} - -// findDescriptor finds the descriptor by name, -// which may be a relative name within some scope. -// -// Suppose the scope was "fizz.buzz" and the reference was "Foo.Bar", -// then the following full names are searched: -// * fizz.buzz.Foo.Bar -// * fizz.Foo.Bar -// * Foo.Bar -func (r *resolver) findDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.Descriptor, error) { - if !ref.IsValid() { - return nil, errors.New("invalid name reference: %q", ref) - } - if ref.IsFull() { - scope, ref = "", ref[1:] - } - var foundButNotImported protoreflect.Descriptor - for { - // Derive the full name to search. - s := protoreflect.FullName(ref) - if scope != "" { - s = scope + "." + s - } - - // Check the current file for the descriptor. - if d, ok := r.local[s]; ok { - return d, nil - } - - // Check the remote registry for the descriptor. - d, err := r.remote.FindDescriptorByName(s) - if err == nil { - // Only allow descriptors covered by one of the imports. - if r.imports[d.ParentFile().Path()] { - return d, nil - } - foundButNotImported = d - } else if err != protoregistry.NotFound { - return nil, errors.Wrap(err, "%q", s) - } - - // Continue on at a higher level of scoping. 
- if scope == "" { - if d := foundButNotImported; d != nil { - return nil, errors.New("resolved %q, but %q is not imported", d.FullName(), d.ParentFile().Path()) - } - return nil, protoregistry.NotFound - } - scope = scope.Parent() - } -} - -func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.EnumDescriptor, error) { - d, err := r.findDescriptor(scope, ref) - if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { - return filedesc.PlaceholderEnum(ref.FullName()), nil - } else if err == protoregistry.NotFound { - return nil, errors.New("%q not found", ref.FullName()) - } else if err != nil { - return nil, err - } - ed, ok := d.(protoreflect.EnumDescriptor) - if !ok { - return nil, errors.New("resolved %q, but it is not an enum", d.FullName()) - } - return ed, nil -} - -func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.MessageDescriptor, error) { - d, err := r.findDescriptor(scope, ref) - if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { - return filedesc.PlaceholderMessage(ref.FullName()), nil - } else if err == protoregistry.NotFound { - return nil, errors.New("%q not found", ref.FullName()) - } else if err != nil { - return nil, err - } - md, ok := d.(protoreflect.MessageDescriptor) - if !ok { - return nil, errors.New("resolved %q, but it is not an message", d.FullName()) - } - return md, nil -} - -// partialName is the partial name. A leading dot means that the name is full, -// otherwise the name is relative to some current scope. -// See google.protobuf.FieldDescriptorProto.type_name. -type partialName string - -func (s partialName) IsFull() bool { - return len(s) > 0 && s[0] == '.' -} - -func (s partialName) IsValid() bool { - if s.IsFull() { - return protoreflect.FullName(s[1:]).IsValid() - } - return protoreflect.FullName(s).IsValid() -} - -const unknownPrefix = "*." - -// FullName converts the partial name to a full name on a best-effort basis. -// If relative, it creates an invalid full name, using a "*." prefix -// to indicate that the start of the full name is unknown. 
-func (s partialName) FullName() protoreflect.FullName { - if s.IsFull() { - return protoreflect.FullName(s[1:]) - } - return protoreflect.FullName(unknownPrefix + s) -} - -func unmarshalDefault(s string, fd protoreflect.FieldDescriptor, allowUnresolvable bool) (protoreflect.Value, protoreflect.EnumValueDescriptor, error) { - var evs protoreflect.EnumValueDescriptors - if fd.Enum() != nil { - evs = fd.Enum().Values() - } - v, ev, err := defval.Unmarshal(s, fd.Kind(), evs, defval.Descriptor) - if err != nil && allowUnresolvable && evs != nil && protoreflect.Name(s).IsValid() { - v = protoreflect.ValueOfEnum(0) - if evs.Len() > 0 { - v = protoreflect.ValueOfEnum(evs.Get(0).Number()) - } - ev = filedesc.PlaceholderEnumValue(fd.Enum().FullName().Parent().Append(protoreflect.Name(s))) - } else if err != nil { - return v, ev, err - } - if fd.Syntax() == protoreflect.Proto3 { - return v, ev, errors.New("cannot be specified under proto3 semantics") - } - if fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind || fd.Cardinality() == protoreflect.Repeated { - return v, ev, errors.New("cannot be specified on composite types") - } - return v, ev, nil -} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go deleted file mode 100644 index 9af1d5648..000000000 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ /dev/null @@ -1,374 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package protodesc - -import ( - "strings" - "unicode" - - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/filedesc" - "google.golang.org/protobuf/internal/flags" - "google.golang.org/protobuf/internal/genid" - "google.golang.org/protobuf/internal/strs" - "google.golang.org/protobuf/reflect/protoreflect" - - "google.golang.org/protobuf/types/descriptorpb" -) - -func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescriptorProto) error { - for i, ed := range eds { - e := &es[i] - if err := e.L2.ReservedNames.CheckValid(); err != nil { - return errors.New("enum %q reserved names has %v", e.FullName(), err) - } - if err := e.L2.ReservedRanges.CheckValid(); err != nil { - return errors.New("enum %q reserved ranges has %v", e.FullName(), err) - } - if len(ed.GetValue()) == 0 { - return errors.New("enum %q must contain at least one value declaration", e.FullName()) - } - allowAlias := ed.GetOptions().GetAllowAlias() - foundAlias := false - for i := 0; i < e.Values().Len(); i++ { - v1 := e.Values().Get(i) - if v2 := e.Values().ByNumber(v1.Number()); v1 != v2 { - foundAlias = true - if !allowAlias { - return errors.New("enum %q has conflicting non-aliased values on number %d: %q with %q", e.FullName(), v1.Number(), v1.Name(), v2.Name()) - } - } - } - if allowAlias && !foundAlias { - return errors.New("enum %q allows aliases, but none were found", e.FullName()) - } - if e.Syntax() == protoreflect.Proto3 { - if v := e.Values().Get(0); v.Number() != 0 { - return errors.New("enum %q using proto3 semantics must have zero number for the first value", v.FullName()) - } - // Verify that value names in proto3 do not conflict if the - // case-insensitive prefix is removed. 
- // See protoc v3.8.0: src/google/protobuf/descriptor.cc:4991-5055 - names := map[string]protoreflect.EnumValueDescriptor{} - prefix := strings.Replace(strings.ToLower(string(e.Name())), "_", "", -1) - for i := 0; i < e.Values().Len(); i++ { - v1 := e.Values().Get(i) - s := strs.EnumValueName(strs.TrimEnumPrefix(string(v1.Name()), prefix)) - if v2, ok := names[s]; ok && v1.Number() != v2.Number() { - return errors.New("enum %q using proto3 semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name()) - } - names[s] = v1 - } - } - - for j, vd := range ed.GetValue() { - v := &e.L2.Values.List[j] - if vd.Number == nil { - return errors.New("enum value %q must have a specified number", v.FullName()) - } - if e.L2.ReservedNames.Has(v.Name()) { - return errors.New("enum value %q must not use reserved name", v.FullName()) - } - if e.L2.ReservedRanges.Has(v.Number()) { - return errors.New("enum value %q must not use reserved number %d", v.FullName(), v.Number()) - } - } - } - return nil -} - -func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error { - for i, md := range mds { - m := &ms[i] - - // Handle the message descriptor itself. - isMessageSet := md.GetOptions().GetMessageSetWireFormat() - if err := m.L2.ReservedNames.CheckValid(); err != nil { - return errors.New("message %q reserved names has %v", m.FullName(), err) - } - if err := m.L2.ReservedRanges.CheckValid(isMessageSet); err != nil { - return errors.New("message %q reserved ranges has %v", m.FullName(), err) - } - if err := m.L2.ExtensionRanges.CheckValid(isMessageSet); err != nil { - return errors.New("message %q extension ranges has %v", m.FullName(), err) - } - if err := (*filedesc.FieldRanges).CheckOverlap(&m.L2.ReservedRanges, &m.L2.ExtensionRanges); err != nil { - return errors.New("message %q reserved and extension ranges has %v", m.FullName(), err) - } - for i := 0; i < m.Fields().Len(); i++ { - f1 := m.Fields().Get(i) - if f2 := m.Fields().ByNumber(f1.Number()); f1 != f2 { - return errors.New("message %q has conflicting fields: %q with %q", m.FullName(), f1.Name(), f2.Name()) - } - } - if isMessageSet && !flags.ProtoLegacy { - return errors.New("message %q is a MessageSet, which is a legacy proto1 feature that is no longer supported", m.FullName()) - } - if isMessageSet && (m.Syntax() != protoreflect.Proto2 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { - return errors.New("message %q is an invalid proto1 MessageSet", m.FullName()) - } - if m.Syntax() == protoreflect.Proto3 { - if m.ExtensionRanges().Len() > 0 { - return errors.New("message %q using proto3 semantics cannot have extension ranges", m.FullName()) - } - // Verify that field names in proto3 do not conflict if lowercased - // with all underscores removed. 
- // See protoc v3.8.0: src/google/protobuf/descriptor.cc:5830-5847 - names := map[string]protoreflect.FieldDescriptor{} - for i := 0; i < m.Fields().Len(); i++ { - f1 := m.Fields().Get(i) - s := strings.Replace(strings.ToLower(string(f1.Name())), "_", "", -1) - if f2, ok := names[s]; ok { - return errors.New("message %q using proto3 semantics has conflict: %q with %q", m.FullName(), f1.Name(), f2.Name()) - } - names[s] = f1 - } - } - - for j, fd := range md.GetField() { - f := &m.L2.Fields.List[j] - if m.L2.ReservedNames.Has(f.Name()) { - return errors.New("message field %q must not use reserved name", f.FullName()) - } - if !f.Number().IsValid() { - return errors.New("message field %q has an invalid number: %d", f.FullName(), f.Number()) - } - if !f.Cardinality().IsValid() { - return errors.New("message field %q has an invalid cardinality: %d", f.FullName(), f.Cardinality()) - } - if m.L2.ReservedRanges.Has(f.Number()) { - return errors.New("message field %q must not use reserved number %d", f.FullName(), f.Number()) - } - if m.L2.ExtensionRanges.Has(f.Number()) { - return errors.New("message field %q with number %d in extension range", f.FullName(), f.Number()) - } - if fd.Extendee != nil { - return errors.New("message field %q may not have extendee: %q", f.FullName(), fd.GetExtendee()) - } - if f.L1.IsProto3Optional { - if f.Syntax() != protoreflect.Proto3 { - return errors.New("message field %q under proto3 optional semantics must be specified in the proto3 syntax", f.FullName()) - } - if f.Cardinality() != protoreflect.Optional { - return errors.New("message field %q under proto3 optional semantics must have optional cardinality", f.FullName()) - } - if f.ContainingOneof() != nil && f.ContainingOneof().Fields().Len() != 1 { - return errors.New("message field %q under proto3 optional semantics must be within a single element oneof", f.FullName()) - } - } - if f.IsWeak() && !flags.ProtoLegacy { - return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName()) - } - if f.IsWeak() && (f.Syntax() != protoreflect.Proto2 || !isOptionalMessage(f) || f.ContainingOneof() != nil) { - return errors.New("message field %q may only be weak for an optional message", f.FullName()) - } - if f.IsPacked() && !isPackable(f) { - return errors.New("message field %q is not packable", f.FullName()) - } - if err := checkValidGroup(f); err != nil { - return errors.New("message field %q is an invalid group: %v", f.FullName(), err) - } - if err := checkValidMap(f); err != nil { - return errors.New("message field %q is an invalid map: %v", f.FullName(), err) - } - if f.Syntax() == protoreflect.Proto3 { - if f.Cardinality() == protoreflect.Required { - return errors.New("message field %q using proto3 semantics cannot be required", f.FullName()) - } - if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().Syntax() != protoreflect.Proto3 { - return errors.New("message field %q using proto3 semantics may only depend on a proto3 enum", f.FullName()) - } - } - } - seenSynthetic := false // synthetic oneofs for proto3 optional must come after real oneofs - for j := range md.GetOneofDecl() { - o := &m.L2.Oneofs.List[j] - if o.Fields().Len() == 0 { - return errors.New("message oneof %q must contain at least one field declaration", o.FullName()) - } - if n := o.Fields().Len(); n-1 != (o.Fields().Get(n-1).Index() - o.Fields().Get(0).Index()) { - return errors.New("message oneof %q must have consecutively declared fields", o.FullName()) - } - - if 
o.IsSynthetic() { - seenSynthetic = true - continue - } - if !o.IsSynthetic() && seenSynthetic { - return errors.New("message oneof %q must be declared before synthetic oneofs", o.FullName()) - } - - for i := 0; i < o.Fields().Len(); i++ { - f := o.Fields().Get(i) - if f.Cardinality() != protoreflect.Optional { - return errors.New("message field %q belongs in a oneof and must be optional", f.FullName()) - } - if f.IsWeak() { - return errors.New("message field %q belongs in a oneof and must not be a weak reference", f.FullName()) - } - } - } - - if err := validateEnumDeclarations(m.L1.Enums.List, md.GetEnumType()); err != nil { - return err - } - if err := validateMessageDeclarations(m.L1.Messages.List, md.GetNestedType()); err != nil { - return err - } - if err := validateExtensionDeclarations(m.L1.Extensions.List, md.GetExtension()); err != nil { - return err - } - } - return nil -} - -func validateExtensionDeclarations(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error { - for i, xd := range xds { - x := &xs[i] - // NOTE: Avoid using the IsValid method since extensions to MessageSet - // may have a field number higher than normal. This check only verifies - // that the number is not negative or reserved. We check again later - // if we know that the extendee is definitely not a MessageSet. - if n := x.Number(); n < 0 || (protowire.FirstReservedNumber <= n && n <= protowire.LastReservedNumber) { - return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number()) - } - if !x.Cardinality().IsValid() || x.Cardinality() == protoreflect.Required { - return errors.New("extension field %q has an invalid cardinality: %d", x.FullName(), x.Cardinality()) - } - if xd.JsonName != nil { - // A bug in older versions of protoc would always populate the - // "json_name" option for extensions when it is meaningless. - // When it did so, it would always use the camel-cased field name. 
- if xd.GetJsonName() != strs.JSONCamelCase(string(x.Name())) { - return errors.New("extension field %q may not have an explicitly set JSON name: %q", x.FullName(), xd.GetJsonName()) - } - } - if xd.OneofIndex != nil { - return errors.New("extension field %q may not be part of a oneof", x.FullName()) - } - if md := x.ContainingMessage(); !md.IsPlaceholder() { - if !md.ExtensionRanges().Has(x.Number()) { - return errors.New("extension field %q extends %q with non-extension field number: %d", x.FullName(), md.FullName(), x.Number()) - } - isMessageSet := md.Options().(*descriptorpb.MessageOptions).GetMessageSetWireFormat() - if isMessageSet && !isOptionalMessage(x) { - return errors.New("extension field %q extends MessageSet and must be an optional message", x.FullName()) - } - if !isMessageSet && !x.Number().IsValid() { - return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number()) - } - } - if xd.GetOptions().GetWeak() { - return errors.New("extension field %q cannot be a weak reference", x.FullName()) - } - if x.IsPacked() && !isPackable(x) { - return errors.New("extension field %q is not packable", x.FullName()) - } - if err := checkValidGroup(x); err != nil { - return errors.New("extension field %q is an invalid group: %v", x.FullName(), err) - } - if md := x.Message(); md != nil && md.IsMapEntry() { - return errors.New("extension field %q cannot be a map entry", x.FullName()) - } - if x.Syntax() == protoreflect.Proto3 { - switch x.ContainingMessage().FullName() { - case (*descriptorpb.FileOptions)(nil).ProtoReflect().Descriptor().FullName(): - case (*descriptorpb.EnumOptions)(nil).ProtoReflect().Descriptor().FullName(): - case (*descriptorpb.EnumValueOptions)(nil).ProtoReflect().Descriptor().FullName(): - case (*descriptorpb.MessageOptions)(nil).ProtoReflect().Descriptor().FullName(): - case (*descriptorpb.FieldOptions)(nil).ProtoReflect().Descriptor().FullName(): - case (*descriptorpb.OneofOptions)(nil).ProtoReflect().Descriptor().FullName(): - case (*descriptorpb.ExtensionRangeOptions)(nil).ProtoReflect().Descriptor().FullName(): - case (*descriptorpb.ServiceOptions)(nil).ProtoReflect().Descriptor().FullName(): - case (*descriptorpb.MethodOptions)(nil).ProtoReflect().Descriptor().FullName(): - default: - return errors.New("extension field %q cannot be declared in proto3 unless extended descriptor options", x.FullName()) - } - } - } - return nil -} - -// isOptionalMessage reports whether this is an optional message. -// If the kind is unknown, it is assumed to be a message. -func isOptionalMessage(fd protoreflect.FieldDescriptor) bool { - return (fd.Kind() == 0 || fd.Kind() == protoreflect.MessageKind) && fd.Cardinality() == protoreflect.Optional -} - -// isPackable checks whether the pack option can be specified. -func isPackable(fd protoreflect.FieldDescriptor) bool { - switch fd.Kind() { - case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: - return false - } - return fd.IsList() -} - -// checkValidGroup reports whether fd is a valid group according to the same -// rules that protoc imposes. 
-func checkValidGroup(fd protoreflect.FieldDescriptor) error { - md := fd.Message() - switch { - case fd.Kind() != protoreflect.GroupKind: - return nil - case fd.Syntax() != protoreflect.Proto2: - return errors.New("invalid under proto2 semantics") - case md == nil || md.IsPlaceholder(): - return errors.New("message must be resolvable") - case fd.FullName().Parent() != md.FullName().Parent(): - return errors.New("message and field must be declared in the same scope") - case !unicode.IsUpper(rune(md.Name()[0])): - return errors.New("message name must start with an uppercase") - case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))): - return errors.New("field name must be lowercased form of the message name") - } - return nil -} - -// checkValidMap checks whether the field is a valid map according to the same -// rules that protoc imposes. -// See protoc v3.8.0: src/google/protobuf/descriptor.cc:6045-6115 -func checkValidMap(fd protoreflect.FieldDescriptor) error { - md := fd.Message() - switch { - case md == nil || !md.IsMapEntry(): - return nil - case fd.FullName().Parent() != md.FullName().Parent(): - return errors.New("message and field must be declared in the same scope") - case md.Name() != protoreflect.Name(strs.MapEntryName(string(fd.Name()))): - return errors.New("incorrect implicit map entry name") - case fd.Cardinality() != protoreflect.Repeated: - return errors.New("field must be repeated") - case md.Fields().Len() != 2: - return errors.New("message must have exactly two fields") - case md.ExtensionRanges().Len() > 0: - return errors.New("message must not have any extension ranges") - case md.Enums().Len()+md.Messages().Len()+md.Extensions().Len() > 0: - return errors.New("message must not have any nested declarations") - } - kf := md.Fields().Get(0) - vf := md.Fields().Get(1) - switch { - case kf.Name() != genid.MapEntry_Key_field_name || kf.Number() != genid.MapEntry_Key_field_number || kf.Cardinality() != protoreflect.Optional || kf.ContainingOneof() != nil || kf.HasDefault(): - return errors.New("invalid key field") - case vf.Name() != genid.MapEntry_Value_field_name || vf.Number() != genid.MapEntry_Value_field_number || vf.Cardinality() != protoreflect.Optional || vf.ContainingOneof() != nil || vf.HasDefault(): - return errors.New("invalid value field") - } - switch kf.Kind() { - case protoreflect.BoolKind: // bool - case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: // int32 - case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: // int64 - case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: // uint32 - case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: // uint64 - case protoreflect.StringKind: // string - default: - return errors.New("invalid key kind: %v", kf.Kind()) - } - if e := vf.Enum(); e != nil && e.Values().Len() > 0 && e.Values().Get(0).Number() != 0 { - return errors.New("map enum value must have zero number for the first value") - } - return nil -} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go deleted file mode 100644 index a7c5ceffc..000000000 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go +++ /dev/null @@ -1,252 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package protodesc - -import ( - "fmt" - "strings" - - "google.golang.org/protobuf/internal/encoding/defval" - "google.golang.org/protobuf/internal/strs" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - - "google.golang.org/protobuf/types/descriptorpb" -) - -// ToFileDescriptorProto copies a protoreflect.FileDescriptor into a -// google.protobuf.FileDescriptorProto message. -func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto { - p := &descriptorpb.FileDescriptorProto{ - Name: proto.String(file.Path()), - Options: proto.Clone(file.Options()).(*descriptorpb.FileOptions), - } - if file.Package() != "" { - p.Package = proto.String(string(file.Package())) - } - for i, imports := 0, file.Imports(); i < imports.Len(); i++ { - imp := imports.Get(i) - p.Dependency = append(p.Dependency, imp.Path()) - if imp.IsPublic { - p.PublicDependency = append(p.PublicDependency, int32(i)) - } - if imp.IsWeak { - p.WeakDependency = append(p.WeakDependency, int32(i)) - } - } - for i, locs := 0, file.SourceLocations(); i < locs.Len(); i++ { - loc := locs.Get(i) - l := &descriptorpb.SourceCodeInfo_Location{} - l.Path = append(l.Path, loc.Path...) - if loc.StartLine == loc.EndLine { - l.Span = []int32{int32(loc.StartLine), int32(loc.StartColumn), int32(loc.EndColumn)} - } else { - l.Span = []int32{int32(loc.StartLine), int32(loc.StartColumn), int32(loc.EndLine), int32(loc.EndColumn)} - } - l.LeadingDetachedComments = append([]string(nil), loc.LeadingDetachedComments...) - if loc.LeadingComments != "" { - l.LeadingComments = proto.String(loc.LeadingComments) - } - if loc.TrailingComments != "" { - l.TrailingComments = proto.String(loc.TrailingComments) - } - if p.SourceCodeInfo == nil { - p.SourceCodeInfo = &descriptorpb.SourceCodeInfo{} - } - p.SourceCodeInfo.Location = append(p.SourceCodeInfo.Location, l) - - } - for i, messages := 0, file.Messages(); i < messages.Len(); i++ { - p.MessageType = append(p.MessageType, ToDescriptorProto(messages.Get(i))) - } - for i, enums := 0, file.Enums(); i < enums.Len(); i++ { - p.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i))) - } - for i, services := 0, file.Services(); i < services.Len(); i++ { - p.Service = append(p.Service, ToServiceDescriptorProto(services.Get(i))) - } - for i, exts := 0, file.Extensions(); i < exts.Len(); i++ { - p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i))) - } - if syntax := file.Syntax(); syntax != protoreflect.Proto2 { - p.Syntax = proto.String(file.Syntax().String()) - } - return p -} - -// ToDescriptorProto copies a protoreflect.MessageDescriptor into a -// google.protobuf.DescriptorProto message. 
-func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto { - p := &descriptorpb.DescriptorProto{ - Name: proto.String(string(message.Name())), - Options: proto.Clone(message.Options()).(*descriptorpb.MessageOptions), - } - for i, fields := 0, message.Fields(); i < fields.Len(); i++ { - p.Field = append(p.Field, ToFieldDescriptorProto(fields.Get(i))) - } - for i, exts := 0, message.Extensions(); i < exts.Len(); i++ { - p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i))) - } - for i, messages := 0, message.Messages(); i < messages.Len(); i++ { - p.NestedType = append(p.NestedType, ToDescriptorProto(messages.Get(i))) - } - for i, enums := 0, message.Enums(); i < enums.Len(); i++ { - p.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i))) - } - for i, xranges := 0, message.ExtensionRanges(); i < xranges.Len(); i++ { - xrange := xranges.Get(i) - p.ExtensionRange = append(p.ExtensionRange, &descriptorpb.DescriptorProto_ExtensionRange{ - Start: proto.Int32(int32(xrange[0])), - End: proto.Int32(int32(xrange[1])), - Options: proto.Clone(message.ExtensionRangeOptions(i)).(*descriptorpb.ExtensionRangeOptions), - }) - } - for i, oneofs := 0, message.Oneofs(); i < oneofs.Len(); i++ { - p.OneofDecl = append(p.OneofDecl, ToOneofDescriptorProto(oneofs.Get(i))) - } - for i, ranges := 0, message.ReservedRanges(); i < ranges.Len(); i++ { - rrange := ranges.Get(i) - p.ReservedRange = append(p.ReservedRange, &descriptorpb.DescriptorProto_ReservedRange{ - Start: proto.Int32(int32(rrange[0])), - End: proto.Int32(int32(rrange[1])), - }) - } - for i, names := 0, message.ReservedNames(); i < names.Len(); i++ { - p.ReservedName = append(p.ReservedName, string(names.Get(i))) - } - return p -} - -// ToFieldDescriptorProto copies a protoreflect.FieldDescriptor into a -// google.protobuf.FieldDescriptorProto message. -func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto { - p := &descriptorpb.FieldDescriptorProto{ - Name: proto.String(string(field.Name())), - Number: proto.Int32(int32(field.Number())), - Label: descriptorpb.FieldDescriptorProto_Label(field.Cardinality()).Enum(), - Options: proto.Clone(field.Options()).(*descriptorpb.FieldOptions), - } - if field.IsExtension() { - p.Extendee = fullNameOf(field.ContainingMessage()) - } - if field.Kind().IsValid() { - p.Type = descriptorpb.FieldDescriptorProto_Type(field.Kind()).Enum() - } - if field.Enum() != nil { - p.TypeName = fullNameOf(field.Enum()) - } - if field.Message() != nil { - p.TypeName = fullNameOf(field.Message()) - } - if field.HasJSONName() { - // A bug in older versions of protoc would always populate the - // "json_name" option for extensions when it is meaningless. - // When it did so, it would always use the camel-cased field name. 
- if field.IsExtension() { - p.JsonName = proto.String(strs.JSONCamelCase(string(field.Name()))) - } else { - p.JsonName = proto.String(field.JSONName()) - } - } - if field.Syntax() == protoreflect.Proto3 && field.HasOptionalKeyword() { - p.Proto3Optional = proto.Bool(true) - } - if field.HasDefault() { - def, err := defval.Marshal(field.Default(), field.DefaultEnumValue(), field.Kind(), defval.Descriptor) - if err != nil && field.DefaultEnumValue() != nil { - def = string(field.DefaultEnumValue().Name()) // occurs for unresolved enum values - } else if err != nil { - panic(fmt.Sprintf("%v: %v", field.FullName(), err)) - } - p.DefaultValue = proto.String(def) - } - if oneof := field.ContainingOneof(); oneof != nil { - p.OneofIndex = proto.Int32(int32(oneof.Index())) - } - return p -} - -// ToOneofDescriptorProto copies a protoreflect.OneofDescriptor into a -// google.protobuf.OneofDescriptorProto message. -func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto { - return &descriptorpb.OneofDescriptorProto{ - Name: proto.String(string(oneof.Name())), - Options: proto.Clone(oneof.Options()).(*descriptorpb.OneofOptions), - } -} - -// ToEnumDescriptorProto copies a protoreflect.EnumDescriptor into a -// google.protobuf.EnumDescriptorProto message. -func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto { - p := &descriptorpb.EnumDescriptorProto{ - Name: proto.String(string(enum.Name())), - Options: proto.Clone(enum.Options()).(*descriptorpb.EnumOptions), - } - for i, values := 0, enum.Values(); i < values.Len(); i++ { - p.Value = append(p.Value, ToEnumValueDescriptorProto(values.Get(i))) - } - for i, ranges := 0, enum.ReservedRanges(); i < ranges.Len(); i++ { - rrange := ranges.Get(i) - p.ReservedRange = append(p.ReservedRange, &descriptorpb.EnumDescriptorProto_EnumReservedRange{ - Start: proto.Int32(int32(rrange[0])), - End: proto.Int32(int32(rrange[1])), - }) - } - for i, names := 0, enum.ReservedNames(); i < names.Len(); i++ { - p.ReservedName = append(p.ReservedName, string(names.Get(i))) - } - return p -} - -// ToEnumValueDescriptorProto copies a protoreflect.EnumValueDescriptor into a -// google.protobuf.EnumValueDescriptorProto message. -func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto { - return &descriptorpb.EnumValueDescriptorProto{ - Name: proto.String(string(value.Name())), - Number: proto.Int32(int32(value.Number())), - Options: proto.Clone(value.Options()).(*descriptorpb.EnumValueOptions), - } -} - -// ToServiceDescriptorProto copies a protoreflect.ServiceDescriptor into a -// google.protobuf.ServiceDescriptorProto message. -func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto { - p := &descriptorpb.ServiceDescriptorProto{ - Name: proto.String(string(service.Name())), - Options: proto.Clone(service.Options()).(*descriptorpb.ServiceOptions), - } - for i, methods := 0, service.Methods(); i < methods.Len(); i++ { - p.Method = append(p.Method, ToMethodDescriptorProto(methods.Get(i))) - } - return p -} - -// ToMethodDescriptorProto copies a protoreflect.MethodDescriptor into a -// google.protobuf.MethodDescriptorProto message. 
-func ToMethodDescriptorProto(method protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto { - p := &descriptorpb.MethodDescriptorProto{ - Name: proto.String(string(method.Name())), - InputType: fullNameOf(method.Input()), - OutputType: fullNameOf(method.Output()), - Options: proto.Clone(method.Options()).(*descriptorpb.MethodOptions), - } - if method.IsStreamingClient() { - p.ClientStreaming = proto.Bool(true) - } - if method.IsStreamingServer() { - p.ServerStreaming = proto.Bool(true) - } - return p -} - -func fullNameOf(d protoreflect.Descriptor) *string { - if d == nil { - return nil - } - if strings.HasPrefix(string(d.FullName()), unknownPrefix) { - return proto.String(string(d.FullName()[len(unknownPrefix):])) - } - return proto.String("." + string(d.FullName())) -} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go deleted file mode 100644 index d5d5af6eb..000000000 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package protoreflect - -import ( - "google.golang.org/protobuf/internal/pragma" -) - -// The following types are used by the fast-path Message.ProtoMethods method. -// -// To avoid polluting the public protoreflect API with types used only by -// low-level implementations, the canonical definitions of these types are -// in the runtime/protoiface package. The definitions here and in protoiface -// must be kept in sync. -type ( - methods = struct { - pragma.NoUnkeyedLiterals - Flags supportFlags - Size func(sizeInput) sizeOutput - Marshal func(marshalInput) (marshalOutput, error) - Unmarshal func(unmarshalInput) (unmarshalOutput, error) - Merge func(mergeInput) mergeOutput - CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error) - } - supportFlags = uint64 - sizeInput = struct { - pragma.NoUnkeyedLiterals - Message Message - Flags uint8 - } - sizeOutput = struct { - pragma.NoUnkeyedLiterals - Size int - } - marshalInput = struct { - pragma.NoUnkeyedLiterals - Message Message - Buf []byte - Flags uint8 - } - marshalOutput = struct { - pragma.NoUnkeyedLiterals - Buf []byte - } - unmarshalInput = struct { - pragma.NoUnkeyedLiterals - Message Message - Buf []byte - Flags uint8 - Resolver interface { - FindExtensionByName(field FullName) (ExtensionType, error) - FindExtensionByNumber(message FullName, field FieldNumber) (ExtensionType, error) - } - Depth int - } - unmarshalOutput = struct { - pragma.NoUnkeyedLiterals - Flags uint8 - } - mergeInput = struct { - pragma.NoUnkeyedLiterals - Source Message - Destination Message - } - mergeOutput = struct { - pragma.NoUnkeyedLiterals - Flags uint8 - } - checkInitializedInput = struct { - pragma.NoUnkeyedLiterals - Message Message - } - checkInitializedOutput = struct { - pragma.NoUnkeyedLiterals - } -) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go deleted file mode 100644 index dd85915bd..000000000 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go +++ /dev/null @@ -1,504 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// Package protoreflect provides interfaces to dynamically manipulate messages. -// -// This package includes type descriptors which describe the structure of types -// defined in proto source files and value interfaces which provide the -// ability to examine and manipulate the contents of messages. -// -// -// Protocol Buffer Descriptors -// -// Protobuf descriptors (e.g., EnumDescriptor or MessageDescriptor) -// are immutable objects that represent protobuf type information. -// They are wrappers around the messages declared in descriptor.proto. -// Protobuf descriptors alone lack any information regarding Go types. -// -// Enums and messages generated by this module implement Enum and ProtoMessage, -// where the Descriptor and ProtoReflect.Descriptor accessors respectively -// return the protobuf descriptor for the values. -// -// The protobuf descriptor interfaces are not meant to be implemented by -// user code since they might need to be extended in the future to support -// additions to the protobuf language. -// The "google.golang.org/protobuf/reflect/protodesc" package converts between -// google.protobuf.DescriptorProto messages and protobuf descriptors. -// -// -// Go Type Descriptors -// -// A type descriptor (e.g., EnumType or MessageType) is a constructor for -// a concrete Go type that represents the associated protobuf descriptor. -// There is commonly a one-to-one relationship between protobuf descriptors and -// Go type descriptors, but it can potentially be a one-to-many relationship. -// -// Enums and messages generated by this module implement Enum and ProtoMessage, -// where the Type and ProtoReflect.Type accessors respectively -// return the protobuf descriptor for the values. -// -// The "google.golang.org/protobuf/types/dynamicpb" package can be used to -// create Go type descriptors from protobuf descriptors. -// -// -// Value Interfaces -// -// The Enum and Message interfaces provide a reflective view over an -// enum or message instance. For enums, it provides the ability to retrieve -// the enum value number for any concrete enum type. For messages, it provides -// the ability to access or manipulate fields of the message. -// -// To convert a proto.Message to a protoreflect.Message, use the -// former's ProtoReflect method. Since the ProtoReflect method is new to the -// v2 message interface, it may not be present on older message implementations. -// The "github.com/golang/protobuf/proto".MessageReflect function can be used -// to obtain a reflective view on older messages. -// -// -// Relationships -// -// The following diagrams demonstrate the relationships between -// various types declared in this package. -// -// -// ┌───────────────────────────────────┐ -// V │ -// ┌────────────── New(n) ─────────────┐ │ -// │ │ │ -// │ ┌──── Descriptor() ──┐ │ ┌── Number() ──┐ │ -// │ │ V V │ V │ -// ╔════════════╗ ╔════════════════╗ ╔════════╗ ╔════════════╗ -// ║ EnumType ║ ║ EnumDescriptor ║ ║ Enum ║ ║ EnumNumber ║ -// ╚════════════╝ ╚════════════════╝ ╚════════╝ ╚════════════╝ -// Λ Λ │ │ -// │ └─── Descriptor() ──┘ │ -// │ │ -// └────────────────── Type() ───────┘ -// -// • An EnumType describes a concrete Go enum type. -// It has an EnumDescriptor and can construct an Enum instance. -// -// • An EnumDescriptor describes an abstract protobuf enum type. -// -// • An Enum is a concrete enum instance. Generated enums implement Enum. 
-// -// -// ┌──────────────── New() ─────────────────┐ -// │ │ -// │ ┌─── Descriptor() ─────┐ │ ┌── Interface() ───┐ -// │ │ V V │ V -// ╔═════════════╗ ╔═══════════════════╗ ╔═════════╗ ╔══════════════╗ -// ║ MessageType ║ ║ MessageDescriptor ║ ║ Message ║ ║ ProtoMessage ║ -// ╚═════════════╝ ╚═══════════════════╝ ╚═════════╝ ╚══════════════╝ -// Λ Λ │ │ Λ │ -// │ └──── Descriptor() ────┘ │ └─ ProtoReflect() ─┘ -// │ │ -// └─────────────────── Type() ─────────┘ -// -// • A MessageType describes a concrete Go message type. -// It has a MessageDescriptor and can construct a Message instance. -// -// • A MessageDescriptor describes an abstract protobuf message type. -// -// • A Message is a concrete message instance. Generated messages implement -// ProtoMessage, which can convert to/from a Message. -// -// -// ┌── TypeDescriptor() ──┐ ┌───── Descriptor() ─────┐ -// │ V │ V -// ╔═══════════════╗ ╔═════════════════════════╗ ╔═════════════════════╗ -// ║ ExtensionType ║ ║ ExtensionTypeDescriptor ║ ║ ExtensionDescriptor ║ -// ╚═══════════════╝ ╚═════════════════════════╝ ╚═════════════════════╝ -// Λ │ │ Λ │ Λ -// └─────── Type() ───────┘ │ └─── may implement ────┘ │ -// │ │ -// └────── implements ────────┘ -// -// • An ExtensionType describes a concrete Go implementation of an extension. -// It has an ExtensionTypeDescriptor and can convert to/from -// abstract Values and Go values. -// -// • An ExtensionTypeDescriptor is an ExtensionDescriptor -// which also has an ExtensionType. -// -// • An ExtensionDescriptor describes an abstract protobuf extension field and -// may not always be an ExtensionTypeDescriptor. -package protoreflect - -import ( - "fmt" - "strings" - - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/pragma" -) - -type doNotImplement pragma.DoNotImplement - -// ProtoMessage is the top-level interface that all proto messages implement. -// This is declared in the protoreflect package to avoid a cyclic dependency; -// use the proto.Message type instead, which aliases this type. -type ProtoMessage interface{ ProtoReflect() Message } - -// Syntax is the language version of the proto file. -type Syntax syntax - -type syntax int8 // keep exact type opaque as the int type may change - -const ( - Proto2 Syntax = 2 - Proto3 Syntax = 3 -) - -// IsValid reports whether the syntax is valid. -func (s Syntax) IsValid() bool { - switch s { - case Proto2, Proto3: - return true - default: - return false - } -} - -// String returns s as a proto source identifier (e.g., "proto2"). -func (s Syntax) String() string { - switch s { - case Proto2: - return "proto2" - case Proto3: - return "proto3" - default: - return fmt.Sprintf("", s) - } -} - -// GoString returns s as a Go source identifier (e.g., "Proto2"). -func (s Syntax) GoString() string { - switch s { - case Proto2: - return "Proto2" - case Proto3: - return "Proto3" - default: - return fmt.Sprintf("Syntax(%d)", s) - } -} - -// Cardinality determines whether a field is optional, required, or repeated. -type Cardinality cardinality - -type cardinality int8 // keep exact type opaque as the int type may change - -// Constants as defined by the google.protobuf.Cardinality enumeration. -const ( - Optional Cardinality = 1 // appears zero or one times - Required Cardinality = 2 // appears exactly one time; invalid with Proto3 - Repeated Cardinality = 3 // appears zero or more times -) - -// IsValid reports whether the cardinality is valid. 
-func (c Cardinality) IsValid() bool { - switch c { - case Optional, Required, Repeated: - return true - default: - return false - } -} - -// String returns c as a proto source identifier (e.g., "optional"). -func (c Cardinality) String() string { - switch c { - case Optional: - return "optional" - case Required: - return "required" - case Repeated: - return "repeated" - default: - return fmt.Sprintf("", c) - } -} - -// GoString returns c as a Go source identifier (e.g., "Optional"). -func (c Cardinality) GoString() string { - switch c { - case Optional: - return "Optional" - case Required: - return "Required" - case Repeated: - return "Repeated" - default: - return fmt.Sprintf("Cardinality(%d)", c) - } -} - -// Kind indicates the basic proto kind of a field. -type Kind kind - -type kind int8 // keep exact type opaque as the int type may change - -// Constants as defined by the google.protobuf.Field.Kind enumeration. -const ( - BoolKind Kind = 8 - EnumKind Kind = 14 - Int32Kind Kind = 5 - Sint32Kind Kind = 17 - Uint32Kind Kind = 13 - Int64Kind Kind = 3 - Sint64Kind Kind = 18 - Uint64Kind Kind = 4 - Sfixed32Kind Kind = 15 - Fixed32Kind Kind = 7 - FloatKind Kind = 2 - Sfixed64Kind Kind = 16 - Fixed64Kind Kind = 6 - DoubleKind Kind = 1 - StringKind Kind = 9 - BytesKind Kind = 12 - MessageKind Kind = 11 - GroupKind Kind = 10 -) - -// IsValid reports whether the kind is valid. -func (k Kind) IsValid() bool { - switch k { - case BoolKind, EnumKind, - Int32Kind, Sint32Kind, Uint32Kind, - Int64Kind, Sint64Kind, Uint64Kind, - Sfixed32Kind, Fixed32Kind, FloatKind, - Sfixed64Kind, Fixed64Kind, DoubleKind, - StringKind, BytesKind, MessageKind, GroupKind: - return true - default: - return false - } -} - -// String returns k as a proto source identifier (e.g., "bool"). -func (k Kind) String() string { - switch k { - case BoolKind: - return "bool" - case EnumKind: - return "enum" - case Int32Kind: - return "int32" - case Sint32Kind: - return "sint32" - case Uint32Kind: - return "uint32" - case Int64Kind: - return "int64" - case Sint64Kind: - return "sint64" - case Uint64Kind: - return "uint64" - case Sfixed32Kind: - return "sfixed32" - case Fixed32Kind: - return "fixed32" - case FloatKind: - return "float" - case Sfixed64Kind: - return "sfixed64" - case Fixed64Kind: - return "fixed64" - case DoubleKind: - return "double" - case StringKind: - return "string" - case BytesKind: - return "bytes" - case MessageKind: - return "message" - case GroupKind: - return "group" - default: - return fmt.Sprintf("", k) - } -} - -// GoString returns k as a Go source identifier (e.g., "BoolKind"). -func (k Kind) GoString() string { - switch k { - case BoolKind: - return "BoolKind" - case EnumKind: - return "EnumKind" - case Int32Kind: - return "Int32Kind" - case Sint32Kind: - return "Sint32Kind" - case Uint32Kind: - return "Uint32Kind" - case Int64Kind: - return "Int64Kind" - case Sint64Kind: - return "Sint64Kind" - case Uint64Kind: - return "Uint64Kind" - case Sfixed32Kind: - return "Sfixed32Kind" - case Fixed32Kind: - return "Fixed32Kind" - case FloatKind: - return "FloatKind" - case Sfixed64Kind: - return "Sfixed64Kind" - case Fixed64Kind: - return "Fixed64Kind" - case DoubleKind: - return "DoubleKind" - case StringKind: - return "StringKind" - case BytesKind: - return "BytesKind" - case MessageKind: - return "MessageKind" - case GroupKind: - return "GroupKind" - default: - return fmt.Sprintf("Kind(%d)", k) - } -} - -// FieldNumber is the field number in a message. 
-type FieldNumber = protowire.Number - -// FieldNumbers represent a list of field numbers. -type FieldNumbers interface { - // Len reports the number of fields in the list. - Len() int - // Get returns the ith field number. It panics if out of bounds. - Get(i int) FieldNumber - // Has reports whether n is within the list of fields. - Has(n FieldNumber) bool - - doNotImplement -} - -// FieldRanges represent a list of field number ranges. -type FieldRanges interface { - // Len reports the number of ranges in the list. - Len() int - // Get returns the ith range. It panics if out of bounds. - Get(i int) [2]FieldNumber // start inclusive; end exclusive - // Has reports whether n is within any of the ranges. - Has(n FieldNumber) bool - - doNotImplement -} - -// EnumNumber is the numeric value for an enum. -type EnumNumber int32 - -// EnumRanges represent a list of enum number ranges. -type EnumRanges interface { - // Len reports the number of ranges in the list. - Len() int - // Get returns the ith range. It panics if out of bounds. - Get(i int) [2]EnumNumber // start inclusive; end inclusive - // Has reports whether n is within any of the ranges. - Has(n EnumNumber) bool - - doNotImplement -} - -// Name is the short name for a proto declaration. This is not the name -// as used in Go source code, which might not be identical to the proto name. -type Name string // e.g., "Kind" - -// IsValid reports whether s is a syntactically valid name. -// An empty name is invalid. -func (s Name) IsValid() bool { - return consumeIdent(string(s)) == len(s) -} - -// Names represent a list of names. -type Names interface { - // Len reports the number of names in the list. - Len() int - // Get returns the ith name. It panics if out of bounds. - Get(i int) Name - // Has reports whether s matches any names in the list. - Has(s Name) bool - - doNotImplement -} - -// FullName is a qualified name that uniquely identifies a proto declaration. -// A qualified name is the concatenation of the proto package along with the -// fully-declared name (i.e., name of parent preceding the name of the child), -// with a '.' delimiter placed between each Name. -// -// This should not have any leading or trailing dots. -type FullName string // e.g., "google.protobuf.Field.Kind" - -// IsValid reports whether s is a syntactically valid full name. -// An empty full name is invalid. -func (s FullName) IsValid() bool { - i := consumeIdent(string(s)) - if i < 0 { - return false - } - for len(s) > i { - if s[i] != '.' { - return false - } - i++ - n := consumeIdent(string(s[i:])) - if n < 0 { - return false - } - i += n - } - return true -} - -func consumeIdent(s string) (i int) { - if len(s) == 0 || !isLetter(s[i]) { - return -1 - } - i++ - for len(s) > i && isLetterDigit(s[i]) { - i++ - } - return i -} -func isLetter(c byte) bool { - return c == '_' || ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') -} -func isLetterDigit(c byte) bool { - return isLetter(c) || ('0' <= c && c <= '9') -} - -// Name returns the short name, which is the last identifier segment. -// A single segment FullName is the Name itself. -func (n FullName) Name() Name { - if i := strings.LastIndexByte(string(n), '.'); i >= 0 { - return Name(n[i+1:]) - } - return Name(n) -} - -// Parent returns the full name with the trailing identifier removed. -// A single segment FullName has no parent. 
-func (n FullName) Parent() FullName { - if i := strings.LastIndexByte(string(n), '.'); i >= 0 { - return n[:i] - } - return "" -} - -// Append returns the qualified name appended with the provided short name. -// -// Invariant: n == n.Parent().Append(n.Name()) // assuming n is valid -func (n FullName) Append(s Name) FullName { - if n == "" { - return FullName(s) - } - return n + "." + FullName(s) -} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go deleted file mode 100644 index 121ba3a07..000000000 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package protoreflect - -import ( - "strconv" -) - -// SourceLocations is a list of source locations. -type SourceLocations interface { - // Len reports the number of source locations in the proto file. - Len() int - // Get returns the ith SourceLocation. It panics if out of bounds. - Get(int) SourceLocation - - // ByPath returns the SourceLocation for the given path, - // returning the first location if multiple exist for the same path. - // If multiple locations exist for the same path, - // then SourceLocation.Next index can be used to identify the - // index of the next SourceLocation. - // If no location exists for this path, it returns the zero value. - ByPath(path SourcePath) SourceLocation - - // ByDescriptor returns the SourceLocation for the given descriptor, - // returning the first location if multiple exist for the same path. - // If no location exists for this descriptor, it returns the zero value. - ByDescriptor(desc Descriptor) SourceLocation - - doNotImplement -} - -// SourceLocation describes a source location and -// corresponds with the google.protobuf.SourceCodeInfo.Location message. -type SourceLocation struct { - // Path is the path to the declaration from the root file descriptor. - // The contents of this slice must not be mutated. - Path SourcePath - - // StartLine and StartColumn are the zero-indexed starting location - // in the source file for the declaration. - StartLine, StartColumn int - // EndLine and EndColumn are the zero-indexed ending location - // in the source file for the declaration. - // In the descriptor.proto, the end line may be omitted if it is identical - // to the start line. Here, it is always populated. - EndLine, EndColumn int - - // LeadingDetachedComments are the leading detached comments - // for the declaration. The contents of this slice must not be mutated. - LeadingDetachedComments []string - // LeadingComments is the leading attached comment for the declaration. - LeadingComments string - // TrailingComments is the trailing attached comment for the declaration. - TrailingComments string - - // Next is an index into SourceLocations for the next source location that - // has the same Path. It is zero if there is no next location. - Next int -} - -// SourcePath identifies part of a file descriptor for a source location. -// The SourcePath is a sequence of either field numbers or indexes into -// a repeated field that form a path starting from the root file descriptor. -// -// See google.protobuf.SourceCodeInfo.Location.path. -type SourcePath []int32 - -// Equal reports whether p1 equals p2. 
-func (p1 SourcePath) Equal(p2 SourcePath) bool { - if len(p1) != len(p2) { - return false - } - for i := range p1 { - if p1[i] != p2[i] { - return false - } - } - return true -} - -// String formats the path in a humanly readable manner. -// The output is guaranteed to be deterministic, -// making it suitable for use as a key into a Go map. -// It is not guaranteed to be stable as the exact output could change -// in a future version of this module. -// -// Example output: -// .message_type[6].nested_type[15].field[3] -func (p SourcePath) String() string { - b := p.appendFileDescriptorProto(nil) - for _, i := range p { - b = append(b, '.') - b = strconv.AppendInt(b, int64(i), 10) - } - return string(b) -} - -type appendFunc func(*SourcePath, []byte) []byte - -func (p *SourcePath) appendSingularField(b []byte, name string, f appendFunc) []byte { - if len(*p) == 0 { - return b - } - b = append(b, '.') - b = append(b, name...) - *p = (*p)[1:] - if f != nil { - b = f(p, b) - } - return b -} - -func (p *SourcePath) appendRepeatedField(b []byte, name string, f appendFunc) []byte { - b = p.appendSingularField(b, name, nil) - if len(*p) == 0 || (*p)[0] < 0 { - return b - } - b = append(b, '[') - b = strconv.AppendUint(b, uint64((*p)[0]), 10) - b = append(b, ']') - *p = (*p)[1:] - if f != nil { - b = f(p, b) - } - return b -} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go deleted file mode 100644 index b03c1223c..000000000 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ /dev/null @@ -1,461 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by generate-protos. DO NOT EDIT. 
- -package protoreflect - -func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte { - if len(*p) == 0 { - return b - } - switch (*p)[0] { - case 1: - b = p.appendSingularField(b, "name", nil) - case 2: - b = p.appendSingularField(b, "package", nil) - case 3: - b = p.appendRepeatedField(b, "dependency", nil) - case 10: - b = p.appendRepeatedField(b, "public_dependency", nil) - case 11: - b = p.appendRepeatedField(b, "weak_dependency", nil) - case 4: - b = p.appendRepeatedField(b, "message_type", (*SourcePath).appendDescriptorProto) - case 5: - b = p.appendRepeatedField(b, "enum_type", (*SourcePath).appendEnumDescriptorProto) - case 6: - b = p.appendRepeatedField(b, "service", (*SourcePath).appendServiceDescriptorProto) - case 7: - b = p.appendRepeatedField(b, "extension", (*SourcePath).appendFieldDescriptorProto) - case 8: - b = p.appendSingularField(b, "options", (*SourcePath).appendFileOptions) - case 9: - b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo) - case 12: - b = p.appendSingularField(b, "syntax", nil) - } - return b -} - -func (p *SourcePath) appendDescriptorProto(b []byte) []byte { - if len(*p) == 0 { - return b - } - switch (*p)[0] { - case 1: - b = p.appendSingularField(b, "name", nil) - case 2: - b = p.appendRepeatedField(b, "field", (*SourcePath).appendFieldDescriptorProto) - case 6: - b = p.appendRepeatedField(b, "extension", (*SourcePath).appendFieldDescriptorProto) - case 3: - b = p.appendRepeatedField(b, "nested_type", (*SourcePath).appendDescriptorProto) - case 4: - b = p.appendRepeatedField(b, "enum_type", (*SourcePath).appendEnumDescriptorProto) - case 5: - b = p.appendRepeatedField(b, "extension_range", (*SourcePath).appendDescriptorProto_ExtensionRange) - case 8: - b = p.appendRepeatedField(b, "oneof_decl", (*SourcePath).appendOneofDescriptorProto) - case 7: - b = p.appendSingularField(b, "options", (*SourcePath).appendMessageOptions) - case 9: - b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendDescriptorProto_ReservedRange) - case 10: - b = p.appendRepeatedField(b, "reserved_name", nil) - } - return b -} - -func (p *SourcePath) appendEnumDescriptorProto(b []byte) []byte { - if len(*p) == 0 { - return b - } - switch (*p)[0] { - case 1: - b = p.appendSingularField(b, "name", nil) - case 2: - b = p.appendRepeatedField(b, "value", (*SourcePath).appendEnumValueDescriptorProto) - case 3: - b = p.appendSingularField(b, "options", (*SourcePath).appendEnumOptions) - case 4: - b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendEnumDescriptorProto_EnumReservedRange) - case 5: - b = p.appendRepeatedField(b, "reserved_name", nil) - } - return b -} - -func (p *SourcePath) appendServiceDescriptorProto(b []byte) []byte { - if len(*p) == 0 { - return b - } - switch (*p)[0] { - case 1: - b = p.appendSingularField(b, "name", nil) - case 2: - b = p.appendRepeatedField(b, "method", (*SourcePath).appendMethodDescriptorProto) - case 3: - b = p.appendSingularField(b, "options", (*SourcePath).appendServiceOptions) - } - return b -} - -func (p *SourcePath) appendFieldDescriptorProto(b []byte) []byte { - if len(*p) == 0 { - return b - } - switch (*p)[0] { - case 1: - b = p.appendSingularField(b, "name", nil) - case 3: - b = p.appendSingularField(b, "number", nil) - case 4: - b = p.appendSingularField(b, "label", nil) - case 5: - b = p.appendSingularField(b, "type", nil) - case 6: - b = p.appendSingularField(b, "type_name", nil) - case 2: - b = p.appendSingularField(b, "extendee", nil) - case 7: - b = 
p.appendSingularField(b, "default_value", nil) - case 9: - b = p.appendSingularField(b, "oneof_index", nil) - case 10: - b = p.appendSingularField(b, "json_name", nil) - case 8: - b = p.appendSingularField(b, "options", (*SourcePath).appendFieldOptions) - case 17: - b = p.appendSingularField(b, "proto3_optional", nil) - } - return b -} - -func (p *SourcePath) appendFileOptions(b []byte) []byte { - if len(*p) == 0 { - return b - } - switch (*p)[0] { - case 1: - b = p.appendSingularField(b, "java_package", nil) - case 8: - b = p.appendSingularField(b, "java_outer_classname", nil) - case 10: - b = p.appendSingularField(b, "java_multiple_files", nil) - case 20: - b = p.appendSingularField(b, "java_generate_equals_and_hash", nil) - case 27: - b = p.appendSingularField(b, "java_string_check_utf8", nil) - case 9: - b = p.appendSingularField(b, "optimize_for", nil) - case 11: - b = p.appendSingularField(b, "go_package", nil) - case 16: - b = p.appendSingularField(b, "cc_generic_services", nil) - case 17: - b = p.appendSingularField(b, "java_generic_services", nil) - case 18: - b = p.appendSingularField(b, "py_generic_services", nil) - case 42: - b = p.appendSingularField(b, "php_generic_services", nil) - case 23: - b = p.appendSingularField(b, "deprecated", nil) - case 31: - b = p.appendSingularField(b, "cc_enable_arenas", nil) - case 36: - b = p.appendSingularField(b, "objc_class_prefix", nil) - case 37: - b = p.appendSingularField(b, "csharp_namespace", nil) - case 39: - b = p.appendSingularField(b, "swift_prefix", nil) - case 40: - b = p.appendSingularField(b, "php_class_prefix", nil) - case 41: - b = p.appendSingularField(b, "php_namespace", nil) - case 44: - b = p.appendSingularField(b, "php_metadata_namespace", nil) - case 45: - b = p.appendSingularField(b, "ruby_package", nil) - case 999: - b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) - } - return b -} - -func (p *SourcePath) appendSourceCodeInfo(b []byte) []byte { - if len(*p) == 0 { - return b - } - switch (*p)[0] { - case 1: - b = p.appendRepeatedField(b, "location", (*SourcePath).appendSourceCodeInfo_Location) - } - return b -} - -func (p *SourcePath) appendDescriptorProto_ExtensionRange(b []byte) []byte { - if len(*p) == 0 { - return b - } - switch (*p)[0] { - case 1: - b = p.appendSingularField(b, "start", nil) - case 2: - b = p.appendSingularField(b, "end", nil) - case 3: - b = p.appendSingularField(b, "options", (*SourcePath).appendExtensionRangeOptions) - } - return b -} - -func (p *SourcePath) appendOneofDescriptorProto(b []byte) []byte { - if len(*p) == 0 { - return b - } - switch (*p)[0] { - case 1: - b = p.appendSingularField(b, "name", nil) - case 2: - b = p.appendSingularField(b, "options", (*SourcePath).appendOneofOptions) - } - return b -} - -func (p *SourcePath) appendMessageOptions(b []byte) []byte { - if len(*p) == 0 { - return b - } - switch (*p)[0] { - case 1: - b = p.appendSingularField(b, "message_set_wire_format", nil) - case 2: - b = p.appendSingularField(b, "no_standard_descriptor_accessor", nil) - case 3: - b = p.appendSingularField(b, "deprecated", nil) - case 7: - b = p.appendSingularField(b, "map_entry", nil) - case 999: - b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) - } - return b -} - -func (p *SourcePath) appendDescriptorProto_ReservedRange(b []byte) []byte { - if len(*p) == 0 { - return b - } - switch (*p)[0] { - case 1: - b = p.appendSingularField(b, "start", nil) - case 2: - b = p.appendSingularField(b, 
"end", nil) - } - return b -} - -func (p *SourcePath) appendEnumValueDescriptorProto(b []byte) []byte { - if len(*p) == 0 { - return b - } - switch (*p)[0] { - case 1: - b = p.appendSingularField(b, "name", nil) - case 2: - b = p.appendSingularField(b, "number", nil) - case 3: - b = p.appendSingularField(b, "options", (*SourcePath).appendEnumValueOptions) - } - return b -} - -func (p *SourcePath) appendEnumOptions(b []byte) []byte { - if len(*p) == 0 { - return b - } - switch (*p)[0] { - case 2: - b = p.appendSingularField(b, "allow_alias", nil) - case 3: - b = p.appendSingularField(b, "deprecated", nil) - case 999: - b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) - } - return b -} - -func (p *SourcePath) appendEnumDescriptorProto_EnumReservedRange(b []byte) []byte { - if len(*p) == 0 { - return b - } - switch (*p)[0] { - case 1: - b = p.appendSingularField(b, "start", nil) - case 2: - b = p.appendSingularField(b, "end", nil) - } - return b -} - -func (p *SourcePath) appendMethodDescriptorProto(b []byte) []byte { - if len(*p) == 0 { - return b - } - switch (*p)[0] { - case 1: - b = p.appendSingularField(b, "name", nil) - case 2: - b = p.appendSingularField(b, "input_type", nil) - case 3: - b = p.appendSingularField(b, "output_type", nil) - case 4: - b = p.appendSingularField(b, "options", (*SourcePath).appendMethodOptions) - case 5: - b = p.appendSingularField(b, "client_streaming", nil) - case 6: - b = p.appendSingularField(b, "server_streaming", nil) - } - return b -} - -func (p *SourcePath) appendServiceOptions(b []byte) []byte { - if len(*p) == 0 { - return b - } - switch (*p)[0] { - case 33: - b = p.appendSingularField(b, "deprecated", nil) - case 999: - b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) - } - return b -} - -func (p *SourcePath) appendFieldOptions(b []byte) []byte { - if len(*p) == 0 { - return b - } - switch (*p)[0] { - case 1: - b = p.appendSingularField(b, "ctype", nil) - case 2: - b = p.appendSingularField(b, "packed", nil) - case 6: - b = p.appendSingularField(b, "jstype", nil) - case 5: - b = p.appendSingularField(b, "lazy", nil) - case 3: - b = p.appendSingularField(b, "deprecated", nil) - case 10: - b = p.appendSingularField(b, "weak", nil) - case 999: - b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) - } - return b -} - -func (p *SourcePath) appendUninterpretedOption(b []byte) []byte { - if len(*p) == 0 { - return b - } - switch (*p)[0] { - case 2: - b = p.appendRepeatedField(b, "name", (*SourcePath).appendUninterpretedOption_NamePart) - case 3: - b = p.appendSingularField(b, "identifier_value", nil) - case 4: - b = p.appendSingularField(b, "positive_int_value", nil) - case 5: - b = p.appendSingularField(b, "negative_int_value", nil) - case 6: - b = p.appendSingularField(b, "double_value", nil) - case 7: - b = p.appendSingularField(b, "string_value", nil) - case 8: - b = p.appendSingularField(b, "aggregate_value", nil) - } - return b -} - -func (p *SourcePath) appendSourceCodeInfo_Location(b []byte) []byte { - if len(*p) == 0 { - return b - } - switch (*p)[0] { - case 1: - b = p.appendRepeatedField(b, "path", nil) - case 2: - b = p.appendRepeatedField(b, "span", nil) - case 3: - b = p.appendSingularField(b, "leading_comments", nil) - case 4: - b = p.appendSingularField(b, "trailing_comments", nil) - case 6: - b = p.appendRepeatedField(b, "leading_detached_comments", nil) - } - return b -} - -func (p *SourcePath) 
appendExtensionRangeOptions(b []byte) []byte { - if len(*p) == 0 { - return b - } - switch (*p)[0] { - case 999: - b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) - } - return b -} - -func (p *SourcePath) appendOneofOptions(b []byte) []byte { - if len(*p) == 0 { - return b - } - switch (*p)[0] { - case 999: - b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) - } - return b -} - -func (p *SourcePath) appendEnumValueOptions(b []byte) []byte { - if len(*p) == 0 { - return b - } - switch (*p)[0] { - case 1: - b = p.appendSingularField(b, "deprecated", nil) - case 999: - b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) - } - return b -} - -func (p *SourcePath) appendMethodOptions(b []byte) []byte { - if len(*p) == 0 { - return b - } - switch (*p)[0] { - case 33: - b = p.appendSingularField(b, "deprecated", nil) - case 34: - b = p.appendSingularField(b, "idempotency_level", nil) - case 999: - b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) - } - return b -} - -func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte { - if len(*p) == 0 { - return b - } - switch (*p)[0] { - case 1: - b = p.appendSingularField(b, "name_part", nil) - case 2: - b = p.appendSingularField(b, "is_extension", nil) - } - return b -} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go deleted file mode 100644 index 8e53c44a9..000000000 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ /dev/null @@ -1,665 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package protoreflect - -// Descriptor provides a set of accessors that are common to every descriptor. -// Each descriptor type wraps the equivalent google.protobuf.XXXDescriptorProto, -// but provides efficient lookup and immutability. -// -// Each descriptor is comparable. Equality implies that the two types are -// exactly identical. However, it is possible for the same semantically -// identical proto type to be represented by multiple type descriptors. -// -// For example, suppose we have t1 and t2 which are both MessageDescriptors. -// If t1 == t2, then the types are definitely equal and all accessors return -// the same information. However, if t1 != t2, then it is still possible that -// they still represent the same proto type (e.g., t1.FullName == t2.FullName). -// This can occur if a descriptor type is created dynamically, or multiple -// versions of the same proto type are accidentally linked into the Go binary. -type Descriptor interface { - // ParentFile returns the parent file descriptor that this descriptor - // is declared within. The parent file for the file descriptor is itself. - // - // Support for this functionality is optional and may return nil. - ParentFile() FileDescriptor - - // Parent returns the parent containing this descriptor declaration. 
- // The following shows the mapping from child type to possible parent types: - // - // ╔═════════════════════╤═══════════════════════════════════╗ - // ║ Child type │ Possible parent types ║ - // ╠═════════════════════╪═══════════════════════════════════╣ - // ║ FileDescriptor │ nil ║ - // ║ MessageDescriptor │ FileDescriptor, MessageDescriptor ║ - // ║ FieldDescriptor │ FileDescriptor, MessageDescriptor ║ - // ║ OneofDescriptor │ MessageDescriptor ║ - // ║ EnumDescriptor │ FileDescriptor, MessageDescriptor ║ - // ║ EnumValueDescriptor │ EnumDescriptor ║ - // ║ ServiceDescriptor │ FileDescriptor ║ - // ║ MethodDescriptor │ ServiceDescriptor ║ - // ╚═════════════════════╧═══════════════════════════════════╝ - // - // Support for this functionality is optional and may return nil. - Parent() Descriptor - - // Index returns the index of this descriptor within its parent. - // It returns 0 if the descriptor does not have a parent or if the parent - // is unknown. - Index() int - - // Syntax is the protobuf syntax. - Syntax() Syntax // e.g., Proto2 or Proto3 - - // Name is the short name of the declaration (i.e., FullName.Name). - Name() Name // e.g., "Any" - - // FullName is the fully-qualified name of the declaration. - // - // The FullName is a concatenation of the full name of the type that this - // type is declared within and the declaration name. For example, - // field "foo_field" in message "proto.package.MyMessage" is - // uniquely identified as "proto.package.MyMessage.foo_field". - // Enum values are an exception to the rule (see EnumValueDescriptor). - FullName() FullName // e.g., "google.protobuf.Any" - - // IsPlaceholder reports whether type information is missing since a - // dependency is not resolved, in which case only name information is known. - // - // Placeholder types may only be returned by the following accessors - // as a result of unresolved dependencies or weak imports: - // - // ╔═══════════════════════════════════╤═════════════════════╗ - // ║ Accessor │ Descriptor ║ - // ╠═══════════════════════════════════╪═════════════════════╣ - // ║ FileImports.FileDescriptor │ FileDescriptor ║ - // ║ FieldDescriptor.Enum │ EnumDescriptor ║ - // ║ FieldDescriptor.Message │ MessageDescriptor ║ - // ║ FieldDescriptor.DefaultEnumValue │ EnumValueDescriptor ║ - // ║ FieldDescriptor.ContainingMessage │ MessageDescriptor ║ - // ║ MethodDescriptor.Input │ MessageDescriptor ║ - // ║ MethodDescriptor.Output │ MessageDescriptor ║ - // ╚═══════════════════════════════════╧═════════════════════╝ - // - // If true, only Name and FullName are valid. - // For FileDescriptor, the Path is also valid. - IsPlaceholder() bool - - // Options returns the descriptor options. The caller must not modify - // the returned value. - // - // To avoid a dependency cycle, this function returns a proto.Message value. 
- // The proto message type returned for each descriptor type is as follows: - // ╔═════════════════════╤══════════════════════════════════════════╗ - // ║ Go type │ Protobuf message type ║ - // ╠═════════════════════╪══════════════════════════════════════════╣ - // ║ FileDescriptor │ google.protobuf.FileOptions ║ - // ║ EnumDescriptor │ google.protobuf.EnumOptions ║ - // ║ EnumValueDescriptor │ google.protobuf.EnumValueOptions ║ - // ║ MessageDescriptor │ google.protobuf.MessageOptions ║ - // ║ FieldDescriptor │ google.protobuf.FieldOptions ║ - // ║ OneofDescriptor │ google.protobuf.OneofOptions ║ - // ║ ServiceDescriptor │ google.protobuf.ServiceOptions ║ - // ║ MethodDescriptor │ google.protobuf.MethodOptions ║ - // ╚═════════════════════╧══════════════════════════════════════════╝ - // - // This method returns a typed nil-pointer if no options are present. - // The caller must import the descriptorpb package to use this. - Options() ProtoMessage - - doNotImplement -} - -// FileDescriptor describes the types in a complete proto file and -// corresponds with the google.protobuf.FileDescriptorProto message. -// -// Top-level declarations: -// EnumDescriptor, MessageDescriptor, FieldDescriptor, and/or ServiceDescriptor. -type FileDescriptor interface { - Descriptor // Descriptor.FullName is identical to Package - - // Path returns the file name, relative to the source tree root. - Path() string // e.g., "path/to/file.proto" - // Package returns the protobuf package namespace. - Package() FullName // e.g., "google.protobuf" - - // Imports is a list of imported proto files. - Imports() FileImports - - // Enums is a list of the top-level enum declarations. - Enums() EnumDescriptors - // Messages is a list of the top-level message declarations. - Messages() MessageDescriptors - // Extensions is a list of the top-level extension declarations. - Extensions() ExtensionDescriptors - // Services is a list of the top-level service declarations. - Services() ServiceDescriptors - - // SourceLocations is a list of source locations. - SourceLocations() SourceLocations - - isFileDescriptor -} -type isFileDescriptor interface{ ProtoType(FileDescriptor) } - -// FileImports is a list of file imports. -type FileImports interface { - // Len reports the number of files imported by this proto file. - Len() int - // Get returns the ith FileImport. It panics if out of bounds. - Get(i int) FileImport - - doNotImplement -} - -// FileImport is the declaration for a proto file import. -type FileImport struct { - // FileDescriptor is the file type for the given import. - // It is a placeholder descriptor if IsWeak is set or if a dependency has - // not been regenerated to implement the new reflection APIs. - FileDescriptor - - // IsPublic reports whether this is a public import, which causes this file - // to alias declarations within the imported file. The intended use cases - // for this feature is the ability to move proto files without breaking - // existing dependencies. - // - // The current file and the imported file must be within proto package. - IsPublic bool - - // IsWeak reports whether this is a weak import, which does not impose - // a direct dependency on the target file. - // - // Weak imports are a legacy proto1 feature. Equivalent behavior is - // achieved using proto2 extension fields or proto3 Any messages. - IsWeak bool -} - -// MessageDescriptor describes a message and -// corresponds with the google.protobuf.DescriptorProto message. 
-// -// Nested declarations: -// FieldDescriptor, OneofDescriptor, FieldDescriptor, EnumDescriptor, -// and/or MessageDescriptor. -type MessageDescriptor interface { - Descriptor - - // IsMapEntry indicates that this is an auto-generated message type to - // represent the entry type for a map field. - // - // Map entry messages have only two fields: - // • a "key" field with a field number of 1 - // • a "value" field with a field number of 2 - // The key and value types are determined by these two fields. - // - // If IsMapEntry is true, it implies that FieldDescriptor.IsMap is true - // for some field with this message type. - IsMapEntry() bool - - // Fields is a list of nested field declarations. - Fields() FieldDescriptors - // Oneofs is a list of nested oneof declarations. - Oneofs() OneofDescriptors - - // ReservedNames is a list of reserved field names. - ReservedNames() Names - // ReservedRanges is a list of reserved ranges of field numbers. - ReservedRanges() FieldRanges - // RequiredNumbers is a list of required field numbers. - // In Proto3, it is always an empty list. - RequiredNumbers() FieldNumbers - // ExtensionRanges is the field ranges used for extension fields. - // In Proto3, it is always an empty ranges. - ExtensionRanges() FieldRanges - // ExtensionRangeOptions returns the ith extension range options. - // - // To avoid a dependency cycle, this method returns a proto.Message value, - // which always contains a google.protobuf.ExtensionRangeOptions message. - // This method returns a typed nil-pointer if no options are present. - // The caller must import the descriptorpb package to use this. - ExtensionRangeOptions(i int) ProtoMessage - - // Enums is a list of nested enum declarations. - Enums() EnumDescriptors - // Messages is a list of nested message declarations. - Messages() MessageDescriptors - // Extensions is a list of nested extension declarations. - Extensions() ExtensionDescriptors - - isMessageDescriptor -} -type isMessageDescriptor interface{ ProtoType(MessageDescriptor) } - -// MessageType encapsulates a MessageDescriptor with a concrete Go implementation. -// It is recommended that implementations of this interface also implement the -// MessageFieldTypes interface. -type MessageType interface { - // New returns a newly allocated empty message. - // It may return nil for synthetic messages representing a map entry. - New() Message - - // Zero returns an empty, read-only message. - // It may return nil for synthetic messages representing a map entry. - Zero() Message - - // Descriptor returns the message descriptor. - // - // Invariant: t.Descriptor() == t.New().Descriptor() - Descriptor() MessageDescriptor -} - -// MessageFieldTypes extends a MessageType by providing type information -// regarding enums and messages referenced by the message fields. -type MessageFieldTypes interface { - MessageType - - // Enum returns the EnumType for the ith field in Descriptor.Fields. - // It returns nil if the ith field is not an enum kind. - // It panics if out of bounds. - // - // Invariant: mt.Enum(i).Descriptor() == mt.Descriptor().Fields(i).Enum() - Enum(i int) EnumType - - // Message returns the MessageType for the ith field in Descriptor.Fields. - // It returns nil if the ith field is not a message or group kind. - // It panics if out of bounds. - // - // Invariant: mt.Message(i).Descriptor() == mt.Descriptor().Fields(i).Message() - Message(i int) MessageType -} - -// MessageDescriptors is a list of message declarations. 
-type MessageDescriptors interface { - // Len reports the number of messages. - Len() int - // Get returns the ith MessageDescriptor. It panics if out of bounds. - Get(i int) MessageDescriptor - // ByName returns the MessageDescriptor for a message named s. - // It returns nil if not found. - ByName(s Name) MessageDescriptor - - doNotImplement -} - -// FieldDescriptor describes a field within a message and -// corresponds with the google.protobuf.FieldDescriptorProto message. -// -// It is used for both normal fields defined within the parent message -// (e.g., MessageDescriptor.Fields) and fields that extend some remote message -// (e.g., FileDescriptor.Extensions or MessageDescriptor.Extensions). -type FieldDescriptor interface { - Descriptor - - // Number reports the unique number for this field. - Number() FieldNumber - // Cardinality reports the cardinality for this field. - Cardinality() Cardinality - // Kind reports the basic kind for this field. - Kind() Kind - - // HasJSONName reports whether this field has an explicitly set JSON name. - HasJSONName() bool - - // JSONName reports the name used for JSON serialization. - // It is usually the camel-cased form of the field name. - // Extension fields are represented by the full name surrounded by brackets. - JSONName() string - - // TextName reports the name used for text serialization. - // It is usually the name of the field, except that groups use the name - // of the inlined message, and extension fields are represented by the - // full name surrounded by brackets. - TextName() string - - // HasPresence reports whether the field distinguishes between unpopulated - // and default values. - HasPresence() bool - - // IsExtension reports whether this is an extension field. If false, - // then Parent and ContainingMessage refer to the same message. - // Otherwise, ContainingMessage and Parent likely differ. - IsExtension() bool - - // HasOptionalKeyword reports whether the "optional" keyword was explicitly - // specified in the source .proto file. - HasOptionalKeyword() bool - - // IsWeak reports whether this is a weak field, which does not impose a - // direct dependency on the target type. - // If true, then Message returns a placeholder type. - IsWeak() bool - - // IsPacked reports whether repeated primitive numeric kinds should be - // serialized using a packed encoding. - // If true, then it implies Cardinality is Repeated. - IsPacked() bool - - // IsList reports whether this field represents a list, - // where the value type for the associated field is a List. - // It is equivalent to checking whether Cardinality is Repeated and - // that IsMap reports false. - IsList() bool - - // IsMap reports whether this field represents a map, - // where the value type for the associated field is a Map. - // It is equivalent to checking whether Cardinality is Repeated, - // that the Kind is MessageKind, and that Message.IsMapEntry reports true. - IsMap() bool - - // MapKey returns the field descriptor for the key in the map entry. - // It returns nil if IsMap reports false. - MapKey() FieldDescriptor - - // MapValue returns the field descriptor for the value in the map entry. - // It returns nil if IsMap reports false. - MapValue() FieldDescriptor - - // HasDefault reports whether this field has a default value. - HasDefault() bool - - // Default returns the default value for scalar fields. - // For proto2, it is the default value as specified in the proto file, - // or the zero value if unspecified. 
- // For proto3, it is always the zero value of the scalar. - // The Value type is determined by the Kind. - Default() Value - - // DefaultEnumValue returns the enum value descriptor for the default value - // of an enum field, and is nil for any other kind of field. - DefaultEnumValue() EnumValueDescriptor - - // ContainingOneof is the containing oneof that this field belongs to, - // and is nil if this field is not part of a oneof. - ContainingOneof() OneofDescriptor - - // ContainingMessage is the containing message that this field belongs to. - // For extension fields, this may not necessarily be the parent message - // that the field is declared within. - ContainingMessage() MessageDescriptor - - // Enum is the enum descriptor if Kind is EnumKind. - // It returns nil for any other Kind. - Enum() EnumDescriptor - - // Message is the message descriptor if Kind is - // MessageKind or GroupKind. It returns nil for any other Kind. - Message() MessageDescriptor - - isFieldDescriptor -} -type isFieldDescriptor interface{ ProtoType(FieldDescriptor) } - -// FieldDescriptors is a list of field declarations. -type FieldDescriptors interface { - // Len reports the number of fields. - Len() int - // Get returns the ith FieldDescriptor. It panics if out of bounds. - Get(i int) FieldDescriptor - // ByName returns the FieldDescriptor for a field named s. - // It returns nil if not found. - ByName(s Name) FieldDescriptor - // ByJSONName returns the FieldDescriptor for a field with s as the JSON name. - // It returns nil if not found. - ByJSONName(s string) FieldDescriptor - // ByTextName returns the FieldDescriptor for a field with s as the text name. - // It returns nil if not found. - ByTextName(s string) FieldDescriptor - // ByNumber returns the FieldDescriptor for a field numbered n. - // It returns nil if not found. - ByNumber(n FieldNumber) FieldDescriptor - - doNotImplement -} - -// OneofDescriptor describes a oneof field set within a given message and -// corresponds with the google.protobuf.OneofDescriptorProto message. -type OneofDescriptor interface { - Descriptor - - // IsSynthetic reports whether this is a synthetic oneof created to support - // proto3 optional semantics. If true, Fields contains exactly one field - // with HasOptionalKeyword specified. - IsSynthetic() bool - - // Fields is a list of fields belonging to this oneof. - Fields() FieldDescriptors - - isOneofDescriptor -} -type isOneofDescriptor interface{ ProtoType(OneofDescriptor) } - -// OneofDescriptors is a list of oneof declarations. -type OneofDescriptors interface { - // Len reports the number of oneof fields. - Len() int - // Get returns the ith OneofDescriptor. It panics if out of bounds. - Get(i int) OneofDescriptor - // ByName returns the OneofDescriptor for a oneof named s. - // It returns nil if not found. - ByName(s Name) OneofDescriptor - - doNotImplement -} - -// ExtensionDescriptor is an alias of FieldDescriptor for documentation. -type ExtensionDescriptor = FieldDescriptor - -// ExtensionTypeDescriptor is an ExtensionDescriptor with an associated ExtensionType. -type ExtensionTypeDescriptor interface { - ExtensionDescriptor - - // Type returns the associated ExtensionType. - Type() ExtensionType - - // Descriptor returns the plain ExtensionDescriptor without the - // associated ExtensionType. - Descriptor() ExtensionDescriptor -} - -// ExtensionDescriptors is a list of field declarations. -type ExtensionDescriptors interface { - // Len reports the number of fields. 
- Len() int - // Get returns the ith ExtensionDescriptor. It panics if out of bounds. - Get(i int) ExtensionDescriptor - // ByName returns the ExtensionDescriptor for a field named s. - // It returns nil if not found. - ByName(s Name) ExtensionDescriptor - - doNotImplement -} - -// ExtensionType encapsulates an ExtensionDescriptor with a concrete -// Go implementation. The nested field descriptor must be for a extension field. -// -// While a normal field is a member of the parent message that it is declared -// within (see Descriptor.Parent), an extension field is a member of some other -// target message (see ExtensionDescriptor.Extendee) and may have no -// relationship with the parent. However, the full name of an extension field is -// relative to the parent that it is declared within. -// -// For example: -// syntax = "proto2"; -// package example; -// message FooMessage { -// extensions 100 to max; -// } -// message BarMessage { -// extends FooMessage { optional BarMessage bar_field = 100; } -// } -// -// Field "bar_field" is an extension of FooMessage, but its full name is -// "example.BarMessage.bar_field" instead of "example.FooMessage.bar_field". -type ExtensionType interface { - // New returns a new value for the field. - // For scalars, this returns the default value in native Go form. - New() Value - - // Zero returns a new value for the field. - // For scalars, this returns the default value in native Go form. - // For composite types, this returns an empty, read-only message, list, or map. - Zero() Value - - // TypeDescriptor returns the extension type descriptor. - TypeDescriptor() ExtensionTypeDescriptor - - // ValueOf wraps the input and returns it as a Value. - // ValueOf panics if the input value is invalid or not the appropriate type. - // - // ValueOf is more extensive than protoreflect.ValueOf for a given field's - // value as it has more type information available. - ValueOf(interface{}) Value - - // InterfaceOf completely unwraps the Value to the underlying Go type. - // InterfaceOf panics if the input is nil or does not represent the - // appropriate underlying Go type. For composite types, it panics if the - // value is not mutable. - // - // InterfaceOf is able to unwrap the Value further than Value.Interface - // as it has more type information available. - InterfaceOf(Value) interface{} - - // IsValidValue reports whether the Value is valid to assign to the field. - IsValidValue(Value) bool - - // IsValidInterface reports whether the input is valid to assign to the field. - IsValidInterface(interface{}) bool -} - -// EnumDescriptor describes an enum and -// corresponds with the google.protobuf.EnumDescriptorProto message. -// -// Nested declarations: -// EnumValueDescriptor. -type EnumDescriptor interface { - Descriptor - - // Values is a list of nested enum value declarations. - Values() EnumValueDescriptors - - // ReservedNames is a list of reserved enum names. - ReservedNames() Names - // ReservedRanges is a list of reserved ranges of enum numbers. - ReservedRanges() EnumRanges - - isEnumDescriptor -} -type isEnumDescriptor interface{ ProtoType(EnumDescriptor) } - -// EnumType encapsulates an EnumDescriptor with a concrete Go implementation. -type EnumType interface { - // New returns an instance of this enum type with its value set to n. - New(n EnumNumber) Enum - - // Descriptor returns the enum descriptor. - // - // Invariant: t.Descriptor() == t.New(0).Descriptor() - Descriptor() EnumDescriptor -} - -// EnumDescriptors is a list of enum declarations. 
-type EnumDescriptors interface { - // Len reports the number of enum types. - Len() int - // Get returns the ith EnumDescriptor. It panics if out of bounds. - Get(i int) EnumDescriptor - // ByName returns the EnumDescriptor for an enum named s. - // It returns nil if not found. - ByName(s Name) EnumDescriptor - - doNotImplement -} - -// EnumValueDescriptor describes an enum value and -// corresponds with the google.protobuf.EnumValueDescriptorProto message. -// -// All other proto declarations are in the namespace of the parent. -// However, enum values do not follow this rule and are within the namespace -// of the parent's parent (i.e., they are a sibling of the containing enum). -// Thus, a value named "FOO_VALUE" declared within an enum uniquely identified -// as "proto.package.MyEnum" has a full name of "proto.package.FOO_VALUE". -type EnumValueDescriptor interface { - Descriptor - - // Number returns the enum value as an integer. - Number() EnumNumber - - isEnumValueDescriptor -} -type isEnumValueDescriptor interface{ ProtoType(EnumValueDescriptor) } - -// EnumValueDescriptors is a list of enum value declarations. -type EnumValueDescriptors interface { - // Len reports the number of enum values. - Len() int - // Get returns the ith EnumValueDescriptor. It panics if out of bounds. - Get(i int) EnumValueDescriptor - // ByName returns the EnumValueDescriptor for the enum value named s. - // It returns nil if not found. - ByName(s Name) EnumValueDescriptor - // ByNumber returns the EnumValueDescriptor for the enum value numbered n. - // If multiple have the same number, the first one defined is returned - // It returns nil if not found. - ByNumber(n EnumNumber) EnumValueDescriptor - - doNotImplement -} - -// ServiceDescriptor describes a service and -// corresponds with the google.protobuf.ServiceDescriptorProto message. -// -// Nested declarations: MethodDescriptor. -type ServiceDescriptor interface { - Descriptor - - // Methods is a list of nested message declarations. - Methods() MethodDescriptors - - isServiceDescriptor -} -type isServiceDescriptor interface{ ProtoType(ServiceDescriptor) } - -// ServiceDescriptors is a list of service declarations. -type ServiceDescriptors interface { - // Len reports the number of services. - Len() int - // Get returns the ith ServiceDescriptor. It panics if out of bounds. - Get(i int) ServiceDescriptor - // ByName returns the ServiceDescriptor for a service named s. - // It returns nil if not found. - ByName(s Name) ServiceDescriptor - - doNotImplement -} - -// MethodDescriptor describes a method and -// corresponds with the google.protobuf.MethodDescriptorProto message. -type MethodDescriptor interface { - Descriptor - - // Input is the input message descriptor. - Input() MessageDescriptor - // Output is the output message descriptor. - Output() MessageDescriptor - // IsStreamingClient reports whether the client streams multiple messages. - IsStreamingClient() bool - // IsStreamingServer reports whether the server streams multiple messages. - IsStreamingServer() bool - - isMethodDescriptor -} -type isMethodDescriptor interface{ ProtoType(MethodDescriptor) } - -// MethodDescriptors is a list of method declarations. -type MethodDescriptors interface { - // Len reports the number of methods. - Len() int - // Get returns the ith MethodDescriptor. It panics if out of bounds. - Get(i int) MethodDescriptor - // ByName returns the MethodDescriptor for a service method named s. - // It returns nil if not found. 
- ByName(s Name) MethodDescriptor - - doNotImplement -} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go deleted file mode 100644 index f31981077..000000000 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go +++ /dev/null @@ -1,285 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package protoreflect - -import "google.golang.org/protobuf/encoding/protowire" - -// Enum is a reflection interface for a concrete enum value, -// which provides type information and a getter for the enum number. -// Enum does not provide a mutable API since enums are commonly backed by -// Go constants, which are not addressable. -type Enum interface { - // Descriptor returns enum descriptor, which contains only the protobuf - // type information for the enum. - Descriptor() EnumDescriptor - - // Type returns the enum type, which encapsulates both Go and protobuf - // type information. If the Go type information is not needed, - // it is recommended that the enum descriptor be used instead. - Type() EnumType - - // Number returns the enum value as an integer. - Number() EnumNumber -} - -// Message is a reflective interface for a concrete message value, -// encapsulating both type and value information for the message. -// -// Accessor/mutators for individual fields are keyed by FieldDescriptor. -// For non-extension fields, the descriptor must exactly match the -// field known by the parent message. -// For extension fields, the descriptor must implement ExtensionTypeDescriptor, -// extend the parent message (i.e., have the same message FullName), and -// be within the parent's extension range. -// -// Each field Value can be a scalar or a composite type (Message, List, or Map). -// See Value for the Go types associated with a FieldDescriptor. -// Providing a Value that is invalid or of an incorrect type panics. -type Message interface { - // Descriptor returns message descriptor, which contains only the protobuf - // type information for the message. - Descriptor() MessageDescriptor - - // Type returns the message type, which encapsulates both Go and protobuf - // type information. If the Go type information is not needed, - // it is recommended that the message descriptor be used instead. - Type() MessageType - - // New returns a newly allocated and mutable empty message. - New() Message - - // Interface unwraps the message reflection interface and - // returns the underlying ProtoMessage interface. - Interface() ProtoMessage - - // Range iterates over every populated field in an undefined order, - // calling f for each field descriptor and value encountered. - // Range returns immediately if f returns false. - // While iterating, mutating operations may only be performed - // on the current field descriptor. - Range(f func(FieldDescriptor, Value) bool) - - // Has reports whether a field is populated. - // - // Some fields have the property of nullability where it is possible to - // distinguish between the default value of a field and whether the field - // was explicitly populated with the default value. Singular message fields, - // member fields of a oneof, and proto2 scalar fields are nullable. Such - // fields are populated only if explicitly set. 
- // - // In other cases (aside from the nullable cases above), - // a proto3 scalar field is populated if it contains a non-zero value, and - // a repeated field is populated if it is non-empty. - Has(FieldDescriptor) bool - - // Clear clears the field such that a subsequent Has call reports false. - // - // Clearing an extension field clears both the extension type and value - // associated with the given field number. - // - // Clear is a mutating operation and unsafe for concurrent use. - Clear(FieldDescriptor) - - // Get retrieves the value for a field. - // - // For unpopulated scalars, it returns the default value, where - // the default value of a bytes scalar is guaranteed to be a copy. - // For unpopulated composite types, it returns an empty, read-only view - // of the value; to obtain a mutable reference, use Mutable. - Get(FieldDescriptor) Value - - // Set stores the value for a field. - // - // For a field belonging to a oneof, it implicitly clears any other field - // that may be currently set within the same oneof. - // For extension fields, it implicitly stores the provided ExtensionType. - // When setting a composite type, it is unspecified whether the stored value - // aliases the source's memory in any way. If the composite value is an - // empty, read-only value, then it panics. - // - // Set is a mutating operation and unsafe for concurrent use. - Set(FieldDescriptor, Value) - - // Mutable returns a mutable reference to a composite type. - // - // If the field is unpopulated, it may allocate a composite value. - // For a field belonging to a oneof, it implicitly clears any other field - // that may be currently set within the same oneof. - // For extension fields, it implicitly stores the provided ExtensionType - // if not already stored. - // It panics if the field does not contain a composite type. - // - // Mutable is a mutating operation and unsafe for concurrent use. - Mutable(FieldDescriptor) Value - - // NewField returns a new value that is assignable to the field - // for the given descriptor. For scalars, this returns the default value. - // For lists, maps, and messages, this returns a new, empty, mutable value. - NewField(FieldDescriptor) Value - - // WhichOneof reports which field within the oneof is populated, - // returning nil if none are populated. - // It panics if the oneof descriptor does not belong to this message. - WhichOneof(OneofDescriptor) FieldDescriptor - - // GetUnknown retrieves the entire list of unknown fields. - // The caller may only mutate the contents of the RawFields - // if the mutated bytes are stored back into the message with SetUnknown. - GetUnknown() RawFields - - // SetUnknown stores an entire list of unknown fields. - // The raw fields must be syntactically valid according to the wire format. - // An implementation may panic if this is not the case. - // Once stored, the caller must not mutate the content of the RawFields. - // An empty RawFields may be passed to clear the fields. - // - // SetUnknown is a mutating operation and unsafe for concurrent use. - SetUnknown(RawFields) - - // IsValid reports whether the message is valid. - // - // An invalid message is an empty, read-only value. - // - // An invalid message often corresponds to a nil pointer of the concrete - // message type, but the details are implementation dependent. - // Validity is not part of the protobuf data model, and may not - // be preserved in marshaling or other operations. 
- IsValid() bool - - // ProtoMethods returns optional fast-path implementions of various operations. - // This method may return nil. - // - // The returned methods type is identical to - // "google.golang.org/protobuf/runtime/protoiface".Methods. - // Consult the protoiface package documentation for details. - ProtoMethods() *methods -} - -// RawFields is the raw bytes for an ordered sequence of fields. -// Each field contains both the tag (representing field number and wire type), -// and also the wire data itself. -type RawFields []byte - -// IsValid reports whether b is syntactically correct wire format. -func (b RawFields) IsValid() bool { - for len(b) > 0 { - _, _, n := protowire.ConsumeField(b) - if n < 0 { - return false - } - b = b[n:] - } - return true -} - -// List is a zero-indexed, ordered list. -// The element Value type is determined by FieldDescriptor.Kind. -// Providing a Value that is invalid or of an incorrect type panics. -type List interface { - // Len reports the number of entries in the List. - // Get, Set, and Truncate panic with out of bound indexes. - Len() int - - // Get retrieves the value at the given index. - // It never returns an invalid value. - Get(int) Value - - // Set stores a value for the given index. - // When setting a composite type, it is unspecified whether the set - // value aliases the source's memory in any way. - // - // Set is a mutating operation and unsafe for concurrent use. - Set(int, Value) - - // Append appends the provided value to the end of the list. - // When appending a composite type, it is unspecified whether the appended - // value aliases the source's memory in any way. - // - // Append is a mutating operation and unsafe for concurrent use. - Append(Value) - - // AppendMutable appends a new, empty, mutable message value to the end - // of the list and returns it. - // It panics if the list does not contain a message type. - AppendMutable() Value - - // Truncate truncates the list to a smaller length. - // - // Truncate is a mutating operation and unsafe for concurrent use. - Truncate(int) - - // NewElement returns a new value for a list element. - // For enums, this returns the first enum value. - // For other scalars, this returns the zero value. - // For messages, this returns a new, empty, mutable value. - NewElement() Value - - // IsValid reports whether the list is valid. - // - // An invalid list is an empty, read-only value. - // - // Validity is not part of the protobuf data model, and may not - // be preserved in marshaling or other operations. - IsValid() bool -} - -// Map is an unordered, associative map. -// The entry MapKey type is determined by FieldDescriptor.MapKey.Kind. -// The entry Value type is determined by FieldDescriptor.MapValue.Kind. -// Providing a MapKey or Value that is invalid or of an incorrect type panics. -type Map interface { - // Len reports the number of elements in the map. - Len() int - - // Range iterates over every map entry in an undefined order, - // calling f for each key and value encountered. - // Range calls f Len times unless f returns false, which stops iteration. - // While iterating, mutating operations may only be performed - // on the current map key. - Range(f func(MapKey, Value) bool) - - // Has reports whether an entry with the given key is in the map. - Has(MapKey) bool - - // Clear clears the entry associated with they given key. - // The operation does nothing if there is no entry associated with the key. 
- // - // Clear is a mutating operation and unsafe for concurrent use. - Clear(MapKey) - - // Get retrieves the value for an entry with the given key. - // It returns an invalid value for non-existent entries. - Get(MapKey) Value - - // Set stores the value for an entry with the given key. - // It panics when given a key or value that is invalid or the wrong type. - // When setting a composite type, it is unspecified whether the set - // value aliases the source's memory in any way. - // - // Set is a mutating operation and unsafe for concurrent use. - Set(MapKey, Value) - - // Mutable retrieves a mutable reference to the entry for the given key. - // If no entry exists for the key, it creates a new, empty, mutable value - // and stores it as the entry for the key. - // It panics if the map value is not a message. - Mutable(MapKey) Value - - // NewValue returns a new value assignable as a map value. - // For enums, this returns the first enum value. - // For other scalars, this returns the zero value. - // For messages, this returns a new, empty, mutable value. - NewValue() Value - - // IsValid reports whether the map is valid. - // - // An invalid map is an empty, read-only value. - // - // An invalid message often corresponds to a nil Go map value, - // but the details are implementation dependent. - // Validity is not part of the protobuf data model, and may not - // be preserved in marshaling or other operations. - IsValid() bool -} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go deleted file mode 100644 index 7ced876f4..000000000 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package protoreflect - -import "google.golang.org/protobuf/internal/pragma" - -type valueType int - -const ( - nilType valueType = iota - boolType - int32Type - int64Type - uint32Type - uint64Type - float32Type - float64Type - stringType - bytesType - enumType - ifaceType -) - -// value is a union where only one type can be represented at a time. -// This uses a distinct field for each type. This is type safe in Go, but -// occupies more memory than necessary (72B). -type value struct { - pragma.DoNotCompare // 0B - - typ valueType // 8B - num uint64 // 8B - str string // 16B - bin []byte // 24B - iface interface{} // 16B -} - -func valueOfString(v string) Value { - return Value{typ: stringType, str: v} -} -func valueOfBytes(v []byte) Value { - return Value{typ: bytesType, bin: v} -} -func valueOfIface(v interface{}) Value { - return Value{typ: ifaceType, iface: v} -} - -func (v Value) getString() string { - return v.str -} -func (v Value) getBytes() []byte { - return v.bin -} -func (v Value) getIface() interface{} { - return v.iface -} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go deleted file mode 100644 index eb7764c30..000000000 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ /dev/null @@ -1,436 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package protoreflect - -import ( - "fmt" - "math" -) - -// Value is a union where only one Go type may be set at a time. -// The Value is used to represent all possible values a field may take. -// The following shows which Go type is used to represent each proto Kind: -// -// ╔════════════╤═════════════════════════════════════╗ -// ║ Go type │ Protobuf kind ║ -// ╠════════════╪═════════════════════════════════════╣ -// ║ bool │ BoolKind ║ -// ║ int32 │ Int32Kind, Sint32Kind, Sfixed32Kind ║ -// ║ int64 │ Int64Kind, Sint64Kind, Sfixed64Kind ║ -// ║ uint32 │ Uint32Kind, Fixed32Kind ║ -// ║ uint64 │ Uint64Kind, Fixed64Kind ║ -// ║ float32 │ FloatKind ║ -// ║ float64 │ DoubleKind ║ -// ║ string │ StringKind ║ -// ║ []byte │ BytesKind ║ -// ║ EnumNumber │ EnumKind ║ -// ║ Message │ MessageKind, GroupKind ║ -// ╚════════════╧═════════════════════════════════════╝ -// -// Multiple protobuf Kinds may be represented by a single Go type if the type -// can losslessly represent the information for the proto kind. For example, -// Int64Kind, Sint64Kind, and Sfixed64Kind are all represented by int64, -// but use different integer encoding methods. -// -// The List or Map types are used if the field cardinality is repeated. -// A field is a List if FieldDescriptor.IsList reports true. -// A field is a Map if FieldDescriptor.IsMap reports true. -// -// Converting to/from a Value and a concrete Go value panics on type mismatch. -// For example, ValueOf("hello").Int() panics because this attempts to -// retrieve an int64 from a string. -// -// List, Map, and Message Values are called "composite" values. -// -// A composite Value may alias (reference) memory at some location, -// such that changes to the Value updates the that location. -// A composite value acquired with a Mutable method, such as Message.Mutable, -// always references the source object. -// -// For example: -// // Append a 0 to a "repeated int32" field. -// // Since the Value returned by Mutable is guaranteed to alias -// // the source message, modifying the Value modifies the message. -// message.Mutable(fieldDesc).(List).Append(protoreflect.ValueOfInt32(0)) -// -// // Assign [0] to a "repeated int32" field by creating a new Value, -// // modifying it, and assigning it. -// list := message.NewField(fieldDesc).(List) -// list.Append(protoreflect.ValueOfInt32(0)) -// message.Set(fieldDesc, list) -// // ERROR: Since it is not defined whether Set aliases the source, -// // appending to the List here may or may not modify the message. -// list.Append(protoreflect.ValueOfInt32(0)) -// -// Some operations, such as Message.Get, may return an "empty, read-only" -// composite Value. Modifying an empty, read-only value panics. -type Value value - -// The protoreflect API uses a custom Value union type instead of interface{} -// to keep the future open for performance optimizations. Using an interface{} -// always incurs an allocation for primitives (e.g., int64) since it needs to -// be boxed on the heap (as interfaces can only contain pointers natively). -// Instead, we represent the Value union as a flat struct that internally keeps -// track of which type is set. Using unsafe, the Value union can be reduced -// down to 24B, which is identical in size to a slice. -// -// The latest compiler (Go1.11) currently suffers from some limitations: -// • With inlining, the compiler should be able to statically prove that -// only one of these switch cases are taken and inline one specific case. -// See https://golang.org/issue/22310. 
- -// ValueOf returns a Value initialized with the concrete value stored in v. -// This panics if the type does not match one of the allowed types in the -// Value union. -func ValueOf(v interface{}) Value { - switch v := v.(type) { - case nil: - return Value{} - case bool: - return ValueOfBool(v) - case int32: - return ValueOfInt32(v) - case int64: - return ValueOfInt64(v) - case uint32: - return ValueOfUint32(v) - case uint64: - return ValueOfUint64(v) - case float32: - return ValueOfFloat32(v) - case float64: - return ValueOfFloat64(v) - case string: - return ValueOfString(v) - case []byte: - return ValueOfBytes(v) - case EnumNumber: - return ValueOfEnum(v) - case Message, List, Map: - return valueOfIface(v) - case ProtoMessage: - panic(fmt.Sprintf("invalid proto.Message(%T) type, expected a protoreflect.Message type", v)) - default: - panic(fmt.Sprintf("invalid type: %T", v)) - } -} - -// ValueOfBool returns a new boolean value. -func ValueOfBool(v bool) Value { - if v { - return Value{typ: boolType, num: 1} - } else { - return Value{typ: boolType, num: 0} - } -} - -// ValueOfInt32 returns a new int32 value. -func ValueOfInt32(v int32) Value { - return Value{typ: int32Type, num: uint64(v)} -} - -// ValueOfInt64 returns a new int64 value. -func ValueOfInt64(v int64) Value { - return Value{typ: int64Type, num: uint64(v)} -} - -// ValueOfUint32 returns a new uint32 value. -func ValueOfUint32(v uint32) Value { - return Value{typ: uint32Type, num: uint64(v)} -} - -// ValueOfUint64 returns a new uint64 value. -func ValueOfUint64(v uint64) Value { - return Value{typ: uint64Type, num: v} -} - -// ValueOfFloat32 returns a new float32 value. -func ValueOfFloat32(v float32) Value { - return Value{typ: float32Type, num: uint64(math.Float64bits(float64(v)))} -} - -// ValueOfFloat64 returns a new float64 value. -func ValueOfFloat64(v float64) Value { - return Value{typ: float64Type, num: uint64(math.Float64bits(float64(v)))} -} - -// ValueOfString returns a new string value. -func ValueOfString(v string) Value { - return valueOfString(v) -} - -// ValueOfBytes returns a new bytes value. -func ValueOfBytes(v []byte) Value { - return valueOfBytes(v[:len(v):len(v)]) -} - -// ValueOfEnum returns a new enum value. -func ValueOfEnum(v EnumNumber) Value { - return Value{typ: enumType, num: uint64(v)} -} - -// ValueOfMessage returns a new Message value. -func ValueOfMessage(v Message) Value { - return valueOfIface(v) -} - -// ValueOfList returns a new List value. -func ValueOfList(v List) Value { - return valueOfIface(v) -} - -// ValueOfMap returns a new Map value. -func ValueOfMap(v Map) Value { - return valueOfIface(v) -} - -// IsValid reports whether v is populated with a value. -func (v Value) IsValid() bool { - return v.typ != nilType -} - -// Interface returns v as an interface{}. 
-// -// Invariant: v == ValueOf(v).Interface() -func (v Value) Interface() interface{} { - switch v.typ { - case nilType: - return nil - case boolType: - return v.Bool() - case int32Type: - return int32(v.Int()) - case int64Type: - return int64(v.Int()) - case uint32Type: - return uint32(v.Uint()) - case uint64Type: - return uint64(v.Uint()) - case float32Type: - return float32(v.Float()) - case float64Type: - return float64(v.Float()) - case stringType: - return v.String() - case bytesType: - return v.Bytes() - case enumType: - return v.Enum() - default: - return v.getIface() - } -} - -func (v Value) typeName() string { - switch v.typ { - case nilType: - return "nil" - case boolType: - return "bool" - case int32Type: - return "int32" - case int64Type: - return "int64" - case uint32Type: - return "uint32" - case uint64Type: - return "uint64" - case float32Type: - return "float32" - case float64Type: - return "float64" - case stringType: - return "string" - case bytesType: - return "bytes" - case enumType: - return "enum" - default: - switch v := v.getIface().(type) { - case Message: - return "message" - case List: - return "list" - case Map: - return "map" - default: - return fmt.Sprintf("<unknown: %T>", v) - } - } -} - -func (v Value) panicMessage(what string) string { - return fmt.Sprintf("type mismatch: cannot convert %v to %s", v.typeName(), what) -} - -// Bool returns v as a bool and panics if the type is not a bool. -func (v Value) Bool() bool { - switch v.typ { - case boolType: - return v.num > 0 - default: - panic(v.panicMessage("bool")) - } -} - -// Int returns v as a int64 and panics if the type is not a int32 or int64. -func (v Value) Int() int64 { - switch v.typ { - case int32Type, int64Type: - return int64(v.num) - default: - panic(v.panicMessage("int")) - } -} - -// Uint returns v as a uint64 and panics if the type is not a uint32 or uint64. -func (v Value) Uint() uint64 { - switch v.typ { - case uint32Type, uint64Type: - return uint64(v.num) - default: - panic(v.panicMessage("uint")) - } -} - -// Float returns v as a float64 and panics if the type is not a float32 or float64. -func (v Value) Float() float64 { - switch v.typ { - case float32Type, float64Type: - return math.Float64frombits(uint64(v.num)) - default: - panic(v.panicMessage("float")) - } -} - -// String returns v as a string. Since this method implements fmt.Stringer, -// this returns the formatted string value for any non-string type. -func (v Value) String() string { - switch v.typ { - case stringType: - return v.getString() - default: - return fmt.Sprint(v.Interface()) - } -} - -// Bytes returns v as a []byte and panics if the type is not a []byte. -func (v Value) Bytes() []byte { - switch v.typ { - case bytesType: - return v.getBytes() - default: - panic(v.panicMessage("bytes")) - } -} - -// Enum returns v as a EnumNumber and panics if the type is not a EnumNumber. -func (v Value) Enum() EnumNumber { - switch v.typ { - case enumType: - return EnumNumber(v.num) - default: - panic(v.panicMessage("enum")) - } -} - -// Message returns v as a Message and panics if the type is not a Message. -func (v Value) Message() Message { - switch vi := v.getIface().(type) { - case Message: - return vi - default: - panic(v.panicMessage("message")) - } -} - -// List returns v as a List and panics if the type is not a List. -func (v Value) List() List { - switch vi := v.getIface().(type) { - case List: - return vi - default: - panic(v.panicMessage("list")) - } -} - -// Map returns v as a Map and panics if the type is not a Map.
-func (v Value) Map() Map { - switch vi := v.getIface().(type) { - case Map: - return vi - default: - panic(v.panicMessage("map")) - } -} - -// MapKey returns v as a MapKey and panics for invalid MapKey types. -func (v Value) MapKey() MapKey { - switch v.typ { - case boolType, int32Type, int64Type, uint32Type, uint64Type, stringType: - return MapKey(v) - default: - panic(v.panicMessage("map key")) - } -} - -// MapKey is used to index maps, where the Go type of the MapKey must match -// the specified key Kind (see MessageDescriptor.IsMapEntry). -// The following shows what Go type is used to represent each proto Kind: -// -// ╔═════════╤═════════════════════════════════════╗ -// ║ Go type │ Protobuf kind ║ -// ╠═════════╪═════════════════════════════════════╣ -// ║ bool │ BoolKind ║ -// ║ int32 │ Int32Kind, Sint32Kind, Sfixed32Kind ║ -// ║ int64 │ Int64Kind, Sint64Kind, Sfixed64Kind ║ -// ║ uint32 │ Uint32Kind, Fixed32Kind ║ -// ║ uint64 │ Uint64Kind, Fixed64Kind ║ -// ║ string │ StringKind ║ -// ╚═════════╧═════════════════════════════════════╝ -// -// A MapKey is constructed and accessed through a Value: -// k := ValueOf("hash").MapKey() // convert string to MapKey -// s := k.String() // convert MapKey to string -// -// The MapKey is a strict subset of valid types used in Value; -// converting a Value to a MapKey with an invalid type panics. -type MapKey value - -// IsValid reports whether k is populated with a value. -func (k MapKey) IsValid() bool { - return Value(k).IsValid() -} - -// Interface returns k as an interface{}. -func (k MapKey) Interface() interface{} { - return Value(k).Interface() -} - -// Bool returns k as a bool and panics if the type is not a bool. -func (k MapKey) Bool() bool { - return Value(k).Bool() -} - -// Int returns k as a int64 and panics if the type is not a int32 or int64. -func (k MapKey) Int() int64 { - return Value(k).Int() -} - -// Uint returns k as a uint64 and panics if the type is not a uint32 or uint64. -func (k MapKey) Uint() uint64 { - return Value(k).Uint() -} - -// String returns k as a string. Since this method implements fmt.Stringer, -// this returns the formatted string value for any non-string type. -func (k MapKey) String() string { - return Value(k).String() -} - -// Value returns k as a Value. -func (k MapKey) Value() Value { - return Value(k) -} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go deleted file mode 100644 index 702ddf22a..000000000 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !purego && !appengine -// +build !purego,!appengine - -package protoreflect - -import ( - "unsafe" - - "google.golang.org/protobuf/internal/pragma" -) - -type ( - stringHeader struct { - Data unsafe.Pointer - Len int - } - sliceHeader struct { - Data unsafe.Pointer - Len int - Cap int - } - ifaceHeader struct { - Type unsafe.Pointer - Data unsafe.Pointer - } -) - -var ( - nilType = typeOf(nil) - boolType = typeOf(*new(bool)) - int32Type = typeOf(*new(int32)) - int64Type = typeOf(*new(int64)) - uint32Type = typeOf(*new(uint32)) - uint64Type = typeOf(*new(uint64)) - float32Type = typeOf(*new(float32)) - float64Type = typeOf(*new(float64)) - stringType = typeOf(*new(string)) - bytesType = typeOf(*new([]byte)) - enumType = typeOf(*new(EnumNumber)) -) - -// typeOf returns a pointer to the Go type information. -// The pointer is comparable and equal if and only if the types are identical. -func typeOf(t interface{}) unsafe.Pointer { - return (*ifaceHeader)(unsafe.Pointer(&t)).Type -} - -// value is a union where only one type can be represented at a time. -// The struct is 24B large on 64-bit systems and requires the minimum storage -// necessary to represent each possible type. -// -// The Go GC needs to be able to scan variables containing pointers. -// As such, pointers and non-pointers cannot be intermixed. -type value struct { - pragma.DoNotCompare // 0B - - // typ stores the type of the value as a pointer to the Go type. - typ unsafe.Pointer // 8B - - // ptr stores the data pointer for a String, Bytes, or interface value. - ptr unsafe.Pointer // 8B - - // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or - // Enum value as a raw uint64. - // - // It is also used to store the length of a String or Bytes value; - // the capacity is ignored. - num uint64 // 8B -} - -func valueOfString(v string) Value { - p := (*stringHeader)(unsafe.Pointer(&v)) - return Value{typ: stringType, ptr: p.Data, num: uint64(len(v))} -} -func valueOfBytes(v []byte) Value { - p := (*sliceHeader)(unsafe.Pointer(&v)) - return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))} -} -func valueOfIface(v interface{}) Value { - p := (*ifaceHeader)(unsafe.Pointer(&v)) - return Value{typ: p.Type, ptr: p.Data} -} - -func (v Value) getString() (x string) { - *(*stringHeader)(unsafe.Pointer(&x)) = stringHeader{Data: v.ptr, Len: int(v.num)} - return x -} -func (v Value) getBytes() (x []byte) { - *(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)} - return x -} -func (v Value) getIface() (x interface{}) { - *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} - return x -} diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go deleted file mode 100644 index 59f024c44..000000000 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ /dev/null @@ -1,880 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package protoregistry provides data structures to register and lookup -// protobuf descriptor types. -// -// The Files registry contains file descriptors and provides the ability -// to iterate over the files or lookup a specific descriptor within the files. 
-// Files only contains protobuf descriptors and has no understanding of Go -// type information that may be associated with each descriptor. -// -// The Types registry contains descriptor types for which there is a known -// Go type associated with that descriptor. It provides the ability to iterate -// over the registered types or lookup a type by name. -package protoregistry - -import ( - "fmt" - "os" - "strings" - "sync" - - "google.golang.org/protobuf/internal/encoding/messageset" - "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/flags" - "google.golang.org/protobuf/reflect/protoreflect" -) - -// conflictPolicy configures the policy for handling registration conflicts. -// -// It can be over-written at compile time with a linker-initialized variable: -// go build -ldflags "-X google.golang.org/protobuf/reflect/protoregistry.conflictPolicy=warn" -// -// It can be over-written at program execution with an environment variable: -// GOLANG_PROTOBUF_REGISTRATION_CONFLICT=warn ./main -// -// Neither of the above are covered by the compatibility promise and -// may be removed in a future release of this module. -var conflictPolicy = "panic" // "panic" | "warn" | "ignore" - -// ignoreConflict reports whether to ignore a registration conflict -// given the descriptor being registered and the error. -// It is a variable so that the behavior is easily overridden in another file. -var ignoreConflict = func(d protoreflect.Descriptor, err error) bool { - const env = "GOLANG_PROTOBUF_REGISTRATION_CONFLICT" - const faq = "https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict" - policy := conflictPolicy - if v := os.Getenv(env); v != "" { - policy = v - } - switch policy { - case "panic": - panic(fmt.Sprintf("%v\nSee %v\n", err, faq)) - case "warn": - fmt.Fprintf(os.Stderr, "WARNING: %v\nSee %v\n\n", err, faq) - return true - case "ignore": - return true - default: - panic("invalid " + env + " value: " + os.Getenv(env)) - } -} - -var globalMutex sync.RWMutex - -// GlobalFiles is a global registry of file descriptors. -var GlobalFiles *Files = new(Files) - -// GlobalTypes is the registry used by default for type lookups -// unless a local registry is provided by the user. -var GlobalTypes *Types = new(Types) - -// NotFound is a sentinel error value to indicate that the type was not found. -// -// Since registry lookup can happen in the critical performance path, resolvers -// must return this exact error value, not an error wrapping it. -var NotFound = errors.New("not found") - -// Files is a registry for looking up or iterating over files and the -// descriptors contained within them. -// The Find and Range methods are safe for concurrent use. -type Files struct { - // The map of descsByName contains: - // EnumDescriptor - // EnumValueDescriptor - // MessageDescriptor - // ExtensionDescriptor - // ServiceDescriptor - // *packageDescriptor - // - // Note that files are stored as a slice, since a package may contain - // multiple files. Only top-level declarations are registered. - // Note that enum values are in the top-level since that are in the same - // scope as the parent enum. - descsByName map[protoreflect.FullName]interface{} - filesByPath map[string][]protoreflect.FileDescriptor - numFiles int -} - -type packageDescriptor struct { - files []protoreflect.FileDescriptor -} - -// RegisterFile registers the provided file descriptor. 
-// -// If any descriptor within the file conflicts with the descriptor of any -// previously registered file (e.g., two enums with the same full name), -// then the file is not registered and an error is returned. -// -// It is permitted for multiple files to have the same file path. -func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { - if r == GlobalFiles { - globalMutex.Lock() - defer globalMutex.Unlock() - } - if r.descsByName == nil { - r.descsByName = map[protoreflect.FullName]interface{}{ - "": &packageDescriptor{}, - } - r.filesByPath = make(map[string][]protoreflect.FileDescriptor) - } - path := file.Path() - if prev := r.filesByPath[path]; len(prev) > 0 { - r.checkGenProtoConflict(path) - err := errors.New("file %q is already registered", file.Path()) - err = amendErrorWithCaller(err, prev[0], file) - if !(r == GlobalFiles && ignoreConflict(file, err)) { - return err - } - } - - for name := file.Package(); name != ""; name = name.Parent() { - switch prev := r.descsByName[name]; prev.(type) { - case nil, *packageDescriptor: - default: - err := errors.New("file %q has a package name conflict over %v", file.Path(), name) - err = amendErrorWithCaller(err, prev, file) - if r == GlobalFiles && ignoreConflict(file, err) { - err = nil - } - return err - } - } - var err error - var hasConflict bool - rangeTopLevelDescriptors(file, func(d protoreflect.Descriptor) { - if prev := r.descsByName[d.FullName()]; prev != nil { - hasConflict = true - err = errors.New("file %q has a name conflict over %v", file.Path(), d.FullName()) - err = amendErrorWithCaller(err, prev, file) - if r == GlobalFiles && ignoreConflict(d, err) { - err = nil - } - } - }) - if hasConflict { - return err - } - - for name := file.Package(); name != ""; name = name.Parent() { - if r.descsByName[name] == nil { - r.descsByName[name] = &packageDescriptor{} - } - } - p := r.descsByName[file.Package()].(*packageDescriptor) - p.files = append(p.files, file) - rangeTopLevelDescriptors(file, func(d protoreflect.Descriptor) { - r.descsByName[d.FullName()] = d - }) - r.filesByPath[path] = append(r.filesByPath[path], file) - r.numFiles++ - return nil -} - -// Several well-known types were hosted in the google.golang.org/genproto module -// but were later moved to this module. To avoid a weak dependency on the -// genproto module (and its relatively large set of transitive dependencies), -// we rely on a registration conflict to determine whether the genproto version -// is too old (i.e., does not contain aliases to the new type declarations). 
-func (r *Files) checkGenProtoConflict(path string) { - if r != GlobalFiles { - return - } - var prevPath string - const prevModule = "google.golang.org/genproto" - const prevVersion = "cb27e3aa (May 26th, 2020)" - switch path { - case "google/protobuf/field_mask.proto": - prevPath = prevModule + "/protobuf/field_mask" - case "google/protobuf/api.proto": - prevPath = prevModule + "/protobuf/api" - case "google/protobuf/type.proto": - prevPath = prevModule + "/protobuf/ptype" - case "google/protobuf/source_context.proto": - prevPath = prevModule + "/protobuf/source_context" - default: - return - } - pkgName := strings.TrimSuffix(strings.TrimPrefix(path, "google/protobuf/"), ".proto") - pkgName = strings.Replace(pkgName, "_", "", -1) + "pb" // e.g., "field_mask" => "fieldmaskpb" - currPath := "google.golang.org/protobuf/types/known/" + pkgName - panic(fmt.Sprintf(""+ - "duplicate registration of %q\n"+ - "\n"+ - "The generated definition for this file has moved:\n"+ - "\tfrom: %q\n"+ - "\tto: %q\n"+ - "A dependency on the %q module must\n"+ - "be at version %v or higher.\n"+ - "\n"+ - "Upgrade the dependency by running:\n"+ - "\tgo get -u %v\n", - path, prevPath, currPath, prevModule, prevVersion, prevPath)) -} - -// FindDescriptorByName looks up a descriptor by the full name. -// -// This returns (nil, NotFound) if not found. -func (r *Files) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) { - if r == nil { - return nil, NotFound - } - if r == GlobalFiles { - globalMutex.RLock() - defer globalMutex.RUnlock() - } - prefix := name - suffix := nameSuffix("") - for prefix != "" { - if d, ok := r.descsByName[prefix]; ok { - switch d := d.(type) { - case protoreflect.EnumDescriptor: - if d.FullName() == name { - return d, nil - } - case protoreflect.EnumValueDescriptor: - if d.FullName() == name { - return d, nil - } - case protoreflect.MessageDescriptor: - if d.FullName() == name { - return d, nil - } - if d := findDescriptorInMessage(d, suffix); d != nil && d.FullName() == name { - return d, nil - } - case protoreflect.ExtensionDescriptor: - if d.FullName() == name { - return d, nil - } - case protoreflect.ServiceDescriptor: - if d.FullName() == name { - return d, nil - } - if d := d.Methods().ByName(suffix.Pop()); d != nil && d.FullName() == name { - return d, nil - } - } - return nil, NotFound - } - prefix = prefix.Parent() - suffix = nameSuffix(name[len(prefix)+len("."):]) - } - return nil, NotFound -} - -func findDescriptorInMessage(md protoreflect.MessageDescriptor, suffix nameSuffix) protoreflect.Descriptor { - name := suffix.Pop() - if suffix == "" { - if ed := md.Enums().ByName(name); ed != nil { - return ed - } - for i := md.Enums().Len() - 1; i >= 0; i-- { - if vd := md.Enums().Get(i).Values().ByName(name); vd != nil { - return vd - } - } - if xd := md.Extensions().ByName(name); xd != nil { - return xd - } - if fd := md.Fields().ByName(name); fd != nil { - return fd - } - if od := md.Oneofs().ByName(name); od != nil { - return od - } - } - if md := md.Messages().ByName(name); md != nil { - if suffix == "" { - return md - } - return findDescriptorInMessage(md, suffix) - } - return nil -} - -type nameSuffix string - -func (s *nameSuffix) Pop() (name protoreflect.Name) { - if i := strings.IndexByte(string(*s), '.'); i >= 0 { - name, *s = protoreflect.Name((*s)[:i]), (*s)[i+1:] - } else { - name, *s = protoreflect.Name((*s)), "" - } - return name -} - -// FindFileByPath looks up a file by the path. -// -// This returns (nil, NotFound) if not found. 
-// This returns an error if multiple files have the same path. -func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { - if r == nil { - return nil, NotFound - } - if r == GlobalFiles { - globalMutex.RLock() - defer globalMutex.RUnlock() - } - fds := r.filesByPath[path] - switch len(fds) { - case 0: - return nil, NotFound - case 1: - return fds[0], nil - default: - return nil, errors.New("multiple files named %q", path) - } -} - -// NumFiles reports the number of registered files, -// including duplicate files with the same name. -func (r *Files) NumFiles() int { - if r == nil { - return 0 - } - if r == GlobalFiles { - globalMutex.RLock() - defer globalMutex.RUnlock() - } - return r.numFiles -} - -// RangeFiles iterates over all registered files while f returns true. -// If multiple files have the same name, RangeFiles iterates over all of them. -// The iteration order is undefined. -func (r *Files) RangeFiles(f func(protoreflect.FileDescriptor) bool) { - if r == nil { - return - } - if r == GlobalFiles { - globalMutex.RLock() - defer globalMutex.RUnlock() - } - for _, files := range r.filesByPath { - for _, file := range files { - if !f(file) { - return - } - } - } -} - -// NumFilesByPackage reports the number of registered files in a proto package. -func (r *Files) NumFilesByPackage(name protoreflect.FullName) int { - if r == nil { - return 0 - } - if r == GlobalFiles { - globalMutex.RLock() - defer globalMutex.RUnlock() - } - p, ok := r.descsByName[name].(*packageDescriptor) - if !ok { - return 0 - } - return len(p.files) -} - -// RangeFilesByPackage iterates over all registered files in a given proto package -// while f returns true. The iteration order is undefined. -func (r *Files) RangeFilesByPackage(name protoreflect.FullName, f func(protoreflect.FileDescriptor) bool) { - if r == nil { - return - } - if r == GlobalFiles { - globalMutex.RLock() - defer globalMutex.RUnlock() - } - p, ok := r.descsByName[name].(*packageDescriptor) - if !ok { - return - } - for _, file := range p.files { - if !f(file) { - return - } - } -} - -// rangeTopLevelDescriptors iterates over all top-level descriptors in a file -// which will be directly entered into the registry. -func rangeTopLevelDescriptors(fd protoreflect.FileDescriptor, f func(protoreflect.Descriptor)) { - eds := fd.Enums() - for i := eds.Len() - 1; i >= 0; i-- { - f(eds.Get(i)) - vds := eds.Get(i).Values() - for i := vds.Len() - 1; i >= 0; i-- { - f(vds.Get(i)) - } - } - mds := fd.Messages() - for i := mds.Len() - 1; i >= 0; i-- { - f(mds.Get(i)) - } - xds := fd.Extensions() - for i := xds.Len() - 1; i >= 0; i-- { - f(xds.Get(i)) - } - sds := fd.Services() - for i := sds.Len() - 1; i >= 0; i-- { - f(sds.Get(i)) - } -} - -// MessageTypeResolver is an interface for looking up messages. -// -// A compliant implementation must deterministically return the same type -// if no error is encountered. -// -// The Types type implements this interface. -type MessageTypeResolver interface { - // FindMessageByName looks up a message by its full name. - // E.g., "google.protobuf.Any" - // - // This return (nil, NotFound) if not found. - FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) - - // FindMessageByURL looks up a message by a URL identifier. - // See documentation on google.protobuf.Any.type_url for the URL format. - // - // This returns (nil, NotFound) if not found. 
- FindMessageByURL(url string) (protoreflect.MessageType, error) -} - -// ExtensionTypeResolver is an interface for looking up extensions. -// -// A compliant implementation must deterministically return the same type -// if no error is encountered. -// -// The Types type implements this interface. -type ExtensionTypeResolver interface { - // FindExtensionByName looks up a extension field by the field's full name. - // Note that this is the full name of the field as determined by - // where the extension is declared and is unrelated to the full name of the - // message being extended. - // - // This returns (nil, NotFound) if not found. - FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) - - // FindExtensionByNumber looks up a extension field by the field number - // within some parent message, identified by full name. - // - // This returns (nil, NotFound) if not found. - FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) -} - -var ( - _ MessageTypeResolver = (*Types)(nil) - _ ExtensionTypeResolver = (*Types)(nil) -) - -// Types is a registry for looking up or iterating over descriptor types. -// The Find and Range methods are safe for concurrent use. -type Types struct { - typesByName typesByName - extensionsByMessage extensionsByMessage - - numEnums int - numMessages int - numExtensions int -} - -type ( - typesByName map[protoreflect.FullName]interface{} - extensionsByMessage map[protoreflect.FullName]extensionsByNumber - extensionsByNumber map[protoreflect.FieldNumber]protoreflect.ExtensionType -) - -// RegisterMessage registers the provided message type. -// -// If a naming conflict occurs, the type is not registered and an error is returned. -func (r *Types) RegisterMessage(mt protoreflect.MessageType) error { - // Under rare circumstances getting the descriptor might recursively - // examine the registry, so fetch it before locking. - md := mt.Descriptor() - - if r == GlobalTypes { - globalMutex.Lock() - defer globalMutex.Unlock() - } - - if err := r.register("message", md, mt); err != nil { - return err - } - r.numMessages++ - return nil -} - -// RegisterEnum registers the provided enum type. -// -// If a naming conflict occurs, the type is not registered and an error is returned. -func (r *Types) RegisterEnum(et protoreflect.EnumType) error { - // Under rare circumstances getting the descriptor might recursively - // examine the registry, so fetch it before locking. - ed := et.Descriptor() - - if r == GlobalTypes { - globalMutex.Lock() - defer globalMutex.Unlock() - } - - if err := r.register("enum", ed, et); err != nil { - return err - } - r.numEnums++ - return nil -} - -// RegisterExtension registers the provided extension type. -// -// If a naming conflict occurs, the type is not registered and an error is returned. -func (r *Types) RegisterExtension(xt protoreflect.ExtensionType) error { - // Under rare circumstances getting the descriptor might recursively - // examine the registry, so fetch it before locking. - // - // A known case where this can happen: Fetching the TypeDescriptor for a - // legacy ExtensionDesc can consult the global registry. 
- xd := xt.TypeDescriptor() - - if r == GlobalTypes { - globalMutex.Lock() - defer globalMutex.Unlock() - } - - field := xd.Number() - message := xd.ContainingMessage().FullName() - if prev := r.extensionsByMessage[message][field]; prev != nil { - err := errors.New("extension number %d is already registered on message %v", field, message) - err = amendErrorWithCaller(err, prev, xt) - if !(r == GlobalTypes && ignoreConflict(xd, err)) { - return err - } - } - - if err := r.register("extension", xd, xt); err != nil { - return err - } - if r.extensionsByMessage == nil { - r.extensionsByMessage = make(extensionsByMessage) - } - if r.extensionsByMessage[message] == nil { - r.extensionsByMessage[message] = make(extensionsByNumber) - } - r.extensionsByMessage[message][field] = xt - r.numExtensions++ - return nil -} - -func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interface{}) error { - name := desc.FullName() - prev := r.typesByName[name] - if prev != nil { - err := errors.New("%v %v is already registered", kind, name) - err = amendErrorWithCaller(err, prev, typ) - if !(r == GlobalTypes && ignoreConflict(desc, err)) { - return err - } - } - if r.typesByName == nil { - r.typesByName = make(typesByName) - } - r.typesByName[name] = typ - return nil -} - -// FindEnumByName looks up an enum by its full name. -// E.g., "google.protobuf.Field.Kind". -// -// This returns (nil, NotFound) if not found. -func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumType, error) { - if r == nil { - return nil, NotFound - } - if r == GlobalTypes { - globalMutex.RLock() - defer globalMutex.RUnlock() - } - if v := r.typesByName[enum]; v != nil { - if et, _ := v.(protoreflect.EnumType); et != nil { - return et, nil - } - return nil, errors.New("found wrong type: got %v, want enum", typeName(v)) - } - return nil, NotFound -} - -// FindMessageByName looks up a message by its full name, -// e.g. "google.protobuf.Any". -// -// This returns (nil, NotFound) if not found. -func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { - if r == nil { - return nil, NotFound - } - if r == GlobalTypes { - globalMutex.RLock() - defer globalMutex.RUnlock() - } - if v := r.typesByName[message]; v != nil { - if mt, _ := v.(protoreflect.MessageType); mt != nil { - return mt, nil - } - return nil, errors.New("found wrong type: got %v, want message", typeName(v)) - } - return nil, NotFound -} - -// FindMessageByURL looks up a message by a URL identifier. -// See documentation on google.protobuf.Any.type_url for the URL format. -// -// This returns (nil, NotFound) if not found. -func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { - // This function is similar to FindMessageByName but - // truncates anything before and including '/' in the URL. - if r == nil { - return nil, NotFound - } - if r == GlobalTypes { - globalMutex.RLock() - defer globalMutex.RUnlock() - } - message := protoreflect.FullName(url) - if i := strings.LastIndexByte(url, '/'); i >= 0 { - message = message[i+len("/"):] - } - - if v := r.typesByName[message]; v != nil { - if mt, _ := v.(protoreflect.MessageType); mt != nil { - return mt, nil - } - return nil, errors.New("found wrong type: got %v, want message", typeName(v)) - } - return nil, NotFound -} - -// FindExtensionByName looks up a extension field by the field's full name. 
-// Note that this is the full name of the field as determined by -// where the extension is declared and is unrelated to the full name of the -// message being extended. -// -// This returns (nil, NotFound) if not found. -func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { - if r == nil { - return nil, NotFound - } - if r == GlobalTypes { - globalMutex.RLock() - defer globalMutex.RUnlock() - } - if v := r.typesByName[field]; v != nil { - if xt, _ := v.(protoreflect.ExtensionType); xt != nil { - return xt, nil - } - - // MessageSet extensions are special in that the name of the extension - // is the name of the message type used to extend the MessageSet. - // This naming scheme is used by text and JSON serialization. - // - // This feature is protected by the ProtoLegacy flag since MessageSets - // are a proto1 feature that is long deprecated. - if flags.ProtoLegacy { - if _, ok := v.(protoreflect.MessageType); ok { - field := field.Append(messageset.ExtensionName) - if v := r.typesByName[field]; v != nil { - if xt, _ := v.(protoreflect.ExtensionType); xt != nil { - if messageset.IsMessageSetExtension(xt.TypeDescriptor()) { - return xt, nil - } - } - } - } - } - - return nil, errors.New("found wrong type: got %v, want extension", typeName(v)) - } - return nil, NotFound -} - -// FindExtensionByNumber looks up a extension field by the field number -// within some parent message, identified by full name. -// -// This returns (nil, NotFound) if not found. -func (r *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { - if r == nil { - return nil, NotFound - } - if r == GlobalTypes { - globalMutex.RLock() - defer globalMutex.RUnlock() - } - if xt, ok := r.extensionsByMessage[message][field]; ok { - return xt, nil - } - return nil, NotFound -} - -// NumEnums reports the number of registered enums. -func (r *Types) NumEnums() int { - if r == nil { - return 0 - } - if r == GlobalTypes { - globalMutex.RLock() - defer globalMutex.RUnlock() - } - return r.numEnums -} - -// RangeEnums iterates over all registered enums while f returns true. -// Iteration order is undefined. -func (r *Types) RangeEnums(f func(protoreflect.EnumType) bool) { - if r == nil { - return - } - if r == GlobalTypes { - globalMutex.RLock() - defer globalMutex.RUnlock() - } - for _, typ := range r.typesByName { - if et, ok := typ.(protoreflect.EnumType); ok { - if !f(et) { - return - } - } - } -} - -// NumMessages reports the number of registered messages. -func (r *Types) NumMessages() int { - if r == nil { - return 0 - } - if r == GlobalTypes { - globalMutex.RLock() - defer globalMutex.RUnlock() - } - return r.numMessages -} - -// RangeMessages iterates over all registered messages while f returns true. -// Iteration order is undefined. -func (r *Types) RangeMessages(f func(protoreflect.MessageType) bool) { - if r == nil { - return - } - if r == GlobalTypes { - globalMutex.RLock() - defer globalMutex.RUnlock() - } - for _, typ := range r.typesByName { - if mt, ok := typ.(protoreflect.MessageType); ok { - if !f(mt) { - return - } - } - } -} - -// NumExtensions reports the number of registered extensions. -func (r *Types) NumExtensions() int { - if r == nil { - return 0 - } - if r == GlobalTypes { - globalMutex.RLock() - defer globalMutex.RUnlock() - } - return r.numExtensions -} - -// RangeExtensions iterates over all registered extensions while f returns true. -// Iteration order is undefined. 
-func (r *Types) RangeExtensions(f func(protoreflect.ExtensionType) bool) { - if r == nil { - return - } - if r == GlobalTypes { - globalMutex.RLock() - defer globalMutex.RUnlock() - } - for _, typ := range r.typesByName { - if xt, ok := typ.(protoreflect.ExtensionType); ok { - if !f(xt) { - return - } - } - } -} - -// NumExtensionsByMessage reports the number of registered extensions for -// a given message type. -func (r *Types) NumExtensionsByMessage(message protoreflect.FullName) int { - if r == nil { - return 0 - } - if r == GlobalTypes { - globalMutex.RLock() - defer globalMutex.RUnlock() - } - return len(r.extensionsByMessage[message]) -} - -// RangeExtensionsByMessage iterates over all registered extensions filtered -// by a given message type while f returns true. Iteration order is undefined. -func (r *Types) RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool) { - if r == nil { - return - } - if r == GlobalTypes { - globalMutex.RLock() - defer globalMutex.RUnlock() - } - for _, xt := range r.extensionsByMessage[message] { - if !f(xt) { - return - } - } -} - -func typeName(t interface{}) string { - switch t.(type) { - case protoreflect.EnumType: - return "enum" - case protoreflect.MessageType: - return "message" - case protoreflect.ExtensionType: - return "extension" - default: - return fmt.Sprintf("%T", t) - } -} - -func amendErrorWithCaller(err error, prev, curr interface{}) error { - prevPkg := goPackage(prev) - currPkg := goPackage(curr) - if prevPkg == "" || currPkg == "" || prevPkg == currPkg { - return err - } - return errors.New("%s\n\tpreviously from: %q\n\tcurrently from: %q", err, prevPkg, currPkg) -} - -func goPackage(v interface{}) string { - switch d := v.(type) { - case protoreflect.EnumType: - v = d.Descriptor() - case protoreflect.MessageType: - v = d.Descriptor() - case protoreflect.ExtensionType: - v = d.TypeDescriptor() - } - if d, ok := v.(protoreflect.Descriptor); ok { - v = d.ParentFile() - } - if d, ok := v.(interface{ GoPackagePath() string }); ok { - return d.GoPackagePath() - } - return "" -} diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go b/vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go deleted file mode 100644 index c58727675..000000000 --- a/vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package protoiface - -type MessageV1 interface { - Reset() - String() string - ProtoMessage() -} - -type ExtensionRangeV1 struct { - Start, End int32 // both inclusive -} diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go deleted file mode 100644 index 44cf467d8..000000000 --- a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package protoiface contains types referenced or implemented by messages. -// -// WARNING: This package should only be imported by message implementations. -// The functionality found in this package should be accessed through -// higher-level abstractions provided by the proto package. 
-package protoiface - -import ( - "google.golang.org/protobuf/internal/pragma" - "google.golang.org/protobuf/reflect/protoreflect" -) - -// Methods is a set of optional fast-path implementations of various operations. -type Methods = struct { - pragma.NoUnkeyedLiterals - - // Flags indicate support for optional features. - Flags SupportFlags - - // Size returns the size in bytes of the wire-format encoding of a message. - // Marshal must be provided if a custom Size is provided. - Size func(SizeInput) SizeOutput - - // Marshal formats a message in the wire-format encoding to the provided buffer. - // Size should be provided if a custom Marshal is provided. - // It must not return an error for a partial message. - Marshal func(MarshalInput) (MarshalOutput, error) - - // Unmarshal parses the wire-format encoding and merges the result into a message. - // It must not reset the target message or return an error for a partial message. - Unmarshal func(UnmarshalInput) (UnmarshalOutput, error) - - // Merge merges the contents of a source message into a destination message. - Merge func(MergeInput) MergeOutput - - // CheckInitialized returns an error if any required fields in the message are not set. - CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error) -} - -// SupportFlags indicate support for optional features. -type SupportFlags = uint64 - -const ( - // SupportMarshalDeterministic reports whether MarshalOptions.Deterministic is supported. - SupportMarshalDeterministic SupportFlags = 1 << iota - - // SupportUnmarshalDiscardUnknown reports whether UnmarshalOptions.DiscardUnknown is supported. - SupportUnmarshalDiscardUnknown -) - -// SizeInput is input to the Size method. -type SizeInput = struct { - pragma.NoUnkeyedLiterals - - Message protoreflect.Message - Flags MarshalInputFlags -} - -// SizeOutput is output from the Size method. -type SizeOutput = struct { - pragma.NoUnkeyedLiterals - - Size int -} - -// MarshalInput is input to the Marshal method. -type MarshalInput = struct { - pragma.NoUnkeyedLiterals - - Message protoreflect.Message - Buf []byte // output is appended to this buffer - Flags MarshalInputFlags -} - -// MarshalOutput is output from the Marshal method. -type MarshalOutput = struct { - pragma.NoUnkeyedLiterals - - Buf []byte // contains marshaled message -} - -// MarshalInputFlags configure the marshaler. -// Most flags correspond to fields in proto.MarshalOptions. -type MarshalInputFlags = uint8 - -const ( - MarshalDeterministic MarshalInputFlags = 1 << iota - MarshalUseCachedSize -) - -// UnmarshalInput is input to the Unmarshal method. -type UnmarshalInput = struct { - pragma.NoUnkeyedLiterals - - Message protoreflect.Message - Buf []byte // input buffer - Flags UnmarshalInputFlags - Resolver interface { - FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) - FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) - } - Depth int -} - -// UnmarshalOutput is output from the Unmarshal method. -type UnmarshalOutput = struct { - pragma.NoUnkeyedLiterals - - Flags UnmarshalOutputFlags -} - -// UnmarshalInputFlags configure the unmarshaler. -// Most flags correspond to fields in proto.UnmarshalOptions. -type UnmarshalInputFlags = uint8 - -const ( - UnmarshalDiscardUnknown UnmarshalInputFlags = 1 << iota -) - -// UnmarshalOutputFlags are output from the Unmarshal method. 
-type UnmarshalOutputFlags = uint8 - -const ( - // UnmarshalInitialized may be set on return if all required fields are known to be set. - // If unset, then it does not necessarily indicate that the message is uninitialized, - // only that its status could not be confirmed. - UnmarshalInitialized UnmarshalOutputFlags = 1 << iota -) - -// MergeInput is input to the Merge method. -type MergeInput = struct { - pragma.NoUnkeyedLiterals - - Source protoreflect.Message - Destination protoreflect.Message -} - -// MergeOutput is output from the Merge method. -type MergeOutput = struct { - pragma.NoUnkeyedLiterals - - Flags MergeOutputFlags -} - -// MergeOutputFlags are output from the Merge method. -type MergeOutputFlags = uint8 - -const ( - // MergeComplete reports whether the merge was performed. - // If unset, the merger must have made no changes to the destination. - MergeComplete MergeOutputFlags = 1 << iota -) - -// CheckInitializedInput is input to the CheckInitialized method. -type CheckInitializedInput = struct { - pragma.NoUnkeyedLiterals - - Message protoreflect.Message -} - -// CheckInitializedOutput is output from the CheckInitialized method. -type CheckInitializedOutput = struct { - pragma.NoUnkeyedLiterals -} diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go deleted file mode 100644 index 4a1ab7fb3..000000000 --- a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package protoimpl contains the default implementation for messages -// generated by protoc-gen-go. -// -// WARNING: This package should only ever be imported by generated messages. -// The compatibility agreement covers nothing except for functionality needed -// to keep existing generated messages operational. Breakages that occur due -// to unauthorized usages of this package are not the author's responsibility. -package protoimpl - -import ( - "google.golang.org/protobuf/internal/filedesc" - "google.golang.org/protobuf/internal/filetype" - "google.golang.org/protobuf/internal/impl" -) - -// UnsafeEnabled specifies whether package unsafe can be used. -const UnsafeEnabled = impl.UnsafeEnabled - -type ( - // Types used by generated code in init functions. - DescBuilder = filedesc.Builder - TypeBuilder = filetype.Builder - - // Types used by generated code to implement EnumType, MessageType, and ExtensionType. - EnumInfo = impl.EnumInfo - MessageInfo = impl.MessageInfo - ExtensionInfo = impl.ExtensionInfo - - // Types embedded in generated messages. - MessageState = impl.MessageState - SizeCache = impl.SizeCache - WeakFields = impl.WeakFields - UnknownFields = impl.UnknownFields - ExtensionFields = impl.ExtensionFields - ExtensionFieldV1 = impl.ExtensionField - - Pointer = impl.Pointer -) - -var X impl.Export diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go deleted file mode 100644 index ff094e1ba..000000000 --- a/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package protoimpl - -import ( - "google.golang.org/protobuf/internal/version" -) - -const ( - // MaxVersion is the maximum supported version for generated .pb.go files. - // It is always the current version of the module. - MaxVersion = version.Minor - - // GenVersion is the runtime version required by generated .pb.go files. - // This is incremented when generated code relies on new functionality - // in the runtime. - GenVersion = 20 - - // MinVersion is the minimum supported version for generated .pb.go files. - // This is incremented when the runtime drops support for old code. - MinVersion = 0 -) - -// EnforceVersion is used by code generated by protoc-gen-go -// to statically enforce minimum and maximum versions of this package. -// A compilation failure implies either that: -// * the runtime package is too old and needs to be updated OR -// * the generated code is too old and needs to be regenerated. -// -// The runtime package can be upgraded by running: -// go get google.golang.org/protobuf -// -// The generated code can be regenerated by running: -// protoc --go_out=${PROTOC_GEN_GO_ARGS} ${PROTO_FILES} -// -// Example usage by generated code: -// const ( -// // Verify that this generated code is sufficiently up-to-date. -// _ = protoimpl.EnforceVersion(genVersion - protoimpl.MinVersion) -// // Verify that runtime/protoimpl is sufficiently up-to-date. -// _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - genVersion) -// ) -// -// The genVersion is the current minor version used to generated the code. -// This compile-time check relies on negative integer overflow of a uint -// being a compilation failure (guaranteed by the Go specification). -type EnforceVersion uint - -// This enforces the following invariant: -// MinVersion ≤ GenVersion ≤ MaxVersion -const ( - _ = EnforceVersion(GenVersion - MinVersion) - _ = EnforceVersion(MaxVersion - GenVersion) -) diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go deleted file mode 100644 index abe4ab511..000000000 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ /dev/null @@ -1,3957 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Author: kenton@google.com (Kenton Varda) -// Based on original Protocol Buffers design by -// Sanjay Ghemawat, Jeff Dean, and others. -// -// The messages in this file describe the definitions found in .proto files. -// A valid .proto file can be translated directly to a FileDescriptorProto -// without any other information (e.g. without reading its imports). - -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/descriptor.proto - -package descriptorpb - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -type FieldDescriptorProto_Type int32 - -const ( - // 0 is reserved for errors. - // Order is weird for historical reasons. - FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 - FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if - // negative values are likely. - FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 - FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if - // negative values are likely. - FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 - FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 - FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 - FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 - FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 - // Tag-delimited aggregate. - // Group type is deprecated and not supported in proto3. However, Proto3 - // implementations should still be able to parse the group wire format and - // treat group fields as unknown fields. - FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 - FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 // Length-delimited aggregate. - // New in version 2. - FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 - FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 - FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 - FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 - FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 - FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 // Uses ZigZag encoding. - FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 // Uses ZigZag encoding. -) - -// Enum value maps for FieldDescriptorProto_Type. 
-var ( - FieldDescriptorProto_Type_name = map[int32]string{ - 1: "TYPE_DOUBLE", - 2: "TYPE_FLOAT", - 3: "TYPE_INT64", - 4: "TYPE_UINT64", - 5: "TYPE_INT32", - 6: "TYPE_FIXED64", - 7: "TYPE_FIXED32", - 8: "TYPE_BOOL", - 9: "TYPE_STRING", - 10: "TYPE_GROUP", - 11: "TYPE_MESSAGE", - 12: "TYPE_BYTES", - 13: "TYPE_UINT32", - 14: "TYPE_ENUM", - 15: "TYPE_SFIXED32", - 16: "TYPE_SFIXED64", - 17: "TYPE_SINT32", - 18: "TYPE_SINT64", - } - FieldDescriptorProto_Type_value = map[string]int32{ - "TYPE_DOUBLE": 1, - "TYPE_FLOAT": 2, - "TYPE_INT64": 3, - "TYPE_UINT64": 4, - "TYPE_INT32": 5, - "TYPE_FIXED64": 6, - "TYPE_FIXED32": 7, - "TYPE_BOOL": 8, - "TYPE_STRING": 9, - "TYPE_GROUP": 10, - "TYPE_MESSAGE": 11, - "TYPE_BYTES": 12, - "TYPE_UINT32": 13, - "TYPE_ENUM": 14, - "TYPE_SFIXED32": 15, - "TYPE_SFIXED64": 16, - "TYPE_SINT32": 17, - "TYPE_SINT64": 18, - } -) - -func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { - p := new(FieldDescriptorProto_Type) - *p = x - return p -} - -func (x FieldDescriptorProto_Type) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() -} - -func (FieldDescriptorProto_Type) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[0] -} - -func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. -func (x *FieldDescriptorProto_Type) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = FieldDescriptorProto_Type(num) - return nil -} - -// Deprecated: Use FieldDescriptorProto_Type.Descriptor instead. -func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{4, 0} -} - -type FieldDescriptorProto_Label int32 - -const ( - // 0 is reserved for errors - FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 - FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 - FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 -) - -// Enum value maps for FieldDescriptorProto_Label. -var ( - FieldDescriptorProto_Label_name = map[int32]string{ - 1: "LABEL_OPTIONAL", - 2: "LABEL_REQUIRED", - 3: "LABEL_REPEATED", - } - FieldDescriptorProto_Label_value = map[string]int32{ - "LABEL_OPTIONAL": 1, - "LABEL_REQUIRED": 2, - "LABEL_REPEATED": 3, - } -) - -func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { - p := new(FieldDescriptorProto_Label) - *p = x - return p -} - -func (x FieldDescriptorProto_Label) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() -} - -func (FieldDescriptorProto_Label) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[1] -} - -func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. 
-func (x *FieldDescriptorProto_Label) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = FieldDescriptorProto_Label(num) - return nil -} - -// Deprecated: Use FieldDescriptorProto_Label.Descriptor instead. -func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{4, 1} -} - -// Generated classes can be optimized for speed or code size. -type FileOptions_OptimizeMode int32 - -const ( - FileOptions_SPEED FileOptions_OptimizeMode = 1 // Generate complete code for parsing, serialization, - // etc. - FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 // Use ReflectionOps to implement these methods. - FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 // Generate code using MessageLite and the lite runtime. -) - -// Enum value maps for FileOptions_OptimizeMode. -var ( - FileOptions_OptimizeMode_name = map[int32]string{ - 1: "SPEED", - 2: "CODE_SIZE", - 3: "LITE_RUNTIME", - } - FileOptions_OptimizeMode_value = map[string]int32{ - "SPEED": 1, - "CODE_SIZE": 2, - "LITE_RUNTIME": 3, - } -) - -func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { - p := new(FileOptions_OptimizeMode) - *p = x - return p -} - -func (x FileOptions_OptimizeMode) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() -} - -func (FileOptions_OptimizeMode) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[2] -} - -func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. -func (x *FileOptions_OptimizeMode) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = FileOptions_OptimizeMode(num) - return nil -} - -// Deprecated: Use FileOptions_OptimizeMode.Descriptor instead. -func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{10, 0} -} - -type FieldOptions_CType int32 - -const ( - // Default mode. - FieldOptions_STRING FieldOptions_CType = 0 - FieldOptions_CORD FieldOptions_CType = 1 - FieldOptions_STRING_PIECE FieldOptions_CType = 2 -) - -// Enum value maps for FieldOptions_CType. -var ( - FieldOptions_CType_name = map[int32]string{ - 0: "STRING", - 1: "CORD", - 2: "STRING_PIECE", - } - FieldOptions_CType_value = map[string]int32{ - "STRING": 0, - "CORD": 1, - "STRING_PIECE": 2, - } -) - -func (x FieldOptions_CType) Enum() *FieldOptions_CType { - p := new(FieldOptions_CType) - *p = x - return p -} - -func (x FieldOptions_CType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() -} - -func (FieldOptions_CType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[3] -} - -func (x FieldOptions_CType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. 
-func (x *FieldOptions_CType) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = FieldOptions_CType(num) - return nil -} - -// Deprecated: Use FieldOptions_CType.Descriptor instead. -func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 0} -} - -type FieldOptions_JSType int32 - -const ( - // Use the default type. - FieldOptions_JS_NORMAL FieldOptions_JSType = 0 - // Use JavaScript strings. - FieldOptions_JS_STRING FieldOptions_JSType = 1 - // Use JavaScript numbers. - FieldOptions_JS_NUMBER FieldOptions_JSType = 2 -) - -// Enum value maps for FieldOptions_JSType. -var ( - FieldOptions_JSType_name = map[int32]string{ - 0: "JS_NORMAL", - 1: "JS_STRING", - 2: "JS_NUMBER", - } - FieldOptions_JSType_value = map[string]int32{ - "JS_NORMAL": 0, - "JS_STRING": 1, - "JS_NUMBER": 2, - } -) - -func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { - p := new(FieldOptions_JSType) - *p = x - return p -} - -func (x FieldOptions_JSType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() -} - -func (FieldOptions_JSType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[4] -} - -func (x FieldOptions_JSType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. -func (x *FieldOptions_JSType) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = FieldOptions_JSType(num) - return nil -} - -// Deprecated: Use FieldOptions_JSType.Descriptor instead. -func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 1} -} - -// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, -// or neither? HTTP based RPC implementation may choose GET verb for safe -// methods, and PUT verb for idempotent methods instead of the default POST. -type MethodOptions_IdempotencyLevel int32 - -const ( - MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 - MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 // implies idempotent - MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 // idempotent, but may have side effects -) - -// Enum value maps for MethodOptions_IdempotencyLevel. 
-var ( - MethodOptions_IdempotencyLevel_name = map[int32]string{ - 0: "IDEMPOTENCY_UNKNOWN", - 1: "NO_SIDE_EFFECTS", - 2: "IDEMPOTENT", - } - MethodOptions_IdempotencyLevel_value = map[string]int32{ - "IDEMPOTENCY_UNKNOWN": 0, - "NO_SIDE_EFFECTS": 1, - "IDEMPOTENT": 2, - } -) - -func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { - p := new(MethodOptions_IdempotencyLevel) - *p = x - return p -} - -func (x MethodOptions_IdempotencyLevel) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() -} - -func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[5] -} - -func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. -func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = MethodOptions_IdempotencyLevel(num) - return nil -} - -// Deprecated: Use MethodOptions_IdempotencyLevel.Descriptor instead. -func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17, 0} -} - -// The protocol compiler can output a FileDescriptorSet containing the .proto -// files it parses. -type FileDescriptorSet struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` -} - -func (x *FileDescriptorSet) Reset() { - *x = FileDescriptorSet{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FileDescriptorSet) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FileDescriptorSet) ProtoMessage() {} - -func (x *FileDescriptorSet) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FileDescriptorSet.ProtoReflect.Descriptor instead. -func (*FileDescriptorSet) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0} -} - -func (x *FileDescriptorSet) GetFile() []*FileDescriptorProto { - if x != nil { - return x.File - } - return nil -} - -// Describes a complete .proto file. -type FileDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // file name, relative to root of source tree - Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` // e.g. "foo", "foo.bar", etc. - // Names of files imported by this file. - Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` - // Indexes of the public imported files in the dependency list above. 
- PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` - // Indexes of the weak imported files in the dependency list. - // For Google-internal migration only. Do not use. - WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` - // All top-level definitions in this file. - MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` - EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` - Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` - Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` - Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - // This field contains optional information about the original source code. - // You may safely remove this entire field without harming runtime - // functionality of the descriptors -- the information is needed only by - // development tools. - SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` - // The syntax of the proto file. - // The supported values are "proto2" and "proto3". - Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` -} - -func (x *FileDescriptorProto) Reset() { - *x = FileDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FileDescriptorProto) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FileDescriptorProto) ProtoMessage() {} - -func (x *FileDescriptorProto) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FileDescriptorProto.ProtoReflect.Descriptor instead. 
-func (*FileDescriptorProto) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{1} -} - -func (x *FileDescriptorProto) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *FileDescriptorProto) GetPackage() string { - if x != nil && x.Package != nil { - return *x.Package - } - return "" -} - -func (x *FileDescriptorProto) GetDependency() []string { - if x != nil { - return x.Dependency - } - return nil -} - -func (x *FileDescriptorProto) GetPublicDependency() []int32 { - if x != nil { - return x.PublicDependency - } - return nil -} - -func (x *FileDescriptorProto) GetWeakDependency() []int32 { - if x != nil { - return x.WeakDependency - } - return nil -} - -func (x *FileDescriptorProto) GetMessageType() []*DescriptorProto { - if x != nil { - return x.MessageType - } - return nil -} - -func (x *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { - if x != nil { - return x.EnumType - } - return nil -} - -func (x *FileDescriptorProto) GetService() []*ServiceDescriptorProto { - if x != nil { - return x.Service - } - return nil -} - -func (x *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { - if x != nil { - return x.Extension - } - return nil -} - -func (x *FileDescriptorProto) GetOptions() *FileOptions { - if x != nil { - return x.Options - } - return nil -} - -func (x *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { - if x != nil { - return x.SourceCodeInfo - } - return nil -} - -func (x *FileDescriptorProto) GetSyntax() string { - if x != nil && x.Syntax != nil { - return *x.Syntax - } - return "" -} - -// Describes a message type. -type DescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` - Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` - NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` - EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` - ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` - OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` - Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` - ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` - // Reserved field names, which may not be used by fields in the same message. - // A given name may only be reserved once. 
- ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` -} - -func (x *DescriptorProto) Reset() { - *x = DescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DescriptorProto) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DescriptorProto) ProtoMessage() {} - -func (x *DescriptorProto) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DescriptorProto.ProtoReflect.Descriptor instead. -func (*DescriptorProto) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{2} -} - -func (x *DescriptorProto) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *DescriptorProto) GetField() []*FieldDescriptorProto { - if x != nil { - return x.Field - } - return nil -} - -func (x *DescriptorProto) GetExtension() []*FieldDescriptorProto { - if x != nil { - return x.Extension - } - return nil -} - -func (x *DescriptorProto) GetNestedType() []*DescriptorProto { - if x != nil { - return x.NestedType - } - return nil -} - -func (x *DescriptorProto) GetEnumType() []*EnumDescriptorProto { - if x != nil { - return x.EnumType - } - return nil -} - -func (x *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { - if x != nil { - return x.ExtensionRange - } - return nil -} - -func (x *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { - if x != nil { - return x.OneofDecl - } - return nil -} - -func (x *DescriptorProto) GetOptions() *MessageOptions { - if x != nil { - return x.Options - } - return nil -} - -func (x *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { - if x != nil { - return x.ReservedRange - } - return nil -} - -func (x *DescriptorProto) GetReservedName() []string { - if x != nil { - return x.ReservedName - } - return nil -} - -type ExtensionRangeOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - - // The parser stores options it doesn't recognize here. See above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` -} - -func (x *ExtensionRangeOptions) Reset() { - *x = ExtensionRangeOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ExtensionRangeOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExtensionRangeOptions) ProtoMessage() {} - -func (x *ExtensionRangeOptions) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExtensionRangeOptions.ProtoReflect.Descriptor instead. 
-func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{3} -} - -func (x *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { - if x != nil { - return x.UninterpretedOption - } - return nil -} - -// Describes a field within a message. -type FieldDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` - Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` - // If type_name is set, this need not be set. If both this and type_name - // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. - Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` - // For message and enum types, this is the name of the type. If the name - // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping - // rules are used to find the type (i.e. first the nested types within this - // message are searched, then within the parent, on up to the root - // namespace). - TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` - // For extensions, this is the name of the type being extended. It is - // resolved in the same manner as type_name. - Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` - // For numeric types, contains the original text representation of the value. - // For booleans, "true" or "false". - // For strings, contains the default text contents (not escaped in any way). - // For bytes, contains the C escaped value. All bytes >= 128 are escaped. - // TODO(kenton): Base-64 encode? - DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` - // If set, gives the index of a oneof in the containing type's oneof_decl - // list. This field is a member of that oneof. - OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` - // JSON name of this field. The value is set by protocol compiler. If the - // user has set a "json_name" option on this field, that option's value - // will be used. Otherwise, it's deduced from the field's name by converting - // it to camelCase. - JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` - Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - // If true, this is a proto3 "optional". When a proto3 field is optional, it - // tracks presence regardless of field type. - // - // When proto3_optional is true, this field must be belong to a oneof to - // signal to old proto3 clients that presence is tracked for this field. This - // oneof is known as a "synthetic" oneof, and this field must be its sole - // member (each proto3 optional field gets its own synthetic oneof). Synthetic - // oneofs exist in the descriptor only, and do not generate any API. Synthetic - // oneofs must be ordered after all "real" oneofs. - // - // For message fields, proto3_optional doesn't create any semantic change, - // since non-repeated message fields always track presence. 
However it still - // indicates the semantic detail of whether the user wrote "optional" or not. - // This can be useful for round-tripping the .proto file. For consistency we - // give message fields a synthetic oneof also, even though it is not required - // to track presence. This is especially important because the parser can't - // tell if a field is a message or an enum, so it must always create a - // synthetic oneof. - // - // Proto2 optional fields do not set this flag, because they already indicate - // optional with `LABEL_OPTIONAL`. - Proto3Optional *bool `protobuf:"varint,17,opt,name=proto3_optional,json=proto3Optional" json:"proto3_optional,omitempty"` -} - -func (x *FieldDescriptorProto) Reset() { - *x = FieldDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FieldDescriptorProto) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FieldDescriptorProto) ProtoMessage() {} - -func (x *FieldDescriptorProto) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FieldDescriptorProto.ProtoReflect.Descriptor instead. -func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{4} -} - -func (x *FieldDescriptorProto) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *FieldDescriptorProto) GetNumber() int32 { - if x != nil && x.Number != nil { - return *x.Number - } - return 0 -} - -func (x *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { - if x != nil && x.Label != nil { - return *x.Label - } - return FieldDescriptorProto_LABEL_OPTIONAL -} - -func (x *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { - if x != nil && x.Type != nil { - return *x.Type - } - return FieldDescriptorProto_TYPE_DOUBLE -} - -func (x *FieldDescriptorProto) GetTypeName() string { - if x != nil && x.TypeName != nil { - return *x.TypeName - } - return "" -} - -func (x *FieldDescriptorProto) GetExtendee() string { - if x != nil && x.Extendee != nil { - return *x.Extendee - } - return "" -} - -func (x *FieldDescriptorProto) GetDefaultValue() string { - if x != nil && x.DefaultValue != nil { - return *x.DefaultValue - } - return "" -} - -func (x *FieldDescriptorProto) GetOneofIndex() int32 { - if x != nil && x.OneofIndex != nil { - return *x.OneofIndex - } - return 0 -} - -func (x *FieldDescriptorProto) GetJsonName() string { - if x != nil && x.JsonName != nil { - return *x.JsonName - } - return "" -} - -func (x *FieldDescriptorProto) GetOptions() *FieldOptions { - if x != nil { - return x.Options - } - return nil -} - -func (x *FieldDescriptorProto) GetProto3Optional() bool { - if x != nil && x.Proto3Optional != nil { - return *x.Proto3Optional - } - return false -} - -// Describes a oneof. 
-type OneofDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` -} - -func (x *OneofDescriptorProto) Reset() { - *x = OneofDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *OneofDescriptorProto) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*OneofDescriptorProto) ProtoMessage() {} - -func (x *OneofDescriptorProto) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use OneofDescriptorProto.ProtoReflect.Descriptor instead. -func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{5} -} - -func (x *OneofDescriptorProto) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *OneofDescriptorProto) GetOptions() *OneofOptions { - if x != nil { - return x.Options - } - return nil -} - -// Describes an enum type. -type EnumDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` - Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - // Range of reserved numeric values. Reserved numeric values may not be used - // by enum values in the same enum declaration. Reserved ranges may not - // overlap. - ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` - // Reserved enum value names, which may not be reused. A given name may only - // be reserved once. - ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` -} - -func (x *EnumDescriptorProto) Reset() { - *x = EnumDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *EnumDescriptorProto) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EnumDescriptorProto) ProtoMessage() {} - -func (x *EnumDescriptorProto) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EnumDescriptorProto.ProtoReflect.Descriptor instead. 
-func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{6} -} - -func (x *EnumDescriptorProto) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { - if x != nil { - return x.Value - } - return nil -} - -func (x *EnumDescriptorProto) GetOptions() *EnumOptions { - if x != nil { - return x.Options - } - return nil -} - -func (x *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { - if x != nil { - return x.ReservedRange - } - return nil -} - -func (x *EnumDescriptorProto) GetReservedName() []string { - if x != nil { - return x.ReservedName - } - return nil -} - -// Describes a value within an enum. -type EnumValueDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` - Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` -} - -func (x *EnumValueDescriptorProto) Reset() { - *x = EnumValueDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *EnumValueDescriptorProto) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EnumValueDescriptorProto) ProtoMessage() {} - -func (x *EnumValueDescriptorProto) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EnumValueDescriptorProto.ProtoReflect.Descriptor instead. -func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{7} -} - -func (x *EnumValueDescriptorProto) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *EnumValueDescriptorProto) GetNumber() int32 { - if x != nil && x.Number != nil { - return *x.Number - } - return 0 -} - -func (x *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { - if x != nil { - return x.Options - } - return nil -} - -// Describes a service. 
-type ServiceDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` - Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` -} - -func (x *ServiceDescriptorProto) Reset() { - *x = ServiceDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServiceDescriptorProto) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServiceDescriptorProto) ProtoMessage() {} - -func (x *ServiceDescriptorProto) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServiceDescriptorProto.ProtoReflect.Descriptor instead. -func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{8} -} - -func (x *ServiceDescriptorProto) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { - if x != nil { - return x.Method - } - return nil -} - -func (x *ServiceDescriptorProto) GetOptions() *ServiceOptions { - if x != nil { - return x.Options - } - return nil -} - -// Describes a method of a service. -type MethodDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Input and output type names. These are resolved in the same way as - // FieldDescriptorProto.type_name, but must refer to a message type. - InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` - OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` - Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` - // Identifies if client streams multiple client messages - ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` - // Identifies if server streams multiple server messages - ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` -} - -// Default values for MethodDescriptorProto fields. 
-const ( - Default_MethodDescriptorProto_ClientStreaming = bool(false) - Default_MethodDescriptorProto_ServerStreaming = bool(false) -) - -func (x *MethodDescriptorProto) Reset() { - *x = MethodDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MethodDescriptorProto) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MethodDescriptorProto) ProtoMessage() {} - -func (x *MethodDescriptorProto) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MethodDescriptorProto.ProtoReflect.Descriptor instead. -func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{9} -} - -func (x *MethodDescriptorProto) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *MethodDescriptorProto) GetInputType() string { - if x != nil && x.InputType != nil { - return *x.InputType - } - return "" -} - -func (x *MethodDescriptorProto) GetOutputType() string { - if x != nil && x.OutputType != nil { - return *x.OutputType - } - return "" -} - -func (x *MethodDescriptorProto) GetOptions() *MethodOptions { - if x != nil { - return x.Options - } - return nil -} - -func (x *MethodDescriptorProto) GetClientStreaming() bool { - if x != nil && x.ClientStreaming != nil { - return *x.ClientStreaming - } - return Default_MethodDescriptorProto_ClientStreaming -} - -func (x *MethodDescriptorProto) GetServerStreaming() bool { - if x != nil && x.ServerStreaming != nil { - return *x.ServerStreaming - } - return Default_MethodDescriptorProto_ServerStreaming -} - -type FileOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - - // Sets the Java package where classes generated from this .proto will be - // placed. By default, the proto package is used, but this is often - // inappropriate because proto packages do not normally start with backwards - // domain names. - JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` - // If set, all the classes from the .proto file are wrapped in a single - // outer class with the given name. This applies to both Proto1 - // (equivalent to the old "--one_java_file" option) and Proto2 (where - // a .proto always translates to a single class, but you may want to - // explicitly choose the class name). - JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` - // If set true, then the Java code generator will generate a separate .java - // file for each top-level message, enum, and service defined in the .proto - // file. Thus, these types will *not* be nested inside the outer class - // named by java_outer_classname. However, the outer class will still be - // generated to contain the file's getDescriptor() method as well as any - // top-level extensions defined in the file. 
- JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` - // This option does nothing. - // - // Deprecated: Do not use. - JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` - // If set true, then the Java2 code generator will generate code that - // throws an exception whenever an attempt is made to assign a non-UTF-8 - // byte sequence to a string field. - // Message reflection will do the same. - // However, an extension field still accepts non-UTF-8 byte sequences. - // This option has no effect on when used with the lite runtime. - JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` - OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` - // Sets the Go package where structs generated from this .proto will be - // placed. If omitted, the Go package will be derived from the following: - // - The basename of the package import path, if provided. - // - Otherwise, the package statement in the .proto file, if present. - // - Otherwise, the basename of the .proto file, without extension. - GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` - // Should generic services be generated in each language? "Generic" services - // are not specific to any particular RPC system. They are generated by the - // main code generators in each language (without additional plugins). - // Generic services were the only kind of service generation supported by - // early versions of google.protobuf. - // - // Generic services are now considered deprecated in favor of using plugins - // that generate code specific to your particular RPC system. Therefore, - // these default to false. Old code which depends on generic services should - // explicitly set them to true. - CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` - JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` - PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` - PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` - // Is this file deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for everything in the file, or it will be completely ignored; in the very - // least, this is a formalization for deprecating files. - Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // Enables the use of arenas for the proto messages in this file. This applies - // only to generated classes for C++. - CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=1" json:"cc_enable_arenas,omitempty"` - // Sets the objective c class prefix which is prepended to all objective c - // generated classes from this .proto. There is no default. 
- ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` - // Namespace for generated classes; defaults to the package. - CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` - // By default Swift generators will take the proto package and CamelCase it - // replacing '.' with underscore and use that to prefix the types/symbols - // defined. When this options is provided, they will use this value instead - // to prefix the types/symbols defined. - SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` - // Sets the php class prefix which is prepended to all php generated classes - // from this .proto. Default is empty. - PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` - // Use this option to change the namespace of php generated classes. Default - // is empty. When this option is empty, the package name will be used for - // determining the namespace. - PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` - // Use this option to change the namespace of php generated metadata classes. - // Default is empty. When this option is empty, the proto file name will be - // used for determining the namespace. - PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` - // Use this option to change the package of ruby generated classes. Default - // is empty. When this option is not set, the package name will be used for - // determining the ruby package. - RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` - // The parser stores options it doesn't recognize here. - // See the documentation for the "Options" section above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` -} - -// Default values for FileOptions fields. -const ( - Default_FileOptions_JavaMultipleFiles = bool(false) - Default_FileOptions_JavaStringCheckUtf8 = bool(false) - Default_FileOptions_OptimizeFor = FileOptions_SPEED - Default_FileOptions_CcGenericServices = bool(false) - Default_FileOptions_JavaGenericServices = bool(false) - Default_FileOptions_PyGenericServices = bool(false) - Default_FileOptions_PhpGenericServices = bool(false) - Default_FileOptions_Deprecated = bool(false) - Default_FileOptions_CcEnableArenas = bool(true) -) - -func (x *FileOptions) Reset() { - *x = FileOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FileOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FileOptions) ProtoMessage() {} - -func (x *FileOptions) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FileOptions.ProtoReflect.Descriptor instead. 
-func (*FileOptions) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{10} -} - -func (x *FileOptions) GetJavaPackage() string { - if x != nil && x.JavaPackage != nil { - return *x.JavaPackage - } - return "" -} - -func (x *FileOptions) GetJavaOuterClassname() string { - if x != nil && x.JavaOuterClassname != nil { - return *x.JavaOuterClassname - } - return "" -} - -func (x *FileOptions) GetJavaMultipleFiles() bool { - if x != nil && x.JavaMultipleFiles != nil { - return *x.JavaMultipleFiles - } - return Default_FileOptions_JavaMultipleFiles -} - -// Deprecated: Do not use. -func (x *FileOptions) GetJavaGenerateEqualsAndHash() bool { - if x != nil && x.JavaGenerateEqualsAndHash != nil { - return *x.JavaGenerateEqualsAndHash - } - return false -} - -func (x *FileOptions) GetJavaStringCheckUtf8() bool { - if x != nil && x.JavaStringCheckUtf8 != nil { - return *x.JavaStringCheckUtf8 - } - return Default_FileOptions_JavaStringCheckUtf8 -} - -func (x *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { - if x != nil && x.OptimizeFor != nil { - return *x.OptimizeFor - } - return Default_FileOptions_OptimizeFor -} - -func (x *FileOptions) GetGoPackage() string { - if x != nil && x.GoPackage != nil { - return *x.GoPackage - } - return "" -} - -func (x *FileOptions) GetCcGenericServices() bool { - if x != nil && x.CcGenericServices != nil { - return *x.CcGenericServices - } - return Default_FileOptions_CcGenericServices -} - -func (x *FileOptions) GetJavaGenericServices() bool { - if x != nil && x.JavaGenericServices != nil { - return *x.JavaGenericServices - } - return Default_FileOptions_JavaGenericServices -} - -func (x *FileOptions) GetPyGenericServices() bool { - if x != nil && x.PyGenericServices != nil { - return *x.PyGenericServices - } - return Default_FileOptions_PyGenericServices -} - -func (x *FileOptions) GetPhpGenericServices() bool { - if x != nil && x.PhpGenericServices != nil { - return *x.PhpGenericServices - } - return Default_FileOptions_PhpGenericServices -} - -func (x *FileOptions) GetDeprecated() bool { - if x != nil && x.Deprecated != nil { - return *x.Deprecated - } - return Default_FileOptions_Deprecated -} - -func (x *FileOptions) GetCcEnableArenas() bool { - if x != nil && x.CcEnableArenas != nil { - return *x.CcEnableArenas - } - return Default_FileOptions_CcEnableArenas -} - -func (x *FileOptions) GetObjcClassPrefix() string { - if x != nil && x.ObjcClassPrefix != nil { - return *x.ObjcClassPrefix - } - return "" -} - -func (x *FileOptions) GetCsharpNamespace() string { - if x != nil && x.CsharpNamespace != nil { - return *x.CsharpNamespace - } - return "" -} - -func (x *FileOptions) GetSwiftPrefix() string { - if x != nil && x.SwiftPrefix != nil { - return *x.SwiftPrefix - } - return "" -} - -func (x *FileOptions) GetPhpClassPrefix() string { - if x != nil && x.PhpClassPrefix != nil { - return *x.PhpClassPrefix - } - return "" -} - -func (x *FileOptions) GetPhpNamespace() string { - if x != nil && x.PhpNamespace != nil { - return *x.PhpNamespace - } - return "" -} - -func (x *FileOptions) GetPhpMetadataNamespace() string { - if x != nil && x.PhpMetadataNamespace != nil { - return *x.PhpMetadataNamespace - } - return "" -} - -func (x *FileOptions) GetRubyPackage() string { - if x != nil && x.RubyPackage != nil { - return *x.RubyPackage - } - return "" -} - -func (x *FileOptions) GetUninterpretedOption() []*UninterpretedOption { - if x != nil { - return x.UninterpretedOption - } - return nil -} - -type 
MessageOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - - // Set true to use the old proto1 MessageSet wire format for extensions. - // This is provided for backwards-compatibility with the MessageSet wire - // format. You should not use this for any other reason: It's less - // efficient, has fewer features, and is more complicated. - // - // The message must be defined exactly as follows: - // message Foo { - // option message_set_wire_format = true; - // extensions 4 to max; - // } - // Note that the message cannot have any defined fields; MessageSets only - // have extensions. - // - // All extensions of your type must be singular messages; e.g. they cannot - // be int32s, enums, or repeated messages. - // - // Because this is an option, the above two restrictions are not enforced by - // the protocol compiler. - MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` - // Disables the generation of the standard "descriptor()" accessor, which can - // conflict with a field of the same name. This is meant to make migration - // from proto1 easier; new code should avoid fields named "descriptor". - NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` - // Is this message deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the message, or it will be completely ignored; in the very least, - // this is a formalization for deprecating messages. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // Whether the message is an automatically generated map entry type for the - // maps field. - // - // For maps fields: - // map map_field = 1; - // The parsed descriptor looks like: - // message MapFieldEntry { - // option map_entry = true; - // optional KeyType key = 1; - // optional ValueType value = 2; - // } - // repeated MapFieldEntry map_field = 1; - // - // Implementations may choose not to generate the map_entry=true message, but - // use a native map in the target language to hold the keys and values. - // The reflection APIs in such implementations still need to work as - // if the field is a repeated message field. - // - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. - MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` - // The parser stores options it doesn't recognize here. See above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` -} - -// Default values for MessageOptions fields. 
-const ( - Default_MessageOptions_MessageSetWireFormat = bool(false) - Default_MessageOptions_NoStandardDescriptorAccessor = bool(false) - Default_MessageOptions_Deprecated = bool(false) -) - -func (x *MessageOptions) Reset() { - *x = MessageOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MessageOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MessageOptions) ProtoMessage() {} - -func (x *MessageOptions) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MessageOptions.ProtoReflect.Descriptor instead. -func (*MessageOptions) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{11} -} - -func (x *MessageOptions) GetMessageSetWireFormat() bool { - if x != nil && x.MessageSetWireFormat != nil { - return *x.MessageSetWireFormat - } - return Default_MessageOptions_MessageSetWireFormat -} - -func (x *MessageOptions) GetNoStandardDescriptorAccessor() bool { - if x != nil && x.NoStandardDescriptorAccessor != nil { - return *x.NoStandardDescriptorAccessor - } - return Default_MessageOptions_NoStandardDescriptorAccessor -} - -func (x *MessageOptions) GetDeprecated() bool { - if x != nil && x.Deprecated != nil { - return *x.Deprecated - } - return Default_MessageOptions_Deprecated -} - -func (x *MessageOptions) GetMapEntry() bool { - if x != nil && x.MapEntry != nil { - return *x.MapEntry - } - return false -} - -func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { - if x != nil { - return x.UninterpretedOption - } - return nil -} - -type FieldOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - - // The ctype option instructs the C++ code generator to use a different - // representation of the field than it normally would. See the specific - // options below. This option is not yet implemented in the open source - // release -- sorry, we'll try to include it in a future version! - Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` - // The packed option can be enabled for repeated primitive fields to enable - // a more efficient representation on the wire. Rather than repeatedly - // writing the tag and type for each element, the entire array is encoded as - // a single length-delimited blob. In proto3, only explicit setting it to - // false will avoid using packed encoding. - Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` - // The jstype option determines the JavaScript type used for values of the - // field. The option is permitted only for 64 bit integral and fixed types - // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING - // is represented as JavaScript string, which avoids loss of precision that - // can happen when a large value is converted to a floating point JavaScript. - // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to - // use the JavaScript "number" type. 
The behavior of the default option - // JS_NORMAL is implementation dependent. - // - // This option is an enum to permit additional types to be added, e.g. - // goog.math.Integer. - Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` - // Should this field be parsed lazily? Lazy applies only to message-type - // fields. It means that when the outer message is initially parsed, the - // inner message's contents will not be parsed but instead stored in encoded - // form. The inner message will actually be parsed when it is first accessed. - // - // This is only a hint. Implementations are free to choose whether to use - // eager or lazy parsing regardless of the value of this option. However, - // setting this option true suggests that the protocol author believes that - // using lazy parsing on this field is worth the additional bookkeeping - // overhead typically needed to implement it. - // - // This option does not affect the public interface of any generated code; - // all method signatures remain the same. Furthermore, thread-safety of the - // interface is not affected by this option; const methods remain safe to - // call from multiple threads concurrently, while non-const methods continue - // to require exclusive access. - // - // - // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outer message - // may return true even if the inner message has missing required fields. - // This is necessary because otherwise the inner message would have to be - // parsed in order to perform the check, defeating the purpose of lazy - // parsing. An implementation which chooses not to check required fields - // must be consistent about it. That is, for any particular sub-message, the - // implementation must either *always* check its required fields, or *never* - // check its required fields, regardless of whether or not the message has - // been parsed. - Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` - // Is this field deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for accessors, or it will be completely ignored; in the very least, this - // is a formalization for deprecating fields. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // For Google-internal migration only. Do not use. - Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` - // The parser stores options it doesn't recognize here. See above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` -} - -// Default values for FieldOptions fields. 
-const ( - Default_FieldOptions_Ctype = FieldOptions_STRING - Default_FieldOptions_Jstype = FieldOptions_JS_NORMAL - Default_FieldOptions_Lazy = bool(false) - Default_FieldOptions_Deprecated = bool(false) - Default_FieldOptions_Weak = bool(false) -) - -func (x *FieldOptions) Reset() { - *x = FieldOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FieldOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FieldOptions) ProtoMessage() {} - -func (x *FieldOptions) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FieldOptions.ProtoReflect.Descriptor instead. -func (*FieldOptions) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12} -} - -func (x *FieldOptions) GetCtype() FieldOptions_CType { - if x != nil && x.Ctype != nil { - return *x.Ctype - } - return Default_FieldOptions_Ctype -} - -func (x *FieldOptions) GetPacked() bool { - if x != nil && x.Packed != nil { - return *x.Packed - } - return false -} - -func (x *FieldOptions) GetJstype() FieldOptions_JSType { - if x != nil && x.Jstype != nil { - return *x.Jstype - } - return Default_FieldOptions_Jstype -} - -func (x *FieldOptions) GetLazy() bool { - if x != nil && x.Lazy != nil { - return *x.Lazy - } - return Default_FieldOptions_Lazy -} - -func (x *FieldOptions) GetDeprecated() bool { - if x != nil && x.Deprecated != nil { - return *x.Deprecated - } - return Default_FieldOptions_Deprecated -} - -func (x *FieldOptions) GetWeak() bool { - if x != nil && x.Weak != nil { - return *x.Weak - } - return Default_FieldOptions_Weak -} - -func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { - if x != nil { - return x.UninterpretedOption - } - return nil -} - -type OneofOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - - // The parser stores options it doesn't recognize here. See above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` -} - -func (x *OneofOptions) Reset() { - *x = OneofOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *OneofOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*OneofOptions) ProtoMessage() {} - -func (x *OneofOptions) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use OneofOptions.ProtoReflect.Descriptor instead. 
-func (*OneofOptions) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{13} -} - -func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { - if x != nil { - return x.UninterpretedOption - } - return nil -} - -type EnumOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - - // Set this option to true to allow mapping different tag names to the same - // value. - AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` - // Is this enum deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum, or it will be completely ignored; in the very least, this - // is a formalization for deprecating enums. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` -} - -// Default values for EnumOptions fields. -const ( - Default_EnumOptions_Deprecated = bool(false) -) - -func (x *EnumOptions) Reset() { - *x = EnumOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *EnumOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EnumOptions) ProtoMessage() {} - -func (x *EnumOptions) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EnumOptions.ProtoReflect.Descriptor instead. -func (*EnumOptions) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{14} -} - -func (x *EnumOptions) GetAllowAlias() bool { - if x != nil && x.AllowAlias != nil { - return *x.AllowAlias - } - return false -} - -func (x *EnumOptions) GetDeprecated() bool { - if x != nil && x.Deprecated != nil { - return *x.Deprecated - } - return Default_EnumOptions_Deprecated -} - -func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { - if x != nil { - return x.UninterpretedOption - } - return nil -} - -type EnumValueOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - - // Is this enum value deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum value, or it will be completely ignored; in the very least, - // this is a formalization for deprecating enum values. - Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` -} - -// Default values for EnumValueOptions fields. 
-const ( - Default_EnumValueOptions_Deprecated = bool(false) -) - -func (x *EnumValueOptions) Reset() { - *x = EnumValueOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *EnumValueOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EnumValueOptions) ProtoMessage() {} - -func (x *EnumValueOptions) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EnumValueOptions.ProtoReflect.Descriptor instead. -func (*EnumValueOptions) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{15} -} - -func (x *EnumValueOptions) GetDeprecated() bool { - if x != nil && x.Deprecated != nil { - return *x.Deprecated - } - return Default_EnumValueOptions_Deprecated -} - -func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { - if x != nil { - return x.UninterpretedOption - } - return nil -} - -type ServiceOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - - // Is this service deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the service, or it will be completely ignored; in the very least, - // this is a formalization for deprecating services. - Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` -} - -// Default values for ServiceOptions fields. -const ( - Default_ServiceOptions_Deprecated = bool(false) -) - -func (x *ServiceOptions) Reset() { - *x = ServiceOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServiceOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServiceOptions) ProtoMessage() {} - -func (x *ServiceOptions) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServiceOptions.ProtoReflect.Descriptor instead. 
-func (*ServiceOptions) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{16} -} - -func (x *ServiceOptions) GetDeprecated() bool { - if x != nil && x.Deprecated != nil { - return *x.Deprecated - } - return Default_ServiceOptions_Deprecated -} - -func (x *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { - if x != nil { - return x.UninterpretedOption - } - return nil -} - -type MethodOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - - // Is this method deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the method, or it will be completely ignored; in the very least, - // this is a formalization for deprecating methods. - Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` - // The parser stores options it doesn't recognize here. See above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` -} - -// Default values for MethodOptions fields. -const ( - Default_MethodOptions_Deprecated = bool(false) - Default_MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN -) - -func (x *MethodOptions) Reset() { - *x = MethodOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MethodOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MethodOptions) ProtoMessage() {} - -func (x *MethodOptions) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MethodOptions.ProtoReflect.Descriptor instead. -func (*MethodOptions) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17} -} - -func (x *MethodOptions) GetDeprecated() bool { - if x != nil && x.Deprecated != nil { - return *x.Deprecated - } - return Default_MethodOptions_Deprecated -} - -func (x *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { - if x != nil && x.IdempotencyLevel != nil { - return *x.IdempotencyLevel - } - return Default_MethodOptions_IdempotencyLevel -} - -func (x *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { - if x != nil { - return x.UninterpretedOption - } - return nil -} - -// A message representing a option the parser does not recognize. This only -// appears in options protos created by the compiler::Parser class. -// DescriptorPool resolves these when building Descriptor objects. Therefore, -// options protos in descriptor objects (e.g. returned by Descriptor::options(), -// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions -// in them. 
-type UninterpretedOption struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` - // The value of the uninterpreted option, in whatever type the tokenizer - // identified it as during parsing. Exactly one of these should be set. - IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` - PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` - NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` - DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` - StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` - AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` -} - -func (x *UninterpretedOption) Reset() { - *x = UninterpretedOption{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UninterpretedOption) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UninterpretedOption) ProtoMessage() {} - -func (x *UninterpretedOption) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UninterpretedOption.ProtoReflect.Descriptor instead. -func (*UninterpretedOption) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{18} -} - -func (x *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { - if x != nil { - return x.Name - } - return nil -} - -func (x *UninterpretedOption) GetIdentifierValue() string { - if x != nil && x.IdentifierValue != nil { - return *x.IdentifierValue - } - return "" -} - -func (x *UninterpretedOption) GetPositiveIntValue() uint64 { - if x != nil && x.PositiveIntValue != nil { - return *x.PositiveIntValue - } - return 0 -} - -func (x *UninterpretedOption) GetNegativeIntValue() int64 { - if x != nil && x.NegativeIntValue != nil { - return *x.NegativeIntValue - } - return 0 -} - -func (x *UninterpretedOption) GetDoubleValue() float64 { - if x != nil && x.DoubleValue != nil { - return *x.DoubleValue - } - return 0 -} - -func (x *UninterpretedOption) GetStringValue() []byte { - if x != nil { - return x.StringValue - } - return nil -} - -func (x *UninterpretedOption) GetAggregateValue() string { - if x != nil && x.AggregateValue != nil { - return *x.AggregateValue - } - return "" -} - -// Encapsulates information about the original source file from which a -// FileDescriptorProto was generated. -type SourceCodeInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A Location identifies a piece of source code in a .proto file which - // corresponds to a particular definition. 
This information is intended - // to be useful to IDEs, code indexers, documentation generators, and similar - // tools. - // - // For example, say we have a file like: - // message Foo { - // optional string foo = 1; - // } - // Let's look at just the field definition: - // optional string foo = 1; - // ^ ^^ ^^ ^ ^^^ - // a bc de f ghi - // We have the following locations: - // span path represents - // [a,i) [ 4, 0, 2, 0 ] The whole field definition. - // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). - // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). - // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). - // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). - // - // Notes: - // - A location may refer to a repeated field itself (i.e. not to any - // particular index within it). This is used whenever a set of elements are - // logically enclosed in a single code segment. For example, an entire - // extend block (possibly containing multiple extension definitions) will - // have an outer location whose path refers to the "extensions" repeated - // field without an index. - // - Multiple locations may have the same path. This happens when a single - // logical declaration is spread out across multiple places. The most - // obvious example is the "extend" block again -- there may be multiple - // extend blocks in the same scope, each of which will have the same path. - // - A location's span is not always a subset of its parent's span. For - // example, the "extendee" of an extension declaration appears at the - // beginning of the "extend" block and is shared by all extensions within - // the block. - // - Just because a location's span is a subset of some other location's span - // does not mean that it is a descendant. For example, a "group" defines - // both a type and a field in a single declaration. Thus, the locations - // corresponding to the type and field and their components will overlap. - // - Code which tries to interpret locations should probably be designed to - // ignore those that it doesn't understand, as more types of locations could - // be recorded in the future. - Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` -} - -func (x *SourceCodeInfo) Reset() { - *x = SourceCodeInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SourceCodeInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SourceCodeInfo) ProtoMessage() {} - -func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SourceCodeInfo.ProtoReflect.Descriptor instead. -func (*SourceCodeInfo) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19} -} - -func (x *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { - if x != nil { - return x.Location - } - return nil -} - -// Describes the relationship between generated code and its original source -// file. A GeneratedCodeInfo message is associated with only one generated -// source file, but may contain references to different source .proto files. 
-type GeneratedCodeInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // An Annotation connects some span of text in generated code to an element - // of its generating .proto file. - Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` -} - -func (x *GeneratedCodeInfo) Reset() { - *x = GeneratedCodeInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GeneratedCodeInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GeneratedCodeInfo) ProtoMessage() {} - -func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GeneratedCodeInfo.ProtoReflect.Descriptor instead. -func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20} -} - -func (x *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { - if x != nil { - return x.Annotation - } - return nil -} - -type DescriptorProto_ExtensionRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. - Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` -} - -func (x *DescriptorProto_ExtensionRange) Reset() { - *x = DescriptorProto_ExtensionRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DescriptorProto_ExtensionRange) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DescriptorProto_ExtensionRange) ProtoMessage() {} - -func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DescriptorProto_ExtensionRange.ProtoReflect.Descriptor instead. -func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{2, 0} -} - -func (x *DescriptorProto_ExtensionRange) GetStart() int32 { - if x != nil && x.Start != nil { - return *x.Start - } - return 0 -} - -func (x *DescriptorProto_ExtensionRange) GetEnd() int32 { - if x != nil && x.End != nil { - return *x.End - } - return 0 -} - -func (x *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { - if x != nil { - return x.Options - } - return nil -} - -// Range of reserved tag numbers. Reserved tag numbers may not be used by -// fields or extension ranges in the same message. Reserved ranges may -// not overlap. 
-type DescriptorProto_ReservedRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. -} - -func (x *DescriptorProto_ReservedRange) Reset() { - *x = DescriptorProto_ReservedRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DescriptorProto_ReservedRange) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DescriptorProto_ReservedRange) ProtoMessage() {} - -func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DescriptorProto_ReservedRange.ProtoReflect.Descriptor instead. -func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{2, 1} -} - -func (x *DescriptorProto_ReservedRange) GetStart() int32 { - if x != nil && x.Start != nil { - return *x.Start - } - return 0 -} - -func (x *DescriptorProto_ReservedRange) GetEnd() int32 { - if x != nil && x.End != nil { - return *x.End - } - return 0 -} - -// Range of reserved numeric values. Reserved values may not be used by -// entries in the same enum. Reserved ranges may not overlap. -// -// Note that this is distinct from DescriptorProto.ReservedRange in that it -// is inclusive such that it can appropriately represent the entire int32 -// domain. -type EnumDescriptorProto_EnumReservedRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Inclusive. -} - -func (x *EnumDescriptorProto_EnumReservedRange) Reset() { - *x = EnumDescriptorProto_EnumReservedRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *EnumDescriptorProto_EnumReservedRange) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} - -func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EnumDescriptorProto_EnumReservedRange.ProtoReflect.Descriptor instead. 
-func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{6, 0} -} - -func (x *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { - if x != nil && x.Start != nil { - return *x.Start - } - return 0 -} - -func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { - if x != nil && x.End != nil { - return *x.End - } - return 0 -} - -// The name of the uninterpreted option. Each string represents a segment in -// a dot-separated name. is_extension is true iff a segment represents an -// extension (denoted with parentheses in options specs in .proto files). -// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents -// "foo.(bar.baz).qux". -type UninterpretedOption_NamePart struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` - IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` -} - -func (x *UninterpretedOption_NamePart) Reset() { - *x = UninterpretedOption_NamePart{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UninterpretedOption_NamePart) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UninterpretedOption_NamePart) ProtoMessage() {} - -func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UninterpretedOption_NamePart.ProtoReflect.Descriptor instead. -func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{18, 0} -} - -func (x *UninterpretedOption_NamePart) GetNamePart() string { - if x != nil && x.NamePart != nil { - return *x.NamePart - } - return "" -} - -func (x *UninterpretedOption_NamePart) GetIsExtension() bool { - if x != nil && x.IsExtension != nil { - return *x.IsExtension - } - return false -} - -type SourceCodeInfo_Location struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Identifies which part of the FileDescriptorProto was defined at this - // location. - // - // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition. For - // example, this path: - // [ 4, 3, 2, 7, 1 ] - // refers to: - // file.message_type(3) // 4, 3 - // .field(7) // 2, 7 - // .name() // 1 - // This is because FileDescriptorProto.message_type has field number 4: - // repeated DescriptorProto message_type = 4; - // and DescriptorProto.field has field number 2: - // repeated FieldDescriptorProto field = 2; - // and FieldDescriptorProto.name has field number 1: - // optional string name = 1; - // - // Thus, the above path gives the location of a field name. If we removed - // the last element: - // [ 4, 3, 2, 7 ] - // this path refers to the whole field declaration (from the beginning - // of the label to the terminating semicolon). 
- Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` - // Always has exactly three or four elements: start line, start column, - // end line (optional, otherwise assumed same as start line), end column. - // These are packed into a single field for efficiency. Note that line - // and column numbers are zero-based -- typically you will want to add - // 1 to each before displaying to a user. - Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` - // If this SourceCodeInfo represents a complete declaration, these are any - // comments appearing before and after the declaration which appear to be - // attached to the declaration. - // - // A series of line comments appearing on consecutive lines, with no other - // tokens appearing on those lines, will be treated as a single comment. - // - // leading_detached_comments will keep paragraphs of comments that appear - // before (but not connected to) the current element. Each paragraph, - // separated by empty lines, will be one comment element in the repeated - // field. - // - // Only the comment content is provided; comment markers (e.g. //) are - // stripped out. For block comments, leading whitespace and an asterisk - // will be stripped from the beginning of each line other than the first. - // Newlines are included in the output. - // - // Examples: - // - // optional int32 foo = 1; // Comment attached to foo. - // // Comment attached to bar. - // optional int32 bar = 2; - // - // optional string baz = 3; - // // Comment attached to baz. - // // Another line attached to baz. - // - // // Comment attached to qux. - // // - // // Another line attached to qux. - // optional double qux = 4; - // - // // Detached comment for corge. This is not leading or trailing comments - // // to qux or corge because there are blank lines separating it from - // // both. - // - // // Detached comment for corge paragraph 2. - // - // optional string corge = 5; - // /* Block comment attached - // * to corge. Leading asterisks - // * will be removed. */ - // /* Block comment attached to - // * grault. */ - // optional int32 grault = 6; - // - // // ignored detached comments. - LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` - TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` - LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` -} - -func (x *SourceCodeInfo_Location) Reset() { - *x = SourceCodeInfo_Location{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SourceCodeInfo_Location) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SourceCodeInfo_Location) ProtoMessage() {} - -func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SourceCodeInfo_Location.ProtoReflect.Descriptor instead. 
-func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} -} - -func (x *SourceCodeInfo_Location) GetPath() []int32 { - if x != nil { - return x.Path - } - return nil -} - -func (x *SourceCodeInfo_Location) GetSpan() []int32 { - if x != nil { - return x.Span - } - return nil -} - -func (x *SourceCodeInfo_Location) GetLeadingComments() string { - if x != nil && x.LeadingComments != nil { - return *x.LeadingComments - } - return "" -} - -func (x *SourceCodeInfo_Location) GetTrailingComments() string { - if x != nil && x.TrailingComments != nil { - return *x.TrailingComments - } - return "" -} - -func (x *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { - if x != nil { - return x.LeadingDetachedComments - } - return nil -} - -type GeneratedCodeInfo_Annotation struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Identifies the element in the original source .proto file. This field - // is formatted the same as SourceCodeInfo.Location.path. - Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` - // Identifies the filesystem path to the original source .proto. - SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` - // Identifies the starting offset in bytes in the generated code - // that relates to the identified object. - Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` - // Identifies the ending offset in bytes in the generated code that - // relates to the identified offset. The end offset should be one past - // the last relevant byte (so the length of the text = end - begin). - End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` -} - -func (x *GeneratedCodeInfo_Annotation) Reset() { - *x = GeneratedCodeInfo_Annotation{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GeneratedCodeInfo_Annotation) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} - -func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GeneratedCodeInfo_Annotation.ProtoReflect.Descriptor instead. 
-func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0} -} - -func (x *GeneratedCodeInfo_Annotation) GetPath() []int32 { - if x != nil { - return x.Path - } - return nil -} - -func (x *GeneratedCodeInfo_Annotation) GetSourceFile() string { - if x != nil && x.SourceFile != nil { - return *x.SourceFile - } - return "" -} - -func (x *GeneratedCodeInfo_Annotation) GetBegin() int32 { - if x != nil && x.Begin != nil { - return *x.Begin - } - return 0 -} - -func (x *GeneratedCodeInfo_Annotation) GetEnd() int32 { - if x != nil && x.End != nil { - return *x.End - } - return 0 -} - -var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor - -var file_google_protobuf_descriptor_proto_rawDesc = []byte{ - 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x22, 0x4d, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, - 0x6c, 0x65, 0x22, 0xe4, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, - 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, - 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, - 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c, - 0x69, 0x63, 0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20, - 0x03, 0x28, 0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e, - 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65, - 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e, - 0x77, 0x65, 0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43, - 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, - 0x75, 
0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, - 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, - 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, - 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, - 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, - 0x44, 0x65, 0x73, 0x63, 0x72, 
0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, - 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, - 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, - 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, - 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, - 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, - 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d, - 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x7c, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 
0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, - 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, - 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, - 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, - 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, - 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, - 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, - 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, - 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 
0x4c, 0x4f, 0x41, - 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, - 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, - 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, - 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, - 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, - 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, - 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, - 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, - 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, - 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, - 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, - 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, - 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, - 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, - 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, - 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, - 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, - 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, - 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x1d, 0x0a, 0x0a, 0x69, 
0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, - 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, - 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, - 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, - 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, - 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, - 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, - 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, - 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, - 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, - 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, - 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, - 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, - 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, - 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 
0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, - 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, - 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, - 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, - 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, - 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, - 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, - 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, - 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, - 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, - 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, - 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, - 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, - 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, - 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, - 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 
0x2c, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, - 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, - 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, - 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, - 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, - 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, - 0x10, 0x27, 0x22, 0xd1, 0x02, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, - 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, - 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, - 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, - 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, - 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xe2, 0x03, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, - 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, - 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, - 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, - 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, - 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, - 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, - 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, - 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, - 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, - 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, - 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, - 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, - 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, - 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, - 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 
0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, - 0x22, 0xc0, 0x01, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, - 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, - 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, - 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, - 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 
0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, - 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, - 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, - 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, - 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, - 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, - 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, - 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, - 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, - 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, - 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, - 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, - 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, - 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, - 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, - 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, - 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, - 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, - 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 
0x20, 0x01, 0x28, - 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, - 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, - 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, - 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, - 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, - 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, - 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, - 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, - 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, - 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, - 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, - 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, - 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, - 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd1, 0x01, - 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, - 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, - 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, - 0x63, 
0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, - 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, - 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, - 0x64, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, - 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, -} - -var ( - file_google_protobuf_descriptor_proto_rawDescOnce sync.Once - file_google_protobuf_descriptor_proto_rawDescData = file_google_protobuf_descriptor_proto_rawDesc -) - -func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { - file_google_protobuf_descriptor_proto_rawDescOnce.Do(func() { - file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_descriptor_proto_rawDescData) - }) - return file_google_protobuf_descriptor_proto_rawDescData -} - -var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 6) -var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 27) -var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ - (FieldDescriptorProto_Type)(0), // 0: google.protobuf.FieldDescriptorProto.Type - (FieldDescriptorProto_Label)(0), // 1: google.protobuf.FieldDescriptorProto.Label - (FileOptions_OptimizeMode)(0), // 2: google.protobuf.FileOptions.OptimizeMode - (FieldOptions_CType)(0), // 3: google.protobuf.FieldOptions.CType - (FieldOptions_JSType)(0), // 4: google.protobuf.FieldOptions.JSType - (MethodOptions_IdempotencyLevel)(0), // 5: google.protobuf.MethodOptions.IdempotencyLevel - (*FileDescriptorSet)(nil), // 6: google.protobuf.FileDescriptorSet - (*FileDescriptorProto)(nil), // 7: google.protobuf.FileDescriptorProto - (*DescriptorProto)(nil), // 8: google.protobuf.DescriptorProto - (*ExtensionRangeOptions)(nil), // 9: google.protobuf.ExtensionRangeOptions - (*FieldDescriptorProto)(nil), // 10: google.protobuf.FieldDescriptorProto - (*OneofDescriptorProto)(nil), // 11: google.protobuf.OneofDescriptorProto - (*EnumDescriptorProto)(nil), // 12: google.protobuf.EnumDescriptorProto - (*EnumValueDescriptorProto)(nil), // 13: google.protobuf.EnumValueDescriptorProto - (*ServiceDescriptorProto)(nil), // 14: google.protobuf.ServiceDescriptorProto - (*MethodDescriptorProto)(nil), // 15: google.protobuf.MethodDescriptorProto - (*FileOptions)(nil), // 16: google.protobuf.FileOptions - (*MessageOptions)(nil), // 17: google.protobuf.MessageOptions - (*FieldOptions)(nil), // 18: google.protobuf.FieldOptions - (*OneofOptions)(nil), // 19: google.protobuf.OneofOptions - (*EnumOptions)(nil), // 20: google.protobuf.EnumOptions - (*EnumValueOptions)(nil), // 21: google.protobuf.EnumValueOptions - (*ServiceOptions)(nil), // 22: 
google.protobuf.ServiceOptions - (*MethodOptions)(nil), // 23: google.protobuf.MethodOptions - (*UninterpretedOption)(nil), // 24: google.protobuf.UninterpretedOption - (*SourceCodeInfo)(nil), // 25: google.protobuf.SourceCodeInfo - (*GeneratedCodeInfo)(nil), // 26: google.protobuf.GeneratedCodeInfo - (*DescriptorProto_ExtensionRange)(nil), // 27: google.protobuf.DescriptorProto.ExtensionRange - (*DescriptorProto_ReservedRange)(nil), // 28: google.protobuf.DescriptorProto.ReservedRange - (*EnumDescriptorProto_EnumReservedRange)(nil), // 29: google.protobuf.EnumDescriptorProto.EnumReservedRange - (*UninterpretedOption_NamePart)(nil), // 30: google.protobuf.UninterpretedOption.NamePart - (*SourceCodeInfo_Location)(nil), // 31: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 32: google.protobuf.GeneratedCodeInfo.Annotation -} -var file_google_protobuf_descriptor_proto_depIdxs = []int32{ - 7, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto - 8, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto - 12, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 14, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto - 10, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 16, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions - 25, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo - 10, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto - 10, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 8, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto - 12, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 27, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange - 11, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto - 17, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions - 28, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange - 24, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 1, // 16: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label - 0, // 17: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type - 18, // 18: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions - 19, // 19: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions - 13, // 20: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto - 20, // 21: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions - 29, // 22: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange - 21, // 23: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions - 15, // 24: 
google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto - 22, // 25: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions - 23, // 26: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions - 2, // 27: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode - 24, // 28: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 29: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 3, // 30: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType - 4, // 31: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType - 24, // 32: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 33: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 34: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 35: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 36: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 5, // 37: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 24, // 38: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 30, // 39: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 31, // 40: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 32, // 41: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 9, // 42: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 43, // [43:43] is the sub-list for method output_type - 43, // [43:43] is the sub-list for method input_type - 43, // [43:43] is the sub-list for extension type_name - 43, // [43:43] is the sub-list for extension extendee - 0, // [0:43] is the sub-list for field type_name -} - -func init() { file_google_protobuf_descriptor_proto_init() } -func file_google_protobuf_descriptor_proto_init() { - if File_google_protobuf_descriptor_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FileDescriptorSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FileDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExtensionRangeOptions); i { - case 0: - 
return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FieldDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OneofDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EnumDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EnumValueDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServiceDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MethodDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FileOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MessageOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FieldOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OneofOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EnumOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EnumValueOptions); i { - case 0: - return &v.state - case 1: - return 
&v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServiceOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MethodOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UninterpretedOption); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceCodeInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GeneratedCodeInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescriptorProto_ExtensionRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescriptorProto_ReservedRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UninterpretedOption_NamePart); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceCodeInfo_Location); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GeneratedCodeInfo_Annotation); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, - NumEnums: 6, - NumMessages: 27, - NumExtensions: 0, - NumServices: 0, 
- }, - GoTypes: file_google_protobuf_descriptor_proto_goTypes, - DependencyIndexes: file_google_protobuf_descriptor_proto_depIdxs, - EnumInfos: file_google_protobuf_descriptor_proto_enumTypes, - MessageInfos: file_google_protobuf_descriptor_proto_msgTypes, - }.Build() - File_google_protobuf_descriptor_proto = out.File - file_google_protobuf_descriptor_proto_rawDesc = nil - file_google_protobuf_descriptor_proto_goTypes = nil - file_google_protobuf_descriptor_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go deleted file mode 100644 index 8c10797b9..000000000 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ /dev/null @@ -1,498 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/any.proto - -// Package anypb contains generated types for google/protobuf/any.proto. -// -// The Any message is a dynamic representation of any other message value. -// It is functionally a tuple of the full name of the remote message type and -// the serialized bytes of the remote message value. -// -// -// Constructing an Any -// -// An Any message containing another message value is constructed using New: -// -// any, err := anypb.New(m) -// if err != nil { -// ... // handle error -// } -// ... // make use of any -// -// -// Unmarshaling an Any -// -// With a populated Any message, the underlying message can be serialized into -// a remote concrete message value in a few ways. -// -// If the exact concrete type is known, then a new (or pre-existing) instance -// of that message can be passed to the UnmarshalTo method: -// -// m := new(foopb.MyMessage) -// if err := any.UnmarshalTo(m); err != nil { -// ... // handle error -// } -// ... 
// make use of m -// -// If the exact concrete type is not known, then the UnmarshalNew method can be -// used to unmarshal the contents into a new instance of the remote message type: -// -// m, err := any.UnmarshalNew() -// if err != nil { -// ... // handle error -// } -// ... // make use of m -// -// UnmarshalNew uses the global type registry to resolve the message type and -// construct a new instance of that message to unmarshal into. In order for a -// message type to appear in the global registry, the Go type representing that -// protobuf message type must be linked into the Go binary. For messages -// generated by protoc-gen-go, this is achieved through an import of the -// generated Go package representing a .proto file. -// -// A common pattern with UnmarshalNew is to use a type switch with the resulting -// proto.Message value: -// -// switch m := m.(type) { -// case *foopb.MyMessage: -// ... // make use of m as a *foopb.MyMessage -// case *barpb.OtherMessage: -// ... // make use of m as a *barpb.OtherMessage -// case *bazpb.SomeMessage: -// ... // make use of m as a *bazpb.SomeMessage -// } -// -// This pattern ensures that the generated packages containing the message types -// listed in the case clauses are linked into the Go binary and therefore also -// registered in the global registry. -// -// -// Type checking an Any -// -// In order to type check whether an Any message represents some other message, -// then use the MessageIs method: -// -// if any.MessageIs((*foopb.MyMessage)(nil)) { -// ... // make use of any, knowing that it contains a foopb.MyMessage -// } -// -// The MessageIs method can also be used with an allocated instance of the target -// message type if the intention is to unmarshal into it if the type matches: -// -// m := new(foopb.MyMessage) -// if any.MessageIs(m) { -// if err := any.UnmarshalTo(m); err != nil { -// ... // handle error -// } -// ... // make use of m -// } -// -package anypb - -import ( - proto "google.golang.org/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoregistry "google.golang.org/protobuf/reflect/protoregistry" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - strings "strings" - sync "sync" -) - -// `Any` contains an arbitrary serialized protocol buffer message along with a -// URL that describes the type of the serialized message. -// -// Protobuf library provides support to pack/unpack Any values in the form -// of utility functions or additional generated methods of the Any type. -// -// Example 1: Pack and unpack a message in C++. -// -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... -// } -// -// Example 2: Pack and unpack a message in Java. -// -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := anypb.New(foo) -// if err != nil { -// ... -// } -// ... -// foo := &pb.Foo{} -// if err := any.UnmarshalTo(foo); err != nil { -// ... 
-// } -// -// The pack methods provided by protobuf library will by default use -// 'type.googleapis.com/full.type.name' as the type URL and the unpack -// methods only use the fully qualified type name after the last '/' -// in the type URL, for example "foo.bar.com/x/y.z" will yield type -// name "y.z". -// -// -// JSON -// ==== -// The JSON representation of an `Any` value uses the regular -// representation of the deserialized, embedded message, with an -// additional field `@type` which contains the type URL. Example: -// -// package google.profile; -// message Person { -// string first_name = 1; -// string last_name = 2; -// } -// -// { -// "@type": "type.googleapis.com/google.profile.Person", -// "firstName": , -// "lastName": -// } -// -// If the embedded message type is well-known and has a custom JSON -// representation, that representation will be embedded adding a field -// `value` which holds the custom JSON in addition to the `@type` -// field. Example (for message [google.protobuf.Duration][]): -// -// { -// "@type": "type.googleapis.com/google.protobuf.Duration", -// "value": "1.212s" -// } -// -type Any struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A URL/resource name that uniquely identifies the type of the serialized - // protocol buffer message. This string must contain at least - // one "/" character. The last segment of the URL's path must represent - // the fully qualified name of the type (as in - // `path/google.protobuf.Duration`). The name should be in a canonical form - // (e.g., leading "." is not accepted). - // - // In practice, teams usually precompile into the binary all types that they - // expect it to use in the context of Any. However, for URLs which use the - // scheme `http`, `https`, or no scheme, one can optionally set up a type - // server that maps type URLs to message definitions as follows: - // - // * If no scheme is provided, `https` is assumed. - // * An HTTP GET on the URL must yield a [google.protobuf.Type][] - // value in binary format, or produce an error. - // * Applications are allowed to cache lookup results based on the - // URL, or have them precompiled into a binary to avoid any - // lookup. Therefore, binary compatibility needs to be preserved - // on changes to types. (Use versioned type names to manage - // breaking changes.) - // - // Note: this functionality is not currently available in the official - // protobuf release, and it is not used for type URLs beginning with - // type.googleapis.com. - // - // Schemes other than `http`, `https` (or the empty scheme) might be - // used with implementation specific semantics. - // - TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` - // Must be a valid serialized protocol buffer of the above specified type. - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -// New marshals src into a new Any instance. -func New(src proto.Message) (*Any, error) { - dst := new(Any) - if err := dst.MarshalFrom(src); err != nil { - return nil, err - } - return dst, nil -} - -// MarshalFrom marshals src into dst as the underlying message -// using the provided marshal options. -// -// If no options are specified, call dst.MarshalFrom instead. 
-func MarshalFrom(dst *Any, src proto.Message, opts proto.MarshalOptions) error { - const urlPrefix = "type.googleapis.com/" - if src == nil { - return protoimpl.X.NewError("invalid nil source message") - } - b, err := opts.Marshal(src) - if err != nil { - return err - } - dst.TypeUrl = urlPrefix + string(src.ProtoReflect().Descriptor().FullName()) - dst.Value = b - return nil -} - -// UnmarshalTo unmarshals the underlying message from src into dst -// using the provided unmarshal options. -// It reports an error if dst is not of the right message type. -// -// If no options are specified, call src.UnmarshalTo instead. -func UnmarshalTo(src *Any, dst proto.Message, opts proto.UnmarshalOptions) error { - if src == nil { - return protoimpl.X.NewError("invalid nil source message") - } - if !src.MessageIs(dst) { - got := dst.ProtoReflect().Descriptor().FullName() - want := src.MessageName() - return protoimpl.X.NewError("mismatched message type: got %q, want %q", got, want) - } - return opts.Unmarshal(src.GetValue(), dst) -} - -// UnmarshalNew unmarshals the underlying message from src into dst, -// which is newly created message using a type resolved from the type URL. -// The message type is resolved according to opt.Resolver, -// which should implement protoregistry.MessageTypeResolver. -// It reports an error if the underlying message type could not be resolved. -// -// If no options are specified, call src.UnmarshalNew instead. -func UnmarshalNew(src *Any, opts proto.UnmarshalOptions) (dst proto.Message, err error) { - if src.GetTypeUrl() == "" { - return nil, protoimpl.X.NewError("invalid empty type URL") - } - if opts.Resolver == nil { - opts.Resolver = protoregistry.GlobalTypes - } - r, ok := opts.Resolver.(protoregistry.MessageTypeResolver) - if !ok { - return nil, protoregistry.NotFound - } - mt, err := r.FindMessageByURL(src.GetTypeUrl()) - if err != nil { - if err == protoregistry.NotFound { - return nil, err - } - return nil, protoimpl.X.NewError("could not resolve %q: %v", src.GetTypeUrl(), err) - } - dst = mt.New().Interface() - return dst, opts.Unmarshal(src.GetValue(), dst) -} - -// MessageIs reports whether the underlying message is of the same type as m. -func (x *Any) MessageIs(m proto.Message) bool { - if m == nil { - return false - } - url := x.GetTypeUrl() - name := string(m.ProtoReflect().Descriptor().FullName()) - if !strings.HasSuffix(url, name) { - return false - } - return len(url) == len(name) || url[len(url)-len(name)-1] == '/' -} - -// MessageName reports the full name of the underlying message, -// returning an empty string if invalid. -func (x *Any) MessageName() protoreflect.FullName { - url := x.GetTypeUrl() - name := protoreflect.FullName(url) - if i := strings.LastIndexByte(url, '/'); i >= 0 { - name = name[i+len("/"):] - } - if !name.IsValid() { - return "" - } - return name -} - -// MarshalFrom marshals m into x as the underlying message. -func (x *Any) MarshalFrom(m proto.Message) error { - return MarshalFrom(x, m, proto.MarshalOptions{}) -} - -// UnmarshalTo unmarshals the contents of the underlying message of x into m. -// It resets m before performing the unmarshal operation. -// It reports an error if m is not of the right message type. -func (x *Any) UnmarshalTo(m proto.Message) error { - return UnmarshalTo(x, m, proto.UnmarshalOptions{}) -} - -// UnmarshalNew unmarshals the contents of the underlying message of x into -// a newly allocated message of the specified type. -// It reports an error if the underlying message type could not be resolved. 
-func (x *Any) UnmarshalNew() (proto.Message, error) { - return UnmarshalNew(x, proto.UnmarshalOptions{}) -} - -func (x *Any) Reset() { - *x = Any{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_any_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Any) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Any) ProtoMessage() {} - -func (x *Any) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_any_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Any.ProtoReflect.Descriptor instead. -func (*Any) Descriptor() ([]byte, []int) { - return file_google_protobuf_any_proto_rawDescGZIP(), []int{0} -} - -func (x *Any) GetTypeUrl() string { - if x != nil { - return x.TypeUrl - } - return "" -} - -func (x *Any) GetValue() []byte { - if x != nil { - return x.Value - } - return nil -} - -var File_google_protobuf_any_proto protoreflect.FileDescriptor - -var file_google_protobuf_any_proto_rawDesc = []byte{ - 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03, - 0x41, 0x6e, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x08, 0x41, 0x6e, 0x79, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, - 0x61, 0x6e, 0x79, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, - 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_protobuf_any_proto_rawDescOnce sync.Once - file_google_protobuf_any_proto_rawDescData = file_google_protobuf_any_proto_rawDesc -) - -func file_google_protobuf_any_proto_rawDescGZIP() []byte { - file_google_protobuf_any_proto_rawDescOnce.Do(func() { - file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_any_proto_rawDescData) - }) - return file_google_protobuf_any_proto_rawDescData -} - -var file_google_protobuf_any_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_any_proto_goTypes = []interface{}{ - (*Any)(nil), // 0: google.protobuf.Any -} -var file_google_protobuf_any_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the 
sub-list for field type_name -} - -func init() { file_google_protobuf_any_proto_init() } -func file_google_protobuf_any_proto_init() { - if File_google_protobuf_any_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Any); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_any_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_protobuf_any_proto_goTypes, - DependencyIndexes: file_google_protobuf_any_proto_depIdxs, - MessageInfos: file_google_protobuf_any_proto_msgTypes, - }.Build() - File_google_protobuf_any_proto = out.File - file_google_protobuf_any_proto_rawDesc = nil - file_google_protobuf_any_proto_goTypes = nil - file_google_protobuf_any_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go deleted file mode 100644 index a583ca2f6..000000000 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ /dev/null @@ -1,379 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/duration.proto - -// Package durationpb contains generated types for google/protobuf/duration.proto. -// -// The Duration message represents a signed span of time. -// -// -// Conversion to a Go Duration -// -// The AsDuration method can be used to convert a Duration message to a -// standard Go time.Duration value: -// -// d := dur.AsDuration() -// ... 
// make use of d as a time.Duration -// -// Converting to a time.Duration is a common operation so that the extensive -// set of time-based operations provided by the time package can be leveraged. -// See https://golang.org/pkg/time for more information. -// -// The AsDuration method performs the conversion on a best-effort basis. -// Durations with denormal values (e.g., nanoseconds beyond -99999999 and -// +99999999, inclusive; or seconds and nanoseconds with opposite signs) -// are normalized during the conversion to a time.Duration. To manually check for -// invalid Duration per the documented limitations in duration.proto, -// additionally call the CheckValid method: -// -// if err := dur.CheckValid(); err != nil { -// ... // handle error -// } -// -// Note that the documented limitations in duration.proto does not protect a -// Duration from overflowing the representable range of a time.Duration in Go. -// The AsDuration method uses saturation arithmetic such that an overflow clamps -// the resulting value to the closest representable value (e.g., math.MaxInt64 -// for positive overflow and math.MinInt64 for negative overflow). -// -// -// Conversion from a Go Duration -// -// The durationpb.New function can be used to construct a Duration message -// from a standard Go time.Duration value: -// -// dur := durationpb.New(d) -// ... // make use of d as a *durationpb.Duration -// -package durationpb - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - math "math" - reflect "reflect" - sync "sync" - time "time" -) - -// A Duration represents a signed, fixed-length span of time represented -// as a count of seconds and fractions of seconds at nanosecond -// resolution. It is independent of any calendar and concepts like "day" -// or "month". It is related to Timestamp in that the difference between -// two Timestamp values is a Duration and it can be added or subtracted -// from a Timestamp. Range is approximately +-10,000 years. -// -// # Examples -// -// Example 1: Compute Duration from two Timestamps in pseudo code. -// -// Timestamp start = ...; -// Timestamp end = ...; -// Duration duration = ...; -// -// duration.seconds = end.seconds - start.seconds; -// duration.nanos = end.nanos - start.nanos; -// -// if (duration.seconds < 0 && duration.nanos > 0) { -// duration.seconds += 1; -// duration.nanos -= 1000000000; -// } else if (duration.seconds > 0 && duration.nanos < 0) { -// duration.seconds -= 1; -// duration.nanos += 1000000000; -// } -// -// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. -// -// Timestamp start = ...; -// Duration duration = ...; -// Timestamp end = ...; -// -// end.seconds = start.seconds + duration.seconds; -// end.nanos = start.nanos + duration.nanos; -// -// if (end.nanos < 0) { -// end.seconds -= 1; -// end.nanos += 1000000000; -// } else if (end.nanos >= 1000000000) { -// end.seconds += 1; -// end.nanos -= 1000000000; -// } -// -// Example 3: Compute Duration from datetime.timedelta in Python. -// -// td = datetime.timedelta(days=3, minutes=10) -// duration = Duration() -// duration.FromTimedelta(td) -// -// # JSON Mapping -// -// In JSON format, the Duration type is encoded as a string rather than an -// object, where the string ends in the suffix "s" (indicating seconds) and -// is preceded by the number of seconds, with nanoseconds expressed as -// fractional seconds. 
For example, 3 seconds with 0 nanoseconds should be -// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should -// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 -// microsecond should be expressed in JSON format as "3.000001s". -// -// -type Duration struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. Note: these bounds are computed from: - // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - // Signed fractions of a second at nanosecond resolution of the span - // of time. Durations less than one second are represented with a 0 - // `seconds` field and a positive or negative `nanos` field. For durations - // of one second or more, a non-zero value for the `nanos` field must be - // of the same sign as the `seconds` field. Must be from -999,999,999 - // to +999,999,999 inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` -} - -// New constructs a new Duration from the provided time.Duration. -func New(d time.Duration) *Duration { - nanos := d.Nanoseconds() - secs := nanos / 1e9 - nanos -= secs * 1e9 - return &Duration{Seconds: int64(secs), Nanos: int32(nanos)} -} - -// AsDuration converts x to a time.Duration, -// returning the closest duration value in the event of overflow. -func (x *Duration) AsDuration() time.Duration { - secs := x.GetSeconds() - nanos := x.GetNanos() - d := time.Duration(secs) * time.Second - overflow := d/time.Second != time.Duration(secs) - d += time.Duration(nanos) * time.Nanosecond - overflow = overflow || (secs < 0 && nanos < 0 && d > 0) - overflow = overflow || (secs > 0 && nanos > 0 && d < 0) - if overflow { - switch { - case secs < 0: - return time.Duration(math.MinInt64) - case secs > 0: - return time.Duration(math.MaxInt64) - } - } - return d -} - -// IsValid reports whether the duration is valid. -// It is equivalent to CheckValid == nil. -func (x *Duration) IsValid() bool { - return x.check() == 0 -} - -// CheckValid returns an error if the duration is invalid. -// In particular, it checks whether the value is within the range of -// -10000 years to +10000 years inclusive. -// An error is reported for a nil Duration. 
-func (x *Duration) CheckValid() error { - switch x.check() { - case invalidNil: - return protoimpl.X.NewError("invalid nil Duration") - case invalidUnderflow: - return protoimpl.X.NewError("duration (%v) exceeds -10000 years", x) - case invalidOverflow: - return protoimpl.X.NewError("duration (%v) exceeds +10000 years", x) - case invalidNanosRange: - return protoimpl.X.NewError("duration (%v) has out-of-range nanos", x) - case invalidNanosSign: - return protoimpl.X.NewError("duration (%v) has seconds and nanos with different signs", x) - default: - return nil - } -} - -const ( - _ = iota - invalidNil - invalidUnderflow - invalidOverflow - invalidNanosRange - invalidNanosSign -) - -func (x *Duration) check() uint { - const absDuration = 315576000000 // 10000yr * 365.25day/yr * 24hr/day * 60min/hr * 60sec/min - secs := x.GetSeconds() - nanos := x.GetNanos() - switch { - case x == nil: - return invalidNil - case secs < -absDuration: - return invalidUnderflow - case secs > +absDuration: - return invalidOverflow - case nanos <= -1e9 || nanos >= +1e9: - return invalidNanosRange - case (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0): - return invalidNanosSign - default: - return 0 - } -} - -func (x *Duration) Reset() { - *x = Duration{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_duration_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Duration) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Duration) ProtoMessage() {} - -func (x *Duration) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_duration_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Duration.ProtoReflect.Descriptor instead. 
-func (*Duration) Descriptor() ([]byte, []int) { - return file_google_protobuf_duration_proto_rawDescGZIP(), []int{0} -} - -func (x *Duration) GetSeconds() int64 { - if x != nil { - return x.Seconds - } - return 0 -} - -func (x *Duration) GetNanos() int32 { - if x != nil { - return x.Nanos - } - return 0 -} - -var File_google_protobuf_duration_proto protoreflect.FileDescriptor - -var file_google_protobuf_duration_proto_rawDesc = []byte{ - 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, - 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, - 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x83, 0x01, - 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x64, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, - 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, - 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_protobuf_duration_proto_rawDescOnce sync.Once - file_google_protobuf_duration_proto_rawDescData = file_google_protobuf_duration_proto_rawDesc -) - -func file_google_protobuf_duration_proto_rawDescGZIP() []byte { - file_google_protobuf_duration_proto_rawDescOnce.Do(func() { - file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_duration_proto_rawDescData) - }) - return file_google_protobuf_duration_proto_rawDescData -} - -var file_google_protobuf_duration_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_duration_proto_goTypes = []interface{}{ - (*Duration)(nil), // 0: google.protobuf.Duration -} -var file_google_protobuf_duration_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_google_protobuf_duration_proto_init() } -func file_google_protobuf_duration_proto_init() { - if File_google_protobuf_duration_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Duration); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: 
protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_duration_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_protobuf_duration_proto_goTypes, - DependencyIndexes: file_google_protobuf_duration_proto_depIdxs, - MessageInfos: file_google_protobuf_duration_proto_msgTypes, - }.Build() - File_google_protobuf_duration_proto = out.File - file_google_protobuf_duration_proto_rawDesc = nil - file_google_protobuf_duration_proto_goTypes = nil - file_google_protobuf_duration_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go deleted file mode 100644 index c9ae92132..000000000 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ /dev/null @@ -1,390 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/timestamp.proto - -// Package timestamppb contains generated types for google/protobuf/timestamp.proto. -// -// The Timestamp message represents a timestamp, -// an instant in time since the Unix epoch (January 1st, 1970). -// -// -// Conversion to a Go Time -// -// The AsTime method can be used to convert a Timestamp message to a -// standard Go time.Time value in UTC: -// -// t := ts.AsTime() -// ... // make use of t as a time.Time -// -// Converting to a time.Time is a common operation so that the extensive -// set of time-based operations provided by the time package can be leveraged. -// See https://golang.org/pkg/time for more information. -// -// The AsTime method performs the conversion on a best-effort basis. 
Timestamps -// with denormal values (e.g., nanoseconds beyond 0 and 99999999, inclusive) -// are normalized during the conversion to a time.Time. To manually check for -// invalid Timestamps per the documented limitations in timestamp.proto, -// additionally call the CheckValid method: -// -// if err := ts.CheckValid(); err != nil { -// ... // handle error -// } -// -// -// Conversion from a Go Time -// -// The timestamppb.New function can be used to construct a Timestamp message -// from a standard Go time.Time value: -// -// ts := timestamppb.New(t) -// ... // make use of ts as a *timestamppb.Timestamp -// -// In order to construct a Timestamp representing the current time, use Now: -// -// ts := timestamppb.Now() -// ... // make use of ts as a *timestamppb.Timestamp -// -package timestamppb - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" - time "time" -) - -// A Timestamp represents a point in time independent of any time zone or local -// calendar, encoded as a count of seconds and fractions of seconds at -// nanosecond resolution. The count is relative to an epoch at UTC midnight on -// January 1, 1970, in the proleptic Gregorian calendar which extends the -// Gregorian calendar backwards to year one. -// -// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap -// second table is needed for interpretation, using a [24-hour linear -// smear](https://developers.google.com/time/smear). -// -// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By -// restricting to that range, we ensure that we can convert to and from [RFC -// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. -// -// # Examples -// -// Example 1: Compute Timestamp from POSIX `time()`. -// -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); -// -// Example 2: Compute Timestamp from POSIX `gettimeofday()`. -// -// struct timeval tv; -// gettimeofday(&tv, NULL); -// -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); -// -// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. -// -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; -// -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. -// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); -// -// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. -// -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); -// -// -// Example 5: Compute Timestamp from Java `Instant.now()`. -// -// Instant now = Instant.now(); -// -// Timestamp timestamp = -// Timestamp.newBuilder().setSeconds(now.getEpochSecond()) -// .setNanos(now.getNano()).build(); -// -// -// Example 6: Compute Timestamp from current time in Python. -// -// timestamp = Timestamp() -// timestamp.GetCurrentTime() -// -// # JSON Mapping -// -// In JSON format, the Timestamp type is encoded as a string in the -// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. 
That is, the -// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" -// where {year} is always expressed using four digits while {month}, {day}, -// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional -// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), -// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required. A proto3 JSON serializer should always use UTC (as indicated by -// "Z") when printing the Timestamp type and a proto3 JSON parser should be -// able to accept both UTC and other timezones (as indicated by an offset). -// -// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past -// 01:30 UTC on January 15, 2017. -// -// In JavaScript, one can convert a Date object to this format using the -// standard -// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) -// method. In Python, a standard `datetime.datetime` object can be converted -// to this format using -// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with -// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use -// the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D -// ) to obtain a formatter capable of generating timestamps in this format. -// -// -type Timestamp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` -} - -// Now constructs a new Timestamp from the current time. -func Now() *Timestamp { - return New(time.Now()) -} - -// New constructs a new Timestamp from the provided time.Time. -func New(t time.Time) *Timestamp { - return &Timestamp{Seconds: int64(t.Unix()), Nanos: int32(t.Nanosecond())} -} - -// AsTime converts x to a time.Time. -func (x *Timestamp) AsTime() time.Time { - return time.Unix(int64(x.GetSeconds()), int64(x.GetNanos())).UTC() -} - -// IsValid reports whether the timestamp is valid. -// It is equivalent to CheckValid == nil. -func (x *Timestamp) IsValid() bool { - return x.check() == 0 -} - -// CheckValid returns an error if the timestamp is invalid. -// In particular, it checks whether the value represents a date that is -// in the range of 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive. -// An error is reported for a nil Timestamp. 
-func (x *Timestamp) CheckValid() error { - switch x.check() { - case invalidNil: - return protoimpl.X.NewError("invalid nil Timestamp") - case invalidUnderflow: - return protoimpl.X.NewError("timestamp (%v) before 0001-01-01", x) - case invalidOverflow: - return protoimpl.X.NewError("timestamp (%v) after 9999-12-31", x) - case invalidNanos: - return protoimpl.X.NewError("timestamp (%v) has out-of-range nanos", x) - default: - return nil - } -} - -const ( - _ = iota - invalidNil - invalidUnderflow - invalidOverflow - invalidNanos -) - -func (x *Timestamp) check() uint { - const minTimestamp = -62135596800 // Seconds between 1970-01-01T00:00:00Z and 0001-01-01T00:00:00Z, inclusive - const maxTimestamp = +253402300799 // Seconds between 1970-01-01T00:00:00Z and 9999-12-31T23:59:59Z, inclusive - secs := x.GetSeconds() - nanos := x.GetNanos() - switch { - case x == nil: - return invalidNil - case secs < minTimestamp: - return invalidUnderflow - case secs > maxTimestamp: - return invalidOverflow - case nanos < 0 || nanos >= 1e9: - return invalidNanos - default: - return 0 - } -} - -func (x *Timestamp) Reset() { - *x = Timestamp{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_timestamp_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Timestamp) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Timestamp) ProtoMessage() {} - -func (x *Timestamp) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_timestamp_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Timestamp.ProtoReflect.Descriptor instead. 
-func (*Timestamp) Descriptor() ([]byte, []int) { - return file_google_protobuf_timestamp_proto_rawDescGZIP(), []int{0} -} - -func (x *Timestamp) GetSeconds() int64 { - if x != nil { - return x.Seconds - } - return 0 -} - -func (x *Timestamp) GetNanos() int32 { - if x != nil { - return x.Nanos - } - return 0 -} - -var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor - -var file_google_protobuf_timestamp_proto_rawDesc = []byte{ - 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x22, 0x3b, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, - 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, - 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, - 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, - 0x6e, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x70, 0x62, 0xf8, 0x01, 0x01, - 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, - 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_protobuf_timestamp_proto_rawDescOnce sync.Once - file_google_protobuf_timestamp_proto_rawDescData = file_google_protobuf_timestamp_proto_rawDesc -) - -func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte { - file_google_protobuf_timestamp_proto_rawDescOnce.Do(func() { - file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_timestamp_proto_rawDescData) - }) - return file_google_protobuf_timestamp_proto_rawDescData -} - -var file_google_protobuf_timestamp_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_timestamp_proto_goTypes = []interface{}{ - (*Timestamp)(nil), // 0: google.protobuf.Timestamp -} -var file_google_protobuf_timestamp_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_google_protobuf_timestamp_proto_init() } -func file_google_protobuf_timestamp_proto_init() { - if File_google_protobuf_timestamp_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Timestamp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := 
protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_timestamp_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_protobuf_timestamp_proto_goTypes, - DependencyIndexes: file_google_protobuf_timestamp_proto_depIdxs, - MessageInfos: file_google_protobuf_timestamp_proto_msgTypes, - }.Build() - File_google_protobuf_timestamp_proto = out.File - file_google_protobuf_timestamp_proto_rawDesc = nil - file_google_protobuf_timestamp_proto_goTypes = nil - file_google_protobuf_timestamp_proto_depIdxs = nil -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 925f5de96..00b6a93fa 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,11 +1,3 @@ -# github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 -## explicit; go 1.16 -github.com/Azure/go-ansiterm -github.com/Azure/go-ansiterm/winterm -# github.com/Microsoft/go-winio v0.5.2 -## explicit; go 1.13 -github.com/Microsoft/go-winio -github.com/Microsoft/go-winio/pkg/guid # github.com/aws/aws-sdk-go v1.40.44 ## explicit; go 1.11 github.com/aws/aws-sdk-go/aws @@ -61,74 +53,14 @@ github.com/aws/aws-sdk-go/service/sso github.com/aws/aws-sdk-go/service/sso/ssoiface github.com/aws/aws-sdk-go/service/sts github.com/aws/aws-sdk-go/service/sts/stsiface -# github.com/cenkalti/backoff/v4 v4.2.0 -## explicit; go 1.18 -github.com/cenkalti/backoff/v4 -# github.com/containerd/containerd v1.6.19 -## explicit; go 1.17 -github.com/containerd/containerd/errdefs -github.com/containerd/containerd/log -github.com/containerd/containerd/pkg/userns -github.com/containerd/containerd/platforms -# github.com/cpuguy83/dockercfg v0.3.1 -## explicit; go 1.13 -github.com/cpuguy83/dockercfg # github.com/cpuguy83/go-md2man/v2 v2.0.2 ## explicit; go 1.11 github.com/cpuguy83/go-md2man/v2/md2man -# github.com/docker/distribution v2.8.2+incompatible -## explicit -github.com/docker/distribution/digestset -github.com/docker/distribution/reference -# github.com/docker/docker v23.0.5+incompatible -## explicit -github.com/docker/docker/api -github.com/docker/docker/api/types -github.com/docker/docker/api/types/blkiodev -github.com/docker/docker/api/types/container -github.com/docker/docker/api/types/events -github.com/docker/docker/api/types/filters -github.com/docker/docker/api/types/image -github.com/docker/docker/api/types/mount -github.com/docker/docker/api/types/network -github.com/docker/docker/api/types/registry -github.com/docker/docker/api/types/strslice -github.com/docker/docker/api/types/swarm -github.com/docker/docker/api/types/swarm/runtime -github.com/docker/docker/api/types/time -github.com/docker/docker/api/types/versions -github.com/docker/docker/api/types/volume -github.com/docker/docker/client -github.com/docker/docker/errdefs -github.com/docker/docker/pkg/archive -github.com/docker/docker/pkg/idtools -github.com/docker/docker/pkg/ioutils -github.com/docker/docker/pkg/jsonmessage -github.com/docker/docker/pkg/longpath -github.com/docker/docker/pkg/pools -github.com/docker/docker/pkg/stdcopy -github.com/docker/docker/pkg/system -# github.com/docker/go-connections v0.4.0 +# github.com/davecgh/go-spew v1.1.1 ## explicit -github.com/docker/go-connections/nat -github.com/docker/go-connections/sockets -github.com/docker/go-connections/tlsconfig -# github.com/docker/go-units v0.5.0 -## explicit -github.com/docker/go-units -# github.com/gogo/protobuf v1.3.2 -## explicit; go 1.15 -github.com/gogo/protobuf/proto # 
github.com/golang/mock v1.6.0 ## explicit; go 1.11 github.com/golang/mock/gomock -# github.com/golang/protobuf v1.5.2 -## explicit; go 1.9 -github.com/golang/protobuf/proto -github.com/golang/protobuf/ptypes -github.com/golang/protobuf/ptypes/any -github.com/golang/protobuf/ptypes/duration -github.com/golang/protobuf/ptypes/timestamp # github.com/google/go-cmp v0.5.9 ## explicit; go 1.13 github.com/google/go-cmp/cmp @@ -137,9 +69,6 @@ github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/google/uuid v1.3.0 -## explicit -github.com/google/uuid # github.com/hashicorp/errwrap v1.1.0 ## explicit github.com/hashicorp/errwrap @@ -156,9 +85,6 @@ github.com/igungor/gofakes3/backend/s3bolt github.com/igungor/gofakes3/backend/s3mem github.com/igungor/gofakes3/internal/goskipiter github.com/igungor/gofakes3/internal/s3io -# github.com/imdario/mergo v0.3.15 -## explicit; go 1.13 -github.com/imdario/mergo # github.com/jmespath/go-jmespath v0.4.0 ## explicit; go 1.14 github.com/jmespath/go-jmespath @@ -168,49 +94,18 @@ github.com/karrick/godirwalk # github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 ## explicit github.com/kballard/go-shellquote -# github.com/klauspost/compress v1.11.13 -## explicit; go 1.13 -github.com/klauspost/compress/fse -github.com/klauspost/compress/huff0 -github.com/klauspost/compress/snappy -github.com/klauspost/compress/zstd -github.com/klauspost/compress/zstd/internal/xxhash -# github.com/kr/text v0.2.0 -## explicit +# github.com/kr/pretty v0.3.0 +## explicit; go 1.12 # github.com/lanrat/extsort v1.0.0 ## explicit; go 1.13 github.com/lanrat/extsort github.com/lanrat/extsort/queue github.com/lanrat/extsort/tempfile -# github.com/magiconair/properties v1.8.7 -## explicit; go 1.19 -github.com/magiconair/properties -# github.com/moby/patternmatcher v0.5.0 -## explicit; go 1.19 -github.com/moby/patternmatcher -# github.com/moby/sys/sequential v0.5.0 -## explicit; go 1.17 -github.com/moby/sys/sequential -# github.com/moby/term v0.5.0 -## explicit; go 1.18 -github.com/moby/term -github.com/moby/term/windows -# github.com/morikuni/aec v1.0.0 -## explicit -github.com/morikuni/aec -# github.com/opencontainers/go-digest v1.0.0 -## explicit; go 1.13 -github.com/opencontainers/go-digest -# github.com/opencontainers/image-spec v1.1.0-rc2 -## explicit; go 1.17 -github.com/opencontainers/image-spec/specs-go -github.com/opencontainers/image-spec/specs-go/v1 -# github.com/opencontainers/runc v1.1.5 -## explicit; go 1.16 -github.com/opencontainers/runc/libcontainer/user # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors +# github.com/rogpeppe/go-internal v1.8.1 +## explicit; go 1.16 # github.com/russross/blackfriday/v2 v2.1.0 ## explicit github.com/russross/blackfriday/v2 @@ -220,21 +115,9 @@ github.com/ryszard/goskiplist/skiplist # github.com/shabbyrobe/gocovmerge v0.0.0-20180507124511-f6ea450bfb63 ## explicit github.com/shabbyrobe/gocovmerge -# github.com/sirupsen/logrus v1.9.0 -## explicit; go 1.13 -github.com/sirupsen/logrus # github.com/termie/go-shutil v0.0.0-20140729215957-bcacb06fecae ## explicit github.com/termie/go-shutil -# github.com/testcontainers/testcontainers-go v0.21.0 -## explicit; go 1.19 -github.com/testcontainers/testcontainers-go -github.com/testcontainers/testcontainers-go/exec -github.com/testcontainers/testcontainers-go/internal -github.com/testcontainers/testcontainers-go/internal/config 
-github.com/testcontainers/testcontainers-go/internal/testcontainersdocker -github.com/testcontainers/testcontainers-go/internal/testcontainerssession -github.com/testcontainers/testcontainers-go/wait # github.com/urfave/cli/v2 v2.11.2 ## explicit; go 1.18 github.com/urfave/cli/v2 @@ -244,70 +127,25 @@ github.com/xrash/smetrics # go.etcd.io/bbolt v1.3.6 ## explicit; go 1.12 go.etcd.io/bbolt -# golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea -## explicit; go 1.20 -golang.org/x/exp/constraints -golang.org/x/exp/slices # golang.org/x/net v0.7.0 ## explicit; go 1.17 -golang.org/x/net/internal/socks -golang.org/x/net/proxy # golang.org/x/sync v0.1.0 ## explicit golang.org/x/sync/errgroup # golang.org/x/sys v0.8.0 ## explicit; go 1.17 -golang.org/x/sys/execabs -golang.org/x/sys/internal/unsafeheader golang.org/x/sys/unix -golang.org/x/sys/windows # golang.org/x/tools v0.2.0 ## explicit; go 1.18 golang.org/x/tools/cover -# google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad -## explicit; go 1.15 -google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.47.0 -## explicit; go 1.14 -google.golang.org/grpc/codes -google.golang.org/grpc/internal/status -google.golang.org/grpc/status -# google.golang.org/protobuf v1.28.0 +# gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c ## explicit; go 1.11 -google.golang.org/protobuf/encoding/prototext -google.golang.org/protobuf/encoding/protowire -google.golang.org/protobuf/internal/descfmt -google.golang.org/protobuf/internal/descopts -google.golang.org/protobuf/internal/detrand -google.golang.org/protobuf/internal/encoding/defval -google.golang.org/protobuf/internal/encoding/messageset -google.golang.org/protobuf/internal/encoding/tag -google.golang.org/protobuf/internal/encoding/text -google.golang.org/protobuf/internal/errors -google.golang.org/protobuf/internal/filedesc -google.golang.org/protobuf/internal/filetype -google.golang.org/protobuf/internal/flags -google.golang.org/protobuf/internal/genid -google.golang.org/protobuf/internal/impl -google.golang.org/protobuf/internal/order -google.golang.org/protobuf/internal/pragma -google.golang.org/protobuf/internal/set -google.golang.org/protobuf/internal/strs -google.golang.org/protobuf/internal/version -google.golang.org/protobuf/proto -google.golang.org/protobuf/reflect/protodesc -google.golang.org/protobuf/reflect/protoreflect -google.golang.org/protobuf/reflect/protoregistry -google.golang.org/protobuf/runtime/protoiface -google.golang.org/protobuf/runtime/protoimpl -google.golang.org/protobuf/types/descriptorpb -google.golang.org/protobuf/types/known/anypb -google.golang.org/protobuf/types/known/durationpb -google.golang.org/protobuf/types/known/timestamppb # gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce ## explicit gopkg.in/mgo.v2/bson gopkg.in/mgo.v2/internal/json +# gopkg.in/yaml.v2 v2.4.0 +## explicit; go 1.15 # gotest.tools/v3 v3.0.3 ## explicit; go 1.11 gotest.tools/v3/assert From 87cca30741c02b1d8e23778b50b00ce404715a92 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Wed, 26 Jul 2023 17:21:47 +0300 Subject: [PATCH 09/50] refactor: fix staticcheck, make helper easier to read --- e2e/select_test.go | 201 +++++++++++++++++++++------------------------ e2e/util_test.go | 22 ++--- storage/s3.go | 10 +-- 3 files changed, 109 insertions(+), 124 deletions(-) diff --git a/e2e/select_test.go b/e2e/select_test.go index 60374081d..81b2af31a 100644 --- a/e2e/select_test.go +++ b/e2e/select_test.go @@ -1,126 +1,109 @@ -// go:build external package 
e2e import ( + "bytes" + "encoding/csv" + jsonpkg "encoding/json" "fmt" - "strings" "testing" + "github.com/google/go-cmp/cmp" "gotest.tools/v3/icmd" ) -/* -query csv to json {_1, _2, ..}, n objects with header -query json(lines) to json {} n objects -query csv to csv a,b,c, with header -query json(lines) to csv n lines, without header -query json(document) to json -> same object -query json(document) to csv -> not supported(?) -*/ -func getSequentialFile(n int, inputForm, outputForm, structure string) (string, map[int]compareFunc) { - sb := strings.Builder{} - expectedLines := make(map[int]compareFunc) - for i := 0; i < n+1; i++ { - var line string - switch inputForm { - case "json": - switch structure { - case "lines": - if i == n { - break - } - line = fmt.Sprintf(`{ "line": "%d", "id": "i%d", "data": "some event %d" }`, i, i, i) - case "document": - if i == 0 { - line = fmt.Sprintf("{\n%s", line) - } else if i == n { - line = fmt.Sprintf("%s\n}", line) - } else { - if i == n-1 { - line = fmt.Sprintf(` "obj%d": {"line": "%d", "id": "i%d", "data": "some event %d"}`, i, i, i, i) - } else { - line = fmt.Sprintf(` "obj%d": {"line": "%d", "id": "i%d", "data": "some event %d"},`, i, i, i, i) - } - } +func getFile(n int, inputForm, outputForm, structure string) (string, string) { + type row struct { + Line string `json:"line"` + ID string `json:"id"` + Data string `json:"data"` + } + var data []row + for i := 0; i < n; i++ { + data = append(data, row{ + Line: fmt.Sprintf("%d", i), + ID: fmt.Sprintf("id%d", i), + Data: fmt.Sprintf("some event %d", i), + }) + } + var input bytes.Buffer + var expected bytes.Buffer + switch inputForm { + case "json": + encoder := jsonpkg.NewEncoder(&input) + switch structure { + case "document": + rows := make(map[string]row) + for i, v := range data { + rows[fmt.Sprintf("obj%d", i)] = v } - case "csv": - if i == n { - break - } - if i == 0 { - line = fmt.Sprintf("line%sid%sdata", structure, structure) - } else { - line = fmt.Sprintf(`%d%si%d%s"some event %d"`, i-1, structure, i-1, structure, i-1) + err := encoder.Encode(rows) + if err != nil { + panic(err) } - } - sb.WriteString(line) - sb.WriteString("\n") - - switch inputForm { - case "json": - // edge case - if structure == "document" && i == n { - totalLine := "" - expectedLines := make(map[int]compareFunc, 1) - for j := 0; j < n+1; j++ { - if j == 0 { - line = "{" - } else if j == n { - line = "}" - } else { - if j == n-1 { - line = fmt.Sprintf(`"obj%d":{"line":"%d","id":"i%d","data":"some event %d"}`, j, j, j, j) - } else { - line = fmt.Sprintf(`"obj%d":{"line":"%d","id":"i%d","data":"some event %d"},`, j, j, j, j) - } - } - totalLine = fmt.Sprintf("%s%s", totalLine, line) + return input.String(), input.String() + default: + for _, d := range data { + err := encoder.Encode(d) + if err != nil { + panic(err) } - expectedLines[0] = equals(totalLine) - return sb.String(), expectedLines - - } - if i == n { - break } switch outputForm { - case "csv": - structure := "," - line = fmt.Sprintf(`%d%si%d%ssome event %d`, i, structure, i, structure, i) case "json": - if i != n { - line = fmt.Sprintf(`{"line":"%d","id":"i%d","data":"some event %d"}`, i, i, i) + return input.String(), input.String() + case "csv": + writer := csv.NewWriter(&expected) + for _, d := range data { + err := writer.Write([]string{d.Line, d.ID, d.Data}) + if err != nil { + panic(err) + } } + writer.Flush() + return input.String(), expected.String() } - case "csv": - if i == n { - break + } + + // edge case + + case "csv": + writer := 
csv.NewWriter(&input) + writer.Comma = []rune(structure)[0] + writer.Write([]string{"line", "id", "data"}) + for _, d := range data { + writer.Write([]string{d.Line, d.ID, d.Data}) + } + writer.Flush() + switch outputForm { + case "json": + encoder := jsonpkg.NewEncoder(&expected) + encoder.Encode(map[string]string{ + "_1": "line", + "_2": "id", + "_3": "data", + }) + for _, d := range data { + encoder.Encode(map[string]string{ + "_1": d.Line, + "_2": d.ID, + "_3": d.Data, + }) } - switch outputForm { - case "csv": - structure := "," - if i == 0 { - line = fmt.Sprintf("line%sid%sdata", structure, structure) - } else { - line = fmt.Sprintf(`%d%si%d%ssome event %d`, i-1, structure, i-1, structure, i-1) - } - case "json": - if i == 0 { - line = `{"_1":"line","_2":"id","_3":"data"}` - } else { - form := `{"_1":"%d","_2":"i%d","_3":"some event %d"}` - line = fmt.Sprintf(form, i-1, i-1, i-1) - } + return input.String(), expected.String() + case "csv": + writer := csv.NewWriter(&expected) + writer.Write([]string{"line", "id", "data"}) + for _, d := range data { + writer.Write([]string{d.Line, d.ID, d.Data}) } - } - if i != n { - expectedLines[i] = equals(line) + writer.Flush() + + return input.String(), expected.String() } } - - return sb.String(), expectedLines + panic("unreachable") } /* @@ -149,12 +132,12 @@ func TestSelectCommand(t *testing.T) { t.Parallel() // credentials are same for all test cases region := "us-east-1" - accessKeyId := "minioadmin" + accessKeyID := "minioadmin" secretKey := "minioadmin" address := "http://127.0.0.1:9000" // The query is default for all cases, we want to assert the output // is as expected after a query. - query := "SELECT * FROM s3object s LIMIT 5" + query := "SELECT * FROM s3object s LIMIT 6" testcases := []struct { name string cmd []string @@ -282,21 +265,23 @@ func TestSelectCommand(t *testing.T) { for _, tc := range testcases { tc := tc t.Run(tc.name, func(t *testing.T) { - t.Parallel() - - contents, expectedLines := getSequentialFile(5, tc.in, tc.out, tc.structure) + contents, expected := getFile(5, tc.in, tc.out, tc.structure) filename := fmt.Sprintf("file.%s", tc.in) bucket := s3BucketFromTestName(t) src := fmt.Sprintf("s3://%s/%s", bucket, filename) tc.cmd = append(tc.cmd, src) - s3client, s5cmd := setup(t, withEndpointURL(address), withRegion(region), withAccessKeyId(accessKeyId), withSecretKey(secretKey)) + s3client, s5cmd := setup(t, withEndpointURL(address), withRegion(region), withAccessKeyID(accessKeyID), withSecretKey(secretKey)) createBucket(t, s3client, bucket) putFile(t, s3client, bucket, filename, contents) cmd := s5cmd(tc.cmd...) 
- result := icmd.RunCmd(cmd, withEnv("AWS_ACCESS_KEY_ID", accessKeyId), withEnv("AWS_SECRET_ACCESS_KEY", secretKey)) - assertLines(t, result.Stdout(), expectedLines) + result := icmd.RunCmd(cmd, withEnv("AWS_ACCESS_KEY_ID", accessKeyID), withEnv("AWS_SECRET_ACCESS_KEY", secretKey)) + + if diff := cmp.Diff(expected, result.Stdout()); diff != "" { + t.Errorf("select command mismatch (-want +got):\n%s", diff) + } + }) } } diff --git a/e2e/util_test.go b/e2e/util_test.go index 77456f2d3..f89aafd93 100644 --- a/e2e/util_test.go +++ b/e2e/util_test.go @@ -77,7 +77,7 @@ func init() { type setupOpts struct { s3backend string endpointURL string - accessKeyId string + accessKeyID string secretKey string region string timeSource gofakes3.TimeSource @@ -98,9 +98,9 @@ func withEndpointURL(url string) option { } } -func withAccessKeyId(key string) option { +func withAccessKeyID(key string) option { return func(opts *setupOpts) { - opts.accessKeyId = key + opts.accessKeyID = key } } @@ -129,7 +129,7 @@ func withProxy() option { } type credentialCfg struct { - AccessKeyId string + AccessKeyID string SecretKey string Region string } @@ -165,20 +165,20 @@ func setup(t *testing.T, options ...option) (*s3.S3, func(...string) icmd.Cmd) { secretKey = opts.secretKey } - accessKeyId := "" - if opts.accessKeyId != "" { - accessKeyId = opts.accessKeyId + accessKeyID := "" + if opts.accessKeyID != "" { + accessKeyID = opts.accessKeyID } region := "" if opts.region != "" { - region = opts.accessKeyId + region = opts.accessKeyID } var cfg *credentialCfg - if region != "" || accessKeyId != "" || secretKey != "" { + if region != "" || accessKeyID != "" || secretKey != "" { cfg = &credentialCfg{ - AccessKeyId: accessKeyId, + AccessKeyID: accessKeyID, SecretKey: secretKey, Region: region, } @@ -235,7 +235,7 @@ func s3client(t *testing.T, options storage.Options, creds *credentialCfg) *s3.S var id, key, region string if creds != nil { - id = creds.AccessKeyId + id = creds.AccessKeyID key = creds.SecretKey region = creds.Region } else { diff --git a/storage/s3.go b/storage/s3.go index 5352328bc..1e42e333b 100644 --- a/storage/s3.go +++ b/storage/s3.go @@ -592,17 +592,17 @@ type EventStreamDecoder interface { Decode() ([]byte, error) } -type JsonDecoder struct { +type JSONDecoder struct { decoder *json.Decoder } -func NewJsonDecoder(reader io.Reader) EventStreamDecoder { - return &JsonDecoder{ +func NewJSONDecoder(reader io.Reader) EventStreamDecoder { + return &JSONDecoder{ decoder: json.NewDecoder(reader), } } -func (jd *JsonDecoder) Decode() ([]byte, error) { +func (jd *JSONDecoder) Decode() ([]byte, error) { var val json.RawMessage err := jd.decoder.Decode(&val) if err != nil { @@ -696,7 +696,7 @@ func parseOutputSerialization(val string, reader io.Reader) (*s3.OutputSerializa outputSerialization = &s3.OutputSerialization{ JSON: &s3.JSONOutput{}, } - decoder = NewJsonDecoder(reader) + decoder = NewJSONDecoder(reader) case csvType: outputSerialization = &s3.OutputSerialization{ CSV: &s3.CSVOutput{ From e8b24fa0106c101f0bd51801550198977c97458c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Wed, 26 Jul 2023 17:31:49 +0300 Subject: [PATCH 10/50] ci: service container --- .github/workflows/ci.yml | 9 ++++++++- e2e/select_test.go | 5 ++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ce1f74e0e..0a28d4681 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -43,12 +43,19 @@ jobs: name: test (${{ matrix.os 
}}/go-${{ matrix.go-version }}) runs-on: ${{ matrix.os }}-latest + services: + minio: + image: minio/minio + ports: + - 9000: 45667 steps: - uses: actions/checkout@v2 - uses: actions/setup-go@v2 with: go-version: ${{ matrix.go-version }} - + env: + MINIO_HOST: localhost + MINIO_PORT: 45667 - run: make test qa: diff --git a/e2e/select_test.go b/e2e/select_test.go index 81b2af31a..11b8e407e 100644 --- a/e2e/select_test.go +++ b/e2e/select_test.go @@ -5,6 +5,7 @@ import ( "encoding/csv" jsonpkg "encoding/json" "fmt" + "os" "testing" "github.com/google/go-cmp/cmp" @@ -134,7 +135,9 @@ func TestSelectCommand(t *testing.T) { region := "us-east-1" accessKeyID := "minioadmin" secretKey := "minioadmin" - address := "http://127.0.0.1:9000" + host := os.Getenv("MINIO_HOST") + port := os.Getenv("MINIO_PORT") + address := fmt.Sprintf("http://%s:%s", host, port) // The query is default for all cases, we want to assert the output // is as expected after a query. query := "SELECT * FROM s3object s LIMIT 6" From efd2aeb8cd58c7a7c95bf956c420fb4fc2a737a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Wed, 26 Jul 2023 18:33:04 +0300 Subject: [PATCH 11/50] ci: add service container --- .github/workflows/ci.yml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0a28d4681..fb7e6db16 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -47,16 +47,17 @@ jobs: minio: image: minio/minio ports: - - 9000: 45667 + - 9000: 45677 steps: - uses: actions/checkout@v2 - uses: actions/setup-go@v2 with: go-version: ${{ matrix.go-version }} - env: - MINIO_HOST: localhost - MINIO_PORT: 45667 + - run: make test + env: + MINIO_HOST: minio + MINIO_PORT: 45677 qa: strategy: From 04b79bdd623c46abfdedf43758703722c161fa92 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Wed, 26 Jul 2023 18:36:12 +0300 Subject: [PATCH 12/50] trigger ci --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index fb7e6db16..a1b01ef19 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -47,7 +47,7 @@ jobs: minio: image: minio/minio ports: - - 9000: 45677 + - 9000:45677 steps: - uses: actions/checkout@v2 - uses: actions/setup-go@v2 From 412b4ba1409070f5f5b6969f470a9d3fef253762 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Wed, 26 Jul 2023 18:45:03 +0300 Subject: [PATCH 13/50] cdkasl --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a1b01ef19..b9321604e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -45,7 +45,7 @@ jobs: runs-on: ${{ matrix.os }}-latest services: minio: - image: minio/minio + image: ${{''}} ports: - 9000:45677 steps: From d695389620ebdc58eede938ee01af0b971edd3c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Wed, 26 Jul 2023 18:47:09 +0300 Subject: [PATCH 14/50] trigger ci --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b9321604e..a99097967 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -45,7 +45,7 @@ jobs: runs-on: ${{ matrix.os }}-latest services: minio: - image: ${{''}} + image: ${{ (false) && 'minio/minio' || ''}} ports: - 9000:45677 steps: From ff9a9e3290b61843a8ec853b26030df8bd1e5b18 
Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Wed, 26 Jul 2023 18:50:29 +0300 Subject: [PATCH 15/50] add version to image --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a99097967..f3b8e1766 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -45,7 +45,7 @@ jobs: runs-on: ${{ matrix.os }}-latest services: minio: - image: ${{ (false) && 'minio/minio' || ''}} + image: ${{ (matrix.os == 'ubuntu') && 'minio/minio:RELEASE.2023-07-21T21-12-44Z' || ''}} ports: - 9000:45677 steps: From 511332730937c85138a73c692ba6bee731938f8f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Wed, 26 Jul 2023 18:54:06 +0300 Subject: [PATCH 16/50] fix yaml indentation --- .github/workflows/ci.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f3b8e1766..cfdbf98e4 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -44,10 +44,10 @@ jobs: name: test (${{ matrix.os }}/go-${{ matrix.go-version }}) runs-on: ${{ matrix.os }}-latest services: - minio: - image: ${{ (matrix.os == 'ubuntu') && 'minio/minio:RELEASE.2023-07-21T21-12-44Z' || ''}} - ports: - - 9000:45677 + minio: + image: ${{ (matrix.os == 'ubuntu') && 'minio/minio:RELEASE.2023-07-21T21-12-44Z' || ''}} + ports: + - 9000:45677 steps: - uses: actions/checkout@v2 - uses: actions/setup-go@v2 From 4b3cd812e5679eb1ec3c59652a9cf517a5ddfe0a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Wed, 26 Jul 2023 18:58:47 +0300 Subject: [PATCH 17/50] change port --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index cfdbf98e4..49189701b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -47,7 +47,7 @@ jobs: minio: image: ${{ (matrix.os == 'ubuntu') && 'minio/minio:RELEASE.2023-07-21T21-12-44Z' || ''}} ports: - - 9000:45677 + - 45677:9000 steps: - uses: actions/checkout@v2 - uses: actions/setup-go@v2 From bcecf07c2230036fbdcd6bfc7d0e8f8f6e7c1d1b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Wed, 26 Jul 2023 19:02:04 +0300 Subject: [PATCH 18/50] change to localhost --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 49189701b..1bb2dbbf1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -56,7 +56,7 @@ jobs: - run: make test env: - MINIO_HOST: minio + MINIO_HOST: localhost MINIO_PORT: 45677 qa: From 83b2819b84b731cccab8c02d8b6bfb93defd9f46 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Wed, 26 Jul 2023 19:05:22 +0300 Subject: [PATCH 19/50] change to localhost --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1bb2dbbf1..58d51160f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -56,7 +56,7 @@ jobs: - run: make test env: - MINIO_HOST: localhost + MINIO_HOST: 0.0.0.0 MINIO_PORT: 45677 qa: From 737debfdd8b0c30cffd21b5efba0f9176ed8374f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Wed, 26 Jul 2023 19:20:45 +0300 Subject: [PATCH 20/50] change to localhost --- .github/workflows/ci.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/ci.yml 
b/.github/workflows/ci.yml index 58d51160f..201e08210 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -48,6 +48,12 @@ jobs: image: ${{ (matrix.os == 'ubuntu') && 'minio/minio:RELEASE.2023-07-21T21-12-44Z' || ''}} ports: - 45677:9000 + options: >- + --health-cmd "curl -I http://localhost:45677/minio/health/live -s" + --health-interval 10s + --health-timeout 5s + --health-retries 5 + server ./temp steps: - uses: actions/checkout@v2 - uses: actions/setup-go@v2 From 01e02ad48e5bc8a5d974facc65a35f13f058d3b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Wed, 26 Jul 2023 19:27:12 +0300 Subject: [PATCH 21/50] change to localhost --- .github/workflows/ci.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 201e08210..9c9dc078b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -45,7 +45,7 @@ jobs: runs-on: ${{ matrix.os }}-latest services: minio: - image: ${{ (matrix.os == 'ubuntu') && 'minio/minio:RELEASE.2023-07-21T21-12-44Z' || ''}} + image: ${{ (matrix.os == 'ubuntu') && 'bitnami/minio:2023.7.18' || ''}} ports: - 45677:9000 options: >- @@ -53,7 +53,6 @@ jobs: --health-interval 10s --health-timeout 5s --health-retries 5 - server ./temp steps: - uses: actions/checkout@v2 - uses: actions/setup-go@v2 From 92699093be6a01a88af9e871e4762bba38894418 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Wed, 26 Jul 2023 19:30:58 +0300 Subject: [PATCH 22/50] change to localhost --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9c9dc078b..d0dfd415e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -49,7 +49,7 @@ jobs: ports: - 45677:9000 options: >- - --health-cmd "curl -I http://localhost:45677/minio/health/live -s" + --health-cmd "curl -I http://localhost:9000/minio/health/live -s" --health-interval 10s --health-timeout 5s --health-retries 5 From fbbc5b0ea10d044a4156c9b163f59a2de43c0233 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Wed, 26 Jul 2023 19:47:40 +0300 Subject: [PATCH 23/50] set creds --- .github/workflows/ci.yml | 3 +++ e2e/select_test.go | 10 ++++++---- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d0dfd415e..da55f2479 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -53,6 +53,9 @@ jobs: --health-interval 10s --health-timeout 5s --health-retries 5 + env: + - MINIO_ROOT_USER=minioadmin + - MINIO_ROOT_PASSWORD=minioadmin steps: - uses: actions/checkout@v2 - uses: actions/setup-go@v2 diff --git a/e2e/select_test.go b/e2e/select_test.go index 11b8e407e..b62cbf1e8 100644 --- a/e2e/select_test.go +++ b/e2e/select_test.go @@ -5,7 +5,6 @@ import ( "encoding/csv" jsonpkg "encoding/json" "fmt" - "os" "testing" "github.com/google/go-cmp/cmp" @@ -135,9 +134,12 @@ func TestSelectCommand(t *testing.T) { region := "us-east-1" accessKeyID := "minioadmin" secretKey := "minioadmin" - host := os.Getenv("MINIO_HOST") - port := os.Getenv("MINIO_PORT") - address := fmt.Sprintf("http://%s:%s", host, port) + /* + host := os.Getenv("MINIO_HOST") + port := os.Getenv("MINIO_PORT") + address := fmt.Sprintf("http://%s:%s", host, port) + */ + address := "http://localhost:45677" // The query is default for all cases, we want to assert the output // is as expected after a query. 
query := "SELECT * FROM s3object s LIMIT 6" From 2154a86621b423eabda793450ce069938dfe0791 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Wed, 26 Jul 2023 19:48:43 +0300 Subject: [PATCH 24/50] set creds --- .github/workflows/ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index da55f2479..d2c447e97 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -54,8 +54,8 @@ jobs: --health-timeout 5s --health-retries 5 env: - - MINIO_ROOT_USER=minioadmin - - MINIO_ROOT_PASSWORD=minioadmin + MINIO_ROOT_USER: minioadmin + MINIO_ROOT_PASSWORD: minioadmin steps: - uses: actions/checkout@v2 - uses: actions/setup-go@v2 From 02e680aad1b64f529743129ddf8bdb76281f980a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Wed, 26 Jul 2023 19:59:17 +0300 Subject: [PATCH 25/50] feat: add service containers --- .github/workflows/ci.yml | 4 +--- e2e/select_test.go | 14 +++++++------- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d2c447e97..632b59caf 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -64,9 +64,7 @@ jobs: - run: make test env: - MINIO_HOST: 0.0.0.0 - MINIO_PORT: 45677 - + S3_ENDPOINT: ${{ (matrix.os == 'ubuntu') && 'http://0.0.0.0:45677' || '' }} qa: strategy: matrix: diff --git a/e2e/select_test.go b/e2e/select_test.go index b62cbf1e8..0cf0143d0 100644 --- a/e2e/select_test.go +++ b/e2e/select_test.go @@ -5,6 +5,7 @@ import ( "encoding/csv" jsonpkg "encoding/json" "fmt" + "os" "testing" "github.com/google/go-cmp/cmp" @@ -134,12 +135,11 @@ func TestSelectCommand(t *testing.T) { region := "us-east-1" accessKeyID := "minioadmin" secretKey := "minioadmin" - /* - host := os.Getenv("MINIO_HOST") - port := os.Getenv("MINIO_PORT") - address := fmt.Sprintf("http://%s:%s", host, port) - */ - address := "http://localhost:45677" + + endpoint := os.Getenv("S3_ENDPOINT") + if endpoint == "" { + t.Skipf("skipping the test because S3_ENDPOINT environment variable is empty") + } // The query is default for all cases, we want to assert the output // is as expected after a query. query := "SELECT * FROM s3object s LIMIT 6" @@ -276,7 +276,7 @@ func TestSelectCommand(t *testing.T) { src := fmt.Sprintf("s3://%s/%s", bucket, filename) tc.cmd = append(tc.cmd, src) - s3client, s5cmd := setup(t, withEndpointURL(address), withRegion(region), withAccessKeyID(accessKeyID), withSecretKey(secretKey)) + s3client, s5cmd := setup(t, withEndpointURL(endpoint), withRegion(region), withAccessKeyID(accessKeyID), withSecretKey(secretKey)) createBucket(t, s3client, bucket) putFile(t, s3client, bucket, filename, contents) cmd := s5cmd(tc.cmd...) 
From 5e2cbb25748b88633801d47853d68c39e4e44668 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Wed, 26 Jul 2023 20:15:15 +0300 Subject: [PATCH 26/50] change 0.0.0.0 to localhost --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 632b59caf..2e557c8f4 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -64,7 +64,7 @@ jobs: - run: make test env: - S3_ENDPOINT: ${{ (matrix.os == 'ubuntu') && 'http://0.0.0.0:45677' || '' }} + S3_ENDPOINT: ${{ (matrix.os == 'ubuntu') && 'http://localhost:45677' || '' }} qa: strategy: matrix: From d5165b560f7c60fc798bec38c2979f7d90b8999c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Wed, 26 Jul 2023 20:57:26 +0300 Subject: [PATCH 27/50] testing: add parquet testing --- e2e/select_test.go | 107 ++++++++++++++++-- .../parquet/five_line_simple.parquet | Bin 0 -> 2726 bytes e2e/testfiles/parquet/output.csv | 5 + e2e/testfiles/parquet/output.json | 5 + 4 files changed, 108 insertions(+), 9 deletions(-) create mode 100644 e2e/testfiles/parquet/five_line_simple.parquet create mode 100644 e2e/testfiles/parquet/output.csv create mode 100644 e2e/testfiles/parquet/output.json diff --git a/e2e/select_test.go b/e2e/select_test.go index 0cf0143d0..573a524cb 100644 --- a/e2e/select_test.go +++ b/e2e/select_test.go @@ -122,14 +122,8 @@ csv: default input structure and output as csv tab input structure and default output tab input structure and output as csv - -parquet: - - // TODO: discuss about testing - default input structure and output - default input structure and output as csv */ -func TestSelectCommand(t *testing.T) { +func TestSelectCommandWithGeneratedFiles(t *testing.T) { t.Parallel() // credentials are same for all test cases region := "us-east-1" @@ -149,8 +143,6 @@ func TestSelectCommand(t *testing.T) { in string structure string out string - expected map[int]compareFunc - src string }{ // json { @@ -290,3 +282,100 @@ func TestSelectCommand(t *testing.T) { }) } } + +func TestSelectWithParquet(t *testing.T) { + t.Parallel() + // credentials are same for all test cases + region := "us-east-1" + accessKeyID := "minioadmin" + secretKey := "minioadmin" + + endpoint := os.Getenv("S3_ENDPOINT") + if endpoint == "" { + t.Skipf("skipping the test because S3_ENDPOINT environment variable is empty") + } + // The query is default for all cases, we want to assert the output + // is as expected + query := "SELECT * FROM s3object s LIMIT 6" + testcases := []struct { + name string + src string + cmd []string + expected string + }{ + { + name: "parquet select with json output", + src: "five_line_simple.parquet", + cmd: []string{ + "select", + "parquet", + "--query", + query, + }, + expected: "output.json", + }, + { + name: "parquet select with json output", + src: "five_line_simple.parquet", + cmd: []string{ + "select", + "parquet", + "--output-format", + "csv", + "--query", + query, + }, + expected: "output.csv", + }, + } + + for _, tc := range testcases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + // change the working directory to ./e2e/testdata/parquet + cwd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + cwd += "/testfiles/parquet/" + sourceFile, err := os.Open(cwd + tc.src) + if err != nil { + t.Fatal(err) + } + defer sourceFile.Close() + var buf bytes.Buffer + // read the file content + _, err = sourceFile.Read(buf.Bytes()) + if err != nil { + t.Fatal(err) + } + + expectedFile, err := 
os.Open(cwd + tc.expected) + if err != nil { + t.Fatal(err) + } + defer expectedFile.Close() + var expectedBuf bytes.Buffer + // read the file content + _, err = expectedFile.Read(expectedBuf.Bytes()) + if err != nil { + t.Fatal(err) + } + // convert the file content to string + expected := expectedBuf.String() + bucket := s3BucketFromTestName(t) + src := fmt.Sprintf("s3://%s/%s", bucket, tc.src) + tc.cmd = append(tc.cmd, src) + + s3client, s5cmd := setup(t, withEndpointURL(endpoint), withRegion(region), withAccessKeyID(accessKeyID), withSecretKey(secretKey)) + createBucket(t, s3client, bucket) + putFile(t, s3client, bucket, tc.src, buf.String()) + + cmd := s5cmd(tc.cmd...) + result := icmd.RunCmd(cmd, withEnv("AWS_ACCESS_KEY_ID", accessKeyID), withEnv("AWS_SECRET_ACCESS_KEY", secretKey)) + if diff := cmp.Diff(expected, result.Stdout()); diff != "" { + t.Errorf("select command mismatch (-want +got):\n%s", diff) + } + }) + } +} diff --git a/e2e/testfiles/parquet/five_line_simple.parquet b/e2e/testfiles/parquet/five_line_simple.parquet new file mode 100644 index 0000000000000000000000000000000000000000..52a1e1f125b8f1cf75f77b4ede4a030c0c3d6bd1 GIT binary patch literal 2726 zcmcImPfz1k5Pu1f1+`gta0-k z`I?aIVuBDNBxcq{Tu4X?NQxZc(%iSpV7JJnxYW`rd66h)5(4PHdXWS=pO~8ZO(FR| z5=-1dwzNo6+03--_%@eJ&!>M+q^}bBR(|$&V|E#06D~=BEW(Qr;1%wNF_!N#Qy_CJ zVeT0b=bpWmVzv;!9s@MxSj7yGhoC-9-GWLw7K8M6BK=n)|6ekHl|)dtn;!*smEdY) zWIO8%fGq5KcFsPt{U|4_EfOLkk&qOSJO*%KVoxzM$qC@_ZXK^vw~(hTJ~iLJBov{Bn->bAfM=I~eM{UUz_bp4q+7>G~)YrI*4>k%waNqv#V#)D`$kl3AdQ#yf2-p4!ay+$(&F zbc0g53x9lmq_jvY9jZ{;p>l`GDwR9>0Se#V=xZH0=7BOVOs`Tpps)|#=tSPd7w{md zgMH$EZR+aa>Q=nqgZh-{hlu=iqbSVfFdkxT_`6sGgT=GWPQpkFM>qcx}&hjcVh_ z@RSJftwLRbcAWp<0D64T+CH*2-N8sF8lHO*dmIKAf3l-x4|eah;vc0&90;Z~W_0~oT(D4#m4 z@n7izVXJsZWdTPY6_o~R*rs?s8H@8xDXR~gY55fG3p+q?ym$AR6fOy57-a-Uz3QG) zx}l;yIN8Vfhx=p`?*n*tZ-cy5Sdhy2K4Bi13BU^B4sc@$+rms^{>>Yb$pw1vL*I^8 g%ph1{7h-O7;pNy>C}h{+D Date: Thu, 27 Jul 2023 09:17:18 +0300 Subject: [PATCH 28/50] chore: add descriptive errors during testing --- e2e/select_test.go | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/e2e/select_test.go b/e2e/select_test.go index 573a524cb..cb2714859 100644 --- a/e2e/select_test.go +++ b/e2e/select_test.go @@ -71,6 +71,7 @@ func getFile(n int, inputForm, outputForm, structure string) (string, string) { case "csv": writer := csv.NewWriter(&input) + // set the delimiter for the input writer.Comma = []rune(structure)[0] writer.Write([]string{"line", "id", "data"}) for _, d := range data { @@ -263,14 +264,18 @@ func TestSelectCommandWithGeneratedFiles(t *testing.T) { tc := tc t.Run(tc.name, func(t *testing.T) { contents, expected := getFile(5, tc.in, tc.out, tc.structure) + filename := fmt.Sprintf("file.%s", tc.in) bucket := s3BucketFromTestName(t) src := fmt.Sprintf("s3://%s/%s", bucket, filename) + tc.cmd = append(tc.cmd, src) s3client, s5cmd := setup(t, withEndpointURL(endpoint), withRegion(region), withAccessKeyID(accessKeyID), withSecretKey(secretKey)) + createBucket(t, s3client, bucket) putFile(t, s3client, bucket, filename, contents) + cmd := s5cmd(tc.cmd...) result := icmd.RunCmd(cmd, withEnv("AWS_ACCESS_KEY_ID", accessKeyID), withEnv("AWS_SECRET_ACCESS_KEY", secretKey)) @@ -335,31 +340,31 @@ func TestSelectWithParquet(t *testing.T) { // change the working directory to ./e2e/testdata/parquet cwd, err := os.Getwd() if err != nil { - t.Fatal(err) + t.Fatalf("couldn't reach the current working directory to access testfiles. 
error: %v\n", err) } cwd += "/testfiles/parquet/" sourceFile, err := os.Open(cwd + tc.src) if err != nil { - t.Fatal(err) + t.Fatalf("couldn't read the parquet file to be queried. error: %v\n", err) } defer sourceFile.Close() var buf bytes.Buffer // read the file content _, err = sourceFile.Read(buf.Bytes()) if err != nil { - t.Fatal(err) + t.Fatalf("couldn't write the parquet file to buffer. error: %v\n", err) } expectedFile, err := os.Open(cwd + tc.expected) if err != nil { - t.Fatal(err) + t.Fatalf("couldnt read the output file to be compared against. error: %v\n", err) } defer expectedFile.Close() var expectedBuf bytes.Buffer // read the file content _, err = expectedFile.Read(expectedBuf.Bytes()) if err != nil { - t.Fatal(err) + t.Fatalf("couldn't write the output file to buffer. error: %v\n", err) } // convert the file content to string expected := expectedBuf.String() From 23465a76645c44f132488260b354d43100ce2d3c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Thu, 27 Jul 2023 09:58:54 +0300 Subject: [PATCH 29/50] test: extend it --- CHANGELOG.md | 3 +- command/select.go | 18 ++++++-- e2e/select_test.go | 100 ++++++++++++++++++++++++++++++++++++++++++--- 3 files changed, 112 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ff7326c12..9dcdeaeb1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,8 +5,9 @@ #### Breaking changes #### Features +- Added file types to `select` queries with more range of options to set during the query. ([#494](https://github.com/peak/s5cmd/issues/494)) - Added `--content-disposition` flag to `cp` command. ([#569](https://github.com/peak/s5cmd/issues/569)) -- Added `--show-fullpath` flag to `ls`. (#[596](https://github.com/peak/s5cmd/issues/596)) +- Added `--show-fullpath` flag to `ls`. ([#596](https://github.com/peak/s5cmd/issues/596)) #### Improvements diff --git a/command/select.go b/command/select.go index 0f76a9dcc..8f82c6b4b 100644 --- a/command/select.go +++ b/command/select.go @@ -3,6 +3,7 @@ package command import ( "context" "encoding/json" + "errors" "fmt" "os" "strings" @@ -27,8 +28,11 @@ Options: {{range .VisibleFlags}}{{.}} {{end}} Examples: - 01. Search for all json objects with the foo property set to 'bar' and spit them into stdout - > s5cmd {{.HelpName}} --compression gzip --query "SELECT * FROM S3Object s WHERE s.foo='bar'" "s3://bucket/*" + 01. Search for all objects with the foo property set to 'bar' and spit them into stdout + > {{.HelpName}} --query "SELECT * FROM S3Object s WHERE s.foo='bar'" "s3://bucket/*" + + 02. Search for all objects that use GZIP compression with(parquet does not support) the foo property set to 'bar' and spit them into stdout in csv. 
+ > {{.HelpName}} --compression GZIP --output-format csv --query "SELECT * FROM S3Object s WHERE s.foo='bar'" "s3://bucket/*" ` func beforeFunc(c *cli.Context) error { @@ -187,7 +191,15 @@ func NewSelectCommand() *cli.Command { Usage: "write queries for parquet files", Flags: sharedFlags, CustomHelpTemplate: defaultSelectHelpTemplate, - Before: beforeFunc, + Before: func(c *cli.Context) (err error) { + if c.String("compression") != "" { + err = errors.New("compression is not supported for parquet files") + cmd := commandFromContext(c) + printError(cmd, "select parquet", err) + return err + } + return beforeFunc(c) + }, Action: func(c *cli.Context) (err error) { cmd, err := buildSelect(c, "parquet", nil) if err != nil { diff --git a/e2e/select_test.go b/e2e/select_test.go index cb2714859..228e009d6 100644 --- a/e2e/select_test.go +++ b/e2e/select_test.go @@ -2,6 +2,7 @@ package e2e import ( "bytes" + "compress/gzip" "encoding/csv" jsonpkg "encoding/json" "fmt" @@ -139,11 +140,12 @@ func TestSelectCommandWithGeneratedFiles(t *testing.T) { // is as expected after a query. query := "SELECT * FROM s3object s LIMIT 6" testcases := []struct { - name string - cmd []string - in string - structure string - out string + name string + cmd []string + in string + structure string + compression bool + out string }{ // json { @@ -185,6 +187,54 @@ func TestSelectCommandWithGeneratedFiles(t *testing.T) { in: "json", structure: "document", out: "json", + }, { + name: "json-lines select with default input structure and output", + cmd: []string{ + "select", + "json", + "--compression", + "gzip", + "--query", + query, + }, + in: "json", + compression: true, + structure: "lines", + out: "json", + }, + { + name: "json-lines select with default input structure and csv output", + cmd: []string{ + "select", + "json", + "--compression", + "gzip", + "--output-format", + "csv", + "--query", + query, + }, + in: "json", + compression: true, + structure: "lines", + out: "csv", + }, + { + name: "json-lines select with document input structure and output", + cmd: []string{ + "select", + "json", + "--compression", + "gzip", + "--structure", + "document", + "--query", + query, + }, + in: "json", + compression: true, + structure: "document", + out: "json", }, /* { name: "json-lines select with document input structure and csv output", @@ -215,6 +265,38 @@ func TestSelectCommandWithGeneratedFiles(t *testing.T) { structure: ",", out: "json", }, + { + name: "csv select with gzip compression, default delimiter and output", + cmd: []string{ + "select", + "csv", + "--compression", + "gzip", + "--query", + query, + }, + in: "csv", + compression: true, + structure: ",", + out: "json", + }, + { + name: "csv select with gzip compression, default delimiter and csv output", + cmd: []string{ + "select", + "csv", + "--compression", + "gzip", + "--output-form", + "csv", + "--query", + query, + }, + in: "csv", + compression: true, + structure: ",", + out: "csv", + }, { name: "csv select with default delimiter and csv output", cmd: []string{ @@ -274,6 +356,14 @@ func TestSelectCommandWithGeneratedFiles(t *testing.T) { s3client, s5cmd := setup(t, withEndpointURL(endpoint), withRegion(region), withAccessKeyID(accessKeyID), withSecretKey(secretKey)) createBucket(t, s3client, bucket) + if tc.compression { + var b bytes.Buffer + gz := gzip.NewWriter(&b) + if _, err := gz.Write([]byte(contents)); err != nil { + t.Errorf("could not compress the input object. 
error: %v\n", err) + } + contents = b.String() + } putFile(t, s3client, bucket, filename, contents) cmd := s5cmd(tc.cmd...) From b25e177e5f1f565488de308985bd5326be66d423 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Thu, 27 Jul 2023 13:05:24 +0300 Subject: [PATCH 30/50] feat: add default fallback to not break the api --- command/context.go | 2 - command/select.go | 21 +++++- e2e/select_test.go | 143 ++++++++++++++++++++++------------- storage/s3.go | 183 +++++++++++++++++++++++++-------------------- 4 files changed, 213 insertions(+), 136 deletions(-) diff --git a/command/context.go b/command/context.go index 3e3f5d08d..dd21ace29 100644 --- a/command/context.go +++ b/command/context.go @@ -20,11 +20,9 @@ func commandFromContext(c *cli.Context) string { cmd = fmt.Sprintf("%s --%s=%v", cmd, flagname, flagvalue) } } - if c.Args().Len() > 0 { cmd = fmt.Sprintf("%v %v", cmd, strings.Join(c.Args().Slice(), " ")) } - return cmd } diff --git a/command/select.go b/command/select.go index 8f82c6b4b..0dc3da376 100644 --- a/command/select.go +++ b/command/select.go @@ -201,6 +201,7 @@ func NewSelectCommand() *cli.Command { return beforeFunc(c) }, Action: func(c *cli.Context) (err error) { + cmd, err := buildSelect(c, "parquet", nil) if err != nil { printError(cmd.fullCommand, c.Command.Name, err) @@ -210,9 +211,27 @@ func NewSelectCommand() *cli.Command { }, }, }, + Flags: sharedFlags, + Before: func(c *cli.Context) (err error) { + if c.Args().Len() == 0 { + err = fmt.Errorf("expected source argument") + printError(commandFromContext(c), c.Command.Name, err) + return err + } + return nil + }, + Action: func(c *cli.Context) (err error) { + // default fallback + structure := "lines" + cmd, err := buildSelect(c, "json", &structure) + if err != nil { + printError(cmd.fullCommand, c.Command.Name, err) + return err + } + return cmd.Run(c.Context) + }, CustomHelpTemplate: defaultSelectHelpTemplate, } - cmd.BashComplete = getBashCompleteFn(cmd, true, false) return cmd } diff --git a/e2e/select_test.go b/e2e/select_test.go index 228e009d6..033409673 100644 --- a/e2e/select_test.go +++ b/e2e/select_test.go @@ -13,13 +13,19 @@ import ( "gotest.tools/v3/icmd" ) +// getFile is a helper for creating file contents and expected values func getFile(n int, inputForm, outputForm, structure string) (string, string) { type row struct { Line string `json:"line"` ID string `json:"id"` Data string `json:"data"` } - var data []row + + var ( + data []row + input bytes.Buffer + expected bytes.Buffer + ) for i := 0; i < n; i++ { data = append(data, row{ @@ -28,40 +34,44 @@ func getFile(n int, inputForm, outputForm, structure string) (string, string) { Data: fmt.Sprintf("some event %d", i), }) } - var input bytes.Buffer - var expected bytes.Buffer + switch inputForm { case "json": encoder := jsonpkg.NewEncoder(&input) + switch structure { case "document": rows := make(map[string]row) + for i, v := range data { rows[fmt.Sprintf("obj%d", i)] = v } - err := encoder.Encode(rows) - if err != nil { + + if err := encoder.Encode(rows); err != nil { panic(err) } return input.String(), input.String() default: for _, d := range data { err := encoder.Encode(d) + if err != nil { panic(err) } } + switch outputForm { case "json": return input.String(), input.String() case "csv": writer := csv.NewWriter(&expected) + for _, d := range data { - err := writer.Write([]string{d.Line, d.ID, d.Data}) - if err != nil { + if err := writer.Write([]string{d.Line, d.ID, d.Data}); err != nil { panic(err) } } + writer.Flush() return 
input.String(), expected.String() @@ -75,10 +85,13 @@ func getFile(n int, inputForm, outputForm, structure string) (string, string) { // set the delimiter for the input writer.Comma = []rune(structure)[0] writer.Write([]string{"line", "id", "data"}) + for _, d := range data { writer.Write([]string{d.Line, d.ID, d.Data}) } + writer.Flush() + switch outputForm { case "json": encoder := jsonpkg.NewEncoder(&expected) @@ -87,6 +100,7 @@ func getFile(n int, inputForm, outputForm, structure string) (string, string) { "_2": "id", "_3": "data", }) + for _, d := range data { encoder.Encode(map[string]string{ "_1": d.Line, @@ -98,9 +112,11 @@ func getFile(n int, inputForm, outputForm, structure string) (string, string) { case "csv": writer := csv.NewWriter(&expected) writer.Write([]string{"line", "id", "data"}) + for _, d := range data { writer.Write([]string{d.Line, d.ID, d.Data}) } + writer.Flush() return input.String(), expected.String() @@ -109,36 +125,22 @@ func getFile(n int, inputForm, outputForm, structure string) (string, string) { panic("unreachable") } -/* -test cases: -json: - - default input structure and output - default input structure and output as csv - document input structure and default output - document input structure and output as csv - -csv: - - default input structure and output - default input structure and output as csv - tab input structure and default output - tab input structure and output as csv -*/ func TestSelectCommandWithGeneratedFiles(t *testing.T) { t.Parallel() // credentials are same for all test cases region := "us-east-1" accessKeyID := "minioadmin" secretKey := "minioadmin" + // The query is default for all cases, we want to assert the output + // is as expected after a query. + query := "SELECT * FROM s3object s LIMIT 6" endpoint := os.Getenv("S3_ENDPOINT") + if endpoint == "" { t.Skipf("skipping the test because S3_ENDPOINT environment variable is empty") } - // The query is default for all cases, we want to assert the output - // is as expected after a query. 
- query := "SELECT * FROM s3object s LIMIT 6" + testcases := []struct { name string cmd []string @@ -188,7 +190,7 @@ func TestSelectCommandWithGeneratedFiles(t *testing.T) { structure: "document", out: "json", }, { - name: "json-lines select with default input structure and output", + name: "json-lines select with gzip compression default input structure and output", cmd: []string{ "select", "json", @@ -203,7 +205,7 @@ func TestSelectCommandWithGeneratedFiles(t *testing.T) { out: "json", }, { - name: "json-lines select with default input structure and csv output", + name: "json-lines select with gzip compression default input structure and csv output", cmd: []string{ "select", "json", @@ -220,7 +222,7 @@ func TestSelectCommandWithGeneratedFiles(t *testing.T) { out: "csv", }, { - name: "json-lines select with document input structure and output", + name: "json-lines select with gzip compression document input structure and output", cmd: []string{ "select", "json", @@ -287,7 +289,7 @@ func TestSelectCommandWithGeneratedFiles(t *testing.T) { "csv", "--compression", "gzip", - "--output-form", + "--output-format", "csv", "--query", query, @@ -341,29 +343,65 @@ func TestSelectCommandWithGeneratedFiles(t *testing.T) { structure: "\t", out: "csv", }, + { + name: "query json with default fallback", + cmd: []string{ + "select", + "--query", + query, + }, + in: "json", + structure: "lines", + out: "json", + }, + { + name: "query compressed json with default fallback", + cmd: []string{ + "select", + "--query", + query, + "--compression", + "gzip", + }, + in: "json", + compression: true, + structure: "lines", + out: "json", + }, } for _, tc := range testcases { tc := tc t.Run(tc.name, func(t *testing.T) { + var bucket, src, filename string contents, expected := getFile(5, tc.in, tc.out, tc.structure) - filename := fmt.Sprintf("file.%s", tc.in) - bucket := s3BucketFromTestName(t) - src := fmt.Sprintf("s3://%s/%s", bucket, filename) - - tc.cmd = append(tc.cmd, src) - - s3client, s5cmd := setup(t, withEndpointURL(endpoint), withRegion(region), withAccessKeyID(accessKeyID), withSecretKey(secretKey)) - - createBucket(t, s3client, bucket) if tc.compression { - var b bytes.Buffer + b := bytes.Buffer{} gz := gzip.NewWriter(&b) + filename = fmt.Sprintf("file.%s.gz", tc.in) + src = fmt.Sprintf("s3://%s/%s", bucket, filename) + if _, err := gz.Write([]byte(contents)); err != nil { t.Errorf("could not compress the input object. error: %v\n", err) } + + if err := gz.Close(); err != nil { + t.Errorf("could not close the compressor error: %v\n", err) + } + contents = b.String() + } else { + filename = fmt.Sprintf("file.%s", tc.in) + bucket = s3BucketFromTestName(t) + src = fmt.Sprintf("s3://%s/%s", bucket, filename) } + + s3client, s5cmd := setup(t, withEndpointURL(endpoint), withRegion(region), withAccessKeyID(accessKeyID), withSecretKey(secretKey)) + + createBucket(t, s3client, bucket) + + tc.cmd = append(tc.cmd, src) + putFile(t, s3client, bucket, filename, contents) cmd := s5cmd(tc.cmd...) @@ -384,14 +422,16 @@ func TestSelectWithParquet(t *testing.T) { region := "us-east-1" accessKeyID := "minioadmin" secretKey := "minioadmin" + // The query is default for all cases, we want to assert the output + // is as expected after a query. 
+ query := "SELECT * FROM s3object s LIMIT 6" endpoint := os.Getenv("S3_ENDPOINT") + if endpoint == "" { t.Skipf("skipping the test because S3_ENDPOINT environment variable is empty") } - // The query is default for all cases, we want to assert the output - // is as expected - query := "SELECT * FROM s3object s LIMIT 6" + testcases := []struct { name string src string @@ -427,21 +467,23 @@ func TestSelectWithParquet(t *testing.T) { for _, tc := range testcases { tc := tc t.Run(tc.name, func(t *testing.T) { + var buf bytes.Buffer + var expectedBuf bytes.Buffer + // change the working directory to ./e2e/testdata/parquet cwd, err := os.Getwd() if err != nil { t.Fatalf("couldn't reach the current working directory to access testfiles. error: %v\n", err) } + cwd += "/testfiles/parquet/" sourceFile, err := os.Open(cwd + tc.src) if err != nil { t.Fatalf("couldn't read the parquet file to be queried. error: %v\n", err) } defer sourceFile.Close() - var buf bytes.Buffer - // read the file content - _, err = sourceFile.Read(buf.Bytes()) - if err != nil { + + if _, err := sourceFile.Read(buf.Bytes()); err != nil { t.Fatalf("couldn't write the parquet file to buffer. error: %v\n", err) } @@ -450,12 +492,11 @@ func TestSelectWithParquet(t *testing.T) { t.Fatalf("couldnt read the output file to be compared against. error: %v\n", err) } defer expectedFile.Close() - var expectedBuf bytes.Buffer - // read the file content - _, err = expectedFile.Read(expectedBuf.Bytes()) - if err != nil { + + if _, err := expectedFile.Read(expectedBuf.Bytes()); err != nil { t.Fatalf("couldn't write the output file to buffer. error: %v\n", err) } + // convert the file content to string expected := expectedBuf.String() bucket := s3BucketFromTestName(t) diff --git a/storage/s3.go b/storage/s3.go index 1e42e333b..23ac2a357 100644 --- a/storage/s3.go +++ b/storage/s3.go @@ -588,63 +588,6 @@ func (s *S3) Get( }) } -type EventStreamDecoder interface { - Decode() ([]byte, error) -} - -type JSONDecoder struct { - decoder *json.Decoder -} - -func NewJSONDecoder(reader io.Reader) EventStreamDecoder { - return &JSONDecoder{ - decoder: json.NewDecoder(reader), - } -} - -func (jd *JSONDecoder) Decode() ([]byte, error) { - var val json.RawMessage - err := jd.decoder.Decode(&val) - if err != nil { - return nil, err - } - return val, nil -} - -type CsvDecoder struct { - decoder *csv.Reader - delimiter string -} - -func NewCsvDecoder(reader io.Reader) EventStreamDecoder { - csvDecoder := &CsvDecoder{ - decoder: csv.NewReader(reader), - delimiter: ",", - } - // returned values from AWS has double quotes in it - // so we enable lazy quotes - csvDecoder.decoder.LazyQuotes = true - return csvDecoder -} - -func (cd *CsvDecoder) Decode() ([]byte, error) { - res, err := cd.decoder.Read() - if err != nil { - return nil, err - } - - result := []byte{} - for i, str := range res { - // csv results return carriage returns, which puts \x00 - // to byte array, which breaks the output result. - if i != len(res)-1 { - str = fmt.Sprintf("%s%s", str, cd.delimiter) - } - result = append(result, []byte(str)...) 
- } - return result, nil -} - type SelectQuery struct { InputFormat string InputContentStructure string @@ -654,78 +597,97 @@ type SelectQuery struct { CompressionType string } +type eventType string + const ( - jsonType = "json" - csvType = "csv" - parquetType = "parquet" + jsonType eventType = "json" + csvType eventType = "csv" + parquetType eventType = "parquet" ) -func parseInputSerialization(val string, delimiter string) (*s3.InputSerialization, error) { - var inputSerialization *s3.InputSerialization +func parseInputSerialization(e eventType, c string, delimiter string) (*s3.InputSerialization, error) { + var s *s3.InputSerialization - switch val { + switch e { case jsonType: - inputSerialization = &s3.InputSerialization{ + s = &s3.InputSerialization{ JSON: &s3.JSONInput{ Type: aws.String(delimiter), }, } + if c != "" { + s.CompressionType = aws.String(c) + } case csvType: - inputSerialization = &s3.InputSerialization{ + s = &s3.InputSerialization{ CSV: &s3.CSVInput{ FieldDelimiter: aws.String(delimiter), }, } + if c != "" { + s.CompressionType = aws.String(c) + } case parquetType: - inputSerialization = &s3.InputSerialization{ + s = &s3.InputSerialization{ Parquet: &s3.ParquetInput{}, } default: - return nil, errors.New("input serialization is not valid") + return nil, fmt.Errorf("input format is not valid") } - return inputSerialization, nil + return s, nil } -func parseOutputSerialization(val string, reader io.Reader) (*s3.OutputSerialization, EventStreamDecoder, error) { - var outputSerialization *s3.OutputSerialization +func parseOutputSerialization(e eventType, reader io.Reader) (*s3.OutputSerialization, EventStreamDecoder, error) { + var s *s3.OutputSerialization var decoder EventStreamDecoder - switch val { + switch e { case jsonType: - outputSerialization = &s3.OutputSerialization{ + s = &s3.OutputSerialization{ JSON: &s3.JSONOutput{}, } decoder = NewJSONDecoder(reader) case csvType: - outputSerialization = &s3.OutputSerialization{ + s = &s3.OutputSerialization{ CSV: &s3.CSVOutput{ FieldDelimiter: aws.String(","), }, } decoder = NewCsvDecoder(reader) default: - return nil, nil, errors.New("output serialization is not valid") + return nil, nil, fmt.Errorf("output serialization is not valid") } - return outputSerialization, decoder, nil + return s, decoder, nil } func (s *S3) Select(ctx context.Context, url *url.URL, query *SelectQuery, resultCh chan<- json.RawMessage) error { if s.dryRun { return nil } - var inputSerialization *s3.InputSerialization - var outputSerialization *s3.OutputSerialization - var decoder EventStreamDecoder + var ( + inputFormat *s3.InputSerialization + outputFormat *s3.OutputSerialization + decoder EventStreamDecoder + ) reader, writer := io.Pipe() - inputSerialization, err := parseInputSerialization(query.InputFormat, query.InputContentStructure) + inputFormat, err := parseInputSerialization( + eventType(query.InputFormat), + query.CompressionType, + query.InputContentStructure, + ) + if err != nil { return err } - outputSerialization, decoder, err = parseOutputSerialization(query.OutputFormat, reader) + outputFormat, decoder, err = parseOutputSerialization( + eventType(query.OutputFormat), + reader, + ) + if err != nil { return err } @@ -735,8 +697,8 @@ func (s *S3) Select(ctx context.Context, url *url.URL, query *SelectQuery, resul Key: aws.String(url.Path), ExpressionType: aws.String(query.ExpressionType), Expression: aws.String(query.Expression), - InputSerialization: inputSerialization, - OutputSerialization: outputSerialization, + 
InputSerialization: inputFormat, + OutputSerialization: outputFormat, } resp, err := s.api.SelectObjectContentWithContext(ctx, input) @@ -1394,3 +1356,60 @@ func generateRetryID() *string { num, _ := rand.Int(rand.Reader, big.NewInt(math.MaxInt64)) return aws.String(num.String()) } + +// EventStreamDecoder decodes a s3.Event with +// the given decoder. +type EventStreamDecoder interface { + Decode() ([]byte, error) +} + +type JSONDecoder struct { + decoder *json.Decoder +} + +func NewJSONDecoder(reader io.Reader) EventStreamDecoder { + return &JSONDecoder{ + decoder: json.NewDecoder(reader), + } +} + +func (jd *JSONDecoder) Decode() ([]byte, error) { + var val json.RawMessage + err := jd.decoder.Decode(&val) + if err != nil { + return nil, err + } + return val, nil +} + +type CsvDecoder struct { + decoder *csv.Reader + delimiter string +} + +func NewCsvDecoder(reader io.Reader) EventStreamDecoder { + csvDecoder := &CsvDecoder{ + decoder: csv.NewReader(reader), + delimiter: ",", + } + // returned values from AWS has double quotes in it + // so we enable lazy quotes + csvDecoder.decoder.LazyQuotes = true + return csvDecoder +} + +func (cd *CsvDecoder) Decode() ([]byte, error) { + res, err := cd.decoder.Read() + if err != nil { + return nil, err + } + + result := []byte{} + for i, str := range res { + if i != len(res)-1 { + str = fmt.Sprintf("%s%s", str, cd.delimiter) + } + result = append(result, []byte(str)...) + } + return result, nil +} From 33c2dc5325f8f14d7a576946a258b2f8f5c82a00 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Thu, 27 Jul 2023 13:09:47 +0300 Subject: [PATCH 31/50] chore: put the newlines back --- command/context.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/command/context.go b/command/context.go index dd21ace29..3e3f5d08d 100644 --- a/command/context.go +++ b/command/context.go @@ -20,9 +20,11 @@ func commandFromContext(c *cli.Context) string { cmd = fmt.Sprintf("%s --%s=%v", cmd, flagname, flagvalue) } } + if c.Args().Len() > 0 { cmd = fmt.Sprintf("%v %v", cmd, strings.Join(c.Args().Slice(), " ")) } + return cmd } From 485c972154009427709cc25c0672c1bea5eba47e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Thu, 27 Jul 2023 13:17:04 +0300 Subject: [PATCH 32/50] fix: bucket name on tests --- e2e/select_test.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/e2e/select_test.go b/e2e/select_test.go index 033409673..8b79b89d3 100644 --- a/e2e/select_test.go +++ b/e2e/select_test.go @@ -268,7 +268,7 @@ func TestSelectCommandWithGeneratedFiles(t *testing.T) { out: "json", }, { - name: "csv select with gzip compression, default delimiter and output", + name: "csv select with gzip compression default delimiter and output", cmd: []string{ "select", "csv", @@ -283,7 +283,7 @@ func TestSelectCommandWithGeneratedFiles(t *testing.T) { out: "json", }, { - name: "csv select with gzip compression, default delimiter and csv output", + name: "csv select with gzip compression default delimiter and csv output", cmd: []string{ "select", "csv", @@ -372,7 +372,9 @@ func TestSelectCommandWithGeneratedFiles(t *testing.T) { for _, tc := range testcases { tc := tc t.Run(tc.name, func(t *testing.T) { - var bucket, src, filename string + var src, filename string + + bucket := s3BucketFromTestName(t) contents, expected := getFile(5, tc.in, tc.out, tc.structure) if tc.compression { @@ -392,7 +394,6 @@ func TestSelectCommandWithGeneratedFiles(t *testing.T) { contents = b.String() } else { filename = fmt.Sprintf("file.%s", 
tc.in) - bucket = s3BucketFromTestName(t) src = fmt.Sprintf("s3://%s/%s", bucket, filename) } From 5242e81b017dee2fc276f4351dba037581c60c47 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Fri, 28 Jul 2023 15:51:46 +0300 Subject: [PATCH 33/50] chore: fix conflicts --- go.mod | 3 +-- go.sum | 17 +++++++++++------ vendor/modules.txt | 4 ++++ 3 files changed, 16 insertions(+), 8 deletions(-) diff --git a/go.mod b/go.mod index 70af5d9a9..791f53ade 100644 --- a/go.mod +++ b/go.mod @@ -21,8 +21,6 @@ require ( require ( github.com/VividCortex/ewma v1.2.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect - github.com/hashicorp/errwrap v1.0.0 // indirect - github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/fatih/color v1.15.0 // indirect github.com/hashicorp/errwrap v1.0.0 // indirect @@ -41,5 +39,6 @@ require ( golang.org/x/sync v0.1.0 // indirect golang.org/x/sys v0.7.0 // indirect golang.org/x/tools v0.8.0 // indirect + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce // indirect ) diff --git a/go.sum b/go.sum index 113539bb7..51a72a85e 100644 --- a/go.sum +++ b/go.sum @@ -1,4 +1,3 @@ -github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= github.com/aws/aws-sdk-go v1.44.256 h1:O8VH+bJqgLDguqkH/xQBFz5o/YheeZqgcOYIgsTVWY4= @@ -7,7 +6,7 @@ github.com/cheggaaa/pb/v3 v3.1.4 h1:DN8j4TVVdKu3WxVwcRKu0sG00IIU6FewoABZzXbRQeo= github.com/cheggaaa/pb/v3 v3.1.4/go.mod h1:6wVjILNBaXMs8c21qRiaUM8BR82erfgau1DQ4iUXmSA= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -34,6 +33,13 @@ github.com/karrick/godirwalk v1.15.3 h1:0a2pXOgtB16CqIqXTiT7+K9L73f74n/aNQUnH6Or github.com/karrick/godirwalk v1.15.3/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lanrat/extsort v1.0.0 h1:JjvkCUbD55+gs5s64FHmCU93kWjegEAM5n10XN6GB3c= github.com/lanrat/extsort v1.0.0/go.mod 
h1:bkDEvem4UnD1h87yKICydXs63mKrIGW3W9OGPMg93Ww= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= @@ -133,14 +139,13 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= diff --git a/vendor/modules.txt b/vendor/modules.txt index b33f2ef08..53c0f341c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -104,6 +104,8 @@ github.com/karrick/godirwalk # github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 ## explicit github.com/kballard/go-shellquote +# github.com/kr/pretty v0.3.0 +## explicit; go 1.12 # github.com/lanrat/extsort v1.0.0 ## explicit; go 1.13 github.com/lanrat/extsort @@ -156,6 +158,8 @@ golang.org/x/sys/windows # golang.org/x/tools v0.8.0 ## explicit; go 1.18 golang.org/x/tools/cover +# gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 +## explicit # gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce ## explicit gopkg.in/mgo.v2/bson From 242617116034a62f48cfed3ca8865a63196e0eea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Fri, 28 Jul 2023 20:00:48 +0300 Subject: [PATCH 34/50] chore: change env variable, make tests more readable --- .github/workflows/ci.yml | 2 +- e2e/select_test.go | 162 ++++++++++++--------------------------- e2e/util_test.go | 2 +- 3 files changed, 52 insertions(+), 114 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a7291b1a3..4d24abdbf 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -62,7 +62,7 @@ jobs: - run: make test env: - S3_ENDPOINT: ${{ (matrix.os == 'ubuntu') && 'http://localhost:45677' || '' }} + S5CMD_TEST_ENDPOINT_URL: ${{ (matrix.os == 'ubuntu') && 'http://localhost:45677' || '' }} qa: strategy: matrix: diff --git a/e2e/select_test.go b/e2e/select_test.go index 8b79b89d3..6f7279305 100644 --- a/e2e/select_test.go +++ b/e2e/select_test.go @@ -135,10 +135,10 @@ func TestSelectCommandWithGeneratedFiles(t *testing.T) { // is as 
expected after a query. query := "SELECT * FROM s3object s LIMIT 6" - endpoint := os.Getenv("S3_ENDPOINT") + endpoint := os.Getenv("S5CMD_TEST_ENDPOINT_URL") if endpoint == "" { - t.Skipf("skipping the test because S3_ENDPOINT environment variable is empty") + t.Skipf("skipping the test because S5CMD_TEST_ENDPOINT_URL environment variable is empty") } testcases := []struct { @@ -153,10 +153,8 @@ func TestSelectCommandWithGeneratedFiles(t *testing.T) { { name: "json-lines select with default input structure and output", cmd: []string{ - "select", - "json", - "--query", - query, + "select", "json", + "--query", query, }, in: "json", structure: "lines", @@ -165,12 +163,9 @@ func TestSelectCommandWithGeneratedFiles(t *testing.T) { { name: "json-lines select with default input structure and csv output", cmd: []string{ - "select", - "json", - "--output-format", - "csv", - "--query", - query, + "select", "json", + "--output-format", "csv", + "--query", query, }, in: "json", structure: "lines", @@ -179,12 +174,9 @@ func TestSelectCommandWithGeneratedFiles(t *testing.T) { { name: "json-lines select with document input structure and output", cmd: []string{ - "select", - "json", - "--structure", - "document", - "--query", - query, + "select", "json", + "--structure", "document", + "--query", query, }, in: "json", structure: "document", @@ -192,12 +184,9 @@ func TestSelectCommandWithGeneratedFiles(t *testing.T) { }, { name: "json-lines select with gzip compression default input structure and output", cmd: []string{ - "select", - "json", - "--compression", - "gzip", - "--query", - query, + "select", "json", + "--compression", "gzip", + "--query", query, }, in: "json", compression: true, @@ -207,14 +196,10 @@ func TestSelectCommandWithGeneratedFiles(t *testing.T) { { name: "json-lines select with gzip compression default input structure and csv output", cmd: []string{ - "select", - "json", - "--compression", - "gzip", - "--output-format", - "csv", - "--query", - query, + "select", "json", + "--compression", "gzip", + "--output-format", "csv", + "--query", query, }, in: "json", compression: true, @@ -224,44 +209,22 @@ func TestSelectCommandWithGeneratedFiles(t *testing.T) { { name: "json-lines select with gzip compression document input structure and output", cmd: []string{ - "select", - "json", - "--compression", - "gzip", - "--structure", - "document", - "--query", - query, + "select", "json", + "--compression", "gzip", + "--structure", "document", + "--query", query, }, in: "json", compression: true, structure: "document", out: "json", }, - /* { - name: "json-lines select with document input structure and csv output", - cmd: []string{ - "select", - "json", - "--structure", - "document", - "--output-format", - "csv", - "--query", - query, - }, - in: "json", - structure: "document", - out: "csv", - }, */ // This case is not supported by AWS itself. 
// csv { name: "csv select with default delimiter and output", cmd: []string{ - "select", - "csv", - "--query", - query, + "select", "csv", + "--query", query, }, in: "csv", structure: ",", @@ -270,12 +233,9 @@ func TestSelectCommandWithGeneratedFiles(t *testing.T) { { name: "csv select with gzip compression default delimiter and output", cmd: []string{ - "select", - "csv", - "--compression", - "gzip", - "--query", - query, + "select", "csv", + "--compression", "gzip", + "--query", query, }, in: "csv", compression: true, @@ -285,14 +245,10 @@ func TestSelectCommandWithGeneratedFiles(t *testing.T) { { name: "csv select with gzip compression default delimiter and csv output", cmd: []string{ - "select", - "csv", - "--compression", - "gzip", - "--output-format", - "csv", - "--query", - query, + "select", "csv", + "--compression", "gzip", + "--output-format", "csv", + "--query", query, }, in: "csv", compression: true, @@ -302,12 +258,9 @@ func TestSelectCommandWithGeneratedFiles(t *testing.T) { { name: "csv select with default delimiter and csv output", cmd: []string{ - "select", - "csv", - "--output-format", - "csv", - "--query", - query, + "select", "csv", + "--output-format", "csv", + "--query", query, }, in: "csv", structure: ",", @@ -316,12 +269,9 @@ func TestSelectCommandWithGeneratedFiles(t *testing.T) { { name: "csv select with custom delimiter and default output", cmd: []string{ - "select", - "csv", - "--delimiter", - "\t", - "--query", - query, + "select", "csv", + "--delimiter", "\t", + "--query", query, }, in: "csv", structure: "\t", @@ -330,14 +280,10 @@ func TestSelectCommandWithGeneratedFiles(t *testing.T) { { name: "csv select with custom delimiter and csv output", cmd: []string{ - "select", - "csv", - "--delimiter", - "\t", - "--output-format", - "csv", - "--query", - query, + "select", "csv", + "--delimiter", "\t", + "--output-format", "csv", + "--query", query, }, in: "csv", structure: "\t", @@ -347,8 +293,7 @@ func TestSelectCommandWithGeneratedFiles(t *testing.T) { name: "query json with default fallback", cmd: []string{ "select", - "--query", - query, + "--query", query, }, in: "json", structure: "lines", @@ -358,10 +303,8 @@ func TestSelectCommandWithGeneratedFiles(t *testing.T) { name: "query compressed json with default fallback", cmd: []string{ "select", - "--query", - query, - "--compression", - "gzip", + "--query", query, + "--compression", "gzip", }, in: "json", compression: true, @@ -427,10 +370,10 @@ func TestSelectWithParquet(t *testing.T) { // is as expected after a query. 
query := "SELECT * FROM s3object s LIMIT 6" - endpoint := os.Getenv("S3_ENDPOINT") + endpoint := os.Getenv("S5CMD_TEST_ENDPOINT_URL") if endpoint == "" { - t.Skipf("skipping the test because S3_ENDPOINT environment variable is empty") + t.Skipf("skipping the test because S5CMD_TEST_ENDPOINT_URL environment variable is empty") } testcases := []struct { @@ -443,10 +386,8 @@ func TestSelectWithParquet(t *testing.T) { name: "parquet select with json output", src: "five_line_simple.parquet", cmd: []string{ - "select", - "parquet", - "--query", - query, + "select", "parquet", + "--query", query, }, expected: "output.json", }, @@ -454,12 +395,9 @@ func TestSelectWithParquet(t *testing.T) { name: "parquet select with json output", src: "five_line_simple.parquet", cmd: []string{ - "select", - "parquet", - "--output-format", - "csv", - "--query", - query, + "select", "parquet", + "--output-format", "csv", + "--query", query, }, expected: "output.csv", }, diff --git a/e2e/util_test.go b/e2e/util_test.go index f89aafd93..c65a4ef82 100644 --- a/e2e/util_test.go +++ b/e2e/util_test.go @@ -48,7 +48,7 @@ const ( testDisableRaceFlagVal = "1" s5cmdTestIDEnv = "S5CMD_ACCESS_KEY_ID" s5cmdTestSecretEnv = "S5CMD_SECRET_ACCESS_KEY" - s5cmdTestEndpointEnv = "S5CMD_ENDPOINT_URL" + s5cmdTestEndpointEnv = "S5CMD_TEST_ENDPOINT_URL" s5cmdTestIsVirtualHost = "S5CMD_IS_VIRTUAL_HOST" s5cmdTestRegionEnv = "S5CMD_REGION" s5cmdTestIKnowWhatImDoingEnv = "S5CMD_I_KNOW_WHAT_IM_DOING" From c0dac2347818e0a4471d61b32edeca88e190f0cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Mon, 31 Jul 2023 18:02:40 +0300 Subject: [PATCH 35/50] fix: less strict compression --- command/select.go | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/command/select.go b/command/select.go index 0dc3da376..cd6697736 100644 --- a/command/select.go +++ b/command/select.go @@ -84,17 +84,11 @@ func NewSelectCommand() *cli.Command { Aliases: []string{"e"}, Usage: "SQL expression to use to select from the objects", }, - &cli.GenericFlag{ + &cli.StringFlag{ Name: "compression", Usage: "input compression format", - Value: &EnumValue{ - Enum: []string{"none", "gzip", "bzip2"}, - Default: "none", - ConditionFunction: func(str, target string) bool { - return strings.ToLower(target) == str - }, - }, }, + &cli.GenericFlag{ Name: "output-format", Usage: "output format of the result", From 1d50d12b1b0c78dff50a2371f3cd49a75d2d90d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Mon, 31 Jul 2023 18:20:00 +0300 Subject: [PATCH 36/50] refactor: change help template name --- command/select.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/command/select.go b/command/select.go index cd6697736..b4f779970 100644 --- a/command/select.go +++ b/command/select.go @@ -18,7 +18,7 @@ import ( "github.com/peak/s5cmd/v2/storage/url" ) -var defaultSelectHelpTemplate = `Name: +var selectHelpTemplate = `Name: {{.HelpName}} - {{.Usage}} Usage: @@ -140,7 +140,7 @@ func NewSelectCommand() *cli.Command { Value: ",", }, }, sharedFlags...), - CustomHelpTemplate: defaultSelectHelpTemplate, + CustomHelpTemplate: selectHelpTemplate, Before: beforeFunc, Action: func(c *cli.Context) (err error) { delimiter := c.String("delimiter") @@ -168,7 +168,7 @@ func NewSelectCommand() *cli.Command { }, }, }, sharedFlags...), - CustomHelpTemplate: defaultSelectHelpTemplate, + CustomHelpTemplate: selectHelpTemplate, Before: beforeFunc, Action: func(c *cli.Context) (err error) { structure := 
c.String("structure") @@ -184,7 +184,7 @@ func NewSelectCommand() *cli.Command { Name: "parquet", Usage: "write queries for parquet files", Flags: sharedFlags, - CustomHelpTemplate: defaultSelectHelpTemplate, + CustomHelpTemplate: selectHelpTemplate, Before: func(c *cli.Context) (err error) { if c.String("compression") != "" { err = errors.New("compression is not supported for parquet files") @@ -224,7 +224,7 @@ func NewSelectCommand() *cli.Command { } return cmd.Run(c.Context) }, - CustomHelpTemplate: defaultSelectHelpTemplate, + CustomHelpTemplate: selectHelpTemplate, } cmd.BashComplete = getBashCompleteFn(cmd, true, false) return cmd From cd8980cd974d9037ee3d245b99c282a6a6617084 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Mon, 31 Jul 2023 18:48:58 +0300 Subject: [PATCH 37/50] fix: assert query to be non-empty --- command/select.go | 34 ++++++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/command/select.go b/command/select.go index b4f779970..94a6607b6 100644 --- a/command/select.go +++ b/command/select.go @@ -31,8 +31,8 @@ Examples: 01. Search for all objects with the foo property set to 'bar' and spit them into stdout > {{.HelpName}} --query "SELECT * FROM S3Object s WHERE s.foo='bar'" "s3://bucket/*" - 02. Search for all objects that use GZIP compression with(parquet does not support) the foo property set to 'bar' and spit them into stdout in csv. - > {{.HelpName}} --compression GZIP --output-format csv --query "SELECT * FROM S3Object s WHERE s.foo='bar'" "s3://bucket/*" + 02. Select the average price of the avocado and amount sold, set the output format csv + > {{.HelpName}} --compression GZIP --output-format csv --query "SELECT s.avg_price, s.quantity FROM S3Object s WHERE s.item='avocado'" "s3://bucket/itemprices" ` func beforeFunc(c *cli.Context) error { @@ -48,8 +48,13 @@ func buildSelect(c *cli.Context, inputFormat string, inputStructure *string) (cm fullCommand := commandFromContext(c) - src, err := url.New(c.Args().Get(0), url.WithVersion(c.String("version-id")), - url.WithRaw(c.Bool("raw")), url.WithAllVersions(c.Bool("all-versions"))) + src, err := url.New( + c.Args().Get(0), + url.WithVersion(c.String("version-id")), + url.WithRaw(c.Bool("raw")), + url.WithAllVersions(c.Bool("all-versions")), + ) + if err != nil { printError(fullCommand, c.Command.Name, err) return nil, err @@ -125,6 +130,7 @@ func NewSelectCommand() *cli.Command { Usage: "use the specified version of the object", }, } + cmd := &cli.Command{ Name: "select", HelpName: "select", @@ -132,7 +138,7 @@ func NewSelectCommand() *cli.Command { Subcommands: []*cli.Command{ { Name: "csv", - Usage: "write queries for csv files", + Usage: "run queries on csv files", Flags: append([]cli.Flag{ &cli.StringFlag{ Name: "delimiter", @@ -154,7 +160,7 @@ func NewSelectCommand() *cli.Command { }, { Name: "json", - Usage: "write queries for json files", + Usage: "run queries on json files", Flags: append([]cli.Flag{ &cli.GenericFlag{ Name: "structure", @@ -182,7 +188,7 @@ func NewSelectCommand() *cli.Command { }, { Name: "parquet", - Usage: "write queries for parquet files", + Usage: "run queries on parquet files", Flags: sharedFlags, CustomHelpTemplate: selectHelpTemplate, Before: func(c *cli.Context) (err error) { @@ -195,7 +201,6 @@ func NewSelectCommand() *cli.Command { return beforeFunc(c) }, Action: func(c *cli.Context) (err error) { - cmd, err := buildSelect(c, "parquet", nil) if err != nil { printError(cmd.fullCommand, c.Command.Name, err) @@ -376,8 
+381,13 @@ func validateSelectCommand(c *cli.Context) error { return err } - srcurl, err := url.New(c.Args().Get(0), url.WithVersion(c.String("version-id")), - url.WithRaw(c.Bool("raw")), url.WithAllVersions(c.Bool("all-versions"))) + srcurl, err := url.New( + c.Args().Get(0), + url.WithVersion(c.String("version-id")), + url.WithRaw(c.Bool("raw")), + url.WithAllVersions(c.Bool("all-versions")), + ) + if err != nil { return err } @@ -386,5 +396,9 @@ func validateSelectCommand(c *cli.Context) error { return fmt.Errorf("source must be remote") } + if c.String("query") == "" { + return fmt.Errorf("query must be non-empty") + } + return nil } From 126cb95f3e7676c3cd1869fbbde30edbe889b28c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Tue, 1 Aug 2023 11:49:38 +0300 Subject: [PATCH 38/50] refactor: make compression non-shared --- command/select.go | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/command/select.go b/command/select.go index 94a6607b6..2bc64ec64 100644 --- a/command/select.go +++ b/command/select.go @@ -3,7 +3,6 @@ package command import ( "context" "encoding/json" - "errors" "fmt" "os" "strings" @@ -89,11 +88,6 @@ func NewSelectCommand() *cli.Command { Aliases: []string{"e"}, Usage: "SQL expression to use to select from the objects", }, - &cli.StringFlag{ - Name: "compression", - Usage: "input compression format", - }, - &cli.GenericFlag{ Name: "output-format", Usage: "output format of the result", @@ -145,6 +139,10 @@ func NewSelectCommand() *cli.Command { Usage: "delimiter of the csv file", Value: ",", }, + &cli.StringFlag{ + Name: "compression", + Usage: "input compression format", + }, }, sharedFlags...), CustomHelpTemplate: selectHelpTemplate, Before: beforeFunc, @@ -173,6 +171,10 @@ func NewSelectCommand() *cli.Command { }, }, }, + &cli.StringFlag{ + Name: "compression", + Usage: "input compression format", + }, }, sharedFlags...), CustomHelpTemplate: selectHelpTemplate, Before: beforeFunc, @@ -191,15 +193,7 @@ func NewSelectCommand() *cli.Command { Usage: "run queries on parquet files", Flags: sharedFlags, CustomHelpTemplate: selectHelpTemplate, - Before: func(c *cli.Context) (err error) { - if c.String("compression") != "" { - err = errors.New("compression is not supported for parquet files") - cmd := commandFromContext(c) - printError(cmd, "select parquet", err) - return err - } - return beforeFunc(c) - }, + Before: beforeFunc, Action: func(c *cli.Context) (err error) { cmd, err := buildSelect(c, "parquet", nil) if err != nil { From 66a20b1b8ef94ceed65a3fc8bbddf6e4ff4dc676 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Tue, 1 Aug 2023 12:15:49 +0300 Subject: [PATCH 39/50] fix: add compression to default fallback --- command/select.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/command/select.go b/command/select.go index 2bc64ec64..9b146cef3 100644 --- a/command/select.go +++ b/command/select.go @@ -204,7 +204,12 @@ func NewSelectCommand() *cli.Command { }, }, }, - Flags: sharedFlags, + Flags: append([]cli.Flag{ + &cli.StringFlag{ + Name: "compression", + Usage: "input compression format", + }, + }, sharedFlags...), Before: func(c *cli.Context) (err error) { if c.Args().Len() == 0 { err = fmt.Errorf("expected source argument") From d89511930a1f28eaf540b7f5d3b5aaa2288af3e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Thu, 3 Aug 2023 23:29:22 +0300 Subject: [PATCH 40/50] refactor: extended examples, update options, default output 
inference --- command/select.go | 35 ++++++++++++++++++++--------------- e2e/select_test.go | 9 +++++---- storage/s3.go | 5 +++-- 3 files changed, 28 insertions(+), 21 deletions(-) diff --git a/command/select.go b/command/select.go index 9b146cef3..f12e49f84 100644 --- a/command/select.go +++ b/command/select.go @@ -30,8 +30,14 @@ Examples: 01. Search for all objects with the foo property set to 'bar' and spit them into stdout > {{.HelpName}} --query "SELECT * FROM S3Object s WHERE s.foo='bar'" "s3://bucket/*" - 02. Select the average price of the avocado and amount sold, set the output format csv + 01. Select the average price of the avocado and amount sold, set the output format as csv > {{.HelpName}} --compression GZIP --output-format csv --query "SELECT s.avg_price, s.quantity FROM S3Object s WHERE s.item='avocado'" "s3://bucket/itemprices" + + 02. Query TSV files + > s5cmd select csv --delimiter=\t --compression GZIP --output-format csv --query "SELECT s.avg_price, s.quantity FROM S3Object s WHERE s.item='avocado'" "s3://bucket/itemprices" + + 03. Select the specific field in a JSON document + > s5cmd select json --structure document --compression GZIP --output-format csv --query "SELECT s.tracking_id FROM s3object[*]['metadata']['.zattrs'] s" "s3://bucket/metadataobj" ` func beforeFunc(c *cli.Context) error { @@ -58,13 +64,19 @@ func buildSelect(c *cli.Context, inputFormat string, inputStructure *string) (cm printError(fullCommand, c.Command.Name, err) return nil, err } + + outputFormat := c.String("output-format") + if c.String("output-format") == "" { + outputFormat = inputFormat + } + cmd = &Select{ src: src, op: c.Command.Name, fullCommand: fullCommand, // flags inputFormat: inputFormat, - outputFormat: c.String("output-format"), + outputFormat: outputFormat, query: c.String("query"), compressionType: c.String("compression"), exclude: c.StringSlice("exclude"), @@ -88,16 +100,9 @@ func NewSelectCommand() *cli.Command { Aliases: []string{"e"}, Usage: "SQL expression to use to select from the objects", }, - &cli.GenericFlag{ + &cli.StringFlag{ Name: "output-format", - Usage: "output format of the result", - Value: &EnumValue{ - Enum: []string{"json", "csv"}, - Default: "json", - ConditionFunction: func(str, target string) bool { - return strings.ToLower(target) == str - }, - }, + Usage: "output format of the result (options: json, csv)", }, &cli.StringSliceFlag{ Name: "exclude", @@ -141,7 +146,7 @@ func NewSelectCommand() *cli.Command { }, &cli.StringFlag{ Name: "compression", - Usage: "input compression format", + Usage: "input compression format (for AWS: GZIP or BZIP2)", }, }, sharedFlags...), CustomHelpTemplate: selectHelpTemplate, @@ -162,7 +167,7 @@ func NewSelectCommand() *cli.Command { Flags: append([]cli.Flag{ &cli.GenericFlag{ Name: "structure", - Usage: "how objects are aligned in the json file", + Usage: "how objects are aligned in the json file, options:[lines, document]", Value: &EnumValue{ Enum: []string{"lines", "document"}, Default: "lines", @@ -173,7 +178,7 @@ func NewSelectCommand() *cli.Command { }, &cli.StringFlag{ Name: "compression", - Usage: "input compression format", + Usage: "input compression format (for AWS: GZIP or BZIP2)", }, }, sharedFlags...), CustomHelpTemplate: selectHelpTemplate, @@ -207,7 +212,7 @@ func NewSelectCommand() *cli.Command { Flags: append([]cli.Flag{ &cli.StringFlag{ Name: "compression", - Usage: "input compression format", + Usage: "input compression format (for AWS: GZIP or BZIP2)", }, }, sharedFlags...), Before: func(c 
*cli.Context) (err error) { diff --git a/e2e/select_test.go b/e2e/select_test.go index 6f7279305..ed17b21cb 100644 --- a/e2e/select_test.go +++ b/e2e/select_test.go @@ -65,7 +65,7 @@ func getFile(n int, inputForm, outputForm, structure string) (string, string) { return input.String(), input.String() case "csv": writer := csv.NewWriter(&expected) - + writer.Comma = []rune(structure)[0] for _, d := range data { if err := writer.Write([]string{d.Line, d.ID, d.Data}); err != nil { panic(err) @@ -111,6 +111,7 @@ func getFile(n int, inputForm, outputForm, structure string) (string, string) { return input.String(), expected.String() case "csv": writer := csv.NewWriter(&expected) + writer.Comma = []rune(structure)[0] writer.Write([]string{"line", "id", "data"}) for _, d := range data { @@ -228,7 +229,7 @@ func TestSelectCommandWithGeneratedFiles(t *testing.T) { }, in: "csv", structure: ",", - out: "json", + out: "csv", }, { name: "csv select with gzip compression default delimiter and output", @@ -240,7 +241,7 @@ func TestSelectCommandWithGeneratedFiles(t *testing.T) { in: "csv", compression: true, structure: ",", - out: "json", + out: "csv", }, { name: "csv select with gzip compression default delimiter and csv output", @@ -275,7 +276,7 @@ func TestSelectCommandWithGeneratedFiles(t *testing.T) { }, in: "csv", structure: "\t", - out: "json", + out: "csv", }, { name: "csv select with custom delimiter and csv output", diff --git a/storage/s3.go b/storage/s3.go index 567210d15..59885de9f 100644 --- a/storage/s3.go +++ b/storage/s3.go @@ -638,7 +638,7 @@ func parseInputSerialization(e eventType, c string, delimiter string) (*s3.Input return s, nil } -func parseOutputSerialization(e eventType, reader io.Reader) (*s3.OutputSerialization, EventStreamDecoder, error) { +func parseOutputSerialization(e eventType, delimiter string, reader io.Reader) (*s3.OutputSerialization, EventStreamDecoder, error) { var s *s3.OutputSerialization var decoder EventStreamDecoder @@ -651,7 +651,7 @@ func parseOutputSerialization(e eventType, reader io.Reader) (*s3.OutputSerializ case csvType: s = &s3.OutputSerialization{ CSV: &s3.CSVOutput{ - FieldDelimiter: aws.String(","), + FieldDelimiter: aws.String(delimiter), }, } decoder = NewCsvDecoder(reader) @@ -685,6 +685,7 @@ func (s *S3) Select(ctx context.Context, url *url.URL, query *SelectQuery, resul outputFormat, decoder, err = parseOutputSerialization( eventType(query.OutputFormat), + query.InputContentStructure, reader, ) From c0ea6b9e1c2cf884b6f844f005cbe74c7ed876d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Sat, 5 Aug 2023 14:59:48 +0300 Subject: [PATCH 41/50] cmd: add use-header option to csv subcmd --- command/select.go | 39 +++++++++++++++++++++++++++++++++------ storage/s3.go | 5 ++++- 2 files changed, 37 insertions(+), 7 deletions(-) diff --git a/command/select.go b/command/select.go index f12e49f84..8efca082d 100644 --- a/command/select.go +++ b/command/select.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "os" + "strconv" "strings" "github.com/hashicorp/go-multierror" @@ -142,18 +143,42 @@ func NewSelectCommand() *cli.Command { &cli.StringFlag{ Name: "delimiter", Usage: "delimiter of the csv file", - Value: ",", + Value: "','", + }, + &cli.StringFlag{ + Name: "use-header", + Usage: "use header of the csv file. 
(options for AWS: IGNORE, NONE, USE)", + Value: "NONE", }, &cli.StringFlag{ Name: "compression", - Usage: "input compression format (for AWS: GZIP or BZIP2)", + Usage: "input compression format (options for AWS: GZIP or BZIP2)", }, }, sharedFlags...), CustomHelpTemplate: selectHelpTemplate, Before: beforeFunc, Action: func(c *cli.Context) (err error) { delimiter := c.String("delimiter") - cmd, err := buildSelect(c, "csv", &delimiter) + quotedDelimiter, err := strconv.Unquote(`"` + delimiter + `"`) + + if err != nil { + printError("select csv", c.Command.Name, err) + return err + } + + cmd, err := buildSelect(c, "csv", "edDelimiter) + + // Since this flag is only specific to csv + // and we set a default value, we tend + // to set it explicitly after building + // the command rather than setting it + // on the shared builder. We also + // show the options, but we don't + // constraint the options that can be + // passed to this flag, since other + // providers might support other options + // that AWS does not support. + cmd.fileHeaderInfo = c.String("use-header") if err != nil { printError(cmd.fullCommand, c.Command.Name, err) return err @@ -167,7 +192,7 @@ func NewSelectCommand() *cli.Command { Flags: append([]cli.Flag{ &cli.GenericFlag{ Name: "structure", - Usage: "how objects are aligned in the json file, options:[lines, document]", + Usage: "how objects are aligned in the json file, options:(lines, document)", Value: &EnumValue{ Enum: []string{"lines", "document"}, Default: "lines", @@ -178,7 +203,7 @@ func NewSelectCommand() *cli.Command { }, &cli.StringFlag{ Name: "compression", - Usage: "input compression format (for AWS: GZIP or BZIP2)", + Usage: "input compression format (options for AWS: GZIP or BZIP2)", }, }, sharedFlags...), CustomHelpTemplate: selectHelpTemplate, @@ -212,7 +237,7 @@ func NewSelectCommand() *cli.Command { Flags: append([]cli.Flag{ &cli.StringFlag{ Name: "compression", - Usage: "input compression format (for AWS: GZIP or BZIP2)", + Usage: "input compression format (options for AWS: GZIP or BZIP2)", }, }, sharedFlags...), Before: func(c *cli.Context) (err error) { @@ -249,6 +274,7 @@ type Select struct { inputFormat string compressionType string inputStructure string + fileHeaderInfo string outputFormat string exclude []string forceGlacierTransfer bool @@ -364,6 +390,7 @@ func (s Select) prepareTask(ctx context.Context, client *storage.S3, url *url.UR Expression: s.query, InputFormat: s.inputFormat, InputContentStructure: s.inputStructure, + FileHeaderInfo: s.fileHeaderInfo, OutputFormat: s.outputFormat, CompressionType: s.compressionType, } diff --git a/storage/s3.go b/storage/s3.go index 59885de9f..16febe215 100644 --- a/storage/s3.go +++ b/storage/s3.go @@ -591,6 +591,7 @@ func (s *S3) Get( type SelectQuery struct { InputFormat string InputContentStructure string + FileHeaderInfo string OutputFormat string ExpressionType string Expression string @@ -605,7 +606,7 @@ const ( parquetType eventType = "parquet" ) -func parseInputSerialization(e eventType, c string, delimiter string) (*s3.InputSerialization, error) { +func parseInputSerialization(e eventType, c string, delimiter string, headerInfo string) (*s3.InputSerialization, error) { var s *s3.InputSerialization switch e { @@ -621,6 +622,7 @@ func parseInputSerialization(e eventType, c string, delimiter string) (*s3.Input case csvType: s = &s3.InputSerialization{ CSV: &s3.CSVInput{ + FileHeaderInfo: aws.String(headerInfo), FieldDelimiter: aws.String(delimiter), }, } @@ -677,6 +679,7 @@ func (s *S3) Select(ctx 
context.Context, url *url.URL, query *SelectQuery, resul eventType(query.InputFormat), query.CompressionType, query.InputContentStructure, + query.FileHeaderInfo, ) if err != nil { From f4eb26e598a16d3d6b22860f956de4ec8fa4d52f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Tue, 8 Aug 2023 10:17:38 +0300 Subject: [PATCH 42/50] fix: help documentation --- command/select.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/command/select.go b/command/select.go index d92672c76..4d4e00fa7 100644 --- a/command/select.go +++ b/command/select.go @@ -28,17 +28,17 @@ Options: {{range .VisibleFlags}}{{.}} {{end}} Examples: - 01. Search for all objects with the foo property set to 'bar' and spit them into stdout - > {{.HelpName}} --query "SELECT * FROM S3Object s WHERE s.foo='bar'" "s3://bucket/*" - 01. Select the average price of the avocado and amount sold, set the output format as csv > {{.HelpName}} --compression GZIP --output-format csv --query "SELECT s.avg_price, s.quantity FROM S3Object s WHERE s.item='avocado'" "s3://bucket/itemprices" 02. Query TSV files > s5cmd select csv --delimiter=\t --compression GZIP --output-format csv --query "SELECT s.avg_price, s.quantity FROM S3Object s WHERE s.item='avocado'" "s3://bucket/itemprices" - 03. Select the specific field in a JSON document + 03. Select a specific field in a JSON document > s5cmd select json --structure document --compression GZIP --output-format csv --query "SELECT s.tracking_id FROM s3object[*]['metadata']['.zattrs'] s" "s3://bucket/metadataobj" + + 04. Query CSV files using their headers to access columns by their name + > s5cmd select csv --use-header=USE --delimiter "\t" --query "SELECT s.name FROM s3object s WHERE s.price='1.99'" "s3://bucket/prices.tsv" ` func beforeFunc(c *cli.Context) error { From d1ff91e0db0af1179cd53a767e296e3c4297a6f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Tue, 8 Aug 2023 12:12:56 +0300 Subject: [PATCH 43/50] fix: remove unnecessary else block --- e2e/util_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/e2e/util_test.go b/e2e/util_test.go index c65a4ef82..00880fad9 100644 --- a/e2e/util_test.go +++ b/e2e/util_test.go @@ -176,15 +176,15 @@ func setup(t *testing.T, options ...option) (*s3.S3, func(...string) icmd.Cmd) { } var cfg *credentialCfg + if region != "" || accessKeyID != "" || secretKey != "" { cfg = &credentialCfg{ AccessKeyID: accessKeyID, SecretKey: secretKey, Region: region, } - } else { - cfg = nil } + client := s3client(t, storage.Options{ Endpoint: endpoint, NoVerifySSL: true, From a815c95648fd5108d556533c4443ae6c3f4726e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Sun, 20 Aug 2023 13:42:09 +0300 Subject: [PATCH 44/50] test: update test suite, extend feature coverage and add missing cases --- command/select.go | 6 +- e2e/select_test.go | 903 +++++++++++------- .../parquet/five_line_simple.parquet | Bin .../parquet/output.csv | 0 .../parquet/output.json | 0 storage/s3.go | 8 +- vendor/gotest.tools/v3/golden/golden.go | 187 ++++ vendor/modules.txt | 1 + 8 files changed, 756 insertions(+), 349 deletions(-) rename e2e/{testfiles => testdata}/parquet/five_line_simple.parquet (100%) rename e2e/{testfiles => testdata}/parquet/output.csv (100%) rename e2e/{testfiles => testdata}/parquet/output.json (100%) create mode 100644 vendor/gotest.tools/v3/golden/golden.go diff --git a/command/select.go b/command/select.go index 4d4e00fa7..1ac2f5c33 100644 --- a/command/select.go 
+++ b/command/select.go @@ -141,9 +141,9 @@ func NewSelectCommand() *cli.Command { Usage: "run queries on csv files", Flags: append([]cli.Flag{ &cli.StringFlag{ - Name: "delimiter", - Usage: "delimiter of the csv file", - Value: "','", + Name: "delimiter for the csv file", + Usage: "delimiter of the csv file. The input and output delimiters are same.", + Value: ",", }, &cli.StringFlag{ Name: "use-header", diff --git a/e2e/select_test.go b/e2e/select_test.go index ed17b21cb..df68a6adb 100644 --- a/e2e/select_test.go +++ b/e2e/select_test.go @@ -7,14 +7,559 @@ import ( jsonpkg "encoding/json" "fmt" "os" + "path/filepath" "testing" - "github.com/google/go-cmp/cmp" + "gotest.tools/v3/assert" + "gotest.tools/v3/golden" "gotest.tools/v3/icmd" ) -// getFile is a helper for creating file contents and expected values -func getFile(n int, inputForm, outputForm, structure string) (string, string) { +func TestSelectCommand(t *testing.T) { + t.Parallel() + + const ( + region = "us-east-1" + accessKeyID = "minioadmin" + secretKey = "minioadmin" + + query = "SELECT * FROM s3object s" + queryWithWhereClause = "SELECT s.id FROM s3object s WHERE s.line='0'" + queryWithDocumentAccess = "SELECT s.obj0.id FROM s3object s" + ) + + endpoint := os.Getenv(s5cmdTestEndpointEnv) + if endpoint == "" { + t.Skipf("skipping the test because %v environment variable is empty", s5cmdTestEndpointEnv) + } + + type testcase struct { + name string + cmd []string + informat string + structure string + compression bool + outformat string + expectedValue string + } + + testcasesByGroup := map[string][]testcase{ + "json": { + { + name: "input:json-lines,output:json-lines", + cmd: []string{ + "select", "json", + "--query", query, + }, + informat: "json", + structure: "lines", + outformat: "json", + }, + { + name: "input:json-lines,output:csv", + cmd: []string{ + "select", "json", + "--output-format", "csv", + "--query", query, + }, + informat: "json", + structure: "lines", + outformat: "csv", + }, + { + name: "input:json-document,output:json-lines", + cmd: []string{ + "select", "json", + "--structure", "document", + "--query", query, + }, + informat: "json", + structure: "document", + outformat: "json", + }, { + name: "input:json-lines,output:json-lines,compression:gzip", + cmd: []string{ + "select", "json", + "--compression", "gzip", + "--query", query, + }, + informat: "json", + compression: true, + structure: "lines", + outformat: "json", + }, + { + name: "input:json-lines,output:csv,compression:gzip", + cmd: []string{ + "select", "json", + "--compression", "gzip", + "--output-format", "csv", + "--query", query, + }, + informat: "json", + compression: true, + structure: "lines", + outformat: "csv", + }, + { + name: "input:json-document,output:json-lines,compression:gzip", + cmd: []string{ + "select", "json", + "--compression", "gzip", + "--structure", "document", + "--query", query, + }, + informat: "json", + compression: true, + structure: "document", + outformat: "json", + }, + { + name: "input:json-lines,output:json-lines,select:with-where", + cmd: []string{ + "select", "json", + "--query", queryWithWhereClause, + }, + informat: "json", + structure: "lines", + outformat: "json", + expectedValue: "{\"id\":\"id0\"}\n", + }, + { + name: "input:json-lines,output:json-lines,compression:gzip,select:with-where", + cmd: []string{ + "select", "json", + "--compression", "gzip", + "--query", queryWithWhereClause, + }, + informat: "json", + compression: true, + structure: "lines", + outformat: "json", + expectedValue: 
"{\"id\":\"id0\"}\n", + }, + { + name: "input:json-lines,output:csv,compression:gzip,select:with-where", + cmd: []string{ + "select", "json", + "--compression", "gzip", + "--output-format", "csv", + "--query", queryWithWhereClause, + }, + informat: "json", + compression: true, + structure: "lines", + outformat: "csv", + expectedValue: "id0\n", + }, + { + name: "input:json-lines,output:csv,compression:gzip,select:with-where", + cmd: []string{ + "select", "json", + "--output-format", "csv", + "--query", queryWithWhereClause, + }, + informat: "json", + structure: "lines", + outformat: "csv", + expectedValue: "id0\n", + }, + { + name: "input:json-document,output:json,select:with-document-access", + cmd: []string{ + "select", "json", + "--structure", "document", + "--query", queryWithDocumentAccess, + }, + informat: "json", + structure: "document", + outformat: "json", + expectedValue: "{\"id\":\"id0\"}\n", + }, + { + name: "input:json-document,output:json,compression:gzip,select:with-document-access", + cmd: []string{ + "select", "json", + "--structure", "document", + "--compression", "gzip", + "--query", queryWithDocumentAccess, + }, + informat: "json", + compression: true, + structure: "document", + outformat: "json", + expectedValue: "{\"id\":\"id0\"}\n", + }, + { + name: "input:json-document,output:csv,select:with-document-access", + cmd: []string{ + "select", "json", + "--structure", "document", + "--output-format", "csv", + "--query", queryWithDocumentAccess, + }, + informat: "json", + structure: "document", + outformat: "json", + expectedValue: "id0\n", + }, + { + name: "input:json-document,output:json,compression:gzip,select:with-document-access", + cmd: []string{ + "select", "json", + "--structure", "document", + "--compression", "gzip", + "--output-format", "csv", + "--query", queryWithDocumentAccess, + }, + informat: "json", + compression: true, + structure: "document", + outformat: "json", + expectedValue: "id0\n", + }, + }, + "csv": { + { + name: "input:csv,output:csv,delimiter:comma", + cmd: []string{ + "select", "csv", + "--query", query, + }, + informat: "csv", + structure: ",", + outformat: "csv", + }, + { + name: "input:csv,output:csv,delimiter:comma,compression:gzip", + cmd: []string{ + "select", "csv", + "--compression", "gzip", + "--query", query, + }, + informat: "csv", + compression: true, + structure: ",", + outformat: "csv", + }, + { + name: "input:csv,output:csv,delimiter:comma,compression:gzip", + cmd: []string{ + "select", "csv", + "--compression", "gzip", + "--output-format", "csv", + "--query", query, + }, + informat: "csv", + compression: true, + structure: ",", + outformat: "csv", + }, + { + name: "input:csv,output:csv,delimiter:comma", + cmd: []string{ + "select", "csv", + "--output-format", "csv", + "--query", query, + }, + informat: "csv", + structure: ",", + outformat: "csv", + }, + { + name: "input:csv,output:csv,delimiter:tab", + cmd: []string{ + "select", "csv", + "--delimiter", "\t", + "--query", query, + }, + informat: "csv", + structure: "\t", + outformat: "csv", + }, + { + name: "input:csv,output:csv,delimiter:tab", + cmd: []string{ + "select", "csv", + "--delimiter", "\t", + "--output-format", "csv", + "--query", query, + }, + informat: "csv", + structure: "\t", + outformat: "csv", + }, + { + name: "input:csv,output:csv,delimiter:comma,select:with-where", + cmd: []string{ + "select", "csv", + "--output-format", "csv", + "--use-header", "USE", + "--query", queryWithWhereClause, + }, + informat: "csv", + structure: ",", + outformat: "csv", + 
expectedValue: "id0\n", + }, + { + name: "input:csv,output:csv,delimiter:comma,compression:gzip,select:with-where", + cmd: []string{ + "select", "csv", + "--output-format", "csv", + "--use-header", "USE", + "--compression", "gzip", + "--query", queryWithWhereClause, + }, + informat: "csv", + compression: true, + structure: ",", + outformat: "csv", + expectedValue: "id0\n", + }, + { + name: "input:csv,output:csv,delimiter:tab,select:with-where", + cmd: []string{ + "select", "csv", + "--output-format", "csv", + "--delimiter", "\t", + "--use-header", "USE", + "--query", queryWithWhereClause, + }, + informat: "csv", + structure: "\t", + outformat: "csv", + expectedValue: "id0\n", + }, + { + name: "input:csv,output:csv,delimiter:delimiter,compression:gzip,select:with-where", + cmd: []string{ + "select", "csv", + "--output-format", "csv", + "--delimiter", "\t", + "--use-header", "USE", + "--compression", "gzip", + "--query", queryWithWhereClause, + }, + informat: "csv", + compression: true, + structure: "\t", + outformat: "csv", + expectedValue: "id0\n", + }, + { + name: "input:csv,output:json,delimiter:comma,select:with-where", + cmd: []string{ + "select", "csv", + "--output-format", "json", + "--use-header", "USE", + "--query", queryWithWhereClause, + }, + informat: "csv", + structure: ",", + outformat: "csv", + expectedValue: "{\"id\":\"id0\"}\n", + }, + { + name: "input:csv,output:json,delimiter:comma,compression:gzip,select:with-where", + cmd: []string{ + "select", "csv", + "--output-format", "json", + "--use-header", "USE", + "--compression", "gzip", + "--query", queryWithWhereClause, + }, + informat: "csv", + compression: true, + structure: ",", + outformat: "csv", + expectedValue: "{\"id\":\"id0\"}\n", + }, + { + name: "input:csv,output:json,delimiter:tab,select:with-where", + cmd: []string{ + "select", "csv", + "--output-format", "json", + "--delimiter", "\t", + "--use-header", "USE", + "--query", queryWithWhereClause, + }, + informat: "csv", + structure: "\t", + outformat: "csv", + expectedValue: "{\"id\":\"id0\"}\n", + }, + { + name: "input:csv,output:json,delimiter:delimiter,compression:gzip,select:with-where", + cmd: []string{ + "select", "csv", + "--output-format", "json", + "--delimiter", "\t", + "--use-header", "USE", + "--compression", "gzip", + "--query", queryWithWhereClause, + }, + informat: "csv", + compression: true, + structure: "\t", + outformat: "csv", + expectedValue: "{\"id\":\"id0\"}\n", + }, + }, + "backwards-compatibility": { + { + name: "input:json-lines,output:json-lines", + cmd: []string{ + "select", + "--query", query, + }, + informat: "json", + structure: "lines", + outformat: "json", + }, + { + name: "input:json-lines,output:json-lines,compression:gzip", + cmd: []string{ + "select", + "--query", query, + "--compression", "gzip", + }, + informat: "json", + compression: true, + structure: "lines", + outformat: "json", + }, + }, + } + for testgroup, testcases := range testcasesByGroup { + testgroup := testgroup + testcases := testcases + t.Run(testgroup, func(t *testing.T) { + for _, tc := range testcases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + bucket := s3BucketFromTestName(t) + const rowcount = 5 + contents, expected := genTestData(t, rowcount, tc.informat, tc.outformat, tc.structure, false) + + if tc.expectedValue != "" { + expected = tc.expectedValue + } + + var src, filename string + if tc.compression { + b := bytes.Buffer{} + gz := gzip.NewWriter(&b) + filename = fmt.Sprintf("file.%s.gz", tc.informat) + src = 
fmt.Sprintf("s3://%s/%s", bucket, filename) + + if _, err := gz.Write([]byte(contents)); err != nil { + t.Errorf("could not compress the input object. error: %v\n", err) + } + + if err := gz.Close(); err != nil { + t.Errorf("could not close the compressor. error: %v\n", err) + } + + contents = b.String() + } else { + filename = fmt.Sprintf("file.%s", tc.informat) + src = fmt.Sprintf("s3://%s/%s", bucket, filename) + } + + s3client, s5cmd := setup(t, withEndpointURL(endpoint), withRegion(region), withAccessKeyID(accessKeyID), withSecretKey(secretKey)) + createBucket(t, s3client, bucket) + putFile(t, s3client, bucket, filename, contents) + + tc.cmd = append(tc.cmd, src) + cmd := s5cmd(tc.cmd...) + + result := icmd.RunCmd(cmd, withEnv("AWS_ACCESS_KEY_ID", accessKeyID), withEnv("AWS_SECRET_ACCESS_KEY", secretKey)) + + result.Assert(t, icmd.Success) + assert.DeepEqual(t, expected, result.Stdout()) + }) + } + }) + } +} + +func TestSelectWithParquet(t *testing.T) { + // NOTE(deniz): We are skipping this test until the image we use in the + // service container releases parquet support for select api. + + // TODO(deniz): When bitnami releases a version that is stable for parquet + // queries, enable this test. + t.Skip() + t.Parallel() + + const ( + region = "us-east-1" + accessKeyID = "minioadmin" + secretKey = "minioadmin" + + query = "SELECT * FROM s3object s" + ) + + endpoint := os.Getenv(s5cmdTestEndpointEnv) + if endpoint == "" { + t.Skipf("skipping the test because %v environment variable is empty", s5cmdTestEndpointEnv) + } + + testcases := []struct { + name string + src string + cmd []string + expected string + }{ + { + name: "in:parquet,output:json", + src: "five_line_simple.parquet", + cmd: []string{ + "select", "parquet", + "--query", query, + }, + expected: "output.json", + }, + { + name: "in:parquet,output,output:csv", + src: "five_line_simple.parquet", + cmd: []string{ + "select", "parquet", + "--output-format", "csv", + "--query", query, + }, + expected: "output.csv", + }, + } + + for _, tc := range testcases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + bucket := s3BucketFromTestName(t) + src := fmt.Sprintf("s3://%s/%s", bucket, tc.src) + tc.cmd = append(tc.cmd, src) + + s3client, s5cmd := setup(t, withEndpointURL(endpoint), withRegion(region), withAccessKeyID(accessKeyID), withSecretKey(secretKey)) + createBucket(t, s3client, bucket) + + input := golden.Get(t, filepath.Join("parquet", tc.src)) + putFile(t, s3client, bucket, tc.src, string(input)) + + cmd := s5cmd(tc.cmd...) 
+ result := icmd.RunCmd(cmd, withEnv("AWS_ACCESS_KEY_ID", accessKeyID), withEnv("AWS_SECRET_ACCESS_KEY", secretKey)) + + result.Assert(t, icmd.Success) + assert.Assert(t, golden.String(result.Stdout(), filepath.Join("parquet", tc.expected))) + }) + } +} + +func genTestData(t *testing.T, rowcount int, informat, outformat, structure string, where bool) (string, string) { + t.Helper() + type row struct { Line string `json:"line"` ID string `json:"id"` @@ -27,7 +572,7 @@ func getFile(n int, inputForm, outputForm, structure string) (string, string) { expected bytes.Buffer ) - for i := 0; i < n; i++ { + for i := 0; i < rowcount; i++ { data = append(data, row{ Line: fmt.Sprintf("%d", i), ID: fmt.Sprintf("id%d", i), @@ -35,7 +580,7 @@ func getFile(n int, inputForm, outputForm, structure string) (string, string) { }) } - switch inputForm { + switch informat { case "json": encoder := jsonpkg.NewEncoder(&input) @@ -48,27 +593,27 @@ func getFile(n int, inputForm, outputForm, structure string) (string, string) { } if err := encoder.Encode(rows); err != nil { - panic(err) + t.Fatal(err) } + return input.String(), input.String() default: for _, d := range data { err := encoder.Encode(d) if err != nil { - panic(err) + t.Fatal(err) } } - switch outputForm { + switch outformat { case "json": return input.String(), input.String() case "csv": writer := csv.NewWriter(&expected) - writer.Comma = []rune(structure)[0] for _, d := range data { if err := writer.Write([]string{d.Line, d.ID, d.Data}); err != nil { - panic(err) + t.Fatal(err) } } @@ -77,9 +622,6 @@ func getFile(n int, inputForm, outputForm, structure string) (string, string) { return input.String(), expected.String() } } - - // edge case - case "csv": writer := csv.NewWriter(&input) // set the delimiter for the input @@ -92,7 +634,7 @@ func getFile(n int, inputForm, outputForm, structure string) (string, string) { writer.Flush() - switch outputForm { + switch outformat { case "json": encoder := jsonpkg.NewEncoder(&expected) encoder.Encode(map[string]string{ @@ -123,335 +665,6 @@ func getFile(n int, inputForm, outputForm, structure string) (string, string) { return input.String(), expected.String() } } - panic("unreachable") -} - -func TestSelectCommandWithGeneratedFiles(t *testing.T) { - t.Parallel() - // credentials are same for all test cases - region := "us-east-1" - accessKeyID := "minioadmin" - secretKey := "minioadmin" - // The query is default for all cases, we want to assert the output - // is as expected after a query. 
- query := "SELECT * FROM s3object s LIMIT 6" - - endpoint := os.Getenv("S5CMD_TEST_ENDPOINT_URL") - - if endpoint == "" { - t.Skipf("skipping the test because S5CMD_TEST_ENDPOINT_URL environment variable is empty") - } - - testcases := []struct { - name string - cmd []string - in string - structure string - compression bool - out string - }{ - // json - { - name: "json-lines select with default input structure and output", - cmd: []string{ - "select", "json", - "--query", query, - }, - in: "json", - structure: "lines", - out: "json", - }, - { - name: "json-lines select with default input structure and csv output", - cmd: []string{ - "select", "json", - "--output-format", "csv", - "--query", query, - }, - in: "json", - structure: "lines", - out: "csv", - }, - { - name: "json-lines select with document input structure and output", - cmd: []string{ - "select", "json", - "--structure", "document", - "--query", query, - }, - in: "json", - structure: "document", - out: "json", - }, { - name: "json-lines select with gzip compression default input structure and output", - cmd: []string{ - "select", "json", - "--compression", "gzip", - "--query", query, - }, - in: "json", - compression: true, - structure: "lines", - out: "json", - }, - { - name: "json-lines select with gzip compression default input structure and csv output", - cmd: []string{ - "select", "json", - "--compression", "gzip", - "--output-format", "csv", - "--query", query, - }, - in: "json", - compression: true, - structure: "lines", - out: "csv", - }, - { - name: "json-lines select with gzip compression document input structure and output", - cmd: []string{ - "select", "json", - "--compression", "gzip", - "--structure", "document", - "--query", query, - }, - in: "json", - compression: true, - structure: "document", - out: "json", - }, - // csv - { - name: "csv select with default delimiter and output", - cmd: []string{ - "select", "csv", - "--query", query, - }, - in: "csv", - structure: ",", - out: "csv", - }, - { - name: "csv select with gzip compression default delimiter and output", - cmd: []string{ - "select", "csv", - "--compression", "gzip", - "--query", query, - }, - in: "csv", - compression: true, - structure: ",", - out: "csv", - }, - { - name: "csv select with gzip compression default delimiter and csv output", - cmd: []string{ - "select", "csv", - "--compression", "gzip", - "--output-format", "csv", - "--query", query, - }, - in: "csv", - compression: true, - structure: ",", - out: "csv", - }, - { - name: "csv select with default delimiter and csv output", - cmd: []string{ - "select", "csv", - "--output-format", "csv", - "--query", query, - }, - in: "csv", - structure: ",", - out: "csv", - }, - { - name: "csv select with custom delimiter and default output", - cmd: []string{ - "select", "csv", - "--delimiter", "\t", - "--query", query, - }, - in: "csv", - structure: "\t", - out: "csv", - }, - { - name: "csv select with custom delimiter and csv output", - cmd: []string{ - "select", "csv", - "--delimiter", "\t", - "--output-format", "csv", - "--query", query, - }, - in: "csv", - structure: "\t", - out: "csv", - }, - { - name: "query json with default fallback", - cmd: []string{ - "select", - "--query", query, - }, - in: "json", - structure: "lines", - out: "json", - }, - { - name: "query compressed json with default fallback", - cmd: []string{ - "select", - "--query", query, - "--compression", "gzip", - }, - in: "json", - compression: true, - structure: "lines", - out: "json", - }, - } - for _, tc := range testcases { - tc 
:= tc - t.Run(tc.name, func(t *testing.T) { - var src, filename string - - bucket := s3BucketFromTestName(t) - contents, expected := getFile(5, tc.in, tc.out, tc.structure) - - if tc.compression { - b := bytes.Buffer{} - gz := gzip.NewWriter(&b) - filename = fmt.Sprintf("file.%s.gz", tc.in) - src = fmt.Sprintf("s3://%s/%s", bucket, filename) - - if _, err := gz.Write([]byte(contents)); err != nil { - t.Errorf("could not compress the input object. error: %v\n", err) - } - - if err := gz.Close(); err != nil { - t.Errorf("could not close the compressor error: %v\n", err) - } - - contents = b.String() - } else { - filename = fmt.Sprintf("file.%s", tc.in) - src = fmt.Sprintf("s3://%s/%s", bucket, filename) - } - - s3client, s5cmd := setup(t, withEndpointURL(endpoint), withRegion(region), withAccessKeyID(accessKeyID), withSecretKey(secretKey)) - - createBucket(t, s3client, bucket) - - tc.cmd = append(tc.cmd, src) - - putFile(t, s3client, bucket, filename, contents) - - cmd := s5cmd(tc.cmd...) - - result := icmd.RunCmd(cmd, withEnv("AWS_ACCESS_KEY_ID", accessKeyID), withEnv("AWS_SECRET_ACCESS_KEY", secretKey)) - - if diff := cmp.Diff(expected, result.Stdout()); diff != "" { - t.Errorf("select command mismatch (-want +got):\n%s", diff) - } - - }) - } -} - -func TestSelectWithParquet(t *testing.T) { - t.Parallel() - // credentials are same for all test cases - region := "us-east-1" - accessKeyID := "minioadmin" - secretKey := "minioadmin" - // The query is default for all cases, we want to assert the output - // is as expected after a query. - query := "SELECT * FROM s3object s LIMIT 6" - - endpoint := os.Getenv("S5CMD_TEST_ENDPOINT_URL") - - if endpoint == "" { - t.Skipf("skipping the test because S5CMD_TEST_ENDPOINT_URL environment variable is empty") - } - - testcases := []struct { - name string - src string - cmd []string - expected string - }{ - { - name: "parquet select with json output", - src: "five_line_simple.parquet", - cmd: []string{ - "select", "parquet", - "--query", query, - }, - expected: "output.json", - }, - { - name: "parquet select with json output", - src: "five_line_simple.parquet", - cmd: []string{ - "select", "parquet", - "--output-format", "csv", - "--query", query, - }, - expected: "output.csv", - }, - } - - for _, tc := range testcases { - tc := tc - t.Run(tc.name, func(t *testing.T) { - var buf bytes.Buffer - var expectedBuf bytes.Buffer - - // change the working directory to ./e2e/testdata/parquet - cwd, err := os.Getwd() - if err != nil { - t.Fatalf("couldn't reach the current working directory to access testfiles. error: %v\n", err) - } - - cwd += "/testfiles/parquet/" - sourceFile, err := os.Open(cwd + tc.src) - if err != nil { - t.Fatalf("couldn't read the parquet file to be queried. error: %v\n", err) - } - defer sourceFile.Close() - - if _, err := sourceFile.Read(buf.Bytes()); err != nil { - t.Fatalf("couldn't write the parquet file to buffer. error: %v\n", err) - } - - expectedFile, err := os.Open(cwd + tc.expected) - if err != nil { - t.Fatalf("couldnt read the output file to be compared against. error: %v\n", err) - } - defer expectedFile.Close() - - if _, err := expectedFile.Read(expectedBuf.Bytes()); err != nil { - t.Fatalf("couldn't write the output file to buffer. 
error: %v\n", err) - } - - // convert the file content to string - expected := expectedBuf.String() - bucket := s3BucketFromTestName(t) - src := fmt.Sprintf("s3://%s/%s", bucket, tc.src) - tc.cmd = append(tc.cmd, src) - - s3client, s5cmd := setup(t, withEndpointURL(endpoint), withRegion(region), withAccessKeyID(accessKeyID), withSecretKey(secretKey)) - createBucket(t, s3client, bucket) - putFile(t, s3client, bucket, tc.src, buf.String()) - - cmd := s5cmd(tc.cmd...) - result := icmd.RunCmd(cmd, withEnv("AWS_ACCESS_KEY_ID", accessKeyID), withEnv("AWS_SECRET_ACCESS_KEY", secretKey)) - if diff := cmp.Diff(expected, result.Stdout()); diff != "" { - t.Errorf("select command mismatch (-want +got):\n%s", diff) - } - }) - } + t.Fatal("unreachable") + return "", "" } diff --git a/e2e/testfiles/parquet/five_line_simple.parquet b/e2e/testdata/parquet/five_line_simple.parquet similarity index 100% rename from e2e/testfiles/parquet/five_line_simple.parquet rename to e2e/testdata/parquet/five_line_simple.parquet diff --git a/e2e/testfiles/parquet/output.csv b/e2e/testdata/parquet/output.csv similarity index 100% rename from e2e/testfiles/parquet/output.csv rename to e2e/testdata/parquet/output.csv diff --git a/e2e/testfiles/parquet/output.json b/e2e/testdata/parquet/output.json similarity index 100% rename from e2e/testfiles/parquet/output.json rename to e2e/testdata/parquet/output.json diff --git a/storage/s3.go b/storage/s3.go index 16febe215..fde0a45fd 100644 --- a/storage/s3.go +++ b/storage/s3.go @@ -622,8 +622,8 @@ func parseInputSerialization(e eventType, c string, delimiter string, headerInfo case csvType: s = &s3.InputSerialization{ CSV: &s3.CSVInput{ - FileHeaderInfo: aws.String(headerInfo), FieldDelimiter: aws.String(delimiter), + FileHeaderInfo: aws.String(headerInfo), }, } if c != "" { @@ -686,6 +686,12 @@ func (s *S3) Select(ctx context.Context, url *url.URL, query *SelectQuery, resul return err } + // set the delimiter to ','. Otherwise, delimiter is set to "lines" or "document" + // for json queries. + if query.InputFormat == string(jsonType) && query.OutputFormat == string(csvType) { + query.InputContentStructure = "," + } + outputFormat, decoder, err = parseOutputSerialization( eventType(query.OutputFormat), query.InputContentStructure, diff --git a/vendor/gotest.tools/v3/golden/golden.go b/vendor/gotest.tools/v3/golden/golden.go new file mode 100644 index 000000000..54102dd28 --- /dev/null +++ b/vendor/gotest.tools/v3/golden/golden.go @@ -0,0 +1,187 @@ +/*Package golden provides tools for comparing large mutli-line strings. + +Golden files are files in the ./testdata/ subdirectory of the package under test. +Golden files can be automatically updated to match new values by running +`go test pkgname -test.update-golden`. To ensure the update is correct +compare the diff of the old expected value to the new expected value. +*/ +package golden // import "gotest.tools/v3/golden" + +import ( + "bytes" + "flag" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "gotest.tools/v3/assert" + "gotest.tools/v3/assert/cmp" + "gotest.tools/v3/internal/format" +) + +var flagUpdate = flag.Bool("test.update-golden", false, "update golden file") + +type helperT interface { + Helper() +} + +// NormalizeCRLFToLF enables end-of-line normalization for actual values passed +// to Assert and String, as well as the values saved to golden files with +// -test.update-golden. +// +// Defaults to true. If you use the core.autocrlf=true git setting on windows +// you will need to set this to false. 
+// +// The value may be set to false by setting GOTESTTOOLS_GOLDEN_NormalizeCRLFToLF=false +// in the environment before running tests. +// +// The default value may change in a future major release. +var NormalizeCRLFToLF = os.Getenv("GOTESTTOOLS_GOLDEN_NormalizeCRLFToLF") != "false" + +// FlagUpdate returns true when the -test.update-golden flag has been set. +func FlagUpdate() bool { + return *flagUpdate +} + +// Open opens the file in ./testdata +func Open(t assert.TestingT, filename string) *os.File { + if ht, ok := t.(helperT); ok { + ht.Helper() + } + f, err := os.Open(Path(filename)) + assert.NilError(t, err) + return f +} + +// Get returns the contents of the file in ./testdata +func Get(t assert.TestingT, filename string) []byte { + if ht, ok := t.(helperT); ok { + ht.Helper() + } + expected, err := ioutil.ReadFile(Path(filename)) + assert.NilError(t, err) + return expected +} + +// Path returns the full path to a file in ./testdata +func Path(filename string) string { + if filepath.IsAbs(filename) { + return filename + } + return filepath.Join("testdata", filename) +} + +func removeCarriageReturn(in []byte) []byte { + if !NormalizeCRLFToLF { + return in + } + return bytes.Replace(in, []byte("\r\n"), []byte("\n"), -1) +} + +// Assert compares actual to the expected value in the golden file. +// +// Running `go test pkgname -test.update-golden` will write the value of actual +// to the golden file. +// +// This is equivalent to assert.Assert(t, String(actual, filename)) +func Assert(t assert.TestingT, actual string, filename string, msgAndArgs ...interface{}) { + if ht, ok := t.(helperT); ok { + ht.Helper() + } + assert.Assert(t, String(actual, filename), msgAndArgs...) +} + +// String compares actual to the contents of filename and returns success +// if the strings are equal. +// +// Running `go test pkgname -test.update-golden` will write the value of actual +// to the golden file. +// +// Any \r\n substrings in actual are converted to a single \n character +// before comparing it to the expected string. When updating the golden file the +// normalized version will be written to the file. This allows Windows to use +// the same golden files as other operating systems. +func String(actual string, filename string) cmp.Comparison { + return func() cmp.Result { + actualBytes := removeCarriageReturn([]byte(actual)) + result, expected := compare(actualBytes, filename) + if result != nil { + return result + } + diff := format.UnifiedDiff(format.DiffConfig{ + A: string(expected), + B: string(actualBytes), + From: "expected", + To: "actual", + }) + return cmp.ResultFailure("\n" + diff + failurePostamble(filename)) + } +} + +func failurePostamble(filename string) string { + return fmt.Sprintf(` + +You can run 'go test . -test.update-golden' to automatically update %s to the new expected value.' +`, Path(filename)) +} + +// AssertBytes compares actual to the expected value in the golden. +// +// Running `go test pkgname -test.update-golden` will write the value of actual +// to the golden file. +// +// This is equivalent to assert.Assert(t, Bytes(actual, filename)) +func AssertBytes( + t assert.TestingT, + actual []byte, + filename string, + msgAndArgs ...interface{}, +) { + if ht, ok := t.(helperT); ok { + ht.Helper() + } + assert.Assert(t, Bytes(actual, filename), msgAndArgs...) +} + +// Bytes compares actual to the contents of filename and returns success +// if the bytes are equal. 
+// +// Running `go test pkgname -test.update-golden` will write the value of actual +// to the golden file. +func Bytes(actual []byte, filename string) cmp.Comparison { + return func() cmp.Result { + result, expected := compare(actual, filename) + if result != nil { + return result + } + msg := fmt.Sprintf("%v (actual) != %v (expected)", actual, expected) + return cmp.ResultFailure(msg + failurePostamble(filename)) + } +} + +func compare(actual []byte, filename string) (cmp.Result, []byte) { + if err := update(filename, actual); err != nil { + return cmp.ResultFromError(err), nil + } + expected, err := ioutil.ReadFile(Path(filename)) + if err != nil { + return cmp.ResultFromError(err), nil + } + if bytes.Equal(expected, actual) { + return cmp.ResultSuccess, nil + } + return nil, expected +} + +func update(filename string, actual []byte) error { + if dir := filepath.Dir(filename); dir != "." { + if err := os.MkdirAll(dir, 0755); err != nil { + return err + } + } + if *flagUpdate { + return ioutil.WriteFile(Path(filename), actual, 0644) + } + return nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 53c0f341c..0fffb8458 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -169,6 +169,7 @@ gopkg.in/mgo.v2/internal/json gotest.tools/v3/assert gotest.tools/v3/assert/cmp gotest.tools/v3/fs +gotest.tools/v3/golden gotest.tools/v3/icmd gotest.tools/v3/internal/assert gotest.tools/v3/internal/cleanup From b5ace8d44feb356575ed2d7d1a473b6b92e30e1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C4=B0brahim=20G=C3=BCng=C3=B6r?= Date: Mon, 21 Aug 2023 15:13:04 +0300 Subject: [PATCH 45/50] Update command/select.go --- command/select.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/command/select.go b/command/select.go index 1ac2f5c33..860b245c0 100644 --- a/command/select.go +++ b/command/select.go @@ -29,7 +29,7 @@ Options: {{end}} Examples: 01. Select the average price of the avocado and amount sold, set the output format as csv - > {{.HelpName}} --compression GZIP --output-format csv --query "SELECT s.avg_price, s.quantity FROM S3Object s WHERE s.item='avocado'" "s3://bucket/itemprices" + > {{.HelpName}} --use-header USE --query "SELECT s.avg_price, s.quantity FROM S3Object s WHERE s.item='avocado'" "s3://bucket/prices.csv" 02. Query TSV files > s5cmd select csv --delimiter=\t --compression GZIP --output-format csv --query "SELECT s.avg_price, s.quantity FROM S3Object s WHERE s.item='avocado'" "s3://bucket/itemprices" From 00bbc5b3f0fc550d6ce2f6ce83002e9f8899466d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C4=B0brahim=20G=C3=BCng=C3=B6r?= Date: Mon, 21 Aug 2023 15:13:15 +0300 Subject: [PATCH 46/50] Update command/select.go --- command/select.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/command/select.go b/command/select.go index 860b245c0..c80db446f 100644 --- a/command/select.go +++ b/command/select.go @@ -35,7 +35,7 @@ Examples: > s5cmd select csv --delimiter=\t --compression GZIP --output-format csv --query "SELECT s.avg_price, s.quantity FROM S3Object s WHERE s.item='avocado'" "s3://bucket/itemprices" 03. Select a specific field in a JSON document - > s5cmd select json --structure document --compression GZIP --output-format csv --query "SELECT s.tracking_id FROM s3object[*]['metadata']['.zattrs'] s" "s3://bucket/metadataobj" + > s5cmd select json --structure document --query "SELECT s.tracking_id FROM s3object[*]['metadata']['.zattrs'] s" "s3://bucket/metadata.json" 04. 
Query CSV files using their headers to access columns by their name > s5cmd select csv --use-header=USE --delimiter "\t" --query "SELECT s.name FROM s3object s WHERE s.price='1.99'" "s3://bucket/prices.tsv" From a0e3dc8e849b1c5b061408fc620143b7ded22597 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C4=B0brahim=20G=C3=BCng=C3=B6r?= Date: Mon, 21 Aug 2023 15:13:24 +0300 Subject: [PATCH 47/50] Update command/select.go --- command/select.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/command/select.go b/command/select.go index c80db446f..3918fe041 100644 --- a/command/select.go +++ b/command/select.go @@ -32,7 +32,7 @@ Examples: > {{.HelpName}} --use-header USE --query "SELECT s.avg_price, s.quantity FROM S3Object s WHERE s.item='avocado'" "s3://bucket/prices.csv" 02. Query TSV files - > s5cmd select csv --delimiter=\t --compression GZIP --output-format csv --query "SELECT s.avg_price, s.quantity FROM S3Object s WHERE s.item='avocado'" "s3://bucket/itemprices" + > s5cmd select csv --delimiter=\t --use-header USE --query "SELECT s.avg_price, s.quantity FROM S3Object s WHERE s.item='avocado'" "s3://bucket/prices.tsv" 03. Select a specific field in a JSON document > s5cmd select json --structure document --query "SELECT s.tracking_id FROM s3object[*]['metadata']['.zattrs'] s" "s3://bucket/metadata.json" From 4dd41238a0eb0686200c92e4a2511ba871fd85d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C4=B0brahim=20G=C3=BCng=C3=B6r?= Date: Mon, 21 Aug 2023 15:13:34 +0300 Subject: [PATCH 48/50] Update command/select.go --- command/select.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/command/select.go b/command/select.go index 3918fe041..f2c9fd6bd 100644 --- a/command/select.go +++ b/command/select.go @@ -142,7 +142,7 @@ func NewSelectCommand() *cli.Command { Flags: append([]cli.Flag{ &cli.StringFlag{ Name: "delimiter for the csv file", - Usage: "delimiter of the csv file. The input and output delimiters are same.", + Usage: "delimiter of the csv file.", Value: ",", }, &cli.StringFlag{ From 71ef73a8299d72c4f8747590818c2f605e099897 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Mon, 21 Aug 2023 15:45:26 +0300 Subject: [PATCH 49/50] doc: add comment about the operation --- command/select.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/command/select.go b/command/select.go index f2c9fd6bd..1c5758f42 100644 --- a/command/select.go +++ b/command/select.go @@ -159,6 +159,9 @@ func NewSelectCommand() *cli.Command { Before: beforeFunc, Action: func(c *cli.Context) (err error) { delimiter := c.String("delimiter") + // We are doing this because the delimiters that are special characters + // are automatically escaped by go. We quote/unquote to capture the actual + // delimiter. See: https://stackoverflow.com/questions/59952721/how-to-read-t-as-tab-in-golang quotedDelimiter, err := strconv.Unquote(`"` + delimiter + `"`) if err != nil { From f1aef2451b75b6f8e40e24108345666a58f12ff3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deniz=20S=C3=BCrmeli?= Date: Mon, 21 Aug 2023 17:09:45 +0300 Subject: [PATCH 50/50] chore: update examples --- command/select.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/command/select.go b/command/select.go index 1c5758f42..4fc000020 100644 --- a/command/select.go +++ b/command/select.go @@ -29,16 +29,16 @@ Options: {{end}} Examples: 01. 
Select the average price of the avocado and amount sold, set the output format as csv
-		 > {{.HelpName}} --use-header USE --query "SELECT s.avg_price, s.quantity FROM S3Object s WHERE s.item='avocado'" "s3://bucket/prices.csv"
+		 > s5cmd select csv --use-header USE --query "SELECT s.avg_price, s.quantity FROM S3Object s WHERE s.item='avocado'" "s3://bucket/prices.csv"
 
 	02. Query TSV files
 		 > s5cmd select csv --delimiter=\t --use-header USE --query "SELECT s.avg_price, s.quantity FROM S3Object s WHERE s.item='avocado'" "s3://bucket/prices.tsv"
 
 	03. Select a specific field in a JSON document
 		 > s5cmd select json --structure document --query "SELECT s.tracking_id FROM s3object[*]['metadata']['.zattrs'] s" "s3://bucket/metadata.json"
-
-	04. Query CSV files using their headers to access columns by their name
-		 > s5cmd select csv --use-header=USE --delimiter "\t" --query "SELECT s.name FROM s3object s WHERE s.price='1.99'" "s3://bucket/prices.tsv"
+
+	04. Query files that contain lines of JSON objects
+		 > s5cmd select json --query "SELECT s.id FROM s3object s WHERE s.lineNumber = 1"
 `

func beforeFunc(c *cli.Context) error {